hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---|
bf1592dfd0dba44e4676cc18785083ddfa1bf3b3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020 Savely Pototsky (SavaLione)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* Based on: */
/******************************************************************************
* Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
*
* Code and text by Sean Baxter, NVIDIA Research
* See http://nvlabs.github.io/moderngpu for repository and documentation.
*
******************************************************************************/
#include <newmoderngpu/util/mgpucontext.h>
namespace mgpu {
////////////////////////////////////////////////////////////////////////////////
// CudaTimer
void CudaTimer::Start() {
hipEventRecord(start);
hipDeviceSynchronize();
}
double CudaTimer::Split() {
hipEventRecord(end);
hipDeviceSynchronize();
float t;
hipEventElapsedTime(&t, start, end);
start.Swap(end);
return (t / 1000.0);
}
double CudaTimer::Throughput(int count, int numIterations) {
double elapsed = Split();
return (double)numIterations * count / elapsed;
}
////////////////////////////////////////////////////////////////////////////////
// CudaDevice
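// Empty kernel: querying its function attributes (see GetByOrdinal below)
// reveals the PTX version this module was compiled with.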
__global__ void KernelVersionShim() { }
struct DeviceGroup {
int numCudaDevices;
CudaDevice** cudaDevices;
DeviceGroup() {
numCudaDevices = -1;
cudaDevices = 0;
}
int GetDeviceCount() {
if(-1 == numCudaDevices) {
hipError_t error = hipGetDeviceCount(&numCudaDevices);
if(hipSuccess != error || numCudaDevices <= 0) {
fprintf(stderr, "ERROR ENUMERATING CUDA DEVICES.\nExiting.\n");
exit(0);
}
cudaDevices = new CudaDevice*[numCudaDevices];
memset(cudaDevices, 0, sizeof(CudaDevice*) * numCudaDevices);
}
return numCudaDevices;
}
CudaDevice* GetByOrdinal(int ordinal) {
if(ordinal >= GetDeviceCount()) return 0;
if(!cudaDevices[ordinal]) {
// Retrieve the device properties.
CudaDevice* device = cudaDevices[ordinal] = new CudaDevice;
device->_ordinal = ordinal;
hipError_t error = hipGetDeviceProperties(&device->_prop,
ordinal);
if(hipSuccess != error) {
fprintf(stderr, "FAILURE TO CREATE CUDA DEVICE %d\n", ordinal);
exit(0);
}
// Get the compiler version for this device.
hipSetDevice(ordinal);
hipFuncAttributes attr;
error = hipFuncGetAttributes(&attr, KernelVersionShim);
if(hipSuccess == error)
device->_ptxVersion = 10 * attr.ptxVersion;
else {
printf("NOT COMPILED WITH COMPATIBLE PTX VERSION FOR DEVICE"
" %d\n", ordinal);
// The module wasn't compiled with support for this device.
device->_ptxVersion = 0;
}
}
return cudaDevices[ordinal];
}
~DeviceGroup() {
if(cudaDevices) {
for(int i = 0; i < numCudaDevices; ++i)
delete cudaDevices[i];
delete [] cudaDevices;
}
hipDeviceReset();
}
};
std::auto_ptr<DeviceGroup> deviceGroup;
int CudaDevice::DeviceCount() {
if(!deviceGroup.get())
deviceGroup.reset(new DeviceGroup);
return deviceGroup->GetDeviceCount();
}
CudaDevice& CudaDevice::ByOrdinal(int ordinal) {
if(ordinal < 0 || ordinal >= DeviceCount()) {
fprintf(stderr, "CODE REQUESTED INVALID CUDA DEVICE %d\n", ordinal);
exit(0);
}
return *deviceGroup->GetByOrdinal(ordinal);
}
CudaDevice& CudaDevice::Selected() {
int ordinal;
hipError_t error = hipGetDevice(&ordinal);
if(hipSuccess != error) {
fprintf(stderr, "ERROR RETRIEVING CUDA DEVICE ORDINAL\n");
exit(0);
}
return ByOrdinal(ordinal);
}
void CudaDevice::SetActive() {
hipError_t error = hipSetDevice(_ordinal);
if(hipSuccess != error) {
fprintf(stderr, "ERROR SETTING CUDA DEVICE TO ORDINAL %d\n", _ordinal);
exit(0);
}
}
std::string CudaDevice::DeviceString() const {
size_t freeMem, totalMem;
hipError_t error = hipMemGetInfo(&freeMem, &totalMem);
if(hipSuccess != error) {
fprintf(stderr, "ERROR RETRIEVING MEM INFO FOR CUDA DEVICE %d\n",
_ordinal);
exit(0);
}
double memBandwidth = (_prop.memoryClockRate * 1000.0) *
(_prop.memoryBusWidth / 8 * 2) / 1.0e9;
std::string s = stringprintf(
"%s : %8.3lf Mhz (Ordinal %d)\n"
"%d SMs enabled. Compute Capability sm_%d%d\n"
"FreeMem: %6dMB TotalMem: %6dMB %2d-bit pointers.\n"
"Mem Clock: %8.3lf Mhz x %d bits (%5.1lf GB/s)\n"
"ECC %s\n\n",
_prop.name, _prop.clockRate / 1000.0, _ordinal,
_prop.multiProcessorCount, _prop.major, _prop.minor,
(int)(freeMem / (1<< 20)), (int)(totalMem / (1<< 20)), 8 * sizeof(int*),
_prop.memoryClockRate / 1000.0, _prop.memoryBusWidth, memBandwidth,
_prop.ECCEnabled ? "Enabled" : "Disabled");
return s;
}
////////////////////////////////////////////////////////////////////////////////
// CudaContext
struct ContextGroup {
CudaContext** standardContexts;
int numDevices;
ContextGroup() {
numDevices = CudaDevice::DeviceCount();
standardContexts = new CudaContext*[numDevices];
memset(standardContexts, 0, sizeof(CudaContext*) * numDevices);
}
CudaContext* GetByOrdinal(int ordinal) {
if(!standardContexts[ordinal]) {
CudaDevice& device = CudaDevice::ByOrdinal(ordinal);
standardContexts[ordinal] = new CudaContext(device, false, true);
}
return standardContexts[ordinal];
}
~ContextGroup() {
if(standardContexts) {
for(int i = 0; i < numDevices; ++i)
delete standardContexts[i];
delete [] standardContexts;
}
}
};
std::auto_ptr<ContextGroup> contextGroup;
CudaContext::CudaContext(CudaDevice& device, bool newStream, bool standard) :
_event(hipEventDisableTiming /*| hipEventBlockingSync */),
_stream(0), _noRefCount(standard), _pageLocked(0) {
// Create an allocator.
if(standard)
_alloc.reset(new CudaAllocSimple(device));
else
_alloc = CreateDefaultAlloc(device);
if(newStream) hipStreamCreate(&_stream);
_ownStream = newStream;
// Allocate 4KB of page-locked memory.
hipError_t error = hipHostMalloc((void**)&_pageLocked, 4096);
// Allocate an auxiliary stream.
error = hipStreamCreate(&_auxStream);
}
CudaContext::~CudaContext() {
if(_pageLocked)
hipHostFree(_pageLocked);
if(_ownStream && _stream)
hipStreamDestroy(_stream);
if(_auxStream)
hipStreamDestroy(_auxStream);
}
AllocPtr CudaContext::CreateDefaultAlloc(CudaDevice& device) {
intrusive_ptr<CudaAllocBuckets> alloc(new CudaAllocBuckets(device));
size_t freeMem, totalMem;
hipError_t error = hipMemGetInfo(&freeMem, &totalMem);
if(hipSuccess != error) {
fprintf(stderr, "ERROR RETRIEVING MEM INFO FOR CUDA DEVICE %d\n",
device.Ordinal());
exit(0);
}
// Maintain a buffer of 128MB with max objects of 64MB.
alloc->SetCapacity(128<< 20, 64<< 20);
return AllocPtr(alloc.get());
}
CudaContext& CudaContext::StandardContext(int ordinal) {
bool setActive = -1 != ordinal;
if(-1 == ordinal) {
hipError_t error = hipGetDevice(&ordinal);
if(hipSuccess != error) {
fprintf(stderr, "ERROR RETRIEVING CUDA DEVICE ORDINAL\n");
exit(0);
}
}
int numDevices = CudaDevice::DeviceCount();
if(ordinal < 0 || ordinal >= numDevices) {
fprintf(stderr, "CODE REQUESTED INVALID CUDA DEVICE %d\n", ordinal);
exit(0);
}
if(!contextGroup.get())
contextGroup.reset(new ContextGroup);
CudaContext& context = //*contextGroup->standardContexts[ordinal];
*contextGroup->GetByOrdinal(ordinal);
if(!context.PTXVersion()) {
fprintf(stderr, "This CUDA executable was not compiled with support"
" for device %d (sm_%2d)\n", ordinal, context.ArchVersion() / 10);
exit(0);
}
if(setActive) context.SetActive();
return context;
}
ContextPtr CreateCudaDevice(int ordinal) {
CudaDevice& device = CudaDevice::ByOrdinal(ordinal);
ContextPtr context(new CudaContext(device, false, false));
return context;
}
ContextPtr CreateCudaDevice(int argc, char** argv, bool printInfo) {
int ordinal = 0;
if(argc >= 2 && !sscanf(argv[1], "%d", &ordinal)) {
fprintf(stderr, "INVALID COMMAND LINE ARGUMENT - NOT A CUDA ORDINAL\n");
exit(0);
}
ContextPtr context = CreateCudaDevice(ordinal);
if(!context->PTXVersion()) {
fprintf(stderr, "This CUDA executable was not compiled with support"
" for device %d (sm_%2d)\n", ordinal, context->ArchVersion() / 10);
exit(0);
}
context->SetActive();
if(printInfo)
printf("%s\n", context->Device().DeviceString().c_str());
return context;
}
ContextPtr CreateCudaDeviceStream(int ordinal) {
ContextPtr context(new CudaContext(
CudaDevice::ByOrdinal(ordinal), true, false));
return context;
}
ContextPtr CreateCudaDeviceStream(int argc, char** argv, bool printInfo) {
int ordinal = 0;
if(argc >= 2 && !sscanf(argv[1], "%d", &ordinal)) {
fprintf(stderr, "INVALID COMMAND LINE ARGUMENT - NOT A CUDA ORDINAL\n");
exit(0);
}
ContextPtr context = CreateCudaDeviceStream(ordinal);
if(!context->PTXVersion()) {
fprintf(stderr, "This CUDA executable was not compiled with support"
" for device %d (sm_%2d)\n", ordinal, context->ArchVersion() / 10);
exit(0);
}
context->SetActive();
if(printInfo)
printf("%s\n", context->Device().DeviceString().c_str());
return context;
}
ContextPtr CreateCudaDeviceAttachStream(int ordinal, hipStream_t stream) {
ContextPtr context(new CudaContext(
CudaDevice::ByOrdinal(ordinal), false, false));
context->_stream = stream;
return context;
}
ContextPtr CreateCudaDeviceAttachStream(hipStream_t stream) {
int ordinal;
hipGetDevice(&ordinal);
return CreateCudaDeviceAttachStream(ordinal, stream);
}
////////////////////////////////////////////////////////////////////////////////
// CudaAllocSimple
hipError_t CudaAllocSimple::Malloc(size_t size, void** p) {
hipError_t error = hipSuccess;
*p = 0;
if(size) error = hipMalloc(p, size);
if(hipSuccess != error) {
printf("CUDA MALLOC ERROR %d\n", error);
exit(0);
}
return error;
}
bool CudaAllocSimple::Free(void* p) {
hipError_t error = hipSuccess;
if(p) error = hipFree(p);
return hipSuccess == error;
}
////////////////////////////////////////////////////////////////////////////////
// CudaAllocBuckets
CudaAllocBuckets::CudaAllocBuckets(CudaDevice& device) : CudaAlloc(device) {
_maxObjectSize = _capacity = _allocated = _committed = 0;
_counter = 0;
}
CudaAllocBuckets::~CudaAllocBuckets() {
SetCapacity(0, 0);
assert(!_allocated);
}
bool CudaAllocBuckets::SanityCheck() const {
// Iterate through all allocated objects and verify sizes.
size_t allocatedCount = 0, committedCount = 0;
for(AddressMap::const_iterator i = _addressMap.begin();
i != _addressMap.end(); ++i) {
int bucket = i->second->bucket;
size_t size = (bucket < NumBuckets) ? BucketSizes[bucket] : 0;
allocatedCount += size;
if(i->second->priority == _priorityMap.end())
committedCount += size;
}
return allocatedCount == _allocated && committedCount == _committed;
}
hipError_t CudaAllocBuckets::Malloc(size_t size, void** p) {
// Locate the bucket index and adjust the size of the allocation to the
// bucket size.
size_t allocSize = size;
size_t commitSize = 0;
int bucket = LocateBucket(size);
if(bucket < NumBuckets)
allocSize = commitSize = BucketSizes[bucket];
// Peel off an already-allocated node and reuse it.
MemList& list = _memLists[bucket];
if(list.size() && list.front().priority != _priorityMap.end()) {
MemList::iterator memIt = list.begin();
_priorityMap.erase(memIt->priority);
memIt->priority = _priorityMap.end();
list.splice(list.end(), list, memIt);
_committed += commitSize;
*p = memIt->address->first;
return hipSuccess;
}
// Shrink if this allocation would put us over the limit.
Compact(commitSize);
hipError_t error = hipSuccess;
*p = 0;
if(size) error = hipMalloc(p, allocSize);
while((hipErrorMemoryAllocation == error) && (_committed < _allocated)) {
SetCapacity(_capacity - _capacity / 10, _maxObjectSize);
error = hipMalloc(p, allocSize);
}
if(hipSuccess != error) return error;
MemList::iterator memIt =
_memLists[bucket].insert(_memLists[bucket].end(), MemNode());
memIt->bucket = bucket;
memIt->address = _addressMap.insert(std::make_pair(*p, memIt)).first;
memIt->priority = _priorityMap.end();
_allocated += commitSize;
_committed += commitSize;
assert(SanityCheck());
return hipSuccess;
}
bool CudaAllocBuckets::Free(void* p) {
AddressMap::iterator it = _addressMap.find(p);
if(it == _addressMap.end()) {
// If the pointer was not found in the address map, hipFree it anyway
// but return false.
if(p) hipFree(p);
return false;
}
// Because we're freeing a page, it had better not be in the priority queue.
MemList::iterator memIt = it->second;
assert(memIt->priority == _priorityMap.end());
// Always free allocations larger than the largest bucket
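// Priority key = _counter - bucket: nodes freed longest ago (and larger
// buckets, via the -bucket offset) get the smallest keys, so Compact()
// evicts them first.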
it->second->priority = _priorityMap.insert(
std::make_pair(_counter++ - memIt->bucket, memIt));
// Freed nodes are moved to the front, committed nodes are moved to the
// end.
int bucket = memIt->bucket;
size_t commitSize = (bucket < NumBuckets) ? BucketSizes[bucket] : 0;
MemList& list = _memLists[bucket];
list.splice(list.begin(), list, memIt);
_committed -= commitSize;
// Delete data that's not cached.
if(NumBuckets == bucket)
FreeNode(memIt);
Compact(0);
return true;
}
void CudaAllocBuckets::Clear() {
Compact(_allocated);
}
void CudaAllocBuckets::FreeNode(CudaAllocBuckets::MemList::iterator memIt) {
if(memIt->address->first) hipFree(memIt->address->first);
int bucket = memIt->bucket;
size_t commitSize = (bucket < NumBuckets) ? BucketSizes[bucket] : 0;
_addressMap.erase(memIt->address);
if(memIt->priority != _priorityMap.end())
_priorityMap.erase(memIt->priority);
else
_committed -= commitSize;
_allocated -= commitSize;
_memLists[bucket].erase(memIt);
assert(SanityCheck());
}
void CudaAllocBuckets::Compact(size_t extra) {
while(_allocated + extra > _capacity && _allocated > _committed) {
// Walk the priority queue from beginning to end removing nodes.
MemList::iterator memIt = _priorityMap.begin()->second;
FreeNode(memIt);
}
}
// Exponentially spaced buckets.
const size_t CudaAllocBuckets::BucketSizes[CudaAllocBuckets::NumBuckets] = {
256, 512, 1024, 2048, 4096, 8192,
12288, 16384, 24576, 32768, 49152, 65536,
98304, 131072, 174848, 218624, 262144, 349696,
436992, 524288, 655360, 786432, 917504, 1048576,
1310720, 1572864, 1835008, 2097152, 2516736, 2936064,
3355648, 3774976, 4194304, 4893440, 5592576, 6291456,
6990592, 7689728, 8388608, 9786880, 11184896, 12582912,
13981184, 15379200, 16777216, 18874368, 20971520, 23068672,
25165824, 27262976, 29360128, 31457280, 33554432, 36910080,
40265472, 43620864, 46976256, 50331648, 53687296, 57042688,
60398080, 63753472, 67108864, 72701440, 78293760, 83886080,
89478656, 95070976, 100663296, 106255872, 111848192, 117440512,
123033088, 128625408, 134217728, 143804928, 153391872, 162978816,
172565760, 182152704, 191739648, 201326592, 210913792, 220500736
};
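// Map a request size to the smallest bucket that can hold it. Requests larger
// than _maxObjectSize or the largest bucket return NumBuckets and are
// allocated exactly, bypassing the cache.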
int CudaAllocBuckets::LocateBucket(size_t size) const {
if(size > _maxObjectSize || size > BucketSizes[NumBuckets - 1])
return NumBuckets;
return (int)(std::lower_bound(BucketSizes, BucketSizes + NumBuckets, size) -
BucketSizes);
}
} // namespace mgpu
|
bf1592dfd0dba44e4676cc18785083ddfa1bf3b3.cu
|
/*
* Copyright (c) 2020 Savely Pototsky (SavaLione)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* Based on: */
/******************************************************************************
* Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
*
* Code and text by Sean Baxter, NVIDIA Research
* See http://nvlabs.github.io/moderngpu for repository and documentation.
*
******************************************************************************/
#include <newmoderngpu/util/mgpucontext.h>
namespace mgpu {
////////////////////////////////////////////////////////////////////////////////
// CudaTimer
void CudaTimer::Start() {
cudaEventRecord(start);
cudaDeviceSynchronize();
}
double CudaTimer::Split() {
cudaEventRecord(end);
cudaDeviceSynchronize();
float t;
cudaEventElapsedTime(&t, start, end);
start.Swap(end);
return (t / 1000.0);
}
double CudaTimer::Throughput(int count, int numIterations) {
double elapsed = Split();
return (double)numIterations * count / elapsed;
}
////////////////////////////////////////////////////////////////////////////////
// CudaDevice
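// Empty kernel: querying its function attributes (see GetByOrdinal below)
// reveals the PTX version this module was compiled with.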
__global__ void KernelVersionShim() { }
struct DeviceGroup {
int numCudaDevices;
CudaDevice** cudaDevices;
DeviceGroup() {
numCudaDevices = -1;
cudaDevices = 0;
}
int GetDeviceCount() {
if(-1 == numCudaDevices) {
cudaError_t error = cudaGetDeviceCount(&numCudaDevices);
if(cudaSuccess != error || numCudaDevices <= 0) {
fprintf(stderr, "ERROR ENUMERATING CUDA DEVICES.\nExiting.\n");
exit(0);
}
cudaDevices = new CudaDevice*[numCudaDevices];
memset(cudaDevices, 0, sizeof(CudaDevice*) * numCudaDevices);
}
return numCudaDevices;
}
CudaDevice* GetByOrdinal(int ordinal) {
if(ordinal >= GetDeviceCount()) return 0;
if(!cudaDevices[ordinal]) {
// Retrieve the device properties.
CudaDevice* device = cudaDevices[ordinal] = new CudaDevice;
device->_ordinal = ordinal;
cudaError_t error = cudaGetDeviceProperties(&device->_prop,
ordinal);
if(cudaSuccess != error) {
fprintf(stderr, "FAILURE TO CREATE CUDA DEVICE %d\n", ordinal);
exit(0);
}
// Get the compiler version for this device.
cudaSetDevice(ordinal);
cudaFuncAttributes attr;
error = cudaFuncGetAttributes(&attr, KernelVersionShim);
if(cudaSuccess == error)
device->_ptxVersion = 10 * attr.ptxVersion;
else {
printf("NOT COMPILED WITH COMPATIBLE PTX VERSION FOR DEVICE"
" %d\n", ordinal);
// The module wasn't compiled with support for this device.
device->_ptxVersion = 0;
}
}
return cudaDevices[ordinal];
}
~DeviceGroup() {
if(cudaDevices) {
for(int i = 0; i < numCudaDevices; ++i)
delete cudaDevices[i];
delete [] cudaDevices;
}
cudaDeviceReset();
}
};
std::auto_ptr<DeviceGroup> deviceGroup;
int CudaDevice::DeviceCount() {
if(!deviceGroup.get())
deviceGroup.reset(new DeviceGroup);
return deviceGroup->GetDeviceCount();
}
CudaDevice& CudaDevice::ByOrdinal(int ordinal) {
if(ordinal < 0 || ordinal >= DeviceCount()) {
fprintf(stderr, "CODE REQUESTED INVALID CUDA DEVICE %d\n", ordinal);
exit(0);
}
return *deviceGroup->GetByOrdinal(ordinal);
}
CudaDevice& CudaDevice::Selected() {
int ordinal;
cudaError_t error = cudaGetDevice(&ordinal);
if(cudaSuccess != error) {
fprintf(stderr, "ERROR RETRIEVING CUDA DEVICE ORDINAL\n");
exit(0);
}
return ByOrdinal(ordinal);
}
void CudaDevice::SetActive() {
cudaError_t error = cudaSetDevice(_ordinal);
if(cudaSuccess != error) {
fprintf(stderr, "ERROR SETTING CUDA DEVICE TO ORDINAL %d\n", _ordinal);
exit(0);
}
}
std::string CudaDevice::DeviceString() const {
size_t freeMem, totalMem;
cudaError_t error = cudaMemGetInfo(&freeMem, &totalMem);
if(cudaSuccess != error) {
fprintf(stderr, "ERROR RETRIEVING MEM INFO FOR CUDA DEVICE %d\n",
_ordinal);
exit(0);
}
double memBandwidth = (_prop.memoryClockRate * 1000.0) *
(_prop.memoryBusWidth / 8 * 2) / 1.0e9;
std::string s = stringprintf(
"%s : %8.3lf Mhz (Ordinal %d)\n"
"%d SMs enabled. Compute Capability sm_%d%d\n"
"FreeMem: %6dMB TotalMem: %6dMB %2d-bit pointers.\n"
"Mem Clock: %8.3lf Mhz x %d bits (%5.1lf GB/s)\n"
"ECC %s\n\n",
_prop.name, _prop.clockRate / 1000.0, _ordinal,
_prop.multiProcessorCount, _prop.major, _prop.minor,
(int)(freeMem / (1<< 20)), (int)(totalMem / (1<< 20)), 8 * sizeof(int*),
_prop.memoryClockRate / 1000.0, _prop.memoryBusWidth, memBandwidth,
_prop.ECCEnabled ? "Enabled" : "Disabled");
return s;
}
////////////////////////////////////////////////////////////////////////////////
// CudaContext
struct ContextGroup {
CudaContext** standardContexts;
int numDevices;
ContextGroup() {
numDevices = CudaDevice::DeviceCount();
standardContexts = new CudaContext*[numDevices];
memset(standardContexts, 0, sizeof(CudaContext*) * numDevices);
}
CudaContext* GetByOrdinal(int ordinal) {
if(!standardContexts[ordinal]) {
CudaDevice& device = CudaDevice::ByOrdinal(ordinal);
standardContexts[ordinal] = new CudaContext(device, false, true);
}
return standardContexts[ordinal];
}
~ContextGroup() {
if(standardContexts) {
for(int i = 0; i < numDevices; ++i)
delete standardContexts[i];
delete [] standardContexts;
}
}
};
std::auto_ptr<ContextGroup> contextGroup;
CudaContext::CudaContext(CudaDevice& device, bool newStream, bool standard) :
_event(cudaEventDisableTiming /*| cudaEventBlockingSync */),
_stream(0), _noRefCount(standard), _pageLocked(0) {
// Create an allocator.
if(standard)
_alloc.reset(new CudaAllocSimple(device));
else
_alloc = CreateDefaultAlloc(device);
if(newStream) cudaStreamCreate(&_stream);
_ownStream = newStream;
// Allocate 4KB of page-locked memory.
cudaError_t error = cudaMallocHost((void**)&_pageLocked, 4096);
// Allocate an auxiliary stream.
error = cudaStreamCreate(&_auxStream);
}
CudaContext::~CudaContext() {
if(_pageLocked)
cudaFreeHost(_pageLocked);
if(_ownStream && _stream)
cudaStreamDestroy(_stream);
if(_auxStream)
cudaStreamDestroy(_auxStream);
}
AllocPtr CudaContext::CreateDefaultAlloc(CudaDevice& device) {
intrusive_ptr<CudaAllocBuckets> alloc(new CudaAllocBuckets(device));
size_t freeMem, totalMem;
cudaError_t error = cudaMemGetInfo(&freeMem, &totalMem);
if(cudaSuccess != error) {
fprintf(stderr, "ERROR RETRIEVING MEM INFO FOR CUDA DEVICE %d\n",
device.Ordinal());
exit(0);
}
// Maintain a buffer of 128MB with max objects of 64MB.
alloc->SetCapacity(128<< 20, 64<< 20);
return AllocPtr(alloc.get());
}
CudaContext& CudaContext::StandardContext(int ordinal) {
bool setActive = -1 != ordinal;
if(-1 == ordinal) {
cudaError_t error = cudaGetDevice(&ordinal);
if(cudaSuccess != error) {
fprintf(stderr, "ERROR RETRIEVING CUDA DEVICE ORDINAL\n");
exit(0);
}
}
int numDevices = CudaDevice::DeviceCount();
if(ordinal < 0 || ordinal >= numDevices) {
fprintf(stderr, "CODE REQUESTED INVALID CUDA DEVICE %d\n", ordinal);
exit(0);
}
if(!contextGroup.get())
contextGroup.reset(new ContextGroup);
CudaContext& context = //*contextGroup->standardContexts[ordinal];
*contextGroup->GetByOrdinal(ordinal);
if(!context.PTXVersion()) {
fprintf(stderr, "This CUDA executable was not compiled with support"
" for device %d (sm_%2d)\n", ordinal, context.ArchVersion() / 10);
exit(0);
}
if(setActive) context.SetActive();
return context;
}
ContextPtr CreateCudaDevice(int ordinal) {
CudaDevice& device = CudaDevice::ByOrdinal(ordinal);
ContextPtr context(new CudaContext(device, false, false));
return context;
}
ContextPtr CreateCudaDevice(int argc, char** argv, bool printInfo) {
int ordinal = 0;
if(argc >= 2 && !sscanf(argv[1], "%d", &ordinal)) {
fprintf(stderr, "INVALID COMMAND LINE ARGUMENT - NOT A CUDA ORDINAL\n");
exit(0);
}
ContextPtr context = CreateCudaDevice(ordinal);
if(!context->PTXVersion()) {
fprintf(stderr, "This CUDA executable was not compiled with support"
" for device %d (sm_%2d)\n", ordinal, context->ArchVersion() / 10);
exit(0);
}
context->SetActive();
if(printInfo)
printf("%s\n", context->Device().DeviceString().c_str());
return context;
}
ContextPtr CreateCudaDeviceStream(int ordinal) {
ContextPtr context(new CudaContext(
CudaDevice::ByOrdinal(ordinal), true, false));
return context;
}
ContextPtr CreateCudaDeviceStream(int argc, char** argv, bool printInfo) {
int ordinal = 0;
if(argc >= 2 && !sscanf(argv[1], "%d", &ordinal)) {
fprintf(stderr, "INVALID COMMAND LINE ARGUMENT - NOT A CUDA ORDINAL\n");
exit(0);
}
ContextPtr context = CreateCudaDeviceStream(ordinal);
if(!context->PTXVersion()) {
fprintf(stderr, "This CUDA executable was not compiled with support"
" for device %d (sm_%2d)\n", ordinal, context->ArchVersion() / 10);
exit(0);
}
context->SetActive();
if(printInfo)
printf("%s\n", context->Device().DeviceString().c_str());
return context;
}
ContextPtr CreateCudaDeviceAttachStream(int ordinal, cudaStream_t stream) {
ContextPtr context(new CudaContext(
CudaDevice::ByOrdinal(ordinal), false, false));
context->_stream = stream;
return context;
}
ContextPtr CreateCudaDeviceAttachStream(cudaStream_t stream) {
int ordinal;
cudaGetDevice(&ordinal);
return CreateCudaDeviceAttachStream(ordinal, stream);
}
////////////////////////////////////////////////////////////////////////////////
// CudaAllocSimple
cudaError_t CudaAllocSimple::Malloc(size_t size, void** p) {
cudaError_t error = cudaSuccess;
*p = 0;
if(size) error = cudaMalloc(p, size);
if(cudaSuccess != error) {
printf("CUDA MALLOC ERROR %d\n", error);
exit(0);
}
return error;
}
bool CudaAllocSimple::Free(void* p) {
cudaError_t error = cudaSuccess;
if(p) error = cudaFree(p);
return cudaSuccess == error;
}
////////////////////////////////////////////////////////////////////////////////
// CudaAllocBuckets
CudaAllocBuckets::CudaAllocBuckets(CudaDevice& device) : CudaAlloc(device) {
_maxObjectSize = _capacity = _allocated = _committed = 0;
_counter = 0;
}
CudaAllocBuckets::~CudaAllocBuckets() {
SetCapacity(0, 0);
assert(!_allocated);
}
bool CudaAllocBuckets::SanityCheck() const {
// Iterate through all allocated objects and verify sizes.
size_t allocatedCount = 0, committedCount = 0;
for(AddressMap::const_iterator i = _addressMap.begin();
i != _addressMap.end(); ++i) {
int bucket = i->second->bucket;
size_t size = (bucket < NumBuckets) ? BucketSizes[bucket] : 0;
allocatedCount += size;
if(i->second->priority == _priorityMap.end())
committedCount += size;
}
return allocatedCount == _allocated && committedCount == _committed;
}
cudaError_t CudaAllocBuckets::Malloc(size_t size, void** p) {
// Locate the bucket index and adjust the size of the allocation to the
// bucket size.
size_t allocSize = size;
size_t commitSize = 0;
int bucket = LocateBucket(size);
if(bucket < NumBuckets)
allocSize = commitSize = BucketSizes[bucket];
// Peel off an already-allocated node and reuse it.
MemList& list = _memLists[bucket];
if(list.size() && list.front().priority != _priorityMap.end()) {
MemList::iterator memIt = list.begin();
_priorityMap.erase(memIt->priority);
memIt->priority = _priorityMap.end();
list.splice(list.end(), list, memIt);
_committed += commitSize;
*p = memIt->address->first;
return cudaSuccess;
}
// Shrink if this allocation would put us over the limit.
Compact(commitSize);
cudaError_t error = cudaSuccess;
*p = 0;
if(size) error = cudaMalloc(p, allocSize);
while((cudaErrorMemoryAllocation == error) && (_committed < _allocated)) {
SetCapacity(_capacity - _capacity / 10, _maxObjectSize);
error = cudaMalloc(p, allocSize);
}
if(cudaSuccess != error) return error;
MemList::iterator memIt =
_memLists[bucket].insert(_memLists[bucket].end(), MemNode());
memIt->bucket = bucket;
memIt->address = _addressMap.insert(std::make_pair(*p, memIt)).first;
memIt->priority = _priorityMap.end();
_allocated += commitSize;
_committed += commitSize;
assert(SanityCheck());
return cudaSuccess;
}
bool CudaAllocBuckets::Free(void* p) {
AddressMap::iterator it = _addressMap.find(p);
if(it == _addressMap.end()) {
// If the pointer was not found in the address map, cudaFree it anyway
// but return false.
if(p) cudaFree(p);
return false;
}
// Because we're freeing a page, it had better not be in the priority queue.
MemList::iterator memIt = it->second;
assert(memIt->priority == _priorityMap.end());
// Always free allocations larger than the largest bucket
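// Priority key = _counter - bucket: nodes freed longest ago (and larger
// buckets, via the -bucket offset) get the smallest keys, so Compact()
// evicts them first.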
it->second->priority = _priorityMap.insert(
std::make_pair(_counter++ - memIt->bucket, memIt));
// Freed nodes are moved to the front, committed nodes are moved to the
// end.
int bucket = memIt->bucket;
size_t commitSize = (bucket < NumBuckets) ? BucketSizes[bucket] : 0;
MemList& list = _memLists[bucket];
list.splice(list.begin(), list, memIt);
_committed -= commitSize;
// Delete data that's not cached.
if(NumBuckets == bucket)
FreeNode(memIt);
Compact(0);
return true;
}
void CudaAllocBuckets::Clear() {
Compact(_allocated);
}
void CudaAllocBuckets::FreeNode(CudaAllocBuckets::MemList::iterator memIt) {
if(memIt->address->first) cudaFree(memIt->address->first);
int bucket = memIt->bucket;
size_t commitSize = (bucket < NumBuckets) ? BucketSizes[bucket] : 0;
_addressMap.erase(memIt->address);
if(memIt->priority != _priorityMap.end())
_priorityMap.erase(memIt->priority);
else
_committed -= commitSize;
_allocated -= commitSize;
_memLists[bucket].erase(memIt);
assert(SanityCheck());
}
void CudaAllocBuckets::Compact(size_t extra) {
while(_allocated + extra > _capacity && _allocated > _committed) {
// Walk the priority queue from beginning to end removing nodes.
MemList::iterator memIt = _priorityMap.begin()->second;
FreeNode(memIt);
}
}
// Exponentially spaced buckets.
const size_t CudaAllocBuckets::BucketSizes[CudaAllocBuckets::NumBuckets] = {
256, 512, 1024, 2048, 4096, 8192,
12288, 16384, 24576, 32768, 49152, 65536,
98304, 131072, 174848, 218624, 262144, 349696,
436992, 524288, 655360, 786432, 917504, 1048576,
1310720, 1572864, 1835008, 2097152, 2516736, 2936064,
3355648, 3774976, 4194304, 4893440, 5592576, 6291456,
6990592, 7689728, 8388608, 9786880, 11184896, 12582912,
13981184, 15379200, 16777216, 18874368, 20971520, 23068672,
25165824, 27262976, 29360128, 31457280, 33554432, 36910080,
40265472, 43620864, 46976256, 50331648, 53687296, 57042688,
60398080, 63753472, 67108864, 72701440, 78293760, 83886080,
89478656, 95070976, 100663296, 106255872, 111848192, 117440512,
123033088, 128625408, 134217728, 143804928, 153391872, 162978816,
172565760, 182152704, 191739648, 201326592, 210913792, 220500736
};
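// Map a request size to the smallest bucket that can hold it. Requests larger
// than _maxObjectSize or the largest bucket return NumBuckets and are
// allocated exactly, bypassing the cache.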
int CudaAllocBuckets::LocateBucket(size_t size) const {
if(size > _maxObjectSize || size > BucketSizes[NumBuckets - 1])
return NumBuckets;
return (int)(std::lower_bound(BucketSizes, BucketSizes + NumBuckets, size) -
BucketSizes);
}
} // namespace mgpu
|
350e0833816c857ed2ad104e8c349de17570a118.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <time.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <hiprand/hiprand_kernel.h>
#define N 100
#define MAXCOL 100
#define Nic 5
#define NL_min 0
#define NL_max 70
#define NL_step 1
#define Ng_max 10
#define FILENAME "results.txt"
typedef struct {
int All_sync_count1[NL_max-NL_min][Ng_max];
int All_sync_count2[NL_max-NL_min];
} global_mem;
typedef struct {
unsigned short ic, iL, nL_break, ig;
unsigned short spike_count[N];
double v_init[N];
double tspike[N * MAXCOL];
} simulation_result;
/************************ DEBUG FUNCTIONS ************************/
/* Check the weights on GPU memory *
__global__ void check_weights(double w[(NL_max - NL_min) * Ng_max / NL_step][N][N]) {
printf("hi_check_weights\n");
int n = (NL_max - NL_min) / NL_step * Ng_max;
for(int i = 0; i < n; ++i) {
printf("\nnL = %d\tng = %d\n\t", NL_min + NL_step * i / Ng_max, i % Ng_max);
for(int j = 0; j < N; ++j) {
for(int k = 0; k < N; ++k) {
printf("%.2lf ", w[i][j][k]);
}
printf("\n\t");
}
}
printf("\n");
}
/* Check the global data on GPU memory *
__global__ void check_g_mem(global_mem *g_mem) {
double sum1 = 0.0, sum2 = 0.0;
for(int i = 0; i < NL_max - NL_min; ++i) {
for(int j = 0; j < Ng_max; ++j) {
sum1 += g_mem->All_sync_count1[i][j];
}
sum2 += g_mem->All_sync_count2[i];
}
printf("sum1 = %f\nsum2 = %f\n", sum1, sum2);
}
/*******************************************************************/
/* Generate an adjacency matrix for a connected graph with nL edges missing */
__device__ unsigned short synaptic_weights_connected_network(double w[][N], unsigned short nL, hiprandState_t *rand_state) {
unsigned short i,j,k,kk,neuron1,neuron2;
double w_flag[N][N];
unsigned short syn_to_remove, tot_syn_removed;
short connected_nodes[N];
unsigned short current_ptr, endptr, parent_node;
unsigned short flag_connected = 0;
unsigned short flag_already_connected;
// GENERATE AN ALL-TO-ALL NETWORK ************************************************************************
for(i = 0; i < N; i++) {
for(j = 0; j < N; j++) {
if(j != i){
w[i][j] = 1;
}
else if(j == i){
w[i][j] = 0;
}
}
}
// REMOVE SYNAPSES FROM ABOVE ALL-TO-ALL NETWORK *********************************************************
syn_to_remove = nL;
tot_syn_removed = 0;
// Initialize array w_flag
for(k = 0; k < N; k++) {
for(kk = 0; kk < N; kk++) {
w_flag[k][kk] = 0; // w_flag[k][kk] is set to 1 if the synapse k --> kk has been removed
}
}
// Generate a new network by removing synapses randomly
while(tot_syn_removed < syn_to_remove) {
neuron1 = hiprand(rand_state) % N;
neuron2 = hiprand(rand_state) % N;
if(neuron1 != neuron2) {
if(w_flag[neuron1][neuron2] == 0) { // synapse between these two neurons has not been changed.
w_flag[neuron1][neuron2] = 1;
w_flag[neuron2][neuron1] = 1;
w[neuron1][neuron2] = 0;
w[neuron2][neuron1] = w[neuron1][neuron2];
tot_syn_removed++;
}
}
}
// Is the network generated above connected ? /////////////
//w[0][0] = 0; w[0][1] = 1; w[0][2] = 1; w[0][3] = 0; w[0][4] = 1; w[0][5] = 0;
//w[1][0] = w[0][1]; w[1][1] = 0; w[1][2] = 1; w[1][3] = 0; w[1][4] = 0; w[1][5] = 1;
//w[2][0] = w[0][2]; w[2][1] = w[1][2]; w[2][2] = 0; w[2][3] = 0; w[2][4] = 1; w[2][5] = 0;
//w[3][0] = w[0][3]; w[3][1] = w[1][3]; w[3][2] = w[2][3]; w[3][3] = 0; w[3][4] = 0; w[3][5] = 0;
//w[4][0] = w[0][4]; w[4][1] = w[1][4]; w[4][2] = w[2][4]; w[4][3] = w[3][4]; w[4][4] = 0; w[4][5] = 1;
//w[5][0] = w[0][5]; w[5][1] = w[1][5]; w[5][2] = w[2][5]; w[5][3] = w[3][5]; w[5][4] = w[4][5]; w[5][5] = 0;
//w[0][0] = 0; w[0][1] = 0; w[0][2] = 1; w[0][3]=0;
//w[1][0] = w[0][1]; w[1][1] = 0; w[1][2] = 1; w[1][3] =0;
//w[2][0]=w[0][2]; w[2][1]=w[1][2]; w[2][2] =0; w[2][3] = 1;
//w[3][0] = w[0][3]; w[3][1] = w[1][3]; w[3][2] = w[2][3]; w[3][3]=0;
// for(k = 0; k < N; k++) {
// for(kk = 0; kk < N; kk++) {
// w_flag[k][kk] = 0; // w_flag[k][kk] is changed to value 1, if the synapse between k --> kk is removed
// }
// }
connected_nodes[0] = 0;
for(i=1;i<N;i++) {
connected_nodes[i] = -1;
}
current_ptr = 0;
endptr = 0; // points towards the last non-zero element in the connected_nodes array
while(current_ptr <= endptr) {
for(i = 0; i < N; i++) {
parent_node = connected_nodes[current_ptr];
flag_already_connected = 0;
for(j = 0; j <= endptr; j++) {
if(connected_nodes[j] == i) {
flag_already_connected = 1;
}
}
if(w[parent_node][i] == 1) {
if(w_flag[parent_node][i] == 0) {
if(flag_already_connected ==0) {
endptr ++;
connected_nodes[endptr] = i; // stores node numbers connected to parent_node
w_flag[parent_node][i] = 1;
w_flag[i][parent_node] = w_flag[parent_node][i]; //links already visited
//printf("i= %d \t endptr= %d \t current_ptr= %d \t connected_nodes[endptr] = %d \n",i, endptr,current_ptr,connected_nodes[endptr]);
}
}
}
if (i == N-1) {
current_ptr++;
}
}
}
if(endptr == N-1) {
flag_connected = 1;
}
return flag_connected;
}
/* Create weight matrices in GPU memory */
__global__ void store_weights(double w[(NL_max - NL_min) / NL_step * Ng_max][N][N]) {
unsigned short threadId = blockIdx.x * blockDim.x + threadIdx.x;
unsigned short nL_break = NL_min + threadId * NL_step;
unsigned short flag_connected;
hiprandState_t rand_state;
hiprand_init(1234, threadId, 0, &rand_state);
for(unsigned short i = 0; i < Ng_max; ++i) {
flag_connected = 0;
do {
flag_connected = synaptic_weights_connected_network(w[threadId * Ng_max + i], nL_break, &rand_state);
} while(flag_connected == 0);
}
}
/* Run a simulation on a single thread */
__global__ void simulate(simulation_result *results, global_mem *g_mem, double w[(NL_max - NL_min) / NL_step * Ng_max][N][N]) {
unsigned short threadId = blockIdx.x * blockDim.x + threadIdx.x;
unsigned short num_simulations = (NL_max - NL_min) / NL_step * Ng_max * Nic;
// Check if this thread is a valid one
if(threadId >= num_simulations) {
return;
}
// Initialize and seed the random number generator
hiprandState_t rand_state;
hiprand_init(threadId, clock(), clock(), &rand_state);
double tmax = 20;
double dt = 0.0002;
double epsilon = 0.01;
double vth = 0.8;
double vreset = 0;
double a = 1;
double b = 1;
double tol = 0.0001;
int Nstep = tmax / dt;
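// Decompose the flat thread index into the initial-condition (ic),
// graph-realization (ig), and removed-link (iL) indices of this simulation.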
unsigned short ic = threadId % Nic;
unsigned short ig = (threadId / Nic) % Ng_max;
unsigned short iL = threadId / Ng_max / Nic;
unsigned short nL_break = NL_min + iL * NL_step;
results[threadId].ic = ic;
results[threadId].ig = ig;
results[threadId].iL = iL;
results[threadId].nL_break = nL_break;
int i;
unsigned short k, kk, InSync_neurons;
unsigned short spike[N], push_up_flag[N];
double f0, f1, f2, f3, tspike_diff1, tspike_diff2, t_old, t_new;
double v_old[N], v_new[N], push_up_amnt[N];
// double v_initnew[100]= {0.1545367758770665, 0.814818668976894, 0.15320113199547414, 0.8353524225981629, 0.08115890455440067, 0.6914756325608367, 0.4130575136157111, 0.5278299763853765, 0.2812216969669379, 0.8062893532936973, 0.9026514070819015, 0.6496189902535245, 0.6286630367202969, 0.6171265038631547, 0.472005565894945, 0.43981531433376, 0.8449193307307433, 0.3499655732796455, 0.6064637293486522, 0.1567131568957726, 0.6917890946540877, 0.19314656121526463, 0.9715334462829239, 0.42821872654614646, 0.5153519308836192, 0.8849979650599988, 0.6757089505722944, 0.31767924448674467, 0.2910320632769062, 0.32862537004994197, 0.45168148961810184, 0.01955708613009799, 0.5696484846788225, 0.450835587565686, 0.026054486371280938, 0.35039306479694443, 0.4040846812243857, 0.27342993028260487, 0.5638358124122043, 0.9484997135038367, 0.4077636621202826, 0.8220935863179847, 0.7196517781502417, 0.5968801478996293, 0.17909455403785213, 0.9071518551971325, 0.49350749777889813, 0.8002803025938409, 0.3071891631672753, 0.5367924012551228, 0.8628384065372916, 0.9147597382639411, 0.5859467778984498, 0.506728558827792, 0.5444346202867876, 0.7105452431393048, 0.8833280213387779, 0.7101823916271959, 0.21378218672881877, 0.2647380984685085, 0.8051689609566608, 0.636661266440235, 0.1284215317086359, 0.8991055384060852, 0.9185260634481671, 0.7505310205211034, 0.5449904790914537, 0.8418539582522988, 0.8227024116656272, 0.8206769102729885, 0.5615504438601934, 0.9070762107580452, 0.37619234543451996, 0.23085180280640882, 0.6623891864245589, 0.9806074893915904, 0.8067560379883594, 0.9895526050531294, 0.5548342062752014, 0.818488769718889, 0.48622692029833214, 0.6501553126075313, 0.3176597622855678, 0.9742850850234102, 0.6065112069910525, 0.37288262643468995, 0.074431646812396, 0.194162041772725, 0.021779459371789267, 0.2856071586947684, 0.5653325199766001, 0.10132723526598542, 0.7041397023518559, 0.6412510211401311, 0.061293406975714726, 0.2728425423344597, 0.6529094748027036, 0.6152282218769618, 0.2633952283711999, 0.44178953896737416};
// Generate initial state
for(kk = 0; kk < N; kk++) {
results[threadId].v_init[kk] = hiprand_uniform_double(&rand_state) * (vth);
v_old[kk] = results[threadId].v_init[kk];
}
// for(kk = 0; kk < N; kk++) {
// results[threadId].v_init[kk] = v_initnew[kk];
// v_old[kk] = results[threadId].v_init[kk];
// }
// initialize arrays
memset(results[threadId].spike_count, 0, N * sizeof(unsigned short));
memset(results[threadId].tspike, 0, N * MAXCOL * sizeof(double));
// Time loop begins
t_old = 0;
for(i = 1; i < Nstep; i++) {
t_new = i*dt;
// Identify (1) the neurons that spiked in previous time step, (2) time of all the spikes of each neuron
// (3) total number of spikes in each neuron so far
for(kk = 0; kk < N; kk++) {
push_up_amnt[kk] = 0; // initialize these arrays at every time step
push_up_flag[kk] = 0;
if(v_old[kk] >= vth) {
spike[kk] = 1; // if neuron spiked
results[threadId].spike_count[kk]++;
results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]] = t_old;
}
else {
spike[kk] = 0; // if neuron did not spike
}
}
for(kk = 0; kk < N; kk++) {
for(k = 0; k < N; k++) {
if(k != kk && spike[kk] != 1 && spike[k]==1) {
push_up_amnt[kk] = push_up_amnt[kk] +
(epsilon) * w[iL * Ng_max + ig][kk][k] * spike[k];
push_up_flag[kk] = 1;
}
}
if(v_old[kk] < vth) {
if(push_up_flag[kk] == 1) {
v_new[kk] = v_old[kk] + push_up_amnt[kk];
if(v_new[kk] >= vth) {
v_new[kk] = vreset;
results[threadId].spike_count[kk]++;
results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]] = t_old;
}
}
else if(push_up_flag[kk] == 0) {
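// No incoming spikes: advance dv/dt = a - b*v by one step of classical RK4.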
f0 = a - b * v_old[kk];
f1 = a - b * (v_old[kk] + f0 * 0.5 * dt);
f2 = a - b * (v_old[kk] + f1 * 0.5 * dt);
f3 = a - b * (v_old[kk] + f2 * dt);
v_new[kk] = v_old[kk] + dt * (f0 + 2 * f1 + 2 * f2 + f3) / 6;
}
}
else if (v_old[kk] >= vth) {
v_new[kk] = vreset;
}
// swap v_old & v_new for next time iteration
v_old[kk] = v_new[kk];
}
// Advance time
t_old = t_new;
} // Time loop ends
// Count number of iL-networks where all neurons fire in sync
InSync_neurons = 1;
for(kk = 1; kk < N; kk++) {
tspike_diff1 = fabs(results[threadId].tspike[0 * MAXCOL + results[threadId].spike_count[0] - 11] -
results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk] - 11]);
tspike_diff2 = fabs(results[threadId].tspike[0 * MAXCOL + results[threadId].spike_count[0] - 10] -
results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk] - 10]);
if(tspike_diff1 < tol && tspike_diff2 < tol) {
InSync_neurons++; // count number of neurons firing in sync for the chosen initial condition
}
}
if(InSync_neurons == N) {
//g_mem->All_sync_count1[iL][ig]++; // count number of ic's that yield All-sync for iL-iG network.
g_mem->All_sync_count2[iL]++;
//printf("Number of instances of full sync = %d \n",All_sync_count2[iL]);
//fprintf(all_sync,"Number of instances of full sync = %d \n",All_sync_count2[0]);
}
// Write spike time on file
/*for(kk=0;kk<N;kk++) {
tmp1 = 10000*results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]-7];
tmp2 = 10000*results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]-8];
tmp3 = 10000*results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]-9];
tmp4 = 10000*results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]-10];
tmp5 = 10000*results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]-11];
tmp6 = 10000*results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]-12];
tmp7 = 10000*results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]-13];
//fprintf(spike_time,"%d \t %lu \t %lu \t %lu \t %lu \t %lu \t \%d \n",kk,tmp1,tmp2,tmp3,tmp4,tmp5,flag_unconnctd_graph);
//fprintf(spike_time,"%d \t %lu \t %lu \t %lu \t %lu \t %lu \t %lu \t %lu \n",kk,tmp1,tmp2,tmp3,tmp4,tmp5,tmp6,tmp7);
}*/
printf("Thread #%d finished with tmax = %lf\n", threadId, t_old);
}
int main() {
unsigned short num_simulations = (NL_max - NL_min) / NL_step * Ng_max * Nic;
printf("Running %d simulations with N = %d, NL_max = %d, Ng_max = %d, Nic = %d\n\n", num_simulations, N, NL_max, Ng_max, Nic);
// Initialize the weight matrices in the GPU memory
void *d_w;
hipMalloc(&d_w, (NL_max - NL_min) * Ng_max / NL_step * N * N * sizeof(double));
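// One block of (NL_max - NL_min) / NL_step threads: one thread per nL value,
// each generating Ng_max connected adjacency matrices.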
hipLaunchKernelGGL(( store_weights), dim3(1), dim3((NL_max - NL_min) / NL_step), 0, 0, (double (*)[N][N])d_w);
// Initialize the global GPU memory
global_mem g_mem;
global_mem *d_g_mem;
hipMalloc(&d_g_mem, sizeof(global_mem));
for(unsigned short i = 0; i < NL_max - NL_min; ++i) {
for(unsigned short j = 0; j < Ng_max; ++j) {
g_mem.All_sync_count1[i][j] = 0;
}
g_mem.All_sync_count2[i] = 0;
}
hipMemcpy(d_g_mem, &g_mem, sizeof(g_mem), hipMemcpyHostToDevice);
// Allocate memory for storing results
simulation_result *results = (simulation_result *) malloc(sizeof(simulation_result) * num_simulations);
simulation_result *d_results;
hipMalloc(&d_results, sizeof(simulation_result) * num_simulations);
// Get optimal grid and block dimensions
int grid_size, block_size;
hipOccupancyMaxPotentialBlockSize(&grid_size, &block_size, simulate, 0, num_simulations);
printf("Number of blocks = %d, Number of threads in a block = %d\n", grid_size, block_size);
// Start all simulations simultaneously
hipLaunchKernelGGL(( simulate), dim3(grid_size), dim3(block_size), 0, 0, d_results, d_g_mem, (double (*)[N][N])d_w);
// Retrieve the results back from GPU
hipMemcpy(results, d_results, sizeof(simulation_result) * num_simulations, hipMemcpyDeviceToHost);
hipMemcpy(&g_mem, d_g_mem, sizeof(g_mem), hipMemcpyDeviceToHost);
// Open a file to store the results
FILE *file = fopen(FILENAME, "w");
// Write the results to file
for(int i = 0; i < num_simulations; ++i) {
unsigned short ic = i % Nic;
unsigned short ig = (i / Nic) % Ng_max;
unsigned short iL = i / Ng_max / Nic;
unsigned short nL_break = NL_min + iL * NL_step;
fprintf(file, "\n------------------------------------------------------------------\n");
// Simulation parameters
fprintf(file, "\n\n%d. nL_break = %d\tig = %d\tic = %d :\n\n\t", i+1, nL_break, ig, ic);
// TODO: Weight matrix
// Initial voltages
fprintf(file, "Initial voltages:\n\t");
for(unsigned short j = 0; j < N; ++j) {
fprintf(file, "%f ", results[i].v_init[j]);
}
// All_sync_count2
fprintf(file, "\n\n\tAll_sync_count2[%d]: %d\n\n\t", iL, g_mem.All_sync_count2[iL]);
// Spike times
fprintf(file, "Spike times:\n\t");
for(unsigned short j = 0; j < N; ++j) {
for(unsigned short k = 1; k <= results[i].spike_count[j]; ++k) {
fprintf(file, "%f ", results[i].tspike[j * MAXCOL + k]);
}
fprintf(file, "\n\t");
}
}
// Clean-up
fclose(file);
free(results);
hipFree(d_w);
hipFree(d_g_mem);
hipFree(d_results);
return 0;
}
|
350e0833816c857ed2ad104e8c349de17570a118.cu
|
#include <time.h>
#include <cuda.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <curand_kernel.h>
#define N 100
#define MAXCOL 100
#define Nic 5
#define NL_min 0
#define NL_max 70
#define NL_step 1
#define Ng_max 10
#define FILENAME "results.txt"
typedef struct {
int All_sync_count1[NL_max-NL_min][Ng_max];
int All_sync_count2[NL_max-NL_min];
} global_mem;
typedef struct {
unsigned short ic, iL, nL_break, ig;
unsigned short spike_count[N];
double v_init[N];
double tspike[N * MAXCOL];
} simulation_result;
/************************ DEBUG FUNCTIONS ************************/
/* Check the weights on GPU memory *
__global__ void check_weights(double w[(NL_max - NL_min) * Ng_max / NL_step][N][N]) {
printf("hi_check_weights\n");
int n = (NL_max - NL_min) / NL_step * Ng_max;
for(int i = 0; i < n; ++i) {
printf("\nnL = %d\tng = %d\n\t", NL_min + NL_step * i / Ng_max, i % Ng_max);
for(int j = 0; j < N; ++j) {
for(int k = 0; k < N; ++k) {
printf("%.2lf ", w[i][j][k]);
}
printf("\n\t");
}
}
printf("\n");
}
/* Check the global data on GPU memory *
__global__ void check_g_mem(global_mem *g_mem) {
double sum1 = 0.0, sum2 = 0.0;
for(int i = 0; i < NL_max - NL_min; ++i) {
for(int j = 0; j < Ng_max; ++j) {
sum1 += g_mem->All_sync_count1[i][j];
}
sum2 += g_mem->All_sync_count2[i];
}
printf("sum1 = %f\nsum2 = %f\n", sum1, sum2);
}
/*******************************************************************/
/* Generate an adjacency matrix for a connected graph with nL edges missing */
__device__ unsigned short synaptic_weights_connected_network(double w[][N], unsigned short nL, curandState *rand_state) {
unsigned short i,j,k,kk,neuron1,neuron2;
double w_flag[N][N];
unsigned short syn_to_remove, tot_syn_removed;
short connected_nodes[N];
unsigned short current_ptr, endptr, parent_node;
unsigned short flag_connected = 0;
unsigned short flag_already_connected;
// GENERATE AN ALL-TO-ALL NETWORK ************************************************************************
for(i = 0; i < N; i++) {
for(j = 0; j < N; j++) {
if(j != i){
w[i][j] = 1;
}
else if(j == i){
w[i][j] = 0;
}
}
}
// REMOVE SYNAPSES FROM ABOVE ALL-TO-ALL NETWORK *********************************************************
syn_to_remove = nL;
tot_syn_removed = 0;
// Initialize array w_flag
for(k = 0; k < N; k++) {
for(kk = 0; kk < N; kk++) {
w_flag[k][kk] = 0; // w_flag[k][kk] is set to 1 if the synapse k --> kk has been removed
}
}
// Generate a new network by removing synapses randomly
while(tot_syn_removed < syn_to_remove) {
neuron1 = curand(rand_state) % N;
neuron2 = curand(rand_state) % N;
if(neuron1 != neuron2) {
if(w_flag[neuron1][neuron2] == 0) { // synapse between these two neurons has not been changed.
w_flag[neuron1][neuron2] = 1;
w_flag[neuron2][neuron1] = 1;
w[neuron1][neuron2] = 0;
w[neuron2][neuron1] = w[neuron1][neuron2];
tot_syn_removed++;
}
}
}
// Is the network generated above connected ? /////////////
//w[0][0] = 0; w[0][1] = 1; w[0][2] = 1; w[0][3] = 0; w[0][4] = 1; w[0][5] = 0;
//w[1][0] = w[0][1]; w[1][1] = 0; w[1][2] = 1; w[1][3] = 0; w[1][4] = 0; w[1][5] = 1;
//w[2][0] = w[0][2]; w[2][1] = w[1][2]; w[2][2] = 0; w[2][3] = 0; w[2][4] = 1; w[2][5] = 0;
//w[3][0] = w[0][3]; w[3][1] = w[1][3]; w[3][2] = w[2][3]; w[3][3] = 0; w[3][4] = 0; w[3][5] = 0;
//w[4][0] = w[0][4]; w[4][1] = w[1][4]; w[4][2] = w[2][4]; w[4][3] = w[3][4]; w[4][4] = 0; w[4][5] = 1;
//w[5][0] = w[0][5]; w[5][1] = w[1][5]; w[5][2] = w[2][5]; w[5][3] = w[3][5]; w[5][4] = w[4][5]; w[5][5] = 0;
//w[0][0] = 0; w[0][1] = 0; w[0][2] = 1; w[0][3]=0;
//w[1][0] = w[0][1]; w[1][1] = 0; w[1][2] = 1; w[1][3] =0;
//w[2][0]=w[0][2]; w[2][1]=w[1][2]; w[2][2] =0; w[2][3] = 1;
//w[3][0] = w[0][3]; w[3][1] = w[1][3]; w[3][2] = w[2][3]; w[3][3]=0;
// for(k = 0; k < N; k++) {
// for(kk = 0; kk < N; kk++) {
// w_flag[k][kk] = 0; // w_flag[k][kk] is changed to value 1, if the synapse between k --> kk is removed
// }
// }
connected_nodes[0] = 0;
for(i=1;i<N;i++) {
connected_nodes[i] = -1;
}
current_ptr = 0;
endptr = 0; // points towards the last non-zero element in the connected_nodes array
while(current_ptr <= endptr) {
for(i = 0; i < N; i++) {
parent_node = connected_nodes[current_ptr];
flag_already_connected = 0;
for(j = 0; j <= endptr; j++) {
if(connected_nodes[j] == i) {
flag_already_connected = 1;
}
}
if(w[parent_node][i] == 1) {
if(w_flag[parent_node][i] == 0) {
if(flag_already_connected ==0) {
endptr ++;
connected_nodes[endptr] = i; // stores node numbers connected to parent_node
w_flag[parent_node][i] = 1;
w_flag[i][parent_node] = w_flag[parent_node][i]; //links already visited
//printf("i= %d \t endptr= %d \t current_ptr= %d \t connected_nodes[endptr] = %d \n",i, endptr,current_ptr,connected_nodes[endptr]);
}
}
}
if (i == N-1) {
current_ptr++;
}
}
}
if(endptr == N-1) {
flag_connected = 1;
}
return flag_connected;
}
/* Create weight matrices in GPU memory */
__global__ void store_weights(double w[(NL_max - NL_min) / NL_step * Ng_max][N][N]) {
unsigned short threadId = blockIdx.x * blockDim.x + threadIdx.x;
unsigned short nL_break = NL_min + threadId * NL_step;
unsigned short flag_connected;
curandState rand_state;
curand_init(1234, threadId, 0, &rand_state);
for(unsigned short i = 0; i < Ng_max; ++i) {
flag_connected = 0;
do {
flag_connected = synaptic_weights_connected_network(w[threadId * Ng_max + i], nL_break, &rand_state);
} while(flag_connected == 0);
}
}
/* Run a simulation on a single thread */
__global__ void simulate(simulation_result *results, global_mem *g_mem, double w[(NL_max - NL_min) / NL_step * Ng_max][N][N]) {
unsigned short threadId = blockIdx.x * blockDim.x + threadIdx.x;
unsigned short num_simulations = (NL_max - NL_min) / NL_step * Ng_max * Nic;
// Check if this thread is a valid one
if(threadId >= num_simulations) {
return;
}
// Initialize and seed the random number generator
curandState rand_state;
curand_init(threadId, clock(), clock(), &rand_state);
double tmax = 20;
double dt = 0.0002;
double epsilon = 0.01;
double vth = 0.8;
double vreset = 0;
double a = 1;
double b = 1;
double tol = 0.0001;
int Nstep = tmax / dt;
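// Decompose the flat thread index into the initial-condition (ic),
// graph-realization (ig), and removed-link (iL) indices of this simulation.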
unsigned short ic = threadId % Nic;
unsigned short ig = (threadId / Nic) % Ng_max;
unsigned short iL = threadId / Ng_max / Nic;
unsigned short nL_break = NL_min + iL * NL_step;
results[threadId].ic = ic;
results[threadId].ig = ig;
results[threadId].iL = iL;
results[threadId].nL_break = nL_break;
int i;
unsigned short k, kk, InSync_neurons;
unsigned short spike[N], push_up_flag[N];
double f0, f1, f2, f3, tspike_diff1, tspike_diff2, t_old, t_new;
double v_old[N], v_new[N], push_up_amnt[N];
// double v_initnew[100]= {0.1545367758770665, 0.814818668976894, 0.15320113199547414, 0.8353524225981629, 0.08115890455440067, 0.6914756325608367, 0.4130575136157111, 0.5278299763853765, 0.2812216969669379, 0.8062893532936973, 0.9026514070819015, 0.6496189902535245, 0.6286630367202969, 0.6171265038631547, 0.472005565894945, 0.43981531433376, 0.8449193307307433, 0.3499655732796455, 0.6064637293486522, 0.1567131568957726, 0.6917890946540877, 0.19314656121526463, 0.9715334462829239, 0.42821872654614646, 0.5153519308836192, 0.8849979650599988, 0.6757089505722944, 0.31767924448674467, 0.2910320632769062, 0.32862537004994197, 0.45168148961810184, 0.01955708613009799, 0.5696484846788225, 0.450835587565686, 0.026054486371280938, 0.35039306479694443, 0.4040846812243857, 0.27342993028260487, 0.5638358124122043, 0.9484997135038367, 0.4077636621202826, 0.8220935863179847, 0.7196517781502417, 0.5968801478996293, 0.17909455403785213, 0.9071518551971325, 0.49350749777889813, 0.8002803025938409, 0.3071891631672753, 0.5367924012551228, 0.8628384065372916, 0.9147597382639411, 0.5859467778984498, 0.506728558827792, 0.5444346202867876, 0.7105452431393048, 0.8833280213387779, 0.7101823916271959, 0.21378218672881877, 0.2647380984685085, 0.8051689609566608, 0.636661266440235, 0.1284215317086359, 0.8991055384060852, 0.9185260634481671, 0.7505310205211034, 0.5449904790914537, 0.8418539582522988, 0.8227024116656272, 0.8206769102729885, 0.5615504438601934, 0.9070762107580452, 0.37619234543451996, 0.23085180280640882, 0.6623891864245589, 0.9806074893915904, 0.8067560379883594, 0.9895526050531294, 0.5548342062752014, 0.818488769718889, 0.48622692029833214, 0.6501553126075313, 0.3176597622855678, 0.9742850850234102, 0.6065112069910525, 0.37288262643468995, 0.074431646812396, 0.194162041772725, 0.021779459371789267, 0.2856071586947684, 0.5653325199766001, 0.10132723526598542, 0.7041397023518559, 0.6412510211401311, 0.061293406975714726, 0.2728425423344597, 0.6529094748027036, 0.6152282218769618, 0.2633952283711999, 0.44178953896737416};
// Generate initial state
for(kk = 0; kk < N; kk++) {
results[threadId].v_init[kk] = curand_uniform_double(&rand_state) * (vth);
v_old[kk] = results[threadId].v_init[kk];
}
// for(kk = 0; kk < N; kk++) {
// results[threadId].v_init[kk] = v_initnew[kk];
// v_old[kk] = results[threadId].v_init[kk];
// }
// initialize arrays
memset(results[threadId].spike_count, 0, N * sizeof(unsigned short));
memset(results[threadId].tspike, 0, N * MAXCOL * sizeof(double));
// Time loop begins
t_old = 0;
for(i = 1; i < Nstep; i++) {
t_new = i*dt;
// Identify (1) the neurons that spiked in the previous time step, (2) the time of every spike of each neuron,
// and (3) the total number of spikes of each neuron so far
for(kk = 0; kk < N; kk++) {
push_up_amnt[kk] = 0; // initialize these arrays at every time step
push_up_flag[kk] = 0;
if(v_old[kk] >= vth) {
spike[kk] = 1; // if neuron spiked
results[threadId].spike_count[kk]++;
results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]] = t_old;
}
else {
spike[kk] = 0; // if neuron did not spike
}
}
for(kk = 0; kk < N; kk++) {
for(k = 0; k < N; k++) {
if(k != kk && spike[kk] != 1 && spike[k]==1) {
push_up_amnt[kk] = push_up_amnt[kk] +
(epsilon) * w[iL * Ng_max + ig][kk][k] * spike[k]; // weight matrix of this (iL, ig) network, matching how store_weights fills w
push_up_flag[kk] = 1;
}
}
if(v_old[kk] < vth) {
if(push_up_flag[kk] == 1) {
v_new[kk] = v_old[kk] + push_up_amnt[kk];
if(v_new[kk] >= vth) {
v_new[kk] = vreset;
results[threadId].spike_count[kk]++;
results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]] = t_old;
}
}
else if(push_up_flag[kk] == 0) {
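// No presynaptic spike arrived: advance the leak dynamics dv/dt = a - b*v by one
// step of the classical 4th-order Runge-Kutta scheme (stages f0..f3 below).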
f0 = a - b * v_old[kk];
f1 = a - b * (v_old[kk] + f0 * 0.5 * dt);
f2 = a - b * (v_old[kk] + f1 * 0.5 * dt);
f3 = a - b * (v_old[kk] + f2 * dt);
v_new[kk] = v_old[kk] + dt * (f0 + 2 * f1 + 2 * f2 + f3) / 6;
}
}
else if (v_old[kk] >= vth) {
v_new[kk] = vreset;
}
// copy v_new into v_old for the next time step
v_old[kk] = v_new[kk];
}
// Advance time
t_old = t_new;
} // Time loop ends
// Count number of iL-networks where all neurons fire in sync
InSync_neurons = 1;
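// Neuron 0 is the reference (it counts itself); another neuron is considered in sync
// if both its 11th- and 10th-from-last spike times match neuron 0's within tol.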
for(kk = 1; kk < N; kk++) {
tspike_diff1 = fabs(results[threadId].tspike[0 * MAXCOL + results[threadId].spike_count[0] - 11] -
results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk] - 11]);
tspike_diff2 = fabs(results[threadId].tspike[0 * MAXCOL + results[threadId].spike_count[0] - 10] -
results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk] - 10]);
if(tspike_diff1 < tol && tspike_diff2 < tol) {
InSync_neurons++; // count number of neurons firing in sync for the chosen initial condition
}
}
if(InSync_neurons == N) {
//g_mem->All_sync_count1[iL][ig]++; // count number of ic's that yield All-sync for iL-iG network.
g_mem->All_sync_count2[iL]++;
//printf("Number of instances of full sync = %d \n",All_sync_count2[iL]);
//fprintf(all_sync,"Number of instances of full sync = %d \n",All_sync_count2[0]);
}
// Write spike time on file
/*for(kk=0;kk<N;kk++) {
tmp1 = 10000*results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]-7];
tmp2 = 10000*results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]-8];
tmp3 = 10000*results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]-9];
tmp4 = 10000*results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]-10];
tmp5 = 10000*results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]-11];
tmp6 = 10000*results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]-12];
tmp7 = 10000*results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]-13];
//fprintf(spike_time,"%d \t %lu \t %lu \t %lu \t %lu \t %lu \t \%d \n",kk,tmp1,tmp2,tmp3,tmp4,tmp5,flag_unconnctd_graph);
//fprintf(spike_time,"%d \t %lu \t %lu \t %lu \t %lu \t %lu \t %lu \t %lu \n",kk,tmp1,tmp2,tmp3,tmp4,tmp5,tmp6,tmp7);
}*/
printf("Thread #%d finished with tmax = %lf\n", threadId, t_old);
}
int main() {
unsigned short num_simulations = (NL_max - NL_min) / NL_step * Ng_max * Nic;
printf("Running %d simulations with N = %d, NL_max = %d, Ng_max = %d, Nic = %d\n\n", num_simulations, N, NL_max, Ng_max, Nic);
// Initialize the weight matrices in the GPU memory
void *d_w;
cudaMalloc(&d_w, (NL_max - NL_min) * Ng_max / NL_step * N * N * sizeof(double));
store_weights<<<1, (NL_max - NL_min) / NL_step>>>((double (*)[N][N])d_w);
// Initialize the global GPU memory
global_mem g_mem;
global_mem *d_g_mem;
cudaMalloc(&d_g_mem, sizeof(global_mem));
for(unsigned short i = 0; i < NL_max - NL_min; ++i) {
for(unsigned short j = 0; j < Ng_max; ++j) {
g_mem.All_sync_count1[i][j] = 0;
}
g_mem.All_sync_count2[i] = 0;
}
cudaMemcpy(d_g_mem, &g_mem, sizeof(g_mem), cudaMemcpyHostToDevice);
// Allocate memory for storing results
simulation_result *results = (simulation_result *) malloc(sizeof(simulation_result) * num_simulations);
simulation_result *d_results;
cudaMalloc(&d_results, sizeof(simulation_result) * num_simulations);
// Get optimal grid and block dimensions
int grid_size, block_size;
cudaOccupancyMaxPotentialBlockSize(&grid_size, &block_size, simulate, 0, num_simulations);
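// The occupancy API above returns the minimum grid size for full occupancy, not a grid
// sized to cover all simulations; as an assumed safeguard, recompute it so every
// simulation gets a thread (the bounds check in simulate() discards any excess threads).
grid_size = (num_simulations + block_size - 1) / block_size;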
printf("Number of blocks = %d, Number of threads in a block = %d\n", grid_size, block_size);
// Start all simulations simultaneously
simulate<<<grid_size, block_size>>>(d_results, d_g_mem, (double (*)[N][N])d_w);
// Retrieve the results back from GPU
cudaMemcpy(results, d_results, sizeof(simulation_result) * num_simulations, cudaMemcpyDeviceToHost);
cudaMemcpy(&g_mem, d_g_mem, sizeof(g_mem), cudaMemcpyDeviceToHost);
// Open a file to store the results
FILE *file = fopen(FILENAME, "w");
// Write the results to file
for(int i = 0; i < num_simulations; ++i) {
unsigned short ic = i % Nic;
unsigned short ig = (i / Nic) % Ng_max;
unsigned short iL = i / Ng_max / Nic;
unsigned short nL_break = NL_min + iL * NL_step;
fprintf(file, "\n------------------------------------------------------------------\n");
// Simulation parameters
fprintf(file, "\n\n%d. nL_break = %d\tig = %d\tic = %d :\n\n\t", i+1, nL_break, ig, ic);
// TODO: Weight matrix
// Initial voltages
fprintf(file, "Initial voltages:\n\t");
for(unsigned short j = 0; j < N; ++j) {
fprintf(file, "%f ", results[i].v_init[j]);
}
// All_sync_count2
fprintf(file, "\n\n\tAll_sync_count2[%d]: %d\n\n\t", iL, g_mem.All_sync_count2[iL]);
// Spike times
fprintf(file, "Spike times:\n\t");
for(unsigned short j = 0; j < N; ++j) {
for(unsigned short k = 1; k <= results[i].spike_count[j]; ++k) {
fprintf(file, "%f ", results[i].tspike[j * MAXCOL + k]);
}
fprintf(file, "\n\t");
}
}
// Clean-up
fclose(file);
free(results);
cudaFree(d_w);
cudaFree(d_g_mem);
cudaFree(d_results);
return 0;
}
|
a30287cd906aa0da663b28facfcb04dac56d4634.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <iostream>
#include <helper_cuda.h>
#include <kernel.cu>
#include <GPULearn.hh>
using namespace std;
GPULearn::GPULearn(float* a_in, int len_a,
float* b_in, int len_b)
{
// check dim
assert(len_a == len_b);
a_h = a_in;
b_h = b_in;
std::cout<<"array a\n";
for(int i=0; i< len_a; i++)
std::cout << a_h[i] << " ";
std::cout<< "\n";
std::cout<<"array b\n";
for(int i=0; i< len_b; i++)
std::cout << b_h[i] << " ";
std::cout<< "\n";
length = len_a;
size_t bytes = length * sizeof(float);
// allocate device memory
checkCudaErrors(hipMalloc(&a_d, bytes));
checkCudaErrors(hipMalloc(&b_d, bytes));
checkCudaErrors(hipMalloc(&c_d, bytes));
//hipError_t err = hipMalloc((void**) &array_device, size);
//assert(err == 0);
checkCudaErrors(hipMemcpy(a_d, a_h, bytes, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(b_d, b_h, bytes, hipMemcpyHostToDevice));
//err = hipMemcpy(array_device, array_host, size, hipMemcpyHostToDevice);
//assert(err == 0);
}
void GPULearn::vectorAdd() {
dim3 blocks = dim3(256, 1, 1);
dim3 grids = dim3(BLK(length, 256), 1, 1);
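// BLK is assumed to be a ceiling-division helper (roughly (length + 255) / 256),
// presumably defined in kernel.cu or GPULearn.hh, so the grid covers all elements.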
std::cout << "length : " << length << "\n";
hipLaunchKernelGGL(( kernel_vectorAdd), dim3(grids), dim3(blocks) , 0, 0, a_d, b_d, c_d, length);
}
void GPULearn::getData() {
size_t bytes = length * sizeof(float);
checkCudaErrors(hipMemcpy(c_h, c_d, bytes, hipMemcpyDeviceToHost));
}
void GPULearn::getData_extern(float* c_out, int dim_c) {
assert(length == dim_c);
size_t bytes = length * sizeof(float);
checkCudaErrors(hipMemcpy(c_out, c_d, bytes, hipMemcpyDeviceToHost));
}
GPULearn::~GPULearn() {
checkCudaErrors(hipFree(a_d));
checkCudaErrors(hipFree(b_d));
checkCudaErrors(hipFree(c_d));
}
|
a30287cd906aa0da663b28facfcb04dac56d4634.cu
|
#include <assert.h>
#include <iostream>
#include <helper_cuda.h>
#include <kernel.cu>
#include <GPULearn.hh>
using namespace std;
GPULearn::GPULearn(float* a_in, int len_a,
float* b_in, int len_b)
{
// check dim
assert(len_a == len_b);
a_h = a_in;
b_h = b_in;
std::cout<<"array a\n";
for(int i=0; i< len_a; i++)
std::cout << a_h[i] << " ";
std::cout<< "\n";
std::cout<<"array b\n";
for(int i=0; i< len_b; i++)
std::cout << b_h[i] << " ";
std::cout<< "\n";
length = len_a;
size_t bytes = length * sizeof(float);
// allocate device memory
checkCudaErrors(cudaMalloc(&a_d, bytes));
checkCudaErrors(cudaMalloc(&b_d, bytes));
checkCudaErrors(cudaMalloc(&c_d, bytes));
//cudaError_t err = cudaMalloc((void**) &array_device, size);
//assert(err == 0);
checkCudaErrors(cudaMemcpy(a_d, a_h, bytes, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(b_d, b_h, bytes, cudaMemcpyHostToDevice));
//err = cudaMemcpy(array_device, array_host, size, cudaMemcpyHostToDevice);
//assert(err == 0);
}
void GPULearn::vectorAdd() {
dim3 blocks = dim3(256, 1, 1);
dim3 grids = dim3(BLK(length, 256), 1, 1);
std::cout << "length : " << length << "\n";
kernel_vectorAdd<<< grids, blocks >>>(a_d, b_d, c_d, length);
}
void GPULearn::getData() {
size_t bytes = length * sizeof(float);
checkCudaErrors(cudaMemcpy(c_h, c_d, bytes, cudaMemcpyDeviceToHost));
}
void GPULearn::getData_extern(float* c_out, int dim_c) {
assert(length == dim_c);
size_t bytes = length * sizeof(float);
checkCudaErrors(cudaMemcpy(c_out, c_d, bytes, cudaMemcpyDeviceToHost));
}
GPULearn::~GPULearn() {
checkCudaErrors(cudaFree(a_d));
checkCudaErrors(cudaFree(b_d));
checkCudaErrors(cudaFree(c_d));
}
|
9592867d262799da9f0fb3ea5e33310707e140ae.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "stdio.h"
#include <hip/hip_runtime.h>
#include "../../../CudaHelper.h"
const unsigned int DATE_SIZE = 1 << 24; // 16M
const unsigned int BLOCK_SIZE = 1024; // block size
const unsigned int GRID_SIZE = 8; // grid size
/*
*
 * Many blocks: each of the BLOCK_SIZE*GRID_SIZE threads executes DATE_SIZE/(BLOCK_SIZE*GRID_SIZE) computation steps,
 * and the per-block partial sums are accumulated in shared memory
 * (if BLOCK_SIZE*GRID_SIZE == DATE_SIZE, every thread executes exactly 1 step)
*
 * Global memory accesses are coalesced (adjacent threads access adjacent addresses, which is friendly to the cache lines);
 * thread k processes the data in column k (k = 0 ~ BLOCK_SIZE*GRID_SIZE-1)
*
* BlockId : bid0 |... bidGRID_SIZE-1
* --------------------------------------------------------------------------------------|----------------
* ThreadId: tid0 tid1 tidBLOCK_SIZE-1|... tidBLOCK_SIZE*GRID_SIZE-1
* --------------------------------------------------------------------------------------|----------------
* DataId : dat0 dat1 datBLOCK_SIZE-1|... datBLOCK_SIZE*GRID_SIZE-1
* DataId : datBLOCK_SIZE*GRID_SIZE+0 datBLOCK_SIZE*GRID_SIZE+1 datBLOCK_SIZE-1|... datBLOCK_SIZE*GRID_SIZE+BLOCK_SIZE*GRID_SIZE-1
* DataId : datBLOCK_SIZE*GRID_SIZE*2+0 datBLOCK_SIZE*GRID_SIZE*2+1 datBLOCK_SIZE-1|... datBLOCK_SIZE*GRID_SIZE*2+BLOCK_SIZE*GRID_SIZE-1
*
* ...
* Shared : shared[0] shared[1] shared[tidBLOCK_SIZE-1]|... shared[0] ... shared[tidBLOCK_SIZE-1]
* --------------------------------------------------------------------------------------|----------------
*SharedAdd: shared[0] |... shared[0]
* --------------------------------------------------------------------------------------|----------------
*/
// Kernel function to compute square sum of an int array to a result
__global__ void SquareSum(int *pInputData, int *pResult)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int gtid = threadIdx.x +blockDim.x * blockIdx.x;
int i = 0;
__shared__ int shared[BLOCK_SIZE];
shared[tid] = 0;
// each block has its own shared-memory array of partial results;
// within block bid, shared[tid] accumulates the partial sum of thread tid over its many loop iterations
for(i = gtid; i < DATE_SIZE; i = i + BLOCK_SIZE * GRID_SIZE)
{
shared[tid] += pInputData[i] * pInputData[i];
}
// sync threads in a block
__syncthreads();
// reduce the per-thread values of block bid in shared memory down to shared[0] and store the result in pResult[bid]
// Variant 1 (commented out below): ONLY tid=0 performs the reduction, so it runs serially
/*
* if(tid == 0)
* {
* for(i = 1; i < BLOCK_SIZE; i++)
* {
* shared[0] += shared[i];
* }
 * pResult[bid] = shared[0]; // every block stores its final reduced result in pResult[bid]
* }
*
*/
// Variant 2: Parallelize the block shared-memory reduction (tree add)
int offset = BLOCK_SIZE/2;
while(offset > 0)
{
if(tid < offset)
{
shared[tid] += shared[tid + offset];
}
offset >>=1;
// sync threads in a block
__syncthreads();
}
pResult[bid] = shared[0];
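// Worked example of the tree add above (illustration only, assuming BLOCK_SIZE = 8):
// offset=4: shared[0..3] += shared[4..7]
// offset=2: shared[0..1] += shared[2..3]
// offset=1: shared[0]    += shared[1]   -> shared[0] now holds the block's total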
// Variant 3 (commented out): Parallelize the block shared-memory reduction with an unrolled (spread) tree add
/*
* int offset = BLOCK_SIZE/2;
* pResult[bid] = shared[0];
* if(tid < 512) { shared[tid] += shared[tid+512];}
* __syncthreads();
* if(tid < 256) { shared[tid] += shared[tid+256];}
* __syncthreads();
* if(tid < 128) { shared[tid] += shared[tid+128];}
* __syncthreads();
* if(tid < 64) { shared[tid] += shared[tid+64];}
* __syncthreads();
* if(tid < 32) { shared[tid] += shared[tid+32];}
* __syncthreads();
* if(tid < 16) { shared[tid] += shared[tid+16];}
* __syncthreads();
* if(tid < 8) { shared[tid] += shared[tid+8];}
* __syncthreads();
* if(tid < 4) { shared[tid] += shared[tid+4];}
* __syncthreads();
* if(tid < 2) { shared[tid] += shared[tid+2];}
* __syncthreads();
* if(tid < 1) { shared[tid] += shared[tid+1];}
* __syncthreads();
*
* pResult[bid] = shared[0];
*/
}
int main(int argv, char* argc[])
{
// Get cuda device count
int iCount;
hipGetDeviceCount(&iCount);
if(0 == iCount)
{
printf("There is no cuda device\n");
return false;
}
// Find the first suitable device
int i;
for (i = 0; i < iCount; i++)
{
hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop, i) == hipSuccess)
{
// find a prop > CUDA 1.X device and break
if(prop.major >= 1)
{
break;
}
}
}
// can not find a prop > CUDA 1.X device and return false
if(i == iCount)
{
printf("There is no CUDA 1.X device\n");
return false;
}
// Set the suitable device to current
hipSetDevice(i);
// Malloc host data
int *pHostData = (int*)malloc(sizeof(int)*DATE_SIZE);
int *pHostBlockData = (int*)malloc(sizeof(int) * GRID_SIZE);
int hostResult = 0;
if( 0 == pHostData)
{
printf("malloc host data failed!!!\n");
return -1;
}
// Generate 16M rand data range from 0 to 4
for(int i = 0; i < DATE_SIZE; i++)
{
pHostData[i] = rand() % 5;
}
// Malloc device data
int *pDeviceData = NULL;
int *pDeviceResult = NULL;
HANDLE_CUDA_ERROR(hipMalloc((void**)&pDeviceData, sizeof(int) * DATE_SIZE));
HANDLE_CUDA_ERROR(hipMalloc((void**)&pDeviceResult, sizeof(int) * GRID_SIZE));
printf("\nGPU COMPUTE BEGIN********************\n");
// Record total time elapsed via GPU
TIME_TRACE_CUDA_EVENT_START(TotalElpasedTimeViaGPU);
// Copy host data to device
TIME_TRACE_CUDA_EVENT_START(hipMemcpyHostToDevice);
HANDLE_CUDA_ERROR(hipMemcpy(pDeviceData, pHostData, sizeof(int) * DATE_SIZE, hipMemcpyHostToDevice));
TIME_TRACE_CUDA_EVENT_STOP(hipMemcpyHostToDevice);
// Execute Kernel
TIME_TRACE_CUDA_EVENT_START(SqureSumKernel);
hipLaunchKernelGGL(( SquareSum), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, pDeviceData, pDeviceResult);
hipError_t err = hipGetLastError();
if(err != hipSuccess)
{
printf("%s\n", hipGetErrorString(err));
}
TIME_TRACE_CUDA_EVENT_STOP(SqureSumKernel);
// Copy result from device
TIME_TRACE_CUDA_EVENT_START(hipMemcpyDeviceToHost);
HANDLE_CUDA_ERROR(hipMemcpy(pHostBlockData, pDeviceResult, sizeof(int) * GRID_SIZE, hipMemcpyDeviceToHost));
TIME_TRACE_CUDA_EVENT_STOP(hipMemcpyDeviceToHost);
TIME_TRACE_CUDA_EVENT_STOP(TotalElpasedTimeViaGPU);
// Free device memory
HANDLE_CUDA_ERROR(hipFree(pDeviceData));
HANDLE_CUDA_ERROR(hipFree(pDeviceResult));
// Add every thread result in CPU
TIME_TRACE_CPU_START(AddEveryThreadData);
for (int i = 0 ; i < GRID_SIZE; i++)
{
hostResult += pHostBlockData[i];
}
TIME_TRACE_CPU_STOP(AddEveryThreadData);
// Print result
printf("Square Sum Computed Via Result GPU & CPU is %d.\n", hostResult);
// hipDeviceReset to ensure Visual Profile run correctly
HANDLE_CUDA_ERROR(hipDeviceReset());
printf("\nGPU COMPUTE END********************\n");
printf("\nCPU COMPUTE BEGIN********************\n");
// Compute on CPU for comparison
hostResult = 0;
TIME_TRACE_CPU_START(TotalElpasedTimeViaCPU);
for (int i = 0 ; i < DATE_SIZE; i++)
{
hostResult += pHostData[i] * pHostData[i];
}
TIME_TRACE_CPU_STOP(TotalElpasedTimeViaCPU);
// Free host memory
free(pHostBlockData); pHostBlockData = NULL;
free(pHostData); pHostData = NULL;
// Print result
printf("Square Sum Computed Result Via CPU is %d.\n", hostResult);
printf("\nCPU COMPUTE END********************\n");
return 0;
}
|
9592867d262799da9f0fb3ea5e33310707e140ae.cu
|
#include "stdio.h"
#include <cuda_runtime.h>
#include "../../../CudaHelper.h"
const unsigned int DATE_SIZE = 1 << 24; // 16M
const unsigned int BLOCK_SIZE = 1024; // block size
const unsigned int GRID_SIZE = 8; // grid size
/*
*
 * Many blocks: each of the BLOCK_SIZE*GRID_SIZE threads executes DATE_SIZE/(BLOCK_SIZE*GRID_SIZE) computation steps,
 * and the per-block partial sums are accumulated in shared memory
 * (if BLOCK_SIZE*GRID_SIZE == DATE_SIZE, every thread executes exactly 1 step)
*
 * Global memory accesses are coalesced (adjacent threads access adjacent addresses, which is friendly to the cache lines);
 * thread k processes the data in column k (k = 0 ~ BLOCK_SIZE*GRID_SIZE-1)
*
* BlockId : bid0 |... bidGRID_SIZE-1
* --------------------------------------------------------------------------------------|----------------
* ThreadId: tid0 tid1 tidBLOCK_SIZE-1|... tidBLOCK_SIZE*GRID_SIZE-1
* --------------------------------------------------------------------------------------|----------------
* DataId : dat0 dat1 datBLOCK_SIZE-1|... datBLOCK_SIZE*GRID_SIZE-1
* DataId : datBLOCK_SIZE*GRID_SIZE+0 datBLOCK_SIZE*GRID_SIZE+1 datBLOCK_SIZE-1|... datBLOCK_SIZE*GRID_SIZE+BLOCK_SIZE*GRID_SIZE-1
* DataId : datBLOCK_SIZE*GRID_SIZE*2+0 datBLOCK_SIZE*GRID_SIZE*2+1 datBLOCK_SIZE-1|... datBLOCK_SIZE*GRID_SIZE*2+BLOCK_SIZE*GRID_SIZE-1
*
* ...
* Shared : shared[0] shared[1] shared[tidBLOCK_SIZE-1]|... shared[0] ... shared[tidBLOCK_SIZE-1]
* --------------------------------------------------------------------------------------|----------------
*SharedAdd: shared[0] |... shared[0]
* --------------------------------------------------------------------------------------|----------------
*/
// Kernel function to compute square sum of an int array to a result
__global__ void SquareSum(int *pInputData, int *pResult)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int gtid = threadIdx.x +blockDim.x * blockIdx.x;
int i = 0;
__shared__ int shared[BLOCK_SIZE];
shared[tid] = 0;
// each block has its own shared-memory array of partial results;
// within block bid, shared[tid] accumulates the partial sum of thread tid over its many loop iterations
for(i = gtid; i < DATE_SIZE; i = i + BLOCK_SIZE * GRID_SIZE)
{
shared[tid] += pInputData[i] * pInputData[i];
}
// sync threads in a block
__syncthreads();
// reduce the per-thread values of block bid in shared memory down to shared[0] and store the result in pResult[bid]
//① ONLY tid=0 performs the reduction (commented out below), so it runs serially
/*
* if(tid == 0)
* {
* for(i = 1; i < BLOCK_SIZE; i++)
* {
* shared[0] += shared[i];
* }
 * pResult[bid] = shared[0]; // every block stores its final reduced result in pResult[bid]
* }
*
*/
//② Parallelize the block shared-memory reduction (tree add)
int offset = BLOCK_SIZE/2;
while(offset > 0)
{
if(tid < offset)
{
shared[tid] += shared[tid + offset];
}
offset >>=1;
// sync threads in a block
__syncthreads();
}
pResult[bid] = shared[0];
//③ Parallelize the block shared-memory reduction with an unrolled (spread) tree add (commented out)
/*
* int offset = BLOCK_SIZE/2;
* pResult[bid] = shared[0];
* if(tid < 512) { shared[tid] += shared[tid+512];}
* __syncthreads();
* if(tid < 256) { shared[tid] += shared[tid+256];}
* __syncthreads();
* if(tid < 128) { shared[tid] += shared[tid+128];}
* __syncthreads();
* if(tid < 64) { shared[tid] += shared[tid+64];}
* __syncthreads();
* if(tid < 32) { shared[tid] += shared[tid+32];}
* __syncthreads();
* if(tid < 16) { shared[tid] += shared[tid+16];}
* __syncthreads();
* if(tid < 8) { shared[tid] += shared[tid+8];}
* __syncthreads();
* if(tid < 4) { shared[tid] += shared[tid+4];}
* __syncthreads();
* if(tid < 2) { shared[tid] += shared[tid+2];}
* __syncthreads();
* if(tid < 1) { shared[tid] += shared[tid+1];}
* __syncthreads();
*
* pResult[bid] = shared[0];
*/
}
int main(int argv, char* argc[])
{
// Get cuda device count
int iCount;
cudaGetDeviceCount(&iCount);
if(0 == iCount)
{
printf("There is no cuda device\n");
return false;
}
// Find the first suitable device
int i;
for (i = 0; i < iCount; i++)
{
cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop, i) == cudaSuccess)
{
// find a prop > CUDA 1.X device and break
if(prop.major >= 1)
{
break;
}
}
}
// can not find a prop > CUDA 1.X device and return false
if(i == iCount)
{
printf("There is no CUDA 1.X device\n");
return false;
}
// Set the suitable device to current
cudaSetDevice(i);
// Malloc host data
int *pHostData = (int*)malloc(sizeof(int)*DATE_SIZE);
int *pHostBlockData = (int*)malloc(sizeof(int) * GRID_SIZE);
int hostResult = 0;
if( 0 == pHostData)
{
printf("malloc host data failed!!!\n");
return -1;
}
// Generate 16M rand data range from 0 to 4
for(int i = 0; i < DATE_SIZE; i++)
{
pHostData[i] = rand() % 5;
}
// Malloc device data
int *pDeviceData = NULL;
int *pDeviceResult = NULL;
HANDLE_CUDA_ERROR(cudaMalloc((void**)&pDeviceData, sizeof(int) * DATE_SIZE));
HANDLE_CUDA_ERROR(cudaMalloc((void**)&pDeviceResult, sizeof(int) * GRID_SIZE));
printf("\nGPU COMPUTE BEGIN********************\n");
// Record total time elapsed via GPU
TIME_TRACE_CUDA_EVENT_START(TotalElpasedTimeViaGPU);
// Copy host data to device
TIME_TRACE_CUDA_EVENT_START(cudaMemcpyHostToDevice);
HANDLE_CUDA_ERROR(cudaMemcpy(pDeviceData, pHostData, sizeof(int) * DATE_SIZE, cudaMemcpyHostToDevice));
TIME_TRACE_CUDA_EVENT_STOP(cudaMemcpyHostToDevice);
// Execute Kernel
TIME_TRACE_CUDA_EVENT_START(SqureSumKernel);
SquareSum<<<GRID_SIZE, BLOCK_SIZE>>>(pDeviceData, pDeviceResult);
cudaError_t err = cudaGetLastError();
if(err != cudaSuccess)
{
printf("%s\n", cudaGetErrorString(err));
}
TIME_TRACE_CUDA_EVENT_STOP(SqureSumKernel);
// Copy result from device
TIME_TRACE_CUDA_EVENT_START(cudaMemcpyDeviceToHost);
HANDLE_CUDA_ERROR(cudaMemcpy(pHostBlockData, pDeviceResult, sizeof(int) * GRID_SIZE, cudaMemcpyDeviceToHost));
TIME_TRACE_CUDA_EVENT_STOP(cudaMemcpyDeviceToHost);
TIME_TRACE_CUDA_EVENT_STOP(TotalElpasedTimeViaGPU);
// Free device memory
HANDLE_CUDA_ERROR(cudaFree(pDeviceData));
HANDLE_CUDA_ERROR(cudaFree(pDeviceResult));
// Add every thread result in CPU
TIME_TRACE_CPU_START(AddEveryThreadData);
for (int i = 0 ; i < GRID_SIZE; i++)
{
hostResult += pHostBlockData[i];
}
TIME_TRACE_CPU_STOP(AddEveryThreadData);
// Print result
printf("Square Sum Computed Via Result GPU & CPU is %d.\n", hostResult);
// cudaDeviceReset to ensure Visual Profile run correctly
HANDLE_CUDA_ERROR(cudaDeviceReset());
printf("\nGPU COMPUTE END********************\n");
printf("\nCPU COMPUTE BEGIN********************\n");
// Compute on CPU for comparison
hostResult = 0;
TIME_TRACE_CPU_START(TotalElpasedTimeViaCPU);
for (int i = 0 ; i < DATE_SIZE; i++)
{
hostResult += pHostData[i] * pHostData[i];
}
TIME_TRACE_CPU_STOP(TotalElpasedTimeViaCPU);
// Free host memory
free(pHostBlockData); pHostBlockData = NULL;
free(pHostData); pHostData = NULL;
// Print result
printf("Square Sum Computed Result Via CPU is %d.\n", hostResult);
printf("\nCPU COMPUTE END********************\n");
return 0;
}
|
OneDConstraintGPU.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "OneDConstraintGPU.cuh"
#include "EvaluatorConstraint.h"
#include <assert.h>
/*! \file OneDConstraintGPU.cu
\brief Defines GPU kernel code for calculating one dimensional constraint forces. Used by OneDConstraintGPU.
*/
//! Kernel for calculating one dimensional constraint forces on the GPU
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch pitch of 2D virial array
\param d_group_members List of members in the group
\param group_size number of members in the group
\param N number of particles in system
\param d_pos particle positions on device
\param d_vel particle velocities and masses on device
\param d_net_force Total unconstrained net force on the particles
\param deltaT step size from the Integrator
*/
extern "C" __global__
void gpu_compute_one_d_constraint_forces_kernel(Scalar4* d_force,
Scalar* d_virial,
const unsigned int virial_pitch,
const unsigned int *d_group_members,
unsigned int group_size,
const unsigned int N,
const Scalar4 *d_pos,
const Scalar4 *d_vel,
const Scalar4 *d_net_force,
Scalar deltaT,
Scalar3 m_vec)
{
// start by identifying which particle we are to handle
// determine which particle this thread works on
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= group_size)
return;
unsigned int idx = d_group_members[group_idx];
// read in position, velocity, net force, and mass
Scalar4 pos = d_pos[idx];
Scalar4 vel = d_vel[idx];
Scalar4 net_force = d_net_force[idx];
Scalar m = vel.w;
// convert to Scalar3's for passing to the evaluators
Scalar3 X = make_scalar3(pos.x, pos.y, pos.z);
Scalar3 V = make_scalar3(vel.x, vel.y, vel.z);
Scalar3 F = make_scalar3(net_force.x, net_force.y, net_force.z);
// evaluate the constraint position
EvaluatorConstraint constraint(X, V, F, m, deltaT);
// evaluate the constraint force
Scalar3 FC;
Scalar virial[6];
Scalar3 U = constraint.evalU();
Scalar3 D = make_scalar3((U.x - X.x), (U.y - X.y), (U.z - X.z));
Scalar n = (D.x*m_vec.x + D.y*m_vec.y + D.z*m_vec.z)/(m_vec.x*m_vec.x + m_vec.y*m_vec.y + m_vec.z*m_vec.z);
Scalar3 C = make_scalar3((n*m_vec.x + X.x), (n*m_vec.y + X.y), (n*m_vec.z + X.z));
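// C is X plus the projection of the unconstrained displacement D = U - X onto the
// constraint direction m_vec: the particle is only allowed to move along m_vec.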
constraint.evalConstraintForce(FC, virial, C);
// now that the force calculation is complete, write out the results
d_force[idx] = make_scalar4(FC.x, FC.y, FC.z, Scalar(0.0));
for (unsigned int i = 0; i < 6; i++)
d_virial[i*virial_pitch+idx] = virial[i];
}
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch pitch of 2D virial array
\param d_group_members List of members in the group
\param group_size number of members in the group
\param N number of particles
\param d_pos particle positions on the device
\param d_vel particle velocities on the device
\param d_net_force Total unconstrained net force on the particles
\param deltaT step size from the Integrator
\param block_size Block size to execute on the GPU
\returns Any error code resulting from the kernel launch
\note Always returns hipSuccess in release builds to avoid the hipDeviceSynchronize()
*/
hipError_t gpu_compute_one_d_constraint_forces(Scalar4* d_force,
Scalar* d_virial,
const unsigned int virial_pitch,
const unsigned int *d_group_members,
unsigned int group_size,
const unsigned int N,
const Scalar4 *d_pos,
const Scalar4 *d_vel,
const Scalar4 *d_net_force,
Scalar deltaT,
unsigned int block_size,
Scalar3 m_vec)
{
assert(d_group_members);
assert(d_net_force);
// setup the grid to run the kernel
dim3 grid( group_size / block_size + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
hipMemset(d_force, 0, sizeof(Scalar4)*N);
hipMemset(d_virial, 0, 6*sizeof(Scalar)*virial_pitch);
hipLaunchKernelGGL(( gpu_compute_one_d_constraint_forces_kernel), dim3(grid), dim3(threads), 0, 0, d_force,
d_virial,
virial_pitch,
d_group_members,
group_size,
N,
d_pos,
d_vel,
d_net_force,
deltaT,
m_vec);
return hipSuccess;
}
|
OneDConstraintGPU.cu
|
// Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "OneDConstraintGPU.cuh"
#include "EvaluatorConstraint.h"
#include <assert.h>
/*! \file OneDConstraintGPU.cu
\brief Defines GPU kernel code for calculating one dimensional constraint forces. Used by OneDConstraintGPU.
*/
//! Kernel for calculating one dimensional constraint forces on the GPU
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch pitch of 2D virial array
\param d_group_members List of members in the group
\param group_size number of members in the group
\param N number of particles in system
\param d_pos particle positions on device
\param d_vel particle velocities and masses on device
\param d_net_force Total unconstrained net force on the particles
\param deltaT step size from the Integrator
*/
extern "C" __global__
void gpu_compute_one_d_constraint_forces_kernel(Scalar4* d_force,
Scalar* d_virial,
const unsigned int virial_pitch,
const unsigned int *d_group_members,
unsigned int group_size,
const unsigned int N,
const Scalar4 *d_pos,
const Scalar4 *d_vel,
const Scalar4 *d_net_force,
Scalar deltaT,
Scalar3 m_vec)
{
// start by identifying which particle we are to handle
// determine which particle this thread works on
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= group_size)
return;
unsigned int idx = d_group_members[group_idx];
// read in position, velocity, net force, and mass
Scalar4 pos = d_pos[idx];
Scalar4 vel = d_vel[idx];
Scalar4 net_force = d_net_force[idx];
Scalar m = vel.w;
// convert to Scalar3's for passing to the evaluators
Scalar3 X = make_scalar3(pos.x, pos.y, pos.z);
Scalar3 V = make_scalar3(vel.x, vel.y, vel.z);
Scalar3 F = make_scalar3(net_force.x, net_force.y, net_force.z);
// evaluate the constraint position
EvaluatorConstraint constraint(X, V, F, m, deltaT);
// evaluate the constraint force
Scalar3 FC;
Scalar virial[6];
Scalar3 U = constraint.evalU();
Scalar3 D = make_scalar3((U.x - X.x), (U.y - X.y), (U.z - X.z));
Scalar n = (D.x*m_vec.x + D.y*m_vec.y + D.z*m_vec.z)/(m_vec.x*m_vec.x + m_vec.y*m_vec.y + m_vec.z*m_vec.z);
Scalar3 C = make_scalar3((n*m_vec.x + X.x), (n*m_vec.y + X.y), (n*m_vec.z + X.z));
constraint.evalConstraintForce(FC, virial, C);
// now that the force calculation is complete, write out the results
d_force[idx] = make_scalar4(FC.x, FC.y, FC.z, Scalar(0.0));
for (unsigned int i = 0; i < 6; i++)
d_virial[i*virial_pitch+idx] = virial[i];
}
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch pitch of 2D virial array
\param d_group_members List of members in the group
\param group_size number of members in the group
\param N number of particles
\param d_pos particle positions on the device
\param d_vel particle velocities on the device
\param d_net_force Total unconstrained net force on the particles
\param deltaT step size from the Integrator
\param block_size Block size to execute on the GPU
\returns Any error code resulting from the kernel launch
\note Always returns cudaSuccess in release builds to avoid the cudaThreadSynchronize()
*/
cudaError_t gpu_compute_one_d_constraint_forces(Scalar4* d_force,
Scalar* d_virial,
const unsigned int virial_pitch,
const unsigned int *d_group_members,
unsigned int group_size,
const unsigned int N,
const Scalar4 *d_pos,
const Scalar4 *d_vel,
const Scalar4 *d_net_force,
Scalar deltaT,
unsigned int block_size,
Scalar3 m_vec)
{
assert(d_group_members);
assert(d_net_force);
// setup the grid to run the kernel
dim3 grid( group_size / block_size + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
cudaMemset(d_force, 0, sizeof(Scalar4)*N);
cudaMemset(d_virial, 0, 6*sizeof(Scalar)*virial_pitch);
gpu_compute_one_d_constraint_forces_kernel<<< grid, threads>>>(d_force,
d_virial,
virial_pitch,
d_group_members,
group_size,
N,
d_pos,
d_vel,
d_net_force,
deltaT,
m_vec);
return cudaSuccess;
}
|
a1f903a8b9c3638a1ba34086cad53d024c063e2e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
}
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
typedef struct {
float *hA, *hB, *hC;
float *dA, *dB, *dC;
int element_count;
size_t vector_bytes;
int v_threadsPerBlock;
int v_blocksPerGrid;
hipStream_t stream;
} ThreadContext;
__global__ void vectorAdd(const float *A, const float *B, float *C, int numElements) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
|
a1f903a8b9c3638a1ba34086cad53d024c063e2e.cu
|
#include "includes.h"
extern "C" {
}
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
typedef struct {
float *hA, *hB, *hC;
float *dA, *dB, *dC;
int element_count;
size_t vector_bytes;
int v_threadsPerBlock;
int v_blocksPerGrid;
cudaStream_t stream;
} ThreadContext;
__global__ void vectorAdd(const float *A, const float *B, float *C, int numElements) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
|
a71bbdaf8cf6fcaed2b061820cac46f5e61ea7d9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include<stdlib.h>
#include "error.cuh"
void addArrays(float *a, float *b, float *c, int N);
int main(){
const int N = 100000;
const int M = sizeof(float) * N;
float *h_a, *h_b, *h_c;
float value_a = 1.11, value_b = 2.22;
int i;
h_a = (float*)malloc(M);
h_b = (float*)malloc(M);
h_c = (float*)malloc(M);
for(i = 0; i < N; i++)
h_a[i] = value_a;
for(i = 0; i < N; i++)
h_b[i] = value_b;
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start);
hipEventQuery(start);
addArrays(h_a, h_b, h_c, N);
hipEventRecord(end);
hipEventSynchronize(end);
float time_passed;
hipEventElapsedTime(&time_passed, start, end);
printf("Time passed %f ms.\n", time_passed);
hipEventDestroy(start);
hipEventDestroy(end);
free(h_a);
free(h_b);
free(h_c);
return 0;
}
void addArrays(float *a, float *b, float *c, int N){
for(int i = 0; i < N; i++){
c[i] = a[i] + b[i];
}
}
|
a71bbdaf8cf6fcaed2b061820cac46f5e61ea7d9.cu
|
#include<stdio.h>
#include<stdlib.h>
#include "error.cuh"
void addArrays(float *a, float *b, float *c, int N);
int main(){
const int N = 100000;
const int M = sizeof(float) * N;
float *h_a, *h_b, *h_c;
float value_a = 1.11, value_b = 2.22;
int i;
h_a = (float*)malloc(M);
h_b = (float*)malloc(M);
h_c = (float*)malloc(M);
for(i = 0; i < N; i++)
h_a[i] = value_a;
for(i = 0; i < N; i++)
h_b[i] = value_b;
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start);
cudaEventQuery(start);
addArrays(h_a, h_b, h_c, N);
cudaEventRecord(end);
cudaEventSynchronize(end);
float time_passed;
cudaEventElapsedTime(&time_passed, start, end);
printf("Time passed %f ms.\n", time_passed);
cudaEventDestroy(start);
cudaEventDestroy(end);
free(h_a);
free(h_b);
free(h_c);
return 0;
}
void addArrays(float *a, float *b, float *c, int N){
for(int i = 0; i < N; i++){
c[i] = a[i] + b[i];
}
}
|
494e958fefc38d08eebca1fa276dd8e0e0b1b40a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<math.h>
#define abs(x) (x > 0 ? x : -(x))
#define MAX(a,b) (a > b ? a : b)
#define MIN(a,b) (a < b ? a : b)
#define PI 3.1415926
#define GRIDDIM 32
#define BLOCKDIM 1024 //32*32
extern "C" void TOF_dist_backprojection(float *image_bp, const float *proj_value, const float *tof_value,
const float *x1l, const float *y1l, const float *x1r, const float *y1r,
const float *x2l, const float *y2l, const float *x2r, const float *y2r,
const float time_resolution, const float dx, const float dy,
const int nx, const int ny, const int event_num);
// define TOF-dist-bp
__device__ void TOF_dist_bp(float *image_bp, const float proj_value, const float tof_value,
const float x1l, const float y1l, const float x1r, const float y1r,
const float x2l, const float y2l, const float x2r, const float y2r,
const float time_resolution, const float dx, const float dy,
const int nx, const int ny)
{
const float nx2 = nx/2;
const float ny2 = ny/2;
const float tof_sigma = time_resolution * 0.3 / 2.355 / 2;
const float tof_sigma_2 = tof_sigma * tof_sigma;
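// Assumed unit convention: the factor 0.3 converts the timing resolution to a distance via the
// speed of light, /2 maps a time difference to a position offset along the LOR,
// and /2.355 converts a FWHM into a Gaussian sigma.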
const float x1c = (x1l + x1r) / 2;
const float y1c = (y1l + y1r) / 2;
const float x2c = (x2l + x2r) / 2;
const float y2c = (y2l + y2r) / 2;
const float L = sqrtf((x1c - x2c) * (x1c - x2c) + (y1c - y2c) * (y1c - y2c));
const float ratio1 = (1 - (tof_value / L)) / 2;
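// ratio1 is the fractional position of the TOF-estimated annihilation point along the
// LOR, measured from detector 1 (tof_value = 0 gives the midpoint, ratio1 = 0.5).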
if (abs(x1c - x2c) > abs(y1c - y2c))
{
for (int ix = 0; ix < nx; ix++)
{
float xc = (ix - nx2 + 0.5) * dx;
float tof_bin = dx;
float d2_tof, w_tof;
if (tof_sigma > 0)
{
d2_tof = ((xc-x1c) / (x2c-x1c) - ratio1)*L;
if (d2_tof <= 3 * tof_sigma)
{
w_tof = expf(-0.5 * d2_tof * d2_tof / tof_sigma_2) / sqrtf(2.0 * PI * tof_sigma_2) * tof_bin;
}
else
{
w_tof = 0.0;
}
}
else
{
w_tof = 1.0;
}
//d1l-d2r
float kylr = (y1l-y2r)/(x1l-x2r);
float ylr = kylr * (xc - x1l) + y1l + ny2 * dy;
//d1r-d2l
float kyrl = (y1r - y2l) / (x1r - x2l);
float yrl = kyrl * (xc - x1r) + y1r + ny2 * dy;
float yy1 = MIN(ylr,yrl); // smaller y of the intersections of the detector edges with the line x = xc
float yy2 = MAX(ylr,yrl);
int cy1 = (int)floorf(yy1/dy);
int cy2 = (int)floorf(yy2/dy);
for (int iy=(int)MAX(0, cy1); iy < (int)MIN(ny, cy2+1); iy++)
{
float dist_w = (MIN((iy+1) * dy,yy2) - MAX(iy * dy,yy1)) / dy;
atomicAdd(image_bp + (ix + iy * nx), proj_value * dist_w * w_tof);
}
}
}
else
{
for (int iy=0; iy < ny; iy++)
{
float yc = (iy - ny2 + 0.5) * dy;
float tof_bin = dy;
float d2_tof, w_tof;
if (tof_sigma > 0)
{
d2_tof = (((yc-y1c) / (y2c-y1c)) - ratio1) * L;
if (d2_tof <= 3 * tof_sigma)
{
w_tof = expf(-0.5 * d2_tof * d2_tof / tof_sigma_2) / sqrtf(2.0 * PI * tof_sigma_2) * tof_bin;
}
else
{
w_tof = 0.0;
}
}
else
{
w_tof = 1.0;
}
//d1l-d2r:
float kxlr = (x1l-x2r)/(y1l-y2r);
float xlr = kxlr * (yc-y1l)+x1l+nx2 * dx;
//d1r-d2l:
float kxrl = (x1r-x2l)/(y1r-y2l);
float xrl = kxrl * (yc-y1r)+x1r+nx2 * dx;
float xx1 = MIN(xlr,xrl);
float xx2 = MAX(xlr,xrl);
int cx1 = (int)floorf(xx1/dx);
int cx2 = (int)floorf(xx2/dx);
for (int ix=(int)MAX(0, cx1); ix < (int)MIN(nx, cx2+1); ix++)
{
float dist_w = (MIN((ix+1) * dx,xx2) - MAX(ix * dx,xx1))/dx;
atomicAdd(image_bp + (ix + iy * nx), proj_value * dist_w * w_tof);
}
}
}
}
__global__ void TOF_dist_bp_kernel(float *image_bp, const float *proj_value, const float *tof_value,
const float *x1l, const float *y1l, const float *x1r, const float *y1r,
const float *x2l, const float *y2l, const float *x2r, const float *y2r,
const float time_resolution, const float dx, const float dy,
const int nx, const int ny, const int event_num)
{
int step = blockDim.x * gridDim.x;
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < event_num; idx += step)
{
TOF_dist_bp(image_bp, proj_value[idx], tof_value[idx],
x1l[idx], y1l[idx], x1r[idx], y1r[idx],
x2l[idx], y2l[idx], x2r[idx], y2r[idx],
time_resolution, dx, dy,
nx, ny);
}
}
void TOF_dist_backprojection(float *image_bp, const float *proj_value, const float *tof_value,
const float *x1l, const float *y1l, const float *x1r, const float *y1r,
const float *x2l, const float *y2l, const float *x2r, const float *y2r,
const float time_resolution, const float dx, const float dy,
const int nx, const int ny, const int event_num)
{
float *image_bp_d, *proj_value_d, *tof_value_d, *x1l_d, *y1l_d, *x1r_d, *y1r_d, *x2l_d, *y2l_d, *x2r_d, *y2r_d;
// allocate device memory
hipMalloc(&image_bp_d, nx * ny * sizeof(float));
hipMalloc(&proj_value_d, event_num * sizeof(float));
hipMalloc(&tof_value_d, event_num * sizeof(float));
hipMalloc(&x1l_d, event_num * sizeof(float));
hipMalloc(&y1l_d, event_num * sizeof(float));
hipMalloc(&x1r_d, event_num * sizeof(float));
hipMalloc(&y1r_d, event_num * sizeof(float));
hipMalloc(&x2l_d, event_num * sizeof(float));
hipMalloc(&y2l_d, event_num * sizeof(float));
hipMalloc(&x2r_d, event_num * sizeof(float));
hipMalloc(&y2r_d, event_num * sizeof(float));
// copy data from host to device
hipMemcpy(proj_value_d, proj_value, event_num * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(tof_value_d, tof_value, event_num * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(x1l_d, x1l, event_num * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(y1l_d, y1l, event_num * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(x1r_d, x1r, event_num * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(y1r_d, y1r, event_num * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(x2l_d, x2l, event_num * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(y2l_d, y2l, event_num * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(x2r_d, x2r, event_num * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(y2r_d, y2r, event_num * sizeof(float), hipMemcpyHostToDevice);
// launch the kernel
hipLaunchKernelGGL(( TOF_dist_bp_kernel), dim3(GRIDDIM), dim3(BLOCKDIM), 0, 0, image_bp_d, proj_value_d, tof_value_d, x1l_d, y1l_d, x1r_d, y1r_d,
x2l_d, y2l_d, x2r_d, y2r_d, time_resolution, dx, dy, nx, ny, event_num);
// synchronize the device so the result can be safely accessed
hipDeviceSynchronize();
// copy the result from device back to host
hipMemcpy(image_bp, image_bp_d, nx * ny * sizeof(float), hipMemcpyDeviceToHost);
// free device memory
//hipFree(image_bp_d);
hipFree(proj_value_d);
hipFree(tof_value_d);
hipFree(x1l_d);
hipFree(y1l_d);
hipFree(x1r_d);
hipFree(y1r_d);
hipFree(x2l_d);
hipFree(y2l_d);
hipFree(x2r_d);
hipFree(y2r_d);
}
//nvcc -Xcompiler -fPIC -shared -lcudart -o proj.so proj.cu
|
494e958fefc38d08eebca1fa276dd8e0e0b1b40a.cu
|
#include<stdio.h>
#include<math.h>
#define abs(x) (x > 0 ? x : -(x))
#define MAX(a,b) (a > b ? a : b)
#define MIN(a,b) (a < b ? a : b)
#define PI 3.1415926
#define GRIDDIM 32
#define BLOCKDIM 1024 //32*32
extern "C" void TOF_dist_backprojection(float *image_bp, const float *proj_value, const float *tof_value,
const float *x1l, const float *y1l, const float *x1r, const float *y1r,
const float *x2l, const float *y2l, const float *x2r, const float *y2r,
const float time_resolution, const float dx, const float dy,
const int nx, const int ny, const int event_num);
// define TOF-dist-bp
__device__ void TOF_dist_bp(float *image_bp, const float proj_value, const float tof_value,
const float x1l, const float y1l, const float x1r, const float y1r,
const float x2l, const float y2l, const float x2r, const float y2r,
const float time_resolution, const float dx, const float dy,
const int nx, const int ny)
{
const float nx2 = nx/2;
const float ny2 = ny/2;
const float tof_sigma = time_resolution * 0.3 / 2.355 / 2;
const float tof_sigma_2 = tof_sigma * tof_sigma;
const float x1c = (x1l + x1r) / 2;
const float y1c = (y1l + y1r) / 2;
const float x2c = (x2l + x2r) / 2;
const float y2c = (y2l + y2r) / 2;
const float L = sqrtf((x1c - x2c) * (x1c - x2c) + (y1c - y2c) * (y1c - y2c));
const float ratio1 = (1 - (tof_value / L)) / 2;
if (abs(x1c - x2c) > abs(y1c - y2c))
{
for (int ix = 0; ix < nx; ix++)
{
float xc = (ix - nx2 + 0.5) * dx;
float tof_bin = dx;
float d2_tof, w_tof;
if (tof_sigma > 0)
{
d2_tof = ((xc-x1c) / (x2c-x1c) - ratio1)*L;
if (d2_tof <= 3 * tof_sigma)
{
w_tof = expf(-0.5 * d2_tof * d2_tof / tof_sigma_2) / sqrtf(2.0 * PI * tof_sigma_2) * tof_bin;
}
else
{
w_tof = 0.0;
}
}
else
{
w_tof = 1.0;
}
//d1l-d2r
float kylr = (y1l-y2r)/(x1l-x2r);
float ylr = kylr * (xc - x1l) + y1l + ny2 * dy;
//d1r-d2l
float kyrl = (y1r - y2l) / (x1r - x2l);
float yrl = kyrl * (xc - x1r) + y1r + ny2 * dy;
float yy1 = MIN(ylr,yrl); // smaller y of the intersections of the detector edges with the line x = xc
float yy2 = MAX(ylr,yrl);
int cy1 = (int)floorf(yy1/dy);
int cy2 = (int)floorf(yy2/dy);
for (int iy=(int)MAX(0, cy1); iy < (int)MIN(ny, cy2+1); iy++)
{
float dist_w = (MIN((iy+1) * dy,yy2) - MAX(iy * dy,yy1)) / dy;
atomicAdd(image_bp + (ix + iy * nx), proj_value * dist_w * w_tof);
}
}
}
else
{
for (int iy=0; iy < ny; iy++)
{
float yc = (iy - ny2 + 0.5) * dy;
float tof_bin = dy;
float d2_tof, w_tof;
if (tof_sigma > 0)
{
d2_tof = (((yc-y1c) / (y2c-y1c)) - ratio1) * L;
if (d2_tof <= 3 * tof_sigma)
{
w_tof = expf(-0.5 * d2_tof * d2_tof / tof_sigma_2) / sqrtf(2.0 * PI * tof_sigma_2) * tof_bin;
}
else
{
w_tof = 0.0;
}
}
else
{
w_tof = 1.0;
}
//d1l-d2r:
float kxlr = (x1l-x2r)/(y1l-y2r);
float xlr = kxlr * (yc-y1l)+x1l+nx2 * dx;
//d1r-d2l:
float kxrl = (x1r-x2l)/(y1r-y2l);
float xrl = kxrl * (yc-y1r)+x1r+nx2 * dx;
float xx1 = MIN(xlr,xrl);
float xx2 = MAX(xlr,xrl);
int cx1 = (int)floorf(xx1/dx);
int cx2 = (int)floorf(xx2/dx);
for (int ix=(int)MAX(0, cx1); ix < (int)MIN(nx, cx2+1); ix++)
{
float dist_w = (MIN((ix+1) * dx,xx2) - MAX(ix * dx,xx1))/dx;
atomicAdd(image_bp + (ix + iy * nx), proj_value * dist_w * w_tof);
}
}
}
}
__global__ void TOF_dist_bp_kernel(float *image_bp, const float *proj_value, const float *tof_value,
const float *x1l, const float *y1l, const float *x1r, const float *y1r,
const float *x2l, const float *y2l, const float *x2r, const float *y2r,
const float time_resolution, const float dx, const float dy,
const int nx, const int ny, const int event_num)
{
int step = blockDim.x * gridDim.x;
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < event_num; idx += step)
{
TOF_dist_bp(image_bp, proj_value[idx], tof_value[idx],
x1l[idx], y1l[idx], x1r[idx], y1r[idx],
x2l[idx], y2l[idx], x2r[idx], y2r[idx],
time_resolution, dx, dy,
nx, ny);
}
}
void TOF_dist_backprojection(float *image_bp, const float *proj_value, const float *tof_value,
const float *x1l, const float *y1l, const float *x1r, const float *y1r,
const float *x2l, const float *y2l, const float *x2r, const float *y2r,
const float time_resolution, const float dx, const float dy,
const int nx, const int ny, const int event_num)
{
float *image_bp_d, *proj_value_d, *tof_value_d, *x1l_d, *y1l_d, *x1r_d, *y1r_d, *x2l_d, *y2l_d, *x2r_d, *y2r_d;
// allocate device memory
cudaMalloc(&image_bp_d, nx * ny * sizeof(float));
cudaMalloc(&proj_value_d, event_num * sizeof(float));
cudaMalloc(&tof_value_d, event_num * sizeof(float));
cudaMalloc(&x1l_d, event_num * sizeof(float));
cudaMalloc(&y1l_d, event_num * sizeof(float));
cudaMalloc(&x1r_d, event_num * sizeof(float));
cudaMalloc(&y1r_d, event_num * sizeof(float));
cudaMalloc(&x2l_d, event_num * sizeof(float));
cudaMalloc(&y2l_d, event_num * sizeof(float));
cudaMalloc(&x2r_d, event_num * sizeof(float));
cudaMalloc(&y2r_d, event_num * sizeof(float));
// copy data from host to device
cudaMemcpy(proj_value_d, proj_value, event_num * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(tof_value_d, tof_value, event_num * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(x1l_d, x1l, event_num * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(y1l_d, y1l, event_num * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(x1r_d, x1r, event_num * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(y1r_d, y1r, event_num * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(x2l_d, x2l, event_num * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(y2l_d, y2l, event_num * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(x2r_d, x2r, event_num * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(y2r_d, y2r, event_num * sizeof(float), cudaMemcpyHostToDevice);
// launch the kernel
TOF_dist_bp_kernel<<<GRIDDIM, BLOCKDIM>>>(image_bp_d, proj_value_d, tof_value_d, x1l_d, y1l_d, x1r_d, y1r_d,
x2l_d, y2l_d, x2r_d, y2r_d, time_resolution, dx, dy, nx, ny, event_num);
// synchronize the device so the result can be safely accessed
cudaDeviceSynchronize();
// copy the result from device back to host
cudaMemcpy(image_bp, image_bp_d, nx * ny * sizeof(float), cudaMemcpyDeviceToHost);
// free device memory
//cudaFree(image_bp_d);
cudaFree(proj_value_d);
cudaFree(tof_value_d);
cudaFree(x1l_d);
cudaFree(y1l_d);
cudaFree(x1r_d);
cudaFree(y1r_d);
cudaFree(x2l_d);
cudaFree(y2l_d);
cudaFree(x2r_d);
cudaFree(y2r_d);
}
//nvcc -Xcompiler -fPIC -shared -lcudart -o proj.so proj.cu
|
85bad081b5499e3c5bdfa1938a857f17f4e4ad15.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <hip/hip_runtime_api.h> // hipMalloc, hipMemcpy, etc.
#include <hip/hip_complex.h> // hipDoubleComplex
#include <custatevec.h> // custatevecApplyMatrix
#include <stdio.h> // printf
#include <stdlib.h> // EXIT_FAILURE
#include "helper.hpp" // HANDLE_ERROR, HANDLE_CUDA_ERROR
int main(void) {
const int nIndexBits = 3;
const int nSvSize = (1 << nIndexBits);
const int nMaxShots = 5;
const int nShots = 5;
const int bitStringLen = 2;
const int bitOrdering[] = {0, 1};
custatevecIndex_t bitStrings[nShots];
custatevecIndex_t bitStrings_result[] = {0b00, 0b01, 0b10, 0b11, 0b11};
hipDoubleComplex h_sv[] = {{ 0.0, 0.0}, { 0.0, 0.1}, { 0.1, 0.1}, { 0.1, 0.2},
{ 0.2, 0.2}, { 0.3, 0.3}, { 0.3, 0.4}, { 0.4, 0.5}};
// In a real application, random numbers in the range [0, 1) would be used.
const double randnums[] = {0.1, 0.8, 0.4, 0.6, 0.2};
custatevecSamplerDescriptor_t sampler;
hipDoubleComplex *d_sv;
HANDLE_CUDA_ERROR( hipMalloc((void**)&d_sv, nSvSize * sizeof(hipDoubleComplex)) );
HANDLE_CUDA_ERROR( hipMemcpy(d_sv, h_sv, nSvSize * sizeof(hipDoubleComplex),
hipMemcpyHostToDevice) );
//----------------------------------------------------------------------------------------------
// custatevec handle initialization
custatevecHandle_t handle;
HANDLE_ERROR( custatevecCreate(&handle) );
void* extraWorkspace = nullptr;
size_t extraWorkspaceSizeInBytes = 0;
// create sampler and check the size of external workspace
HANDLE_ERROR( custatevecSamplerCreate(
handle, d_sv, HIP_C_64F, nIndexBits, &sampler, nMaxShots,
&extraWorkspaceSizeInBytes) );
// allocate external workspace if necessary
if (extraWorkspaceSizeInBytes > 0)
HANDLE_CUDA_ERROR( hipMalloc(&extraWorkspace, extraWorkspaceSizeInBytes) );
// sample preprocess
HANDLE_ERROR( custatevecSamplerPreprocess(
handle, sampler, extraWorkspace, extraWorkspaceSizeInBytes) );
// sample bit strings
HANDLE_ERROR( custatevecSamplerSample(
handle, sampler, bitStrings, bitOrdering, bitStringLen, randnums, nShots,
CUSTATEVEC_SAMPLER_OUTPUT_ASCENDING_ORDER) );
// destroy descriptor and handle
HANDLE_ERROR( custatevecSamplerDestroy(sampler) );
HANDLE_ERROR( custatevecDestroy(handle) );
//----------------------------------------------------------------------------------------------
HANDLE_CUDA_ERROR( hipMemcpy(h_sv, d_sv, nSvSize * sizeof(hipDoubleComplex),
hipMemcpyDeviceToHost) );
bool correct = true;
for (int i = 0; i < nShots; i++) {
if (bitStrings[i] != bitStrings_result[i]) {
correct = false;
break;
}
}
HANDLE_CUDA_ERROR( hipFree(d_sv) );
if (extraWorkspaceSizeInBytes)
HANDLE_CUDA_ERROR( hipFree(extraWorkspace) );
if (correct) {
printf("sampler example PASSED\n");
return EXIT_SUCCESS;
}
else {
printf("sampler example FAILED: wrong result\n");
return EXIT_FAILURE;
}
}
|
85bad081b5499e3c5bdfa1938a857f17f4e4ad15.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <cuda_runtime_api.h> // cudaMalloc, cudaMemcpy, etc.
#include <cuComplex.h> // cuDoubleComplex
#include <custatevec.h> // custatevecApplyMatrix
#include <stdio.h> // printf
#include <stdlib.h> // EXIT_FAILURE
#include "helper.hpp" // HANDLE_ERROR, HANDLE_CUDA_ERROR
int main(void) {
const int nIndexBits = 3;
const int nSvSize = (1 << nIndexBits);
const int nMaxShots = 5;
const int nShots = 5;
const int bitStringLen = 2;
const int bitOrdering[] = {0, 1};
custatevecIndex_t bitStrings[nShots];
custatevecIndex_t bitStrings_result[] = {0b00, 0b01, 0b10, 0b11, 0b11};
cuDoubleComplex h_sv[] = {{ 0.0, 0.0}, { 0.0, 0.1}, { 0.1, 0.1}, { 0.1, 0.2},
{ 0.2, 0.2}, { 0.3, 0.3}, { 0.3, 0.4}, { 0.4, 0.5}};
// In a real application, random numbers in the range [0, 1) would be used.
const double randnums[] = {0.1, 0.8, 0.4, 0.6, 0.2};
custatevecSamplerDescriptor_t sampler;
cuDoubleComplex *d_sv;
HANDLE_CUDA_ERROR( cudaMalloc((void**)&d_sv, nSvSize * sizeof(cuDoubleComplex)) );
HANDLE_CUDA_ERROR( cudaMemcpy(d_sv, h_sv, nSvSize * sizeof(cuDoubleComplex),
cudaMemcpyHostToDevice) );
//----------------------------------------------------------------------------------------------
// custatevec handle initialization
custatevecHandle_t handle;
HANDLE_ERROR( custatevecCreate(&handle) );
void* extraWorkspace = nullptr;
size_t extraWorkspaceSizeInBytes = 0;
// create sampler and check the size of external workspace
HANDLE_ERROR( custatevecSamplerCreate(
handle, d_sv, CUDA_C_64F, nIndexBits, &sampler, nMaxShots,
&extraWorkspaceSizeInBytes) );
// allocate external workspace if necessary
if (extraWorkspaceSizeInBytes > 0)
HANDLE_CUDA_ERROR( cudaMalloc(&extraWorkspace, extraWorkspaceSizeInBytes) );
// sample preprocess
HANDLE_ERROR( custatevecSamplerPreprocess(
handle, sampler, extraWorkspace, extraWorkspaceSizeInBytes) );
// sample bit strings
HANDLE_ERROR( custatevecSamplerSample(
handle, sampler, bitStrings, bitOrdering, bitStringLen, randnums, nShots,
CUSTATEVEC_SAMPLER_OUTPUT_ASCENDING_ORDER) );
// destroy descriptor and handle
HANDLE_ERROR( custatevecSamplerDestroy(sampler) );
HANDLE_ERROR( custatevecDestroy(handle) );
//----------------------------------------------------------------------------------------------
HANDLE_CUDA_ERROR( cudaMemcpy(h_sv, d_sv, nSvSize * sizeof(cuDoubleComplex),
cudaMemcpyDeviceToHost) );
bool correct = true;
for (int i = 0; i < nShots; i++) {
if (bitStrings[i] != bitStrings_result[i]) {
correct = false;
break;
}
}
HANDLE_CUDA_ERROR( cudaFree(d_sv) );
if (extraWorkspaceSizeInBytes)
HANDLE_CUDA_ERROR( cudaFree(extraWorkspace) );
if (correct) {
printf("sampler example PASSED\n");
return EXIT_SUCCESS;
}
else {
printf("sampler example FAILED: wrong result\n");
return EXIT_FAILURE;
}
}
|
29cad930da79f1c98847b23641adaa2ad81dcfbc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CubeRender.cuh"
namespace cuber {
void setRenderRect(int * src)
{ hipMemcpyToSymbol(c_renderRect, src, 16); }
void setFrustum(float * src)
{ hipMemcpyToSymbol(c_frustumVec, src, 72); }
void render(uint * pix,
float * depth,
int blockx,
int gridx, int gridy)
{
dim3 block(blockx, blockx, 1);
dim3 grid(gridx, gridy, 1);
hipLaunchKernelGGL(( oneCube_kernel), dim3(grid), dim3(block) , 0, 0, pix,
depth);
}
void drawPyramid(uint * color,
float * depth,
int blockx,
int gridx, int gridy,
void * planes,
void * bounding)
{
dim3 block(blockx, blockx, 1);
dim3 grid(gridx, gridy, 1);
hipLaunchKernelGGL(( onePyrmaid_kernel), dim3(grid), dim3(block) , 0, 0, color,
depth,
(float4 *)planes,
(Aabb *)bounding);
}
void drawVoxel(uint * color,
float * depth,
int blockx,
int gridx, int gridy,
void * voxels)
{
dim3 block(blockx, blockx, 1);
dim3 grid(gridx, gridy, 1);
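    // the 14096 below is the dynamic shared-memory size in bytes requested per block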
hipLaunchKernelGGL(( oneVoxel_kernel), dim3(grid), dim3(block), 14096 , 0, color,
depth,
(Voxel *)voxels);
}
const float cubefaces[] = {
-1, 0, 0,
1, 0, 0,
0,-1, 0,
0, 1, 0,
0, 0,-1,
0, 0, 1
};
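// the 72 bytes copied below are assumed to cover these 18 floats (6 face normals x 3 components),
// matching the size of the c_ray_box_face constant symbol declared in CubeRender.cuh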
void setBoxFaces()
{ hipMemcpyToSymbol(c_ray_box_face, cubefaces, 72); }
}
|
29cad930da79f1c98847b23641adaa2ad81dcfbc.cu
|
#include "CubeRender.cuh"
namespace cuber {
void setRenderRect(int * src)
{ cudaMemcpyToSymbol(c_renderRect, src, 16); }
void setFrustum(float * src)
{ cudaMemcpyToSymbol(c_frustumVec, src, 72); }
void render(uint * pix,
float * depth,
int blockx,
int gridx, int gridy)
{
dim3 block(blockx, blockx, 1);
dim3 grid(gridx, gridy, 1);
oneCube_kernel<<< grid, block >>>(pix,
depth);
}
void drawPyramid(uint * color,
float * depth,
int blockx,
int gridx, int gridy,
void * planes,
void * bounding)
{
dim3 block(blockx, blockx, 1);
dim3 grid(gridx, gridy, 1);
onePyrmaid_kernel<<< grid, block >>>(color,
depth,
(float4 *)planes,
(Aabb *)bounding);
}
void drawVoxel(uint * color,
float * depth,
int blockx,
int gridx, int gridy,
void * voxels)
{
dim3 block(blockx, blockx, 1);
dim3 grid(gridx, gridy, 1);
oneVoxel_kernel<<< grid, block, 14096 >>>(color,
depth,
(Voxel *)voxels);
}
const float cubefaces[] = {
-1, 0, 0,
1, 0, 0,
0,-1, 0,
0, 1, 0,
0, 0,-1,
0, 0, 1
};
void setBoxFaces()
{ cudaMemcpyToSymbol(c_ray_box_face, cubefaces, 72); }
}
|
0ee7add5c43ed3836554045f7e0e8b74846a1bb8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ge_sinh.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int sd = 1;
const int fd = 1;
const REAL *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
const int offset_a = 1;
const int ld_a = 1;
REAL *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
const int offset_b = 1;
const int ld_b = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(ge_sinh, dim3(gridBlock), dim3(threadBlock), 0, 0, sd, fd, a, offset_a, ld_a, b, offset_b, ld_b);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(ge_sinh, dim3(gridBlock), dim3(threadBlock), 0, 0, sd, fd, a, offset_a, ld_a, b, offset_b, ld_b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(ge_sinh, dim3(gridBlock), dim3(threadBlock), 0, 0, sd, fd, a, offset_a, ld_a, b, offset_b, ld_b);
}
auto end = steady_clock::now();
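// note: kernel launches are asynchronous, so without a hipDeviceSynchronize() before 'end' this
// interval largely reflects the cost of enqueueing the 1000 launches rather than device execution time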
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
0ee7add5c43ed3836554045f7e0e8b74846a1bb8.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ge_sinh.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int sd = 1;
const int fd = 1;
const REAL *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
const int offset_a = 1;
const int ld_a = 1;
REAL *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
const int offset_b = 1;
const int ld_b = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
ge_sinh<<<gridBlock,threadBlock>>>(sd,fd,a,offset_a,ld_a,b,offset_b,ld_b);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
ge_sinh<<<gridBlock,threadBlock>>>(sd,fd,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
ge_sinh<<<gridBlock,threadBlock>>>(sd,fd,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
db8d46abd2a27be961c78e720ac68be91039fbf5.hip
|
// !!! This is a file automatically generated by hipify!!!
//#include "QC_LDPC_CSS.h"
//#include <chrono>
//// See https://arxiv.org/pdf/quant-ph/0701020.pdf for construction
//
//QC_LDPC_CSS::QC_LDPC_CSS(int J, int K, int L, int P, int sigma, int tau) :
// _numEqsX(J * P), _numEqsZ(K * P), _numVars(L * P), _P(P),
// _hHC_vec(J * P, std::vector<int>(L * P)), _hHD_vec(K * P, std::vector<int>(L * P)),
// // allocate host memory for parity check matrices
// _pcmX_h(J * P, L * P), _pcmZ_h(K * P, L * P),
// // allocate host and device memory for syndromes
// _syndromeX_h(_numEqsX,0), _syndromeX_d(_numEqsX,0),
// _syndromeZ_h(_numEqsZ,0), _syndromeZ_d(_numEqsZ,0),
// // allocate host and device memory for var node updates
// _varNodesX(_numVars,_numEqsX,0), _varNodesZ(_numVars,_numEqsZ,0),
// _varNodesX_d(_numVars,_numEqsX,0), _varNodesZ_d(_numVars,_numEqsZ,0),
// // allocate host and device memory for check node updates
// _eqNodesX(_numEqsX,_numVars,0), _eqNodesZ(_numEqsZ,_numVars,0),
// _eqNodesX_d(_numEqsX,_numVars,0), _checkNodesZ_d(_numEqsZ,_numVars,0),
// // allocate host and device memory for index matrices
// _eqNodeVarIndicesX(_numEqsX, L), _eqNodeVarIndicesZ(_numEqsZ, L),
// _eqNodeVarIndicesX_d(_numEqsX, L), _eqNodeVarIndicesZ_d(_numEqsZ, L),
// _varNodeEqIndicesX(_numVars,J), _varNodeEqIndicesZ(_numVars, K),
// _varNodeEqIndicesX_d(_numVars, J), _varNodeEqIndicesZ_d(_numVars, K),
// _errorGenerator(_numVars)
//{
// int i, j, k, l, t, p, invSigma;
//
// // index matrices for parity check matrices _pcmX_h and _pcmZ_h
// IntArray2d_h hHC(J, L);
// IntArray2d_h hHD(K, L);
//
// // construct the cyclic set from which HC and HD will be made
// cusp::array1d<int, cusp::host_memory> ZP(P - 1);
// for (i = 0; i < P - 1; ++i) ZP[i] = i + 1;
// print(ZP);
//
// // find sigma^(-1): the element of ZP that, when multiplied by sigma, gives 1 (mod P)
// for (i = 0; ZP[i] * sigma % P != 1; ++i); // loop through ZP until the inverse element is found.
// invSigma = ZP[i];
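// // e.g. with P = 7 and sigma = 3 this gives invSigma = 5, since 3 * 5 = 15 = 1 (mod 7)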
//
// // Build parity check matrices for HC and HD on the host since this is a one shot operation.
// // Time to transfer data to the gpu will make this inefficient.
// for (j = 0; j < J; ++j)
// {
// for (l = 0; l < L; ++l)
// {
// t = 1;
// if (l < L / 2)
// {
// p = -j + l;
// // find the correct power of sigma (or inverse sigma if p is negative)
// if (p < 0) for (i = 0; i < -p; ++i) t = (t * invSigma) % P; // sigma^(-k) = (sigma^(-1))^k
// else for (i = 0; i < p; ++i) t = (t * sigma) % P;
// }
// else
// {
// p = j - 1 + l;
// // find the correct power of sigma (or inverse sigma if p is negative)
// if (p < 0) for (i = 0; i < -p; ++i) t = (t * invSigma) % P;
// else for (i = 0; i < p; ++i) t = (t * sigma) % P;
// t = P - (tau * t) % P; // -(tau*sigma^p) = P - (tau*sigma^p)
// }
// hHC(j, l) = t;
// }
// }
//
// for (k = 0; k < K; ++k)
// {
// for (l = 0; l < L; ++l)
// {
// t = 1;
// if (l < L / 2)
// {
// p = -k - 1 + l;
// // find the correct power of sigma (or inverse sigma if p is negative)
// if (p < 0) for (i = 0; i < -p; ++i) t = (t * invSigma) % P; // sigma^(-k) = (sigma^(-1))^k
// else for (i = 0; i < p; ++i) t = (t * sigma) % P;
// t = (tau * t) % P;
// }
// else
// {
// p = k + l;
// // find the correct power of sigma (or inverse sigma if p is negative)
// if (p < 0) for (i = 0; i < -p; ++i) t = (t * invSigma) % P;
// else for (i = 0; i < p; ++i) t = (t * sigma) % P;
// t = P - (t); // -(sigma^p) = P - (sigma^p)
// }
// hHD(k, l) = t;
// }
// }
//// print_matrix(hHC);
//// print_matrix(hHD);
//
// int cj, ck, cjR, ckR, cl, c, row, col;
// // Construct the parity check matrix row by row.
// // The matrix is made up of JxL PxP blocks.
// // Each block is a circulant permutation matrix, I(1)^c with c given by HC calculated previously
// // see https://arxiv.org/pdf/quant-ph/0701020.pdf or https://en.wikipedia.org/wiki/Circulant_matrix
// for (row = 0; row < J * P; ++row)
// {
// cj = (int)(row / P); // the row index for HC is the integer part of j/P
// cjR = row % P; // the row within block cj is j%P. P rows per block.
// for (cl = 0; cl < L; ++cl)
// {
// c = hHC(cj, cl); //this is the power for the circulant permutation matrix, I(1)^c
// // cjR=0, c=1, block column index for non-zero entry = 1
// // cjR=1, c=1, block column index for non-zero entry = 2
// // cjR=P-1, c=1, block column index for non-zero entry = 0
// // block column index = (c + cjR) % P
// // offset block column index by block width P: offset = cl * P
// // column index = (c + cjR) % P + (cl * P);
// col = (c + cjR) % P + (cl * P);
// int index = row * _numVars + col;
// _hHC_vec[row][col] = 1;
// _pcmX_h.values[index] = 1; // set value of non-zero value i
// }
// }
//
// for (row = 0; row < K * P; ++row)
// {
// ck = (int)(row / P); // the row index for HD is the integer part of k/P
// ckR = row % P; // the row within block ck is k%P. P rows per block.
// for (cl = 0; cl < L; ++cl)
// {
// c = hHD(ck, cl); //this is the power for the circulant permutation matrix, I(1)^c
// col = (c + ckR) % P + (cl * P);
// int index = row * _numVars + col;
// _hHD_vec[row][col] = 1;
// _pcmZ_h.values[index] = 1; // set value of non-zero value i
// }
// }
//
// // set index arrays and device pointers
// SetIndexArrays(_eqNodeVarIndicesX, _varNodeEqIndicesX, _pcmX_h);
// thrust::copy(_eqNodeVarIndicesX.values.begin(), _eqNodeVarIndicesX.values.end(), _eqNodeVarIndicesX_d.values.begin());
// thrust::copy(_varNodeEqIndicesX.values.begin(), _varNodeEqIndicesX.values.end(), _varNodeEqIndicesX_d.values.begin());
// _eqNodeVarIndicesX_d_ptr = thrust::raw_pointer_cast(&_eqNodeVarIndicesX_d.values[0]);
// _varNodeEqIndicesX_d_ptr = thrust::raw_pointer_cast(&_varNodeEqIndicesX_d.values[0]);
//
// SetIndexArrays(_eqNodeVarIndicesZ, _varNodeEqIndicesZ, _pcmZ_h);
// thrust::copy(_eqNodeVarIndicesZ.values.begin(), _eqNodeVarIndicesZ.values.end(), _eqNodeVarIndicesZ_d.values.begin());
// thrust::copy(_varNodeEqIndicesZ.values.begin(), _varNodeEqIndicesZ.values.end(), _varNodeEqIndicesZ_d.values.begin());
// _eqNodeVarIndicesZ_d_ptr = thrust::raw_pointer_cast(&_eqNodeVarIndicesZ_d.values[0]);
// _varNodeEqIndicesZ_d_ptr = thrust::raw_pointer_cast(&_varNodeEqIndicesZ_d.values[0]);
//
// _numEqsPerVarX = _varNodeEqIndicesX.num_cols;
// _numVarsPerEqX = _eqNodeVarIndicesX.num_cols;
// _numEqsPerVarZ = _varNodeEqIndicesZ.num_cols;
// _numVarsPerEqZ = _eqNodeVarIndicesZ.num_cols;
//
// // set device memory pointers for pre-allocated device matrices
// _syndromeX_d_ptr = thrust::raw_pointer_cast(&_syndromeX_d[0]);
// _syndromeZ_d_ptr = thrust::raw_pointer_cast(&_syndromeZ_d[0]);
//
// _varNodesX_d_ptr = thrust::raw_pointer_cast(&_varNodesX_d.values[0]);
// _varNodesZ_d_ptr = thrust::raw_pointer_cast(&_varNodesZ_d.values[0]);
//
// _eqNodesX_d_ptr = thrust::raw_pointer_cast(&_eqNodesX_d.values[0]);
// _eqNodesZ_d_ptr = thrust::raw_pointer_cast(&_checkNodesZ_d.values[0]);
//
// // We now have parity check matrices hPHC and hPHD on the host. https://arxiv.org/pdf/quant-ph/0701020.pdf
// // These satisfy the constraints that the girth of their respective Tanner graphs are >= 6
// // and they have a "twisted relation", i.e. dual(D) is in C.
//}
//
//QC_LDPC_CSS::~QC_LDPC_CSS()
//{
//}
//
//void QC_LDPC_CSS::SetIndexArrays(IntArray2d_h& checkNodeVarIndices, IntArray2d_h& varNodeEqIndices, IntArray2d_h& parityCheckMatrix)
//{
// // set device index matrices for var node and check node updates
// // each equation will include L variables.
// // each variable will be involved in J equations
// // loop over all check node equations in the parity check matrix for X errors
// int numEqs = parityCheckMatrix.num_rows;
// int n = parityCheckMatrix.num_cols;
// std::vector<std::vector<int>> cnVarIndices(numEqs, std::vector<int>());
// std::vector<std::vector<int>> vnEqIndices(n, std::vector<int>());
// // loop over all equations
// for (int eqIdx = 0; eqIdx < numEqs; ++eqIdx)
// {
// // loop over all variables
// for (int varIdx = 0; varIdx < n; ++varIdx)
// {
// int pcmIdx = eqIdx * n + varIdx;
// // if the entry in the pcm is 1, this check node involves this variable. set the index entry
// if (parityCheckMatrix.values[pcmIdx])
// {
// cnVarIndices[eqIdx].push_back(varIdx);
// vnEqIndices[varIdx].push_back(eqIdx);
// }
// }
// }
// // copy data into provided array containers
// auto index = 0;
// for (auto i = 0; i<cnVarIndices.size(); ++i)
// {
// for(auto j=0; j<cnVarIndices[0].size(); ++j)
// {
// checkNodeVarIndices.values[index] = cnVarIndices[i][j];
// ++index;
// }
// }
// index = 0;
// for (auto i = 0; i<vnEqIndices.size(); ++i)
// {
// for (auto j = 0; j<vnEqIndices[0].size(); ++j)
// {
// varNodeEqIndices.values[index] = vnEqIndices[i][j];
// ++index;
// }
// }
//}
//
//void QC_LDPC_CSS::WriteToFile(IntArray2d_h vec, const char* str)
//{
// std::ofstream file;
// file.open(str, std::ios::app);
// if (file.is_open()) {
// std::cout << "Writing to file " << str << std::endl;
// for (auto i = 0; i < vec.num_rows; ++i)
// {
// for (auto j = 0; j < vec.num_cols; ++j) {
// int index = i*vec.num_cols + j;
// auto v = vec.values[index];
// file << v << " ";
// }
// file << "\n";
// }
// file << "\n\n";
// file.close();
// }
// else
// {
// std::cout << "Failed to open file " << str << std::endl;
// }
//}
//
//void QC_LDPC_CSS::WriteToFile(cusp::array2d<float, cusp::host_memory, cusp::row_major> vec, const char* str)
//{
// std::ofstream file;
// file.open(str, std::ios::app);
// if (file.is_open()) {
// std::cout << "Writing to file " << str << std::endl;
// for (auto i = 0; i < vec.num_rows; ++i)
// {
// for (auto j = 0; j < vec.num_cols; ++j) {
// int index = i*vec.num_cols + j;
// auto v = vec.values[index];
// file << std::fixed << std::setprecision(3) << v << " ";
// }
// file << "\n";
// }
// file << "\n\n";
// file.close();
// }
// else
// {
// std::cout << "Failed to open file " << str << std::endl;
// }
//}
//
//void QC_LDPC_CSS::WriteToFile(std::vector<std::vector<float>> vec, const char* str)
//{
// std::ofstream file;
// file.open(str, std::ios::app);
// if (file.is_open()) {
// std::cout << "Writing to file " << str << std::endl;
// for (auto i = 0; i < vec.size(); ++i)
// {
// for (auto j = 0; j < vec[i].size(); ++j) {
// auto v = vec[i][j];
// file << std::fixed << std::setprecision(3) << v << " ";
// }
// file << "\n";
// //file << v << ",";
// }
// file << "\n\n";
// file.close();
// }else
// {
// std::cout << "Failed to open file " << str << std::endl;
// }
//}
//
//void QC_LDPC_CSS::WriteToFile(std::vector<int> vec, const char* str)
//{
// std::ofstream file;
// file.open(str, std::ios::app);
// if (file.is_open()) {
// std::cout << "Writing to file " << str << std::endl;
// for (auto i = 0; i < vec.size(); ++i)
// {
// auto v = vec[i];
// file << std::fixed << std::setprecision(3) << v << " ";
// }
// file << "\n";
// file.close();
// }
// else
// {
// std::cout << "Failed to open file " << str << std::endl;
// }
//}
//
///*
//Given a set of x errors and z errors, this will attempt to decode the errors
//and will return a success / failure code.
//See paper for algorithm:
//We will use a Belief-propagation decoding scheme.
//*/
//QC_LDPC_CSS::ErrorCode QC_LDPC_CSS::DecodeCUDA(std::vector<int> syndromeX, std::vector<int> syndromeZ, float errorProbability,
// std::vector<int> &xErrors, std::vector<int> &zErrors, int maxIterations)
//{
// hipEvent_t start, stop;
// hipEventCreate(&start);
// hipEventCreate(&stop);
//// float varUpdateKernelTime = 0;
//// float eqUpdateKernelTime = 0;
//// std::chrono::microseconds memCopyTime(0);
// std::chrono::microseconds checkConvergenceTime(0);
// std::chrono::microseconds updateTime(0);
// std::chrono::microseconds initTime(0);
// std::chrono::microseconds decodeTime(0);
// std::chrono::microseconds completeTime(0);
//
// auto begin = std::chrono::high_resolution_clock::now();
//
// // We will first decode xErrors and then zErrors
// // An NxM parity check matrix H can be viewed as a bipartite graph with
// // N symbol nodes and M parity check nodes. Each symbol node is connected
// // to ds parity-check nodes, and each parity-check node is connected to dc
// // symbol nodes.
// float p = 2.0f / 3.0f * errorProbability; // a priori probability for x/z OR y error
// float high = 0.99f;
// float low = 0.01f;
//
// // clear var node and check node arrays, and set syndrome arrays
// for (int i = 0; i < _varNodesX.num_entries; ++i) _varNodesX.values[i] = 0;
// for (int i = 0; i < _varNodesZ.num_entries; ++i) _varNodesZ.values[i] = 0;
// int numVarsPerEq = _eqNodeVarIndicesX.num_cols;
// for (int eqIdx = 0; eqIdx<_numEqsX; ++eqIdx)
// {
// for (int j = 0; j<numVarsPerEq; ++j)
// {
// int idx = eqIdx * numVarsPerEq + j;
// int varIdx = _eqNodeVarIndicesX.values[idx];
// int varNodeIdx = varIdx * _numEqsX + eqIdx;
// _varNodesX.values[varNodeIdx] = p;
// }
// }
// for (int eqIdx = 0; eqIdx<_numEqsZ; ++eqIdx)
// {
// for (int j = 0; j<_eqNodeVarIndicesZ.num_cols; ++j)
// {
// int idx = eqIdx * numVarsPerEq + j;
// int varIdx = _eqNodeVarIndicesZ.values[idx];
// int varNodeIdx = varIdx * _numEqsX + eqIdx;
// _varNodesZ.values[varNodeIdx] = p;
// }
// }
// for (int i = 0; i < _eqNodesX.num_entries; ++i) _eqNodesX.values[i] = 0.0f;
// for (int i = 0; i < _eqNodesZ.num_entries; ++i) _eqNodesZ.values[i] = 0.0f;
//
// // copy host data to device
// thrust::copy(_varNodesX.values.begin(), _varNodesX.values.end(), _varNodesX_d.values.begin());
// thrust::copy(_varNodesZ.values.begin(), _varNodesZ.values.end(), _varNodesZ_d.values.begin());
// thrust::copy(_eqNodesX.values.begin(), _eqNodesX.values.end(), _eqNodesX_d.values.begin());
// thrust::copy(_eqNodesZ.values.begin(), _eqNodesZ.values.end(), _checkNodesZ_d.values.begin());
// thrust::copy(syndromeX.begin(), syndromeX.end(), _syndromeX_d.begin());
// thrust::copy(syndromeZ.begin(), syndromeZ.end(), _syndromeZ_d.begin());
//
//
// auto N = maxIterations; // maximum number of iterations
//// bool xConverge = false;
//// bool zConverge = false;
//
// //dim3 eqNodeGridDimX(_numEqsX); // number of blocks.
// //dim3 eqNodeBlockDimX(_numEqsX,_numVarsPerEqX); // number of threads per block
//
// //dim3 eqNodeGridDimZ(_numEqsZ);
// //dim3 eqNodeBlockDimZ(_numEqsZ,_numVarsPerEqZ);
//
// //dim3 varNodeGridDimX(_numVars);
// //dim3 varNodeBlockDimX(_numEqsPerVarX);
// //auto varNodeMemSizeX = _numEqsPerVarX * sizeof(float);
//
// //dim3 varNodeGridDimZ(_numVars);
// //dim3 varNodeBlockDimZ(_numEqsPerVarZ);
// //auto varNodeMemSizeZ = _numEqsPerVarX * sizeof(float);
//
// auto finish = std::chrono::high_resolution_clock::now();
// auto duration = std::chrono::duration_cast<std::chrono::microseconds>(finish - begin);
// initTime += duration;
//
// begin = std::chrono::high_resolution_clock::now();
// // launch a single warp of 32 threads.
// beliefPropogation_kernel << <1, 1 >> > (_eqNodesX_d_ptr, _varNodesX_d_ptr, _eqNodeVarIndicesX_d_ptr, _varNodeEqIndicesX_d_ptr,
// _syndromeX_d_ptr, p, _numVars, _numEqsX, _numVarsPerEqX, _numEqsPerVarX, N);
//
// // launch a single warp of 32 threads.
// beliefPropogation_kernel << <1, 1 >> > (_eqNodesZ_d_ptr, _varNodesZ_d_ptr, _eqNodeVarIndicesZ_d_ptr, _varNodeEqIndicesZ_d_ptr,
// _syndromeZ_d_ptr, p, _numVars, _numEqsZ, _numVarsPerEqZ, _numEqsPerVarZ, N);
//
// hipDeviceSynchronize();
//
// finish = std::chrono::high_resolution_clock::now();
// duration = std::chrono::duration_cast<std::chrono::microseconds>(finish - begin);
// decodeTime += duration;
//
// begin = std::chrono::high_resolution_clock::now();
//
// thrust::copy(_varNodesX_d.values.begin(), _varNodesX_d.values.end(), _varNodesX.values.begin());
// thrust::copy(_varNodesZ_d.values.begin(), _varNodesZ_d.values.end(), _varNodesZ.values.begin());
//
// // accumulate the error estimates into a single vector
// std::vector<int> finalEstimatesX(_varNodesX.num_rows, 0);
// std::vector<int> finalEstimatesZ(_varNodesZ.num_rows, 0);
//
// // check for correct error decoding
// ErrorCode code = SUCCESS;
// // check convergence errors
// for (auto varIdx = 0; varIdx < _varNodesX.num_rows; ++varIdx) {
// for (auto eqIdx = 0; eqIdx < _varNodesX.num_cols; ++eqIdx) {
// int index = varIdx * _varNodesX.num_cols + eqIdx;
// if (_varNodesX.values[index] >= 0.5f) // best guess of error
// {
// finalEstimatesX[varIdx] = 1;
// break;
// }
// }
// }
// for (auto varIdx = 0; varIdx < _varNodesZ.num_rows; ++varIdx) {
// for (auto eqIdx = 0; eqIdx < _varNodesZ.num_cols; ++eqIdx) {
// int index = varIdx * _varNodesZ.num_cols + eqIdx;
// if (_varNodesZ.values[index] >= 0.5f) // best guess of error
// {
// finalEstimatesZ[varIdx] = 1;
// break;
// }
// }
// }
// // check for convergence failure
// if (!CheckConvergence(_varNodesX, high, low)) {
// code = code | CONVERGENCE_FAIL_X;
// }
// if (!CheckConvergence(_varNodesZ, high, low)) {
// code = code | CONVERGENCE_FAIL_Z;
// }
// // check syndrome errors
// auto xS = GetXSyndrome(finalEstimatesX);
// if (!std::equal(syndromeX.begin(), syndromeX.end(), xS.begin())) { code = code | SYNDROME_FAIL_X; }
//
// auto zS = GetZSyndrome(finalEstimatesZ);
// if (!std::equal(syndromeZ.begin(), syndromeZ.end(), zS.begin())) { code = code | SYNDROME_FAIL_Z; }
//
// xErrors = finalEstimatesX;
// zErrors = finalEstimatesZ;
// finish = std::chrono::high_resolution_clock::now();
// duration = std::chrono::duration_cast<std::chrono::microseconds>(finish - begin);
// completeTime += duration;
//
//
//// std::cout << "VarNode update kernel execution time: " << varUpdateKernelTime * 1000 << " micro-seconds." << std::endl;
//// std::cout << "EqNode update kernel execution time: " << eqUpdateKernelTime * 1000 << " micro-seconds." << std::endl;
//// std::cout << "MemCopyTime: " << memCopyTime.count() << " micro-seconds." << std::endl;
//// std::cout << "Check convergence time: " << checkConvergenceTime.count() << " micro-seconds." << std::endl;
// std::cout << "Init time: " << initTime.count() << " micro-seconds." << std::endl;
// std::cout << "Decode time: " << decodeTime.count() << " micro-seconds." << std::endl;
// std::cout << "Complete time: " << completeTime.count() << " micro-seconds." << std::endl;
// std::cout << "Check Convergence time: " << checkConvergenceTime.count() << " micro-seconds." << std::endl;
// std::cout << "Update time: " << updateTime.count() << " micro-seconds." << std::endl;
//
//
// return code;
//}
//
///*
// Given a set of x errors and z errors, this will attempt to decode the errors
// and will return a success / failure code.
// See paper for algorithm:
// We will use a Belief-propagation decoding scheme.
//*/
//QC_LDPC_CSS::ErrorCode QC_LDPC_CSS::DecodeCPU(std::vector<int> syndromeX, std::vector<int> syndromeZ, float errorProbability,
// std::vector<int> &xErrors, std::vector<int> &zErrors, int maxIterations)
//{
// // We will first decode xErrors and then zErrors
// // An NxM parity check matrix H can be viewed as a bipartite graph with
// // N symbol nodes and M parity check nodes. Each symbol node is connected
// // to ds parity-check nodes, and each parity-check node is connected to dc
// // symbol nodes.
// float p = 2.0f / 3.0f * errorProbability; // a priori probability for x/z OR y error
// float high = 0.99f;
// float low = 0.01f;
// // array of probability estimates to send to each check node. there are _numEqsX variables, and _numVars check nodes
// /* std::vector<std::vector<float>> varNodeEstimatesX(_numEqsX, std::vector<float>(_numVars, p));
// std::vector<std::vector<float>> varNodeEstimatesZ(_numEqsZ, std::vector<float>(_numVars, p));*/
//
// // each var node has a list of estimates from each check node.
// std::vector<std::vector<float>> varNodeEstimatesX(_numVars, std::vector<float>(_numEqsX, p));
// std::vector<std::vector<float>> varNodeEstimatesZ(_numVars, std::vector<float>(_numEqsZ, p));
//
// // each check node has a list of beliefs for the value of each var node.
// std::vector<std::vector<float>> checkNodeBeliefsX(_numEqsX, std::vector<float>(_numVars));
// std::vector<std::vector<float>> checkNodeBeliefsZ(_numEqsZ, std::vector<float>(_numVars));
//
// //WriteToFile(varNodeEstimatesX, "results/xEstimates.txt");
// //WriteToFile(checkNodeBeliefsX, "results/xBeliefs.txt");
//
// auto N = maxIterations; // maximum number of iterations
// bool xConverge = false;
// bool zConverge = false;
// for (auto n = 0; n < N; n++)
// {
// if (xConverge && zConverge) break;
// if(!xConverge)
// {
// EqNodeUpdate(varNodeEstimatesX, checkNodeBeliefsX, _hHC_vec, syndromeX);
// VarNodeUpdate(varNodeEstimatesX, checkNodeBeliefsX, _hHC_vec, p, n == N - 1);
// //WriteToFile(varNodeEstimatesX, "results/xEstimates.txt");
// //WriteToFile(checkNodeBeliefsX, "results/xBeliefs.txt");
// if (n % 10 == 0)
// {
// xConverge = CheckConvergence(varNodeEstimatesX, high, low);
// }
// }
//
// if (!zConverge)
// {
// EqNodeUpdate(varNodeEstimatesZ, checkNodeBeliefsZ, _hHD_vec, syndromeZ);
// VarNodeUpdate(varNodeEstimatesZ, checkNodeBeliefsZ, _hHD_vec, p, n == N - 1);
// if (n % 10 == 0)
// {
// zConverge = CheckConvergence(varNodeEstimatesZ, high, low);
// }
// }
//
//
// }
// // accumulate the error estimates into a single vector
// std::vector<int> finalEstimatesX(varNodeEstimatesX.size(), 0);
// std::vector<int> finalEstimatesZ(varNodeEstimatesZ.size(), 0);
//
// // check for correct error decoding
// ErrorCode code = SUCCESS;
// // check convergence errors
// for (auto i = 0; i < varNodeEstimatesX.size(); ++i) {
// for (auto j = 0; j < varNodeEstimatesX[i].size(); ++j) {
// if (varNodeEstimatesX[i][j] != 0.0f) {
// if(varNodeEstimatesX[i][j] > high) finalEstimatesX[i] = 1;
// else if (varNodeEstimatesX[i][j] < low) finalEstimatesX[i] = 0;
// else {
// finalEstimatesX[i] = -1;
// code = code | CONVERGENCE_FAIL_X;
// }
// break;
// }
// }
// }
// for (auto i = 0; i < varNodeEstimatesZ.size(); ++i) {
// for (auto j = 0; j < varNodeEstimatesZ[i].size(); ++j) {
// if (varNodeEstimatesZ[i][j] != 0.0f) {
// if (varNodeEstimatesZ[i][j] > high) finalEstimatesZ[i] = 1;
// else if (varNodeEstimatesZ[i][j] < low) finalEstimatesZ[i] = 0;
// else {
// finalEstimatesZ[i] = -1;
// code = code | CONVERGENCE_FAIL_Z;
// }
// break;
// }
// }
// }
// // check syndrome errors
// if (code == SUCCESS) {
// auto xS = GetXSyndrome(finalEstimatesX);
// if (!std::equal(syndromeX.begin(), syndromeX.end(), xS.begin())) { code = code | SYNDROME_FAIL_X; }
//
// auto zS = GetZSyndrome(finalEstimatesZ);
// if (!std::equal(syndromeZ.begin(), syndromeZ.end(), zS.begin())) { code = code | SYNDROME_FAIL_Z; }
// }
//
// xErrors = finalEstimatesX;
// zErrors = finalEstimatesZ;
//
// return code;
//}
//
//QC_LDPC_CSS::ErrorCode QC_LDPC_CSS::DecodeCPU2(std::vector<int> xSyndrome, std::vector<int> zSyndrome, float errorProbability, std::vector<int>& xErrors, std::vector<int>& zErrors, int maxIterations)
//{
// // We will first decode xErrors and then zErrors
// // An NxM parity check matrix H can be viewed as a bipartite graph with
// // N symbol nodes and M parity check nodes. Each symbol node is connected
// // to ds parity-check nodes, and each parity-check node is connected to dc
// // symbol nodes.
// float p = 2.0f / 3.0f * errorProbability; // a priori probability for x/z OR y error
// float high = 0.99f;
// float low = 0.01f;
//
// // clear var node and check node arrays, and set syndrome arrays
// for (int i = 0; i < _varNodesX.num_entries; ++i) _varNodesX.values[i] = 0;
// for (int i = 0; i < _varNodesZ.num_entries; ++i) _varNodesZ.values[i] = 0;
// int numVarsPerEq = _eqNodeVarIndicesX.num_cols;
// for(int eqIdx=0; eqIdx<_numEqsX; ++eqIdx)
// {
// for(int j=0; j<numVarsPerEq; ++j)
// {
// int idx = eqIdx * numVarsPerEq + j;
// int varIdx = _eqNodeVarIndicesX.values[idx];
// int varNodeIdx = varIdx * _numEqsX + eqIdx;
// _varNodesX.values[varNodeIdx] = p;
// }
// }
// for (int eqIdx = 0; eqIdx<_numEqsZ; ++eqIdx)
// {
// for (int j = 0; j<_eqNodeVarIndicesZ.num_cols; ++j)
// {
// int idx = eqIdx * numVarsPerEq + j;
// int varIdx = _eqNodeVarIndicesZ.values[idx];
// int varNodeIdx = varIdx * _numEqsX + eqIdx;
// _varNodesZ.values[varNodeIdx] = p;
// }
// }
// for (int i = 0; i < _eqNodesX.num_entries; ++i) _eqNodesX.values[i] = 0.0f;
// for (int i = 0; i < _eqNodesZ.num_entries; ++i) _eqNodesZ.values[i] = 0.0f;
// for (int i = 0; i < xSyndrome.size(); ++i) _syndromeX_h[i] = xSyndrome[i];
// for (int i = 0; i < zSyndrome.size(); ++i) _syndromeZ_h[i] = zSyndrome[i];
//
// auto N = maxIterations; // maximum number of iterations
// bool xConverge = false;
// bool zConverge = false;
// //WriteToFile(_varNodesX, "results/varX_CPU.txt");
// //WriteToFile(_eqNodesX, "results/eqX_CPU.txt");
// for (auto n = 0; n < N; n++)
// {
// if (xConverge && zConverge) break;
// if (!xConverge)
// {
// EqNodeUpdate(_eqNodesX,_varNodesX,_eqNodeVarIndicesX, _syndromeX_h);
// VarNodeUpdate(_eqNodesX, _varNodesX, _varNodeEqIndicesX ,p, n == N - 1);
// //WriteToFile(_varNodesX, "results/varX_CPU.txt");
// //WriteToFile(_eqNodesX, "results/eqX_CPU.txt");
// if (n % 10 == 0)
// {
// xConverge = CheckConvergence(_varNodesX, high, low);
// }
// }
//
// if (!zConverge)
// {
// EqNodeUpdate(_eqNodesZ, _varNodesZ, _eqNodeVarIndicesZ, _syndromeZ_h);
// VarNodeUpdate(_eqNodesZ, _varNodesZ, _varNodeEqIndicesZ , p, n == N - 1);
// if (n % 10 == 0)
// {
// zConverge = CheckConvergence(_varNodesZ, high, low);
// }
// }
// }
// // accumulate the error estimates into a single vector
// std::vector<int> finalEstimatesX(_varNodesX.num_rows, 0);
// std::vector<int> finalEstimatesZ(_varNodesZ.num_rows, 0);
//
// // check for correct error decoding
// ErrorCode code = SUCCESS;
// // check convergence errors
// for (auto varIdx = 0; varIdx < _varNodesX.num_rows; ++varIdx) {
// for (auto eqIdx = 0; eqIdx < _varNodesX.num_cols; ++eqIdx) {
// int index = varIdx * _varNodesX.num_cols + eqIdx;
// if(_varNodesX.values[index] >= 0.5f) // best guess of error
// {
// finalEstimatesX[varIdx] = 1;
// break;
// }
// }
// }
// for (auto varIdx = 0; varIdx < _varNodesZ.num_rows; ++varIdx) {
// for (auto eqIdx = 0; eqIdx < _varNodesZ.num_cols; ++eqIdx) {
// int index = varIdx * _varNodesZ.num_cols + eqIdx;
// if (_varNodesZ.values[index] >= 0.5f) // best guess of error
// {
// finalEstimatesZ[varIdx] = 1;
// break;
// }
// }
// }
// // check for convergence failure
// if (!CheckConvergence(_varNodesX, high, low)) {
// code = code | CONVERGENCE_FAIL_X;
//// WriteToFile(_varNodesX, "results/convXCPU.txt");
// }
// if (!CheckConvergence(_varNodesZ, high, low)) code = code | CONVERGENCE_FAIL_Z;
// // check syndrome errors
// auto xS = GetXSyndrome(finalEstimatesX);
// if (!std::equal(xSyndrome.begin(), xSyndrome.end(), xS.begin())) { code = code | SYNDROME_FAIL_X; }
//
// auto zS = GetZSyndrome(finalEstimatesZ);
// if (!std::equal(zSyndrome.begin(), zSyndrome.end(), zS.begin())) { code = code | SYNDROME_FAIL_Z; }
//
// xErrors = finalEstimatesX;
// zErrors = finalEstimatesZ;
//
// return code;
//}
//
//void QC_LDPC_CSS::EqNodeUpdate(FloatArray2d_h &eqNodes, FloatArray2d_h varNodes, IntArray2d_h eqNodeVarIndices, IntArray1d_h syndrome)
//{
// // For a check node interested in variables a,b,c,d to estimate the updated probability for variable a
// // syndrome = 0: even # of errors -> pa' = pb(1-pc)(1-pd) + pc(1-pb)(1-pd) + pd(1-pb)(1-pc) + pb*pc*pd
// // = 0.5 * (1 - (1-2pb)(1-2pc)(1-2pd))
// // syndrome = 1: odd # of errors -> pa' = (1-pb)(1-pc)(1-pd) + pb*pc*(1-pd) + pb*(1-pc)*pd + (1-pb)*pc*pd
// // = 0.5 * (1 + (1-2pb)(1-2pc)(1-2pd))
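// // worked example (illustrative values): with pb = pc = pd = 0.1 the product is (1 - 0.2)^3 = 0.512,
// // so syndrome = 0 gives pa' = 0.5 * (1 - 0.512) = 0.244 and syndrome = 1 gives pa' = 0.5 * (1 + 0.512) = 0.756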
// int numEqs = eqNodes.num_rows;
// int numVarsPerEq = eqNodeVarIndices.num_cols;
// int n = varNodes.num_rows;
// for (auto eqIdx = 0; eqIdx < numEqs; ++eqIdx) // loop over check nodes (parity equations)
// {
// int firstVarIdx = eqIdx*numVarsPerEq;
// // loop over variables to be updated for this check node
// for (auto i = 0; i < numVarsPerEq; ++i)
// {
// int index = firstVarIdx + i; // 1d array index to look up the variable index
// int varIdx = eqNodeVarIndices.values[index]; // variable index under investigation for this eq
// float product = 1.0f; // reset product
// // loop over all other variables in the equation, accumulate (1-2p) terms
// for (auto k = 0; k < numVarsPerEq; ++k)
// {
// if (k == i) continue; // skip the variable being updated
// int otherIndex = firstVarIdx + k; // 1d array index to look up the variable index
// int otherVarIdx = eqNodeVarIndices.values[otherIndex];
//
// // the index holding the estimate being used for this eq
// int varNodesIndex = otherVarIdx * numEqs + eqIdx;
// float value = varNodes.values[varNodesIndex]; // belief value for this variable and this eq
// product *= (1.0f - 2.0f*value);
// }
// int cnIdx = eqIdx * n + varIdx; // index for value within the check node array to update
// if (syndrome[eqIdx]) {
// eqNodes.values[cnIdx] = 0.5 * (1.0f + product); // syndrome = 1 -> odd parity
// }
// else {
// eqNodes.values[cnIdx] = 0.5f * (1.0f - product); // syndrome = 0 -> even parity
// }
// }
// }
// // WriteToFile(eqNodeBeliefs, "results/CheckNodeBeliefs.txt");
//}
//
//void QC_LDPC_CSS::EqNodeUpdate(std::vector<std::vector<float>>& varNodeEstimates,
// std::vector<std::vector<float>>& eqNodeBeliefs,
// std::vector<std::vector<int>> parityCheckMatrix,
// std::vector<int> syndrome)
//{
// // For a check node interested in variables a,b,c,d to estimate the updated probability for variable a
// // syndrome = 0: even # of errors -> pa' = pb(1-pc)(1-pd) + pc(1-pb)(1-pd) + pd(1-pb)(1-pc) + pb*pc*pd
// // = 0.5 * (1 - (1-2pb)(1-2pc)(1-2pd))
// // syndrome = 1: odd # of errors -> pa' = (1-pb)(1-pc)(1-pd) + pb*pc*(1-pd) + pb*(1-pc)*pd + (1-pb)*pc*pd
// // = 0.5 * (1 + (1-2pb)(1-2pc)(1-2pd))
// int numEqs = eqNodeBeliefs.size();
// int n = varNodeEstimates.size();
//
// for (auto eqIdx = 0; eqIdx < numEqs; ++eqIdx) // loop over check nodes (parity equations)
// {
// for (auto varIdx = 0; varIdx < n; ++varIdx) // loop over variables to be updated for this check node
// {
// eqNodeBeliefs[eqIdx][varIdx] = 0.0f; // not necessary, makes file output nicer.
// if (!parityCheckMatrix[eqIdx][varIdx]) continue; // if the parity check matrix is 0, the eq doesn't involve this var
// float product = 1.0f; // reset product
// for (auto otherVarIdx = 0; otherVarIdx < n; ++otherVarIdx) // loop over all other variables, accumulate (1-2p) terms
// {
// if (!parityCheckMatrix[eqIdx][otherVarIdx]) continue; // skip zeros
// if (otherVarIdx == varIdx) continue; // skip the variable being updated
// product *= (1.0f - 2.0f*varNodeEstimates[otherVarIdx][eqIdx]);
// }
// if(syndrome[eqIdx]) eqNodeBeliefs[eqIdx][varIdx] = 0.5 * (1.0f + product); // syndrome = 1 -> odd parity
// else eqNodeBeliefs[eqIdx][varIdx] = 0.5f * (1.0f - product); // syndrome = 0 -> even parity
// }
// }
//// WriteToFile(eqNodeBeliefs, "results/CheckNodeBeliefs.txt");
//}
//
//void QC_LDPC_CSS::VarNodeUpdate(FloatArray2d_h eqNodes, FloatArray2d_h& varNodes, IntArray2d_h varNodeEqIndices, float errorProbability, bool last)
//{
// // For a variable node connected to check nodes 1,2,3,4 use the following formula to send an estimate to var node 1
// // p1' = K*pch*p2*p3*p4 (pch is the channel error probability. ignore the estimate received from check node 1 unless last)
// // where K = 1/[(1-pch)(1-p2)(1-p3)(1-p4)... + pch*p2*p3*p4...]
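// // worked example (illustrative values): with pch = 0.1 and p2 = p3 = p4 = 0.7,
// // prodP = 0.1 * 0.7^3 = 0.0343 and prodOneMinusP = 0.9 * 0.3^3 = 0.0243, so p1' = 0.0343 / 0.0586 ~ 0.585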
// int numEqs = eqNodes.num_rows;
// int n = varNodes.num_rows;
// int numEqsPerVar = varNodeEqIndices.num_cols;
//
// for (auto varIdx = 0; varIdx < n; ++varIdx) // loop over all variables
// {
// int firstVarNode = varIdx * numEqs; // start of entries in VarNodes array for this variable
// int firstEqIndices = varIdx * numEqsPerVar; // starting point for first equation in the index list for this var.
// for (auto j = 0; j < numEqsPerVar; ++j) // loop over all equations for this variable
// {
// // find the index of the equation estimate being updated
// int index = firstEqIndices + j;
// int eqIdx = varNodeEqIndices.values[index];
//
// // 1d index for var nodes entry being updated
// int varNodesIdx = firstVarNode + eqIdx;
//
// // start with a priori channel error probability
// float prodP = errorProbability;
// float prodOneMinusP = 1.0f - errorProbability;
//
// // calculate the updated probability for this check node based on belief estimates of all OTHER check nodes
// for (auto k = 0; k < numEqsPerVar; ++k)
// {
// int index2 = firstEqIndices + k; // 1d index for entry in the index array
// int otherEQIdx = varNodeEqIndices.values[index2];
//
// if (otherEQIdx == eqIdx && !last) continue;
// // 1d index for check nodes belief being used
// int checkNodesIdx = otherEQIdx * n + varIdx;
// float p = eqNodes.values[checkNodesIdx];
//
// prodOneMinusP *= (1.0f - p);
// prodP *= p;
// }
// float value = prodP / (prodOneMinusP + prodP);
// varNodes.values[varNodesIdx] = value;
// }
// }
//}
//
//void QC_LDPC_CSS::VarNodeUpdate(std::vector<std::vector<float>>& varNodeEstimates,
// std::vector<std::vector<float>>& eqNodeBeliefs,
// std::vector<std::vector<int>> parityCheckMatrix,
// float errorProbability, bool last)
//{
// // For a variable node connected to check nodes 1,2,3,4 use the following formula to send an estimated probability to node 1
// // p1' = K*pch*p2*p3*p4 (pch is the channel error probability. ignore the estimate received from check node 1)
// // where K = 1/[(1-p1)(1-p2)(1-p3)... + p1*p2*p3...]
// int numEqs = eqNodeBeliefs.size();
// int n = varNodeEstimates.size();
// for (auto varIdx = 0; varIdx < n; ++varIdx) // loop over all variables
// {
// for (auto eqIdx = 0; eqIdx < numEqs; ++eqIdx) // loop over all equations
// {
// varNodeEstimates[varIdx][eqIdx] = 0.0f; // not necessary, makes output nicer
// if (!parityCheckMatrix[eqIdx][varIdx]) continue; // skip equations that this variable isn't involved in
//
// float prodP = errorProbability; // start with a priori channel error probability
// float prodOneMinusP = 1.0f - errorProbability;
// // calculate the updated probability for this check node based on belief estimates of all OTHER check nodes
// for (auto otherEqIdx = 0; otherEqIdx < numEqs; ++otherEqIdx) // loop over all equation estimates
// {
// if (otherEqIdx == eqIdx && !last) continue; // skip the belief estimate from j to update the probability sent to j
// if (!parityCheckMatrix[otherEqIdx][varIdx]) continue; // skip equations that this variable isn't involved in
// float p = eqNodeBeliefs[otherEqIdx][varIdx];
//
// prodOneMinusP *= (1.0f - p);
// prodP *= p;
// }
// float value = prodP / (prodOneMinusP + prodP);
//// std::cout << "Setting var: " << i << " eq: " << j << " value: " << value << std::endl;
// varNodeEstimates[varIdx][eqIdx] = value;
// }
// }
//// WriteToFile(varNodeEstimates, "results/VariableNodeEstimates.txt");
//}
//
//std::vector<int> QC_LDPC_CSS::GetXSyndrome(std::vector<int> xErrors)
//{
// std::vector<int> syndrome(_numEqsX);
// for (int row = 0; row < _numEqsX; ++row)
// {
// auto x = 0;
// for (int col = 0; col < _numVars; ++col)
// {
// x += _hHC_vec[row][col] * xErrors[col];
// }
// syndrome[row] = x % 2;
// }
// return syndrome;
//}
//
//std::vector<int> QC_LDPC_CSS::GetZSyndrome(std::vector<int> zErrors)
//{
// std::vector<int> syndrome(_numEqsX);
// for (int row = 0; row < _numEqsX; ++row)
// {
// auto x = 0;
// for (int col = 0; col < _numVars; ++col)
// {
// x += _hHD_vec[row][col] * zErrors[col];
// }
// syndrome[row] = x % 2;
// }
// return syndrome;
//}
//
//void QC_LDPC_CSS::InitVarNodesArray(FloatArray2d_h& varNodes_h, FloatArray2d_d& varNodes_d, const IntArray2d_h& parityCheckMatrix, const int NUM_CONCURRENT_THREADS, float errorProbability)
//{
// int n = parityCheckMatrix.num_cols;
// int numEqs = parityCheckMatrix.num_rows;
// int size = n*numEqs;
// for (int varIdx = 0; varIdx < n; ++varIdx)
// {
// for (int eqIdx = 0; eqIdx < numEqs; ++eqIdx)
// {
// int pcmIdx = eqIdx * n + varIdx;
// for (int n = 0; n<NUM_CONCURRENT_THREADS; ++n)
// {
// if (!parityCheckMatrix.values[pcmIdx]) continue;
// int varNodesIdx = n*size + varIdx*numEqs + eqIdx;
// varNodes_h.values[varNodesIdx] = errorProbability;
// }
// }
// }
// thrust::copy(varNodes_h.values.begin(), varNodes_h.values.end(), varNodes_d.values.begin());
//}
//
//void QC_LDPC_CSS::SetDeviceSyndrome(const std::vector<int>& syndrome_h, const IntArray2d_d& syndrome_d)
//{
// for(int i=0; i<syndrome_h.size(); ++i)
// {
// syndrome_d.values[i] = syndrome_h[i];
// }
//}
//
//QC_LDPC_CSS::Statistics QC_LDPC_CSS::GetStatistics(int errorWeight, int numErrors, float errorProbability, int maxIterations)
//{
// //float p = 2 / 3 * errorProbability;
// //const int NUM_CONCURRENT_THREADS = 32;
// //std::vector<int> xErrors(_numVars, 0);
// //std::vector<int> zErrors(_numVars, 0);
//
// //// set up host and device memory for calculations
// //IntArray2d_h xSyndromeArray_h(NUM_CONCURRENT_THREADS,_numEqsX);
// //IntArray2d_d xSyndromeArray_d(NUM_CONCURRENT_THREADS,_numEqsX);
// //int* xSyndrome_d_ptrs[NUM_CONCURRENT_THREADS];
// //for (int i = 0; i < NUM_CONCURRENT_THREADS; ++i)
// // xSyndrome_d_ptrs[i] = thrust::raw_pointer_cast(&xSyndromeArray_d.values[i*_numEqsX]);
//
// //IntArray2d_h zSyndromeArray_h(NUM_CONCURRENT_THREADS,_numEqsZ);
// //IntArray2d_d zSyndromeArray_d(NUM_CONCURRENT_THREADS,_numEqsZ);
// //int* zSyndrome_d_ptrs[NUM_CONCURRENT_THREADS];
// //for (int i = 0; i < NUM_CONCURRENT_THREADS; ++i)
// // xSyndrome_d_ptrs[i] = thrust::raw_pointer_cast(&xSyndromeArray_d.values[i*_numEqsX]);
//
// //int size = _numVars * _numEqsX;
// //FloatArray2d_h varNodesX_h(NUM_CONCURRENT_THREADS, size,0.0f);
// //FloatArray2d_d varNodesX_d(NUM_CONCURRENT_THREADS, size, 0.0f);
// //float* varNodesX_d_ptrs[NUM_CONCURRENT_THREADS];
// //for (int i = 0; i < NUM_CONCURRENT_THREADS; ++i)
// // varNodesX_d_ptrs[i] = thrust::raw_pointer_cast(&varNodesX_d.values[i*size]);
//
// //FloatArray2d_h eqNodesX_h(NUM_CONCURRENT_THREADS, size, 0.0f);
// //FloatArray2d_d eqNodesX_d(NUM_CONCURRENT_THREADS, size, 0.0f);
// //float* eqNodesX_d_ptrs[NUM_CONCURRENT_THREADS];
// //for (int i = 0; i < NUM_CONCURRENT_THREADS; ++i)
// // eqNodesX_d_ptrs[i] = thrust::raw_pointer_cast(&eqNodesX_d.values[i*size]);
//
// //size = _numVars * _numEqsZ;
// //FloatArray2d_h varNodesZ_h(NUM_CONCURRENT_THREADS, size,0.0f);
// //FloatArray2d_d varNodesZ_d(NUM_CONCURRENT_THREADS, size, 0.0f);
// //float* varNodesZ_d_ptrs[NUM_CONCURRENT_THREADS];
// //for (int i = 0; i < NUM_CONCURRENT_THREADS; ++i)
// // varNodesZ_d_ptrs[i] = thrust::raw_pointer_cast(&varNodesZ_d.values[i*size]);
// //
// //FloatArray2d_h eqNodesZ_h(NUM_CONCURRENT_THREADS, size, 0.0f);
// //FloatArray2d_d eqNodesZ_d(NUM_CONCURRENT_THREADS, size, 0.0f);
// //float* eqNodesZ_d_ptrs[NUM_CONCURRENT_THREADS];
// //for (int i = 0; i < NUM_CONCURRENT_THREADS; ++i)
// // eqNodesZ_d_ptrs[i] = thrust::raw_pointer_cast(&eqNodesZ_d.values[i*size]);
//
// //for (int i = 0; i < numErrors; ++i) {
// // InitVarNodesArray(varNodesX_h, varNodesX_d, _pcmX_h, NUM_CONCURRENT_THREADS, p);
// // InitVarNodesArray(varNodesZ_h, varNodesZ_d, _pcmZ_h, NUM_CONCURRENT_THREADS, p);
// // for (int j = 0; j < NUM_CONCURRENT_THREADS; ++j) {
// // _errorGenerator.GenerateError(xErrors, zErrors, errorWeight);
// // SetDeviceSyndrome(GetXSyndrome(xErrors), xSyndromeArray_d);
// // SetDeviceSyndrome(GetZSyndrome(zErrors), zSyndromeArray_d);
// // }
// //}
// return Statistics();
//}
//
//bool QC_LDPC_CSS::CheckConvergence(const std::vector<std::vector<float>>& estimates, float high, float low)
//{
// // loop over all estimates
// for (auto i = 0; i < estimates.size(); ++i) {
// for (auto j = 0; j < estimates[i].size(); ++j) {
// if (estimates[i][j] != 0.0f) {
// // if any estimate is between the bounds we have failed to converge
// if (estimates[i][j] > low && estimates[i][j] < high) return false;
// }
// }
// }
// return true;
//}
//
//bool QC_LDPC_CSS::CheckConvergence(const cusp::array2d<float,cusp::host_memory,cusp::row_major>& estimates, float high, float low)
//{
// // loop over all estimates
// for (auto i = 0; i < estimates.num_rows; ++i) {
// for (auto j = 0; j < estimates.num_cols; ++j) {
// int index = i * estimates.num_cols + j;
// if (estimates.values[index] != 0.0f) {
// // if any estimate is between the bounds we have failed to converge
// if (estimates.values[index] > low && estimates.values[index] < high) return false;
// }
// }
// }
// return true;
//}
|
db8d46abd2a27be961c78e720ac68be91039fbf5.cu
|
//#include "QC_LDPC_CSS.h"
//#include <chrono>
//// See https://arxiv.org/pdf/quant-ph/0701020.pdf for construction
//
//QC_LDPC_CSS::QC_LDPC_CSS(int J, int K, int L, int P, int sigma, int tau) :
// _numEqsX(J * P), _numEqsZ(K * P), _numVars(L * P), _P(P),
// _hHC_vec(J * P, std::vector<int>(L * P)), _hHD_vec(K * P, std::vector<int>(L * P)),
// // allocate host memory for parity check matrices
// _pcmX_h(J * P, L * P), _pcmZ_h(K * P, L * P),
// // allocate host and device memory for syndromes
// _syndromeX_h(_numEqsX,0), _syndromeX_d(_numEqsX,0),
// _syndromeZ_h(_numEqsZ,0), _syndromeZ_d(_numEqsZ,0),
// // allocate host and device memory for var node updates
// _varNodesX(_numVars,_numEqsX,0), _varNodesZ(_numVars,_numEqsZ,0),
// _varNodesX_d(_numVars,_numEqsX,0), _varNodesZ_d(_numVars,_numEqsZ,0),
// // allocate host and device memory for check node updates
// _eqNodesX(_numEqsX,_numVars,0), _eqNodesZ(_numEqsZ,_numVars,0),
// _eqNodesX_d(_numEqsX,_numVars,0), _checkNodesZ_d(_numEqsZ,_numVars,0),
// // allocate host and device memory for index matrices
// _eqNodeVarIndicesX(_numEqsX, L), _eqNodeVarIndicesZ(_numEqsZ, L),
// _eqNodeVarIndicesX_d(_numEqsX, L), _eqNodeVarIndicesZ_d(_numEqsZ, L),
// _varNodeEqIndicesX(_numVars,J), _varNodeEqIndicesZ(_numVars, K),
// _varNodeEqIndicesX_d(_numVars, J), _varNodeEqIndicesZ_d(_numVars, K),
// _errorGenerator(_numVars)
//{
// int i, j, k, l, t, p, invSigma;
//
// // index matrices for parity check matrices _pcmX_h and _pcmZ_h
// IntArray2d_h hHC(J, L);
// IntArray2d_h hHD(K, L);
//
// // construct the cyclic set from which HC and HD will be made
// cusp::array1d<int, cusp::host_memory> ZP(P - 1);
// for (i = 0; i < P - 1; ++i) ZP[i] = i + 1;
// print(ZP);
//
// // find sigma^(-1): the element of ZP that, when multiplied by sigma, gives 1 (mod P)
// for (i = 0; ZP[i] * sigma % P != 1; ++i); // loop through ZP until the inverse element is found.
// invSigma = ZP[i];
//
// // Build parity check matrices for HC and HD on the host since this is a one shot operation.
// // Time to transfer data to the gpu will make this inefficient.
// for (j = 0; j < J; ++j)
// {
// for (l = 0; l < L; ++l)
// {
// t = 1;
// if (l < L / 2)
// {
// p = -j + l;
// // find the correct power of sigma (or inverse sigma if p is negative)
// if (p < 0) for (i = 0; i < -p; ++i) t = (t * invSigma) % P; // sigma^(-k) = (sigma^(-1))^k
// else for (i = 0; i < p; ++i) t = (t * sigma) % P;
// }
// else
// {
// p = j - 1 + l;
// // find the correct power of sigma (or inverse sigma if p is negative)
// if (p < 0) for (i = 0; i < -p; ++i) t = (t * invSigma) % P;
// else for (i = 0; i < p; ++i) t = (t * sigma) % P;
// t = P - (tau * t) % P; // -(tau*sigma^p) = P - (tau*sigma^p)
// }
// hHC(j, l) = t;
// }
// }
//
// for (k = 0; k < K; ++k)
// {
// for (l = 0; l < L; ++l)
// {
// t = 1;
// if (l < L / 2)
// {
// p = -k - 1 + l;
// // find the correct power of sigma (or inverse sigma if p is negative)
// if (p < 0) for (i = 0; i < -p; ++i) t = (t * invSigma) % P; // sigma^(-k) = (sigma^(-1))^k
// else for (i = 0; i < p; ++i) t = (t * sigma) % P;
// t = (tau * t) % P;
// }
// else
// {
// p = k + l;
// // find the correct power of sigma (or inverse sigma if p is negative)
// if (p < 0) for (i = 0; i < -p; ++i) t = (t * invSigma) % P;
// else for (i = 0; i < p; ++i) t = (t * sigma) % P;
// t = P - (t); // -(sigma^p) = P - (sigma^p)
// }
// hHD(k, l) = t;
// }
// }
//// print_matrix(hHC);
//// print_matrix(hHD);
//
// int cj, ck, cjR, ckR, cl, c, row, col;
// // Construct the parity check matrix row by row.
// // The matrix is made up of JxL PxP blocks.
// // Each block is a circulant permutation matrix, I(1)^c with c given by HC calculated previously
// // see https://arxiv.org/pdf/quant-ph/0701020.pdf or https://en.wikipedia.org/wiki/Circulant_matrix
// for (row = 0; row < J * P; ++row)
// {
// cj = (int)(row / P); // the row index for HC is the integer part of j/P
// cjR = row % P; // the row within block cj is j%P. P rows per block.
// for (cl = 0; cl < L; ++cl)
// {
// c = hHC(cj, cl); //this is the power for the circulant permutation matrix, I(1)^c
// // cjR=0, c=1, block column index for non-zero entry = 1
// // cjR=1, c=1, block column index for non-zero entry = 2
// // cjR=P-1, c=1, block column index for non-zero entry = 0
// // block column index = (c + cjR) % P
// // offset block column index by block width P: offset = cl * P
// // column index = (c + cjR) % P + (cl * P);
// col = (c + cjR) % P + (cl * P);
// int index = row * _numVars + col;
// _hHC_vec[row][col] = 1;
// _pcmX_h.values[index] = 1; // set value of non-zero value i
// }
// }
//
// for (row = 0; row < K * P; ++row)
// {
// ck = (int)(row / P); // the row index for HD is the integer part of k/P
// ckR = row % P; // the row within block ck is k%P. P rows per block.
// for (cl = 0; cl < L; ++cl)
// {
// c = hHD(ck, cl); //this is the power for the circulant permutation matrix, I(1)^c
// col = (c + ckR) % P + (cl * P);
// int index = row * _numVars + col;
// _hHD_vec[row][col] = 1;
// _pcmZ_h.values[index] = 1; // set value of non-zero value i
// }
// }
//
// // set index arrays and device pointers
// SetIndexArrays(_eqNodeVarIndicesX, _varNodeEqIndicesX, _pcmX_h);
// thrust::copy(_eqNodeVarIndicesX.values.begin(), _eqNodeVarIndicesX.values.end(), _eqNodeVarIndicesX_d.values.begin());
// thrust::copy(_varNodeEqIndicesX.values.begin(), _varNodeEqIndicesX.values.end(), _varNodeEqIndicesX_d.values.begin());
// _eqNodeVarIndicesX_d_ptr = thrust::raw_pointer_cast(&_eqNodeVarIndicesX_d.values[0]);
// _varNodeEqIndicesX_d_ptr = thrust::raw_pointer_cast(&_varNodeEqIndicesX_d.values[0]);
//
// SetIndexArrays(_eqNodeVarIndicesZ, _varNodeEqIndicesZ, _pcmZ_h);
// thrust::copy(_eqNodeVarIndicesZ.values.begin(), _eqNodeVarIndicesZ.values.end(), _eqNodeVarIndicesZ_d.values.begin());
// thrust::copy(_varNodeEqIndicesZ.values.begin(), _varNodeEqIndicesZ.values.end(), _varNodeEqIndicesZ_d.values.begin());
// _eqNodeVarIndicesZ_d_ptr = thrust::raw_pointer_cast(&_eqNodeVarIndicesZ_d.values[0]);
// _varNodeEqIndicesZ_d_ptr = thrust::raw_pointer_cast(&_varNodeEqIndicesZ_d.values[0]);
//
// _numEqsPerVarX = _varNodeEqIndicesX.num_cols;
// _numVarsPerEqX = _eqNodeVarIndicesX.num_cols;
// _numEqsPerVarZ = _varNodeEqIndicesZ.num_cols;
// _numVarsPerEqZ = _eqNodeVarIndicesZ.num_cols;
//
// // set device memory pointers for pre-allocated device matrices
// _syndromeX_d_ptr = thrust::raw_pointer_cast(&_syndromeX_d[0]);
// _syndromeZ_d_ptr = thrust::raw_pointer_cast(&_syndromeZ_d[0]);
//
// _varNodesX_d_ptr = thrust::raw_pointer_cast(&_varNodesX_d.values[0]);
// _varNodesZ_d_ptr = thrust::raw_pointer_cast(&_varNodesZ_d.values[0]);
//
// _eqNodesX_d_ptr = thrust::raw_pointer_cast(&_eqNodesX_d.values[0]);
// _eqNodesZ_d_ptr = thrust::raw_pointer_cast(&_checkNodesZ_d.values[0]);
//
// // We now have parity check matrices hPHC and hPHD on the host. https://arxiv.org/pdf/quant-ph/0701020.pdf
// // These satisfy the constraints that the girth of their respective Tanner graphs are >= 6
// // and they have a "twisted relation", i.e. dual(D) is in C.
//}
//
//QC_LDPC_CSS::~QC_LDPC_CSS()
//{
//}
//
//void QC_LDPC_CSS::SetIndexArrays(IntArray2d_h& checkNodeVarIndices, IntArray2d_h& varNodeEqIndices, IntArray2d_h& parityCheckMatrix)
//{
// // set device index matrices for var node and check node updates
// // each equation will include L variables.
// // each variable will be involved in J equations
// // loop over all check node equations in the parity check matrix for X errors
// int numEqs = parityCheckMatrix.num_rows;
// int n = parityCheckMatrix.num_cols;
// std::vector<std::vector<int>> cnVarIndices(numEqs, std::vector<int>());
// std::vector<std::vector<int>> vnEqIndices(n, std::vector<int>());
// // loop over all equations
// for (int eqIdx = 0; eqIdx < numEqs; ++eqIdx)
// {
// // loop over all variables
// for (int varIdx = 0; varIdx < n; ++varIdx)
// {
// int pcmIdx = eqIdx * n + varIdx;
// // if the entry in the pcm is 1, this check node involves this variable. set the index entry
// if (parityCheckMatrix.values[pcmIdx])
// {
// cnVarIndices[eqIdx].push_back(varIdx);
// vnEqIndices[varIdx].push_back(eqIdx);
// }
// }
// }
// // copy data into provided array containers
// auto index = 0;
// for (auto i = 0; i<cnVarIndices.size(); ++i)
// {
// for(auto j=0; j<cnVarIndices[0].size(); ++j)
// {
// checkNodeVarIndices.values[index] = cnVarIndices[i][j];
// ++index;
// }
// }
// index = 0;
// for (auto i = 0; i<vnEqIndices.size(); ++i)
// {
// for (auto j = 0; j<vnEqIndices[0].size(); ++j)
// {
// varNodeEqIndices.values[index] = vnEqIndices[i][j];
// ++index;
// }
// }
//}
//
//void QC_LDPC_CSS::WriteToFile(IntArray2d_h vec, const char* str)
//{
// std::ofstream file;
// file.open(str, std::ios::app);
// if (file.is_open()) {
// std::cout << "Writing to file " << str << std::endl;
// for (auto i = 0; i < vec.num_rows; ++i)
// {
// for (auto j = 0; j < vec.num_cols; ++j) {
// int index = i*vec.num_cols + j;
// auto v = vec.values[index];
// file << v << " ";
// }
// file << "\n";
// }
// file << "\n\n";
// file.close();
// }
// else
// {
// std::cout << "Failed to open file " << str << std::endl;
// }
//}
//
//void QC_LDPC_CSS::WriteToFile(cusp::array2d<float, cusp::host_memory, cusp::row_major> vec, const char* str)
//{
// std::ofstream file;
// file.open(str, std::ios::app);
// if (file.is_open()) {
// std::cout << "Writing to file " << str << std::endl;
// for (auto i = 0; i < vec.num_rows; ++i)
// {
// for (auto j = 0; j < vec.num_cols; ++j) {
// int index = i*vec.num_cols + j;
// auto v = vec.values[index];
// file << std::fixed << std::setprecision(3) << v << " ";
// }
// file << "\n";
// }
// file << "\n\n";
// file.close();
// }
// else
// {
// std::cout << "Failed to open file " << str << std::endl;
// }
//}
//
//void QC_LDPC_CSS::WriteToFile(std::vector<std::vector<float>> vec, const char* str)
//{
// std::ofstream file;
// file.open(str, std::ios::app);
// if (file.is_open()) {
// std::cout << "Writing to file " << str << std::endl;
// for (auto i = 0; i < vec.size(); ++i)
// {
// for (auto j = 0; j < vec[i].size(); ++j) {
// auto v = vec[i][j];
// file << std::fixed << std::setprecision(3) << v << " ";
// }
// file << "\n";
// //file << v << ",";
// }
// file << "\n\n";
// file.close();
// }else
// {
// std::cout << "Failed to open file " << str << std::endl;
// }
//}
//
//void QC_LDPC_CSS::WriteToFile(std::vector<int> vec, const char* str)
//{
// std::ofstream file;
// file.open(str, std::ios::app);
// if (file.is_open()) {
// std::cout << "Writing to file " << str << std::endl;
// for (auto i = 0; i < vec.size(); ++i)
// {
// auto v = vec[i];
// file << std::fixed << std::setprecision(3) << v << " ";
// }
// file << "\n";
// file.close();
// }
// else
// {
// std::cout << "Failed to open file " << str << std::endl;
// }
//}
//
///*
//Given a set of x errors and z errors, this will attempt to decode the errors
//and will return a success / failure code.
//See paper for algorithm:
//We will use a belief-propagation decoding scheme.
//*/
//QC_LDPC_CSS::ErrorCode QC_LDPC_CSS::DecodeCUDA(std::vector<int> syndromeX, std::vector<int> syndromeZ, float errorProbability,
// std::vector<int> &xErrors, std::vector<int> &zErrors, int maxIterations)
//{
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
//// float varUpdateKernelTime = 0;
//// float eqUpdateKernelTime = 0;
//// std::chrono::microseconds memCopyTime(0);
// std::chrono::microseconds checkConvergenceTime(0);
// std::chrono::microseconds updateTime(0);
// std::chrono::microseconds initTime(0);
// std::chrono::microseconds decodeTime(0);
// std::chrono::microseconds completeTime(0);
//
// auto begin = std::chrono::high_resolution_clock::now();
//
// // We will first decode xErrors and then zErrors
// // An NxM parity check matrix H can be viewed as a bipartite graph with
// // N symbol nodes and M parity check nodes. Each symbol node is connected
// // to ds parity-check nodes, and each parity-check node is connected to dc
// // symbol nodes.
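// // (Illustrative aside, not taken from this code: in a (3,6)-regular LDPC code
// // ds = 3 and dc = 6, i.e. every variable appears in 3 equations and every
// // equation involves 6 variables.)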
// float p = 2.0f / 3.0f * errorProbability; // a priori probability for x/z OR y error
// float high = 0.99f;
// float low = 0.01f;
//
// // clear var node and check node arrays, and set syndrome arrays
// for (int i = 0; i < _varNodesX.num_entries; ++i) _varNodesX.values[i] = 0;
// for (int i = 0; i < _varNodesZ.num_entries; ++i) _varNodesZ.values[i] = 0;
// int numVarsPerEq = _eqNodeVarIndicesX.num_cols;
// for (int eqIdx = 0; eqIdx<_numEqsX; ++eqIdx)
// {
// for (int j = 0; j<numVarsPerEq; ++j)
// {
// int idx = eqIdx * numVarsPerEq + j;
// int varIdx = _eqNodeVarIndicesX.values[idx];
// int varNodeIdx = varIdx * _numEqsX + eqIdx;
// _varNodesX.values[varNodeIdx] = p;
// }
// }
// for (int eqIdx = 0; eqIdx<_numEqsZ; ++eqIdx)
// {
// for (int j = 0; j<_eqNodeVarIndicesZ.num_cols; ++j)
// {
// int idx = eqIdx * numVarsPerEq + j;
// int varIdx = _eqNodeVarIndicesZ.values[idx];
// int varNodeIdx = varIdx * _numEqsX + eqIdx;
// _varNodesZ.values[varNodeIdx] = p;
// }
// }
// for (int i = 0; i < _eqNodesX.num_entries; ++i) _eqNodesX.values[i] = 0.0f;
// for (int i = 0; i < _eqNodesZ.num_entries; ++i) _eqNodesZ.values[i] = 0.0f;
//
// // copy host data to device
// thrust::copy(_varNodesX.values.begin(), _varNodesX.values.end(), _varNodesX_d.values.begin());
// thrust::copy(_varNodesZ.values.begin(), _varNodesZ.values.end(), _varNodesZ_d.values.begin());
// thrust::copy(_eqNodesX.values.begin(), _eqNodesX.values.end(), _eqNodesX_d.values.begin());
// thrust::copy(_eqNodesZ.values.begin(), _eqNodesZ.values.end(), _checkNodesZ_d.values.begin());
// thrust::copy(syndromeX.begin(), syndromeX.end(), _syndromeX_d.begin());
// thrust::copy(syndromeZ.begin(), syndromeZ.end(), _syndromeZ_d.begin());
//
//
// auto N = maxIterations; // maximum number of iterations
//// bool xConverge = false;
//// bool zConverge = false;
//
// //dim3 eqNodeGridDimX(_numEqsX); // number of blocks.
// //dim3 eqNodeBlockDimX(_numEqsX,_numVarsPerEqX); // number of threads per block
//
// //dim3 eqNodeGridDimZ(_numEqsZ);
// //dim3 eqNodeBlockDimZ(_numEqsZ,_numVarsPerEqZ);
//
// //dim3 varNodeGridDimX(_numVars);
// //dim3 varNodeBlockDimX(_numEqsPerVarX);
// //auto varNodeMemSizeX = _numEqsPerVarX * sizeof(float);
//
// //dim3 varNodeGridDimZ(_numVars);
// //dim3 varNodeBlockDimZ(_numEqsPerVarZ);
// //auto varNodeMemSizeZ = _numEqsPerVarX * sizeof(float);
//
// auto finish = std::chrono::high_resolution_clock::now();
// auto duration = std::chrono::duration_cast<std::chrono::microseconds>(finish - begin);
// initTime += duration;
//
// begin = std::chrono::high_resolution_clock::now();
// // launch a single block with a single thread.
// beliefPropogation_kernel << <1, 1 >> > (_eqNodesX_d_ptr, _varNodesX_d_ptr, _eqNodeVarIndicesX_d_ptr, _varNodeEqIndicesX_d_ptr,
// _syndromeX_d_ptr, p, _numVars, _numEqsX, _numVarsPerEqX, _numEqsPerVarX, N);
//
// // launch a single block with a single thread.
// beliefPropogation_kernel << <1, 1 >> > (_eqNodesZ_d_ptr, _varNodesZ_d_ptr, _eqNodeVarIndicesZ_d_ptr, _varNodeEqIndicesZ_d_ptr,
// _syndromeZ_d_ptr, p, _numVars, _numEqsZ, _numVarsPerEqZ, _numEqsPerVarZ, N);
//
// cudaDeviceSynchronize();
//
// finish = std::chrono::high_resolution_clock::now();
// duration = std::chrono::duration_cast<std::chrono::microseconds>(finish - begin);
// decodeTime += duration;
//
// begin = std::chrono::high_resolution_clock::now();
//
// thrust::copy(_varNodesX_d.values.begin(), _varNodesX_d.values.end(), _varNodesX.values.begin());
// thrust::copy(_varNodesZ_d.values.begin(), _varNodesZ_d.values.end(), _varNodesZ.values.begin());
//
// // accumulate the error estimates into a single vector
// std::vector<int> finalEstimatesX(_varNodesX.num_rows, 0);
// std::vector<int> finalEstimatesZ(_varNodesZ.num_rows, 0);
//
// // check for correct error decoding
// ErrorCode code = SUCCESS;
// // check convergence errors
// for (auto varIdx = 0; varIdx < _varNodesX.num_rows; ++varIdx) {
// for (auto eqIdx = 0; eqIdx < _varNodesX.num_cols; ++eqIdx) {
// int index = varIdx * _varNodesX.num_cols + eqIdx;
// if (_varNodesX.values[index] >= 0.5f) // best guess of error
// {
// finalEstimatesX[varIdx] = 1;
// break;
// }
// }
// }
// for (auto varIdx = 0; varIdx < _varNodesZ.num_rows; ++varIdx) {
// for (auto eqIdx = 0; eqIdx < _varNodesZ.num_cols; ++eqIdx) {
// int index = varIdx * _varNodesZ.num_cols + eqIdx;
// if (_varNodesZ.values[index] >= 0.5f) // best guess of error
// {
// finalEstimatesZ[varIdx] = 1;
// break;
// }
// }
// }
// // check for convergence failure
// if (!CheckConvergence(_varNodesX, high, low)) {
// code = code | CONVERGENCE_FAIL_X;
// }
// if (!CheckConvergence(_varNodesZ, high, low)) {
// code = code | CONVERGENCE_FAIL_Z;
// }
// // check syndrome errors
// auto xS = GetXSyndrome(finalEstimatesX);
// if (!std::equal(syndromeX.begin(), syndromeX.end(), xS.begin())) { code = code | SYNDROME_FAIL_X; }
//
// auto zS = GetZSyndrome(finalEstimatesZ);
// if (!std::equal(syndromeZ.begin(), syndromeZ.end(), zS.begin())) { code = code | SYNDROME_FAIL_Z; }
//
// xErrors = finalEstimatesX;
// zErrors = finalEstimatesZ;
// finish = std::chrono::high_resolution_clock::now();
// duration = std::chrono::duration_cast<std::chrono::microseconds>(finish - begin);
// completeTime += duration;
//
//
//// std::cout << "VarNode update kernel execution time: " << varUpdateKernelTime * 1000 << " micro-seconds." << std::endl;
//// std::cout << "EqNode update kernel execution time: " << eqUpdateKernelTime * 1000 << " micro-seconds." << std::endl;
//// std::cout << "MemCopyTime: " << memCopyTime.count() << " micro-seconds." << std::endl;
//// std::cout << "Check convergence time: " << checkConvergenceTime.count() << " micro-seconds." << std::endl;
// std::cout << "Init time: " << initTime.count() << " micro-seconds." << std::endl;
// std::cout << "Decode time: " << decodeTime.count() << " micro-seconds." << std::endl;
// std::cout << "Complete time: " << completeTime.count() << " micro-seconds." << std::endl;
// std::cout << "Check Convergence time: " << checkConvergenceTime.count() << " micro-seconds." << std::endl;
// std::cout << "Update time: " << updateTime.count() << " micro-seconds." << std::endl;
//
//
// return code;
//}
//
///*
// Given a set of x errors and z errors, this will attempt to decode the errors
// and will return a success / failure code.
// See paper for algorithm:
// We will use a belief-propagation decoding scheme.
//*/
//QC_LDPC_CSS::ErrorCode QC_LDPC_CSS::DecodeCPU(std::vector<int> syndromeX, std::vector<int> syndromeZ, float errorProbability,
// std::vector<int> &xErrors, std::vector<int> &zErrors, int maxIterations)
//{
// // We will first decode xErrors and then zErrors
// // An NxM parity check matrix H can be viewed as a bipartite graph with
// // N symbol nodes and M parity check nodes. Each symbol node is connected
// // to ds parity-check nodes, and each parity-check node is connected to dc
// // symbol nodes.
// float p = 2.0f / 3.0f * errorProbability; // a priori probability for x/z OR y error
// float high = 0.99f;
// float low = 0.01f;
// // array of probability estimates to send to each check node. there are _numEqsX variables, and _numVars check nodes
// /* std::vector<std::vector<float>> varNodeEstimatesX(_numEqsX, std::vector<float>(_numVars, p));
// std::vector<std::vector<float>> varNodeEstimatesZ(_numEqsZ, std::vector<float>(_numVars, p));*/
//
// // each var node has a list of estimates from each check node.
// std::vector<std::vector<float>> varNodeEstimatesX(_numVars, std::vector<float>(_numEqsX, p));
// std::vector<std::vector<float>> varNodeEstimatesZ(_numVars, std::vector<float>(_numEqsZ, p));
//
// // each check node has a list of beliefs for the value of each var node.
// std::vector<std::vector<float>> checkNodeBeliefsX(_numEqsX, std::vector<float>(_numVars));
// std::vector<std::vector<float>> checkNodeBeliefsZ(_numEqsZ, std::vector<float>(_numVars));
//
// //WriteToFile(varNodeEstimatesX, "results/xEstimates.txt");
// //WriteToFile(checkNodeBeliefsX, "results/xBeliefs.txt");
//
// auto N = maxIterations; // maximum number of iterations
// bool xConverge = false;
// bool zConverge = false;
// for (auto n = 0; n < N; n++)
// {
// if (xConverge && zConverge) break;
// if(!xConverge)
// {
// EqNodeUpdate(varNodeEstimatesX, checkNodeBeliefsX, _hHC_vec, syndromeX);
// VarNodeUpdate(varNodeEstimatesX, checkNodeBeliefsX, _hHC_vec, p, n == N - 1);
// //WriteToFile(varNodeEstimatesX, "results/xEstimates.txt");
// //WriteToFile(checkNodeBeliefsX, "results/xBeliefs.txt");
// if (n % 10 == 0)
// {
// xConverge = CheckConvergence(varNodeEstimatesX, high, low);
// }
// }
//
// if (!zConverge)
// {
// EqNodeUpdate(varNodeEstimatesZ, checkNodeBeliefsZ, _hHD_vec, syndromeZ);
// VarNodeUpdate(varNodeEstimatesZ, checkNodeBeliefsZ, _hHD_vec, p, n == N - 1);
// if (n % 10 == 0)
// {
// zConverge = CheckConvergence(varNodeEstimatesZ, high, low);
// }
// }
//
//
// }
// // accumulate the error estimates into a single vector
// std::vector<int> finalEstimatesX(varNodeEstimatesX.size(), 0);
// std::vector<int> finalEstimatesZ(varNodeEstimatesZ.size(), 0);
//
// // check for correct error decoding
// ErrorCode code = SUCCESS;
// // check convergence errors
// for (auto i = 0; i < varNodeEstimatesX.size(); ++i) {
// for (auto j = 0; j < varNodeEstimatesX[i].size(); ++j) {
// if (varNodeEstimatesX[i][j] != 0.0f) {
// if(varNodeEstimatesX[i][j] > high) finalEstimatesX[i] = 1;
// else if (varNodeEstimatesX[i][j] < low) finalEstimatesX[i] = 0;
// else {
// finalEstimatesX[i] = -1;
// code = code | CONVERGENCE_FAIL_X;
// }
// break;
// }
// }
// }
// for (auto i = 0; i < varNodeEstimatesZ.size(); ++i) {
// for (auto j = 0; j < varNodeEstimatesZ[i].size(); ++j) {
// if (varNodeEstimatesZ[i][j] != 0.0f) {
// if (varNodeEstimatesZ[i][j] > high) finalEstimatesZ[i] = 1;
// else if (varNodeEstimatesZ[i][j] < low) finalEstimatesZ[i] = 0;
// else {
// finalEstimatesZ[i] = -1;
// code = code | CONVERGENCE_FAIL_Z;
// }
// break;
// }
// }
// }
// // check syndrome errors
// if (code == SUCCESS) {
// auto xS = GetXSyndrome(finalEstimatesX);
// if (!std::equal(syndromeX.begin(), syndromeX.end(), xS.begin())) { code = code | SYNDROME_FAIL_X; }
//
// auto zS = GetZSyndrome(finalEstimatesZ);
// if (!std::equal(syndromeZ.begin(), syndromeZ.end(), zS.begin())) { code = code | SYNDROME_FAIL_Z; }
// }
//
// xErrors = finalEstimatesX;
// zErrors = finalEstimatesZ;
//
// return code;
//}
//
//QC_LDPC_CSS::ErrorCode QC_LDPC_CSS::DecodeCPU2(std::vector<int> xSyndrome, std::vector<int> zSyndrome, float errorProbability, std::vector<int>& xErrors, std::vector<int>& zErrors, int maxIterations)
//{
// // We will first decode xErrors and then zErrors
// // An NxM parity check matrix H can be viewed as a bipartite graph with
// // N symbol nodes and M parity check nodes. Each symbol node is connected
// // to ds parity-check nodes, and each parity-check node is connected to dc
// // symbol nodes.
// float p = 2.0f / 3.0f * errorProbability; // a priori probability for x/z OR y error
// float high = 0.99f;
// float low = 0.01f;
//
// // clear var node and check node arrays, and set syndrome arrays
// for (int i = 0; i < _varNodesX.num_entries; ++i) _varNodesX.values[i] = 0;
// for (int i = 0; i < _varNodesZ.num_entries; ++i) _varNodesZ.values[i] = 0;
// int numVarsPerEq = _eqNodeVarIndicesX.num_cols;
// for(int eqIdx=0; eqIdx<_numEqsX; ++eqIdx)
// {
// for(int j=0; j<numVarsPerEq; ++j)
// {
// int idx = eqIdx * numVarsPerEq + j;
// int varIdx = _eqNodeVarIndicesX.values[idx];
// int varNodeIdx = varIdx * _numEqsX + eqIdx;
// _varNodesX.values[varNodeIdx] = p;
// }
// }
// for (int eqIdx = 0; eqIdx<_numEqsZ; ++eqIdx)
// {
// for (int j = 0; j<_eqNodeVarIndicesZ.num_cols; ++j)
// {
// int idx = eqIdx * numVarsPerEq + j;
// int varIdx = _eqNodeVarIndicesZ.values[idx];
// int varNodeIdx = varIdx * _numEqsX + eqIdx;
// _varNodesZ.values[varNodeIdx] = p;
// }
// }
// for (int i = 0; i < _eqNodesX.num_entries; ++i) _eqNodesX.values[i] = 0.0f;
// for (int i = 0; i < _eqNodesZ.num_entries; ++i) _eqNodesZ.values[i] = 0.0f;
// for (int i = 0; i < xSyndrome.size(); ++i) _syndromeX_h[i] = xSyndrome[i];
// for (int i = 0; i < zSyndrome.size(); ++i) _syndromeZ_h[i] = zSyndrome[i];
//
// auto N = maxIterations; // maximum number of iterations
// bool xConverge = false;
// bool zConverge = false;
// //WriteToFile(_varNodesX, "results/varX_CPU.txt");
// //WriteToFile(_eqNodesX, "results/eqX_CPU.txt");
// for (auto n = 0; n < N; n++)
// {
// if (xConverge && zConverge) break;
// if (!xConverge)
// {
// EqNodeUpdate(_eqNodesX,_varNodesX,_eqNodeVarIndicesX, _syndromeX_h);
// VarNodeUpdate(_eqNodesX, _varNodesX, _varNodeEqIndicesX ,p, n == N - 1);
// //WriteToFile(_varNodesX, "results/varX_CPU.txt");
// //WriteToFile(_eqNodesX, "results/eqX_CPU.txt");
// if (n % 10 == 0)
// {
// xConverge = CheckConvergence(_varNodesX, high, low);
// }
// }
//
// if (!zConverge)
// {
// EqNodeUpdate(_eqNodesZ, _varNodesZ, _eqNodeVarIndicesZ, _syndromeZ_h);
// VarNodeUpdate(_eqNodesZ, _varNodesZ, _varNodeEqIndicesZ , p, n == N - 1);
// if (n % 10 == 0)
// {
// zConverge = CheckConvergence(_varNodesZ, high, low);
// }
// }
// }
// // accumulate the error estimates into a single vector
// std::vector<int> finalEstimatesX(_varNodesX.num_rows, 0);
// std::vector<int> finalEstimatesZ(_varNodesZ.num_rows, 0);
//
// // check for correct error decoding
// ErrorCode code = SUCCESS;
// // check convergence errors
// for (auto varIdx = 0; varIdx < _varNodesX.num_rows; ++varIdx) {
// for (auto eqIdx = 0; eqIdx < _varNodesX.num_cols; ++eqIdx) {
// int index = varIdx * _varNodesX.num_cols + eqIdx;
// if(_varNodesX.values[index] >= 0.5f) // best guess of error
// {
// finalEstimatesX[varIdx] = 1;
// break;
// }
// }
// }
// for (auto varIdx = 0; varIdx < _varNodesZ.num_rows; ++varIdx) {
// for (auto eqIdx = 0; eqIdx < _varNodesZ.num_cols; ++eqIdx) {
// int index = varIdx * _varNodesZ.num_cols + eqIdx;
// if (_varNodesZ.values[index] >= 0.5f) // best guess of error
// {
// finalEstimatesZ[varIdx] = 1;
// break;
// }
// }
// }
// // check for convergence failure
// if (!CheckConvergence(_varNodesX, high, low)) {
// code = code | CONVERGENCE_FAIL_X;
//// WriteToFile(_varNodesX, "results/convXCPU.txt");
// }
// if (!CheckConvergence(_varNodesZ, high, low)) code = code | CONVERGENCE_FAIL_Z;
// // check syndrome errors
// auto xS = GetXSyndrome(finalEstimatesX);
// if (!std::equal(xSyndrome.begin(), xSyndrome.end(), xS.begin())) { code = code | SYNDROME_FAIL_X; }
//
// auto zS = GetZSyndrome(finalEstimatesZ);
// if (!std::equal(zSyndrome.begin(), zSyndrome.end(), zS.begin())) { code = code | SYNDROME_FAIL_Z; }
//
// xErrors = finalEstimatesX;
// zErrors = finalEstimatesZ;
//
// return code;
//}
//
//void QC_LDPC_CSS::EqNodeUpdate(FloatArray2d_h &eqNodes, FloatArray2d_h varNodes, IntArray2d_h eqNodeVarIndices, IntArray1d_h syndrome)
//{
// // For a check node interested in variables a,b,c,d to estimate the updated probability for variable a
// // syndrome = 0: even # of errors -> pa' = pb(1-pc)(1-pd) + pc(1-pb)(1-pd) + pd(1-pb)(1-pc) + pb*pc*pd
// // = 0.5 * (1 - (1-2pb)(1-2pc)(1-2pd))
// // syndrome = 1: odd # of errors -> pa' = (1-pb)(1-pc)(1-pd) + pb*pc*(1-pd) + pb*(1-pc)*pd + (1-pb)*pc*pd
// // = 0.5 * (1 + (1-2pb)(1-2pc)(1-2pd))
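// // Worked example (illustrative only, not from the original source): if the three
// // other estimates are pb = pc = pd = 0.1, each (1-2p) term is 0.8 and the product
// // is 0.8 * 0.8 * 0.8 = 0.512, so
// //   syndrome = 0  ->  pa' = 0.5 * (1 - 0.512) = 0.244
// //   syndrome = 1  ->  pa' = 0.5 * (1 + 0.512) = 0.756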
// int numEqs = eqNodes.num_rows;
// int numVarsPerEq = eqNodeVarIndices.num_cols;
// int n = varNodes.num_rows;
// for (auto eqIdx = 0; eqIdx < numEqs; ++eqIdx) // loop over check nodes (parity equations)
// {
// int firstVarIdx = eqIdx*numVarsPerEq;
// // loop over variables to be updated for this check node
// for (auto i = 0; i < numVarsPerEq; ++i)
// {
// int index = firstVarIdx + i; // 1d array index to look up the variable index
// int varIdx = eqNodeVarIndices.values[index]; // variable index under investigation for this eq
// float product = 1.0f; // reset product
// // loop over all other variables in the equation, accumulate (1-2p) terms
// for (auto k = 0; k < numVarsPerEq; ++k)
// {
// if (k == i) continue; // skip the variable being updated
// int otherIndex = firstVarIdx + k; // 1d array index to look up the variable index
// int otherVarIdx = eqNodeVarIndices.values[otherIndex];
//
// // the index holding the estimate being used for this eq
// int varNodesIndex = otherVarIdx * numEqs + eqIdx;
// float value = varNodes.values[varNodesIndex]; // belief value for this variable and this eq
// product *= (1.0f - 2.0f*value);
// }
// int cnIdx = eqIdx * n + varIdx; // index for value within the check node array to update
// if (syndrome[eqIdx]) {
// eqNodes.values[cnIdx] = 0.5 * (1.0f + product); // syndrome = 1 -> odd parity
// }
// else {
// eqNodes.values[cnIdx] = 0.5f * (1.0f - product); // syndrome = 0 -> even parity
// }
// }
// }
// // WriteToFile(eqNodeBeliefs, "results/CheckNodeBeliefs.txt");
//}
//
//void QC_LDPC_CSS::EqNodeUpdate(std::vector<std::vector<float>>& varNodeEstimates,
// std::vector<std::vector<float>>& eqNodeBeliefs,
// std::vector<std::vector<int>> parityCheckMatrix,
// std::vector<int> syndrome)
//{
// // For a check node interested in variables a,b,c,d to estimate the updated probability for variable a
// // syndrome = 0: even # of errors -> pa' = pb(1-pc)(1-pd) + pc(1-pb)(1-pd) + pd(1-pb)(1-pc) + pb*pc*pd
// // = 0.5 * (1 - (1-2pb)(1-2pc)(1-2pd))
// // syndrome = 1: odd # of errors -> pa' = (1-pb)(1-pc)(1-pd) + pb*pc*(1-pd) + pb*(1-pc)*pd + (1-pb)*pc*pd
// // = 0.5 * (1 + (1-2pb)(1-2pc)(1-2pd))
// int numEqs = eqNodeBeliefs.size();
// int n = varNodeEstimates.size();
//
// for (auto eqIdx = 0; eqIdx < numEqs; ++eqIdx) // loop over check nodes (parity equations)
// {
// for (auto varIdx = 0; varIdx < n; ++varIdx) // loop over variables to be updated for this check node
// {
// eqNodeBeliefs[eqIdx][varIdx] = 0.0f; // not necessary, makes file output nicer.
// if (!parityCheckMatrix[eqIdx][varIdx]) continue; // if the parity check matrix is 0, the eq doesn't involve this var
// float product = 1.0f; // reset product
// for (auto otherVarIdx = 0; otherVarIdx < n; ++otherVarIdx) // loop over all other variables, accumulate (1-2p) terms
// {
// if (!parityCheckMatrix[eqIdx][otherVarIdx]) continue; // skip zeros
// if (otherVarIdx == varIdx) continue; // skip the variable being updated
// product *= (1.0f - 2.0f*varNodeEstimates[otherVarIdx][eqIdx]);
// }
// if(syndrome[eqIdx]) eqNodeBeliefs[eqIdx][varIdx] = 0.5 * (1.0f + product); // syndrome = 1 -> odd parity
// else eqNodeBeliefs[eqIdx][varIdx] = 0.5f * (1.0f - product); // syndrome = 0 -> even parity
// }
// }
//// WriteToFile(eqNodeBeliefs, "results/CheckNodeBeliefs.txt");
//}
//
//void QC_LDPC_CSS::VarNodeUpdate(FloatArray2d_h eqNodes, FloatArray2d_h& varNodes, IntArray2d_h varNodeEqIndices, float errorProbability, bool last)
//{
// // For a variable node connected to check nodes 1,2,3,4 use the following formula to send an estimate to var node 1
// // p1' = K*pch*p2*p3*p4 (pch is the channel error probability. ignore the estimate received from check node 1 unless last)
// // where K = 1/[(1-pch)(1-p2)(1-p3)(1-p4)... + pch*p2*p3*p4...]
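// // Worked example (illustrative only, not from the original source): with pch = 0.1
// // and two other-equation beliefs p2 = p3 = 0.9,
// //   prodP         = 0.1 * 0.9 * 0.9 = 0.081
// //   prodOneMinusP = 0.9 * 0.1 * 0.1 = 0.009
// //   p1'           = 0.081 / (0.081 + 0.009) = 0.9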
// int numEqs = eqNodes.num_rows;
// int n = varNodes.num_rows;
// int numEqsPerVar = varNodeEqIndices.num_cols;
//
// for (auto varIdx = 0; varIdx < n; ++varIdx) // loop over all variables
// {
// int firstVarNode = varIdx * numEqs; // start of entries in VarNodes array for this variable
// int firstEqIndices = varIdx * numEqsPerVar; // starting point for first equation in the index list for this var.
// for (auto j = 0; j < numEqsPerVar; ++j) // loop over all equations for this variable
// {
// // find the index of the equation estimate being updated
// int index = firstEqIndices + j;
// int eqIdx = varNodeEqIndices.values[index];
//
// // 1d index for var nodes entry being updated
// int varNodesIdx = firstVarNode + eqIdx;
//
// // start with a priori channel error probability
// float prodP = errorProbability;
// float prodOneMinusP = 1.0f - errorProbability;
//
// // calculate the updated probability for this check node based on belief estimates of all OTHER check nodes
// for (auto k = 0; k < numEqsPerVar; ++k)
// {
// int index2 = firstEqIndices + k; // 1d index for entry in the index array
// int otherEQIdx = varNodeEqIndices.values[index2];
//
// if (otherEQIdx == eqIdx && !last) continue;
// // 1d index for check nodes belief being used
// int checkNodesIdx = otherEQIdx * n + varIdx;
// float p = eqNodes.values[checkNodesIdx];
//
// prodOneMinusP *= (1.0f - p);
// prodP *= p;
// }
// float value = prodP / (prodOneMinusP + prodP);
// varNodes.values[varNodesIdx] = value;
// }
// }
//}
//
//void QC_LDPC_CSS::VarNodeUpdate(std::vector<std::vector<float>>& varNodeEstimates,
// std::vector<std::vector<float>>& eqNodeBeliefs,
// std::vector<std::vector<int>> parityCheckMatrix,
// float errorProbability, bool last)
//{
// // For a variable node connected to check nodes 1,2,3,4 use the following formula to send an estimated probability to node 1
// // p1' = K*pch*p2*p3*p4 (pch is the channel error probability. ignore the estimate received from check node 1)
// // where K = 1/[(1-pch)(1-p2)(1-p3)... + pch*p2*p3...]
// int numEqs = eqNodeBeliefs.size();
// int n = varNodeEstimates.size();
// for (auto varIdx = 0; varIdx < n; ++varIdx) // loop over all variables
// {
// for (auto eqIdx = 0; eqIdx < numEqs; ++eqIdx) // loop over all equations
// {
// varNodeEstimates[varIdx][eqIdx] = 0.0f; // not necessary, makes output nicer
// if (!parityCheckMatrix[eqIdx][varIdx]) continue; // skip equations that this variable isn't involved in
//
// float prodP = errorProbability; // start with a priori channel error probability
// float prodOneMinusP = 1.0f - errorProbability;
// // calculate the updated probability for this check node based on belief estimates of all OTHER check nodes
// for (auto otherEqIdx = 0; otherEqIdx < numEqs; ++otherEqIdx) // loop over all equation estimates
// {
// if (otherEqIdx == eqIdx && !last) continue; // skip the belief estimate from j to update the probability sent to j
// if (!parityCheckMatrix[otherEqIdx][varIdx]) continue; // skip equations that this variable isn't involved in
// float p = eqNodeBeliefs[otherEqIdx][varIdx];
//
// prodOneMinusP *= (1.0f - p);
// prodP *= p;
// }
// float value = prodP / (prodOneMinusP + prodP);
//// std::cout << "Setting var: " << i << " eq: " << j << " value: " << value << std::endl;
// varNodeEstimates[varIdx][eqIdx] = value;
// }
// }
//// WriteToFile(varNodeEstimates, "results/VariableNodeEstimates.txt");
//}
//
//std::vector<int> QC_LDPC_CSS::GetXSyndrome(std::vector<int> xErrors)
//{
// std::vector<int> syndrome(_numEqsX);
// for (int row = 0; row < _numEqsX; ++row)
// {
// auto x = 0;
// for (int col = 0; col < _numVars; ++col)
// {
// x += _hHC_vec[row][col] * xErrors[col];
// }
// syndrome[row] = x % 2;
// }
// return syndrome;
//}
//
//std::vector<int> QC_LDPC_CSS::GetZSyndrome(std::vector<int> zErrors)
//{
// std::vector<int> syndrome(_numEqsX);
// for (int row = 0; row < _numEqsX; ++row)
// {
// auto x = 0;
// for (int col = 0; col < _numVars; ++col)
// {
// x += _hHD_vec[row][col] * zErrors[col];
// }
// syndrome[row] = x % 2;
// }
// return syndrome;
//}
//
//void QC_LDPC_CSS::InitVarNodesArray(FloatArray2d_h& varNodes_h, FloatArray2d_d& varNodes_d, const IntArray2d_h& parityCheckMatrix, const int NUM_CONCURRENT_THREADS, float errorProbability)
//{
// int n = parityCheckMatrix.num_cols;
// int numEqs = parityCheckMatrix.num_rows;
// int size = n*numEqs;
// for (int varIdx = 0; varIdx < n; ++varIdx)
// {
// for (int eqIdx = 0; eqIdx < numEqs; ++eqIdx)
// {
// int pcmIdx = eqIdx * n + varIdx;
// for (int n = 0; n<NUM_CONCURRENT_THREADS; ++n)
// {
// if (!parityCheckMatrix.values[pcmIdx]) continue;
// int varNodesIdx = n*size + varIdx*numEqs + eqIdx;
// varNodes_h.values[varNodesIdx] = errorProbability;
// }
// }
// }
// thrust::copy(varNodes_h.values.begin(), varNodes_h.values.end(), varNodes_d.values.begin());
//}
//
//void QC_LDPC_CSS::SetDeviceSyndrome(const std::vector<int>& syndrome_h, const IntArray2d_d& syndrome_d)
//{
// for(int i=0; i<syndrome_h.size(); ++i)
// {
// syndrome_d.values[i] = syndrome_h[i];
// }
//}
//
//QC_LDPC_CSS::Statistics QC_LDPC_CSS::GetStatistics(int errorWeight, int numErrors, float errorProbability, int maxIterations)
//{
// //float p = 2 / 3 * errorProbability;
// //const int NUM_CONCURRENT_THREADS = 32;
// //std::vector<int> xErrors(_numVars, 0);
// //std::vector<int> zErrors(_numVars, 0);
//
// //// set up host and device memory for calculations
// //IntArray2d_h xSyndromeArray_h(NUM_CONCURRENT_THREADS,_numEqsX);
// //IntArray2d_d xSyndromeArray_d(NUM_CONCURRENT_THREADS,_numEqsX);
// //int* xSyndrome_d_ptrs[NUM_CONCURRENT_THREADS];
// //for (int i = 0; i < NUM_CONCURRENT_THREADS; ++i)
// // xSyndrome_d_ptrs[i] = thrust::raw_pointer_cast(&xSyndromeArray_d.values[i*_numEqsX]);
//
// //IntArray2d_h zSyndromeArray_h(NUM_CONCURRENT_THREADS,_numEqsZ);
// //IntArray2d_d zSyndromeArray_d(NUM_CONCURRENT_THREADS,_numEqsZ);
// //int* zSyndrome_d_ptrs[NUM_CONCURRENT_THREADS];
// //for (int i = 0; i < NUM_CONCURRENT_THREADS; ++i)
// // xSyndrome_d_ptrs[i] = thrust::raw_pointer_cast(&xSyndromeArray_d.values[i*_numEqsX]);
//
// //int size = _numVars * _numEqsX;
// //FloatArray2d_h varNodesX_h(NUM_CONCURRENT_THREADS, size,0.0f);
// //FloatArray2d_d varNodesX_d(NUM_CONCURRENT_THREADS, size, 0.0f);
// //float* varNodesX_d_ptrs[NUM_CONCURRENT_THREADS];
// //for (int i = 0; i < NUM_CONCURRENT_THREADS; ++i)
// // varNodesX_d_ptrs[i] = thrust::raw_pointer_cast(&varNodesX_d.values[i*size]);
//
// //FloatArray2d_h eqNodesX_h(NUM_CONCURRENT_THREADS, size, 0.0f);
// //FloatArray2d_d eqNodesX_d(NUM_CONCURRENT_THREADS, size, 0.0f);
// //float* eqNodesX_d_ptrs[NUM_CONCURRENT_THREADS];
// //for (int i = 0; i < NUM_CONCURRENT_THREADS; ++i)
// // eqNodesX_d_ptrs[i] = thrust::raw_pointer_cast(&eqNodesX_d.values[i*size]);
//
// //size = _numVars * _numEqsZ;
// //FloatArray2d_h varNodesZ_h(NUM_CONCURRENT_THREADS, size,0.0f);
// //FloatArray2d_d varNodesZ_d(NUM_CONCURRENT_THREADS, size, 0.0f);
// //float* varNodesZ_d_ptrs[NUM_CONCURRENT_THREADS];
// //for (int i = 0; i < NUM_CONCURRENT_THREADS; ++i)
// // varNodesZ_d_ptrs[i] = thrust::raw_pointer_cast(&varNodesZ_d.values[i*size]);
// //
// //FloatArray2d_h eqNodesZ_h(NUM_CONCURRENT_THREADS, size, 0.0f);
// //FloatArray2d_d eqNodesZ_d(NUM_CONCURRENT_THREADS, size, 0.0f);
// //float* eqNodesZ_d_ptrs[NUM_CONCURRENT_THREADS];
// //for (int i = 0; i < NUM_CONCURRENT_THREADS; ++i)
// // eqNodesZ_d_ptrs[i] = thrust::raw_pointer_cast(&eqNodesZ_d.values[i*size]);
//
// //for (int i = 0; i < numErrors; ++i) {
// // InitVarNodesArray(varNodesX_h, varNodesX_d, _pcmX_h, NUM_CONCURRENT_THREADS, p);
// // InitVarNodesArray(varNodesZ_h, varNodesZ_d, _pcmZ_h, NUM_CONCURRENT_THREADS, p);
// // for (int j = 0; j < NUM_CONCURRENT_THREADS; ++j) {
// // _errorGenerator.GenerateError(xErrors, zErrors, errorWeight);
// // SetDeviceSyndrome(GetXSyndrome(xErrors), xSyndromeArray_d);
// // SetDeviceSyndrome(GetZSyndrome(zErrors), zSyndromeArray_d);
// // }
// //}
// return Statistics();
//}
//
//bool QC_LDPC_CSS::CheckConvergence(const std::vector<std::vector<float>>& estimates, float high, float low)
//{
// // loop over all estimates
// for (auto i = 0; i < estimates.size(); ++i) {
// for (auto j = 0; j < estimates[i].size(); ++j) {
// if (estimates[i][j] != 0.0f) {
// // if any estimate is between the bounds we have failed to converge
// if (estimates[i][j] > low && estimates[i][j] < high) return false;
// }
// }
// }
// return true;
//}
//
//bool QC_LDPC_CSS::CheckConvergence(const cusp::array2d<float,cusp::host_memory,cusp::row_major>& estimates, float high, float low)
//{
// // loop over all estimates
// for (auto i = 0; i < estimates.num_rows; ++i) {
// for (auto j = 0; j < estimates.num_cols; ++j) {
// int index = i * estimates.num_cols + j;
// if (estimates.values[index] != 0.0f) {
// // if any estimate is between the bounds we have failed to converge
// if (estimates.values[index] > low && estimates.values[index] < high) return false;
// }
// }
// }
// return true;
//}
|
242085bd21c511df6fbc17ab555ae5865e8b652f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zgeaxpy.cu, normal z -> d, Wed Jan 2 14:18:53 2019
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
dgeaxpy_kernel(
int num_rows,
int num_cols,
double alpha,
double * dx,
double beta,
double * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
int idx = row + j*num_rows;
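// column-major dense layout: element (row, j) is stored at offset row + j*num_rows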
dy[ idx ] = alpha * dx[ idx ] + beta * dy[ idx ];
}
}
}
/**
Purpose
-------
This routine computes Y = alpha * X + beta * Y on the GPU.
The input format is magma_d_matrix. It can handle both,
dense matrix (vector block) and CSR matrices. For the latter,
it interfaces the cuSPARSE library.
Arguments
---------
@param[in]
alpha double
scalar multiplier.
@param[in]
X magma_d_matrix
input matrix X.
@param[in]
beta double
scalar multiplier.
@param[in,out]
Y magma_d_matrix*
input/output matrix Y.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C"
magma_int_t
magma_dgeaxpy(
double alpha,
magma_d_matrix X,
double beta,
magma_d_matrix *Y,
magma_queue_t queue )
{
int m = X.num_rows;
int n = X.num_cols;
magma_d_matrix C={Magma_CSR};
if( X.storage_type == Magma_DENSE && Y->storage_type == Magma_DENSE ){
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( dgeaxpy_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, X.dval, beta, Y->dval );
} else if( X.storage_type == Magma_CSR && Y->storage_type == Magma_CSR ) {
magma_dcuspaxpy( &alpha, X, &beta, *Y, &C, queue );
magma_dmfree( Y, queue );
magma_dmtransfer( C, Y, Magma_DEV, Magma_DEV, queue );
magma_dmfree( &C, queue );
} else {
printf("%% error: matrix addition only supported for DENSE and CSR format.\n");
}
return MAGMA_SUCCESS;
}
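// Minimal usage sketch (editorial illustration, not part of the original file):
// assuming dX and dY are magma_d_matrix objects already resident on the device
// with Magma_DENSE storage and matching dimensions, the call below would
// overwrite dY with 2.0*dX + 1.0*dY on the given queue:
//
//     magma_dgeaxpy( 2.0, dX, 1.0, &dY, queue );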
|
242085bd21c511df6fbc17ab555ae5865e8b652f.cu
|
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zgeaxpy.cu, normal z -> d, Wed Jan 2 14:18:53 2019
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
dgeaxpy_kernel(
int num_rows,
int num_cols,
double alpha,
double * dx,
double beta,
double * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
int idx = row + j*num_rows;
dy[ idx ] = alpha * dx[ idx ] + beta * dy[ idx ];
}
}
}
/**
Purpose
-------
This routine computes Y = alpha * X + beta * Y on the GPU.
The input format is magma_d_matrix. It can handle both,
dense matrix (vector block) and CSR matrices. For the latter,
it interfaces the cuSPARSE library.
Arguments
---------
@param[in]
alpha double
scalar multiplier.
@param[in]
X magma_d_matrix
input matrix X.
@param[in]
beta double
scalar multiplier.
@param[in,out]
Y magma_d_matrix*
input/output matrix Y.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C"
magma_int_t
magma_dgeaxpy(
double alpha,
magma_d_matrix X,
double beta,
magma_d_matrix *Y,
magma_queue_t queue )
{
int m = X.num_rows;
int n = X.num_cols;
magma_d_matrix C={Magma_CSR};
if( X.storage_type == Magma_DENSE && Y->storage_type == Magma_DENSE ){
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
dgeaxpy_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, alpha, X.dval, beta, Y->dval );
} else if( X.storage_type == Magma_CSR && Y->storage_type == Magma_CSR ) {
magma_dcuspaxpy( &alpha, X, &beta, *Y, &C, queue );
magma_dmfree( Y, queue );
magma_dmtransfer( C, Y, Magma_DEV, Magma_DEV, queue );
magma_dmfree( &C, queue );
} else {
printf("%% error: matrix addition only supported for DENSE and CSR format.\n");
}
return MAGMA_SUCCESS;
}
|
8386a01560e518364554220c09b029a4506229d4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include <ops/declarable/helpers/convolutions.h>
#include <helpers/PointersManager.h>
#include <math/templatemath.h>
namespace sd {
namespace ops {
//////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ static void pooling3dCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) {
// x input is [bS, iC, iD, iH, iW]
// z output is [bS, iC, oD, oH, oW]
const T* x = reinterpret_cast<const T*>(vx);
T* z = reinterpret_cast<T*>(vz);
__shared__ int rank, kDeff, kHeff, kWeff, iD, iH, iW, kProd;
__shared__ Nd4jLong zLen, *sharedMem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
zLen = shape::length(zShapeInfo);
rank = 5;
kDeff = kD + (kD - 1) * (dD - 1);
kHeff = kH + (kH - 1) * (dH - 1);
kWeff = kW + (kW - 1) * (dW - 1);
iD = xShapeInfo[3];
iH = xShapeInfo[4];
iW = xShapeInfo[5];
kProd = kD * kH * kW;
}
__syncthreads();
const auto zInd = threadIdx.x + blockIdx.x * blockDim.x;
if(zInd >= zLen)
return;
auto coords = sharedMem + threadIdx.x * rank;
shape::index2coords(zInd, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
int dstart = coords[2] * sD - pD;
int hstart = coords[3] * sH - pH;
int wstart = coords[4] * sW - pW;
int dend = dstart + kDeff;
int hend = hstart + kHeff;
int wend = wstart + kWeff;
if(dstart < 0)
dstart += dD * ((-dstart + dD - 1) / dD);
if(hstart < 0)
hstart += dH * ((-hstart + dH - 1) / dH);
if(wstart < 0)
wstart += dW * ((-wstart + dW - 1) / dW);
if(dend > iD)
dend -= dD * ((dend - iD + dD - 1) / dD);
if(hend > iH)
hend -= dH * ((hend - iH + dH - 1) / dH);
if(wend > iW)
wend -= dW * ((wend - iW + dW - 1) / dW);
switch (poolingMode) {
/*** max ***/
case 0: {
T max = -DataTypeUtils::max<T>();
for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) {
for (coords[3] = hstart; coords[3] < hend; coords[3] += dH){
for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) {
T val = x[shape::getOffset(xShapeInfo, coords)];
if (val > max)
max = val;
}
}
}
z[zOffset] = max;
}
break;
/*** avg ***/
case 1: {
T sum = static_cast<T>(0.);
for (coords[2] = dstart; coords[2] < dend; coords[2] += dD)
for (coords[3] = hstart; coords[3] < hend; coords[3] += dH)
for (coords[4] = wstart; coords[4] < wend; coords[4] += dW)
sum += x[shape::getOffset(xShapeInfo, coords)];
if (extraParam0 == 0) { //Exclude padding
uint a = (dend - dstart) / dD + ((dend - dstart) % dD == 0 ? 0 : 1);
uint b = (hend - hstart) / dH + ((hend - hstart) % dH == 0 ? 0 : 1);
uint c = (wend - wstart) / dW + ((wend - wstart) % dW == 0 ? 0 : 1);
sum /= static_cast<T>(a * b * c); // /= sd::math::nd4j_ceil<double,T>(static_cast<double>(dend - dstart) / static_cast<double>(dD)) * sd::math::nd4j_ceil<double,T>(static_cast<double>(hend - hstart) / static_cast<double>(dH)) * sd::math::nd4j_ceil<double,T>(static_cast<double>(wend - wstart) / static_cast<double>(dW)); //Accounts for dilation
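// Illustrative example (editorial note, not from the original source): for a clipped
// window of height hend - hstart = 5 with dilation dH = 2, the taps fall at offsets
// 0, 2 and 4, so b = 5/2 + 1 = 3 and only those 3 samples enter the average.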
}
else if (extraParam0 == 1) //Include padding
sum /= kProd;
z[zOffset] = sum;
}
break;
/*** pnorm ***/
case 2: {
T sum = static_cast<T>(0.);
for (coords[2] = dstart; coords[2] < dend; coords[2] += dD)
for (coords[3] = hstart; coords[3] < hend; coords[3] += dH)
for (coords[4] = wstart; coords[4] < wend; coords[4] += dW)
sum += sd::math::nd4j_pow<T,T,T>(sd::math::nd4j_abs<T>(x[shape::getOffset(xShapeInfo, coords)]), extraParam0);
sum = sd::math::nd4j_pow<T,T,T>(sum, (T) 1.f / extraParam0);
z[zOffset] = sum;
}
break;
}
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void pooling3dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int kD, const int kH, const int kW,
const int sD, const int sH, const int sW,
const int pD, const int pH, const int pW,
const int dD, const int dH, const int dW,
const int poolingMode, const int extraParam0) {
hipLaunchKernelGGL(( pooling3dCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0);
}
//////////////////////////////////////////////////////////////////////////
void ConvolutionUtils::pooling3d(sd::graph::Context& block, const NDArray& input, NDArray& output, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) {
PointersManager manager(block.launchContext(), "pooling3d");
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = output.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), pooling3dCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0), FLOAT_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
manager.synchronize();
}
}
}
|
8386a01560e518364554220c09b029a4506229d4.cu
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include <ops/declarable/helpers/convolutions.h>
#include <helpers/PointersManager.h>
#include <math/templatemath.h>
namespace sd {
namespace ops {
//////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ static void pooling3dCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) {
// x input is [bS, iC, iD, iH, iW]
// z output is [bS, iC, oD, oH, oW]
const T* x = reinterpret_cast<const T*>(vx);
T* z = reinterpret_cast<T*>(vz);
__shared__ int rank, kDeff, kHeff, kWeff, iD, iH, iW, kProd;
__shared__ Nd4jLong zLen, *sharedMem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
zLen = shape::length(zShapeInfo);
rank = 5;
kDeff = kD + (kD - 1) * (dD - 1);
kHeff = kH + (kH - 1) * (dH - 1);
kWeff = kW + (kW - 1) * (dW - 1);
iD = xShapeInfo[3];
iH = xShapeInfo[4];
iW = xShapeInfo[5];
kProd = kD * kH * kW;
}
__syncthreads();
const auto zInd = threadIdx.x + blockIdx.x * blockDim.x;
if(zInd >= zLen)
return;
auto coords = sharedMem + threadIdx.x * rank;
shape::index2coords(zInd, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
int dstart = coords[2] * sD - pD;
int hstart = coords[3] * sH - pH;
int wstart = coords[4] * sW - pW;
int dend = dstart + kDeff;
int hend = hstart + kHeff;
int wend = wstart + kWeff;
if(dstart < 0)
dstart += dD * ((-dstart + dD - 1) / dD);
if(hstart < 0)
hstart += dH * ((-hstart + dH - 1) / dH);
if(wstart < 0)
wstart += dW * ((-wstart + dW - 1) / dW);
if(dend > iD)
dend -= dD * ((dend - iD + dD - 1) / dD);
if(hend > iH)
hend -= dH * ((hend - iH + dH - 1) / dH);
if(wend > iW)
wend -= dW * ((wend - iW + dW - 1) / dW);
switch (poolingMode) {
/*** max ***/
case 0: {
T max = -DataTypeUtils::max<T>();
for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) {
for (coords[3] = hstart; coords[3] < hend; coords[3] += dH){
for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) {
T val = x[shape::getOffset(xShapeInfo, coords)];
if (val > max)
max = val;
}
}
}
z[zOffset] = max;
}
break;
/*** avg ***/
case 1: {
T sum = static_cast<T>(0.);
for (coords[2] = dstart; coords[2] < dend; coords[2] += dD)
for (coords[3] = hstart; coords[3] < hend; coords[3] += dH)
for (coords[4] = wstart; coords[4] < wend; coords[4] += dW)
sum += x[shape::getOffset(xShapeInfo, coords)];
if (extraParam0 == 0) { //Exclude padding
uint a = (dend - dstart) / dD + ((dend - dstart) % dD == 0 ? 0 : 1);
uint b = (hend - hstart) / dH + ((hend - hstart) % dH == 0 ? 0 : 1);
uint c = (wend - wstart) / dW + ((wend - wstart) % dW == 0 ? 0 : 1);
sum /= static_cast<T>(a * b * c); // /= sd::math::nd4j_ceil<double,T>(static_cast<double>(dend - dstart) / static_cast<double>(dD)) * sd::math::nd4j_ceil<double,T>(static_cast<double>(hend - hstart) / static_cast<double>(dH)) * sd::math::nd4j_ceil<double,T>(static_cast<double>(wend - wstart) / static_cast<double>(dW)); //Accounts for dilation
}
else if (extraParam0 == 1) //Include padding
sum /= kProd;
z[zOffset] = sum;
}
break;
/*** pnorm ***/
case 2: {
T sum = static_cast<T>(0.);
for (coords[2] = dstart; coords[2] < dend; coords[2] += dD)
for (coords[3] = hstart; coords[3] < hend; coords[3] += dH)
for (coords[4] = wstart; coords[4] < wend; coords[4] += dW)
sum += sd::math::nd4j_pow<T,T,T>(sd::math::nd4j_abs<T>(x[shape::getOffset(xShapeInfo, coords)]), extraParam0);
sum = sd::math::nd4j_pow<T,T,T>(sum, (T) 1.f / extraParam0);
z[zOffset] = sum;
}
break;
}
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void pooling3dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int kD, const int kH, const int kW,
const int sD, const int sH, const int sW,
const int pD, const int pH, const int pW,
const int dD, const int dH, const int dW,
const int poolingMode, const int extraParam0) {
pooling3dCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0);
}
//////////////////////////////////////////////////////////////////////////
void ConvolutionUtils::pooling3d(sd::graph::Context& block, const NDArray& input, NDArray& output, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) {
PointersManager manager(block.launchContext(), "pooling3d");
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = output.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), pooling3dCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0), FLOAT_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
manager.synchronize();
}
}
}
|
e7ca97577293de77f35bfd0da9bf3518d51d456a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_checkop_kernel;
int xdim0_checkop_kernel_h = -1;
int ydim0_checkop_kernel_h = -1;
__constant__ int xdim1_checkop_kernel;
int xdim1_checkop_kernel_h = -1;
int ydim1_checkop_kernel_h = -1;
__constant__ int xdim2_checkop_kernel;
int xdim2_checkop_kernel_h = -1;
int ydim2_checkop_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#define OPS_ACC0(x) (x)
#define OPS_ACC1(x) (x)
#define OPS_ACC2(x) (x)
// user function
__device__
void
checkop_kernel_gpu(const double *rho_new, const double *x,
const double *rhoin, double *pre, double *post,
int *num) {
double diff;
diff = (rho_new[OPS_ACC0(0)] - rhoin[OPS_ACC2(0)]);
if (fabs(diff) < 0.01 && x[OPS_ACC1(0)] > -4.1) {
*post = *post + diff * diff;
*num = *num + 1;
} else
*pre = *pre + (rho_new[OPS_ACC0(0)] - rhol) * (rho_new[OPS_ACC0(0)] - rhol);
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
__global__ void
ops_checkop_kernel(const double *__restrict arg0, const double *__restrict arg1,
const double *__restrict arg2, double *__restrict arg3,
double *__restrict arg4, int *__restrict arg5, int size0) {
double arg3_l[1];
double arg4_l[1];
int arg5_l[1];
for (int d = 0; d < 1; d++)
arg3_l[d] = ZERO_double;
for (int d = 0; d < 1; d++)
arg4_l[d] = ZERO_double;
for (int d = 0; d < 1; d++)
arg5_l[d] = ZERO_int;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1;
arg1 += idx_x * 1 * 1;
arg2 += idx_x * 1 * 1;
if (idx_x < size0) {
checkop_kernel_gpu(arg0, arg1, arg2, arg3_l, arg4_l, arg5_l);
}
for (int d = 0; d < 1; d++)
ops_reduction_cuda<OPS_INC>(
&arg3[d + (blockIdx.x + blockIdx.y * gridDim.x) * 1], arg3_l[d]);
for (int d = 0; d < 1; d++)
ops_reduction_cuda<OPS_INC>(
&arg4[d + (blockIdx.x + blockIdx.y * gridDim.x) * 1], arg4_l[d]);
for (int d = 0; d < 1; d++)
ops_reduction_cuda<OPS_INC>(
&arg5[d + (blockIdx.x + blockIdx.y * gridDim.x) * 1], arg5_l[d]);
}
// host stub function
void ops_par_loop_checkop_kernel(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3, ops_arg arg4,
ops_arg arg5) {
// Timing
double t1, t2, c1, c2;
ops_arg args[6] = {arg0, arg1, arg2, arg3, arg4, arg5};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 6, range, 14))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(14, "checkop_kernel");
OPS_kernels[14].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[1];
int end[1];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 1; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 1; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
if (xdim0 != xdim0_checkop_kernel_h || xdim1 != xdim1_checkop_kernel_h ||
xdim2 != xdim2_checkop_kernel_h) {
hipMemcpyToSymbol(xdim0_checkop_kernel, &xdim0, sizeof(int));
xdim0_checkop_kernel_h = xdim0;
hipMemcpyToSymbol(xdim1_checkop_kernel, &xdim1, sizeof(int));
xdim1_checkop_kernel_h = xdim1;
hipMemcpyToSymbol(xdim2_checkop_kernel, &xdim2, sizeof(int));
xdim2_checkop_kernel_h = xdim2;
}
#ifdef OPS_MPI
double *arg3h =
(double *)(((ops_reduction)args[3].data)->data +
((ops_reduction)args[3].data)->size * block->index);
#else
double *arg3h = (double *)(((ops_reduction)args[3].data)->data);
#endif
#ifdef OPS_MPI
double *arg4h =
(double *)(((ops_reduction)args[4].data)->data +
((ops_reduction)args[4].data)->size * block->index);
#else
double *arg4h = (double *)(((ops_reduction)args[4].data)->data);
#endif
#ifdef OPS_MPI
int *arg5h = (int *)(((ops_reduction)args[5].data)->data +
((ops_reduction)args[5].data)->size * block->index);
#else
int *arg5h = (int *)(((ops_reduction)args[5].data)->data);
#endif
dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1);
dim3 tblock(OPS_block_size_x, 1, 1);
int nblocks = ((x_size - 1) / OPS_block_size_x + 1);
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double));
reduct_size = MAX(reduct_size, sizeof(double) * 1);
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double));
reduct_size = MAX(reduct_size, sizeof(double) * 1);
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(int));
reduct_size = MAX(reduct_size, sizeof(int) * 1);
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg3.data = OPS_reduct_h + reduct_bytes;
arg3.data_d = OPS_reduct_d + reduct_bytes;
for (int b = 0; b < maxblocks; b++)
for (int d = 0; d < 1; d++)
((double *)arg3.data)[d + b * 1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double));
arg4.data = OPS_reduct_h + reduct_bytes;
arg4.data_d = OPS_reduct_d + reduct_bytes;
for (int b = 0; b < maxblocks; b++)
for (int d = 0; d < 1; d++)
((double *)arg4.data)[d + b * 1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double));
arg5.data = OPS_reduct_h + reduct_bytes;
arg5.data_d = OPS_reduct_d + reduct_bytes;
for (int b = 0; b < maxblocks; b++)
for (int d = 0; d < 1; d++)
((int *)arg5.data)[d + b * 1] = ZERO_int;
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(int));
mvReductArraysToDevice(reduct_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
char *p_a[6];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
p_a[2] = (char *)args[2].data_d + base2;
ops_H_D_exchanges_device(args, 6);
ops_halo_exchanges(args, 6, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[14].mpi_time += t2 - t1;
}
int nshared = 0;
int nthread = OPS_block_size_x * OPS_block_size_y;
nshared = MAX(nshared, sizeof(double) * 1);
nshared = MAX(nshared, sizeof(double) * 1);
nshared = MAX(nshared, sizeof(int) * 1);
nshared = MAX(nshared * nthread, reduct_size * nthread);
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_checkop_kernel), dim3(grid), dim3(tblock), nshared, 0,
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2],
(double *)arg3.data_d, (double *)arg4.data_d, (int *)arg5.data_d, x_size);
mvReductArraysToHost(reduct_bytes);
for (int b = 0; b < maxblocks; b++) {
for (int d = 0; d < 1; d++) {
arg3h[d] = arg3h[d] + ((double *)arg3.data)[d + b * 1];
}
}
arg3.data = (char *)arg3h;
for (int b = 0; b < maxblocks; b++) {
for (int d = 0; d < 1; d++) {
arg4h[d] = arg4h[d] + ((double *)arg4.data)[d + b * 1];
}
}
arg4.data = (char *)arg4h;
for (int b = 0; b < maxblocks; b++) {
for (int d = 0; d < 1; d++) {
arg5h[d] = arg5h[d] + ((int *)arg5.data)[d + b * 1];
}
}
arg5.data = (char *)arg5h;
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[14].time += t1 - t2;
}
ops_set_dirtybit_device(args, 6);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[14].mpi_time += t2 - t1;
OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg2);
}
}
|
e7ca97577293de77f35bfd0da9bf3518d51d456a.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_checkop_kernel;
int xdim0_checkop_kernel_h = -1;
int ydim0_checkop_kernel_h = -1;
__constant__ int xdim1_checkop_kernel;
int xdim1_checkop_kernel_h = -1;
int ydim1_checkop_kernel_h = -1;
__constant__ int xdim2_checkop_kernel;
int xdim2_checkop_kernel_h = -1;
int ydim2_checkop_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#define OPS_ACC0(x) (x)
#define OPS_ACC1(x) (x)
#define OPS_ACC2(x) (x)
// user function
__device__
void
checkop_kernel_gpu(const double *rho_new, const double *x,
const double *rhoin, double *pre, double *post,
int *num) {
double diff;
diff = (rho_new[OPS_ACC0(0)] - rhoin[OPS_ACC2(0)]);
if (fabs(diff) < 0.01 && x[OPS_ACC1(0)] > -4.1) {
*post = *post + diff * diff;
*num = *num + 1;
} else
*pre = *pre + (rho_new[OPS_ACC0(0)] - rhol) * (rho_new[OPS_ACC0(0)] - rhol);
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
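// CUDA kernel wrapper: one thread per grid point; each thread accumulates its
// local reduction values, which are then combined into one partial result per block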
__global__ void
ops_checkop_kernel(const double *__restrict arg0, const double *__restrict arg1,
const double *__restrict arg2, double *__restrict arg3,
double *__restrict arg4, int *__restrict arg5, int size0) {
double arg3_l[1];
double arg4_l[1];
int arg5_l[1];
for (int d = 0; d < 1; d++)
arg3_l[d] = ZERO_double;
for (int d = 0; d < 1; d++)
arg4_l[d] = ZERO_double;
for (int d = 0; d < 1; d++)
arg5_l[d] = ZERO_int;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1;
arg1 += idx_x * 1 * 1;
arg2 += idx_x * 1 * 1;
if (idx_x < size0) {
checkop_kernel_gpu(arg0, arg1, arg2, arg3_l, arg4_l, arg5_l);
}
for (int d = 0; d < 1; d++)
ops_reduction_cuda<OPS_INC>(
&arg3[d + (blockIdx.x + blockIdx.y * gridDim.x) * 1], arg3_l[d]);
for (int d = 0; d < 1; d++)
ops_reduction_cuda<OPS_INC>(
&arg4[d + (blockIdx.x + blockIdx.y * gridDim.x) * 1], arg4_l[d]);
for (int d = 0; d < 1; d++)
ops_reduction_cuda<OPS_INC>(
&arg5[d + (blockIdx.x + blockIdx.y * gridDim.x) * 1], arg5_l[d]);
}
// host stub function
void ops_par_loop_checkop_kernel(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3, ops_arg arg4,
ops_arg arg5) {
// Timing
double t1, t2, c1, c2;
ops_arg args[6] = {arg0, arg1, arg2, arg3, arg4, arg5};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 6, range, 14))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(14, "checkop_kernel");
OPS_kernels[14].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[1];
int end[1];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 1; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 1; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
if (xdim0 != xdim0_checkop_kernel_h || xdim1 != xdim1_checkop_kernel_h ||
xdim2 != xdim2_checkop_kernel_h) {
cudaMemcpyToSymbol(xdim0_checkop_kernel, &xdim0, sizeof(int));
xdim0_checkop_kernel_h = xdim0;
cudaMemcpyToSymbol(xdim1_checkop_kernel, &xdim1, sizeof(int));
xdim1_checkop_kernel_h = xdim1;
cudaMemcpyToSymbol(xdim2_checkop_kernel, &xdim2, sizeof(int));
xdim2_checkop_kernel_h = xdim2;
}
#ifdef OPS_MPI
double *arg3h =
(double *)(((ops_reduction)args[3].data)->data +
((ops_reduction)args[3].data)->size * block->index);
#else
double *arg3h = (double *)(((ops_reduction)args[3].data)->data);
#endif
#ifdef OPS_MPI
double *arg4h =
(double *)(((ops_reduction)args[4].data)->data +
((ops_reduction)args[4].data)->size * block->index);
#else
double *arg4h = (double *)(((ops_reduction)args[4].data)->data);
#endif
#ifdef OPS_MPI
int *arg5h = (int *)(((ops_reduction)args[5].data)->data +
((ops_reduction)args[5].data)->size * block->index);
#else
int *arg5h = (int *)(((ops_reduction)args[5].data)->data);
#endif
dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1);
dim3 tblock(OPS_block_size_x, 1, 1);
int nblocks = ((x_size - 1) / OPS_block_size_x + 1);
int maxblocks = nblocks;
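  // stage host/device buffers for the block-level reductions (one slot per block per reduction argument)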
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double));
reduct_size = MAX(reduct_size, sizeof(double) * 1);
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double));
reduct_size = MAX(reduct_size, sizeof(double) * 1);
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(int));
reduct_size = MAX(reduct_size, sizeof(int) * 1);
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg3.data = OPS_reduct_h + reduct_bytes;
arg3.data_d = OPS_reduct_d + reduct_bytes;
for (int b = 0; b < maxblocks; b++)
for (int d = 0; d < 1; d++)
((double *)arg3.data)[d + b * 1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double));
arg4.data = OPS_reduct_h + reduct_bytes;
arg4.data_d = OPS_reduct_d + reduct_bytes;
for (int b = 0; b < maxblocks; b++)
for (int d = 0; d < 1; d++)
((double *)arg4.data)[d + b * 1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double));
arg5.data = OPS_reduct_h + reduct_bytes;
arg5.data_d = OPS_reduct_d + reduct_bytes;
for (int b = 0; b < maxblocks; b++)
for (int d = 0; d < 1; d++)
((int *)arg5.data)[d + b * 1] = ZERO_int;
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(int));
mvReductArraysToDevice(reduct_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
char *p_a[6];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
p_a[2] = (char *)args[2].data_d + base2;
ops_H_D_exchanges_device(args, 6);
ops_halo_exchanges(args, 6, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[14].mpi_time += t2 - t1;
}
int nshared = 0;
int nthread = OPS_block_size_x * OPS_block_size_y;
nshared = MAX(nshared, sizeof(double) * 1);
nshared = MAX(nshared, sizeof(double) * 1);
nshared = MAX(nshared, sizeof(int) * 1);
nshared = MAX(nshared * nthread, reduct_size * nthread);
// call kernel wrapper function, passing in pointers to data
ops_checkop_kernel<<<grid, tblock, nshared>>>(
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2],
(double *)arg3.data_d, (double *)arg4.data_d, (int *)arg5.data_d, x_size);
mvReductArraysToHost(reduct_bytes);
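  // combine the per-block partial sums produced by the kernel on the host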
for (int b = 0; b < maxblocks; b++) {
for (int d = 0; d < 1; d++) {
arg3h[d] = arg3h[d] + ((double *)arg3.data)[d + b * 1];
}
}
arg3.data = (char *)arg3h;
for (int b = 0; b < maxblocks; b++) {
for (int d = 0; d < 1; d++) {
arg4h[d] = arg4h[d] + ((double *)arg4.data)[d + b * 1];
}
}
arg4.data = (char *)arg4h;
for (int b = 0; b < maxblocks; b++) {
for (int d = 0; d < 1; d++) {
arg5h[d] = arg5h[d] + ((int *)arg5.data)[d + b * 1];
}
}
arg5.data = (char *)arg5h;
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[14].time += t1 - t2;
}
ops_set_dirtybit_device(args, 6);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[14].mpi_time += t2 - t1;
OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg2);
}
}
|
21a72f852cb7af9ae05e830f5e4e3e0bde8b2f3b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// REQUIRES: nvptx-registered-target
// Make sure we don't allow dynamic initialization for device
// variables, but accept empty constructors allowed by CUDA.
// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fcuda-is-device -std=c++11 \
// RUN: -fno-threadsafe-statics -emit-llvm -o - %s | FileCheck --check-prefixes=CHECK,NVPTX %s
// RUN: %clang_cc1 -triple amdgcn -fcuda-is-device -std=c++11 \
// RUN: -fno-threadsafe-statics -emit-llvm -o - %s | FileCheck --check-prefixes=CHECK,AMDGCN %s
#ifdef __clang__
#include "Inputs/cuda.h"
#endif
// Use the types we share with Sema tests.
#include "Inputs/cuda-initializers.h"
__device__ int d_v;
// CHECK: @d_v = addrspace(1) externally_initialized global i32 0,
__shared__ int s_v;
// CHECK: @s_v = addrspace(3) global i32 undef,
__constant__ int c_v;
// CHECK: addrspace(4) externally_initialized global i32 0,
__device__ int d_v_i = 1;
// CHECK: @d_v_i = addrspace(1) externally_initialized global i32 1,
// trivial constructor -- allowed
__device__ T d_t;
// CHECK: @d_t = addrspace(1) externally_initialized global %struct.T zeroinitializer
__shared__ T s_t;
// CHECK: @s_t = addrspace(3) global %struct.T undef,
__constant__ T c_t;
// CHECK: @c_t = addrspace(4) externally_initialized global %struct.T zeroinitializer,
__device__ T d_t_i = {2};
// CHECK: @d_t_i = addrspace(1) externally_initialized global %struct.T { i32 2 },
__constant__ T c_t_i = {2};
// CHECK: @c_t_i = addrspace(4) externally_initialized global %struct.T { i32 2 },
// empty constructor
__device__ EC d_ec;
// CHECK: @d_ec = addrspace(1) externally_initialized global %struct.EC zeroinitializer,
__shared__ EC s_ec;
// CHECK: @s_ec = addrspace(3) global %struct.EC undef,
__constant__ EC c_ec;
// CHECK: @c_ec = addrspace(4) externally_initialized global %struct.EC zeroinitializer,
// empty destructor
__device__ ED d_ed;
// CHECK: @d_ed = addrspace(1) externally_initialized global %struct.ED zeroinitializer,
__shared__ ED s_ed;
// CHECK: @s_ed = addrspace(3) global %struct.ED undef,
__constant__ ED c_ed;
// CHECK: @c_ed = addrspace(4) externally_initialized global %struct.ED zeroinitializer,
__device__ ECD d_ecd;
// CHECK: @d_ecd = addrspace(1) externally_initialized global %struct.ECD zeroinitializer,
__shared__ ECD s_ecd;
// CHECK: @s_ecd = addrspace(3) global %struct.ECD undef,
__constant__ ECD c_ecd;
// CHECK: @c_ecd = addrspace(4) externally_initialized global %struct.ECD zeroinitializer,
// empty templated constructor -- allowed with no arguments
__device__ ETC d_etc;
// CHECK: @d_etc = addrspace(1) externally_initialized global %struct.ETC zeroinitializer,
__shared__ ETC s_etc;
// CHECK: @s_etc = addrspace(3) global %struct.ETC undef,
__constant__ ETC c_etc;
// CHECK: @c_etc = addrspace(4) externally_initialized global %struct.ETC zeroinitializer,
__device__ NCFS d_ncfs;
// CHECK: @d_ncfs = addrspace(1) externally_initialized global %struct.NCFS { i32 3 }
__constant__ NCFS c_ncfs;
// CHECK: @c_ncfs = addrspace(4) externally_initialized global %struct.NCFS { i32 3 }
// Regular base class -- allowed
__device__ T_B_T d_t_b_t;
// CHECK: @d_t_b_t = addrspace(1) externally_initialized global %struct.T_B_T zeroinitializer,
__shared__ T_B_T s_t_b_t;
// CHECK: @s_t_b_t = addrspace(3) global %struct.T_B_T undef,
__constant__ T_B_T c_t_b_t;
// CHECK: @c_t_b_t = addrspace(4) externally_initialized global %struct.T_B_T zeroinitializer,
// Encapsulated object of allowed class -- allowed
__device__ T_F_T d_t_f_t;
// CHECK: @d_t_f_t = addrspace(1) externally_initialized global %struct.T_F_T zeroinitializer,
__shared__ T_F_T s_t_f_t;
// CHECK: @s_t_f_t = addrspace(3) global %struct.T_F_T undef,
__constant__ T_F_T c_t_f_t;
// CHECK: @c_t_f_t = addrspace(4) externally_initialized global %struct.T_F_T zeroinitializer,
// array of allowed objects -- allowed
__device__ T_FA_T d_t_fa_t;
// CHECK: @d_t_fa_t = addrspace(1) externally_initialized global %struct.T_FA_T zeroinitializer,
__shared__ T_FA_T s_t_fa_t;
// CHECK: @s_t_fa_t = addrspace(3) global %struct.T_FA_T undef,
__constant__ T_FA_T c_t_fa_t;
// CHECK: @c_t_fa_t = addrspace(4) externally_initialized global %struct.T_FA_T zeroinitializer,
// Calling empty base class initializer is OK
__device__ EC_I_EC d_ec_i_ec;
// CHECK: @d_ec_i_ec = addrspace(1) externally_initialized global %struct.EC_I_EC zeroinitializer,
__shared__ EC_I_EC s_ec_i_ec;
// CHECK: @s_ec_i_ec = addrspace(3) global %struct.EC_I_EC undef,
__constant__ EC_I_EC c_ec_i_ec;
// CHECK: @c_ec_i_ec = addrspace(4) externally_initialized global %struct.EC_I_EC zeroinitializer,
// We should not emit global initializers for device-side variables.
// CHECK-NOT: @__cxx_global_var_init
// Make sure that initialization restrictions do not apply to local
// variables.
__device__ void df() {
T t;
// CHECK-NOT: call
EC ec;
// CHECK: call void @_ZN2ECC1Ev(%struct.EC* %ec)
ED ed;
// CHECK-NOT: call
ECD ecd;
// CHECK: call void @_ZN3ECDC1Ev(%struct.ECD* %ecd)
ETC etc;
// CHECK: call void @_ZN3ETCC1IJEEEDpT_(%struct.ETC* %etc)
UC uc;
// undefined constructor -- not allowed
// CHECK: call void @_ZN2UCC1Ev(%struct.UC* %uc)
UD ud;
// undefined destructor -- not allowed
// CHECK-NOT: call
ECI eci;
// empty constructor w/ initializer list -- not allowed
// CHECK: call void @_ZN3ECIC1Ev(%struct.ECI* %eci)
NEC nec;
// non-empty constructor -- not allowed
// CHECK: call void @_ZN3NECC1Ev(%struct.NEC* %nec)
// non-empty destructor -- not allowed
NED ned;
// no-constructor, virtual method -- not allowed
// CHECK: call void @_ZN3NCVC1Ev(%struct.NCV* %ncv)
NCV ncv;
// CHECK-NOT: call
VD vd;
// CHECK: call void @_ZN2VDC1Ev(%struct.VD* %vd)
NCF ncf;
// CHECK: call void @_ZN3NCFC1Ev(%struct.NCF* %ncf)
NCFS ncfs;
// CHECK: call void @_ZN4NCFSC1Ev(%struct.NCFS* %ncfs)
UTC utc;
// CHECK: call void @_ZN3UTCC1IJEEEDpT_(%struct.UTC* %utc)
NETC netc;
// CHECK: call void @_ZN4NETCC1IJEEEDpT_(%struct.NETC* %netc)
T_B_T t_b_t;
// CHECK-NOT: call
T_F_T t_f_t;
// CHECK-NOT: call
T_FA_T t_fa_t;
// CHECK-NOT: call
EC_I_EC ec_i_ec;
// CHECK: call void @_ZN7EC_I_ECC1Ev(%struct.EC_I_EC* %ec_i_ec)
EC_I_EC1 ec_i_ec1;
// CHECK: call void @_ZN8EC_I_EC1C1Ev(%struct.EC_I_EC1* %ec_i_ec1)
T_V_T t_v_t;
// CHECK: call void @_ZN5T_V_TC1Ev(%struct.T_V_T* %t_v_t)
T_B_NEC t_b_nec;
// CHECK: call void @_ZN7T_B_NECC1Ev(%struct.T_B_NEC* %t_b_nec)
T_F_NEC t_f_nec;
// CHECK: call void @_ZN7T_F_NECC1Ev(%struct.T_F_NEC* %t_f_nec)
T_FA_NEC t_fa_nec;
// CHECK: call void @_ZN8T_FA_NECC1Ev(%struct.T_FA_NEC* %t_fa_nec)
T_B_NED t_b_ned;
// CHECK-NOT: call
T_F_NED t_f_ned;
// CHECK-NOT: call
T_FA_NED t_fa_ned;
// CHECK-NOT: call
static __shared__ EC s_ec;
// CHECK-NOT: call void @_ZN2ECC1Ev(%struct.EC* addrspacecast (%struct.EC addrspace(3)* @_ZZ2dfvE4s_ec to %struct.EC*))
static __shared__ ETC s_etc;
// CHECK-NOT: call void @_ZN3ETCC1IJEEEDpT_(%struct.ETC* addrspacecast (%struct.ETC addrspace(3)* @_ZZ2dfvE5s_etc to %struct.ETC*))
// anchor point separating constructors and destructors
df(); // CHECK: call void @_Z2dfv()
// Verify that we only call non-empty destructors
// CHECK-NEXT: call void @_ZN8T_FA_NEDD1Ev(%struct.T_FA_NED* %t_fa_ned)
// CHECK-NEXT: call void @_ZN7T_F_NEDD1Ev(%struct.T_F_NED* %t_f_ned)
// CHECK-NEXT: call void @_ZN7T_B_NEDD1Ev(%struct.T_B_NED* %t_b_ned)
// CHECK-NEXT: call void @_ZN2VDD1Ev(%struct.VD* %vd)
// CHECK-NEXT: call void @_ZN3NEDD1Ev(%struct.NED* %ned)
// CHECK-NEXT: call void @_ZN2UDD1Ev(%struct.UD* %ud)
// CHECK-NEXT: call void @_ZN3ECDD1Ev(%struct.ECD* %ecd)
// CHECK-NEXT: call void @_ZN2EDD1Ev(%struct.ED* %ed)
// CHECK-NEXT: ret void
}
// We should not emit global init function.
// CHECK-NOT: @_GLOBAL__sub_I
|
21a72f852cb7af9ae05e830f5e4e3e0bde8b2f3b.cu
|
// REQUIRES: nvptx-registered-target
// Make sure we don't allow dynamic initialization for device
// variables, but accept empty constructors allowed by CUDA.
// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fcuda-is-device -std=c++11 \
// RUN: -fno-threadsafe-statics -emit-llvm -o - %s | FileCheck --check-prefixes=CHECK,NVPTX %s
// RUN: %clang_cc1 -triple amdgcn -fcuda-is-device -std=c++11 \
// RUN: -fno-threadsafe-statics -emit-llvm -o - %s | FileCheck --check-prefixes=CHECK,AMDGCN %s
#ifdef __clang__
#include "Inputs/cuda.h"
#endif
// Use the types we share with Sema tests.
#include "Inputs/cuda-initializers.h"
__device__ int d_v;
// CHECK: @d_v = addrspace(1) externally_initialized global i32 0,
__shared__ int s_v;
// CHECK: @s_v = addrspace(3) global i32 undef,
__constant__ int c_v;
// CHECK: addrspace(4) externally_initialized global i32 0,
__device__ int d_v_i = 1;
// CHECK: @d_v_i = addrspace(1) externally_initialized global i32 1,
// trivial constructor -- allowed
__device__ T d_t;
// CHECK: @d_t = addrspace(1) externally_initialized global %struct.T zeroinitializer
__shared__ T s_t;
// CHECK: @s_t = addrspace(3) global %struct.T undef,
__constant__ T c_t;
// CHECK: @c_t = addrspace(4) externally_initialized global %struct.T zeroinitializer,
__device__ T d_t_i = {2};
// CHECK: @d_t_i = addrspace(1) externally_initialized global %struct.T { i32 2 },
__constant__ T c_t_i = {2};
// CHECK: @c_t_i = addrspace(4) externally_initialized global %struct.T { i32 2 },
// empty constructor
__device__ EC d_ec;
// CHECK: @d_ec = addrspace(1) externally_initialized global %struct.EC zeroinitializer,
__shared__ EC s_ec;
// CHECK: @s_ec = addrspace(3) global %struct.EC undef,
__constant__ EC c_ec;
// CHECK: @c_ec = addrspace(4) externally_initialized global %struct.EC zeroinitializer,
// empty destructor
__device__ ED d_ed;
// CHECK: @d_ed = addrspace(1) externally_initialized global %struct.ED zeroinitializer,
__shared__ ED s_ed;
// CHECK: @s_ed = addrspace(3) global %struct.ED undef,
__constant__ ED c_ed;
// CHECK: @c_ed = addrspace(4) externally_initialized global %struct.ED zeroinitializer,
__device__ ECD d_ecd;
// CHECK: @d_ecd = addrspace(1) externally_initialized global %struct.ECD zeroinitializer,
__shared__ ECD s_ecd;
// CHECK: @s_ecd = addrspace(3) global %struct.ECD undef,
__constant__ ECD c_ecd;
// CHECK: @c_ecd = addrspace(4) externally_initialized global %struct.ECD zeroinitializer,
// empty templated constructor -- allowed with no arguments
__device__ ETC d_etc;
// CHECK: @d_etc = addrspace(1) externally_initialized global %struct.ETC zeroinitializer,
__shared__ ETC s_etc;
// CHECK: @s_etc = addrspace(3) global %struct.ETC undef,
__constant__ ETC c_etc;
// CHECK: @c_etc = addrspace(4) externally_initialized global %struct.ETC zeroinitializer,
__device__ NCFS d_ncfs;
// CHECK: @d_ncfs = addrspace(1) externally_initialized global %struct.NCFS { i32 3 }
__constant__ NCFS c_ncfs;
// CHECK: @c_ncfs = addrspace(4) externally_initialized global %struct.NCFS { i32 3 }
// Regular base class -- allowed
__device__ T_B_T d_t_b_t;
// CHECK: @d_t_b_t = addrspace(1) externally_initialized global %struct.T_B_T zeroinitializer,
__shared__ T_B_T s_t_b_t;
// CHECK: @s_t_b_t = addrspace(3) global %struct.T_B_T undef,
__constant__ T_B_T c_t_b_t;
// CHECK: @c_t_b_t = addrspace(4) externally_initialized global %struct.T_B_T zeroinitializer,
// Encapsulated object of allowed class -- allowed
__device__ T_F_T d_t_f_t;
// CHECK: @d_t_f_t = addrspace(1) externally_initialized global %struct.T_F_T zeroinitializer,
__shared__ T_F_T s_t_f_t;
// CHECK: @s_t_f_t = addrspace(3) global %struct.T_F_T undef,
__constant__ T_F_T c_t_f_t;
// CHECK: @c_t_f_t = addrspace(4) externally_initialized global %struct.T_F_T zeroinitializer,
// array of allowed objects -- allowed
__device__ T_FA_T d_t_fa_t;
// CHECK: @d_t_fa_t = addrspace(1) externally_initialized global %struct.T_FA_T zeroinitializer,
__shared__ T_FA_T s_t_fa_t;
// CHECK: @s_t_fa_t = addrspace(3) global %struct.T_FA_T undef,
__constant__ T_FA_T c_t_fa_t;
// CHECK: @c_t_fa_t = addrspace(4) externally_initialized global %struct.T_FA_T zeroinitializer,
// Calling empty base class initializer is OK
__device__ EC_I_EC d_ec_i_ec;
// CHECK: @d_ec_i_ec = addrspace(1) externally_initialized global %struct.EC_I_EC zeroinitializer,
__shared__ EC_I_EC s_ec_i_ec;
// CHECK: @s_ec_i_ec = addrspace(3) global %struct.EC_I_EC undef,
__constant__ EC_I_EC c_ec_i_ec;
// CHECK: @c_ec_i_ec = addrspace(4) externally_initialized global %struct.EC_I_EC zeroinitializer,
// We should not emit global initializers for device-side variables.
// CHECK-NOT: @__cxx_global_var_init
// Make sure that initialization restrictions do not apply to local
// variables.
__device__ void df() {
T t;
// CHECK-NOT: call
EC ec;
// CHECK: call void @_ZN2ECC1Ev(%struct.EC* %ec)
ED ed;
// CHECK-NOT: call
ECD ecd;
// CHECK: call void @_ZN3ECDC1Ev(%struct.ECD* %ecd)
ETC etc;
// CHECK: call void @_ZN3ETCC1IJEEEDpT_(%struct.ETC* %etc)
UC uc;
// undefined constructor -- not allowed
// CHECK: call void @_ZN2UCC1Ev(%struct.UC* %uc)
UD ud;
// undefined destructor -- not allowed
// CHECK-NOT: call
ECI eci;
// empty constructor w/ initializer list -- not allowed
// CHECK: call void @_ZN3ECIC1Ev(%struct.ECI* %eci)
NEC nec;
// non-empty constructor -- not allowed
// CHECK: call void @_ZN3NECC1Ev(%struct.NEC* %nec)
// non-empty destructor -- not allowed
NED ned;
// no-constructor, virtual method -- not allowed
// CHECK: call void @_ZN3NCVC1Ev(%struct.NCV* %ncv)
NCV ncv;
// CHECK-NOT: call
VD vd;
// CHECK: call void @_ZN2VDC1Ev(%struct.VD* %vd)
NCF ncf;
// CHECK: call void @_ZN3NCFC1Ev(%struct.NCF* %ncf)
NCFS ncfs;
// CHECK: call void @_ZN4NCFSC1Ev(%struct.NCFS* %ncfs)
UTC utc;
// CHECK: call void @_ZN3UTCC1IJEEEDpT_(%struct.UTC* %utc)
NETC netc;
// CHECK: call void @_ZN4NETCC1IJEEEDpT_(%struct.NETC* %netc)
T_B_T t_b_t;
// CHECK-NOT: call
T_F_T t_f_t;
// CHECK-NOT: call
T_FA_T t_fa_t;
// CHECK-NOT: call
EC_I_EC ec_i_ec;
// CHECK: call void @_ZN7EC_I_ECC1Ev(%struct.EC_I_EC* %ec_i_ec)
EC_I_EC1 ec_i_ec1;
// CHECK: call void @_ZN8EC_I_EC1C1Ev(%struct.EC_I_EC1* %ec_i_ec1)
T_V_T t_v_t;
// CHECK: call void @_ZN5T_V_TC1Ev(%struct.T_V_T* %t_v_t)
T_B_NEC t_b_nec;
// CHECK: call void @_ZN7T_B_NECC1Ev(%struct.T_B_NEC* %t_b_nec)
T_F_NEC t_f_nec;
// CHECK: call void @_ZN7T_F_NECC1Ev(%struct.T_F_NEC* %t_f_nec)
T_FA_NEC t_fa_nec;
// CHECK: call void @_ZN8T_FA_NECC1Ev(%struct.T_FA_NEC* %t_fa_nec)
T_B_NED t_b_ned;
// CHECK-NOT: call
T_F_NED t_f_ned;
// CHECK-NOT: call
T_FA_NED t_fa_ned;
// CHECK-NOT: call
static __shared__ EC s_ec;
// CHECK-NOT: call void @_ZN2ECC1Ev(%struct.EC* addrspacecast (%struct.EC addrspace(3)* @_ZZ2dfvE4s_ec to %struct.EC*))
static __shared__ ETC s_etc;
// CHECK-NOT: call void @_ZN3ETCC1IJEEEDpT_(%struct.ETC* addrspacecast (%struct.ETC addrspace(3)* @_ZZ2dfvE5s_etc to %struct.ETC*))
// anchor point separating constructors and destructors
df(); // CHECK: call void @_Z2dfv()
// Verify that we only call non-empty destructors
// CHECK-NEXT: call void @_ZN8T_FA_NEDD1Ev(%struct.T_FA_NED* %t_fa_ned)
// CHECK-NEXT: call void @_ZN7T_F_NEDD1Ev(%struct.T_F_NED* %t_f_ned)
// CHECK-NEXT: call void @_ZN7T_B_NEDD1Ev(%struct.T_B_NED* %t_b_ned)
// CHECK-NEXT: call void @_ZN2VDD1Ev(%struct.VD* %vd)
// CHECK-NEXT: call void @_ZN3NEDD1Ev(%struct.NED* %ned)
// CHECK-NEXT: call void @_ZN2UDD1Ev(%struct.UD* %ud)
// CHECK-NEXT: call void @_ZN3ECDD1Ev(%struct.ECD* %ecd)
// CHECK-NEXT: call void @_ZN2EDD1Ev(%struct.ED* %ed)
// CHECK-NEXT: ret void
}
// We should not emit global init function.
// CHECK-NOT: @_GLOBAL__sub_I
|
2e30317d4fe8e3c6182b43aca8218a4c809e1974.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <exception>
#include <sstream>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "nvstrings/NVStrings.h"
#include "./NVStringsImpl.h"
#include "../custring_view.cuh"
#include "../regex/regex.cuh"
#include "../unicode/is_flags.h"
#include "../util.h"
// This functor is used to record the substring positions for each
// extract column. Then, only substr is needed for the result.
template<size_t stack_size>
struct extract_sizer_fn
{
dreprog* prog;
custring_view_array d_strings;
int col;
int* d_begins;
int* d_ends;
size_t* d_lengths;
__device__ void operator()(unsigned int idx)
{
u_char data1[stack_size], data2[stack_size];
prog->set_stack_mem(data1,data2);
custring_view* dstr = d_strings[idx];
d_begins[idx] = -1;
d_ends[idx] = -1;
if( !dstr )
return;
int begin=0, end=dstr->chars_count();
int result = prog->find(idx,dstr,begin,end);
if( result > 0 )
result = prog->extract(idx,dstr,begin,end,col);
if( result > 0 )
{
d_begins[idx] = begin;
d_ends[idx] = end;
unsigned int size = dstr->substr_size(begin,end-begin);
d_lengths[idx] = (size_t)ALIGN_SIZE(size);
}
}
};
// column-major version of extract() method above
int NVStrings::extract( const char* pattern, std::vector<NVStrings*>& results)
{
if( pattern==0 )
return -1;
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
// compile regex into device object
const char32_t* ptn32 = to_char32(pattern);
dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags());
delete ptn32;
// allocate regex working memory if necessary
int regex_insts = prog->inst_counts();
if( regex_insts > MAX_STACK_INSTS )
{
if( !prog->alloc_relists(count) )
{
std::ostringstream message;
message << "nvstrings::extract: number of instructions (" << prog->inst_counts() << ") ";
message << "and number of strings (" << count << ") ";
message << "exceeds available memory";
dreprog::destroy(prog);
throw std::invalid_argument(message.str());
}
}
//
int groups = prog->group_counts();
if( groups==0 )
{
dreprog::destroy(prog);
return 0;
}
//
custring_view_array d_strings = pImpl->getStringsPtr();
rmm::device_vector<int> begins(count,0);
int* d_begins = begins.data().get();
rmm::device_vector<int> ends(count,0);
int* d_ends = ends.data().get();
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
// build strings vector for each group (column)
for( int col=0; col < groups; ++col )
{
// first, build two vectors of (begin,end) position values;
// also get the lengths of the substrings
if( (regex_insts > MAX_STACK_INSTS) || (regex_insts <= 10) )
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
extract_sizer_fn<RX_STACK_SMALL>{prog, d_strings, col, d_begins, d_ends, d_lengths});
else if( regex_insts <= 100 )
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
extract_sizer_fn<RX_STACK_MEDIUM>{prog, d_strings, col, d_begins, d_ends, d_lengths});
else
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
extract_sizer_fn<RX_STACK_LARGE>{prog, d_strings, col, d_begins, d_ends, d_lengths});
// create list of strings for this group
NVStrings* column = new NVStrings(count);
results.push_back(column); // append here so continue statement will work
char* d_buffer = column->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
continue;
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
// copy the substrings into the new object
custring_view_array d_results = column->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_begins, d_ends, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
int start = d_begins[idx];
int stop = d_ends[idx];
if( stop > start )
d_results[idx] = dstr->substr((unsigned)start,(unsigned)(stop-start),1,d_buffer+d_offsets[idx]);
});
// column already added to results above
}
dreprog::destroy(prog);
return groups;
}
|
2e30317d4fe8e3c6182b43aca8218a4c809e1974.cu
|
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <exception>
#include <sstream>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "nvstrings/NVStrings.h"
#include "./NVStringsImpl.h"
#include "../custring_view.cuh"
#include "../regex/regex.cuh"
#include "../unicode/is_flags.h"
#include "../util.h"
// This functor is used to record the substring positions for each
// extract column. Then, only substr is needed for the result.
template<size_t stack_size>
struct extract_sizer_fn
{
dreprog* prog;
custring_view_array d_strings;
int col;
int* d_begins;
int* d_ends;
size_t* d_lengths;
__device__ void operator()(unsigned int idx)
{
u_char data1[stack_size], data2[stack_size];
prog->set_stack_mem(data1,data2);
custring_view* dstr = d_strings[idx];
d_begins[idx] = -1;
d_ends[idx] = -1;
if( !dstr )
return;
int begin=0, end=dstr->chars_count();
int result = prog->find(idx,dstr,begin,end);
if( result > 0 )
result = prog->extract(idx,dstr,begin,end,col);
if( result > 0 )
{
d_begins[idx] = begin;
d_ends[idx] = end;
unsigned int size = dstr->substr_size(begin,end-begin);
d_lengths[idx] = (size_t)ALIGN_SIZE(size);
}
}
};
// column-major version of extract() method above
int NVStrings::extract( const char* pattern, std::vector<NVStrings*>& results)
{
if( pattern==0 )
return -1;
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
// compile regex into device object
const char32_t* ptn32 = to_char32(pattern);
dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags());
delete ptn32;
// allocate regex working memory if necessary
int regex_insts = prog->inst_counts();
if( regex_insts > MAX_STACK_INSTS )
{
if( !prog->alloc_relists(count) )
{
std::ostringstream message;
message << "nvstrings::extract: number of instructions (" << prog->inst_counts() << ") ";
message << "and number of strings (" << count << ") ";
message << "exceeds available memory";
dreprog::destroy(prog);
throw std::invalid_argument(message.str());
}
}
//
int groups = prog->group_counts();
if( groups==0 )
{
dreprog::destroy(prog);
return 0;
}
//
custring_view_array d_strings = pImpl->getStringsPtr();
rmm::device_vector<int> begins(count,0);
int* d_begins = begins.data().get();
rmm::device_vector<int> ends(count,0);
int* d_ends = ends.data().get();
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
// build strings vector for each group (column)
for( int col=0; col < groups; ++col )
{
// first, build two vectors of (begin,end) position values;
// also get the lengths of the substrings
if( (regex_insts > MAX_STACK_INSTS) || (regex_insts <= 10) )
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
extract_sizer_fn<RX_STACK_SMALL>{prog, d_strings, col, d_begins, d_ends, d_lengths});
else if( regex_insts <= 100 )
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
extract_sizer_fn<RX_STACK_MEDIUM>{prog, d_strings, col, d_begins, d_ends, d_lengths});
else
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
extract_sizer_fn<RX_STACK_LARGE>{prog, d_strings, col, d_begins, d_ends, d_lengths});
// create list of strings for this group
NVStrings* column = new NVStrings(count);
results.push_back(column); // append here so continue statement will work
char* d_buffer = column->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
continue;
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
// copy the substrings into the new object
custring_view_array d_results = column->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_begins, d_ends, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
int start = d_begins[idx];
int stop = d_ends[idx];
if( stop > start )
d_results[idx] = dstr->substr((unsigned)start,(unsigned)(stop-start),1,d_buffer+d_offsets[idx]);
});
// column already added to results above
}
dreprog::destroy(prog);
return groups;
}
|
b8a3469509564f88ee5f00ed3d95b4062534a4b3.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2015 NVIDIA Corporation. All rights reserved
*
* Sample app to demonstrate use of CUPTI library to obtain metric values
* using callbacks for CUDA runtime APIs
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
// #include <cupti.h>
#include <math_constants.h>
#include "lcutil.h"
#include <hip/hip_runtime_api.h>
#define ALIGN_SIZE (8)
#define ALIGN_BUFFER(buffer, align) \
(((uintptr_t) (buffer) & ((align)-1)) ? ((buffer) + (align) - ((uintptr_t) (buffer) & ((align)-1))) : (buffer))
#define COMP_ITERATIONS (512) //512
#define THREADS (1024)
#define BLOCKS (32760)
#define REGBLOCK_SIZE (4)
#define UNROLL_ITERATIONS (32)
#define deviceNum (0)
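// Compute kernel: each thread runs a long chain of dependent multiply-adds on
// four registers, keeping the floating-point pipeline busy with minimal memory traffic.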
template <class T> __global__ void benchmark(){
__shared__ T shared[THREADS];
T r0 = shared[threadIdx.x],
r1 = r0,
r2 = r0,
r3 = r0;
#pragma unroll 32
for(int i=0; i<UNROLL_ITERATIONS; i++){
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
}
shared[threadIdx.x] = r0;
}
double median(int n, double x[][4],int col) {
double temp;
int i, j;
// the following two loops sort the array x in ascending order
for(i=0; i<n-1; i++) {
for(j=i+1; j<n; j++) {
if(x[j][col] < x[i][col]) {
// swap elements
temp = x[i][col];
x[i][col] = x[j][col];
x[j][col] = temp;
}
}
}
if(n%2==0) {
// if there is an even number of elements, return mean of the two elements in the middle
return((x[n/2][col] + x[n/2 - 1][col]) / 2.0);
} else {
// else return the element in the middle
return x[n/2][col];
}
}
void initializeEvents(hipEvent_t *start, hipEvent_t *stop){
CUDA_SAFE_CALL( hipEventCreate(start) );
CUDA_SAFE_CALL( hipEventCreate(stop) );
CUDA_SAFE_CALL( hipEventRecord(*start, 0) );
}
float finalizeEvents(hipEvent_t start, hipEvent_t stop){
CUDA_SAFE_CALL( hipGetLastError() );
CUDA_SAFE_CALL( hipEventRecord(stop, 0) );
CUDA_SAFE_CALL( hipEventSynchronize(stop) );
float kernel_time;
CUDA_SAFE_CALL( hipEventElapsedTime(&kernel_time, start, stop) );
CUDA_SAFE_CALL( hipEventDestroy(start) );
CUDA_SAFE_CALL( hipEventDestroy(stop) );
return kernel_time;
}
void runbench(int type, double* kernel_time, double* flops){
const long long computations = 2*(long long)(COMP_ITERATIONS)*REGBLOCK_SIZE*THREADS*BLOCKS;
dim3 dimBlock(THREADS, 1, 1);
dim3 dimGrid(BLOCKS, 1, 1);
hipEvent_t start, stop;
initializeEvents(&start, &stop);
hipLaunchKernelGGL(( benchmark<double>), dim3(dimGrid), dim3(dimBlock) , 0, 0, );
hipDeviceSynchronize();
double time = finalizeEvents(start, stop);
double result = ((double)computations)/(double)time*1000./(double)(1000*1000*1000);
*kernel_time = time;
*flops=result;
}
int main(int argc, char *argv[]){
// CUpti_SubscriberHandle subscriber;
// hipCtx_t context = 0;
hipDevice_t device = 0;
int deviceCount;
// cupti_eventData cuptiEvent;
// RuntimeApiTrace_t trace;
printf("Usage: %s [device_num] [metric_name]\n", argv[0]);
int ntries;
if (argc>1){
ntries = atoi(argv[1]);
}else{
ntries = 1;
}
hipSetDevice(deviceNum);
double time[ntries][2],value[ntries][4];
(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
printf("There is no device supporting CUDA.\n");
return -2;
}
printf("CUDA Device Number: %d\n", deviceNum);
(hipDeviceGet(&device, deviceNum));
// CUDA_SAFE_CALL(hipGetDeviceProperties(&deviceProp, device));
int measure;
(hipDeviceGetAttribute(&measure,hipDeviceAttributeMaxThreadsPerBlock,device));
printf("Max Threads per block = %d\n", measure);
// DRIVER_API_CALL(hipCtxCreate(&context, 0, device));
int i;
for (i=0;i<ntries;i++){
runbench(0,&time[0][0],&value[0][0]);
printf("Registered time: %f ms\n",time[0][0]);
}
CUDA_SAFE_CALL( hipDeviceReset());
return 0;
}
|
b8a3469509564f88ee5f00ed3d95b4062534a4b3.cu
|
/*
* Copyright 2011-2015 NVIDIA Corporation. All rights reserved
*
* Sample app to demonstrate use of CUPTI library to obtain metric values
* using callbacks for CUDA runtime APIs
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// #include <cupti.h>
#include <math_constants.h>
#include "lcutil.h"
#include <cuda_profiler_api.h>
#define ALIGN_SIZE (8)
#define ALIGN_BUFFER(buffer, align) \
(((uintptr_t) (buffer) & ((align)-1)) ? ((buffer) + (align) - ((uintptr_t) (buffer) & ((align)-1))) : (buffer))
#define COMP_ITERATIONS (512) //512
#define THREADS (1024)
#define BLOCKS (32760)
#define REGBLOCK_SIZE (4)
#define UNROLL_ITERATIONS (32)
#define deviceNum (0)
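// Compute kernel: each thread runs a long chain of dependent multiply-adds on
// four registers, keeping the floating-point pipeline busy with minimal memory traffic.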
template <class T> __global__ void benchmark(){
__shared__ T shared[THREADS];
T r0 = shared[threadIdx.x],
r1 = r0,
r2 = r0,
r3 = r0;
#pragma unroll 32
for(int i=0; i<UNROLL_ITERATIONS; i++){
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
}
shared[threadIdx.x] = r0;
}
double median(int n, double x[][4],int col) {
double temp;
int i, j;
// the following two loops sort the array x in ascending order
for(i=0; i<n-1; i++) {
for(j=i+1; j<n; j++) {
if(x[j][col] < x[i][col]) {
// swap elements
temp = x[i][col];
x[i][col] = x[j][col];
x[j][col] = temp;
}
}
}
if(n%2==0) {
// if there is an even number of elements, return mean of the two elements in the middle
return((x[n/2][col] + x[n/2 - 1][col]) / 2.0);
} else {
// else return the element in the middle
return x[n/2][col];
}
}
void initializeEvents(cudaEvent_t *start, cudaEvent_t *stop){
CUDA_SAFE_CALL( cudaEventCreate(start) );
CUDA_SAFE_CALL( cudaEventCreate(stop) );
CUDA_SAFE_CALL( cudaEventRecord(*start, 0) );
}
float finalizeEvents(cudaEvent_t start, cudaEvent_t stop){
CUDA_SAFE_CALL( cudaGetLastError() );
CUDA_SAFE_CALL( cudaEventRecord(stop, 0) );
CUDA_SAFE_CALL( cudaEventSynchronize(stop) );
float kernel_time;
CUDA_SAFE_CALL( cudaEventElapsedTime(&kernel_time, start, stop) );
CUDA_SAFE_CALL( cudaEventDestroy(start) );
CUDA_SAFE_CALL( cudaEventDestroy(stop) );
return kernel_time;
}
void runbench(int type, double* kernel_time, double* flops){
const long long computations = 2*(long long)(COMP_ITERATIONS)*REGBLOCK_SIZE*THREADS*BLOCKS;
dim3 dimBlock(THREADS, 1, 1);
dim3 dimGrid(BLOCKS, 1, 1);
cudaEvent_t start, stop;
initializeEvents(&start, &stop);
benchmark<double><<< dimGrid, dimBlock >>>();
cudaDeviceSynchronize();
double time = finalizeEvents(start, stop);
double result = ((double)computations)/(double)time*1000./(double)(1000*1000*1000);
*kernel_time = time;
*flops=result;
}
int main(int argc, char *argv[]){
// CUpti_SubscriberHandle subscriber;
// CUcontext context = 0;
CUdevice device = 0;
int deviceCount;
// cupti_eventData cuptiEvent;
// RuntimeApiTrace_t trace;
printf("Usage: %s [device_num] [metric_name]\n", argv[0]);
int ntries;
if (argc>1){
ntries = atoi(argv[1]);
}else{
ntries = 1;
}
cudaSetDevice(deviceNum);
double time[ntries][2],value[ntries][4];
(cuDeviceGetCount(&deviceCount));
if (deviceCount == 0) {
printf("There is no device supporting CUDA.\n");
return -2;
}
printf("CUDA Device Number: %d\n", deviceNum);
(cuDeviceGet(&device, deviceNum));
// CUDA_SAFE_CALL(cudaGetDeviceProperties(&deviceProp, device));
int measure;
(cuDeviceGetAttribute(&measure,CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK,device));
printf("Max Threads per block = %d\n", measure);
// DRIVER_API_CALL(cuCtxCreate(&context, 0, device));
int i;
for (i=0;i<ntries;i++){
runbench(0,&time[0][0],&value[0][0]);
printf("Registered time: %f ms\n",time[0][0]);
}
CUDA_SAFE_CALL( cudaDeviceReset());
return 0;
}
|
b6d29270f3aa7fe58ce37597fa3b338a28b53b22.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <wb.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define NUM_BINS 4096
#define BLOCK_SIZE 512
#define CUDA_CHECK(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line,
bool abort = true) {
if (code != hipSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code),
file, line);
if (abort)
exit(code);
}
}
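// Histogram kernel: each block builds a privatized histogram in shared memory,
// walks the input with a grid-stride loop, then atomically merges into the global bins.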
__global__ void histogram(unsigned int *input, unsigned int *bins, unsigned int num_elements, unsigned int num_bins) {
//@@ Write the kernel that computes the histogram
  //@@ Make sure to use the privatization technique
__shared__ unsigned int private_histo[NUM_BINS];
for (unsigned int binIdx = threadIdx.x; binIdx < NUM_BINS; binIdx += BLOCK_SIZE) { //reference to line 3 from figure 9.10 from pg 211 of the textbook.
private_histo[binIdx] = 0;
}
__syncthreads();
int tid = threadIdx.x + blockIdx.x * blockDim.x; //reference PPT12 Slides 45-47
int stride = blockDim.x * gridDim.x;
while(tid < num_elements) {
int numberValue = input[tid];
if (numberValue >= 0 && numberValue < num_bins) {
atomicAdd(&(private_histo[numberValue]), 1);
}
tid += stride;
}
__syncthreads();
for (unsigned int binIdx = threadIdx.x; binIdx < NUM_BINS; binIdx += BLOCK_SIZE) { //reference to line 9 from figure 9.10 from pg 211 of the textbook.
atomicAdd(&(bins[binIdx]), private_histo[binIdx]);
}
}
__global__ void saturate(unsigned int *bins, unsigned int num_bins) {
  //@@ Write the kernel that applies saturation to counters (i.e., if the bin value is more than 127, make it equal to 127)
for (int i = 0; i < num_bins; i++) { //simple function for 127 value cap.
if (bins[i] > 127) {
bins[i] = 127;
}
}
}
int main(int argc, char *argv[]) {
wbArg_t args;
int inputLength;
unsigned int *hostInput;
unsigned int *hostBins;
unsigned int *deviceInput;
unsigned int *deviceBins;
int numBlocks;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (unsigned int *)wbImport(wbArg_getInputFile(args, 0), &inputLength, "Integer");
hostBins = (unsigned int *)malloc(NUM_BINS * sizeof(unsigned int));
numBlocks = (float(inputLength - 1)) / BLOCK_SIZE + 1;
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbLog(TRACE, "The number of bins is ", NUM_BINS);
wbTime_start(GPU, "Allocating device memory");
//@@ Allocate device memory here
CUDA_CHECK(hipDeviceSynchronize());
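  // Note: sizeof(float) equals sizeof(unsigned int) here, so these byte counts are correct for the uint buffers.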
hipMalloc((void**)&deviceInput, inputLength * sizeof(float));
hipMalloc((void**)&deviceBins, NUM_BINS * sizeof(float));
wbTime_stop(GPU, "Allocating device memory");
wbTime_start(GPU, "Copying input host memory to device");
//@@ Copy input host memory to device
CUDA_CHECK(hipDeviceSynchronize());
hipMemcpy(deviceInput, hostInput, inputLength * sizeof(float), hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input host memory to device");
wbTime_start(GPU, "Clearing the bins on device");
//@@ zero out the deviceBins using hipMemset()
hipMemset(deviceBins, 0, NUM_BINS * sizeof(float));
wbTime_stop(GPU, "Clearing the bins on device");
//@@ Initialize the grid and block dimensions here
dim3 GridDim(numBlocks, 1, 1);
dim3 BlockDim(BLOCK_SIZE, 1, 1);
wbLog(TRACE, "Launching kernel");
wbTime_start(Compute, "Performing CUDA computation");
//@@ Invoke kernels: first call histogram kernel and then call saturate kernel
hipLaunchKernelGGL(( histogram) , dim3(GridDim), dim3(BlockDim) , 0, 0, deviceInput, deviceBins, inputLength, NUM_BINS);
CUDA_CHECK(hipDeviceSynchronize());
hipLaunchKernelGGL(( saturate) , dim3(GridDim), dim3(BlockDim) , 0, 0, deviceBins, NUM_BINS);
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output device memory to host");
//@@ Copy output device memory to host
CUDA_CHECK(hipDeviceSynchronize());
hipMemcpy(hostBins, deviceBins, NUM_BINS * sizeof(float), hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output device memory to host");
wbTime_start(GPU, "Freeing device memory");
//@@ Free the device memory here
hipFree(deviceInput);
hipFree(deviceBins);
wbTime_stop(GPU, "Freeing device memory");
wbSolution(args, hostBins, NUM_BINS);
free(hostBins);
free(hostInput);
return 0;
}
|
b6d29270f3aa7fe58ce37597fa3b338a28b53b22.cu
|
#include <wb.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define NUM_BINS 4096
#define BLOCK_SIZE 512
#define CUDA_CHECK(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line,
bool abort = true) {
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code),
file, line);
if (abort)
exit(code);
}
}
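// Histogram kernel: each block builds a privatized histogram in shared memory,
// walks the input with a grid-stride loop, then atomically merges into the global bins.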
__global__ void histogram(unsigned int *input, unsigned int *bins, unsigned int num_elements, unsigned int num_bins) {
//@@ Write the kernel that computes the histogram
  //@@ Make sure to use the privatization technique
__shared__ unsigned int private_histo[NUM_BINS];
for (unsigned int binIdx = threadIdx.x; binIdx < NUM_BINS; binIdx += BLOCK_SIZE) { //reference to line 3 from figure 9.10 from pg 211 of the textbook.
private_histo[binIdx] = 0;
}
__syncthreads();
int tid = threadIdx.x + blockIdx.x * blockDim.x; //reference PPT12 Slides 45-47
int stride = blockDim.x * gridDim.x;
while(tid < num_elements) {
int numberValue = input[tid];
if (numberValue >= 0 && numberValue < num_bins) {
atomicAdd(&(private_histo[numberValue]), 1);
}
tid += stride;
}
__syncthreads();
for (unsigned int binIdx = threadIdx.x; binIdx < NUM_BINS; binIdx += BLOCK_SIZE) { //reference to line 9 from figure 9.10 from pg 211 of the textbook.
atomicAdd(&(bins[binIdx]), private_histo[binIdx]);
}
}
__global__ void saturate(unsigned int *bins, unsigned int num_bins) {
  //@@ Write the kernel that applies saturation to counters (i.e., if the bin value is more than 127, make it equal to 127)
for (int i = 0; i < num_bins; i++) { //simple function for 127 value cap.
if (bins[i] > 127) {
bins[i] = 127;
}
}
}
int main(int argc, char *argv[]) {
wbArg_t args;
int inputLength;
unsigned int *hostInput;
unsigned int *hostBins;
unsigned int *deviceInput;
unsigned int *deviceBins;
int numBlocks;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (unsigned int *)wbImport(wbArg_getInputFile(args, 0), &inputLength, "Integer");
hostBins = (unsigned int *)malloc(NUM_BINS * sizeof(unsigned int));
numBlocks = (float(inputLength - 1)) / BLOCK_SIZE + 1;
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbLog(TRACE, "The number of bins is ", NUM_BINS);
wbTime_start(GPU, "Allocating device memory");
//@@ Allocate device memory here
CUDA_CHECK(cudaDeviceSynchronize());
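  // Note: sizeof(float) equals sizeof(unsigned int) here, so these byte counts are correct for the uint buffers.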
cudaMalloc((void**)&deviceInput, inputLength * sizeof(float));
cudaMalloc((void**)&deviceBins, NUM_BINS * sizeof(float));
wbTime_stop(GPU, "Allocating device memory");
wbTime_start(GPU, "Copying input host memory to device");
//@@ Copy input host memory to device
CUDA_CHECK(cudaDeviceSynchronize());
cudaMemcpy(deviceInput, hostInput, inputLength * sizeof(float), cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input host memory to device");
wbTime_start(GPU, "Clearing the bins on device");
//@@ zero out the deviceBins using cudaMemset()
cudaMemset(deviceBins, 0, NUM_BINS * sizeof(float));
wbTime_stop(GPU, "Clearing the bins on device");
//@@ Initialize the grid and block dimensions here
dim3 GridDim(numBlocks, 1, 1);
dim3 BlockDim(BLOCK_SIZE, 1, 1);
wbLog(TRACE, "Launching kernel");
wbTime_start(Compute, "Performing CUDA computation");
//@@ Invoke kernels: first call histogram kernel and then call saturate kernel
histogram <<< GridDim, BlockDim >>> (deviceInput, deviceBins, inputLength, NUM_BINS);
CUDA_CHECK(cudaDeviceSynchronize());
saturate <<< GridDim, BlockDim >>> (deviceBins, NUM_BINS);
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output device memory to host");
//@@ Copy output device memory to host
CUDA_CHECK(cudaDeviceSynchronize());
cudaMemcpy(hostBins, deviceBins, NUM_BINS * sizeof(float), cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output device memory to host");
wbTime_start(GPU, "Freeing device memory");
//@@ Free the device memory here
cudaFree(deviceInput);
cudaFree(deviceBins);
wbTime_stop(GPU, "Freeing device memory");
wbSolution(args, hostBins, NUM_BINS);
free(hostBins);
free(hostInput);
return 0;
}
|
00d77b122dfc2d9ac636b2c12db986875cae4f61.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include "./utilities/timer.hpp"
#include "./utilities/graph.hpp"
#include "./utilities/gpu_error_check.cuh"
#include "./utilities/global.hpp"
#include "./utilities/argument_parser.hpp"
#include <omp.h>
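// CPU reference: Bellman-Ford style edge relaxation parallelized over the edges with OpenMP;
// iterates until a full pass makes no distance update.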
uint* sssp_CPU_parallel(Graph *graph, int source) {
int numNodes = graph->numNodes;
int numEdges = graph->numEdges;
uint *dist = new uint[numNodes];
uint *preNode = new uint[numNodes];
bool *processed = new bool[numNodes];
uint *edgesSource = new uint[numEdges];
uint *edgesEnd = new uint[numEdges];
uint *edgesWeight = new uint[numEdges];
for (int i = 0; i < numNodes; i++) {
dist[i] = MAX_DIST;
preNode[i] = uint(-1);
processed[i] = false;
}
for (int i = 0; i < numEdges;i ++) {
Edge edge = graph->edges.at(i);
edgesSource[i] = edge.source;
edgesEnd[i] = edge.end;
edgesWeight[i] = edge.weight;
if (edge.source == source) {
if (edge.weight < dist[edge.end]) {
dist[edge.end] = edge.weight;
preNode[edge.end] = source;
}
} else {
// Case: edge.source != source
continue;
}
}
Timer timer;
bool finished = false;
uint numIteration = 0;
dist[source] = 0;
preNode[source] = 0;
processed[source] = true;
timer.start();
while(!finished) {
finished = true;
numIteration++;
#pragma omp parallel
{
// #pragma omp master
int threadId = omp_get_thread_num();
int numThreads = omp_get_num_threads();
int numEdgesPerThread = numEdges / numThreads + 1;
int start = threadId * numEdgesPerThread;
int end = (threadId + 1) * numEdgesPerThread;
if (start > numEdges) {
start = numEdges;
}
if (end > numEdges) {
end = numEdges;
}
for (int i = start; i < end; i++) {
uint source = edgesSource[i];
uint end = edgesEnd[i];
uint weight = edgesWeight[i];
if (dist[source] + weight < dist[end]) {
// #pragma omp atomic
dist[end] = dist[source] + weight;
// #pragma omp atomic
preNode[end] = source;
finished = false;
}
}
}
}
timer.stop();
printf("Process Done!\n");
printf("Number of Iteration: %d\n", numIteration);
printf("The execution time of SSSP on CPU(OpenMP): %f ms\n", timer.elapsedTime());
return dist;
}
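// Bellman-Ford style edge relaxation on the GPU: each thread scans a block of
// numEdgesPerThread consecutive edges and relaxes dist[end] with atomicMin,
// clearing *finished whenever any distance improves.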
__global__ void sssp_GPU_Kernel(int numEdges,
int numEdgesPerThread,
uint *dist,
uint *preNode,
uint *edgesSource,
uint *edgesEnd,
uint *edgesWeight,
bool *finished) {
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
int startId = threadId * numEdgesPerThread;
if (startId >= numEdges) {
return;
}
int endId = (threadId + 1) * numEdgesPerThread;
if (endId >= numEdges) {
endId = numEdges;
}
for (int nodeId = startId; nodeId < endId; nodeId++) {
uint source = edgesSource[nodeId];
uint end = edgesEnd[nodeId];
uint weight = edgesWeight[nodeId];
if (dist[source] + weight < dist[end]) {
atomicMin(&dist[end], dist[source] + weight);
// dist[end] = dist[source] + weight;
preNode[end] = source;
*finished = false;
}
}
}
uint* sssp_GPU(Graph *graph, int source) {
int numNodes = graph->numNodes;
int numEdges = graph->numEdges;
uint *dist = new uint[numNodes];
uint *preNode = new uint[numNodes];
uint *edgesSource = new uint[numEdges];
uint *edgesEnd = new uint[numEdges];
uint *edgesWeight = new uint[numEdges];
for (int i = 0; i < numNodes; i++) {
dist[i] = MAX_DIST;
preNode[i] = uint(-1);
}
for (int i = 0; i < numEdges; i++) {
Edge edge = graph->edges.at(i);
// Transfer the vector to the following three arrays
edgesSource[i] = edge.source;
edgesEnd[i] = edge.end;
edgesWeight[i] = edge.weight;
if (edge.source == source){
if (edge.weight < dist[edge.end]){
dist[edge.end] = edge.weight;
preNode[edge.end] = source;
}
} else {
// Case: edge.source != source
continue;
}
}
dist[source] = 0;
preNode[source] = 0;
uint *d_dist;
uint *d_preNode;
bool *d_finished;
uint *d_edgesSource;
uint *d_edgesEnd;
uint *d_edgesWeight;
gpuErrorcheck(hipMalloc(&d_dist, numNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_preNode, numNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_finished, sizeof(bool)));
gpuErrorcheck(hipMalloc(&d_edgesSource, numEdges * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_edgesEnd, numEdges * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_edgesWeight, numEdges * sizeof(uint)));
gpuErrorcheck(hipMemcpy(d_dist, dist, numNodes * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_preNode, preNode, numNodes * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_edgesSource, edgesSource, numEdges * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_edgesEnd, edgesEnd, numEdges * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_edgesWeight, edgesWeight, numEdges * sizeof(uint), hipMemcpyHostToDevice));
Timer timer;
int numIteration = 0;
int numEdgesPerThread = 8;
int numThreadsPerBlock = 512;
int numBlock = (numEdges) / (numThreadsPerBlock * numEdgesPerThread) + 1;
bool finished = true;
timer.start();
do {
numIteration++;
finished = true;
gpuErrorcheck(hipMemcpy(d_finished, &finished, sizeof(bool), hipMemcpyHostToDevice));
// TO-DO PARALLEL
hipLaunchKernelGGL(( sssp_GPU_Kernel), dim3(numBlock), dim3(numThreadsPerBlock) , 0, 0, numEdges,
numEdgesPerThread,
d_dist,
d_preNode,
d_edgesSource,
d_edgesEnd,
d_edgesWeight,
d_finished);
gpuErrorcheck(hipPeekAtLastError());
gpuErrorcheck(hipDeviceSynchronize());
gpuErrorcheck(hipMemcpy(&finished, d_finished, sizeof(bool), hipMemcpyDeviceToHost));
} while(!finished);
timer.stop();
printf("Process Done!\n");
printf("Number of Iteration: %d\n", numIteration);
printf("The execution time of SSSP on GPU: %f ms\n", timer.elapsedTime());
gpuErrorcheck(hipMemcpy(dist, d_dist, numNodes * sizeof(uint), hipMemcpyDeviceToHost));
gpuErrorcheck(hipFree(d_dist));
gpuErrorcheck(hipFree(d_preNode));
gpuErrorcheck(hipFree(d_finished));
gpuErrorcheck(hipFree(d_edgesSource));
gpuErrorcheck(hipFree(d_edgesEnd));
gpuErrorcheck(hipFree(d_edgesWeight));
return dist;
}
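// GPU half of the hybrid relaxation: threads cover the edge range
// [splitIndex, numEdges) while the CPU threads handle [0, splitIndex).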
__global__ void sssp_Hybrid_GPU_Kernel(int splitIndex,
int numEdges,
int numEdgesPerThread,
uint *dist,
uint *preNode,
uint *edgesSource,
uint *edgesEnd,
uint *edgesWeight,
bool *finished) {
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
int startId = splitIndex + threadId * numEdgesPerThread;
if (startId >= numEdges) {
return;
}
int endId = splitIndex + (threadId + 1) * numEdgesPerThread;
if (endId >= numEdges) {
endId = numEdges;
}
// printf("GPU: process edged from: %d to %d \n", startId, endId);
for (int nodeId = startId; nodeId < endId; nodeId++) {
uint source = edgesSource[nodeId];
uint end = edgesEnd[nodeId];
uint weight = edgesWeight[nodeId];
if (dist[source] + weight < dist[end]) {
atomicMin(&dist[end], dist[source] + weight);
preNode[end] = source;
*finished = false;
}
}
}
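// CPU half of the hybrid relaxation: each OpenMP thread relaxes its slice of
// the edges in [0, splitIndex) directly against the host dist array.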
void sssp_Hybrid_CPU(int threadId,
int splitIndex,
int numEdges,
int numEdgesPerThread,
uint *dist,
uint *preNode,
uint *edgesSource,
uint *edgesEnd,
uint *edgesWeight,
bool *finished) {
int start = threadId * numEdgesPerThread;
int end = (threadId + 1) * numEdgesPerThread;
if (start > splitIndex) return;
if (end > splitIndex) {
end = splitIndex;
}
for (int i = start; i < end; i++) {
uint source = edgesSource[i];
uint end = edgesEnd[i];
uint weight = edgesWeight[i];
if (dist[source] + weight < dist[end]) {
dist[end] = dist[source] + weight;
preNode[end] = source;
*finished = false;
}
}
}
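// Merge step: take the element-wise minimum of the host distances (dist) and
// the distances copied back from the GPU (dist_copy).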
void sssp_Hybrid_MergeDist(int threadId,
int numNodes,
int numNodesPerThread,
uint *dist,
uint *dist_copy) {
int start = threadId * numNodesPerThread;
int end = (threadId + 1) * numNodesPerThread;
if (start > numNodes) return;
if (end > numNodes) {
end = numNodes;
}
for (int i = start; i < end; i++) {
if (dist[i] > dist_copy[i]) {
dist[i] = dist_copy[i];
}
}
}
uint* sssp_Hybrid(Graph *graph, int source) {
int numNodes = graph->numNodes;
int numEdges = graph->numEdges;
uint *dist = new uint[numNodes];
uint *preNode = new uint[numNodes];
uint *edgesSource = new uint[numEdges];
uint *edgesEnd = new uint[numEdges];
uint *edgesWeight = new uint[numEdges];
uint *dist_copy = new uint[numNodes];
for (int i = 0; i < numNodes; i++) {
dist[i] = MAX_DIST;
preNode[i] = uint(-1);
}
for (int i = 0; i < numEdges; i++) {
Edge edge = graph->edges.at(i);
// Transfer the vector to the following three arrays
edgesSource[i] = edge.source;
edgesEnd[i] = edge.end;
edgesWeight[i] = edge.weight;
if (edge.source == source){
if (edge.weight < dist[edge.end]){
dist[edge.end] = edge.weight;
preNode[edge.end] = source;
}
} else {
// Case: edge.source != source
continue;
}
}
dist[source] = 0;
preNode[source] = 0;
uint *d_dist;
uint *d_preNode;
bool *d_finished;
uint *d_edgesSource;
uint *d_edgesEnd;
uint *d_edgesWeight;
gpuErrorcheck(hipMalloc(&d_dist, numNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_preNode, numNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_finished, sizeof(bool)));
gpuErrorcheck(hipMalloc(&d_edgesSource, numEdges * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_edgesEnd, numEdges * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_edgesWeight, numEdges * sizeof(uint)));
gpuErrorcheck(hipMemcpy(d_dist, dist, numNodes * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_preNode, preNode, numNodes * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_edgesSource, edgesSource, numEdges * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_edgesEnd, edgesEnd, numEdges * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_edgesWeight, edgesWeight, numEdges * sizeof(uint), hipMemcpyHostToDevice));
// Copy from gpu memory
memcpy(dist_copy, dist, numNodes * sizeof(uint));
Timer timer;
int numIteration = 0;
bool finished = false;
bool h_finished = false;
float splitRatio; // cpu_data_size / whole_data_size
// Automatically select an initial value of splitRatio based on experience
if (numEdges < 300000) {
splitRatio = 0.95;
} else if (numEdges < 800000) {
splitRatio = 0.7;
} else {
splitRatio = 0.3;
}
/*
CPU process edges from 0 to splitIndex
number of edges: splitIndex
GPU process edges from splitIndex to numEdges
number of edges: numEdges - splitIndex
*/
int splitIndex = numEdges * splitRatio;
int d_numEdgesPerThread = 8;
int d_numThreadsPerBlock = 512;
int d_numBlock = (numEdges - splitIndex + 1) / (d_numThreadsPerBlock * d_numEdgesPerThread) + 1;
Timer timer_cpu, timer_gpu;
Timer timer_host_to_device;
Timer timer_device_to_host;
// Default: enable cpu and gpu
// Once splitRatio equals to 0 only enable gpu
// Once splitRatio equals to 1 only enable cpu
bool cpu_enable = true;
bool gpu_enable = true;
vector<LoopInfo> infos;
LoopInfo loopInfo;
timer.start();
do {
numIteration++;
finished = true;
h_finished = true;
splitIndex = numEdges * splitRatio;
d_numBlock = (numEdges - splitIndex + 1) / (d_numThreadsPerBlock * d_numEdgesPerThread) + 1;
timer_gpu.start();
timer_cpu.start();
#pragma omp parallel //num_threads(8)
{
int threadId = omp_get_thread_num();
int h_numThreads = omp_get_num_threads();
if (threadId == h_numThreads - 1 && splitIndex < numEdges && gpu_enable) {
// Last thread will be used to launch gpu kernel
// if thread 0 were used to launch the gpu kernel, the first block of
// data, whose indices begin at 0, would not be processed.
gpuErrorcheck(hipMemcpy(d_finished, &finished, sizeof(bool), hipMemcpyHostToDevice));
// timer_host_to_device.start();
gpuErrorcheck(hipMemcpy(d_dist, dist, sizeof(uint) * numNodes, hipMemcpyHostToDevice));
// timer_host_to_device.stop();
hipLaunchKernelGGL(( sssp_Hybrid_GPU_Kernel), dim3(d_numBlock), dim3(d_numThreadsPerBlock), 0, 0, splitIndex,
numEdges,
d_numEdgesPerThread,
d_dist,
d_preNode,
d_edgesSource,
d_edgesEnd,
d_edgesWeight,
d_finished);
gpuErrorcheck(hipPeekAtLastError());
gpuErrorcheck(hipDeviceSynchronize());
gpuErrorcheck(hipMemcpy(&finished, d_finished, sizeof(bool), hipMemcpyDeviceToHost));
// timer_device_to_host.start();
gpuErrorcheck(hipMemcpy(dist_copy, d_dist, sizeof(uint) * numNodes, hipMemcpyDeviceToHost));
// timer_device_to_host.stop();
timer_gpu.stop();
} else if (cpu_enable) {
// printf("Sub threads\n");
int h_numEdgesPerThread = (splitIndex) / (h_numThreads - 1) + 1;
sssp_Hybrid_CPU(threadId,
splitIndex,
numEdges,
h_numEdgesPerThread,
dist,
preNode,
edgesSource,
edgesEnd,
edgesWeight,
&finished);
timer_cpu.stop();
}
}
finished = finished && h_finished;
#pragma omp parallel //num_threads(8)
{
int threadId = omp_get_thread_num();
int h_numThreads = omp_get_num_threads();
int h_numNodesPerThread = (numNodes) / (h_numThreads) + 1;
if (!finished) {
// Merge
sssp_Hybrid_MergeDist(threadId,
numNodes,
h_numNodesPerThread,
dist,
dist_copy);
}
}
// Load Balancing
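// If the CPU slice took noticeably longer than the GPU slice (>10%), shift 5%
// of the edges to the GPU, and vice versa; saturating at 0 or 1 disables the
// slower side for the remaining iterations.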
if (cpu_enable && gpu_enable) {
float factor = (timer_cpu.elapsedTime() / timer_gpu.elapsedTime());
if (factor > 1.1) {
splitRatio = splitRatio - 0.05;
if (splitRatio < 0) {
splitRatio = 0;
cpu_enable = false;
}
} else if (factor < 0.9) {
splitRatio = splitRatio + 0.05;
if (splitRatio > 1) {
splitRatio = 1;
gpu_enable = false;
}
}
// printf("Copy dist from host to device : %f ms \n", timer_host_to_device.elapsedTime());
// printf("Copy dist from device to host : %f ms \n", timer_device_to_host.elapsedTime());
loopInfo.numIteration = numIteration;
loopInfo.time_cpu = timer_cpu.elapsedTime() > 0 ? timer_cpu.elapsedTime() : 0;
loopInfo.time_gpu = timer_gpu.elapsedTime() > 0 ? timer_gpu.elapsedTime() : 0;
loopInfo.splitRatio = splitRatio;
infos.push_back(loopInfo);
}
} while(!finished);
timer.stop();
printLoopInfo(infos);
printf("Process Done!\n");
printf("Number of Iteration: %d\n", numIteration);
printf("The execution time of SSSP on Hybrid(CPU-GPU): %f ms\n", timer.elapsedTime());
gpuErrorcheck(hipFree(d_dist));
gpuErrorcheck(hipFree(d_preNode));
gpuErrorcheck(hipFree(d_finished));
gpuErrorcheck(hipFree(d_edgesSource));
gpuErrorcheck(hipFree(d_edgesEnd));
gpuErrorcheck(hipFree(d_edgesWeight));
return dist;
}
int main(int argc, char **argv) {
Timer timer_total, timer_load;
timer_total.start();
ArgumentParser args(argc, argv);
timer_load.start();
Graph graph(args.inputFilePath);
//Graph graph("datasets/simpleGraph.txt");
graph.readGraph();
timer_load.stop();
int sourceNode;
if (args.hasSourceNode) {
sourceNode = args.sourceNode;
} else {
// Use graph default source
sourceNode = graph.defaultSource;
}
uint *dist_hybrid = sssp_Hybrid(&graph, sourceNode);
uint *dist_gpu = sssp_GPU(&graph, sourceNode);
compareResult(dist_hybrid, dist_gpu, graph.numNodes);
if (args.runOnCPU) {
uint *dist_cpu = sssp_CPU_parallel(&graph, sourceNode);
compareResult(dist_cpu, dist_hybrid, graph.numNodes);
}
timer_total.stop();
printf("Total execution time: %f ms\n", timer_total.elapsedTime());
printf("Graph loading execution time: %f ms\n", timer_load.elapsedTime());
return 0;
}
|
00d77b122dfc2d9ac636b2c12db986875cae4f61.cu
|
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include "./utilities/timer.hpp"
#include "./utilities/graph.hpp"
#include "./utilities/gpu_error_check.cuh"
#include "./utilities/global.hpp"
#include "./utilities/argument_parser.hpp"
#include <omp.h>
uint* sssp_CPU_parallel(Graph *graph, int source) {
int numNodes = graph->numNodes;
int numEdges = graph->numEdges;
uint *dist = new uint[numNodes];
uint *preNode = new uint[numNodes];
bool *processed = new bool[numNodes];
uint *edgesSource = new uint[numEdges];
uint *edgesEnd = new uint[numEdges];
uint *edgesWeight = new uint[numEdges];
for (int i = 0; i < numNodes; i++) {
dist[i] = MAX_DIST;
preNode[i] = uint(-1);
processed[i] = false;
}
for (int i = 0; i < numEdges;i ++) {
Edge edge = graph->edges.at(i);
edgesSource[i] = edge.source;
edgesEnd[i] = edge.end;
edgesWeight[i] = edge.weight;
if (edge.source == source) {
if (edge.weight < dist[edge.end]) {
dist[edge.end] = edge.weight;
preNode[edge.end] = source;
}
} else {
// Case: edge.source != source
continue;
}
}
Timer timer;
bool finished = false;
uint numIteration = 0;
dist[source] = 0;
preNode[source] = 0;
processed[source] = true;
timer.start();
while(!finished) {
finished = true;
numIteration++;
#pragma omp parallel
{
// #pragma omp master
int threadId = omp_get_thread_num();
int numThreads = omp_get_num_threads();
int numEdgesPerThread = numEdges / numThreads + 1;
int start = threadId * numEdgesPerThread;
int end = (threadId + 1) * numEdgesPerThread;
if (start > numEdges) {
start = numEdges;
}
if (end > numEdges) {
end = numEdges;
}
for (int i = start; i < end; i++) {
uint source = edgesSource[i];
uint end = edgesEnd[i];
uint weight = edgesWeight[i];
if (dist[source] + weight < dist[end]) {
// #pragma omp atomic
dist[end] = dist[source] + weight;
// #pragma omp atomic
preNode[end] = source;
finished = false;
}
}
}
}
timer.stop();
printf("Process Done!\n");
printf("Number of Iteration: %d\n", numIteration);
printf("The execution time of SSSP on CPU(OpenMP): %f ms\n", timer.elapsedTime());
return dist;
}
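// Edge-relaxation kernel (Bellman-Ford): each thread handles numEdgesPerThread
// consecutive edges, updating dist[] with atomicMin and flagging
// *finished = false whenever a relaxation succeeds.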
__global__ void sssp_GPU_Kernel(int numEdges,
int numEdgesPerThread,
uint *dist,
uint *preNode,
uint *edgesSource,
uint *edgesEnd,
uint *edgesWeight,
bool *finished) {
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
int startId = threadId * numEdgesPerThread;
if (startId >= numEdges) {
return;
}
int endId = (threadId + 1) * numEdgesPerThread;
if (endId >= numEdges) {
endId = numEdges;
}
for (int nodeId = startId; nodeId < endId; nodeId++) {
uint source = edgesSource[nodeId];
uint end = edgesEnd[nodeId];
uint weight = edgesWeight[nodeId];
if (dist[source] + weight < dist[end]) {
atomicMin(&dist[end], dist[source] + weight);
// dist[end] = dist[source] + weight;
preNode[end] = source;
*finished = false;
}
}
}
uint* sssp_GPU(Graph *graph, int source) {
int numNodes = graph->numNodes;
int numEdges = graph->numEdges;
uint *dist = new uint[numNodes];
uint *preNode = new uint[numNodes];
uint *edgesSource = new uint[numEdges];
uint *edgesEnd = new uint[numEdges];
uint *edgesWeight = new uint[numEdges];
for (int i = 0; i < numNodes; i++) {
dist[i] = MAX_DIST;
preNode[i] = uint(-1);
}
for (int i = 0; i < numEdges; i++) {
Edge edge = graph->edges.at(i);
// Transfer the vector to the following three arrays
edgesSource[i] = edge.source;
edgesEnd[i] = edge.end;
edgesWeight[i] = edge.weight;
if (edge.source == source){
if (edge.weight < dist[edge.end]){
dist[edge.end] = edge.weight;
preNode[edge.end] = source;
}
} else {
// Case: edge.source != source
continue;
}
}
dist[source] = 0;
preNode[source] = 0;
uint *d_dist;
uint *d_preNode;
bool *d_finished;
uint *d_edgesSource;
uint *d_edgesEnd;
uint *d_edgesWeight;
gpuErrorcheck(cudaMalloc(&d_dist, numNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_preNode, numNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_finished, sizeof(bool)));
gpuErrorcheck(cudaMalloc(&d_edgesSource, numEdges * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_edgesEnd, numEdges * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_edgesWeight, numEdges * sizeof(uint)));
gpuErrorcheck(cudaMemcpy(d_dist, dist, numNodes * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_preNode, preNode, numNodes * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_edgesSource, edgesSource, numEdges * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_edgesEnd, edgesEnd, numEdges * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_edgesWeight, edgesWeight, numEdges * sizeof(uint), cudaMemcpyHostToDevice));
Timer timer;
int numIteration = 0;
int numEdgesPerThread = 8;
int numThreadsPerBlock = 512;
int numBlock = (numEdges) / (numThreadsPerBlock * numEdgesPerThread) + 1;
bool finished = true;
timer.start();
do {
numIteration++;
finished = true;
gpuErrorcheck(cudaMemcpy(d_finished, &finished, sizeof(bool), cudaMemcpyHostToDevice));
// TO-DO PARALLEL
sssp_GPU_Kernel<<< numBlock, numThreadsPerBlock >>> (numEdges,
numEdgesPerThread,
d_dist,
d_preNode,
d_edgesSource,
d_edgesEnd,
d_edgesWeight,
d_finished);
gpuErrorcheck(cudaPeekAtLastError());
gpuErrorcheck(cudaDeviceSynchronize());
gpuErrorcheck(cudaMemcpy(&finished, d_finished, sizeof(bool), cudaMemcpyDeviceToHost));
} while(!finished);
timer.stop();
printf("Process Done!\n");
printf("Number of Iteration: %d\n", numIteration);
printf("The execution time of SSSP on GPU: %f ms\n", timer.elapsedTime());
gpuErrorcheck(cudaMemcpy(dist, d_dist, numNodes * sizeof(uint), cudaMemcpyDeviceToHost));
gpuErrorcheck(cudaFree(d_dist));
gpuErrorcheck(cudaFree(d_preNode));
gpuErrorcheck(cudaFree(d_finished));
gpuErrorcheck(cudaFree(d_edgesSource));
gpuErrorcheck(cudaFree(d_edgesEnd));
gpuErrorcheck(cudaFree(d_edgesWeight));
return dist;
}
__global__ void sssp_Hybrid_GPU_Kernel(int splitIndex,
int numEdges,
int numEdgesPerThread,
uint *dist,
uint *preNode,
uint *edgesSource,
uint *edgesEnd,
uint *edgesWeight,
bool *finished) {
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
int startId = splitIndex + threadId * numEdgesPerThread;
if (startId >= numEdges) {
return;
}
int endId = splitIndex + (threadId + 1) * numEdgesPerThread;
if (endId >= numEdges) {
endId = numEdges;
}
// printf("GPU: process edged from: %d to %d \n", startId, endId);
for (int nodeId = startId; nodeId < endId; nodeId++) {
uint source = edgesSource[nodeId];
uint end = edgesEnd[nodeId];
uint weight = edgesWeight[nodeId];
if (dist[source] + weight < dist[end]) {
atomicMin(&dist[end], dist[source] + weight);
preNode[end] = source;
*finished = false;
}
}
}
void sssp_Hybrid_CPU(int threadId,
int splitIndex,
int numEdges,
int numEdgesPerThread,
uint *dist,
uint *preNode,
uint *edgesSource,
uint *edgesEnd,
uint *edgesWeight,
bool *finished) {
int start = threadId * numEdgesPerThread;
int end = (threadId + 1) * numEdgesPerThread;
if (start > splitIndex) return;
if (end > splitIndex) {
end = splitIndex;
}
for (int i = start; i < end; i++) {
uint source = edgesSource[i];
uint end = edgesEnd[i];
uint weight = edgesWeight[i];
if (dist[source] + weight < dist[end]) {
dist[end] = dist[source] + weight;
preNode[end] = source;
*finished = false;
}
}
}
void sssp_Hybrid_MergeDist(int threadId,
int numNodes,
int numNodesPerThread,
uint *dist,
uint *dist_copy) {
int start = threadId * numNodesPerThread;
int end = (threadId + 1) * numNodesPerThread;
if (start > numNodes) return;
if (end > numNodes) {
end = numNodes;
}
for (int i = start; i < end; i++) {
if (dist[i] > dist_copy[i]) {
dist[i] = dist_copy[i];
}
}
}
uint* sssp_Hybrid(Graph *graph, int source) {
int numNodes = graph->numNodes;
int numEdges = graph->numEdges;
uint *dist = new uint[numNodes];
uint *preNode = new uint[numNodes];
uint *edgesSource = new uint[numEdges];
uint *edgesEnd = new uint[numEdges];
uint *edgesWeight = new uint[numEdges];
uint *dist_copy = new uint[numNodes];
for (int i = 0; i < numNodes; i++) {
dist[i] = MAX_DIST;
preNode[i] = uint(-1);
}
for (int i = 0; i < numEdges; i++) {
Edge edge = graph->edges.at(i);
// Transfer the vector to the following three arrays
edgesSource[i] = edge.source;
edgesEnd[i] = edge.end;
edgesWeight[i] = edge.weight;
if (edge.source == source){
if (edge.weight < dist[edge.end]){
dist[edge.end] = edge.weight;
preNode[edge.end] = source;
}
} else {
// Case: edge.source != source
continue;
}
}
dist[source] = 0;
preNode[source] = 0;
uint *d_dist;
uint *d_preNode;
bool *d_finished;
uint *d_edgesSource;
uint *d_edgesEnd;
uint *d_edgesWeight;
gpuErrorcheck(cudaMalloc(&d_dist, numNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_preNode, numNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_finished, sizeof(bool)));
gpuErrorcheck(cudaMalloc(&d_edgesSource, numEdges * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_edgesEnd, numEdges * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_edgesWeight, numEdges * sizeof(uint)));
gpuErrorcheck(cudaMemcpy(d_dist, dist, numNodes * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_preNode, preNode, numNodes * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_edgesSource, edgesSource, numEdges * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_edgesEnd, edgesEnd, numEdges * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_edgesWeight, edgesWeight, numEdges * sizeof(uint), cudaMemcpyHostToDevice));
// Copy from gpu memory
memcpy(dist_copy, dist, numNodes * sizeof(uint));
Timer timer;
int numIteration = 0;
bool finished = false;
bool h_finished = false;
float splitRatio; // cpu_data_size / whole_data_size
// Automatically select an initial value of splitRatio based on experience
if (numEdges < 300000) {
splitRatio = 0.95;
} else if (numEdges < 800000) {
splitRatio = 0.7;
} else {
splitRatio = 0.3;
}
/*
CPU process edges from 0 to splitIndex
number of edges: splitIndex
GPU process edges from splitIndex to numEdges
number of edges: numEdges - splitIndex
*/
int splitIndex = numEdges * splitRatio;
int d_numEdgesPerThread = 8;
int d_numThreadsPerBlock = 512;
int d_numBlock = (numEdges - splitIndex + 1) / (d_numThreadsPerBlock * d_numEdgesPerThread) + 1;
Timer timer_cpu, timer_gpu;
Timer timer_host_to_device;
Timer timer_device_to_host;
// Default: enable cpu and gpu
// Once splitRatio equals to 0 only enable gpu
// Once splitRatio equals to 1 only enable cpu
bool cpu_enable = true;
bool gpu_enable = true;
vector<LoopInfo> infos;
LoopInfo loopInfo;
timer.start();
do {
numIteration++;
finished = true;
h_finished = true;
splitIndex = numEdges * splitRatio;
d_numBlock = (numEdges - splitIndex + 1) / (d_numThreadsPerBlock * d_numEdgesPerThread) + 1;
timer_gpu.start();
timer_cpu.start();
#pragma omp parallel //num_threads(8)
{
int threadId = omp_get_thread_num();
int h_numThreads = omp_get_num_threads();
if (threadId == h_numThreads - 1 && splitIndex < numEdges && gpu_enable) {
// Last thread will be used to launch gpu kernel
// if thread 0 were used to launch the gpu kernel, the first block of
// data, whose indices begin at 0, would not be processed.
gpuErrorcheck(cudaMemcpy(d_finished, &finished, sizeof(bool), cudaMemcpyHostToDevice));
// timer_host_to_device.start();
gpuErrorcheck(cudaMemcpy(d_dist, dist, sizeof(uint) * numNodes, cudaMemcpyHostToDevice));
// timer_host_to_device.stop();
sssp_Hybrid_GPU_Kernel<<< d_numBlock, d_numThreadsPerBlock>>> (splitIndex,
numEdges,
d_numEdgesPerThread,
d_dist,
d_preNode,
d_edgesSource,
d_edgesEnd,
d_edgesWeight,
d_finished);
gpuErrorcheck(cudaPeekAtLastError());
gpuErrorcheck(cudaDeviceSynchronize());
gpuErrorcheck(cudaMemcpy(&finished, d_finished, sizeof(bool), cudaMemcpyDeviceToHost));
// timer_device_to_host.start();
gpuErrorcheck(cudaMemcpy(dist_copy, d_dist, sizeof(uint) * numNodes, cudaMemcpyDeviceToHost));
// timer_device_to_host.stop();
timer_gpu.stop();
} else if (cpu_enable) {
// printf("Sub threads\n");
int h_numEdgesPerThread = (splitIndex) / (h_numThreads - 1) + 1;
sssp_Hybrid_CPU(threadId,
splitIndex,
numEdges,
h_numEdgesPerThread,
dist,
preNode,
edgesSource,
edgesEnd,
edgesWeight,
&finished);
timer_cpu.stop();
}
}
finished = finished && h_finished;
#pragma omp parallel //num_threads(8)
{
int threadId = omp_get_thread_num();
int h_numThreads = omp_get_num_threads();
int h_numNodesPerThread = (numNodes) / (h_numThreads) + 1;
if (!finished) {
// Merge
sssp_Hybrid_MergeDist(threadId,
numNodes,
h_numNodesPerThread,
dist,
dist_copy);
}
}
// Load Balancing
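// Move splitRatio by 5% toward whichever side (CPU or GPU) finished faster
// this iteration; a ratio of 0 or 1 disables the slower side entirely.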
if (cpu_enable && gpu_enable) {
float factor = (timer_cpu.elapsedTime() / timer_gpu.elapsedTime());
if (factor > 1.1) {
splitRatio = splitRatio - 0.05;
if (splitRatio < 0) {
splitRatio = 0;
cpu_enable = false;
}
} else if (factor < 0.9) {
splitRatio = splitRatio + 0.05;
if (splitRatio > 1) {
splitRatio = 1;
gpu_enable = false;
}
}
// printf("Copy dist from host to device : %f ms \n", timer_host_to_device.elapsedTime());
// printf("Copy dist from device to host : %f ms \n", timer_device_to_host.elapsedTime());
loopInfo.numIteration = numIteration;
loopInfo.time_cpu = timer_cpu.elapsedTime() > 0 ? timer_cpu.elapsedTime() : 0;
loopInfo.time_gpu = timer_gpu.elapsedTime() > 0 ? timer_gpu.elapsedTime() : 0;
loopInfo.splitRatio = splitRatio;
infos.push_back(loopInfo);
}
} while(!finished);
timer.stop();
printLoopInfo(infos);
printf("Process Done!\n");
printf("Number of Iteration: %d\n", numIteration);
printf("The execution time of SSSP on Hybrid(CPU-GPU): %f ms\n", timer.elapsedTime());
gpuErrorcheck(cudaFree(d_dist));
gpuErrorcheck(cudaFree(d_preNode));
gpuErrorcheck(cudaFree(d_finished));
gpuErrorcheck(cudaFree(d_edgesSource));
gpuErrorcheck(cudaFree(d_edgesEnd));
gpuErrorcheck(cudaFree(d_edgesWeight));
return dist;
}
int main(int argc, char **argv) {
Timer timer_total, timer_load;
timer_total.start();
ArgumentParser args(argc, argv);
timer_load.start();
Graph graph(args.inputFilePath);
//Graph graph("datasets/simpleGraph.txt");
graph.readGraph();
timer_load.stop();
int sourceNode;
if (args.hasSourceNode) {
sourceNode = args.sourceNode;
} else {
// Use graph default source
sourceNode = graph.defaultSource;
}
uint *dist_hybrid = sssp_Hybrid(&graph, sourceNode);
uint *dist_gpu = sssp_GPU(&graph, sourceNode);
compareResult(dist_hybrid, dist_gpu, graph.numNodes);
if (args.runOnCPU) {
uint *dist_cpu = sssp_CPU_parallel(&graph, sourceNode);
compareResult(dist_cpu, dist_hybrid, graph.numNodes);
}
timer_total.stop();
printf("Total execution time: %f ms\n", timer_total.elapsedTime());
printf("Graph loading execution time: %f ms\n", timer_load.elapsedTime());
return 0;
}
|
8d2493d00fea27d74a45a26013aa511030add18e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#define N 4 //for sequential only
#define MAX_NUMBER_THREADS 1024
hipError_t drumWithCuda(int userN, int userNumOfBlocks, int userNumOfThreadsPerBlock, int userNumOfIterations);
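// Interior update of the finite-difference drum (2D wave equation) on a
// flattened userN x userN grid: each thread updates its share of one block's
// elements, skipping the boundary rows and columns.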
__global__ void drumKernelinner(double *u, double *u1, double *u2, int threadsPerBlock, int numOfElementsPerBlock, int userN)
{
for (int i = 0; i < numOfElementsPerBlock / threadsPerBlock; i++) {
int j = (threadIdx.x + threadsPerBlock * i) + (blockIdx.x * numOfElementsPerBlock);
double p = 0.5;
double nConst = 0.0002;
if ((j % userN != 0) && (j % userN != userN - 1)) {
if (j > userN && j < ((userN * userN) - userN)) {
u[j] = (p * (u1[j - userN] + u1[j + userN] + u1[j - 1] + u1[j + 1]
- 4 * u1[j]) + (2 * u1[j]) - ((1 - nConst) * u2[j])) / (1 + nConst);
}
}
}
}
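// Boundary update: non-corner edge cells take G times the value of their
// interior neighbour; corners are handled on the host.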
__global__ void drumKernelsides(double* u, double* u1, double* u2, int threadsPerBlock, int numOfElementsPerBlock, int userN)
{
for (int i = 0; i < numOfElementsPerBlock / threadsPerBlock; i++) {
int j = (threadIdx.x + threadsPerBlock * i) + (blockIdx.x * numOfElementsPerBlock);
double G = 0.75;
if (j < userN) {
if (j % userN != 0 && j % userN != userN - 1) {
u[j] = G * u[userN + j];
}
}
if (j % userN == 0) {
if ((!(j < userN)) && (j < ((userN * userN) - userN))) {
u[j] = G * u[j + 1];
}
}
if (j % userN == userN - 1) {
if ((!(j < userN)) && (j < ((userN * userN) - userN))) {
u[j] = G * u[j - 1];
}
}
if (j > ((userN * userN) - userN)) {
if (j % userN != 0 && j % userN != userN - 1) {
u[j] = G * u[j - userN];
}
}
}
}
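// Advance the time levels: u2 <- u1 (previous step), u1 <- u (current step).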
__global__ void copyCurrToPrev(double* u, double* u1, double* u2, int threadsPerBlock, int numOfElementsPerBlock) {
for (int i = 0; i < numOfElementsPerBlock / threadsPerBlock; i++) {
int j = (threadIdx.x + threadsPerBlock * i) + (blockIdx.x * numOfElementsPerBlock);
u2[j] = u1[j];
u1[j] = u[j];
}
}
int main(int argc, char* argv[])
{
char* SeqOrPar = nullptr;
int userN = 0;
int userNumOfBlocks = 0;
int userNumOfThreadsPerBlock = 0;
int userNumOfIterations = 0;
if (argc < 3 || argv[1] == NULL || argv[2] == NULL ||
argv[1] == "-h" || argv[1] == "--help" || argv[1] == "--h") {
printf("Lab3.exe <Sequential> <Number of Iterations> <Drum size is 4x4>\n"
"OR \n <Parallel> <Number of Iterations> + Optional in this order: <Number of Blocks> <Number of Threads Per Block> <Drum Size (N)>\n"
"E.x: Lab3.exe Parallel 12 64 64 512\n");
return 0;
}
else {
if (argv[1] != NULL) {
SeqOrPar = argv[1];
}
if (argv[2] != NULL) {
userNumOfIterations = atoi(argv[2]);
}
if (argv[3] != NULL) {
userNumOfBlocks = atoi(argv[3]);
if (argv[4] != NULL) {
userNumOfThreadsPerBlock = atoi(argv[4]);
if (argv[5] != NULL) {
userN = atoi(argv[5]);
}
}
}
}
if (!strcmp(SeqOrPar, "Sequential")) {
double const p = 0.5;
double const nConst = 0.0002;
double G = 0.75;
double u[N * N] = { 0.0 };
double u1[N * N] = { 0.0 };
double u2[N * N] = { 0.0 };
u1[(N*(N/2)) + N/2] = 1.0;
for (int z = 0; z < userNumOfIterations; z++) {
for (int i = 1; i <= N-2; i++) {
for (int j = 1; j <= N-2; j++) {
u[(i*N) + j] = (p * (u1[((i-1) * N) + j] + u1[((i+1) * N) + j] + u1[(i * N) + (j-1)] + u1[(i * N) + (j + 1)] - 4 * u1[(i*N) + j]) +
2 * u1[(i*N)+j] - (1 - nConst) * u2[(i * N) + j]) / (1 + nConst);
}
}
for (int i = 1; i <= N-2; i++) {
u[i] = G * u[N+i];
u[(N*(N - 1))+i] = G * u[(N * (N - 2)) + i];
u[i*N] = G * u[(i*N) + 1];
u[(i*N) + (N - 1)] = G * u[(i * N) + (N - 2)];
}
u[0] = G * u[N];
u[(N - 1)*N] = G * u[(N - 2) * N];
u[N - 1] = G * u[N - 2];
u[N*(N - 1) + (N - 1)] = G * u[N * (N - 1) + (N - 2)];
memcpy(&u2, &u1, (N * N) * sizeof(double));
memcpy(&u1, &u, (N * N) * sizeof(double));
printf("u[2,2] at iteration %d: %f \n", z+1, u[(N * (N / 2)) + N / 2]);
}
}
else {
drumWithCuda(userN, userNumOfBlocks, userNumOfThreadsPerBlock, userNumOfIterations);
}
return 0;
}
// Helper function that runs the drum finite-difference simulation on the GPU.
hipError_t drumWithCuda(int userN, int userNumOfBlocks, int userNumOfThreadsPerBlock, int userNumOfIterations)
{
hipError_t cudaStatus;
clock_t start_t, end_t;
double *u, *u1, *u2;
double G = 0.75;
if (userN == 0) {
userN = 512;
printf("Drum size assumed to be 512x512 \n");
}
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
//Mallocing u, u1, u2
cudaStatus = hipMallocManaged((void**)& u, (userN * userN) * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc of u failed!");
goto Error;
}
cudaStatus = hipMallocManaged((void**)& u1, (userN * userN) * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc of u1 failed!");
goto Error;
}
cudaStatus = hipMallocManaged((void**)& u2, (userN * userN) * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc of u2 failed!");
goto Error;
}
//initializing to 0
memset(&u[0], 0, (userN * userN) * sizeof(double));
memset(&u1[0], 0, (userN * userN) * sizeof(double));
memset(&u2[0], 0, (userN * userN) * sizeof(double));
//
u1[(userN * (userN / 2)) + userN / 2] = 1.0;
int numBlocks = 0;
int threadsPerBlock = 0;
int numOfElementsPerBlock = 0;
//Verifying and validating the user input
if (userN == 4) {
numBlocks = 1;
threadsPerBlock = 16;
}
else {
userN = 512;
numBlocks = userNumOfBlocks;
threadsPerBlock = userNumOfThreadsPerBlock;
if ((numBlocks*threadsPerBlock > userN*userN) || threadsPerBlock > MAX_NUMBER_THREADS) {
printf("Using more threads or blocks than needed\nRan with maximum number of blocks and threads per block \n");
threadsPerBlock = MAX_NUMBER_THREADS;
numBlocks = 16;
}
if (userNumOfBlocks == 0 || userNumOfThreadsPerBlock == 0) {
numBlocks = ((userN * userN + (MAX_NUMBER_THREADS - 1)) / MAX_NUMBER_THREADS);
threadsPerBlock = ((userN * userN + (numBlocks - 1)) / numBlocks);
}
}
numOfElementsPerBlock = userN * userN / numBlocks;
printf("Drum Dimension: %dx%d\n", userN, userN);
printf("Number of Blocks: %d\n", numBlocks);
printf("Number of Threads Per Block: %d\n", threadsPerBlock);
printf("Number of Elements Per Thread: %d\n", (userN * userN) / (numBlocks*threadsPerBlock));
start_t = clock();
for (int x = 0; x < userNumOfIterations; x++) {
// Launch a kernel on the GPU with one thread for each element.
drumKernelinner << <numBlocks, threadsPerBlock >> > (u, u1, u2, threadsPerBlock, numOfElementsPerBlock, userN);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "drumKernelinner launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching drumKernelinner!\n", cudaStatus);
goto Error;
}
//Update Sides
drumKernelsides << <numBlocks, threadsPerBlock>> > (u, u1, u2, threadsPerBlock, numOfElementsPerBlock, userN);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "drumKernelSides launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching drumKernelSides!\n", cudaStatus);
goto Error;
}
//update corners
u[0] = G * u[userN];
u[(userN - 1)* userN] = G * u[(userN - 2) * userN];
u[userN - 1] = G * u[userN - 2];
u[userN *(userN - 1) + (userN - 1)] = G * u[userN * (userN - 1) + (userN - 2)];
//Copy current values to u1 and u1 to u2
copyCurrToPrev << <numBlocks, threadsPerBlock>> > (u, u1, u2, threadsPerBlock, numOfElementsPerBlock);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "copyCurrToPrev launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching copyCurrToPrev!\n", cudaStatus);
goto Error;
}
printf("u[256,256] at iteration %d: %f \n", x+1, u[(userN * (userN / 2)) + userN / 2]);
}
end_t = clock();
printf("\n time taken: %d \n", ((end_t - start_t)));
Error:
//BE FREE MY LOVELIES
hipFree(u);
hipFree(u1);
hipFree(u2);
return cudaStatus;
}
|
8d2493d00fea27d74a45a26013aa511030add18e.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#define N 4 //for sequential only
#define MAX_NUMBER_THREADS 1024
cudaError_t drumWithCuda(int userN, int userNumOfBlocks, int userNumOfThreadsPerBlock, int userNumOfIterations);
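// Interior finite-difference update of the drum surface; boundaries are
// handled by drumKernelsides and the corner updates in host code.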
__global__ void drumKernelinner(double *u, double *u1, double *u2, int threadsPerBlock, int numOfElementsPerBlock, int userN)
{
for (int i = 0; i < numOfElementsPerBlock / threadsPerBlock; i++) {
int j = (threadIdx.x + threadsPerBlock * i) + (blockIdx.x * numOfElementsPerBlock);
double p = 0.5;
double nConst = 0.0002;
if ((j % userN != 0) && (j % userN != userN - 1)) {
if (j > userN && j < ((userN * userN) - userN)) {
u[j] = (p * (u1[j - userN] + u1[j + userN] + u1[j - 1] + u1[j + 1]
- 4 * u1[j]) + (2 * u1[j]) - ((1 - nConst) * u2[j])) / (1 + nConst);
}
}
}
}
__global__ void drumKernelsides(double* u, double* u1, double* u2, int threadsPerBlock, int numOfElementsPerBlock, int userN)
{
for (int i = 0; i < numOfElementsPerBlock / threadsPerBlock; i++) {
int j = (threadIdx.x + threadsPerBlock * i) + (blockIdx.x * numOfElementsPerBlock);
double G = 0.75;
if (j < userN) {
if (j % userN != 0 && j % userN != userN - 1) {
u[j] = G * u[userN + j];
}
}
if (j % userN == 0) {
if ((!(j < userN)) && (j < ((userN * userN) - userN))) {
u[j] = G * u[j + 1];
}
}
if (j % userN == userN - 1) {
if ((!(j < userN)) && (j < ((userN * userN) - userN))) {
u[j] = G * u[j - 1];
}
}
if (j > ((userN * userN) - userN)) {
if (j % userN != 0 && j % userN != userN - 1) {
u[j] = G * u[j - userN];
}
}
}
}
__global__ void copyCurrToPrev(double* u, double* u1, double* u2, int threadsPerBlock, int numOfElementsPerBlock) {
for (int i = 0; i < numOfElementsPerBlock / threadsPerBlock; i++) {
int j = (threadIdx.x + threadsPerBlock * i) + (blockIdx.x * numOfElementsPerBlock);
u2[j] = u1[j];
u1[j] = u[j];
}
}
int main(int argc, char* argv[])
{
char* SeqOrPar = nullptr;
int userN = 0;
int userNumOfBlocks = 0;
int userNumOfThreadsPerBlock = 0;
int userNumOfIterations = 0;
if (argc < 3 || argv[1] == NULL || argv[2] == NULL ||
argv[1] == "-h" || argv[1] == "--help" || argv[1] == "--h") {
printf("Lab3.exe <Sequential> <Number of Iterations> <Drum size is 4x4>\n"
"OR \n <Parallel> <Number of Iterations> + Optional in this order: <Number of Blocks> <Number of Threads Per Block> <Drum Size (N)>\n"
"E.x: Lab3.exe Parallel 12 64 64 512\n");
return 0;
}
else {
if (argv[1] != NULL) {
SeqOrPar = argv[1];
}
if (argv[2] != NULL) {
userNumOfIterations = atoi(argv[2]);
}
if (argv[3] != NULL) {
userNumOfBlocks = atoi(argv[3]);
if (argv[4] != NULL) {
userNumOfThreadsPerBlock = atoi(argv[4]);
if (argv[5] != NULL) {
userN = atoi(argv[5]);
}
}
}
}
if (!strcmp(SeqOrPar, "Sequential")) {
double const p = 0.5;
double const nConst = 0.0002;
double G = 0.75;
double u[N * N] = { 0.0 };
double u1[N * N] = { 0.0 };
double u2[N * N] = { 0.0 };
u1[(N*(N/2)) + N/2] = 1.0;
for (int z = 0; z < userNumOfIterations; z++) {
for (int i = 1; i <= N-2; i++) {
for (int j = 1; j <= N-2; j++) {
u[(i*N) + j] = (p * (u1[((i-1) * N) + j] + u1[((i+1) * N) + j] + u1[(i * N) + (j-1)] + u1[(i * N) + (j + 1)] - 4 * u1[(i*N) + j]) +
2 * u1[(i*N)+j] - (1 - nConst) * u2[(i * N) + j]) / (1 + nConst);
}
}
for (int i = 1; i <= N-2; i++) {
u[i] = G * u[N+i];
u[(N*(N - 1))+i] = G * u[(N * (N - 2)) + i];
u[i*N] = G * u[(i*N) + 1];
u[(i*N) + (N - 1)] = G * u[(i * N) + (N - 2)];
}
u[0] = G * u[N];
u[(N - 1)*N] = G * u[(N - 2) * N];
u[N - 1] = G * u[N - 2];
u[N*(N - 1) + (N - 1)] = G * u[N * (N - 1) + (N - 2)];
memcpy(&u2, &u1, (N * N) * sizeof(double));
memcpy(&u1, &u, (N * N) * sizeof(double));
printf("u[2,2] at iteration %d: %f \n", z+1, u[(N * (N / 2)) + N / 2]);
}
}
else {
drumWithCuda(userN, userNumOfBlocks, userNumOfThreadsPerBlock, userNumOfIterations);
}
return 0;
}
// Helper function that runs the drum finite-difference simulation on the GPU.
cudaError_t drumWithCuda(int userN, int userNumOfBlocks, int userNumOfThreadsPerBlock, int userNumOfIterations)
{
cudaError_t cudaStatus;
clock_t start_t, end_t;
double *u, *u1, *u2;
double G = 0.75;
if (userN == 0) {
userN = 512;
printf("Drum size assumed to be 512x512 \n");
}
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
//Mallocing u, u1, u2
cudaStatus = cudaMallocManaged((void**)& u, (userN * userN) * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc of u failed!");
goto Error;
}
cudaStatus = cudaMallocManaged((void**)& u1, (userN * userN) * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc of u1 failed!");
goto Error;
}
cudaStatus = cudaMallocManaged((void**)& u2, (userN * userN) * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc of u2 failed!");
goto Error;
}
//initializing to 0
memset(&u[0], 0, (userN * userN) * sizeof(double));
memset(&u1[0], 0, (userN * userN) * sizeof(double));
memset(&u2[0], 0, (userN * userN) * sizeof(double));
//
u1[(userN * (userN / 2)) + userN / 2] = 1.0;
int numBlocks = 0;
int threadsPerBlock = 0;
int numOfElementsPerBlock = 0;
//Verifying and validating the user input
if (userN == 4) {
numBlocks = 1;
threadsPerBlock = 16;
}
else {
userN = 512;
numBlocks = userNumOfBlocks;
threadsPerBlock = userNumOfThreadsPerBlock;
if ((numBlocks*threadsPerBlock > userN*userN) || threadsPerBlock > MAX_NUMBER_THREADS) {
printf("Using more threads or blocks than needed\nRan with maximum number of blocks and threads per block \n");
threadsPerBlock = MAX_NUMBER_THREADS;
numBlocks = 16;
}
if (userNumOfBlocks == 0 || userNumOfThreadsPerBlock == 0) {
numBlocks = ((userN * userN + (MAX_NUMBER_THREADS - 1)) / MAX_NUMBER_THREADS);
threadsPerBlock = ((userN * userN + (numBlocks - 1)) / numBlocks);
}
}
numOfElementsPerBlock = userN * userN / numBlocks;
printf("Drum Dimension: %dx%d\n", userN, userN);
printf("Number of Blocks: %d\n", numBlocks);
printf("Number of Threads Per Block: %d\n", threadsPerBlock);
printf("Number of Elements Per Thread: %d\n", (userN * userN) / (numBlocks*threadsPerBlock));
start_t = clock();
for (int x = 0; x < userNumOfIterations; x++) {
// Launch a kernel on the GPU with one thread for each element.
drumKernelinner << <numBlocks, threadsPerBlock >> > (u, u1, u2, threadsPerBlock, numOfElementsPerBlock, userN);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "drumKernelinner launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching drumKernelinner!\n", cudaStatus);
goto Error;
}
//Update Sides
drumKernelsides << <numBlocks, threadsPerBlock>> > (u, u1, u2, threadsPerBlock, numOfElementsPerBlock, userN);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "drumKernelSides launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching drumKernelSides!\n", cudaStatus);
goto Error;
}
//update corners
u[0] = G * u[userN];
u[(userN - 1)* userN] = G * u[(userN - 2) * userN];
u[userN - 1] = G * u[userN - 2];
u[userN *(userN - 1) + (userN - 1)] = G * u[userN * (userN - 1) + (userN - 2)];
//Copy current values to u1 and u1 to u2
copyCurrToPrev << <numBlocks, threadsPerBlock>> > (u, u1, u2, threadsPerBlock, numOfElementsPerBlock);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "copyCurrToPrev launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching copyCurrToPrev!\n", cudaStatus);
goto Error;
}
printf("u[256,256] at iteration %d: %f \n", x+1, u[(userN * (userN / 2)) + userN / 2]);
}
end_t = clock();
printf("\n time taken: %d \n", ((end_t - start_t)));
Error:
//BE FREE MY LOVELIES
cudaFree(u);
cudaFree(u1);
cudaFree(u2);
return cudaStatus;
}
|
623b1b4138c33f652ced7af126636602882760f3.hip
|
// !!! This is a file automatically generated by hipify!!!
// clang-format off
/************************************************************************************\
* *
* Copyright 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
// clang-format on
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
//#include "../../graph_parser/parse.h"
#include <hip/hip_runtime_api.h>
#include "../../graph_parser/util.h"
#include "../../graph_parser/parse.cpp"
#include "../../graph_parser/util.cpp"
#include "kernel.hip"
// Iteration count
#define ITER 20
void print_vectorf(int *vector, int num);
int main(int argc, char **argv) {
char *tmpchar;
int num_nodes;
int num_edges;
int file_format = 1;
bool directed = 0;
hipError_t err = hipSuccess;
if (argc == 3) {
tmpchar = argv[1]; // Graph inputfile
file_format = atoi(argv[2]); // File format
} else {
fprintf(stderr, "You did something wrong!\n");
exit(1);
}
// Allocate the csr structure
csr_array *csr;
// Parse graph files into csr structure
if (file_format == 1) {
// Metis
csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
} else if (file_format == 0) {
// Dimacs9
csr = parseCOO(tmpchar, &num_nodes, &num_edges, 1);
} else if (file_format == 2) {
// Matrix market
csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0);
} else {
printf("reserve for future");
exit(1);
}
// Allocate rank_array
int *rank_array = (int *)malloc(num_nodes * sizeof(int));
if (!rank_array) {
fprintf(stderr, "rank array not allocated successfully\n");
return -1;
}
int *row_d;
int *col_d;
int *inrow_d;
int *incol_d;
int *index_d;
// Create device-side buffers for the graph
err = hipMalloc(&row_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc row_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
err = hipMalloc(&col_d, num_edges * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc col_d (size:%d) => %s\n", num_edges,
hipGetErrorString(err));
return -1;
}
err = hipMalloc(&inrow_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc inrow_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMalloc(&incol_d, num_edges * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc incol_d (size:%d) => %s\n",
num_edges, hipGetErrorString(err));
return -1;
}
// Create buffers for index
err = hipMalloc(&index_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc index_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
double timer1 = gettime();
// Copy the data to the device-side buffers
err = hipMemcpy(row_d, csr->row_array, num_nodes * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR:#endif hipMemcpy row_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(col_d, csr->col_array, num_edges * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy col_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
err = hipMemcpy(inrow_d, csr->inrow_array, num_nodes * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR:#endif hipMemcpy inrow_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(incol_d, csr->incol_array, num_edges * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy incol_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
// Set up work dimensions
int block_size = 256;
int num_blocks = (num_nodes + block_size - 1) / block_size;
dim3 threads(block_size, 1, 1);
dim3 grid(num_blocks, 1, 1);
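// Raise the device-side malloc heap to 4 GB; the initialization kernels are
// assumed to allocate per-vertex/edge data with device-side new.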
hipDeviceSetLimit(hipLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);
double timer3 = gettime();
ChiVertex<int, int> **vertex;
GraphChiContext *context;
err = hipMalloc(&vertex, num_nodes * sizeof(ChiVertex<int, int> *));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc vertex (size:%d) => %s\n", num_edges,
hipGetErrorString(err));
return -1;
}
err = hipMalloc(&context, sizeof(GraphChiContext));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc context (size:%d) => %s\n",
num_edges, hipGetErrorString(err));
return -1;
}
printf("Start initCtx\n");
hipLaunchKernelGGL(( initContext), dim3(1), dim3(1), 0, 0, context, num_nodes, num_edges);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: initCtx failed (%s)\n",
hipGetErrorString(err));
return -1;
}
printf("Start initObj\n");
hipLaunchKernelGGL(( initObject), dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d, inrow_d,
incol_d);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: initObject failed (%s)\n",
hipGetErrorString(err));
return -1;
}
printf("Start initOutEdge\n");
hipLaunchKernelGGL(( initOutEdge), dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: initOutEdge failed (%s)\n",
hipGetErrorString(err));
return -1;
}
// Run BFS for a fixed number of iterations. TODO: add a convergence-based termination check.
for (int i = 0; i < ITER; i++) {
printf("Start BFS\n");
hipLaunchKernelGGL(( BFS), dim3(grid), dim3(threads), 0, 0, vertex, context, i);
printf("Finish BFS\n");
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipLaunch failed (%s)\n",
hipGetErrorString(err));
return -1;
}
}
hipDeviceSynchronize();
double timer4 = gettime();
printf("Start Copyback\n");
hipLaunchKernelGGL(( copyBack), dim3(grid), dim3(threads), 0, 0, vertex, context, index_d);
printf("End Copyback\n");
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipLaunch failed (%s)\n",
hipGetErrorString(err));
return -1;
}
// Copy the rank buffer back
err = hipMemcpy(rank_array, index_d, num_nodes * sizeof(int),
hipMemcpyDeviceToHost);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy() failed (%s)\n",
hipGetErrorString(err));
return -1;
}
double timer2 = gettime();
// Report timing characteristics
printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
#if 1
// Print rank array
print_vectorf(rank_array, num_nodes);
#endif
// Free the host-side arrays
free(rank_array);
csr->freeArrays();
free(csr);
// Free the device buffers
hipFree(row_d);
hipFree(col_d);
hipFree(inrow_d);
hipFree(incol_d);
hipFree(index_d);
return 0;
}
void print_vectorf(int *vector, int num) {
FILE *fp = fopen("result.out", "w");
if (!fp) {
printf("ERROR: unable to open result.out\n");
return;
}
for (int i = 0; i < num; i++) {
fprintf(fp, "%d\n", vector[i]);
}
fclose(fp);
}
|
623b1b4138c33f652ced7af126636602882760f3.cu
|
// clang-format off
/************************************************************************************\
* *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.                                    *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774),   *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
// clang-format on
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
//#include "../../graph_parser/parse.h"
#include <cuda_runtime_api.h>
#include "../../graph_parser/util.h"
#include "../../graph_parser/parse.cpp"
#include "../../graph_parser/util.cpp"
#include "kernel.cu"
// Iteration count
#define ITER 20
void print_vectorf(int *vector, int num);
int main(int argc, char **argv) {
char *tmpchar;
int num_nodes;
int num_edges;
int file_format = 1;
bool directed = 0;
cudaError_t err = cudaSuccess;
if (argc == 3) {
tmpchar = argv[1]; // Graph inputfile
file_format = atoi(argv[2]); // File format
} else {
        fprintf(stderr,
                "Usage: %s <graph file> <file format: 0=DIMACS9/COO, 1=Metis, 2=Matrix Market>\n",
                argv[0]);
exit(1);
}
// Allocate the csr structure
csr_array *csr;
// Parse graph files into csr structure
if (file_format == 1) {
// Metis
csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
} else if (file_format == 0) {
// Dimacs9
csr = parseCOO(tmpchar, &num_nodes, &num_edges, 1);
} else if (file_format == 2) {
// Matrix market
csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0);
} else {
        printf("ERROR: unsupported file format %d (reserved for future use)\n", file_format);
exit(1);
}
// Allocate rank_array
int *rank_array = (int *)malloc(num_nodes * sizeof(int));
if (!rank_array) {
fprintf(stderr, "rank array not allocated successfully\n");
return -1;
}
int *row_d;
int *col_d;
int *inrow_d;
int *incol_d;
int *index_d;
// Create device-side buffers for the graph
err = cudaMalloc(&row_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc row_d (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&col_d, num_edges * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc col_d (size:%d) => %s\n", num_edges,
cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&inrow_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc inrow_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&incol_d, num_edges * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc incol_d (size:%d) => %s\n",
num_edges, cudaGetErrorString(err));
return -1;
}
// Create buffers for index
err = cudaMalloc(&index_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc index_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
double timer1 = gettime();
// Copy the data to the device-side buffers
err = cudaMemcpy(row_d, csr->row_array, num_nodes * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMemcpy row_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(col_d, csr->col_array, num_edges * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMemcpy col_d (size:%d) => %s\n", num_edges,
cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(inrow_d, csr->inrow_array, num_nodes * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMemcpy inrow_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(incol_d, csr->incol_array, num_edges * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy incol_d (size:%d) => %s\n",
                num_edges, cudaGetErrorString(err));
return -1;
}
// Set up work dimensions
int block_size = 256;
int num_blocks = (num_nodes + block_size - 1) / block_size;
dim3 threads(block_size, 1, 1);
dim3 grid(num_blocks, 1, 1);
cudaDeviceSetLimit(cudaLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);
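    // Launch configuration note: num_blocks rounds up so every node is covered,
    // e.g. 1000 nodes with block_size = 256 gives (1000 + 255) / 256 = 4 blocks.
    // The device malloc heap is raised to 4 GiB because the init kernels below
    // presumably build the per-vertex ChiVertex objects with device-side new,
    // which would not fit in the default heap (about 8 MB) for large graphs.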
double timer3 = gettime();
ChiVertex<int, int> **vertex;
GraphChiContext *context;
err = cudaMalloc(&vertex, num_nodes * sizeof(ChiVertex<int, int> *));
if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc vertex (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&context, sizeof(GraphChiContext));
if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc context => %s\n",
                cudaGetErrorString(err));
return -1;
}
printf("Start initCtx\n");
initContext<<<1, 1>>>(context, num_nodes, num_edges);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: initCtx failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
printf("Start initObj\n");
initObject<<<grid, threads>>>(vertex, context, row_d, col_d, inrow_d,
incol_d);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: initObject failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
printf("Start initOutEdge\n");
initOutEdge<<<grid, threads>>>(vertex, context, row_d, col_d);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: initOutEdge failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
    // Run BFS for a fixed number of iterations. TODO: convergence determination
for (int i = 0; i < ITER; i++) {
printf("Start BFS\n");
BFS<<<grid, threads>>>(vertex, context, i);
printf("Finish BFS\n");
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
            fprintf(stderr, "ERROR: BFS kernel launch failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
}
cudaDeviceSynchronize();
double timer4 = gettime();
printf("Start Copyback\n");
copyBack<<<grid, threads>>>(vertex, context, index_d);
printf("End Copyback\n");
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: copyBack kernel launch failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
// Copy the rank buffer back
err = cudaMemcpy(rank_array, index_d, num_nodes * sizeof(int),
cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy() failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
double timer2 = gettime();
// Report timing characteristics
printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
#if 1
// Print rank array
print_vectorf(rank_array, num_nodes);
#endif
// Free the host-side arrays
free(rank_array);
csr->freeArrays();
free(csr);
// Free the device buffers
cudaFree(row_d);
cudaFree(col_d);
cudaFree(inrow_d);
cudaFree(incol_d);
cudaFree(index_d);
return 0;
}
void print_vectorf(int *vector, int num) {
FILE *fp = fopen("result.out", "w");
if (!fp) {
        printf("ERROR: unable to open result.out\n");
        return;
    }
for (int i = 0; i < num; i++) {
fprintf(fp, "%d\n", vector[i]);
}
fclose(fp);
}
|
e353a272a0d24e4d12ae84f686b49adc02f0cebd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
//#include <mpi.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/scan.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/sort.h>
#include "cudamacro.h"
#include "bc2d.h"
// BEST DRAKE 128 1 2
// BEST PizDaint 2 2 256
#define THREADS (128)
#define ROWXTH 2
#define ROWXTHD 1
__device__ __constant__ LOCINT dN;
__device__ __constant__ LOCINT drow_bl;
__device__ __constant__ LOCINT dcol_bl;
__device__ __constant__ LOCINT drow_pp;
__device__ __constant__ int dC;
__device__ __constant__ int dR;
__device__ __constant__ int dmyrow;
__device__ __constant__ int dmycol;
__device__ LOCINT dnfrt;
__device__ LOCINT d_reach_v0;
static LOCINT *d_msk=NULL;
static int *d_lvl=NULL;
static LOCINT *d_col=NULL;
static LOCINT *d_row=NULL;
static LOCINT *d_deg=NULL;
static LOCINT *d_rbuf=NULL;
static LOCINT *d_cbuf=NULL;
static LOCINT *d_cbuf_start=NULL;
static LOCINT *d_sbuf=NULL;
static uint32_t *d_snum=NULL;
static LOCINT *d_frt=NULL;
static LOCINT *d_frt_start=NULL;
static LOCINT *d_frt_sig=NULL;
static LOCINT *d_sig=NULL;
static LOCINT *d_tmp_sig=NULL;
static LOCINT *d_rbuf_sig=NULL;
static LOCINT *d_sbuf_sig=NULL;
static float *d_delta=NULL;
static float *d_fsbuf=NULL;
static float *d_frbuf=NULL;
static float *d_bc=NULL;
static LOCINT *d_reach= NULL;
static LOCINT *d_all = NULL;
hipEvent_t start, stop;
hipStream_t stream[2];
FILE *Fopen(const char *path, const char *mode) {
FILE *fp = NULL;
fp = fopen(path, mode);
if (!fp) {
fprintf(stderr, "Cannot open file %s...\n", path);
exit(EXIT_FAILURE);
}
return fp;
}
void dump_device_array(const char *name, LOCINT *d_arr, int n) {
FILE *fp=NULL;
char fname[MAX_LINE];
int i;
LOCINT *in;
snprintf(fname, MAX_LINE, "%s_%d", name, myid);
fp = Fopen(fname, "a");
in = (LOCINT *)Malloc(n*sizeof(*in));
MY_CUDA_CHECK( hipMemcpy(in, d_arr, n*sizeof(*in), hipMemcpyDeviceToHost) );
for (i = 0; i < n ; i++)
fprintf(fp, " %d,", in[i]);
fprintf(fp, "\n");
fclose(fp);
free(in);
return;
}
void dump_array2(int *arr, int n, const char *name) {
if (outdebug==NULL) return;
int i;
fprintf(outdebug, "%s - %d\n",name, n);
for (i = 0; i < n ; i++)
fprintf(outdebug, " %d,", arr[i]);
fprintf(outdebug, "\n");
return;
}
void dump_uarray2(LOCINT *arr, int n, const char *name) {
if (outdebug==NULL) return;
int i;
fprintf(outdebug, "%s - %d\n",name, n);
for (i = 0; i < n ; i++)
fprintf(outdebug, " %d,", arr[i]);
fprintf(outdebug, "\n");
return;
}
void dump_farray2(float *arr, int n, const char *name) {
if (outdebug==NULL) return;
int i;
fprintf(outdebug, "%s - %d\n",name, n);
for (i = 0; i < n ; i++)
fprintf(outdebug, " %f,", arr[i]);
fprintf(outdebug, "\n");
return;
}
void dump_device_array2(int *d_arr, int n, const char * name) {
if (outdebug==NULL) return;
int i;
int *in;
fprintf(outdebug, "%s - %d\n",name, n);
in = (int *)Malloc(n*sizeof(*in));
MY_CUDA_CHECK( hipMemcpy(in, d_arr, n*sizeof(*in), hipMemcpyDeviceToHost) );
for (i = 0; i < n ; i++)
fprintf(outdebug, " %d,", in[i]);
fprintf(outdebug, "\n");
fflush(outdebug);
free(in);
return;
}
void dump_device_uarray2(LOCINT *d_arr, int n, const char * name) {
if (outdebug==NULL) return;
int i;
LOCINT *in;
fprintf(outdebug, "%s - %d\n",name, n);
in = (LOCINT *)Malloc(n*sizeof(*in));
MY_CUDA_CHECK( hipMemcpy(in, d_arr, n*sizeof(*in), hipMemcpyDeviceToHost) );
for (i = 0; i < n ; i++)
fprintf(outdebug, " %d,", in[i]);
fprintf(outdebug, "\n");
fflush(outdebug);
free(in);
return;
}
void dump_device_farray2(float *d_arr, int n, const char * name) {
if (outdebug==NULL) return;
int i;
float *in;
fprintf(outdebug, "%s - %d\n",name, n);
in = (float *)Malloc(n*sizeof(*in));
MY_CUDA_CHECK( hipMemcpy(in, d_arr, n*sizeof(*d_arr), hipMemcpyDeviceToHost) );
for (i = 0; i < n ; i++)
fprintf(outdebug, " %f,", in[i]);
fprintf(outdebug, "\n");
fflush(outdebug);
free(in);
return;
}
// returns the index of the maximum i | v[i] <= val
__device__ LOCINT bmaxlt(const LOCINT *__restrict__ v, LOCINT num, LOCINT val) {
LOCINT min = 0;
LOCINT max = num-1;
LOCINT mid = max >> 1;
while(min <= max) {
if (v[mid] == val) return mid;
if (v[mid] < val) min = mid+1;
else max = mid-1;
mid = (max>>1)+(min>>1)+((min&max)&1); //(max + min) >> 1
}
return mid;
}
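// Worked example for bmaxlt: with v = {0, 3, 5, 9} and val = 4 it returns index 1
// (v[1] = 3 <= 4 < v[2] = 5). In the scan kernels below, v is the exclusive prefix
// sum of the frontier degrees (cbuf), so bmaxlt(cbuf, ncol, tid) yields a starting
// guess for which frontier vertex owns edge slot tid; the callers then advance the
// index linearly to the exact owner.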
__global__ void read_edge_count(const LOCINT *__restrict__ deg, const LOCINT *__restrict__ rbuf, LOCINT n, LOCINT *cbuf) {
const uint32_t tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid >= n) return;
cbuf[tid] = deg[rbuf[tid]];
return;
}
/*
* write_sigma (d_sbuf+i*ld, d_sig, d_tmp_sig, snum[i], d_sbuf_sig+i*ld);
*/
__global__ void write_sigma(const LOCINT *__restrict__ sbuf, const LOCINT *__restrict__ sigma,
LOCINT * tmp_sig, LOCINT n, LOCINT *sbuf_sig) {
const uint32_t tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid >= n) return;
sbuf_sig[tid] = sigma[sbuf[tid]] + tmp_sig[sbuf[tid]]; // Calculate the total sigma and prepare for sending
tmp_sig[sbuf[tid]] = 0; // So we already have the array zero for next round
return;
}
__global__ void update_bc(const float *__restrict__ delta, int r0, LOCINT n, float *bc, LOCINT *reach, const uint64_t nvisited) {
const uint32_t tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid >= n) return;
if (r0 == tid) {
if (d_reach_v0 > 0) bc[tid] = (bc[tid])+ (d_reach_v0)*(nvisited-2);
return;
}
bc[tid] = (bc[tid]) + delta[tid]*(d_reach_v0 + 1);
return;
}
void update_bc_cuda(uint64_t v0, int ncol, const uint64_t __restrict__ nvisited) {
int r0 = -1;
if (GI2PI(v0) == myrow) {
r0 = GI2LOCI(v0);
}
hipLaunchKernelGGL(( update_bc), dim3((ncol+THREADS-1)/THREADS), dim3(THREADS), 0, 0, d_delta, r0, ncol, d_bc, d_reach, nvisited);
}
void sort_by_degree(LOCINT *deg, LOCINT *bc_order){
thrust::sort_by_key(deg, deg + N, bc_order);
}
__inline__ __device__ int warpReduceSum(int val) {
for (int offset = warpSize/2; offset > 0; offset /= 2)
val += __shfl_down(val, offset);
return val;
}
__inline__ __device__ int blockReduceSum(int val) {
static __shared__ int shared[32];
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = warpReduceSum(val);
if (lane==0) shared[wid]=val; __syncthreads();
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
if (wid==0) val = warpReduceSum(val); //Final reduce within first warp
return val;
}
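// blockReduceSum above is the usual two-stage shuffle reduction: each warp reduces
// its 32 values with __shfl_down, lane 0 of every warp parks the partial sum in
// shared memory, and the first warp then reduces those partials, so thread 0 of
// the block ends up holding the full block sum.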
__global__ void deviceReduceKernel(const LOCINT *__restrict__ in, LOCINT* out, int N, const int * __restrict__ cond) {
LOCINT sum = 0;
const uint32_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < N; i += THREADS/4 * THREADS/4) {
//if (cond[i] > 0) sum+= in[i];
sum += in[i]*(cond[i] > 0 );
// p = in[i];
// if ( cond[i] == -1 ) p=0;
// sum += p;
}
sum = blockReduceSum(sum);
if (threadIdx.x == 0)atomicAdd(out, sum);
}
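// deviceReduceKernel is launched with THREADS/4 blocks of THREADS/4 threads (see
// pre_update_bc_cuda below), so the loop stride THREADS/4 * THREADS/4 equals
// gridDim.x * blockDim.x, i.e. a grid-stride loop. Each thread accumulates in[i]
// only for vertices whose level is positive (cond[i] > 0), and thread 0 of every
// block atomically adds its block's partial sum into *out.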
void pre_update_bc_cuda(LOCINT *reach, uint64_t v0, LOCINT *all){
hipMemsetAsync(d_all,0,sizeof(LOCINT));
hipLaunchKernelGGL(( deviceReduceKernel), dim3(THREADS/4), dim3(THREADS/4), 0, 0, d_reach, d_all, row_pp, d_lvl);
hipMemcpy(all,d_all,sizeof(int),hipMemcpyDeviceToHost);
}
__global__ void write_delta(const LOCINT *__restrict__ frt, const LOCINT *__restrict__ sigma,
const LOCINT *__restrict__ reach,
float *rbuf, LOCINT n, float *sbuf) {
const uint32_t tid = blockDim.x*blockIdx.x + threadIdx.x;
LOCINT i;
if (tid >= n) return;
if (CUDA_ISMYCOLL(frt[tid])) {
        // No race condition since a node appears only once in the frontier
// Calculate delta only for my own vertices
// Here delta is updated using row index
i = CUDA_MYLOCJ2LOCI(frt[tid]);
//sbuf[i] = rbuf[tid] * sigma[i] + reach[i]; // add reach[i]
sbuf[i] = rbuf[tid] * sigma[i];
// Copy back the value into the send-receive buffer
//srbuf[tid] = delta[i] ;
}
rbuf[tid] = 0;
}
LOCINT write_delta_cuda(LOCINT ncol, float *hRFbuf, float *hSFbuf) {
float et=0;
TIMER_DEF(1);
TIMER_START(1);
// Reset send buffer
MY_CUDA_CHECK( hipMemset(d_fsbuf, 0, row_pp*sizeof(*d_fsbuf)) );
if (!ncol) {
TIMER_STOP(1);
goto out;
}
// Copy receive buffer into device memory
MY_CUDA_CHECK( hipMemcpy(d_frbuf, hRFbuf , ncol*sizeof(*hRFbuf), hipMemcpyHostToDevice ));
TIMER_STOP(1);
MY_CUDA_CHECK( hipEventRecord(start, 0) );
// READ_DFRT
hipLaunchKernelGGL(( write_delta), dim3((ncol+THREADS-1)/THREADS), dim3(THREADS), 0, 0, d_rbuf, d_sig, d_reach, d_frbuf, ncol, d_fsbuf);
// Here we have d_delta updated
MY_CUDA_CHECK( hipEventRecord(stop, 0) );
MY_CHECK_ERROR("write_delta");
MY_CUDA_CHECK( hipEventSynchronize(stop) );
MY_CUDA_CHECK( hipEventElapsedTime(&et, start, stop) );
out:
MY_CUDA_CHECK( hipMemcpy(hSFbuf, d_fsbuf, MAX(row_pp,col_bl)*sizeof(*hSFbuf), hipMemcpyDeviceToHost ));
return ncol;
}
__global__ void scan_col_mono(const LOCINT *__restrict__ row, const LOCINT *__restrict__ col, LOCINT nrow,
const LOCINT *__restrict__ rbuf,
const LOCINT *__restrict__ cbuf, LOCINT ncol,
LOCINT *msk, int *lvl, LOCINT* sig, int level,
LOCINT *sbuf, uint32_t *snum) {
// This processes ROWXTH elements together
LOCINT r[ROWXTH];
LOCINT c[ROWXTH]; // Vertex in the current frontier
LOCINT s[ROWXTH]; // Sigma of the vertex in the current frontier
LOCINT m[ROWXTH], q[ROWXTH], i[ROWXTH];
const uint32_t tid = (blockDim.x*blockIdx.x + threadIdx.x)*ROWXTH;
if (tid >= nrow) return;
// Use binary search to calculate predecessor position in the rbuf array
i[0] = bmaxlt(cbuf, /*(tid<ncol)?tid+1:*/ncol, tid);
for(; (i[0]+1 < ncol) && (tid+0) >= cbuf[i[0]+1]; i[0]++); // Here increment i[0]
#pragma unroll
for(int k = 1; k < ROWXTH; k++) {
if (tid+k >= nrow) break;
for(i[k]=i[k-1]; (i[k]+1 < ncol) && (tid+k) >= cbuf[i[k]+1]; i[k]++); // Here increment i[k]
}
#pragma unroll
for(int k = 0; k < ROWXTH; k++) {
if (tid+k >= nrow) break;
c[k] = (rbuf[i[k]]);
s[k] = (sig[c[k]]);
} //c[k] is the predecessor, s[k] is its sigma
    // Here r[k] corresponds to the row, from which the owning processor hproc can be determined
    // col[c[k]]: offset in the CSC where the neighbours of c[k] start
    // row[col[c[k]]]: the first neighbour
    // r[k]: the visited vertex
#pragma unroll
for(int k = 0; k < ROWXTH; k++) {
if (tid+k >= nrow) break;
r[k] = row[col[c[k]]+(tid+k)-cbuf[i[k]]]; // new vertex
}
#pragma unroll
for(int k = 0; k < ROWXTH; k++) {
if (tid+k >= nrow) break;
m[k] = ((LOCINT)1) << (r[k]%BITS(msk)); // its mask value
}
#pragma unroll
for(int k = 0; k < ROWXTH; k++) {
if (tid+k >= nrow) break;
if ((msk[r[k]/BITS(msk)])&m[k]) //continue;
q[k] = m[k]; // the if below will eval to false...
else
q[k] = atomicOr(msk+r[k]/BITS(msk), m[k]);
if (!(m[k]&q[k])) { // New vertex
uint32_t off = atomicInc(snum, 0xFFFFFFFF); // Offset // Increment + 1
// This is the GLOBAL VERTEX !!
sbuf[off] = r[k]; // Copy the new discovered vertex into the sbuf for sending
lvl[r[k]] = level; // Update level
}
if ((lvl[r[k]]) == -1 || (lvl[r[k]]) == level) { // Order in the OR is important! // Update sigma
// Update sigma
atomicAdd(sig + r[k], s[k]);
}
} // end for over k
return;
}
__global__ void scan_col_mono2(const LOCINT *__restrict__ row, const LOCINT *__restrict__ col, LOCINT nrow,
const LOCINT *__restrict__ rbuf,
const LOCINT *__restrict__ cbuf, LOCINT ncol,
LOCINT *msk, int *lvl, LOCINT* sig, int level,
LOCINT *sbuf, uint32_t *snum) {
// This processes ROWXTH elements together
LOCINT r[ROWXTH];
LOCINT c[ROWXTH]; // Vertex in the current frontier
LOCINT m[ROWXTH], q[ROWXTH], i[ROWXTH];
const uint32_t tid = (blockDim.x*blockIdx.x + threadIdx.x)*ROWXTH;
if (tid >= nrow) return;
// Use binary search to calculate predecessor position in the rbuf array
i[0] = bmaxlt(cbuf, /*(tid<ncol)?tid+1:*/ncol, tid);
for(; (i[0]+1 < ncol) && (tid+0) >= cbuf[i[0]+1]; i[0]++); // Here increment i[0]
#pragma unroll
for(int k = 1; k < ROWXTH; k++) {
if (tid+k >= nrow) break;
for(i[k]=i[k-1]; (i[k]+1 < ncol) && (tid+k) >= cbuf[i[k]+1]; i[k]++); // Here increment i[k]
}
#pragma unroll
for(int k = 0; k < ROWXTH; k++) {
if (tid+k >= nrow) break;
c[k] = (rbuf[i[k]]);
} //c[k] is the predecessor, s[k] is its sigma
    // Here r[k] corresponds to the row, from which the owning processor hproc can be determined
    // col[c[k]]: offset in the CSC where the neighbours of c[k] start
    // row[col[c[k]]]: the first neighbour
    // r[k]: the visited vertex
#pragma unroll
for(int k = 0; k < ROWXTH; k++) {
if (tid+k >= nrow) break;
r[k] = row[col[c[k]]+(tid+k)-cbuf[i[k]]]; // new vertex
}
#pragma unroll
for(int k = 0; k < ROWXTH; k++) {
if (tid+k >= nrow) break;
m[k] = ((LOCINT)1) << (r[k]%BITS(msk)); // its mask value
}
#pragma unroll
for(int k = 0; k < ROWXTH; k++) {
if (tid+k >= nrow) break;
if ((msk[r[k]/BITS(msk)])&m[k]) //continue;
q[k] = m[k]; // the if below will eval to false...
else
q[k] = atomicOr(msk+r[k]/BITS(msk), m[k]);
if (!(m[k]&q[k])) { // New vertex
uint32_t off = atomicInc(snum, 0xFFFFFFFF); // Offset // Increment + 1
// This is the GLOBAL VERTEX !!
sbuf[off] = r[k]; // Copy the new discovered vertex into the sbuf for sending
lvl[r[k]] = level; // Update level
}
const int l = (lvl[r[k]]);
if (l == -1 || l == level) { // Order in the OR is important! // Update sigma
// Update sigma
atomicAdd(sig + r[k], (sig[c[k]]));
}
} // end for over k
return;
}
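// The two scan_col kernels above implement the forward (BFS) sweep of the
// betweenness computation on the CSC graph: each thread expands up to ROWXTH edge
// slots of the current frontier, atomically marks newly reached vertices in the
// bitmask, records their level, appends them to sbuf as the next frontier, and
// accumulates the predecessor's sigma (shortest-path count) into the successor.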
__global__ void scan_frt_mono(const LOCINT *__restrict__ row, const LOCINT *__restrict__ col, LOCINT nrow,
const LOCINT *__restrict__ rbuf, const LOCINT *__restrict__ cbuf, LOCINT ncol,
const LOCINT *__restrict__ sigma, float *delta,
const int *__restrict__ lvl, int depth) {
// This processes ROWXTH elements together
LOCINT r[ROWXTHD];
LOCINT c[ROWXTHD]; // Vertex in the current frontier
LOCINT i[ROWXTHD];
float a;
const uint32_t tid = (blockDim.x*blockIdx.x + threadIdx.x)*ROWXTHD;
if (tid >= nrow) return;
// Use binary search to calculate predecessor position in the rbuf array
i[0] = bmaxlt(cbuf, ncol, tid);
for(; (i[0]+1 < ncol) && (tid+0) >= cbuf[i[0]+1]; i[0]++); // Here increment i[0]
#pragma unroll
for(int k = 1; k < ROWXTHD; k++) {
if (tid+k >= nrow) break;
for(i[k]=i[k-1]; (i[k]+1 < ncol) && (tid+k) >= (cbuf[i[k]+1]); i[k]++); // Here increment i[k]
}
#pragma unroll
for(int k = 0; k < ROWXTHD; k++) {
if (tid+k >= nrow) break;
c[k] = (rbuf[i[k]]); } //c[k] is the vertex in the input buffer
    // Here r[k] corresponds to the row, from which the owning processor hproc can be determined
    // col[c[k]]: offset in the CSC where the neighbours of c[k] start
    // row[col[c[k]]]: the first neighbour
    // r[k]: the visited vertex
#pragma unroll
for(int k = 0; k < ROWXTHD; k++) {
if (tid+k >= nrow) break;
r[k] = row[col[c[k]]+(tid+k)-cbuf[i[k]]]; // new vertex
}
#pragma unroll
for (int k = 0; k < ROWXTHD; k++) {
if (tid+k >= nrow) break;
if (lvl[r[k]] == depth+1) { // this is a successor
// sigma and delta are indexed by row
a = ((delta[r[k]]) + 1)/sigma[r[k]]*sigma[c[k]];
// IN SINGLE DEVICE we multiply a * sigma[c[k]]
// Need to add into the SRbuffer using the same index used to access rbuf
atomicAdd(delta+c[k], a);
}
} // end for over k
return;
}
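// scan_frt_mono above is the backward (dependency accumulation) sweep: for every
// edge from a frontier vertex c to a successor r one level deeper it adds
// delta(c) += sigma(c) / sigma(r) * (1 + delta(r)), which is Brandes' dependency
// recurrence evaluated per edge with atomics.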
__global__ void append_row(const LOCINT *__restrict__ row, const LOCINT *__restrict__ row_sig, LOCINT n,
const LOCINT *__restrict__ cbuf, LOCINT np,
LOCINT *msk, const LOCINT * __restrict__ reach, int *lvl,
int level, LOCINT *frt, LOCINT *tmp_sig, LOCINT * frt_sig, uint32_t *all) {
LOCINT r, m, q, s;
const uint32_t tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid >= n) return;
r = row[tid];
s = row_sig[tid];
m = ((LOCINT)1) << (r%BITS(msk));
if (!(msk[r/BITS(msk)]&m)) { // Check if the vertex was already visited
q = atomicOr(msk+r/BITS(msk), m); // Mark visited
if (!(m&q)) { // Check if the vertex was already visited
uint32_t off = atomicInc(&dnfrt, 0xFFFFFFFF);
frt[off] = r; // Still Global
frt_sig[off] = 0;
lvl[r] = level;
}
}
if (lvl[r] == level || lvl[r] == -1) {
// Update sigma with the value provided
atomicAdd(tmp_sig+r, s);
}
return;
}
//hipLaunchKernelGGL(( append_sigma), dim3((nfrt+THREADS-1)/THREADS), dim3(THREADS), 0, 0, d_frt, d_sig, d_frt_sig, d_tmp_sig, nfrt);
__global__ void append_sigma(LOCINT * sbuf, LOCINT * sigma, LOCINT *sbuf_sig, LOCINT * tmp_sig, LOCINT n) {
const uint32_t tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid >= n) return;
sbuf_sig[tid] = sbuf_sig[tid] + tmp_sig[sbuf[tid]]; // this is d_sbuf_sig
sigma[sbuf[tid]] = sbuf_sig[tid];
tmp_sig[sbuf[tid]] = 0;
sbuf[tid] = CUDA_MYLOCI2LOCJ(sbuf[tid]); // Row index to Column Index
return;
}
static size_t tot_dev_mem = 0;
static void *CudaMallocSet(size_t size, int val) {
void *ptr;
MY_CUDA_CHECK( hipMalloc(&ptr, size) );
MY_CUDA_CHECK( hipMemset(ptr, val, size) );
tot_dev_mem += size;
return ptr;
}
void *CudaMallocHostSet(size_t size, int val) {
void *ptr;
MY_CUDA_CHECK( hipHostMalloc(&ptr, size) );
memset(ptr, val, size);
return ptr;
}
void CudaFreeHost(void *ptr) {
MY_CUDA_CHECK( hipHostFree(ptr) );
return;
}
__global__ void set_degree(LOCINT *col, LOCINT *deg, LOCINT n) {
const uint32_t tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid >= n) return;
deg[tid] = col[tid+1] - col[tid];
return;
}
void set_mlp_cuda(LOCINT row, int level, int sigma) {
LOCINT v;
MY_CUDA_CHECK( hipMemcpy(&v, d_msk+row/BITS(d_msk), sizeof(v), hipMemcpyDeviceToHost) );
v |= (1ULL<<(row%BITS(d_msk)));
MY_CUDA_CHECK( hipMemcpy(d_msk+row/BITS(d_msk), &v, sizeof(*d_msk), hipMemcpyHostToDevice) );
MY_CUDA_CHECK( hipMemcpy(d_lvl+row, &level, sizeof(level), hipMemcpyHostToDevice) );
MY_CUDA_CHECK( hipMemcpy(d_sig+row, &sigma, sizeof(sigma), hipMemcpyHostToDevice) );
MY_CUDA_CHECK( hipMemcpy(d_frt, &row, sizeof(row), hipMemcpyHostToDevice) );
return;
}
__global__ void compact(LOCINT *col, LOCINT *row, LOCINT *deg, LOCINT *msk) {
int n;
LOCINT *v;
int bid = threadIdx.x;
int lid = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
int goff = 0;
int bcount = 0;
__shared__ uint32_t sh_v[32];
v = row + col[blockIdx.x];
n = deg[blockIdx.x];
    // sync()s in the loop don't cause a stall because on
    // every iteration each warp has either all of its threads
    // still cycling, or some cycling and some already returned.
for(; bid < n; bid += blockDim.x) {
LOCINT s = v[bid];
uint32_t m;
int t;
m = ((LOCINT)1) << (s%BITS(msk));
t = (msk[s/BITS(msk)]&m) == 0;
m = __ballot(t);
if (lid == wid) sh_v[wid] = __popc(m);
bcount = __syncthreads_count(t);
if (wid == 0) {
uint32_t k;
uint32_t l = sh_v[lid];
for (k=1; k <= 16; k <<= 1) {
// source th is always present so shfl
// never returns the calling th's var
int r = __shfl_up((int)l, k, 32);
if (lid >= k) l += r;
}
sh_v[lid] = l;
}
uint32_t i = __popc(m & ((1<<lid)-1));
__syncthreads();
uint32_t off = (!wid) ? 0 : sh_v[wid-1];
if (t) v[goff + off + i] = s;
goff += bcount;
}
if (threadIdx.x == 0) deg[blockIdx.x] = goff;
return;
}
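// The compact kernel above is a per-adjacency-list stream compaction: one block
// handles one column, keeps only the neighbours whose mask bit is still clear,
// and uses __ballot/__popc plus a __shfl_up prefix scan of the per-warp counts to
// compute each surviving element's write offset, then stores the compacted degree
// back into deg[blockIdx.x].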
/*
* scan_frt_csc_cuda(frt, ncol, depth, hRFbuf);
*/
LOCINT scan_frt_csc_cuda_mono(int offset, int ncol, int depth) {
LOCINT i;
int blocks, nrow=0;
float et=0;
TIMER_DEF(1);
LOCINT *d_ncbuf;
TIMER_START(1);
if (!ncol) {
TIMER_STOP(1);
goto out;
}
#ifdef ONEPREFIX
nrow = tlvl[depth+1];
d_ncbuf = d_cbuf_start+offset;
#else
static thrust::device_ptr<LOCINT> d_val(d_cbuf);
// calculate degree for each vertex in frt
hipLaunchKernelGGL(( read_edge_count), dim3((ncol+THREADS-1)/THREADS), dim3(THREADS), 0, 0, d_deg, d_frt_start+offset, ncol, d_cbuf);
MY_CUDA_CHECK( hipMemcpy(&i, d_cbuf+ncol-1, sizeof(*d_cbuf), hipMemcpyDeviceToHost) );
nrow = i;
// Prefix sum to count how many threads to launch
thrust::exclusive_scan(d_val, d_val+ncol, d_val);
MY_CUDA_CHECK( hipMemcpy(&i, d_cbuf+ncol-1, sizeof(*d_cbuf), hipMemcpyDeviceToHost) );
nrow += i;
d_ncbuf = d_cbuf;
#endif
if (!nrow) {
TIMER_STOP(1);
goto out;
}
TIMER_STOP(1);
MY_CUDA_CHECK( hipEventRecord(start, 0) );
blocks = (((nrow+ROWXTHD-1)/ROWXTHD)+THREADS-1)/THREADS;
//dump_device_farray2(d_delta, row_pp, "d_delta");
// Store result directly into d_delta
hipLaunchKernelGGL(( scan_frt_mono), dim3(blocks), dim3(THREADS), 0, 0, d_row, d_col, nrow, d_frt_start+offset, d_ncbuf, ncol,
d_sig, d_delta, d_lvl, depth);
MY_CUDA_CHECK( hipEventRecord(stop, 0) );
MY_CHECK_ERROR("scan_frt");
MY_CUDA_CHECK( hipEventSynchronize(stop) );
MY_CUDA_CHECK( hipEventElapsedTime(&et, start, stop) );
out:
return ncol;
}
/**
*/
LOCINT scan_col_csc_cuda_mono(int ncol, int level) {
int blocks;
LOCINT i;
float et=0;
LOCINT nfrt=0, nrow=0;
#ifdef ONEPREFIX
#ifdef THRUST
thrust::device_ptr<LOCINT> d_val(d_cbuf);
#endif
int *d_out = NULL;
#else
#ifdef THRUST
static thrust::device_ptr<LOCINT> d_val(d_cbuf);
#endif
#endif
TIMER_DEF(1);
TIMER_DEF(2);
TIMER_START(1);
MY_CUDA_CHECK( hipMemset(d_snum, 0, sizeof(*d_snum)) );
hipLaunchKernelGGL(( read_edge_count), dim3((ncol+THREADS-1)/THREADS), dim3(THREADS), 0, 0, d_deg, d_frt, ncol, d_cbuf);
MY_CHECK_ERROR("read_edge_count");
MY_CUDA_CHECK( hipDeviceSynchronize() );
MY_CUDA_CHECK( hipMemcpy(&i, d_cbuf+ncol-1, sizeof(*d_cbuf), hipMemcpyDeviceToHost) );
nrow = i;
// Prefix sum to count how many threads to launch
thrust::exclusive_scan(d_val, d_val+ncol, d_val);
MY_CUDA_CHECK( hipMemcpy(&i, d_cbuf+ncol-1, sizeof(*d_cbuf), hipMemcpyDeviceToHost) );
nrow += i;
#ifdef ONEPREFIX
tlvl[level] = nrow;
#endif
if (!nrow) {
TIMER_STOP(1);
goto out;
}
TIMER_STOP(1);
MY_CUDA_CHECK( hipEventRecord(start, 0) );
blocks = (((nrow+ROWXTH-1)/ROWXTH)+THREADS-1)/THREADS;
hipLaunchKernelGGL(( scan_col_mono2), dim3(blocks), dim3(THREADS), 0, 0, d_row, d_col, nrow, d_frt, d_cbuf, ncol, d_msk, d_lvl,
d_sig, level, d_frt+ncol, d_snum);
// Here we have d_sbuf updated with the new discovered vertices and d_tmp_sig with the local value of the accumulated sigma
MY_CUDA_CHECK( hipEventRecord(stop, 0) );
MY_CHECK_ERROR("scan_col");
MY_CUDA_CHECK( hipEventSynchronize(stop) );
MY_CUDA_CHECK( hipEventElapsedTime(&et, start, stop) );
//dump_device_uarray2(d_sig, row_pp, "scan_col d_sig 2");
out:
TIMER_START(2);
// Prepare sbuf to send vertices to other processors (We need to send Sigma as well
// copy d_snum back into CPU
MY_CUDA_CHECK( hipMemcpy(&nfrt, d_snum, sizeof(nfrt), hipMemcpyDeviceToHost) );
//dump_device_uarray2(d_frt+ncol, nfrt, "scan_col d_frt 3");
d_frt = d_frt + ncol;
#ifdef ONEPREFIX
d_cbuf = d_cbuf + ncol;
#endif
TIMER_STOP(2);
return nfrt;
}
LOCINT append_rows_cuda(LOCINT *rbuf, LOCINT ld, int *rnum, int np,
LOCINT *frt, LOCINT *frt_sigma, LOCINT nfrt, int level) {
float et=0;
LOCINT nrow=0;
LOCINT p, q;
LOCINT ld2 = ld*2;
TIMER_DEF(1);
TIMER_START(1);
nrow = 0;
for(int i = 0; i < np; i++) {
if (rnum[i]) {
MY_CUDA_CHECK( hipMemcpy(d_rbuf+nrow, rbuf+i*ld2, rnum[i]*sizeof(*rbuf), hipMemcpyHostToDevice) );
MY_CUDA_CHECK( hipMemcpy(d_rbuf_sig+nrow, rbuf+i*ld2+rnum[i], rnum[i]*sizeof(*rbuf), hipMemcpyHostToDevice) );
nrow += rnum[i];
}
}
if (nrow > 0) {
// MY_CUDA_CHECK( hipMemcpy(d_rbuf, rbuf, nrow*sizeof(*rbuf), hipMemcpyHostToDevice) );
// in-place prefix-sum of rnum (too small to bother thrust)
p = rnum[0]; rnum[0] = 0;
for(int i = 1; i < np; i++) {
q = rnum[i];
            rnum[i] = p + rnum[i-1];
p = q;
}
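        // After the exclusive scan rnum[i] holds the offset of processor i's
        // chunk in d_rbuf, e.g. counts {3, 2, 5} become offsets {0, 3, 5};
        // the offsets are copied to d_cbuf and consumed by append_row below.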
MY_CUDA_CHECK( hipMemcpy(d_cbuf, rnum, np*sizeof(*rnum), hipMemcpyHostToDevice) );
MY_CUDA_CHECK( hipMemcpyToSymbol(dnfrt, &nfrt, sizeof(dnfrt), 0, hipMemcpyHostToDevice) );
TIMER_STOP(1);
//dump_device_array2(d_rbuf_sig, nrow, "append d_rbuf_sig");
//dump_device_array2(d_frt, nfrt, "append d_frt 1 ");
//dump_device_array2(d_frt_sig, nfrt, "append d_frt_sig 1 ");
//dump_device_array2(d_tmp_sig, row_pp, "append d_tmp_sig 1");
MY_CUDA_CHECK( hipEventRecord(start, 0) );
// Here update d_sbuf and d_sig, after that we need to update d_sbuf_sig
        // UPDATE DFRT: replaced d_pred with d_reach... 7th argument
hipLaunchKernelGGL(( append_row), dim3((nrow+THREADS-1)/THREADS), dim3(THREADS), 0, 0, d_rbuf, d_rbuf_sig, nrow, d_cbuf, np,
d_msk, d_reach, d_lvl, level, d_frt, d_tmp_sig, d_frt_sig, d_all);
MY_CUDA_CHECK( hipEventRecord(stop, 0) );
MY_CHECK_ERROR("append_row");
MY_CUDA_CHECK( hipEventSynchronize(stop) );
MY_CUDA_CHECK( hipEventElapsedTime(&et, start, stop) );
//if (myid == 0) fprintf(stdout, "\tappend_row time = %f + %f\n", TIMER_ELAPSED(1)/1.0E+6, et/1.0E3);
MY_CUDA_CHECK( hipMemcpyFromSymbol(&nfrt, dnfrt, sizeof(nfrt), 0, hipMemcpyDeviceToHost) );
}
if (nfrt > 0) {
MY_CUDA_CHECK( hipEventRecord(start, 0) );
// READ DFRT
hipLaunchKernelGGL(( append_sigma), dim3((nfrt+THREADS-1)/THREADS), dim3(THREADS), 0, 0, d_frt, d_sig, d_frt_sig, d_tmp_sig, nfrt);
MY_CUDA_CHECK( hipEventRecord(stop, 0) );
MY_CHECK_ERROR("append_sigma");
MY_CUDA_CHECK( hipEventSynchronize(stop) );
MY_CUDA_CHECK( hipEventElapsedTime(&et, start, stop) );
// Add new vertices to the frontier
MY_CUDA_CHECK( hipMemcpy(frt, d_frt, nfrt*sizeof(*d_frt), hipMemcpyDeviceToHost) );
MY_CUDA_CHECK( hipMemcpy(frt_sigma, d_frt_sig, nfrt*sizeof(*d_frt_sig), hipMemcpyDeviceToHost) );
}
//dump_device_array2(d_frt, nfrt, "append d_frt 3");
//dump_device_array2(d_frt_sig, nfrt, "append d_frt_sig 3");
//dump_device_array2(d_tmp_sig, row_pp, "append d_sbuf_sig 3");
//dump_device_array2(d_sig, row_pp, "append d_sig 3");
return nfrt;
}
void get_lvl(int *lvl) {
MY_CUDA_CHECK( hipMemcpy(lvl+(mycol*row_bl), d_lvl+(mycol*row_bl), row_bl*sizeof(*d_lvl), hipMemcpyDeviceToHost) );
}
void set_lvl(int *lvl) {
MY_CUDA_CHECK( hipMemcpy(d_lvl, lvl, row_pp*sizeof(*lvl), hipMemcpyHostToDevice) );
}
void get_all(LOCINT *all) {
MY_CUDA_CHECK( hipMemcpy(all, d_all, sizeof(*d_all), hipMemcpyDeviceToHost) );
// and set to zero for next bc
MY_CUDA_CHECK(hipMemset(d_all, 0,sizeof(*d_all)));
}
void get_frt(LOCINT *frt) {
MY_CUDA_CHECK( hipMemcpy(frt, d_frt_start, row_pp*sizeof(LOCINT), hipMemcpyDeviceToHost) );
}
void get_cbuf(LOCINT *cbuf) {
MY_CUDA_CHECK( hipMemcpy(cbuf, d_cbuf_start, row_pp*sizeof(LOCINT), hipMemcpyDeviceToHost) );
}
void get_msk(LOCINT *msk) {
MY_CUDA_CHECK( hipMemcpy(msk, d_msk, ((row_pp+BITS(d_msk)-1)/BITS(d_msk))*sizeof(*d_msk), hipMemcpyDeviceToHost) );
}
void get_deg(LOCINT *deg) {
MY_CUDA_CHECK( hipMemcpy(deg, d_deg, col_bl*sizeof(*d_deg), hipMemcpyDeviceToHost) );
}
void get_sigma(LOCINT *sigma) {
MY_CUDA_CHECK( hipMemcpy(sigma+(mycol*row_bl), d_sig+(mycol*row_bl), row_bl*sizeof(*d_sig), hipMemcpyDeviceToHost) );
}
void get_bc(float *bc) {
MY_CUDA_CHECK( hipMemcpy(bc, d_bc, row_pp*sizeof(*d_bc), hipMemcpyDeviceToHost) );
}
void set_sigma(LOCINT *sigma) {
MY_CUDA_CHECK( hipMemcpy(d_sig, sigma, row_pp*sizeof(*sigma), hipMemcpyHostToDevice) );
}
__global__ void set_delta(float *srbuf, float * delta, int nrow) {
const uint32_t tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid >= nrow) return;
delta[tid] += srbuf[tid];
srbuf[tid] = 0;
return;
}
int set_delta_cuda(float *hSRbuf, int nrow) {
float et=0;
TIMER_DEF(1);
TIMER_START(1);
if (!nrow) {
TIMER_STOP(1);
return nrow;
}
//MY_CUDA_CHECK( hipMemset(d_frbuf, 0, row_pp*sizeof(*d_frbuf)) );
MY_CUDA_CHECK( hipMemcpy(d_frbuf, hSRbuf , nrow*sizeof(*hSRbuf), hipMemcpyHostToDevice ));
MY_CUDA_CHECK( hipEventRecord(start, 0) );
hipLaunchKernelGGL(( set_delta), dim3((nrow+THREADS-1)/THREADS), dim3(THREADS), 0, 0, d_frbuf, d_delta, nrow);
MY_CUDA_CHECK( hipEventRecord(stop, 0) );
MY_CHECK_ERROR("set_delta");
MY_CUDA_CHECK( hipEventSynchronize(stop) );
MY_CUDA_CHECK( hipEventElapsedTime(&et, start, stop) );
return nrow;
}
void init_bc_1degree_device(LOCINT *reach) {
//MY_CUDA_CHECK( hipMemcpy(d_bc, bc_val, row_pp*sizeof(*bc_val), hipMemcpyHostToDevice) );
MY_CUDA_CHECK( hipMemcpy(d_reach, reach, row_pp*sizeof(*reach), hipMemcpyHostToDevice) );
return;
}
__global__ void init_delta(LOCINT *reach, float * delta, int nrow) {
const uint32_t tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid >= nrow) return;
delta[tid] = (float)reach[tid];
return;
}
void setcuda(uint64_t ned, LOCINT *col, LOCINT *row, LOCINT reach_v0) {
MY_CUDA_CHECK( hipMemset(d_lvl,-1, row_pp*sizeof(*d_lvl)) );
MY_CUDA_CHECK( hipMemset(d_sig, 0, row_pp*sizeof(*d_sig)) );
hipLaunchKernelGGL(( init_delta), dim3((row_pp+THREADS-1)/THREADS), dim3(THREADS), 0, 0, d_reach, d_delta, row_pp);
MY_CUDA_CHECK( hipMemset(d_msk, 0, ((row_pp+BITS(d_msk)-1)/BITS(d_msk))*sizeof(*d_msk)) );
MY_CUDA_CHECK( hipMemcpyToSymbol(d_reach_v0, &reach_v0, sizeof(d_reach_v0), 0, hipMemcpyHostToDevice) );
#ifdef ONEPREFIX
memset(tlvl, 0, sizeof(*tlvl)*MAX_LVL);
d_cbuf = d_cbuf_start;
#endif
d_frt = d_frt_start;
return;
}
size_t initcuda(uint64_t ned, LOCINT *col, LOCINT *row) {
int dev;
dev = 0; //FORCED
MY_CUDA_CHECK( hipSetDevice(dev) );
d_col = (LOCINT *)CudaMallocSet((col_bl+1)*sizeof(*d_col), 0);
d_row = (LOCINT *)CudaMallocSet(ned*sizeof(*d_row), 0);
MY_CUDA_CHECK( hipMemcpy(d_col, col, (col_bl+1)*sizeof(*col), hipMemcpyHostToDevice) );
MY_CUDA_CHECK( hipMemcpy(d_row, row, ned*sizeof(*row), hipMemcpyHostToDevice) );
d_deg = (LOCINT *)CudaMallocSet(col_bl*sizeof(*d_deg), 0);
hipLaunchKernelGGL(( set_degree), dim3((col_bl+THREADS-1)/THREADS), dim3(THREADS), 0, 0, d_col, d_deg, col_bl);
if (!mono) { // Run Multi-GPU code
printf("MPI REMOVED\n");
} else {
#ifdef ONEPREFIX
d_cbuf_start = (LOCINT *)CudaMallocSet(MAX(row_bl,row_pp)*sizeof(*d_cbuf_start), 0);
#else
d_cbuf_start = (LOCINT *)CudaMallocSet(MAX(col_bl, C)*sizeof(*d_cbuf_start), 0);
#endif
}
d_sbuf = (LOCINT *)CudaMallocSet(MAX(row_bl,row_pp)*sizeof(*d_sbuf), 0);
d_snum = (uint32_t *)CudaMallocSet((C+1)*sizeof(*d_snum), 0);
d_msk = (LOCINT *)CudaMallocSet(((row_pp+BITS(d_msk)-1)/BITS(d_msk))*sizeof(*d_msk), 0);
d_lvl = (int *)CudaMallocSet(row_pp*sizeof(*d_lvl), -1);
d_all = (uint32_t *)CudaMallocSet(1*sizeof(*d_all), 0);
d_frt = d_sbuf;
d_frt_start = d_sbuf;
d_cbuf = d_cbuf_start;
d_sig = (LOCINT *)CudaMallocSet(row_pp*sizeof(*d_sig), 0);
d_delta = (float *)CudaMallocSet(row_pp*sizeof(*d_delta), 0);
d_bc = (float *)CudaMallocSet(row_pp*sizeof(*d_bc), 0);
d_reach = (LOCINT*)CudaMallocSet(row_pp*sizeof(*d_reach), 0);
printf("ROWBL = %i - ROWPP = %i\n",row_bl,row_pp);
MY_CUDA_CHECK( hipEventCreate(&start) );
MY_CUDA_CHECK( hipEventCreate(&stop) );
MY_CUDA_CHECK( hipStreamCreate(stream+0) );
MY_CUDA_CHECK( hipStreamCreate(stream+1) );
MY_CUDA_CHECK( hipMemcpyToSymbol(dN, &N, sizeof(dN), 0, hipMemcpyHostToDevice) );
MY_CUDA_CHECK( hipMemcpyToSymbol(dC, &C, sizeof(dC), 0, hipMemcpyHostToDevice) );
MY_CUDA_CHECK( hipMemcpyToSymbol(dR, &R, sizeof(dR), 0, hipMemcpyHostToDevice) );
MY_CUDA_CHECK( hipMemcpyToSymbol(dmyrow, &myrow, sizeof(dmyrow), 0, hipMemcpyHostToDevice) );
MY_CUDA_CHECK( hipMemcpyToSymbol(dmycol, &mycol, sizeof(dmycol), 0, hipMemcpyHostToDevice) );
MY_CUDA_CHECK( hipMemcpyToSymbol(drow_bl, &row_bl, sizeof(drow_bl), 0, hipMemcpyHostToDevice) );
MY_CUDA_CHECK( hipMemcpyToSymbol(dcol_bl, &col_bl, sizeof(dcol_bl), 0, hipMemcpyHostToDevice) );
MY_CUDA_CHECK( hipMemcpyToSymbol(drow_pp, &row_pp, sizeof(drow_pp), 0, hipMemcpyHostToDevice) );
MY_CUDA_CHECK( hipFuncSetCacheConfig(read_edge_count, hipFuncCachePreferL1) );
MY_CUDA_CHECK( hipFuncSetCacheConfig(update_bc, hipFuncCachePreferL1) );
MY_CUDA_CHECK( hipFuncSetCacheConfig(deviceReduceKernel, hipFuncCachePreferL1) );
if (!mono){
printf("MPI REMOVED\n");
}
else{
// set cache mono
MY_CUDA_CHECK( hipFuncSetCacheConfig(scan_frt_mono, hipFuncCachePreferL1) );
MY_CUDA_CHECK( hipFuncSetCacheConfig(scan_col_mono, hipFuncCachePreferL1) );
}
return tot_dev_mem;
}
void fincuda() {
MY_CUDA_CHECK( hipFree(d_col) );
MY_CUDA_CHECK( hipFree(d_deg) ); //////////////////////
MY_CUDA_CHECK( hipFree(d_row) );
MY_CUDA_CHECK( hipFree(d_rbuf) );
MY_CUDA_CHECK( hipFree(d_cbuf_start) );
MY_CUDA_CHECK( hipFree(d_sbuf) );
MY_CUDA_CHECK( hipFree(d_snum) );
MY_CUDA_CHECK( hipFree(d_msk) );
MY_CUDA_CHECK( hipFree(d_lvl) );
MY_CUDA_CHECK( hipFree(d_tmp_sig) );
MY_CUDA_CHECK( hipFree(d_sig) );
MY_CUDA_CHECK( hipFree(d_rbuf_sig) );
MY_CUDA_CHECK( hipFree(d_sbuf_sig) );
MY_CUDA_CHECK( hipFree(d_frbuf) );
MY_CUDA_CHECK( hipFree(d_fsbuf) );
MY_CUDA_CHECK( hipFree(d_delta) );
MY_CUDA_CHECK( hipFree(d_bc) );
MY_CUDA_CHECK( hipEventDestroy(start) );
MY_CUDA_CHECK( hipEventDestroy(stop) );
MY_CUDA_CHECK( hipStreamDestroy(stream[0]) );
MY_CUDA_CHECK( hipStreamDestroy(stream[1]) );
return;
}
|
e353a272a0d24e4d12ae84f686b49adc02f0cebd.cu
|
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/time.h>
#include <cuda.h>
//#include <mpi.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/scan.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/sort.h>
#include "cudamacro.h"
#include "bc2d.h"
// BEST DRAKE 128 1 2
// BEST PizDaint 2 2 256
#define THREADS (128)
#define ROWXTH 2
#define ROWXTHD 1
__device__ __constant__ LOCINT dN;
__device__ __constant__ LOCINT drow_bl;
__device__ __constant__ LOCINT dcol_bl;
__device__ __constant__ LOCINT drow_pp;
__device__ __constant__ int dC;
__device__ __constant__ int dR;
__device__ __constant__ int dmyrow;
__device__ __constant__ int dmycol;
__device__ LOCINT dnfrt;
__device__ LOCINT d_reach_v0;
static LOCINT *d_msk=NULL;
static int *d_lvl=NULL;
static LOCINT *d_col=NULL;
static LOCINT *d_row=NULL;
static LOCINT *d_deg=NULL;
static LOCINT *d_rbuf=NULL;
static LOCINT *d_cbuf=NULL;
static LOCINT *d_cbuf_start=NULL;
static LOCINT *d_sbuf=NULL;
static uint32_t *d_snum=NULL;
static LOCINT *d_frt=NULL;
static LOCINT *d_frt_start=NULL;
static LOCINT *d_frt_sig=NULL;
static LOCINT *d_sig=NULL;
static LOCINT *d_tmp_sig=NULL;
static LOCINT *d_rbuf_sig=NULL;
static LOCINT *d_sbuf_sig=NULL;
static float *d_delta=NULL;
static float *d_fsbuf=NULL;
static float *d_frbuf=NULL;
static float *d_bc=NULL;
static LOCINT *d_reach= NULL;
static LOCINT *d_all = NULL;
cudaEvent_t start, stop;
cudaStream_t stream[2];
FILE *Fopen(const char *path, const char *mode) {
FILE *fp = NULL;
fp = fopen(path, mode);
if (!fp) {
fprintf(stderr, "Cannot open file %s...\n", path);
exit(EXIT_FAILURE);
}
return fp;
}
void dump_device_array(const char *name, LOCINT *d_arr, int n) {
FILE *fp=NULL;
char fname[MAX_LINE];
int i;
LOCINT *in;
snprintf(fname, MAX_LINE, "%s_%d", name, myid);
fp = Fopen(fname, "a");
in = (LOCINT *)Malloc(n*sizeof(*in));
MY_CUDA_CHECK( cudaMemcpy(in, d_arr, n*sizeof(*in), cudaMemcpyDeviceToHost) );
for (i = 0; i < n ; i++)
fprintf(fp, " %d,", in[i]);
fprintf(fp, "\n");
fclose(fp);
free(in);
return;
}
void dump_array2(int *arr, int n, const char *name) {
if (outdebug==NULL) return;
int i;
fprintf(outdebug, "%s - %d\n",name, n);
for (i = 0; i < n ; i++)
fprintf(outdebug, " %d,", arr[i]);
fprintf(outdebug, "\n");
return;
}
void dump_uarray2(LOCINT *arr, int n, const char *name) {
if (outdebug==NULL) return;
int i;
fprintf(outdebug, "%s - %d\n",name, n);
for (i = 0; i < n ; i++)
fprintf(outdebug, " %d,", arr[i]);
fprintf(outdebug, "\n");
return;
}
void dump_farray2(float *arr, int n, const char *name) {
if (outdebug==NULL) return;
int i;
fprintf(outdebug, "%s - %d\n",name, n);
for (i = 0; i < n ; i++)
fprintf(outdebug, " %f,", arr[i]);
fprintf(outdebug, "\n");
return;
}
void dump_device_array2(int *d_arr, int n, const char * name) {
if (outdebug==NULL) return;
int i;
int *in;
fprintf(outdebug, "%s - %d\n",name, n);
in = (int *)Malloc(n*sizeof(*in));
MY_CUDA_CHECK( cudaMemcpy(in, d_arr, n*sizeof(*in), cudaMemcpyDeviceToHost) );
for (i = 0; i < n ; i++)
fprintf(outdebug, " %d,", in[i]);
fprintf(outdebug, "\n");
fflush(outdebug);
free(in);
return;
}
void dump_device_uarray2(LOCINT *d_arr, int n, const char * name) {
if (outdebug==NULL) return;
int i;
LOCINT *in;
fprintf(outdebug, "%s - %d\n",name, n);
in = (LOCINT *)Malloc(n*sizeof(*in));
MY_CUDA_CHECK( cudaMemcpy(in, d_arr, n*sizeof(*in), cudaMemcpyDeviceToHost) );
for (i = 0; i < n ; i++)
fprintf(outdebug, " %d,", in[i]);
fprintf(outdebug, "\n");
fflush(outdebug);
free(in);
return;
}
void dump_device_farray2(float *d_arr, int n, const char * name) {
if (outdebug==NULL) return;
int i;
float *in;
fprintf(outdebug, "%s - %d\n",name, n);
in = (float *)Malloc(n*sizeof(*in));
MY_CUDA_CHECK( cudaMemcpy(in, d_arr, n*sizeof(*d_arr), cudaMemcpyDeviceToHost) );
for (i = 0; i < n ; i++)
fprintf(outdebug, " %f,", in[i]);
fprintf(outdebug, "\n");
fflush(outdebug);
free(in);
return;
}
// returns the index of the maximum i | v[i] <= val
__device__ LOCINT bmaxlt(const LOCINT *__restrict__ v, LOCINT num, LOCINT val) {
LOCINT min = 0;
LOCINT max = num-1;
LOCINT mid = max >> 1;
while(min <= max) {
if (v[mid] == val) return mid;
if (v[mid] < val) min = mid+1;
else max = mid-1;
mid = (max>>1)+(min>>1)+((min&max)&1); //(max + min) >> 1
}
return mid;
}
__global__ void read_edge_count(const LOCINT *__restrict__ deg, const LOCINT *__restrict__ rbuf, LOCINT n, LOCINT *cbuf) {
const uint32_t tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid >= n) return;
cbuf[tid] = deg[rbuf[tid]];
return;
}
/*
* write_sigma (d_sbuf+i*ld, d_sig, d_tmp_sig, snum[i], d_sbuf_sig+i*ld);
*/
__global__ void write_sigma(const LOCINT *__restrict__ sbuf, const LOCINT *__restrict__ sigma,
LOCINT * tmp_sig, LOCINT n, LOCINT *sbuf_sig) {
const uint32_t tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid >= n) return;
sbuf_sig[tid] = sigma[sbuf[tid]] + tmp_sig[sbuf[tid]]; // Calculate the total sigma and prepare for sending
tmp_sig[sbuf[tid]] = 0; // So we already have the array zero for next round
return;
}
__global__ void update_bc(const float *__restrict__ delta, int r0, LOCINT n, float *bc, LOCINT *reach, const uint64_t nvisited) {
const uint32_t tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid >= n) return;
if (r0 == tid) {
if (d_reach_v0 > 0) bc[tid] = (bc[tid])+ (d_reach_v0)*(nvisited-2);
return;
}
bc[tid] = (bc[tid]) + delta[tid]*(d_reach_v0 + 1);
return;
}
void update_bc_cuda(uint64_t v0, int ncol, const uint64_t __restrict__ nvisited) {
int r0 = -1;
if (GI2PI(v0) == myrow) {
r0 = GI2LOCI(v0);
}
update_bc<<<(ncol+THREADS-1)/THREADS, THREADS>>>(d_delta, r0, ncol, d_bc, d_reach, nvisited);
}
void sort_by_degree(LOCINT *deg, LOCINT *bc_order){
thrust::sort_by_key(deg, deg + N, bc_order);
}
__inline__ __device__ int warpReduceSum(int val) {
for (int offset = warpSize/2; offset > 0; offset /= 2)
val += __shfl_down(val, offset);
return val;
}
__inline__ __device__ int blockReduceSum(int val) {
static __shared__ int shared[32];
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = warpReduceSum(val);
if (lane==0) shared[wid]=val; __syncthreads();
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
if (wid==0) val = warpReduceSum(val); //Final reduce within first warp
return val;
}
__global__ void deviceReduceKernel(const LOCINT *__restrict__ in, LOCINT* out, int N, const int * __restrict__ cond) {
LOCINT sum = 0;
const uint32_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < N; i += THREADS/4 * THREADS/4) {
//if (cond[i] > 0) sum+= in[i];
sum += in[i]*(cond[i] > 0 );
// p = in[i];
// if ( cond[i] == -1 ) p=0;
// sum += p;
}
sum = blockReduceSum(sum);
if (threadIdx.x == 0)atomicAdd(out, sum);
}
void pre_update_bc_cuda(LOCINT *reach, uint64_t v0, LOCINT *all){
cudaMemsetAsync(d_all,0,sizeof(LOCINT));
deviceReduceKernel<<<THREADS/4, THREADS/4>>>(d_reach, d_all, row_pp, d_lvl);
cudaMemcpy(all,d_all,sizeof(int),cudaMemcpyDeviceToHost);
}
__global__ void write_delta(const LOCINT *__restrict__ frt, const LOCINT *__restrict__ sigma,
const LOCINT *__restrict__ reach,
float *rbuf, LOCINT n, float *sbuf) {
const uint32_t tid = blockDim.x*blockIdx.x + threadIdx.x;
LOCINT i;
if (tid >= n) return;
if (CUDA_ISMYCOLL(frt[tid])) {
        // No race condition since a node appears only once in the frontier
// Calculate delta only for my own vertices
// Here delta is updated using row index
i = CUDA_MYLOCJ2LOCI(frt[tid]);
//sbuf[i] = rbuf[tid] * sigma[i] + reach[i]; // add reach[i]
sbuf[i] = rbuf[tid] * sigma[i];
// Copy back the value into the send-receive buffer
//srbuf[tid] = delta[i] ;
}
rbuf[tid] = 0;
}
LOCINT write_delta_cuda(LOCINT ncol, float *hRFbuf, float *hSFbuf) {
float et=0;
TIMER_DEF(1);
TIMER_START(1);
// Reset send buffer
MY_CUDA_CHECK( cudaMemset(d_fsbuf, 0, row_pp*sizeof(*d_fsbuf)) );
if (!ncol) {
TIMER_STOP(1);
goto out;
}
// Copy receive buffer into device memory
MY_CUDA_CHECK( cudaMemcpy(d_frbuf, hRFbuf , ncol*sizeof(*hRFbuf), cudaMemcpyHostToDevice ));
TIMER_STOP(1);
MY_CUDA_CHECK( cudaEventRecord(start, 0) );
// READ_DFRT
write_delta<<<(ncol+THREADS-1)/THREADS, THREADS>>>(d_rbuf, d_sig, d_reach, d_frbuf, ncol, d_fsbuf);
// Here we have d_delta updated
MY_CUDA_CHECK( cudaEventRecord(stop, 0) );
MY_CHECK_ERROR("write_delta");
MY_CUDA_CHECK( cudaEventSynchronize(stop) );
MY_CUDA_CHECK( cudaEventElapsedTime(&et, start, stop) );
out:
MY_CUDA_CHECK( cudaMemcpy(hSFbuf, d_fsbuf, MAX(row_pp,col_bl)*sizeof(*hSFbuf), cudaMemcpyDeviceToHost ));
return ncol;
}
__global__ void scan_col_mono(const LOCINT *__restrict__ row, const LOCINT *__restrict__ col, LOCINT nrow,
const LOCINT *__restrict__ rbuf,
const LOCINT *__restrict__ cbuf, LOCINT ncol,
LOCINT *msk, int *lvl, LOCINT* sig, int level,
LOCINT *sbuf, uint32_t *snum) {
// This processes ROWXTH elements together
LOCINT r[ROWXTH];
LOCINT c[ROWXTH]; // Vertex in the current frontier
LOCINT s[ROWXTH]; // Sigma of the vertex in the current frontier
LOCINT m[ROWXTH], q[ROWXTH], i[ROWXTH];
const uint32_t tid = (blockDim.x*blockIdx.x + threadIdx.x)*ROWXTH;
if (tid >= nrow) return;
// Use binary search to calculate predecessor position in the rbuf array
i[0] = bmaxlt(cbuf, /*(tid<ncol)?tid+1:*/ncol, tid);
for(; (i[0]+1 < ncol) && (tid+0) >= cbuf[i[0]+1]; i[0]++); // Here increment i[0]
#pragma unroll
for(int k = 1; k < ROWXTH; k++) {
if (tid+k >= nrow) break;
for(i[k]=i[k-1]; (i[k]+1 < ncol) && (tid+k) >= cbuf[i[k]+1]; i[k]++); // Here increment i[k]
}
#pragma unroll
for(int k = 0; k < ROWXTH; k++) {
if (tid+k >= nrow) break;
c[k] = (rbuf[i[k]]);
s[k] = (sig[c[k]]);
} //c[k] is the predecessor, s[k] is its sigma
    // Here r[k] corresponds to the row, from which the owning processor hproc can be determined
    // col[c[k]]: offset in the CSC where the neighbours of c[k] start
    // row[col[c[k]]]: the first neighbour
    // r[k]: the visited vertex
#pragma unroll
for(int k = 0; k < ROWXTH; k++) {
if (tid+k >= nrow) break;
r[k] = row[col[c[k]]+(tid+k)-cbuf[i[k]]]; // new vertex
}
#pragma unroll
for(int k = 0; k < ROWXTH; k++) {
if (tid+k >= nrow) break;
m[k] = ((LOCINT)1) << (r[k]%BITS(msk)); // its mask value
}
#pragma unroll
for(int k = 0; k < ROWXTH; k++) {
if (tid+k >= nrow) break;
if ((msk[r[k]/BITS(msk)])&m[k]) //continue;
q[k] = m[k]; // the if below will eval to false...
else
q[k] = atomicOr(msk+r[k]/BITS(msk), m[k]);
if (!(m[k]&q[k])) { // New vertex
uint32_t off = atomicInc(snum, 0xFFFFFFFF); // Offset // Increment + 1
// This is the GLOBAL VERTEX !!
sbuf[off] = r[k]; // Copy the new discovered vertex into the sbuf for sending
lvl[r[k]] = level; // Update level
}
if ((lvl[r[k]]) == -1 || (lvl[r[k]]) == level) { // Order in the OR is important! // Update sigma
// Update sigma
atomicAdd(sig + r[k], s[k]);
}
} // end for over k
return;
}
__global__ void scan_col_mono2(const LOCINT *__restrict__ row, const LOCINT *__restrict__ col, LOCINT nrow,
const LOCINT *__restrict__ rbuf,
const LOCINT *__restrict__ cbuf, LOCINT ncol,
LOCINT *msk, int *lvl, LOCINT* sig, int level,
LOCINT *sbuf, uint32_t *snum) {
// This processes ROWXTH elements together
LOCINT r[ROWXTH];
LOCINT c[ROWXTH]; // Vertex in the current frontier
LOCINT m[ROWXTH], q[ROWXTH], i[ROWXTH];
const uint32_t tid = (blockDim.x*blockIdx.x + threadIdx.x)*ROWXTH;
if (tid >= nrow) return;
// Use binary search to calculate predecessor position in the rbuf array
i[0] = bmaxlt(cbuf, /*(tid<ncol)?tid+1:*/ncol, tid);
for(; (i[0]+1 < ncol) && (tid+0) >= cbuf[i[0]+1]; i[0]++); // Here increment i[0]
#pragma unroll
for(int k = 1; k < ROWXTH; k++) {
if (tid+k >= nrow) break;
for(i[k]=i[k-1]; (i[k]+1 < ncol) && (tid+k) >= cbuf[i[k]+1]; i[k]++); // Here increment i[k]
}
#pragma unroll
for(int k = 0; k < ROWXTH; k++) {
if (tid+k >= nrow) break;
c[k] = (rbuf[i[k]]);
} //c[k] is the predecessor, s[k] is its sigma
    // Here r[k] corresponds to the row, from which the owning processor hproc can be determined
    // col[c[k]]: offset in the CSC where the neighbours of c[k] start
    // row[col[c[k]]]: the first neighbour
    // r[k]: the visited vertex
#pragma unroll
for(int k = 0; k < ROWXTH; k++) {
if (tid+k >= nrow) break;
r[k] = row[col[c[k]]+(tid+k)-cbuf[i[k]]]; // new vertex
}
#pragma unroll
for(int k = 0; k < ROWXTH; k++) {
if (tid+k >= nrow) break;
m[k] = ((LOCINT)1) << (r[k]%BITS(msk)); // its mask value
}
#pragma unroll
for(int k = 0; k < ROWXTH; k++) {
if (tid+k >= nrow) break;
if ((msk[r[k]/BITS(msk)])&m[k]) //continue;
q[k] = m[k]; // the if below will eval to false...
else
q[k] = atomicOr(msk+r[k]/BITS(msk), m[k]);
if (!(m[k]&q[k])) { // New vertex
uint32_t off = atomicInc(snum, 0xFFFFFFFF); // Offset // Increment + 1
// This is the GLOBAL VERTEX !!
sbuf[off] = r[k]; // Copy the new discovered vertex into the sbuf for sending
lvl[r[k]] = level; // Update level
}
const int l = (lvl[r[k]]);
if (l == -1 || l == level) { // Order in the OR is important! // Update sigma
// Update sigma
atomicAdd(sig + r[k], (sig[c[k]]));
}
} // end for over k
return;
}
__global__ void scan_frt_mono(const LOCINT *__restrict__ row, const LOCINT *__restrict__ col, LOCINT nrow,
const LOCINT *__restrict__ rbuf, const LOCINT *__restrict__ cbuf, LOCINT ncol,
const LOCINT *__restrict__ sigma, float *delta,
const int *__restrict__ lvl, int depth) {
// This processes ROWXTH elements together
LOCINT r[ROWXTHD];
LOCINT c[ROWXTHD]; // Vertex in the current frontier
LOCINT i[ROWXTHD];
float a;
const uint32_t tid = (blockDim.x*blockIdx.x + threadIdx.x)*ROWXTHD;
if (tid >= nrow) return;
// Use binary search to calculate predecessor position in the rbuf array
i[0] = bmaxlt(cbuf, ncol, tid);
for(; (i[0]+1 < ncol) && (tid+0) >= cbuf[i[0]+1]; i[0]++); // Here increment i[0]
#pragma unroll
for(int k = 1; k < ROWXTHD; k++) {
if (tid+k >= nrow) break;
for(i[k]=i[k-1]; (i[k]+1 < ncol) && (tid+k) >= (cbuf[i[k]+1]); i[k]++); // Here increment i[k]
}
#pragma unroll
for(int k = 0; k < ROWXTHD; k++) {
if (tid+k >= nrow) break;
c[k] = (rbuf[i[k]]); } //c[k] is the vertex in the input buffer
    // Here r[k] corresponds to the row, from which the owning processor hproc can be determined
    // col[c[k]]: offset in the CSC where the neighbours of c[k] start
    // row[col[c[k]]]: the first neighbour
    // r[k]: the visited vertex
#pragma unroll
for(int k = 0; k < ROWXTHD; k++) {
if (tid+k >= nrow) break;
r[k] = row[col[c[k]]+(tid+k)-cbuf[i[k]]]; // new vertex
}
#pragma unroll
for (int k = 0; k < ROWXTHD; k++) {
if (tid+k >= nrow) break;
if (lvl[r[k]] == depth+1) { // this is a successor
// sigma and delta are indexed by row
a = ((delta[r[k]]) + 1)/sigma[r[k]]*sigma[c[k]];
// IN SINGLE DEVICE we multiply a * sigma[c[k]]
// Need to add into the SRbuffer using the same index used to access rbuf
atomicAdd(delta+c[k], a);
}
} // end for over k
return;
}
__global__ void append_row(const LOCINT *__restrict__ row, const LOCINT *__restrict__ row_sig, LOCINT n,
const LOCINT *__restrict__ cbuf, LOCINT np,
LOCINT *msk, const LOCINT * __restrict__ reach, int *lvl,
int level, LOCINT *frt, LOCINT *tmp_sig, LOCINT * frt_sig, uint32_t *all) {
LOCINT r, m, q, s;
const uint32_t tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid >= n) return;
r = row[tid];
s = row_sig[tid];
m = ((LOCINT)1) << (r%BITS(msk));
if (!(msk[r/BITS(msk)]&m)) { // Check if the vertex was already visited
q = atomicOr(msk+r/BITS(msk), m); // Mark visited
if (!(m&q)) { // Check if the vertex was already visited
uint32_t off = atomicInc(&dnfrt, 0xFFFFFFFF);
frt[off] = r; // Still Global
frt_sig[off] = 0;
lvl[r] = level;
}
}
if (lvl[r] == level || lvl[r] == -1) {
// Update sigma with the value provided
atomicAdd(tmp_sig+r, s);
}
return;
}
// append_sigma<<<(nfrt+THREADS-1)/THREADS, THREADS>>>(d_frt, d_sig, d_frt_sig, d_tmp_sig, nfrt);
__global__ void append_sigma(LOCINT * sbuf, LOCINT * sigma, LOCINT *sbuf_sig, LOCINT * tmp_sig, LOCINT n) {
const uint32_t tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid >= n) return;
sbuf_sig[tid] = sbuf_sig[tid] + tmp_sig[sbuf[tid]]; // this is d_sbuf_sig
sigma[sbuf[tid]] = sbuf_sig[tid];
tmp_sig[sbuf[tid]] = 0;
sbuf[tid] = CUDA_MYLOCI2LOCJ(sbuf[tid]); // Row index to Column Index
return;
}
static size_t tot_dev_mem = 0;
static void *CudaMallocSet(size_t size, int val) {
void *ptr;
MY_CUDA_CHECK( cudaMalloc(&ptr, size) );
MY_CUDA_CHECK( cudaMemset(ptr, val, size) );
tot_dev_mem += size;
return ptr;
}
void *CudaMallocHostSet(size_t size, int val) {
void *ptr;
MY_CUDA_CHECK( cudaMallocHost(&ptr, size) );
memset(ptr, val, size);
return ptr;
}
void CudaFreeHost(void *ptr) {
MY_CUDA_CHECK( cudaFreeHost(ptr) );
return;
}
__global__ void set_degree(LOCINT *col, LOCINT *deg, LOCINT n) {
const uint32_t tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid >= n) return;
deg[tid] = col[tid+1] - col[tid];
return;
}
void set_mlp_cuda(LOCINT row, int level, int sigma) {
LOCINT v;
MY_CUDA_CHECK( cudaMemcpy(&v, d_msk+row/BITS(d_msk), sizeof(v), cudaMemcpyDeviceToHost) );
v |= (1ULL<<(row%BITS(d_msk)));
MY_CUDA_CHECK( cudaMemcpy(d_msk+row/BITS(d_msk), &v, sizeof(*d_msk), cudaMemcpyHostToDevice) );
MY_CUDA_CHECK( cudaMemcpy(d_lvl+row, &level, sizeof(level), cudaMemcpyHostToDevice) );
MY_CUDA_CHECK( cudaMemcpy(d_sig+row, &sigma, sizeof(sigma), cudaMemcpyHostToDevice) );
MY_CUDA_CHECK( cudaMemcpy(d_frt, &row, sizeof(row), cudaMemcpyHostToDevice) );
return;
}
__global__ void compact(LOCINT *col, LOCINT *row, LOCINT *deg, LOCINT *msk) {
int n;
LOCINT *v;
int bid = threadIdx.x;
int lid = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
int goff = 0;
int bcount = 0;
__shared__ uint32_t sh_v[32];
v = row + col[blockIdx.x];
n = deg[blockIdx.x];
// The __syncthreads() calls in the loop do not cause a stall because, in
// every iteration, each warp either has all of its threads still looping
// or a mix of looping threads and threads that have already returned.
for(; bid < n; bid += blockDim.x) {
LOCINT s = v[bid];
uint32_t m;
int t;
m = ((LOCINT)1) << (s%BITS(msk));
t = (msk[s/BITS(msk)]&m) == 0;
m = __ballot(t);
if (lid == wid) sh_v[wid] = __popc(m);
bcount = __syncthreads_count(t);
if (wid == 0) {
uint32_t k;
uint32_t l = sh_v[lid];
for (k=1; k <= 16; k <<= 1) {
// source th is always present so shfl
// never returns the calling th's var
int r = __shfl_up((int)l, k, 32);
if (lid >= k) l += r;
}
sh_v[lid] = l;
}
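// At this point sh_v holds an inclusive prefix sum of the per-warp survivor
// counts (a Kogge-Stone style scan built from __shfl_up), so sh_v[wid-1]
// below gives the write offset of warp wid within this pass.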
uint32_t i = __popc(m & ((1<<lid)-1));
__syncthreads();
uint32_t off = (!wid) ? 0 : sh_v[wid-1];
if (t) v[goff + off + i] = s;
goff += bcount;
}
if (threadIdx.x == 0) deg[blockIdx.x] = goff;
return;
}
/*
* scan_frt_csc_cuda(frt, ncol, depth, hRFbuf);
*/
LOCINT scan_frt_csc_cuda_mono(int offset, int ncol, int depth) {
LOCINT i;
int blocks, nrow=0;
float et=0;
TIMER_DEF(1);
LOCINT *d_ncbuf;
TIMER_START(1);
if (!ncol) {
TIMER_STOP(1);
goto out;
}
#ifdef ONEPREFIX
nrow = tlvl[depth+1];
d_ncbuf = d_cbuf_start+offset;
#else
static thrust::device_ptr<LOCINT> d_val(d_cbuf);
// calculate degree for each vertex in frt
read_edge_count<<<(ncol+THREADS-1)/THREADS, THREADS>>>(d_deg, d_frt_start+offset, ncol, d_cbuf);
MY_CUDA_CHECK( cudaMemcpy(&i, d_cbuf+ncol-1, sizeof(*d_cbuf), cudaMemcpyDeviceToHost) );
nrow = i;
// Prefix sum to count how many threads to launch
thrust::exclusive_scan(d_val, d_val+ncol, d_val);
MY_CUDA_CHECK( cudaMemcpy(&i, d_cbuf+ncol-1, sizeof(*d_cbuf), cudaMemcpyDeviceToHost) );
nrow += i;
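// nrow is now the exclusive prefix sum at the last slot plus the last degree,
// i.e. the total number of adjacency entries to visit for this frontier.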
d_ncbuf = d_cbuf;
#endif
if (!nrow) {
TIMER_STOP(1);
goto out;
}
TIMER_STOP(1);
MY_CUDA_CHECK( cudaEventRecord(start, 0) );
blocks = (((nrow+ROWXTHD-1)/ROWXTHD)+THREADS-1)/THREADS;
//dump_device_farray2(d_delta, row_pp, "d_delta");
// Store result directly into d_delta
scan_frt_mono<<<blocks, THREADS>>>(d_row, d_col, nrow, d_frt_start+offset, d_ncbuf, ncol,
d_sig, d_delta, d_lvl, depth);
MY_CUDA_CHECK( cudaEventRecord(stop, 0) );
MY_CHECK_ERROR("scan_frt");
MY_CUDA_CHECK( cudaEventSynchronize(stop) );
MY_CUDA_CHECK( cudaEventElapsedTime(&et, start, stop) );
out:
return ncol;
}
/**
 * Single-GPU BFS expansion step: visits the neighbours of the ncol vertices in
 * the current frontier, marks newly discovered vertices at the given level,
 * accumulates sigma counts and returns the size of the new frontier.
 */
LOCINT scan_col_csc_cuda_mono(int ncol, int level) {
int blocks;
LOCINT i;
float et=0;
LOCINT nfrt=0, nrow=0;
#ifdef ONEPREFIX
#ifdef THRUST
thrust::device_ptr<LOCINT> d_val(d_cbuf);
#endif
int *d_out = NULL;
#else
#ifdef THRUST
static thrust::device_ptr<LOCINT> d_val(d_cbuf);
#endif
#endif
TIMER_DEF(1);
TIMER_DEF(2);
TIMER_START(1);
MY_CUDA_CHECK( cudaMemset(d_snum, 0, sizeof(*d_snum)) );
read_edge_count<<<(ncol+THREADS-1)/THREADS, THREADS>>>(d_deg, d_frt, ncol, d_cbuf);
MY_CHECK_ERROR("read_edge_count");
MY_CUDA_CHECK( cudaDeviceSynchronize() );
MY_CUDA_CHECK( cudaMemcpy(&i, d_cbuf+ncol-1, sizeof(*d_cbuf), cudaMemcpyDeviceToHost) );
nrow = i;
// Prefix sum to count how many threads to launch
thrust::exclusive_scan(d_val, d_val+ncol, d_val);
MY_CUDA_CHECK( cudaMemcpy(&i, d_cbuf+ncol-1, sizeof(*d_cbuf), cudaMemcpyDeviceToHost) );
nrow += i;
#ifdef ONEPREFIX
tlvl[level] = nrow;
#endif
if (!nrow) {
TIMER_STOP(1);
goto out;
}
TIMER_STOP(1);
MY_CUDA_CHECK( cudaEventRecord(start, 0) );
blocks = (((nrow+ROWXTH-1)/ROWXTH)+THREADS-1)/THREADS;
scan_col_mono2<<<blocks, THREADS>>>(d_row, d_col, nrow, d_frt, d_cbuf, ncol, d_msk, d_lvl,
d_sig, level, d_frt+ncol, d_snum);
// Here we have d_sbuf updated with the new discovered vertices and d_tmp_sig with the local value of the accumulated sigma
MY_CUDA_CHECK( cudaEventRecord(stop, 0) );
MY_CHECK_ERROR("scan_col");
MY_CUDA_CHECK( cudaEventSynchronize(stop) );
MY_CUDA_CHECK( cudaEventElapsedTime(&et, start, stop) );
//dump_device_uarray2(d_sig, row_pp, "scan_col d_sig 2");
out:
TIMER_START(2);
// Prepare sbuf to send vertices to other processors (we need to send sigma as well)
// copy d_snum back into CPU
MY_CUDA_CHECK( cudaMemcpy(&nfrt, d_snum, sizeof(nfrt), cudaMemcpyDeviceToHost) );
//dump_device_uarray2(d_frt+ncol, nfrt, "scan_col d_frt 3");
d_frt = d_frt + ncol;
#ifdef ONEPREFIX
d_cbuf = d_cbuf + ncol;
#endif
TIMER_STOP(2);
return nfrt;
}
LOCINT append_rows_cuda(LOCINT *rbuf, LOCINT ld, int *rnum, int np,
LOCINT *frt, LOCINT *frt_sigma, LOCINT nfrt, int level) {
float et=0;
LOCINT nrow=0;
LOCINT p, q;
LOCINT ld2 = ld*2;
TIMER_DEF(1);
TIMER_START(1);
nrow = 0;
for(int i = 0; i < np; i++) {
if (rnum[i]) {
MY_CUDA_CHECK( cudaMemcpy(d_rbuf+nrow, rbuf+i*ld2, rnum[i]*sizeof(*rbuf), cudaMemcpyHostToDevice) );
MY_CUDA_CHECK( cudaMemcpy(d_rbuf_sig+nrow, rbuf+i*ld2+rnum[i], rnum[i]*sizeof(*rbuf), cudaMemcpyHostToDevice) );
nrow += rnum[i];
}
}
if (nrow > 0) {
// MY_CUDA_CHECK( cudaMemcpy(d_rbuf, rbuf, nrow*sizeof(*rbuf), cudaMemcpyHostToDevice) );
// in-place prefix-sum of rnum (too small to bother thrust)
p = rnum[0]; rnum[0] = 0;
for(int i = 1; i < np; i++) {
q = rnum[i];
rnum[i] = p + rnum[i-1];
p = q;
}
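// Example: rnum = {3, 1, 4} becomes {0, 3, 4}; each entry now holds the
// offset at which that processor's rows start inside d_rbuf.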
MY_CUDA_CHECK( cudaMemcpy(d_cbuf, rnum, np*sizeof(*rnum), cudaMemcpyHostToDevice) );
MY_CUDA_CHECK( cudaMemcpyToSymbol(dnfrt, &nfrt, sizeof(dnfrt), 0, cudaMemcpyHostToDevice) );
TIMER_STOP(1);
//dump_device_array2(d_rbuf_sig, nrow, "append d_rbuf_sig");
//dump_device_array2(d_frt, nfrt, "append d_frt 1 ");
//dump_device_array2(d_frt_sig, nfrt, "append d_frt_sig 1 ");
//dump_device_array2(d_tmp_sig, row_pp, "append d_tmp_sig 1");
MY_CUDA_CHECK( cudaEventRecord(start, 0) );
// Here update d_sbuf and d_sig, after that we need to update d_sbuf_sig
// UPDATE DFRT: d_pred replaced with d_reach... 7th argument
append_row<<<(nrow+THREADS-1)/THREADS, THREADS>>>(d_rbuf, d_rbuf_sig, nrow, d_cbuf, np,
d_msk, d_reach, d_lvl, level, d_frt, d_tmp_sig, d_frt_sig, d_all);
MY_CUDA_CHECK( cudaEventRecord(stop, 0) );
MY_CHECK_ERROR("append_row");
MY_CUDA_CHECK( cudaEventSynchronize(stop) );
MY_CUDA_CHECK( cudaEventElapsedTime(&et, start, stop) );
//if (myid == 0) fprintf(stdout, "\tappend_row time = %f + %f\n", TIMER_ELAPSED(1)/1.0E+6, et/1.0E3);
MY_CUDA_CHECK( cudaMemcpyFromSymbol(&nfrt, dnfrt, sizeof(nfrt), 0, cudaMemcpyDeviceToHost) );
}
if (nfrt > 0) {
MY_CUDA_CHECK( cudaEventRecord(start, 0) );
// READ DFRT
append_sigma<<<(nfrt+THREADS-1)/THREADS, THREADS>>>(d_frt, d_sig, d_frt_sig, d_tmp_sig, nfrt);
MY_CUDA_CHECK( cudaEventRecord(stop, 0) );
MY_CHECK_ERROR("append_sigma");
MY_CUDA_CHECK( cudaEventSynchronize(stop) );
MY_CUDA_CHECK( cudaEventElapsedTime(&et, start, stop) );
// Add new vertices to the frontier
MY_CUDA_CHECK( cudaMemcpy(frt, d_frt, nfrt*sizeof(*d_frt), cudaMemcpyDeviceToHost) );
MY_CUDA_CHECK( cudaMemcpy(frt_sigma, d_frt_sig, nfrt*sizeof(*d_frt_sig), cudaMemcpyDeviceToHost) );
}
//dump_device_array2(d_frt, nfrt, "append d_frt 3");
//dump_device_array2(d_frt_sig, nfrt, "append d_frt_sig 3");
//dump_device_array2(d_tmp_sig, row_pp, "append d_sbuf_sig 3");
//dump_device_array2(d_sig, row_pp, "append d_sig 3");
return nfrt;
}
void get_lvl(int *lvl) {
MY_CUDA_CHECK( cudaMemcpy(lvl+(mycol*row_bl), d_lvl+(mycol*row_bl), row_bl*sizeof(*d_lvl), cudaMemcpyDeviceToHost) );
}
void set_lvl(int *lvl) {
MY_CUDA_CHECK( cudaMemcpy(d_lvl, lvl, row_pp*sizeof(*lvl), cudaMemcpyHostToDevice) );
}
void get_all(LOCINT *all) {
MY_CUDA_CHECK( cudaMemcpy(all, d_all, sizeof(*d_all), cudaMemcpyDeviceToHost) );
// and set to zero for next bc
MY_CUDA_CHECK(cudaMemset(d_all, 0,sizeof(*d_all)));
}
void get_frt(LOCINT *frt) {
MY_CUDA_CHECK( cudaMemcpy(frt, d_frt_start, row_pp*sizeof(LOCINT), cudaMemcpyDeviceToHost) );
}
void get_cbuf(LOCINT *cbuf) {
MY_CUDA_CHECK( cudaMemcpy(cbuf, d_cbuf_start, row_pp*sizeof(LOCINT), cudaMemcpyDeviceToHost) );
}
void get_msk(LOCINT *msk) {
MY_CUDA_CHECK( cudaMemcpy(msk, d_msk, ((row_pp+BITS(d_msk)-1)/BITS(d_msk))*sizeof(*d_msk), cudaMemcpyDeviceToHost) );
}
void get_deg(LOCINT *deg) {
MY_CUDA_CHECK( cudaMemcpy(deg, d_deg, col_bl*sizeof(*d_deg), cudaMemcpyDeviceToHost) );
}
void get_sigma(LOCINT *sigma) {
MY_CUDA_CHECK( cudaMemcpy(sigma+(mycol*row_bl), d_sig+(mycol*row_bl), row_bl*sizeof(*d_sig), cudaMemcpyDeviceToHost) );
}
void get_bc(float *bc) {
MY_CUDA_CHECK( cudaMemcpy(bc, d_bc, row_pp*sizeof(*d_bc), cudaMemcpyDeviceToHost) );
}
void set_sigma(LOCINT *sigma) {
MY_CUDA_CHECK( cudaMemcpy(d_sig, sigma, row_pp*sizeof(*sigma), cudaMemcpyHostToDevice) );
}
__global__ void set_delta(float *srbuf, float * delta, int nrow) {
const uint32_t tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid >= nrow) return;
delta[tid] += srbuf[tid];
srbuf[tid] = 0;
return;
}
int set_delta_cuda(float *hSRbuf, int nrow) {
float et=0;
TIMER_DEF(1);
TIMER_START(1);
if (!nrow) {
TIMER_STOP(1);
return nrow;
}
//MY_CUDA_CHECK( cudaMemset(d_frbuf, 0, row_pp*sizeof(*d_frbuf)) );
MY_CUDA_CHECK( cudaMemcpy(d_frbuf, hSRbuf , nrow*sizeof(*hSRbuf), cudaMemcpyHostToDevice ));
MY_CUDA_CHECK( cudaEventRecord(start, 0) );
set_delta<<<(nrow+THREADS-1)/THREADS, THREADS>>>(d_frbuf, d_delta, nrow);
MY_CUDA_CHECK( cudaEventRecord(stop, 0) );
MY_CHECK_ERROR("set_delta");
MY_CUDA_CHECK( cudaEventSynchronize(stop) );
MY_CUDA_CHECK( cudaEventElapsedTime(&et, start, stop) );
return nrow;
}
void init_bc_1degree_device(LOCINT *reach) {
//MY_CUDA_CHECK( cudaMemcpy(d_bc, bc_val, row_pp*sizeof(*bc_val), cudaMemcpyHostToDevice) );
MY_CUDA_CHECK( cudaMemcpy(d_reach, reach, row_pp*sizeof(*reach), cudaMemcpyHostToDevice) );
return;
}
__global__ void init_delta(LOCINT *reach, float * delta, int nrow) {
const uint32_t tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid >= nrow) return;
delta[tid] = (float)reach[tid];
return;
}
void setcuda(uint64_t ned, LOCINT *col, LOCINT *row, LOCINT reach_v0) {
MY_CUDA_CHECK( cudaMemset(d_lvl,-1, row_pp*sizeof(*d_lvl)) );
MY_CUDA_CHECK( cudaMemset(d_sig, 0, row_pp*sizeof(*d_sig)) );
init_delta<<<(row_pp+THREADS-1)/THREADS, THREADS>>>(d_reach, d_delta, row_pp);
MY_CUDA_CHECK( cudaMemset(d_msk, 0, ((row_pp+BITS(d_msk)-1)/BITS(d_msk))*sizeof(*d_msk)) );
MY_CUDA_CHECK( cudaMemcpyToSymbol(d_reach_v0, &reach_v0, sizeof(d_reach_v0), 0, cudaMemcpyHostToDevice) );
#ifdef ONEPREFIX
memset(tlvl, 0, sizeof(*tlvl)*MAX_LVL);
d_cbuf = d_cbuf_start;
#endif
d_frt = d_frt_start;
return;
}
size_t initcuda(uint64_t ned, LOCINT *col, LOCINT *row) {
int dev;
dev = 0; //FORCED
MY_CUDA_CHECK( cudaSetDevice(dev) );
d_col = (LOCINT *)CudaMallocSet((col_bl+1)*sizeof(*d_col), 0);
d_row = (LOCINT *)CudaMallocSet(ned*sizeof(*d_row), 0);
MY_CUDA_CHECK( cudaMemcpy(d_col, col, (col_bl+1)*sizeof(*col), cudaMemcpyHostToDevice) );
MY_CUDA_CHECK( cudaMemcpy(d_row, row, ned*sizeof(*row), cudaMemcpyHostToDevice) );
d_deg = (LOCINT *)CudaMallocSet(col_bl*sizeof(*d_deg), 0);
set_degree<<<(col_bl+THREADS-1)/THREADS, THREADS>>>(d_col, d_deg, col_bl);
if (!mono) { // Run Multi-GPU code
printf("MPI REMOVED\n");
} else {
#ifdef ONEPREFIX
d_cbuf_start = (LOCINT *)CudaMallocSet(MAX(row_bl,row_pp)*sizeof(*d_cbuf_start), 0);
#else
d_cbuf_start = (LOCINT *)CudaMallocSet(MAX(col_bl, C)*sizeof(*d_cbuf_start), 0);
#endif
}
d_sbuf = (LOCINT *)CudaMallocSet(MAX(row_bl,row_pp)*sizeof(*d_sbuf), 0);
d_snum = (uint32_t *)CudaMallocSet((C+1)*sizeof(*d_snum), 0);
d_msk = (LOCINT *)CudaMallocSet(((row_pp+BITS(d_msk)-1)/BITS(d_msk))*sizeof(*d_msk), 0);
d_lvl = (int *)CudaMallocSet(row_pp*sizeof(*d_lvl), -1);
d_all = (uint32_t *)CudaMallocSet(1*sizeof(*d_all), 0);
d_frt = d_sbuf;
d_frt_start = d_sbuf;
d_cbuf = d_cbuf_start;
d_sig = (LOCINT *)CudaMallocSet(row_pp*sizeof(*d_sig), 0);
d_delta = (float *)CudaMallocSet(row_pp*sizeof(*d_delta), 0);
d_bc = (float *)CudaMallocSet(row_pp*sizeof(*d_bc), 0);
d_reach = (LOCINT*)CudaMallocSet(row_pp*sizeof(*d_reach), 0);
printf("ROWBL = %i - ROWPP = %i\n",row_bl,row_pp);
MY_CUDA_CHECK( cudaEventCreate(&start) );
MY_CUDA_CHECK( cudaEventCreate(&stop) );
MY_CUDA_CHECK( cudaStreamCreate(stream+0) );
MY_CUDA_CHECK( cudaStreamCreate(stream+1) );
MY_CUDA_CHECK( cudaMemcpyToSymbol(dN, &N, sizeof(dN), 0, cudaMemcpyHostToDevice) );
MY_CUDA_CHECK( cudaMemcpyToSymbol(dC, &C, sizeof(dC), 0, cudaMemcpyHostToDevice) );
MY_CUDA_CHECK( cudaMemcpyToSymbol(dR, &R, sizeof(dR), 0, cudaMemcpyHostToDevice) );
MY_CUDA_CHECK( cudaMemcpyToSymbol(dmyrow, &myrow, sizeof(dmyrow), 0, cudaMemcpyHostToDevice) );
MY_CUDA_CHECK( cudaMemcpyToSymbol(dmycol, &mycol, sizeof(dmycol), 0, cudaMemcpyHostToDevice) );
MY_CUDA_CHECK( cudaMemcpyToSymbol(drow_bl, &row_bl, sizeof(drow_bl), 0, cudaMemcpyHostToDevice) );
MY_CUDA_CHECK( cudaMemcpyToSymbol(dcol_bl, &col_bl, sizeof(dcol_bl), 0, cudaMemcpyHostToDevice) );
MY_CUDA_CHECK( cudaMemcpyToSymbol(drow_pp, &row_pp, sizeof(drow_pp), 0, cudaMemcpyHostToDevice) );
MY_CUDA_CHECK( cudaFuncSetCacheConfig(read_edge_count, cudaFuncCachePreferL1) );
MY_CUDA_CHECK( cudaFuncSetCacheConfig(update_bc, cudaFuncCachePreferL1) );
MY_CUDA_CHECK( cudaFuncSetCacheConfig(deviceReduceKernel, cudaFuncCachePreferL1) );
if (!mono){
printf("MPI REMOVED\n");
}
else{
// set cache mono
MY_CUDA_CHECK( cudaFuncSetCacheConfig(scan_frt_mono, cudaFuncCachePreferL1) );
MY_CUDA_CHECK( cudaFuncSetCacheConfig(scan_col_mono, cudaFuncCachePreferL1) );
}
return tot_dev_mem;
}
void fincuda() {
MY_CUDA_CHECK( cudaFree(d_col) );
MY_CUDA_CHECK( cudaFree(d_deg) ); //////////////////////
MY_CUDA_CHECK( cudaFree(d_row) );
MY_CUDA_CHECK( cudaFree(d_rbuf) );
MY_CUDA_CHECK( cudaFree(d_cbuf_start) );
MY_CUDA_CHECK( cudaFree(d_sbuf) );
MY_CUDA_CHECK( cudaFree(d_snum) );
MY_CUDA_CHECK( cudaFree(d_msk) );
MY_CUDA_CHECK( cudaFree(d_lvl) );
MY_CUDA_CHECK( cudaFree(d_tmp_sig) );
MY_CUDA_CHECK( cudaFree(d_sig) );
MY_CUDA_CHECK( cudaFree(d_rbuf_sig) );
MY_CUDA_CHECK( cudaFree(d_sbuf_sig) );
MY_CUDA_CHECK( cudaFree(d_frbuf) );
MY_CUDA_CHECK( cudaFree(d_fsbuf) );
MY_CUDA_CHECK( cudaFree(d_delta) );
MY_CUDA_CHECK( cudaFree(d_bc) );
MY_CUDA_CHECK( cudaEventDestroy(start) );
MY_CUDA_CHECK( cudaEventDestroy(stop) );
MY_CUDA_CHECK( cudaStreamDestroy(stream[0]) );
MY_CUDA_CHECK( cudaStreamDestroy(stream[1]) );
return;
}
|
7a63ae3da6e81e06d414344780d6b30b4c917102.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
__global__ void matTranspose(int *matrixA, int *matrixB, int matSize)
{
int threadCol = blockIdx.x * blockDim.x + threadIdx.x;
int threadRow = blockIdx.y * blockDim.y + threadIdx.y;
int srcIndex = threadRow*matSize+threadCol;
int dstnIndex = threadCol*matSize+threadRow;
matrixB[dstnIndex] = matrixA[srcIndex];
}
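/*
 * Illustrative example: with matSize = 3, the thread at (threadRow = 1,
 * threadCol = 2) reads srcIndex = 1*3 + 2 = 5 and writes dstnIndex =
 * 2*3 + 1 = 7, i.e. element (1,2) of matrixA becomes element (2,1) of
 * matrixB, which is the transpose. The kernel assumes the launch grid
 * covers at least matSize x matSize threads.
 */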
void printMatrix(int *matrix, int size, char * matrixName)
{
if(size > 10)
return;
int i = 0;
printf("Printing Matrix: %s\n", matrixName);
for( ; i < size * size ; i ++)
{
if(i % size == 0)
printf("\n");
printf("%-3d ", matrix[i]);
}
printf("\n\n");
}
void checkError(hipError_t error, char * function)
{
if(error != hipSuccess)
{
printf("\"%s\" has a problem with error code %d and desc: %s\n", function, error, hipGetErrorString(error));
exit(-1);
}
}
bool checkIfMatricesEqual(int * mat1, int * mat2, int matSize)
{
int i = 0;
for( ; i < matSize; i++)
if(mat1[i] != mat2[i]){
printf("values different for i: %d\n", i);
printf("mat1[i] = %d, mat2[i] = %d\n", mat1[i], mat2[i]);
return false;
}
return true;
}
void readValue(int *value, char * msg, int lowerBound, int upperBound)
{
while(true)
{
printf("%s(%d-%d): ", msg, lowerBound, upperBound);
scanf("%d", value);
if(*value <= upperBound && *value >= lowerBound)
return;
}
}
int main()
{
//have variables for threads per block, number of blocks.
int threadsPerBlock = 0, blocksInGrid = 0;
//create cuda event variables
hipEvent_t hostStart, hostStop, deviceStart, deviceStop;
float timeDifferenceOnHost, timeDifferenceOnDevice;
//program variables
int matrixSize = 0;
size_t size; //variable to have the size of arrays on device
int *matA, *matB, *matC, *matCFromGPU; //matrices for host
int *gpuMatA, *gpuMatB, *gpuMatC; //matrices for Device
//initialize cuda timing variables
hipEventCreate(&hostStart);
hipEventCreate(&hostStop);
hipEventCreate(&deviceStart);
hipEventCreate(&deviceStop);
printf("Enter the size of the matrix: ");
scanf("%d", &matrixSize);
//calculate the size required on GPU
size = matrixSize * matrixSize * sizeof(int);
matA = (int *)malloc(matrixSize * sizeof(int) * matrixSize);
matB = (int *)malloc(matrixSize * sizeof(int) * matrixSize);
matC = (int *)malloc(matrixSize * sizeof(int) * matrixSize);
for(int i = 0 ; i < matrixSize * matrixSize; i ++)
matA[i] = (i*2)%10;
printMatrix(matA, matrixSize, "Matrix A");
//printMatrix(matB, matrixSize, "Matrix B");
printf("Transposing matrix on CPU...\n");
hipEventRecord(hostStart, 0);
//matrix multiplication code goes here.
for(int i = 0 ; i < matrixSize ; i ++)
{
for(int j = 0 ; j < matrixSize ; j ++)
{
//printf("Src Indx: %d, Dstn Indx: %d\n", i*matrixSize+j, j*matrixSize+i);
matB[j*matrixSize+i] = matA[i*matrixSize+j];
}
}
hipEventRecord(hostStop, 0);
hipEventElapsedTime(&timeDifferenceOnHost, hostStart, hostStop);
printf("Matrix transpose over. Time taken on CPU: %5.5f\n", timeDifferenceOnHost);
printMatrix(matB, matrixSize, "Transpose Matrix");
//allocate memory on GPU
checkError(hipMalloc((void**)&gpuMatA, size), "Malloc for Matrix A");
checkError(hipMalloc((void**)&gpuMatB, size), "Malloc for Matrix B");
//checkError(hipMalloc((void**)&gpuMatC, size), "Malloc for Matrix C");
//copy the matrix A and matrix B
checkError(hipMemcpy(gpuMatA, matA, size, hipMemcpyHostToDevice), "Matrix A Copy");
//checkError(hipMemcpy(gpuMatB, matB, size, hipMemcpyHostToDevice), "Matrix B Copy");
bool done = false;
while(!done)
{
matCFromGPU = (int *)malloc(matrixSize * sizeof(int) * matrixSize);
//create a proper grid block using dim3
readValue(&threadsPerBlock, "Enter no. of threads per block(input of 'P' will construct PxP threads in block)", 4, 32);
readValue(&blocksInGrid, "Enter no. of blocks in grid(input of 'P' will construct PxP blocks)", (matrixSize + threadsPerBlock -1)/threadsPerBlock, 65535);
printf("Threads Per block: %d, Blocks in grid: %d\n", threadsPerBlock, blocksInGrid);
printf("Multiplying matrices on GPU..\n");
dim3 blocks(threadsPerBlock, threadsPerBlock);
dim3 grid(blocksInGrid, blocksInGrid); //(matrixSize + threadsPerBlock - 1/blocks.x), (matrixSize + blocks.y - 1/blocks.y));
//call the kernels to execute
hipEventRecord(deviceStart, 0);
printf("Total linear threads: %d\n", blocksInGrid*threadsPerBlock);
hipLaunchKernelGGL(( matTranspose), dim3(grid), dim3(blocks), 0, 0, gpuMatA, gpuMatB, matrixSize);
hipEventRecord(deviceStop, 0);
hipEventSynchronize(deviceStop);
hipEventElapsedTime(&timeDifferenceOnDevice, deviceStart, deviceStop);
//copy the result back into host memory
checkError(hipMemcpy(matCFromGPU, gpuMatB, size, hipMemcpyDeviceToHost), "Matrix C Copy from device to Host");
if(checkIfMatricesEqual(matB, matCFromGPU, matrixSize))
printf("Kernels correct!\n");
else
printf("Kernel logic wrong!\n");
printf("Finished transposing on GPU. Time taken: %5.5f\n", timeDifferenceOnDevice);
printf("Speedup: %5.5f\n", (float)timeDifferenceOnHost/timeDifferenceOnDevice);
printMatrix(matCFromGPU, matrixSize, "Transpose Matrix from GPU");
char c = 'n';
printf("Again?(y/n): ");
while(true)
{
c = getchar();
if(c == 'y' || c == 'n')
break;
}
if(c == 'n')
break;
free(matCFromGPU);
}
free(matA);
free(matB);
free(matC);
hipEventDestroy(deviceStart);
hipEventDestroy(deviceStop);
hipEventDestroy(hostStart);
hipEventDestroy(hostStop);
return 0;
}
|
7a63ae3da6e81e06d414344780d6b30b4c917102.cu
|
#include<stdio.h>
#include<stdlib.h>
__global__ void matTranspose(int *matrixA, int *matrixB, int matSize)
{
int threadCol = blockIdx.x * blockDim.x + threadIdx.x;
int threadRow = blockIdx.y * blockDim.y + threadIdx.y;
int srcIndex = threadRow*matSize+threadCol;
int dstnIndex = threadCol*matSize+threadRow;
matrixB[dstnIndex] = matrixA[srcIndex];
}
void printMatrix(int *matrix, int size, char * matrixName)
{
if(size > 10)
return;
int i = 0;
printf("Printing Matrix: %s\n", matrixName);
for( ; i < size * size ; i ++)
{
if(i % size == 0)
printf("\n");
printf("%-3d ", matrix[i]);
}
printf("\n\n");
}
void checkError(cudaError_t error, char * function)
{
if(error != cudaSuccess)
{
printf("\"%s\" has a problem with error code %d and desc: %s\n", function, error, cudaGetErrorString(error));
exit(-1);
}
}
bool checkIfMatricesEqual(int * mat1, int * mat2, int matSize)
{
int i = 0;
for( ; i < matSize; i++)
if(mat1[i] != mat2[i]){
printf("values different for i: %d\n", i);
printf("mat1[i] = %d, mat2[i] = %d\n", mat1[i], mat2[i]);
return false;
}
return true;
}
void readValue(int *value, char * msg, int lowerBound, int upperBound)
{
while(true)
{
printf("%s(%d-%d): ", msg, lowerBound, upperBound);
scanf("%d", value);
if(*value <= upperBound && *value >= lowerBound)
return;
}
}
int main()
{
//have variables for threads per block, number of blocks.
int threadsPerBlock = 0, blocksInGrid = 0;
//create cuda event variables
cudaEvent_t hostStart, hostStop, deviceStart, deviceStop;
float timeDifferenceOnHost, timeDifferenceOnDevice;
//program variables
int matrixSize = 0;
size_t size; //variable to have the size of arrays on device
int *matA, *matB, *matC, *matCFromGPU; //matrices for host
int *gpuMatA, *gpuMatB, *gpuMatC; //matrices for Device
//initialize cuda timing variables
cudaEventCreate(&hostStart);
cudaEventCreate(&hostStop);
cudaEventCreate(&deviceStart);
cudaEventCreate(&deviceStop);
printf("Enter the size of the matrix: ");
scanf("%d", &matrixSize);
//calculate the size required on GPU
size = matrixSize * matrixSize * sizeof(int);
matA = (int *)malloc(matrixSize * sizeof(int) * matrixSize);
matB = (int *)malloc(matrixSize * sizeof(int) * matrixSize);
matC = (int *)malloc(matrixSize * sizeof(int) * matrixSize);
for(int i = 0 ; i < matrixSize * matrixSize; i ++)
matA[i] = (i*2)%10;
printMatrix(matA, matrixSize, "Matrix A");
//printMatrix(matB, matrixSize, "Matrix B");
printf("Transposing matrix on CPU...\n");
cudaEventRecord(hostStart, 0);
//matrix multiplication code goes here.
for(int i = 0 ; i < matrixSize ; i ++)
{
for(int j = 0 ; j < matrixSize ; j ++)
{
//printf("Src Indx: %d, Dstn Indx: %d\n", i*matrixSize+j, j*matrixSize+i);
matB[j*matrixSize+i] = matA[i*matrixSize+j];
}
}
cudaEventRecord(hostStop, 0);
cudaEventElapsedTime(&timeDifferenceOnHost, hostStart, hostStop);
printf("Matrix transpose over. Time taken on CPU: %5.5f\n", timeDifferenceOnHost);
printMatrix(matB, matrixSize, "Transpose Matrix");
//allocate memory on GPU
checkError(cudaMalloc((void**)&gpuMatA, size), "Malloc for Matrix A");
checkError(cudaMalloc((void**)&gpuMatB, size), "Malloc for Matrix B");
//checkError(cudaMalloc((void**)&gpuMatC, size), "Malloc for Matrix C");
//copy the matrix A and matrix B
checkError(cudaMemcpy(gpuMatA, matA, size, cudaMemcpyHostToDevice), "Matrix A Copy");
//checkError(cudaMemcpy(gpuMatB, matB, size, cudaMemcpyHostToDevice), "Matrix B Copy");
bool done = false;
while(!done)
{
matCFromGPU = (int *)malloc(matrixSize * sizeof(int) * matrixSize);
//create a proper grid block using dim3
readValue(&threadsPerBlock, "Enter no. of threads per block(input of 'P' will construct PxP threads in block)", 4, 32);
readValue(&blocksInGrid, "Enter no. of blocks in grid(input of 'P' will construct PxP blocks)", (matrixSize + threadsPerBlock -1)/threadsPerBlock, 65535);
printf("Threads Per block: %d, Blocks in grid: %d\n", threadsPerBlock, blocksInGrid);
printf("Multiplying matrices on GPU..\n");
dim3 blocks(threadsPerBlock, threadsPerBlock);
dim3 grid(blocksInGrid, blocksInGrid); //(matrixSize + threadsPerBlock - 1/blocks.x), (matrixSize + blocks.y - 1/blocks.y));
//call the kernels to execute
cudaEventRecord(deviceStart, 0);
printf("Total linear threads: %d\n", blocksInGrid*threadsPerBlock);
matTranspose<<<grid, blocks>>>(gpuMatA, gpuMatB, matrixSize);
cudaEventRecord(deviceStop, 0);
cudaEventSynchronize(deviceStop);
cudaEventElapsedTime(&timeDifferenceOnDevice, deviceStart, deviceStop);
//copy the result back into host memory
checkError(cudaMemcpy(matCFromGPU, gpuMatB, size, cudaMemcpyDeviceToHost), "Matrix C Copy from device to Host");
if(checkIfMatricesEqual(matB, matCFromGPU, matrixSize))
printf("Kernels correct!\n");
else
printf("Kernel logic wrong!\n");
printf("Finished transposing on GPU. Time taken: %5.5f\n", timeDifferenceOnDevice);
printf("Speedup: %5.5f\n", (float)timeDifferenceOnHost/timeDifferenceOnDevice);
printMatrix(matCFromGPU, matrixSize, "Transpose Matrix from GPU");
char c = 'n';
printf("Again?(y/n): ");
while(true)
{
c = getchar();
if(c == 'y' || c == 'n')
break;
}
if(c == 'n')
break;
free(matCFromGPU);
}
free(matA);
free(matB);
free(matC);
cudaEventDestroy(deviceStart);
cudaEventDestroy(deviceStop);
cudaEventDestroy(hostStart);
cudaEventDestroy(hostStop);
return 0;
}
|
28fa01d34a32d2012579da8562d54d94b5f66bc5.hip
|
// !!! This is a file automatically generated by hipify!!!
/*Purpose:
Sparse multiplication using MKL Library implementation for Intel Xeon Phi 5110P and Intel Xeon E5 2670
Description:
The OpenMP implementation of the sparse matrix - matrix multiplication performs a matrix-matrix operation using the "hipsparseScsrgemm" routine defined as
C := op(A)*op(B);
where:
A, B, C are the sparse matrices in the CSR format (3-array variation);
op(A) is one of op(A) = A, i.e. sparse matrix in this case.
The program computes the sparse matrix - sparse matrix multiplication.
The sparse matrices are stored in CSR(compressed storage row) format.
The output is also a sparse matrix in CSR format.
The program's aim is to record the time taken for 1000 iterations for different input sizes. The time information is recorded in a file called "output.txt".
Modified:
14 April 2015
Author:
Nikhil Pratap Ghanathe
[email protected]
*/
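/*
 * Illustrative note on the storage format used below: in the 3-array CSR
 * variation with one-based indexing (HIPSPARSE_INDEX_BASE_ONE), a matrix
 *     | 1 0 2 |
 *     | 0 3 0 |
 * is stored as values = {1, 2, 3}, column indices = {1, 3, 2} and row
 * pointers = {1, 3, 4}. In this program the "nz" arrays hold the values,
 * "ja" holds the row pointers and "ia" the column indices (note the
 * swapped naming), read from nz.txt, row.txt and column.txt respectively.
 */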
#include<stdio.h>
#include<stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "cusparse_v2.h"
double timerval ()
{
struct timeval st;
gettimeofday(&st, NULL);
return (st.tv_sec+st.tv_usec*1e-6);
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float *nz, *nzc;
int *ia,*ja, *ic,*jc;
float *d_nz, *d_nzc;
int *d_ia,*d_ja, *d_ic,*d_jc;
int i;
double avg_time = 0, s_time, e_time;
//file to write results
FILE *fp1,*fp2,*fp3,*fp4;
int m=4;
int density;
int iterations;
/* iterate the loop for input size from 2exp3 to 2exp10 */
for (iterations=0; iterations<10; iterations++)
{
//int request = 0;
m *= 2; // increase the dimension of Matrix with every iteration
int n = m; // Assuming a square matrix.
if((fp1 = fopen("column.txt","rw"))==NULL)
{
printf("error opening file\n");
}
if((fp3 = fopen("row.txt","rw"))==NULL)
{
printf("error opening file\n");
}
if((fp4 = fopen("nz.txt","rw"))==NULL)
{
printf("error opening file\n");
}
if(iterations==0)
{
fseek(fp1,0,SEEK_SET);
fseek(fp4,0,SEEK_SET);
fseek(fp3,0,SEEK_SET);
}
//memory allocation for matrix A and B
nz = (float *)calloc(m*n,sizeof(float));
ia = (int *)calloc(m*n,sizeof(int));
ja = (int *)calloc(m*n,sizeof(int));
hipMalloc((void**) &d_nz, m*n);
hipMalloc((void**) &d_ia, m*n);
hipMalloc((void**) &d_ja, m*n);
//memory allocation for product matrix C
nzc = (float *)calloc(m*n,sizeof(float));
ic = (int *)calloc(m*n,sizeof(int));
jc = (int *)calloc(m*n,sizeof(int));
hipMalloc((void**) &d_nzc, m*n);
hipMalloc((void**) &d_ic, m*n);
hipMalloc((void**) &d_ic, m*n);
//density of the sparse matrix to be created.
double dense_const = 0.05;
density=(m*n)*(dense_const);
printf("density is %d\n",density);
/*read the matrix data from the files*/
//read column
for(i=0;i<=density;i++)
{
fscanf(fp1,"%d",&ia[i]);
}
//read row ptr
for(i=0;i<=density;i++)
{
fscanf(fp3,"%d",&ja[i]);
}
//read nz values
for(i=0;i<=density;i++)
{
fscanf(fp4,"%f",&nz[i]);
}
fclose(fp1);
fclose(fp4);
fclose(fp3);
/*start computation of sparse matrix * sparse matrix */
int baseC,nnzC;
// nnzTotalDevHostPtr points to host memory
int *nnzTotalDevHostPtr = &nnzC;
hipsparseHandle_t handle=0;
hipsparseStatus_t cusparseStatus;
//descriptor for matrices
hipsparseMatDescr_t descrA=0;
hipsparseMatDescr_t descrC=0;
//handle for cuSparse context
cusparseStatus = hipsparseCreate(&handle);
hipsparseSetPointerMode(handle, HIPSPARSE_POINTER_MODE_HOST);
//Host to mem copy of input matrix
hipMemcpy(d_nz, nz, (m*n)*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_ja, ja, (m+1)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_ia, ia, (m*n)*sizeof(int), hipMemcpyHostToDevice);
cusparseStatus = hipsparseCreateMatDescr(&descrA);
hipsparseSetMatType(descrA,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descrA,HIPSPARSE_INDEX_BASE_ONE);
cusparseStatus = hipsparseCreateMatDescr(&descrC);
hipsparseSetMatType(descrC,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descrC,HIPSPARSE_INDEX_BASE_ONE);
//////////////////////////////////////////////////////////////////////////
hipMalloc((void**)&d_nzc, sizeof(int)*(m*n));
/* extract number of non zero elements of 'C' */
hipsparseXcsrgemmNnz(handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
m,
n,
n,
descrA,
density,
d_ja,
d_ia,
descrA,
density,
d_ja,
d_ia,
descrC,
d_jc,
nnzTotalDevHostPtr);
if (NULL != nnzTotalDevHostPtr)
{
nnzC = *nnzTotalDevHostPtr;
}
else
{
hipMemcpy(&nnzC, d_jc+m, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(&baseC, d_jc, sizeof(int), hipMemcpyDeviceToHost);
nnzC -= baseC;
}
hipMalloc((void**)&d_ic, sizeof(int)*nnzC);
hipMalloc((void**)&d_nzc, sizeof(float)*nnzC);
s_time = timerval();
/* compute for 1000 times and average out the execution time */
for(i=0;i<1000;i++)
{
hipsparseScsrgemm(
handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
m,
n,
n,
descrA,
density,
d_nz,
d_ja,
d_ia,
descrA,
density,
d_nz,
d_ja,
d_ia,
descrC,
d_nzc,
d_jc,
d_ic);
}
e_time = timerval();
hipMemcpy(nzc, d_nzc, (m*n)*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(jc, d_jc, (m*n)*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(ic, d_ic, (m*n)*sizeof(int), hipMemcpyDeviceToHost);
hipsparseDestroy(handle);
avg_time = (e_time - s_time);
avg_time = avg_time / 1000;
if((fp2 = fopen("output.txt","a"))==NULL)
{
printf("error opening file\n");
}
fprintf (fp2, "\n Input size: %d x %d ,Time: %lf and density is %d \n", m,n, avg_time, density);
fclose(fp1);
hipFree(d_nz);
hipFree(d_ia);
hipFree(d_ja);
hipFree(d_nzc);
hipFree(d_jc);
hipFree(d_ic);
free(ja);
free(ia);
free(nz);
free(jc);
free(ic);
free(nzc);
}
return 0;
}
|
28fa01d34a32d2012579da8562d54d94b5f66bc5.cu
|
/*Purpose:
Sparse multiplication using MKL Library implementation for Intel Xeon Phi 5110P and Intel Xeon E5 2670
Description:
The OpenMP implementation of the sparse matrix - matrix multiplication performs a matrix-matrix operation using the "cusparseScsrgemm" routine defined as
C := op(A)*op(B);
where:
A, B, C are the sparse matrices in the CSR format (3-array variation);
op(A) is one of op(A) = A, i.e. sparse matrix in this case.
The program computes the sparse matrix - sparse matrix multiplication.
The sparse matrices are stored in CSR(compressed storage row) format.
The output is also a sparse matrix in CSR format.
The program's aim is to record the time taken for 1000 iterations for different input sizes. The time information is recorded in a file called "output.txt".
Modified:
14 April 2015
Author:
Nikhil Pratap Ghanathe
[email protected]
*/
#include<stdio.h>
#include<stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#include "cusparse_v2.h"
double timerval ()
{
struct timeval st;
gettimeofday(&st, NULL);
return (st.tv_sec+st.tv_usec*1e-6);
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float *nz, *nzc;
int *ia,*ja, *ic,*jc;
float *d_nz, *d_nzc;
int *d_ia,*d_ja, *d_ic,*d_jc;
int i;
double avg_time = 0, s_time, e_time;
//file to write results
FILE *fp1,*fp2,*fp3,*fp4;
int m=4;
int density;
int iterations;
/* iterate the loop for input size from 2exp3 to 2exp10 */
for (iterations=0; iterations<10; iterations++)
{
//int request = 0;
m *= 2; // increase the dimension of Matrix with every iteration
int n = m; // Assuming a square matrix.
if((fp1 = fopen("column.txt","rw"))==NULL)
{
printf("error opening file\n");
}
if((fp3 = fopen("row.txt","rw"))==NULL)
{
printf("error opening file\n");
}
if((fp4 = fopen("nz.txt","rw"))==NULL)
{
printf("error opening file\n");
}
if(iterations==0)
{
fseek(fp1,0,SEEK_SET);
fseek(fp4,0,SEEK_SET);
fseek(fp3,0,SEEK_SET);
}
//memory allocation for matrix A and B
nz = (float *)calloc(m*n,sizeof(float));
ia = (int *)calloc(m*n,sizeof(int));
ja = (int *)calloc(m*n,sizeof(int));
cudaMalloc((void**) &d_nz, m*n);
cudaMalloc((void**) &d_ia, m*n);
cudaMalloc((void**) &d_ja, m*n);
//memory allocation for product matrix C
nzc = (float *)calloc(m*n,sizeof(float));
ic = (int *)calloc(m*n,sizeof(int));
jc = (int *)calloc(m*n,sizeof(int));
cudaMalloc((void**) &d_nzc, m*n);
cudaMalloc((void**) &d_ic, m*n);
cudaMalloc((void**) &d_ic, m*n);
//density of the sparse matrix to be created.
double dense_const = 0.05;
density=(m*n)*(dense_const);
printf("density is %d\n",density);
/*read the matrix data from the files*/
//read column
for(i=0;i<=density;i++)
{
fscanf(fp1,"%d",&ia[i]);
}
//read row ptr
for(i=0;i<=density;i++)
{
fscanf(fp3,"%d",&ja[i]);
}
//read nz values
for(i=0;i<=density;i++)
{
fscanf(fp4,"%f",&nz[i]);
}
fclose(fp1);
fclose(fp4);
fclose(fp3);
/*start computation of sparse matrix * sparse matrix */
int baseC,nnzC;
// nnzTotalDevHostPtr points to host memory
int *nnzTotalDevHostPtr = &nnzC;
cusparseHandle_t handle=0;
cusparseStatus_t cusparseStatus;
//descriptor for matrices
cusparseMatDescr_t descrA=0;
cusparseMatDescr_t descrC=0;
//handle for cuSparse context
cusparseStatus = cusparseCreate(&handle);
cusparseSetPointerMode(handle, CUSPARSE_POINTER_MODE_HOST);
//Host to mem copy of input matrix
cudaMemcpy(d_nz, nz, (m*n)*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_ja, ja, (m+1)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_ia, ia, (m*n)*sizeof(int), cudaMemcpyHostToDevice);
cusparseStatus = cusparseCreateMatDescr(&descrA);
cusparseSetMatType(descrA,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descrA,CUSPARSE_INDEX_BASE_ONE);
cusparseStatus = cusparseCreateMatDescr(&descrC);
cusparseSetMatType(descrC,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descrC,CUSPARSE_INDEX_BASE_ONE);
//////////////////////////////////////////////////////////////////////////
cudaMalloc((void**)&d_nzc, sizeof(int)*(m*n));
/* extract number of non zero elements of 'C' */
cusparseXcsrgemmNnz(handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_NON_TRANSPOSE,
m,
n,
n,
descrA,
density,
d_ja,
d_ia,
descrA,
density,
d_ja,
d_ia,
descrC,
d_jc,
nnzTotalDevHostPtr);
if (NULL != nnzTotalDevHostPtr)
{
nnzC = *nnzTotalDevHostPtr;
}
else
{
cudaMemcpy(&nnzC, d_jc+m, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&baseC, d_jc, sizeof(int), cudaMemcpyDeviceToHost);
nnzC -= baseC;
}
cudaMalloc((void**)&d_ic, sizeof(int)*nnzC);
cudaMalloc((void**)&d_nzc, sizeof(float)*nnzC);
s_time = timerval();
/* compute for 1000 times and average out the execution time */
for(i=0;i<1000;i++)
{
cusparseScsrgemm(
handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_NON_TRANSPOSE,
m,
n,
n,
descrA,
density,
d_nz,
d_ja,
d_ia,
descrA,
density,
d_nz,
d_ja,
d_ia,
descrC,
d_nzc,
d_jc,
d_ic);
}
e_time = timerval();
cudaMemcpy(nzc, d_nzc, (m*n)*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(jc, d_jc, (m*n)*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(ic, d_ic, (m*n)*sizeof(int), cudaMemcpyDeviceToHost);
cusparseDestroy(handle);
avg_time = (e_time - s_time);
avg_time = avg_time / 1000;
if((fp2 = fopen("output.txt","a"))==NULL)
{
printf("error opening file\n");
}
fprintf (fp2, "\n Input size: %d x %d ,Time: %lf and density is %d \n", m,n, avg_time, density);
fclose(fp1);
cudaFree(d_nz);
cudaFree(d_ia);
cudaFree(d_ja);
cudaFree(d_nzc);
cudaFree(d_jc);
cudaFree(d_ic);
free(ja);
free(ia);
free(nz);
free(jc);
free(ic);
free(nzc);
}
return 0;
}
|
d172160eb0f694e7cb5f8c61000ed1c067273251.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include <math.h>
#include <sys/time.h>
//#define COLS 1000
//#define ROWS 1000
#define MAX_TEMP_ERROR 0.01
#define THREADS_PER_BLOCK 128
//double temperature[ROWS+2][COLS+2];
//double temperature_last[ROWS+2][COLS+2];
double temperature[(ROWS+2)*(COLS+2)];
double temperature_last[(ROWS+2)*(COLS+2)];
void initialize();
void track_progress(int iter);
void checkCUDAError(const char*);
__global__ void calcAvg(float device_t[(ROWS+2)*(COLS+2)] ,
float device_t_last[(ROWS+2)*(COLS+2)],
float d_dtmax[ROWS*COLS/THREADS_PER_BLOCK+1]){
__shared__ float block_dt_min[THREADS_PER_BLOCK];
__syncthreads();
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx < ROWS * COLS){
int idx_ip = threadIdx.x + 1 + (blockIdx.x * blockDim.x);
int idx_im = threadIdx.x - 1 + (blockIdx.x * blockDim.x);
int idx_jp = threadIdx.x + ((blockIdx.x+1) * blockDim.x);
int idx_jm = threadIdx.x + ((blockIdx.x-1) * blockDim.x);
device_t[idx] = 0.25 * (device_t_last[idx_ip] + device_t_last[idx_im] +
device_t_last[idx_jp] + device_t_last[idx_jm]);
block_dt_min[threadIdx.x] = (device_t[idx] - device_t_last[idx]);
block_dt_min[threadIdx.x] = block_dt_min[threadIdx.x] < 0 ? -block_dt_min[threadIdx.x] : block_dt_min[threadIdx.x];
__syncthreads();
float dt = 0;
if (threadIdx.x == 0){
for (int i = 0 ; i < blockDim.x ; i ++){
dt = dt > block_dt_min[i] ? dt : block_dt_min[i];
}
d_dtmax[blockIdx.x] = dt;
}
}
}
int main(int argc, char**argv){
int i , j;
int max_iterations = 4000;
int iteration=1;
double dt=100;
struct timeval start_time, stop_time, elapsed_time;
int deviceNum;
hipGetDevice(&deviceNum);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, deviceNum);
printf("Device name: %s\n",prop.name);
//printf("Maximum iterations ?\n");
//scanf("%d" , &max_iterations);
gettimeofday(&start_time, NULL);
printf("iteration %d\n" , iteration);
printf("initialize");
initialize();
printf(" done\n");
// cuda specific init
int threads_per_block = THREADS_PER_BLOCK;
int num_blocks = ROWS*COLS / threads_per_block;
if ((ROWS*COLS) % threads_per_block != 0)
num_blocks++;
dim3 blocksPerGrid(num_blocks, 1 ,1);
dim3 threadsPerBlock(threads_per_block, 1, 1);
#pragma acc data copyin(temperature, temperature_last)
int N = (ROWS+2)*(COLS+2);
float * device_t, *device_t_last, *device_dt , *block_dt , * host_t;
printf("defined variables\n" );
hipMalloc(&device_t, (ROWS+2)*(COLS*2)*sizeof(float));
printf("cudamalloc device_t\n");
hipMalloc(&device_t_last, (ROWS+2)*(COLS*2)*sizeof(float));
printf("cudamalloc device_t_last\n");
// max reduction
hipMalloc(&device_dt, num_blocks*sizeof(float));
printf("cudamalloc device_dt\n");
// copies the 2D array to a 1D array on GPU implicitly
hipMemcpy(device_t_last, temperature_last, N * sizeof(float),
hipMemcpyHostToDevice);
printf("hipMemcpy device_t_last\n");
block_dt = (float*)malloc(num_blocks * sizeof(float));
printf("malloc block_dt\n");
host_t = (float*)malloc(N * sizeof(float));
printf("iteration %d \n" , 0);
while (dt > MAX_TEMP_ERROR && iteration <= max_iterations){
#pragma acc kernels present(temperature, temperature_last)
{
/*
for (i = 1; i<= ROWS; i++){
for (j = 1; j<= COLS; j++){
temperature[i][j] = 0.25 * (temperature_last[i+1][j] + temperature_last[i-1][j] +
temperature_last[i][j+1] + temperature_last[i][j-1]);
}
}
*/
hipLaunchKernelGGL(( calcAvg), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, device_t, device_t_last, device_dt);
hipDeviceSynchronize();
//printf("calculated\n" , iteration);
//dt =0.0;
//#pragma acc kernels
/*for (i = 1; i<= ROWS; i++){
for (j = 1; j<= COLS; j++){
dt = fmax( fabs(temperature[i][j]-temperature_last[i][j]), dt);
temperature_last[i][j] = temperature[i][j];
}
}
*/
// update the temperature last
hipMemcpy(device_t_last, device_t, N*sizeof(float), hipMemcpyDeviceToDevice);
//printf("updated on device\n" , iteration);
hipMemcpy(block_dt, device_dt, num_blocks*sizeof(float), hipMemcpyDeviceToHost);
//dt = 0.;
//printf("copy deltas\n");
for (j = 0 ; j < num_blocks ; j ++){
dt = dt > block_dt[j] ? dt: block_dt[j];
}
}
if ((iteration %100 ) == 0){
#pragma acc update host(temperature[ROWS-5:ROWS])
hipMemcpy(host_t, device_t, N*sizeof(float), hipMemcpyDeviceToHost);
//track_progress(iteration);
printf("iteration %d\nhost_t[%d]=%.2f\n",iteration,
(ROWS-1)+(COLS-1)*(ROWS-2), host_t[(ROWS-1)+(COLS-1)*(ROWS-2)]);
printf("current dt %.2f\n", dt);
}
iteration++;
}
#pragma acc data copyout(temperature)
gettimeofday(&stop_time, NULL);
timersub(&stop_time, &start_time, &elapsed_time);
printf("\nMax error at iteration %d was %f\n" , iteration-1, dt);
printf("Total time was %d %f seconds.\n", elapsed_time.tv_sec, ((float)elapsed_time.tv_sec + ((float)elapsed_time.tv_usec/1000000.0f)));
exit(0);
}
void initialize(){
int i,j, idx;
for (i = 0; i<= ROWS; i++){
for (j = 0; j<= COLS; j++){
idx = i + ROWS*j;
//temperature_last[i][j] = 0.0;
temperature_last[idx] = 0.0;
}
}
// boundary condition
for (i = 0; i<= ROWS; i++){
idx = i;
//temperature_last[i][0] = 0.0;
temperature_last[idx] = 0.0;
idx = i + ROWS*(COLS+1);
//temperature_last[i][COLS+1] = (100.0/ROWS)*i;
temperature_last[idx] = (100.0/ROWS)*i;
}
for (j = 0; j<= COLS; j++){
idx = j* ROWS;
//temperature_last[0][j] = 0.0;
temperature_last[idx] = 0.0;
idx = ROWS+1 + ROWS * j;
//temperature_last[ROWS+1][j] = (100.0/COLS)*j;
temperature_last[idx] = (100.0/COLS)*j;
}
}
void track_progress(int iteration){
int i ;
printf("---------- Iteration number: %d -------------\n", iteration);
for (i = ROWS-5; i<= ROWS; i=i+2){
printf("[%d,%d]: %5.2f ", i,i, temperature[i+ROWS*i]);
}
printf("\n");
}
|
d172160eb0f694e7cb5f8c61000ed1c067273251.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include <math.h>
#include <sys/time.h>
//#define COLS 1000
//#define ROWS 1000
#define MAX_TEMP_ERROR 0.01
#define THREADS_PER_BLOCK 128
//double temperature[ROWS+2][COLS+2];
//double temperature_last[ROWS+2][COLS+2];
double temperature[(ROWS+2)*(COLS+2)];
double temperature_last[(ROWS+2)*(COLS+2)];
void initialize();
void track_progress(int iter);
void checkCUDAError(const char*);
__global__ void calcAvg(float device_t[(ROWS+2)*(COLS+2)] ,
float device_t_last[(ROWS+2)*(COLS+2)],
float d_dtmax[ROWS*COLS/THREADS_PER_BLOCK+1]){
__shared__ float block_dt_min[THREADS_PER_BLOCK];
__syncthreads();
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx < ROWS * COLS){
int idx_ip = threadIdx.x + 1 + (blockIdx.x * blockDim.x);
int idx_im = threadIdx.x - 1 + (blockIdx.x * blockDim.x);
int idx_jp = threadIdx.x + ((blockIdx.x+1) * blockDim.x);
int idx_jm = threadIdx.x + ((blockIdx.x-1) * blockDim.x);
device_t[idx] = 0.25 * (device_t_last[idx_ip] + device_t_last[idx_im] +
device_t_last[idx_jp] + device_t_last[idx_jm]);
block_dt_min[threadIdx.x] = (device_t[idx] - device_t_last[idx]);
block_dt_min[threadIdx.x] = block_dt_min[threadIdx.x] < 0 ? -block_dt_min[threadIdx.x] : block_dt_min[threadIdx.x];
__syncthreads();
float dt = 0;
if (threadIdx.x == 0){
for (int i = 0 ; i < blockDim.x ; i ++){
dt = dt > block_dt_min[i] ? dt : block_dt_min[i];
}
d_dtmax[blockIdx.x] = dt;
}
}
}
int main(int argc, char**argv){
int i , j;
int max_iterations = 4000;
int iteration=1;
double dt=100;
struct timeval start_time, stop_time, elapsed_time;
int deviceNum;
cudaGetDevice(&deviceNum);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, deviceNum);
printf("Device name: %s\n",prop.name);
//printf("Maximum iterations ?\n");
//scanf("%d" , &max_iterations);
gettimeofday(&start_time, NULL);
printf("iteration %d\n" , iteration);
printf("initialize");
initialize();
printf(" done\n");
// cuda specific init
int threads_per_block = THREADS_PER_BLOCK;
int num_blocks = ROWS*COLS / threads_per_block;
if ((ROWS*COLS) % threads_per_block != 0)
num_blocks++;
dim3 blocksPerGrid(num_blocks, 1 ,1);
dim3 threadsPerBlock(threads_per_block, 1, 1);
#pragma acc data copyin(temperature, temperature_last)
int N = (ROWS+2)*(COLS+2);
float * device_t, *device_t_last, *device_dt , *block_dt , * host_t;
printf("defined variables\n" );
cudaMalloc(&device_t, (ROWS+2)*(COLS*2)*sizeof(float));
printf("cudamalloc device_t\n");
cudaMalloc(&device_t_last, (ROWS+2)*(COLS*2)*sizeof(float));
printf("cudamalloc device_t_last\n");
// max reduction
cudaMalloc(&device_dt, num_blocks*sizeof(float));
printf("cudamalloc device_dt\n");
// copies the 2D array to a 1D array on GPU implicitly
cudaMemcpy(device_t_last, temperature_last, N * sizeof(float),
cudaMemcpyHostToDevice);
printf("cudaMemcpy device_t_last\n");
block_dt = (float*)malloc(num_blocks * sizeof(float));
printf("malloc block_dt\n");
host_t = (float*)malloc(N * sizeof(float));
printf("iteration %d \n" , 0);
while (dt > MAX_TEMP_ERROR && iteration <= max_iterations){
#pragma acc kernels present(temperature, temperature_last)
{
/*
for (i = 1; i<= ROWS; i++){
for (j = 1; j<= COLS; j++){
temperature[i][j] = 0.25 * (temperature_last[i+1][j] + temperature_last[i-1][j] +
temperature_last[i][j+1] + temperature_last[i][j-1]);
}
}
*/
calcAvg<<<blocksPerGrid, threadsPerBlock>>>(device_t, device_t_last, device_dt);
cudaThreadSynchronize();
//printf("calculated\n" , iteration);
//dt =0.0;
//#pragma acc kernels
/*for (i = 1; i<= ROWS; i++){
for (j = 1; j<= COLS; j++){
dt = fmax( fabs(temperature[i][j]-temperature_last[i][j]), dt);
temperature_last[i][j] = temperature[i][j];
}
}
*/
// update the temperature last
cudaMemcpy(device_t_last, device_t, N*sizeof(float), cudaMemcpyDeviceToDevice);
//printf("updated on device\n" , iteration);
cudaMemcpy(block_dt, device_dt, num_blocks*sizeof(float), cudaMemcpyDeviceToHost);
//dt = 0.;
//printf("copy deltas\n");
for (j = 0 ; j < num_blocks ; j ++){
dt = dt > block_dt[j] ? dt: block_dt[j];
}
}
if ((iteration %100 ) == 0){
#pragma acc update host(temperature[ROWS-5:ROWS])
cudaMemcpy(host_t, device_t, N*sizeof(float), cudaMemcpyDeviceToHost);
//track_progress(iteration);
printf("iteration %d\nhost_t[%d]=%.2f\n",iteration,
(ROWS-1)+(COLS-1)*(ROWS-2), host_t[(ROWS-1)+(COLS-1)*(ROWS-2)]);
printf("current dt %.2f\n", dt);
}
iteration++;
}
#pragma acc data copyout(temperature)
gettimeofday(&stop_time, NULL);
timersub(&stop_time, &start_time, &elapsed_time);
printf("\nMax error at iteration %d was %f\n" , iteration-1, dt);
printf("Total time was %d %f seconds.\n", elapsed_time.tv_sec, ((float)elapsed_time.tv_sec + ((float)elapsed_time.tv_usec/1000000.0f)));
exit(0);
}
void initialize(){
int i,j, idx;
for (i = 0; i<= ROWS; i++){
for (j = 0; j<= COLS; j++){
idx = i + ROWS*j;
//temperature_last[i][j] = 0.0;
temperature_last[idx] = 0.0;
}
}
// boundary condition
for (i = 0; i<= ROWS; i++){
idx = i;
//temperature_last[i][0] = 0.0;
temperature_last[idx] = 0.0;
idx = i + ROWS*(COLS+1);
//temperature_last[i][COLS+1] = (100.0/ROWS)*i;
temperature_last[idx] = (100.0/ROWS)*i;
}
for (j = 0; j<= COLS; j++){
idx = j* ROWS;
//temperature_last[0][j] = 0.0;
temperature_last[idx] = 0.0;
idx = ROWS+1 + ROWS * j;
//temperature_last[ROWS+1][j] = (100.0/COLS)*j;
temperature_last[idx] = (100.0/COLS)*j;
}
}
void track_progress(int iteration){
int i ;
printf("---------- Iteration number: %d -------------\n", iteration);
for (i = ROWS-5; i<= ROWS; i=i+2){
printf("[%d,%d]: %5.2f ", i,i, temperature[i+ROWS*i]);
}
printf("\n");
}
|
7e4b2478e17bcb155694c3fa7e4dab10532f44cd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <cmath>
#include <iostream>
#include <memory>
#include <limits>
#include <cassert>
__global__ void
function(float a, int n_per, double step, float temp, double * sum_array){
double sum_part = 0.0;
double x = a + (temp * (threadIdx.x));
for(int i = 0; i < n_per; i++){
if(x != 0.0){
double val = sin(x)/x;
sum_part += val;
}
x += step;
}
sum_array[threadIdx.x] = sum_part;
__syncthreads();
}
int main(int argc, char **argv){
hipError_t rv;
rv = hipDeviceReset();
assert(rv == hipSuccess);
if(argc != 5) {std::cerr<< "Incorrect number of arguments" << std::endl; return EINVAL;};
float a = std::stod(argv[1]);
float b = std::stod(argv[2]);
int n = atoi(argv[3]);
int n_threads = std::stoull(argv[4]);
if(n_threads < 1) {std::cerr << "Incorrect number of arguments" << std::endl; return EINVAL;};
//Here the number of steps per thread is calculated and the size of each subsection is also calculated and set to temp
float temp = std::abs((b-a)) / n_threads;
double step = (b-a)/n;
int n_per = n / n_threads;
//create sum on global mem
double sum = 0.0;
double *sum_array;
rv = hipMalloc(&sum_array, n_threads * sizeof(double));
assert(rv == hipSuccess);
double *sum_temp = (double *)malloc(n_threads * sizeof(double));
for(int i = 0; i < n_threads; i++){
sum_temp[i] = 0.0;
}
hipMemcpy(sum_array, sum_temp, n_threads * sizeof(double), hipMemcpyHostToDevice);
//and have it set to 0
//cuda kernel call
hipLaunchKernelGGL(( function), dim3(1), dim3(n_threads), 0, 0, a, n_per, step, temp, sum_array);
hipMemcpy(sum_temp, sum_array, n_threads*sizeof(double), hipMemcpyDeviceToHost);
for(int i = 0; i < n_threads; i++){
sum += sum_temp[i];
}
//Here the different values of the trapezoidal rule are calculated to give the result as "answer"
double val2 = 0.0;
if(a != 0) val2 = (sin(a)/a);
double val3 = 0.0;
if(b != 0) val3 = (sin(b)/b);
val3 = val3 / 2;
val2 = val2 / 2;
typedef std::numeric_limits< double > dbl;
std::cout.precision(dbl::max_digits10);
double answer = step * (val2 + sum + val3);
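// For reference: "answer" applies the composite trapezoidal rule to sin(x)/x on [a, b]:
//   integral ~= step * ( f(a)/2 + sum of sampled f(x) + f(b)/2 )
// with the sampled values accumulated per thread on the device and reduced
// into "sum" on the host above.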
std::cout << answer << std::endl;
rv = hipFree(sum_array);
assert(rv == hipSuccess);
free(sum_temp);
return 0;
}
|
7e4b2478e17bcb155694c3fa7e4dab10532f44cd.cu
|
#include <cuda.h>
#include <cmath>
#include <iostream>
#include <memory>
#include <limits>
#include <cassert>
__global__ void
function(float a, int n_per, double step, float temp, double * sum_array){
double sum_part = 0.0;
double x = a + (temp * (threadIdx.x));
for(int i = 0; i < n_per; i++){
if(x != 0.0){
double val = sin(x)/x;
sum_part += val;
}
x += step;
}
sum_array[threadIdx.x] = sum_part;
__syncthreads();
}
int main(int argc, char **argv){
cudaError_t rv;
rv = cudaDeviceReset();
assert(rv == cudaSuccess);
if(argc != 5) {std::cerr<< "Incorrect number of arguments" << std::endl; return EINVAL;};
float a = std::stod(argv[1]);
float b = std::stod(argv[2]);
int n = atoi(argv[3]);
int n_threads = std::stoull(argv[4]);
if(n_threads < 1) {std::cerr << "Incorrect number of arguments" << std::endl; return EINVAL;};
//Here the number of steps per thread is calculated and the size of each subsection is also calculated and set to temp
float temp = std::abs((b-a)) / n_threads;
double step = (b-a)/n;
int n_per = n / n_threads;
//create sum on global mem
double sum = 0.0;
double *sum_array;
rv = cudaMalloc(&sum_array, n_threads * sizeof(double));
assert(rv == cudaSuccess);
double *sum_temp = (double *)malloc(n_threads * sizeof(double));
for(int i = 0; i < n_threads; i++){
sum_temp[i] = 0.0;
}
cudaMemcpy(sum_array, sum_temp, n_threads * sizeof(double), cudaMemcpyHostToDevice);
//and have it set to 0
//cuda kernel call
function<<<1, n_threads>>>(a, n_per, step, temp, sum_array);
cudaMemcpy(sum_temp, sum_array, n_threads*sizeof(double), cudaMemcpyDeviceToHost);
for(int i = 0; i < n_threads; i++){
sum += sum_temp[i];
}
//Here the different values of the trapezoidal rule are calculated to give the result as "answer"
double val2 = 0.0;
if(a != 0) val2 = (sin(a)/a);
double val3 = 0.0;
if(b != 0) val3 = (sin(b)/b);
val3 = val3 / 2;
val2 = val2 / 2;
typedef std::numeric_limits< double > dbl;
std::cout.precision(dbl::max_digits10);
double answer = step * (val2 + sum + val3);
std::cout << answer << std::endl;
rv = cudaFree(sum_array);
assert(rv == cudaSuccess);
free(sum_temp);
return 0;
}
|
89eccdf6b6452ec6f13bab350197f3e5f8f17e9b.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018-2019, Michael P. Howard
// This file is part of the azplugins project, released under the Modified BSD License.
// Maintainer: mphoward
#include "PairPotentials.cuh"
namespace azplugins
{
namespace gpu
{
//! Kernel driver for Ashbaugh-Hatch pair potential
template hipError_t compute_pair_potential<azplugins::detail::PairEvaluatorAshbaugh>
(const pair_args_t& pair_args,
const typename azplugins::detail::PairEvaluatorAshbaugh::param_type *d_params);
} // end namespace gpu
} // end namespace azplugins
|
89eccdf6b6452ec6f13bab350197f3e5f8f17e9b.cu
|
// Copyright (c) 2018-2019, Michael P. Howard
// This file is part of the azplugins project, released under the Modified BSD License.
// Maintainer: mphoward
#include "PairPotentials.cuh"
namespace azplugins
{
namespace gpu
{
//! Kernel driver for Ashbaugh-Hatch pair potential
template cudaError_t compute_pair_potential<azplugins::detail::PairEvaluatorAshbaugh>
(const pair_args_t& pair_args,
const typename azplugins::detail::PairEvaluatorAshbaugh::param_type *d_params);
} // end namespace gpu
} // end namespace azplugins
|
f8d00806427dca71e2725e85f4a9d47adf73684a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hipfft.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "debug.h"
#include "timer.h"
#include "utils_cuda.h"
#include "utils_file.h"
#include "params.h"
//#define REORDER
#define WARP 32
#define NREUSES 100
#define NCUDABLOCKS 1000
int device=0;
__device__ __inline__ float2 Get_W_value(int N, int m){
float2 ctemp;
ctemp.x=cosf( -2.0f*3.141592654f*fdividef( (float) m, (float) N) );
ctemp.y=sinf( -2.0f*3.141592654f*fdividef( (float) m, (float) N) );
return(ctemp);
}
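/*
 * Note: Get_W_value returns the complex twiddle factor
 *     W_N^m = exp(-2*pi*i*m/N),
 * with W.x = cos(-2*pi*m/N) and W.y = sin(-2*pi*m/N); it is the root of
 * unity multiplied into the odd branch of each radix-2 butterfly below.
 */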
__device__ void do_FFT(float2 *s_input){
float2 A_DFT_value, B_DFT_value;
float2 W;
float2 Aftemp, Bftemp;
int local_id, warp_id;
int j, m_param, parity;
int A_read_index, B_read_index;
int PoT, PoTm1, q;
local_id = threadIdx.x & (WARP - 1);
warp_id = threadIdx.x/WARP;
//-----> FFT
//-->
PoTm1 = (FFT_LENGTH>>1);
PoT = FFT_LENGTH;
for(q=(FFT_EXP-1);q>4;q--){
__syncthreads();
m_param = threadIdx.x & (PoTm1 - 1);
j=threadIdx.x>>q;
W=Get_W_value(PoT, m_param);
A_read_index=j*PoT + m_param;
B_read_index=j*PoT + m_param + PoTm1;
Aftemp = s_input[A_read_index];
Bftemp = s_input[B_read_index];
A_DFT_value.x = Aftemp.x + Bftemp.x;
A_DFT_value.y = Aftemp.y + Bftemp.y;
B_DFT_value.x = W.x*(Aftemp.x - Bftemp.x) - W.y*(Aftemp.y - Bftemp.y);
B_DFT_value.y = W.x*(Aftemp.y - Bftemp.y) + W.y*(Aftemp.x - Bftemp.x);
s_input[A_read_index]=A_DFT_value;
s_input[B_read_index]=B_DFT_value;
PoT=PoT>>1;
PoTm1=PoTm1>>1;
}
__syncthreads();
A_DFT_value=s_input[local_id + warp_id*2*WARP];
B_DFT_value=s_input[local_id + warp_id*2*WARP + WARP];
for(q=4;q>=0;q--){
m_param = (local_id & (PoT - 1));
j = m_param>>q;
parity=(1-j*2);
W = Get_W_value(PoT, j*(m_param-PoTm1));
Aftemp.x = parity*A_DFT_value.x + __shfl_xor(A_DFT_value.x, PoTm1);
Aftemp.y = parity*A_DFT_value.y + __shfl_xor(A_DFT_value.y, PoTm1);
Bftemp.x = parity*B_DFT_value.x + __shfl_xor(B_DFT_value.x, PoTm1);
Bftemp.y = parity*B_DFT_value.y + __shfl_xor(B_DFT_value.y, PoTm1);
A_DFT_value.x = W.x*Aftemp.x - W.y*Aftemp.y;
A_DFT_value.y = W.x*Aftemp.y + W.y*Aftemp.x;
B_DFT_value.x = W.x*Bftemp.x - W.y*Bftemp.y;
B_DFT_value.y = W.x*Bftemp.y + W.y*Bftemp.x;
PoT=PoT>>1;
PoTm1=PoTm1>>1;
}
s_input[local_id + warp_id*2*WARP] = A_DFT_value;
s_input[local_id + warp_id*2*WARP + WARP] = B_DFT_value;
__syncthreads();
#ifdef REORDER
int load_id, i, n;
load_id = threadIdx.x;
n=threadIdx.x;
for(i=1; i<FFT_EXP; i++) {
n >>= 1;
load_id <<= 1;
load_id |= n & 1;
}
load_id &= FFT_LENGTH-1;
//-----> Scrambling input
__syncthreads();
A_DFT_value=s_input[load_id];
B_DFT_value=s_input[load_id + 1];
__syncthreads();
s_input[threadIdx.x] = A_DFT_value;
s_input[threadIdx.x+FFT_LENGTH/2] = B_DFT_value;
__syncthreads();
#endif
}
__device__ void do_FFT_4way(float2 *s_input){
float2 A_DFT_value, B_DFT_value, C_DFT_value, D_DFT_value;
float2 W;
float2 Aftemp, Bftemp, Cftemp, Dftemp;
int local_id, warp_id;
int j, m_param, parity;
int A_read_index, B_read_index, C_read_index, D_read_index;
int PoT, PoTm1, q;
local_id = threadIdx.x & (WARP - 1);
warp_id = threadIdx.x/WARP;
//-----> FFT
//-->
PoTm1 = (FFT_LENGTH>>1);
PoT = FFT_LENGTH;
//Highest iteration
m_param = threadIdx.x;
j=0;
A_read_index = m_param;
B_read_index = m_param + PoTm1;
C_read_index = m_param + (PoTm1>>1);
D_read_index = m_param + 3*(PoTm1>>1);
W=Get_W_value(PoT, m_param);
Aftemp = s_input[A_read_index];
Bftemp = s_input[B_read_index];
Cftemp = s_input[C_read_index];
Dftemp = s_input[D_read_index];
A_DFT_value.x = Aftemp.x + Bftemp.x;
A_DFT_value.y = Aftemp.y + Bftemp.y;
B_DFT_value.x = W.x*(Aftemp.x - Bftemp.x) - W.y*(Aftemp.y - Bftemp.y);
B_DFT_value.y = W.x*(Aftemp.y - Bftemp.y) + W.y*(Aftemp.x - Bftemp.x);
C_DFT_value.x = Cftemp.x + Dftemp.x;
C_DFT_value.y = Cftemp.y + Dftemp.y;
D_DFT_value.x = W.y*(Cftemp.x - Dftemp.x) + W.x*(Cftemp.y - Dftemp.y);
D_DFT_value.y = W.y*(Cftemp.y - Dftemp.y) - W.x*(Cftemp.x - Dftemp.x);
s_input[A_read_index]=A_DFT_value;
s_input[B_read_index]=B_DFT_value;
s_input[C_read_index]=C_DFT_value;
s_input[D_read_index]=D_DFT_value;
PoT=PoT>>1;
PoTm1=PoTm1>>1;
for(q=(FFT_EXP-2);q>4;q--){
__syncthreads();
m_param = threadIdx.x & (PoTm1 - 1);
j=threadIdx.x>>q;
W=Get_W_value(PoT, m_param);
A_read_index=j*(PoT<<1) + m_param;
B_read_index=j*(PoT<<1) + m_param + PoTm1;
C_read_index=j*(PoT<<1) + m_param + PoT;
D_read_index=j*(PoT<<1) + m_param + 3*PoTm1;
Aftemp = s_input[A_read_index];
Bftemp = s_input[B_read_index];
Cftemp = s_input[C_read_index];
Dftemp = s_input[D_read_index];
A_DFT_value.x = Aftemp.x + Bftemp.x;
A_DFT_value.y = Aftemp.y + Bftemp.y;
C_DFT_value.x = Cftemp.x + Dftemp.x;
C_DFT_value.y = Cftemp.y + Dftemp.y;
B_DFT_value.x = W.x*(Aftemp.x - Bftemp.x) - W.y*(Aftemp.y - Bftemp.y);
B_DFT_value.y = W.x*(Aftemp.y - Bftemp.y) + W.y*(Aftemp.x - Bftemp.x);
D_DFT_value.x = W.x*(Cftemp.x - Dftemp.x) - W.y*(Cftemp.y - Dftemp.y);
D_DFT_value.y = W.x*(Cftemp.y - Dftemp.y) + W.y*(Cftemp.x - Dftemp.x);
s_input[A_read_index]=A_DFT_value;
s_input[B_read_index]=B_DFT_value;
s_input[C_read_index]=C_DFT_value;
s_input[D_read_index]=D_DFT_value;
PoT=PoT>>1;
PoTm1=PoTm1>>1;
}
__syncthreads();
j = local_id + (warp_id<<2)*WARP;
A_DFT_value = s_input[j];
B_DFT_value = s_input[j + WARP];
C_DFT_value = s_input[j + 2*WARP];
D_DFT_value = s_input[j + 3*WARP];
for(q=4;q>=0;q--){
m_param = (local_id & (PoT - 1));
j = m_param>>q;
parity=(1-j*2);
W = Get_W_value(PoT, j*(m_param-PoTm1));
Aftemp.x = parity*A_DFT_value.x + __shfl_xor(A_DFT_value.x, PoTm1);
Aftemp.y = parity*A_DFT_value.y + __shfl_xor(A_DFT_value.y, PoTm1);
Bftemp.x = parity*B_DFT_value.x + __shfl_xor(B_DFT_value.x, PoTm1);
Bftemp.y = parity*B_DFT_value.y + __shfl_xor(B_DFT_value.y, PoTm1);
Cftemp.x = parity*C_DFT_value.x + __shfl_xor(C_DFT_value.x, PoTm1);
Cftemp.y = parity*C_DFT_value.y + __shfl_xor(C_DFT_value.y, PoTm1);
Dftemp.x = parity*D_DFT_value.x + __shfl_xor(D_DFT_value.x, PoTm1);
Dftemp.y = parity*D_DFT_value.y + __shfl_xor(D_DFT_value.y, PoTm1);
A_DFT_value.x = W.x*Aftemp.x - W.y*Aftemp.y;
A_DFT_value.y = W.x*Aftemp.y + W.y*Aftemp.x;
B_DFT_value.x = W.x*Bftemp.x - W.y*Bftemp.y;
B_DFT_value.y = W.x*Bftemp.y + W.y*Bftemp.x;
C_DFT_value.x = W.x*Cftemp.x - W.y*Cftemp.y;
C_DFT_value.y = W.x*Cftemp.y + W.y*Cftemp.x;
D_DFT_value.x = W.x*Dftemp.x - W.y*Dftemp.y;
D_DFT_value.y = W.x*Dftemp.y + W.y*Dftemp.x;
PoT=PoT>>1;
PoTm1=PoTm1>>1;
}
j = local_id + (warp_id<<2)*WARP;
s_input[j] = A_DFT_value;
s_input[j + WARP] = B_DFT_value;
s_input[j + 2*WARP] = C_DFT_value;
s_input[j + 3*WARP] = D_DFT_value;
__syncthreads();
#ifdef REORDER
__syncthreads();
int A_load_id, B_load_id, i, A_n, B_n;
A_load_id = threadIdx.x;
B_load_id = threadIdx.x + FFT_LENGTH/4;
A_n=threadIdx.x;
B_n=threadIdx.x + FFT_LENGTH/4;
for(i=1; i<FFT_EXP; i++) {
A_n >>= 1;
B_n >>= 1;
A_load_id <<= 1;
A_load_id |= A_n & 1;
B_load_id <<= 1;
B_load_id |= B_n & 1;
}
A_load_id &= FFT_LENGTH-1;
B_load_id &= FFT_LENGTH-1;
//-----> Scrambling input
A_DFT_value=s_input[A_load_id];
B_DFT_value=s_input[A_load_id + 1];
C_DFT_value=s_input[B_load_id];
D_DFT_value=s_input[B_load_id + 1];
__syncthreads();
s_input[threadIdx.x] = A_DFT_value;
s_input[threadIdx.x + FFT_LENGTH/2] = B_DFT_value;
s_input[threadIdx.x + FFT_LENGTH/4] = C_DFT_value;
s_input[threadIdx.x + 3*FFT_LENGTH/4] = D_DFT_value;
__syncthreads();
#endif
}
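// One thread block per spectrum: load FFT_LENGTH points into shared memory, transform, write back.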
__global__ void FFT_GPU_external(float2 *d_input, float2* d_output) {
extern __shared__ float2 s_input[];
s_input[threadIdx.x]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH];
s_input[threadIdx.x + FFT_LENGTH/2]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH + FFT_LENGTH/2];
__syncthreads();
do_FFT(s_input);
__syncthreads();
d_output[threadIdx.x + blockIdx.x*FFT_LENGTH]=s_input[threadIdx.x];
d_output[threadIdx.x + blockIdx.x*FFT_LENGTH + FFT_LENGTH/2]=s_input[threadIdx.x + FFT_LENGTH/2];
}
__global__ void FFT_GPU_external_4way(float2 *d_input, float2* d_output) {
extern __shared__ float2 s_input[];
s_input[threadIdx.x]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH];
s_input[threadIdx.x + (FFT_LENGTH>>2)]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH + (FFT_LENGTH>>2)];
s_input[threadIdx.x + (FFT_LENGTH>>1)]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH + (FFT_LENGTH>>1)];
s_input[threadIdx.x + 3*(FFT_LENGTH>>2)]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH + 3*(FFT_LENGTH>>2)];
__syncthreads();
do_FFT_4way(s_input);
__syncthreads();
d_output[threadIdx.x + blockIdx.x*FFT_LENGTH]=s_input[threadIdx.x];
d_output[threadIdx.x + blockIdx.x*FFT_LENGTH + (FFT_LENGTH>>2)]=s_input[threadIdx.x + (FFT_LENGTH>>2)];
d_output[threadIdx.x + blockIdx.x*FFT_LENGTH + (FFT_LENGTH>>1)]=s_input[threadIdx.x + (FFT_LENGTH>>1)];
d_output[threadIdx.x + blockIdx.x*FFT_LENGTH + 3*(FFT_LENGTH>>2)]=s_input[threadIdx.x + 3*(FFT_LENGTH>>2)];
}
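// Benchmark kernel: performs the shared-memory FFT NREUSES times on the same data so the timing is
// dominated by the transform itself rather than by global-memory traffic.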
__global__ void FFT_GPU_multiple(float2 *d_input, float2* d_output) {
extern __shared__ float2 s_input[];
s_input[threadIdx.x]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH];
s_input[threadIdx.x + FFT_LENGTH/2]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH + FFT_LENGTH/2];
__syncthreads();
for(int f=0;f<NREUSES;f++){
do_FFT(s_input);
}
__syncthreads();
d_output[threadIdx.x + blockIdx.x*FFT_LENGTH]=s_input[threadIdx.x];
d_output[threadIdx.x + blockIdx.x*FFT_LENGTH + FFT_LENGTH/2]=s_input[threadIdx.x + FFT_LENGTH/2];
}
__global__ void FFT_GPU_multiple_4way(float2 *d_input, float2* d_output) {
extern __shared__ float2 s_input[];
s_input[threadIdx.x]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH];
s_input[threadIdx.x + (FFT_LENGTH>>2)]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH + (FFT_LENGTH>>2)];
s_input[threadIdx.x + (FFT_LENGTH>>1)]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH + (FFT_LENGTH>>1)];
s_input[threadIdx.x + 3*(FFT_LENGTH>>2)]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH + 3*(FFT_LENGTH>>2)];
__syncthreads();
for(int f=0;f<NREUSES;f++){
do_FFT_4way(s_input);
}
__syncthreads();
d_output[threadIdx.x + blockIdx.x*FFT_LENGTH]=s_input[threadIdx.x];
d_output[threadIdx.x + blockIdx.x*FFT_LENGTH + (FFT_LENGTH>>2)]=s_input[threadIdx.x + (FFT_LENGTH>>2)];
d_output[threadIdx.x + blockIdx.x*FFT_LENGTH + (FFT_LENGTH>>1)]=s_input[threadIdx.x + (FFT_LENGTH>>1)];
d_output[threadIdx.x + blockIdx.x*FFT_LENGTH + 3*(FFT_LENGTH>>2)]=s_input[threadIdx.x + 3*(FFT_LENGTH>>2)];
}
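// Estimate how many spectra fit in free device memory at once (input + output buffers of nSamples
// float2 values each), capped by the maximum grid dimension and reduced by a 10% safety margin.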
int Max_columns_in_memory_shared(int nSamples, int nSpectra) {
long int nColumns,maxgrid_x;
size_t free_mem,total_mem;
hipDeviceProp_t devProp;
checkCudaErrors(hipSetDevice(device));
checkCudaErrors(hipGetDeviceProperties(&devProp,device));
maxgrid_x = devProp.maxGridSize[0];
hipMemGetInfo(&free_mem,&total_mem);
nColumns=((long int) free_mem)/(2.0*sizeof(float2)*nSamples);
if(nColumns>maxgrid_x) nColumns=maxgrid_x;
nColumns=(int) nColumns*0.9;
return(nColumns);
}
void FFT_init(){
//---------> Specific nVidia stuff
hipDeviceSetCacheConfig(hipFuncCachePreferEqual);
hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);
}
void FFT_external_benchmark(float2 *d_input, float2 *d_output, int nSamples, int nSpectra, double *FFT_time){
GpuTimer timer;
//---------> CUDA block and CUDA grid parameters
int nCUDAblocks_x=nSpectra;
int nCUDAblocks_y=1; //Head size
dim3 gridSize(nCUDAblocks_x, nCUDAblocks_y, 1); //nCUDAblocks_y goes through spectra
dim3 blockSize(nSamples/2, 1, 1); //nCUDAblocks_x goes through channels
//---------> FFT kernel launch
timer.Start();
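// Dynamic shared memory: nSamples complex samples at sizeof(float2) = 8 bytes each.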
hipLaunchKernelGGL(( FFT_GPU_external), dim3(gridSize), dim3(blockSize),nSamples*8, 0, d_input, d_output);
timer.Stop();
*FFT_time += timer.Elapsed();
}
void FFT_external_benchmark_4way(float2 *d_input, float2 *d_output, int nSamples, int nSpectra, double *FFT_time){
GpuTimer timer;
//---------> CUDA block and CUDA grid parameters
int nCUDAblocks_x=nSpectra;
int nCUDAblocks_y=1; //Head size
dim3 gridSize(nCUDAblocks_x, nCUDAblocks_y, 1); //nCUDAblocks_y goes through spectra
dim3 blockSize(nSamples/4, 1, 1); //nCUDAblocks_x goes through channels
//---------> FFT kernel launch
timer.Start();
hipLaunchKernelGGL(( FFT_GPU_external_4way), dim3(gridSize), dim3(blockSize),nSamples*8, 0, d_input, d_output);
timer.Stop();
*FFT_time += timer.Elapsed();
}
void FFT_multiple_benchmark(float2 *d_input, float2 *d_output, int nSamples, int nSpectra, double *FFT_time){
GpuTimer timer;
//---------> CUDA block and CUDA grid parameters
dim3 gridSize_multiple(NCUDABLOCKS, 1, 1); //nCUDAblocks_y goes through spectra
dim3 blockSize(nSamples/2, 1, 1); //nCUDAblocks_x goes through channels
//---------> FFT kernel launch
timer.Start();
hipLaunchKernelGGL(( FFT_GPU_multiple), dim3(gridSize_multiple), dim3(blockSize),nSamples*8, 0, d_input, d_output);
timer.Stop();
*FFT_time += timer.Elapsed();
}
void FFT_multiple_benchmark_4way(float2 *d_input, float2 *d_output, int nSamples, int nSpectra, double *FFT_time){
GpuTimer timer;
//---------> CUDA block and CUDA grid parameters
dim3 gridSize_multiple(NCUDABLOCKS, 1, 1); //nCUDAblocks_y goes through spectra
dim3 blockSize(nSamples/4, 1, 1); //nCUDAblocks_x goes through channels
//---------> FFT kernel launch
timer.Start();
hipLaunchKernelGGL(( FFT_GPU_multiple_4way), dim3(gridSize_multiple), dim3(blockSize),nSamples*8, 0, d_input, d_output);
timer.Stop();
*FFT_time += timer.Elapsed();
}
// ***********************************************************************************
// ***********************************************************************************
// ***********************************************************************************
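// Host driver: allocates device buffers, optionally runs hipFFT as a reference (CUFFT flag), then times
// the MULTIPLE and EXTERNAL kernels in their 2-way and 4-way forms over nRuns iterations, copies the
// last result back, and optionally writes the timings to a file (WRITE flag).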
int GPU_FFT(float2 *input, float2 *output, int nSamples, int nSpectra, int nRuns){
//---------> Initial nVidia stuff
int devCount;
hipDeviceProp_t devProp;
size_t free_mem,total_mem;
checkCudaErrors(hipGetDeviceCount(&devCount));
if (DEBUG) {
printf("\nThere are %d devices.", devCount);
for (int i = 0; i < devCount; i++){
checkCudaErrors(hipGetDeviceProperties(&devProp,i));
printf("\n\t Using device:\t\t\t%s\n", devProp.name);
printf("\n\t Max grid size:\t\t\t%d\n", devProp.maxGridSize[1]);
printf("\n\t Shared mem per block:\t\t%zu\n", devProp.sharedMemPerBlock);
}
}
checkCudaErrors(hipSetDevice(device));
checkCudaErrors(hipGetDeviceProperties(&devProp,device));
hipMemGetInfo(&free_mem,&total_mem);
if(DEBUG) printf("\nDevice has %ld MB of total memory, of which %ld MB is available.\n", (long int) total_mem/(1000*1000), (long int) free_mem/(1000*1000));
//---------> Measurements
double transfer_in, transfer_out, FFT_time, FFT_external_time, FFT_multiple_time, FFT_multiple_reuse_time,cuFFT_time,FFT_multiple_reuse_registers_time;
double FFT_external_time_total, FFT_multiple_time_total;
GpuTimer timer; // if set before set device getting errors - invalid handle
//------------------------------------------------------------------------------
//---------> Shared memory kernel
transfer_in=0.0; transfer_out=0.0; FFT_time=0.0; FFT_external_time=0.0; FFT_multiple_time=0.0; FFT_multiple_reuse_time=0.0; cuFFT_time=0.0; FFT_multiple_reuse_registers_time=0.0;
FFT_external_time_total=0.0; FFT_multiple_time_total=0.0;
//---------> Spectra
int maxColumns,Sremainder,nRepeats,Spectra_to_allocate;
maxColumns=Max_columns_in_memory_shared(nSamples,nSpectra); // Maximum number of columns which fits into memory
nRepeats=(int) (nSpectra/maxColumns);
Sremainder=nSpectra-nRepeats*maxColumns;
Spectra_to_allocate=Sremainder;
if(nRepeats>0) Spectra_to_allocate=maxColumns;
if(nRepeats>0) {printf("Array is too big. Choose smaller number of FFTs\n"); exit(1);}
if(Spectra_to_allocate>maxColumns) {printf("Remainder is greater than maxColumns\n");exit(2);}
if (DEBUG) printf("Maximum number of spectra %d which is %e MB \n",maxColumns, (double) (maxColumns*nSamples*sizeof(float)/(1000.0*1000.0)) );
if (DEBUG) printf("nColumns is split into %d chunks of %d spectra and a remainder of %d spectra.\n",nRepeats,maxColumns,Sremainder);
if (DEBUG) printf("Number of columns to execute is %d.\n",Sremainder);
//---------> Channels
//if( nSamples%32!=0) {printf("Number of channels must be divisible by 32"); exit(2);}
//---------> Memory allocation
if (DEBUG) printf("Device memory allocation...: \t\t");
int input_size=nSamples*Spectra_to_allocate;
int output_size=nSamples*Spectra_to_allocate;
float2 *d_output;
float2 *d_input;
timer.Start();
checkCudaErrors(hipMalloc((void **) &d_input, sizeof(float2)*input_size));
checkCudaErrors(hipMalloc((void **) &d_output, sizeof(float2)*output_size));
timer.Stop();
if (DEBUG) printf("done in %g ms.\n", timer.Elapsed());
//---------> FFT calculation
for (int r = 0; r < nRepeats; r++){
}
if (Sremainder>0){
//-----> Copy chunk of input data to a device
if (DEBUG) printf("Transferring data into device memory...: \t\t");
timer.Start();
checkCudaErrors(hipMemcpy(d_input, &input[nRepeats*output_size], (Sremainder)*nSamples*sizeof(float2), hipMemcpyHostToDevice));
timer.Stop();
transfer_in+=timer.Elapsed();
if (DEBUG) printf("done in %g ms.\n", timer.Elapsed());
//-----> Compute FFT on the chunk
if(CUFFT){
//---------> FFT
hipfftHandle plan;
hipfftResult error;
error = hipfftPlan1d(&plan, nSamples, HIPFFT_C2C, Sremainder);
if (HIPFFT_SUCCESS != error){
printf("CUFFT error: %d", error);
}
timer.Start();
hipfftExecC2C(plan, (hipfftComplex *)d_input, (hipfftComplex *)d_output, HIPFFT_FORWARD);
timer.Stop();
cuFFT_time += timer.Elapsed();
hipfftDestroy(plan);
}
//------------------------------> 2way (normal)
if(MULTIPLE){
if (DEBUG) printf("\nApplying MULTIPLE FFT...: \t\t");
FFT_init();
FFT_multiple_time_total = 0;
for(int f=0; f<nRuns; f++){
checkCudaErrors(hipMemcpy(d_input, &input[nRepeats*output_size], (Sremainder)*nSamples*sizeof(float2), hipMemcpyHostToDevice));
FFT_multiple_benchmark(d_input, d_output, nSamples, Sremainder, &FFT_multiple_time_total);
}
FFT_multiple_time = FFT_multiple_time_total/nRuns;
if (DEBUG) printf("done in %g ms.\n", FFT_multiple_time);
}
if(EXTERNAL){
if (DEBUG) printf("\nApplying EXTERNAL FFT...: \t\t");
FFT_init();
FFT_external_time_total = 0;
for(int f=0; f<nRuns; f++){
checkCudaErrors(hipMemcpy(d_input, &input[nRepeats*output_size], (Sremainder)*nSamples*sizeof(float2), hipMemcpyHostToDevice));
FFT_external_benchmark(d_input, d_output, nSamples, Sremainder, &FFT_external_time_total);
}
FFT_external_time = FFT_external_time_total/nRuns;
if (DEBUG) printf("done in %g ms.\n", FFT_external_time);
}
//----------------------------------<
//-------------------------> 4way
if(MULTIPLE){
if (DEBUG) printf("\nApplying MULTIPLE FFT 4way...: \t\t");
FFT_init();
FFT_multiple_time_total = 0;
for(int f=0; f<nRuns; f++){
checkCudaErrors(hipMemcpy(d_input, &input[nRepeats*output_size], (Sremainder)*nSamples*sizeof(float2), hipMemcpyHostToDevice));
FFT_multiple_benchmark_4way(d_input, d_output, nSamples, Sremainder, &FFT_multiple_time_total);
}
FFT_multiple_time = FFT_multiple_time_total/nRuns;
if (DEBUG) printf("done in %g ms.\n", FFT_multiple_time);
}
if(EXTERNAL){
if (DEBUG) printf("\nApplying EXTERNAL FFT 4way...: \t\t");
FFT_init();
FFT_external_time_total = 0;
for(int f=0; f<nRuns; f++){
checkCudaErrors(hipMemcpy(d_input, &input[nRepeats*output_size], (Sremainder)*nSamples*sizeof(float2), hipMemcpyHostToDevice));
FFT_external_benchmark_4way(d_input, d_output, nSamples, Sremainder, &FFT_external_time_total);
}
FFT_external_time = FFT_external_time_total/nRuns;
if (DEBUG) printf("done in %g ms.\n", FFT_external_time);
}
//-----------------------------------<
//-----> Copy chunk of output data to host
if (DEBUG) printf("Transferring data to host...: \t\t");
timer.Start();
checkCudaErrors(hipMemcpy( &output[nRepeats*output_size], d_output, (Sremainder)*nSamples*sizeof(float2), hipMemcpyDeviceToHost));
timer.Stop();
transfer_out+=timer.Elapsed();
if (DEBUG) printf("done in %g ms.\n", timer.Elapsed());
}
//---------> error check -----
checkCudaErrors(hipGetLastError());
//---------> Freeing allocated resources
checkCudaErrors(hipFree(d_input));
checkCudaErrors(hipFree(d_output));
if (DEBUG || WRITE) printf("nSpectra:%d; nSamples:%d cuFFT:%0.3f ms; FFT external:%0.3f ms; FFT multiple:%0.3f ms; \n",nSpectra,nSamples,cuFFT_time, FFT_external_time, FFT_multiple_time);
if (WRITE){
char str[200];
sprintf(str,"GPU-polyphase-precisioncontrol.dat");
if (DEBUG) printf("\n Write results into file...\t");
save_time(str, nSpectra,nSamples, cuFFT_time, FFT_time, FFT_external_time, FFT_multiple_time, FFT_multiple_reuse_time, FFT_multiple_reuse_registers_time, transfer_in, transfer_out);
if (DEBUG) printf("\t done.\n-------------------------------------\n");
}
return(0);
}
|
f8d00806427dca71e2725e85f4a9d47adf73684a.cu
|
#include <cufft.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "debug.h"
#include "timer.h"
#include "utils_cuda.h"
#include "utils_file.h"
#include "params.h"
//#define REORDER
#define WARP 32
#define NREUSES 100
#define NCUDABLOCKS 1000
int device=0;
__device__ __inline__ float2 Get_W_value(int N, int m){
float2 ctemp;
ctemp.x=cosf( -2.0f*3.141592654f*fdividef( (float) m, (float) N) );
ctemp.y=sinf( -2.0f*3.141592654f*fdividef( (float) m, (float) N) );
return(ctemp);
}
__device__ void do_FFT(float2 *s_input){
float2 A_DFT_value, B_DFT_value;
float2 W;
float2 Aftemp, Bftemp;
int local_id, warp_id;
int j, m_param, parity;
int A_read_index, B_read_index;
int PoT, PoTm1, q;
local_id = threadIdx.x & (WARP - 1);
warp_id = threadIdx.x/WARP;
//-----> FFT
//-->
PoTm1 = (FFT_LENGTH>>1);
PoT = FFT_LENGTH;
for(q=(FFT_EXP-1);q>4;q--){
__syncthreads();
m_param = threadIdx.x & (PoTm1 - 1);
j=threadIdx.x>>q;
W=Get_W_value(PoT, m_param);
A_read_index=j*PoT + m_param;
B_read_index=j*PoT + m_param + PoTm1;
Aftemp = s_input[A_read_index];
Bftemp = s_input[B_read_index];
A_DFT_value.x = Aftemp.x + Bftemp.x;
A_DFT_value.y = Aftemp.y + Bftemp.y;
B_DFT_value.x = W.x*(Aftemp.x - Bftemp.x) - W.y*(Aftemp.y - Bftemp.y);
B_DFT_value.y = W.x*(Aftemp.y - Bftemp.y) + W.y*(Aftemp.x - Bftemp.x);
s_input[A_read_index]=A_DFT_value;
s_input[B_read_index]=B_DFT_value;
PoT=PoT>>1;
PoTm1=PoTm1>>1;
}
__syncthreads();
A_DFT_value=s_input[local_id + warp_id*2*WARP];
B_DFT_value=s_input[local_id + warp_id*2*WARP + WARP];
for(q=4;q>=0;q--){
m_param = (local_id & (PoT - 1));
j = m_param>>q;
parity=(1-j*2);
W = Get_W_value(PoT, j*(m_param-PoTm1));
Aftemp.x = parity*A_DFT_value.x + __shfl_xor(A_DFT_value.x, PoTm1);
Aftemp.y = parity*A_DFT_value.y + __shfl_xor(A_DFT_value.y, PoTm1);
Bftemp.x = parity*B_DFT_value.x + __shfl_xor(B_DFT_value.x, PoTm1);
Bftemp.y = parity*B_DFT_value.y + __shfl_xor(B_DFT_value.y, PoTm1);
A_DFT_value.x = W.x*Aftemp.x - W.y*Aftemp.y;
A_DFT_value.y = W.x*Aftemp.y + W.y*Aftemp.x;
B_DFT_value.x = W.x*Bftemp.x - W.y*Bftemp.y;
B_DFT_value.y = W.x*Bftemp.y + W.y*Bftemp.x;
PoT=PoT>>1;
PoTm1=PoTm1>>1;
}
s_input[local_id + warp_id*2*WARP] = A_DFT_value;
s_input[local_id + warp_id*2*WARP + WARP] = B_DFT_value;
__syncthreads();
#ifdef REORDER
int load_id, i, n;
load_id = threadIdx.x;
n=threadIdx.x;
for(i=1; i<FFT_EXP; i++) {
n >>= 1;
load_id <<= 1;
load_id |= n & 1;
}
load_id &= FFT_LENGTH-1;
//-----> Scrambling input
__syncthreads();
A_DFT_value=s_input[load_id];
B_DFT_value=s_input[load_id + 1];
__syncthreads();
s_input[threadIdx.x] = A_DFT_value;
s_input[threadIdx.x+FFT_LENGTH/2] = B_DFT_value;
__syncthreads();
#endif
}
__device__ void do_FFT_4way(float2 *s_input){
float2 A_DFT_value, B_DFT_value, C_DFT_value, D_DFT_value;
float2 W;
float2 Aftemp, Bftemp, Cftemp, Dftemp;
int local_id, warp_id;
int j, m_param, parity;
int A_read_index, B_read_index, C_read_index, D_read_index;
int PoT, PoTm1, q;
local_id = threadIdx.x & (WARP - 1);
warp_id = threadIdx.x/WARP;
//-----> FFT
//-->
PoTm1 = (FFT_LENGTH>>1);
PoT = FFT_LENGTH;
//Highest iteration
m_param = threadIdx.x;
j=0;
A_read_index = m_param;
B_read_index = m_param + PoTm1;
C_read_index = m_param + (PoTm1>>1);
D_read_index = m_param + 3*(PoTm1>>1);
W=Get_W_value(PoT, m_param);
Aftemp = s_input[A_read_index];
Bftemp = s_input[B_read_index];
Cftemp = s_input[C_read_index];
Dftemp = s_input[D_read_index];
A_DFT_value.x = Aftemp.x + Bftemp.x;
A_DFT_value.y = Aftemp.y + Bftemp.y;
B_DFT_value.x = W.x*(Aftemp.x - Bftemp.x) - W.y*(Aftemp.y - Bftemp.y);
B_DFT_value.y = W.x*(Aftemp.y - Bftemp.y) + W.y*(Aftemp.x - Bftemp.x);
C_DFT_value.x = Cftemp.x + Dftemp.x;
C_DFT_value.y = Cftemp.y + Dftemp.y;
D_DFT_value.x = W.y*(Cftemp.x - Dftemp.x) + W.x*(Cftemp.y - Dftemp.y);
D_DFT_value.y = W.y*(Cftemp.y - Dftemp.y) - W.x*(Cftemp.x - Dftemp.x);
s_input[A_read_index]=A_DFT_value;
s_input[B_read_index]=B_DFT_value;
s_input[C_read_index]=C_DFT_value;
s_input[D_read_index]=D_DFT_value;
PoT=PoT>>1;
PoTm1=PoTm1>>1;
for(q=(FFT_EXP-2);q>4;q--){
__syncthreads();
m_param = threadIdx.x & (PoTm1 - 1);
j=threadIdx.x>>q;
W=Get_W_value(PoT, m_param);
A_read_index=j*(PoT<<1) + m_param;
B_read_index=j*(PoT<<1) + m_param + PoTm1;
C_read_index=j*(PoT<<1) + m_param + PoT;
D_read_index=j*(PoT<<1) + m_param + 3*PoTm1;
Aftemp = s_input[A_read_index];
Bftemp = s_input[B_read_index];
Cftemp = s_input[C_read_index];
Dftemp = s_input[D_read_index];
A_DFT_value.x = Aftemp.x + Bftemp.x;
A_DFT_value.y = Aftemp.y + Bftemp.y;
C_DFT_value.x = Cftemp.x + Dftemp.x;
C_DFT_value.y = Cftemp.y + Dftemp.y;
B_DFT_value.x = W.x*(Aftemp.x - Bftemp.x) - W.y*(Aftemp.y - Bftemp.y);
B_DFT_value.y = W.x*(Aftemp.y - Bftemp.y) + W.y*(Aftemp.x - Bftemp.x);
D_DFT_value.x = W.x*(Cftemp.x - Dftemp.x) - W.y*(Cftemp.y - Dftemp.y);
D_DFT_value.y = W.x*(Cftemp.y - Dftemp.y) + W.y*(Cftemp.x - Dftemp.x);
s_input[A_read_index]=A_DFT_value;
s_input[B_read_index]=B_DFT_value;
s_input[C_read_index]=C_DFT_value;
s_input[D_read_index]=D_DFT_value;
PoT=PoT>>1;
PoTm1=PoTm1>>1;
}
__syncthreads();
j = local_id + (warp_id<<2)*WARP;
A_DFT_value = s_input[j];
B_DFT_value = s_input[j + WARP];
C_DFT_value = s_input[j + 2*WARP];
D_DFT_value = s_input[j + 3*WARP];
for(q=4;q>=0;q--){
m_param = (local_id & (PoT - 1));
j = m_param>>q;
parity=(1-j*2);
W = Get_W_value(PoT, j*(m_param-PoTm1));
Aftemp.x = parity*A_DFT_value.x + __shfl_xor(A_DFT_value.x, PoTm1);
Aftemp.y = parity*A_DFT_value.y + __shfl_xor(A_DFT_value.y, PoTm1);
Bftemp.x = parity*B_DFT_value.x + __shfl_xor(B_DFT_value.x, PoTm1);
Bftemp.y = parity*B_DFT_value.y + __shfl_xor(B_DFT_value.y, PoTm1);
Cftemp.x = parity*C_DFT_value.x + __shfl_xor(C_DFT_value.x, PoTm1);
Cftemp.y = parity*C_DFT_value.y + __shfl_xor(C_DFT_value.y, PoTm1);
Dftemp.x = parity*D_DFT_value.x + __shfl_xor(D_DFT_value.x, PoTm1);
Dftemp.y = parity*D_DFT_value.y + __shfl_xor(D_DFT_value.y, PoTm1);
A_DFT_value.x = W.x*Aftemp.x - W.y*Aftemp.y;
A_DFT_value.y = W.x*Aftemp.y + W.y*Aftemp.x;
B_DFT_value.x = W.x*Bftemp.x - W.y*Bftemp.y;
B_DFT_value.y = W.x*Bftemp.y + W.y*Bftemp.x;
C_DFT_value.x = W.x*Cftemp.x - W.y*Cftemp.y;
C_DFT_value.y = W.x*Cftemp.y + W.y*Cftemp.x;
D_DFT_value.x = W.x*Dftemp.x - W.y*Dftemp.y;
D_DFT_value.y = W.x*Dftemp.y + W.y*Dftemp.x;
PoT=PoT>>1;
PoTm1=PoTm1>>1;
}
j = local_id + (warp_id<<2)*WARP;
s_input[j] = A_DFT_value;
s_input[j + WARP] = B_DFT_value;
s_input[j + 2*WARP] = C_DFT_value;
s_input[j + 3*WARP] = D_DFT_value;
__syncthreads();
#ifdef REORDER
__syncthreads();
int A_load_id, B_load_id, i, A_n, B_n;
A_load_id = threadIdx.x;
B_load_id = threadIdx.x + FFT_LENGTH/4;
A_n=threadIdx.x;
B_n=threadIdx.x + FFT_LENGTH/4;
for(i=1; i<FFT_EXP; i++) {
A_n >>= 1;
B_n >>= 1;
A_load_id <<= 1;
A_load_id |= A_n & 1;
B_load_id <<= 1;
B_load_id |= B_n & 1;
}
A_load_id &= FFT_LENGTH-1;
B_load_id &= FFT_LENGTH-1;
//-----> Scrambling input
A_DFT_value=s_input[A_load_id];
B_DFT_value=s_input[A_load_id + 1];
C_DFT_value=s_input[B_load_id];
D_DFT_value=s_input[B_load_id + 1];
__syncthreads();
s_input[threadIdx.x] = A_DFT_value;
s_input[threadIdx.x + FFT_LENGTH/2] = B_DFT_value;
s_input[threadIdx.x + FFT_LENGTH/4] = C_DFT_value;
s_input[threadIdx.x + 3*FFT_LENGTH/4] = D_DFT_value;
__syncthreads();
#endif
}
__global__ void FFT_GPU_external(float2 *d_input, float2* d_output) {
extern __shared__ float2 s_input[];
s_input[threadIdx.x]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH];
s_input[threadIdx.x + FFT_LENGTH/2]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH + FFT_LENGTH/2];
__syncthreads();
do_FFT(s_input);
__syncthreads();
d_output[threadIdx.x + blockIdx.x*FFT_LENGTH]=s_input[threadIdx.x];
d_output[threadIdx.x + blockIdx.x*FFT_LENGTH + FFT_LENGTH/2]=s_input[threadIdx.x + FFT_LENGTH/2];
}
__global__ void FFT_GPU_external_4way(float2 *d_input, float2* d_output) {
extern __shared__ float2 s_input[];
s_input[threadIdx.x]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH];
s_input[threadIdx.x + (FFT_LENGTH>>2)]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH + (FFT_LENGTH>>2)];
s_input[threadIdx.x + (FFT_LENGTH>>1)]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH + (FFT_LENGTH>>1)];
s_input[threadIdx.x + 3*(FFT_LENGTH>>2)]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH + 3*(FFT_LENGTH>>2)];
__syncthreads();
do_FFT_4way(s_input);
__syncthreads();
d_output[threadIdx.x + blockIdx.x*FFT_LENGTH]=s_input[threadIdx.x];
d_output[threadIdx.x + blockIdx.x*FFT_LENGTH + (FFT_LENGTH>>2)]=s_input[threadIdx.x + (FFT_LENGTH>>2)];
d_output[threadIdx.x + blockIdx.x*FFT_LENGTH + (FFT_LENGTH>>1)]=s_input[threadIdx.x + (FFT_LENGTH>>1)];
d_output[threadIdx.x + blockIdx.x*FFT_LENGTH + 3*(FFT_LENGTH>>2)]=s_input[threadIdx.x + 3*(FFT_LENGTH>>2)];
}
__global__ void FFT_GPU_multiple(float2 *d_input, float2* d_output) {
extern __shared__ float2 s_input[];
s_input[threadIdx.x]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH];
s_input[threadIdx.x + FFT_LENGTH/2]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH + FFT_LENGTH/2];
__syncthreads();
for(int f=0;f<NREUSES;f++){
do_FFT(s_input);
}
__syncthreads();
d_output[threadIdx.x + blockIdx.x*FFT_LENGTH]=s_input[threadIdx.x];
d_output[threadIdx.x + blockIdx.x*FFT_LENGTH + FFT_LENGTH/2]=s_input[threadIdx.x + FFT_LENGTH/2];
}
__global__ void FFT_GPU_multiple_4way(float2 *d_input, float2* d_output) {
extern __shared__ float2 s_input[];
s_input[threadIdx.x]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH];
s_input[threadIdx.x + (FFT_LENGTH>>2)]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH + (FFT_LENGTH>>2)];
s_input[threadIdx.x + (FFT_LENGTH>>1)]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH + (FFT_LENGTH>>1)];
s_input[threadIdx.x + 3*(FFT_LENGTH>>2)]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH + 3*(FFT_LENGTH>>2)];
__syncthreads();
for(int f=0;f<NREUSES;f++){
do_FFT_4way(s_input);
}
__syncthreads();
d_output[threadIdx.x + blockIdx.x*FFT_LENGTH]=s_input[threadIdx.x];
d_output[threadIdx.x + blockIdx.x*FFT_LENGTH + (FFT_LENGTH>>2)]=s_input[threadIdx.x + (FFT_LENGTH>>2)];
d_output[threadIdx.x + blockIdx.x*FFT_LENGTH + (FFT_LENGTH>>1)]=s_input[threadIdx.x + (FFT_LENGTH>>1)];
d_output[threadIdx.x + blockIdx.x*FFT_LENGTH + 3*(FFT_LENGTH>>2)]=s_input[threadIdx.x + 3*(FFT_LENGTH>>2)];
}
int Max_columns_in_memory_shared(int nSamples, int nSpectra) {
long int nColumns,maxgrid_x;
size_t free_mem,total_mem;
cudaDeviceProp devProp;
checkCudaErrors(cudaSetDevice(device));
checkCudaErrors(cudaGetDeviceProperties(&devProp,device));
maxgrid_x = devProp.maxGridSize[0];
cudaMemGetInfo(&free_mem,&total_mem);
nColumns=((long int) free_mem)/(2.0*sizeof(float2)*nSamples);
if(nColumns>maxgrid_x) nColumns=maxgrid_x;
nColumns=(int) nColumns*0.9;
return(nColumns);
}
void FFT_init(){
//---------> Specific nVidia stuff
cudaDeviceSetCacheConfig(cudaFuncCachePreferEqual);
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
}
void FFT_external_benchmark(float2 *d_input, float2 *d_output, int nSamples, int nSpectra, double *FFT_time){
GpuTimer timer;
//---------> CUDA block and CUDA grid parameters
int nCUDAblocks_x=nSpectra;
int nCUDAblocks_y=1; //Head size
dim3 gridSize(nCUDAblocks_x, nCUDAblocks_y, 1); //nCUDAblocks_y goes through spectra
dim3 blockSize(nSamples/2, 1, 1); //nCUDAblocks_x goes through channels
//---------> FFT kernel launch
timer.Start();
FFT_GPU_external<<<gridSize, blockSize,nSamples*8>>>( d_input, d_output);
timer.Stop();
*FFT_time += timer.Elapsed();
}
void FFT_external_benchmark_4way(float2 *d_input, float2 *d_output, int nSamples, int nSpectra, double *FFT_time){
GpuTimer timer;
//---------> CUDA block and CUDA grid parameters
int nCUDAblocks_x=nSpectra;
int nCUDAblocks_y=1; //Head size
dim3 gridSize(nCUDAblocks_x, nCUDAblocks_y, 1); //nCUDAblocks_y goes through spectra
dim3 blockSize(nSamples/4, 1, 1); //nCUDAblocks_x goes through channels
//---------> FFT kernel launch
timer.Start();
FFT_GPU_external_4way<<<gridSize, blockSize,nSamples*8>>>( d_input, d_output);
timer.Stop();
*FFT_time += timer.Elapsed();
}
void FFT_multiple_benchmark(float2 *d_input, float2 *d_output, int nSamples, int nSpectra, double *FFT_time){
GpuTimer timer;
//---------> CUDA block and CUDA grid parameters
dim3 gridSize_multiple(NCUDABLOCKS, 1, 1); //nCUDAblocks_y goes through spectra
dim3 blockSize(nSamples/2, 1, 1); //nCUDAblocks_x goes through channels
//---------> FFT kernel launch
timer.Start();
FFT_GPU_multiple<<<gridSize_multiple, blockSize,nSamples*8>>>( d_input, d_output);
timer.Stop();
*FFT_time += timer.Elapsed();
}
void FFT_multiple_benchmark_4way(float2 *d_input, float2 *d_output, int nSamples, int nSpectra, double *FFT_time){
GpuTimer timer;
//---------> CUDA block and CUDA grid parameters
dim3 gridSize_multiple(NCUDABLOCKS, 1, 1); //nCUDAblocks_y goes through spectra
dim3 blockSize(nSamples/4, 1, 1); //nCUDAblocks_x goes through channels
//---------> FFT kernel launch
timer.Start();
FFT_GPU_multiple_4way<<<gridSize_multiple, blockSize,nSamples*8>>>( d_input, d_output);
timer.Stop();
*FFT_time += timer.Elapsed();
}
// ***********************************************************************************
// ***********************************************************************************
// ***********************************************************************************
int GPU_FFT(float2 *input, float2 *output, int nSamples, int nSpectra, int nRuns){
//---------> Initial nVidia stuff
int devCount;
cudaDeviceProp devProp;
size_t free_mem,total_mem;
checkCudaErrors(cudaGetDeviceCount(&devCount));
if (DEBUG) {
printf("\nThere are %d devices.", devCount);
for (int i = 0; i < devCount; i++){
checkCudaErrors(cudaGetDeviceProperties(&devProp,i));
printf("\n\t Using device:\t\t\t%s\n", devProp.name);
printf("\n\t Max grid size:\t\t\t%d\n", devProp.maxGridSize[1]);
printf("\n\t Shared mem per block:\t\t%zu\n", devProp.sharedMemPerBlock);
}
}
checkCudaErrors(cudaSetDevice(device));
checkCudaErrors(cudaGetDeviceProperties(&devProp,device));
cudaMemGetInfo(&free_mem,&total_mem);
if(DEBUG) printf("\nDevice has %ld MB of total memory, of which %ld MB is available.\n", (long int) total_mem/(1000*1000), (long int) free_mem/(1000*1000));
//---------> Measurements
double transfer_in, transfer_out, FFT_time, FFT_external_time, FFT_multiple_time, FFT_multiple_reuse_time,cuFFT_time,FFT_multiple_reuse_registers_time;
double FFT_external_time_total, FFT_multiple_time_total;
GpuTimer timer; // if set before set device getting errors - invalid handle
//------------------------------------------------------------------------------
//---------> Shared memory kernel
transfer_in=0.0; transfer_out=0.0; FFT_time=0.0; FFT_external_time=0.0; FFT_multiple_time=0.0; FFT_multiple_reuse_time=0.0; cuFFT_time=0.0; FFT_multiple_reuse_registers_time=0.0;
FFT_external_time_total=0.0; FFT_multiple_time_total=0.0;
//---------> Spectra
int maxColumns,Sremainder,nRepeats,Spectra_to_allocate;
maxColumns=Max_columns_in_memory_shared(nSamples,nSpectra); // Maximum number of columns which fits into memory
nRepeats=(int) (nSpectra/maxColumns);
Sremainder=nSpectra-nRepeats*maxColumns;
Spectra_to_allocate=Sremainder;
if(nRepeats>0) Spectra_to_allocate=maxColumns;
if(nRepeats>0) {printf("Array is too big. Choose smaller number of FFTs\n"); exit(1);}
if(Spectra_to_allocate>maxColumns) {printf("Remainder is greater than maxColumns\n");exit(2);}
if (DEBUG) printf("Maximum number of spectra %d which is %e MB \n",maxColumns, (double) (maxColumns*nSamples*sizeof(float)/(1000.0*1000.0)) );
if (DEBUG) printf("nColumns is split into %d chunks of %d spectra and a remainder of %d spectra.\n",nRepeats,maxColumns,Sremainder);
if (DEBUG) printf("Number of columns to execute is %d.\n",Sremainder);
//---------> Channels
//if( nSamples%32!=0) {printf("Number of channels must be divisible by 32"); exit(2);}
//---------> Memory allocation
if (DEBUG) printf("Device memory allocation...: \t\t");
int input_size=nSamples*Spectra_to_allocate;
int output_size=nSamples*Spectra_to_allocate;
float2 *d_output;
float2 *d_input;
timer.Start();
checkCudaErrors(cudaMalloc((void **) &d_input, sizeof(float2)*input_size));
checkCudaErrors(cudaMalloc((void **) &d_output, sizeof(float2)*output_size));
timer.Stop();
if (DEBUG) printf("done in %g ms.\n", timer.Elapsed());
//---------> FFT calculation
for (int r = 0; r < nRepeats; r++){
}
if (Sremainder>0){
//-----> Copy chunk of input data to a device
if (DEBUG) printf("Transferring data into device memory...: \t\t");
timer.Start();
checkCudaErrors(cudaMemcpy(d_input, &input[nRepeats*output_size], (Sremainder)*nSamples*sizeof(float2), cudaMemcpyHostToDevice));
timer.Stop();
transfer_in+=timer.Elapsed();
if (DEBUG) printf("done in %g ms.\n", timer.Elapsed());
//-----> Compute FFT on the chunk
if(CUFFT){
//---------> FFT
cufftHandle plan;
cufftResult error;
error = cufftPlan1d(&plan, nSamples, CUFFT_C2C, Sremainder);
if (CUFFT_SUCCESS != error){
printf("CUFFT error: %d", error);
}
timer.Start();
cufftExecC2C(plan, (cufftComplex *)d_input, (cufftComplex *)d_output, CUFFT_FORWARD);
timer.Stop();
cuFFT_time += timer.Elapsed();
cufftDestroy(plan);
}
//------------------------------> 2way (normal)
if(MULTIPLE){
if (DEBUG) printf("\nApplying MULTIPLE FFT...: \t\t");
FFT_init();
FFT_multiple_time_total = 0;
for(int f=0; f<nRuns; f++){
checkCudaErrors(cudaMemcpy(d_input, &input[nRepeats*output_size], (Sremainder)*nSamples*sizeof(float2), cudaMemcpyHostToDevice));
FFT_multiple_benchmark(d_input, d_output, nSamples, Sremainder, &FFT_multiple_time_total);
}
FFT_multiple_time = FFT_multiple_time_total/nRuns;
if (DEBUG) printf("done in %g ms.\n", FFT_multiple_time);
}
if(EXTERNAL){
if (DEBUG) printf("\nApplying EXTERNAL FFT...: \t\t");
FFT_init();
FFT_external_time_total = 0;
for(int f=0; f<nRuns; f++){
checkCudaErrors(cudaMemcpy(d_input, &input[nRepeats*output_size], (Sremainder)*nSamples*sizeof(float2), cudaMemcpyHostToDevice));
FFT_external_benchmark(d_input, d_output, nSamples, Sremainder, &FFT_external_time_total);
}
FFT_external_time = FFT_external_time_total/nRuns;
if (DEBUG) printf("done in %g ms.\n", FFT_external_time);
}
//----------------------------------<
//-------------------------> 4way
if(MULTIPLE){
if (DEBUG) printf("\nApplying MULTIPLE FFT 4way...: \t\t");
FFT_init();
FFT_multiple_time_total = 0;
for(int f=0; f<nRuns; f++){
checkCudaErrors(cudaMemcpy(d_input, &input[nRepeats*output_size], (Sremainder)*nSamples*sizeof(float2), cudaMemcpyHostToDevice));
FFT_multiple_benchmark_4way(d_input, d_output, nSamples, Sremainder, &FFT_multiple_time_total);
}
FFT_multiple_time = FFT_multiple_time_total/nRuns;
if (DEBUG) printf("done in %g ms.\n", FFT_multiple_time);
}
if(EXTERNAL){
if (DEBUG) printf("\nApplying EXTERNAL FFT 4way...: \t\t");
FFT_init();
FFT_external_time_total = 0;
for(int f=0; f<nRuns; f++){
checkCudaErrors(cudaMemcpy(d_input, &input[nRepeats*output_size], (Sremainder)*nSamples*sizeof(float2), cudaMemcpyHostToDevice));
FFT_external_benchmark_4way(d_input, d_output, nSamples, Sremainder, &FFT_external_time_total);
}
FFT_external_time = FFT_external_time_total/nRuns;
if (DEBUG) printf("done in %g ms.\n", FFT_external_time);
}
//-----------------------------------<
//-----> Copy chunk of output data to host
if (DEBUG) printf("Transferring data to host...: \t\t");
timer.Start();
checkCudaErrors(cudaMemcpy( &output[nRepeats*output_size], d_output, (Sremainder)*nSamples*sizeof(float2), cudaMemcpyDeviceToHost));
timer.Stop();
transfer_out+=timer.Elapsed();
if (DEBUG) printf("done in %g ms.\n", timer.Elapsed());
}
//---------> error check -----
checkCudaErrors(cudaGetLastError());
//---------> Freeing allocated resources
checkCudaErrors(cudaFree(d_input));
checkCudaErrors(cudaFree(d_output));
if (DEBUG || WRITE) printf("nSpectra:%d; nSamples:%d cuFFT:%0.3f ms; FFT external:%0.3f ms; FFT multiple:%0.3f ms; \n",nSpectra,nSamples,cuFFT_time, FFT_external_time, FFT_multiple_time);
if (WRITE){
char str[200];
sprintf(str,"GPU-polyphase-precisioncontrol.dat");
if (DEBUG) printf("\n Write results into file...\t");
save_time(str, nSpectra,nSamples, cuFFT_time, FFT_time, FFT_external_time, FFT_multiple_time, FFT_multiple_reuse_time, FFT_multiple_reuse_registers_time, transfer_in, transfer_out);
if (DEBUG) printf("\t done.\n-------------------------------------\n");
}
return(0);
}
|
63d6c6934ff3fdff4920e1d8ce3173eeaf63d08e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <transform.h>
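// Element-wise natural logarithm over a strided double array: op() defines the scalar operation and the
// transform() helper from the included transform.h applies it across the n input elements (stride incy).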
__device__ double op(double d1,double *params) {
return log(d1);
}
extern "C"
__global__ void log_strided_double(int n,int idx,double *dy,int incy,double *params,double *result) {
transform(n,idx,dy,incy,params,result);
}
|
63d6c6934ff3fdff4920e1d8ce3173eeaf63d08e.cu
|
#include <transform.h>
__device__ double op(double d1,double *params) {
return log(d1);
}
extern "C"
__global__ void log_strided_double(int n,int idx,double *dy,int incy,double *params,double *result) {
transform(n,idx,dy,incy,params,result);
}
|
113ba4f7aded7a55877d3348dde9616b47b478f4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "complex_mul.h"
#include "complex_mul_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/math/binary_elementwise_ops.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
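// Complex product of (a0 + i*a1) and (b0 + i*b1); when is_conj is set the second operand is conjugated
// first, i.e. the result becomes (a0*b0 + a1*b1) + i*(a1*b0 - a0*b1).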
template <typename T>
__device__ __inline__ void _ComplexMul(T a0, T a1, T b0, T b1, T* output_data, bool is_conj) {
if (is_conj) {
T out_real = a0 * b0 + a1 * b1;
T out_imag = a1 * b0 - a0 * b1;
output_data[0] = out_real;
output_data[1] = out_imag;
} else {
T out_real = a0 * b0 - a1 * b1;
T out_imag = a0 * b1 + a1 * b0;
output_data[0] = out_real;
output_data[1] = out_imag;
}
};
// broadcast by computing output coordinate from offset, using fast_divmod
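// Each complex value is stored as two consecutive T elements, so the kernel walks the output with
// stride two: N counts scalar elements, the grid covers N/2 complex products, and operand reads are
// wrapped modulo lhs_size and rhs_size.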
template <typename T, bool lhs_need_compute, bool rhs_need_compute, int NumThreadsPerBlock, int NumElementsPerThread>
__global__ void _ElementWiseWithStrideTwo(
int32_t output_rank,
const TArray<int64_t> lhs_padded_strides,
const T* lhs_data,
const TArray<int64_t> rhs_padded_strides,
const T* rhs_data,
const TArray<fast_divmod> fdm_output_strides,
T* output_data,
CUDA_LONG N,
int64_t lhs_size,
int64_t rhs_size,
bool is_conj) {
CUDA_LONG start = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x;
T a[NumElementsPerThread];
T b[NumElementsPerThread];
T c[NumElementsPerThread];
T d[NumElementsPerThread];
CUDA_LONG id = start;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N / 2) {
CUDA_LONG lhs_index = (lhs_need_compute ? 0 : id);
CUDA_LONG rhs_index = (rhs_need_compute ? 0 : id);
// compute indexes with broadcasting rules: https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md
CUDA_LONG offset = id;
#pragma unroll
for (auto dim = 0; dim < fdm_output_strides.Capacity(); dim++) {
if (dim >= output_rank) {
break;
}
int q, r;
fdm_output_strides[dim].divmod(offset, q, r);
if (lhs_need_compute) {
lhs_index += static_cast<int>(lhs_padded_strides[dim]) * q;
}
if (rhs_need_compute) {
rhs_index += static_cast<int>(rhs_padded_strides[dim]) * q;
}
offset = r;
}
a[i] = lhs_data[(2 * lhs_index) % lhs_size];
b[i] = lhs_data[(2 * lhs_index + 1) % lhs_size];
c[i] = rhs_data[(2 * rhs_index) % rhs_size];
d[i] = rhs_data[(2 * rhs_index + 1) % rhs_size];
id += NumThreadsPerBlock;
}
}
id = start;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N / 2) {
_ComplexMul(a[i], b[i], c[i], d[i], &output_data[2 * id], is_conj);
id += NumThreadsPerBlock;
}
}
};
template <typename T>
void ComplexMul_Impl(
int32_t output_rank_or_simple_broadcast,
const TArray<int64_t>* lhs_padded_strides,
const T* lhs_data,
const TArray<int64_t>* rhs_padded_strides,
const T* rhs_data,
const TArray<onnxruntime::cuda::fast_divmod>* fdm_output_strides,
const onnxruntime::cuda::fast_divmod& fdm_H,
const onnxruntime::cuda::fast_divmod& fdm_C,
T* output_data,
int64_t count,
int64_t lhs_size,
int64_t rhs_size,
bool is_conj) {
if (count == 0) // special case where there's a dim value of 0 in the output shape
return;
int blocksPerGrid = static_cast<int>(CeilDiv(count, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread));
CUDA_LONG N = static_cast<CUDA_LONG>(count);
if (lhs_padded_strides && rhs_padded_strides && lhs_padded_strides->Size() && rhs_padded_strides->Size())
hipLaunchKernelGGL(( _ElementWiseWithStrideTwo<T, true, true, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
output_rank_or_simple_broadcast,
*lhs_padded_strides,
lhs_data,
*rhs_padded_strides,
rhs_data,
*fdm_output_strides,
output_data,
N,
lhs_size,
rhs_size,
is_conj);
else if (lhs_padded_strides && lhs_padded_strides->Size())
hipLaunchKernelGGL(( _ElementWiseWithStrideTwo<T, true, false, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
output_rank_or_simple_broadcast,
*lhs_padded_strides,
lhs_data,
*rhs_padded_strides,
rhs_data,
*fdm_output_strides,
output_data,
N,
lhs_size,
rhs_size,
is_conj);
else
hipLaunchKernelGGL(( _ElementWiseWithStrideTwo<T, false, true, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
output_rank_or_simple_broadcast,
*lhs_padded_strides,
lhs_data,
*rhs_padded_strides,
rhs_data,
*fdm_output_strides,
output_data,
N,
lhs_size,
rhs_size,
is_conj);
};
#define SPECIALIZE_STACKEDCOMPLEXMUL_IMPL(T) \
template void ComplexMul_Impl<T>( \
int32_t output_rank_or_simple_broadcast, \
const TArray<int64_t>* lhs_padded_strides, \
const T* lhs_data, \
const TArray<int64_t>* rhs_padded_strides, \
const T* rhs_data, \
const TArray<onnxruntime::cuda::fast_divmod>* fdm_output_strides, \
const onnxruntime::cuda::fast_divmod& fdm_H, \
const onnxruntime::cuda::fast_divmod& fdm_C, \
T* output_data, \
int64_t count, \
int64_t lhs_size, \
int64_t rhs_size, \
bool is_conj);
SPECIALIZE_STACKEDCOMPLEXMUL_IMPL(float)
SPECIALIZE_STACKEDCOMPLEXMUL_IMPL(half)
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
113ba4f7aded7a55877d3348dde9616b47b478f4.cu
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "complex_mul.h"
#include "complex_mul_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/math/binary_elementwise_ops.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
template <typename T>
__device__ __inline__ void _ComplexMul(T a0, T a1, T b0, T b1, T* output_data, bool is_conj) {
if (is_conj) {
T out_real = a0 * b0 + a1 * b1;
T out_imag = a1 * b0 - a0 * b1;
output_data[0] = out_real;
output_data[1] = out_imag;
} else {
T out_real = a0 * b0 - a1 * b1;
T out_imag = a0 * b1 + a1 * b0;
output_data[0] = out_real;
output_data[1] = out_imag;
}
};
// broadcast by computing output coordinate from offset, using fast_divmod
template <typename T, bool lhs_need_compute, bool rhs_need_compute, int NumThreadsPerBlock, int NumElementsPerThread>
__global__ void _ElementWiseWithStrideTwo(
int32_t output_rank,
const TArray<int64_t> lhs_padded_strides,
const T* lhs_data,
const TArray<int64_t> rhs_padded_strides,
const T* rhs_data,
const TArray<fast_divmod> fdm_output_strides,
T* output_data,
CUDA_LONG N,
int64_t lhs_size,
int64_t rhs_size,
bool is_conj) {
CUDA_LONG start = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x;
T a[NumElementsPerThread];
T b[NumElementsPerThread];
T c[NumElementsPerThread];
T d[NumElementsPerThread];
CUDA_LONG id = start;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N / 2) {
CUDA_LONG lhs_index = (lhs_need_compute ? 0 : id);
CUDA_LONG rhs_index = (rhs_need_compute ? 0 : id);
// compute indexes with broadcasting rules: https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md
CUDA_LONG offset = id;
#pragma unroll
for (auto dim = 0; dim < fdm_output_strides.Capacity(); dim++) {
if (dim >= output_rank) {
break;
}
int q, r;
fdm_output_strides[dim].divmod(offset, q, r);
if (lhs_need_compute) {
lhs_index += static_cast<int>(lhs_padded_strides[dim]) * q;
}
if (rhs_need_compute) {
rhs_index += static_cast<int>(rhs_padded_strides[dim]) * q;
}
offset = r;
}
a[i] = lhs_data[(2 * lhs_index) % lhs_size];
b[i] = lhs_data[(2 * lhs_index + 1) % lhs_size];
c[i] = rhs_data[(2 * rhs_index) % rhs_size];
d[i] = rhs_data[(2 * rhs_index + 1) % rhs_size];
id += NumThreadsPerBlock;
}
}
id = start;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N / 2) {
_ComplexMul(a[i], b[i], c[i], d[i], &output_data[2 * id], is_conj);
id += NumThreadsPerBlock;
}
}
};
template <typename T>
void ComplexMul_Impl(
int32_t output_rank_or_simple_broadcast,
const TArray<int64_t>* lhs_padded_strides,
const T* lhs_data,
const TArray<int64_t>* rhs_padded_strides,
const T* rhs_data,
const TArray<onnxruntime::cuda::fast_divmod>* fdm_output_strides,
const onnxruntime::cuda::fast_divmod& fdm_H,
const onnxruntime::cuda::fast_divmod& fdm_C,
T* output_data,
int64_t count,
int64_t lhs_size,
int64_t rhs_size,
bool is_conj) {
if (count == 0) // special case where there's a dim value of 0 in the output shape
return;
int blocksPerGrid = static_cast<int>(CeilDiv(count, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread));
CUDA_LONG N = static_cast<CUDA_LONG>(count);
if (lhs_padded_strides && rhs_padded_strides && lhs_padded_strides->Size() && rhs_padded_strides->Size())
_ElementWiseWithStrideTwo<T, true, true, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
output_rank_or_simple_broadcast,
*lhs_padded_strides,
lhs_data,
*rhs_padded_strides,
rhs_data,
*fdm_output_strides,
output_data,
N,
lhs_size,
rhs_size,
is_conj);
else if (lhs_padded_strides && lhs_padded_strides->Size())
_ElementWiseWithStrideTwo<T, true, false, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
output_rank_or_simple_broadcast,
*lhs_padded_strides,
lhs_data,
*rhs_padded_strides,
rhs_data,
*fdm_output_strides,
output_data,
N,
lhs_size,
rhs_size,
is_conj);
else
_ElementWiseWithStrideTwo<T, false, true, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
output_rank_or_simple_broadcast,
*lhs_padded_strides,
lhs_data,
*rhs_padded_strides,
rhs_data,
*fdm_output_strides,
output_data,
N,
lhs_size,
rhs_size,
is_conj);
};
#define SPECIALIZE_STACKEDCOMPLEXMUL_IMPL(T) \
template void ComplexMul_Impl<T>( \
int32_t output_rank_or_simple_broadcast, \
const TArray<int64_t>* lhs_padded_strides, \
const T* lhs_data, \
const TArray<int64_t>* rhs_padded_strides, \
const T* rhs_data, \
const TArray<onnxruntime::cuda::fast_divmod>* fdm_output_strides, \
const onnxruntime::cuda::fast_divmod& fdm_H, \
const onnxruntime::cuda::fast_divmod& fdm_C, \
T* output_data, \
int64_t count, \
int64_t lhs_size, \
int64_t rhs_size, \
bool is_conj);
SPECIALIZE_STACKEDCOMPLEXMUL_IMPL(float)
SPECIALIZE_STACKEDCOMPLEXMUL_IMPL(half)
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
98fbe771528fb8fefc0be77201a5738901ac7fe3.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/unary.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
namespace cudf {
namespace detail {
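// Dispatched per column type: floating-point columns are mapped through the supplied NaN predicate
// (null-aware via pair iterators) into a boolean column; any other type raises CUDF_FAIL.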
struct nan_dispatcher {
template <typename T, typename Predicate>
std::enable_if_t<std::is_floating_point<T>::value, std::unique_ptr<column>> operator()(
cudf::column_view const& input,
Predicate predicate,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
auto input_device_view = column_device_view::create(input);
if (input.has_nulls()) {
auto input_pair_iterator = make_pair_iterator<T, true>(*input_device_view);
return true_if(
input_pair_iterator, input_pair_iterator + input.size(), input.size(), predicate, mr);
} else {
auto input_pair_iterator = make_pair_iterator<T, false>(*input_device_view);
return true_if(
input_pair_iterator, input_pair_iterator + input.size(), input.size(), predicate, mr);
}
}
template <typename T, typename Predicate>
std::enable_if_t<!std::is_floating_point<T>::value, std::unique_ptr<column>> operator()(
cudf::column_view const& input,
Predicate predicate,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_FAIL("NAN is not supported in a Non-floating point type column");
}
};
std::unique_ptr<column> is_nan(cudf::column_view const& input,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
auto predicate = [] __device__(auto element_validity_pair) {
return element_validity_pair.second and std::isnan(element_validity_pair.first);
};
return cudf::type_dispatcher(input.type(), nan_dispatcher{}, input, predicate, mr, stream);
}
std::unique_ptr<column> is_not_nan(cudf::column_view const& input,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
auto predicate = [] __device__(auto element_validity_pair) {
return !element_validity_pair.second or !std::isnan(element_validity_pair.first);
};
return cudf::type_dispatcher(input.type(), nan_dispatcher{}, input, predicate, mr, stream);
}
} // namespace detail
std::unique_ptr<column> is_nan(cudf::column_view const& input, rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::is_nan(input, mr);
}
std::unique_ptr<column> is_not_nan(cudf::column_view const& input,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::is_not_nan(input, mr);
}
} // namespace cudf
|
98fbe771528fb8fefc0be77201a5738901ac7fe3.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/unary.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
namespace cudf {
namespace detail {
struct nan_dispatcher {
template <typename T, typename Predicate>
std::enable_if_t<std::is_floating_point<T>::value, std::unique_ptr<column>> operator()(
cudf::column_view const& input,
Predicate predicate,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
auto input_device_view = column_device_view::create(input);
if (input.has_nulls()) {
auto input_pair_iterator = make_pair_iterator<T, true>(*input_device_view);
return true_if(
input_pair_iterator, input_pair_iterator + input.size(), input.size(), predicate, mr);
} else {
auto input_pair_iterator = make_pair_iterator<T, false>(*input_device_view);
return true_if(
input_pair_iterator, input_pair_iterator + input.size(), input.size(), predicate, mr);
}
}
template <typename T, typename Predicate>
std::enable_if_t<!std::is_floating_point<T>::value, std::unique_ptr<column>> operator()(
cudf::column_view const& input,
Predicate predicate,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_FAIL("NAN is not supported in a Non-floating point type column");
}
};
std::unique_ptr<column> is_nan(cudf::column_view const& input,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
auto predicate = [] __device__(auto element_validity_pair) {
return element_validity_pair.second and std::isnan(element_validity_pair.first);
};
return cudf::type_dispatcher(input.type(), nan_dispatcher{}, input, predicate, mr, stream);
}
std::unique_ptr<column> is_not_nan(cudf::column_view const& input,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
auto predicate = [] __device__(auto element_validity_pair) {
return !element_validity_pair.second or !std::isnan(element_validity_pair.first);
};
return cudf::type_dispatcher(input.type(), nan_dispatcher{}, input, predicate, mr, stream);
}
} // namespace detail
std::unique_ptr<column> is_nan(cudf::column_view const& input, rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::is_nan(input, mr);
}
std::unique_ptr<column> is_not_nan(cudf::column_view const& input,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::is_not_nan(input, mr);
}
} // namespace cudf
|
d81258566de8a821ad0c7778cc7f7b2f2aa303c2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/copying.hpp>
#include <cudf/detail/copy_if_else.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/type_lists.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/scalar/scalar.hpp>
template <typename T>
struct CopyTest : public cudf::test::BaseFixture {
};
TYPED_TEST_CASE(CopyTest, cudf::test::FixedWidthTypesWithoutFixedPoint);
#define wrapper cudf::test::fixed_width_column_wrapper
TYPED_TEST(CopyTest, CopyIfElseTestShort)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 0, 0};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, {1, 1, 1, 1});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, {1, 1, 1, 1});
wrapper<T, int32_t> expected_w({5, 6, 6, 6});
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseTestManyNulls)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{{1, 0, 0, 0, 0, 0, 1}, {1, 1, 1, 1, 1, 1, 0}};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5, 5, 5, 5}, {1, 1, 1, 1, 1, 1, 1});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6, 6, 6, 6}, {1, 0, 0, 0, 0, 0, 1});
wrapper<T, int32_t> expected_w({5, 6, 6, 6, 6, 6, 6}, {1, 0, 0, 0, 0, 0, 1});
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
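// Test helper: for fixed-width types this launches copy_if_else_kernel on a deliberately tiny grid
// (a single block of 32 threads) so each thread covers more than one element; the non-fixed-width
// overload should never be reached.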
struct copy_if_else_tiny_grid_functor {
template <typename T, typename Filter, std::enable_if_t<cudf::is_fixed_width<T>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& lhs,
cudf::column_view const& rhs,
Filter filter,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
// output
std::unique_ptr<cudf::column> out =
cudf::allocate_like(lhs, lhs.size(), cudf::mask_allocation_policy::RETAIN, mr);
// device views
auto lhs_view = cudf::column_device_view::create(lhs);
auto rhs_view = cudf::column_device_view::create(rhs);
auto lhs_iter = cudf::detail::make_pair_iterator<T>(*lhs_view);
auto rhs_iter = cudf::detail::make_pair_iterator<T>(*rhs_view);
auto out_dv = cudf::mutable_column_device_view::create(*out);
// call the kernel with an artificially small grid
hipLaunchKernelGGL(( cudf::detail::copy_if_else_kernel<32, T, decltype(lhs_iter), decltype(rhs_iter), Filter, false>)
, dim3(1), dim3(32), 0, stream, lhs_iter, rhs_iter, filter, *out_dv, nullptr);
return out;
}
template <typename T, typename Filter, std::enable_if_t<not cudf::is_fixed_width<T>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& lhs,
cudf::column_view const& rhs,
Filter filter,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_FAIL("Unexpected test execution");
}
};
std::unique_ptr<cudf::column> tiny_grid_launch(cudf::column_view const& lhs,
cudf::column_view const& rhs,
cudf::column_view const& boolean_mask)
{
auto bool_mask_device_p = cudf::column_device_view::create(boolean_mask);
cudf::column_device_view bool_mask_device = *bool_mask_device_p;
auto filter = [bool_mask_device] __device__(cudf::size_type i) {
return bool_mask_device.element<bool>(i);
};
return cudf::type_dispatcher(lhs.type(),
copy_if_else_tiny_grid_functor{},
lhs,
rhs,
filter,
rmm::mr::get_current_device_resource(),
(hipStream_t)0);
}
TYPED_TEST(CopyTest, CopyIfElseTestTinyGrid)
{
using T = TypeParam;
// make sure we span at least 2 warps
int num_els = 64;
bool mask[] = {1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0,
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
wrapper<T, int32_t> lhs_w({5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6});
wrapper<T, int32_t> expected_w({5, 6, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6,
6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5});
auto out = tiny_grid_launch(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseTestLong)
{
using T = TypeParam;
// make sure we span at least 2 warps
int num_els = 64;
bool mask[] = {1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0,
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
bool lhs_v[] = {1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5},
lhs_v);
bool rhs_v[] = {1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
wrapper<T, int32_t> rhs_w({6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6},
rhs_v);
bool exp_v[] = {1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
wrapper<T, int32_t> expected_w({5, 6, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6,
6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5},
exp_v);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseTestEmptyInputs)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{};
wrapper<T> lhs_w{};
wrapper<T> rhs_w{};
wrapper<T> expected_w{};
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, {1, 1, 1, 0});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, {1, 0, 1, 1});
wrapper<T, int32_t> expected_w({5, 6, 5, 5}, {1, 0, 1, 0});
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity2)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, {1, 1, 1, 0});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6});
wrapper<T, int32_t> expected_w({5, 6, 5, 5}, {1, 1, 1, 0});
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity3)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, {1, 0, 1, 1});
wrapper<T, int32_t> expected_w({5, 6, 5, 5}, {1, 0, 1, 1});
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity4)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6});
wrapper<T, int32_t> expected_w({5, 6, 5, 5});
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseBadInputLength)
{
using T = TypeParam;
// mask length mismatch
{
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6});
EXPECT_THROW(cudf::copy_if_else(lhs_w, rhs_w, mask_w), cudf::logic_error);
}
// column length mismatch
{
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 1, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6});
EXPECT_THROW(cudf::copy_if_else(lhs_w, rhs_w, mask_w), cudf::logic_error);
}
}
template <typename T>
struct CopyTestNumeric : public cudf::test::BaseFixture {
};
TYPED_TEST_CASE(CopyTestNumeric, cudf::test::NumericTypes);
TYPED_TEST(CopyTestNumeric, CopyIfElseTestScalarColumn)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = {1, 0, 0, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
cudf::numeric_scalar<T> lhs_w(5);
const auto rhs = cudf::test::make_type_param_vector<T>({6, 6, 6, 6});
bool rhs_v[] = {1, 0, 1, 1};
wrapper<T> rhs_w(rhs.begin(), rhs.end(), rhs_v);
const auto expected = cudf::test::make_type_param_vector<T>({5, 6, 6, 5});
wrapper<T> expected_w(expected.begin(), expected.end(), rhs_v);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTestNumeric, CopyIfElseTestColumnScalar)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = {1, 0, 0, 1};
bool mask_v[] = {1, 1, 1, 0};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els, mask_v);
const auto lhs = cudf::test::make_type_param_vector<T>({5, 5, 5, 5});
bool lhs_v[] = {0, 1, 1, 1};
wrapper<T> lhs_w(lhs.begin(), lhs.end(), lhs_v);
cudf::numeric_scalar<T> rhs_w(6);
const auto expected = cudf::test::make_type_param_vector<T>({5, 6, 6, 6});
wrapper<T> expected_w(expected.begin(), expected.end(), lhs_v);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTestNumeric, CopyIfElseTestScalarScalar)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = {1, 0, 0, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
cudf::numeric_scalar<T> lhs_w(5);
cudf::numeric_scalar<T> rhs_w(6, false);
const auto expected = cudf::test::make_type_param_vector<T>({5, 6, 6, 5});
wrapper<T> expected_w(expected.begin(), expected.end(), mask);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
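// Builds either a timestamp_scalar or a duration_scalar, depending on the chrono type under test.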
template <typename T>
struct create_chrono_scalar {
template <typename ChronoT = T, typename... Args>
typename std::enable_if_t<
std::is_same<typename cudf::is_timestamp_t<ChronoT>::type, std::true_type>::value,
cudf::timestamp_scalar<ChronoT>>
operator()(Args&&... args) const
{
return cudf::timestamp_scalar<T>(std::forward<Args>(args)...);
}
template <typename ChronoT = T, typename... Args>
typename std::enable_if_t<
std::is_same<typename cudf::is_duration_t<ChronoT>::type, std::true_type>::value,
cudf::duration_scalar<ChronoT>>
operator()(Args&&... args) const
{
return cudf::duration_scalar<T>(std::forward<Args>(args)...);
}
};
template <typename T>
struct CopyTestChrono : public cudf::test::BaseFixture {
};
TYPED_TEST_CASE(CopyTestChrono, cudf::test::ChronoTypes);
TYPED_TEST(CopyTestChrono, CopyIfElseTestScalarColumn)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = {1, 0, 0, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
auto lhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(5), true);
bool rhs_v[] = {1, 0, 1, 1};
wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, rhs_v);
wrapper<T, int32_t> expected_w({5, 6, 6, 5}, rhs_v);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTestChrono, CopyIfElseTestColumnScalar)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = {1, 0, 0, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
bool lhs_v[] = {0, 1, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, lhs_v);
auto rhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(6), true);
wrapper<T, int32_t> expected_w({5, 6, 6, 5}, lhs_v);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTestChrono, CopyIfElseTestScalarScalar)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = {1, 0, 0, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
auto lhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(5), true);
auto rhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(6), false);
wrapper<T, int32_t> expected_w({5, 6, 6, 5}, mask);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
struct CopyTestUntyped : public cudf::test::BaseFixture {
};
TEST_F(CopyTestUntyped, CopyIfElseTypeMismatch)
{
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 1, 1, 1};
wrapper<float> lhs_w{5, 5, 5, 5};
wrapper<int32_t> rhs_w{6, 6, 6, 6};
EXPECT_THROW(cudf::copy_if_else(lhs_w, rhs_w, mask_w), cudf::logic_error);
}
struct StringsCopyIfElseTest : public cudf::test::BaseFixture {
};
TEST_F(StringsCopyIfElseTest, CopyIfElse)
{
auto valids = cudf::test::make_counting_transform_iterator(
0, [](auto i) { return i % 2 == 0 ? true : false; });
std::vector<const char*> h_strings1{"eee", "bb", "", "aa", "bbb", "ééé"};
cudf::test::strings_column_wrapper strings1(h_strings1.begin(), h_strings1.end(), valids);
std::vector<const char*> h_strings2{"zz", "", "yyy", "w", "ééé", "ooo"};
cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), valids);
bool mask[] = {1, 1, 0, 1, 0, 1};
bool mask_v[] = {1, 1, 1, 1, 1, 0};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 6, mask_v);
auto results = cudf::copy_if_else(strings1, strings2, mask_w);
std::vector<const char*> h_expected;
for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(h_strings1.size()); ++idx) {
if (mask[idx] and mask_v[idx])
h_expected.push_back(h_strings1[idx]);
else
h_expected.push_back(h_strings2[idx]);
}
cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
TEST_F(StringsCopyIfElseTest, CopyIfElseScalarColumn)
{
auto valids = cudf::test::make_counting_transform_iterator(
0, [](auto i) { return i % 2 == 0 ? true : false; });
std::vector<const char*> h_string1{"eee"};
cudf::string_scalar strings1{h_string1[0]};
std::vector<const char*> h_strings2{"zz", "", "yyy", "w", "ééé", "ooo"};
cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), valids);
bool mask[] = {1, 0, 1, 0, 1, 0};
bool mask_v[] = {1, 1, 1, 1, 1, 0};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 6, mask_v);
auto results = cudf::copy_if_else(strings1, strings2, mask_w);
std::vector<const char*> h_expected;
for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(h_strings2.size()); ++idx) {
if (mask[idx] and mask_v[idx]) {
h_expected.push_back(h_string1[0]);
} else {
h_expected.push_back(h_strings2[idx]);
}
}
cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
TEST_F(StringsCopyIfElseTest, CopyIfElseColumnScalar)
{
auto valids = cudf::test::make_counting_transform_iterator(
0, [](auto i) { return i % 2 == 0 ? true : false; });
std::vector<const char*> h_string1{"eee"};
cudf::string_scalar strings1{h_string1[0]};
std::vector<const char*> h_strings2{"zz", "", "yyy", "w", "ééé", "ooo"};
cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), valids);
bool mask[] = {0, 1, 1, 1, 0, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 6);
auto results = cudf::copy_if_else(strings2, strings1, mask_w);
std::vector<const char*> h_expected;
for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(h_strings2.size()); ++idx) {
if (mask[idx]) {
h_expected.push_back(h_strings2[idx]);
} else {
h_expected.push_back(h_string1[0]);
}
}
cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
TEST_F(StringsCopyIfElseTest, CopyIfElseScalarScalar)
{
auto valids = cudf::test::make_counting_transform_iterator(
0, [](auto i) { return i % 2 == 0 ? true : false; });
std::vector<const char*> h_string1{"eee"};
cudf::string_scalar string1{h_string1[0]};
std::vector<const char*> h_string2{"aaa"};
cudf::string_scalar string2{h_string2[0], false};
constexpr cudf::size_type mask_size = 6;
bool mask[] = {1, 0, 1, 0, 1, 0};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + mask_size);
auto results = cudf::copy_if_else(string1, string2, mask_w);
std::vector<const char*> h_expected;
for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(mask_size); ++idx) {
if (mask[idx]) {
h_expected.push_back(h_string1[0]);
} else {
h_expected.push_back(h_string2[0]);
}
}
cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
|
d81258566de8a821ad0c7778cc7f7b2f2aa303c2.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/copying.hpp>
#include <cudf/detail/copy_if_else.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/type_lists.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/scalar/scalar.hpp>
template <typename T>
struct CopyTest : public cudf::test::BaseFixture {
};
TYPED_TEST_CASE(CopyTest, cudf::test::FixedWidthTypesWithoutFixedPoint);
#define wrapper cudf::test::fixed_width_column_wrapper
TYPED_TEST(CopyTest, CopyIfElseTestShort)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 0, 0};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, {1, 1, 1, 1});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, {1, 1, 1, 1});
wrapper<T, int32_t> expected_w({5, 6, 6, 6});
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseTestManyNulls)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{{1, 0, 0, 0, 0, 0, 1}, {1, 1, 1, 1, 1, 1, 0}};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5, 5, 5, 5}, {1, 1, 1, 1, 1, 1, 1});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6, 6, 6, 6}, {1, 0, 0, 0, 0, 0, 1});
wrapper<T, int32_t> expected_w({5, 6, 6, 6, 6, 6, 6}, {1, 0, 0, 0, 0, 0, 1});
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
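// Test helper functor that launches copy_if_else_kernel with a deliberately undersized grid (one block of 32 threads), checking that the kernel copes with launches smaller than the input.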
struct copy_if_else_tiny_grid_functor {
template <typename T, typename Filter, std::enable_if_t<cudf::is_fixed_width<T>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& lhs,
cudf::column_view const& rhs,
Filter filter,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
// output
std::unique_ptr<cudf::column> out =
cudf::allocate_like(lhs, lhs.size(), cudf::mask_allocation_policy::RETAIN, mr);
// device views
auto lhs_view = cudf::column_device_view::create(lhs);
auto rhs_view = cudf::column_device_view::create(rhs);
auto lhs_iter = cudf::detail::make_pair_iterator<T>(*lhs_view);
auto rhs_iter = cudf::detail::make_pair_iterator<T>(*rhs_view);
auto out_dv = cudf::mutable_column_device_view::create(*out);
// call the kernel with an artificially small grid
cudf::detail::copy_if_else_kernel<32, T, decltype(lhs_iter), decltype(rhs_iter), Filter, false>
<<<1, 32, 0, stream>>>(lhs_iter, rhs_iter, filter, *out_dv, nullptr);
return out;
}
template <typename T, typename Filter, std::enable_if_t<not cudf::is_fixed_width<T>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& lhs,
cudf::column_view const& rhs,
Filter filter,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_FAIL("Unexpected test execution");
}
};
std::unique_ptr<cudf::column> tiny_grid_launch(cudf::column_view const& lhs,
cudf::column_view const& rhs,
cudf::column_view const& boolean_mask)
{
auto bool_mask_device_p = cudf::column_device_view::create(boolean_mask);
cudf::column_device_view bool_mask_device = *bool_mask_device_p;
auto filter = [bool_mask_device] __device__(cudf::size_type i) {
return bool_mask_device.element<bool>(i);
};
return cudf::type_dispatcher(lhs.type(),
copy_if_else_tiny_grid_functor{},
lhs,
rhs,
filter,
rmm::mr::get_current_device_resource(),
(cudaStream_t)0);
}
TYPED_TEST(CopyTest, CopyIfElseTestTinyGrid)
{
using T = TypeParam;
// make sure we span at least 2 warps
int num_els = 64;
bool mask[] = {1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0,
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
wrapper<T, int32_t> lhs_w({5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6});
wrapper<T, int32_t> expected_w({5, 6, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6,
6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5});
auto out = tiny_grid_launch(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseTestLong)
{
using T = TypeParam;
// make sure we span at least 2 warps
int num_els = 64;
bool mask[] = {1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0,
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
bool lhs_v[] = {1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5},
lhs_v);
bool rhs_v[] = {1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
wrapper<T, int32_t> rhs_w({6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6},
rhs_v);
bool exp_v[] = {1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
wrapper<T, int32_t> expected_w({5, 6, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6,
6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5},
exp_v);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseTestEmptyInputs)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{};
wrapper<T> lhs_w{};
wrapper<T> rhs_w{};
wrapper<T> expected_w{};
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, {1, 1, 1, 0});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, {1, 0, 1, 1});
wrapper<T, int32_t> expected_w({5, 6, 5, 5}, {1, 0, 1, 0});
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity2)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, {1, 1, 1, 0});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6});
wrapper<T, int32_t> expected_w({5, 6, 5, 5}, {1, 1, 1, 0});
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity3)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, {1, 0, 1, 1});
wrapper<T, int32_t> expected_w({5, 6, 5, 5}, {1, 0, 1, 1});
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity4)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6});
wrapper<T, int32_t> expected_w({5, 6, 5, 5});
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseBadInputLength)
{
using T = TypeParam;
// mask length mismatch
{
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6});
EXPECT_THROW(cudf::copy_if_else(lhs_w, rhs_w, mask_w), cudf::logic_error);
}
// column length mismatch
{
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 1, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6});
EXPECT_THROW(cudf::copy_if_else(lhs_w, rhs_w, mask_w), cudf::logic_error);
}
}
template <typename T>
struct CopyTestNumeric : public cudf::test::BaseFixture {
};
TYPED_TEST_CASE(CopyTestNumeric, cudf::test::NumericTypes);
TYPED_TEST(CopyTestNumeric, CopyIfElseTestScalarColumn)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = {1, 0, 0, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
cudf::numeric_scalar<T> lhs_w(5);
const auto rhs = cudf::test::make_type_param_vector<T>({6, 6, 6, 6});
bool rhs_v[] = {1, 0, 1, 1};
wrapper<T> rhs_w(rhs.begin(), rhs.end(), rhs_v);
const auto expected = cudf::test::make_type_param_vector<T>({5, 6, 6, 5});
wrapper<T> expected_w(expected.begin(), expected.end(), rhs_v);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTestNumeric, CopyIfElseTestColumnScalar)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = {1, 0, 0, 1};
bool mask_v[] = {1, 1, 1, 0};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els, mask_v);
const auto lhs = cudf::test::make_type_param_vector<T>({5, 5, 5, 5});
bool lhs_v[] = {0, 1, 1, 1};
wrapper<T> lhs_w(lhs.begin(), lhs.end(), lhs_v);
cudf::numeric_scalar<T> rhs_w(6);
const auto expected = cudf::test::make_type_param_vector<T>({5, 6, 6, 6});
wrapper<T> expected_w(expected.begin(), expected.end(), lhs_v);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTestNumeric, CopyIfElseTestScalarScalar)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = {1, 0, 0, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
cudf::numeric_scalar<T> lhs_w(5);
cudf::numeric_scalar<T> rhs_w(6, false);
const auto expected = cudf::test::make_type_param_vector<T>({5, 6, 6, 5});
wrapper<T> expected_w(expected.begin(), expected.end(), mask);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
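// Constructs a timestamp_scalar or a duration_scalar according to whether the chrono type under test is a timestamp or a duration.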
template <typename T>
struct create_chrono_scalar {
template <typename ChronoT = T, typename... Args>
typename std::enable_if_t<
std::is_same<typename cudf::is_timestamp_t<ChronoT>::type, std::true_type>::value,
cudf::timestamp_scalar<ChronoT>>
operator()(Args&&... args) const
{
return cudf::timestamp_scalar<T>(std::forward<Args>(args)...);
}
template <typename ChronoT = T, typename... Args>
typename std::enable_if_t<
std::is_same<typename cudf::is_duration_t<ChronoT>::type, std::true_type>::value,
cudf::duration_scalar<ChronoT>>
operator()(Args&&... args) const
{
return cudf::duration_scalar<T>(std::forward<Args>(args)...);
}
};
template <typename T>
struct CopyTestChrono : public cudf::test::BaseFixture {
};
TYPED_TEST_CASE(CopyTestChrono, cudf::test::ChronoTypes);
TYPED_TEST(CopyTestChrono, CopyIfElseTestScalarColumn)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = {1, 0, 0, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
auto lhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(5), true);
bool rhs_v[] = {1, 0, 1, 1};
wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, rhs_v);
wrapper<T, int32_t> expected_w({5, 6, 6, 5}, rhs_v);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTestChrono, CopyIfElseTestColumnScalar)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = {1, 0, 0, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
bool lhs_v[] = {0, 1, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, lhs_v);
auto rhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(6), true);
wrapper<T, int32_t> expected_w({5, 6, 6, 5}, lhs_v);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTestChrono, CopyIfElseTestScalarScalar)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = {1, 0, 0, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
auto lhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(5), true);
auto rhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(6), false);
wrapper<T, int32_t> expected_w({5, 6, 6, 5}, mask);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
struct CopyTestUntyped : public cudf::test::BaseFixture {
};
TEST_F(CopyTestUntyped, CopyIfElseTypeMismatch)
{
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 1, 1, 1};
wrapper<float> lhs_w{5, 5, 5, 5};
wrapper<int32_t> rhs_w{6, 6, 6, 6};
EXPECT_THROW(cudf::copy_if_else(lhs_w, rhs_w, mask_w), cudf::logic_error);
}
struct StringsCopyIfElseTest : public cudf::test::BaseFixture {
};
TEST_F(StringsCopyIfElseTest, CopyIfElse)
{
auto valids = cudf::test::make_counting_transform_iterator(
0, [](auto i) { return i % 2 == 0 ? true : false; });
std::vector<const char*> h_strings1{"eee", "bb", "", "aa", "bbb", "ééé"};
cudf::test::strings_column_wrapper strings1(h_strings1.begin(), h_strings1.end(), valids);
std::vector<const char*> h_strings2{"zz", "", "yyy", "w", "ééé", "ooo"};
cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), valids);
bool mask[] = {1, 1, 0, 1, 0, 1};
bool mask_v[] = {1, 1, 1, 1, 1, 0};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 6, mask_v);
auto results = cudf::copy_if_else(strings1, strings2, mask_w);
std::vector<const char*> h_expected;
for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(h_strings1.size()); ++idx) {
if (mask[idx] and mask_v[idx])
h_expected.push_back(h_strings1[idx]);
else
h_expected.push_back(h_strings2[idx]);
}
cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
TEST_F(StringsCopyIfElseTest, CopyIfElseScalarColumn)
{
auto valids = cudf::test::make_counting_transform_iterator(
0, [](auto i) { return i % 2 == 0 ? true : false; });
std::vector<const char*> h_string1{"eee"};
cudf::string_scalar strings1{h_string1[0]};
std::vector<const char*> h_strings2{"zz", "", "yyy", "w", "ééé", "ooo"};
cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), valids);
bool mask[] = {1, 0, 1, 0, 1, 0};
bool mask_v[] = {1, 1, 1, 1, 1, 0};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 6, mask_v);
auto results = cudf::copy_if_else(strings1, strings2, mask_w);
std::vector<const char*> h_expected;
for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(h_strings2.size()); ++idx) {
if (mask[idx] and mask_v[idx]) {
h_expected.push_back(h_string1[0]);
} else {
h_expected.push_back(h_strings2[idx]);
}
}
cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
TEST_F(StringsCopyIfElseTest, CopyIfElseColumnScalar)
{
auto valids = cudf::test::make_counting_transform_iterator(
0, [](auto i) { return i % 2 == 0 ? true : false; });
std::vector<const char*> h_string1{"eee"};
cudf::string_scalar strings1{h_string1[0]};
std::vector<const char*> h_strings2{"zz", "", "yyy", "w", "ééé", "ooo"};
cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), valids);
bool mask[] = {0, 1, 1, 1, 0, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 6);
auto results = cudf::copy_if_else(strings2, strings1, mask_w);
std::vector<const char*> h_expected;
for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(h_strings2.size()); ++idx) {
if (mask[idx]) {
h_expected.push_back(h_strings2[idx]);
} else {
h_expected.push_back(h_string1[0]);
}
}
cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
TEST_F(StringsCopyIfElseTest, CopyIfElseScalarScalar)
{
auto valids = cudf::test::make_counting_transform_iterator(
0, [](auto i) { return i % 2 == 0 ? true : false; });
std::vector<const char*> h_string1{"eee"};
cudf::string_scalar string1{h_string1[0]};
std::vector<const char*> h_string2{"aaa"};
cudf::string_scalar string2{h_string2[0], false};
constexpr cudf::size_type mask_size = 6;
bool mask[] = {1, 0, 1, 0, 1, 0};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + mask_size);
auto results = cudf::copy_if_else(string1, string2, mask_w);
std::vector<const char*> h_expected;
for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(mask_size); ++idx) {
if (mask[idx]) {
h_expected.push_back(h_string1[0]);
} else {
h_expected.push_back(h_string2[0]);
}
}
cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
|
3769eda2505c7ec3a5ef22abe44ab50fbab99b8a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <fcuda.h>
#include <math.h>
#include "nn.h"
/**
* Kernel
* Executed on GPU
* Calculates the Euclidean distance from each record in the database to the target position
*/
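// The FCUDA pragmas below steer the CUDA-to-FPGA (HLS) translation: grid geometry, number of compute cores and pipelining, and merging of the pointer arguments into memory ports.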
#pragma FCUDA GRID x_dim=256 y_dim=256
#pragma FCUDA COREINFO num_cores=1 pipeline=no
#pragma FCUDA PORTMERGE remove_port_name=d_locations port_id=0 data_pack=yes
#pragma FCUDA PORTMERGE remove_port_name=d_distances port_id=1
__global__ void euclid(LatLong *d_locations, float *d_distances, int numRecords,float lat, float lng)
{
#pragma FCUDA COMPUTE cores=[1] name=compute begin unroll=1 mpart=1 array_split=[]
int globalId = blockDim.x * ( gridDim.x * blockIdx.y + blockIdx.x ) + threadIdx.x;
LatLong *latLong = &d_locations[globalId];
if (globalId < numRecords) {
float *dist=&d_distances[globalId];
*dist = (float)sqrt((lat-latLong->lat)*(lat-latLong->lat)+(lng-latLong->lng)*(lng-latLong->lng));
}
#pragma FCUDA COMPUTE cores=[1] name=compute end unroll=1 mpart=1 array_split=[]
}
|
3769eda2505c7ec3a5ef22abe44ab50fbab99b8a.cu
|
#include <fcuda.h>
#include <math.h>
#include "nn.h"
/**
* Kernel
* Executed on GPU
* Calculates the Euclidean distance from each record in the database to the target position
*/
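// FCUDA pragmas driving the CUDA-to-FPGA (HLS) flow: grid geometry, core count/pipelining, and port merging for the pointer arguments.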
#pragma FCUDA GRID x_dim=256 y_dim=256
#pragma FCUDA COREINFO num_cores=1 pipeline=no
#pragma FCUDA PORTMERGE remove_port_name=d_locations port_id=0 data_pack=yes
#pragma FCUDA PORTMERGE remove_port_name=d_distances port_id=1
__global__ void euclid(LatLong *d_locations, float *d_distances, int numRecords,float lat, float lng)
{
#pragma FCUDA COMPUTE cores=[1] name=compute begin unroll=1 mpart=1 array_split=[]
int globalId = blockDim.x * ( gridDim.x * blockIdx.y + blockIdx.x ) + threadIdx.x;
LatLong *latLong = &d_locations[globalId];
if (globalId < numRecords) {
float *dist=&d_distances[globalId];
*dist = (float)sqrt((lat-latLong->lat)*(lat-latLong->lat)+(lng-latLong->lng)*(lng-latLong->lng));
}
#pragma FCUDA COMPUTE cores=[1] name=compute end unroll=1 mpart=1 array_split=[]
}
|
9f50770e119d18b5ab307331e44913d2af6d7a31.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
$ nvcc -o device_prop device_prop.cu
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
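// Enumerates every visible device and prints a few key properties; the return codes of the runtime calls are not checked here.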
void getCudaDeviceInfo()
{
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
printf(" Compute capability: %d.%d\n", prop.major, prop.minor);
printf(" Multi-processor count: %d\n", prop.multiProcessorCount);
printf(" Maximum size of each dimension of a grid: %d\n", *(prop.maxGridSize));
printf(" Maximum size of each dimension of a block: %d\n", *(prop.maxThreadsDim));
printf(" Maximum number of threads per block: %d\n", prop.maxThreadsPerBlock);
//printf("Maximum number of resident blocks per multiprocessor: %d\n", prop.maxBlocksPerMultiProcessor);
printf(" Maximum resident threads per multiprocessor: %d\n", prop.maxThreadsPerMultiProcessor);
}
}
int main(void)
{
// GPU info
getCudaDeviceInfo();
}
|
9f50770e119d18b5ab307331e44913d2af6d7a31.cu
|
/*
$ nvcc -o device_prop device_prop.cu
*/
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
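// Queries each visible device in turn and prints a handful of its properties; runtime-call return codes are left unchecked.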
void getCudaDeviceInfo()
{
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
printf(" Compute capability: %d.%d\n", prop.major, prop.minor);
printf(" Multi-processor count: %d\n", prop.multiProcessorCount);
printf(" Maximum size of each dimension of a grid: %d\n", *(prop.maxGridSize));
printf(" Maximum size of each dimension of a block: %d\n", *(prop.maxThreadsDim));
printf(" Maximum number of threads per block: %d\n", prop.maxThreadsPerBlock);
//printf("Maximum number of resident blocks per multiprocessor: %d\n", prop.maxBlocksPerMultiProcessor);
printf(" Maximum resident threads per multiprocessor: %d\n", prop.maxThreadsPerMultiProcessor);
}
}
int main(void)
{
// GPU info
getCudaDeviceInfo();
}
|
923659de3968893f2c5c74ba499a5dac619d334c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<stdio.h>
#include<malloc.h>
#include<cuda.h>
#include "archivos_csv.c"
using namespace std;
#define TILE_WIDTH 32
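// Tiled matrix multiplication C = A x B: each block stages TILE_WIDTH x TILE_WIDTH tiles of A and B in shared memory, and every thread accumulates one element of C.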
__global__
void MultiplicaMatricesCU(int* A,int filA,int colA,int* B,int filB,int colB,int* C){//filC=filA,colC=colB
//Total size of the elements we are going to work with
__shared__ float A_s[TILE_WIDTH][TILE_WIDTH];
__shared__ float B_s[TILE_WIDTH][TILE_WIDTH];
//To know which block and which thread we are in
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int gx = gridDim.x;
int gy = gridDim.y;
//For the resulting element of C
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
int suma = 0;//running sum of the products
int n = 0, m = 0;
while(m < gx && n < gy){
/* From A we want to read the columns, hence:
* col = ( ( m * TILE_WIDTH ) + tx )
* col = ( ( bx * TILE_WIDTH ) + tx )
* Comparing both expressions shows that m walks across the blocks
* along the x axis (the columns).
*/
if(( ( m * TILE_WIDTH ) + tx ) < colA && row < filA) //bounds guard
A_s[ty][tx] = A[ (row * colA) + ( ( m * TILE_WIDTH ) + tx )];//(Row*colA + k), where k -> 0..filB (filB = colA)
else A_s[ty][tx] = 0;
/* From B we want to read the rows, hence:
* row = ( ( m * TILE_WIDTH ) + tx )
* row = ( ( by * TILE_WIDTH ) + tx )
* Comparing both expressions shows that n walks across the blocks
* along the y axis (the rows).
*/
if(( n * TILE_WIDTH + ty) < filB && col < colB)
B_s[ty][tx] = B[( ( n * TILE_WIDTH + ty) * colB ) + col ];//(k*colB)+Col, where k -> 0..filB
else B_s[ty][tx] = 0;
m++; n++;
__syncthreads();//wait for every thread in the block
for (int k=0; k < TILE_WIDTH ; ++k) {
suma += A_s[ty][k] * B_s[k][tx];
}
__syncthreads();
}
if(row < filA && col < colB)
C[ (row * colB) + col] = suma; //C[filA][colB]
}
__host__
void multiplicaMatrices(int* X,int filX,int colX,int* Y,int filY,int colY,int* Z){
for(int i=0;i<filX;i++){
for(int j=0;j<colY;j++){
int suma=0;
for(int k=0;k<filY;k++){
suma=suma+X[(i*colX)+k]*Y[(k*colY)+j];
}
Z[(i*colY)+j]=suma;
}
}
}
__host__
void imprime(int* A,int filas, int columnas){//prints the array as a matrix
for(int i = 0; i < filas; i++){
for(int j = 0; j < columnas; j++){
cout<<A[(i*columnas)+j]<<" ";
}
cout<<endl;
}
}
__host__
void inicializa(int *A,int filas, int columnas){//initializes the array
for(int i=0;i<filas*columnas;i++){
A[i]=1;
}
}
__host__
bool compara(int *A, int *B, int filas, int columnas){
for(int i = 0; i < filas; i++){
for(int j = 0; j < columnas; j++){
if(A[i*columnas+j] != B[i*columnas+j]) return false;
}
}
return true;
}
int main(int argc, char *argv[]){
if(argc != 3){
printf("no se han ingresado los archivos necesarios\n");
}
FILE *fp;
fp = fopen(argv[1], "r");
if(fp==NULL){
fputs("File error",stderr);
return 1;
}
fclose(fp);
fp = fopen(argv[2], "r");
if(fp == NULL){
fputs("file error", stderr);
return 1;
}
fclose(fp);
clock_t startCPU,endCPU,startGPU,endGPU;
hipError_t error = hipSuccess;
int *A,*B,*C; //A[filA][colA],B[filB][colB],C[filA][colB]
int *d_A,*d_B,*d_C,*h_C;
//int filA=2048,colA=2048,filB=2048,colB=2048;
//int filA=1024,colA=1024,filB=1024,colB=1024;
//-------------------------------CPU--------------------------------------------------------------------
//inicializa(A,filA,colA);
//inicializa(B,filB,colB);
//csv input files
int filA = Detected_rows(fp, argv[1]);
int colA = Detected_columns(fp, argv[1]);
int filB = Detected_rows(fp, argv[2]);
int colB = Detected_columns(fp, argv[2]);
A=(int*)malloc(filA*colA*sizeof(int));
B=(int*)malloc(filB*colB*sizeof(int));
C=(int*)malloc(filA*colB*sizeof(int));
ExtracData(fp, A, argv[1], filA, colA);
ExtracData(fp, B, argv[2], filB, colB);
if(colA==filB){//so the matrices can be multiplied
startCPU = clock();
multiplicaMatrices(A,filA,colA,B,filB,colB,C);
endCPU = clock();
//imprime(C,filA,colB);
}else{
cout<<"Error, no se pueden multiplicar"<<endl;
return 0;
}
double time_CPU=((double)(endCPU-startCPU))/CLOCKS_PER_SEC;
cout<<"El tiempo transcurrido en la CPU fue: "<<time_CPU<<endl;
//-------------------------------GPU--------------------------------------------------------------------
h_C=(int*)malloc(filA*colB*sizeof(int));
startGPU = clock();
error=hipMalloc((void**)&d_A,filA*colA*sizeof(int));
if(error != hipSuccess){
cout<<"Error reservando memoria para d_A"<<endl;
//return -1;
}
hipMalloc((void**)&d_B,filB*colB*sizeof(int));
if(error != hipSuccess){
cout<<"Error reservando memoria para d_B"<<endl;
//return -1;
}
hipMalloc((void**)&d_C,filA*colB*sizeof(int));
if(error != hipSuccess){
cout<<"Error reservando memoria para d_C"<<endl;
//return -1;
}
hipMemcpy(d_A,A,filA*colA*sizeof(int),hipMemcpyHostToDevice);//destination d_A, source A
hipMemcpy(d_B,B,filB*colB*sizeof(int),hipMemcpyHostToDevice);
//Depends directly on the matrix dimensions
dim3 dimblock(32,32,1);
//dim3 dimGrid(32,32,1);
dim3 dimGrid(ceil((double)colB/32.0),ceil((double)filA/32.0),1);
hipLaunchKernelGGL(( MultiplicaMatricesCU), dim3(dimGrid),dim3(dimblock), 0, 0, d_A,filA,colA,d_B,filB,colB,d_C);
hipDeviceSynchronize();
hipMemcpy(h_C,d_C,filA*colB*sizeof(int),hipMemcpyDeviceToHost);
endGPU = clock();
/*
COMENTARIO: IMPRESIONES DE PRUEBA
cout << "MATRIZ A" << endl;
imprime(A, filA,colA);
cout << endl << "MATRIZ B" << endl;
imprime(B, filB,colB);
cout << endl << "MATRIZ RESULTADO" << endl;
imprime(h_C, filA,colB);
*/
double time_GPU=((double)(endGPU-startGPU))/CLOCKS_PER_SEC;
cout<<"El tiempo transcurrido en la GPU fue: "<<time_GPU<<endl;
//-----------------------------------------------------------------------------------
cout<<"El tiempo de aceleramiento fue: "<<time_CPU/time_GPU<<endl;
if(compara(h_C, C, filA, colB)) cout << "Buen cálculo" << endl;
else cout << "Mal cálculo" << endl;
free(A);free(B);free(C);free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return 0;
}
|
923659de3968893f2c5c74ba499a5dac619d334c.cu
|
#include<iostream>
#include<stdio.h>
#include<malloc.h>
#include<cuda.h>
#include "archivos_csv.c"
using namespace std;
#define TILE_WIDTH 32
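// Shared-memory tiled matrix multiplication C = A x B: blocks stream TILE_WIDTH x TILE_WIDTH tiles of A and B while each thread accumulates a single element of C.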
__global__
void MultiplicaMatricesCU(int* A,int filA,int colA,int* B,int filB,int colB,int* C){//filC=filA,colC=colB
//Tamaño total de los elementos con que vamos a trabajar
__shared__ float A_s[TILE_WIDTH][TILE_WIDTH];
__shared__ float B_s[TILE_WIDTH][TILE_WIDTH];
//Para saber en qué bloque y qué hilo estamos
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int gx = gridDim.x;
int gy = gridDim.y;
//Para el resultado de C
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
int suma = 0;//para llevar la suma de las multiplicaciones
int n = 0, m = 0;
while(m < gx && n < gy){
/* De A queremos sacar las columnas, por eso:
* col = ( ( m * TILE_WIDTH ) + tx )
* col = ( ( bx * TILE_WIDTH ) + tx )
* Hacemos la comparación entre ambas.
* Vemos que m se mueve entre los bloques en el eje x (las columnas)
*/
if(( ( m * TILE_WIDTH ) + tx ) < colA && row < filA) //Si no se pasa
A_s[ty][tx] = A[ (row * colA) + ( ( m * TILE_WIDTH ) + tx )];//(Row*colA + k), donde k-> 0..filB (filB = colA)
else A_s[ty][tx] = 0;
/* De B queremos sacar las filas, por eso:
* row = ( ( m * TILE_WIDTH ) + tx )
* row = ( ( by * TILE_WIDTH ) + tx )
* Hacemos la comparación entre ambas.
* Vemos que n se mueve entre los bloques en el eje y (las filas)
*/
if(( n * TILE_WIDTH + ty) < filB && col < colB)
B_s[ty][tx] = B[( ( n * TILE_WIDTH + ty) * colB ) + col ];//(k*colB)+Col, donde k-> 0..filB
else B_s[ty][tx] = 0;
m++; n++;
__syncthreads();//espera a todos los hilos
for (int k=0; k < TILE_WIDTH ; ++k) {
suma += A_s[ty][k] * B_s[k][tx];
}
__syncthreads();
}
if(row < filA && col < colB)
C[ (row * colB) + col] = suma; //C[filA][colB]
}
__host__
void multiplicaMatrices(int* X,int filX,int colX,int* Y,int filY,int colY,int* Z){
for(int i=0;i<filX;i++){
for(int j=0;j<colY;j++){
int suma=0;
for(int k=0;k<filY;k++){
suma=suma+X[(i*colX)+k]*Y[(k*colY)+j];
}
Z[(i*colY)+j]=suma;
}
}
}
__host__
void imprime(int* A,int filas, int columnas){//imprime como si fuera una matriz
for(int i = 0; i < filas; i++){
for(int j = 0; j < columnas; j++){
cout<<A[(i*columnas)+j]<<" ";
}
cout<<endl;
}
}
__host__
void inicializa(int *A,int filas, int columnas){//inicializa arreglos
for(int i=0;i<filas*columnas;i++){
A[i]=1;
}
}
__host__
bool compara(int *A, int *B, int filas, int columnas){
for(int i = 0; i < filas; i++){
for(int j = 0; j < columnas; j++){
if(A[i*columnas+j] != B[i*columnas+j]) return false;
}
}
return true;
}
int main(int argc, char *argv[]){
if(argc != 3){
printf("no se han ingresado los archivos necesarios\n");
}
FILE *fp;
fp = fopen(argv[1], "r");
if(fp==NULL){
fputs("File error",stderr);
return 1;
}
fclose(fp);
fp = fopen(argv[2], "r");
if(fp == NULL){
fputs("file error", stderr);
return 1;
}
fclose(fp);
clock_t startCPU,endCPU,startGPU,endGPU;
cudaError_t error = cudaSuccess;
int *A,*B,*C; //A[filA][colA],B[filB][colB],C[filA][colB]
int *d_A,*d_B,*d_C,*h_C;
//int filA=2048,colA=2048,filB=2048,colB=2048;
//int filA=1024,colA=1024,filB=1024,colB=1024;
//-------------------------------CPU--------------------------------------------------------------------
//inicializa(A,filA,colA);
//inicializa(B,filB,colB);
//archivos csv
int filA = Detected_rows(fp, argv[1]);
int colA = Detected_columns(fp, argv[1]);
int filB = Detected_rows(fp, argv[2]);
int colB = Detected_columns(fp, argv[2]);
A=(int*)malloc(filA*colA*sizeof(int));
B=(int*)malloc(filB*colB*sizeof(int));
C=(int*)malloc(filA*colB*sizeof(int));
ExtracData(fp, A, argv[1], filA, colA);
ExtracData(fp, B, argv[2], filB, colB);
if(colA==filB){//para que sean multiplicables
startCPU = clock();
multiplicaMatrices(A,filA,colA,B,filB,colB,C);
endCPU = clock();
//imprime(C,filA,colB);
}else{
cout<<"Error, no se pueden multiplicar"<<endl;
return 0;
}
double time_CPU=((double)(endCPU-startCPU))/CLOCKS_PER_SEC;
cout<<"El tiempo transcurrido en la CPU fue: "<<time_CPU<<endl;
//-------------------------------GPU--------------------------------------------------------------------
h_C=(int*)malloc(filA*colB*sizeof(int));
startGPU = clock();
error=cudaMalloc((void**)&d_A,filA*colA*sizeof(int));
if(error != cudaSuccess){
cout<<"Error reservando memoria para d_A"<<endl;
//return -1;
}
cudaMalloc((void**)&d_B,filB*colB*sizeof(int));
if(error != cudaSuccess){
cout<<"Error reservando memoria para d_B"<<endl;
//return -1;
}
cudaMalloc((void**)&d_C,filA*colB*sizeof(int));
if(error != cudaSuccess){
cout<<"Error reservando memoria para d_C"<<endl;
//return -1;
}
cudaMemcpy(d_A,A,filA*colA*sizeof(int),cudaMemcpyHostToDevice);//destino d_A y origen A
cudaMemcpy(d_B,B,filB*colB*sizeof(int),cudaMemcpyHostToDevice);
//Depende directamente de la dimensión de las matrices
dim3 dimblock(32,32,1);
//dim3 dimGrid(32,32,1);
dim3 dimGrid(ceil((double)colB/32.0),ceil((double)filA/32.0),1);
MultiplicaMatricesCU<<<dimGrid,dimblock>>>(d_A,filA,colA,d_B,filB,colB,d_C);
cudaDeviceSynchronize();
cudaMemcpy(h_C,d_C,filA*colB*sizeof(int),cudaMemcpyDeviceToHost);
endGPU = clock();
/*
COMENTARIO: IMPRESIONES DE PRUEBA
cout << "MATRIZ A" << endl;
imprime(A, filA,colA);
cout << endl << "MATRIZ B" << endl;
imprime(B, filB,colB);
cout << endl << "MATRIZ RESULTADO" << endl;
imprime(h_C, filA,colB);
*/
double time_GPU=((double)(endGPU-startGPU))/CLOCKS_PER_SEC;
cout<<"El tiempo transcurrido en la GPU fue: "<<time_GPU<<endl;
//-----------------------------------------------------------------------------------
cout<<"El tiempo de aceleramiento fue: "<<time_CPU/time_GPU<<endl;
if(compara(h_C, C, filA, colB)) cout << "Buen cálculo" << endl;
else cout << "Mal cálculo" << endl;
free(A);free(B);free(C);free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return 0;
}
|
164130a5db888e75664aa522873ba5f6c197f20a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Prerequisites.cuh"
#include "CTF.cuh"
#include "Generics.cuh"
#include "Helper.cuh"
#include "ImageManipulation.cuh"
#include "Masking.cuh"
namespace gtom
{
__global__ void BeamTiltKernel(tcomplex* d_input, tcomplex* d_output, int2 dims, uint elements, const tfloat2* __restrict__ d_beamtilt, const tfloat* __restrict__ d_factors);
//////////////////////////
//Corrects for beam tilt//
//////////////////////////
void d_BeamTilt(tcomplex* d_input, tcomplex* d_output, int2 dims, tfloat2* d_beamtilt, CTFParams* h_params, uint batch)
{
tfloat* h_factors;
hipHostMalloc((void**)&h_factors, batch * sizeof(tfloat));
for (uint b = 0; b < batch; b++)
{
CTFParamsLean lean = CTFParamsLean(h_params[b], toInt3(dims));
tfloat boxsize = (tfloat)dims.x * (h_params[b].pixelsize * 1e10);
tfloat factor = 1e-3f * PI2 * lean.Cs * lean.lambda * lean.lambda / (boxsize * boxsize * boxsize);
h_factors[b] = factor;
}
tfloat* d_factors = (tfloat*)CudaMallocFromHostArray(h_factors, batch * sizeof(tfloat));
hipHostFree(h_factors);
int TpB = tmin(NextMultipleOf(ElementsFFT2(dims), 32), 128);
dim3 grid = dim3((ElementsFFT2(dims) + TpB - 1) / TpB, batch);
hipLaunchKernelGGL(( BeamTiltKernel) , dim3(grid), dim3(TpB), 0, 0, d_input, d_output, dims, ElementsFFT2(dims), d_beamtilt, d_factors);
}
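// Per-element phase ramp: phase = factor * (x^2 + y^2) * (x*tilt_x + y*tilt_y); each spectrum in the batch is multiplied by exp(i*phase).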
__global__ void BeamTiltKernel(tcomplex* d_input, tcomplex* d_output, int2 dims, uint elements, const tfloat2* __restrict__ d_beamtilt, const tfloat* __restrict__ d_factors)
{
d_input += elements * blockIdx.y;
d_output += elements * blockIdx.y;
tfloat2 beamtilt = d_beamtilt[blockIdx.y];
tfloat factor = d_factors[blockIdx.y];
for (uint id = blockIdx.x * blockDim.x + threadIdx.x; id < elements; id += gridDim.x * blockDim.x)
{
int y = id / ElementsFFT1(dims.x);
uint x = id - y * ElementsFFT1(dims.x);
tfloat xx = x;
tfloat yy = y <= dims.y / 2 ? y : y - dims.y;
tfloat phase = factor * (xx * xx + yy * yy) * (xx * beamtilt.x + yy * beamtilt.y);
tcomplex shift = make_cuComplex(cos(phase), sin(phase));
d_output[id] = cmul(d_input[id], shift);
}
}
}
|
164130a5db888e75664aa522873ba5f6c197f20a.cu
|
#include "Prerequisites.cuh"
#include "CTF.cuh"
#include "Generics.cuh"
#include "Helper.cuh"
#include "ImageManipulation.cuh"
#include "Masking.cuh"
namespace gtom
{
__global__ void BeamTiltKernel(tcomplex* d_input, tcomplex* d_output, int2 dims, uint elements, const tfloat2* __restrict__ d_beamtilt, const tfloat* __restrict__ d_factors);
//////////////////////////
//Corrects for beam tilt//
//////////////////////////
void d_BeamTilt(tcomplex* d_input, tcomplex* d_output, int2 dims, tfloat2* d_beamtilt, CTFParams* h_params, uint batch)
{
tfloat* h_factors;
cudaMallocHost((void**)&h_factors, batch * sizeof(tfloat));
for (uint b = 0; b < batch; b++)
{
CTFParamsLean lean = CTFParamsLean(h_params[b], toInt3(dims));
tfloat boxsize = (tfloat)dims.x * (h_params[b].pixelsize * 1e10);
tfloat factor = 1e-3f * PI2 * lean.Cs * lean.lambda * lean.lambda / (boxsize * boxsize * boxsize);
h_factors[b] = factor;
}
tfloat* d_factors = (tfloat*)CudaMallocFromHostArray(h_factors, batch * sizeof(tfloat));
cudaFreeHost(h_factors);
int TpB = tmin(NextMultipleOf(ElementsFFT2(dims), 32), 128);
dim3 grid = dim3((ElementsFFT2(dims) + TpB - 1) / TpB, batch);
BeamTiltKernel <<<grid, TpB>>> (d_input, d_output, dims, ElementsFFT2(dims), d_beamtilt, d_factors);
}
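// Applies the beam-tilt phase ramp phase = factor * (x^2 + y^2) * (x*tilt_x + y*tilt_y), multiplying every element of each spectrum by exp(i*phase).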
__global__ void BeamTiltKernel(tcomplex* d_input, tcomplex* d_output, int2 dims, uint elements, const tfloat2* __restrict__ d_beamtilt, const tfloat* __restrict__ d_factors)
{
d_input += elements * blockIdx.y;
d_output += elements * blockIdx.y;
tfloat2 beamtilt = d_beamtilt[blockIdx.y];
tfloat factor = d_factors[blockIdx.y];
for (uint id = blockIdx.x * blockDim.x + threadIdx.x; id < elements; id += gridDim.x * blockDim.x)
{
int y = id / ElementsFFT1(dims.x);
uint x = id - y * ElementsFFT1(dims.x);
tfloat xx = x;
tfloat yy = y <= dims.y / 2 ? y : y - dims.y;
tfloat phase = factor * (xx * xx + yy * yy) * (xx * beamtilt.x + yy * beamtilt.y);
tcomplex shift = make_cuComplex(cos(phase), sin(phase));
d_output[id] = cmul(d_input[id], shift);
}
}
}
|
6136c0ecf821cc341265099d199f038f33938b50.hip
|
// !!! This is a file automatically generated by hipify!!!
//this version is 250 ms
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include<iostream>
#include<cudnn.h>
#include <stdio.h>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include<rocblas.h>
#include<string>
#include<fstream>
#include<cmath>
#include<ctime>
using namespace std;
void readWeights(float* weights, int m/*output*/, int n/*input*/, int h, int w, string baseFileName,bool readWeights=true) {
string fileName = "weights2/" + baseFileName;
if (readWeights) {
fileName+="Weights.data";
}
else {
fileName += "Biases.data";
}
ifstream in(fileName, ios::in|ios::binary);
//cout << fileName << "\n";
if (!in.is_open())
{
cout << "file "<<baseFileName<<" didn't open \n";
return;
}
in.read((char*)weights, m*n*h*w * sizeof(float));
in.close();
//cout << baseFileName << " : " << weights[0] << " " << weights[1] << "\n";
}
#define cudnnCheck(exp){\
cudnnStatus_t status=(exp);\
if(status!=CUDNN_STATUS_SUCCESS){\
std::cout<<"Error at line "<<__LINE__<<" "<<cudnnGetErrorString(status)<<"\n";\
std::exit(EXIT_FAILURE);\
}\
}\
#define cudaCheck(exp) {\
hipError_t status=(exp);\
if(status!=hipSuccess){\
cerr<<"error at cuda "<<__LINE__<<" "<<hipGetErrorString(status)<<"\n";\
exit(EXIT_FAILURE);\
}\
}\
cv::Mat load_image(const char* image_path) {
cv::Mat image = cv::imread(image_path, CV_LOAD_IMAGE_COLOR);
if (image.empty()) { cerr << "couldn't open image\n"; }
cv::cvtColor(image, image, cv::COLOR_BGR2RGB);
image.convertTo(image, CV_32FC3);
cv::normalize(image, image, 0, 1, cv::NORM_MINMAX);
cv::Mat resizedImage(416, 416, CV_32FC3);
cv::resize(image, resizedImage, cv::Size(416, 416), 0, 0, cv::INTER_CUBIC);
if (resizedImage.empty())cerr << "resized image empty\n";
//cout << "ok\n";
return resizedImage;
}
void save_image(const char* output_filename,cv::Mat output_image) {
//cv::cvtColor(output_image, output_image, cv::COLOR_RGB2BGR);
//cv::normalize(output_image, output_image, 0.0, 255.0, cv::NORM_MINMAX);
//output_image.convertTo(output_image, CV_8UC3);
cv::imwrite(output_filename, output_image);
}
//incomplete
__global__ void leaky_relu_v2(float* d_data, float alpha, int size) {
int index = (blockIdx.y*gridDim.x + blockIdx.x);
if (index < size) {
float x = d_data[index];
if (x<0) d_data[index] = alpha*x;
}
}
//try constant shift
__global__ void leaky_relu_v3(float* d_data, float alpha, int size,int step) {
int index = blockIdx.y*gridDim.x + blockIdx.x;
if (index < step) {
int channels = (size / step);
index *= channels;
for (int i = index; i < index+channels; i++) {
float x = d_data[i];
if (x<0) d_data[i] = alpha*x;
}
}
}
__global__ void leaky_relu_v4(float* d_data, float alpha, int size, int shift) {
int index = blockIdx.y*gridDim.x + blockIdx.x;
index *= shift;
if (index < size) {
for (int i = index; i < index + shift && i < size; i++) {//also cover the last, possibly partial chunk
float x = d_data[i];
if (x<0) d_data[i] = alpha*x;
}
}
}
__global__ void leaky_relu(float* d_data, float alpha, int size) {
int index = blockIdx.y*gridDim.x + blockIdx.x;
if (index < size) {
float x = d_data[index];
if (x<0) d_data[index] = alpha*x;
}
}
//step is width*height of the output of convolution
/*
@param size is width x height x channels
@Param step is width x height
the data in the format HxWxC
k is computed as index%(size/step)
*/
__global__ void add_biase(float* d_data, float* biases, int size/*WxHxC*/, int step/*WxH*/) {
int index = blockIdx.y*gridDim.x + blockIdx.x;
if (index < step) {
int biaseSize = (size / step);
index *= biaseSize;
for (int i = 0; i < biaseSize; i++) {
d_data[index+i] += biases[i];
}
}
}
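//Illustrative usage sketch (the wrapper name below is made up and is not called anywhere
//in this file): add_biase expects HWC data and one single-thread block per spatial
//position; each block then loops over the channels, so a 416x416x16 activation map is
//covered by a 416x416 grid, exactly as main() launches it for conv1.
void add_biase_example(float* d_data, float* d_biases)
{
	add_biase << <dim3(416, 416), 1 >> >(d_data, d_biases, 416 * 416 * 16, 416 * 416);
}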
__device__ float iou(float bx1x1,float bx1y1,float bx1x2,float bx1y2, float bx2x1, float bx2y1, float bx2x2, float bx2y2) {
float x1 = (bx1x1 > bx2x1) ? bx1x1 : bx2x1;
float y1 = (bx1y1> bx2y1) ? bx1y1 : bx2y1;
float x2 = (bx1x2 > bx2x2) ? bx2x2 : bx1x2;
float y2 = (bx1y2 > bx2y2) ? bx2y2 : bx1y2;
float A1 = (bx1x2 - bx1x1)*(bx1y2 - bx1y1);
float A2 = (bx2x2 - bx2x1)*(bx2y2 - bx2y1);
float A_inter = ((x2 - x1) > 0 ? (x2 - x1) : 0)*((y2 - y1) > 0 ? (y2 - y1) : 0);
return(A_inter / (A1 + A2 - A_inter));
}
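//Worked example (illustrative numbers only): for boxes (0,0)-(4,4) and (0,0)-(4,8)
//the intersection is (0,0)-(4,4), so A_inter = 16, A1 = 16, A2 = 32 and
//iou(...) = 16 / (16 + 32 - 16) = 0.5; disjoint boxes give A_inter = 0 and an IoU of 0.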
//consider calculating the necessary points only
__global__ void calculate_points(float* boxes_dims,float* points,bool* boxes,int size) {
int index = blockIdx.y*gridDim.x + blockIdx.x;
if (index < size) {
//int left = h_boxes_dims[index] - (h_boxes_dims[index + 2] / 2.0);
//int right = h_boxes_dims[index] + (h_boxes_dims[index + 2] / 2.0);
//int top = h_boxes_dims[index + 1] - (h_boxes_dims[index + 3] / 2.0);
//int bottom = h_boxes_dims[index + 1] + (h_boxes_dims[index + 3] / 2.0);
int step = index * 4;
float center_x = boxes_dims[step];
float w = boxes_dims[step + 2];
float center_y = boxes_dims[step + 1];
float h = boxes_dims[step + 3];
points[step] = center_x - ((w) / 2.0);
points[step+2]= center_x + ((w) / 2.0);
points[step + 1] = center_y - ((h) / 2.0);
points[step + 3] = center_y + ((h) / 2.0);
}
}
__global__ void non_max_supression(float* points, bool* boxes,float* maxClassScore, int* maxClassIndex,float threashold=0.3,int size=13*13*5) {
int index = blockIdx.y*gridDim.x + blockIdx.x;
if (index < size) {
float maxClass = maxClassScore[index];
if (maxClass < 0.3) {
boxes[index] = false;
return;
}
int maxClassInd = maxClassIndex[index];
float x1 = points[index * 4];
float y1 = points[index * 4 + 1];
float x2 = points[index * 4 + 2];
float y2 = points[index * 4 + 3];
for (int i = 0; i < size; i++) {
if (boxes[i] && i != index) {
if ( maxClassInd== maxClassIndex[i]) {
if (maxClass > maxClassScore[i]) {
float x = iou(x1,y1,x2,y2, points[i * 4]
, points[i * 4 + 1], points[i * 4 + 2], points[i * 4 + 3]);
if (x >= threashold) {
boxes[i] = false;
}
}
}
}
}
}
}
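//Note (added for clarity): each surviving box compares itself against every other
//surviving box of the same class; if it has the higher score and their IoU reaches the
//threshold, it clears the weaker box. Writes to boxes[] only ever store false, so
//concurrent blocks cannot resurrect a suppressed box. main() launches this over a
//30x30 grid of single-thread blocks (900 >= 13*13*5 = 845 candidates).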
//20 classes
__global__ void exp(float* classes,int size) {
int index = (blockIdx.y*gridDim.x) + blockIdx.x+threadIdx.x;
if (index<size) {
classes[index] = exp(classes[index]);
}
}
__global__ void softmax(float* classes,int offset, float sum) {
if (threadIdx.x < 20) {
classes[threadIdx.x + offset] /= sum;
}
}
__global__ void filter(float* classes,bool* boxes,float threshold=0.4,int size=13*13*5*20) {
int index = (blockIdx.y*gridDim.x) + blockIdx.x;
if (index < size ) {
if (classes[index] >= threshold) {
boxes[index / 20] = true;
//printf("index %d value %f\n", index, classes[index]);
}
}
}
//blocks*threads
__global__ void sigmoid(float* x,int size) {
int index = (blockIdx.y*gridDim.x) + blockIdx.x + threadIdx.x;
if (index<size) {
x[index] = 1 / (1 + exp(-1*x[index]));
}
}
//calculate centers of the box and the width and height
//calculate the necessary ones
__global__ void calculate_box_dims(float* x, float* d_anchors, int size) {
int index = blockIdx.y*gridDim.x + blockIdx.x;
if (index < size) {
//center_x = (float(col) + sigmoid(tx)) * 32.0
x[index] = (((index / (4)) % 13) + (1.0 / (1 + expf(-1 * x[index]))))*32.0;
//center_y = (float(row) + sigmoid(ty)) * 32.0
x[index+1] = ((index / (13 * 4)) + (1.0 / (1 + expf(-1 * x[index + 1]))))*32.0;
//roi_w = np.exp(tw) * anchors[2 * box + 0] * 32.0
x[index+2] = expf(x[index + 2])*d_anchors[2 * ((index / 25) % 5)] * 32.0;
//roi_h = np.exp(th) * anchors[2 * box + 1] * 32.0
x[index+3] = expf(x[index + 3])*d_anchors[2 * ((index / 25) % 5) + 1] * 32.0;
}
}
__global__ void sigmoid_exp(float* x,float* d_anchors, int size) {
int index = (blockIdx.y*gridDim.x) + blockIdx.x;
if (index < size) {
int cond = index % 25;
switch (cond)
{
case 0 :
//center_x = (float(col) + sigmoid(tx)) * 32.0
x[index] = (((index/(125))%13)+(1.0/(1+expf(-1*x[index]))))*32.0;
break;
case 1:
//center_y = (float(row) + sigmoid(ty)) * 32.0
x[index] = ((index/(13*125)) + (1.0 / (1 + expf(-1*x[index]))))*32.0;
break;
case 2 :
//roi_w = np.exp(tw) * anchors[2 * box + 0] * 32.0
x[index] = expf(x[index])*d_anchors[2 * ((index/25)%5)]*32.0;
break;
case 3 :
//roi_h = np.exp(th) * anchors[2 * box + 1] * 32.0
x[index] = expf(x[index])*d_anchors[2 * ((index / 25) % 5)+1]*32.0 ;
break;
case 4:
//confidence
//if (index == 4)printf("data sample %f\n\n", x[index]);
x[index] = (1.0 / (1 + expf(-1 * x[index])));
break;
}
//if (index <25)printf("data sample %d %f\n",index, x[index]);
}
}
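//Decoding example (illustrative, restating the formulas used above): each cell of the
//13x13 grid predicts 5 boxes of 25 values (tx, ty, tw, th, confidence, 20 class scores).
//For cell (row, col) and anchor pair (aw, ah):
//  center_x = (col + sigmoid(tx)) * 32    center_y = (row + sigmoid(ty)) * 32
//  width    = exp(tw) * aw * 32           height   = exp(th) * ah * 32
//with 32 = 416 / 13 being the stride of the output grid.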
__global__ void scores(float* classes,float* confidence,int size) {
int index = blockIdx.y*gridDim.x+blockIdx.x;
if (index < size ) {
float x = confidence[index];
int step = index * 20;
for (int i = 0; i < 20; i++) {
classes[step + i] *= x;
}
}
}
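//Note (added for clarity): main() calls this after the per-box softmax, so each of the
//20 class probabilities gets multiplied by that box's objectness confidence and
//classes[] ends up holding confidence * P(class | box), which is what filter and
//get_max_scores consume.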
__global__ void get_max_scores(float* classes, bool* boxes, float* maxScores , int* maxIndecies, int size=13*13*5) {
int index = blockIdx.y*gridDim.x + blockIdx.x;
int classIndex = 20 * index ;
if (index < size ) {
if (boxes[index]) {
float maxClassScore = classes[classIndex];
int maxClassIndex = 0;
float tmp=0;
for (int i = classIndex + 1; i < classIndex + 20; i++) {//scan all 20 class scores
tmp = classes[i];
if (tmp > maxClassScore) {
maxClassScore = tmp;
maxClassIndex = i - classIndex;
}
}
//printf("from get_max_score %d %d\n", index,classIndex);
maxScores[index] = maxClassScore;
maxIndecies[index] = maxClassIndex;
}
}
}
__global__ void bool_arr (bool* d_boxes, int size, bool value=false) {
int index = blockIdx.y*gridDim.x + blockIdx.x;
if (index < size) {
d_boxes[index] = value;
}
}
__global__ void separate_data(float* predictions,float* boxes,float* confidence,float* classes,int size) {
int index = blockIdx.y*gridDim.x + blockIdx.x;
if (index < size) {
int x = index % 25;
if (x > 4) {
classes[(index / 25)*20 + (x-5)] = predictions[index];
}
else if(x==4)
{
confidence[(index / 25)] = predictions[index];
}
else
{
//centers and bounding boxes
boxes[(index / 25)*4 + x] = predictions[index];
}
}
}
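//Layout example (illustrative): the 13*13*5*25 = 21125 raw predictions are in HWC order,
//so index % 25 selects the field inside one 25-value box record:
//  0..3  -> box geometry, written to boxes[(index / 25) * 4 + field]
//  4     -> objectness,   written to confidence[index / 25]
//  5..24 -> class scores,  written to classes[(index / 25) * 20 + (field - 5)]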
//draw colored rectangles around objects
//scale colors first
//thickness = 4 pixels
//size is WxH
__global__ void draw(float* d_image,int x1,int y1,int x2,int y2,float r,float g,float b,int w,int h,int thickness=4) {
int index = blockIdx.y*gridDim.x + blockIdx.x;
//scale for the three channels
if (index < w*h ) {
//index *= 3;
int xPos = (index/3 )%w;
int yPos = (index / (3*w ));
//on the same vertical line
//increase x axis
if ((yPos == y1 || yPos == y2) && (xPos >= x1 && xPos <= x2)) {
for (int i = 0; i < thickness; i++) {
if (index < w*h) {
//r
d_image[index] = 0;
//g
d_image[index + 1] = 0;
//b
d_image[index + 2] = 0;
//next column, i.e. the next x in image terminology, since a row here corresponds to a column there
//remember the image is in NHWC format
index += 3;
}
}
}
else if((xPos==x1 || xPos == x2) && (yPos >= y1 && yPos <= y2) )
{
for (int i = 0; i < thickness; i++) {
if (index < w*h) {
//r
d_image[index] = 0;
//g
d_image[index + 1] =0;
//b
d_image[index + 2] = 0;
}
index += (3*h);
}
}
}
}
template<class T>
void test(T* host_data,T* device_data,int start, int end) {
cout << "host data \n\n";
for (int i = start; i < end; i++) {
cout << host_data[i] << " ";
}
cout << "\n\n";
T* tmp=(T*) malloc(end *sizeof(T));
hipMemcpy(tmp, device_data, end * sizeof(T), hipMemcpyDeviceToHost);
cout << "device data \n\n";
for (int i = start; i < end; i++) {
cout << tmp[i] << " ";
}
cout << "\n\n";
}
template<class T>
void test( T* device_data, int start , int end) {
T* tmp = (T*)malloc(end * sizeof(T));
cudaCheck(hipMemcpy(tmp, device_data, (end) * sizeof(T), hipMemcpyDeviceToHost));
cout << "device data \n\n";
for (int i = start; i < end; i++) {
cout << tmp[i] << " ";
}
cout << "\n\n";
//if (tmp[3] == true)cout << "True \n";
}
template<class T>
void test(T* device_data,int row,int col,int w, int step, int channels,int times,string name,int offset=0,bool xDirection=true) {
cout << name << "\n";
for (int j = 0; j < times; j++) {
test(device_data, (col*w*channels+row*channels+j*step+offset), (col*w*channels+row*channels + (j+1)*step));
//cout << (col*step*channels + row*channels + j*step + offset) <<" "<< (col*step*channels + row*channels + (j + 1)*step) << "\n";
}
}
//--------------------------------------things to be done for optimization---------------------------------------------------
//to be more memory efficient, delete the unneeded values and reassign them
// this may be time costly
//test that
//to be space efficient, free the workspace, but make sure it doesn't include any data related to the convolution
//make sure to print a message when it crashes because of memory
//----------------------------------------------------------------------------------------------------------------------------
#define threadsPerBlock 32
#define shift 500
int main() {
// Layer kernel stride output shape
// -------------------------------------------- -
//Input(416,416,3)
// Convolution 3×3 1 (416, 416, 16)
// MaxPooling 2×2 2 (208, 208, 16)
// Convolution 3×3 1 (208, 208, 32)
// MaxPooling 2×2 2 (104, 104, 32)
// Convolution 3×3 1 (104, 104, 64)
// MaxPooling 2×2 2 (52, 52, 64)
// Convolution 3×3 1 (52, 52, 128)
// MaxPooling 2×2 2 (26, 26, 128)
// Convolution 3×3 1 (26, 26, 256)
// MaxPooling 2×2 2 (13, 13, 256)
// Convolution 3×3 1 (13, 13, 512)
// MaxPooling 2×2 1 (13, 13, 512)
// Convolution 3×3 1 (13, 13, 1024)
// Convolution 3×3 1 (13, 13, 1024)
// Convolution 1×1 1 (13, 13, 125)
// -------------------------------------------- -
//all MAX POOLING layers use valid padding; the last one keeps padding = 0 but uses stride 1
//all CONV are SAME padding with p = 1
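//Shape check (added for clarity): out = (in + 2*p - k) / s + 1, so a SAME 3x3 conv with
//p = 1, s = 1 keeps 416 -> (416 + 2 - 3) / 1 + 1 = 416, while a 2x2 max pool with s = 2
//halves it: (416 - 2) / 2 + 1 = 208.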
int imageH = 416, imageW = 416;
float x = 1.0, y = 0.0;
float* alpha = &x;
float *beta = &y;
long long totalSpace = 0;
size_t space = 0;
//std::cout << "ok\n";
cudnnHandle_t cudnn;
cudnnCheck(cudnnCreate(&cudnn));
//input layer
cudnnTensorDescriptor_t inputDes;
cudnnCheck(cudnnCreateTensorDescriptor(&inputDes));
cudnnCheck(cudnnSetTensor4dDescriptor(inputDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
3,
imageH,
imageW));
//cv::Mat image = load_image("person.jpg");
//std::cout << "image loaded with dims " << image.cols << " X " << image.rows << "\n";
//for (int i = 0; i < 20; i++)std::cout << image.at<float>(cv::Point(0, i)) << " ";
//std::cout << "\n\n";
float* d_input;
hipMalloc(&d_input, imageH*imageW * 3 * sizeof(float));
totalSpace += imageH*imageW * 3 * sizeof(float);
//load W1
float* w1 = (float*)malloc(16 * 3 * 3 * 3 * sizeof(float));
readWeights(w1, 16, 3, 3, 3, "conv1");
float* d_w1;
cudaCheck(hipMalloc(&d_w1, 16 * 3 * 3 * 3 * sizeof(float)));
totalSpace += 16 * 3 * 3 * 3 * sizeof(float);
//copy weights to GPU
cudaCheck(hipMemcpy(d_w1, w1, 16 * 3 * 3 * 3 * sizeof(float), hipMemcpyHostToDevice));
//(416, 416, 16)
float* d_conv1Out;
cudaCheck(hipMalloc(&d_conv1Out, 16 * imageH * imageW * sizeof(float)));
totalSpace += 16 * imageH * imageW * sizeof(float);
//copy data to GPU
//don't forget to add the biases
float* b1=(float*)malloc(16*sizeof(float));
readWeights(b1, 16, 1, 1, 1, "conv1",false);
float* d_b1;
cudaCheck(hipMalloc(&d_b1, 16 * sizeof(float)));
cudaCheck(hipMemcpy(d_b1, b1, 16 * sizeof(float), hipMemcpyHostToDevice));
float* d_max1Out;
cudaCheck(hipMalloc(&d_max1Out, 208 * 208 * 16 * sizeof(float)));
totalSpace += 208 * 208 * 16 * sizeof(float);
//load W2
float* w2 = (float*)malloc(32 * 16 * 3 * 3 * sizeof(float));
readWeights(w2, 32, 16, 3, 3, "conv2");
float* d_w2;
cudaCheck(hipMalloc(&d_w2, 32 * 16 * 3 * 3 * sizeof(float)));
totalSpace += 32 * 16 * 3 * 3 * sizeof(float);
//copy weights to GPU
cudaCheck(hipMemcpy(d_w2, w2, 32 * 16 * 3 * 3 * sizeof(float), hipMemcpyHostToDevice));
float* d_conv2Out;
cudaCheck(hipMalloc(&d_conv2Out, 32 * 208 * 208 * sizeof(float)));
totalSpace += 32 * 208 * 208 * sizeof(float);
//don't forget to add the biases
float* b2=(float*)malloc(32*sizeof(float));
readWeights(b2, 32, 1, 1, 1, "conv2",false);
float* d_b2;
cudaCheck(hipMalloc(&d_b2, 32 * sizeof(float)));
cudaCheck(hipMemcpy(d_b2, b2, 32 * sizeof(float), hipMemcpyHostToDevice));
//load W3
float* w3 = (float*)malloc(64 * 32 * 3 * 3 * sizeof(float));
readWeights(w3, 64, 32, 3, 3, "conv3");
float* d_w3;
hipMalloc(&d_w3, 64 * 32 * 3 * 3 * sizeof(float));
totalSpace += 64 * 32 * 3 * 3 * sizeof(float);
//copy weights to GPU
hipMemcpy(d_w3, w3, 64 * 32 * 3 * 3 * sizeof(float), hipMemcpyHostToDevice);
float* b3=(float*) malloc(64*sizeof(float));
readWeights(b3, 64, 1, 1, 1, "conv3",false);
float* d_b3;
hipMalloc(&d_b3, 64 * sizeof(float));
hipMemcpy(d_b3, b3, 64 * sizeof(float), hipMemcpyHostToDevice);
float* d_max3Out;
hipMalloc(&d_max3Out, 52 * 52 * 64 * sizeof(float));
totalSpace += 52 * 52 * 64 * sizeof(float);
//load W4
float* w4 = (float*)malloc(128 * 64 * 3 * 3 * sizeof(float));
readWeights(w4, 128, 64, 3, 3, "conv4");
float* d_w4;
hipMalloc(&d_w4, 128 * 64 * 3 * 3 * sizeof(float));
totalSpace += 128 * 64 * 3 * 3 * sizeof(float);
//copy weights to GPU
hipMemcpy(d_w4, w4, 128 * 64 * 3 * 3 * sizeof(float), hipMemcpyHostToDevice);
float* d_conv4Out;
hipMalloc(&d_conv4Out, 128 * 52 * 52 * sizeof(float));
totalSpace += 128 * 52 * 52 * sizeof(float);
float* b4=(float*) malloc(128*sizeof(float));
readWeights(b4, 128, 1, 1, 1, "conv4",false);
float* d_b4;
hipMalloc(&d_b4, 128 * sizeof(float));
hipMemcpy(d_b4, b4, 128 * sizeof(float), hipMemcpyHostToDevice);
float* d_max4Out;
hipMalloc(&d_max4Out, 26 * 26 * 128 * sizeof(float));
totalSpace += 26 * 26 * 128 * sizeof(float);
//load W5
float* w5 = (float*)malloc(256 * 128 * 3 * 3 * sizeof(float));
readWeights(w5, 256, 128, 3, 3, "conv5");
float* d_w5;
hipMalloc(&d_w5, 256 * 128 * 3 * 3 * sizeof(float));
totalSpace += 256 * 128 * 3 * 3 * sizeof(float);
//copy weights to GPU
hipMemcpy(d_w5, w5, 256 * 128 * 3 * 3 * sizeof(float), hipMemcpyHostToDevice);
float* d_conv5Out;
hipMalloc(&d_conv5Out, 256 * 26 * 26 * sizeof(float));
totalSpace += 256 * 26 * 26 * sizeof(float);
float* b5=(float*)malloc(256*sizeof(float));
readWeights(b5, 256, 1, 1, 1, "conv5",false);
float* d_b5;
hipMalloc(&d_b5, 256 * sizeof(float));
hipMemcpy(d_b5, b5, 256 * sizeof(float), hipMemcpyHostToDevice);
float* d_max5Out;
hipMalloc(&d_max5Out, 13 * 13 * 256 * sizeof(float));
totalSpace += 13 * 13 * 256 * sizeof(float);
//load W6
float* w6 = (float*)malloc(512 * 256 * 3 * 3 * sizeof(float));
readWeights(w6, 512, 256, 3, 3, "conv6");
float* d_w6;
hipMalloc(&d_w6, 512 * 256 * 3 * 3 * sizeof(float));
totalSpace += 512 * 256 * 3 * 3 * sizeof(float);
//copy weights to GPU
hipMemcpy(d_w6, w6, 512 * 256 * 3 * 3 * sizeof(float), hipMemcpyHostToDevice);
float* d_conv6Out;
hipMalloc(&d_conv6Out, 512 * 13 * 13 * sizeof(float));
totalSpace += 512 * 13 * 13 * sizeof(float);
float* b6=(float*) malloc(512*sizeof(float));
readWeights(b6, 512, 1, 1, 1, "conv6",false);
float* d_b6;
hipMalloc(&d_b6, 512 * sizeof(float));
hipMemcpy(d_b6, b6, 512 * sizeof(float), hipMemcpyHostToDevice);
//here there's padding and stride 1
float* d_max6Out;
hipMalloc(&d_max6Out, 13 * 13 * 512 * sizeof(float));
totalSpace += 13 * 13 * 512 * sizeof(float);
//load W7
float* w7 = (float*)malloc(1024 * 512 * 3 * 3 * sizeof(float));
readWeights(w7, 1024, 512, 3, 3, "conv7");
float* d_w7;
hipMalloc(&d_w7, 1024 * 512 * 3 * 3 * sizeof(float));
totalSpace += 1024 * 512 * 3 * 3 * sizeof(float);
//copy weights to GPU
hipMemcpy(d_w7, w7, 1024 * 512 * 3 * 3 * sizeof(float), hipMemcpyHostToDevice);
float* d_conv7Out;
hipMalloc(&d_conv7Out, 1024 * 13 * 13 * sizeof(float));
totalSpace += 1024 * 13 * 13 * sizeof(float);
float* b7=(float*) malloc(1024*sizeof(float));
readWeights(b7, 1024, 1, 1, 1, "conv7",false);
float* d_b7;
cudaCheck(hipMalloc(&d_b7, 1024 * sizeof(float)));
cudaCheck(hipMemcpy(d_b7, b7, 1024 * sizeof(float), hipMemcpyHostToDevice));
//load W8
float* w8 = (float*)malloc(1024 * 1024 * 3 * 3 * sizeof(float));
readWeights(w8, 1024, 1024, 3, 3, "conv8",true);
float* d_w8;
cudaCheck(hipMalloc(&d_w8, 1024 * 1024 * 3 * 3 * sizeof(float)));
totalSpace += 1024 * 1024 * 3 * 3 * sizeof(float);
//copy weights to GPU
cudaCheck(hipMemcpy(d_w8, w8, 1024 * 1024 * 3 * 3 * sizeof(float), hipMemcpyHostToDevice));
float* d_conv8Out;
cudaCheck(hipMalloc(&d_conv8Out, 1024 * 13 * 13 * sizeof(float)));
totalSpace += 1024 * 13 * 13 * sizeof(float);
float* b8=(float*) malloc(1024*sizeof(float));
readWeights(b8, 1024, 1, 1, 1, "conv8", false);
float* d_b8;
cudaCheck(hipMalloc(&d_b8, 1024 * sizeof(float)));
cudaCheck(hipMemcpy(d_b8, b8, 1024 * sizeof(float), hipMemcpyHostToDevice));
//load W9
float* w9 = (float*)malloc(1024 * 125 * sizeof(float));
readWeights(w9, 125, 1024, 1, 1, "conv9", true);//1x1 conv (125 x 1024 x 1 x 1), matches the w9 allocation above
float* d_w9;
cudaCheck(hipMalloc(&d_w9, 1024 * 125 * sizeof(float)));
totalSpace += 1024 * 125 * sizeof(float);
float* d_conv9Out;
cudaCheck(hipMalloc(&d_conv9Out, 125 * 13 * 13 * sizeof(float)));
totalSpace += 125 * 13 * 13 * sizeof(float);
cout << "total space " << totalSpace / (1024 * 1024) << " MB\n";
float b9[125];
readWeights(b9, 125, 1, 1, 1, "conv9", false);
float* d_b9;
cudaCheck(hipMalloc(&d_b9, 125 * sizeof(float)));
float* d_classes_softmax;
cudaCheck(hipMalloc(&d_classes_softmax, 13 * 13 * 5 * 20 * sizeof(float)));
cv::Scalar colors[20] = { cv::Scalar(254.0, 254.0, 254),cv::Scalar(239.88888888888889, 211.66666666666669, 127),
cv::Scalar(225.77777777777777, 169.33333333333334, 0), cv::Scalar(211.66666666666669, 127.0, 254),
cv::Scalar(197.55555555555557, 84.66666666666667, 127), cv::Scalar(183.44444444444443, 42.33333333333332, 0),
cv::Scalar(169.33333333333334, 0.0, 254), cv::Scalar(155.22222222222223, -42.33333333333335, 127),
cv::Scalar(141.11111111111111, -84.66666666666664, 0), cv::Scalar(127.0, 254.0, 254),
cv::Scalar(112.88888888888889, 211.66666666666669, 127), cv::Scalar(98.77777777777777, 169.33333333333334, 0),
cv::Scalar(84.66666666666667, 127.0, 254), cv::Scalar(70.55555555555556, 84.66666666666667, 127),
cv::Scalar(56.44444444444444, 42.33333333333332, 0), cv::Scalar(42.33333333333332, 0.0, 254),
cv::Scalar(28.222222222222236, -42.33333333333335, 127), cv::Scalar(14.111111111111118, -84.66666666666664, 0),
cv::Scalar(0.0, 254.0, 254), cv::Scalar(-14.111111111111118, 211.66666666666669, 127) };
string classes[20] = { "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse"
, "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor" };
//anchors = [1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52]
float h_anchors[10] = { 1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52 };
float* d_anchors;
cudaCheck(hipMalloc(&d_anchors, 10 * sizeof(float)));
float* d_boxes_dims;
cudaCheck(hipMalloc(&d_boxes_dims, 13 * 13 * 5 * 4 * sizeof(float)));
float* d_predictions;
cudaCheck(hipMalloc(&d_predictions, 13 * 13 * 5 * sizeof(float)));
float* d_classes;
cudaCheck(hipMalloc(&d_classes, 13 * 13 * 5 * 20 * sizeof(float)));
cudaCheck(hipMemcpy(d_anchors, h_anchors, 10 * sizeof(float), hipMemcpyHostToDevice));
bool* d_boxes;
cudaCheck(hipMalloc(&d_boxes, 13 * 13 * 5 * sizeof(bool)));
float* d_maxScorePerBox;
cudaCheck(hipMalloc(&d_maxScorePerBox, 13 * 13 * 5 * sizeof(float)));
int* d_maxScoreIndex;
cudaCheck(hipMalloc(&d_maxScoreIndex, 13 * 13 * 5 * sizeof(int)));
float* d_points;
cudaCheck(hipMalloc(&d_points, 13 * 13 * 5 * 4 * sizeof(float)));
bool h_boxes[13 * 13 * 5];
float* h_points = (float*)malloc(13 * 13 * 5 * 4 * sizeof(float));
float h_maxScorePerBox[13 * 13 * 5];
int h_maxScoreIndex[13 * 13 * 5];
float* h_boxes_dims = (float*)malloc(13 * 13 * 5 * 4 * sizeof(float));
cudaCheck(hipMemcpy(d_b9, b9, 125 * sizeof(float), hipMemcpyHostToDevice));
//workspaces
void* workSpace[9] = { nullptr };
//(16X3X3X3)
cudnnFilterDescriptor_t w1Des;
cudnnCheck(cudnnCreateFilterDescriptor(&w1Des));
cudnnCheck(cudnnSetFilter4dDescriptor(w1Des,
CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW,
16,
3,
3,
3));
cudnnTensorDescriptor_t conv1OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&conv1OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(conv1OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
16,
416,
416));
//cout << "output format NHWC \n";
cudnnConvolutionDescriptor_t conv1Des;
cudnnCheck(cudnnCreateConvolutionDescriptor(&conv1Des));
cudnnCheck(cudnnSetConvolution2dDescriptor(conv1Des,
1,
1,
1,
1,
1,
1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT));
cudnnConvolutionFwdAlgo_t conv1Algo;
cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn,
inputDes,
w1Des,
conv1Des,
conv1OutDes,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0,
&conv1Algo));
cudnnTensorDescriptor_t max1OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&max1OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(max1OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
16,
208,
208));
//cout << "max1 out NHWC\n";
cudnnPoolingDescriptor_t max1Des;
cudnnCheck(cudnnCreatePoolingDescriptor(&max1Des));
cudnnCheck(cudnnSetPooling2dDescriptor(max1Des,
CUDNN_POOLING_MAX,
CUDNN_PROPAGATE_NAN,
2,
2,
0,
0,
2,
2));
cudnnFilterDescriptor_t w2Des;
cudnnCheck(cudnnCreateFilterDescriptor(&w2Des));
cudnnCheck(cudnnSetFilter4dDescriptor(w2Des,
CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW,
32,
16,
3,
3));
//(208, 208, 32)
cudnnTensorDescriptor_t conv2OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&conv2OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(conv2OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
32,
208,
208));
//cout << "conv2 out NHWC\n";
cudnnConvolutionDescriptor_t conv2Des;
cudnnCheck(cudnnCreateConvolutionDescriptor(&conv2Des));
cudnnCheck(cudnnSetConvolution2dDescriptor(conv2Des,
1,
1,
1,
1,
1,
1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT));
cudnnConvolutionFwdAlgo_t conv2Algo;
cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn,
max1OutDes,
w2Des,
conv2Des,
conv2OutDes,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0,
&conv2Algo));
float* d_max2Out;
hipMalloc(&d_max2Out, 104 * 104 * 32 * sizeof(float));
totalSpace += 104 * 104 * 32 * sizeof(float);
cudnnTensorDescriptor_t max2OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&max2OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(max2OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
32,
104,
104));
cudnnPoolingDescriptor_t max2Des;
cudnnCheck(cudnnCreatePoolingDescriptor(&max2Des));
cudnnCheck(cudnnSetPooling2dDescriptor(max2Des,
CUDNN_POOLING_MAX,
CUDNN_PROPAGATE_NAN,
2,
2,
0,
0,
2,
2));
//[3,3,32,64]
cudnnFilterDescriptor_t w3Des;
cudnnCheck(cudnnCreateFilterDescriptor(&w3Des));
cudnnCheck(cudnnSetFilter4dDescriptor(w3Des,
CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW,
64,
32,
3,
3));
//(104, 104, 64)
cudnnTensorDescriptor_t conv3OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&conv3OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(conv3OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
64,
104,
104));
float* d_conv3Out;
hipMalloc(&d_conv3Out, 64 * 104 * 104 * sizeof(float));
totalSpace += 64 * 104 * 104 * sizeof(float);
cudnnConvolutionDescriptor_t conv3Des;
cudnnCheck(cudnnCreateConvolutionDescriptor(&conv3Des));
cudnnCheck(cudnnSetConvolution2dDescriptor(conv3Des,
1,
1,
1,
1,
1,
1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT));
cudnnConvolutionFwdAlgo_t conv3Algo;
cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn,
max2OutDes,
w3Des,
conv3Des,
conv3OutDes,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0,
&conv3Algo));
cudnnTensorDescriptor_t max3OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&max3OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(max3OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
64,
52,
52));
cudnnPoolingDescriptor_t max3Des;
cudnnCheck(cudnnCreatePoolingDescriptor(&max3Des));
cudnnCheck(cudnnSetPooling2dDescriptor(max3Des,
CUDNN_POOLING_MAX,
CUDNN_PROPAGATE_NAN,
2,
2,
0,
0,
2,
2));
cudnnFilterDescriptor_t w4Des;
cudnnCheck(cudnnCreateFilterDescriptor(&w4Des));
cudnnCheck(cudnnSetFilter4dDescriptor(w4Des,
CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW,
128,
64,
3,
3));
//(52, 52, 128)
cudnnTensorDescriptor_t conv4OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&conv4OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(conv4OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
128,
52,
52));
cudnnConvolutionDescriptor_t conv4Des;
cudnnCheck(cudnnCreateConvolutionDescriptor(&conv4Des));
cudnnCheck(cudnnSetConvolution2dDescriptor(conv4Des,
1,
1,
1,
1,
1,
1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT));
cudnnConvolutionFwdAlgo_t conv4Algo;
cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn,
max3OutDes,
w4Des,
conv4Des,
conv4OutDes,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0,
&conv4Algo));
cudnnTensorDescriptor_t max4OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&max4OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(max4OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
128,
26,
26));
cudnnPoolingDescriptor_t max4Des;
cudnnCheck(cudnnCreatePoolingDescriptor(&max4Des));
cudnnCheck(cudnnSetPooling2dDescriptor(max4Des,
CUDNN_POOLING_MAX,
CUDNN_PROPAGATE_NAN,
2,
2,
0,
0,
2,
2));
//[3,3,128,256]
cudnnFilterDescriptor_t w5Des;
cudnnCheck(cudnnCreateFilterDescriptor(&w5Des));
cudnnCheck(cudnnSetFilter4dDescriptor(w5Des,
CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW,
256,
128,
3,
3));
//(26, 26, 256)
cudnnTensorDescriptor_t conv5OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&conv5OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(conv5OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
256,
26,
26));
cudnnConvolutionDescriptor_t conv5Des;
cudnnCheck(cudnnCreateConvolutionDescriptor(&conv5Des));
cudnnCheck(cudnnSetConvolution2dDescriptor(conv5Des,
1,
1,
1,
1,
1,
1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT));
cudnnConvolutionFwdAlgo_t conv5Algo;
cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn,
max4OutDes,
w5Des,
conv5Des,
conv5OutDes,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0,
&conv5Algo));
cudnnTensorDescriptor_t max5OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&max5OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(max5OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
256,
13,
13));
cudnnPoolingDescriptor_t max5Des;
cudnnCheck(cudnnCreatePoolingDescriptor(&max5Des));
cudnnCheck(cudnnSetPooling2dDescriptor(max5Des,
CUDNN_POOLING_MAX,
CUDNN_PROPAGATE_NAN,
2,
2,
0,
0,
2,
2));
cudnnFilterDescriptor_t w6Des;
cudnnCheck(cudnnCreateFilterDescriptor(&w6Des));
cudnnCheck(cudnnSetFilter4dDescriptor(w6Des,
CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW,
512,
256,
3,
3));
//(13, 13, 512)
cudnnTensorDescriptor_t conv6OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&conv6OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(conv6OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
512,
13,
13));
cudnnConvolutionDescriptor_t conv6Des;
cudnnCheck(cudnnCreateConvolutionDescriptor(&conv6Des));
cudnnCheck(cudnnSetConvolution2dDescriptor(conv6Des,
1,
1,
1,
1,
1,
1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT));
cudnnConvolutionFwdAlgo_t conv6Algo;
cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn,
max5OutDes,
w6Des,
conv6Des,
conv6OutDes,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0,
&conv6Algo));
cudnnTensorDescriptor_t max6OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&max6OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(max6OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
512,
13,
13));
cudnnPoolingDescriptor_t max6Des;
cudnnCheck(cudnnCreatePoolingDescriptor(&max6Des));
cudnnCheck(cudnnSetPooling2dDescriptor(max6Des,
CUDNN_POOLING_MAX,
CUDNN_PROPAGATE_NAN,
2,
2,
0,
0,
1,
1));
cudnnFilterDescriptor_t w7Des;
cudnnCheck(cudnnCreateFilterDescriptor(&w7Des));
cudnnCheck(cudnnSetFilter4dDescriptor(w7Des,
CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW,
1024,
512,
3,
3));
//(13 x 13 x 1024)
cudnnTensorDescriptor_t conv7OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&conv7OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(conv7OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
1024,
13,
13));
cudnnConvolutionDescriptor_t conv7Des;
cudnnCheck(cudnnCreateConvolutionDescriptor(&conv7Des));
cudnnCheck(cudnnSetConvolution2dDescriptor(conv7Des,
1,
1,
1,
1,
1,
1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT));
cudnnConvolutionFwdAlgo_t conv7Algo;
cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn,
max6OutDes,
w7Des,
conv7Des,
conv7OutDes,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0,
&conv7Algo));
cudnnFilterDescriptor_t w8Des;
cudnnCheck(cudnnCreateFilterDescriptor(&w8Des));
cudnnCheck(cudnnSetFilter4dDescriptor(w8Des,
CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW,
1024,
1024,
3,
3));
//(13 x 13 x 1024)
cudnnTensorDescriptor_t conv8OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&conv8OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(conv8OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
1024,
13,
13));
cudnnConvolutionDescriptor_t conv8Des;
cudnnCheck(cudnnCreateConvolutionDescriptor(&conv8Des));
cudnnCheck(cudnnSetConvolution2dDescriptor(conv8Des,
1,
1,
1,
1,
1,
1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT));
cudnnConvolutionFwdAlgo_t conv8Algo;
cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn,
conv7OutDes,
w8Des,
conv8Des,
conv8OutDes,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0,
&conv8Algo));
//[1,1,1024,125]
cudnnFilterDescriptor_t w9Des;
cudnnCheck(cudnnCreateFilterDescriptor(&w9Des));
cudnnCheck(cudnnSetFilter4dDescriptor(w9Des,
CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW,
125,
1024,
1,
1));
//copy weights to GPU
cudaCheck(hipMemcpy(d_w9, w9, 1024 * 125 * sizeof(float), hipMemcpyHostToDevice));
//(13 x 13 x 125)
cudnnTensorDescriptor_t conv9OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&conv9OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(conv9OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
125,
13,
13));
cudnnConvolutionDescriptor_t conv9Des;
cudnnCheck(cudnnCreateConvolutionDescriptor(&conv9Des));
cudnnCheck(cudnnSetConvolution2dDescriptor(conv9Des,
0,
0,
1,
1,
1,
1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT));
cudnnConvolutionFwdAlgo_t conv9Algo;
cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn,
conv8OutDes,
w9Des,
conv9Des,
conv9OutDes,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0,
&conv9Algo));
cudnnTensorDescriptor_t softmaxInputDes;
cudnnCheck(cudnnCreateTensorDescriptor(&softmaxInputDes));
cudnnCheck(cudnnSetTensor4dDescriptor(softmaxInputDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
5,
20,
13,
13));
cudnnTensorDescriptor_t softmaxOutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&softmaxOutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(softmaxOutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
5,
20,
13,
13));
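//Note (added for clarity): describing the 13x13x(5*20) class scores as N=5, C=20,
//H=13, W=13 in NHWC and using CUDNN_SOFTMAX_MODE_CHANNEL below makes cudnnSoftmaxForward
//normalize each group of 20 class scores independently, i.e. one softmax per anchor box
//per grid cell.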
int numBlocks[8] = { ceil(sqrt((416 * 416 * 16) / shift)) ,ceil(sqrt((208 * 208 * 32) / shift)) , ceil(sqrt((104 * 104 * 64) / shift))
, ceil(sqrt((52 * 52 * 128) / shift)) , ceil(sqrt((26 * 26 * 256) / shift)) , ceil(sqrt((13 * 13 * 512) / shift))
,ceil(sqrt((13 * 13 * 1024) / shift)) ,ceil(sqrt((13 * 13 * 1024) / shift)) };
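//Grid-size example (added for clarity): for the first entry, 416*416*16 = 2768896
//activations split into chunks of shift = 500 give 5537 chunks and ceil(sqrt(5537)) = 75,
//so leaky_relu_v4 runs on a 75x75 grid of single-thread blocks, one 500-element chunk per block.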
//-------------------------------------------------------START------------------------------------------
char* imagePaths[5] = {"dog.jpg","person.jpg","plane.jpg","motor.jpg","tv.jpg"};
cv::Mat image[5];
for (int i = 0; i < 5; i++) {
image[i]=load_image(imagePaths[i]);
}
float* h_image = (float*)malloc(416 * 416 * 3 * sizeof(float));
for (int i = 0; i < 5; i++) {
long t1 = clock();
hipMemcpy(d_input, image[i].ptr<float>(0), imageH*imageW * 3 * sizeof(float), hipMemcpyHostToDevice);
std::cout << imagePaths[i] << "\n";
//--------------------------------------------------------conv1----------------------------------------------------------
cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
inputDes,
w1Des,
conv1Des,
conv1OutDes,
conv1Algo,
&space));
if (i == 0) {
cudaCheck(hipMalloc(&(workSpace[0]), space));
totalSpace += space;
}
cudnnCheck(cudnnConvolutionForward(cudnn,
alpha,
inputDes,
d_input,
w1Des,
d_w1,
conv1Des,
conv1Algo,
workSpace[0],
space,
beta,
conv1OutDes,
d_conv1Out));
add_biase << <dim3(416, 416), 1 >> >(d_conv1Out, d_b1, 416 * 416 * 16, 416 * 416);
//to be space efficient, free the workspace, but make sure it doesn't include any data related to the convolution
//-----------------------------------------------------relu 1------------------------------------------------------------------
//leaky_relu << <dim3(1665, 1665), 1 >> > (d_conv1Out, .1, 416 * 416 * 16);
//int x = ceil(sqrt((416 * 416 * 16) / ( threadsPerBlock)));
//std::cout << "x = " << x << "\n";
//leaky_relu_v2 << < dim3(x, x), threadsPerBlock >> > (d_conv1Out, .1, 416 * 416 * 16);
//leaky_relu_v3 << <dim3(416,416),1 >> > (d_conv1Out, .1, 416 * 416 * 16, 416 * 416);
leaky_relu_v4 << <dim3(numBlocks[0], numBlocks[0]), 1 >> > (d_conv1Out, .1, 416 * 416 * 16,shift);
//----------------------------------------------------max 1----------------------------------------------------------------
// MaxPooling 2×2 2 (208, 208, 16)
cudnnCheck(cudnnPoolingForward(cudnn,
max1Des,
alpha,
conv1OutDes,
d_conv1Out,
beta,
max1OutDes,
d_max1Out));
//--------------------------------------------------------conv2-------------------------------------------------------------------
//[3,3,16,32]
cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
max1OutDes,
w2Des,
conv2Des,
conv2OutDes,
conv2Algo,
&space));
if (i == 0) {
cudaCheck(hipMalloc(&workSpace[1], space));
totalSpace += space;
}
cudnnCheck(cudnnConvolutionForward(cudnn,
alpha,
max1OutDes,
d_max1Out,
w2Des,
d_w2,
conv2Des,
conv2Algo,
workSpace[1],
space,
beta,
conv2OutDes,
d_conv2Out));
add_biase << <dim3(208, 208), 1 >> >(d_conv2Out, d_b2, 208 * 208 * 32, 208 * 208);
// to be space efficient, free the workspace, but make sure it doesn't include any data related to the convolution
//-----------------------------------------------------relu 2------------------------------------------------------------------
//(208, 208, 32)
//leaky_relu << <dim3(1180, 1180), 1 >> > (d_conv2Out, .1, 208 * 208 * 32);
//leaky_relu_v3 << <dim3(208,208),1 >> > (d_conv2Out, .1, 208 * 208 * 32, 208 * 208);
leaky_relu_v4 << <dim3(numBlocks[1], numBlocks[1]), 1 >> > (d_conv2Out, .1, 208 * 208 * 32, shift);
//----------------------------------------------------max 2----------------------------------------------------------------
//MaxPooling 2×2 2 (104, 104, 32)
cudnnCheck(cudnnPoolingForward(cudnn,
max2Des,
alpha,
conv2OutDes,
d_conv2Out,
beta,
max2OutDes,
d_max2Out));
//--------------------------------------------------------conv3-------------------------------------------------------------------
cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
max2OutDes,
w3Des,
conv3Des,
conv3OutDes,
conv3Algo,
&space));
if (i == 0) {
hipMalloc(&workSpace[2], space);
totalSpace += space;
}
long m = clock();
cudnnCheck(cudnnConvolutionForward(cudnn,
alpha,
max2OutDes,
d_max2Out,
w3Des,
d_w3,
conv3Des,
conv3Algo,
workSpace[2],
space,
beta,
conv3OutDes,
d_conv3Out));
cout << "time for conv 3 " << clock() - m << "\n";
//don't forget to add the biases
add_biase << <dim3(104, 104), 1 >> >(d_conv3Out, d_b3, 104 * 104 * 64, 104 * 104);
//-----------------------------------------------------relu 3------------------------------------------------------------------
////(104, 104, 64)
//leaky_relu << <dim3(835, 835), 1 >> > (d_conv3Out, .1, 104 * 104 * 64);
//leaky_relu_v3 << <dim3(104, 104), 1 >> > (d_conv3Out, .1, 104 * 104 * 64, 104 * 104);
leaky_relu_v4 << <dim3(numBlocks[2], numBlocks[2]), 1 >> > (d_conv3Out, .1, 104 * 104 * 64, shift);
//----------------------------------------------------max 3----------------------------------------------------------------
//MaxPooling 2×2 2 (52, 52, 64)
cudnnCheck(cudnnPoolingForward(cudnn,
max3Des,
alpha,
conv3OutDes,
d_conv3Out,
beta,
max3OutDes,
d_max3Out));
//--------------------------------------------------------conv4-------------------------------------------------------------------
//[3,3,64,128]
cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
max3OutDes,
w4Des,
conv4Des,
conv4OutDes,
conv4Algo,
&space));
if (i == 0) {
hipMalloc(&workSpace[3], space);
totalSpace += space;
}
cudnnCheck(cudnnConvolutionForward(cudnn,
alpha,
max3OutDes,
d_max3Out,
w4Des,
d_w4,
conv4Des,
conv4Algo,
workSpace[3],
space,
beta,
conv4OutDes,
d_conv4Out));
//don't forget to add the biases
//cout << "time for conv 2 " << clock() - m << "\n";
add_biase << <dim3(52, 52), 1 >> >(d_conv4Out, d_b4, 52 * 52 * 128, 52 * 52);
//test(d_conv4Out, 0, 16);
//test(d_conv4Out, 128, 128 + 16);
////test(d_conv2Out, 32+16, 32 + 32);
////test(d_conv1Out, 32 + 16, 32 + 32);
//test(d_conv4Out, 52 * 128, 52 * 128 + 16);
//test(d_conv4Out, 52 * 128 + 128, 52 * 128 + 128 + 16);
//to be space efficient, free the workspace, but make sure it doesn't include any data related to the convolution
//-----------------------------------------------------relu 4------------------------------------------------------------------
////(52, 52, 128)
//leaky_relu << <dim3(600, 600), 1 >> > (d_conv4Out, .1, 52 * 52 * 128);
//leaky_relu_v3 << <dim3(52, 52), 1 >> > (d_conv4Out, .1, 52 * 52 * 128, 52 * 52);
leaky_relu_v4 << <dim3(numBlocks[3], numBlocks[3]), 1 >> > (d_conv4Out, .1, 52 * 52 * 128, shift);
//----------------------------------------------------max 4----------------------------------------------------------------
//MaxPooling 2×2 2 (26, 26, 128)
cudnnCheck(cudnnPoolingForward(cudnn,
max4Des,
alpha,
conv4OutDes,
d_conv4Out,
beta,
max4OutDes,
d_max4Out));
//test(d_max4Out, 0, 16);
//test(d_max4Out, 128, 128 + 16);
////test(d_conv2Out, 32+16, 32 + 32);
////test(d_conv1Out, 32 + 16, 32 + 32);
//test(d_max4Out, 26 * 128, 26 * 128 + 16);
//test(d_max4Out, 26 * 128 + 128, 26 * 128 + 128 + 16);
//--------------------------------------------------------conv5-------------------------------------------------------------------
cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
max4OutDes,
w5Des,
conv5Des,
conv5OutDes,
conv5Algo,
&space));
if (i == 0) {
hipMalloc(&workSpace[4], space);
totalSpace += space;
}
cudnnCheck(cudnnConvolutionForward(cudnn,
alpha,
max4OutDes,
d_max4Out,
w5Des,
d_w5,
conv5Des,
conv5Algo,
workSpace[4],
space,
beta,
conv5OutDes,
d_conv5Out));
//don't forget to add the biases
add_biase << <dim3(28, 28), 1 >> >(d_conv5Out, d_b5, 26 * 26 * 256, 26 * 26);
//test(d_conv5Out, 0, 16);
//test(d_conv5Out, 256, 256 + 16);
////test(d_conv2Out, 32+16, 32 + 32);
////test(d_conv1Out, 32 + 16, 32 + 32);
//test(d_conv5Out, 26 * 256, 26 * 256 + 16);
//test(d_conv5Out, 26 * 256 + 256, 26 * 256 + 256 + 16);
//to be space efficient, free the workspace, but make sure it doesn't include any data related to the convolution
//-----------------------------------------------------relu 5------------------------------------------------------------------
////(26, 26, 256)
//leaky_relu << <dim3(420, 420), 1 >> > (d_conv5Out, .1, 26 * 26 * 256);
//leaky_relu_v3 << <dim3(26, 26), 1 >> > (d_conv5Out, .1, 26 * 26 * 256, 26 * 26);
leaky_relu_v4 << <dim3(numBlocks[4], numBlocks[4]), 1 >> > (d_conv5Out, .1, 26 * 26 * 256, shift);
//----------------------------------------------------max 5----------------------------------------------------------------
//MaxPooling 2×2 2 (13, 13, 256)
cudnnCheck(cudnnPoolingForward(cudnn,
max5Des,
alpha,
conv5OutDes,
d_conv5Out,
beta,
max5OutDes,
d_max5Out));
//--------------------------------------------------------conv6-------------------------------------------------------------------
//[3,3,256,512]
cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
max5OutDes,
w6Des,
conv6Des,
conv6OutDes,
conv6Algo,
&space));
if (i == 0) {
hipMalloc(&workSpace[5], space);
totalSpace += space;
}
cudnnCheck(cudnnConvolutionForward(cudnn,
alpha,
max5OutDes,
d_max5Out,
w6Des,
d_w6,
conv6Des,
conv6Algo,
workSpace[5],
space,
beta,
conv6OutDes,
d_conv6Out));
//don't forget to add the biases
add_biase << <dim3(13, 13), 1 >> > (d_conv6Out, d_b6, 13 * 13 * 512, 13 * 13);
//to be space efficient, free the workspace, but make sure it doesn't include any data related to the convolution
//-----------------------------------------------------relu 6------------------------------------------------------------------
////(13, 13, 512)
//leaky_relu << <dim3(300, 300), 1 >> > (d_conv6Out, .1, 13 * 13 * 512);
//leaky_relu_v3 << <dim3(13, 13), 1 >> > (d_conv6Out, .1, 13 * 13 * 512, 13 * 13);
leaky_relu_v4 << <dim3(numBlocks[5], numBlocks[5]), 1 >> > (d_conv6Out, .1, 13 * 13 * 512, shift);
//----------------------------------------------------max 6----------------------------------------------------------------
//MaxPooling 2×2 1 (13, 13, 512)
cudnnCheck(cudnnPoolingForward(cudnn,
max6Des,
alpha,
conv6OutDes,
d_conv6Out,
beta,
max6OutDes,
d_max6Out));
//--------------------------------------------------------conv7-------------------------------------------------------------------
//[3,3,512,1024]
cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
max6OutDes,
w7Des,
conv7Des,
conv7OutDes,
conv7Algo,
&space));
if (i == 0) {
hipMalloc(&workSpace[6], space);
totalSpace += space;
}
cudnnCheck(cudnnConvolutionForward(cudnn,
alpha,
max6OutDes,
d_max6Out,
w7Des,
d_w7,
conv7Des,
conv7Algo,
workSpace[6],
space,
beta,
conv7OutDes,
d_conv7Out));
//don't forget to add the biases
add_biase << <dim3(13, 13), 1 >> > (d_conv7Out, d_b7, 13 * 13 * 1024, 13 * 13);
//to be space efficient, free the workspace, but make sure it doesn't include any data related to the convolution
//-----------------------------------------------------relu 7------------------------------------------------------------------
////(13 x 13 x 1024)
//leaky_relu << <dim3(420, 420), 1 >> > (d_conv7Out, .1, 13 * 13 * 1024);
//leaky_relu_v3 << <dim3(13, 13), 1 >> > (d_conv7Out, .1, 13 * 13 * 1024, 13 * 13);
leaky_relu_v4 << <dim3(numBlocks[6], numBlocks[6]), 1 >> > (d_conv7Out, .1, 13 * 13 * 1024, shift);
//--------------------------------------------------------conv8-------------------------------------------------------------------
//[3,3,1024,1024]
cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
conv7OutDes,
w8Des,
conv8Des,
conv8OutDes,
conv8Algo,
&space));
if (i == 0) {
hipMalloc(&workSpace[7], space);
totalSpace += space;
}
//cout << "total space " << totalSpace/(1024*1024) << " MB\n";
cudnnCheck(cudnnConvolutionForward(cudnn,
alpha,
conv7OutDes,
d_conv7Out,
w8Des,
d_w8,
conv8Des,
conv8Algo,
workSpace[7],
space,
beta,
conv8OutDes,
d_conv8Out));
//don't forget to add the biases
add_biase << <dim3(13, 13), 1 >> > (d_conv8Out, d_b8, 13 * 13 * 1024, 13 * 13);
//to be space efficient, free the workspace, but make sure it doesn't include any data related to the convolution
//-----------------------------------------------------relu 8------------------------------------------------------------------
////(13 x 13 x 1024)
//leaky_relu << <dim3(420, 420), 1 >> > (d_conv8Out, .1, 13 * 13 * 1024);
//leaky_relu_v3 << <dim3(13, 13), 1 >> > (d_conv8Out, .1, 13 * 13 * 1024, 13 * 13);
//x = ceil(sqrt((13 * 13 * 1024) / shift));
leaky_relu_v4 << <dim3(numBlocks[7], numBlocks[7]), 1 >> > (d_conv8Out, .1, 13 * 13 * 1024, shift);
//--------------------------------------------------------conv9-------------------------------------------------------------------
cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
conv8OutDes,
w9Des,
conv9Des,
conv9OutDes,
conv9Algo,
&space));
if (i == 0) {
hipMalloc(&workSpace[8], space);
totalSpace += space;
}
cudnnCheck(cudnnConvolutionForward(cudnn,
alpha,
conv8OutDes,
d_conv8Out,
w9Des,
d_w9,
conv9Des,
conv9Algo,
workSpace[8],
space,
beta,
conv9OutDes,
d_conv9Out));
//don't forget to add the biases
add_biase << <dim3(13, 13), 1 >> > (d_conv9Out, d_b9, 13 * 13 * 125, 13 * 13);
//another possible optimization: separate the data first, then apply sigmoid/exp with the predefined kernels
sigmoid_exp << <dim3(150, 150), 1 >> > (d_conv9Out, d_anchors, 13 * 13 * 125);
separate_data << <dim3(150, 150), 1 >> > (d_conv9Out, d_boxes_dims, d_predictions, d_classes, 13 * 13 * 125);
cudnnCheck(cudnnSoftmaxForward(cudnn,
CUDNN_SOFTMAX_FAST,
CUDNN_SOFTMAX_MODE_CHANNEL,
alpha,
softmaxInputDes,
d_classes,
beta,
softmaxOutDes,
d_classes_softmax));
scores << <dim3(32, 32), 1 >> > (d_classes_softmax, d_predictions, 13 * 13 * 5);
bool_arr << <dim3(30, 30), 1 >> >(d_boxes, 13 * 13 * 5, false);
filter << < dim3(150, 150), 1 >> > (d_classes_softmax, d_boxes, 0.3, 13 * 13 * 5 * 20);
get_max_scores << <dim3(30, 30), 1 >> > (d_classes_softmax, d_boxes, d_maxScorePerBox, d_maxScoreIndex, 13 * 13 * 5);
calculate_points << <dim3(30, 30), 1 >> > (d_boxes_dims, d_points, d_boxes, 13 * 13 * 5);
//hipDeviceSynchronize();
non_max_supression << < dim3(30, 30), 1 >> > (d_points, d_boxes, d_maxScorePerBox, d_maxScoreIndex, 0.3, 13 * 13 * 5);
cudaCheck(hipMemcpy(h_boxes, d_boxes, 13 * 13 * 5 * sizeof(bool), hipMemcpyDeviceToHost));
cudaCheck(hipMemcpy(h_maxScorePerBox, d_maxScorePerBox, 13 * 13 * 5 * sizeof(float), hipMemcpyDeviceToHost));
cudaCheck(hipMemcpy(h_maxScoreIndex, d_maxScoreIndex, 13 * 13 * 5 * sizeof(int), hipMemcpyDeviceToHost));
//cudaCheck(hipMemcpy(h_boxes_dims, d_boxes_dims, 13 * 13 * 5 * 4 * sizeof(float), hipMemcpyDeviceToHost));
cudaCheck(hipMemcpy(h_points, d_points, 13 * 13 * 5 * 4 * sizeof(float), hipMemcpyDeviceToHost));
cv::Mat output(416, 416, CV_8UC3);
cv::normalize(image[i], output, 0.0, 255.0, cv::NORM_MINMAX);
for (int i = 0; i < 13 * 13 * 5; i++) {
if (h_boxes[i]) {
int index = i * 4;
int left = h_points[index];
int top = h_points[index + 1];
int right = h_points[index + 2];
int bottom = h_points[index + 3];
std::cout << "( " << left << " , " << top << " ) , (" << right << " , " << bottom << " ) class "
<<classes[h_maxScoreIndex[i]]<<" with prop "<<h_maxScorePerBox[i]<<"\n";
if (left < 416 && top < 416 && right < 416 && bottom < 416) {
cv::rectangle(output, cv::Point(left, top), cv::Point(right, bottom), colors[h_maxScoreIndex[i]],3);
//draw << <dim3(416, 416), 1 >> > (d_input, left, top, right, bottom, colors[h_maxScoreIndex[i]].val[0],
//colors[h_maxScoreIndex[i]].val[1], colors[h_maxScoreIndex[i]].val[2], 416, 416);
}
}
}
cudaCheck(hipMemcpy(h_image, d_input, 416 * 416 * 3 * sizeof(float), hipMemcpyDeviceToHost));
//cv::Mat output0(416, 416, CV_32FC3,h_image);
//cv::normalize(output0, output, 0.0, 255.0, cv::NORM_MINMAX);
//cv::cvtColor(output, output, CV_RGB2BGR);
//cv::normalize(output, output, 0.0, 255.0, cv::NORM_MINMAX);
long t2 = clock();
cout << "time = " << t2 - t1 << "\n";
string num = std::to_string(i);
string file = "output" + num + ".png";
save_image(file.c_str(),output );
}
//to be space efficient, free the workspace, but make sure it doesn't include any data related to the convolution
cout << "total space " << totalSpace / (1024 * 1024) << "MB\n";
}
|
6136c0ecf821cc341265099d199f038f33938b50.cu
|
//this version is 250 ms
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<iostream>
#include<cudnn.h>
#include <stdio.h>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include<cublas_v2.h>
#include<string>
#include<fstream>
#include<cmath>
#include<ctime>
using namespace std;
void readWeights(float* weights, int m/*output*/, int n/*input*/, int h, int w, string baseFileName,bool readWeights=true) {
string fileName = "weights2/" + baseFileName;
if (readWeights) {
fileName+="Weights.data";
}
else {
fileName += "Biases.data";
}
ifstream in(fileName, ios::in|ios::binary);
//cout << fileName << "\n";
if (!in.is_open())
{
cout << "file "<<baseFileName<<" didn't open \n";
return;
}
in.read((char*)weights, m*n*h*w * sizeof(float));
in.close();
//cout << baseFileName << " : " << weights[0] << " " << weights[1] << "\n";
}
#define cudnnCheck(exp){\
cudnnStatus_t status=(exp);\
if(status!=CUDNN_STATUS_SUCCESS){\
std::cout<<"Error at line "<<__LINE__<<" "<<cudnnGetErrorString(status)<<"\n";\
std::exit(EXIT_FAILURE);\
}\
}\
#define cudaCheck(exp) {\
cudaError_t status=(exp);\
if(status!=cudaSuccess){\
cerr<<"error at cuda "<<__LINE__<<" "<<cudaGetErrorString(status)<<"\n";\
exit(EXIT_FAILURE);\
}\
}\
cv::Mat load_image(const char* image_path) {
cv::Mat image = cv::imread(image_path, CV_LOAD_IMAGE_COLOR);
if (image.empty()) { cerr << "couldn't open image\n"; }
cv::cvtColor(image, image, cv::COLOR_BGR2RGB);
image.convertTo(image, CV_32FC3);
cv::normalize(image, image, 0, 1, cv::NORM_MINMAX);
cv::Mat resizedImage(416, 416, CV_32FC3);
cv::resize(image, resizedImage, cv::Size(416, 416), 0, 0, cv::INTER_CUBIC);
if (resizedImage.empty())cerr << "resized image empty\n";
//cout << "ok\n";
return resizedImage;
}
void save_image(const char* output_filename,cv::Mat output_image) {
//cv::cvtColor(output_image, output_image, cv::COLOR_RGB2BGR);
//cv::normalize(output_image, output_image, 0.0, 255.0, cv::NORM_MINMAX);
//output_image.convertTo(output_image, CV_8UC3);
cv::imwrite(output_filename, output_image);
}
//incomplete
__global__ void leaky_relu_v2(float* d_data, float alpha, int size) {
int index = (blockIdx.y*gridDim.x + blockIdx.x);
if (index < size) {
float x = d_data[index];
if (x<0) d_data[index] = alpha*x;
}
}
//try constant shift
__global__ void leaky_relu_v3(float* d_data, float alpha, int size,int step) {
int index = blockIdx.y*gridDim.x + blockIdx.x;
if (index < step) {
int channels = (size / step);
index *= channels;
for (int i = index; i < index+channels; i++) {
float x = d_data[i];
if (x<0) d_data[i] = alpha*x;
}
}
}
__global__ void leaky_relu_v4(float* d_data, float alpha, int size, int shift) {
int index = blockIdx.y*gridDim.x + blockIdx.x;
index *= shift;
if (index < size) {
for (int i = index; i < index + shift && i < size; i++) {//also cover the last, possibly partial chunk
float x = d_data[i];
if (x<0) d_data[i] = alpha*x;
}
}
}
__global__ void leaky_relu(float* d_data, float alpha, int size) {
int index = blockIdx.y*gridDim.x + blockIdx.x;
if (index < size) {
float x = d_data[index];
if (x<0) d_data[index] = alpha*x;
}
}
//step is width*height of the output of convolution
/*
@param size is width x height x channels
@Param step is width x height
the data in the format HxWxC
k is computed as index%(size/step)
*/
__global__ void add_biase(float* d_data, float* biases, int size/*WxHxC*/, int step/*WxH*/) {
int index = blockIdx.y*gridDim.x + blockIdx.x;
if (index < step) {
int biaseSize = (size / step);
index *= biaseSize;
for (int i = 0; i < biaseSize; i++) {
d_data[index+i] += biases[i];
}
}
}
__device__ float iou(float bx1x1,float bx1y1,float bx1x2,float bx1y2, float bx2x1, float bx2y1, float bx2x2, float bx2y2) {
float x1 = (bx1x1 > bx2x1) ? bx1x1 : bx2x1;
float y1 = (bx1y1> bx2y1) ? bx1y1 : bx2y1;
float x2 = (bx1x2 > bx2x2) ? bx2x2 : bx1x2;
float y2 = (bx1y2 > bx2y2) ? bx2y2 : bx1y2;
float A1 = (bx1x2 - bx1x1)*(bx1y2 - bx1y1);
float A2 = (bx2x2 - bx2x1)*(bx2y2 - bx2y1);
float A_inter = ((x2 - x1) > 0 ? (x2 - x1) : 0)*((y2 - y1) > 0 ? (y2 - y1) : 0);
return(A_inter / (A1 + A2 - A_inter));
}
//consider calculating the necessary points only
__global__ void calculate_points(float* boxes_dims,float* points,bool* boxes,int size) {
int index = blockIdx.y*gridDim.x + blockIdx.x;
if (index < size) {
//int left = h_boxes_dims[index] - (h_boxes_dims[index + 2] / 2.0);
//int right = h_boxes_dims[index] + (h_boxes_dims[index + 2] / 2.0);
//int top = h_boxes_dims[index + 1] - (h_boxes_dims[index + 3] / 2.0);
//int bottom = h_boxes_dims[index + 1] + (h_boxes_dims[index + 3] / 2.0);
int step = index * 4;
float center_x = boxes_dims[step];
float w = boxes_dims[step + 2];
float center_y = boxes_dims[step + 1];
float h = boxes_dims[step + 3];
points[step] = center_x - ((w) / 2.0);
points[step+2]= center_x + ((w) / 2.0);
points[step + 1] = center_y - ((h) / 2.0);
points[step + 3] = center_y + ((h) / 2.0);
}
}
__global__ void non_max_supression(float* points, bool* boxes,float* maxClassScore, int* maxClassIndex,float threashold=0.3,int size=13*13*5) {
int index = blockIdx.y*gridDim.x + blockIdx.x;
if (index < size) {
float maxClass = maxClassScore[index];
if (maxClass < 0.3) {
boxes[index] = false;
return;
}
int maxClassInd = maxClassIndex[index];
float x1 = points[index * 4];
float y1 = points[index * 4 + 1];
float x2 = points[index * 4 + 2];
float y2 = points[index * 4 + 3];
for (int i = 0; i < size; i++) {
if (boxes[i] && i != index) {
if ( maxClassInd== maxClassIndex[i]) {
if (maxClass > maxClassScore[i]) {
float x = iou(x1,y1,x2,y2, points[i * 4]
, points[i * 4 + 1], points[i * 4 + 2], points[i * 4 + 3]);
if (x >= threashold) {
boxes[i] = false;
}
}
}
}
}
}
}
//20 classes
__global__ void exp(float* classes,int size) {
int index = (blockIdx.y*gridDim.x) + blockIdx.x+threadIdx.x;
if (index<size) {
classes[index] = exp(classes[index]);
}
}
__global__ void softmax(float* classes,int offset, float sum) {
if (threadIdx.x < 20) {
classes[threadIdx.x + offset] /= sum;
}
}
__global__ void filter(float* classes,bool* boxes,float threshold=0.4,int size=13*13*5*20) {
int index = (blockIdx.y*gridDim.x) + blockIdx.x;
if (index < size ) {
if (classes[index] >= threshold) {
boxes[index / 20] = true;
//printf("index %d value %f\n", index, classes[index]);
}
}
}
//blocks*threads
__global__ void sigmoid(float* x,int size) {
int index = (blockIdx.y*gridDim.x) + blockIdx.x + threadIdx.x;
if (index<size) {
x[index] = 1 / (1 + exp(-1*x[index]));
}
}
//calculate centers of the box and the width and height
//calculate the necessary ones
__global__ void calculate_box_dims(float* x, float* d_anchors, int size) {
int index = blockIdx.y*gridDim.x + blockIdx.x;
if (index < size) {
//center_x = (float(col) + sigmoid(tx)) * 32.0
x[index] = (((index / (4)) % 13) + (1.0 / (1 + expf(-1 * x[index]))))*32.0;
//center_y = (float(row) + sigmoid(ty)) * 32.0
x[index+1] = ((index / (13 * 4)) + (1.0 / (1 + expf(-1 * x[index + 1]))))*32.0;
//roi_w = np.exp(tw) * anchors[2 * box + 0] * 32.0
x[index+2] = expf(x[index + 2])*d_anchors[2 * ((index / 25) % 5)] * 32.0;
//roi_h = np.exp(th) * anchors[2 * box + 1] * 32.0
x[index+3] = expf(x[index + 3])*d_anchors[2 * ((index / 25) % 5) + 1] * 32.0;
}
}
__global__ void sigmoid_exp(float* x,float* d_anchors, int size) {
int index = (blockIdx.y*gridDim.x) + blockIdx.x;
if (index < size) {
int cond = index % 25;
switch (cond)
{
case 0 :
//center_x = (float(col) + sigmoid(tx)) * 32.0
x[index] = (((index/(125))%13)+(1.0/(1+expf(-1*x[index]))))*32.0;
break;
case 1:
//center_y = (float(row) + sigmoid(ty)) * 32.0
x[index] = ((index/(13*125)) + (1.0 / (1 + expf(-1*x[index]))))*32.0;
break;
case 2 :
//roi_w = np.exp(tw) * anchors[2 * box + 0] * 32.0
x[index] = expf(x[index])*d_anchors[2 * ((index/25)%5)]*32.0;
break;
case 3 :
//roi_h = np.exp(th) * anchors[2 * box + 1] * 32.0
x[index] = expf(x[index])*d_anchors[2 * ((index / 25) % 5)+1]*32.0 ;
break;
case 4:
//confidence
//if (index == 4)printf("data sample %f\n\n", x[index]);
x[index] = (1.0 / (1 + expf(-1 * x[index])));
break;
}
//if (index <25)printf("data sample %d %f\n",index, x[index]);
}
}
__global__ void scores(float* classes,float* confidence,int size) {
int index = blockIdx.y*gridDim.x+blockIdx.x;
if (index < size ) {
float x = confidence[index];
int step = index * 20;
for (int i = 0; i < 20; i++) {
classes[step + i] *= x;
}
}
}
__global__ void get_max_scores(float* classes, bool* boxes, float* maxScores , int* maxIndecies, int size=13*13*5) {
int index = blockIdx.y*gridDim.x + blockIdx.x;
int classIndex = 20 * index ;
if (index < size ) {
if (boxes[index]) {
float maxClassScore = classes[classIndex];
int maxClassIndex = 0;
float tmp=0;
for (int i = classIndex + 1; i < classIndex + 20; i++) {//scan all 20 class scores
tmp = classes[i];
if (tmp > maxClassScore) {
maxClassScore = tmp;
maxClassIndex = i - classIndex;
}
}
//printf("from get_max_score %d %d\n", index,classIndex);
maxScores[index] = maxClassScore;
maxIndecies[index] = maxClassIndex;
}
}
}
__global__ void bool_arr (bool* d_boxes, int size, bool value=false) {
int index = blockIdx.y*gridDim.x + blockIdx.x;
if (index < size) {
d_boxes[index] = value;
}
}
__global__ void separate_data(float* predictions,float* boxes,float* confidence,float* classes,int size) {
int index = blockIdx.y*gridDim.x + blockIdx.x;
if (index < size) {
int x = index % 25;
if (x > 4) {
classes[(index / 25)*20 + (x-5)] = predictions[index];
}
else if(x==4)
{
confidence[(index / 25)] = predictions[index];
}
else
{
//centers and bounding boxes
boxes[(index / 25)*4 + x] = predictions[index];
}
}
}
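//Worked example (illustrative): the 13x13x125 output stores, for every grid cell, 5 boxes of
//25 values each (tx, ty, tw, th, confidence, then 20 class scores). separate_data splits a
//flat index accordingly, e.g.:
//  index = 52 -> 52 % 25 = 2 -> box coordinate: boxes[(52/25)*4 + 2]        = boxes[10]
//  index = 54 -> 54 % 25 = 4 -> confidence:     confidence[54/25]           = confidence[2]
//  index = 57 -> 57 % 25 = 7 -> class score:    classes[(57/25)*20 + (7-5)] = classes[42]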
//draw colored rectangles around objects
//scale colors first
//thickness = 4 pixels
//size is WxH
__global__ void draw(float* d_image,int x1,int y1,int x2,int y2,float r,float g,float b,int w,int h,int thickness=4) {
int index = blockIdx.y*gridDim.x + blockIdx.x;
//scale for the three channels
if (index < w*h ) {
//index *= 3;
int xPos = (index/3 )%w;
int yPos = (index / (3*w ));
//on the same vertical line
//increase x axis
if ((yPos == y1 || yPos == y2) && (xPos >= x1 && xPos <= x2)) {
for (int i = 0; i < thickness; i++) {
if (index < w*h) {
//r
d_image[index] = 0;
//g
d_image[index + 1] = 0;
//b
d_image[index + 2] = 0;
					//move to the next pixel along x: in NHWC layout consecutive pixels are 3 floats apart
					//remember the image is stored in NHWC format
index += 3;
}
}
}
else if((xPos==x1 || xPos == x2) && (yPos >= y1 && yPos <= y2) )
{
for (int i = 0; i < thickness; i++) {
if (index < w*h) {
//r
d_image[index] = 0;
//g
d_image[index + 1] =0;
//b
d_image[index + 2] = 0;
}
index += (3*h);
}
}
}
}
template<class T>
void test(T* host_data,T* device_data,int start, int end) {
cout << "host data \n\n";
for (int i = start; i < end; i++) {
cout << host_data[i] << " ";
}
cout << "\n\n";
	T* tmp=(T*) malloc(end *sizeof(T));
	cudaCheck(cudaMemcpy(tmp, device_data, end * sizeof(T), cudaMemcpyDeviceToHost));
cout << "device data \n\n";
for (int i = start; i < end; i++) {
cout << tmp[i] << " ";
}
	cout << "\n\n";
	free(tmp);
}
template<class T>
void test( T* device_data, int start , int end) {
T* tmp = (T*)malloc(end * sizeof(T));
cudaCheck(cudaMemcpy(tmp, device_data, (end) * sizeof(T), cudaMemcpyDeviceToHost));
cout << "device data \n\n";
for (int i = start; i < end; i++) {
cout << tmp[i] << " ";
}
	cout << "\n\n";
	//if (tmp[3] == true)cout << "True \n";
	free(tmp);
}
template<class T>
void test(T* device_data,int row,int col,int w, int step, int channels,int times,string name,int offset=0,bool xDirection=true) {
cout << name << "\n";
for (int j = 0; j < times; j++) {
test(device_data, (col*w*channels+row*channels+j*step+offset), (col*w*channels+row*channels + (j+1)*step));
//cout << (col*step*channels + row*channels + j*step + offset) <<" "<< (col*step*channels + row*channels + (j + 1)*step) << "\n";
}
}
//--------------------------------------things to be done for optimization---------------------------------------------------
//to be more memory efficient, delete the unneeded values and reassign them
// this may be time costly
//test that
//to be space efficient, free the workspace, but make sure it doesn't include any data related to convolution
//make sure that when it crashes because of memory, that fact gets printed
//----------------------------------------------------------------------------------------------------------------------------
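//Sketch only (not part of the original pipeline): one way to act on the note above and release
//the convolution workspaces once the last image has been processed; `workSpace` is the array
//declared in main() and `count` would be 9 there.
static void free_workspaces(void** workSpace, int count) {
	for (int i = 0; i < count; i++) {
		if (workSpace[i] != nullptr) {
			//safe once no further cudnnConvolutionForward call uses this workspace
			cudaCheck(cudaFree(workSpace[i]));
			workSpace[i] = nullptr;
		}
	}
}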
#define threadsPerBlock 32
#define shift 500
int main() {
// Layer kernel stride output shape
// -------------------------------------------- -
//Input(416,416,3)
// Convolution 3×3 1 (416, 416, 16)
// MaxPooling 2×2 2 (208, 208, 16)
// Convolution 3×3 1 (208, 208, 32)
// MaxPooling 2×2 2 (104, 104, 32)
// Convolution 3×3 1 (104, 104, 64)
// MaxPooling 2×2 2 (52, 52, 64)
// Convolution 3×3 1 (52, 52, 128)
// MaxPooling 2×2 2 (26, 26, 128)
// Convolution 3×3 1 (26, 26, 256)
// MaxPooling 2×2 2 (13, 13, 256)
// Convolution 3×3 1 (13, 13, 512)
// MaxPooling 2×2 1 (13, 13, 512)
// Convolution 3×3 1 (13, 13, 1024)
// Convolution 3×3 1 (13, 13, 1024)
// Convolution 1×1 1 (13, 13, 125)
// -------------------------------------------- -
//all MAX POOLING is valid padding except last one but padding = 0
//all CONV are SAME padding with p = 1
int imageH = 416, imageW = 416;
float x = 1.0, y = 0.0;
float* alpha = &x;
float *beta = &y;
long long totalSpace = 0;
size_t space = 0;
//std::cout << "ok\n";
cudnnHandle_t cudnn;
cudnnCheck(cudnnCreate(&cudnn));
//input layer
cudnnTensorDescriptor_t inputDes;
cudnnCheck(cudnnCreateTensorDescriptor(&inputDes));
cudnnCheck(cudnnSetTensor4dDescriptor(inputDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
3,
imageH,
imageW));
//cv::Mat image = load_image("person.jpg");
//std::cout << "image loaded with dims " << image.cols << " X " << image.rows << "\n";
//for (int i = 0; i < 20; i++)std::cout << image.at<float>(cv::Point(0, i)) << " ";
//std::cout << "\n\n";
float* d_input;
cudaMalloc(&d_input, imageH*imageW * 3 * sizeof(float));
totalSpace += imageH*imageW * 3 * sizeof(float);
//load W1
float* w1 = (float*)malloc(16 * 3 * 3 * 3 * sizeof(float));
readWeights(w1, 16, 3, 3, 3, "conv1");
float* d_w1;
cudaCheck(cudaMalloc(&d_w1, 16 * 3 * 3 * 3 * sizeof(float)));
totalSpace += 16 * 3 * 3 * 3 * sizeof(float);
//copy weights to GPU
cudaCheck(cudaMemcpy(d_w1, w1, 16 * 3 * 3 * 3 * sizeof(float), cudaMemcpyHostToDevice));
//(416, 416, 16)
float* d_conv1Out;
cudaCheck(cudaMalloc(&d_conv1Out, 16 * imageH * imageW * sizeof(float)));
totalSpace += 16 * imageH * imageW * sizeof(float);
//copy data to GPU
//don't forget to add the biases
float* b1=(float*)malloc(16*sizeof(float));
readWeights(b1, 16, 1, 1, 1, "conv1",false);
float* d_b1;
cudaCheck(cudaMalloc(&d_b1, 16 * sizeof(float)));
cudaCheck(cudaMemcpy(d_b1, b1, 16 * sizeof(float), cudaMemcpyHostToDevice));
float* d_max1Out;
cudaCheck(cudaMalloc(&d_max1Out, 208 * 208 * 16 * sizeof(float)));
totalSpace += 208 * 208 * 16 * sizeof(float);
//load W2
float* w2 = (float*)malloc(32 * 16 * 3 * 3 * sizeof(float));
readWeights(w2, 32, 16, 3, 3, "conv2");
float* d_w2;
cudaCheck(cudaMalloc(&d_w2, 32 * 16 * 3 * 3 * sizeof(float)));
totalSpace += 32 * 16 * 3 * 3 * sizeof(float);
//copy weights to GPU
cudaCheck(cudaMemcpy(d_w2, w2, 32 * 16 * 3 * 3 * sizeof(float), cudaMemcpyHostToDevice));
float* d_conv2Out;
cudaCheck(cudaMalloc(&d_conv2Out, 32 * 208 * 208 * sizeof(float)));
totalSpace += 32 * 208 * 208 * sizeof(float);
//don't forget to add the biases
float* b2=(float*)malloc(32*sizeof(float));
readWeights(b2, 32, 1, 1, 1, "conv2",false);
float* d_b2;
cudaCheck(cudaMalloc(&d_b2, 32 * sizeof(float)));
cudaCheck(cudaMemcpy(d_b2, b2, 32 * sizeof(float), cudaMemcpyHostToDevice));
//load W3
float* w3 = (float*)malloc(64 * 32 * 3 * 3 * sizeof(float));
readWeights(w3, 64, 32, 3, 3, "conv3");
float* d_w3;
cudaMalloc(&d_w3, 64 * 32 * 3 * 3 * sizeof(float));
totalSpace += 64 * 32 * 3 * 3 * sizeof(float);
//copy weights to GPU
cudaMemcpy(d_w3, w3, 64 * 32 * 3 * 3 * sizeof(float), cudaMemcpyHostToDevice);
float* b3=(float*) malloc(64*sizeof(float));
readWeights(b3, 64, 1, 1, 1, "conv3",false);
float* d_b3;
cudaMalloc(&d_b3, 64 * sizeof(float));
cudaMemcpy(d_b3, b3, 64 * sizeof(float), cudaMemcpyHostToDevice);
float* d_max3Out;
cudaMalloc(&d_max3Out, 52 * 52 * 64 * sizeof(float));
totalSpace += 52 * 52 * 64 * sizeof(float);
//load W4
float* w4 = (float*)malloc(128 * 64 * 3 * 3 * sizeof(float));
readWeights(w4, 128, 64, 3, 3, "conv4");
float* d_w4;
cudaMalloc(&d_w4, 128 * 64 * 3 * 3 * sizeof(float));
totalSpace += 128 * 64 * 3 * 3 * sizeof(float);
//copy weights to GPU
cudaMemcpy(d_w4, w4, 128 * 64 * 3 * 3 * sizeof(float), cudaMemcpyHostToDevice);
float* d_conv4Out;
cudaMalloc(&d_conv4Out, 128 * 52 * 52 * sizeof(float));
totalSpace += 128 * 52 * 52 * sizeof(float);
float* b4=(float*) malloc(128*sizeof(float));
readWeights(b4, 128, 1, 1, 1, "conv4",false);
float* d_b4;
cudaMalloc(&d_b4, 128 * sizeof(float));
cudaMemcpy(d_b4, b4, 128 * sizeof(float), cudaMemcpyHostToDevice);
float* d_max4Out;
cudaMalloc(&d_max4Out, 26 * 26 * 128 * sizeof(float));
totalSpace += 26 * 26 * 128 * sizeof(float);
//load W5
float* w5 = (float*)malloc(256 * 128 * 3 * 3 * sizeof(float));
readWeights(w5, 256, 128, 3, 3, "conv5");
float* d_w5;
cudaMalloc(&d_w5, 256 * 128 * 3 * 3 * sizeof(float));
totalSpace += 256 * 128 * 3 * 3 * sizeof(float);
//copy weights to GPU
cudaMemcpy(d_w5, w5, 256 * 128 * 3 * 3 * sizeof(float), cudaMemcpyHostToDevice);
float* d_conv5Out;
cudaMalloc(&d_conv5Out, 256 * 26 * 26 * sizeof(float));
totalSpace += 256 * 26 * 26 * sizeof(float);
float* b5=(float*)malloc(256*sizeof(float));
readWeights(b5, 256, 1, 1, 1, "conv5",false);
float* d_b5;
cudaMalloc(&d_b5, 256 * sizeof(float));
cudaMemcpy(d_b5, b5, 256 * sizeof(float), cudaMemcpyHostToDevice);
float* d_max5Out;
cudaMalloc(&d_max5Out, 13 * 13 * 256 * sizeof(float));
totalSpace += 13 * 13 * 256 * sizeof(float);
//load W6
float* w6 = (float*)malloc(512 * 256 * 3 * 3 * sizeof(float));
readWeights(w6, 512, 256, 3, 3, "conv6");
float* d_w6;
cudaMalloc(&d_w6, 512 * 256 * 3 * 3 * sizeof(float));
totalSpace += 512 * 256 * 3 * 3 * sizeof(float);
//copy weights to GPU
cudaMemcpy(d_w6, w6, 512 * 256 * 3 * 3 * sizeof(float), cudaMemcpyHostToDevice);
float* d_conv6Out;
cudaMalloc(&d_conv6Out, 512 * 13 * 13 * sizeof(float));
totalSpace += 512 * 13 * 13 * sizeof(float);
float* b6=(float*) malloc(512*sizeof(float));
readWeights(b6, 512, 1, 1, 1, "conv6",false);
float* d_b6;
cudaMalloc(&d_b6, 512 * sizeof(float));
cudaMemcpy(d_b6, b6, 512 * sizeof(float), cudaMemcpyHostToDevice);
//here there's padding and stride 1
float* d_max6Out;
cudaMalloc(&d_max6Out, 13 * 13 * 512 * sizeof(float));
totalSpace += 13 * 13 * 512 * sizeof(float);
//load W7
float* w7 = (float*)malloc(1024 * 512 * 3 * 3 * sizeof(float));
readWeights(w7, 1024, 512, 3, 3, "conv7");
float* d_w7;
cudaMalloc(&d_w7, 1024 * 512 * 3 * 3 * sizeof(float));
totalSpace += 1024 * 512 * 3 * 3 * sizeof(float);
//copy weights to GPU
cudaMemcpy(d_w7, w7, 1024 * 512 * 3 * 3 * sizeof(float), cudaMemcpyHostToDevice);
float* d_conv7Out;
cudaMalloc(&d_conv7Out, 1024 * 13 * 13 * sizeof(float));
totalSpace += 1024 * 13 * 13 * sizeof(float);
float* b7=(float*) malloc(1024*sizeof(float));
readWeights(b7, 1024, 1, 1, 1, "conv7",false);
float* d_b7;
cudaCheck(cudaMalloc(&d_b7, 1024 * sizeof(float)));
cudaCheck(cudaMemcpy(d_b7, b7, 1024 * sizeof(float), cudaMemcpyHostToDevice));
//load W8
float* w8 = (float*)malloc(1024 * 1024 * 3 * 3 * sizeof(float));
readWeights(w8, 1024, 1024, 3, 3, "conv8",true);
float* d_w8;
cudaCheck(cudaMalloc(&d_w8, 1024 * 1024 * 3 * 3 * sizeof(float)));
totalSpace += 1024 * 1024 * 3 * 3 * sizeof(float);
//copy weights to GPU
cudaCheck(cudaMemcpy(d_w8, w8, 1024 * 1024 * 3 * 3 * sizeof(float), cudaMemcpyHostToDevice));
float* d_conv8Out;
cudaCheck(cudaMalloc(&d_conv8Out, 1024 * 13 * 13 * sizeof(float)));
totalSpace += 1024 * 13 * 13 * sizeof(float);
float* b8=(float*) malloc(1024*sizeof(float));
readWeights(b8, 1024, 1, 1, 1, "conv8", false);
float* d_b8;
cudaCheck(cudaMalloc(&d_b8, 1024 * sizeof(float)));
cudaCheck(cudaMemcpy(d_b8, b8, 1024 * sizeof(float), cudaMemcpyHostToDevice));
//load W9
	float* w9 = (float*)malloc(1024 * 125 * sizeof(float));
	//conv9 is a 1x1 convolution (125 filters over 1024 channels), so only 1024*125 weights are read
	readWeights(w9, 1024, 125, 1, 1, "conv9", true);
float* d_w9;
cudaCheck(cudaMalloc(&d_w9, 1024 * 125 * sizeof(float)));
totalSpace += 1024 * 125 * sizeof(float);
float* d_conv9Out;
cudaCheck(cudaMalloc(&d_conv9Out, 125 * 13 * 13 * sizeof(float)));
totalSpace += 125 * 13 * 13 * sizeof(float);
cout << "total space " << totalSpace / (1024 * 1024) << " MB\n";
float b9[125];
readWeights(b9, 125, 1, 1, 1, "conv9", false);
float* d_b9;
cudaCheck(cudaMalloc(&d_b9, 125 * sizeof(float)));
float* d_classes_softmax;
cudaCheck(cudaMalloc(&d_classes_softmax, 13 * 13 * 5 * 20 * sizeof(float)));
cv::Scalar colors[20] = { cv::Scalar(254.0, 254.0, 254),cv::Scalar(239.88888888888889, 211.66666666666669, 127),
cv::Scalar(225.77777777777777, 169.33333333333334, 0), cv::Scalar(211.66666666666669, 127.0, 254),
cv::Scalar(197.55555555555557, 84.66666666666667, 127), cv::Scalar(183.44444444444443, 42.33333333333332, 0),
cv::Scalar(169.33333333333334, 0.0, 254), cv::Scalar(155.22222222222223, -42.33333333333335, 127),
cv::Scalar(141.11111111111111, -84.66666666666664, 0), cv::Scalar(127.0, 254.0, 254),
cv::Scalar(112.88888888888889, 211.66666666666669, 127), cv::Scalar(98.77777777777777, 169.33333333333334, 0),
cv::Scalar(84.66666666666667, 127.0, 254), cv::Scalar(70.55555555555556, 84.66666666666667, 127),
cv::Scalar(56.44444444444444, 42.33333333333332, 0), cv::Scalar(42.33333333333332, 0.0, 254),
cv::Scalar(28.222222222222236, -42.33333333333335, 127), cv::Scalar(14.111111111111118, -84.66666666666664, 0),
cv::Scalar(0.0, 254.0, 254), cv::Scalar(-14.111111111111118, 211.66666666666669, 127) };
string classes[20] = { "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse"
, "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor" };
//anchors = [1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52]
float h_anchors[10] = { 1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52 };
float* d_anchors;
cudaCheck(cudaMalloc(&d_anchors, 10 * sizeof(float)));
float* d_boxes_dims;
cudaCheck(cudaMalloc(&d_boxes_dims, 13 * 13 * 5 * 4 * sizeof(float)));
float* d_predictions;
cudaCheck(cudaMalloc(&d_predictions, 13 * 13 * 5 * sizeof(float)));
float* d_classes;
cudaCheck(cudaMalloc(&d_classes, 13 * 13 * 5 * 20 * sizeof(float)));
cudaCheck(cudaMemcpy(d_anchors, h_anchors, 10 * sizeof(float), cudaMemcpyHostToDevice));
bool* d_boxes;
cudaCheck(cudaMalloc(&d_boxes, 13 * 13 * 5 * sizeof(bool)));
float* d_maxScorePerBox;
cudaCheck(cudaMalloc(&d_maxScorePerBox, 13 * 13 * 5 * sizeof(float)));
int* d_maxScoreIndex;
cudaCheck(cudaMalloc(&d_maxScoreIndex, 13 * 13 * 5 * sizeof(int)));
float* d_points;
cudaCheck(cudaMalloc(&d_points, 13 * 13 * 5 * 4 * sizeof(float)));
bool h_boxes[13 * 13 * 5];
float* h_points = (float*)malloc(13 * 13 * 5 * 4 * sizeof(float));
float h_maxScorePerBox[13 * 13 * 5];
int h_maxScoreIndex[13 * 13 * 5];
float* h_boxes_dims = (float*)malloc(13 * 13 * 5 * 4 * sizeof(float));
cudaCheck(cudaMemcpy(d_b9, b9, 125 * sizeof(float), cudaMemcpyHostToDevice));
	//workspaces
void* workSpace[9] = { nullptr };
//(16X3X3X3)
cudnnFilterDescriptor_t w1Des;
cudnnCheck(cudnnCreateFilterDescriptor(&w1Des));
cudnnCheck(cudnnSetFilter4dDescriptor(w1Des,
CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW,
16,
3,
3,
3));
cudnnTensorDescriptor_t conv1OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&conv1OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(conv1OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
16,
416,
416));
//cout << "output format NHWC \n";
cudnnConvolutionDescriptor_t conv1Des;
cudnnCheck(cudnnCreateConvolutionDescriptor(&conv1Des));
cudnnCheck(cudnnSetConvolution2dDescriptor(conv1Des,
1,
1,
1,
1,
1,
1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT));
cudnnConvolutionFwdAlgo_t conv1Algo;
cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn,
inputDes,
w1Des,
conv1Des,
conv1OutDes,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0,
&conv1Algo));
cudnnTensorDescriptor_t max1OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&max1OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(max1OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
16,
208,
208));
//cout << "max1 out NHWC\n";
cudnnPoolingDescriptor_t max1Des;
cudnnCheck(cudnnCreatePoolingDescriptor(&max1Des));
cudnnCheck(cudnnSetPooling2dDescriptor(max1Des,
CUDNN_POOLING_MAX,
CUDNN_PROPAGATE_NAN,
2,
2,
0,
0,
2,
2));
cudnnFilterDescriptor_t w2Des;
cudnnCheck(cudnnCreateFilterDescriptor(&w2Des));
cudnnCheck(cudnnSetFilter4dDescriptor(w2Des,
CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW,
32,
16,
3,
3));
//(208, 208, 32)
cudnnTensorDescriptor_t conv2OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&conv2OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(conv2OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
32,
208,
208));
//cout << "conv2 out NHWC\n";
cudnnConvolutionDescriptor_t conv2Des;
cudnnCheck(cudnnCreateConvolutionDescriptor(&conv2Des));
cudnnCheck(cudnnSetConvolution2dDescriptor(conv2Des,
1,
1,
1,
1,
1,
1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT));
cudnnConvolutionFwdAlgo_t conv2Algo;
cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn,
max1OutDes,
w2Des,
conv2Des,
conv2OutDes,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0,
&conv2Algo));
float* d_max2Out;
cudaMalloc(&d_max2Out, 104 * 104 * 32 * sizeof(float));
totalSpace += 104 * 104 * 32 * sizeof(float);
cudnnTensorDescriptor_t max2OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&max2OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(max2OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
32,
104,
104));
cudnnPoolingDescriptor_t max2Des;
cudnnCheck(cudnnCreatePoolingDescriptor(&max2Des));
cudnnCheck(cudnnSetPooling2dDescriptor(max2Des,
CUDNN_POOLING_MAX,
CUDNN_PROPAGATE_NAN,
2,
2,
0,
0,
2,
2));
//[3,3,32,64]
cudnnFilterDescriptor_t w3Des;
cudnnCheck(cudnnCreateFilterDescriptor(&w3Des));
cudnnCheck(cudnnSetFilter4dDescriptor(w3Des,
CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW,
64,
32,
3,
3));
//(104, 104, 64)
cudnnTensorDescriptor_t conv3OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&conv3OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(conv3OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
64,
104,
104));
float* d_conv3Out;
cudaMalloc(&d_conv3Out, 64 * 104 * 104 * sizeof(float));
totalSpace += 64 * 104 * 104 * sizeof(float);
cudnnConvolutionDescriptor_t conv3Des;
cudnnCheck(cudnnCreateConvolutionDescriptor(&conv3Des));
cudnnCheck(cudnnSetConvolution2dDescriptor(conv3Des,
1,
1,
1,
1,
1,
1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT));
cudnnConvolutionFwdAlgo_t conv3Algo;
cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn,
max2OutDes,
w3Des,
conv3Des,
conv3OutDes,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0,
&conv3Algo));
cudnnTensorDescriptor_t max3OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&max3OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(max3OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
64,
52,
52));
cudnnPoolingDescriptor_t max3Des;
cudnnCheck(cudnnCreatePoolingDescriptor(&max3Des));
cudnnCheck(cudnnSetPooling2dDescriptor(max3Des,
CUDNN_POOLING_MAX,
CUDNN_PROPAGATE_NAN,
2,
2,
0,
0,
2,
2));
cudnnFilterDescriptor_t w4Des;
cudnnCheck(cudnnCreateFilterDescriptor(&w4Des));
cudnnCheck(cudnnSetFilter4dDescriptor(w4Des,
CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW,
128,
64,
3,
3));
//(52, 52, 128)
cudnnTensorDescriptor_t conv4OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&conv4OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(conv4OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
128,
52,
52));
cudnnConvolutionDescriptor_t conv4Des;
cudnnCheck(cudnnCreateConvolutionDescriptor(&conv4Des));
cudnnCheck(cudnnSetConvolution2dDescriptor(conv4Des,
1,
1,
1,
1,
1,
1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT));
cudnnConvolutionFwdAlgo_t conv4Algo;
cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn,
max3OutDes,
w4Des,
conv4Des,
conv4OutDes,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0,
&conv4Algo));
cudnnTensorDescriptor_t max4OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&max4OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(max4OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
128,
26,
26));
cudnnPoolingDescriptor_t max4Des;
cudnnCheck(cudnnCreatePoolingDescriptor(&max4Des));
cudnnCheck(cudnnSetPooling2dDescriptor(max4Des,
CUDNN_POOLING_MAX,
CUDNN_PROPAGATE_NAN,
2,
2,
0,
0,
2,
2));
//[3,3,128,256]
cudnnFilterDescriptor_t w5Des;
cudnnCheck(cudnnCreateFilterDescriptor(&w5Des));
cudnnCheck(cudnnSetFilter4dDescriptor(w5Des,
CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW,
256,
128,
3,
3));
//(26, 26, 256)
cudnnTensorDescriptor_t conv5OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&conv5OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(conv5OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
256,
26,
26));
cudnnConvolutionDescriptor_t conv5Des;
cudnnCheck(cudnnCreateConvolutionDescriptor(&conv5Des));
cudnnCheck(cudnnSetConvolution2dDescriptor(conv5Des,
1,
1,
1,
1,
1,
1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT));
cudnnConvolutionFwdAlgo_t conv5Algo;
cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn,
max4OutDes,
w5Des,
conv5Des,
conv5OutDes,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0,
&conv5Algo));
cudnnTensorDescriptor_t max5OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&max5OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(max5OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
256,
13,
13));
cudnnPoolingDescriptor_t max5Des;
cudnnCheck(cudnnCreatePoolingDescriptor(&max5Des));
cudnnCheck(cudnnSetPooling2dDescriptor(max5Des,
CUDNN_POOLING_MAX,
CUDNN_PROPAGATE_NAN,
2,
2,
0,
0,
2,
2));
cudnnFilterDescriptor_t w6Des;
cudnnCheck(cudnnCreateFilterDescriptor(&w6Des));
cudnnCheck(cudnnSetFilter4dDescriptor(w6Des,
CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW,
512,
256,
3,
3));
//(13, 13, 512)
cudnnTensorDescriptor_t conv6OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&conv6OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(conv6OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
512,
13,
13));
cudnnConvolutionDescriptor_t conv6Des;
cudnnCheck(cudnnCreateConvolutionDescriptor(&conv6Des));
cudnnCheck(cudnnSetConvolution2dDescriptor(conv6Des,
1,
1,
1,
1,
1,
1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT));
cudnnConvolutionFwdAlgo_t conv6Algo;
cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn,
max5OutDes,
w6Des,
conv6Des,
conv6OutDes,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0,
&conv6Algo));
cudnnTensorDescriptor_t max6OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&max6OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(max6OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
512,
13,
13));
cudnnPoolingDescriptor_t max6Des;
cudnnCheck(cudnnCreatePoolingDescriptor(&max6Des));
cudnnCheck(cudnnSetPooling2dDescriptor(max6Des,
CUDNN_POOLING_MAX,
CUDNN_PROPAGATE_NAN,
2,
2,
0,
0,
1,
1));
cudnnFilterDescriptor_t w7Des;
cudnnCheck(cudnnCreateFilterDescriptor(&w7Des));
cudnnCheck(cudnnSetFilter4dDescriptor(w7Des,
CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW,
1024,
512,
3,
3));
//(13 x 13 x 1024)
cudnnTensorDescriptor_t conv7OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&conv7OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(conv7OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
1024,
13,
13));
cudnnConvolutionDescriptor_t conv7Des;
cudnnCheck(cudnnCreateConvolutionDescriptor(&conv7Des));
cudnnCheck(cudnnSetConvolution2dDescriptor(conv7Des,
1,
1,
1,
1,
1,
1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT));
cudnnConvolutionFwdAlgo_t conv7Algo;
cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn,
max6OutDes,
w7Des,
conv7Des,
conv7OutDes,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0,
&conv7Algo));
cudnnFilterDescriptor_t w8Des;
cudnnCheck(cudnnCreateFilterDescriptor(&w8Des));
cudnnCheck(cudnnSetFilter4dDescriptor(w8Des,
CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW,
1024,
1024,
3,
3));
//(13 x 13 x 1024)
cudnnTensorDescriptor_t conv8OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&conv8OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(conv8OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
1024,
13,
13));
cudnnConvolutionDescriptor_t conv8Des;
cudnnCheck(cudnnCreateConvolutionDescriptor(&conv8Des));
cudnnCheck(cudnnSetConvolution2dDescriptor(conv8Des,
1,
1,
1,
1,
1,
1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT));
cudnnConvolutionFwdAlgo_t conv8Algo;
cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn,
conv7OutDes,
w8Des,
conv8Des,
conv8OutDes,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0,
&conv8Algo));
//[1,1,1024,125]
cudnnFilterDescriptor_t w9Des;
cudnnCheck(cudnnCreateFilterDescriptor(&w9Des));
cudnnCheck(cudnnSetFilter4dDescriptor(w9Des,
CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW,
125,
1024,
1,
1));
//copy weights to GPU
cudaCheck(cudaMemcpy(d_w9, w9, 1024 * 125 * sizeof(float), cudaMemcpyHostToDevice));
//(13 x 13 x 125)
cudnnTensorDescriptor_t conv9OutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&conv9OutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(conv9OutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1,
125,
13,
13));
cudnnConvolutionDescriptor_t conv9Des;
cudnnCheck(cudnnCreateConvolutionDescriptor(&conv9Des));
cudnnCheck(cudnnSetConvolution2dDescriptor(conv9Des,
0,
0,
1,
1,
1,
1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT));
cudnnConvolutionFwdAlgo_t conv9Algo;
cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn,
conv8OutDes,
w9Des,
conv9Des,
conv9OutDes,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0,
&conv9Algo));
cudnnTensorDescriptor_t softmaxInputDes;
cudnnCheck(cudnnCreateTensorDescriptor(&softmaxInputDes));
cudnnCheck(cudnnSetTensor4dDescriptor(softmaxInputDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
5,
20,
13,
13));
cudnnTensorDescriptor_t softmaxOutDes;
cudnnCheck(cudnnCreateTensorDescriptor(&softmaxOutDes));
cudnnCheck(cudnnSetTensor4dDescriptor(softmaxOutDes,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
5,
20,
13,
13));
int numBlocks[8] = { ceil(sqrt((416 * 416 * 16) / shift)) ,ceil(sqrt((208 * 208 * 32) / shift)) , ceil(sqrt((104 * 104 * 64) / shift))
, ceil(sqrt((52 * 52 * 128) / shift)) , ceil(sqrt((26 * 26 * 256) / shift)) , ceil(sqrt((13 * 13 * 512) / shift))
,ceil(sqrt((13 * 13 * 1024) / shift)) ,ceil(sqrt((13 * 13 * 1024) / shift)) };
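	//Worked example (illustrative): for conv1, 416*416*16 = 2,768,896 activations;
	//2,768,896 / shift(500) = 5537 (integer division) and ceil(sqrt(5537)) = 75, so the
	//leaky_relu_v4 launch below uses a 75x75 grid of single-thread blocks, presumably each
	//covering `shift` consecutive elements (75*75*500 = 2,812,500 >= 2,768,896).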
//-------------------------------------------------------START------------------------------------------
char* imagePaths[5] = {"dog.jpg","person.jpg","plane.jpg","motor.jpg","tv.jpg"};
cv::Mat image[5];
for (int i = 0; i < 5; i++) {
image[i]=load_image(imagePaths[i]);
}
float* h_image = (float*)malloc(416 * 416 * 3 * sizeof(float));
for (int i = 0; i < 5; i++) {
long t1 = clock();
cudaMemcpy(d_input, image[i].ptr<float>(0), imageH*imageW * 3 * sizeof(float), cudaMemcpyHostToDevice);
std::cout << imagePaths[i] << "\n";
//--------------------------------------------------------conv1----------------------------------------------------------
cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
inputDes,
w1Des,
conv1Des,
conv1OutDes,
conv1Algo,
&space));
if (i == 0) {
cudaCheck(cudaMalloc(&(workSpace[0]), space));
totalSpace += space;
}
cudnnCheck(cudnnConvolutionForward(cudnn,
alpha,
inputDes,
d_input,
w1Des,
d_w1,
conv1Des,
conv1Algo,
workSpace[0],
space,
beta,
conv1OutDes,
d_conv1Out));
add_biase << <dim3(416, 416), 1 >> >(d_conv1Out, d_b1, 416 * 416 * 16, 416 * 416);
		//to be space efficient, free the workspace, but make sure it doesn't include any data related to convolution
//-----------------------------------------------------relu 1------------------------------------------------------------------
//leaky_relu << <dim3(1665, 1665), 1 >> > (d_conv1Out, .1, 416 * 416 * 16);
//int x = ceil(sqrt((416 * 416 * 16) / ( threadsPerBlock)));
//std::cout << "x = " << x << "\n";
//leaky_relu_v2 << < dim3(x, x), threadsPerBlock >> > (d_conv1Out, .1, 416 * 416 * 16);
//leaky_relu_v3 << <dim3(416,416),1 >> > (d_conv1Out, .1, 416 * 416 * 16, 416 * 416);
leaky_relu_v4 << <dim3(numBlocks[0], numBlocks[0]), 1 >> > (d_conv1Out, .1, 416 * 416 * 16,shift);
//----------------------------------------------------max 1----------------------------------------------------------------
// MaxPooling 2×2 2 (208, 208, 16)
cudnnCheck(cudnnPoolingForward(cudnn,
max1Des,
alpha,
conv1OutDes,
d_conv1Out,
beta,
max1OutDes,
d_max1Out));
//--------------------------------------------------------conv2-------------------------------------------------------------------
//[3,3,16,32]
cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
max1OutDes,
w2Des,
conv2Des,
conv2OutDes,
conv2Algo,
&space));
if (i == 0) {
cudaCheck(cudaMalloc(&workSpace[1], space));
totalSpace += space;
}
cudnnCheck(cudnnConvolutionForward(cudnn,
alpha,
max1OutDes,
d_max1Out,
w2Des,
d_w2,
conv2Des,
conv2Algo,
workSpace[1],
space,
beta,
conv2OutDes,
d_conv2Out));
add_biase << <dim3(208, 208), 1 >> >(d_conv2Out, d_b2, 208 * 208 * 32, 208 * 208);
		// to be space efficient, free the workspace, but make sure it doesn't include any data related to convolution
//-----------------------------------------------------relu 2------------------------------------------------------------------
//(208, 208, 32)
//leaky_relu << <dim3(1180, 1180), 1 >> > (d_conv2Out, .1, 208 * 208 * 32);
//leaky_relu_v3 << <dim3(208,208),1 >> > (d_conv2Out, .1, 208 * 208 * 32, 208 * 208);
leaky_relu_v4 << <dim3(numBlocks[1], numBlocks[1]), 1 >> > (d_conv2Out, .1, 208 * 208 * 32, shift);
//----------------------------------------------------max 2----------------------------------------------------------------
//MaxPooling 2×2 2 (104, 104, 32)
cudnnCheck(cudnnPoolingForward(cudnn,
max2Des,
alpha,
conv2OutDes,
d_conv2Out,
beta,
max2OutDes,
d_max2Out));
//--------------------------------------------------------conv3-------------------------------------------------------------------
cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
max2OutDes,
w3Des,
conv3Des,
conv3OutDes,
conv3Algo,
&space));
if (i == 0) {
cudaMalloc(&workSpace[2], space);
totalSpace += space;
}
long m = clock();
cudnnCheck(cudnnConvolutionForward(cudnn,
alpha,
max2OutDes,
d_max2Out,
w3Des,
d_w3,
conv3Des,
conv3Algo,
workSpace[2],
space,
beta,
conv3OutDes,
d_conv3Out));
cout << "time for conv 3 " << clock() - m << "\n";
//don't forget to add the biases
add_biase << <dim3(104, 104), 1 >> >(d_conv3Out, d_b3, 104 * 104 * 64, 104 * 104);
//-----------------------------------------------------relu 3------------------------------------------------------------------
////(104, 104, 64)
//leaky_relu << <dim3(835, 835), 1 >> > (d_conv3Out, .1, 104 * 104 * 64);
//leaky_relu_v3 << <dim3(104, 104), 1 >> > (d_conv3Out, .1, 104 * 104 * 64, 104 * 104);
leaky_relu_v4 << <dim3(numBlocks[2], numBlocks[2]), 1 >> > (d_conv3Out, .1, 104 * 104 * 64, shift);
//----------------------------------------------------max 3----------------------------------------------------------------
//MaxPooling 2×2 2 (52, 52, 64)
cudnnCheck(cudnnPoolingForward(cudnn,
max3Des,
alpha,
conv3OutDes,
d_conv3Out,
beta,
max3OutDes,
d_max3Out));
//--------------------------------------------------------conv4-------------------------------------------------------------------
//[3,3,64,128]
cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
max3OutDes,
w4Des,
conv4Des,
conv4OutDes,
conv4Algo,
&space));
if (i == 0) {
cudaMalloc(&workSpace[3], space);
totalSpace += space;
}
cudnnCheck(cudnnConvolutionForward(cudnn,
alpha,
max3OutDes,
d_max3Out,
w4Des,
d_w4,
conv4Des,
conv4Algo,
workSpace[3],
space,
beta,
conv4OutDes,
d_conv4Out));
//don't forget to add the biases
//cout << "time for conv 2 " << clock() - m << "\n";
add_biase << <dim3(52, 52), 1 >> >(d_conv4Out, d_b4, 52 * 52 * 128, 52 * 52);
//test(d_conv4Out, 0, 16);
//test(d_conv4Out, 128, 128 + 16);
////test(d_conv2Out, 32+16, 32 + 32);
////test(d_conv1Out, 32 + 16, 32 + 32);
//test(d_conv4Out, 52 * 128, 52 * 128 + 16);
//test(d_conv4Out, 52 * 128 + 128, 52 * 128 + 128 + 16);
		//to be space efficient, free the workspace, but make sure it doesn't include any data related to convolution
//-----------------------------------------------------relu 4------------------------------------------------------------------
////(52, 52, 128)
//leaky_relu << <dim3(600, 600), 1 >> > (d_conv4Out, .1, 52 * 52 * 128);
//leaky_relu_v3 << <dim3(52, 52), 1 >> > (d_conv4Out, .1, 52 * 52 * 128, 52 * 52);
leaky_relu_v4 << <dim3(numBlocks[3], numBlocks[3]), 1 >> > (d_conv4Out, .1, 52 * 52 * 128, shift);
//----------------------------------------------------max 4----------------------------------------------------------------
//MaxPooling 2×2 2 (26, 26, 128)
cudnnCheck(cudnnPoolingForward(cudnn,
max4Des,
alpha,
conv4OutDes,
d_conv4Out,
beta,
max4OutDes,
d_max4Out));
//test(d_max4Out, 0, 16);
//test(d_max4Out, 128, 128 + 16);
////test(d_conv2Out, 32+16, 32 + 32);
////test(d_conv1Out, 32 + 16, 32 + 32);
//test(d_max4Out, 26 * 128, 26 * 128 + 16);
//test(d_max4Out, 26 * 128 + 128, 26 * 128 + 128 + 16);
//--------------------------------------------------------conv5-------------------------------------------------------------------
cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
max4OutDes,
w5Des,
conv5Des,
conv5OutDes,
conv5Algo,
&space));
if (i == 0) {
cudaMalloc(&workSpace[4], space);
totalSpace += space;
}
cudnnCheck(cudnnConvolutionForward(cudnn,
alpha,
max4OutDes,
d_max4Out,
w5Des,
d_w5,
conv5Des,
conv5Algo,
workSpace[4],
space,
beta,
conv5OutDes,
d_conv5Out));
//don't forget to add the biases
		add_biase << <dim3(26, 26), 1 >> >(d_conv5Out, d_b5, 26 * 26 * 256, 26 * 26);   //26x26 spatial grid, matching the other layers
//test(d_conv5Out, 0, 16);
//test(d_conv5Out, 256, 256 + 16);
////test(d_conv2Out, 32+16, 32 + 32);
////test(d_conv1Out, 32 + 16, 32 + 32);
//test(d_conv5Out, 26 * 256, 26 * 256 + 16);
//test(d_conv5Out, 26 * 256 + 256, 26 * 256 + 256 + 16);
		//to be space efficient, free the workspace, but make sure it doesn't include any data related to convolution
//-----------------------------------------------------relu 5------------------------------------------------------------------
////(26, 26, 256)
//leaky_relu << <dim3(420, 420), 1 >> > (d_conv5Out, .1, 26 * 26 * 256);
//leaky_relu_v3 << <dim3(26, 26), 1 >> > (d_conv5Out, .1, 26 * 26 * 256, 26 * 26);
leaky_relu_v4 << <dim3(numBlocks[4], numBlocks[4]), 1 >> > (d_conv5Out, .1, 26 * 26 * 256, shift);
//----------------------------------------------------max 5----------------------------------------------------------------
//MaxPooling 2×2 2 (13, 13, 256)
cudnnCheck(cudnnPoolingForward(cudnn,
max5Des,
alpha,
conv5OutDes,
d_conv5Out,
beta,
max5OutDes,
d_max5Out));
//--------------------------------------------------------conv6-------------------------------------------------------------------
//[3,3,256,512]
cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
max5OutDes,
w6Des,
conv6Des,
conv6OutDes,
conv6Algo,
&space));
if (i == 0) {
cudaMalloc(&workSpace[5], space);
totalSpace += space;
}
cudnnCheck(cudnnConvolutionForward(cudnn,
alpha,
max5OutDes,
d_max5Out,
w6Des,
d_w6,
conv6Des,
conv6Algo,
workSpace[5],
space,
beta,
conv6OutDes,
d_conv6Out));
//don't forget to add the biases
add_biase << <dim3(13, 13), 1 >> > (d_conv6Out, d_b6, 13 * 13 * 512, 13 * 13);
		//to be space efficient, free the workspace, but make sure it doesn't include any data related to convolution
//-----------------------------------------------------relu 6------------------------------------------------------------------
////(13, 13, 512)
//leaky_relu << <dim3(300, 300), 1 >> > (d_conv6Out, .1, 13 * 13 * 512);
//leaky_relu_v3 << <dim3(13, 13), 1 >> > (d_conv6Out, .1, 13 * 13 * 512, 13 * 13);
leaky_relu_v4 << <dim3(numBlocks[5], numBlocks[5]), 1 >> > (d_conv6Out, .1, 13 * 13 * 512, shift);
//----------------------------------------------------max 6----------------------------------------------------------------
//MaxPooling 2×2 1 (13, 13, 512)
cudnnCheck(cudnnPoolingForward(cudnn,
max6Des,
alpha,
conv6OutDes,
d_conv6Out,
beta,
max6OutDes,
d_max6Out));
//--------------------------------------------------------conv7-------------------------------------------------------------------
//[3,3,512,1024]
cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
max6OutDes,
w7Des,
conv7Des,
conv7OutDes,
conv7Algo,
&space));
if (i == 0) {
cudaMalloc(&workSpace[6], space);
totalSpace += space;
}
cudnnCheck(cudnnConvolutionForward(cudnn,
alpha,
max6OutDes,
d_max6Out,
w7Des,
d_w7,
conv7Des,
conv7Algo,
workSpace[6],
space,
beta,
conv7OutDes,
d_conv7Out));
//don't forget to add the biases
add_biase << <dim3(13, 13), 1 >> > (d_conv7Out, d_b7, 13 * 13 * 1024, 13 * 13);
		//to be space efficient, free the workspace, but make sure it doesn't include any data related to convolution
//-----------------------------------------------------relu 7------------------------------------------------------------------
////(13 x 13 x 1024)
//leaky_relu << <dim3(420, 420), 1 >> > (d_conv7Out, .1, 13 * 13 * 1024);
//leaky_relu_v3 << <dim3(13, 13), 1 >> > (d_conv7Out, .1, 13 * 13 * 1024, 13 * 13);
leaky_relu_v4 << <dim3(numBlocks[6], numBlocks[6]), 1 >> > (d_conv7Out, .1, 13 * 13 * 1024, shift);
//--------------------------------------------------------conv8-------------------------------------------------------------------
//[3,3,1024,1024]
cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
conv7OutDes,
w8Des,
conv8Des,
conv8OutDes,
conv8Algo,
&space));
if (i == 0) {
cudaMalloc(&workSpace[7], space);
totalSpace += space;
}
//cout << "total space " << totalSpace/(1024*1024) << " MB\n";
cudnnCheck(cudnnConvolutionForward(cudnn,
alpha,
conv7OutDes,
d_conv7Out,
w8Des,
d_w8,
conv8Des,
conv8Algo,
workSpace[7],
space,
beta,
conv8OutDes,
d_conv8Out));
//don't forget to add the biases
add_biase << <dim3(13, 13), 1 >> > (d_conv8Out, d_b8, 13 * 13 * 1024, 13 * 13);
		//to be space efficient, free the workspace, but make sure it doesn't include any data related to convolution
//-----------------------------------------------------relu 8------------------------------------------------------------------
////(13 x 13 x 1024)
//leaky_relu << <dim3(420, 420), 1 >> > (d_conv8Out, .1, 13 * 13 * 1024);
//leaky_relu_v3 << <dim3(13, 13), 1 >> > (d_conv8Out, .1, 13 * 13 * 1024, 13 * 13);
//x = ceil(sqrt((13 * 13 * 1024) / shift));
leaky_relu_v4 << <dim3(numBlocks[7], numBlocks[7]), 1 >> > (d_conv8Out, .1, 13 * 13 * 1024, shift);
//--------------------------------------------------------conv9-------------------------------------------------------------------
cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
conv8OutDes,
w9Des,
conv9Des,
conv9OutDes,
conv9Algo,
&space));
if (i == 0) {
cudaMalloc(&workSpace[8], space);
totalSpace += space;
}
cudnnCheck(cudnnConvolutionForward(cudnn,
alpha,
conv8OutDes,
d_conv8Out,
w9Des,
d_w9,
conv9Des,
conv9Algo,
workSpace[8],
space,
beta,
conv9OutDes,
d_conv9Out));
//don't forget to add the biases
add_biase << <dim3(13, 13), 1 >> > (d_conv9Out, d_b9, 13 * 13 * 125, 13 * 13);
//another optimization separate first then sigmoid exp use the predefined ones
sigmoid_exp << <dim3(150, 150), 1 >> > (d_conv9Out, d_anchors, 13 * 13 * 125);
separate_data << <dim3(150, 150), 1 >> > (d_conv9Out, d_boxes_dims, d_predictions, d_classes, 13 * 13 * 125);
cudnnCheck(cudnnSoftmaxForward(cudnn,
CUDNN_SOFTMAX_FAST,
CUDNN_SOFTMAX_MODE_CHANNEL,
alpha,
softmaxInputDes,
d_classes,
beta,
softmaxOutDes,
d_classes_softmax));
scores << <dim3(32, 32), 1 >> > (d_classes_softmax, d_predictions, 13 * 13 * 5);
bool_arr << <dim3(30, 30), 1 >> >(d_boxes, 13 * 13 * 5, false);
filter << < dim3(150, 150), 1 >> > (d_classes_softmax, d_boxes, 0.3, 13 * 13 * 5 * 20);
get_max_scores << <dim3(30, 30), 1 >> > (d_classes_softmax, d_boxes, d_maxScorePerBox, d_maxScoreIndex, 13 * 13 * 5);
calculate_points << <dim3(30, 30), 1 >> > (d_boxes_dims, d_points, d_boxes, 13 * 13 * 5);
//cudaDeviceSynchronize();
non_max_supression << < dim3(30, 30), 1 >> > (d_points, d_boxes, d_maxScorePerBox, d_maxScoreIndex, 0.3, 13 * 13 * 5);
cudaCheck(cudaMemcpy(h_boxes, d_boxes, 13 * 13 * 5 * sizeof(bool), cudaMemcpyDeviceToHost));
cudaCheck(cudaMemcpy(h_maxScorePerBox, d_maxScorePerBox, 13 * 13 * 5 * sizeof(float), cudaMemcpyDeviceToHost));
cudaCheck(cudaMemcpy(h_maxScoreIndex, d_maxScoreIndex, 13 * 13 * 5 * sizeof(int), cudaMemcpyDeviceToHost));
//cudaCheck(cudaMemcpy(h_boxes_dims, d_boxes_dims, 13 * 13 * 5 * 4 * sizeof(float), cudaMemcpyDeviceToHost));
cudaCheck(cudaMemcpy(h_points, d_points, 13 * 13 * 5 * 4 * sizeof(float), cudaMemcpyDeviceToHost));
cv::Mat output(416, 416, CV_8UC3);
cv::normalize(image[i], output, 0.0, 255.0, cv::NORM_MINMAX);
for (int i = 0; i < 13 * 13 * 5; i++) {
if (h_boxes[i]) {
int index = i * 4;
int left = h_points[index];
int top = h_points[index + 1];
int right = h_points[index + 2];
int bottom = h_points[index + 3];
std::cout << "( " << left << " , " << top << " ) , (" << right << " , " << bottom << " ) class "
<<classes[h_maxScoreIndex[i]]<<" with prop "<<h_maxScorePerBox[i]<<"\n";
if (left < 416 && top < 416 && right < 416 && bottom < 416) {
cv::rectangle(output, cv::Point(left, top), cv::Point(right, bottom), colors[h_maxScoreIndex[i]],3);
//draw << <dim3(416, 416), 1 >> > (d_input, left, top, right, bottom, colors[h_maxScoreIndex[i]].val[0],
//colors[h_maxScoreIndex[i]].val[1], colors[h_maxScoreIndex[i]].val[2], 416, 416);
}
}
}
cudaCheck(cudaMemcpy(h_image, d_input, 416 * 416 * 3 * sizeof(float), cudaMemcpyDeviceToHost));
//cv::Mat output0(416, 416, CV_32FC3,h_image);
//cv::normalize(output0, output, 0.0, 255.0, cv::NORM_MINMAX);
//cv::cvtColor(output, output, CV_RGB2BGR);
//cv::normalize(output, output, 0.0, 255.0, cv::NORM_MINMAX);
long t2 = clock();
cout << "time = " << t2 - t1 << "\n";
string num = std::to_string(i);
string file = "output" + num + ".png";
save_image(file.c_str(),output );
}
	//to be space efficient, free the workspace, but make sure it doesn't include any data related to convolution
cout << "total space " << totalSpace / (1024 * 1024) << "MB\n";
}
|
a6dd5eb78c5747448e1b0abdda105a49428cf658.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/kernel.h"
#include "reducedMathPlugin.h"
using namespace nvinfer1::plugin; // for ReducedDivisor
template <unsigned nthdsPerCTA>
__launch_bounds__(nthdsPerCTA)
__global__ void reorgKernel(
const float* input, // input tensor of shape (batch, C, H, W)
const int volume, // note that volumes of input and output tensors are the same
ReducedDivisor batch,
ReducedDivisor C,
ReducedDivisor H,
ReducedDivisor W,
ReducedDivisor C_out,
ReducedDivisor stride,
float* output) // output tensor of shape (batch, C * stride * stride, H / stride, W / stride)
{
/*
* Reference
* https://github.com/pjreddie/darknet/blob/f6d861736038da22c9eb0739dca84003c5a5e275/src/blas_kernels.cu#L370
* https://github.com/pjreddie/darknet/blob/f6d861736038da22c9eb0739dca84003c5a5e275/src/blas.c#L9
*/
    // outIndex is the row-major position in the output tensor
for (int outIndex = blockIdx.x * nthdsPerCTA + threadIdx.x; outIndex < volume; outIndex += nthdsPerCTA)
{
int i = outIndex;
// calculate output coordinates from outIndex
int outW, outH, outC;
W.divmod(i, i, outW);
H.divmod(i, i, outH);
C.divmod(i, i, outC);
int outN = i;
// calculate input coordinates based on output coordinates
// offset is [0, 1, ..., stride * stride - 1] = posH * stride + posW
int offset, inC, posH, posW;
C_out.divmod(outC, offset, inC);
stride.divmod(offset, posH, posW);
int inH = outH * stride.get() + posH;
int inW = outW * stride.get() + posW;
int inN = outN;
// inIndex is row-major position of input coordinates
int inIndex = inW + W.get() * stride.get() * (inH + H.get() * stride.get() * (inC + C_out.get() * inN));
output[outIndex] = input[inIndex];
}
}
pluginStatus_t reorgGPU(
hipStream_t stream,
const int batch,
const int C,
const int H,
const int W,
const int stride,
const float* input,
float* output)
{
const int BS = 512; // number of threads in one block
const int volume = batch * C * H * W; // size of input tensor
const int GS = (volume + BS - 1) / BS; // number of blocks to launch, calculated so global number of threads is >= volume
ReducedDivisor C_out(C / (stride * stride));
hipLaunchKernelGGL(( reorgKernel<BS>), dim3(GS), dim3(BS), 0, stream, input, volume, ReducedDivisor(batch), ReducedDivisor(C), ReducedDivisor(H), ReducedDivisor(W), C_out, ReducedDivisor(stride), output);
return STATUS_SUCCESS;
}
pluginStatus_t reorgInference(
hipStream_t stream,
const int batch,
const int C,
const int H,
const int W,
const int stride,
const void* input,
void* output)
{
return reorgGPU(stream, batch, C, H, W, stride, (const float*) input, (float*) output);
}
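// Usage sketch (illustrative only, not part of the plugin): reorganising a 1x64x26x26 tensor
// with stride 2 into a 1x256x13x13 tensor, assuming d_in and d_out are device buffers of
// 1*64*26*26 floats each:
//   hipStream_t stream;
//   hipStreamCreate(&stream);
//   reorgInference(stream, 1, 64, 26, 26, 2, d_in, d_out);
//   hipStreamSynchronize(stream);
//   hipStreamDestroy(stream);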
|
a6dd5eb78c5747448e1b0abdda105a49428cf658.cu
|
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/kernel.h"
#include "reducedMathPlugin.h"
using namespace nvinfer1::plugin; // for ReducedDivisor
template <unsigned nthdsPerCTA>
__launch_bounds__(nthdsPerCTA)
__global__ void reorgKernel(
const float* input, // input tensor of shape (batch, C, H, W)
const int volume, // note that volumes of input and output tensors are the same
ReducedDivisor batch,
ReducedDivisor C,
ReducedDivisor H,
ReducedDivisor W,
ReducedDivisor C_out,
ReducedDivisor stride,
float* output) // output tensor of shape (batch, C * stride * stride, H / stride, W / stride)
{
/*
* Reference
* https://github.com/pjreddie/darknet/blob/f6d861736038da22c9eb0739dca84003c5a5e275/src/blas_kernels.cu#L370
* https://github.com/pjreddie/darknet/blob/f6d861736038da22c9eb0739dca84003c5a5e275/src/blas.c#L9
*/
    // outIndex is the row-major position in the output tensor
for (int outIndex = blockIdx.x * nthdsPerCTA + threadIdx.x; outIndex < volume; outIndex += nthdsPerCTA)
{
int i = outIndex;
// calculate output coordinates from outIndex
int outW, outH, outC;
W.divmod(i, i, outW);
H.divmod(i, i, outH);
C.divmod(i, i, outC);
int outN = i;
// calculate input coordinates based on output coordinates
// offset is [0, 1, ..., stride * stride - 1] = posH * stride + posW
int offset, inC, posH, posW;
C_out.divmod(outC, offset, inC);
stride.divmod(offset, posH, posW);
int inH = outH * stride.get() + posH;
int inW = outW * stride.get() + posW;
int inN = outN;
// inIndex is row-major position of input coordinates
int inIndex = inW + W.get() * stride.get() * (inH + H.get() * stride.get() * (inC + C_out.get() * inN));
output[outIndex] = input[inIndex];
}
}
pluginStatus_t reorgGPU(
cudaStream_t stream,
const int batch,
const int C,
const int H,
const int W,
const int stride,
const float* input,
float* output)
{
const int BS = 512; // number of threads in one block
const int volume = batch * C * H * W; // size of input tensor
const int GS = (volume + BS - 1) / BS; // number of blocks to launch, calculated so global number of threads is >= volume
ReducedDivisor C_out(C / (stride * stride));
reorgKernel<BS><<<GS, BS, 0, stream>>>(input, volume, ReducedDivisor(batch), ReducedDivisor(C), ReducedDivisor(H), ReducedDivisor(W), C_out, ReducedDivisor(stride), output);
return STATUS_SUCCESS;
}
pluginStatus_t reorgInference(
cudaStream_t stream,
const int batch,
const int C,
const int H,
const int W,
const int stride,
const void* input,
void* output)
{
return reorgGPU(stream, batch, C, H, W, stride, (const float*) input, (float*) output);
}
|
1eb9d31694d309320bebc223bb407594e1559d6f.hip
|
// !!! This is a file automatically generated by hipify!!!
// This file is part of the cube - ica/cuda - software package
// Copyright 2010-2013 Christian Kellner <[email protected]>
// License: MIT (see LICENSE.BSD-MIT)
#include "cube.h"
#include "cube_blas.h"
#include "cube_matrix.h"
#include "cube_kernels.h"
#include "cube_private.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
__device__ double
d_inv (double x, int inv)
{
return inv ? 1.0 / x : x;
}
__global__ void
k_diag (int n, double *D, int ldd, int inv, double alpha, double *x, int incx)
{
extern __shared__ double smem[];
double *s;
int global_x, global_y, lid, gid;
/* calculate global and local ids */
global_x = (blockDim.x * blockIdx.x) + threadIdx.y; //n
global_y = (blockDim.y * blockIdx.y) + threadIdx.x; //m
gid = (ldd * global_x) + global_y;
lid = (threadIdx.y * blockDim.x) + threadIdx.x;
smem[lid] = 0;
if (blockIdx.x == blockIdx.y && threadIdx.x < warpSize && global_y < n)
{
s = &smem[blockDim.x * blockDim.y];
s[threadIdx.x] = x[(blockIdx.x * blockDim.x + threadIdx.x) * incx];
smem[threadIdx.x * blockDim.x + threadIdx.x] = d_inv (s[threadIdx.x] * alpha, inv);
}
if (global_x < n && global_y < n)
D[gid] = smem[lid];
}
void
cube_gpu_diag (cube_t *ctx, int n, double *diag, int ldd, int inv, double alpha, double *x, int incx)
{
size_t smem;
dim3 block, grid;
if (! cube_context_check (ctx))
return;
block.x = 32;
block.y = 32;
grid.x = ceil (n / (double) block.x);
grid.y = grid.x;
smem = (block.x + 1) * block.y * sizeof (double);
hipLaunchKernelGGL(( k_diag), dim3(block), dim3(grid), smem, 0, n, diag, ldd, inv, alpha, x, incx);
}
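/* Usage sketch (illustrative only): building a 256x256 matrix D = diag (1.0 / (2.0 * x)) on the
 * device, assuming ctx is an initialised cube context and d_D, d_x are device buffers of
 * 256*256 and 256 doubles respectively:
 *   cube_gpu_diag (ctx, 256, d_D, 256, 1, 2.0, d_x, 1);
 */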
|
1eb9d31694d309320bebc223bb407594e1559d6f.cu
|
// This file is part of the cube - ica/cuda - software package
// Copyright © 2010-2013 Christian Kellner <[email protected]>
// License: MIT (see LICENSE.BSD-MIT)
#include "cube.h"
#include "cube_blas.h"
#include "cube_matrix.h"
#include "cube_kernels.h"
#include "cube_private.h"
#include <cuda.h>
#include <stdio.h>
__device__ double
d_inv (double x, int inv)
{
return inv ? 1.0 / x : x;
}
__global__ void
k_diag (int n, double *D, int ldd, int inv, double alpha, double *x, int incx)
{
extern __shared__ double smem[];
double *s;
int global_x, global_y, lid, gid;
/* calculate global and local ids */
global_x = (blockDim.x * blockIdx.x) + threadIdx.y; //n
global_y = (blockDim.y * blockIdx.y) + threadIdx.x; //m
gid = (ldd * global_x) + global_y;
lid = (threadIdx.y * blockDim.x) + threadIdx.x;
smem[lid] = 0;
if (blockIdx.x == blockIdx.y && threadIdx.x < warpSize && global_y < n)
{
s = &smem[blockDim.x * blockDim.y];
s[threadIdx.x] = x[(blockIdx.x * blockDim.x + threadIdx.x) * incx];
smem[threadIdx.x * blockDim.x + threadIdx.x] = d_inv (s[threadIdx.x] * alpha, inv);
}
if (global_x < n && global_y < n)
D[gid] = smem[lid];
}
void
cube_gpu_diag (cube_t *ctx, int n, double *diag, int ldd, int inv, double alpha, double *x, int incx)
{
size_t smem;
dim3 block, grid;
if (! cube_context_check (ctx))
return;
block.x = 32;
block.y = 32;
grid.x = ceil (n / (double) block.x);
grid.y = grid.x;
smem = (block.x + 1) * block.y * sizeof (double);
k_diag<<<block, grid, smem>>> (n, diag, ldd, inv, alpha, x, incx);
}
|
e8341746b2d9ecf2467691ba37cbb62a74185aa0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************************************
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates how to call a CUTLASS GEMM kernel and provides a naive reference
matrix multiply kernel to verify its correctness.
The CUTLASS Gemm template is instantiated in the function CutlassSgemmNN. This is kernel computes
the general matrix product (GEMM) using single-precision floating-point arithmetic and assumes
all matrices have column-major layout.
The threadblock tile size is chosen as 128x128x8 which offers good performance for large matrices.
See the CUTLASS Parallel for All blog post for more exposition on the tunable parameters available
in CUTLASS.
https://devblogs.nvidia.com/cutlass-linear-algebra-cuda/
Aside from defining and launching the SGEMM kernel, this example does not use any other components
or utilities within CUTLASS. Such utilities are demonstrated elsewhere in other examples and are
prevalent in the CUTLASS unit tests.
  This example has deliberately been kept similar to the basic_gemm example from cutlass-1.3 to
highlight the minimum amount of differences needed to transition to cutlass-2.0.
Cutlass-1.3 sgemm: https://github.com/NVIDIA/cutlass/blob/master/examples/00_basic_gemm/basic_gemm.cu
*/
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
// Helper methods to check for errors
#include "helper.h"
//
// CUTLASS includes needed for single-precision GEMM kernel
//
// Defines cutlass::gemm::device::Gemm, the generic Gemm computation template class.
#include "cutlass/gemm/device/gemm.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// This function defines a CUTLASS GEMM kernel instantiation, constructs its parameters object,
// and launches it on the CUDA device.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Define a CUTLASS GEMM template and launch a GEMM kernel.
hipError_t CutlassSgemmNN(
int M,
int N,
int K,
float alpha,
float const *A,
int lda,
float const *B,
int ldb,
float beta,
float *C,
int ldc,
int kernelNums) {
// Define type definition for single-precision CUTLASS GEMM with column-major
// input matrices and 128x128x8 threadblock tile size (chosen by default).
//
// To keep the interface manageable, several helpers are defined for plausible compositions
// including the following example for single-precision GEMM. Typical values are used as
// default template arguments. See `cutlass/gemm/device/default_gemm_configuration.h` for more details.
//
// To view the full gemm device API interface, see `cutlass/gemm/device/gemm.h`
using ColumnMajor = cutlass::layout::ColumnMajor;
using CutlassGemm = cutlass::gemm::device::Gemm<float, // Data-type of A matrix
ColumnMajor, // Layout of A matrix
float, // Data-type of B matrix
ColumnMajor, // Layout of B matrix
float, // Data-type of C matrix
ColumnMajor>; // Layout of C matrix
// Define a CUTLASS GEMM type
CutlassGemm gemm_operator;
// Construct the CUTLASS GEMM arguments object.
//
// One of CUTLASS's design patterns is to define gemm argument objects that are constructible
// in host code and passed to kernels by value. These may include pointers, strides, scalars,
// and other arguments needed by Gemm and its components.
//
// The benefits of this pattern are (1.) a structured, composable strategy for passing host-constructible
// arguments to kernels and (2.) minimized initialization overhead on kernel entry.
//
CutlassGemm::Arguments args({M , N, K}, // Gemm Problem dimensions
{A, lda}, // Tensor-ref for source matrix A
{B, ldb}, // Tensor-ref for source matrix B
{C, ldc}, // Tensor-ref for source matrix C
{C, ldc}, // Tensor-ref for destination matrix D (may be different memory than source C matrix)
{alpha, beta}); // Scalars used in the Epilogue
//
// Launch the CUTLASS GEMM kernel.
//
// hipStream_t strm0;
// hipStream_t strm1;
// hipStreamCreate(&strm0);
// hipStreamCreate(&strm1);
hipStream_t strm[kernelNums];
for(int i = 0; i < kernelNums; i++) {
hipStreamCreate(&strm[i]);
}
cutlass::Status status;
for(int i = 0; i < kernelNums; i++) {
status = gemm_operator(args, nullptr, strm[i]);
}
//cutlass::Status status = gemm_operator(args, nullptr, strm);
//
// Return a hipError_t if the CUTLASS GEMM operator returned an error code.
//
if (status != cutlass::Status::kSuccess) {
return hipErrorUnknown;
}
// Return success, if no errors were encountered.
return hipSuccess;
}
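// Usage sketch (illustrative only): multiplying two 1024x1024 column-major matrices that are
// already resident on the device, replaying the same GEMM on 4 independent streams:
//   hipError_t err = CutlassSgemmNN(1024, 1024, 1024, 1.0f, d_A, 1024, d_B, 1024, 0.0f, d_C, 1024, 4);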
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// The source code after this point in the file is generic CUDA using the CUDA Runtime API
// and simple CUDA kernels to initialize matrices and compute the general matrix product.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to initialize a matrix with small integers.
__global__ void InitializeMatrix_kernel(
float *matrix,
int ldm,
int rows,
int columns,
int seed = 0) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < rows && j < columns) {
int offset = i + j * ldm;
// Generate arbitrary elements.
int const k = 16807;
int const m = 16;
float value = float(((offset + seed) * k % m) - m / 2);
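// e.g. with seed = 0, offsets 0, 1, 2, 3 produce -8, -1, 6, -3; because 16807 % 16 == 7, the values generated for these problem sizes stay in [-8, 7].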
matrix[offset] = value;
}
}
/// Simple function to initialize a matrix to arbitrary small integers.
hipError_t InitializeMatrix(float *matrix, int ldm, int rows, int columns, int seed = 0) {
dim3 block(16, 16);
dim3 grid(
(rows + block.x - 1) / block.x,
(columns + block.y - 1) / block.y
);
hipLaunchKernelGGL(( InitializeMatrix_kernel), dim3(grid), dim3(block) , 0, 0, matrix, ldm, rows, columns, seed);
return hipGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocates device memory for a matrix then fills with arbitrary small integers.
hipError_t AllocateMatrix(float **matrix, int ldm, int rows, int columns, int seed = 0) {
hipError_t result;
size_t sizeof_matrix = sizeof(float) * ldm * columns;
// Allocate device memory.
result = hipMalloc(reinterpret_cast<void **>(matrix), sizeof_matrix);
if (result != hipSuccess) {
std::cerr << "Failed to allocate matrix: "
<< hipGetErrorString(result) << std::endl;
return result;
}
// Clear the allocation.
result = hipMemset(*matrix, 0, sizeof_matrix);
if (result != hipSuccess) {
std::cerr << "Failed to clear matrix device memory: "
<< hipGetErrorString(result) << std::endl;
return result;
}
// Initialize matrix elements to arbitrary small integers.
result = InitializeMatrix(*matrix, ldm, rows, columns, seed);
if (result != hipSuccess) {
std::cerr << "Failed to initialize matrix: "
<< hipGetErrorString(result) << std::endl;
return result;
}
return result;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Naive reference GEMM computation.
__global__ void ReferenceGemm_kernel(
int M,
int N,
int K,
float alpha,
float const *A,
int lda,
float const *B,
int ldb,
float beta,
float *C,
int ldc) {
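// One thread per output element: thread (i, j) forms the dot product of row i of A and column j of B. With column-major addressing, A(i, k) is A[i + k * lda] and B(k, j) is B[k + j * ldb].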
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < M && j < N) {
float accumulator = 0;
for (int k = 0; k < K; ++k) {
accumulator += A[i + k * lda] * B[k + j * ldb];
}
C[i + j * ldc] = alpha * accumulator + beta * C[i + j * ldc];
}
}
/// Reference GEMM computation.
hipError_t ReferenceGemm(
int M,
int N,
int K,
float alpha,
float const *A,
int lda,
float const *B,
int ldb,
float beta,
float *C,
int ldc) {
dim3 block(16, 16);
dim3 grid(
(M + block.x - 1) / block.x,
(N + block.y - 1) / block.y
);
hipLaunchKernelGGL(( ReferenceGemm_kernel), dim3(grid), dim3(block) , 0, 0, M, N, K, alpha, A, lda, B, ldb, beta, C, ldc);
return hipGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocate several matrices in GPU device memory and call a single-precision
/// CUTLASS GEMM kernel.
hipError_t TestCutlassGemm(int M, int N, int K, float alpha, float beta, int kernelNums) {
hipError_t result;
//
// Define several matrices to be used as operands to GEMM kernels.
//
// Compute leading dimensions for each matrix.
int lda = M;
int ldb = K;
int ldc = M;
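// With column-major layout the leading dimension is the row count of each matrix: A is M-by-K (lda = M), B is K-by-N (ldb = K), and C is M-by-N (ldc = M).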
// Compute size in bytes of the C matrix.
size_t sizeof_C = sizeof(float) * ldc * N;
// Define pointers to matrices in GPU device memory.
float *A;
float *B;
float *C_cutlass;
float *C_reference;
//
// Allocate matrices in GPU device memory with arbitrary seeds.
//
result = AllocateMatrix(&A, lda, M, K, 0);
if (result != hipSuccess) {
return result;
}
result = AllocateMatrix(&B, ldb, K, N, 17);
if (result != hipSuccess) {
hipFree(A);
return result;
}
result = AllocateMatrix(&C_cutlass, ldc, M, N, 101);
if (result != hipSuccess) {
hipFree(A);
hipFree(B);
return result;
}
result = AllocateMatrix(&C_reference, ldc, M, N, 101);
if (result != hipSuccess) {
hipFree(A);
hipFree(B);
hipFree(C_cutlass);
return result;
}
result = hipMemcpy(C_reference, C_cutlass, sizeof_C, hipMemcpyDeviceToDevice);
if (result != hipSuccess) {
std::cerr << "Failed to copy C_cutlass matrix to C_reference: "
<< hipGetErrorString(result) << std::endl;
hipFree(C_reference);
hipFree(C_cutlass);
hipFree(B);
hipFree(A);
return result;
}
//
// Launch CUTLASS GEMM.
//
result = CutlassSgemmNN(M, N, K, alpha, A, lda, B, ldb, beta, C_cutlass, ldc, kernelNums);
if (result != hipSuccess) {
std::cerr << "CUTLASS GEMM kernel failed: "
<< hipGetErrorString(result) << std::endl;
hipFree(C_reference);
hipFree(C_cutlass);
hipFree(B);
hipFree(A);
return result;
}
//
// Verify.
//
// Launch reference GEMM
result = ReferenceGemm(M, N, K, alpha, A, lda, B, ldb, beta, C_reference, ldc);
if (result != hipSuccess) {
std::cerr << "Reference GEMM kernel failed: "
<< hipGetErrorString(result) << std::endl;
hipFree(C_reference);
hipFree(C_cutlass);
hipFree(B);
hipFree(A);
return result;
}
// Copy to host and verify equivalence.
std::vector<float> host_cutlass(ldc * N, 0);
std::vector<float> host_reference(ldc * N, 0);
result = hipMemcpy(host_cutlass.data(), C_cutlass, sizeof_C, hipMemcpyDeviceToHost);
if (result != hipSuccess) {
std::cerr << "Failed to copy CUTLASS GEMM results: "
<< hipGetErrorString(result) << std::endl;
hipFree(C_reference);
hipFree(C_cutlass);
hipFree(B);
hipFree(A);
return result;
}
result = hipMemcpy(host_reference.data(), C_reference, sizeof_C, hipMemcpyDeviceToHost);
if (result != hipSuccess) {
std::cerr << "Failed to copy Reference GEMM results: "
<< hipGetErrorString(result) << std::endl;
hipFree(C_reference);
hipFree(C_cutlass);
hipFree(B);
hipFree(A);
return result;
}
//
// Free device memory allocations.
//
hipFree(C_reference);
hipFree(C_cutlass);
hipFree(B);
hipFree(A);
//
// Test for bit equivalence of results.
//
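// Exact equality is meaningful here because the operands are small integer-valued floats (in [-8, 7]), so for these problem sizes every partial product and sum is exactly representable in fp32 and the result does not depend on accumulation order.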
if (host_cutlass != host_reference) {
std::cerr << "CUTLASS results incorrect." << std::endl;
return hipErrorUnknown;
}
return hipSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to basic_gemm example.
//
// usage:
//
// 00_basic_gemm <M> <N> <K> <num kernel launches>
//
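// example: 00_basic_gemm 1024 512 2048 4 computes a 1024x512x2048 SGEMM and launches it on 4 streams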
int main(int argc, const char *arg[]) {
//
// Parse the command line to obtain GEMM dimensions and scalar values.
//
// GEMM problem dimensions.
int problem[3] = { 128, 128, 128 };
for (int i = 1; i < argc && i < 4; ++i) {
std::stringstream ss(arg[i]);
ss >> problem[i - 1];
}
// Scalars used for linear scaling the result of the matrix product.
float scalars[2] = { 1, 0 };
int kernelNums = (argc > 4) ? atoi(arg[4]) : 1; // default to a single launch when the argument is omitted
// for (int i = 4; i < argc && i < 6; ++i) {
// std::stringstream ss(arg[i]);
// ss >> scalars[i - 4];
// }
//
// Run the CUTLASS GEMM test.
//
hipError_t result = TestCutlassGemm(
problem[0], // GEMM M dimension
problem[1], // GEMM N dimension
problem[2], // GEMM K dimension
scalars[0], // alpha
scalars[1], // beta
kernelNums
);
if (result == hipSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == hipSuccess ? 0 : -1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
|
e8341746b2d9ecf2467691ba37cbb62a74185aa0.cu
|
/***************************************************************************************************
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates how to call a CUTLASS GEMM kernel and provides a naive reference
matrix multiply kernel to verify its correctness.
The CUTLASS Gemm template is instantiated in the function CutlassSgemmNN. This kernel computes
the general matrix product (GEMM) using single-precision floating-point arithmetic and assumes
all matrices have column-major layout.
The threadblock tile size is chosen as 128x128x8 which offers good performance for large matrices.
See the CUTLASS Parallel for All blog post for more exposition on the tunable parameters available
in CUTLASS.
https://devblogs.nvidia.com/cutlass-linear-algebra-cuda/
Aside from defining and launching the SGEMM kernel, this example does not use any other components
or utilities within CUTLASS. Such utilities are demonstrated elsewhere in other examples and are
prevalent in the CUTLASS unit tests.
This example has deliberately been kept similar to the basic_gemm example from cutlass-1.3 to
highlight the minimum amount of differences needed to transition to cutlass-2.0.
Cutlass-1.3 sgemm: https://github.com/NVIDIA/cutlass/blob/master/examples/00_basic_gemm/basic_gemm.cu
*/
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
// Helper methods to check for errors
#include "helper.h"
//
// CUTLASS includes needed for single-precision GEMM kernel
//
// Defines cutlass::gemm::device::Gemm, the generic Gemm computation template class.
#include "cutlass/gemm/device/gemm.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// This function defines a CUTLASS GEMM kernel instantiation, constructs its parameters object,
// and launches it on the CUDA device.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Define a CUTLASS GEMM template and launch a GEMM kernel.
cudaError_t CutlassSgemmNN(
int M,
int N,
int K,
float alpha,
float const *A,
int lda,
float const *B,
int ldb,
float beta,
float *C,
int ldc,
int kernelNums) {
// Define type definition for single-precision CUTLASS GEMM with column-major
// input matrices and 128x128x8 threadblock tile size (chosen by default).
//
// To keep the interface manageable, several helpers are defined for plausible compositions
// including the following example for single-precision GEMM. Typical values are used as
// default template arguments. See `cutlass/gemm/device/default_gemm_configuration.h` for more details.
//
// To view the full gemm device API interface, see `cutlass/gemm/device/gemm.h`
using ColumnMajor = cutlass::layout::ColumnMajor;
using CutlassGemm = cutlass::gemm::device::Gemm<float, // Data-type of A matrix
ColumnMajor, // Layout of A matrix
float, // Data-type of B matrix
ColumnMajor, // Layout of B matrix
float, // Data-type of C matrix
ColumnMajor>; // Layout of C matrix
// Define a CUTLASS GEMM type
CutlassGemm gemm_operator;
// Construct the CUTLASS GEMM arguments object.
//
// One of CUTLASS's design patterns is to define gemm argument objects that are constructible
// in host code and passed to kernels by value. These may include pointers, strides, scalars,
// and other arguments needed by Gemm and its components.
//
// The benefits of this pattern are (1.) a structured, composable strategy for passing host-constructible
// arguments to kernels and (2.) minimized initialization overhead on kernel entry.
//
CutlassGemm::Arguments args({M , N, K}, // Gemm Problem dimensions
{A, lda}, // Tensor-ref for source matrix A
{B, ldb}, // Tensor-ref for source matrix B
{C, ldc}, // Tensor-ref for source matrix C
{C, ldc}, // Tensor-ref for destination matrix D (may be different memory than source C matrix)
{alpha, beta}); // Scalars used in the Epilogue
//
// Launch the CUTLASS GEMM kernel.
//
// cudaStream_t strm0;
// cudaStream_t strm1;
// cudaStreamCreate(&strm0);
// cudaStreamCreate(&strm1);
cudaStream_t strm[kernelNums];
for(int i = 0; i < kernelNums; i++) {
cudaStreamCreate(&strm[i]);
}
cutlass::Status status;
for(int i = 0; i < kernelNums; i++) {
status = gemm_operator(args, nullptr, strm[i]);
}
//cutlass::Status status = gemm_operator(args, nullptr, strm);
//
// Return a cudaError_t if the CUTLASS GEMM operator returned an error code.
//
if (status != cutlass::Status::kSuccess) {
return cudaErrorUnknown;
}
// Return success, if no errors were encountered.
return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// The source code after this point in the file is generic CUDA using the CUDA Runtime API
// and simple CUDA kernels to initialize matrices and compute the general matrix product.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to initialize a matrix with small integers.
__global__ void InitializeMatrix_kernel(
float *matrix,
int ldm,
int rows,
int columns,
int seed = 0) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < rows && j < columns) {
int offset = i + j * ldm;
// Generate arbitrary elements.
int const k = 16807;
int const m = 16;
float value = float(((offset + seed) * k % m) - m / 2);
matrix[offset] = value;
}
}
/// Simple function to initialize a matrix to arbitrary small integers.
cudaError_t InitializeMatrix(float *matrix, int ldm, int rows, int columns, int seed = 0) {
dim3 block(16, 16);
dim3 grid(
(rows + block.x - 1) / block.x,
(columns + block.y - 1) / block.y
);
InitializeMatrix_kernel<<< grid, block >>>(matrix, ldm, rows, columns, seed);
return cudaGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocates device memory for a matrix then fills with arbitrary small integers.
cudaError_t AllocateMatrix(float **matrix, int ldm, int rows, int columns, int seed = 0) {
cudaError_t result;
size_t sizeof_matrix = sizeof(float) * ldm * columns;
// Allocate device memory.
result = cudaMalloc(reinterpret_cast<void **>(matrix), sizeof_matrix);
if (result != cudaSuccess) {
std::cerr << "Failed to allocate matrix: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
// Clear the allocation.
result = cudaMemset(*matrix, 0, sizeof_matrix);
if (result != cudaSuccess) {
std::cerr << "Failed to clear matrix device memory: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
// Initialize matrix elements to arbitrary small integers.
result = InitializeMatrix(*matrix, ldm, rows, columns, seed);
if (result != cudaSuccess) {
std::cerr << "Failed to initialize matrix: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
return result;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Naive reference GEMM computation.
__global__ void ReferenceGemm_kernel(
int M,
int N,
int K,
float alpha,
float const *A,
int lda,
float const *B,
int ldb,
float beta,
float *C,
int ldc) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < M && j < N) {
float accumulator = 0;
for (int k = 0; k < K; ++k) {
accumulator += A[i + k * lda] * B[k + j * ldb];
}
C[i + j * ldc] = alpha * accumulator + beta * C[i + j * ldc];
}
}
/// Reference GEMM computation.
cudaError_t ReferenceGemm(
int M,
int N,
int K,
float alpha,
float const *A,
int lda,
float const *B,
int ldb,
float beta,
float *C,
int ldc) {
dim3 block(16, 16);
dim3 grid(
(M + block.x - 1) / block.x,
(N + block.y - 1) / block.y
);
ReferenceGemm_kernel<<< grid, block >>>(M, N, K, alpha, A, lda, B, ldb, beta, C, ldc);
return cudaGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocate several matrices in GPU device memory and call a single-precision
/// CUTLASS GEMM kernel.
cudaError_t TestCutlassGemm(int M, int N, int K, float alpha, float beta, int kernelNums) {
cudaError_t result;
//
// Define several matrices to be used as operands to GEMM kernels.
//
// Compute leading dimensions for each matrix.
int lda = M;
int ldb = K;
int ldc = M;
// Compute size in bytes of the C matrix.
size_t sizeof_C = sizeof(float) * ldc * N;
// Define pointers to matrices in GPU device memory.
float *A;
float *B;
float *C_cutlass;
float *C_reference;
//
// Allocate matrices in GPU device memory with arbitrary seeds.
//
result = AllocateMatrix(&A, lda, M, K, 0);
if (result != cudaSuccess) {
return result;
}
result = AllocateMatrix(&B, ldb, K, N, 17);
if (result != cudaSuccess) {
cudaFree(A);
return result;
}
result = AllocateMatrix(&C_cutlass, ldc, M, N, 101);
if (result != cudaSuccess) {
cudaFree(A);
cudaFree(B);
return result;
}
result = AllocateMatrix(&C_reference, ldc, M, N, 101);
if (result != cudaSuccess) {
cudaFree(A);
cudaFree(B);
cudaFree(C_cutlass);
return result;
}
result = cudaMemcpy(C_reference, C_cutlass, sizeof_C, cudaMemcpyDeviceToDevice);
if (result != cudaSuccess) {
std::cerr << "Failed to copy C_cutlass matrix to C_reference: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
//
// Launch CUTLASS GEMM.
//
result = CutlassSgemmNN(M, N, K, alpha, A, lda, B, ldb, beta, C_cutlass, ldc, kernelNums);
if (result != cudaSuccess) {
std::cerr << "CUTLASS GEMM kernel failed: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
//
// Verify.
//
// Launch reference GEMM
result = ReferenceGemm(M, N, K, alpha, A, lda, B, ldb, beta, C_reference, ldc);
if (result != cudaSuccess) {
std::cerr << "Reference GEMM kernel failed: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
// Copy to host and verify equivalence.
std::vector<float> host_cutlass(ldc * N, 0);
std::vector<float> host_reference(ldc * N, 0);
result = cudaMemcpy(host_cutlass.data(), C_cutlass, sizeof_C, cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
std::cerr << "Failed to copy CUTLASS GEMM results: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
result = cudaMemcpy(host_reference.data(), C_reference, sizeof_C, cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
std::cerr << "Failed to copy Reference GEMM results: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
//
// Free device memory allocations.
//
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
//
// Test for bit equivalence of results.
//
if (host_cutlass != host_reference) {
std::cerr << "CUTLASS results incorrect." << std::endl;
return cudaErrorUnknown;
}
return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to basic_gemm example.
//
// usage:
//
// 00_basic_gemm <M> <N> <K> <num kernel launches>
//
int main(int argc, const char *arg[]) {
//
// Parse the command line to obtain GEMM dimensions and scalar values.
//
// GEMM problem dimensions.
int problem[3] = { 128, 128, 128 };
for (int i = 1; i < argc && i < 4; ++i) {
std::stringstream ss(arg[i]);
ss >> problem[i - 1];
}
// Scalars used for linear scaling the result of the matrix product.
float scalars[2] = { 1, 0 };
int kernelNums = (argc > 4) ? atoi(arg[4]) : 1; // default to a single launch when the argument is omitted
// for (int i = 4; i < argc && i < 6; ++i) {
// std::stringstream ss(arg[i]);
// ss >> scalars[i - 4];
// }
//
// Run the CUTLASS GEMM test.
//
cudaError_t result = TestCutlassGemm(
problem[0], // GEMM M dimension
problem[1], // GEMM N dimension
problem[2], // GEMM K dimension
scalars[0], // alpha
scalars[1], // beta
kernelNums
);
if (result == cudaSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == cudaSuccess ? 0 : -1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
|
49af397227ec96a370d552864a921f71b1c50d1a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "initializer/constant_initializer.h"
#include "common_hip.cuh"
namespace SparseOperationKit {
ConstantInit::ConstantInit(const float value)
: value_(value)
{}
std::shared_ptr<ConstantInit> ConstantInit::create(const float value) {
return std::shared_ptr<ConstantInit>(new ConstantInit(value));
}
void ConstantInit::fill(std::shared_ptr<Tensor> tensor,
const size_t sm_count,
const hiprandGenerator_t& generator,
const hipStream_t& stream) {
float value = value_;
auto op = [value] __device__(float val) { return value; };
hipLaunchKernelGGL(( transform_array), dim3(sm_count * 2), dim3(1024), 0, stream, tensor->GetPtrWithType<float>(),
tensor->GetPtrWithType<float>(),
tensor->get_num_elements(), op);
}
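// Usage sketch (the tensor, generator and stream are supplied by the surrounding framework;
// the names below are illustrative only): a constant initializer overwrites every element of
// the tensor with `value` and ignores the curand generator.
// auto init = ConstantInit::create(0.0f);
// init->fill(tensor, sm_count, generator, stream);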
} // namespace SparseOperationKit
|
49af397227ec96a370d552864a921f71b1c50d1a.cu
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "initializer/constant_initializer.h"
#include "common.cuh"
namespace SparseOperationKit {
ConstantInit::ConstantInit(const float value)
: value_(value)
{}
std::shared_ptr<ConstantInit> ConstantInit::create(const float value) {
return std::shared_ptr<ConstantInit>(new ConstantInit(value));
}
void ConstantInit::fill(std::shared_ptr<Tensor> tensor,
const size_t sm_count,
const curandGenerator_t& generator,
const cudaStream_t& stream) {
float value = value_;
auto op = [value] __device__(float val) { return value; };
transform_array<<<sm_count * 2, 1024, 0, stream>>>(tensor->GetPtrWithType<float>(),
tensor->GetPtrWithType<float>(),
tensor->get_num_elements(), op);
}
} // namespace SparseOperationKit
|
a8a461c2122a52e4635dbf00e0f4c4c2a699358f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if GOOGLE_CUDA
#include "ew_op_gpu.h"
#include <stdio.h>
#include <type_traits>
template <typename T, uint STOCHASTIC, uint THREADS, uint UNROLL>
__global__ void __launch_bounds__(THREADS) quantize(uint* E, T* Y, const T* X, float round_scale, uint trunc_mask, float max_float, float min_float, uint exp_norm, uint size)
{
#if __CUDA_ARCH__ < 320
// TODO: this is a hack so we can build the tfserving version of these ops properly
// ideally we could not build for old architectures or make things
// backwards compatible
return;
#else
const uint tid = threadIdx.x;
const uint bid = blockIdx.x;
const uint offsetX = bid * THREADS * UNROLL + tid;
const uint strideX = gridDim.x * THREADS * UNROLL;
if (offsetX < size)
{
uint lfsr0, lfsr1, lfsr2;
if (STOCHASTIC == 1)
{
// Grab some entropy wherever we can and evenly distribute it
uint idx = bid * THREADS + tid;
asm("mov.b32 %0, %%clock_hi;" : "=r"(lfsr0) :);
asm("mov.b32 %0, %%clock;" : "=r"(lfsr1) :);
asm("mov.b32 %0, %%globaltimer_lo;" : "=r"(lfsr2) :);
asm("shf.r.clamp.b32 %0,%0,%0,%1;" : "=r"(lfsr0) : "r"((lfsr0^tid) & 31)); // rotate bits
asm("shf.r.clamp.b32 %0,%0,%0,%1;" : "=r"(lfsr1) : "r"((lfsr1^tid) & 31)); // rotate bits
asm("shf.r.clamp.b32 %0,%0,%0,%1;" : "=r"(lfsr2) : "r"((lfsr2^tid) & 31)); // rotate bits
lfsr0 ^= idx ^ (idx << 5) ^ (idx << 11) ^ (idx << 17) ^ (idx << 23);
}
else if (STOCHASTIC == 2)
{
lfsr0 = ldg(add_ptr_u((const uint*)E, gridDim.x*THREADS*0 + bid*THREADS + tid));
lfsr1 = ldg(add_ptr_u((const uint*)E, gridDim.x*THREADS*1 + bid*THREADS + tid));
lfsr2 = ldg(add_ptr_u((const uint*)E, gridDim.x*THREADS*2 + bid*THREADS + tid));
}
#pragma unroll 1
for (uint offset = offsetX; offset < size; offset += strideX)
{
const T* Xp = add_ptr_u(X, offset);
T* Yp = add_ptr_u(Y, offset);
#pragma unroll
for (uint j = 0; j < UNROLL; j++)
{
bool in_bounds = offset + j*THREADS < size;
float x = load(Xp, j*THREADS, in_bounds);
float rscale = round_scale;
if (STOCHASTIC)
{
// tausworthe generator (low quality rng is just fine)
lfsr0 = ((lfsr0 & 0xfffffffe) << 12) ^ (((lfsr0 << 13) ^ lfsr0) >> 19);
lfsr1 = ((lfsr1 & 0xfffffff8) << 4) ^ (((lfsr1 << 2) ^ lfsr1) >> 25);
lfsr2 = ((lfsr2 & 0xfffffff0) << 11) ^ (((lfsr2 << 3) ^ lfsr2) >> 11);
rscale *= (float)(lfsr0 ^ lfsr1 ^ lfsr2);
}
asm("{ \n\t"
".reg .f32 sign_exp, val; \n\t"
"and.b32 sign_exp, %0, 0xff800000; \n\t" // extract sign/exponent
"fma.rz.ftz.f32 val, sign_exp, %1, %0; \n\t" // add the round amount just below the final ulp position
"and.b32 %0, val, %2; \n\t" // truncate off unused mantissa
"}" : "+f"(x) : "f"(rscale), "r"(trunc_mask));
x = fmaxf(x, -max_float);
x = fminf(x, max_float);
if (fabs(x) < min_float)
x = 0.0f;
else
{
// Denorm Quantization:
// First subtract value off of exponent that will bring min_float to an unbiased exponent of 1.
// Then mul by 2**-23 to force truncation of any unused sub normal bits.
// Then scale back to original exponent by reversing this process.
asm("{ \n\t"
".reg .f32 f; \n\t"
".reg .u32 u; \n\t"
"mov.b32 u, %0; \n\t"
"sub.u32 u, u, %1; \n\t"
"mov.b32 f, u; \n\t"
"mul.rn.f32 f, f, 0F34000000; \n\t" // 2 **-23, round to nearest denorm
"mul.rz.f32 f, f, 0F4b000000; \n\t" // 2 ** 23
"mov.b32 u, f; \n\t"
"add.u32 u, u, %1; \n\t"
"mov.b32 %0, u; \n\t"
"}" : "+f"(x) : "r"(exp_norm));
}
store(Yp, x, j*THREADS, in_bounds);
}
}
if (STOCHASTIC == 2)
{
__stg(add_ptr_u(E, gridDim.x*THREADS*0 + bid*THREADS + tid), lfsr0);
__stg(add_ptr_u(E, gridDim.x*THREADS*1 + bid*THREADS + tid), lfsr1);
__stg(add_ptr_u(E, gridDim.x*THREADS*2 + bid*THREADS + tid), lfsr2);
}
}
#endif
}
template <typename T>
__global__ void __launch_bounds__(1024) quantization_stats(float* S, const T* X, float max_float, float ftz_float, float rcp_size, uint size)
{
__shared__ float4 Share4[32];
__shared__ float Share1[32];
uint tid = threadIdx.x;
uint bid = blockIdx.x;
// check if this block has any work to do
if (bid * 1024 < size)
{
float4 stats = {0,0,0,0}; // mean, mean**2, n_sat, n_ftz
float x_max = 0;
for (uint offset = bid * 1024 + tid; offset < size; offset += gridDim.x*1024)
{
float x = load(add_ptr_u(X, offset));
// Nans => Inf
asm("{ \n\t"
".reg .pred is_number; \n\t"
"testp.number.f32 is_number, %0; \n\t"
"selp.f32 %0, %0, 0F7f800000, is_number;\n\t"
"}" : "+f"(x) :);
// Saturate fp16 infinity values
if (std::is_same<T, ehalf>::value)
x = fmaxf(fminf(x, 65504.0f), -65504.0f);
stats.x += abs(x);
stats.y += x*x;
stats.z += abs(x) >= max_float ? 1.0f : 0.0f;
stats.w += x != 0.0f && abs(x) < ftz_float ? 1.0f : 0.0f;
x_max = fmaxf(x_max, abs(x));
}
// reduce within warp
#pragma unroll
for (int i = 16; i > 0; i >>= 1)
{
stats.x += shfl_xor(stats.x, i);
stats.y += shfl_xor(stats.y, i);
stats.z += shfl_xor(stats.z, i);
stats.w += shfl_xor(stats.w, i);
x_max = fmaxf(x_max, shfl_xor(x_max, i));
}
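// After the xor-shuffle butterfly every lane of the warp holds the same warp-wide sums and max, so lane 0 alone publishes them to shared memory below.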
// first thread of each warp store to shared
if ((tid & 31) == 0)
{
Share4[tid >> 5] = stats;
Share1[tid >> 5] = x_max;
}
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
stats = Share4[tid];
x_max = Share1[tid];
// reduce within this last warp
#pragma unroll
for (int i = 16; i > 0; i >>= 1)
{
stats.x += shfl_xor(stats.x, i);
stats.y += shfl_xor(stats.y, i);
stats.z += shfl_xor(stats.z, i);
stats.w += shfl_xor(stats.w, i);
x_max = fmaxf(x_max, shfl_xor(x_max, i));
}
// All threads in this warp now have the same final reduction values
// First 4 stats are sums, so add them in one shot
float sum =
tid == 0 ? stats.x * rcp_size :
tid == 1 ? stats.y * rcp_size :
tid == 2 ? stats.z * rcp_size :
stats.w * rcp_size ;
if (tid < 4)
atomicRed(add_ptr_u(S, tid), sum);
// Last stat needs to be maxed separately
if (tid == 0)
atomicRedMax(S + 4, x_max);
}
}
}
template <typename T>
bool Quantize(hipStream_t stream, uint SMs, uint* entropy, T* y, const T* x, float round_scale, uint trunc_mask, float max_float, float min_float, uint exp_norm, uint size, int stochastic)
{
if (stochastic)
{
// stochastic mode does more compute per load and needs fewer total threads.
uint grid =
size >= SMs*8*128*4 ? SMs*8 :
size >= SMs*4*128*4 ? SMs*4 :
size >= SMs*2*128*4 ? SMs*2 :
SMs ;
if (entropy != NULL)
hipLaunchKernelGGL(( quantize<T,2,128,4>), dim3(grid),dim3(128),0,stream, entropy, y, x, round_scale, trunc_mask, max_float, min_float, exp_norm, size);
else
hipLaunchKernelGGL(( quantize<T,1,128,4>), dim3(grid),dim3(128),0,stream, entropy, y, x, round_scale, trunc_mask, max_float, min_float, exp_norm, size);
}
else
{
uint grid =
size >= SMs*16*128*4 ? SMs*16 :
size >= SMs* 8*128*4 ? SMs* 8 :
size >= SMs* 4*128*4 ? SMs* 4 :
size >= SMs* 2*128*4 ? SMs* 2 :
SMs ;
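// e.g. assuming SMs == 80: arrays of at least 80*16*128*4 = 655360 elements get a grid of 1280 blocks of 128 threads, each thread covering 4 elements per grid-stride pass.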
hipLaunchKernelGGL(( quantize<T,0,128,4>), dim3(grid),dim3(128),0,stream, entropy, y, x, round_scale, trunc_mask, max_float, min_float, exp_norm, size);
}
return true; // TODO
}
template <typename T>
QuantStats QuantizationStats(hipStream_t stream, uint SMs, float* s, const T* x, float max_float, float ftz_float, uint size)
{
QuantStats stats;
uint grid = size > SMs*1024 ? SMs*2 : SMs;
hipMemsetD8Async((hipDeviceptr_t)s, 0, sizeof(stats), stream);
hipLaunchKernelGGL(( quantization_stats<T>), dim3(grid),dim3(1024),0,stream, s, x, max_float, ftz_float, 1.0f/(float)size, size);
hipMemcpyDtoHAsync((void*)&stats, (hipDeviceptr_t)s, sizeof(stats), stream);
// var(x) == mean(x**2) - mean(x)**2
stats.stdv = sqrtf(stats.stdv - stats.mean*stats.mean);
stats.sat_pct *= 100.0f;
stats.ftz_pct *= 100.0f;
return stats;
}
template bool Quantize<float>(hipStream_t stream, uint SMs, uint* entropy, float* y, const float* x, float round_scale, uint trunc_mask, float max_float, float min_float, uint exp_norm, uint size, int stochastic);
template bool Quantize<bhalf>(hipStream_t stream, uint SMs, uint* entropy, bhalf* y, const bhalf* x, float round_scale, uint trunc_mask, float max_float, float min_float, uint exp_norm, uint size, int stochastic);
template QuantStats QuantizationStats<float>(hipStream_t stream, uint SMs, float* s, const float* x, float max_float, float ftz_float, uint size);
template QuantStats QuantizationStats<bhalf>(hipStream_t stream, uint SMs, float* s, const bhalf* x, float max_float, float ftz_float, uint size);
template QuantStats QuantizationStats<ehalf>(hipStream_t stream, uint SMs, float* s, const ehalf* x, float max_float, float ftz_float, uint size);
#endif
|
a8a461c2122a52e4635dbf00e0f4c4c2a699358f.cu
|
#if GOOGLE_CUDA
#include "ew_op_gpu.h"
#include <stdio.h>
#include <type_traits>
template <typename T, uint STOCHASTIC, uint THREADS, uint UNROLL>
__global__ void __launch_bounds__(THREADS) quantize(uint* E, T* Y, const T* X, float round_scale, uint trunc_mask, float max_float, float min_float, uint exp_norm, uint size)
{
#if __CUDA_ARCH__ < 320
// TODO: this is a hack so we can build the tfserving version of these ops properly
// ideally we could not build for old architectures or make things
// backwards compatible
return;
#else
const uint tid = threadIdx.x;
const uint bid = blockIdx.x;
const uint offsetX = bid * THREADS * UNROLL + tid;
const uint strideX = gridDim.x * THREADS * UNROLL;
if (offsetX < size)
{
uint lfsr0, lfsr1, lfsr2;
if (STOCHASTIC == 1)
{
// Grab some entropy wherever we can and evenly distribute it
uint idx = bid * THREADS + tid;
asm("mov.b32 %0, %%clock_hi;" : "=r"(lfsr0) :);
asm("mov.b32 %0, %%clock;" : "=r"(lfsr1) :);
asm("mov.b32 %0, %%globaltimer_lo;" : "=r"(lfsr2) :);
asm("shf.r.clamp.b32 %0,%0,%0,%1;" : "=r"(lfsr0) : "r"((lfsr0^tid) & 31)); // rotate bits
asm("shf.r.clamp.b32 %0,%0,%0,%1;" : "=r"(lfsr1) : "r"((lfsr1^tid) & 31)); // rotate bits
asm("shf.r.clamp.b32 %0,%0,%0,%1;" : "=r"(lfsr2) : "r"((lfsr2^tid) & 31)); // rotate bits
lfsr0 ^= idx ^ (idx << 5) ^ (idx << 11) ^ (idx << 17) ^ (idx << 23);
}
else if (STOCHASTIC == 2)
{
lfsr0 = ldg(add_ptr_u((const uint*)E, gridDim.x*THREADS*0 + bid*THREADS + tid));
lfsr1 = ldg(add_ptr_u((const uint*)E, gridDim.x*THREADS*1 + bid*THREADS + tid));
lfsr2 = ldg(add_ptr_u((const uint*)E, gridDim.x*THREADS*2 + bid*THREADS + tid));
}
#pragma unroll 1
for (uint offset = offsetX; offset < size; offset += strideX)
{
const T* Xp = add_ptr_u(X, offset);
T* Yp = add_ptr_u(Y, offset);
#pragma unroll
for (uint j = 0; j < UNROLL; j++)
{
bool in_bounds = offset + j*THREADS < size;
float x = load(Xp, j*THREADS, in_bounds);
float rscale = round_scale;
if (STOCHASTIC)
{
// tausworthe generator (low quality rng is just fine)
lfsr0 = ((lfsr0 & 0xfffffffe) << 12) ^ (((lfsr0 << 13) ^ lfsr0) >> 19);
lfsr1 = ((lfsr1 & 0xfffffff8) << 4) ^ (((lfsr1 << 2) ^ lfsr1) >> 25);
lfsr2 = ((lfsr2 & 0xfffffff0) << 11) ^ (((lfsr2 << 3) ^ lfsr2) >> 11);
rscale *= (float)(lfsr0 ^ lfsr1 ^ lfsr2);
}
asm("{ \n\t"
".reg .f32 sign_exp, val; \n\t"
"and.b32 sign_exp, %0, 0xff800000; \n\t" // extract sign/exponent
"fma.rz.ftz.f32 val, sign_exp, %1, %0; \n\t" // add the round amount just below the final ulp position
"and.b32 %0, val, %2; \n\t" // truncate off unused mantissa
"}" : "+f"(x) : "f"(rscale), "r"(trunc_mask));
x = fmaxf(x, -max_float);
x = fminf(x, max_float);
if (fabs(x) < min_float)
x = 0.0f;
else
{
// Denorm Quantization:
// First subtract value off of exponent that will bring min_float to an unbiased exponent of 1.
// Then mul by 2**-23 to force truncation of any unused sub normal bits.
// Then scale back to original exponent by reversing this process.
asm("{ \n\t"
".reg .f32 f; \n\t"
".reg .u32 u; \n\t"
"mov.b32 u, %0; \n\t"
"sub.u32 u, u, %1; \n\t"
"mov.b32 f, u; \n\t"
"mul.rn.f32 f, f, 0F34000000; \n\t" // 2 **-23, round to nearest denorm
"mul.rz.f32 f, f, 0F4b000000; \n\t" // 2 ** 23
"mov.b32 u, f; \n\t"
"add.u32 u, u, %1; \n\t"
"mov.b32 %0, u; \n\t"
"}" : "+f"(x) : "r"(exp_norm));
}
store(Yp, x, j*THREADS, in_bounds);
}
}
if (STOCHASTIC == 2)
{
__stg(add_ptr_u(E, gridDim.x*THREADS*0 + bid*THREADS + tid), lfsr0);
__stg(add_ptr_u(E, gridDim.x*THREADS*1 + bid*THREADS + tid), lfsr1);
__stg(add_ptr_u(E, gridDim.x*THREADS*2 + bid*THREADS + tid), lfsr2);
}
}
#endif
}
template <typename T>
__global__ void __launch_bounds__(1024) quantization_stats(float* S, const T* X, float max_float, float ftz_float, float rcp_size, uint size)
{
__shared__ float4 Share4[32];
__shared__ float Share1[32];
uint tid = threadIdx.x;
uint bid = blockIdx.x;
// check if this block has any work to do
if (bid * 1024 < size)
{
float4 stats = {0,0,0,0}; // mean, mean**2, n_sat, n_ftz
float x_max = 0;
for (uint offset = bid * 1024 + tid; offset < size; offset += gridDim.x*1024)
{
float x = load(add_ptr_u(X, offset));
// Nans => Inf
asm("{ \n\t"
".reg .pred is_number; \n\t"
"testp.number.f32 is_number, %0; \n\t"
"selp.f32 %0, %0, 0F7f800000, is_number;\n\t"
"}" : "+f"(x) :);
// Saturate fp16 infinity values
if (std::is_same<T, ehalf>::value)
x = fmaxf(fminf(x, 65504.0f), -65504.0f);
stats.x += abs(x);
stats.y += x*x;
stats.z += abs(x) >= max_float ? 1.0f : 0.0f;
stats.w += x != 0.0f && abs(x) < ftz_float ? 1.0f : 0.0f;
x_max = fmaxf(x_max, abs(x));
}
// reduce within warp
#pragma unroll
for (int i = 16; i > 0; i >>= 1)
{
stats.x += shfl_xor(stats.x, i);
stats.y += shfl_xor(stats.y, i);
stats.z += shfl_xor(stats.z, i);
stats.w += shfl_xor(stats.w, i);
x_max = fmaxf(x_max, shfl_xor(x_max, i));
}
// first thread of each warp store to shared
if ((tid & 31) == 0)
{
Share4[tid >> 5] = stats;
Share1[tid >> 5] = x_max;
}
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
stats = Share4[tid];
x_max = Share1[tid];
// reduce within this last warp
#pragma unroll
for (int i = 16; i > 0; i >>= 1)
{
stats.x += shfl_xor(stats.x, i);
stats.y += shfl_xor(stats.y, i);
stats.z += shfl_xor(stats.z, i);
stats.w += shfl_xor(stats.w, i);
x_max = fmaxf(x_max, shfl_xor(x_max, i));
}
// All threads in this warp now have the same final reduction values
// First 4 stats are sums, so add them in one shot
float sum =
tid == 0 ? stats.x * rcp_size :
tid == 1 ? stats.y * rcp_size :
tid == 2 ? stats.z * rcp_size :
stats.w * rcp_size ;
if (tid < 4)
atomicRed(add_ptr_u(S, tid), sum);
// Last stat needs to be maxed separately
if (tid == 0)
atomicRedMax(S + 4, x_max);
}
}
}
template <typename T>
bool Quantize(CUstream stream, uint SMs, uint* entropy, T* y, const T* x, float round_scale, uint trunc_mask, float max_float, float min_float, uint exp_norm, uint size, int stochastic)
{
if (stochastic)
{
// stochastic mode does more compute per load and needs fewer total threads.
uint grid =
size >= SMs*8*128*4 ? SMs*8 :
size >= SMs*4*128*4 ? SMs*4 :
size >= SMs*2*128*4 ? SMs*2 :
SMs ;
if (entropy != NULL)
quantize<T,2,128,4><<<grid,128,0,stream>>>(entropy, y, x, round_scale, trunc_mask, max_float, min_float, exp_norm, size);
else
quantize<T,1,128,4><<<grid,128,0,stream>>>(entropy, y, x, round_scale, trunc_mask, max_float, min_float, exp_norm, size);
}
else
{
uint grid =
size >= SMs*16*128*4 ? SMs*16 :
size >= SMs* 8*128*4 ? SMs* 8 :
size >= SMs* 4*128*4 ? SMs* 4 :
size >= SMs* 2*128*4 ? SMs* 2 :
SMs ;
quantize<T,0,128,4><<<grid,128,0,stream>>>(entropy, y, x, round_scale, trunc_mask, max_float, min_float, exp_norm, size);
}
return true; // TODO
}
template <typename T>
QuantStats QuantizationStats(CUstream stream, uint SMs, float* s, const T* x, float max_float, float ftz_float, uint size)
{
QuantStats stats;
uint grid = size > SMs*1024 ? SMs*2 : SMs;
cuMemsetD8Async((CUdeviceptr)s, 0, sizeof(stats), stream);
quantization_stats<T><<<grid,1024,0,stream>>>(s, x, max_float, ftz_float, 1.0f/(float)size, size);
cuMemcpyDtoHAsync((void*)&stats, (CUdeviceptr)s, sizeof(stats), stream);
// var(x) == mean(x**2) - mean(x)**2
stats.stdv = sqrtf(stats.stdv - stats.mean*stats.mean);
stats.sat_pct *= 100.0f;
stats.ftz_pct *= 100.0f;
return stats;
}
template bool Quantize<float>(CUstream stream, uint SMs, uint* entropy, float* y, const float* x, float round_scale, uint trunc_mask, float max_float, float min_float, uint exp_norm, uint size, int stochastic);
template bool Quantize<bhalf>(CUstream stream, uint SMs, uint* entropy, bhalf* y, const bhalf* x, float round_scale, uint trunc_mask, float max_float, float min_float, uint exp_norm, uint size, int stochastic);
template QuantStats QuantizationStats<float>(CUstream stream, uint SMs, float* s, const float* x, float max_float, float ftz_float, uint size);
template QuantStats QuantizationStats<bhalf>(CUstream stream, uint SMs, float* s, const bhalf* x, float max_float, float ftz_float, uint size);
template QuantStats QuantizationStats<ehalf>(CUstream stream, uint SMs, float* s, const ehalf* x, float max_float, float ftz_float, uint size);
#endif
|
3db0dd9d53edf41978b166ba9e74935c05b74419.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void helper(float * output, float * blocksum, int len) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < len){
for (int j=0; j<i/blockDim.x; j++)
output[i] += blocksum[j];
}
}
|
3db0dd9d53edf41978b166ba9e74935c05b74419.cu
|
#include "includes.h"
__global__ void helper(float * output, float * blocksum, int len) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < len){
for (int j=0; j<i/blockDim.x; j++)
output[i] += blocksum[j];
}
}
|
cb1f8a48365c061954501037151cebe2f2da49f1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <ctype.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
//CUDA STUFF:
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
//OpenCV stuff
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
hipError_t launch_helper(Mat image, int *CPU_OutputArray, float* Runtimes);
//hipError_t store_helper(Mat image, float* GPURuntimes);
//hipError_t calculate_helper(int *CPU_OutputArray, float* Runtimes);
#define BOX_SIZE2 16 // ThreadsPerBlock == BOX_SIZE * BOX_SIZE
#define BOX_SIZE1 1
int M; //number of rows in image
int N; //number of columns in image
int NumRot;
int Mode;
int a = 0;
int stage = 0;
Mat zero;
//ip.Vpixels <--> M
//ip.Hpixels <--> N
__global__ void rotate_kernel1(uchar *GPU_i, uchar *GPU_o, int M, int N, int i, int j){ //baseline
//Block index
int bx = blockIdx.x;
int by = blockIdx.y;
//Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
__shared__ uchar shared_GPU_data[BOX_SIZE1][BOX_SIZE1];
int row = bx * BOX_SIZE1 + tx; //row of image
int col = by * BOX_SIZE1 + ty; //column of image
int idx = row*N + col; //which pixel in full 1D array
shared_GPU_data[tx][ty] = GPU_i[idx];
__syncthreads();
int h,v,c;
int row2; //new row of image
int col2; //new column of image
double X, Y, newY, newX, ScaleFactor;
double Diagonal, H, V;
double RotDegrees = 360 / j * i; //in degrees
double RotAngle = 2*3.141592/360.000*(double) RotDegrees; //in radians
//printf("We are rotating %d times and iteration# = %d RotAngle = %g\n", j, i, RotAngle);
// transpose image coordinates to Cartesian coordinates
// integer div
c = col;
h=N/2; //halfway of column pixels
v=M/2; //halfway of horizontal pixels
X=(double)c-(double)h;
Y=(double)v-(double)row;
// pixel rotation matrix
newX = cos(RotAngle) * X - sin(RotAngle) * Y;
newY= sin (RotAngle) * X + cos(RotAngle) * Y;
// Scale to fit everything in the image box CONFIRMED TO BE CORRECT
H=(double)N;
V=(double)M;
Diagonal=sqrt(H*H+V*V);
ScaleFactor=(N>M) ? V/Diagonal : H/Diagonal;
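// e.g. for a 640x480 image: Diagonal = sqrt(640*640 + 480*480) = 800, so ScaleFactor = 480/800 = 0.6 and the rotated image always fits inside the original box.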
newX=newX*ScaleFactor;
newY = newY*ScaleFactor;
// convert back from Cartesian to image coordinates
col2= (int)newX+h;
row2=v-(int)newY;
// maps old pixel to new pixel
int idx2 = row2*N + col2;
GPU_o[idx2] = shared_GPU_data[tx][ty];
}
__global__ void rotate_kernel2(uchar *GPU_i, uchar *GPU_o, int M, int N, int i, int j){ //optimal blocksize
//Block index
int bx = blockIdx.x;
int by = blockIdx.y;
//Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
__shared__ uchar shared_GPU_data[BOX_SIZE2][BOX_SIZE2];
int row = bx * BOX_SIZE2 + tx; //row of image
int col = by * BOX_SIZE2 + ty; //column of image
int idx = row*N + col; //which pixel in full 1D array
shared_GPU_data[tx][ty] = GPU_i[idx];
__syncthreads();
int h,v,c;
int row2; //new row of image
int col2; //new column of image
double X, Y, newY, newX, ScaleFactor;
double Diagonal, H, V;
double RotDegrees = 360 / j * i; //in degrees
double RotAngle = 2*3.141592/360.000*(double) RotDegrees; //in radians
//printf("We are rotating %d times and iteration# = %d RotAngle = %g\n", j, i, RotAngle);
// transpose image coordinates to Cartesian coordinates
// integer div
c = col;
h=N/2; //halfway of column pixels
v=M/2; //halfway of horizontal pixels
X=(double)c-(double)h;
Y=(double)v-(double)row;
// pixel rotation matrix
newX = cos(RotAngle) * X - sin(RotAngle) * Y;
newY= sin (RotAngle) * X + cos(RotAngle) * Y;
// Scale to fit everything in the image box CONFIRMED TO BE CORRECT
H=(double)N;
V=(double)M;
Diagonal=sqrt(H*H+V*V);
ScaleFactor=(N>M) ? V/Diagonal : H/Diagonal;
newX=newX*ScaleFactor;
newY = newY*ScaleFactor;
// convert back from Cartesian to image coordinates
col2= (int)newX+h;
row2=v-(int)newY;
// maps old pixel to new pixel
int idx2 = row2*N + col2;
GPU_o[idx2] = shared_GPU_data[tx][ty];
}
__global__ void rotate_kernel3(uchar *GPU_i, uchar *GPU_o, int M, int N, int i, int j){ //new algorithm + optimal block size
int h,v;
int row2; //new row of image
int col2; //new column of image
double cc, ss, k1, k2;
double Y, newX, newY, ScaleFactor;
double Diagonal, H, V;
double CRA,SRA, CRAS, SRAS, SRAYS, CRAYS;
double RotDegrees = 360 / j * i; //in degrees
double RotAngle = 2*3.141592/360.000*(double) RotDegrees; //in radians
//printf("We are rotating %d times and iteration# = %d RotAngle = %g\n", j, i, RotAngle);
// transpose image coordinates to Cartesian coordinates
//Block index
int bx = blockIdx.x;
int by = blockIdx.y;
//Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
__shared__ uchar shared_GPU_data[BOX_SIZE2][BOX_SIZE2];
int row = bx * BOX_SIZE2 + tx; //row of image
int col = by * BOX_SIZE2 + ty; //column of image
int idx = row*N + col; //which pixel in full 1D array
shared_GPU_data[tx][ty] = GPU_i[idx];
__syncthreads();
H=(double) N;
V=(double) M;
Diagonal=sqrt(H*H+V*V);
ScaleFactor=(N>M) ? V/Diagonal : H/Diagonal;
CRA=cos(RotAngle); CRAS=ScaleFactor*CRA;
SRA=sin(RotAngle); SRAS=ScaleFactor*SRA;
// integer div
h=N/2; //halfway of column pixels
v=M/2; //halfway of horizontal pixels
cc=col*CRAS;
ss=col*SRAS;
Y=(double)v-(double)row;
SRAYS=SRAS*Y; CRAYS=CRAS*Y;
k1=CRAS*(double)h + SRAYS;
k2=SRAS*(double)h - CRAYS;
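// Algebraically identical to the baseline kernel: newX = cc - k1 = CRAS*(col - h) - SRAS*Y and newY = ss - k2 = SRAS*(col - h) + CRAS*Y, i.e. the scaled rotation of (X, Y); sin/cos are evaluated once each and the scale factor is folded into them.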
// pixel rotation matrix
newX=cc-k1;
newY=ss-k2;
// convert back from Cartesian to image coordinates
col2=((int) newX+h);
row2=v-(int)newY;
// maps old pixel to new pixel
int idx2 = row2*N + col2;
GPU_o[idx2] = shared_GPU_data[tx][ty];
}
__global__ void rotate_kernel4(uchar *GPU_i, uchar *GPU_o, int M, int N, int i, int j){ //asynchronous memcpy (no change here)
//Block index
int bx = blockIdx.x;
int by = blockIdx.y;
//Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
__shared__ uchar shared_GPU_data[6*BOX_SIZE2][16*BOX_SIZE2];
int row = bx * BOX_SIZE2 + tx; //row of image
int col = by * BOX_SIZE2 + ty; //column of image
int idx = row*N + col; //which pixel in full 1D array
shared_GPU_data[tx][ty] = GPU_i[idx];
__syncthreads();
int h,v,c;
int row2; //new row of image
int col2; //new column of image
double X, Y, newY, newX, ScaleFactor;
double Diagonal, H, V;
double RotDegrees = 360 / j * i; //in degrees
double RotAngle = 2*3.141592/360.000*(double) RotDegrees; //in radians
//printf("We are rotating %d times and iteration# = %d RotAngle = %g\n", j, i, RotAngle);
// transpose image coordinates to Cartesian coordinates
// integer div
c = col;
h=N/2; //halfway of column pixels
v=M/2; //halfway of horizontal pixels
X=(double)c-(double)h;
Y=(double)v-(double)row;
// pixel rotation matrix
newX = cos(RotAngle) * X - sin(RotAngle) * Y;
newY= sin (RotAngle) * X + cos(RotAngle) * Y;
// Scale to fit everything in the image box
H=(double)N;
V=(double)M;
Diagonal=sqrt(H*H+V*V);
ScaleFactor=(N>M) ? V/Diagonal : H/Diagonal;
newX=newX*ScaleFactor;
newY = newY*ScaleFactor;
// convert back from Cartesian to image coordinates
col2= (int)newX+h;
row2=v-(int)newY;
// maps old pixel to new pixel
int idx2 = row2*N + col2;
GPU_o[idx2] = shared_GPU_data[tx][ty];
}
__global__ void rotate_kernel5(uchar *GPU_i, uchar *GPU_o, int M, int N, int i, int j){ //shared memory size change
int h,v;
int row2; //new row of image
int col2; //new column of image
double cc, ss, k1, k2;
double Y, newX, newY, ScaleFactor;
double Diagonal, H, V;
double CRA,SRA, CRAS, SRAS, SRAYS, CRAYS;
double RotDegrees = 360 / j * i; //in degrees
double RotAngle = 2*3.141592/360.000*(double) RotDegrees; //in radians
//printf("We are rotating %d times and iteration# = %d RotAngle = %g\n", j, i, RotAngle);
// transpose image coordinates to Cartesian coordinates
//Block index
int bx = blockIdx.x;
int by = blockIdx.y;
//Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
__shared__ uchar shared_GPU_data[6*BOX_SIZE2][16*BOX_SIZE2];
int row = bx * BOX_SIZE2 + tx; //row of image
int col = by * BOX_SIZE2 + ty; //column of image
int idx = row*N + col; //which pixel in full 1D array
shared_GPU_data[tx][ty] = GPU_i[idx];
__syncthreads();
//all of these values are constants
H=(double) N;
V=(double) M;
Diagonal=sqrt(H*H+V*V);
ScaleFactor=(N>M) ? V/Diagonal : H/Diagonal;
CRA=cos(RotAngle); CRAS=ScaleFactor*CRA;
SRA=sin(RotAngle); SRAS=ScaleFactor*SRA;
// integer div
h=N/2; //halfway of column pixels
v=M/2; //halfway of horizontal pixels
cc=col*CRAS;
ss=col*SRAS;
Y=(double)v-(double)row;
SRAYS=SRAS*Y; CRAYS=CRAS*Y;
k1=CRAS*(double)h + SRAYS;
k2=SRAS*(double)h - CRAYS;
// pixel rotation matrix
newX=cc-k1;
newY=ss-k2;
// convert back from Cartesian to image coordinates
col2=((int) newX+h);
row2=v-(int)newY;
// maps old pixel to new pixel
int idx2 = row2*N + col2;
GPU_o[idx2] = shared_GPU_data[tx][ty];
}
int main(int argc, char *argv[]){
float GPURuntimes[4]; // run times of the GPU code
float ExecTotalTime = 0.0f, GPUTotalTime = 0.0f;
hipError_t cudaStatus;
char filename[100]; //output file name
int i;
int *CPU_OutputArray = (int*) 0; // where the GPU should copy the output back to
if (argc != 5){
printf("Improper usage!\n");
printf("Usage: %s <input image> <output image> <N rotations> <mode [1-5]>\n", argv[0]);
exit(EXIT_FAILURE);
}
NumRot = atoi(argv[3]);
if (NumRot > 30){
printf("Number of rotations requested is too high! Adjusted to 30.\n");
NumRot = 30;
}
Mode = atoi(argv[4]);
if (Mode < 1 || Mode > 5){
printf("Improper usage! Mode must be between [1-5]\n");
exit(EXIT_FAILURE);
}
for (i = 0; i<NumRot; i++){
// Load image:
Mat image;
image = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
if (! image.data){
fprintf(stderr, "Could not open or find the image.\n");
exit(EXIT_FAILURE);
}
printf("Loaded image '%s', size = %dx%d (dims = %d).\n", argv[1], image.cols, image.rows, image.dims);
//set up global variables for image size
M = image.rows;
N = image.cols;
// Create CPU memory to store the output;
/*Mat */zero = Mat(M,N,CV_8UC1, Scalar(255)); //start by making every pixel white
sprintf(filename,"%sAROT%d.png", argv[2], i);
imwrite(filename,zero);
CPU_OutputArray = (int*) malloc(M*N*sizeof(int));
if (CPU_OutputArray == NULL){
fprintf(stderr, "OOPS. Can't create CPU_OutputArray using malloc() ...\n");
exit(EXIT_FAILURE);
}
//run it
cudaStatus = launch_helper(image, CPU_OutputArray, GPURuntimes);
if (cudaStatus != hipSuccess){
fprintf(stderr, "launch_helper failed!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
printf("-----------------------------------------------------------------\n");
printf("Tfr CPU->GPU = %5.2f ms ... \nExecution = %5.2f ms ... \nTfr GPU->CPU = %5.2f ms \nSum of Iteration = %5.2f ms\n",
GPURuntimes[1], GPURuntimes[2], GPURuntimes[3], GPURuntimes[0]);
ExecTotalTime += GPURuntimes[0];
GPUTotalTime += GPURuntimes[2];
printf("\nGPU Execution Time = %5.2f ms \n", GPUTotalTime);
printf("Total Execution Time = %5.2f ms\n", ExecTotalTime);
printf("-----------------------------------------------------------------\n");
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess){
fprintf(stderr, "hipDeviceReset failed!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
//save image to disk
Mat result = Mat(M,N,CV_8UC1, CPU_OutputArray);
if (!imwrite(filename, result)){
fprintf(stderr, "couldn't write output to disk!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
printf("Saved image '%s', size = %dx%d (dims = %d).\n",
//filename.c_str(), result.cols, result.rows, result.dims);
filename, result.cols, result.rows, result.dims);
free(CPU_OutputArray);
}
exit(EXIT_SUCCESS);
}
hipError_t launch_helper(Mat image, int *CPU_OutputArray, float* Runtimes){
hipEvent_t time1, time2, time3, time4;
int TotalGPUSize; // total size of 1 image in bytes
dim3 threadsPerBlock;
dim3 numBlocks;
hipError_t cudaStatus;
cudaStatus = hipSetDevice(0); // use the first GPU
if (cudaStatus != hipSuccess){
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
goto Error;
}
hipEventCreate(&time1);
hipEventCreate(&time2);
hipEventCreate(&time3);
hipEventCreate(&time4);
hipEventRecord(time1, 0);
// Allocate GPU buffer for inputs and outputs:
TotalGPUSize = M * N * sizeof(uchar);
if (Mode != 4){
uchar *GPU_idata;
uchar *GPU_odata;
uchar *GPU_zerodata;
cudaStatus = hipMalloc((void**)&GPU_idata, TotalGPUSize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!\n");
goto Error;
}
cudaStatus = hipMalloc((void**)&GPU_odata, TotalGPUSize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!\n");
goto Error;
}
cudaStatus = hipMalloc((void**)&GPU_zerodata, TotalGPUSize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!\n");
goto Error;
}
cudaStatus = hipMemcpy(GPU_odata, zero.data, TotalGPUSize, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "cudaMemcpyzero failed!\n");
goto Error;
}
cudaStatus = hipMemcpy(GPU_idata, image.data, TotalGPUSize, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!\n");
goto Error;
}
hipEventRecord(time2, 0);
// Launch a kernel on the GPU with one thread for each pixel.
switch(Mode){
case 1 : threadsPerBlock = dim3(BOX_SIZE1, BOX_SIZE1);
numBlocks = dim3(M / threadsPerBlock.x, N / threadsPerBlock.y);
hipLaunchKernelGGL(( rotate_kernel1), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, GPU_idata, GPU_odata, M, N, a, NumRot);
break;
case 2 : threadsPerBlock = dim3(BOX_SIZE2, BOX_SIZE2);
numBlocks = dim3(M / threadsPerBlock.x, N / threadsPerBlock.y);
hipLaunchKernelGGL(( rotate_kernel2), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, GPU_idata, GPU_odata, M, N, a, NumRot);
break;
case 3 : threadsPerBlock = dim3(BOX_SIZE2, BOX_SIZE2);
numBlocks = dim3(M / threadsPerBlock.x, N / threadsPerBlock.y);
hipLaunchKernelGGL(( rotate_kernel3), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, GPU_idata, GPU_odata, M, N, a, NumRot);
break;
case 5 : threadsPerBlock = dim3(BOX_SIZE2, BOX_SIZE2);
numBlocks = dim3(M / threadsPerBlock.x, N / threadsPerBlock.y);
hipLaunchKernelGGL(( rotate_kernel5), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, GPU_idata, GPU_odata, M, N, a, NumRot);
break;
}
// Check for errors immediately after kernel launch.
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess){
fprintf(stderr, "error code %d (%s) launching kernel!\n", cudaStatus, hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d (%s) after launching addKernel!\n", cudaStatus, hipGetErrorString(cudaStatus));
goto Error;
}
hipEventRecord(time3, 0);
// Copy output (results) from GPU buffer to host (CPU) memory.
cudaStatus = hipMemcpy(CPU_OutputArray, GPU_odata, TotalGPUSize, hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!\n");
goto Error;
}
hipEventRecord(time4, 0);
hipEventSynchronize(time1);
hipEventSynchronize(time2);
hipEventSynchronize(time3);
hipEventSynchronize(time4);
float totalTime, tfrCPUtoGPU, tfrGPUtoCPU, kernelExecutionTime;
hipEventElapsedTime(&totalTime, time1, time4);
hipEventElapsedTime(&tfrCPUtoGPU, time1, time2);
hipEventElapsedTime(&kernelExecutionTime, time2, time3);
hipEventElapsedTime(&tfrGPUtoCPU, time3, time4);
Runtimes[0] = totalTime;
Runtimes[1] = tfrCPUtoGPU;
Runtimes[2] = kernelExecutionTime;
Runtimes[3] = tfrGPUtoCPU;
a++;
Error:
hipFree(GPU_odata);
hipFree(GPU_idata);
hipFree(GPU_zerodata);
goto End;
}
else if (Mode == 4){
uchar *GPU_idata;
uchar *GPU_odata;
uchar *GPU_zerodata;
hipStream_t stream1, stream2;
threadsPerBlock = dim3(BOX_SIZE2, BOX_SIZE2);
numBlocks = dim3(M / threadsPerBlock.x, N / threadsPerBlock.y);
hipStreamCreate(&stream1);
hipStreamCreate(&stream2);
cudaStatus = hipMalloc((void**)&GPU_idata, TotalGPUSize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!\n");
goto MultiError;
}
cudaStatus = hipMalloc((void**)&GPU_odata, TotalGPUSize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!\n");
goto MultiError;
}
cudaStatus = hipMalloc((void**)&GPU_zerodata, TotalGPUSize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!\n");
goto MultiError;
}
hipEventRecord(time1, 0);
hipMemcpyAsync(GPU_idata, image.data, TotalGPUSize, hipMemcpyHostToDevice, stream1);
hipMemcpyAsync(GPU_odata, zero.data, TotalGPUSize, hipMemcpyHostToDevice, stream2);
hipEventRecord(time2, 0);
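// Note: the kernel below is launched on stream1 while GPU_odata is being filled by the
// hipMemcpyAsync issued on stream2; no cross-stream synchronization is performed here,
// so the two operations may overlap.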
hipLaunchKernelGGL(( rotate_kernel4), dim3(numBlocks), dim3(threadsPerBlock), 0, stream1, GPU_idata, GPU_odata, M, N, a, NumRot);
hipEventRecord(time3, 0);
// Check for errors immediately after kernel launch.
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess){
fprintf(stderr, "error code %d (%s) launching kernel!\n", cudaStatus, hipGetErrorString(cudaStatus));
goto MultiError;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess){
fprintf(stderr, "hipDeviceSynchronize returned error code %d (%s) after launching addKernel!\n", cudaStatus, hipGetErrorString(cudaStatus));
goto MultiError;
}
// Copy output (results) from GPU buffer to host (CPU) memory.
hipMemcpyAsync(CPU_OutputArray, GPU_odata, TotalGPUSize, hipMemcpyDeviceToHost, stream1);
hipEventRecord(time4, 0);
hipEventSynchronize(time1);
hipEventSynchronize(time2);
hipEventSynchronize(time3);
hipEventSynchronize(time4);
float totalTime, tfrCPUtoGPU, tfrGPUtoCPU, kernelExecutionTime;
hipEventElapsedTime(&totalTime, time1, time4);
hipEventElapsedTime(&tfrCPUtoGPU, time1, time2);
hipEventElapsedTime(&kernelExecutionTime, time2, time3);
hipEventElapsedTime(&tfrGPUtoCPU, time3, time4);
Runtimes[0] = totalTime;
Runtimes[1] = tfrCPUtoGPU;
Runtimes[2] = kernelExecutionTime;
Runtimes[3] = tfrGPUtoCPU;
hipStreamDestroy(stream1);
hipStreamDestroy(stream2);
a++;
MultiError:
hipFree(GPU_odata);
hipFree(GPU_idata);
hipFree(GPU_zerodata);
goto End;
}
End:
hipEventDestroy(time1);
hipEventDestroy(time2);
hipEventDestroy(time3);
hipEventDestroy(time4);
return cudaStatus;
}
|
cb1f8a48365c061954501037151cebe2f2da49f1.cu
|
#include <stdio.h>
#include <stdint.h>
#include <ctype.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
//CUDA STUFF:
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
//OpenCV stuff
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
cudaError_t launch_helper(Mat image, int *CPU_OutputArray, float* Runtimes);
//cudaError_t store_helper(Mat image, float* GPURuntimes);
//cudaError_t calculate_helper(int *CPU_OutputArray, float* Runtimes);
#define BOX_SIZE2 16 // ThreadsPerBlock == BOX_SIZE * BOX_SIZE
#define BOX_SIZE1 1
int M; //number of rows in image
int N; //number of columns in image
int NumRot;
int Mode;
int a = 0;
int stage = 0;
Mat zero;
//ip.Vpixels <--> M
//ip.Hpixels <--> N
__global__ void rotate_kernel1(uchar *GPU_i, uchar *GPU_o, int M, int N, int i, int j){ //baseline
//Block index
int bx = blockIdx.x;
int by = blockIdx.y;
//Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
__shared__ uchar shared_GPU_data[BOX_SIZE1][BOX_SIZE1];
int row = bx * BOX_SIZE1 + tx; //row of image
int col = by * BOX_SIZE1 + ty; //column of image
int idx = row*N + col; //which pixel in full 1D array
shared_GPU_data[tx][ty] = GPU_i[idx];
__syncthreads();
int h,v,c;
int row2; //new row of image
int col2; //new column of image
double X, Y, newY, newX, ScaleFactor;
double Diagonal, H, V;
double RotDegrees = 360.0 / (double)j * (double)i; //in degrees (floating-point division, so j need not divide 360)
double RotAngle = 2*3.141592/360.000*(double) RotDegrees; //in radians
//printf("We are rotating %d times and iteration# = %d RotAngle = %g\n", j, i, RotAngle);
// transpose image coordinates to Cartesian coordinates
// integer div
c = col;
h=N/2; //halfway of column pixels
v=M/2; //halfway of horizontal pixels
X=(double)c-(double)h;
Y=(double)v-(double)row;
// pixel rotation matrix
newX = cos(RotAngle) * X - sin(RotAngle) * Y;
newY= sin (RotAngle) * X + cos(RotAngle) * Y;
// Scale to fit everything in the image box CONFIRMED TO BE CORRECT
H=(double)N;
V=(double)M;
Diagonal=sqrt(H*H+V*V);
ScaleFactor=(N>M) ? V/Diagonal : H/Diagonal;
newX=newX*ScaleFactor;
newY = newY*ScaleFactor;
// convert back from Cartesian to image coordinates
col2= (int)newX+h;
row2=v-(int)newY;
// maps old pixel to new pixel
int idx2 = row2*N + col2;
GPU_o[idx2] = shared_GPU_data[tx][ty];
}
__global__ void rotate_kernel2(uchar *GPU_i, uchar *GPU_o, int M, int N, int i, int j){ //optimal blocksize
//Block index
int bx = blockIdx.x;
int by = blockIdx.y;
//Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
__shared__ uchar shared_GPU_data[BOX_SIZE2][BOX_SIZE2];
int row = bx * BOX_SIZE2 + tx; //row of image
int col = by * BOX_SIZE2 + ty; //column of image
int idx = row*N + col; //which pixel in full 1D array
shared_GPU_data[tx][ty] = GPU_i[idx];
__syncthreads();
int h,v,c;
int row2; //new row of image
int col2; //new column of image
double X, Y, newY, newX, ScaleFactor;
double Diagonal, H, V;
double RotDegrees = 360.0 / (double)j * (double)i; //in degrees (floating-point division, so j need not divide 360)
double RotAngle = 2*3.141592/360.000*(double) RotDegrees; //in radians
//printf("We are rotating %d times and iteration# = %d RotAngle = %g\n", j, i, RotAngle);
// transpose image coordinates to Cartesian coordinates
// integer div
c = col;
h=N/2; //halfway of column pixels
v=M/2; //halfway of horizontal pixels
X=(double)c-(double)h;
Y=(double)v-(double)row;
// pixel rotation matrix
newX = cos(RotAngle) * X - sin(RotAngle) * Y;
newY= sin (RotAngle) * X + cos(RotAngle) * Y;
// Scale to fit everything in the image box CONFIRMED TO BE CORRECT
H=(double)N;
V=(double)M;
Diagonal=sqrt(H*H+V*V);
ScaleFactor=(N>M) ? V/Diagonal : H/Diagonal;
newX=newX*ScaleFactor;
newY = newY*ScaleFactor;
// convert back from Cartesian to image coordinates
col2= (int)newX+h;
row2=v-(int)newY;
// maps old pixel to new pixel
int idx2 = row2*N + col2;
GPU_o[idx2] = shared_GPU_data[tx][ty];
}
__global__ void rotate_kernel3(uchar *GPU_i, uchar *GPU_o, int M, int N, int i, int j){ //new algorithm + optimal block size
int h,v;
int row2; //new row of image
int col2; //new column of image
double cc, ss, k1, k2;
double Y, newX, newY, ScaleFactor;
double Diagonal, H, V;
double CRA,SRA, CRAS, SRAS, SRAYS, CRAYS;
double RotDegrees = 360.0 / (double)j * (double)i; //in degrees (floating-point division, so j need not divide 360)
double RotAngle = 2*3.141592/360.000*(double) RotDegrees; //in radians
//printf("We are rotating %d times and iteration# = %d RotAngle = %g\n", j, i, RotAngle);
// transpose image coordinates to Cartesian coordinates
//Block index
int bx = blockIdx.x;
int by = blockIdx.y;
//Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
__shared__ uchar shared_GPU_data[BOX_SIZE2][BOX_SIZE2];
int row = bx * BOX_SIZE2 + tx; //row of image
int col = by * BOX_SIZE2 + ty; //column of image
int idx = row*N + col; //which pixel in full 1D array
shared_GPU_data[tx][ty] = GPU_i[idx];
__syncthreads();
H=(double) N;
V=(double) M;
Diagonal=sqrt(H*H+V*V);
ScaleFactor=(N>M) ? V/Diagonal : H/Diagonal;
CRA=cos(RotAngle); CRAS=ScaleFactor*CRA;
SRA=sin(RotAngle); SRAS=ScaleFactor*SRA;
// integer div
h=N/2; //halfway of column pixels
v=M/2; //halfway of horizontal pixels
cc=col*CRAS;
ss=col*SRAS;
Y=(double)v-(double)row;
SRAYS=SRAS*Y; CRAYS=CRAS*Y;
k1=CRAS*(double)h + SRAYS;
k2=SRAS*(double)h - CRAYS;
// pixel rotation matrix
newX=cc-k1;
newY=ss-k2;
// convert back from Cartesian to image coordinates
col2=((int) newX+h);
row2=v-(int)newY;
// maps old pixel to new pixel
int idx2 = row2*N + col2;
GPU_o[idx2] = shared_GPU_data[tx][ty];
}
__global__ void rotate_kernel4(uchar *GPU_i, uchar *GPU_o, int M, int N, int i, int j){ //asynchronous memcpy (no change here)
//Block index
int bx = blockIdx.x;
int by = blockIdx.y;
//Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
__shared__ uchar shared_GPU_data[6*BOX_SIZE2][16*BOX_SIZE2];
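// Only the [0..BOX_SIZE2-1][0..BOX_SIZE2-1] corner of this oversized shared array is ever
// indexed (tx,ty < BOX_SIZE2); the extra rows/columns simply reserve additional shared memory.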
int row = bx * BOX_SIZE2 + tx; //row of image
int col = by * BOX_SIZE2 + ty; //column of image
int idx = row*N + col; //which pixel in full 1D array
shared_GPU_data[tx][ty] = GPU_i[idx];
__syncthreads();
int h,v,c;
int row2; //new row of image
int col2; //new column of image
double X, Y, newY, newX, ScaleFactor;
double Diagonal, H, V;
double RotDegrees = 360.0 / (double)j * (double)i; //in degrees (floating-point division, so j need not divide 360)
double RotAngle = 2*3.141592/360.000*(double) RotDegrees; //in radians
//printf("We are rotating %d times and iteration# = %d RotAngle = %g\n", j, i, RotAngle);
// transpose image coordinates to Cartesian coordinates
// integer div
c = col;
h=N/2; //halfway of column pixels
v=M/2; //halfway of horizontal pixels
X=(double)c-(double)h;
Y=(double)v-(double)row;
// pixel rotation matrix
newX = cos(RotAngle) * X - sin(RotAngle) * Y;
newY= sin (RotAngle) * X + cos(RotAngle) * Y;
// Scale to fit everything in the image box
H=(double)N;
V=(double)M;
Diagonal=sqrt(H*H+V*V);
ScaleFactor=(N>M) ? V/Diagonal : H/Diagonal;
newX=newX*ScaleFactor;
newY = newY*ScaleFactor;
// convert back from Cartesian to image coordinates
col2= (int)newX+h;
row2=v-(int)newY;
// maps old pixel to new pixel
int idx2 = row2*N + col2;
GPU_o[idx2] = shared_GPU_data[tx][ty];
}
__global__ void rotate_kernel5(uchar *GPU_i, uchar *GPU_o, int M, int N, int i, int j){ //shared memory size change
int h,v;
int row2; //new row of image
int col2; //new column of image
double cc, ss, k1, k2;
double Y, newX, newY, ScaleFactor;
double Diagonal, H, V;
double CRA,SRA, CRAS, SRAS, SRAYS, CRAYS;
double RotDegrees = 360.0 / (double)j * (double)i; //in degrees (floating-point division, so j need not divide 360)
double RotAngle = 2*3.141592/360.000*(double) RotDegrees; //in radians
//printf("We are rotating %d times and iteration# = %d RotAngle = %g\n", j, i, RotAngle);
// transpose image coordinates to Cartesian coordinates
//Block index
int bx = blockIdx.x;
int by = blockIdx.y;
//Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
__shared__ uchar shared_GPU_data[6*BOX_SIZE2][16*BOX_SIZE2];
int row = bx * BOX_SIZE2 + tx; //row of image
int col = by * BOX_SIZE2 + ty; //column of image
int idx = row*N + col; //which pixel in full 1D array
shared_GPU_data[tx][ty] = GPU_i[idx];
__syncthreads();
//all of these values are constants
H=(double) N;
V=(double) M;
Diagonal=sqrt(H*H+V*V);
ScaleFactor=(N>M) ? V/Diagonal : H/Diagonal;
CRA=cos(RotAngle); CRAS=ScaleFactor*CRA;
SRA=sin(RotAngle); SRAS=ScaleFactor*SRA;
// integer div
h=N/2; //halfway of column pixels
v=M/2; //halfway of horizontal pixels
cc=col*CRAS;
ss=col*SRAS;
Y=(double)v-(double)row;
SRAYS=SRAS*Y; CRAYS=CRAS*Y;
k1=CRAS*(double)h + SRAYS;
k2=SRAS*(double)h - CRAYS;
// pixel rotation matrix
newX=cc-k1;
newY=ss-k2;
// convert back from Cartesian to image coordinates
col2=((int) newX+h);
row2=v-(int)newY;
// maps old pixel to new pixel
int idx2 = row2*N + col2;
GPU_o[idx2] = shared_GPU_data[tx][ty];
}
int main(int argc, char *argv[]){
float GPURuntimes[4]; // run times of the GPU code
float ExecTotalTime = 0.0f, GPUTotalTime = 0.0f; // running totals accumulated over all rotations, so start at zero
cudaError_t cudaStatus;
char filename[100]; //output file name
int i;
int *CPU_OutputArray = (int*) 0; // where the GPU should copy the output back to
if (argc != 5){
printf("Improper usage!\n");
printf("Usage: %s <input image> <output image> <N rotations> <mode [1-5]>\n", argv[0]);
exit(EXIT_FAILURE);
}
NumRot = atoi(argv[3]);
if (NumRot > 30){
printf("Number of rotations requested is too high! Adjusted to 30.\n");
NumRot = 30;
}
Mode = atoi(argv[4]);
if (Mode < 1 || Mode > 5){
printf("Improper usage! Mode must be between [1-5]\n");
exit(EXIT_FAILURE);
}
for (i = 0; i<NumRot; i++){
// Load image:
Mat image;
image = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
if (! image.data){
fprintf(stderr, "Could not open or find the image.\n");
exit(EXIT_FAILURE);
}
printf("Loaded image '%s', size = %dx%d (dims = %d).\n", argv[1], image.cols, image.rows, image.dims);
//set up global variables for image size
M = image.rows;
N = image.cols;
// Create CPU memory to store the output;
/*Mat */zero = Mat(M,N,CV_8UC1, Scalar(255)); //start by making every pixel white
sprintf(filename,"%sAROT%d.png", argv[2], i);
imwrite(filename,zero);
CPU_OutputArray = (int*) malloc(M*N*sizeof(int));
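// The device-to-host copy later transfers only M*N bytes (one uchar per pixel), so this
// int-sized allocation is larger than strictly needed; only its first M*N bytes are used.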
if (CPU_OutputArray == NULL){
fprintf(stderr, "OOPS. Can't create CPU_OutputArray using malloc() ...\n");
exit(EXIT_FAILURE);
}
//run it
cudaStatus = launch_helper(image, CPU_OutputArray, GPURuntimes);
if (cudaStatus != cudaSuccess){
fprintf(stderr, "launch_helper failed!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
printf("-----------------------------------------------------------------\n");
printf("Tfr CPU->GPU = %5.2f ms ... \nExecution = %5.2f ms ... \nTfr GPU->CPU = %5.2f ms \nSum of Iteration = %5.2f ms\n",
GPURuntimes[1], GPURuntimes[2], GPURuntimes[3], GPURuntimes[0]);
ExecTotalTime += GPURuntimes[0];
GPUTotalTime += GPURuntimes[2];
printf("\nGPU Execution Time = %5.2f ms \n", GPUTotalTime);
printf("Total Execution Time = %5.2f ms\n", ExecTotalTime);
printf("-----------------------------------------------------------------\n");
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess){
fprintf(stderr, "cudaDeviceReset failed!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
//save image to disk
Mat result = Mat(M,N,CV_8UC1, CPU_OutputArray);
if (!imwrite(filename, result)){
fprintf(stderr, "couldn't write output to disk!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
printf("Saved image '%s', size = %dx%d (dims = %d).\n",
//filename.c_str(), result.cols, result.rows, result.dims);
filename, result.cols, result.rows, result.dims);
free(CPU_OutputArray);
}
exit(EXIT_SUCCESS);
}
cudaError_t launch_helper(Mat image, int *CPU_OutputArray, float* Runtimes){
cudaEvent_t time1, time2, time3, time4;
int TotalGPUSize; // total size of 1 image in bytes
dim3 threadsPerBlock;
dim3 numBlocks;
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0); // use the first GPU
if (cudaStatus != cudaSuccess){
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
goto Error;
}
cudaEventCreate(&time1);
cudaEventCreate(&time2);
cudaEventCreate(&time3);
cudaEventCreate(&time4);
cudaEventRecord(time1, 0);
// Allocate GPU buffer for inputs and outputs:
TotalGPUSize = M * N * sizeof(uchar);
if (Mode != 4){
uchar *GPU_idata;
uchar *GPU_odata;
uchar *GPU_zerodata;
cudaStatus = cudaMalloc((void**)&GPU_idata, TotalGPUSize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!\n");
goto Error;
}
cudaStatus = cudaMalloc((void**)&GPU_odata, TotalGPUSize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!\n");
goto Error;
}
cudaStatus = cudaMalloc((void**)&GPU_zerodata, TotalGPUSize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!\n");
goto Error;
}
cudaStatus = cudaMemcpy(GPU_odata, zero.data, TotalGPUSize, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpyzero failed!\n");
goto Error;
}
cudaStatus = cudaMemcpy(GPU_idata, image.data, TotalGPUSize, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!\n");
goto Error;
}
cudaEventRecord(time2, 0);
// Launch a kernel on the GPU with one thread for each pixel.
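// Note: numBlocks below is computed with integer division, so M and N are assumed to be
// multiples of the chosen block size; any leftover rows/columns would not be covered.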
switch(Mode){
case 1 : threadsPerBlock = dim3(BOX_SIZE1, BOX_SIZE1);
numBlocks = dim3(M / threadsPerBlock.x, N / threadsPerBlock.y);
rotate_kernel1<<<numBlocks, threadsPerBlock>>>(GPU_idata, GPU_odata, M, N, a, NumRot);
break;
case 2 : threadsPerBlock = dim3(BOX_SIZE2, BOX_SIZE2);
numBlocks = dim3(M / threadsPerBlock.x, N / threadsPerBlock.y);
rotate_kernel2<<<numBlocks, threadsPerBlock>>>(GPU_idata, GPU_odata, M, N, a, NumRot);
break;
case 3 : threadsPerBlock = dim3(BOX_SIZE2, BOX_SIZE2);
numBlocks = dim3(M / threadsPerBlock.x, N / threadsPerBlock.y);
rotate_kernel3<<<numBlocks, threadsPerBlock>>>(GPU_idata, GPU_odata, M, N, a, NumRot);
break;
case 5 : threadsPerBlock = dim3(BOX_SIZE2, BOX_SIZE2);
numBlocks = dim3(M / threadsPerBlock.x, N / threadsPerBlock.y);
rotate_kernel5<<<numBlocks, threadsPerBlock>>>(GPU_idata, GPU_odata, M, N, a, NumRot);
break;
}
// Check for errors immediately after kernel launch.
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess){
fprintf(stderr, "error code %d (%s) launching kernel!\n", cudaStatus, cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d (%s) after launching addKernel!\n", cudaStatus, cudaGetErrorString(cudaStatus));
goto Error;
}
cudaEventRecord(time3, 0);
// Copy output (results) from GPU buffer to host (CPU) memory.
cudaStatus = cudaMemcpy(CPU_OutputArray, GPU_odata, TotalGPUSize, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!\n");
goto Error;
}
cudaEventRecord(time4, 0);
cudaEventSynchronize(time1);
cudaEventSynchronize(time2);
cudaEventSynchronize(time3);
cudaEventSynchronize(time4);
float totalTime, tfrCPUtoGPU, tfrGPUtoCPU, kernelExecutionTime;
cudaEventElapsedTime(&totalTime, time1, time4);
cudaEventElapsedTime(&tfrCPUtoGPU, time1, time2);
cudaEventElapsedTime(&kernelExecutionTime, time2, time3);
cudaEventElapsedTime(&tfrGPUtoCPU, time3, time4);
Runtimes[0] = totalTime;
Runtimes[1] = tfrCPUtoGPU;
Runtimes[2] = kernelExecutionTime;
Runtimes[3] = tfrGPUtoCPU;
a++;
Error:
cudaFree(GPU_odata);
cudaFree(GPU_idata);
cudaFree(GPU_zerodata);
goto End;
}
else if (Mode == 4){
uchar *GPU_idata;
uchar *GPU_odata;
uchar *GPU_zerodata;
cudaStream_t stream1, stream2;
threadsPerBlock = dim3(BOX_SIZE2, BOX_SIZE2);
numBlocks = dim3(M / threadsPerBlock.x, N / threadsPerBlock.y);
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
cudaStatus = cudaMalloc((void**)&GPU_idata, TotalGPUSize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!\n");
goto MultiError;
}
cudaStatus = cudaMalloc((void**)&GPU_odata, TotalGPUSize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!\n");
goto MultiError;
}
cudaStatus = cudaMalloc((void**)&GPU_zerodata, TotalGPUSize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!\n");
goto MultiError;
}
cudaEventRecord(time1, 0);
cudaMemcpyAsync(GPU_idata, image.data, TotalGPUSize, cudaMemcpyHostToDevice, stream1);
cudaMemcpyAsync(GPU_odata, zero.data, TotalGPUSize, cudaMemcpyHostToDevice, stream2);
cudaEventRecord(time2, 0);
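// Note: the kernel below is launched on stream1 while GPU_odata is being filled by the
// cudaMemcpyAsync issued on stream2; no cross-stream synchronization is performed here,
// so the two operations may overlap.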
rotate_kernel4<<<numBlocks, threadsPerBlock, 0, stream1>>>(GPU_idata, GPU_odata, M, N, a, NumRot);
cudaEventRecord(time3, 0);
// Check for errors immediately after kernel launch.
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess){
fprintf(stderr, "error code %d (%s) launching kernel!\n", cudaStatus, cudaGetErrorString(cudaStatus));
goto MultiError;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess){
fprintf(stderr, "cudaDeviceSynchronize returned error code %d (%s) after launching addKernel!\n", cudaStatus, cudaGetErrorString(cudaStatus));
goto MultiError;
}
// Copy output (results) from GPU buffer to host (CPU) memory.
cudaMemcpyAsync(CPU_OutputArray, GPU_odata, TotalGPUSize, cudaMemcpyDeviceToHost, stream1);
cudaEventRecord(time4, 0);
cudaEventSynchronize(time1);
cudaEventSynchronize(time2);
cudaEventSynchronize(time3);
cudaEventSynchronize(time4);
float totalTime, tfrCPUtoGPU, tfrGPUtoCPU, kernelExecutionTime;
cudaEventElapsedTime(&totalTime, time1, time4);
cudaEventElapsedTime(&tfrCPUtoGPU, time1, time2);
cudaEventElapsedTime(&kernelExecutionTime, time2, time3);
cudaEventElapsedTime(&tfrGPUtoCPU, time3, time4);
Runtimes[0] = totalTime;
Runtimes[1] = tfrCPUtoGPU;
Runtimes[2] = kernelExecutionTime;
Runtimes[3] = tfrGPUtoCPU;
cudaStreamDestroy(stream1);
cudaStreamDestroy(stream2);
a++;
MultiError:
cudaFree(GPU_odata);
cudaFree(GPU_idata);
cudaFree(GPU_zerodata);
goto End;
}
End:
cudaEventDestroy(time1);
cudaEventDestroy(time2);
cudaEventDestroy(time3);
cudaEventDestroy(time4);
return cudaStatus;
}
|
20d8c116a8d62878206b3857cd79c78d51f3e950.hip
|
// !!! This is a file automatically generated by hipify!!!
///This program computes the parallelized version of the FFT_DIF_DIT_TD algorithm
///(03/08/2016)
///This version is used to plot the absolute and relative errors in MATLAB (RADIX-2) 2^1 - 2^10
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hipfft.h>
#include <cufftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_complex.h>
#include <math.h>
#include <math_constants.h>
#include <iostream>
#include <time.h>
//////////////////////////////////////////////////////////////////////////
///////////////////////FUNCTION DECLARATIONS///////////////////////////////
//////////////////////////////////////////////////////////////////////////
void vector_entrada_xn(int N,int Li);
void arreglo_W(int N);
void asign_rap(int N,int Li,int Lo);
void factor(int N);
void product(int vector_1[50],int vector_2[50],int valor);
void etapa_entrada(void);
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y,int *flag_inputstage_1_d,int *flag_inputstage_2_d,int *flag_inputstage_3_d);
void etapa_intermedia(void);
void etapa_salida(void);
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X,int *flag_outputstage_1_d,int *flag_outputstage_2_d,int *flag_outputstage_3_d);
//////////////////////////////////////////////////////////////////////////
/////////////////////GLOBAL VARIABLE DECLARATIONS//////////////////////////
//////////////////////////////////////////////////////////////////////////
cuFloatComplex *x_host;
cuFloatComplex *W_host;
//cuFloatComplex *y_host;
//cuFloatComplex *z_host;
cuFloatComplex *X_host;
cuFloatComplex *x_device;
cuFloatComplex *W_device;
cuFloatComplex *y_device;
cuFloatComplex *z_device;
cuFloatComplex *X_device;
hipfftComplex *in,*out;
int *flag_inputstage_1,*flag_inputstage_2,*flag_inputstage_3,*flag_outputstage_1,*flag_outputstage_2,*flag_outputstage_3;
int *flag_inputstage_1_d,*flag_inputstage_2_d,*flag_inputstage_3_d,*flag_outputstage_1_d,*flag_outputstage_2_d,*flag_outputstage_3_d;
int Dip,Dop,P,N,Li,Lo;
int vF[50]; //Almacena los factores de N
int svF; //Almacena el numero de factores de N
int Prod[50];
int a;
#define inf 99999
//////////////////////////////////////////////////////////////////////////
//////////////////////////INPUT DATA///////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// N >>> Number of elements of the input vector
/// Li >>> Number of nonzero input elements
/// Lo >>> Number of required output elements
/// loop >>> Number of iterations
/// muestras >>> Number of samples
//////////////////////////////////////////////////////////////////////////
///////////////////////////OUTPUT DATA/////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// X >>> Output vector
//////////////////////////////////////////////////////////////////////////
/////////////////// THE INPUT DATA IS SET HERE ////////////////////////////
//////////////////////////////////////////////////////////////////////////
///Enter the required number of iterations
int loop = 1;
///Enter the required number of samples
const int muestras = 1;
//////////////////////////////////////////////////////////////////////////
//////////////////////////MAIN FUNCTION/////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Main function
int main()
{
int i,j,alea_real[1024],alea_imag[1024],i_N,l_res,j_res,k_res,incremento_j;
//float suma;
//float promedio[muestras];
///Create the binary files where the data will be stored
FILE *da;
FILE *db;
FILE *dc;
//FILE *dd;
FILE *fi_1;
FILE *fi_2;
FILE *fi_3;
FILE *fo_1;
FILE *fo_2;
FILE *fo_3;
da = fopen("Resultados_radix_2_real_CUDA.bin","a+b"); //Crea o sobre escribe archivo
db = fopen("Resultados_radix_2_imag_CUDA.bin","a+b"); //Crea o sobre escribe archivo
dc = fopen("Entrada_radix_2_CUDA.txt","w+t"); //Crea o sobre escribe archivo
//dd = fopen("TIEMPOS_FFT_DIF_DIT_TD_SECUENCIAL_CUDA.bin","a+b"); //Crea o sobre escribe archivo
fi_1 = fopen("Flag_inputstage_1_radix_2_CUDA.bin","a+b"); //Crea o sobre escribe archivo
fi_2 = fopen("Flag_inputstage_2_radix_2_CUDA.bin","a+b"); //Crea o sobre escribe archivo
fi_3 = fopen("Flag_inputstage_3_radix_2_CUDA.bin","a+b"); //Crea o sobre escribe archivo
fo_1 = fopen("Flag_outputstage_1_radix_2_CUDA.bin","a+b"); //Crea o sobre escribe archivo
fo_2 = fopen("Flag_outputstage_2_radix_2_CUDA.bin","a+b"); //Crea o sobre escribe archivo
fo_3 = fopen("Flag_outputstage_3_radix_2_CUDA.bin","a+b"); //Crea o sobre escribe archivo
///Generate the random input vector
srand (time(NULL)); //Use the system time as the seed
for(i = 0;i < 1024;i++)
{
alea_real[i]=rand()%11;
//alea_real[i]=i+1;
alea_imag[i]=rand()%11;
//alea_imag[i]=0;
fprintf(dc,"%d %d\n",alea_real[i],alea_imag[i]);
}
fclose(dc);
//Pausa
printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n");
getchar();
//Se reserva espacio para las flags
flag_inputstage_1 = (int *)malloc(1*sizeof(int));
flag_inputstage_2 = (int *)malloc(1*sizeof(int));
flag_inputstage_3 = (int *)malloc(1*sizeof(int));
flag_outputstage_1 = (int *)malloc(1*sizeof(int));
flag_outputstage_2 = (int *)malloc(1*sizeof(int));
flag_outputstage_3 = (int *)malloc(1*sizeof(int));
hipMalloc((int**)&flag_inputstage_1_d,1*sizeof(int));
hipMalloc((int**)&flag_inputstage_2_d,1*sizeof(int));
hipMalloc((int**)&flag_inputstage_3_d,1*sizeof(int));
hipMalloc((int**)&flag_outputstage_1_d,1*sizeof(int));
hipMalloc((int**)&flag_outputstage_2_d,1*sizeof(int));
hipMalloc((int**)&flag_outputstage_3_d,1*sizeof(int));
//Inicializaciones
incremento_j = 1;
flag_inputstage_1[0] = 0;
flag_inputstage_2[0] = 0;
flag_inputstage_3[0] = 0;
flag_outputstage_1[0] = 0;
flag_outputstage_2[0] = 0;
flag_outputstage_3[0] = 0;
for(i_N = 1;i_N <= 10;i_N++)
{
N = (int )pow(2,i_N);
printf("\n N = %d \n",N);
//Se reserva memoria para xn_host en el host
x_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
//Se reserva memoria para x_device y W_device
hipMalloc((void**)&x_device,N*sizeof(cuFloatComplex));
hipMalloc((void**)&W_device,N*sizeof(cuFloatComplex));
///Generate the vector x
for(l_res=0;l_res < N;l_res++)
{
//x_host[l_res] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%21));
x_host[l_res] = make_cuFloatComplex((float)alea_real[l_res],(float)alea_imag[l_res]);
//printf(" %d-> (%f) + (%f)\n",l_res+1,cuCrealf(x_host[l_res]),cuCimagf(x_host[l_res]));
}
///Se genera el arreglo W[N]
arreglo_W(N);
//Send the arrays x and W to the device global memory
hipMemcpy(x_device,x_host,N*sizeof(cuFloatComplex),hipMemcpyHostToDevice);
hipMemcpy(W_device,W_host,N*sizeof(cuFloatComplex),hipMemcpyHostToDevice);
for(j_res=incremento_j;j_res<=N;j_res=j_res+incremento_j)
{
Li=j_res;
for(k_res=incremento_j;k_res<=N;k_res=k_res+incremento_j)
{
Lo=k_res;
//printf("\n Li = %d Lo = %d",Li,Lo);
for(i=1;i<=muestras;i++)
{
//suma=0.0;
for(j=0;j<loop;j++)
{
//Comandos necesarios para medir el tiempo
//float elapsedTime_app;
//hipEvent_t start_app, stop_app;
//hipEventCreate(&start_app);
//hipEventCreate(&stop_app);
//---------------------------------------------------------------------------------------------
//Se empieza a medir el tiempo de ejecucion de la aplicacion
//hipEventRecord(start_app,0);
//Se generan en el host los valores del vector de entrada x[n]
//vector_entrada_xn(N,Li);
//Se generan en el host los valores del arreglo W[N]
//arreglo_W(N);
//Se generan en el host los factores Dip y Dop
asign_rap(N,Li,Lo);
//Compute the factor P on the host
P = N/(Dip*Dop);
//printf("\n\n FACTOR P:\n\n");
//printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
//Host helper function that runs the input stage
etapa_entrada();
//Host helper function that runs the intermediate stage
etapa_intermedia();
//Host helper function that runs the output stage
etapa_salida();
///Se imprimen los resultados en los archivos binarios
int m;
float *parte_real;
float *parte_imag;
parte_real = (float*) malloc(Lo*sizeof(float));
parte_imag = (float*) malloc(Lo*sizeof(float));
for(m=0;m<=Lo-1;m++)
{
parte_real[m]=cuCrealf(X_host[m]);
parte_imag[m]=cuCimagf(X_host[m]);
//printf("\n X[%d] = %.4f + (%.4f)",m,creal(X[m]),cimag(X[m]));
//fprintf(dc,"%f %f\n",creal(X[m]),cimag(X[m]));
}
fwrite(parte_real,sizeof(float),Lo,da);
fwrite(parte_imag,sizeof(float),Lo,db);
///Se leen los valores de las flags desde el device
hipMemcpy(flag_inputstage_1,flag_inputstage_1_d,1*sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(flag_inputstage_2,flag_inputstage_2_d,1*sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(flag_inputstage_3,flag_inputstage_3_d,1*sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(flag_outputstage_1,flag_outputstage_1_d,1*sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(flag_outputstage_2,flag_outputstage_2_d,1*sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(flag_outputstage_3,flag_outputstage_3_d,1*sizeof(int),hipMemcpyDeviceToHost);
///Se imprimen el valor de las flags en sus respectivos archivos binarios
fwrite(flag_inputstage_1,1*sizeof(int),1,fi_1);
fwrite(flag_inputstage_2,1*sizeof(int),1,fi_2);
fwrite(flag_inputstage_3,1*sizeof(int),1,fi_3);
fwrite(flag_outputstage_1,1*sizeof(int),1,fo_1);
fwrite(flag_outputstage_2,1*sizeof(int),1,fo_2);
fwrite(flag_outputstage_3,1*sizeof(int),1,fo_3);
/*
printf("\n flag_inputstage_1 = %d \n",flag_inputstage_1[0]);
printf("\n flag_inputstage_2 = %d \n",flag_inputstage_2[0]);
printf("\n flag_inputstage_3 = %d \n",flag_inputstage_3[0]);
printf("\n flag_outputstage_1 = %d \n",flag_outputstage_1[0]);
printf("\n flag_outputstage_2 = %d \n",flag_outputstage_2[0]);
printf("\n flag_outputstage_3 = %d \n",flag_outputstage_3[0]);
*/
//Se liberan memorias del Host y Device
//free(x_host);
//free(W_host);
//free(y_host);
//free(z_host);
free(X_host);
free(parte_real);
free(parte_imag);
//hipFree(x_device);
//hipFree(W_device);
hipFree(y_device);
hipFree(z_device);
hipFree(X_device);
//---------------------------------------------------------------------------------------------
//Comandos necesarios para medir el tiempo de la aplicacion (app)
//hipEventRecord(stop_app,0);
//hipEventSynchronize(stop_app);
//hipEventElapsedTime(&elapsedTime_app,start_app,stop_app);
//Suma de todos los tiempos
//suma = suma + elapsedTime_app;
//Se destruyen los eventos que miden el tiempo de la aplicacion
//hipEventDestroy(start_app);
//hipEventDestroy(stop_app);
///Se resetean las flags
flag_inputstage_1[0] = 0;
flag_inputstage_2[0] = 0;
flag_inputstage_3[0] = 0;
flag_outputstage_1[0] = 0;
flag_outputstage_2[0] = 0;
flag_outputstage_3[0] = 0;
}
//promedio[i-1] = suma/(float)loop;
//printf(" \n\n%d - Tiempo promedio para N = %ld >>> %f mS\n",i,N,promedio[i-1]);
}
//fwrite(promedio,sizeof(float),muestras,dd);
//fclose(dd);
}
}
free(x_host);
free(W_host);
hipFree(x_device);
hipFree(W_device);
}
fclose(da);
fclose(db);
fclose(fi_1);
fclose(fi_2);
fclose(fi_3);
fclose(fo_1);
fclose(fo_2);
fclose(fo_3);
free(flag_inputstage_1);
free(flag_inputstage_2);
free(flag_inputstage_3);
free(flag_outputstage_1);
free(flag_outputstage_2);
free(flag_outputstage_3);
hipFree(flag_inputstage_1_d);
hipFree(flag_inputstage_2_d);
hipFree(flag_inputstage_3_d);
hipFree(flag_outputstage_1_d);
hipFree(flag_outputstage_2_d);
hipFree(flag_outputstage_3_d);
return EXIT_SUCCESS;
}
//////////////////////////////////////////////////////////////////////////
/////////////////////////FUNCIONES SECUNDARIAS////////////////////////////
//////////////////////////////////////////////////////////////////////////
//This function generates the input vector x[n]
void vector_entrada_xn(int N,int Li)
{
//Local variable declarations
int k;
//Se reserva memoria para xn_host en el host
x_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
//Se dan valores a x[n]
for(k=0;k<N;k++)
{
if(k < Li)
{
//x[k] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%21));
x_host[k] = make_cuFloatComplex((float)(k + 1),(float)(0.0));
}
else
{
x_host[k] = make_cuFloatComplex((float)(0.0),(float)(0.0));
}
}
/*
//Se imprimen los valores de entrada x[n]
printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
for(k=0;k<N;k++)
{
printf(" %d-> (%f) + (%f)\n",k+1,cuCrealf(x_host[k]),cuCimagf(x_host[k]));
}
*/
}
//This function generates the array W
void arreglo_W(int N)
{
//Local variable declarations
int n;
//Se reserva memoria para W_host en el host
W_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
//Se genera el arreglo W
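// W[n-1] = cos(2*pi*n/N) - j*sin(2*pi*n/N) = exp(-j*2*pi*n/N): twiddle factors indexed from n = 1.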
for(n = 1;n <= N;n++)
{
W_host[n-1] = make_cuFloatComplex((float)cos((2*CUDART_PI*n)/N),(float)(-1)*sin((2*CUDART_PI*n)/N));
}
/*
//Se imprimen los valores del arreglo W[N]
printf("\n---ARREGLO W[N]---\n\n");
for(n = 0;n < N; n++)
{
printf(" W[%d]-> (%f) + (%f)\n",n+1,cuCrealf(W_host[n]),cuCimagf(W_host[n]));
}
*/
}
//This function generates the factors Dip and Dop
void asign_rap(int N,int Li,int Lo)
{
//Local variable declarations
float NLi,NLo,Diprapt,Doprapt;
int Nh[50];
int k[50];
int G;
int g,i,t,ta;
int Dipt[50],Dopt[50];
float distrapt,distrap;
int Pos,h,Poss;
int nk[50];
int r;
//Inicializaciones
G = 0;
svF = 0;
//Factores Dip y Dop ideales
NLi=(float)N/(float)Li;
NLo=(float)N/(float)Lo;
Diprapt=NLi;
Doprapt=NLo;
//Se encuentran los factores de "N"
//vF almacena los factores de "N"
//svF stores the number of factors of "N"
factor(N);
/*
Almacena en el vector Nh los factores que son diferentes de del vector vF
En el vector k se almacena la cantidad de veces que se repite cada
elemento almacenado en el vector Nh.
*/
Nh[0] = vF[0];
k[0]=1;
for(g=1;g<=svF-1;g=g+1)
{
if(vF[g]!=vF[g-1])
{
G=G+1;
Nh[G]=vF[g];
k[G]=1;
}
else
{
k[G]=k[G]+1;
}
}
/*
Almacena en el vector Nh todas las posibles combinaciones que den como
producto a N. t almacena el numero de elementos del vector Nh.
*/
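// product() expands the prime factorization (distinct primes in Nh, multiplicities in k)
// into all divisors of N; the divisors are written to the global Prod[] and their count is left in the global a.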
product(Nh,k,G);
t = a;
for(i=0;i<t;i=i+1)
{
Dipt[i]=Prod[i];
}
distrapt=inf;
for(g=1;g<=t;g=g+1)
{
if(Dipt[g-1]<=NLi)
{
Pos=g-1;
for(h=0;h<=G;h=h+1)
{
Poss=floor(Pos/(k[h]+1));
nk[h]=k[h]+Poss*(k[h]+1)-Pos;
Pos=Poss;
}
product(Nh,nk,G);
ta=a;
for(i=0;i<ta;i=i+1)
{
Dopt[i]=Prod[i];
}
////////////////////////////////////////////
//int j;
//for(j=0;j<ta;j++)
//{
// printf(" %d ",Dopt[j]);
//}
//printf("\n\n ta=%d\n\n",ta);
///////////////////////////////////////////
for(r=0;r<ta;r=r+1)
{
distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2));
if(distrap<distrapt)
{
distrapt=distrap;
Dip=Dipt[g-1];
Dop=Dopt[r];
}
}
}
}
/*
printf("\n\n FACTOR Dip :\n\n");
printf(" %d ",Dip);
printf("\n\n FACTOR Dop:\n\n");
printf(" %d ",Dop);
*/
}
//This function finds the factors of "N"
void factor(int N)
{
//Se empieza a verificar los factores desde 2
int i=2;
long N_factor;
N_factor = N;
while(i<=N_factor)
{
while((N_factor%i)==0)
{
vF[svF]=i;
N_factor=N_factor/i;
// printf("Factores: %d ",vF[svF]);
svF++;
}
i++;
}
}
//This function finds every possible combination of factors whose product is "N"
void product(int vector_1[50],int vector_2[50],int valor)
{
int d,e,s,pNh,i;
int cont=0;
Prod[0]=1;
a=1;
for(d=0;d<=valor;d=d+1)
{
s=a;
pNh=1;
for(e=1;e<=vector_2[d];e=e+1)
{
pNh=pNh*vector_1[d];
for(i=(s*e+1);i<=(s*e+s);i=i+1)
{
Prod[i-1]=pNh*Prod[cont];
cont=cont+1;
}
a=a+s;
cont=0;
}
}
}
//Host helper function that computes the input stage on the device
void etapa_entrada(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE ENTRADA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Local variable declarations
int k1,n1,n2;
//Device memory allocation
hipMalloc((void**)&y_device,P*Dip*Dop*sizeof(cuFloatComplex));
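// y is the P*Dip*Dop working array y(n1,n2,k1), linearized as (k1*Dop*P)+(n1*P)+n2.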
//Host memory allocation for "y"
//y_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Grid dimensioning for the "inputStage" kernel
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((P*Dop) < 32 && (Dip) < 32)
{
blockDim.x = (P*Dop);
blockDim.y = (Dip);
gridDim.x = 1;
gridDim.y = 1;
}
else
{
blockDim.x = 32;
blockDim.y = 32;
gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x));
gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y));
}
//Lanzamiento del kernel "inputStage_kernel"
hipLaunchKernelGGL(( inputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Li,Dip,Dop,P,x_device,W_device,y_device,flag_inputstage_1_d,flag_inputstage_2_d,flag_inputstage_3_d);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
/*
//Copia del arreglo "y" del device hacia el host
hipMemcpy(y_host,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToHost);
//Se imprimen los valores de "y"
printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(n2 = 0;n2 < P;n2++)
{
printf(" (%f) + (%f) ",cuCrealf(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimagf(y_host[(k1*Dop*P)+(n1*P)+n2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//Kernel function that executes the input stage on the device
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y,int *flag_inputstage_1_d,int *flag_inputstage_2_d,int *flag_inputstage_3_d)
{
int n1,n2;
cuFloatComplex t1;
//Threads
int n = blockDim.x *blockIdx.x + threadIdx.x;
int k1 = blockDim.y *blockIdx.y + threadIdx.y;
//Se resetean las flags
flag_inputstage_1_d[0] = 0;
flag_inputstage_2_d[0] = 0;
flag_inputstage_3_d[0] = 0;
//printf("\n n = %d k1 = %d",n,k1);
if( (n < (P*Dop)) && (k1 < Dip))
{
n2 = floorf(n/Dop);
n1 = n - (Dop*n2);
//Generate the elements that depend on x[0]
if(n == 0)
{
y[(k1*Dop*P)+(0*P)+ 0] = x[0];
///Flag
flag_inputstage_1_d[0] = 1;
}
//Mapeo de x[n] a las entradas del primer conjunto de Dop DFT's
if((n >= 1) && (n <= (Li-1)))
{
t1 = x[n];
if(k1 == 0)
{
y[(0*Dop*P)+(n1*P)+ n2] = t1;
}
if(k1 >= 1)
{
y[(k1*Dop*P)+(n1*P)+ n2] = cuCmulf(W[((n*k1)%N)-1],t1);
}
///Flag
flag_inputstage_2_d[0] = 1;
}
//Rellenado de ceros para los elementos de "y" para Li <= n <= (P*Dop)-1
if((n >= Li) && (n <= (P*Dop)-1))
{
y[(k1*Dop*P)+(n1*P)+ n2] = make_cuFloatComplex(0.0,0.0);
///Flag
flag_inputstage_3_d[0] = 1;
}
//printf("\n (%f) + (%f)\n ",cuCrealf(y[(k1*Dop*P)+(n1*P)+ n2]),cuCimagf(y[(k1*Dop*P)+(n1*P)+ n2]));
}
}
//Host helper function that computes the intermediate stage on the device
void etapa_intermedia(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA INTERMEDIA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Local variable declarations
int k1,k2,n1;
int n[1] = {P};
int inembed[1] = {P};
int onembed[1] = {P};
//Asignacin de memoria en el device para "z"
hipMalloc((void**)&z_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Asignacin de memoria en el host para "z"
//z_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Asignacin de memoria en el device para "in" y "out"
hipMalloc((void**)&in,sizeof(hipfftComplex)*P*Dip*Dop);
hipMalloc((void**)&out,sizeof(hipfftComplex)*P*Dip*Dop);
//Se copia el arreglo "y" al arreglo "in"
hipMemcpy(in,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice);
//Se crea un plan
hipfftHandle plan;
hipfftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,HIPFFT_C2C,Dip*Dop);
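// Batched 1-D C2C plan: Dip*Dop transforms of length P, stored contiguously with stride 1
// and distance P between consecutive transforms, matching the layout of "y"/"in".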
//Execute the plan
hipfftExecC2C(plan,in,out,HIPFFT_FORWARD);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
//Se copian los datos del arreglo "out" al arreglo "z_device"
hipMemcpy(z_device,out,sizeof(hipfftComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice);
//Se destruye el plan
hipfftDestroy(plan);
//Se liberan los arreglos "in" y "out"
hipFree(in);
hipFree(out);
/*
//Se copian los datos del arreglo "z_device" al arreglo "z_host"
hipMemcpy(z_host,z_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToHost);
///Se imprimen los valores de z(n1,k2,k1)
printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(k2 = 0;k2 < P;k2++)
{
printf(" (%f) + (%f) ",cuCrealf(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimagf(z_host[(k1*Dop*P)+(n1*P)+k2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//Host helper function that computes the output stage on the device
void etapa_salida(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE SALIDA///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Local variable declarations
int m;
//Device memory allocation for "X"
hipMalloc((void**)&X_device,Lo*sizeof(cuFloatComplex));
//Host memory allocation for "X"
X_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Lo);
//Grid dimensioning for the "outputStage" kernel
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((Lo) < 1024)
{
blockDim.x = Lo;
gridDim.x = 1;
}
else
{
blockDim.x = 1024;
gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x));
}
//Lanzamiento del kernel "outputStage_kernel"
hipLaunchKernelGGL(( outputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Lo,Dip,Dop,P,z_device,W_device,X_device,flag_outputstage_1_d,flag_outputstage_2_d,flag_outputstage_3_d);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
//Copia del arreglo "X" del device hacia el host
hipMemcpy(X_host,X_device,sizeof(cuFloatComplex)*Lo,hipMemcpyDeviceToHost);
/*
//Se imprimen los valores de "X_host"
///Imprimir X[k]
printf("\n\n--- ARREGLO X[k] ---\n\n");
for(m=0;m<=Lo-1;m++)
{
printf("\n X[%d] = %.4f + (%.4f)",m,cuCrealf(X_host[m]),cuCimagf(X_host[m]));
//fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i]));
}
*/
}
//Kernel function that executes the output stage on the device
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X,int *flag_outputstage_1_d,int *flag_outputstage_2_d,int *flag_outputstage_3_d)
{
//Local variable declarations
int n1,k_aux,k1,k2,a,b;
cuFloatComplex t1,t2,t3,t4,t5;
//Threads
int k = blockDim.x *blockIdx.x + threadIdx.x;
//Se resetean las flags
flag_outputstage_1_d[0] = 0;
flag_outputstage_2_d[0] = 0;
flag_outputstage_3_d[0] = 0;
if(k < Lo)
{
for(n1 = 0; n1 <= (Dop-1); n1 = n1+1)
{
if(Lo <= Dip)
{
//Compute X(k) for 0<=k<=Lo-1.
//printf("\n--- Caso (Lo <= Dip) ---\n");
//In the decomposition k = k1 + Dip*k2; k2 = 0, and therefore k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
///Flag
flag_outputstage_1_d[0] = 1;
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
///Flag
flag_outputstage_1_d[0] = 1;
}
}
else
{
if((k >= 0) && (k <= (Dip-1)))
{
//Compute X(k) for 0<=k<=Dip-1.
//In the decomposition k = k1 + Dip*k2; k2 = 0, and therefore k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
}
}
else
{
if(Dop <= 4)
{
//Using the direct method
//printf("\n--- Caso (Metodo directo) ---\n");
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
///Flag
flag_outputstage_2_d[0] = 1;
}
else
{
if(n1 == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
}
a = floorf(k/(Dip*P));
X[k] = cuCaddf(X[k],cuCmulf(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1]));
///Flag
flag_outputstage_2_d[0] = 1;
}
}
else
{
//Using the 2BF filtering method
//printf("\n--- Caso (Filtro 2BF) ---\n");
if((Dop-2) >= 1)
{
if(n1 == 0)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
///Flag
flag_outputstage_3_d[0] = 1;
}
if((n1 >= 1) && (n1 <= (Dop-2)))
{
t2 = t1;
t1 = cuCaddf(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4);
t3 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t4 = cuCsubf(t3,t2);
}
if(n1 == (Dop-1))
{
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
}
}
else
{
if(Dop == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
X[k] = t1;
///Flag
flag_outputstage_3_d[0] = 1;
}
else
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
///Flag
flag_outputstage_3_d[0] = 1;
}
}
}
}
}
}
}
}
|
20d8c116a8d62878206b3857cd79c78d51f3e950.cu
|
///Ésta programa calcula la versión paralelizada del algoritmo FFT_DIF_DIT_TD
///(03/08/2016)
///Ésta versión sirve para graficar en matlab los errores absolutos y relativos (RADIX-2) 2^1 - 2^10
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cufft.h>
#include <cufftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuComplex.h>
#include <math.h>
#include <math_constants.h>
#include <iostream>
#include <time.h>
//////////////////////////////////////////////////////////////////////////
///////////////////////DECLARACIÓN DE FUNCIONES///////////////////////////
//////////////////////////////////////////////////////////////////////////
void vector_entrada_xn(int N,int Li);
void arreglo_W(int N);
void asign_rap(int N,int Li,int Lo);
void factor(int N);
void product(int vector_1[50],int vector_2[50],int valor);
void etapa_entrada(void);
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y,int *flag_inputstage_1_d,int *flag_inputstage_2_d,int *flag_inputstage_3_d);
void etapa_intermedia(void);
void etapa_salida(void);
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X,int *flag_outputstage_1_d,int *flag_outputstage_2_d,int *flag_outputstage_3_d);
//////////////////////////////////////////////////////////////////////////
/////////////////////DECLARACIÓN DE VARIABLES GLOBALES////////////////////
//////////////////////////////////////////////////////////////////////////
cuFloatComplex *x_host;
cuFloatComplex *W_host;
//cuFloatComplex *y_host;
//cuFloatComplex *z_host;
cuFloatComplex *X_host;
cuFloatComplex *x_device;
cuFloatComplex *W_device;
cuFloatComplex *y_device;
cuFloatComplex *z_device;
cuFloatComplex *X_device;
cufftComplex *in,*out;
int *flag_inputstage_1,*flag_inputstage_2,*flag_inputstage_3,*flag_outputstage_1,*flag_outputstage_2,*flag_outputstage_3;
int *flag_inputstage_1_d,*flag_inputstage_2_d,*flag_inputstage_3_d,*flag_outputstage_1_d,*flag_outputstage_2_d,*flag_outputstage_3_d;
int Dip,Dop,P,N,Li,Lo;
int vF[50]; //Almacena los factores de N
int svF; //Almacena el numero de factores de N
int Prod[50];
int a;
#define inf 99999
//////////////////////////////////////////////////////////////////////////
//////////////////////////DATOS DE ENTRADA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// N >>> Número de elementos del vector de entrada
/// Li >>> Número de elementos de entrada diferentes de cero
/// Lo >>> Número de elementos de salida requeridos
/// loop >>> Número de iteraciones
/// muestras >>> Número de muestras
//////////////////////////////////////////////////////////////////////////
///////////////////////////DATOS DE SALIDA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// X >>> Vector de salida
//////////////////////////////////////////////////////////////////////////
/////////////////// SE INGRESAN LOS DATOS DE ENTRADA /////////////////////
//////////////////////////////////////////////////////////////////////////
///Ingrese el número de iteraciones requeridas
int loop = 1;
///Ingrese el número de muestras requeridas
const int muestras = 1;
//////////////////////////////////////////////////////////////////////////
//////////////////////////FUNCION PRINCIPAL///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Función principal
int main()
{
int i,j,alea_real[1024],alea_imag[1024],i_N,l_res,j_res,k_res,incremento_j;
//float suma;
//float promedio[muestras];
///Se crean los archivos binarios donde se guardarán los datos
FILE *da;
FILE *db;
FILE *dc;
//FILE *dd;
FILE *fi_1;
FILE *fi_2;
FILE *fi_3;
FILE *fo_1;
FILE *fo_2;
FILE *fo_3;
da = fopen("Resultados_radix_2_real_CUDA.bin","a+b"); //Crea o sobre escribe archivo
db = fopen("Resultados_radix_2_imag_CUDA.bin","a+b"); //Crea o sobre escribe archivo
dc = fopen("Entrada_radix_2_CUDA.txt","w+t"); //Crea o sobre escribe archivo
//dd = fopen("TIEMPOS_FFT_DIF_DIT_TD_SECUENCIAL_CUDA.bin","a+b"); //Crea o sobre escribe archivo
fi_1 = fopen("Flag_inputstage_1_radix_2_CUDA.bin","a+b"); //Crea o sobre escribe archivo
fi_2 = fopen("Flag_inputstage_2_radix_2_CUDA.bin","a+b"); //Crea o sobre escribe archivo
fi_3 = fopen("Flag_inputstage_3_radix_2_CUDA.bin","a+b"); //Crea o sobre escribe archivo
fo_1 = fopen("Flag_outputstage_1_radix_2_CUDA.bin","a+b"); //Crea o sobre escribe archivo
fo_2 = fopen("Flag_outputstage_2_radix_2_CUDA.bin","a+b"); //Crea o sobre escribe archivo
fo_3 = fopen("Flag_outputstage_3_radix_2_CUDA.bin","a+b"); //Crea o sobre escribe archivo
///Generación de vector de entrada aleatorio
srand (time(NULL)); //Utilizo la hr del sistema como semilla
for(i = 0;i < 1024;i++)
{
alea_real[i]=rand()%11;
//alea_real[i]=i+1;
alea_imag[i]=rand()%11;
//alea_imag[i]=0;
fprintf(dc,"%d %d\n",alea_real[i],alea_imag[i]);
}
fclose(dc);
//Pausa
printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n");
getchar();
//Se reserva espacio para las flags
flag_inputstage_1 = (int *)malloc(1*sizeof(int));
flag_inputstage_2 = (int *)malloc(1*sizeof(int));
flag_inputstage_3 = (int *)malloc(1*sizeof(int));
flag_outputstage_1 = (int *)malloc(1*sizeof(int));
flag_outputstage_2 = (int *)malloc(1*sizeof(int));
flag_outputstage_3 = (int *)malloc(1*sizeof(int));
cudaMalloc((int**)&flag_inputstage_1_d,1*sizeof(int));
cudaMalloc((int**)&flag_inputstage_2_d,1*sizeof(int));
cudaMalloc((int**)&flag_inputstage_3_d,1*sizeof(int));
cudaMalloc((int**)&flag_outputstage_1_d,1*sizeof(int));
cudaMalloc((int**)&flag_outputstage_2_d,1*sizeof(int));
cudaMalloc((int**)&flag_outputstage_3_d,1*sizeof(int));
//Inicializaciones
incremento_j = 1;
flag_inputstage_1[0] = 0;
flag_inputstage_2[0] = 0;
flag_inputstage_3[0] = 0;
flag_outputstage_1[0] = 0;
flag_outputstage_2[0] = 0;
flag_outputstage_3[0] = 0;
for(i_N = 1;i_N <= 10;i_N++)
{
N = (int )pow(2,i_N);
printf("\n N = %d \n",N);
//Se reserva memoria para xn_host en el host
x_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
//Se reserva memoria para x_device y W_device
cudaMalloc((void**)&x_device,N*sizeof(cuFloatComplex));
cudaMalloc((void**)&W_device,N*sizeof(cuFloatComplex));
///Generación del vector x
for(l_res=0;l_res < N;l_res++)
{
//x_host[l_res] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%21));
x_host[l_res] = make_cuFloatComplex((float)alea_real[l_res],(float)alea_imag[l_res]);
//printf(" %d-> (%f) + (%f)\n",l_res+1,cuCrealf(x_host[l_res]),cuCimagf(x_host[l_res]));
}
///Se genera el arreglo W[N]
arreglo_W(N);
//Envío de los arreglos x y W hacia la memoria global del device
cudaMemcpy(x_device,x_host,N*sizeof(cuFloatComplex),cudaMemcpyHostToDevice);
cudaMemcpy(W_device,W_host,N*sizeof(cuFloatComplex),cudaMemcpyHostToDevice);
for(j_res=incremento_j;j_res<=N;j_res=j_res+incremento_j)
{
Li=j_res;
for(k_res=incremento_j;k_res<=N;k_res=k_res+incremento_j)
{
Lo=k_res;
//printf("\n Li = %d Lo = %d",Li,Lo);
for(i=1;i<=muestras;i++)
{
//suma=0.0;
for(j=0;j<loop;j++)
{
//Comandos necesarios para medir el tiempo
//float elapsedTime_app;
//cudaEvent_t start_app, stop_app;
//cudaEventCreate(&start_app);
//cudaEventCreate(&stop_app);
//---------------------------------------------------------------------------------------------
//Se empieza a medir el tiempo de ejecucion de la aplicacion
//cudaEventRecord(start_app,0);
//Se generan en el host los valores del vector de entrada x[n]
//vector_entrada_xn(N,Li);
//Se generan en el host los valores del arreglo W[N]
//arreglo_W(N);
//Se generan en el host los factores Dip y Dop
asign_rap(N,Li,Lo);
//Cálculo en el host del factor P
P = N/(Dip*Dop);
//printf("\n\n FACTOR P:\n\n");
//printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
//Función auxiliar del host para ejecutar la etapa de entrada
etapa_entrada();
//Función auxiliar del host para ejecutar la etapa intermedia
etapa_intermedia();
//Función auxiliar del host para ejecutar la etapa de salida
etapa_salida();
///Se imprimen los resultados en los archivos binarios
int m;
float *parte_real;
float *parte_imag;
parte_real = (float*) malloc(Lo*sizeof(float));
parte_imag = (float*) malloc(Lo*sizeof(float));
for(m=0;m<=Lo-1;m++)
{
parte_real[m]=cuCrealf(X_host[m]);
parte_imag[m]=cuCimagf(X_host[m]);
//printf("\n X[%d] = %.4f + (%.4f)",m,creal(X[m]),cimag(X[m]));
//fprintf(dc,"%f %f\n",creal(X[m]),cimag(X[m]));
}
fwrite(parte_real,sizeof(float),Lo,da);
fwrite(parte_imag,sizeof(float),Lo,db);
///Se leen los valores de las flags desde el device
cudaMemcpy(flag_inputstage_1,flag_inputstage_1_d,1*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(flag_inputstage_2,flag_inputstage_2_d,1*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(flag_inputstage_3,flag_inputstage_3_d,1*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(flag_outputstage_1,flag_outputstage_1_d,1*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(flag_outputstage_2,flag_outputstage_2_d,1*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(flag_outputstage_3,flag_outputstage_3_d,1*sizeof(int),cudaMemcpyDeviceToHost);
///Se imprimen el valor de las flags en sus respectivos archivos binarios
fwrite(flag_inputstage_1,1*sizeof(int),1,fi_1);
fwrite(flag_inputstage_2,1*sizeof(int),1,fi_2);
fwrite(flag_inputstage_3,1*sizeof(int),1,fi_3);
fwrite(flag_outputstage_1,1*sizeof(int),1,fo_1);
fwrite(flag_outputstage_2,1*sizeof(int),1,fo_2);
fwrite(flag_outputstage_3,1*sizeof(int),1,fo_3);
/*
printf("\n flag_inputstage_1 = %d \n",flag_inputstage_1[0]);
printf("\n flag_inputstage_2 = %d \n",flag_inputstage_2[0]);
printf("\n flag_inputstage_3 = %d \n",flag_inputstage_3[0]);
printf("\n flag_outputstage_1 = %d \n",flag_outputstage_1[0]);
printf("\n flag_outputstage_2 = %d \n",flag_outputstage_2[0]);
printf("\n flag_outputstage_3 = %d \n",flag_outputstage_3[0]);
*/
//Se liberan memorias del Host y Device
//free(x_host);
//free(W_host);
//free(y_host);
//free(z_host);
free(X_host);
free(parte_real);
free(parte_imag);
//cudaFree(x_device);
//cudaFree(W_device);
cudaFree(y_device);
cudaFree(z_device);
cudaFree(X_device);
//---------------------------------------------------------------------------------------------
//Comandos necesarios para medir el tiempo de la aplicacion (app)
//cudaEventRecord(stop_app,0);
//cudaEventSynchronize(stop_app);
//cudaEventElapsedTime(&elapsedTime_app,start_app,stop_app);
//Suma de todos los tiempos
//suma = suma + elapsedTime_app;
//Se destruyen los eventos que miden el tiempo de la aplicacion
//cudaEventDestroy(start_app);
//cudaEventDestroy(stop_app);
///Se resetean las flags
flag_inputstage_1[0] = 0;
flag_inputstage_2[0] = 0;
flag_inputstage_3[0] = 0;
flag_outputstage_1[0] = 0;
flag_outputstage_2[0] = 0;
flag_outputstage_3[0] = 0;
}
//promedio[i-1] = suma/(float)loop;
//printf(" \n\n%d - Tiempo promedio para N = %ld >>> %f mS\n",i,N,promedio[i-1]);
}
//fwrite(promedio,sizeof(float),muestras,dd);
//fclose(dd);
}
}
free(x_host);
free(W_host);
cudaFree(x_device);
cudaFree(W_device);
}
fclose(da);
fclose(db);
fclose(fi_1);
fclose(fi_2);
fclose(fi_3);
fclose(fo_1);
fclose(fo_2);
fclose(fo_3);
free(flag_inputstage_1);
free(flag_inputstage_2);
free(flag_inputstage_3);
free(flag_outputstage_1);
free(flag_outputstage_2);
free(flag_outputstage_3);
cudaFree(flag_inputstage_1_d);
cudaFree(flag_inputstage_2_d);
cudaFree(flag_inputstage_3_d);
cudaFree(flag_outputstage_1_d);
cudaFree(flag_outputstage_2_d);
cudaFree(flag_outputstage_3_d);
return EXIT_SUCCESS;
}
//////////////////////////////////////////////////////////////////////////
/////////////////////////FUNCIONES SECUNDARIAS////////////////////////////
//////////////////////////////////////////////////////////////////////////
//This function generates the input vector x[n]
void vector_entrada_xn(int N,int Li)
{
//Local variable declarations
int k;
//Memory is reserved for x_host on the host
x_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
//Values are assigned to x[n]
for(k=0;k<N;k++)
{
if(k < Li)
{
//x[k] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%21));
x_host[k] = make_cuFloatComplex((float)(k + 1),(float)(0.0));
}
else
{
x_host[k] = make_cuFloatComplex((float)(0.0),(float)(0.0));
}
}
/*
//Se imprimen los valores de entrada x[n]
printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
for(k=0;k<N;k++)
{
printf(" %d-> (%f) + (%f)\n",k+1,cuCrealf(x_host[k]),cuCimagf(x_host[k]));
}
*/
}
//This function generates the W array
void arreglo_W(int N)
{
//Local variable declarations
int n;
//Memory is reserved for W_host on the host
W_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
//The W array is generated
for(n = 1;n <= N;n++)
{
W_host[n-1] = make_cuFloatComplex((float)cos((2*CUDART_PI*n)/N),(float)(-1)*sin((2*CUDART_PI*n)/N));
}
/*
//Se imprimen los valores del arreglo W[N]
printf("\n---ARREGLO W[N]---\n\n");
for(n = 0;n < N; n++)
{
printf(" W[%d]-> (%f) + (%f)\n",n+1,cuCrealf(W_host[n]),cuCimagf(W_host[n]));
}
*/
}
//This function generates the Dip and Dop factors
void asign_rap(int N,int Li,int Lo)
{
//Local variable declarations
float NLi,NLo,Diprapt,Doprapt;
int Nh[50];
int k[50];
int G;
int g,i,t,ta;
int Dipt[50],Dopt[50];
float distrapt,distrap;
int Pos,h,Poss;
int nk[50];
int r;
//Initializations
G = 0;
svF = 0;
//Ideal Dip and Dop factors
NLi=(float)N/(float)Li;
NLo=(float)N/(float)Lo;
Diprapt=NLi;
Doprapt=NLo;
//The factors of "N" are found
//vF stores the factors of "N"
//svF stores the number of factors of "N"
factor(N);
/*
Store in the vector Nh the distinct factors found in the vector vF.
The vector k stores how many times each element of Nh is repeated.
*/
Nh[0] = vF[0];
k[0]=1;
for(g=1;g<=svF-1;g=g+1)
{
if(vF[g]!=vF[g-1])
{
G=G+1;
Nh[G]=vF[g];
k[G]=1;
}
else
{
k[G]=k[G]+1;
}
}
/*
Generate all possible factor combinations whose product equals N
(returned in Prod); t stores the number of generated combinations.
*/
product(Nh,k,G);
t = a;
for(i=0;i<t;i=i+1)
{
Dipt[i]=Prod[i];
}
distrapt=inf;
for(g=1;g<=t;g=g+1)
{
if(Dipt[g-1]<=NLi)
{
Pos=g-1;
for(h=0;h<=G;h=h+1)
{
Poss=floor(Pos/(k[h]+1));
nk[h]=k[h]+Poss*(k[h]+1)-Pos;
Pos=Poss;
}
product(Nh,nk,G);
ta=a;
for(i=0;i<ta;i=i+1)
{
Dopt[i]=Prod[i];
}
////////////////////////////////////////////
//int j;
//for(j=0;j<ta;j++)
//{
// printf(" %d ",Dopt[j]);
//}
//printf("\n\n ta=%d\n\n",ta);
///////////////////////////////////////////
for(r=0;r<ta;r=r+1)
{
distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2));
if(distrap<distrapt)
{
distrapt=distrap;
Dip=Dipt[g-1];
Dop=Dopt[r];
}
}
}
}
/*
printf("\n\n FACTOR Dip :\n\n");
printf(" %d ",Dip);
printf("\n\n FACTOR Dop:\n\n");
printf(" %d ",Dop);
*/
}
//This function finds the factors of "N"
void factor(int N)
{
//Factor checking starts from 2
int i=2;
long N_factor;
N_factor = N;
while(i<=N_factor)
{
while((N_factor%i)==0)
{
vF[svF]=i;
N_factor=N_factor/i;
// printf("Factores: %d ",vF[svF]);
svF++;
}
i++;
}
}
//This function finds all the possible factor combinations whose product is "N"
void product(int vector_1[50],int vector_2[50],int valor)
{
int d,e,s,pNh,i;
int cont=0;
Prod[0]=1;
a=1;
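//Prod accumulates every generated product and the global "a" counts how many
//products have been produced so far (read back by the caller as the result size).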
for(d=0;d<=valor;d=d+1)
{
s=a;
pNh=1;
for(e=1;e<=vector_2[d];e=e+1)
{
pNh=pNh*vector_1[d];
for(i=(s*e+1);i<=(s*e+s);i=i+1)
{
Prod[i-1]=pNh*Prod[cont];
cont=cont+1;
}
a=a+s;
cont=0;
}
}
}
//Host helper function that computes the input stage on the device
void etapa_entrada(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////INPUT STAGE////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Local variable declarations
int k1,n1,n2;
//Memory allocation on the device
cudaMalloc((void**)&y_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Memory allocation on the host for "y"
//y_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Grid sizing for the "inputStage" kernel function
//Grid dimensions
dim3 gridDim(1,1,1);
//Block dimensions
dim3 blockDim(1,1,1);
if((P*Dop) < 32 && (Dip) < 32)
{
blockDim.x = (P*Dop);
blockDim.y = (Dip);
gridDim.x = 1;
gridDim.y = 1;
}
else
{
blockDim.x = 32;
blockDim.y = 32;
gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x));
gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y));
}
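//Note: a 32x32 block matches the usual 1024-threads-per-block limit; the grid
//is sized so the launched threads cover the full (P*Dop) x Dip index space.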
//Launch of the "inputStage_kernel" kernel
inputStage_kernel<<<gridDim,blockDim>>>(N,Li,Dip,Dop,P,x_device,W_device,y_device,flag_inputstage_1_d,flag_inputstage_2_d,flag_inputstage_3_d);
//Wait until the kernel has finished executing completely
cudaDeviceSynchronize();
/*
//Copia del arreglo "y" del device hacia el host
cudaMemcpy(y_host,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost);
//Se imprimen los valores de "y"
printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(n2 = 0;n2 < P;n2++)
{
printf(" (%f) + (%f) ",cuCrealf(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimagf(y_host[(k1*Dop*P)+(n1*P)+n2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//Kernel function that executes the input stage on the device
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y,int *flag_inputstage_1_d,int *flag_inputstage_2_d,int *flag_inputstage_3_d)
{
int n1,n2;
cuFloatComplex t1;
//Threads
int n = blockDim.x *blockIdx.x + threadIdx.x;
int k1 = blockDim.y *blockIdx.y + threadIdx.y;
//The flags are reset
flag_inputstage_1_d[0] = 0;
flag_inputstage_2_d[0] = 0;
flag_inputstage_3_d[0] = 0;
//printf("\n n = %d k1 = %d",n,k1);
if( (n < (P*Dop)) && (k1 < Dip))
{
n2 = floorf(n/Dop);
n1 = n - (Dop*n2);
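//n is decomposed as n = n1 + Dop*n2, with 0 <= n1 <= Dop-1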
//Generation of the elements that depend on x[0]
if(n == 0)
{
y[(k1*Dop*P)+(0*P)+ 0] = x[0];
///Flag
flag_inputstage_1_d[0] = 1;
}
//Mapping of x[n] onto the inputs of the first set of Dop DFTs
if((n >= 1) && (n <= (Li-1)))
{
t1 = x[n];
if(k1 == 0)
{
y[(0*Dop*P)+(n1*P)+ n2] = t1;
}
if(k1 >= 1)
{
y[(k1*Dop*P)+(n1*P)+ n2] = cuCmulf(W[((n*k1)%N)-1],t1);
}
///Flag
flag_inputstage_2_d[0] = 1;
}
//Zero padding of the "y" elements for Li <= n <= (P*Dop)-1
if((n >= Li) && (n <= (P*Dop)-1))
{
y[(k1*Dop*P)+(n1*P)+ n2] = make_cuFloatComplex(0.0,0.0);
///Flag
flag_inputstage_3_d[0] = 1;
}
//printf("\n (%f) + (%f)\n ",cuCrealf(y[(k1*Dop*P)+(n1*P)+ n2]),cuCimagf(y[(k1*Dop*P)+(n1*P)+ n2]));
}
}
//Host helper function that computes the intermediate stage on the device
void etapa_intermedia(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////INTERMEDIATE STAGE/////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Local variable declarations
int k1,k2,n1;
int n[1] = {P};
int inembed[1] = {P};
int onembed[1] = {P};
//Asignación de memoria en el device para "z"
cudaMalloc((void**)&z_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Asignación de memoria en el host para "z"
//z_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Asignación de memoria en el device para "in" y "out"
cudaMalloc((void**)&in,sizeof(cufftComplex)*P*Dip*Dop);
cudaMalloc((void**)&out,sizeof(cufftComplex)*P*Dip*Dop);
//Se copia el arreglo "y" al arreglo "in"
cudaMemcpy(in,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice);
//Se crea un plan
cufftHandle plan;
cufftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,CUFFT_C2C,Dip*Dop);
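//The plan above performs Dip*Dop batched 1-D C2C transforms of length P, each
//with unit stride and a distance of P elements between consecutive batches.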
//Execution of the plan
cufftExecC2C(plan,in,out,CUFFT_FORWARD);
//Wait until the transform has finished executing completely
cudaDeviceSynchronize();
//The data in the "out" array is copied to the "z_device" array
cudaMemcpy(z_device,out,sizeof(cufftComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice);
//The plan is destroyed
cufftDestroy(plan);
//The "in" and "out" arrays are freed
cudaFree(in);
cudaFree(out);
/*
//Se copian los datos del arreglo "z_device" al arreglo "z_host"
cudaMemcpy(z_host,z_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost);
///Se imprimen los valores de z(n1,k2,k1)
printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(k2 = 0;k2 < P;k2++)
{
printf(" (%f) + (%f) ",cuCrealf(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimagf(z_host[(k1*Dop*P)+(n1*P)+k2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//Host helper function that computes the output stage on the device
void etapa_salida(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////OUTPUT STAGE///////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Local variable declarations
int m;
//Memory allocation on the device for "X"
cudaMalloc((void**)&X_device,Lo*sizeof(cuFloatComplex));
//Memory allocation on the host for "X"
X_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Lo);
//Grid sizing for the "outputStage" kernel function
//Grid dimensions
dim3 gridDim(1,1,1);
//Block dimensions
dim3 blockDim(1,1,1);
if((Lo) < 1024)
{
blockDim.x = Lo;
gridDim.x = 1;
}
else
{
blockDim.x = 1024;
gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x));
}
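//Note: 1024 threads is the usual per-block limit, so the grid is sized to
//cover all Lo output samples when Lo exceeds a single block.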
//Launch of the "outputStage_kernel" kernel
outputStage_kernel<<<gridDim,blockDim>>>(N,Lo,Dip,Dop,P,z_device,W_device,X_device,flag_outputstage_1_d,flag_outputstage_2_d,flag_outputstage_3_d);
//Wait until the kernel has finished executing completely
cudaDeviceSynchronize();
//Copy of the "X" array from the device to the host
cudaMemcpy(X_host,X_device,sizeof(cuFloatComplex)*Lo,cudaMemcpyDeviceToHost);
/*
//Se imprimen los valores de "X_host"
///Imprimir X[k]
printf("\n\n--- ARREGLO X[k] ---\n\n");
for(m=0;m<=Lo-1;m++)
{
printf("\n X[%d] = %.4f + (%.4f)",m,cuCrealf(X_host[m]),cuCimagf(X_host[m]));
//fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i]));
}
*/
}
//Kernel function that executes the output stage on the device
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X,int *flag_outputstage_1_d,int *flag_outputstage_2_d,int *flag_outputstage_3_d)
{
//Local variable declarations
int n1,k_aux,k1,k2,a,b;
cuFloatComplex t1,t2,t3,t4,t5;
//Threads
int k = blockDim.x *blockIdx.x + threadIdx.x;
//The flags are reset
flag_outputstage_1_d[0] = 0;
flag_outputstage_2_d[0] = 0;
flag_outputstage_3_d[0] = 0;
if(k < Lo)
{
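//Each thread computes one output sample X[k]; the loop over n1 accumulates
//the contribution of each of the Dop sub-transforms stored in z.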
for(n1 = 0; n1 <= (Dop-1); n1 = n1+1)
{
if(Lo <= Dip)
{
//Computation of X(k) for 0<=k<=Lo-1.
//printf("\n--- Caso (Lo <= Dip) ---\n");
//In the decomposition k = k1 + Dip*k2; k2 = 0, and therefore k = k1
if(n1 == 0) //Case that guarantees this branch is entered at least once
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
///Flag
flag_outputstage_1_d[0] = 1;
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
///Flag
flag_outputstage_1_d[0] = 1;
}
}
else
{
if((k >= 0) && (k <= (Dip-1)))
{
//Computation of X(k) for 0<=k<=Dip-1.
//In the decomposition k = k1 + Dip*k2; k2 = 0, and therefore k = k1
if(n1 == 0) //Case that guarantees this branch is entered at least once
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
}
}
else
{
if(Dop <= 4)
{
//Using the direct method
//printf("\n--- Caso (Metodo directo) ---\n");
if(n1 == 0) //Case that guarantees this branch is entered at least once
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
///Flag
flag_outputstage_2_d[0] = 1;
}
else
{
if(n1 == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
}
a = floorf(k/(Dip*P));
X[k] = cuCaddf(X[k],cuCmulf(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1]));
///Flag
flag_outputstage_2_d[0] = 1;
}
}
else
{
//Using the 2BF filtering method
//printf("\n--- Caso (Filtro 2BF) ---\n");
if((Dop-2) >= 1)
{
if(n1 == 0)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
///Flag
flag_outputstage_3_d[0] = 1;
}
if((n1 >= 1) && (n1 <= (Dop-2)))
{
t2 = t1;
t1 = cuCaddf(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4);
t3 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t4 = cuCsubf(t3,t2);
}
if(n1 == (Dop-1))
{
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
}
}
else
{
if(Dop == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
X[k] = t1;
///Flag
flag_outputstage_3_d[0] = 1;
}
else
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
///Flag
flag_outputstage_3_d[0] = 1;
}
}
}
}
}
}
}
}
|
12d90f044e3cdf66429892711b6b36b0a499f386.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2019-2023 by XGBoost Contributors
*/
#include <gtest/gtest.h>
#include <thrust/device_vector.h>
#include <xgboost/c_api.h>
#include <xgboost/data.h>
#include <algorithm>
#include <cmath>
#include "../../../include/xgboost/logging.h"
#include "../../../src/common/device_helpers.cuh"
#include "../../../src/common/hist_util.cuh"
#include "../../../src/common/hist_util.h"
#include "../../../src/common/math.h"
#include "../../../src/data/device_adapter.cuh"
#include "../../../src/data/simple_dmatrix.h"
#include "../data/test_array_interface.h"
#include "../filesystem.h" // dmlc::TemporaryDirectory
#include "../helpers.h"
#include "test_hist_util.h"
namespace xgboost {
namespace common {
template <typename AdapterT>
HistogramCuts GetHostCuts(AdapterT *adapter, int num_bins, float missing) {
data::SimpleDMatrix dmat(adapter, missing, 1);
HistogramCuts cuts = SketchOnDMatrix(&dmat, num_bins, AllThreadsForTest());
return cuts;
}
TEST(HistUtil, DeviceSketch) {
int num_columns = 1;
int num_bins = 4;
std::vector<float> x = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 7.0f, -1.0f};
int num_rows = x.size();
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto device_cuts = DeviceSketch(0, dmat.get(), num_bins);
HistogramCuts host_cuts = SketchOnDMatrix(dmat.get(), num_bins, AllThreadsForTest());
EXPECT_EQ(device_cuts.Values(), host_cuts.Values());
EXPECT_EQ(device_cuts.Ptrs(), host_cuts.Ptrs());
EXPECT_EQ(device_cuts.MinValues(), host_cuts.MinValues());
}
TEST(HistUtil, SketchBatchNumElements) {
#if defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1
LOG(WARNING) << "Test not runnable with RMM enabled.";
return;
#endif // defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1
size_t constexpr kCols = 10000;
int device;
dh::safe_cuda(hipGetDevice(&device));
auto avail = static_cast<size_t>(dh::AvailableMemory(device) * 0.8);
auto per_elem = detail::BytesPerElement(false);
auto avail_elem = avail / per_elem;
size_t rows = avail_elem / kCols * 10;
auto batch = detail::SketchBatchNumElements(0, rows, kCols, rows * kCols, device, 256, false);
ASSERT_EQ(batch, avail_elem);
}
TEST(HistUtil, DeviceSketchMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
auto device_cuts = DeviceSketch(0, dmat.get(), num_bins);
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, false);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95);
ConsoleLogger::Configure({{"verbosity", "0"}});
}
TEST(HistUtil, DeviceSketchWeightsMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
auto device_cuts = DeviceSketch(0, dmat.get(), num_bins);
ConsoleLogger::Configure({{"verbosity", "0"}});
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, true);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required);
}
TEST(HistUtil, DeviceSketchDeterminism) {
int num_rows = 500;
int num_columns = 5;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto reference_sketch = DeviceSketch(0, dmat.get(), num_bins);
size_t constexpr kRounds{ 100 };
for (size_t r = 0; r < kRounds; ++r) {
auto new_sketch = DeviceSketch(0, dmat.get(), num_bins);
ASSERT_EQ(reference_sketch.Values(), new_sketch.Values());
ASSERT_EQ(reference_sketch.MinValues(), new_sketch.MinValues());
}
}
TEST(HistUtil, DeviceSketchCategoricalAsNumeric) {
int categorical_sizes[] = {2, 6, 8, 12};
int num_bins = 256;
int sizes[] = {25, 100, 1000};
for (auto n : sizes) {
for (auto num_categories : categorical_sizes) {
auto x = GenerateRandomCategoricalSingleColumn(n, num_categories);
auto dmat = GetDMatrixFromData(x, n, 1);
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
TEST(HistUtil, DeviceSketchCategoricalFeatures) {
TestCategoricalSketch(1000, 256, 32, false,
[](DMatrix *p_fmat, int32_t num_bins) {
return DeviceSketch(0, p_fmat, num_bins);
});
TestCategoricalSketch(1000, 256, 32, true,
[](DMatrix *p_fmat, int32_t num_bins) {
return DeviceSketch(0, p_fmat, num_bins);
});
}
void TestMixedSketch() {
size_t n_samples = 1000, n_features = 2, n_categories = 3;
std::vector<float> data(n_samples * n_features);
SimpleLCG gen;
SimpleRealUniformDistribution<float> cat_d{0.0f, static_cast<float>(n_categories)};
SimpleRealUniformDistribution<float> num_d{0.0f, 3.0f};
for (size_t i = 0; i < n_samples * n_features; ++i) {
if (i % 2 == 0) {
data[i] = ::floor(cat_d(&gen));
} else {
data[i] = num_d(&gen);
}
}
auto m = GetDMatrixFromData(data, n_samples, n_features);
m->Info().feature_types.HostVector().push_back(FeatureType::kCategorical);
m->Info().feature_types.HostVector().push_back(FeatureType::kNumerical);
auto cuts = DeviceSketch(0, m.get(), 64);
ASSERT_EQ(cuts.Values().size(), 64 + n_categories);
}
TEST(HistUtil, DeviceSketchMixedFeatures) {
TestMixedSketch();
}
TEST(HistUtil, DeviceSketchMultipleColumns) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
TEST(HistUtil, DeviceSketchMultipleColumnsWeights) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
TEST(HistUtil, DeviceSketchWeights) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto weighted_dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto& h_weights = weighted_dmat->Info().weights_.HostVector();
h_weights.resize(num_rows);
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
auto wcuts = DeviceSketch(0, weighted_dmat.get(), num_bins);
ASSERT_EQ(cuts.MinValues(), wcuts.MinValues());
ASSERT_EQ(cuts.Ptrs(), wcuts.Ptrs());
ASSERT_EQ(cuts.Values(), wcuts.Values());
ValidateCuts(cuts, dmat.get(), num_bins);
ValidateCuts(wcuts, weighted_dmat.get(), num_bins);
}
}
}
TEST(HistUtil, DeviceSketchBatches) {
int num_bins = 256;
int num_rows = 5000;
int batch_sizes[] = {0, 100, 1500, 6000};
int num_columns = 5;
for (auto batch_size : batch_sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto cuts = DeviceSketch(0, dmat.get(), num_bins, batch_size);
ValidateCuts(cuts, dmat.get(), num_bins);
}
num_rows = 1000;
size_t batches = 16;
auto x = GenerateRandom(num_rows * batches, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows * batches, num_columns);
auto cuts_with_batches = DeviceSketch(0, dmat.get(), num_bins, num_rows);
auto cuts = DeviceSketch(0, dmat.get(), num_bins, 0);
auto const& cut_values_batched = cuts_with_batches.Values();
auto const& cut_values = cuts.Values();
CHECK_EQ(cut_values.size(), cut_values_batched.size());
for (size_t i = 0; i < cut_values.size(); ++i) {
ASSERT_NEAR(cut_values_batched[i], cut_values[i], 1e5);
}
}
TEST(HistUtil, DeviceSketchMultipleColumnsExternal) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
dmlc::TemporaryDirectory temp;
auto dmat = GetExternalMemoryDMatrixFromData(x, num_rows, num_columns, temp);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
// See https://github.com/dmlc/xgboost/issues/5866.
TEST(HistUtil, DeviceSketchExternalMemoryWithWeights) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
dmlc::TemporaryDirectory temp;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetExternalMemoryDMatrixFromData(x, num_rows, num_columns, temp);
dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
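// Helper: builds unweighted HistogramCuts directly from a device adapter via
// SketchContainer, without constructing a DMatrix first.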
template <typename Adapter>
auto MakeUnweightedCutsForTest(Adapter adapter, int32_t num_bins, float missing, size_t batch_size = 0) {
common::HistogramCuts batched_cuts;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, num_bins, adapter.NumColumns(), adapter.NumRows(), 0);
MetaInfo info;
AdapterDeviceSketch(adapter.Value(), num_bins, info, missing, &sketch_container, batch_size);
sketch_container.MakeCuts(&batched_cuts);
return batched_cuts;
}
template <typename Adapter>
void ValidateBatchedCuts(Adapter adapter, int num_bins, DMatrix* dmat, size_t batch_size = 0) {
common::HistogramCuts batched_cuts = MakeUnweightedCutsForTest(
adapter, num_bins, std::numeric_limits<float>::quiet_NaN(), batch_size);
ValidateCuts(batched_cuts, dmat, num_bins);
}
TEST(HistUtil, AdapterDeviceSketch) {
int rows = 5;
int cols = 1;
int num_bins = 4;
float missing = - 1.0;
thrust::device_vector< float> data(rows*cols);
auto json_array_interface = Generate2dArrayInterface(rows, cols, "<f4", &data);
data = std::vector<float >{ 1.0,2.0,3.0,4.0,5.0 };
std::string str;
Json::Dump(json_array_interface, &str);
data::CupyAdapter adapter(str);
auto device_cuts = MakeUnweightedCutsForTest(adapter, num_bins, missing);
auto host_cuts = GetHostCuts(&adapter, num_bins, missing);
EXPECT_EQ(device_cuts.Values(), host_cuts.Values());
EXPECT_EQ(device_cuts.Ptrs(), host_cuts.Ptrs());
EXPECT_EQ(device_cuts.MinValues(), host_cuts.MinValues());
}
TEST(HistUtil, AdapterDeviceSketchMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
auto cuts = MakeUnweightedCutsForTest(adapter, num_bins, std::numeric_limits<float>::quiet_NaN());
ConsoleLogger::Configure({{"verbosity", "0"}});
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, false);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95);
}
TEST(HistUtil, AdapterSketchSlidingWindowMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
MetaInfo info;
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
common::HistogramCuts batched_cuts;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, num_bins, num_columns, num_rows, 0);
AdapterDeviceSketch(adapter.Value(), num_bins, info, std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
HistogramCuts cuts;
sketch_container.MakeCuts(&cuts);
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, false);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95);
ConsoleLogger::Configure({{"verbosity", "0"}});
}
TEST(HistUtil, AdapterSketchSlidingWindowWeightedMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
MetaInfo info;
auto& h_weights = info.weights_.HostVector();
h_weights.resize(num_rows);
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
common::HistogramCuts batched_cuts;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, num_bins, num_columns, num_rows, 0);
AdapterDeviceSketch(adapter.Value(), num_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
HistogramCuts cuts;
sketch_container.MakeCuts(&cuts);
ConsoleLogger::Configure({{"verbosity", "0"}});
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, true);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required);
}
void TestCategoricalSketchAdapter(size_t n, size_t num_categories,
int32_t num_bins, bool weighted) {
auto h_x = GenerateRandomCategoricalSingleColumn(n, num_categories);
thrust::device_vector<float> x(h_x);
auto adapter = AdapterFromData(x, n, 1);
MetaInfo info;
info.num_row_ = n;
info.num_col_ = 1;
info.feature_types.HostVector().push_back(FeatureType::kCategorical);
if (weighted) {
std::vector<float> weights(n, 0);
SimpleLCG lcg;
SimpleRealUniformDistribution<float> dist(0, 1);
for (auto& v : weights) {
v = dist(&lcg);
}
info.weights_.HostVector() = weights;
}
ASSERT_EQ(info.feature_types.Size(), 1);
SketchContainer container(info.feature_types, num_bins, 1, n, 0);
AdapterDeviceSketch(adapter.Value(), num_bins, info,
std::numeric_limits<float>::quiet_NaN(), &container);
HistogramCuts cuts;
container.MakeCuts(&cuts);
thrust::sort(x.begin(), x.end());
auto n_uniques = thrust::unique(x.begin(), x.end()) - x.begin();
ASSERT_NE(n_uniques, x.size());
ASSERT_EQ(cuts.TotalBins(), n_uniques);
ASSERT_EQ(n_uniques, num_categories);
auto& values = cuts.cut_values_.HostVector();
ASSERT_TRUE(std::is_sorted(values.cbegin(), values.cend()));
auto is_unique = (std::unique(values.begin(), values.end()) - values.begin()) == n_uniques;
ASSERT_TRUE(is_unique);
x.resize(n_uniques);
h_x.resize(n_uniques);
thrust::copy(x.begin(), x.end(), h_x.begin());
for (decltype(n_uniques) i = 0; i < n_uniques; ++i) {
ASSERT_EQ(h_x[i], values[i]);
}
}
TEST(HistUtil, AdapterDeviceSketchCategorical) {
int categorical_sizes[] = {2, 6, 8, 12};
int num_bins = 256;
int sizes[] = {25, 100, 1000};
for (auto n : sizes) {
for (auto num_categories : categorical_sizes) {
auto x = GenerateRandomCategoricalSingleColumn(n, num_categories);
auto dmat = GetDMatrixFromData(x, n, 1);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, n, 1);
ValidateBatchedCuts(adapter, num_bins, dmat.get());
TestCategoricalSketchAdapter(n, num_categories, num_bins, true);
TestCategoricalSketchAdapter(n, num_categories, num_bins, false);
}
}
}
TEST(HistUtil, AdapterDeviceSketchMultipleColumns) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
for (auto num_bins : bin_sizes) {
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
ValidateBatchedCuts(adapter, num_bins, dmat.get());
}
}
}
TEST(HistUtil, AdapterDeviceSketchBatches) {
int num_bins = 256;
int num_rows = 5000;
int batch_sizes[] = {0, 100, 1500, 6000};
int num_columns = 5;
for (auto batch_size : batch_sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
ValidateBatchedCuts(adapter, num_bins, dmat.get(), batch_size);
}
}
// Check sketching from adapter or DMatrix results in the same answer
// Consistency here is useful for testing and user experience
TEST(HistUtil, SketchingEquivalent) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
for (auto num_bins : bin_sizes) {
auto dmat_cuts = DeviceSketch(0, dmat.get(), num_bins);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
common::HistogramCuts adapter_cuts = MakeUnweightedCutsForTest(
adapter, num_bins, std::numeric_limits<float>::quiet_NaN());
EXPECT_EQ(dmat_cuts.Values(), adapter_cuts.Values());
EXPECT_EQ(dmat_cuts.Ptrs(), adapter_cuts.Ptrs());
EXPECT_EQ(dmat_cuts.MinValues(), adapter_cuts.MinValues());
ValidateBatchedCuts(adapter, num_bins, dmat.get());
}
}
}
TEST(HistUtil, DeviceSketchFromGroupWeights) {
size_t constexpr kRows = 3000, kCols = 200, kBins = 256;
size_t constexpr kGroups = 10;
auto m = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix();
auto& h_weights = m->Info().weights_.HostVector();
h_weights.resize(kRows);
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
std::vector<bst_group_t> groups(kGroups);
for (size_t i = 0; i < kGroups; ++i) {
groups[i] = kRows / kGroups;
}
m->SetInfo("group", groups.data(), DataType::kUInt32, kGroups);
HistogramCuts weighted_cuts = DeviceSketch(0, m.get(), kBins, 0);
h_weights.clear();
HistogramCuts cuts = DeviceSketch(0, m.get(), kBins, 0);
ASSERT_EQ(cuts.Values().size(), weighted_cuts.Values().size());
ASSERT_EQ(cuts.MinValues().size(), weighted_cuts.MinValues().size());
ASSERT_EQ(cuts.Ptrs().size(), weighted_cuts.Ptrs().size());
for (size_t i = 0; i < cuts.Values().size(); ++i) {
EXPECT_EQ(cuts.Values()[i], weighted_cuts.Values()[i]) << "i:"<< i;
}
for (size_t i = 0; i < cuts.MinValues().size(); ++i) {
ASSERT_EQ(cuts.MinValues()[i], weighted_cuts.MinValues()[i]);
}
for (size_t i = 0; i < cuts.Ptrs().size(); ++i) {
ASSERT_EQ(cuts.Ptrs().at(i), weighted_cuts.Ptrs().at(i));
}
ValidateCuts(weighted_cuts, m.get(), kBins);
}
void TestAdapterSketchFromWeights(bool with_group) {
size_t constexpr kRows = 300, kCols = 20, kBins = 256;
size_t constexpr kGroups = 10;
HostDeviceVector<float> storage;
std::string m =
RandomDataGenerator{kRows, kCols, 0}.Device(0).GenerateArrayInterface(
&storage);
MetaInfo info;
Context ctx;
auto& h_weights = info.weights_.HostVector();
if (with_group) {
h_weights.resize(kGroups);
} else {
h_weights.resize(kRows);
}
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
std::vector<bst_group_t> groups(kGroups);
if (with_group) {
for (size_t i = 0; i < kGroups; ++i) {
groups[i] = kRows / kGroups;
}
info.SetInfo(ctx, "group", groups.data(), DataType::kUInt32, kGroups);
}
info.weights_.SetDevice(0);
info.num_row_ = kRows;
info.num_col_ = kCols;
data::CupyAdapter adapter(m);
auto const& batch = adapter.Value();
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, kBins, kCols, kRows, 0);
AdapterDeviceSketch(adapter.Value(), kBins, info, std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
common::HistogramCuts cuts;
sketch_container.MakeCuts(&cuts);
auto dmat = GetDMatrixFromData(storage.HostVector(), kRows, kCols);
if (with_group) {
dmat->Info().SetInfo(ctx, "group", groups.data(), DataType::kUInt32, kGroups);
}
dmat->Info().SetInfo(ctx, "weight", h_weights.data(), DataType::kFloat32, h_weights.size());
dmat->Info().num_col_ = kCols;
dmat->Info().num_row_ = kRows;
ASSERT_EQ(cuts.Ptrs().size(), kCols + 1);
ValidateCuts(cuts, dmat.get(), kBins);
if (with_group) {
dmat->Info().weights_ = decltype(dmat->Info().weights_)(); // remove weight
HistogramCuts non_weighted = DeviceSketch(0, dmat.get(), kBins, 0);
for (size_t i = 0; i < cuts.Values().size(); ++i) {
ASSERT_EQ(cuts.Values()[i], non_weighted.Values()[i]);
}
for (size_t i = 0; i < cuts.MinValues().size(); ++i) {
ASSERT_EQ(cuts.MinValues()[i], non_weighted.MinValues()[i]);
}
for (size_t i = 0; i < cuts.Ptrs().size(); ++i) {
ASSERT_EQ(cuts.Ptrs().at(i), non_weighted.Ptrs().at(i));
}
}
if (with_group) {
common::HistogramCuts weighted;
auto& h_weights = info.weights_.HostVector();
h_weights.resize(kGroups);
// Generate different weight.
for (size_t i = 0; i < h_weights.size(); ++i) {
// FIXME(jiamingy): Some entries generated GPU test cannot pass the validate cuts if
// we use more diverse weights, partially caused by
// https://github.com/dmlc/xgboost/issues/7946
h_weights[i] = (i % 2 == 0 ? 1 : 2) / static_cast<float>(kGroups);
}
SketchContainer sketch_container(ft, kBins, kCols, kRows, 0);
AdapterDeviceSketch(adapter.Value(), kBins, info, std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
sketch_container.MakeCuts(&weighted);
ValidateCuts(weighted, dmat.get(), kBins);
}
}
TEST(HistUtil, AdapterSketchFromWeights) {
TestAdapterSketchFromWeights(false);
TestAdapterSketchFromWeights(true);
}
} // namespace common
} // namespace xgboost
|
12d90f044e3cdf66429892711b6b36b0a499f386.cu
|
/**
* Copyright 2019-2023 by XGBoost Contributors
*/
#include <gtest/gtest.h>
#include <thrust/device_vector.h>
#include <xgboost/c_api.h>
#include <xgboost/data.h>
#include <algorithm>
#include <cmath>
#include "../../../include/xgboost/logging.h"
#include "../../../src/common/device_helpers.cuh"
#include "../../../src/common/hist_util.cuh"
#include "../../../src/common/hist_util.h"
#include "../../../src/common/math.h"
#include "../../../src/data/device_adapter.cuh"
#include "../../../src/data/simple_dmatrix.h"
#include "../data/test_array_interface.h"
#include "../filesystem.h" // dmlc::TemporaryDirectory
#include "../helpers.h"
#include "test_hist_util.h"
namespace xgboost {
namespace common {
template <typename AdapterT>
HistogramCuts GetHostCuts(AdapterT *adapter, int num_bins, float missing) {
data::SimpleDMatrix dmat(adapter, missing, 1);
HistogramCuts cuts = SketchOnDMatrix(&dmat, num_bins, AllThreadsForTest());
return cuts;
}
TEST(HistUtil, DeviceSketch) {
int num_columns = 1;
int num_bins = 4;
std::vector<float> x = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 7.0f, -1.0f};
int num_rows = x.size();
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto device_cuts = DeviceSketch(0, dmat.get(), num_bins);
HistogramCuts host_cuts = SketchOnDMatrix(dmat.get(), num_bins, AllThreadsForTest());
EXPECT_EQ(device_cuts.Values(), host_cuts.Values());
EXPECT_EQ(device_cuts.Ptrs(), host_cuts.Ptrs());
EXPECT_EQ(device_cuts.MinValues(), host_cuts.MinValues());
}
TEST(HistUtil, SketchBatchNumElements) {
#if defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1
LOG(WARNING) << "Test not runnable with RMM enabled.";
return;
#endif // defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1
size_t constexpr kCols = 10000;
int device;
dh::safe_cuda(cudaGetDevice(&device));
auto avail = static_cast<size_t>(dh::AvailableMemory(device) * 0.8);
auto per_elem = detail::BytesPerElement(false);
auto avail_elem = avail / per_elem;
size_t rows = avail_elem / kCols * 10;
auto batch = detail::SketchBatchNumElements(0, rows, kCols, rows * kCols, device, 256, false);
ASSERT_EQ(batch, avail_elem);
}
TEST(HistUtil, DeviceSketchMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
auto device_cuts = DeviceSketch(0, dmat.get(), num_bins);
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, false);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95);
ConsoleLogger::Configure({{"verbosity", "0"}});
}
TEST(HistUtil, DeviceSketchWeightsMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
auto device_cuts = DeviceSketch(0, dmat.get(), num_bins);
ConsoleLogger::Configure({{"verbosity", "0"}});
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, true);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required);
}
TEST(HistUtil, DeviceSketchDeterminism) {
int num_rows = 500;
int num_columns = 5;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto reference_sketch = DeviceSketch(0, dmat.get(), num_bins);
size_t constexpr kRounds{ 100 };
for (size_t r = 0; r < kRounds; ++r) {
auto new_sketch = DeviceSketch(0, dmat.get(), num_bins);
ASSERT_EQ(reference_sketch.Values(), new_sketch.Values());
ASSERT_EQ(reference_sketch.MinValues(), new_sketch.MinValues());
}
}
TEST(HistUtil, DeviceSketchCategoricalAsNumeric) {
int categorical_sizes[] = {2, 6, 8, 12};
int num_bins = 256;
int sizes[] = {25, 100, 1000};
for (auto n : sizes) {
for (auto num_categories : categorical_sizes) {
auto x = GenerateRandomCategoricalSingleColumn(n, num_categories);
auto dmat = GetDMatrixFromData(x, n, 1);
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
TEST(HistUtil, DeviceSketchCategoricalFeatures) {
TestCategoricalSketch(1000, 256, 32, false,
[](DMatrix *p_fmat, int32_t num_bins) {
return DeviceSketch(0, p_fmat, num_bins);
});
TestCategoricalSketch(1000, 256, 32, true,
[](DMatrix *p_fmat, int32_t num_bins) {
return DeviceSketch(0, p_fmat, num_bins);
});
}
void TestMixedSketch() {
size_t n_samples = 1000, n_features = 2, n_categories = 3;
std::vector<float> data(n_samples * n_features);
SimpleLCG gen;
SimpleRealUniformDistribution<float> cat_d{0.0f, static_cast<float>(n_categories)};
SimpleRealUniformDistribution<float> num_d{0.0f, 3.0f};
for (size_t i = 0; i < n_samples * n_features; ++i) {
if (i % 2 == 0) {
data[i] = std::floor(cat_d(&gen));
} else {
data[i] = num_d(&gen);
}
}
auto m = GetDMatrixFromData(data, n_samples, n_features);
m->Info().feature_types.HostVector().push_back(FeatureType::kCategorical);
m->Info().feature_types.HostVector().push_back(FeatureType::kNumerical);
auto cuts = DeviceSketch(0, m.get(), 64);
ASSERT_EQ(cuts.Values().size(), 64 + n_categories);
}
TEST(HistUtil, DeviceSketchMixedFeatures) {
TestMixedSketch();
}
TEST(HistUtil, DeviceSketchMultipleColumns) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
TEST(HistUtil, DeviceSketchMultipleColumnsWeights) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
TEST(HistUtil, DeviceSketchWeights) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto weighted_dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto& h_weights = weighted_dmat->Info().weights_.HostVector();
h_weights.resize(num_rows);
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
auto wcuts = DeviceSketch(0, weighted_dmat.get(), num_bins);
ASSERT_EQ(cuts.MinValues(), wcuts.MinValues());
ASSERT_EQ(cuts.Ptrs(), wcuts.Ptrs());
ASSERT_EQ(cuts.Values(), wcuts.Values());
ValidateCuts(cuts, dmat.get(), num_bins);
ValidateCuts(wcuts, weighted_dmat.get(), num_bins);
}
}
}
TEST(HistUtil, DeviceSketchBatches) {
int num_bins = 256;
int num_rows = 5000;
int batch_sizes[] = {0, 100, 1500, 6000};
int num_columns = 5;
for (auto batch_size : batch_sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto cuts = DeviceSketch(0, dmat.get(), num_bins, batch_size);
ValidateCuts(cuts, dmat.get(), num_bins);
}
num_rows = 1000;
size_t batches = 16;
auto x = GenerateRandom(num_rows * batches, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows * batches, num_columns);
auto cuts_with_batches = DeviceSketch(0, dmat.get(), num_bins, num_rows);
auto cuts = DeviceSketch(0, dmat.get(), num_bins, 0);
auto const& cut_values_batched = cuts_with_batches.Values();
auto const& cut_values = cuts.Values();
CHECK_EQ(cut_values.size(), cut_values_batched.size());
for (size_t i = 0; i < cut_values.size(); ++i) {
ASSERT_NEAR(cut_values_batched[i], cut_values[i], 1e5);
}
}
TEST(HistUtil, DeviceSketchMultipleColumnsExternal) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
dmlc::TemporaryDirectory temp;
auto dmat = GetExternalMemoryDMatrixFromData(x, num_rows, num_columns, temp);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
// See https://github.com/dmlc/xgboost/issues/5866.
TEST(HistUtil, DeviceSketchExternalMemoryWithWeights) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
dmlc::TemporaryDirectory temp;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetExternalMemoryDMatrixFromData(x, num_rows, num_columns, temp);
dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
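// Helper: builds unweighted HistogramCuts directly from a device adapter via
// SketchContainer, without constructing a DMatrix first.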
template <typename Adapter>
auto MakeUnweightedCutsForTest(Adapter adapter, int32_t num_bins, float missing, size_t batch_size = 0) {
common::HistogramCuts batched_cuts;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, num_bins, adapter.NumColumns(), adapter.NumRows(), 0);
MetaInfo info;
AdapterDeviceSketch(adapter.Value(), num_bins, info, missing, &sketch_container, batch_size);
sketch_container.MakeCuts(&batched_cuts);
return batched_cuts;
}
template <typename Adapter>
void ValidateBatchedCuts(Adapter adapter, int num_bins, DMatrix* dmat, size_t batch_size = 0) {
common::HistogramCuts batched_cuts = MakeUnweightedCutsForTest(
adapter, num_bins, std::numeric_limits<float>::quiet_NaN(), batch_size);
ValidateCuts(batched_cuts, dmat, num_bins);
}
TEST(HistUtil, AdapterDeviceSketch) {
int rows = 5;
int cols = 1;
int num_bins = 4;
float missing = - 1.0;
thrust::device_vector< float> data(rows*cols);
auto json_array_interface = Generate2dArrayInterface(rows, cols, "<f4", &data);
data = std::vector<float >{ 1.0,2.0,3.0,4.0,5.0 };
std::string str;
Json::Dump(json_array_interface, &str);
data::CupyAdapter adapter(str);
auto device_cuts = MakeUnweightedCutsForTest(adapter, num_bins, missing);
auto host_cuts = GetHostCuts(&adapter, num_bins, missing);
EXPECT_EQ(device_cuts.Values(), host_cuts.Values());
EXPECT_EQ(device_cuts.Ptrs(), host_cuts.Ptrs());
EXPECT_EQ(device_cuts.MinValues(), host_cuts.MinValues());
}
TEST(HistUtil, AdapterDeviceSketchMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
auto cuts = MakeUnweightedCutsForTest(adapter, num_bins, std::numeric_limits<float>::quiet_NaN());
ConsoleLogger::Configure({{"verbosity", "0"}});
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, false);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95);
}
TEST(HistUtil, AdapterSketchSlidingWindowMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
MetaInfo info;
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
common::HistogramCuts batched_cuts;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, num_bins, num_columns, num_rows, 0);
AdapterDeviceSketch(adapter.Value(), num_bins, info, std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
HistogramCuts cuts;
sketch_container.MakeCuts(&cuts);
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, false);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95);
ConsoleLogger::Configure({{"verbosity", "0"}});
}
TEST(HistUtil, AdapterSketchSlidingWindowWeightedMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
MetaInfo info;
auto& h_weights = info.weights_.HostVector();
h_weights.resize(num_rows);
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
common::HistogramCuts batched_cuts;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, num_bins, num_columns, num_rows, 0);
AdapterDeviceSketch(adapter.Value(), num_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
HistogramCuts cuts;
sketch_container.MakeCuts(&cuts);
ConsoleLogger::Configure({{"verbosity", "0"}});
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, true);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required);
}
void TestCategoricalSketchAdapter(size_t n, size_t num_categories,
int32_t num_bins, bool weighted) {
auto h_x = GenerateRandomCategoricalSingleColumn(n, num_categories);
thrust::device_vector<float> x(h_x);
auto adapter = AdapterFromData(x, n, 1);
MetaInfo info;
info.num_row_ = n;
info.num_col_ = 1;
info.feature_types.HostVector().push_back(FeatureType::kCategorical);
if (weighted) {
std::vector<float> weights(n, 0);
SimpleLCG lcg;
SimpleRealUniformDistribution<float> dist(0, 1);
for (auto& v : weights) {
v = dist(&lcg);
}
info.weights_.HostVector() = weights;
}
ASSERT_EQ(info.feature_types.Size(), 1);
SketchContainer container(info.feature_types, num_bins, 1, n, 0);
AdapterDeviceSketch(adapter.Value(), num_bins, info,
std::numeric_limits<float>::quiet_NaN(), &container);
HistogramCuts cuts;
container.MakeCuts(&cuts);
thrust::sort(x.begin(), x.end());
auto n_uniques = thrust::unique(x.begin(), x.end()) - x.begin();
ASSERT_NE(n_uniques, x.size());
ASSERT_EQ(cuts.TotalBins(), n_uniques);
ASSERT_EQ(n_uniques, num_categories);
auto& values = cuts.cut_values_.HostVector();
ASSERT_TRUE(std::is_sorted(values.cbegin(), values.cend()));
auto is_unique = (std::unique(values.begin(), values.end()) - values.begin()) == n_uniques;
ASSERT_TRUE(is_unique);
x.resize(n_uniques);
h_x.resize(n_uniques);
thrust::copy(x.begin(), x.end(), h_x.begin());
for (decltype(n_uniques) i = 0; i < n_uniques; ++i) {
ASSERT_EQ(h_x[i], values[i]);
}
}
TEST(HistUtil, AdapterDeviceSketchCategorical) {
int categorical_sizes[] = {2, 6, 8, 12};
int num_bins = 256;
int sizes[] = {25, 100, 1000};
for (auto n : sizes) {
for (auto num_categories : categorical_sizes) {
auto x = GenerateRandomCategoricalSingleColumn(n, num_categories);
auto dmat = GetDMatrixFromData(x, n, 1);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, n, 1);
ValidateBatchedCuts(adapter, num_bins, dmat.get());
TestCategoricalSketchAdapter(n, num_categories, num_bins, true);
TestCategoricalSketchAdapter(n, num_categories, num_bins, false);
}
}
}
TEST(HistUtil, AdapterDeviceSketchMultipleColumns) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
for (auto num_bins : bin_sizes) {
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
ValidateBatchedCuts(adapter, num_bins, dmat.get());
}
}
}
TEST(HistUtil, AdapterDeviceSketchBatches) {
int num_bins = 256;
int num_rows = 5000;
int batch_sizes[] = {0, 100, 1500, 6000};
int num_columns = 5;
for (auto batch_size : batch_sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
ValidateBatchedCuts(adapter, num_bins, dmat.get(), batch_size);
}
}
// Check sketching from adapter or DMatrix results in the same answer
// Consistency here is useful for testing and user experience
TEST(HistUtil, SketchingEquivalent) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
for (auto num_bins : bin_sizes) {
auto dmat_cuts = DeviceSketch(0, dmat.get(), num_bins);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
common::HistogramCuts adapter_cuts = MakeUnweightedCutsForTest(
adapter, num_bins, std::numeric_limits<float>::quiet_NaN());
EXPECT_EQ(dmat_cuts.Values(), adapter_cuts.Values());
EXPECT_EQ(dmat_cuts.Ptrs(), adapter_cuts.Ptrs());
EXPECT_EQ(dmat_cuts.MinValues(), adapter_cuts.MinValues());
ValidateBatchedCuts(adapter, num_bins, dmat.get());
}
}
}
TEST(HistUtil, DeviceSketchFromGroupWeights) {
size_t constexpr kRows = 3000, kCols = 200, kBins = 256;
size_t constexpr kGroups = 10;
auto m = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix();
auto& h_weights = m->Info().weights_.HostVector();
h_weights.resize(kRows);
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
std::vector<bst_group_t> groups(kGroups);
for (size_t i = 0; i < kGroups; ++i) {
groups[i] = kRows / kGroups;
}
m->SetInfo("group", groups.data(), DataType::kUInt32, kGroups);
HistogramCuts weighted_cuts = DeviceSketch(0, m.get(), kBins, 0);
h_weights.clear();
HistogramCuts cuts = DeviceSketch(0, m.get(), kBins, 0);
ASSERT_EQ(cuts.Values().size(), weighted_cuts.Values().size());
ASSERT_EQ(cuts.MinValues().size(), weighted_cuts.MinValues().size());
ASSERT_EQ(cuts.Ptrs().size(), weighted_cuts.Ptrs().size());
for (size_t i = 0; i < cuts.Values().size(); ++i) {
EXPECT_EQ(cuts.Values()[i], weighted_cuts.Values()[i]) << "i:"<< i;
}
for (size_t i = 0; i < cuts.MinValues().size(); ++i) {
ASSERT_EQ(cuts.MinValues()[i], weighted_cuts.MinValues()[i]);
}
for (size_t i = 0; i < cuts.Ptrs().size(); ++i) {
ASSERT_EQ(cuts.Ptrs().at(i), weighted_cuts.Ptrs().at(i));
}
ValidateCuts(weighted_cuts, m.get(), kBins);
}
void TestAdapterSketchFromWeights(bool with_group) {
size_t constexpr kRows = 300, kCols = 20, kBins = 256;
size_t constexpr kGroups = 10;
HostDeviceVector<float> storage;
std::string m =
RandomDataGenerator{kRows, kCols, 0}.Device(0).GenerateArrayInterface(
&storage);
MetaInfo info;
Context ctx;
auto& h_weights = info.weights_.HostVector();
if (with_group) {
h_weights.resize(kGroups);
} else {
h_weights.resize(kRows);
}
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
std::vector<bst_group_t> groups(kGroups);
if (with_group) {
for (size_t i = 0; i < kGroups; ++i) {
groups[i] = kRows / kGroups;
}
info.SetInfo(ctx, "group", groups.data(), DataType::kUInt32, kGroups);
}
info.weights_.SetDevice(0);
info.num_row_ = kRows;
info.num_col_ = kCols;
data::CupyAdapter adapter(m);
auto const& batch = adapter.Value();
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, kBins, kCols, kRows, 0);
AdapterDeviceSketch(adapter.Value(), kBins, info, std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
common::HistogramCuts cuts;
sketch_container.MakeCuts(&cuts);
auto dmat = GetDMatrixFromData(storage.HostVector(), kRows, kCols);
if (with_group) {
dmat->Info().SetInfo(ctx, "group", groups.data(), DataType::kUInt32, kGroups);
}
dmat->Info().SetInfo(ctx, "weight", h_weights.data(), DataType::kFloat32, h_weights.size());
dmat->Info().num_col_ = kCols;
dmat->Info().num_row_ = kRows;
ASSERT_EQ(cuts.Ptrs().size(), kCols + 1);
ValidateCuts(cuts, dmat.get(), kBins);
if (with_group) {
dmat->Info().weights_ = decltype(dmat->Info().weights_)(); // remove weight
HistogramCuts non_weighted = DeviceSketch(0, dmat.get(), kBins, 0);
for (size_t i = 0; i < cuts.Values().size(); ++i) {
ASSERT_EQ(cuts.Values()[i], non_weighted.Values()[i]);
}
for (size_t i = 0; i < cuts.MinValues().size(); ++i) {
ASSERT_EQ(cuts.MinValues()[i], non_weighted.MinValues()[i]);
}
for (size_t i = 0; i < cuts.Ptrs().size(); ++i) {
ASSERT_EQ(cuts.Ptrs().at(i), non_weighted.Ptrs().at(i));
}
}
if (with_group) {
common::HistogramCuts weighted;
auto& h_weights = info.weights_.HostVector();
h_weights.resize(kGroups);
// Generate different weight.
for (size_t i = 0; i < h_weights.size(); ++i) {
// FIXME(jiamingy): Some entries generated GPU test cannot pass the validate cuts if
// we use more diverse weights, partially caused by
// https://github.com/dmlc/xgboost/issues/7946
h_weights[i] = (i % 2 == 0 ? 1 : 2) / static_cast<float>(kGroups);
}
SketchContainer sketch_container(ft, kBins, kCols, kRows, 0);
AdapterDeviceSketch(adapter.Value(), kBins, info, std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
sketch_container.MakeCuts(&weighted);
ValidateCuts(weighted, dmat.get(), kBins);
}
}
TEST(HistUtil, AdapterSketchFromWeights) {
TestAdapterSketchFromWeights(false);
TestAdapterSketchFromWeights(true);
}
} // namespace common
} // namespace xgboost
|
a3896f84278402b7ee115eeb8f7a85a689b72123.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel4_plus_4_a;
int xdim0_update_halo_kernel4_plus_4_a_h = -1;
__constant__ int ydim0_update_halo_kernel4_plus_4_a;
int ydim0_update_halo_kernel4_plus_4_a_h = -1;
__constant__ int xdim1_update_halo_kernel4_plus_4_a;
int xdim1_update_halo_kernel4_plus_4_a_h = -1;
__constant__ int ydim1_update_halo_kernel4_plus_4_a;
int ydim1_update_halo_kernel4_plus_4_a_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel4_plus_4_a * (y) + \
xdim0_update_halo_kernel4_plus_4_a * ydim0_update_halo_kernel4_plus_4_a * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel4_plus_4_a * (y) + \
xdim1_update_halo_kernel4_plus_4_a * ydim1_update_halo_kernel4_plus_4_a * \
(z))
// user function
__device__
inline void
update_halo_kernel4_plus_4_a_gpu(double *vol_flux_y, double *mass_flux_y,
const int *fields) {
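// For each enabled field, copy the value from 4 cells further along the first index into this
// halo cell.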
if (fields[FIELD_VOL_FLUX_Y] == 1)
vol_flux_y[OPS_ACC0(0, 0, 0)] = vol_flux_y[OPS_ACC0(4, 0, 0)];
if (fields[FIELD_MASS_FLUX_Y] == 1)
mass_flux_y[OPS_ACC1(0, 0, 0)] = mass_flux_y[OPS_ACC1(4, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel4_plus_4_a(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel4_plus_4_a +
idx_z * 1 * 1 * xdim0_update_halo_kernel4_plus_4_a *
ydim0_update_halo_kernel4_plus_4_a;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel4_plus_4_a +
idx_z * 1 * 1 * xdim1_update_halo_kernel4_plus_4_a *
ydim1_update_halo_kernel4_plus_4_a;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel4_plus_4_a_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel4_plus_4_a(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel4_plus_4_a_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 76))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(76, "update_halo_kernel4_plus_4_a");
OPS_kernels[76].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel4_plus_4_a_h ||
ydim0 != ydim0_update_halo_kernel4_plus_4_a_h ||
xdim1 != xdim1_update_halo_kernel4_plus_4_a_h ||
ydim1 != ydim1_update_halo_kernel4_plus_4_a_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel4_plus_4_a, &xdim0, sizeof(int));
xdim0_update_halo_kernel4_plus_4_a_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel4_plus_4_a, &ydim0, sizeof(int));
ydim0_update_halo_kernel4_plus_4_a_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel4_plus_4_a, &xdim1, sizeof(int));
xdim1_update_halo_kernel4_plus_4_a_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel4_plus_4_a, &ydim1, sizeof(int));
ydim1_update_halo_kernel4_plus_4_a_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[76].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel4_plus_4_a), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[76].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[76].mpi_time += t2 - t1;
OPS_kernels[76].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[76].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel4_plus_4_a(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 76;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 76;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel4_plus_4_a_execute;
if (OPS_diags > 1) {
ops_timing_realloc(76, "update_halo_kernel4_plus_4_a");
}
ops_enqueue_kernel(desc);
}
#endif
|
a3896f84278402b7ee115eeb8f7a85a689b72123.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel4_plus_4_a;
int xdim0_update_halo_kernel4_plus_4_a_h = -1;
__constant__ int ydim0_update_halo_kernel4_plus_4_a;
int ydim0_update_halo_kernel4_plus_4_a_h = -1;
__constant__ int xdim1_update_halo_kernel4_plus_4_a;
int xdim1_update_halo_kernel4_plus_4_a_h = -1;
__constant__ int ydim1_update_halo_kernel4_plus_4_a;
int ydim1_update_halo_kernel4_plus_4_a_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel4_plus_4_a * (y) + \
xdim0_update_halo_kernel4_plus_4_a * ydim0_update_halo_kernel4_plus_4_a * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel4_plus_4_a * (y) + \
xdim1_update_halo_kernel4_plus_4_a * ydim1_update_halo_kernel4_plus_4_a * \
(z))
// user function
__device__
inline void
update_halo_kernel4_plus_4_a_gpu(double *vol_flux_y, double *mass_flux_y,
const int *fields) {
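// For each enabled field, copy the value from 4 cells further along the first index into this
// halo cell.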
if (fields[FIELD_VOL_FLUX_Y] == 1)
vol_flux_y[OPS_ACC0(0, 0, 0)] = vol_flux_y[OPS_ACC0(4, 0, 0)];
if (fields[FIELD_MASS_FLUX_Y] == 1)
mass_flux_y[OPS_ACC1(0, 0, 0)] = mass_flux_y[OPS_ACC1(4, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel4_plus_4_a(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel4_plus_4_a +
idx_z * 1 * 1 * xdim0_update_halo_kernel4_plus_4_a *
ydim0_update_halo_kernel4_plus_4_a;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel4_plus_4_a +
idx_z * 1 * 1 * xdim1_update_halo_kernel4_plus_4_a *
ydim1_update_halo_kernel4_plus_4_a;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel4_plus_4_a_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel4_plus_4_a(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel4_plus_4_a_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 76))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(76, "update_halo_kernel4_plus_4_a");
OPS_kernels[76].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel4_plus_4_a_h ||
ydim0 != ydim0_update_halo_kernel4_plus_4_a_h ||
xdim1 != xdim1_update_halo_kernel4_plus_4_a_h ||
ydim1 != ydim1_update_halo_kernel4_plus_4_a_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel4_plus_4_a, &xdim0, sizeof(int));
xdim0_update_halo_kernel4_plus_4_a_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel4_plus_4_a, &ydim0, sizeof(int));
ydim0_update_halo_kernel4_plus_4_a_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel4_plus_4_a, &xdim1, sizeof(int));
xdim1_update_halo_kernel4_plus_4_a_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel4_plus_4_a, &ydim1, sizeof(int));
ydim1_update_halo_kernel4_plus_4_a_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[76].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel4_plus_4_a<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[76].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[76].mpi_time += t2 - t1;
OPS_kernels[76].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[76].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel4_plus_4_a(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 76;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 76;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel4_plus_4_a_execute;
if (OPS_diags > 1) {
ops_timing_realloc(76, "update_halo_kernel4_plus_4_a");
}
ops_enqueue_kernel(desc);
}
#endif
|
712630dca583f253a674ecc0bc7d83a65cb0a0c3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/L2Select.cuh>
#include <stdio.h>
#include <cstring>
#include <faiss/gpu/utils/MemorySpace.h>
#include <faiss/gpu/StandardGpuResources.h>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/CopyUtils.cuh>
#include <faiss/impl/FaissAssert.h>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/gpu/utils/Pair.cuh>
#include <faiss/gpu/utils/Reductions.cuh>
#include <faiss/gpu/utils/Select.cuh>
#include <faiss/gpu/utils/Tensor.cuh>
#include <faiss/gpu/utils/StaticUtils.h>
namespace faiss { namespace gpu {
// L2 + select kernel for k == 1, implements re-use of ||c||^2
template <typename T, int kRowsPerBlock, int kBlockSize>
__global__ void l2SelectMin1(Tensor<T, 2, true> productDistances,
Tensor<T, 1, true> centroidDistances,
Tensor<uint8_t, 1, true> bitset,
Tensor<T, 2, true> outDistances,
Tensor<int, 2, true> outIndices) {
// Each block handles kRowsPerBlock rows of the distances (results)
Pair<T, int> threadMin[kRowsPerBlock];
__shared__ Pair<T, int> blockMin[kRowsPerBlock * (kBlockSize / kWarpSize)];
T distance[kRowsPerBlock];
#pragma unroll
for (int i = 0; i < kRowsPerBlock; ++i) {
threadMin[i].k = Limits<T>::getMax();
threadMin[i].v = -1;
}
// blockIdx.x: which chunk of rows we are responsible for updating
int rowStart = blockIdx.x * kRowsPerBlock;
// FIXME: if we have exact multiples, don't need this
bool endRow = (blockIdx.x == gridDim.x - 1);
bool bitsetEmpty = (bitset.getSize(0) == 0);
if (endRow) {
if (productDistances.getSize(0) % kRowsPerBlock == 0) {
endRow = false;
}
}
if (endRow) {
for (int row = rowStart; row < productDistances.getSize(0); ++row) {
for (int col = threadIdx.x; col < productDistances.getSize(1);
col += blockDim.x) {
if (bitsetEmpty || (!(bitset[col >> 3] & (0x1 << (col & 0x7))))) {
distance[0] = Math<T>::add(centroidDistances[col],
productDistances[row][col]);
} else {
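// Column is excluded by the bitset: force its distance to +inf so it can never become the minimum.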
distance[0] = (T)(1.0 / 0.0);
}
if (Math<T>::lt(distance[0], threadMin[0].k)) {
threadMin[0].k = distance[0];
threadMin[0].v = col;
}
}
// Reduce within the block
threadMin[0] =
blockReduceAll<Pair<T, int>, Min<Pair<T, int>>, false, false>(
threadMin[0], Min<Pair<T, int>>(), blockMin);
if (threadIdx.x == 0) {
outDistances[row][0] = threadMin[0].k;
outIndices[row][0] = threadMin[0].v;
}
// so we can use the shared memory again
__syncthreads();
threadMin[0].k = Limits<T>::getMax();
threadMin[0].v = -1;
}
} else {
for (int col = threadIdx.x; col < productDistances.getSize(1);
col += blockDim.x) {
T centroidDistance = centroidDistances[col];
#pragma unroll
for (int row = 0; row < kRowsPerBlock; ++row) {
distance[row] = productDistances[rowStart + row][col];
}
#pragma unroll
for (int row = 0; row < kRowsPerBlock; ++row) {
distance[row] = Math<T>::add(distance[row], centroidDistance);
}
#pragma unroll
for (int row = 0; row < kRowsPerBlock; ++row) {
if (Math<T>::lt(distance[row], threadMin[row].k)) {
threadMin[row].k = distance[row];
threadMin[row].v = col;
}
}
}
// Reduce within the block
blockReduceAll<kRowsPerBlock,
Pair<T, int>,
Min<Pair<T, int> >,
false,
false>(threadMin,
Min<Pair<T, int> >(),
blockMin);
if (threadIdx.x == 0) {
#pragma unroll
for (int row = 0; row < kRowsPerBlock; ++row) {
outDistances[rowStart + row][0] = threadMin[row].k;
outIndices[rowStart + row][0] = threadMin[row].v;
}
}
}
}
// With bitset included
// L2 + select kernel for k > 1, no re-use of ||c||^2
template <typename T, int NumWarpQ, int NumThreadQ, int ThreadsPerBlock>
__global__ void l2SelectMinK(Tensor<T, 2, true> productDistances,
Tensor<T, 1, true> centroidDistances,
Tensor<uint8_t, 1, true> bitset,
Tensor<T, 2, true> outDistances,
Tensor<int, 2, true> outIndices,
int k, T initK) {
// Each block handles a single row of the distances (results)
constexpr int kNumWarps = ThreadsPerBlock / kWarpSize;
__shared__ T smemK[kNumWarps * NumWarpQ];
__shared__ int smemV[kNumWarps * NumWarpQ];
BlockSelect<T, int, false, Comparator<T>,
NumWarpQ, NumThreadQ, ThreadsPerBlock>
heap(initK, -1, smemK, smemV, k);
int row = blockIdx.x;
// Whole warps must participate in the selection
int limit = utils::roundDown(productDistances.getSize(1), kWarpSize);
int i = threadIdx.x;
bool bitsetEmpty = (bitset.getSize(0) == 0);
T v;
for (; i < limit; i += blockDim.x) {
if (bitsetEmpty || (!(bitset[i >> 3] & (0x1 << (i & 0x7))))) {
v = Math<T>::add(centroidDistances[i],
productDistances[row][i]);
heap.addThreadQ(v, i);
}
heap.checkThreadQ();
}
if (i < productDistances.getSize(1)) {
if (bitsetEmpty || (!(bitset[i >> 3] & (0x1 << (i & 0x7))))) {
v = Math<T>::add(centroidDistances[i],
productDistances[row][i]);
heap.addThreadQ(v, i);
}
}
heap.reduce();
for (int i = threadIdx.x; i < k; i += blockDim.x) {
outDistances[row][i] = smemK[i];
outIndices[row][i] = smemV[i];
}
}
template <typename T, int NumWarpQ, int NumThreadQ, int ThreadsPerBlock>
__global__ void l2SelectMinK(Tensor<T, 2, true> productDistances,
Tensor<T, 1, true> centroidDistances,
Tensor<T, 2, true> outDistances,
Tensor<int, 2, true> outIndices,
int k, T initK, int prev) {
// Each block handles a single row of the distances (results)
constexpr int kNumWarps = ThreadsPerBlock / kWarpSize;
__shared__ T smemK[kNumWarps * NumWarpQ];
__shared__ int smemV[kNumWarps * NumWarpQ];
BlockSelect<T, int, false, Comparator<T>,
NumWarpQ, NumThreadQ, ThreadsPerBlock>
heap(initK, -1, smemK, smemV, k);
int row = blockIdx.x;
// Whole warps must participate in the selection
int limit = utils::roundDown(productDistances.getSize(1), kWarpSize);
int i = threadIdx.x;
for (; i < limit; i += blockDim.x) {
T v = Math<T>::add(centroidDistances[i],
productDistances[row][i]);
heap.add(v, i);
}
if (i < productDistances.getSize(1)) {
T v = Math<T>::add(centroidDistances[i],
productDistances[row][i]);
heap.addThreadQ(v, i);
}
heap.reduce();
for (int i = threadIdx.x+prev; i < k+prev; i += blockDim.x) {
outDistances[row][i-prev] = smemK[i];
outIndices[row][i-prev] = smemV[i];
}
}
template <typename T>
void runL2SelectMin(Tensor<T, 2, true>& productDistances,
Tensor<T, 1, true>& centroidDistances,
Tensor<uint8_t, 1, true>& bitset,
Tensor<T, 2, true>& outDistances,
Tensor<int, 2, true>& outIndices,
int k,
hipStream_t stream) {
FAISS_ASSERT(productDistances.getSize(0) == outDistances.getSize(0));
FAISS_ASSERT(productDistances.getSize(0) == outIndices.getSize(0));
FAISS_ASSERT(centroidDistances.getSize(0) == productDistances.getSize(1));
FAISS_ASSERT(outDistances.getSize(1) == k);
FAISS_ASSERT(outIndices.getSize(1) == k);
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K);
if (k == 1) {
constexpr int kThreadsPerBlock = 256;
constexpr int kRowsPerBlock = 8;
auto block = dim3(kThreadsPerBlock);
auto grid = dim3(utils::divUp(outDistances.getSize(0), kRowsPerBlock));
hipLaunchKernelGGL(( l2SelectMin1<T, kRowsPerBlock, kThreadsPerBlock>)
, dim3(grid), dim3(block), 0, stream, productDistances, centroidDistances, bitset,
outDistances, outIndices);
} else {
auto grid = dim3(outDistances.getSize(0));
#define RUN_L2_SELECT(BLOCK, NUM_WARP_Q, NUM_THREAD_Q) \
do { \
hipLaunchKernelGGL(( l2SelectMinK<T, NUM_WARP_Q, NUM_THREAD_Q, BLOCK>) \
, dim3(grid), dim3(BLOCK), 0, stream, productDistances, centroidDistances, bitset, \
outDistances, outIndices, \
k, Limits<T>::getMax()); \
} while (0)
// block size 128 for everything <= 1024
if (k <= 32) {
RUN_L2_SELECT(128, 32, 2);
} else if (k <= 64) {
RUN_L2_SELECT(128, 64, 3);
} else if (k <= 128) {
RUN_L2_SELECT(128, 128, 3);
} else if (k <= 256) {
RUN_L2_SELECT(128, 256, 4);
} else if (k <= 512) {
RUN_L2_SELECT(128, 512, 8);
} else if (k <= 1024) {
RUN_L2_SELECT(128, 1024, 8);
#if GPU_MAX_SELECTION_K >= 2048
} else if (k <= 2048) {
// smaller block for less shared memory
RUN_L2_SELECT(64, 2048, 8);
#endif
} else {
FAISS_ASSERT(false);
}
#undef RUN_L2_SELECT
}
CUDA_TEST_ERROR();
}
void runL2SelMn(float* hostOutDistances,
int* hostOutIndices,
int startPos,
int curQuerySize,
int i,
int nprobe,
Tensor<float, 2, true>& productDistances,
Tensor<float, 1, true>& centroidDistances,
Tensor<uint8_t, 1, true>& bitset,
Tensor<float, 2, true>& outDistances,
Tensor<int, 2, true>& outIndices,
int k,
hipStream_t stream) {
FAISS_ASSERT(productDistances.getSize(0) == outDistances.getSize(0));
FAISS_ASSERT(productDistances.getSize(0) == outIndices.getSize(0));
FAISS_ASSERT(centroidDistances.getSize(0) == productDistances.getSize(1));
// FAISS_ASSERT(outDistances.getSize(1) == k);
// FAISS_ASSERT(outIndices.getSize(1) == k);
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K);
if (k == 1) {
constexpr int kThreadsPerBlock = 256;
constexpr int kRowsPerBlock = 8;
auto block = dim3(kThreadsPerBlock);
auto grid = dim3(utils::divUp(outDistances.getSize(0), kRowsPerBlock));
hipLaunchKernelGGL(( l2SelectMin1<float, kRowsPerBlock, kThreadsPerBlock>)
, dim3(grid), dim3(block), 0, stream, productDistances, centroidDistances, bitset,
outDistances, outIndices);
} else {
auto grid = dim3(outDistances.getSize(0));
#define RUN_L2_SELECT(BLOCK, NUM_WARP_Q, NUM_THREAD_Q) \
do { \
hipLaunchKernelGGL(( l2SelectMinK<float, NUM_WARP_Q, NUM_THREAD_Q, BLOCK>) \
, dim3(grid), dim3(BLOCK), 0, stream, productDistances, centroidDistances, \
outDistances, outIndices, \
k, Limits<float>::getMax(), i); \
} while (0)
// block size 128 for everything <= 1024
if (k <= 32) {
RUN_L2_SELECT(128, 32, 2);
} else if (k <= 64) {
RUN_L2_SELECT(128, 64, 3);
} else if (k <= 128) {
RUN_L2_SELECT(128, 128, 3);
} else if (k <= 256) {
RUN_L2_SELECT(128, 256, 4);
} else if (k <= 512) {
RUN_L2_SELECT(128, 512, 8);
} else if (k <= 1024) {
RUN_L2_SELECT(128, 1024, 8);
#if GPU_MAX_SELECTION_K >= 2048
} else if (k <= 2048) {
// smaller block for less shared memory
RUN_L2_SELECT(64, 2048, 8);
#endif
} else {
FAISS_ASSERT(false);
}
float* tmpDistances = new float[outDistances.getSize(0) * outDistances.getSize(1)];
int* tmpIndices = new int[outDistances.getSize(0) * outDistances.getSize(1)];
fromDevice<float,2>(outDistances, tmpDistances, stream);
fromDevice<int,2>(outIndices, tmpIndices, stream);
for(int j = 0; j < curQuerySize; j ++) {
for(int m = 0; m < k; m ++) {
hostOutDistances[(startPos + j) * nprobe + i + m] = tmpDistances[k * j + m];
hostOutIndices[(startPos + j) * nprobe + i + m] = tmpIndices[k * j + m];
}
}
delete [] tmpDistances;
delete [] tmpIndices;
}
CUDA_TEST_ERROR();
}
void runL2SelectMin(Tensor<float, 2, true>& productDistances,
Tensor<float, 1, true>& centroidDistances,
Tensor<uint8_t, 1, true>& bitset,
Tensor<float, 2, true>& outDistances,
Tensor<int, 2, true>& outIndices,
int k,
hipStream_t stream) {
runL2SelectMin<float>(productDistances,
centroidDistances,
bitset,
outDistances,
outIndices,
k,
stream);
}
} } // namespace
|
712630dca583f253a674ecc0bc7d83a65cb0a0c3.cu
|
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/L2Select.cuh>
#include <stdio.h>
#include <cstring>
#include <faiss/gpu/utils/MemorySpace.h>
#include <faiss/gpu/StandardGpuResources.h>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/CopyUtils.cuh>
#include <faiss/impl/FaissAssert.h>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/gpu/utils/Pair.cuh>
#include <faiss/gpu/utils/Reductions.cuh>
#include <faiss/gpu/utils/Select.cuh>
#include <faiss/gpu/utils/Tensor.cuh>
#include <faiss/gpu/utils/StaticUtils.h>
namespace faiss { namespace gpu {
// L2 + select kernel for k == 1, implements re-use of ||c||^2
template <typename T, int kRowsPerBlock, int kBlockSize>
__global__ void l2SelectMin1(Tensor<T, 2, true> productDistances,
Tensor<T, 1, true> centroidDistances,
Tensor<uint8_t, 1, true> bitset,
Tensor<T, 2, true> outDistances,
Tensor<int, 2, true> outIndices) {
// Each block handles kRowsPerBlock rows of the distances (results)
Pair<T, int> threadMin[kRowsPerBlock];
__shared__ Pair<T, int> blockMin[kRowsPerBlock * (kBlockSize / kWarpSize)];
T distance[kRowsPerBlock];
#pragma unroll
for (int i = 0; i < kRowsPerBlock; ++i) {
threadMin[i].k = Limits<T>::getMax();
threadMin[i].v = -1;
}
// blockIdx.x: which chunk of rows we are responsible for updating
int rowStart = blockIdx.x * kRowsPerBlock;
// FIXME: if we have exact multiples, don't need this
bool endRow = (blockIdx.x == gridDim.x - 1);
bool bitsetEmpty = (bitset.getSize(0) == 0);
if (endRow) {
if (productDistances.getSize(0) % kRowsPerBlock == 0) {
endRow = false;
}
}
if (endRow) {
for (int row = rowStart; row < productDistances.getSize(0); ++row) {
for (int col = threadIdx.x; col < productDistances.getSize(1);
col += blockDim.x) {
if (bitsetEmpty || (!(bitset[col >> 3] & (0x1 << (col & 0x7))))) {
distance[0] = Math<T>::add(centroidDistances[col],
productDistances[row][col]);
} else {
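// Column is excluded by the bitset: force its distance to +inf so it can never become the minimum.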
distance[0] = (T)(1.0 / 0.0);
}
if (Math<T>::lt(distance[0], threadMin[0].k)) {
threadMin[0].k = distance[0];
threadMin[0].v = col;
}
}
// Reduce within the block
threadMin[0] =
blockReduceAll<Pair<T, int>, Min<Pair<T, int>>, false, false>(
threadMin[0], Min<Pair<T, int>>(), blockMin);
if (threadIdx.x == 0) {
outDistances[row][0] = threadMin[0].k;
outIndices[row][0] = threadMin[0].v;
}
// so we can use the shared memory again
__syncthreads();
threadMin[0].k = Limits<T>::getMax();
threadMin[0].v = -1;
}
} else {
for (int col = threadIdx.x; col < productDistances.getSize(1);
col += blockDim.x) {
T centroidDistance = centroidDistances[col];
#pragma unroll
for (int row = 0; row < kRowsPerBlock; ++row) {
distance[row] = productDistances[rowStart + row][col];
}
#pragma unroll
for (int row = 0; row < kRowsPerBlock; ++row) {
distance[row] = Math<T>::add(distance[row], centroidDistance);
}
#pragma unroll
for (int row = 0; row < kRowsPerBlock; ++row) {
if (Math<T>::lt(distance[row], threadMin[row].k)) {
threadMin[row].k = distance[row];
threadMin[row].v = col;
}
}
}
// Reduce within the block
blockReduceAll<kRowsPerBlock,
Pair<T, int>,
Min<Pair<T, int> >,
false,
false>(threadMin,
Min<Pair<T, int> >(),
blockMin);
if (threadIdx.x == 0) {
#pragma unroll
for (int row = 0; row < kRowsPerBlock; ++row) {
outDistances[rowStart + row][0] = threadMin[row].k;
outIndices[rowStart + row][0] = threadMin[row].v;
}
}
}
}
// With bitset included
// L2 + select kernel for k > 1, no re-use of ||c||^2
template <typename T, int NumWarpQ, int NumThreadQ, int ThreadsPerBlock>
__global__ void l2SelectMinK(Tensor<T, 2, true> productDistances,
Tensor<T, 1, true> centroidDistances,
Tensor<uint8_t, 1, true> bitset,
Tensor<T, 2, true> outDistances,
Tensor<int, 2, true> outIndices,
int k, T initK) {
// Each block handles a single row of the distances (results)
constexpr int kNumWarps = ThreadsPerBlock / kWarpSize;
__shared__ T smemK[kNumWarps * NumWarpQ];
__shared__ int smemV[kNumWarps * NumWarpQ];
BlockSelect<T, int, false, Comparator<T>,
NumWarpQ, NumThreadQ, ThreadsPerBlock>
heap(initK, -1, smemK, smemV, k);
int row = blockIdx.x;
// Whole warps must participate in the selection
int limit = utils::roundDown(productDistances.getSize(1), kWarpSize);
int i = threadIdx.x;
bool bitsetEmpty = (bitset.getSize(0) == 0);
T v;
for (; i < limit; i += blockDim.x) {
if (bitsetEmpty || (!(bitset[i >> 3] & (0x1 << (i & 0x7))))) {
v = Math<T>::add(centroidDistances[i],
productDistances[row][i]);
heap.addThreadQ(v, i);
}
heap.checkThreadQ();
}
if (i < productDistances.getSize(1)) {
if (bitsetEmpty || (!(bitset[i >> 3] & (0x1 << (i & 0x7))))) {
v = Math<T>::add(centroidDistances[i],
productDistances[row][i]);
heap.addThreadQ(v, i);
}
}
heap.reduce();
for (int i = threadIdx.x; i < k; i += blockDim.x) {
outDistances[row][i] = smemK[i];
outIndices[row][i] = smemV[i];
}
}
template <typename T, int NumWarpQ, int NumThreadQ, int ThreadsPerBlock>
__global__ void l2SelectMinK(Tensor<T, 2, true> productDistances,
Tensor<T, 1, true> centroidDistances,
Tensor<T, 2, true> outDistances,
Tensor<int, 2, true> outIndices,
int k, T initK, int prev) {
// Each block handles a single row of the distances (results)
constexpr int kNumWarps = ThreadsPerBlock / kWarpSize;
__shared__ T smemK[kNumWarps * NumWarpQ];
__shared__ int smemV[kNumWarps * NumWarpQ];
BlockSelect<T, int, false, Comparator<T>,
NumWarpQ, NumThreadQ, ThreadsPerBlock>
heap(initK, -1, smemK, smemV, k);
int row = blockIdx.x;
// Whole warps must participate in the selection
int limit = utils::roundDown(productDistances.getSize(1), kWarpSize);
int i = threadIdx.x;
for (; i < limit; i += blockDim.x) {
T v = Math<T>::add(centroidDistances[i],
productDistances[row][i]);
heap.add(v, i);
}
if (i < productDistances.getSize(1)) {
T v = Math<T>::add(centroidDistances[i],
productDistances[row][i]);
heap.addThreadQ(v, i);
}
heap.reduce();
for (int i = threadIdx.x+prev; i < k+prev; i += blockDim.x) {
outDistances[row][i-prev] = smemK[i];
outIndices[row][i-prev] = smemV[i];
}
}
template <typename T>
void runL2SelectMin(Tensor<T, 2, true>& productDistances,
Tensor<T, 1, true>& centroidDistances,
Tensor<uint8_t, 1, true>& bitset,
Tensor<T, 2, true>& outDistances,
Tensor<int, 2, true>& outIndices,
int k,
cudaStream_t stream) {
FAISS_ASSERT(productDistances.getSize(0) == outDistances.getSize(0));
FAISS_ASSERT(productDistances.getSize(0) == outIndices.getSize(0));
FAISS_ASSERT(centroidDistances.getSize(0) == productDistances.getSize(1));
FAISS_ASSERT(outDistances.getSize(1) == k);
FAISS_ASSERT(outIndices.getSize(1) == k);
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K);
if (k == 1) {
constexpr int kThreadsPerBlock = 256;
constexpr int kRowsPerBlock = 8;
auto block = dim3(kThreadsPerBlock);
auto grid = dim3(utils::divUp(outDistances.getSize(0), kRowsPerBlock));
l2SelectMin1<T, kRowsPerBlock, kThreadsPerBlock>
<<<grid, block, 0, stream>>>(productDistances, centroidDistances, bitset,
outDistances, outIndices);
} else {
auto grid = dim3(outDistances.getSize(0));
#define RUN_L2_SELECT(BLOCK, NUM_WARP_Q, NUM_THREAD_Q) \
do { \
l2SelectMinK<T, NUM_WARP_Q, NUM_THREAD_Q, BLOCK> \
<<<grid, BLOCK, 0, stream>>>(productDistances, centroidDistances, bitset, \
outDistances, outIndices, \
k, Limits<T>::getMax()); \
} while (0)
// block size 128 for everything <= 1024
if (k <= 32) {
RUN_L2_SELECT(128, 32, 2);
} else if (k <= 64) {
RUN_L2_SELECT(128, 64, 3);
} else if (k <= 128) {
RUN_L2_SELECT(128, 128, 3);
} else if (k <= 256) {
RUN_L2_SELECT(128, 256, 4);
} else if (k <= 512) {
RUN_L2_SELECT(128, 512, 8);
} else if (k <= 1024) {
RUN_L2_SELECT(128, 1024, 8);
#if GPU_MAX_SELECTION_K >= 2048
} else if (k <= 2048) {
// smaller block for less shared memory
RUN_L2_SELECT(64, 2048, 8);
#endif
} else {
FAISS_ASSERT(false);
}
#undef RUN_L2_SELECT
}
CUDA_TEST_ERROR();
}
void runL2SelMn(float* hostOutDistances,
int* hostOutIndices,
int startPos,
int curQuerySize,
int i,
int nprobe,
Tensor<float, 2, true>& productDistances,
Tensor<float, 1, true>& centroidDistances,
Tensor<uint8_t, 1, true>& bitset,
Tensor<float, 2, true>& outDistances,
Tensor<int, 2, true>& outIndices,
int k,
cudaStream_t stream) {
FAISS_ASSERT(productDistances.getSize(0) == outDistances.getSize(0));
FAISS_ASSERT(productDistances.getSize(0) == outIndices.getSize(0));
FAISS_ASSERT(centroidDistances.getSize(0) == productDistances.getSize(1));
// FAISS_ASSERT(outDistances.getSize(1) == k);
// FAISS_ASSERT(outIndices.getSize(1) == k);
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K);
if (k == 1) {
constexpr int kThreadsPerBlock = 256;
constexpr int kRowsPerBlock = 8;
auto block = dim3(kThreadsPerBlock);
auto grid = dim3(utils::divUp(outDistances.getSize(0), kRowsPerBlock));
l2SelectMin1<float, kRowsPerBlock, kThreadsPerBlock>
<<<grid, block, 0, stream>>>(productDistances, centroidDistances, bitset,
outDistances, outIndices);
} else {
auto grid = dim3(outDistances.getSize(0));
#define RUN_L2_SELECT(BLOCK, NUM_WARP_Q, NUM_THREAD_Q) \
do { \
l2SelectMinK<float, NUM_WARP_Q, NUM_THREAD_Q, BLOCK> \
<<<grid, BLOCK, 0, stream>>>(productDistances, centroidDistances, \
outDistances, outIndices, \
k, Limits<float>::getMax(), i); \
} while (0)
// block size 128 for everything <= 1024
if (k <= 32) {
RUN_L2_SELECT(128, 32, 2);
} else if (k <= 64) {
RUN_L2_SELECT(128, 64, 3);
} else if (k <= 128) {
RUN_L2_SELECT(128, 128, 3);
} else if (k <= 256) {
RUN_L2_SELECT(128, 256, 4);
} else if (k <= 512) {
RUN_L2_SELECT(128, 512, 8);
} else if (k <= 1024) {
RUN_L2_SELECT(128, 1024, 8);
#if GPU_MAX_SELECTION_K >= 2048
} else if (k <= 2048) {
// smaller block for less shared memory
RUN_L2_SELECT(64, 2048, 8);
#endif
} else {
FAISS_ASSERT(false);
}
float* tmpDistances = new float[outDistances.getSize(0) * outDistances.getSize(1)];
int* tmpIndices = new int[outDistances.getSize(0) * outDistances.getSize(1)];
fromDevice<float,2>(outDistances, tmpDistances, stream);
fromDevice<int,2>(outIndices, tmpIndices, stream);
for(int j = 0; j < curQuerySize; j ++) {
for(int m = 0; m < k; m ++) {
hostOutDistances[(startPos + j) * nprobe + i + m] = tmpDistances[k * j + m];
hostOutIndices[(startPos + j) * nprobe + i + m] = tmpIndices[k * j + m];
}
}
delete [] tmpDistances;
delete [] tmpIndices;
}
CUDA_TEST_ERROR();
}
void runL2SelectMin(Tensor<float, 2, true>& productDistances,
Tensor<float, 1, true>& centroidDistances,
Tensor<uint8_t, 1, true>& bitset,
Tensor<float, 2, true>& outDistances,
Tensor<int, 2, true>& outIndices,
int k,
cudaStream_t stream) {
runL2SelectMin<float>(productDistances,
centroidDistances,
bitset,
outDistances,
outIndices,
k,
stream);
}
} } // namespace
|
5308c09ffb45cbaaa984f715664b9bf7c5c22857.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#define TILE_WIDTH 512
#define index(i, j, N) ((i)*(N+1)) + (j)
int maximum(int a, int b) {
return (a > b)? a : b;
}
__global__ void knapsackKernel(int *profits, int *weights, int *input_f, int *output_f, int capacity, int c_min, int k){
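// One thread per capacity value c: for c in [c_min, capacity], apply the 0/1 knapsack recurrence
// for item k, output_f[c] = max(input_f[c], input_f[c - weights[k-1]] + profits[k-1]).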
int c = blockIdx.x*512 + threadIdx.x;
if(c<c_min || c>capacity){return;}
if(input_f[c] < input_f[c-weights[k-1]]+profits[k-1]){
output_f[c] = input_f[c-weights[k-1]]+profits[k-1];
}
else{
output_f[c] = input_f[c];
}
}
void knapsackCuda(int *profits, int *weights, int capacity, int n, int *f0, int *f1){
int *dev_profits, *dev_weights, *dev_f0, *dev_f1;
int sumW = 0;
int i,c;
for(i=0; i<n; i++){
sumW = sumW + weights[i];
}
hipMalloc((void**)&dev_f0, (capacity+1)*sizeof(int));
hipMalloc((void**)&dev_f1, (capacity+1)*sizeof(int));
hipMalloc((void**)&dev_profits, n*sizeof(int));
hipMalloc((void**)&dev_weights, n*sizeof(int));
hipMemcpy(dev_profits, profits, n*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_weights, weights, n*sizeof(int), hipMemcpyHostToDevice);
hipMemset(dev_f0, 0, (capacity+1)*sizeof(int));
hipMemset(dev_f1, 0, (capacity+1)*sizeof(int));
/*int p;
for(p=0; p<=capacity; p++) printf("%d ", dev_f1[p]);
printf("\n");*/
int k=1;
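// Double-buffer the DP rows: even k reads dev_f0 and writes dev_f1, odd k reads dev_f1 and writes
// dev_f0; the device-to-device copy carries over the entries the kernel does not touch (c < c_min).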
while(k<=n){
sumW = sumW - weights[k-1];
c = maximum(capacity-sumW, weights[k-1]);
//printf("k = %d\n", k);
//printf("%d\n", c);
dim3 dimGrid(ceil(1.0*(capacity-0+1)/TILE_WIDTH), 1, 1);
dim3 dimBlock(TILE_WIDTH,1,1);
if(k%2==0){
hipMemcpy(dev_f1, dev_f0, (capacity+1)*sizeof(int), hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( knapsackKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_profits, dev_weights, dev_f0, dev_f1, capacity, c, k);
//hipDeviceSynchronize();
/*hipMemcpy(f1, dev_f1, (capacity+1)*sizeof(int), hipMemcpyDeviceToHost);
int p;
for(p=0; p<=capacity; p++) printf("%d ", f1[p]);
printf("\n");*/
}
else{
hipMemcpy(dev_f0, dev_f1, (capacity+1)*sizeof(int), hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( knapsackKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_profits, dev_weights, dev_f1, dev_f0, capacity, c, k);
//hipDeviceSynchronize();
/*hipMemcpy(f0, dev_f0, (capacity+1)*sizeof(int), hipMemcpyDeviceToHost);
int p;
for(p=0; p<=capacity; p++) printf("%d ", f0[p]);
printf("\n");*/
}
k++;
}
hipMemcpy(f0, dev_f0, (capacity+1)*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(f1, dev_f1, (capacity+1)*sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_profits);
hipFree(dev_weights);
hipFree(dev_f0);
hipFree(dev_f1);
}
int main() {
int i;
int n = 10000;
int *profits = (int*)malloc(n*sizeof(int));
int *weights = (int*)malloc(n*sizeof(int));
FILE *myFile;
myFile = fopen("rand.txt", "r");
for (i = 0; i < n; i++)
{
fscanf(myFile, "%d %d", &profits[i], &weights[i]);
}
fclose(myFile);
int capacity = 0;
for(i=0; i<n; i++){
capacity = capacity + weights[i];
}
capacity = capacity/2;
//capacity = 1000;
printf("capacity = %d\n", capacity);
int *f0 = (int *)malloc((capacity+1)*sizeof(int));
int *f1 = (int *)malloc((capacity+1)*sizeof(int));
knapsackCuda(profits, weights, capacity, n, f0, f1);
if(n%2==0){
//int p;
//for(p=0; p<=capacity; p++){ printf("%d ", f1[p]);}
printf("%d\n", f1[capacity]);
}
else{
//int p;
//for(p=0; p<=capacity; p++) {printf("%d ", f0[p]);}
printf("%d\n", f0[capacity]);
}
}
|
5308c09ffb45cbaaa984f715664b9bf7c5c22857.cu
|
#include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#define TILE_WIDTH 512
#define index(i, j, N) ((i)*(N+1)) + (j)
int maximum(int a, int b) {
return (a > b)? a : b;
}
__global__ void knapsackKernel(int *profits, int *weights, int *input_f, int *output_f, int capacity, int c_min, int k){
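// One thread per capacity value c: for c in [c_min, capacity], apply the 0/1 knapsack recurrence
// for item k, output_f[c] = max(input_f[c], input_f[c - weights[k-1]] + profits[k-1]).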
int c = blockIdx.x*512 + threadIdx.x;
if(c<c_min || c>capacity){return;}
if(input_f[c] < input_f[c-weights[k-1]]+profits[k-1]){
output_f[c] = input_f[c-weights[k-1]]+profits[k-1];
}
else{
output_f[c] = input_f[c];
}
}
void knapsackCuda(int *profits, int *weights, int capacity, int n, int *f0, int *f1){
int *dev_profits, *dev_weights, *dev_f0, *dev_f1;
int sumW = 0;
int i,c;
for(i=0; i<n; i++){
sumW = sumW + weights[i];
}
cudaMalloc((void**)&dev_f0, (capacity+1)*sizeof(int));
cudaMalloc((void**)&dev_f1, (capacity+1)*sizeof(int));
cudaMalloc((void**)&dev_profits, n*sizeof(int));
cudaMalloc((void**)&dev_weights, n*sizeof(int));
cudaMemcpy(dev_profits, profits, n*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_weights, weights, n*sizeof(int), cudaMemcpyHostToDevice);
cudaMemset(dev_f0, 0, (capacity+1)*sizeof(int));
cudaMemset(dev_f1, 0, (capacity+1)*sizeof(int));
/*int p;
for(p=0; p<=capacity; p++) printf("%d ", dev_f1[p]);
printf("\n");*/
int k=1;
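// Double-buffer the DP rows: even k reads dev_f0 and writes dev_f1, odd k reads dev_f1 and writes
// dev_f0; the device-to-device copy carries over the entries the kernel does not touch (c < c_min).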
while(k<=n){
sumW = sumW - weights[k-1];
c = maximum(capacity-sumW, weights[k-1]);
//printf("k = %d\n", k);
//printf("%d\n", c);
dim3 dimGrid(ceil(1.0*(capacity-0+1)/TILE_WIDTH), 1, 1);
dim3 dimBlock(TILE_WIDTH,1,1);
if(k%2==0){
cudaMemcpy(dev_f1, dev_f0, (capacity+1)*sizeof(int), cudaMemcpyDeviceToDevice);
knapsackKernel<<<dimGrid, dimBlock>>>(dev_profits, dev_weights, dev_f0, dev_f1, capacity, c, k);
//cudaDeviceSynchronize();
/*cudaMemcpy(f1, dev_f1, (capacity+1)*sizeof(int), cudaMemcpyDeviceToHost);
int p;
for(p=0; p<=capacity; p++) printf("%d ", f1[p]);
printf("\n");*/
}
else{
cudaMemcpy(dev_f0, dev_f1, (capacity+1)*sizeof(int), cudaMemcpyDeviceToDevice);
knapsackKernel<<<dimGrid, dimBlock>>>(dev_profits, dev_weights, dev_f1, dev_f0, capacity, c, k);
//cudaDeviceSynchronize();
/*cudaMemcpy(f0, dev_f0, (capacity+1)*sizeof(int), cudaMemcpyDeviceToHost);
int p;
for(p=0; p<=capacity; p++) printf("%d ", f0[p]);
printf("\n");*/
}
k++;
}
cudaMemcpy(f0, dev_f0, (capacity+1)*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(f1, dev_f1, (capacity+1)*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_profits);
cudaFree(dev_weights);
cudaFree(dev_f0);
cudaFree(dev_f1);
}
int main() {
int i;
int n = 10000;
int *profits = (int*)malloc(n*sizeof(int));
int *weights = (int*)malloc(n*sizeof(int));
FILE *myFile;
myFile = fopen("rand.txt", "r");
for (i = 0; i < n; i++)
{
fscanf(myFile, "%d %d", &profits[i], &weights[i]);
}
fclose(myFile);
int capacity = 0;
for(i=0; i<n; i++){
capacity = capacity + weights[i];
}
capacity = capacity/2;
//capacity = 1000;
printf("capacity = %d\n", capacity);
int *f0 = (int *)malloc((capacity+1)*sizeof(int));
int *f1 = (int *)malloc((capacity+1)*sizeof(int));
knapsackCuda(profits, weights, capacity, n, f0, f1);
if(n%2==0){
//int p;
//for(p=0; p<=capacity; p++){ printf("%d ", f1[p]);}
printf("%d\n", f1[capacity]);
}
else{
//int p;
//for(p=0; p<=capacity; p++) {printf("%d ", f0[p]);}
printf("%d\n", f0[capacity]);
}
}
|
77862129b43c481c86fa7d97ce9f073de89a70a3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/hip_runtime.h"
#include <stdio.h>
#define blocksize 256
int getblock(int length) {
int block = length / blocksize;
if ((length%blocksize) > 0)block++;
return block;
}
int getlg2(int length);
void check(int *a, int length) {
printf("//////////////////////////////////////////////\n");
int t = 0;
for (int i = 0; i < length; i++) {
if (i < (length - 1)) {
if (a[i] > a[i + 1])
printf("a[%d] = %d , a[%d] = %d\n", i, a[i], i + 1, a[i + 1]);
}
else printf("succeed\n");
}
}
__device__ __host__ int insert0(int a, int *array,unsigned int star,unsigned int end) {
int p = 0;
while ((end - star) > 1) {
p = (star + end) / 2;
if (a > array[p])star = p;
else end = p;
}
p = (end + star)/2;
if (a > array[p])star = p;
else end = p;
if (a > array[end])end++;
return end;
}
__device__ __host__ int insert1(int a, int *array, unsigned int star, unsigned int end) {
int p = 0;
while ((end - star) > 1) {
p = (star + end) / 2;
if (a >= array[p])star = p;
else end = p;
}
p = (end + star) / 2;
if (a >= array[p])star = p;
else end = p;
if (a >= array[end])end++;
return end;
}
__global__ void sort_int_shared(int *a_map, int *a_dev, int length) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int a_s[blocksize];
// First load the values to be sorted into shared memory.
if(tid<length)a_s[threadIdx.x] = a_map[tid];
else a_s[threadIdx.x] = INT_MAX;
int r = 0;
__syncthreads();
// Split the thread block into two halves that execute different work.
bool flag = (threadIdx.x < (blocksize / 2));
tid = tid % (blocksize / 2);
// Sort the two-element runs (adjacent pairs).
if (flag) r = a_s[tid * 2];
else r = a_s[tid * 2 + 1];
__syncthreads();
if (flag) {
if(r > a_s[tid * 2 + 1])a_s[tid*2+1] = r;
}
else {
if (r < a_s[tid * 2])a_s[tid * 2] = r;
}
__syncthreads(); // Store the result and synchronize.
for (int i = 2; i < blocksize; i *= 2) {
int pair_star = (tid / i)*i*2;
int offset = tid%i;
r = a_s[pair_star + offset + ((!flag)*i)];
int p;
if (flag) {
p = insert0(r, a_s, pair_star + i, pair_star + (2 * i) - 1);
p = p - i;
}
else
{
p = insert1(r, a_s, pair_star, pair_star + i - 1);
}
__syncthreads();
p += offset;
a_s[p] = r;
__syncthreads();
}
a_dev[blockIdx.x * blockDim.x + threadIdx.x] = a_s[threadIdx.x];
}
__global__ void short_int_global(int *a_dev, int *a_next, int i) {
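// Merge pass i: sorted runs of 2^i blocks are merged in pairs. Each thread binary-searches the
// rank of its element in the partner run (insert0/insert1 differ only in how ties break) and
// scatters it to its merged position in a_next; the first branch handles the trailing,
// possibly incomplete pair.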
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int r = a_dev[tid];
int p = tid;
bool flag = ((blockIdx.x >> i)&1);
flag = !flag;
int pair_length = 2 << i;
int pair_tid = blockIdx.x / pair_length;
if (flag)
{
if ((blockIdx.x >> (i + 1)) == (gridDim.x >> (i + 1))) {
int size = gridDim.x % (2 << i);
size -= (pair_length / 2);
if (size > 0) {
int star = pair_tid*pair_length + (pair_length / 2);
int end = star + size;
star *= blocksize;
end *= blocksize;
end--;
p = insert0(r, a_dev, star, end);
p = p - blocksize*(pair_length / 2);
p += tid - (pair_tid*pair_length*blocksize);
}
a_next[p] = r;
}
else
{
int star = pair_tid*pair_length + (pair_length / 2);
int end = star + (pair_length/2);
star *= blocksize;
end *= blocksize;
end--;
p = insert0(r, a_dev, star, end);
p = p - blocksize*(pair_length / 2);
p += tid - (pair_tid*pair_length*blocksize);
a_next[p] = r;
}
}
else
{
int star = pair_tid*pair_length;
int end = star + (pair_length / 2);
star *= blocksize;
end *= blocksize;
end--;
p = insert1(r, a_dev, star, end);
p += tid - ((pair_tid*pair_length + (pair_length / 2))*blocksize);
a_next[p] = r;
}
}
void sort_int(int *a, int length) {
int *a_dev;
int *a_map;
hipMalloc((void**)&a_dev, getblock(length) * blocksize * sizeof(int));
hipHostGetDevicePointer((void **)&a_map, (void *)a, 0);
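// a is mapped pinned host memory (allocated in genarray), so a_map is its device-visible alias
// used by the first sorting kernel.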
hipLaunchKernelGGL(( sort_int_shared), dim3(getblock(length)),dim3(blocksize), 0, 0, a_map, a_dev, length);
hipMemcpy(a, a_dev, length * sizeof(int), hipMemcpyDeviceToHost);
int *a_next;
hipMalloc((void**)&a_next, getblock(length) * blocksize * sizeof(int));
int times = getlg2(getblock(length));
for (int i = 0; i < times; i++) {
hipLaunchKernelGGL(( short_int_global) , dim3(getblock(length)), dim3(blocksize) , 0, 0, a_dev, a_next, i);
//hipMemcpy(a, a_next, length * sizeof(int), hipMemcpyDeviceToHost);
//check(a, length);
if (i == (times - 1)) {
hipMemcpy(a, a_next, length * sizeof(int), hipMemcpyDeviceToHost);
break;
}
int *c;
c = a_dev;
a_dev = a_next;
a_next = c;
}
hipFree(a_next);
hipFree(a_dev);
//hipMemcpy(a,a_next,length*sizeof(int), hipMemcpyDeviceToHost);
//check(a, length);
}
int getlg2(int length) {
int a = 0, b = 0;
for (int i = 0; i < 32; i++) {
if (((length >>i )&1) == 1) {
//printf("i = %d\n", i);
a = i;
b++;
}
}
if (b > 1)a++;
return a;
}
int* genarray(int length) {
int *a;
hipHostMalloc((void**)&a, length*sizeof(int), hipHostMallocMapped);
for (int i = 0; i < length; i++) {
a[i] = length - i;
};
return a;
}
int main() {
int length = 134217728;
int *a = genarray(length);
/*for (int i = 0; i < length; i++) {
printf("a[%d] = %d\n", i, a[i]);
}*/
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// record start event on the default stream
hipEventRecord(start);
sort_int(a,length);
hipEventRecord(stop);
// wait until the stop event completes
hipEventSynchronize(stop);
// calculate the elapsed time between two events
float time;
hipEventElapsedTime(&time, start, stop);
printf("time = %f\n",time);
getchar();
}
|
77862129b43c481c86fa7d97ce9f073de89a70a3.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda.h"
#include <stdio.h>
#define blocksize 256
int getblock(int length) {
int block = length / blocksize;
if ((length%blocksize) > 0)block++;
return block;
}
int getlg2(int length);
void check(int *a, int length) {
printf("//////////////////////////////////////////////\n");
int t = 0;
for (int i = 0; i < length; i++) {
if (i < (length - 1)) {
if (a[i] > a[i + 1])
printf("a[%d] = %d , a[%d] = %d\n", i, a[i], i + 1, a[i + 1]);
}
else printf("succeed\n");
}
}
__device__ __host__ int insert0(int a, int *array,unsigned int star,unsigned int end) {
int p = 0;
while ((end - star) > 1) {
p = (star + end) / 2;
if (a > array[p])star = p;
else end = p;
}
p = (end + star)/2;
if (a > array[p])star = p;
else end = p;
if (a > array[end])end++;
return end;
}
__device__ __host__ int insert1(int a, int *array, unsigned int star, unsigned int end) {
int p = 0;
while ((end - star) > 1) {
p = (star + end) / 2;
if (a >= array[p])star = p;
else end = p;
}
p = (end + star) / 2;
if (a >= array[p])star = p;
else end = p;
if (a >= array[end])end++;
return end;
}
__global__ void sort_int_shared(int *a_map, int *a_dev, int length) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int a_s[blocksize];
// First load the values to be sorted into shared memory.
if(tid<length)a_s[threadIdx.x] = a_map[tid];
else a_s[threadIdx.x] = INT_MAX;
int r = 0;
__syncthreads();
// Split the thread block into two halves that execute different work.
bool flag = (threadIdx.x < (blocksize / 2));
tid = tid % (blocksize / 2);
// Sort the two-element runs (adjacent pairs).
if (flag) r = a_s[tid * 2];
else r = a_s[tid * 2 + 1];
__syncthreads();
if (flag) {
if(r > a_s[tid * 2 + 1])a_s[tid*2+1] = r;
}
else {
if (r < a_s[tid * 2])a_s[tid * 2] = r;
}
__syncthreads(); // Store the result and synchronize.
for (int i = 2; i < blocksize; i *= 2) {
int pair_star = (tid / i)*i*2;
int offset = tid%i;
r = a_s[pair_star + offset + ((!flag)*i)];
int p;
if (flag) {
p = insert0(r, a_s, pair_star + i, pair_star + (2 * i) - 1);
p = p - i;
}
else
{
p = insert1(r, a_s, pair_star, pair_star + i - 1);
}
__syncthreads();
p += offset;
a_s[p] = r;
__syncthreads();
}
a_dev[blockIdx.x * blockDim.x + threadIdx.x] = a_s[threadIdx.x];
}
__global__ void short_int_global(int *a_dev, int *a_next, int i) {
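// Merge pass i: sorted runs of 2^i blocks are merged in pairs. Each thread binary-searches the
// rank of its element in the partner run (insert0/insert1 differ only in how ties break) and
// scatters it to its merged position in a_next; the first branch handles the trailing,
// possibly incomplete pair.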
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int r = a_dev[tid];
int p = tid;
bool flag = ((blockIdx.x >> i)&1);
flag = !flag;
int pair_length = 2 << i;
int pair_tid = blockIdx.x / pair_length;
if (flag)
{
if ((blockIdx.x >> (i + 1)) == (gridDim.x >> (i + 1))) {
int size = gridDim.x % (2 << i);
size -= (pair_length / 2);
if (size > 0) {
int star = pair_tid*pair_length + (pair_length / 2);
int end = star + size;
star *= blocksize;
end *= blocksize;
end--;
p = insert0(r, a_dev, star, end);
p = p - blocksize*(pair_length / 2);
p += tid - (pair_tid*pair_length*blocksize);
}
a_next[p] = r;
}
else
{
int star = pair_tid*pair_length + (pair_length / 2);
int end = star + (pair_length/2);
star *= blocksize;
end *= blocksize;
end--;
p = insert0(r, a_dev, star, end);
p = p - blocksize*(pair_length / 2);
p += tid - (pair_tid*pair_length*blocksize);
a_next[p] = r;
}
}
else
{
int star = pair_tid*pair_length;
int end = star + (pair_length / 2);
star *= blocksize;
end *= blocksize;
end--;
p = insert1(r, a_dev, star, end);
p += tid - ((pair_tid*pair_length + (pair_length / 2))*blocksize);
a_next[p] = r;
}
}
void sort_int(int *a, int length) {
int *a_dev;
int *a_map;
cudaMalloc((void**)&a_dev, getblock(length) * blocksize * sizeof(int));
cudaHostGetDevicePointer((void **)&a_map, (void *)a, 0);
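// a is mapped pinned host memory (allocated in genarray), so a_map is its device-visible alias
// used by the first sorting kernel.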
sort_int_shared<<<getblock(length),blocksize>>>(a_map, a_dev, length);
cudaMemcpy(a, a_dev, length * sizeof(int), cudaMemcpyDeviceToHost);
int *a_next;
cudaMalloc((void**)&a_next, getblock(length) * blocksize * sizeof(int));
int times = getlg2(getblock(length));
for (int i = 0; i < times; i++) {
short_int_global <<<getblock(length), blocksize >>>(a_dev, a_next, i);
//cudaMemcpy(a, a_next, length * sizeof(int), cudaMemcpyDeviceToHost);
//check(a, length);
if (i == (times - 1)) {
cudaMemcpy(a, a_next, length * sizeof(int), cudaMemcpyDeviceToHost);
break;
}
int *c;
c = a_dev;
a_dev = a_next;
a_next = c;
}
cudaFree(a_next);
cudaFree(a_dev);
//cudaMemcpy(a,a_next,length*sizeof(int), cudaMemcpyDeviceToHost);
//check(a, length);
}
int getlg2(int length) {
int a = 0, b = 0;
for (int i = 0; i < 32; i++) {
if (((length >>i )&1) == 1) {
//printf("i = %d\n", i);
a = i;
b++;
}
}
if (b > 1)a++;
return a;
}
int* genarray(int length) {
int *a;
cudaHostAlloc((void**)&a, length*sizeof(int), cudaHostAllocMapped);
for (int i = 0; i < length; i++) {
a[i] = length - i;
};
return a;
}
int main() {
int length = 134217728;
int *a = genarray(length);
/*for (int i = 0; i < length; i++) {
printf("a[%d] = %d\n", i, a[i]);
}*/
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// record start event on the default stream
cudaEventRecord(start);
sort_int(a,length);
cudaEventRecord(stop);
// wait until the stop event completes
cudaEventSynchronize(stop);
// calculate the elapsed time between two events
float time;
cudaEventElapsedTime(&time, start, stop);
printf("time = %f\n",time);
getchar();
}
|
80c3cb9b55af1c6ded68c673fe89b9447db42a08.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_shared_worker.cuh"
__global__ void gpu_shared_filter(unsigned char * image_origininal, unsigned char * image_result, unsigned int width, unsigned int height, int division_coef)
{
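// Each thread applies a 3x3 convolution at one pixel of the (width+2)-padded input, stages the
// result in shared memory, and writes it to image_result.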
int current_width = blockIdx.y * blockDim.y + threadIdx.y;
int current_height = blockIdx.x * blockDim.x + threadIdx.x;
int filter[3][3] =
{
{ 1,-2,1 },{ -2,5,-2 },{ 1,-2,1 }
};
__shared__ unsigned char block[16][16];
block[threadIdx.y][threadIdx.x] = (
(
image_origininal[ current_height *(width + 2) + current_width ] * (filter[0][0])
+ image_origininal[(current_height )*(width + 2) + (current_width + 1)] * (filter[0][1])
+ image_origininal[(current_height )*(width + 2) + (current_width + 2)] * (filter[0][2])
+ image_origininal[(current_height + 1)*(width + 2) + (current_width )] * (filter[1][0])
+ image_origininal[(current_height + 1)*(width + 2) + (current_width + 1)] * (filter[1][1])
+ image_origininal[(current_height + 1)*(width + 2) + (current_width + 2)] * (filter[1][2])
+ image_origininal[(current_height + 2)*(width + 2) + (current_width )] * (filter[2][0])
+ image_origininal[(current_height + 2)*(width + 2) + (current_width + 1)] * (filter[2][1])
+ image_origininal[(current_height + 2)*(width + 2) + (current_width + 2)] * (filter[2][2])
)
/ division_coef
);
image_result[current_height * width + current_width] = block[threadIdx.y][threadIdx.x];
}
Result perform_GPU_shared_worker(Task task)
{
hipEvent_t start_time, stop_time;
hipEventCreate(&start_time);
hipEventCreate(&stop_time);
unsigned char* image_original;
unsigned char* image_result;
auto cuda_status =
hipMalloc((void**)(&image_original),
(task.image.matrix.height) * (task.image.matrix.width) * sizeof(unsigned char));
if (cuda_status != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
exit(EXIT_FAILURE);
}
cuda_status = hipMemcpy(image_original,
task.image.matrix.matrix,
(task.image.matrix.height) * (task.image.matrix.width) *
sizeof(unsigned char), hipMemcpyHostToDevice);
if (cuda_status != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
exit(EXIT_FAILURE);
}
cuda_status =
hipMalloc((void**)(&image_result),
(task.work_matrix.height) * (task.work_matrix.width) * sizeof(unsigned char));
if (cuda_status != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
exit(EXIT_FAILURE);
}
dim3 block(8, 8);
dim3 grid;
grid.x = task.work_matrix.height / block.x;
if (task.work_matrix.height % block.x != 0)
grid.x += 1;
grid.y = task.work_matrix.width / block.y;
if (task.work_matrix.width % block.y != 0)
grid.y += 1;
hipEventRecord(start_time);
gpu_shared_filter << <grid, block >> > (image_original, image_result, task.work_matrix.width, task.work_matrix.height, task.division_coef);
hipDeviceSynchronize();
hipEventRecord(stop_time);
hipEventSynchronize(stop_time);
Result result;
hipEventElapsedTime(&result.time, start_time, stop_time);
cuda_status = hipMemcpy(task.work_matrix.matrix,
image_result,
(task.work_matrix.height) * (task.work_matrix.width) * sizeof(unsigned char), hipMemcpyDeviceToHost);
if (cuda_status != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
exit(EXIT_FAILURE);
}
result.result = task.work_matrix;
return result;
}
|
80c3cb9b55af1c6ded68c673fe89b9447db42a08.cu
|
#include "gpu_shared_worker.cuh"
__global__ void gpu_shared_filter(unsigned char * image_origininal, unsigned char * image_result, unsigned int width, unsigned int height, int division_coef)
{
int current_width = blockIdx.y * blockDim.y + threadIdx.y;
int current_height = blockIdx.x * blockDim.x + threadIdx.x;
int filter[3][3] =
{
{ 1,-2,1 },{ -2,5,-2 },{ 1,-2,1 }
};
__shared__ unsigned char block[16][16];
block[threadIdx.y][threadIdx.x] = (
(
image_origininal[ current_height *(width + 2) + current_width ] * (filter[0][0])
+ image_origininal[(current_height )*(width + 2) + (current_width + 1)] * (filter[0][1])
+ image_origininal[(current_height )*(width + 2) + (current_width + 2)] * (filter[0][2])
+ image_origininal[(current_height + 1)*(width + 2) + (current_width )] * (filter[1][0])
+ image_origininal[(current_height + 1)*(width + 2) + (current_width + 1)] * (filter[1][1])
+ image_origininal[(current_height + 1)*(width + 2) + (current_width + 2)] * (filter[1][2])
+ image_origininal[(current_height + 2)*(width + 2) + (current_width )] * (filter[2][0])
+ image_origininal[(current_height + 2)*(width + 2) + (current_width + 1)] * (filter[2][1])
+ image_origininal[(current_height + 2)*(width + 2) + (current_width + 2)] * (filter[2][2])
)
/ division_coef
);
image_result[current_height * width + current_width] = block[threadIdx.y][threadIdx.x];
}
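// Host-side driver: copies the source image to the device, launches gpu_shared_filter over a
// grid rounded up to cover every output pixel, times the kernel with CUDA events and copies
// the filtered image back into task.work_matrix.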
Result perform_GPU_shared_worker(Task task)
{
cudaEvent_t start_time, stop_time;
cudaEventCreate(&start_time);
cudaEventCreate(&stop_time);
unsigned char* image_original;
unsigned char* image_result;
auto cuda_status =
cudaMalloc((void**)(&image_original),
(task.image.matrix.height) * (task.image.matrix.width) * sizeof(unsigned char));
if (cuda_status != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed!");
exit(EXIT_FAILURE);
}
cuda_status = cudaMemcpy(image_original,
task.image.matrix.matrix,
(task.image.matrix.height) * (task.image.matrix.width) *
sizeof(unsigned char), cudaMemcpyHostToDevice);
if (cuda_status != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed!");
exit(EXIT_FAILURE);
}
cuda_status =
cudaMalloc((void**)(&image_result),
(task.work_matrix.height) * (task.work_matrix.width) * sizeof(unsigned char));
if (cuda_status != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed!");
exit(EXIT_FAILURE);
}
dim3 block(8, 8);
dim3 grid;
grid.x = task.work_matrix.height / block.x;
if (task.work_matrix.height % block.x != 0)
grid.x += 1;
grid.y = task.work_matrix.width / block.y;
if (task.work_matrix.width % block.y != 0)
grid.y += 1;
cudaEventRecord(start_time);
gpu_shared_filter << <grid, block >> > (image_original, image_result, task.work_matrix.width, task.work_matrix.height, task.division_coef);
cudaDeviceSynchronize();
cudaEventRecord(stop_time);
cudaEventSynchronize(stop_time);
Result result;
cudaEventElapsedTime(&result.time, start_time, stop_time);
cuda_status = cudaMemcpy(task.work_matrix.matrix,
image_result,
(task.work_matrix.height) * (task.work_matrix.width) * sizeof(unsigned char), cudaMemcpyDeviceToHost);
if (cuda_status != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed!");
exit(EXIT_FAILURE);
}
result.result = task.work_matrix;
return result;
}
|
ccd76f3b6fb9e304028b5072610b10f33363828c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//=======================================================================
// Copyright (c) 2017 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
// http://opensource.org/licenses/MIT)
//=======================================================================
#include "egblas/axmy.hpp"
template <typename T>
__global__ void axmy_kernel(size_t n, T alpha, const T* x, size_t incx, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = alpha * x[incx * index] * y[incy * index];
}
}
template <>
__global__ void axmy_kernel(size_t n, hipComplex alpha, const hipComplex* x, size_t incx, hipComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = cuCmulf(cuCmulf(alpha, x[incx * index]), y[incy * index]);
}
}
template <>
__global__ void axmy_kernel(size_t n, hipDoubleComplex alpha, const hipDoubleComplex* x, size_t incx, hipDoubleComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = cuCmul(cuCmul(alpha, x[incx * index]), y[incy * index]);
}
}
template <typename T>
__global__ void axmy_kernel1(size_t n, const T* x, size_t incx, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = x[incx * index] * y[incy * index];
}
}
template <>
__global__ void axmy_kernel1(size_t n, const hipComplex* x, size_t incx, hipComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = cuCmulf(x[incx * index], y[incy * index]);
}
}
template <>
__global__ void axmy_kernel1(size_t n, const hipDoubleComplex* x, size_t incx, hipDoubleComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = cuCmul(x[incx * index], y[incy * index]);
}
}
template <typename T>
__global__ void axmy_kernel0(size_t n, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = T(0);
}
}
template <>
__global__ void axmy_kernel0(size_t n, hipComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = make_cuComplex(0, 0);
}
}
template <>
__global__ void axmy_kernel0(size_t n, hipDoubleComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = make_cuDoubleComplex(0, 0);
}
}
template <typename T>
void axmy_kernel_run(size_t n, T alpha, const T* x, size_t incx, T* y, size_t incy) {
int blockSize;
int minGridSize;
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, axmy_kernel<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( axmy_kernel<T>), dim3(gridSize), dim3(blockSize), 0, 0, n, alpha, x, incx, y, incy);
hipDeviceSynchronize();
}
template <typename T>
void axmy_kernel1_run(size_t n, const T* x, size_t incx, T* y, size_t incy) {
int blockSize;
int minGridSize;
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, axmy_kernel1<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( axmy_kernel1<T>), dim3(gridSize), dim3(blockSize), 0, 0, n, x, incx, y, incy);
hipDeviceSynchronize();
}
template <typename T>
void axmy_kernel0_run(size_t n, T* y, size_t incy) {
int blockSize;
int minGridSize;
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, axmy_kernel0<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( axmy_kernel0<T>), dim3(gridSize), dim3(blockSize), 0, 0, n, y, incy);
hipDeviceSynchronize();
}
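// The egblas_*axmy entry points below pick a kernel based on alpha: alpha == 1 uses the plain
// element-wise product, alpha == 0 simply zero-fills y, and any other value runs the general
// y[i] = alpha * x[i] * y[i] kernel.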
void egblas_saxmy(size_t n, float alpha, const float* x, size_t incx, float* y, size_t incy) {
if (alpha == 1.0f) {
axmy_kernel1_run(n, x, incx, y, incy);
} else if (alpha == 0.0f) {
axmy_kernel0_run(n, y, incy);
} else {
axmy_kernel_run(n, alpha, x, incx, y, incy);
}
}
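// Usage sketch (assumes x and y are device arrays holding n floats with unit stride):
//   egblas_saxmy(n, 2.0f, x, 1, y, 1);   // y[i] <- 2.0f * x[i] * y[i]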
void egblas_daxmy(size_t n, double alpha, const double* x, size_t incx, double* y, size_t incy) {
if (alpha == 1.0) {
axmy_kernel1_run(n, x, incx, y, incy);
} else if (alpha == 0.0) {
axmy_kernel0_run(n, y, incy);
} else {
axmy_kernel_run(n, alpha, x, incx, y, incy);
}
}
void egblas_caxmy(size_t n, hipComplex alpha, const hipComplex* x, size_t incx, hipComplex* y, size_t incy) {
if (alpha.x == 1.0f && alpha.y == 0.0f) {
axmy_kernel1_run(n, x, incx, y, incy);
} else if (alpha.x == 0.0f && alpha.y == 0.0f) {
axmy_kernel0_run(n, y, incy);
} else {
axmy_kernel_run(n, alpha, x, incx, y, incy);
}
}
void egblas_zaxmy(size_t n, hipDoubleComplex alpha, const hipDoubleComplex* x, size_t incx, hipDoubleComplex* y, size_t incy) {
if (alpha.x == 1.0 && alpha.y == 0.0) {
axmy_kernel1_run(n, x, incx, y, incy);
} else if (alpha.x == 0.0 && alpha.y == 0.0) {
axmy_kernel0_run(n, y, incy);
} else {
axmy_kernel_run(n, alpha, x, incx, y, incy);
}
}
|
ccd76f3b6fb9e304028b5072610b10f33363828c.cu
|
//=======================================================================
// Copyright (c) 2017 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
// http://opensource.org/licenses/MIT)
//=======================================================================
#include "egblas/axmy.hpp"
template <typename T>
__global__ void axmy_kernel(size_t n, T alpha, const T* x, size_t incx, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = alpha * x[incx * index] * y[incy * index];
}
}
template <>
__global__ void axmy_kernel(size_t n, cuComplex alpha, const cuComplex* x, size_t incx, cuComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = cuCmulf(cuCmulf(alpha, x[incx * index]), y[incy * index]);
}
}
template <>
__global__ void axmy_kernel(size_t n, cuDoubleComplex alpha, const cuDoubleComplex* x, size_t incx, cuDoubleComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = cuCmul(cuCmul(alpha, x[incx * index]), y[incy * index]);
}
}
template <typename T>
__global__ void axmy_kernel1(size_t n, const T* x, size_t incx, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = x[incx * index] * y[incy * index];
}
}
template <>
__global__ void axmy_kernel1(size_t n, const cuComplex* x, size_t incx, cuComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = cuCmulf(x[incx * index], y[incy * index]);
}
}
template <>
__global__ void axmy_kernel1(size_t n, const cuDoubleComplex* x, size_t incx, cuDoubleComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = cuCmul(x[incx * index], y[incy * index]);
}
}
template <typename T>
__global__ void axmy_kernel0(size_t n, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = T(0);
}
}
template <>
__global__ void axmy_kernel0(size_t n, cuComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = make_cuComplex(0, 0);
}
}
template <>
__global__ void axmy_kernel0(size_t n, cuDoubleComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = make_cuDoubleComplex(0, 0);
}
}
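// The *_run helpers query cudaOccupancyMaxPotentialBlockSize for an occupancy-friendly block
// size, derive the grid size from n / incy, and rely on the kernels' grid-stride loops to
// cover any remaining indices.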
template <typename T>
void axmy_kernel_run(size_t n, T alpha, const T* x, size_t incx, T* y, size_t incy) {
int blockSize;
int minGridSize;
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, axmy_kernel<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
axmy_kernel<T><<<gridSize, blockSize>>>(n, alpha, x, incx, y, incy);
cudaDeviceSynchronize();
}
template <typename T>
void axmy_kernel1_run(size_t n, const T* x, size_t incx, T* y, size_t incy) {
int blockSize;
int minGridSize;
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, axmy_kernel1<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
axmy_kernel1<T><<<gridSize, blockSize>>>(n, x, incx, y, incy);
cudaDeviceSynchronize();
}
template <typename T>
void axmy_kernel0_run(size_t n, T* y, size_t incy) {
int blockSize;
int minGridSize;
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, axmy_kernel0<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
axmy_kernel0<T><<<gridSize, blockSize>>>(n, y, incy);
cudaDeviceSynchronize();
}
void egblas_saxmy(size_t n, float alpha, const float* x, size_t incx, float* y, size_t incy) {
if (alpha == 1.0f) {
axmy_kernel1_run(n, x, incx, y, incy);
} else if (alpha == 0.0f) {
axmy_kernel0_run(n, y, incy);
} else {
axmy_kernel_run(n, alpha, x, incx, y, incy);
}
}
void egblas_daxmy(size_t n, double alpha, const double* x, size_t incx, double* y, size_t incy) {
if (alpha == 1.0) {
axmy_kernel1_run(n, x, incx, y, incy);
} else if (alpha == 0.0) {
axmy_kernel0_run(n, y, incy);
} else {
axmy_kernel_run(n, alpha, x, incx, y, incy);
}
}
void egblas_caxmy(size_t n, cuComplex alpha, const cuComplex* x, size_t incx, cuComplex* y, size_t incy) {
if (alpha.x == 1.0f && alpha.y == 0.0f) {
axmy_kernel1_run(n, x, incx, y, incy);
} else if (alpha.x == 0.0f && alpha.y == 0.0f) {
axmy_kernel0_run(n, y, incy);
} else {
axmy_kernel_run(n, alpha, x, incx, y, incy);
}
}
void egblas_zaxmy(size_t n, cuDoubleComplex alpha, const cuDoubleComplex* x, size_t incx, cuDoubleComplex* y, size_t incy) {
if (alpha.x == 1.0 && alpha.y == 0.0) {
axmy_kernel1_run(n, x, incx, y, incy);
} else if (alpha.x == 0.0 && alpha.y == 0.0) {
axmy_kernel0_run(n, y, incy);
} else {
axmy_kernel_run(n, alpha, x, incx, y, incy);
}
}
|
7356539412277689426aeb058dc46612e1898169.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <helper_string.h>
#include "SobelFilter_kernels.h"
// Texture reference for reading image
texture<unsigned char, 2> tex;
extern __shared__ unsigned char LocalBlock[];
static hipArray *array = NULL;
#define RADIUS 1
#ifdef FIXED_BLOCKWIDTH
#define BlockWidth 80
#define SharedPitch 384
#endif
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line)
{
if (hipSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
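// ComputeSobel evaluates the horizontal and vertical Sobel responses of a 3x3 neighbourhood,
// scales |Horz| + |Vert| by fScale and clamps the result to the 0..255 byte range.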
__device__ unsigned char
ComputeSobel(unsigned char ul, // upper left
unsigned char um, // upper middle
unsigned char ur, // upper right
unsigned char ml, // middle left
unsigned char mm, // middle (unused)
unsigned char mr, // middle right
unsigned char ll, // lower left
unsigned char lm, // lower middle
unsigned char lr, // lower right
float fScale)
{
short Horz = ur + 2*mr + lr - ul - 2*ml - ll;
short Vert = ul + 2*um + ur - ll - 2*lm - lr;
short Sum = (short)(fScale*(abs((int)Horz)+abs((int)Vert)));
if (Sum < 0)
{
return 0;
}
else if (Sum > 0xff)
{
return 0xff;
}
return (unsigned char) Sum;
}
__global__ void
SobelShared(uchar4 *pSobelOriginal, unsigned short SobelPitch,
#ifndef FIXED_BLOCKWIDTH
short BlockWidth, short SharedPitch,
#endif
short w, short h, float fScale)
{
short u = 4*blockIdx.x*BlockWidth;
short v = blockIdx.y*blockDim.y + threadIdx.y;
short ib;
int SharedIdx = threadIdx.y * SharedPitch;
for (ib = threadIdx.x; ib < BlockWidth+2*RADIUS; ib += blockDim.x)
{
LocalBlock[SharedIdx+4*ib+0] = tex2D(tex,
(float)(u+4*ib-RADIUS+0), (float)(v-RADIUS));
LocalBlock[SharedIdx+4*ib+1] = tex2D(tex,
(float)(u+4*ib-RADIUS+1), (float)(v-RADIUS));
LocalBlock[SharedIdx+4*ib+2] = tex2D(tex,
(float)(u+4*ib-RADIUS+2), (float)(v-RADIUS));
LocalBlock[SharedIdx+4*ib+3] = tex2D(tex,
(float)(u+4*ib-RADIUS+3), (float)(v-RADIUS));
}
if (threadIdx.y < RADIUS*2)
{
//
// copy trailing RADIUS*2 rows of pixels into shared
//
SharedIdx = (blockDim.y+threadIdx.y) * SharedPitch;
for (ib = threadIdx.x; ib < BlockWidth+2*RADIUS; ib += blockDim.x)
{
LocalBlock[SharedIdx+4*ib+0] = tex2D(tex,
(float)(u+4*ib-RADIUS+0), (float)(v+blockDim.y-RADIUS));
LocalBlock[SharedIdx+4*ib+1] = tex2D(tex,
(float)(u+4*ib-RADIUS+1), (float)(v+blockDim.y-RADIUS));
LocalBlock[SharedIdx+4*ib+2] = tex2D(tex,
(float)(u+4*ib-RADIUS+2), (float)(v+blockDim.y-RADIUS));
LocalBlock[SharedIdx+4*ib+3] = tex2D(tex,
(float)(u+4*ib-RADIUS+3), (float)(v+blockDim.y-RADIUS));
}
}
__syncthreads();
u >>= 2; // index as uchar4 from here
uchar4 *pSobel = (uchar4 *)(((char *) pSobelOriginal)+v*SobelPitch);
SharedIdx = threadIdx.y * SharedPitch;
for (ib = threadIdx.x; ib < BlockWidth; ib += blockDim.x)
{
unsigned char pix00 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+0];
unsigned char pix01 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+1];
unsigned char pix02 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+2];
unsigned char pix10 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+0];
unsigned char pix11 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+1];
unsigned char pix12 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+2];
unsigned char pix20 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+0];
unsigned char pix21 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+1];
unsigned char pix22 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+2];
uchar4 out;
out.x = ComputeSobel(pix00, pix01, pix02,
pix10, pix11, pix12,
pix20, pix21, pix22, fScale);
pix00 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+3];
pix10 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+3];
pix20 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+3];
out.y = ComputeSobel(pix01, pix02, pix00,
pix11, pix12, pix10,
pix21, pix22, pix20, fScale);
pix01 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+4];
pix11 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+4];
pix21 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+4];
out.z = ComputeSobel(pix02, pix00, pix01,
pix12, pix10, pix11,
pix22, pix20, pix21, fScale);
pix02 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+5];
pix12 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+5];
pix22 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+5];
out.w = ComputeSobel(pix00, pix01, pix02,
pix10, pix11, pix12,
pix20, pix21, pix22, fScale);
if (u+ib < w/4 && v < h)
{
pSobel[u+ib] = out;
}
}
__syncthreads();
}
__global__ void
SobelCopyImage(Pixel *pSobelOriginal, unsigned int Pitch,
int w, int h, float fscale)
{
unsigned char *pSobel =
(unsigned char *)(((char *) pSobelOriginal)+blockIdx.x*Pitch);
for (int i = threadIdx.x; i < w; i += blockDim.x)
{
pSobel[i] = min(max((tex2D(tex, (float) i, (float) blockIdx.x) * fscale), 0.f), 255.f);
}
}
__global__ void
SobelTex(Pixel *pSobelOriginal, unsigned int Pitch,
int w, int h, float fScale)
{
unsigned char *pSobel =
(unsigned char *)(((char *) pSobelOriginal)+blockIdx.x*Pitch);
for (int i = threadIdx.x; i < w; i += blockDim.x)
{
unsigned char pix00 = tex2D(tex, (float) i-1, (float) blockIdx.x-1);
unsigned char pix01 = tex2D(tex, (float) i+0, (float) blockIdx.x-1);
unsigned char pix02 = tex2D(tex, (float) i+1, (float) blockIdx.x-1);
unsigned char pix10 = tex2D(tex, (float) i-1, (float) blockIdx.x+0);
unsigned char pix11 = tex2D(tex, (float) i+0, (float) blockIdx.x+0);
unsigned char pix12 = tex2D(tex, (float) i+1, (float) blockIdx.x+0);
unsigned char pix20 = tex2D(tex, (float) i-1, (float) blockIdx.x+1);
unsigned char pix21 = tex2D(tex, (float) i+0, (float) blockIdx.x+1);
unsigned char pix22 = tex2D(tex, (float) i+1, (float) blockIdx.x+1);
pSobel[i] = ComputeSobel(pix00, pix01, pix02,
pix10, pix11, pix12,
pix20, pix21, pix22, fScale);
}
}
extern "C" void setupTexture(int iw, int ih, Pixel *data, int Bpp)
{
hipChannelFormatDesc desc;
if (Bpp == 1)
{
desc = hipCreateChannelDesc<unsigned char>();
}
else
{
desc = hipCreateChannelDesc<uchar4>();
}
checkCudaErrors(hipMallocArray(&array, &desc, iw, ih));
checkCudaErrors(hipMemcpyToArray(array, 0, 0, data, Bpp*sizeof(Pixel)*iw*ih, hipMemcpyHostToDevice));
}
extern "C" void deleteTexture(void)
{
checkCudaErrors(hipFreeArray(array));
}
// Wrapper for the __global__ call that sets up the texture and threads
extern "C" void sobelFilter(Pixel *odata, int iw, int ih, enum SobelDisplayMode mode, float fScale)
{
checkCudaErrors(hipBindTextureToArray(tex, array));
switch (mode)
{
case SOBELDISPLAY_IMAGE:
hipLaunchKernelGGL(( SobelCopyImage), dim3(ih), dim3(384), 0, 0, odata, iw, iw, ih, fScale);
break;
case SOBELDISPLAY_SOBELTEX:
hipLaunchKernelGGL(( SobelTex), dim3(ih), dim3(384), 0, 0, odata, iw, iw, ih, fScale);
break;
case SOBELDISPLAY_SOBELSHARED:
{
dim3 threads(16,4);
#ifndef FIXED_BLOCKWIDTH
int BlockWidth = 80; // must be divisible by 16 for coalescing
#endif
dim3 blocks = dim3(iw/(4*BlockWidth)+(0!=iw%(4*BlockWidth)),
ih/threads.y+(0!=ih%threads.y));
int SharedPitch = ~0x3f&(4*(BlockWidth+2*RADIUS)+0x3f);
int sharedMem = SharedPitch*(threads.y+2*RADIUS);
// for the shared kernel, width must be divisible by 4
iw &= ~3;
hipLaunchKernelGGL(( SobelShared), dim3(blocks), dim3(threads), sharedMem, 0, (uchar4 *) odata,
iw,
#ifndef FIXED_BLOCKWIDTH
BlockWidth, SharedPitch,
#endif
iw, ih, fScale);
}
break;
}
checkCudaErrors(hipUnbindTexture(tex));
}
|
7356539412277689426aeb058dc46612e1898169.cu
|
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <helper_string.h>
#include "SobelFilter_kernels.h"
// Texture reference for reading image
texture<unsigned char, 2> tex;
extern __shared__ unsigned char LocalBlock[];
static cudaArray *array = NULL;
#define RADIUS 1
#ifdef FIXED_BLOCKWIDTH
#define BlockWidth 80
#define SharedPitch 384
#endif
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line)
{
if (cudaSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
__device__ unsigned char
ComputeSobel(unsigned char ul, // upper left
unsigned char um, // upper middle
unsigned char ur, // upper right
unsigned char ml, // middle left
unsigned char mm, // middle (unused)
unsigned char mr, // middle right
unsigned char ll, // lower left
unsigned char lm, // lower middle
unsigned char lr, // lower right
float fScale)
{
short Horz = ur + 2*mr + lr - ul - 2*ml - ll;
short Vert = ul + 2*um + ur - ll - 2*lm - lr;
short Sum = (short)(fScale*(abs((int)Horz)+abs((int)Vert)));
if (Sum < 0)
{
return 0;
}
else if (Sum > 0xff)
{
return 0xff;
}
return (unsigned char) Sum;
}
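// SobelShared: each block stages a tile of 4*(BlockWidth + 2*RADIUS) pixels per row, plus
// 2*RADIUS extra rows, in shared memory, then every thread produces one uchar4 (four
// horizontally adjacent output pixels) from the cached data.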
__global__ void
SobelShared(uchar4 *pSobelOriginal, unsigned short SobelPitch,
#ifndef FIXED_BLOCKWIDTH
short BlockWidth, short SharedPitch,
#endif
short w, short h, float fScale)
{
short u = 4*blockIdx.x*BlockWidth;
short v = blockIdx.y*blockDim.y + threadIdx.y;
short ib;
int SharedIdx = threadIdx.y * SharedPitch;
for (ib = threadIdx.x; ib < BlockWidth+2*RADIUS; ib += blockDim.x)
{
LocalBlock[SharedIdx+4*ib+0] = tex2D(tex,
(float)(u+4*ib-RADIUS+0), (float)(v-RADIUS));
LocalBlock[SharedIdx+4*ib+1] = tex2D(tex,
(float)(u+4*ib-RADIUS+1), (float)(v-RADIUS));
LocalBlock[SharedIdx+4*ib+2] = tex2D(tex,
(float)(u+4*ib-RADIUS+2), (float)(v-RADIUS));
LocalBlock[SharedIdx+4*ib+3] = tex2D(tex,
(float)(u+4*ib-RADIUS+3), (float)(v-RADIUS));
}
if (threadIdx.y < RADIUS*2)
{
//
// copy trailing RADIUS*2 rows of pixels into shared
//
SharedIdx = (blockDim.y+threadIdx.y) * SharedPitch;
for (ib = threadIdx.x; ib < BlockWidth+2*RADIUS; ib += blockDim.x)
{
LocalBlock[SharedIdx+4*ib+0] = tex2D(tex,
(float)(u+4*ib-RADIUS+0), (float)(v+blockDim.y-RADIUS));
LocalBlock[SharedIdx+4*ib+1] = tex2D(tex,
(float)(u+4*ib-RADIUS+1), (float)(v+blockDim.y-RADIUS));
LocalBlock[SharedIdx+4*ib+2] = tex2D(tex,
(float)(u+4*ib-RADIUS+2), (float)(v+blockDim.y-RADIUS));
LocalBlock[SharedIdx+4*ib+3] = tex2D(tex,
(float)(u+4*ib-RADIUS+3), (float)(v+blockDim.y-RADIUS));
}
}
__syncthreads();
u >>= 2; // index as uchar4 from here
uchar4 *pSobel = (uchar4 *)(((char *) pSobelOriginal)+v*SobelPitch);
SharedIdx = threadIdx.y * SharedPitch;
for (ib = threadIdx.x; ib < BlockWidth; ib += blockDim.x)
{
unsigned char pix00 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+0];
unsigned char pix01 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+1];
unsigned char pix02 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+2];
unsigned char pix10 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+0];
unsigned char pix11 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+1];
unsigned char pix12 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+2];
unsigned char pix20 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+0];
unsigned char pix21 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+1];
unsigned char pix22 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+2];
uchar4 out;
out.x = ComputeSobel(pix00, pix01, pix02,
pix10, pix11, pix12,
pix20, pix21, pix22, fScale);
pix00 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+3];
pix10 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+3];
pix20 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+3];
out.y = ComputeSobel(pix01, pix02, pix00,
pix11, pix12, pix10,
pix21, pix22, pix20, fScale);
pix01 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+4];
pix11 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+4];
pix21 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+4];
out.z = ComputeSobel(pix02, pix00, pix01,
pix12, pix10, pix11,
pix22, pix20, pix21, fScale);
pix02 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+5];
pix12 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+5];
pix22 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+5];
out.w = ComputeSobel(pix00, pix01, pix02,
pix10, pix11, pix12,
pix20, pix21, pix22, fScale);
if (u+ib < w/4 && v < h)
{
pSobel[u+ib] = out;
}
}
__syncthreads();
}
__global__ void
SobelCopyImage(Pixel *pSobelOriginal, unsigned int Pitch,
int w, int h, float fscale)
{
unsigned char *pSobel =
(unsigned char *)(((char *) pSobelOriginal)+blockIdx.x*Pitch);
for (int i = threadIdx.x; i < w; i += blockDim.x)
{
pSobel[i] = min(max((tex2D(tex, (float) i, (float) blockIdx.x) * fscale), 0.f), 255.f);
}
}
__global__ void
SobelTex(Pixel *pSobelOriginal, unsigned int Pitch,
int w, int h, float fScale)
{
unsigned char *pSobel =
(unsigned char *)(((char *) pSobelOriginal)+blockIdx.x*Pitch);
for (int i = threadIdx.x; i < w; i += blockDim.x)
{
unsigned char pix00 = tex2D(tex, (float) i-1, (float) blockIdx.x-1);
unsigned char pix01 = tex2D(tex, (float) i+0, (float) blockIdx.x-1);
unsigned char pix02 = tex2D(tex, (float) i+1, (float) blockIdx.x-1);
unsigned char pix10 = tex2D(tex, (float) i-1, (float) blockIdx.x+0);
unsigned char pix11 = tex2D(tex, (float) i+0, (float) blockIdx.x+0);
unsigned char pix12 = tex2D(tex, (float) i+1, (float) blockIdx.x+0);
unsigned char pix20 = tex2D(tex, (float) i-1, (float) blockIdx.x+1);
unsigned char pix21 = tex2D(tex, (float) i+0, (float) blockIdx.x+1);
unsigned char pix22 = tex2D(tex, (float) i+1, (float) blockIdx.x+1);
pSobel[i] = ComputeSobel(pix00, pix01, pix02,
pix10, pix11, pix12,
pix20, pix21, pix22, fScale);
}
}
extern "C" void setupTexture(int iw, int ih, Pixel *data, int Bpp)
{
cudaChannelFormatDesc desc;
if (Bpp == 1)
{
desc = cudaCreateChannelDesc<unsigned char>();
}
else
{
desc = cudaCreateChannelDesc<uchar4>();
}
checkCudaErrors(cudaMallocArray(&array, &desc, iw, ih));
checkCudaErrors(cudaMemcpyToArray(array, 0, 0, data, Bpp*sizeof(Pixel)*iw*ih, cudaMemcpyHostToDevice));
}
extern "C" void deleteTexture(void)
{
checkCudaErrors(cudaFreeArray(array));
}
// Wrapper for the __global__ call that sets up the texture and threads
extern "C" void sobelFilter(Pixel *odata, int iw, int ih, enum SobelDisplayMode mode, float fScale)
{
checkCudaErrors(cudaBindTextureToArray(tex, array));
switch (mode)
{
case SOBELDISPLAY_IMAGE:
SobelCopyImage<<<ih, 384>>>(odata, iw, iw, ih, fScale);
break;
case SOBELDISPLAY_SOBELTEX:
SobelTex<<<ih, 384>>>(odata, iw, iw, ih, fScale);
break;
case SOBELDISPLAY_SOBELSHARED:
{
dim3 threads(16,4);
#ifndef FIXED_BLOCKWIDTH
int BlockWidth = 80; // must be divisible by 16 for coalescing
#endif
dim3 blocks = dim3(iw/(4*BlockWidth)+(0!=iw%(4*BlockWidth)),
ih/threads.y+(0!=ih%threads.y));
int SharedPitch = ~0x3f&(4*(BlockWidth+2*RADIUS)+0x3f);
int sharedMem = SharedPitch*(threads.y+2*RADIUS);
// for the shared kernel, width must be divisible by 4
iw &= ~3;
SobelShared<<<blocks, threads, sharedMem>>>((uchar4 *) odata,
iw,
#ifndef FIXED_BLOCKWIDTH
BlockWidth, SharedPitch,
#endif
iw, ih, fScale);
}
break;
}
checkCudaErrors(cudaUnbindTexture(tex));
}
|
ac160ff241439a6cab954591924525589c0e57bd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "generate_histogram.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
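// Benchmark harness: for every matrix size / block configuration pair above, warm up with 10
// launches of generate_histogram, then time 1000 back-to-back launches with std::chrono and
// print [microseconds, (BLOCKX,BLOCKY), (XSIZE,YSIZE)]. No device synchronization follows the
// timed loop, so kernels still in flight when the end timestamp is taken are not accounted for.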
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int *bins = NULL;
hipMalloc(&bins, XSIZE*YSIZE*sizeof(unsigned int));
const float *dIn = NULL;
hipMalloc(&dIn, XSIZE*YSIZE*sizeof(float));
const int binNumber = 1;
const float lumMin = 1;
const float lumMax = 1;
const int size = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( generate_histogram), dim3(gridBlock),dim3(threadBlock), 0, 0, bins,dIn,binNumber,lumMin,lumMax,size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( generate_histogram), dim3(gridBlock),dim3(threadBlock), 0, 0, bins,dIn,binNumber,lumMin,lumMax,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( generate_histogram), dim3(gridBlock),dim3(threadBlock), 0, 0, bins,dIn,binNumber,lumMin,lumMax,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
ac160ff241439a6cab954591924525589c0e57bd.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "generate_histogram.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int *bins = NULL;
cudaMalloc(&bins, XSIZE*YSIZE*sizeof(unsigned int));
const float *dIn = NULL;
cudaMalloc(&dIn, XSIZE*YSIZE*sizeof(float));
const int binNumber = 1;
const float lumMin = 1;
const float lumMax = 1;
const int size = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
generate_histogram<<<gridBlock,threadBlock>>>(bins,dIn,binNumber,lumMin,lumMax,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
generate_histogram<<<gridBlock,threadBlock>>>(bins,dIn,binNumber,lumMin,lumMax,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
generate_histogram<<<gridBlock,threadBlock>>>(bins,dIn,binNumber,lumMin,lumMax,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
893bfd61b6139018fee9a20e762d72c33dcca59b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////
// File: ProgramCU.cu
// Author: Changchang Wu
// Description : implementation of ProgramCU and all CUDA kernels
//
// Copyright (c) 2007 University of North Carolina at Chapel Hill
// All Rights Reserved
//
// Permission to use, copy, modify and distribute this software and its
// documentation for educational, research and non-profit purposes, without
// fee, and without a written agreement is hereby granted, provided that the
// above copyright notice and the following paragraph appear in all copies.
//
// The University of North Carolina at Chapel Hill make no representations
// about the suitability of this software for any purpose. It is provided
// 'as is' without express or implied warranty.
//
// Please send BUG REPORTS to [email protected]
//
////////////////////////////////////////////////////////////////////////////
#if defined(CUDA_SIFTGPU_ENABLED)
#include "GL/glew.h"
#include "stdio.h"
#include "CuTexImage.h"
#include "ProgramCU.h"
#include "GlobalUtil.h"
//----------------------------------------------------------------
//Begin SiftGPU setting section.
//////////////////////////////////////////////////////////
#define IMUL(X,Y) __mul24(X,Y)
//#define FDIV(X,Y) ((X)/(Y))
#define FDIV(X,Y) __fdividef(X,Y)
/////////////////////////////////////////////////////////
//filter kernel width range (don't change this)
#define KERNEL_MAX_WIDTH 33
#define KERNEL_MIN_WIDTH 5
//////////////////////////////////////////////////////////
//horizontal filter block size (32, 64, 128, 256, 512)
#define FILTERH_TILE_WIDTH 128
//thread block for vertical filter. FILTERV_BLOCK_WIDTH can be (4, 8 or 16)
#define FILTERV_BLOCK_WIDTH 16
#define FILTERV_BLOCK_HEIGHT 32
//The corresponding image patch for a thread block
#define FILTERV_PIXEL_PER_THREAD 4
#define FILTERV_TILE_WIDTH FILTERV_BLOCK_WIDTH
#define FILTERV_TILE_HEIGHT (FILTERV_PIXEL_PER_THREAD * FILTERV_BLOCK_HEIGHT)
//////////////////////////////////////////////////////////
//thread block size for computing Difference of Gaussian
#define DOG_BLOCK_LOG_DIMX 7
#define DOG_BLOCK_LOG_DIMY 0
#define DOG_BLOCK_DIMX (1 << DOG_BLOCK_LOG_DIMX)
#define DOG_BLOCK_DIMY (1 << DOG_BLOCK_LOG_DIMY)
//////////////////////////////////////////////////////////
//thread block size for keypoint detection
#define KEY_BLOCK_LOG_DIMX 3
#define KEY_BLOCK_LOG_DIMY 3
#define KEY_BLOCK_DIMX (1<<KEY_BLOCK_LOG_DIMX)
#define KEY_BLOCK_DIMY (1<<KEY_BLOCK_LOG_DIMY)
//#define KEY_OFFSET_ONE
//make KEY_BLOCK_LOG_DIMX 4 will make the write coalesced..
//but it seems uncoalesced writes don't affect the speed
//////////////////////////////////////////////////////////
//thread block size for initializing list generation (64, 128, 256, 512 ...)
#define HIST_INIT_WIDTH 128
//thread block size for generating feature list (32, 64, 128, 256, 512, ...)
#define LISTGEN_BLOCK_DIM 128
/////////////////////////////////////////////////////////
//how many keypoint orientations to compute in a block
#define ORIENTATION_COMPUTE_PER_BLOCK 64
//how many keypoint descriptor to compute in a block (2, 4, 8, 16, 32)
#define DESCRIPTOR_COMPUTE_PER_BLOCK 4
#define DESCRIPTOR_COMPUTE_BLOCK_SIZE (16 * DESCRIPTOR_COMPUTE_PER_BLOCK)
//how many keypoint descriptor to normalized in a block (32, ...)
#define DESCRIPTOR_NORMALIZ_PER_BLOCK 32
///////////////////////////////////////////
//Thread block size for visualization
//(This doesn't affect the speed of computation)
#define BLOCK_LOG_DIM 4
#define BLOCK_DIM (1 << BLOCK_LOG_DIM)
//End SiftGPU setting section.
//----------------------------------------------------------------
__device__ __constant__ float d_kernel[KERNEL_MAX_WIDTH];
texture<float, 1, hipReadModeElementType> texData;
texture<unsigned char, 1, hipReadModeNormalizedFloat> texDataB;
texture<float2, 2, hipReadModeElementType> texDataF2;
texture<float4, 1, hipReadModeElementType> texDataF4;
texture<int4, 1, hipReadModeElementType> texDataI4;
texture<int4, 1, hipReadModeElementType> texDataList;
//template<int i> __device__ float Conv(float *data) { return Conv<i-1>(data) + data[i]*d_kernel[i];}
//template<> __device__ float Conv<0>(float *data) { return data[0] * d_kernel[0]; }
//////////////////////////////////////////////////////////////
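// FilterH: 1-D horizontal convolution with the constant-memory kernel d_kernel. Each block
// caches FILTERH_TILE_WIDTH + FW - 1 border-clamped input samples in shared memory and each
// thread writes one filtered sample of its row.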
template<int FW> __global__ void FilterH( float* d_result, int width)
{
const int HALF_WIDTH = FW >> 1;
const int CACHE_WIDTH = FILTERH_TILE_WIDTH + FW -1;
const int CACHE_COUNT = 2 + (CACHE_WIDTH - 2)/ FILTERH_TILE_WIDTH;
__shared__ float data[CACHE_WIDTH];
const int bcol = IMUL(blockIdx.x, FILTERH_TILE_WIDTH);
const int col = bcol + threadIdx.x;
const int index_min = IMUL(blockIdx.y, width);
const int index_max = index_min + width - 1;
int src_index = index_min + bcol - HALF_WIDTH + threadIdx.x;
int cache_index = threadIdx.x;
float value = 0;
#pragma unroll
for(int j = 0; j < CACHE_COUNT; ++j)
{
if(cache_index < CACHE_WIDTH)
{
int fetch_index = src_index < index_min? index_min : (src_index > index_max ? index_max : src_index);
data[cache_index] = tex1Dfetch(texData,fetch_index);
src_index += FILTERH_TILE_WIDTH;
cache_index += FILTERH_TILE_WIDTH;
}
}
__syncthreads();
if(col >= width) return;
#pragma unroll
for(int i = 0; i < FW; ++i)
{
value += (data[threadIdx.x + i]* d_kernel[i]);
}
// value = Conv<FW-1>(data + threadIdx.x);
d_result[index_min + col] = value;
}
////////////////////////////////////////////////////////////////////
template<int FW> __global__ void FilterV(float* d_result, int width, int height)
{
const int HALF_WIDTH = FW >> 1;
const int CACHE_WIDTH = FW + FILTERV_TILE_HEIGHT - 1;
const int TEMP = CACHE_WIDTH & 0xf;
//add some extra space to avoid bank conflict
#if FILTERV_TILE_WIDTH == 16
//make the stride 16 * n +/- 1
const int EXTRA = (TEMP == 1 || TEMP == 0) ? 1 - TEMP : 15 - TEMP;
#elif FILTERV_TILE_WIDTH == 8
//make the stride 16 * n +/- 2
const int EXTRA = (TEMP == 2 || TEMP == 1 || TEMP == 0) ? 2 - TEMP : (TEMP == 15? 3 : 14 - TEMP);
#elif FILTERV_TILE_WIDTH == 4
//make the stride 16 * n +/- 4
const int EXTRA = (TEMP >=0 && TEMP <=4) ? 4 - TEMP : (TEMP > 12? 20 - TEMP : 12 - TEMP);
#else
#error
#endif
const int CACHE_TRUE_WIDTH = CACHE_WIDTH + EXTRA;
const int CACHE_COUNT = (CACHE_WIDTH + FILTERV_BLOCK_HEIGHT - 1) / FILTERV_BLOCK_HEIGHT;
const int WRITE_COUNT = (FILTERV_TILE_HEIGHT + FILTERV_BLOCK_HEIGHT -1) / FILTERV_BLOCK_HEIGHT;
__shared__ float data[CACHE_TRUE_WIDTH * FILTERV_TILE_WIDTH];
const int row_block_first = IMUL(blockIdx.y, FILTERV_TILE_HEIGHT);
const int col = IMUL(blockIdx.x, FILTERV_TILE_WIDTH) + threadIdx.x;
const int row_first = row_block_first - HALF_WIDTH;
const int data_index_max = IMUL(height - 1, width) + col;
const int cache_col_start = threadIdx.y;
const int cache_row_start = IMUL(threadIdx.x, CACHE_TRUE_WIDTH);
int cache_index = cache_col_start + cache_row_start;
int data_index = IMUL(row_first + cache_col_start, width) + col;
if(col < width)
{
#pragma unroll
for(int i = 0; i < CACHE_COUNT; ++i)
{
if(cache_col_start < CACHE_WIDTH - i * FILTERV_BLOCK_HEIGHT)
{
int fetch_index = data_index < col ? col : (data_index > data_index_max? data_index_max : data_index);
data[cache_index + i * FILTERV_BLOCK_HEIGHT] = tex1Dfetch(texData,fetch_index);
data_index += IMUL(FILTERV_BLOCK_HEIGHT, width);
}
}
}
__syncthreads();
if(col >= width) return;
int row = row_block_first + threadIdx.y;
int index_start = cache_row_start + threadIdx.y;
#pragma unroll
for(int i = 0; i < WRITE_COUNT; ++i,
row += FILTERV_BLOCK_HEIGHT, index_start += FILTERV_BLOCK_HEIGHT)
{
if(row < height)
{
int index_dest = IMUL(row, width) + col;
float value = 0;
#pragma unroll
for(int i = 0; i < FW; ++i)
{
value += (data[index_start + i] * d_kernel[i]);
}
d_result[index_dest] = value;
}
}
}
template<int LOG_SCALE> __global__ void UpsampleKernel(float* d_result, int width)
{
const int SCALE = (1 << LOG_SCALE), SCALE_MASK = (SCALE - 1);
const float INV_SCALE = 1.0f / (float(SCALE));
int col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
if(col >= width) return;
int row = blockIdx.y >> LOG_SCALE;
int index = row * width + col;
int dst_row = blockIdx.y;
int dst_idx= (width * dst_row + col) * SCALE;
int helper = blockIdx.y & SCALE_MASK;
if (helper)
{
float v11 = tex1Dfetch(texData, index);
float v12 = tex1Dfetch(texData, index + 1);
index += width;
float v21 = tex1Dfetch(texData, index);
float v22 = tex1Dfetch(texData, index + 1);
float w1 = INV_SCALE * helper, w2 = 1.0 - w1;
float v1 = (v21 * w1 + w2 * v11);
float v2 = (v22 * w1 + w2 * v12);
d_result[dst_idx] = v1;
#pragma unroll
for(int i = 1; i < SCALE; ++i)
{
const float r2 = i * INV_SCALE;
const float r1 = 1.0f - r2;
d_result[dst_idx +i] = v1 * r1 + v2 * r2;
}
}else
{
float v1 = tex1Dfetch(texData, index);
float v2 = tex1Dfetch(texData, index + 1);
d_result[dst_idx] = v1;
#pragma unroll
for(int i = 1; i < SCALE; ++i)
{
const float r2 = i * INV_SCALE;
const float r1 = 1.0f - r2;
d_result[dst_idx +i] = v1 * r1 + v2 * r2;
}
}
}
////////////////////////////////////////////////////////////////////////////////////////
void ProgramCU::SampleImageU(CuTexImage *dst, CuTexImage *src, int log_scale)
{
int width = src->GetImgWidth(), height = src->GetImgHeight();
src->BindTexture(texData);
dim3 grid((width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, height << log_scale);
dim3 block(FILTERH_TILE_WIDTH);
switch(log_scale)
{
case 1 : hipLaunchKernelGGL(( UpsampleKernel<1>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, width); break;
case 2 : hipLaunchKernelGGL(( UpsampleKernel<2>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, width); break;
case 3 : hipLaunchKernelGGL(( UpsampleKernel<3>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, width); break;
default: break;
}
}
template<int LOG_SCALE> __global__ void DownsampleKernel(float* d_result, int src_width, int dst_width)
{
const int dst_col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
if(dst_col >= dst_width) return;
const int src_col = min((dst_col << LOG_SCALE), (src_width - 1));
const int dst_row = blockIdx.y;
const int src_row = blockIdx.y << LOG_SCALE;
const int src_idx = IMUL(src_row, src_width) + src_col;
const int dst_idx = IMUL(dst_width, dst_row) + dst_col;
d_result[dst_idx] = tex1Dfetch(texData, src_idx);
}
__global__ void DownsampleKernel(float* d_result, int src_width, int dst_width, const int log_scale)
{
const int dst_col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
if(dst_col >= dst_width) return;
const int src_col = min((dst_col << log_scale), (src_width - 1));
const int dst_row = blockIdx.y;
const int src_row = blockIdx.y << log_scale;
const int src_idx = IMUL(src_row, src_width) + src_col;
const int dst_idx = IMUL(dst_width, dst_row) + dst_col;
d_result[dst_idx] = tex1Dfetch(texData, src_idx);
}
void ProgramCU::SampleImageD(CuTexImage *dst, CuTexImage *src, int log_scale)
{
int src_width = src->GetImgWidth(), dst_width = dst->GetImgWidth() ;
src->BindTexture(texData);
dim3 grid((dst_width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, dst->GetImgHeight());
dim3 block(FILTERH_TILE_WIDTH);
switch(log_scale)
{
case 1 : hipLaunchKernelGGL(( DownsampleKernel<1>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, src_width, dst_width); break;
case 2 :hipLaunchKernelGGL(( DownsampleKernel<2>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, src_width, dst_width); break;
case 3 : hipLaunchKernelGGL(( DownsampleKernel<3>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, src_width, dst_width); break;
default:hipLaunchKernelGGL(( DownsampleKernel) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, src_width, dst_width, log_scale);
}
}
__global__ void ChannelReduce_Kernel(float* d_result)
{
int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
d_result[index] = tex1Dfetch(texData, index*4);
}
__global__ void ChannelReduce_Convert_Kernel(float* d_result)
{
int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
float4 rgba = tex1Dfetch(texDataF4, index);
d_result[index] = 0.299f * rgba.x + 0.587f* rgba.y + 0.114f * rgba.z;
}
void ProgramCU::ReduceToSingleChannel(CuTexImage* dst, CuTexImage* src, int convert_rgb)
{
int width = src->GetImgWidth(), height = dst->GetImgHeight() ;
dim3 grid((width * height + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH);
dim3 block(FILTERH_TILE_WIDTH);
if(convert_rgb)
{
src->BindTexture(texDataF4);
hipLaunchKernelGGL(( ChannelReduce_Convert_Kernel), dim3(grid), dim3(block), 0, 0, (float*)dst->_cuData);
}else
{
src->BindTexture(texData);
hipLaunchKernelGGL(( ChannelReduce_Kernel), dim3(grid), dim3(block), 0, 0, (float*)dst->_cuData);
}
}
__global__ void ConvertByteToFloat_Kernel(float* d_result)
{
int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
d_result[index] = tex1Dfetch(texDataB, index);
}
void ProgramCU::ConvertByteToFloat(CuTexImage*src, CuTexImage* dst)
{
int width = src->GetImgWidth(), height = dst->GetImgHeight() ;
dim3 grid((width * height + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH);
dim3 block(FILTERH_TILE_WIDTH);
src->BindTexture(texDataB);
hipLaunchKernelGGL(( ConvertByteToFloat_Kernel), dim3(grid), dim3(block), 0, 0, (float*)dst->_cuData);
}
void ProgramCU::CreateFilterKernel(float sigma, float* kernel, int& width)
{
int i, sz = int( ceil( GlobalUtil::_FilterWidthFactor * sigma -0.5) ) ;//
width = 2*sz + 1;
if(width > KERNEL_MAX_WIDTH)
{
//filter size truncation
sz = KERNEL_MAX_WIDTH >> 1;
width =KERNEL_MAX_WIDTH;
}else if(width < KERNEL_MIN_WIDTH)
{
sz = KERNEL_MIN_WIDTH >> 1;
width =KERNEL_MIN_WIDTH;
}
float rv = 1.0f/(sigma*sigma), v, ksum =0;
// pre-compute filter
for( i = -sz ; i <= sz ; ++i)
{
kernel[i+sz] = v = exp(-0.5f * i * i *rv) ;
ksum += v;
}
//normalize the kernel
rv = 1.0f/ksum;
for(i = 0; i< width ;i++) kernel[i]*=rv;
}
template<int FW> void ProgramCU::FilterImage(CuTexImage *dst, CuTexImage *src, CuTexImage* buf)
{
int width = src->GetImgWidth(), height = src->GetImgHeight();
//horizontal filtering
src->BindTexture(texData);
dim3 gridh((width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, height);
dim3 blockh(FILTERH_TILE_WIDTH);
hipLaunchKernelGGL(( FilterH<FW>), dim3(gridh), dim3(blockh), 0, 0, (float*)buf->_cuData, width);
CheckErrorCUDA("FilterH");
///vertical filtering
buf->BindTexture(texData);
dim3 gridv((width + FILTERV_TILE_WIDTH - 1)/ FILTERV_TILE_WIDTH, (height + FILTERV_TILE_HEIGHT - 1)/FILTERV_TILE_HEIGHT);
dim3 blockv(FILTERV_TILE_WIDTH, FILTERV_BLOCK_HEIGHT);
hipLaunchKernelGGL(( FilterV<FW>), dim3(gridv), dim3(blockv), 0, 0, (float*)dst->_cuData, width, height);
CheckErrorCUDA("FilterV");
}
//////////////////////////////////////////////////////////////////////
// tested on 2048x1500 image, the time on pyramid construction is
// OpenGL version : 18ms
// CUDA version: 28 ms
void ProgramCU::FilterImage(CuTexImage *dst, CuTexImage *src, CuTexImage* buf, float sigma)
{
float filter_kernel[KERNEL_MAX_WIDTH]; int width;
CreateFilterKernel(sigma, filter_kernel, width);
hipMemcpyToSymbol(d_kernel, filter_kernel, width * sizeof(float), 0, hipMemcpyHostToDevice);
switch(width)
{
case 5: FilterImage< 5>(dst, src, buf); break;
case 7: FilterImage< 7>(dst, src, buf); break;
case 9: FilterImage< 9>(dst, src, buf); break;
case 11: FilterImage<11>(dst, src, buf); break;
case 13: FilterImage<13>(dst, src, buf); break;
case 15: FilterImage<15>(dst, src, buf); break;
case 17: FilterImage<17>(dst, src, buf); break;
case 19: FilterImage<19>(dst, src, buf); break;
case 21: FilterImage<21>(dst, src, buf); break;
case 23: FilterImage<23>(dst, src, buf); break;
case 25: FilterImage<25>(dst, src, buf); break;
case 27: FilterImage<27>(dst, src, buf); break;
case 29: FilterImage<29>(dst, src, buf); break;
case 31: FilterImage<31>(dst, src, buf); break;
case 33: FilterImage<33>(dst, src, buf); break;
default: break;
}
}
texture<float, 1, hipReadModeElementType> texC;
texture<float, 1, hipReadModeElementType> texP;
texture<float, 1, hipReadModeElementType> texN;
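// ComputeDOG_Kernel writes the difference of Gaussians (current level minus the previous
// level); the two-output variant additionally stores a (gradient magnitude, orientation)
// pair per pixel.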
void __global__ ComputeDOG_Kernel(float* d_dog, float2* d_got, int width, int height)
{
int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x;
if(col < width && row < height)
{
int index = IMUL(row, width) + col;
float vp = tex1Dfetch(texP, index);
float v = tex1Dfetch(texC, index);
d_dog[index] = v - vp;
float vxn = tex1Dfetch(texC, index + 1);
float vxp = tex1Dfetch(texC, index - 1);
float vyp = tex1Dfetch(texC, index - width);
float vyn = tex1Dfetch(texC, index + width);
float dx = vxn - vxp, dy = vyn - vyp;
float grd = 0.5f * sqrt(dx * dx + dy * dy);
float rot = (grd == 0.0f? 0.0f : atan2(dy, dx));
d_got[index] = make_float2(grd, rot);
}
}
void __global__ ComputeDOG_Kernel(float* d_dog, int width, int height)
{
int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x;
if(col < width && row < height)
{
int index = IMUL(row, width) + col;
float vp = tex1Dfetch(texP, index);
float v = tex1Dfetch(texC, index);
d_dog[index] = v - vp;
}
}
void ProgramCU::ComputeDOG(CuTexImage* gus, CuTexImage* dog, CuTexImage* got)
{
int width = gus->GetImgWidth(), height = gus->GetImgHeight();
dim3 grid((width + DOG_BLOCK_DIMX - 1)/ DOG_BLOCK_DIMX, (height + DOG_BLOCK_DIMY - 1)/DOG_BLOCK_DIMY);
dim3 block(DOG_BLOCK_DIMX, DOG_BLOCK_DIMY);
gus->BindTexture(texC);
(gus -1)->BindTexture(texP);
if(got->_cuData)
hipLaunchKernelGGL(( ComputeDOG_Kernel), dim3(grid), dim3(block), 0, 0, (float*) dog->_cuData, (float2*) got->_cuData, width, height);
else
hipLaunchKernelGGL(( ComputeDOG_Kernel), dim3(grid), dim3(block), 0, 0, (float*) dog->_cuData, width, height);
}
#define READ_CMP_DOG_DATA(datai, tex, idx) \
datai[0] = tex1Dfetch(tex, idx - 1);\
datai[1] = tex1Dfetch(tex, idx);\
datai[2] = tex1Dfetch(tex, idx + 1);\
if(v > nmax)\
{\
nmax = max(nmax, datai[0]);\
nmax = max(nmax, datai[1]);\
nmax = max(nmax, datai[2]);\
if(v < nmax) goto key_finish;\
}else\
{\
nmin = min(nmin, datai[0]);\
nmin = min(nmin, datai[1]);\
nmin = min(nmin, datai[2]);\
if(v > nmin) goto key_finish;\
}
void __global__ ComputeKEY_Kernel(float4* d_key, int width, int colmax, int rowmax,
float dog_threshold0, float dog_threshold, float edge_threshold, int subpixel_localization)
{
float data[3][3], v;
float datap[3][3], datan[3][3];
#ifdef KEY_OFFSET_ONE
int row = (blockIdx.y << KEY_BLOCK_LOG_DIMY) + threadIdx.y + 1;
int col = (blockIdx.x << KEY_BLOCK_LOG_DIMX) + threadIdx.x + 1;
#else
int row = (blockIdx.y << KEY_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << KEY_BLOCK_LOG_DIMX) + threadIdx.x;
#endif
int index = IMUL(row, width) + col;
int idx[3] ={index - width, index, index + width};
int in_image =0;
float nmax, nmin, result = 0.0f;
float dx = 0, dy = 0, ds = 0;
bool offset_test_passed = true;
#ifdef KEY_OFFSET_ONE
if(row < rowmax && col < colmax)
#else
if(row > 0 && col > 0 && row < rowmax && col < colmax)
#endif
{
in_image = 1;
data[1][1] = v = tex1Dfetch(texC, idx[1]);
if(fabs(v) <= dog_threshold0) goto key_finish;
data[1][0] = tex1Dfetch(texC, idx[1] - 1);
data[1][2] = tex1Dfetch(texC, idx[1] + 1);
nmax = max(data[1][0], data[1][2]);
nmin = min(data[1][0], data[1][2]);
if(v <=nmax && v >= nmin) goto key_finish;
//if((v > nmax && v < 0 )|| (v < nmin && v > 0)) goto key_finish;
READ_CMP_DOG_DATA(data[0], texC, idx[0]);
READ_CMP_DOG_DATA(data[2], texC, idx[2]);
//edge supression
float vx2 = v * 2.0f;
float fxx = data[1][0] + data[1][2] - vx2;
float fyy = data[0][1] + data[2][1] - vx2;
float fxy = 0.25f * (data[2][2] + data[0][0] - data[2][0] - data[0][2]);
float temp1 = fxx * fyy - fxy * fxy;
float temp2 = (fxx + fyy) * (fxx + fyy);
if(temp1 <=0 || temp2 > edge_threshold * temp1) goto key_finish;
//read the previous level
READ_CMP_DOG_DATA(datap[0], texP, idx[0]);
READ_CMP_DOG_DATA(datap[1], texP, idx[1]);
READ_CMP_DOG_DATA(datap[2], texP, idx[2]);
//read the next level
READ_CMP_DOG_DATA(datan[0], texN, idx[0]);
READ_CMP_DOG_DATA(datan[1], texN, idx[1]);
READ_CMP_DOG_DATA(datan[2], texN, idx[2]);
if(subpixel_localization)
{
//subpixel localization
float fx = 0.5f * (data[1][2] - data[1][0]);
float fy = 0.5f * (data[2][1] - data[0][1]);
float fs = 0.5f * (datan[1][1] - datap[1][1]);
float fss = (datan[1][1] + datap[1][1] - vx2);
float fxs = 0.25f* (datan[1][2] + datap[1][0] - datan[1][0] - datap[1][2]);
float fys = 0.25f* (datan[2][1] + datap[0][1] - datan[0][1] - datap[2][1]);
//need to solve dx, dy, ds;
// |-fx| | fxx fxy fxs | |dx|
// |-fy| = | fxy fyy fys | * |dy|
// |-fs| | fxs fys fss | |ds|
float4 A0 = fxx > 0? make_float4(fxx, fxy, fxs, -fx) : make_float4(-fxx, -fxy, -fxs, fx);
float4 A1 = fxy > 0? make_float4(fxy, fyy, fys, -fy) : make_float4(-fxy, -fyy, -fys, fy);
float4 A2 = fxs > 0? make_float4(fxs, fys, fss, -fs) : make_float4(-fxs, -fys, -fss, fs);
float maxa = max(max(A0.x, A1.x), A2.x);
if(maxa >= 1e-10)
{
if(maxa == A1.x)
{
float4 TEMP = A1; A1 = A0; A0 = TEMP;
}else if(maxa == A2.x)
{
float4 TEMP = A2; A2 = A0; A0 = TEMP;
}
A0.y /= A0.x; A0.z /= A0.x; A0.w/= A0.x;
A1.y -= A1.x * A0.y; A1.z -= A1.x * A0.z; A1.w -= A1.x * A0.w;
A2.y -= A2.x * A0.y; A2.z -= A2.x * A0.z; A2.w -= A2.x * A0.w;
if(abs(A2.y) > abs(A1.y))
{
float4 TEMP = A2; A2 = A1; A1 = TEMP;
}
if(abs(A1.y) >= 1e-10)
{
A1.z /= A1.y; A1.w /= A1.y;
A2.z -= A2.y * A1.z; A2.w -= A2.y * A1.w;
if(abs(A2.z) >= 1e-10)
{
ds = A2.w / A2.z;
dy = A1.w - ds * A1.z;
dx = A0.w - ds * A0.z - dy * A0.y;
offset_test_passed =
fabs(data[1][1] + 0.5f * (dx * fx + dy * fy + ds * fs)) > dog_threshold
&&fabs(ds) < 1.0f && fabs(dx) < 1.0f && fabs(dy) < 1.0f;
}
}
}
}
if(offset_test_passed) result = v > nmax ? 1.0 : -1.0;
}
key_finish:
if(in_image) d_key[index] = make_float4(result, dx, dy, ds);
}
void ProgramCU::ComputeKEY(CuTexImage* dog, CuTexImage* key, float Tdog, float Tedge)
{
int width = dog->GetImgWidth(), height = dog->GetImgHeight();
float Tdog1 = (GlobalUtil::_SubpixelLocalization? 0.8f : 1.0f) * Tdog;
CuTexImage* dogp = dog - 1;
CuTexImage* dogn = dog + 1;
#ifdef KEY_OFFSET_ONE
dim3 grid((width - 1 + KEY_BLOCK_DIMX - 1)/ KEY_BLOCK_DIMX, (height - 1 + KEY_BLOCK_DIMY - 1)/KEY_BLOCK_DIMY);
#else
dim3 grid((width + KEY_BLOCK_DIMX - 1)/ KEY_BLOCK_DIMX, (height + KEY_BLOCK_DIMY - 1)/KEY_BLOCK_DIMY);
#endif
dim3 block(KEY_BLOCK_DIMX, KEY_BLOCK_DIMY);
dogp->BindTexture(texP);
dog ->BindTexture(texC);
dogn->BindTexture(texN);
Tedge = (Tedge+1)*(Tedge+1)/Tedge;
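//Lowe-style edge rejection: for a principal-curvature ratio r the kernel
//rejects when trace^2 / det > (r+1)^2 / r, so e.g. r = 10 would become 12.1
//here (the actual value of Tedge is supplied by the caller).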
hipLaunchKernelGGL(( ComputeKEY_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) key->_cuData, width,
width -1, height -1, Tdog1, Tdog, Tedge, GlobalUtil::_SubpixelLocalization);
}
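//////////////////////////////////////////////////////////////
//Keypoint list generation uses a histogram pyramid:
//InitHist_Kernel writes, for every group of 4 key-map columns, a 0/1 flag per
//column (border rows and columns excluded); ReduceHist_Kernel sums groups of
//4 histogram cells into the next coarser level; ListGen_Kernel (presumably
//invoked once per level on the way back down) refines each feature's column
//and remaining rank so every keypoint recovers its (x, y) position without
//any host-side compaction.
//////////////////////////////////////////////////////////////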
void __global__ InitHist_Kernel(int4* hist, int ws, int wd, int height)
{
int row = IMUL(blockIdx.y, blockDim.y) + threadIdx.y;
int col = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(row < height && col < wd)
{
int hidx = IMUL(row, wd) + col;
int scol = col << 2;
int sidx = IMUL(row, ws) + scol;
int v[4] = {0, 0, 0, 0};
if(row > 0 && row < height -1)
{
#pragma unroll
for(int i = 0; i < 4 ; ++i, ++scol)
{
float4 temp = tex1Dfetch(texDataF4, sidx +i);
v[i] = (scol < ws -1 && scol > 0 && temp.x!=0) ? 1 : 0;
}
}
hist[hidx] = make_int4(v[0], v[1], v[2], v[3]);
}
}
void ProgramCU::InitHistogram(CuTexImage* key, CuTexImage* hist)
{
int ws = key->GetImgWidth(), hs = key->GetImgHeight();
int wd = hist->GetImgWidth(), hd = hist->GetImgHeight();
dim3 grid((wd + HIST_INIT_WIDTH - 1)/ HIST_INIT_WIDTH, hd);
dim3 block(HIST_INIT_WIDTH, 1);
key->BindTexture(texDataF4);
hipLaunchKernelGGL(( InitHist_Kernel), dim3(grid), dim3(block), 0, 0, (int4*) hist->_cuData, ws, wd, hd);
}
void __global__ ReduceHist_Kernel(int4* d_hist, int ws, int wd, int height)
{
int row = IMUL(blockIdx.y, blockDim.y) + threadIdx.y;
int col = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(row < height && col < wd)
{
int hidx = IMUL(row, wd) + col;
int scol = col << 2;
int sidx = IMUL(row, ws) + scol;
int v[4] = {0, 0, 0, 0};
#pragma unroll
for(int i = 0; i < 4 && scol < ws; ++i, ++scol)
{
int4 temp = tex1Dfetch(texDataI4, sidx + i);
v[i] = temp.x + temp.y + temp.z + temp.w;
}
d_hist[hidx] = make_int4(v[0], v[1], v[2], v[3]);
}
}
void ProgramCU::ReduceHistogram(CuTexImage*hist1, CuTexImage* hist2)
{
int ws = hist1->GetImgWidth(), hs = hist1->GetImgHeight();
int wd = hist2->GetImgWidth(), hd = hist2->GetImgHeight();
int temp = (int)floor(logf(float(wd * 2/ 3)) / logf(2.0f));
const int wi = min(7, max(temp , 0));
hist1->BindTexture(texDataI4);
const int BW = 1 << wi, BH = 1 << (7 - wi);
dim3 grid((wd + BW - 1)/ BW, (hd + BH -1) / BH);
dim3 block(BW, BH);
hipLaunchKernelGGL(( ReduceHist_Kernel), dim3(grid), dim3(block), 0, 0, (int4*)hist2->_cuData, ws, wd, hd);
}
void __global__ ListGen_Kernel(int4* d_list, int width)
{
int idx1 = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
int4 pos = tex1Dfetch(texDataList, idx1);
int idx2 = IMUL(pos.y, width) + pos.x;
int4 temp = tex1Dfetch(texDataI4, idx2);
int sum1 = temp.x + temp.y;
int sum2 = sum1 + temp.z;
pos.x <<= 2;
if(pos.z >= sum2)
{
pos.x += 3;
pos.z -= sum2;
}else if(pos.z >= sum1)
{
pos.x += 2;
pos.z -= sum1;
}else if(pos.z >= temp.x)
{
pos.x += 1;
pos.z -= temp.x;
}
d_list[idx1] = pos;
}
//input list (x, y) (x, y) ....
void ProgramCU::GenerateList(CuTexImage* list, CuTexImage* hist)
{
int len = list->GetImgWidth();
list->BindTexture(texDataList);
hist->BindTexture(texDataI4);
dim3 grid((len + LISTGEN_BLOCK_DIM -1) /LISTGEN_BLOCK_DIM);
dim3 block(LISTGEN_BLOCK_DIM);
hipLaunchKernelGGL(( ListGen_Kernel), dim3(grid), dim3(block), 0, 0, (int4*) list->_cuData, hist->GetImgWidth());
}
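//////////////////////////////////////////////////////////////
//ComputeOrientation_Kernel: one thread per keypoint. It builds a 36-bin
//(10-degree) histogram of gradient orientations inside a window of radius
//sample_factor * scale, weighted by gradient magnitude and a Gaussian of
//sigma gaussian_factor * scale, smooths the histogram, and then either stores
//a single interpolated peak in key.w (orientation in radians) or packs the
//two strongest peaks above 80% of the maximum into key.w as two 16-bit
//fractions of a full turn.
//////////////////////////////////////////////////////////////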
void __global__ ComputeOrientation_Kernel(float4* d_list,
int list_len,
int width, int height,
float sigma, float sigma_step,
float gaussian_factor, float sample_factor,
int num_orientation,
int existing_keypoint,
int subpixel,
int keepsign)
{
const float ten_degree_per_radius = 5.7295779513082320876798154814105;
const float radius_per_ten_degrees = 1.0 / 5.7295779513082320876798154814105;
int idx = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;
if(idx >= list_len) return;
float4 key;
if(existing_keypoint)
{
key = tex1Dfetch(texDataF4, idx);
}else
{
int4 ikey = tex1Dfetch(texDataList, idx);
key.x = ikey.x + 0.5f;
key.y = ikey.y + 0.5f;
key.z = sigma;
if(subpixel || keepsign)
{
float4 offset = tex1Dfetch(texDataF4, IMUL(width, ikey.y) + ikey.x);
if(subpixel)
{
key.x += offset.y;
key.y += offset.z;
key.z *= pow(sigma_step, offset.w);
}
if(keepsign) key.z *= offset.x;
}
}
if(num_orientation == 0)
{
key.w = 0;
d_list[idx] = key;
return;
}
float vote[37];
float gsigma = key.z * gaussian_factor;
float win = fabs(key.z) * sample_factor;
float dist_threshold = win * win + 0.5;
float factor = -0.5f / (gsigma * gsigma);
float xmin = max(1.5f, floor(key.x - win) + 0.5f);
float ymin = max(1.5f, floor(key.y - win) + 0.5f);
float xmax = min(width - 1.5f, floor(key.x + win) + 0.5f);
float ymax = min(height -1.5f, floor(key.y + win) + 0.5f);
#pragma unroll
for(int i = 0; i < 36; ++i) vote[i] = 0.0f;
for(float y = ymin; y <= ymax; y += 1.0f)
{
for(float x = xmin; x <= xmax; x += 1.0f)
{
float dx = x - key.x;
float dy = y - key.y;
float sq_dist = dx * dx + dy * dy;
if(sq_dist >= dist_threshold) continue;
float2 got = tex2D(texDataF2, x, y);
float weight = got.x * exp(sq_dist * factor);
float fidx = floor(got.y * ten_degree_per_radius);
int oidx = fidx;
if(oidx < 0) oidx += 36;
vote[oidx] += weight;
}
}
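//the raw histogram is smoothed with six passes of a circular 3-tap box
//filter (roughly Gaussian) before the peaks are picked.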
//filter the vote
const float one_third = 1.0 /3.0;
#pragma unroll
for(int i = 0; i < 6; ++i)
{
vote[36] = vote[0];
float pre = vote[35];
#pragma unroll
for(int j = 0; j < 36; ++j)
{
float temp = one_third * (pre + vote[j] + vote[j + 1]);
pre = vote[j]; vote[j] = temp;
}
}
vote[36] = vote[0];
if(num_orientation == 1 || existing_keypoint)
{
int index_max = 0;
float max_vote = vote[0];
#pragma unroll
for(int i = 1; i < 36; ++i)
{
index_max = vote[i] > max_vote? i : index_max;
max_vote = max(max_vote, vote[i]);
}
float pre = vote[index_max == 0? 35 : index_max -1];
float next = vote[index_max + 1];
float weight = max_vote;
float off = 0.5f * FDIV(next - pre, weight + weight - next - pre);
key.w = radius_per_ten_degrees * (index_max + 0.5f + off);
d_list[idx] = key;
}else
{
float max_vote = vote[0];
#pragma unroll
for(int i = 1; i < 36; ++i) max_vote = max(max_vote, vote[i]);
float vote_threshold = max_vote * 0.8f;
float pre = vote[35];
float max_rot[2], max_vot[2] = {0, 0};
int ocount = 0;
#pragma unroll
for(int i =0; i < 36; ++i)
{
float next = vote[i + 1];
if(vote[i] > vote_threshold && vote[i] > pre && vote[i] > next)
{
float di = 0.5f * FDIV(next - pre, vote[i] + vote[i] - next - pre);
float rot = i + di + 0.5f;
float weight = vote[i];
///
if(weight > max_vot[1])
{
if(weight > max_vot[0])
{
max_vot[1] = max_vot[0];
max_rot[1] = max_rot[0];
max_vot[0] = weight;
max_rot[0] = rot;
}
else
{
max_vot[1] = weight;
max_rot[1] = rot;
}
ocount ++;
}
}
pre = vote[i];
}
float fr1 = max_rot[0] / 36.0f;
if(fr1 < 0) fr1 += 1.0f;
unsigned short us1 = ocount == 0? 65535 : ((unsigned short )floor(fr1 * 65535.0f));
unsigned short us2 = 65535;
if(ocount > 1)
{
float fr2 = max_rot[1] / 36.0f;
if(fr2 < 0) fr2 += 1.0f;
us2 = (unsigned short ) floor(fr2 * 65535.0f);
}
unsigned int uspack = (us2 << 16) | us1;
key.w = __int_as_float(uspack);
d_list[idx] = key;
}
}
void ProgramCU::ComputeOrientation(CuTexImage* list, CuTexImage* got, CuTexImage*key,
float sigma, float sigma_step, int existing_keypoint)
{
int len = list->GetImgWidth();
if(len <= 0) return;
int width = got->GetImgWidth(), height = got->GetImgHeight();
if(existing_keypoint)
{
list->BindTexture(texDataF4);
}else
{
list->BindTexture(texDataList);
if(GlobalUtil::_SubpixelLocalization) key->BindTexture(texDataF4);
}
got->BindTexture2D(texDataF2);
const int block_width = len < ORIENTATION_COMPUTE_PER_BLOCK ? 16 : ORIENTATION_COMPUTE_PER_BLOCK;
dim3 grid((len + block_width -1) / block_width);
dim3 block(block_width);
hipLaunchKernelGGL(( ComputeOrientation_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) list->_cuData,
len, width, height, sigma, sigma_step,
GlobalUtil::_OrientationGaussianFactor,
GlobalUtil::_OrientationGaussianFactor * GlobalUtil::_OrientationWindowFactor,
GlobalUtil::_FixedOrientation? 0 : GlobalUtil::_MaxOrientation,
existing_keypoint, GlobalUtil::_SubpixelLocalization, GlobalUtil::_KeepExtremumSign);
ProgramCU::CheckErrorCUDA("ComputeOrientation");
}
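//////////////////////////////////////////////////////////////
//ComputeDescriptor_Kernel: 16 threads cooperate on one keypoint, one thread
//per cell of the 4x4 SIFT grid. Each thread rotates its cell into the
//keypoint frame, accumulates an 8-bin orientation histogram with a Gaussian
//spatial weight, bilinear cell weighting (wx * wy) and linear orientation
//interpolation (des[8] wraps back into des[0]), and writes its 8 bins as two
//float4 values, giving the usual 4 x 4 x 8 = 128-dimensional descriptor.
//////////////////////////////////////////////////////////////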
template <bool DYNAMIC_INDEXING> void __global__ ComputeDescriptor_Kernel(float4* d_des, int num,
int width, int height, float window_factor)
{
const float rpi = 4.0/ 3.14159265358979323846;
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
int fidx = idx >> 4;
if(fidx >= num) return;
float4 key = tex1Dfetch(texDataF4, fidx);
int bidx = idx& 0xf, ix = bidx & 0x3, iy = bidx >> 2;
float spt = fabs(key.z * window_factor);
float s, c; __sincosf(key.w, &s, &c);
float anglef = key.w > 3.14159265358979323846? key.w - (2.0 * 3.14159265358979323846) : key.w ;
float cspt = c * spt, sspt = s * spt;
float crspt = c / spt, srspt = s / spt;
float2 offsetpt, pt;
float xmin, ymin, xmax, ymax, bsz;
offsetpt.x = ix - 1.5f;
offsetpt.y = iy - 1.5f;
pt.x = cspt * offsetpt.x - sspt * offsetpt.y + key.x;
pt.y = cspt * offsetpt.y + sspt * offsetpt.x + key.y;
bsz = fabs(cspt) + fabs(sspt);
xmin = max(1.5f, floor(pt.x - bsz) + 0.5f);
ymin = max(1.5f, floor(pt.y - bsz) + 0.5f);
xmax = min(width - 1.5f, floor(pt.x + bsz) + 0.5f);
ymax = min(height - 1.5f, floor(pt.y + bsz) + 0.5f);
float des[9];
#pragma unroll
for(int i =0; i < 9; ++i) des[i] = 0.0f;
for(float y = ymin; y <= ymax; y += 1.0f)
{
for(float x = xmin; x <= xmax; x += 1.0f)
{
float dx = x - pt.x;
float dy = y - pt.y;
float nx = crspt * dx + srspt * dy;
float ny = crspt * dy - srspt * dx;
float nxn = fabs(nx);
float nyn = fabs(ny);
if(nxn < 1.0f && nyn < 1.0f)
{
float2 cc = tex2D(texDataF2, x, y);
float dnx = nx + offsetpt.x;
float dny = ny + offsetpt.y;
float ww = exp(-0.125f * (dnx * dnx + dny * dny));
float wx = 1.0 - nxn;
float wy = 1.0 - nyn;
float weight = ww * wx * wy * cc.x;
float theta = (anglef - cc.y) * rpi;
if(theta < 0) theta += 8.0f;
float fo = floor(theta);
int fidx = fo;
float weight1 = fo + 1.0f - theta;
float weight2 = theta - fo;
if(DYNAMIC_INDEXING)
{
des[fidx] += (weight1 * weight);
des[fidx + 1] += (weight2 * weight);
//this dynamic indexing part might be slow
}else
{
#pragma unroll
for(int k = 0; k < 8; ++k)
{
if(k == fidx)
{
des[k] += (weight1 * weight);
des[k+1] += (weight2 * weight);
}
}
}
}
}
}
des[0] += des[8];
int didx = idx << 1;
d_des[didx] = make_float4(des[0], des[1], des[2], des[3]);
d_des[didx+1] = make_float4(des[4], des[5], des[6], des[7]);
}
template <bool DYNAMIC_INDEXING> void __global__ ComputeDescriptorRECT_Kernel(float4* d_des, int num,
int width, int height, float window_factor)
{
const float rpi = 4.0/ 3.14159265358979323846;
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
int fidx = idx >> 4;
if(fidx >= num) return;
float4 key = tex1Dfetch(texDataF4, fidx);
int bidx = idx& 0xf, ix = bidx & 0x3, iy = bidx >> 2;
//float aspect_ratio = key.w / key.z;
//float aspect_sq = aspect_ratio * aspect_ratio;
float sptx = key.z * 0.25, spty = key.w * 0.25;
float xmin, ymin, xmax, ymax; float2 pt;
pt.x = sptx * (ix + 0.5f) + key.x;
pt.y = spty * (iy + 0.5f) + key.y;
xmin = max(1.5f, floor(pt.x - sptx) + 0.5f);
ymin = max(1.5f, floor(pt.y - spty) + 0.5f);
xmax = min(width - 1.5f, floor(pt.x + sptx) + 0.5f);
ymax = min(height - 1.5f, floor(pt.y + spty) + 0.5f);
float des[9];
#pragma unroll
for(int i =0; i < 9; ++i) des[i] = 0.0f;
for(float y = ymin; y <= ymax; y += 1.0f)
{
for(float x = xmin; x <= xmax; x += 1.0f)
{
float nx = (x - pt.x) / sptx;
float ny = (y - pt.y) / spty;
float nxn = fabs(nx);
float nyn = fabs(ny);
if(nxn < 1.0f && nyn < 1.0f)
{
float2 cc = tex2D(texDataF2, x, y);
float wx = 1.0 - nxn;
float wy = 1.0 - nyn;
float weight = wx * wy * cc.x;
float theta = (- cc.y) * rpi;
if(theta < 0) theta += 8.0f;
float fo = floor(theta);
int fidx = fo;
float weight1 = fo + 1.0f - theta;
float weight2 = theta - fo;
if(DYNAMIC_INDEXING)
{
des[fidx] += (weight1 * weight);
des[fidx + 1] += (weight2 * weight);
//this dynamic indexing part might be slow
}else
{
#pragma unroll
for(int k = 0; k < 8; ++k)
{
if(k == fidx)
{
des[k] += (weight1 * weight);
des[k+1] += (weight2 * weight);
}
}
}
}
}
}
des[0] += des[8];
int didx = idx << 1;
d_des[didx] = make_float4(des[0], des[1], des[2], des[3]);
d_des[didx+1] = make_float4(des[4], des[5], des[6], des[7]);
}
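//////////////////////////////////////////////////////////////
//NormalizeDescriptor_Kernel: standard SIFT normalization, one thread per
//descriptor (32 float4 = 128 floats): L2-normalize, clamp every component at
//0.2 to damp large gradient magnitudes, then renormalize.
//////////////////////////////////////////////////////////////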
void __global__ NormalizeDescriptor_Kernel(float4* d_des, int num)
{
float4 temp[32];
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(idx >= num) return;
int sidx = idx << 5;
float norm1 = 0, norm2 = 0;
#pragma unroll
for(int i = 0; i < 32; ++i)
{
temp[i] = tex1Dfetch(texDataF4, sidx +i);
norm1 += (temp[i].x * temp[i].x + temp[i].y * temp[i].y +
temp[i].z * temp[i].z + temp[i].w * temp[i].w);
}
norm1 = rsqrt(norm1);
#pragma unroll
for(int i = 0; i < 32; ++i)
{
temp[i].x = min(0.2f, temp[i].x * norm1);
temp[i].y = min(0.2f, temp[i].y * norm1);
temp[i].z = min(0.2f, temp[i].z * norm1);
temp[i].w = min(0.2f, temp[i].w * norm1);
norm2 += (temp[i].x * temp[i].x + temp[i].y * temp[i].y +
temp[i].z * temp[i].z + temp[i].w * temp[i].w);
}
norm2 = rsqrt(norm2);
#pragma unroll
for(int i = 0; i < 32; ++i)
{
temp[i].x *= norm2; temp[i].y *= norm2;
temp[i].z *= norm2; temp[i].w *= norm2;
d_des[sidx + i] = temp[i];
}
}
void ProgramCU::ComputeDescriptor(CuTexImage*list, CuTexImage* got, CuTexImage* dtex, int rect, int stream)
{
int num = list->GetImgWidth();
int width = got->GetImgWidth();
int height = got->GetImgHeight();
dtex->InitTexture(num * 128, 1, 1);
got->BindTexture2D(texDataF2);
list->BindTexture(texDataF4);
int block_width = DESCRIPTOR_COMPUTE_BLOCK_SIZE;
dim3 grid((num * 16 + block_width -1) / block_width);
dim3 block(block_width);
if(rect)
{
if(GlobalUtil::_UseDynamicIndexing)
hipLaunchKernelGGL(( ComputeDescriptorRECT_Kernel<true>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
else
hipLaunchKernelGGL(( ComputeDescriptorRECT_Kernel<false>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
}else
{
if(GlobalUtil::_UseDynamicIndexing)
hipLaunchKernelGGL(( ComputeDescriptor_Kernel<true>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
else
hipLaunchKernelGGL(( ComputeDescriptor_Kernel<false>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
}
if(GlobalUtil::_NormalizedSIFT)
{
dtex->BindTexture(texDataF4);
const int block_width = DESCRIPTOR_NORMALIZ_PER_BLOCK;
dim3 grid((num + block_width -1) / block_width);
dim3 block(block_width);
hipLaunchKernelGGL(( NormalizeDescriptor_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num);
}
CheckErrorCUDA("ComputeDescriptor");
}
//////////////////////////////////////////////////////
void ProgramCU::FinishCUDA()
{
hipDeviceSynchronize();
}
int ProgramCU::CheckErrorCUDA(const char* location)
{
hipError_t e = hipGetLastError();
if(e)
{
if(location) fprintf(stderr, "%s:\t", location);
fprintf(stderr, "%s\n", hipGetErrorString(e));
//assert(0);
return 1;
}else
{
return 0;
}
}
void __global__ ConvertDOG_Kernel(float* d_result, int width, int height)
{
int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y;
int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x;
if(col < width && row < height)
{
int index = row * width + col;
float v = tex1Dfetch(texData, index);
d_result[index] = (col == 0 || row == 0 || col == width -1 || row == height -1)?
0.5 : saturate(0.5+20.0*v);
}
}
///
void ProgramCU::DisplayConvertDOG(CuTexImage* dog, CuTexImage* out)
{
if(out->_cuData == NULL) return;
int width = dog->GetImgWidth(), height = dog ->GetImgHeight();
dog->BindTexture(texData);
dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM);
dim3 block(BLOCK_DIM, BLOCK_DIM);
hipLaunchKernelGGL(( ConvertDOG_Kernel), dim3(grid), dim3(block), 0, 0, (float*) out->_cuData, width, height);
ProgramCU::CheckErrorCUDA("DisplayConvertDOG");
}
void __global__ ConvertGRD_Kernel(float* d_result, int width, int height)
{
int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y;
int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x;
if(col < width && row < height)
{
int index = row * width + col;
float v = tex1Dfetch(texData, index << 1);
d_result[index] = (col == 0 || row == 0 || col == width -1 || row == height -1)?
0 : saturate(5 * v);
}
}
void ProgramCU::DisplayConvertGRD(CuTexImage* got, CuTexImage* out)
{
if(out->_cuData == NULL) return;
int width = got->GetImgWidth(), height = got ->GetImgHeight();
got->BindTexture(texData);
dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM);
dim3 block(BLOCK_DIM, BLOCK_DIM);
hipLaunchKernelGGL(( ConvertGRD_Kernel), dim3(grid), dim3(block), 0, 0, (float*) out->_cuData, width, height);
ProgramCU::CheckErrorCUDA("DisplayConvertGRD");
}
void __global__ ConvertKEY_Kernel(float4* d_result, int width, int height)
{
int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y;
int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x;
if(col < width && row < height)
{
int index = row * width + col;
float4 keyv = tex1Dfetch(texDataF4, index);
int is_key = (keyv.x == 1.0f || keyv.x == -1.0f);
int inside = col > 0 && row > 0 && row < height -1 && col < width - 1;
float v = inside? saturate(0.5 + 20 * tex1Dfetch(texData, index)) : 0.5;
d_result[index] = is_key && inside ?
(keyv.x > 0? make_float4(1.0f, 0, 0, 1.0f) : make_float4(0.0f, 1.0f, 0.0f, 1.0f)):
make_float4(v, v, v, 1.0f) ;
}
}
void ProgramCU::DisplayConvertKEY(CuTexImage* key, CuTexImage* dog, CuTexImage* out)
{
if(out->_cuData == NULL) return;
int width = key->GetImgWidth(), height = key ->GetImgHeight();
dog->BindTexture(texData);
key->BindTexture(texDataF4);
dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM);
dim3 block(BLOCK_DIM, BLOCK_DIM);
hipLaunchKernelGGL(( ConvertKEY_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) out->_cuData, width, height);
}
void __global__ DisplayKeyPoint_Kernel(float4 * d_result, int num)
{
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(idx >= num) return;
float4 v = tex1Dfetch(texDataF4, idx);
d_result[idx] = make_float4(v.x, v.y, 0, 1.0f);
}
void ProgramCU::DisplayKeyPoint(CuTexImage* ftex, CuTexImage* out)
{
int num = ftex->GetImgWidth();
int block_width = 64;
dim3 grid((num + block_width -1) /block_width);
dim3 block(block_width);
ftex->BindTexture(texDataF4);
hipLaunchKernelGGL(( DisplayKeyPoint_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) out->_cuData, num);
ProgramCU::CheckErrorCUDA("DisplayKeyPoint");
}
void __global__ DisplayKeyBox_Kernel(float4* d_result, int num)
{
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(idx >= num) return;
int kidx = idx / 10, vidx = idx - IMUL(kidx , 10);
float4 v = tex1Dfetch(texDataF4, kidx);
float sz = fabs(v.z * 3.0f);
///////////////////////
float s, c; __sincosf(v.w, &s, &c);
///////////////////////
float dx = vidx == 0? 0 : ((vidx <= 4 || vidx >= 9)? sz : -sz);
float dy = vidx <= 1? 0 : ((vidx <= 2 || vidx >= 7)? -sz : sz);
float4 pos;
pos.x = v.x + c * dx - s * dy;
pos.y = v.y + c * dy + s * dx;
pos.z = 0; pos.w = 1.0f;
d_result[idx] = pos;
}
void ProgramCU::DisplayKeyBox(CuTexImage* ftex, CuTexImage* out)
{
int len = ftex->GetImgWidth();
int block_width = 32;
dim3 grid((len * 10 + block_width -1) / block_width);
dim3 block(block_width);
ftex->BindTexture(texDataF4);
hipLaunchKernelGGL(( DisplayKeyBox_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) out->_cuData, len * 10);
}
///////////////////////////////////////////////////////////////////
inline void CuTexImage:: BindTexture(textureReference& texRef)
{
hipBindTexture(NULL, &texRef, _cuData, &texRef.channelDesc, _numBytes);
}
inline void CuTexImage::BindTexture2D(textureReference& texRef)
{
#if defined(SIFTGPU_ENABLE_LINEAR_TEX2D)
hipBindTexture2D(0, &texRef, _cuData, &texRef.channelDesc, _imgWidth, _imgHeight, _imgWidth* _numChannel* sizeof(float));
#else
hipChannelFormatDesc desc;
hipGetChannelDesc(&desc, _cuData2D);
hipBindTextureToArray(&texRef, _cuData2D, &desc);
#endif
}
int ProgramCU::CheckCudaDevice(int device)
{
int count = 0, device_used;
if(hipGetDeviceCount(&count) != hipSuccess || count <= 0)
{
ProgramCU::CheckErrorCUDA("CheckCudaDevice");
return 0;
}else if(count == 1)
{
hipDeviceProp_t deviceProp;
if ( hipGetDeviceProperties(&deviceProp, 0) != hipSuccess ||
(deviceProp.major == 9999 && deviceProp.minor == 9999))
{
fprintf(stderr, "CheckCudaDevice: no device supporting CUDA.\n");
return 0;
}else
{
GlobalUtil::_MemCapGPU = deviceProp.totalGlobalMem / 1024;
GlobalUtil::_texMaxDimGL = 32768;
if(GlobalUtil::_verbose)
fprintf(stdout, "NOTE: changing maximum texture dimension to %d\n", GlobalUtil::_texMaxDimGL);
}
}
if(device >0 && device < count)
{
hipSetDevice(device);
CheckErrorCUDA("hipSetDevice\n");
}
hipGetDevice(&device_used);
if(device != device_used)
fprintf(stderr, "\nERROR: Cannot set device to %d\n"
"\nWARNING: Use # %d device instead (out of %d)\n", device, device_used, count);
return 1;
}
////////////////////////////////////////////////////////////////////////////////////////
// siftmatch functions
//////////////////////////////////////////////////////////////////////////////////////////
#define MULT_TBLOCK_DIMX 128
#define MULT_TBLOCK_DIMY 1
#define MULT_BLOCK_DIMX (MULT_TBLOCK_DIMX)
#define MULT_BLOCK_DIMY (8 * MULT_TBLOCK_DIMY)
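//////////////////////////////////////////////////////////////
//Descriptors are stored as 128 unsigned bytes per feature (8 uint4 reads), so
//MultiplyDescriptor_Kernel computes integer dot products. Each block caches
//MULT_BLOCK_DIMY descriptors of image 1 in shared memory with a 17-int row
//stride (16 data ints plus one pad, presumably to avoid bank conflicts), and
//every thread streams one descriptor of image 2 against them, optionally
//recording the best and second-best scores per column in d_temp for the
//later ratio test.
//////////////////////////////////////////////////////////////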
texture<uint4, 1, hipReadModeElementType> texDes1;
texture<uint4, 1, hipReadModeElementType> texDes2;
void __global__ MultiplyDescriptor_Kernel(int* d_result, int num1, int num2, int3* d_temp)
{
int idx01 = (blockIdx.y * MULT_BLOCK_DIMY), idx02 = (blockIdx.x * MULT_BLOCK_DIMX);
int idx1 = idx01 + threadIdx.y, idx2 = idx02 + threadIdx.x;
__shared__ int data1[17 * 2 * MULT_BLOCK_DIMY];
int read_idx1 = idx01 * 8 + threadIdx.x, read_idx2 = idx2 * 8;
int col4 = threadIdx.x & 0x3, row4 = threadIdx.x >> 2;
int cache_idx1 = IMUL(row4, 17) + (col4 << 2);
///////////////////////////////////////////////////////////////
//Load feature descriptors
///////////////////////////////////////////////////////////////
#if MULT_BLOCK_DIMY == 16
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w;
#elif MULT_BLOCK_DIMY == 8
if(threadIdx.x < 64)
{
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w;
}
#else
#error "MULT_BLOCK_DIMY must be 8 or 16"
#endif
__syncthreads();
///
if(idx2 >= num2) return;
///////////////////////////////////////////////////////////////////////////
//compare descriptors
int results[MULT_BLOCK_DIMY];
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i) results[i] = 0;
#pragma unroll
for(int i = 0; i < 8; ++i)
{
uint4 v = tex1Dfetch(texDes2, read_idx2 + i);
unsigned char* p2 = (unsigned char*)(&v);
#pragma unroll
for(int k = 0; k < MULT_BLOCK_DIMY; ++k)
{
unsigned char* p1 = (unsigned char*) (data1 + k * 34 + i * 4 + (i/4));
results[k] += ( IMUL(p1[0], p2[0]) + IMUL(p1[1], p2[1])
+ IMUL(p1[2], p2[2]) + IMUL(p1[3], p2[3])
+ IMUL(p1[4], p2[4]) + IMUL(p1[5], p2[5])
+ IMUL(p1[6], p2[6]) + IMUL(p1[7], p2[7])
+ IMUL(p1[8], p2[8]) + IMUL(p1[9], p2[9])
+ IMUL(p1[10], p2[10]) + IMUL(p1[11], p2[11])
+ IMUL(p1[12], p2[12]) + IMUL(p1[13], p2[13])
+ IMUL(p1[14], p2[14]) + IMUL(p1[15], p2[15]));
}
}
int dst_idx = IMUL(idx1, num2) + idx2;
if(d_temp)
{
int3 cmp_result = make_int3(0, -1, 0);
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1)
{
cmp_result = results[i] > cmp_result.x?
make_int3(results[i], idx1 + i, cmp_result.x) :
make_int3(cmp_result.x, cmp_result.y, max(cmp_result.z, results[i]));
d_result[dst_idx + IMUL(i, num2)] = results[i];
}
}
d_temp[ IMUL(blockIdx.y, num2) + idx2] = cmp_result;
}else
{
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1) d_result[dst_idx + IMUL(i, num2)] = results[i];
}
}
}
void ProgramCU::MultiplyDescriptor(CuTexImage* des1, CuTexImage* des2, CuTexImage* texDot, CuTexImage* texCRT)
{
int num1 = des1->GetImgWidth() / 8;
int num2 = des2->GetImgWidth() / 8;
dim3 grid( (num2 + MULT_BLOCK_DIMX - 1)/ MULT_BLOCK_DIMX,
(num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY);
dim3 block(MULT_TBLOCK_DIMX, MULT_TBLOCK_DIMY);
texDot->InitTexture( num2,num1);
if(texCRT) texCRT->InitTexture(num2, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY, 32);
des1->BindTexture(texDes1);
des2->BindTexture(texDes2);
hipLaunchKernelGGL(( MultiplyDescriptor_Kernel), dim3(grid), dim3(block), 0, 0, (int*)texDot->_cuData, num1, num2,
(texCRT? (int3*)texCRT->_cuData : NULL));
}
texture<float, 1, hipReadModeElementType> texLoc1;
texture<float2, 1, hipReadModeElementType> texLoc2;
struct Matrix33{float mat[3][3];};
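//MultiplyDescriptorG_Kernel adds a geometric gate before the dot products:
//each candidate pair is first checked against the homography H (squared
//transfer error < hdistmax) and the fundamental matrix F (Sampson-style
//error < fdistmax). Pairs that fail start from -262144, matching the 2^18
//normalization used by the row/column match kernels, so they effectively
//cannot win the later maximum search, and the descriptor products are only
//computed when at least one pair in the block row survives.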
void __global__ MultiplyDescriptorG_Kernel(int* d_result, int num1, int num2, int3* d_temp,
Matrix33 H, float hdistmax, Matrix33 F, float fdistmax)
{
int idx01 = (blockIdx.y * MULT_BLOCK_DIMY);
int idx02 = (blockIdx.x * MULT_BLOCK_DIMX);
int idx1 = idx01 + threadIdx.y;
int idx2 = idx02 + threadIdx.x;
__shared__ int data1[17 * 2 * MULT_BLOCK_DIMY];
__shared__ float loc1[MULT_BLOCK_DIMY * 2];
int read_idx1 = idx01 * 8 + threadIdx.x ;
int read_idx2 = idx2 * 8;
int col4 = threadIdx.x & 0x3, row4 = threadIdx.x >> 2;
int cache_idx1 = IMUL(row4, 17) + (col4 << 2);
#if MULT_BLOCK_DIMY == 16
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x;
data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z;
data1[cache_idx1+3] = v.w;
#elif MULT_BLOCK_DIMY == 8
if(threadIdx.x < 64)
{
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x;
data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z;
data1[cache_idx1+3] = v.w;
}
#else
#error "MULT_BLOCK_DIMY must be 8 or 16"
#endif
__syncthreads();
if(threadIdx.x < MULT_BLOCK_DIMY * 2)
{
loc1[threadIdx.x] = tex1Dfetch(texLoc1, 2 * idx01 + threadIdx.x);
}
__syncthreads();
if(idx2 >= num2) return;
int results[MULT_BLOCK_DIMY];
/////////////////////////////////////////////////////////////////////////////////////////////
//geometric verification
/////////////////////////////////////////////////////////////////////////////////////////////
int good_count = 0;
float2 loc2 = tex1Dfetch(texLoc2, idx2);
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1)
{
float* loci = loc1 + i * 2;
float locx = loci[0], locy = loci[1];
//homography
float x[3], diff[2];
x[0] = H.mat[0][0] * locx + H.mat[0][1] * locy + H.mat[0][2];
x[1] = H.mat[1][0] * locx + H.mat[1][1] * locy + H.mat[1][2];
x[2] = H.mat[2][0] * locx + H.mat[2][1] * locy + H.mat[2][2];
diff[0] = FDIV(x[0], x[2]) - loc2.x;
diff[1] = FDIV(x[1], x[2]) - loc2.y;
float hdist = diff[0] * diff[0] + diff[1] * diff[1];
if(hdist < hdistmax)
{
//check fundamental matrix
float fx1[3], ftx2[3], x2fx1, se;
fx1[0] = F.mat[0][0] * locx + F.mat[0][1] * locy + F.mat[0][2];
fx1[1] = F.mat[1][0] * locx + F.mat[1][1] * locy + F.mat[1][2];
fx1[2] = F.mat[2][0] * locx + F.mat[2][1] * locy + F.mat[2][2];
ftx2[0] = F.mat[0][0] * loc2.x + F.mat[1][0] * loc2.y + F.mat[2][0];
ftx2[1] = F.mat[0][1] * loc2.x + F.mat[1][1] * loc2.y + F.mat[2][1];
//ftx2[2] = F.mat[0][2] * loc2.x + F.mat[1][2] * loc2.y + F.mat[2][2];
x2fx1 = loc2.x * fx1[0] + loc2.y * fx1[1] + fx1[2];
se = FDIV(x2fx1 * x2fx1, fx1[0] * fx1[0] + fx1[1] * fx1[1] + ftx2[0] * ftx2[0] + ftx2[1] * ftx2[1]);
results[i] = se < fdistmax? 0: -262144;
}else
{
results[i] = -262144;
}
}else
{
results[i] = -262144;
}
good_count += (results[i] >=0);
}
/////////////////////////////////////////////////////////////////////////////////////////////
///compare feature descriptors anyway
/////////////////////////////////////////////////////////////////////////////////////////////
if(good_count > 0)
{
#pragma unroll
for(int i = 0; i < 8; ++i)
{
uint4 v = tex1Dfetch(texDes2, read_idx2 + i);
unsigned char* p2 = (unsigned char*)(&v);
#pragma unroll
for(int k = 0; k < MULT_BLOCK_DIMY; ++k)
{
unsigned char* p1 = (unsigned char*) (data1 + k * 34 + i * 4 + (i/4));
results[k] += ( IMUL(p1[0], p2[0]) + IMUL(p1[1], p2[1])
+ IMUL(p1[2], p2[2]) + IMUL(p1[3], p2[3])
+ IMUL(p1[4], p2[4]) + IMUL(p1[5], p2[5])
+ IMUL(p1[6], p2[6]) + IMUL(p1[7], p2[7])
+ IMUL(p1[8], p2[8]) + IMUL(p1[9], p2[9])
+ IMUL(p1[10], p2[10]) + IMUL(p1[11], p2[11])
+ IMUL(p1[12], p2[12]) + IMUL(p1[13], p2[13])
+ IMUL(p1[14], p2[14]) + IMUL(p1[15], p2[15]));
}
}
}
int dst_idx = IMUL(idx1, num2) + idx2;
if(d_temp)
{
int3 cmp_result = make_int3(0, -1, 0);
#pragma unroll
for(int i= 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1)
{
cmp_result = results[i] > cmp_result.x?
make_int3(results[i], idx1 + i, cmp_result.x) :
make_int3(cmp_result.x, cmp_result.y, max(cmp_result.z, results[i]));
d_result[dst_idx + IMUL(i, num2)] = max(results[i], 0);
}else
{
break;
}
}
d_temp[ IMUL(blockIdx.y, num2) + idx2] = cmp_result;
}else
{
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1) d_result[dst_idx + IMUL(i, num2)] = max(results[i], 0);
else break;
}
}
}
void ProgramCU::MultiplyDescriptorG(CuTexImage* des1, CuTexImage* des2,
CuTexImage* loc1, CuTexImage* loc2, CuTexImage* texDot, CuTexImage* texCRT,
float* H, float hdistmax, float* F, float fdistmax)
{
int num1 = des1->GetImgWidth() / 8;
int num2 = des2->GetImgWidth() / 8;
Matrix33 MatF, MatH;
//copy the matrix
memcpy(MatF.mat, F, 9 * sizeof(float));
memcpy(MatH.mat, H, 9 * sizeof(float));
//thread blocks
dim3 grid( (num2 + MULT_BLOCK_DIMX - 1)/ MULT_BLOCK_DIMX,
(num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY);
dim3 block(MULT_TBLOCK_DIMX, MULT_TBLOCK_DIMY);
//intermediate results
texDot->InitTexture( num2,num1);
if(texCRT) texCRT->InitTexture( num2, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY, 3);
loc1->BindTexture(texLoc1);
loc2->BindTexture(texLoc2);
des1->BindTexture(texDes1);
des2->BindTexture(texDes2);
hipLaunchKernelGGL(( MultiplyDescriptorG_Kernel), dim3(grid), dim3(block), 0, 0, (int*)texDot->_cuData, num1, num2,
(texCRT? (int3*)texCRT->_cuData : NULL),
MatH, hdistmax, MatF, fdistmax);
}
texture<int, 1, hipReadModeElementType> texDOT;
#define ROWMATCH_BLOCK_WIDTH 32
#define ROWMATCH_BLOCK_HEIGHT 1
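//RowMatch_Kernel reduces one row of the dot-product matrix to its best and
//second-best scores. The constant 0.000003814697265625 is 1/2^18 = 1/(512*512);
//assuming the byte descriptors are unit vectors quantized with scale 512, it
//maps an integer dot product back to a cosine, and acos turns that into the
//angular distance used by the absolute and ratio tests.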
void __global__ RowMatch_Kernel(int*d_dot, int* d_result, int num2, float distmax, float ratiomax)
{
#if ROWMATCH_BLOCK_HEIGHT == 1
__shared__ int dotmax[ROWMATCH_BLOCK_WIDTH];
__shared__ int dotnxt[ROWMATCH_BLOCK_WIDTH];
__shared__ int dotidx[ROWMATCH_BLOCK_WIDTH];
int row = blockIdx.y;
#else
__shared__ int x_dotmax[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH];
__shared__ int x_dotnxt[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH];
__shared__ int x_dotidx[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH];
int* dotmax = x_dotmax[threadIdx.y];
int* dotnxt = x_dotnxt[threadIdx.y];
int* dotidx = x_dotidx[threadIdx.y];
int row = IMUL(blockIdx.y, ROWMATCH_BLOCK_HEIGHT) + threadIdx.y;
#endif
int base_address = IMUL(row , num2);
int t_dotmax = 0, t_dotnxt = 0, t_dotidx = -1;
for(int i = 0; i < num2; i += ROWMATCH_BLOCK_WIDTH)
{
if(threadIdx.x + i < num2)
{
int v = d_dot[base_address + threadIdx.x + i]; // tex1Dfetch(texDOT, base_address + threadIdx.x + i);
bool test = v > t_dotmax;
t_dotnxt = test? t_dotmax : max(t_dotnxt, v);
t_dotidx = test? (threadIdx.x + i) : t_dotidx;
t_dotmax = test? v: t_dotmax;
}
__syncthreads();
}
dotmax[threadIdx.x] = t_dotmax;
dotnxt[threadIdx.x] = t_dotnxt;
dotidx[threadIdx.x] = t_dotidx;
__syncthreads();
#pragma unroll
for(int step = ROWMATCH_BLOCK_WIDTH/2; step >0; step /= 2)
{
if(threadIdx.x < step)
{
int v1 = dotmax[threadIdx.x], v2 = dotmax[threadIdx.x + step];
bool test = v2 > v1;
dotnxt[threadIdx.x] = test? max(v1, dotnxt[threadIdx.x + step]) :max(dotnxt[threadIdx.x], v2);
dotidx[threadIdx.x] = test? dotidx[threadIdx.x + step] : dotidx[threadIdx.x];
dotmax[threadIdx.x] = test? v2 : v1;
}
__syncthreads();
}
if(threadIdx.x == 0)
{
float dist = acos(min(dotmax[0] * 0.000003814697265625f, 1.0));
float distn = acos(min(dotnxt[0] * 0.000003814697265625f, 1.0));
//float ratio = dist / distn;
d_result[row] = (dist < distmax) && (dist < distn * ratiomax) ? dotidx[0] : -1;//? : -1;
}
}
void ProgramCU::GetRowMatch(CuTexImage* texDot, CuTexImage* texMatch, float distmax, float ratiomax)
{
int num1 = texDot->GetImgHeight();
int num2 = texDot->GetImgWidth();
dim3 grid(1, num1/ROWMATCH_BLOCK_HEIGHT);
dim3 block(ROWMATCH_BLOCK_WIDTH, ROWMATCH_BLOCK_HEIGHT);
// texDot->BindTexture(texDOT);
hipLaunchKernelGGL(( RowMatch_Kernel), dim3(grid), dim3(block), 0, 0, (int*)texDot->_cuData,
(int*)texMatch->_cuData, num2, distmax, ratiomax);
}
#define COLMATCH_BLOCK_WIDTH 32
//texture<int3, 1, hipReadModeElementType> texCT;
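//ColMatch_Kernel folds the per-block (best, index, second-best) triples
//produced by MultiplyDescriptor*_Kernel down each column and applies the same
//distance and ratio tests as RowMatch to pick a match for every feature of
//the second image.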
void __global__ ColMatch_Kernel(int3*d_crt, int* d_result, int height, int num2, float distmax, float ratiomax)
{
int col = COLMATCH_BLOCK_WIDTH * blockIdx.x + threadIdx.x;
if(col >= num2) return;
int3 result = d_crt[col];//tex1Dfetch(texCT, col);
int read_idx = col + num2;
for(int i = 1; i < height; ++i, read_idx += num2)
{
int3 temp = d_crt[read_idx];//tex1Dfetch(texCT, read_idx);
result = result.x < temp.x?
make_int3(temp.x, temp.y, max(result.x, temp.z)) :
make_int3(result.x, result.y, max(result.z, temp.x));
}
float dist = acos(min(result.x * 0.000003814697265625f, 1.0));
float distn = acos(min(result.z * 0.000003814697265625f, 1.0));
//float ratio = dist / distn;
d_result[col] = (dist < distmax) && (dist < distn * ratiomax) ? result.y : -1;//? : -1;
}
void ProgramCU::GetColMatch(CuTexImage* texCRT, CuTexImage* texMatch, float distmax, float ratiomax)
{
int height = texCRT->GetImgHeight();
int num2 = texCRT->GetImgWidth();
//texCRT->BindTexture(texCT);
dim3 grid((num2 + COLMATCH_BLOCK_WIDTH -1) / COLMATCH_BLOCK_WIDTH);
dim3 block(COLMATCH_BLOCK_WIDTH);
hipLaunchKernelGGL(( ColMatch_Kernel), dim3(grid), dim3(block), 0, 0, (int3*)texCRT->_cuData, (int*) texMatch->_cuData, height, num2, distmax, ratiomax);
}
#endif
|
893bfd61b6139018fee9a20e762d72c33dcca59b.cu
|
////////////////////////////////////////////////////////////////////////////
// File: ProgramCU.cu
// Author: Changchang Wu
// Description : implementation of ProgramCU and all CUDA kernels
//
// Copyright (c) 2007 University of North Carolina at Chapel Hill
// All Rights Reserved
//
// Permission to use, copy, modify and distribute this software and its
// documentation for educational, research and non-profit purposes, without
// fee, and without a written agreement is hereby granted, provided that the
// above copyright notice and the following paragraph appear in all copies.
//
// The University of North Carolina at Chapel Hill make no representations
// about the suitability of this software for any purpose. It is provided
// 'as is' without express or implied warranty.
//
// Please send BUG REPORTS to [email protected]
//
////////////////////////////////////////////////////////////////////////////
#if defined(CUDA_SIFTGPU_ENABLED)
#include "GL/glew.h"
#include "stdio.h"
#include "CuTexImage.h"
#include "ProgramCU.h"
#include "GlobalUtil.h"
//----------------------------------------------------------------
//Begin SiftGPU setting section.
//////////////////////////////////////////////////////////
#define IMUL(X,Y) __mul24(X,Y)
//#define FDIV(X,Y) ((X)/(Y))
#define FDIV(X,Y) __fdividef(X,Y)
/////////////////////////////////////////////////////////
//filter kernel width range (don't change this)
#define KERNEL_MAX_WIDTH 33
#define KERNEL_MIN_WIDTH 5
//////////////////////////////////////////////////////////
//horizontal filter block size (32, 64, 128, 256, 512)
#define FILTERH_TILE_WIDTH 128
//thread block for vertical filter. FILTERV_BLOCK_WIDTH can be (4, 8 or 16)
#define FILTERV_BLOCK_WIDTH 16
#define FILTERV_BLOCK_HEIGHT 32
//The corresponding image patch for a thread block
#define FILTERV_PIXEL_PER_THREAD 4
#define FILTERV_TILE_WIDTH FILTERV_BLOCK_WIDTH
#define FILTERV_TILE_HEIGHT (FILTERV_PIXEL_PER_THREAD * FILTERV_BLOCK_HEIGHT)
//////////////////////////////////////////////////////////
//thread block size for computing Difference of Gaussian
#define DOG_BLOCK_LOG_DIMX 7
#define DOG_BLOCK_LOG_DIMY 0
#define DOG_BLOCK_DIMX (1 << DOG_BLOCK_LOG_DIMX)
#define DOG_BLOCK_DIMY (1 << DOG_BLOCK_LOG_DIMY)
//////////////////////////////////////////////////////////
//thread block size for keypoint detection
#define KEY_BLOCK_LOG_DIMX 3
#define KEY_BLOCK_LOG_DIMY 3
#define KEY_BLOCK_DIMX (1<<KEY_BLOCK_LOG_DIMX)
#define KEY_BLOCK_DIMY (1<<KEY_BLOCK_LOG_DIMY)
//#define KEY_OFFSET_ONE
//making KEY_BLOCK_LOG_DIMX 4 would make the writes coalesced,
//but it seems uncoalesced writes don't affect the speed
//////////////////////////////////////////////////////////
//thread block size for initializing list generation (64, 128, 256, 512 ...)
#define HIST_INIT_WIDTH 128
//thread block size for generating feature list (32, 64, 128, 256, 512, ...)
#define LISTGEN_BLOCK_DIM 128
/////////////////////////////////////////////////////////
//how many keypoint orientations to compute in a block
#define ORIENTATION_COMPUTE_PER_BLOCK 64
//how many keypoint descriptors to compute in a block (2, 4, 8, 16, 32)
#define DESCRIPTOR_COMPUTE_PER_BLOCK 4
#define DESCRIPTOR_COMPUTE_BLOCK_SIZE (16 * DESCRIPTOR_COMPUTE_PER_BLOCK)
//how many keypoint descriptors to normalize in a block (32, ...)
#define DESCRIPTOR_NORMALIZ_PER_BLOCK 32
///////////////////////////////////////////
//Thread block size for visualization
//(This doesn't affect the speed of computation)
#define BLOCK_LOG_DIM 4
#define BLOCK_DIM (1 << BLOCK_LOG_DIM)
//End SiftGPU setting section.
//----------------------------------------------------------------
__device__ __constant__ float d_kernel[KERNEL_MAX_WIDTH];
texture<float, 1, cudaReadModeElementType> texData;
texture<unsigned char, 1, cudaReadModeNormalizedFloat> texDataB;
texture<float2, 2, cudaReadModeElementType> texDataF2;
texture<float4, 1, cudaReadModeElementType> texDataF4;
texture<int4, 1, cudaReadModeElementType> texDataI4;
texture<int4, 1, cudaReadModeElementType> texDataList;
//template<int i> __device__ float Conv(float *data) { return Conv<i-1>(data) + data[i]*d_kernel[i];}
//template<> __device__ float Conv<0>(float *data) { return data[0] * d_kernel[0]; }
//////////////////////////////////////////////////////////////
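//FilterH / FilterV implement one pass of a separable Gaussian: FilterH
//convolves rows against the constant-memory kernel d_kernel using a shared
//cache of FILTERH_TILE_WIDTH + FW - 1 samples with clamp-to-edge indexing,
//and FilterV does the same down the columns with a padded shared tile.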
template<int FW> __global__ void FilterH( float* d_result, int width)
{
const int HALF_WIDTH = FW >> 1;
const int CACHE_WIDTH = FILTERH_TILE_WIDTH + FW -1;
const int CACHE_COUNT = 2 + (CACHE_WIDTH - 2)/ FILTERH_TILE_WIDTH;
__shared__ float data[CACHE_WIDTH];
const int bcol = IMUL(blockIdx.x, FILTERH_TILE_WIDTH);
const int col = bcol + threadIdx.x;
const int index_min = IMUL(blockIdx.y, width);
const int index_max = index_min + width - 1;
int src_index = index_min + bcol - HALF_WIDTH + threadIdx.x;
int cache_index = threadIdx.x;
float value = 0;
#pragma unroll
for(int j = 0; j < CACHE_COUNT; ++j)
{
if(cache_index < CACHE_WIDTH)
{
int fetch_index = src_index < index_min? index_min : (src_index > index_max ? index_max : src_index);
data[cache_index] = tex1Dfetch(texData,fetch_index);
src_index += FILTERH_TILE_WIDTH;
cache_index += FILTERH_TILE_WIDTH;
}
}
__syncthreads();
if(col >= width) return;
#pragma unroll
for(int i = 0; i < FW; ++i)
{
value += (data[threadIdx.x + i]* d_kernel[i]);
}
// value = Conv<FW-1>(data + threadIdx.x);
d_result[index_min + col] = value;
}
////////////////////////////////////////////////////////////////////
template<int FW> __global__ void FilterV(float* d_result, int width, int height)
{
const int HALF_WIDTH = FW >> 1;
const int CACHE_WIDTH = FW + FILTERV_TILE_HEIGHT - 1;
const int TEMP = CACHE_WIDTH & 0xf;
//add some extra space to avoid bank conflict
#if FILTERV_TILE_WIDTH == 16
//make the stride 16 * n +/- 1
const int EXTRA = (TEMP == 1 || TEMP == 0) ? 1 - TEMP : 15 - TEMP;
#elif FILTERV_TILE_WIDTH == 8
//make the stride 16 * n +/- 2
const int EXTRA = (TEMP == 2 || TEMP == 1 || TEMP == 0) ? 2 - TEMP : (TEMP == 15? 3 : 14 - TEMP);
#elif FILTERV_TILE_WIDTH == 4
//make the stride 16 * n +/- 4
const int EXTRA = (TEMP >=0 && TEMP <=4) ? 4 - TEMP : (TEMP > 12? 20 - TEMP : 12 - TEMP);
#else
#error "FILTERV_TILE_WIDTH must be 4, 8 or 16"
#endif
const int CACHE_TRUE_WIDTH = CACHE_WIDTH + EXTRA;
const int CACHE_COUNT = (CACHE_WIDTH + FILTERV_BLOCK_HEIGHT - 1) / FILTERV_BLOCK_HEIGHT;
const int WRITE_COUNT = (FILTERV_TILE_HEIGHT + FILTERV_BLOCK_HEIGHT -1) / FILTERV_BLOCK_HEIGHT;
__shared__ float data[CACHE_TRUE_WIDTH * FILTERV_TILE_WIDTH];
const int row_block_first = IMUL(blockIdx.y, FILTERV_TILE_HEIGHT);
const int col = IMUL(blockIdx.x, FILTERV_TILE_WIDTH) + threadIdx.x;
const int row_first = row_block_first - HALF_WIDTH;
const int data_index_max = IMUL(height - 1, width) + col;
const int cache_col_start = threadIdx.y;
const int cache_row_start = IMUL(threadIdx.x, CACHE_TRUE_WIDTH);
int cache_index = cache_col_start + cache_row_start;
int data_index = IMUL(row_first + cache_col_start, width) + col;
if(col < width)
{
#pragma unroll
for(int i = 0; i < CACHE_COUNT; ++i)
{
if(cache_col_start < CACHE_WIDTH - i * FILTERV_BLOCK_HEIGHT)
{
int fetch_index = data_index < col ? col : (data_index > data_index_max? data_index_max : data_index);
data[cache_index + i * FILTERV_BLOCK_HEIGHT] = tex1Dfetch(texData,fetch_index);
data_index += IMUL(FILTERV_BLOCK_HEIGHT, width);
}
}
}
__syncthreads();
if(col >= width) return;
int row = row_block_first + threadIdx.y;
int index_start = cache_row_start + threadIdx.y;
#pragma unroll
for(int i = 0; i < WRITE_COUNT; ++i,
row += FILTERV_BLOCK_HEIGHT, index_start += FILTERV_BLOCK_HEIGHT)
{
if(row < height)
{
int index_dest = IMUL(row, width) + col;
float value = 0;
#pragma unroll
for(int i = 0; i < FW; ++i)
{
value += (data[index_start + i] * d_kernel[i]);
}
d_result[index_dest] = value;
}
}
}
template<int LOG_SCALE> __global__ void UpsampleKernel(float* d_result, int width)
{
const int SCALE = (1 << LOG_SCALE), SCALE_MASK = (SCALE - 1);
const float INV_SCALE = 1.0f / (float(SCALE));
int col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
if(col >= width) return;
int row = blockIdx.y >> LOG_SCALE;
int index = row * width + col;
int dst_row = blockIdx.y;
int dst_idx= (width * dst_row + col) * SCALE;
int helper = blockIdx.y & SCALE_MASK;
if (helper)
{
float v11 = tex1Dfetch(texData, index);
float v12 = tex1Dfetch(texData, index + 1);
index += width;
float v21 = tex1Dfetch(texData, index);
float v22 = tex1Dfetch(texData, index + 1);
float w1 = INV_SCALE * helper, w2 = 1.0 - w1;
float v1 = (v21 * w1 + w2 * v11);
float v2 = (v22 * w1 + w2 * v12);
d_result[dst_idx] = v1;
#pragma unroll
for(int i = 1; i < SCALE; ++i)
{
const float r2 = i * INV_SCALE;
const float r1 = 1.0f - r2;
d_result[dst_idx +i] = v1 * r1 + v2 * r2;
}
}else
{
float v1 = tex1Dfetch(texData, index);
float v2 = tex1Dfetch(texData, index + 1);
d_result[dst_idx] = v1;
#pragma unroll
for(int i = 1; i < SCALE; ++i)
{
const float r2 = i * INV_SCALE;
const float r1 = 1.0f - r2;
d_result[dst_idx +i] = v1 * r1 + v2 * r2;
}
}
}
////////////////////////////////////////////////////////////////////////////////////////
void ProgramCU::SampleImageU(CuTexImage *dst, CuTexImage *src, int log_scale)
{
int width = src->GetImgWidth(), height = src->GetImgHeight();
src->BindTexture(texData);
dim3 grid((width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, height << log_scale);
dim3 block(FILTERH_TILE_WIDTH);
switch(log_scale)
{
case 1 : UpsampleKernel<1> <<< grid, block>>> ((float*) dst->_cuData, width); break;
case 2 : UpsampleKernel<2> <<< grid, block>>> ((float*) dst->_cuData, width); break;
case 3 : UpsampleKernel<3> <<< grid, block>>> ((float*) dst->_cuData, width); break;
default: break;
}
}
template<int LOG_SCALE> __global__ void DownsampleKernel(float* d_result, int src_width, int dst_width)
{
const int dst_col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
if(dst_col >= dst_width) return;
const int src_col = min((dst_col << LOG_SCALE), (src_width - 1));
const int dst_row = blockIdx.y;
const int src_row = blockIdx.y << LOG_SCALE;
const int src_idx = IMUL(src_row, src_width) + src_col;
const int dst_idx = IMUL(dst_width, dst_row) + dst_col;
d_result[dst_idx] = tex1Dfetch(texData, src_idx);
}
__global__ void DownsampleKernel(float* d_result, int src_width, int dst_width, const int log_scale)
{
const int dst_col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
if(dst_col >= dst_width) return;
const int src_col = min((dst_col << log_scale), (src_width - 1));
const int dst_row = blockIdx.y;
const int src_row = blockIdx.y << log_scale;
const int src_idx = IMUL(src_row, src_width) + src_col;
const int dst_idx = IMUL(dst_width, dst_row) + dst_col;
d_result[dst_idx] = tex1Dfetch(texData, src_idx);
}
void ProgramCU::SampleImageD(CuTexImage *dst, CuTexImage *src, int log_scale)
{
int src_width = src->GetImgWidth(), dst_width = dst->GetImgWidth() ;
src->BindTexture(texData);
dim3 grid((dst_width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, dst->GetImgHeight());
dim3 block(FILTERH_TILE_WIDTH);
switch(log_scale)
{
case 1 : DownsampleKernel<1> <<< grid, block>>> ((float*) dst->_cuData, src_width, dst_width); break;
case 2 : DownsampleKernel<2> <<< grid, block>>> ((float*) dst->_cuData, src_width, dst_width); break;
case 3 : DownsampleKernel<3> <<< grid, block>>> ((float*) dst->_cuData, src_width, dst_width); break;
default: DownsampleKernel <<< grid, block>>> ((float*) dst->_cuData, src_width, dst_width, log_scale);
}
}
__global__ void ChannelReduce_Kernel(float* d_result)
{
int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
d_result[index] = tex1Dfetch(texData, index*4);
}
__global__ void ChannelReduce_Convert_Kernel(float* d_result)
{
int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
float4 rgba = tex1Dfetch(texDataF4, index);
d_result[index] = 0.299f * rgba.x + 0.587f* rgba.y + 0.114f * rgba.z;
}
void ProgramCU::ReduceToSingleChannel(CuTexImage* dst, CuTexImage* src, int convert_rgb)
{
int width = src->GetImgWidth(), height = dst->GetImgHeight() ;
dim3 grid((width * height + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH);
dim3 block(FILTERH_TILE_WIDTH);
if(convert_rgb)
{
src->BindTexture(texDataF4);
ChannelReduce_Convert_Kernel<<<grid, block>>>((float*)dst->_cuData);
}else
{
src->BindTexture(texData);
ChannelReduce_Kernel<<<grid, block>>>((float*)dst->_cuData);
}
}
__global__ void ConvertByteToFloat_Kernel(float* d_result)
{
int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
d_result[index] = tex1Dfetch(texDataB, index);
}
void ProgramCU::ConvertByteToFloat(CuTexImage*src, CuTexImage* dst)
{
int width = src->GetImgWidth(), height = dst->GetImgHeight() ;
dim3 grid((width * height + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH);
dim3 block(FILTERH_TILE_WIDTH);
src->BindTexture(texDataB);
ConvertByteToFloat_Kernel<<<grid, block>>>((float*)dst->_cuData);
}
void ProgramCU::CreateFilterKernel(float sigma, float* kernel, int& width)
{
int i, sz = int( ceil( GlobalUtil::_FilterWidthFactor * sigma -0.5) ) ;//
width = 2*sz + 1;
if(width > KERNEL_MAX_WIDTH)
{
//filter size truncation
sz = KERNEL_MAX_WIDTH >> 1;
width =KERNEL_MAX_WIDTH;
}else if(width < KERNEL_MIN_WIDTH)
{
sz = KERNEL_MIN_WIDTH >> 1;
width =KERNEL_MIN_WIDTH;
}
float rv = 1.0f/(sigma*sigma), v, ksum =0;
// pre-compute filter
for( i = -sz ; i <= sz ; ++i)
{
kernel[i+sz] = v = exp(-0.5f * i * i *rv) ;
ksum += v;
}
//normalize the kernel
rv = 1.0f/ksum;
for(i = 0; i< width ;i++) kernel[i]*=rv;
}
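//a quick numeric check, assuming (hypothetically) _FilterWidthFactor = 4:
//sigma = 1.6 gives sz = ceil(6.4 - 0.5) = 6, i.e. a 13-tap normalized
//Gaussian, comfortably inside KERNEL_MAX_WIDTH.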
template<int FW> void ProgramCU::FilterImage(CuTexImage *dst, CuTexImage *src, CuTexImage* buf)
{
int width = src->GetImgWidth(), height = src->GetImgHeight();
//horizontal filtering
src->BindTexture(texData);
dim3 gridh((width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, height);
dim3 blockh(FILTERH_TILE_WIDTH);
FilterH<FW><<<gridh, blockh>>>((float*)buf->_cuData, width);
CheckErrorCUDA("FilterH");
///vertical filtering
buf->BindTexture(texData);
dim3 gridv((width + FILTERV_TILE_WIDTH - 1)/ FILTERV_TILE_WIDTH, (height + FILTERV_TILE_HEIGHT - 1)/FILTERV_TILE_HEIGHT);
dim3 blockv(FILTERV_TILE_WIDTH, FILTERV_BLOCK_HEIGHT);
FilterV<FW><<<gridv, blockv>>>((float*)dst->_cuData, width, height);
CheckErrorCUDA("FilterV");
}
//////////////////////////////////////////////////////////////////////
// tested on 2048x1500 image, the time on pyramid construction is
// OpenGL version : 18ms
// CUDA version: 28 ms
void ProgramCU::FilterImage(CuTexImage *dst, CuTexImage *src, CuTexImage* buf, float sigma)
{
float filter_kernel[KERNEL_MAX_WIDTH]; int width;
CreateFilterKernel(sigma, filter_kernel, width);
cudaMemcpyToSymbol(d_kernel, filter_kernel, width * sizeof(float), 0, cudaMemcpyHostToDevice);
switch(width)
{
case 5: FilterImage< 5>(dst, src, buf); break;
case 7: FilterImage< 7>(dst, src, buf); break;
case 9: FilterImage< 9>(dst, src, buf); break;
case 11: FilterImage<11>(dst, src, buf); break;
case 13: FilterImage<13>(dst, src, buf); break;
case 15: FilterImage<15>(dst, src, buf); break;
case 17: FilterImage<17>(dst, src, buf); break;
case 19: FilterImage<19>(dst, src, buf); break;
case 21: FilterImage<21>(dst, src, buf); break;
case 23: FilterImage<23>(dst, src, buf); break;
case 25: FilterImage<25>(dst, src, buf); break;
case 27: FilterImage<27>(dst, src, buf); break;
case 29: FilterImage<29>(dst, src, buf); break;
case 31: FilterImage<31>(dst, src, buf); break;
case 33: FilterImage<33>(dst, src, buf); break;
default: break;
}
}
texture<float, 1, cudaReadModeElementType> texC;
texture<float, 1, cudaReadModeElementType> texP;
texture<float, 1, cudaReadModeElementType> texN;
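//ComputeDOG_Kernel: the DoG level is simply the current Gaussian level minus
//the previous one (texC - texP); the variant that also fills d_got stores,
//per pixel, the gradient magnitude 0.5*sqrt(dx*dx+dy*dy) and orientation
//atan2(dy, dx) from central differences of the current level, which the
//orientation and descriptor kernels later sample through texDataF2.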
void __global__ ComputeDOG_Kernel(float* d_dog, float2* d_got, int width, int height)
{
int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x;
if(col < width && row < height)
{
int index = IMUL(row, width) + col;
float vp = tex1Dfetch(texP, index);
float v = tex1Dfetch(texC, index);
d_dog[index] = v - vp;
float vxn = tex1Dfetch(texC, index + 1);
float vxp = tex1Dfetch(texC, index - 1);
float vyp = tex1Dfetch(texC, index - width);
float vyn = tex1Dfetch(texC, index + width);
float dx = vxn - vxp, dy = vyn - vyp;
float grd = 0.5f * sqrt(dx * dx + dy * dy);
float rot = (grd == 0.0f? 0.0f : atan2(dy, dx));
d_got[index] = make_float2(grd, rot);
}
}
void __global__ ComputeDOG_Kernel(float* d_dog, int width, int height)
{
int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x;
if(col < width && row < height)
{
int index = IMUL(row, width) + col;
float vp = tex1Dfetch(texP, index);
float v = tex1Dfetch(texC, index);
d_dog[index] = v - vp;
}
}
void ProgramCU::ComputeDOG(CuTexImage* gus, CuTexImage* dog, CuTexImage* got)
{
int width = gus->GetImgWidth(), height = gus->GetImgHeight();
dim3 grid((width + DOG_BLOCK_DIMX - 1)/ DOG_BLOCK_DIMX, (height + DOG_BLOCK_DIMY - 1)/DOG_BLOCK_DIMY);
dim3 block(DOG_BLOCK_DIMX, DOG_BLOCK_DIMY);
gus->BindTexture(texC);
(gus -1)->BindTexture(texP);
if(got->_cuData)
ComputeDOG_Kernel<<<grid, block>>>((float*) dog->_cuData, (float2*) got->_cuData, width, height);
else
ComputeDOG_Kernel<<<grid, block>>>((float*) dog->_cuData, width, height);
}
#define READ_CMP_DOG_DATA(datai, tex, idx) \
datai[0] = tex1Dfetch(tex, idx - 1);\
datai[1] = tex1Dfetch(tex, idx);\
datai[2] = tex1Dfetch(tex, idx + 1);\
if(v > nmax)\
{\
nmax = max(nmax, datai[0]);\
nmax = max(nmax, datai[1]);\
nmax = max(nmax, datai[2]);\
if(v < nmax) goto key_finish;\
}else\
{\
nmin = min(nmin, datai[0]);\
nmin = min(nmin, datai[1]);\
nmin = min(nmin, datai[2]);\
if(v > nmin) goto key_finish;\
}
void __global__ ComputeKEY_Kernel(float4* d_key, int width, int colmax, int rowmax,
float dog_threshold0, float dog_threshold, float edge_threshold, int subpixel_localization)
{
float data[3][3], v;
float datap[3][3], datan[3][3];
#ifdef KEY_OFFSET_ONE
int row = (blockIdx.y << KEY_BLOCK_LOG_DIMY) + threadIdx.y + 1;
int col = (blockIdx.x << KEY_BLOCK_LOG_DIMX) + threadIdx.x + 1;
#else
int row = (blockIdx.y << KEY_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << KEY_BLOCK_LOG_DIMX) + threadIdx.x;
#endif
int index = IMUL(row, width) + col;
int idx[3] ={index - width, index, index + width};
int in_image =0;
float nmax, nmin, result = 0.0f;
float dx = 0, dy = 0, ds = 0;
bool offset_test_passed = true;
#ifdef KEY_OFFSET_ONE
if(row < rowmax && col < colmax)
#else
if(row > 0 && col > 0 && row < rowmax && col < colmax)
#endif
{
in_image = 1;
data[1][1] = v = tex1Dfetch(texC, idx[1]);
if(fabs(v) <= dog_threshold0) goto key_finish;
data[1][0] = tex1Dfetch(texC, idx[1] - 1);
data[1][2] = tex1Dfetch(texC, idx[1] + 1);
nmax = max(data[1][0], data[1][2]);
nmin = min(data[1][0], data[1][2]);
if(v <=nmax && v >= nmin) goto key_finish;
//if((v > nmax && v < 0 )|| (v < nmin && v > 0)) goto key_finish;
READ_CMP_DOG_DATA(data[0], texC, idx[0]);
READ_CMP_DOG_DATA(data[2], texC, idx[2]);
//edge suppression
float vx2 = v * 2.0f;
float fxx = data[1][0] + data[1][2] - vx2;
float fyy = data[0][1] + data[2][1] - vx2;
float fxy = 0.25f * (data[2][2] + data[0][0] - data[2][0] - data[0][2]);
float temp1 = fxx * fyy - fxy * fxy;
float temp2 = (fxx + fyy) * (fxx + fyy);
if(temp1 <=0 || temp2 > edge_threshold * temp1) goto key_finish;
//read the previous level
READ_CMP_DOG_DATA(datap[0], texP, idx[0]);
READ_CMP_DOG_DATA(datap[1], texP, idx[1]);
READ_CMP_DOG_DATA(datap[2], texP, idx[2]);
//read the next level
READ_CMP_DOG_DATA(datan[0], texN, idx[0]);
READ_CMP_DOG_DATA(datan[1], texN, idx[1]);
READ_CMP_DOG_DATA(datan[2], texN, idx[2]);
if(subpixel_localization)
{
//subpixel localization
float fx = 0.5f * (data[1][2] - data[1][0]);
float fy = 0.5f * (data[2][1] - data[0][1]);
float fs = 0.5f * (datan[1][1] - datap[1][1]);
float fss = (datan[1][1] + datap[1][1] - vx2);
float fxs = 0.25f* (datan[1][2] + datap[1][0] - datan[1][0] - datap[1][2]);
float fys = 0.25f* (datan[2][1] + datap[0][1] - datan[0][1] - datap[2][1]);
//need to solve dx, dy, ds;
// |-fx| | fxx fxy fxs | |dx|
// |-fy| = | fxy fyy fys | * |dy|
// |-fs| | fxs fys fss | |ds|
float4 A0 = fxx > 0? make_float4(fxx, fxy, fxs, -fx) : make_float4(-fxx, -fxy, -fxs, fx);
float4 A1 = fxy > 0? make_float4(fxy, fyy, fys, -fy) : make_float4(-fxy, -fyy, -fys, fy);
float4 A2 = fxs > 0? make_float4(fxs, fys, fss, -fs) : make_float4(-fxs, -fys, -fss, fs);
float maxa = max(max(A0.x, A1.x), A2.x);
if(maxa >= 1e-10)
{
if(maxa == A1.x)
{
float4 TEMP = A1; A1 = A0; A0 = TEMP;
}else if(maxa == A2.x)
{
float4 TEMP = A2; A2 = A0; A0 = TEMP;
}
A0.y /= A0.x; A0.z /= A0.x; A0.w/= A0.x;
A1.y -= A1.x * A0.y; A1.z -= A1.x * A0.z; A1.w -= A1.x * A0.w;
A2.y -= A2.x * A0.y; A2.z -= A2.x * A0.z; A2.w -= A2.x * A0.w;
if(abs(A2.y) > abs(A1.y))
{
float4 TEMP = A2; A2 = A1; A1 = TEMP;
}
if(abs(A1.y) >= 1e-10)
{
A1.z /= A1.y; A1.w /= A1.y;
A2.z -= A2.y * A1.z; A2.w -= A2.y * A1.w;
if(abs(A2.z) >= 1e-10)
{
ds = A2.w / A2.z;
dy = A1.w - ds * A1.z;
dx = A0.w - ds * A0.z - dy * A0.y;
offset_test_passed =
fabs(data[1][1] + 0.5f * (dx * fx + dy * fy + ds * fs)) > dog_threshold
&&fabs(ds) < 1.0f && fabs(dx) < 1.0f && fabs(dy) < 1.0f;
}
}
}
}
if(offset_test_passed) result = v > nmax ? 1.0 : -1.0;
}
key_finish:
if(in_image) d_key[index] = make_float4(result, dx, dy, ds);
}
void ProgramCU::ComputeKEY(CuTexImage* dog, CuTexImage* key, float Tdog, float Tedge)
{
int width = dog->GetImgWidth(), height = dog->GetImgHeight();
float Tdog1 = (GlobalUtil::_SubpixelLocalization? 0.8f : 1.0f) * Tdog;
CuTexImage* dogp = dog - 1;
CuTexImage* dogn = dog + 1;
#ifdef KEY_OFFSET_ONE
dim3 grid((width - 1 + KEY_BLOCK_DIMX - 1)/ KEY_BLOCK_DIMX, (height - 1 + KEY_BLOCK_DIMY - 1)/KEY_BLOCK_DIMY);
#else
dim3 grid((width + KEY_BLOCK_DIMX - 1)/ KEY_BLOCK_DIMX, (height + KEY_BLOCK_DIMY - 1)/KEY_BLOCK_DIMY);
#endif
dim3 block(KEY_BLOCK_DIMX, KEY_BLOCK_DIMY);
dogp->BindTexture(texP);
dog ->BindTexture(texC);
dogn->BindTexture(texN);
Tedge = (Tedge+1)*(Tedge+1)/Tedge;
ComputeKEY_Kernel<<<grid, block>>>((float4*) key->_cuData, width,
width -1, height -1, Tdog1, Tdog, Tedge, GlobalUtil::_SubpixelLocalization);
}
void __global__ InitHist_Kernel(int4* hist, int ws, int wd, int height)
{
int row = IMUL(blockIdx.y, blockDim.y) + threadIdx.y;
int col = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(row < height && col < wd)
{
int hidx = IMUL(row, wd) + col;
int scol = col << 2;
int sidx = IMUL(row, ws) + scol;
int v[4] = {0, 0, 0, 0};
if(row > 0 && row < height -1)
{
#pragma unroll
for(int i = 0; i < 4 ; ++i, ++scol)
{
float4 temp = tex1Dfetch(texDataF4, sidx +i);
v[i] = (scol < ws -1 && scol > 0 && temp.x!=0) ? 1 : 0;
}
}
hist[hidx] = make_int4(v[0], v[1], v[2], v[3]);
}
}
void ProgramCU::InitHistogram(CuTexImage* key, CuTexImage* hist)
{
int ws = key->GetImgWidth(), hs = key->GetImgHeight();
int wd = hist->GetImgWidth(), hd = hist->GetImgHeight();
dim3 grid((wd + HIST_INIT_WIDTH - 1)/ HIST_INIT_WIDTH, hd);
dim3 block(HIST_INIT_WIDTH, 1);
key->BindTexture(texDataF4);
InitHist_Kernel<<<grid, block>>>((int4*) hist->_cuData, ws, wd, hd);
}
void __global__ ReduceHist_Kernel(int4* d_hist, int ws, int wd, int height)
{
int row = IMUL(blockIdx.y, blockDim.y) + threadIdx.y;
int col = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(row < height && col < wd)
{
int hidx = IMUL(row, wd) + col;
int scol = col << 2;
int sidx = IMUL(row, ws) + scol;
int v[4] = {0, 0, 0, 0};
#pragma unroll
for(int i = 0; i < 4 && scol < ws; ++i, ++scol)
{
int4 temp = tex1Dfetch(texDataI4, sidx + i);
v[i] = temp.x + temp.y + temp.z + temp.w;
}
d_hist[hidx] = make_int4(v[0], v[1], v[2], v[3]);
}
}
void ProgramCU::ReduceHistogram(CuTexImage*hist1, CuTexImage* hist2)
{
int ws = hist1->GetImgWidth(), hs = hist1->GetImgHeight();
int wd = hist2->GetImgWidth(), hd = hist2->GetImgHeight();
int temp = (int)floor(logf(float(wd * 2/ 3)) / logf(2.0f));
const int wi = min(7, max(temp , 0));
hist1->BindTexture(texDataI4);
const int BW = 1 << wi, BH = 1 << (7 - wi);
dim3 grid((wd + BW - 1)/ BW, (hd + BH -1) / BH);
dim3 block(BW, BH);
ReduceHist_Kernel<<<grid, block>>>((int4*)hist2->_cuData, ws, wd, hd);
}
void __global__ ListGen_Kernel(int4* d_list, int width)
{
int idx1 = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
int4 pos = tex1Dfetch(texDataList, idx1);
int idx2 = IMUL(pos.y, width) + pos.x;
int4 temp = tex1Dfetch(texDataI4, idx2);
int sum1 = temp.x + temp.y;
int sum2 = sum1 + temp.z;
pos.x <<= 2;
if(pos.z >= sum2)
{
pos.x += 3;
pos.z -= sum2;
}else if(pos.z >= sum1)
{
pos.x += 2;
pos.z -= sum1;
}else if(pos.z >= temp.x)
{
pos.x += 1;
pos.z -= temp.x;
}
d_list[idx1] = pos;
}
//input list: one int4 per keypoint being located, carrying (x, y) in the coarser
//histogram level and, in z, the keypoint's rank within that cell; each pass descends
//one pyramid level, expanding x by a factor of 4 and re-ranking z against the child counts
void ProgramCU::GenerateList(CuTexImage* list, CuTexImage* hist)
{
int len = list->GetImgWidth();
list->BindTexture(texDataList);
hist->BindTexture(texDataI4);
dim3 grid((len + LISTGEN_BLOCK_DIM -1) /LISTGEN_BLOCK_DIM);
dim3 block(LISTGEN_BLOCK_DIM);
ListGen_Kernel<<<grid, block>>>((int4*) list->_cuData, hist->GetImgWidth());
}
void __global__ ComputeOrientation_Kernel(float4* d_list,
int list_len,
int width, int height,
float sigma, float sigma_step,
float gaussian_factor, float sample_factor,
int num_orientation,
int existing_keypoint,
int subpixel,
int keepsign)
{
const float ten_degree_per_radius = 5.7295779513082320876798154814105;
const float radius_per_ten_degrees = 1.0 / 5.7295779513082320876798154814105;
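    // The two constants above are 36 / (2*pi) and its reciprocal; they convert between
    // a gradient orientation in radians and the index of a 10-degree histogram bin
    // ("radius" in the names is presumably a typo for "radian").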
int idx = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;
if(idx >= list_len) return;
float4 key;
if(existing_keypoint)
{
key = tex1Dfetch(texDataF4, idx);
}else
{
int4 ikey = tex1Dfetch(texDataList, idx);
key.x = ikey.x + 0.5f;
key.y = ikey.y + 0.5f;
key.z = sigma;
if(subpixel || keepsign)
{
float4 offset = tex1Dfetch(texDataF4, IMUL(width, ikey.y) + ikey.x);
if(subpixel)
{
key.x += offset.y;
key.y += offset.z;
key.z *= pow(sigma_step, offset.w);
}
if(keepsign) key.z *= offset.x;
}
}
if(num_orientation == 0)
{
key.w = 0;
d_list[idx] = key;
return;
}
float vote[37];
float gsigma = key.z * gaussian_factor;
float win = fabs(key.z) * sample_factor;
float dist_threshold = win * win + 0.5;
float factor = -0.5f / (gsigma * gsigma);
float xmin = max(1.5f, floor(key.x - win) + 0.5f);
float ymin = max(1.5f, floor(key.y - win) + 0.5f);
float xmax = min(width - 1.5f, floor(key.x + win) + 0.5f);
float ymax = min(height -1.5f, floor(key.y + win) + 0.5f);
#pragma unroll
for(int i = 0; i < 36; ++i) vote[i] = 0.0f;
for(float y = ymin; y <= ymax; y += 1.0f)
{
for(float x = xmin; x <= xmax; x += 1.0f)
{
float dx = x - key.x;
float dy = y - key.y;
float sq_dist = dx * dx + dy * dy;
if(sq_dist >= dist_threshold) continue;
float2 got = tex2D(texDataF2, x, y);
float weight = got.x * exp(sq_dist * factor);
float fidx = floor(got.y * ten_degree_per_radius);
int oidx = fidx;
if(oidx < 0) oidx += 36;
vote[oidx] += weight;
}
}
//filter the vote
const float one_third = 1.0 /3.0;
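    // The loop below applies six passes of a circular 3-tap box filter to smooth the
    // 36-bin histogram; vote[36] mirrors vote[0] so the wrap-around neighbour is
    // always available.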
#pragma unroll
for(int i = 0; i < 6; ++i)
{
vote[36] = vote[0];
float pre = vote[35];
#pragma unroll
for(int j = 0; j < 36; ++j)
{
float temp = one_third * (pre + vote[j] + vote[j + 1]);
pre = vote[j]; vote[j] = temp;
}
}
vote[36] = vote[0];
if(num_orientation == 1 || existing_keypoint)
{
int index_max = 0;
float max_vote = vote[0];
#pragma unroll
for(int i = 1; i < 36; ++i)
{
index_max = vote[i] > max_vote? i : index_max;
max_vote = max(max_vote, vote[i]);
}
float pre = vote[index_max == 0? 35 : index_max -1];
float next = vote[index_max + 1];
float weight = max_vote;
float off = 0.5f * FDIV(next - pre, weight + weight - next - pre);
key.w = radius_per_ten_degrees * (index_max + 0.5f + off);
d_list[idx] = key;
}else
{
float max_vote = vote[0];
#pragma unroll
for(int i = 1; i < 36; ++i) max_vote = max(max_vote, vote[i]);
float vote_threshold = max_vote * 0.8f;
float pre = vote[35];
float max_rot[2], max_vot[2] = {0, 0};
int ocount = 0;
#pragma unroll
for(int i =0; i < 36; ++i)
{
float next = vote[i + 1];
if(vote[i] > vote_threshold && vote[i] > pre && vote[i] > next)
{
float di = 0.5f * FDIV(next - pre, vote[i] + vote[i] - next - pre);
float rot = i + di + 0.5f;
float weight = vote[i];
///
if(weight > max_vot[1])
{
if(weight > max_vot[0])
{
max_vot[1] = max_vot[0];
max_rot[1] = max_rot[0];
max_vot[0] = weight;
max_rot[0] = rot;
}
else
{
max_vot[1] = weight;
max_rot[1] = rot;
}
ocount ++;
}
}
pre = vote[i];
}
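        // Up to two dominant orientations (local maxima above 80% of the global peak,
        // refined by parabolic interpolation) are kept. Each is encoded as a 16-bit
        // fraction of a full turn and both are packed into the bit pattern of key.w
        // below; 65535 marks an unused slot.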
float fr1 = max_rot[0] / 36.0f;
if(fr1 < 0) fr1 += 1.0f;
unsigned short us1 = ocount == 0? 65535 : ((unsigned short )floor(fr1 * 65535.0f));
unsigned short us2 = 65535;
if(ocount > 1)
{
float fr2 = max_rot[1] / 36.0f;
if(fr2 < 0) fr2 += 1.0f;
us2 = (unsigned short ) floor(fr2 * 65535.0f);
}
unsigned int uspack = (us2 << 16) | us1;
key.w = __int_as_float(uspack);
d_list[idx] = key;
}
}
void ProgramCU::ComputeOrientation(CuTexImage* list, CuTexImage* got, CuTexImage*key,
float sigma, float sigma_step, int existing_keypoint)
{
int len = list->GetImgWidth();
if(len <= 0) return;
int width = got->GetImgWidth(), height = got->GetImgHeight();
if(existing_keypoint)
{
list->BindTexture(texDataF4);
}else
{
list->BindTexture(texDataList);
if(GlobalUtil::_SubpixelLocalization) key->BindTexture(texDataF4);
}
got->BindTexture2D(texDataF2);
const int block_width = len < ORIENTATION_COMPUTE_PER_BLOCK ? 16 : ORIENTATION_COMPUTE_PER_BLOCK;
dim3 grid((len + block_width -1) / block_width);
dim3 block(block_width);
ComputeOrientation_Kernel<<<grid, block>>>((float4*) list->_cuData,
len, width, height, sigma, sigma_step,
GlobalUtil::_OrientationGaussianFactor,
GlobalUtil::_OrientationGaussianFactor * GlobalUtil::_OrientationWindowFactor,
GlobalUtil::_FixedOrientation? 0 : GlobalUtil::_MaxOrientation,
existing_keypoint, GlobalUtil::_SubpixelLocalization, GlobalUtil::_KeepExtremumSign);
ProgramCU::CheckErrorCUDA("ComputeOrientation");
}
template <bool DYNAMIC_INDEXING> void __global__ ComputeDescriptor_Kernel(float4* d_des, int num,
int width, int height, float window_factor)
{
const float rpi = 4.0/ 3.14159265358979323846;
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
int fidx = idx >> 4;
if(fidx >= num) return;
float4 key = tex1Dfetch(texDataF4, fidx);
int bidx = idx& 0xf, ix = bidx & 0x3, iy = bidx >> 2;
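    // Each keypoint is served by 16 consecutive threads: bidx picks one of the 4x4
    // spatial cells of the SIFT descriptor grid, and this thread accumulates that
    // cell's 8-bin orientation histogram (two float4s of the 128-D descriptor).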
float spt = fabs(key.z * window_factor);
float s, c; __sincosf(key.w, &s, &c);
float anglef = key.w > 3.14159265358979323846? key.w - (2.0 * 3.14159265358979323846) : key.w ;
float cspt = c * spt, sspt = s * spt;
float crspt = c / spt, srspt = s / spt;
float2 offsetpt, pt;
float xmin, ymin, xmax, ymax, bsz;
offsetpt.x = ix - 1.5f;
offsetpt.y = iy - 1.5f;
pt.x = cspt * offsetpt.x - sspt * offsetpt.y + key.x;
pt.y = cspt * offsetpt.y + sspt * offsetpt.x + key.y;
bsz = fabs(cspt) + fabs(sspt);
xmin = max(1.5f, floor(pt.x - bsz) + 0.5f);
ymin = max(1.5f, floor(pt.y - bsz) + 0.5f);
xmax = min(width - 1.5f, floor(pt.x + bsz) + 0.5f);
ymax = min(height - 1.5f, floor(pt.y + bsz) + 0.5f);
float des[9];
#pragma unroll
for(int i =0; i < 9; ++i) des[i] = 0.0f;
for(float y = ymin; y <= ymax; y += 1.0f)
{
for(float x = xmin; x <= xmax; x += 1.0f)
{
float dx = x - pt.x;
float dy = y - pt.y;
float nx = crspt * dx + srspt * dy;
float ny = crspt * dy - srspt * dx;
float nxn = fabs(nx);
float nyn = fabs(ny);
if(nxn < 1.0f && nyn < 1.0f)
{
float2 cc = tex2D(texDataF2, x, y);
float dnx = nx + offsetpt.x;
float dny = ny + offsetpt.y;
float ww = exp(-0.125f * (dnx * dnx + dny * dny));
float wx = 1.0 - nxn;
float wy = 1.0 - nyn;
float weight = ww * wx * wy * cc.x;
float theta = (anglef - cc.y) * rpi;
if(theta < 0) theta += 8.0f;
float fo = floor(theta);
int fidx = fo;
float weight1 = fo + 1.0f - theta;
float weight2 = theta - fo;
if(DYNAMIC_INDEXING)
{
des[fidx] += (weight1 * weight);
des[fidx + 1] += (weight2 * weight);
//this dynamic indexing part might be slow
}else
{
#pragma unroll
for(int k = 0; k < 8; ++k)
{
if(k == fidx)
{
des[k] += (weight1 * weight);
des[k+1] += (weight2 * weight);
}
}
}
}
}
}
des[0] += des[8];
int didx = idx << 1;
d_des[didx] = make_float4(des[0], des[1], des[2], des[3]);
d_des[didx+1] = make_float4(des[4], des[5], des[6], des[7]);
}
template <bool DYNAMIC_INDEXING> void __global__ ComputeDescriptorRECT_Kernel(float4* d_des, int num,
int width, int height, float window_factor)
{
const float rpi = 4.0/ 3.14159265358979323846;
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
int fidx = idx >> 4;
if(fidx >= num) return;
float4 key = tex1Dfetch(texDataF4, fidx);
int bidx = idx& 0xf, ix = bidx & 0x3, iy = bidx >> 2;
//float aspect_ratio = key.w / key.z;
//float aspect_sq = aspect_ratio * aspect_ratio;
float sptx = key.z * 0.25, spty = key.w * 0.25;
float xmin, ymin, xmax, ymax; float2 pt;
pt.x = sptx * (ix + 0.5f) + key.x;
pt.y = spty * (iy + 0.5f) + key.y;
xmin = max(1.5f, floor(pt.x - sptx) + 0.5f);
ymin = max(1.5f, floor(pt.y - spty) + 0.5f);
xmax = min(width - 1.5f, floor(pt.x + sptx) + 0.5f);
ymax = min(height - 1.5f, floor(pt.y + spty) + 0.5f);
float des[9];
#pragma unroll
for(int i =0; i < 9; ++i) des[i] = 0.0f;
for(float y = ymin; y <= ymax; y += 1.0f)
{
for(float x = xmin; x <= xmax; x += 1.0f)
{
float nx = (x - pt.x) / sptx;
float ny = (y - pt.y) / spty;
float nxn = fabs(nx);
float nyn = fabs(ny);
if(nxn < 1.0f && nyn < 1.0f)
{
float2 cc = tex2D(texDataF2, x, y);
float wx = 1.0 - nxn;
float wy = 1.0 - nyn;
float weight = wx * wy * cc.x;
float theta = (- cc.y) * rpi;
if(theta < 0) theta += 8.0f;
float fo = floor(theta);
int fidx = fo;
float weight1 = fo + 1.0f - theta;
float weight2 = theta - fo;
if(DYNAMIC_INDEXING)
{
des[fidx] += (weight1 * weight);
des[fidx + 1] += (weight2 * weight);
//this dynamic indexing part might be slow
}else
{
#pragma unroll
for(int k = 0; k < 8; ++k)
{
if(k == fidx)
{
des[k] += (weight1 * weight);
des[k+1] += (weight2 * weight);
}
}
}
}
}
}
des[0] += des[8];
int didx = idx << 1;
d_des[didx] = make_float4(des[0], des[1], des[2], des[3]);
d_des[didx+1] = make_float4(des[4], des[5], des[6], des[7]);
}
void __global__ NormalizeDescriptor_Kernel(float4* d_des, int num)
{
float4 temp[32];
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(idx >= num) return;
int sidx = idx << 5;
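    // One thread normalizes one 128-D descriptor (32 float4s): L2-normalize, clamp
    // every component to 0.2, then renormalize (the standard SIFT illumination-
    // robustness step).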
float norm1 = 0, norm2 = 0;
#pragma unroll
for(int i = 0; i < 32; ++i)
{
temp[i] = tex1Dfetch(texDataF4, sidx +i);
norm1 += (temp[i].x * temp[i].x + temp[i].y * temp[i].y +
temp[i].z * temp[i].z + temp[i].w * temp[i].w);
}
norm1 = rsqrt(norm1);
#pragma unroll
for(int i = 0; i < 32; ++i)
{
temp[i].x = min(0.2f, temp[i].x * norm1);
temp[i].y = min(0.2f, temp[i].y * norm1);
temp[i].z = min(0.2f, temp[i].z * norm1);
temp[i].w = min(0.2f, temp[i].w * norm1);
norm2 += (temp[i].x * temp[i].x + temp[i].y * temp[i].y +
temp[i].z * temp[i].z + temp[i].w * temp[i].w);
}
norm2 = rsqrt(norm2);
#pragma unroll
for(int i = 0; i < 32; ++i)
{
temp[i].x *= norm2; temp[i].y *= norm2;
temp[i].z *= norm2; temp[i].w *= norm2;
d_des[sidx + i] = temp[i];
}
}
void ProgramCU::ComputeDescriptor(CuTexImage*list, CuTexImage* got, CuTexImage* dtex, int rect, int stream)
{
int num = list->GetImgWidth();
int width = got->GetImgWidth();
int height = got->GetImgHeight();
dtex->InitTexture(num * 128, 1, 1);
got->BindTexture2D(texDataF2);
list->BindTexture(texDataF4);
int block_width = DESCRIPTOR_COMPUTE_BLOCK_SIZE;
dim3 grid((num * 16 + block_width -1) / block_width);
dim3 block(block_width);
if(rect)
{
if(GlobalUtil::_UseDynamicIndexing)
ComputeDescriptorRECT_Kernel<true><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
else
ComputeDescriptorRECT_Kernel<false><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
}else
{
if(GlobalUtil::_UseDynamicIndexing)
ComputeDescriptor_Kernel<true><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
else
ComputeDescriptor_Kernel<false><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
}
if(GlobalUtil::_NormalizedSIFT)
{
dtex->BindTexture(texDataF4);
const int block_width = DESCRIPTOR_NORMALIZ_PER_BLOCK;
dim3 grid((num + block_width -1) / block_width);
dim3 block(block_width);
NormalizeDescriptor_Kernel<<<grid, block>>>((float4*) dtex->_cuData, num);
}
CheckErrorCUDA("ComputeDescriptor");
}
//////////////////////////////////////////////////////
void ProgramCU::FinishCUDA()
{
cudaThreadSynchronize();
}
int ProgramCU::CheckErrorCUDA(const char* location)
{
cudaError_t e = cudaGetLastError();
if(e)
{
if(location) fprintf(stderr, "%s:\t", location);
fprintf(stderr, "%s\n", cudaGetErrorString(e));
//assert(0);
return 1;
}else
{
return 0;
}
}
void __global__ ConvertDOG_Kernel(float* d_result, int width, int height)
{
int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y;
int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x;
if(col < width && row < height)
{
int index = row * width + col;
float v = tex1Dfetch(texData, index);
d_result[index] = (col == 0 || row == 0 || col == width -1 || row == height -1)?
0.5 : saturate(0.5+20.0*v);
}
}
///
void ProgramCU::DisplayConvertDOG(CuTexImage* dog, CuTexImage* out)
{
if(out->_cuData == NULL) return;
int width = dog->GetImgWidth(), height = dog ->GetImgHeight();
dog->BindTexture(texData);
dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM);
dim3 block(BLOCK_DIM, BLOCK_DIM);
ConvertDOG_Kernel<<<grid, block>>>((float*) out->_cuData, width, height);
ProgramCU::CheckErrorCUDA("DisplayConvertDOG");
}
void __global__ ConvertGRD_Kernel(float* d_result, int width, int height)
{
int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y;
int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x;
if(col < width && row < height)
{
int index = row * width + col;
float v = tex1Dfetch(texData, index << 1);
d_result[index] = (col == 0 || row == 0 || col == width -1 || row == height -1)?
0 : saturate(5 * v);
}
}
void ProgramCU::DisplayConvertGRD(CuTexImage* got, CuTexImage* out)
{
if(out->_cuData == NULL) return;
int width = got->GetImgWidth(), height = got ->GetImgHeight();
got->BindTexture(texData);
dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM);
dim3 block(BLOCK_DIM, BLOCK_DIM);
ConvertGRD_Kernel<<<grid, block>>>((float*) out->_cuData, width, height);
ProgramCU::CheckErrorCUDA("DisplayConvertGRD");
}
void __global__ ConvertKEY_Kernel(float4* d_result, int width, int height)
{
int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y;
int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x;
if(col < width && row < height)
{
int index = row * width + col;
float4 keyv = tex1Dfetch(texDataF4, index);
int is_key = (keyv.x == 1.0f || keyv.x == -1.0f);
int inside = col > 0 && row > 0 && row < height -1 && col < width - 1;
float v = inside? saturate(0.5 + 20 * tex1Dfetch(texData, index)) : 0.5;
d_result[index] = is_key && inside ?
(keyv.x > 0? make_float4(1.0f, 0, 0, 1.0f) : make_float4(0.0f, 1.0f, 0.0f, 1.0f)):
make_float4(v, v, v, 1.0f) ;
}
}
void ProgramCU::DisplayConvertKEY(CuTexImage* key, CuTexImage* dog, CuTexImage* out)
{
if(out->_cuData == NULL) return;
int width = key->GetImgWidth(), height = key ->GetImgHeight();
dog->BindTexture(texData);
key->BindTexture(texDataF4);
dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM);
dim3 block(BLOCK_DIM, BLOCK_DIM);
ConvertKEY_Kernel<<<grid, block>>>((float4*) out->_cuData, width, height);
}
void __global__ DisplayKeyPoint_Kernel(float4 * d_result, int num)
{
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(idx >= num) return;
float4 v = tex1Dfetch(texDataF4, idx);
d_result[idx] = make_float4(v.x, v.y, 0, 1.0f);
}
void ProgramCU::DisplayKeyPoint(CuTexImage* ftex, CuTexImage* out)
{
int num = ftex->GetImgWidth();
int block_width = 64;
dim3 grid((num + block_width -1) /block_width);
dim3 block(block_width);
ftex->BindTexture(texDataF4);
DisplayKeyPoint_Kernel<<<grid, block>>>((float4*) out->_cuData, num);
ProgramCU::CheckErrorCUDA("DisplayKeyPoint");
}
void __global__ DisplayKeyBox_Kernel(float4* d_result, int num)
{
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(idx >= num) return;
int kidx = idx / 10, vidx = idx - IMUL(kidx , 10);
float4 v = tex1Dfetch(texDataF4, kidx);
float sz = fabs(v.z * 3.0f);
///////////////////////
float s, c; __sincosf(v.w, &s, &c);
///////////////////////
float dx = vidx == 0? 0 : ((vidx <= 4 || vidx >= 9)? sz : -sz);
float dy = vidx <= 1? 0 : ((vidx <= 2 || vidx >= 7)? -sz : sz);
float4 pos;
pos.x = v.x + c * dx - s * dy;
pos.y = v.y + c * dy + s * dx;
pos.z = 0; pos.w = 1.0f;
d_result[idx] = pos;
}
void ProgramCU::DisplayKeyBox(CuTexImage* ftex, CuTexImage* out)
{
int len = ftex->GetImgWidth();
int block_width = 32;
dim3 grid((len * 10 + block_width -1) / block_width);
dim3 block(block_width);
ftex->BindTexture(texDataF4);
DisplayKeyBox_Kernel<<<grid, block>>>((float4*) out->_cuData, len * 10);
}
///////////////////////////////////////////////////////////////////
inline void CuTexImage:: BindTexture(textureReference& texRef)
{
cudaBindTexture(NULL, &texRef, _cuData, &texRef.channelDesc, _numBytes);
}
inline void CuTexImage::BindTexture2D(textureReference& texRef)
{
#if defined(SIFTGPU_ENABLE_LINEAR_TEX2D)
cudaBindTexture2D(0, &texRef, _cuData, &texRef.channelDesc, _imgWidth, _imgHeight, _imgWidth* _numChannel* sizeof(float));
#else
cudaChannelFormatDesc desc;
cudaGetChannelDesc(&desc, _cuData2D);
cudaBindTextureToArray(&texRef, _cuData2D, &desc);
#endif
}
int ProgramCU::CheckCudaDevice(int device)
{
int count = 0, device_used;
if(cudaGetDeviceCount(&count) != cudaSuccess || count <= 0)
{
ProgramCU::CheckErrorCUDA("CheckCudaDevice");
return 0;
}else if(count == 1)
{
cudaDeviceProp deviceProp;
if ( cudaGetDeviceProperties(&deviceProp, 0) != cudaSuccess ||
(deviceProp.major == 9999 && deviceProp.minor == 9999))
{
fprintf(stderr, "CheckCudaDevice: no device supporting CUDA.\n");
return 0;
}else
{
GlobalUtil::_MemCapGPU = deviceProp.totalGlobalMem / 1024;
GlobalUtil::_texMaxDimGL = 32768;
if(GlobalUtil::_verbose)
fprintf(stdout, "NOTE: changing maximum texture dimension to %d\n", GlobalUtil::_texMaxDimGL);
}
}
if(device >0 && device < count)
{
cudaSetDevice(device);
CheckErrorCUDA("cudaSetDevice\n");
}
cudaGetDevice(&device_used);
if(device != device_used)
fprintf(stderr, "\nERROR: Cannot set device to %d\n"
"\nWARNING: Use # %d device instead (out of %d)\n", device, device_used, count);
return 1;
}
////////////////////////////////////////////////////////////////////////////////////////
// siftmatch functions
//////////////////////////////////////////////////////////////////////////////////////////
#define MULT_TBLOCK_DIMX 128
#define MULT_TBLOCK_DIMY 1
#define MULT_BLOCK_DIMX (MULT_TBLOCK_DIMX)
#define MULT_BLOCK_DIMY (8 * MULT_TBLOCK_DIMY)
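// Each thread block compares MULT_BLOCK_DIMY features of image 1 against
// MULT_BLOCK_DIMX features of image 2. A descriptor is stored as 8 uint4
// (128 unsigned bytes), so the dot products below are accumulated in integer
// arithmetic.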
texture<uint4, 1, cudaReadModeElementType> texDes1;
texture<uint4, 1, cudaReadModeElementType> texDes2;
void __global__ MultiplyDescriptor_Kernel(int* d_result, int num1, int num2, int3* d_temp)
{
int idx01 = (blockIdx.y * MULT_BLOCK_DIMY), idx02 = (blockIdx.x * MULT_BLOCK_DIMX);
int idx1 = idx01 + threadIdx.y, idx2 = idx02 + threadIdx.x;
__shared__ int data1[17 * 2 * MULT_BLOCK_DIMY];
int read_idx1 = idx01 * 8 + threadIdx.x, read_idx2 = idx2 * 8;
int col4 = threadIdx.x & 0x3, row4 = threadIdx.x >> 2;
int cache_idx1 = IMUL(row4, 17) + (col4 << 2);
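    // Descriptors from image 1 are staged in shared memory with a 17-int stride
    // (16 payload ints plus 1 pad per half descriptor), presumably to reduce
    // shared-memory bank conflicts; the pad is skipped again by the
    // data1 + k*34 + i*4 + i/4 addressing used when the bytes are read back.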
///////////////////////////////////////////////////////////////
//Load feature descriptors
///////////////////////////////////////////////////////////////
#if MULT_BLOCK_DIMY == 16
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w;
#elif MULT_BLOCK_DIMY == 8
if(threadIdx.x < 64)
{
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w;
}
#else
#error
#endif
__syncthreads();
///
if(idx2 >= num2) return;
///////////////////////////////////////////////////////////////////////////
//compare descriptors
int results[MULT_BLOCK_DIMY];
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i) results[i] = 0;
#pragma unroll
for(int i = 0; i < 8; ++i)
{
uint4 v = tex1Dfetch(texDes2, read_idx2 + i);
unsigned char* p2 = (unsigned char*)(&v);
#pragma unroll
for(int k = 0; k < MULT_BLOCK_DIMY; ++k)
{
unsigned char* p1 = (unsigned char*) (data1 + k * 34 + i * 4 + (i/4));
results[k] += ( IMUL(p1[0], p2[0]) + IMUL(p1[1], p2[1])
+ IMUL(p1[2], p2[2]) + IMUL(p1[3], p2[3])
+ IMUL(p1[4], p2[4]) + IMUL(p1[5], p2[5])
+ IMUL(p1[6], p2[6]) + IMUL(p1[7], p2[7])
+ IMUL(p1[8], p2[8]) + IMUL(p1[9], p2[9])
+ IMUL(p1[10], p2[10]) + IMUL(p1[11], p2[11])
+ IMUL(p1[12], p2[12]) + IMUL(p1[13], p2[13])
+ IMUL(p1[14], p2[14]) + IMUL(p1[15], p2[15]));
}
}
int dst_idx = IMUL(idx1, num2) + idx2;
if(d_temp)
{
int3 cmp_result = make_int3(0, -1, 0);
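        // cmp_result tracks (best dot product, its row index, second-best dot product)
        // for this column within the current block row; ColMatch_Kernel later merges
        // these per-block partial results.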
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1)
{
cmp_result = results[i] > cmp_result.x?
make_int3(results[i], idx1 + i, cmp_result.x) :
make_int3(cmp_result.x, cmp_result.y, max(cmp_result.z, results[i]));
d_result[dst_idx + IMUL(i, num2)] = results[i];
}
}
d_temp[ IMUL(blockIdx.y, num2) + idx2] = cmp_result;
}else
{
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1) d_result[dst_idx + IMUL(i, num2)] = results[i];
}
}
}
void ProgramCU::MultiplyDescriptor(CuTexImage* des1, CuTexImage* des2, CuTexImage* texDot, CuTexImage* texCRT)
{
int num1 = des1->GetImgWidth() / 8;
int num2 = des2->GetImgWidth() / 8;
dim3 grid( (num2 + MULT_BLOCK_DIMX - 1)/ MULT_BLOCK_DIMX,
(num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY);
dim3 block(MULT_TBLOCK_DIMX, MULT_TBLOCK_DIMY);
texDot->InitTexture( num2,num1);
if(texCRT) texCRT->InitTexture(num2, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY, 32);
des1->BindTexture(texDes1);
des2->BindTexture(texDes2);
MultiplyDescriptor_Kernel<<<grid, block>>>((int*)texDot->_cuData, num1, num2,
(texCRT? (int3*)texCRT->_cuData : NULL));
}
texture<float, 1, cudaReadModeElementType> texLoc1;
texture<float2, 1, cudaReadModeElementType> texLoc2;
struct Matrix33{float mat[3][3];};
void __global__ MultiplyDescriptorG_Kernel(int* d_result, int num1, int num2, int3* d_temp,
Matrix33 H, float hdistmax, Matrix33 F, float fdistmax)
{
int idx01 = (blockIdx.y * MULT_BLOCK_DIMY);
int idx02 = (blockIdx.x * MULT_BLOCK_DIMX);
int idx1 = idx01 + threadIdx.y;
int idx2 = idx02 + threadIdx.x;
__shared__ int data1[17 * 2 * MULT_BLOCK_DIMY];
__shared__ float loc1[MULT_BLOCK_DIMY * 2];
int read_idx1 = idx01 * 8 + threadIdx.x ;
int read_idx2 = idx2 * 8;
int col4 = threadIdx.x & 0x3, row4 = threadIdx.x >> 2;
int cache_idx1 = IMUL(row4, 17) + (col4 << 2);
#if MULT_BLOCK_DIMY == 16
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x;
data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z;
data1[cache_idx1+3] = v.w;
#elif MULT_BLOCK_DIMY == 8
if(threadIdx.x < 64)
{
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x;
data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z;
data1[cache_idx1+3] = v.w;
}
#else
#error
#endif
__syncthreads();
if(threadIdx.x < MULT_BLOCK_DIMY * 2)
{
loc1[threadIdx.x] = tex1Dfetch(texLoc1, 2 * idx01 + threadIdx.x);
}
__syncthreads();
if(idx2 >= num2) return;
int results[MULT_BLOCK_DIMY];
/////////////////////////////////////////////////////////////////////////////////////////////
//geometric verification
/////////////////////////////////////////////////////////////////////////////////////////////
int good_count = 0;
float2 loc2 = tex1Dfetch(texLoc2, idx2);
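    // Guided matching: a candidate pair is only scored if the supplied homography maps
    // loc1 close enough to loc2 (hdist < hdistmax) and a Sampson-style epipolar error
    // against F stays below fdistmax; rejected pairs get a large negative score so
    // they can never survive the ratio test.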
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1)
{
float* loci = loc1 + i * 2;
float locx = loci[0], locy = loci[1];
//homography
float x[3], diff[2];
x[0] = H.mat[0][0] * locx + H.mat[0][1] * locy + H.mat[0][2];
x[1] = H.mat[1][0] * locx + H.mat[1][1] * locy + H.mat[1][2];
x[2] = H.mat[2][0] * locx + H.mat[2][1] * locy + H.mat[2][2];
diff[0] = FDIV(x[0], x[2]) - loc2.x;
diff[1] = FDIV(x[1], x[2]) - loc2.y;
float hdist = diff[0] * diff[0] + diff[1] * diff[1];
if(hdist < hdistmax)
{
//check fundamental matrix
float fx1[3], ftx2[3], x2fx1, se;
fx1[0] = F.mat[0][0] * locx + F.mat[0][1] * locy + F.mat[0][2];
fx1[1] = F.mat[1][0] * locx + F.mat[1][1] * locy + F.mat[1][2];
fx1[2] = F.mat[2][0] * locx + F.mat[2][1] * locy + F.mat[2][2];
ftx2[0] = F.mat[0][0] * loc2.x + F.mat[1][0] * loc2.y + F.mat[2][0];
ftx2[1] = F.mat[0][1] * loc2.x + F.mat[1][1] * loc2.y + F.mat[2][1];
//ftx2[2] = F.mat[0][2] * loc2.x + F.mat[1][2] * loc2.y + F.mat[2][2];
x2fx1 = loc2.x * fx1[0] + loc2.y * fx1[1] + fx1[2];
se = FDIV(x2fx1 * x2fx1, fx1[0] * fx1[0] + fx1[1] * fx1[1] + ftx2[0] * ftx2[0] + ftx2[1] * ftx2[1]);
results[i] = se < fdistmax? 0: -262144;
}else
{
results[i] = -262144;
}
}else
{
results[i] = -262144;
}
good_count += (results[i] >=0);
}
/////////////////////////////////////////////////////////////////////////////////////////////
///compare feature descriptors anyway
/////////////////////////////////////////////////////////////////////////////////////////////
if(good_count > 0)
{
#pragma unroll
for(int i = 0; i < 8; ++i)
{
uint4 v = tex1Dfetch(texDes2, read_idx2 + i);
unsigned char* p2 = (unsigned char*)(&v);
#pragma unroll
for(int k = 0; k < MULT_BLOCK_DIMY; ++k)
{
unsigned char* p1 = (unsigned char*) (data1 + k * 34 + i * 4 + (i/4));
results[k] += ( IMUL(p1[0], p2[0]) + IMUL(p1[1], p2[1])
+ IMUL(p1[2], p2[2]) + IMUL(p1[3], p2[3])
+ IMUL(p1[4], p2[4]) + IMUL(p1[5], p2[5])
+ IMUL(p1[6], p2[6]) + IMUL(p1[7], p2[7])
+ IMUL(p1[8], p2[8]) + IMUL(p1[9], p2[9])
+ IMUL(p1[10], p2[10]) + IMUL(p1[11], p2[11])
+ IMUL(p1[12], p2[12]) + IMUL(p1[13], p2[13])
+ IMUL(p1[14], p2[14]) + IMUL(p1[15], p2[15]));
}
}
}
int dst_idx = IMUL(idx1, num2) + idx2;
if(d_temp)
{
int3 cmp_result = make_int3(0, -1, 0);
#pragma unroll
for(int i= 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1)
{
cmp_result = results[i] > cmp_result.x?
make_int3(results[i], idx1 + i, cmp_result.x) :
make_int3(cmp_result.x, cmp_result.y, max(cmp_result.z, results[i]));
d_result[dst_idx + IMUL(i, num2)] = max(results[i], 0);
}else
{
break;
}
}
d_temp[ IMUL(blockIdx.y, num2) + idx2] = cmp_result;
}else
{
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1) d_result[dst_idx + IMUL(i, num2)] = max(results[i], 0);
else break;
}
}
}
void ProgramCU::MultiplyDescriptorG(CuTexImage* des1, CuTexImage* des2,
CuTexImage* loc1, CuTexImage* loc2, CuTexImage* texDot, CuTexImage* texCRT,
float* H, float hdistmax, float* F, float fdistmax)
{
int num1 = des1->GetImgWidth() / 8;
int num2 = des2->GetImgWidth() / 8;
Matrix33 MatF, MatH;
//copy the matrix
memcpy(MatF.mat, F, 9 * sizeof(float));
memcpy(MatH.mat, H, 9 * sizeof(float));
//thread blocks
dim3 grid( (num2 + MULT_BLOCK_DIMX - 1)/ MULT_BLOCK_DIMX,
(num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY);
dim3 block(MULT_TBLOCK_DIMX, MULT_TBLOCK_DIMY);
//intermediate results
texDot->InitTexture( num2,num1);
if(texCRT) texCRT->InitTexture( num2, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY, 3);
loc1->BindTexture(texLoc1);
loc2->BindTexture(texLoc2);
des1->BindTexture(texDes1);
des2->BindTexture(texDes2);
MultiplyDescriptorG_Kernel<<<grid, block>>>((int*)texDot->_cuData, num1, num2,
(texCRT? (int3*)texCRT->_cuData : NULL),
MatH, hdistmax, MatF, fdistmax);
}
texture<int, 1, cudaReadModeElementType> texDOT;
#define ROWMATCH_BLOCK_WIDTH 32
#define ROWMATCH_BLOCK_HEIGHT 1
void __global__ RowMatch_Kernel(int*d_dot, int* d_result, int num2, float distmax, float ratiomax)
{
#if ROWMATCH_BLOCK_HEIGHT == 1
__shared__ int dotmax[ROWMATCH_BLOCK_WIDTH];
__shared__ int dotnxt[ROWMATCH_BLOCK_WIDTH];
__shared__ int dotidx[ROWMATCH_BLOCK_WIDTH];
int row = blockIdx.y;
#else
__shared__ int x_dotmax[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH];
__shared__ int x_dotnxt[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH];
__shared__ int x_dotidx[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH];
int* dotmax = x_dotmax[threadIdx.y];
int* dotnxt = x_dotnxt[threadIdx.y];
int* dotidx = x_dotidx[threadIdx.y];
int row = IMUL(blockIdx.y, ROWMATCH_BLOCK_HEIGHT) + threadIdx.y;
#endif
int base_address = IMUL(row , num2);
int t_dotmax = 0, t_dotnxt = 0, t_dotidx = -1;
for(int i = 0; i < num2; i += ROWMATCH_BLOCK_WIDTH)
{
if(threadIdx.x + i < num2)
{
int v = d_dot[base_address + threadIdx.x + i]; // tex1Dfetch(texDOT, base_address + threadIdx.x + i);
bool test = v > t_dotmax;
t_dotnxt = test? t_dotmax : max(t_dotnxt, v);
t_dotidx = test? (threadIdx.x + i) : t_dotidx;
t_dotmax = test? v: t_dotmax;
}
__syncthreads();
}
dotmax[threadIdx.x] = t_dotmax;
dotnxt[threadIdx.x] = t_dotnxt;
dotidx[threadIdx.x] = t_dotidx;
__syncthreads();
#pragma unroll
for(int step = ROWMATCH_BLOCK_WIDTH/2; step >0; step /= 2)
{
if(threadIdx.x < step)
{
int v1 = dotmax[threadIdx.x], v2 = dotmax[threadIdx.x + step];
bool test = v2 > v1;
dotnxt[threadIdx.x] = test? max(v1, dotnxt[threadIdx.x + step]) :max(dotnxt[threadIdx.x], v2);
dotidx[threadIdx.x] = test? dotidx[threadIdx.x + step] : dotidx[threadIdx.x];
dotmax[threadIdx.x] = test? v2 : v1;
}
__syncthreads();
}
if(threadIdx.x == 0)
{
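        // 0.000003814697265625f = 1/2^18 rescales the integer dot product back to an
        // approximate cosine (each component is presumably quantized with a scale of
        // 512, so 512*512 = 2^18 for two unit descriptors); acos then yields an
        // angular distance for the absolute and ratio tests below.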
float dist = acos(min(dotmax[0] * 0.000003814697265625f, 1.0));
float distn = acos(min(dotnxt[0] * 0.000003814697265625f, 1.0));
//float ratio = dist / distn;
d_result[row] = (dist < distmax) && (dist < distn * ratiomax) ? dotidx[0] : -1;//? : -1;
}
}
void ProgramCU::GetRowMatch(CuTexImage* texDot, CuTexImage* texMatch, float distmax, float ratiomax)
{
int num1 = texDot->GetImgHeight();
int num2 = texDot->GetImgWidth();
dim3 grid(1, num1/ROWMATCH_BLOCK_HEIGHT);
dim3 block(ROWMATCH_BLOCK_WIDTH, ROWMATCH_BLOCK_HEIGHT);
// texDot->BindTexture(texDOT);
RowMatch_Kernel<<<grid, block>>>((int*)texDot->_cuData,
(int*)texMatch->_cuData, num2, distmax, ratiomax);
}
#define COLMATCH_BLOCK_WIDTH 32
//texture<int3, 1, cudaReadModeElementType> texCT;
void __global__ ColMatch_Kernel(int3*d_crt, int* d_result, int height, int num2, float distmax, float ratiomax)
{
int col = COLMATCH_BLOCK_WIDTH * blockIdx.x + threadIdx.x;
if(col >= num2) return;
int3 result = d_crt[col];//tex1Dfetch(texCT, col);
int read_idx = col + num2;
for(int i = 1; i < height; ++i, read_idx += num2)
{
int3 temp = d_crt[read_idx];//tex1Dfetch(texCT, read_idx);
result = result.x < temp.x?
make_int3(temp.x, temp.y, max(result.x, temp.z)) :
make_int3(result.x, result.y, max(result.z, temp.x));
}
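    // Merge complete: result holds (best score, best row index, second-best score).
    // The same 1/2^18 rescaling and angular ratio test as in RowMatch_Kernel follow.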
float dist = acos(min(result.x * 0.000003814697265625f, 1.0));
float distn = acos(min(result.z * 0.000003814697265625f, 1.0));
//float ratio = dist / distn;
d_result[col] = (dist < distmax) && (dist < distn * ratiomax) ? result.y : -1;//? : -1;
}
void ProgramCU::GetColMatch(CuTexImage* texCRT, CuTexImage* texMatch, float distmax, float ratiomax)
{
int height = texCRT->GetImgHeight();
int num2 = texCRT->GetImgWidth();
//texCRT->BindTexture(texCT);
dim3 grid((num2 + COLMATCH_BLOCK_WIDTH -1) / COLMATCH_BLOCK_WIDTH);
dim3 block(COLMATCH_BLOCK_WIDTH);
ColMatch_Kernel<<<grid, block>>>((int3*)texCRT->_cuData, (int*) texMatch->_cuData, height, num2, distmax, ratiomax);
}
#endif
|
8684db2309267fcea029aba0fec49ee6926da4e8.hip
|
// !!! This is a file automatically generated by hipify!!!
/* Copyright (c) <2003-2021> <Julio Jerez, Newton Game Dynamics>
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the authors be held liable for any damages
* arising from the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*/
#include <hip/hip_runtime.h>
#include <hip/hip_vector_types.h>
#include <hip/hip_runtime.h>
#include "ndCudaContext.h"
#include "ndCudaSceneInfo.h"
#include "ndCudaIntrinsics.h"
#include "ndCudaPrefixScan.cuh"
#define D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE 1024
__global__ void ndCudaHillisSteeleSanityCheck(ndCudaSceneInfo& info)
{
const unsigned index = threadIdx.x + blockIdx.x * blockDim.x;
const unsigned* histogram = info.m_histogram.m_array;
if ((index >= 1) && (index < info.m_histogram.m_size))
{
unsigned item0 = histogram[index - 1];
unsigned item1 = histogram[index - 0];
if (info.m_frameIsValid && (item0 > item1))
{
//printf("block(%d) id:%d (%d %d)\n", blockIdx.x, threadIdx.x, item0, item1);
cuInvalidateFrame(info, __FUNCTION__, __LINE__);
}
}
}
__global__ void ndCudaHillisSteeleInternal(ndCudaSceneInfo& info)
{
__shared__ unsigned cacheBuffer[D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE / 2 + D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE + 1];
const unsigned blockId = blockIdx.x;
const unsigned threadId0 = threadIdx.x;
const unsigned threadId1 = D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE / 2 + threadId0;
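    // Hillis-Steele inclusive scan: the lower half of cacheBuffer is zero padding, so
    // cacheBuffer[threadId1 - i] never reads out of bounds and the doubling loop below
    // needs no branch.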
cacheBuffer[threadId0] = 0;
__syncthreads();
cacheBuffer[threadId1] = 0;
unsigned* histogram = info.m_histogram.m_array;
const unsigned index = threadId0 + blockDim.x * blockId;
if (index < info.m_histogram.m_size)
{
cacheBuffer[threadId1] = histogram[index];
}
__syncthreads();
for (int i = 1; i < D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE; i = i << 1)
{
int sum = cacheBuffer[threadId1] + cacheBuffer[threadId1 - i];
__syncthreads();
cacheBuffer[threadId1] = sum;
__syncthreads();
}
if (index < info.m_histogram.m_size)
{
histogram[index] = cacheBuffer[threadId1];
}
}
__global__ void ndCudaHillisSteeleAddBlocksInternal(ndCudaSceneInfo& info)
{
const unsigned threadId = threadIdx.x;
const unsigned itemsCount = info.m_histogram.m_size;
const unsigned superBlockCount = (itemsCount + D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE - 1) / D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE;
unsigned* histogram = info.m_histogram.m_array;
unsigned offset = D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE;
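    // Launched with a single block: for each subsequent 1024-element tile the last
    // element of the previous tile (its inclusive total) is added to every entry,
    // turning the per-tile scans into one global prefix sum.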
for (int i = 1; i < superBlockCount; i++)
{
const unsigned value = histogram[offset - 1];
histogram[offset + threadId] += value;
offset += D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE;
__syncthreads();
}
}
/*
__global__ void ndCudaHillisSteelePaddBufferInternal(ndCudaSceneInfo& info)
{
const unsigned blockId = blockIdx.x;
const unsigned threadId = threadIdx.x;
const unsigned itemsCount = info.m_histogram.m_size;
const unsigned blockStart = blockDim.x * ((itemsCount + blockDim.x - 1) / blockDim.x);
const unsigned blockOffset = blockId * blockDim.x;
unsigned* histogram = info.m_histogram.m_array;
if (blockOffset >= blockStart)
{
histogram[blockOffset + threadId] = 0;
}
}
__global__ void ndCudaHillisSteelePrefixScanAddBlocksInternal(ndCudaSceneInfo& info, int bit)
{
const unsigned blockId = blockIdx.x;
const unsigned itemsCount = info.m_histogram.m_size;
const unsigned prefixScanSuperBlockAlign = D_PREFIX_SCAN_PASSES * blockDim.x;
const unsigned alignedItemsCount = prefixScanSuperBlockAlign * ((itemsCount + prefixScanSuperBlockAlign - 1) / prefixScanSuperBlockAlign);
const unsigned blocks = ((alignedItemsCount + blockDim.x - 1) / blockDim.x);
if (blockId < blocks)
{
const unsigned power = 1 << (bit + 1);
const unsigned blockFrac = blockId & (power - 1);
if (blockFrac >= (power >> 1))
{
const unsigned threadId = threadIdx.x;
const unsigned dstIndex = blockDim.x * blockId;
const unsigned srcIndex = blockDim.x * (blockId - blockFrac + (power >> 1)) - 1;
unsigned* histogram = info.m_histogram.m_array;
const unsigned value = histogram[srcIndex];
histogram[dstIndex + threadId] += value;
}
}
}
__global__ void ndCudaHillisSteelePrefixScanAddBlocksFinalInternal(ndCudaSceneInfo& info)
{
const unsigned blockId = blockIdx.x;
const unsigned itemsCount = info.m_histogram.m_size;
const unsigned prefixScanSuperBlockAlign = D_PREFIX_SCAN_PASSES * blockDim.x;
const unsigned alignedItemsCount = prefixScanSuperBlockAlign * ((itemsCount + prefixScanSuperBlockAlign - 1) / prefixScanSuperBlockAlign);
const unsigned blocks = ((alignedItemsCount + blockDim.x - 1) / blockDim.x);
if (blockId < blocks)
{
const unsigned power = 1 << D_PREFIX_SCAN_PASSES_BITS;
const unsigned blockFrac = blockId & (power - 1);
if (blockFrac >= (power >> 1))
{
const unsigned threadId = threadIdx.x;
const unsigned dstIndex = blockDim.x * blockId;
const unsigned srcIndex = blockDim.x * (blockId - blockFrac + (power >> 1)) - 1;
unsigned* histogram = info.m_histogram.m_array;
const unsigned value = histogram[srcIndex];
histogram[dstIndex + threadId] += value;
if (blockFrac == (power - 1))
{
__syncthreads();
if (threadId == (blockDim.x - 1))
{
const unsigned dstBlock = blockId / D_PREFIX_SCAN_PASSES;
const unsigned sum = histogram[blockId * blockDim.x + threadId];
histogram[alignedItemsCount + dstBlock] = sum;
}
}
}
}
}
__global__ void ndCudaHillisSteeleAddSupeBlocksInternal(ndCudaSceneInfo& info)
{
const unsigned blockId = blockIdx.x;
const unsigned threadId = threadIdx.x;
const unsigned itemsCount = info.m_histogram.m_size;
const unsigned prefixScanSuperBlockAlign = D_PREFIX_SCAN_PASSES * blockDim.x;
const unsigned superBlockCount = (itemsCount + prefixScanSuperBlockAlign - 1) / prefixScanSuperBlockAlign;
unsigned* histogram = info.m_histogram.m_array;
unsigned offset = blockId * blockDim.x + prefixScanSuperBlockAlign;
const unsigned superBlockOffset = superBlockCount * prefixScanSuperBlockAlign;
unsigned value = histogram[superBlockOffset];
for (int i = 1; i < superBlockCount; i++)
{
histogram[offset + threadId] += value;
value += histogram[superBlockOffset + i];
offset += prefixScanSuperBlockAlign;
}
}
__global__ void ndCudaHillisSteelePrefixScan(ndCudaSceneInfo& info)
{
if (info.m_frameIsValid)
{
const unsigned threads = info.m_histogram.m_size;
//const unsigned prefixScanSuperBlockAlign = D_PREFIX_SCAN_PASSES * blockSize;
//const unsigned superBlocks = (threads + prefixScanSuperBlockAlign - 1) / prefixScanSuperBlockAlign;
//const unsigned histogramBlocks = D_PREFIX_SCAN_PASSES * superBlocks;
const unsigned blocks = (threads + D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE - 1) / D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE;
if (blocks * D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE > info.m_histogram.m_capacity)
{
cuInvalidateFrame(info, __FUNCTION__, __LINE__);
info.m_histogram.m_size = blocks * D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE;
return;
}
ndCudaHillisSteeleInternal << <blocks, D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE, 0 >> > (info);
//ndCudaHillisSteelePaddBufferInternal << <D_PREFIX_SCAN_PASSES, blockSize, 0 >> > (info);
//for (int i = 0; i < (D_PREFIX_SCAN_PASSES_BITS - 1); i++)
//{
// ndCudaHillisSteelePrefixScanAddBlocksInternal << <histogramBlocks, blockSize, 0 >> > (info, i);
//}
//ndCudaHillisSteelePrefixScanAddBlocksFinalInternal << <histogramBlocks, blockSize, 0 >> > (info);
//ndCudaHillisSteeleAddSupeBlocksInternal << <D_PREFIX_SCAN_PASSES, blockSize, 0 >> > (info);
if (blocks > 1)
{
ndCudaHillisSteeleInternal << <1, D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE, 0 >> > (info);
}
#ifdef _DEBUG
ndCudaHillisSteeleSanityCheck << <blocks, D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE, 0 >> > (info);
#endif
}
}
*/
__global__ void ndCudaHillisSteelePrefixScan(ndCudaSceneInfo& info)
{
if (info.m_frameIsValid)
{
const unsigned threads = info.m_histogram.m_size;
const unsigned blocks = (threads + D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE - 1) / D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE;
if (blocks * D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE > info.m_histogram.m_capacity)
{
cuInvalidateFrame(info, __FUNCTION__, __LINE__);
info.m_histogram.m_size = blocks * D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE;
return;
}
ndCudaHillisSteeleInternal << <blocks, D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE, 0 >> > (info);
if (blocks > 1)
{
ndCudaHillisSteeleAddBlocksInternal << <1, D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE, 0 >> > (info);
}
#ifdef _DEBUG
ndCudaHillisSteeleSanityCheck << <blocks, D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE, 0 >> > (info);
#endif
}
}
|
8684db2309267fcea029aba0fec49ee6926da4e8.cu
|
/* Copyright (c) <2003-2021> <Julio Jerez, Newton Game Dynamics>
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the authors be held liable for any damages
* arising from the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*/
#include <cuda.h>
#include <vector_types.h>
#include <cuda_runtime.h>
#include "ndCudaContext.h"
#include "ndCudaSceneInfo.h"
#include "ndCudaIntrinsics.h"
#include "ndCudaPrefixScan.cuh"
#define D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE 1024
__global__ void ndCudaHillisSteeleSanityCheck(ndCudaSceneInfo& info)
{
const unsigned index = threadIdx.x + blockIdx.x * blockDim.x;
const unsigned* histogram = info.m_histogram.m_array;
if ((index >= 1) && (index < info.m_histogram.m_size))
{
unsigned item0 = histogram[index - 1];
unsigned item1 = histogram[index - 0];
if (info.m_frameIsValid && (item0 > item1))
{
//printf("block(%d) id:%d (%d %d)\n", blockIdx.x, threadIdx.x, item0, item1);
cuInvalidateFrame(info, __FUNCTION__, __LINE__);
}
}
}
__global__ void ndCudaHillisSteeleInternal(ndCudaSceneInfo& info)
{
__shared__ unsigned cacheBuffer[D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE / 2 + D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE + 1];
const unsigned blockId = blockIdx.x;
const unsigned threadId0 = threadIdx.x;
const unsigned threadId1 = D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE / 2 + threadId0;
cacheBuffer[threadId0] = 0;
__syncthreads();
cacheBuffer[threadId1] = 0;
unsigned* histogram = info.m_histogram.m_array;
const unsigned index = threadId0 + blockDim.x * blockId;
if (index < info.m_histogram.m_size)
{
cacheBuffer[threadId1] = histogram[index];
}
__syncthreads();
for (int i = 1; i < D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE; i = i << 1)
{
int sum = cacheBuffer[threadId1] + cacheBuffer[threadId1 - i];
__syncthreads();
cacheBuffer[threadId1] = sum;
__syncthreads();
}
if (index < info.m_histogram.m_size)
{
histogram[index] = cacheBuffer[threadId1];
}
}
__global__ void ndCudaHillisSteeleAddBlocksInternal(ndCudaSceneInfo& info)
{
const unsigned threadId = threadIdx.x;
const unsigned itemsCount = info.m_histogram.m_size;
const unsigned superBlockCount = (itemsCount + D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE - 1) / D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE;
unsigned* histogram = info.m_histogram.m_array;
unsigned offset = D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE;
for (int i = 1; i < superBlockCount; i++)
{
const unsigned value = histogram[offset - 1];
histogram[offset + threadId] += value;
offset += D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE;
__syncthreads();
}
}
/*
__global__ void ndCudaHillisSteelePaddBufferInternal(ndCudaSceneInfo& info)
{
const unsigned blockId = blockIdx.x;
const unsigned threadId = threadIdx.x;
const unsigned itemsCount = info.m_histogram.m_size;
const unsigned blockStart = blockDim.x * ((itemsCount + blockDim.x - 1) / blockDim.x);
const unsigned blockOffset = blockId * blockDim.x;
unsigned* histogram = info.m_histogram.m_array;
if (blockOffset >= blockStart)
{
histogram[blockOffset + threadId] = 0;
}
}
__global__ void ndCudaHillisSteelePrefixScanAddBlocksInternal(ndCudaSceneInfo& info, int bit)
{
const unsigned blockId = blockIdx.x;
const unsigned itemsCount = info.m_histogram.m_size;
const unsigned prefixScanSuperBlockAlign = D_PREFIX_SCAN_PASSES * blockDim.x;
const unsigned alignedItemsCount = prefixScanSuperBlockAlign * ((itemsCount + prefixScanSuperBlockAlign - 1) / prefixScanSuperBlockAlign);
const unsigned blocks = ((alignedItemsCount + blockDim.x - 1) / blockDim.x);
if (blockId < blocks)
{
const unsigned power = 1 << (bit + 1);
const unsigned blockFrac = blockId & (power - 1);
if (blockFrac >= (power >> 1))
{
const unsigned threadId = threadIdx.x;
const unsigned dstIndex = blockDim.x * blockId;
const unsigned srcIndex = blockDim.x * (blockId - blockFrac + (power >> 1)) - 1;
unsigned* histogram = info.m_histogram.m_array;
const unsigned value = histogram[srcIndex];
histogram[dstIndex + threadId] += value;
}
}
}
__global__ void ndCudaHillisSteelePrefixScanAddBlocksFinalInternal(ndCudaSceneInfo& info)
{
const unsigned blockId = blockIdx.x;
const unsigned itemsCount = info.m_histogram.m_size;
const unsigned prefixScanSuperBlockAlign = D_PREFIX_SCAN_PASSES * blockDim.x;
const unsigned alignedItemsCount = prefixScanSuperBlockAlign * ((itemsCount + prefixScanSuperBlockAlign - 1) / prefixScanSuperBlockAlign);
const unsigned blocks = ((alignedItemsCount + blockDim.x - 1) / blockDim.x);
if (blockId < blocks)
{
const unsigned power = 1 << D_PREFIX_SCAN_PASSES_BITS;
const unsigned blockFrac = blockId & (power - 1);
if (blockFrac >= (power >> 1))
{
const unsigned threadId = threadIdx.x;
const unsigned dstIndex = blockDim.x * blockId;
const unsigned srcIndex = blockDim.x * (blockId - blockFrac + (power >> 1)) - 1;
unsigned* histogram = info.m_histogram.m_array;
const unsigned value = histogram[srcIndex];
histogram[dstIndex + threadId] += value;
if (blockFrac == (power - 1))
{
__syncthreads();
if (threadId == (blockDim.x - 1))
{
const unsigned dstBlock = blockId / D_PREFIX_SCAN_PASSES;
const unsigned sum = histogram[blockId * blockDim.x + threadId];
histogram[alignedItemsCount + dstBlock] = sum;
}
}
}
}
}
__global__ void ndCudaHillisSteeleAddSupeBlocksInternal(ndCudaSceneInfo& info)
{
const unsigned blockId = blockIdx.x;
const unsigned threadId = threadIdx.x;
const unsigned itemsCount = info.m_histogram.m_size;
const unsigned prefixScanSuperBlockAlign = D_PREFIX_SCAN_PASSES * blockDim.x;
const unsigned superBlockCount = (itemsCount + prefixScanSuperBlockAlign - 1) / prefixScanSuperBlockAlign;
unsigned* histogram = info.m_histogram.m_array;
unsigned offset = blockId * blockDim.x + prefixScanSuperBlockAlign;
const unsigned superBlockOffset = superBlockCount * prefixScanSuperBlockAlign;
unsigned value = histogram[superBlockOffset];
for (int i = 1; i < superBlockCount; i++)
{
histogram[offset + threadId] += value;
value += histogram[superBlockOffset + i];
offset += prefixScanSuperBlockAlign;
}
}
__global__ void ndCudaHillisSteelePrefixScan(ndCudaSceneInfo& info)
{
if (info.m_frameIsValid)
{
const unsigned threads = info.m_histogram.m_size;
//const unsigned prefixScanSuperBlockAlign = D_PREFIX_SCAN_PASSES * blockSize;
//const unsigned superBlocks = (threads + prefixScanSuperBlockAlign - 1) / prefixScanSuperBlockAlign;
//const unsigned histogramBlocks = D_PREFIX_SCAN_PASSES * superBlocks;
const unsigned blocks = (threads + D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE - 1) / D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE;
if (blocks * D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE > info.m_histogram.m_capacity)
{
cuInvalidateFrame(info, __FUNCTION__, __LINE__);
info.m_histogram.m_size = blocks * D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE;
return;
}
ndCudaHillisSteeleInternal << <blocks, D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE, 0 >> > (info);
//ndCudaHillisSteelePaddBufferInternal << <D_PREFIX_SCAN_PASSES, blockSize, 0 >> > (info);
//for (int i = 0; i < (D_PREFIX_SCAN_PASSES_BITS - 1); i++)
//{
// ndCudaHillisSteelePrefixScanAddBlocksInternal << <histogramBlocks, blockSize, 0 >> > (info, i);
//}
//ndCudaHillisSteelePrefixScanAddBlocksFinalInternal << <histogramBlocks, blockSize, 0 >> > (info);
//ndCudaHillisSteeleAddSupeBlocksInternal << <D_PREFIX_SCAN_PASSES, blockSize, 0 >> > (info);
if (blocks > 1)
{
ndCudaHillisSteeleInternal << <1, D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE, 0 >> > (info);
}
#ifdef _DEBUG
ndCudaHillisSteeleSanityCheck << <blocks, D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE, 0 >> > (info);
#endif
}
}
*/
__global__ void ndCudaHillisSteelePrefixScan(ndCudaSceneInfo& info)
{
if (info.m_frameIsValid)
{
const unsigned threads = info.m_histogram.m_size;
const unsigned blocks = (threads + D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE - 1) / D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE;
if (blocks * D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE > info.m_histogram.m_capacity)
{
cuInvalidateFrame(info, __FUNCTION__, __LINE__);
info.m_histogram.m_size = blocks * D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE;
return;
}
ndCudaHillisSteeleInternal << <blocks, D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE, 0 >> > (info);
if (blocks > 1)
{
ndCudaHillisSteeleAddBlocksInternal << <1, D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE, 0 >> > (info);
}
#ifdef _DEBUG
ndCudaHillisSteeleSanityCheck << <blocks, D_HILL_STEELE_PREFIX_SCAN_BLOCK_SIZE, 0 >> > (info);
#endif
}
}
|
c8f9328f06426677869b4cb73e6d018584c011d5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void d_updateTransforms (float* d_currentTransform, float3* d_cameraPosition)
{
d_cameraPosition->x = d_currentTransform[3];
d_cameraPosition->y = d_currentTransform[7];
d_cameraPosition->z = d_currentTransform[11];
}
|
c8f9328f06426677869b4cb73e6d018584c011d5.cu
|
#include "includes.h"
__global__ void d_updateTransforms (float* d_currentTransform, float3* d_cameraPosition)
{
d_cameraPosition->x = d_currentTransform[3];
d_cameraPosition->y = d_currentTransform[7];
d_cameraPosition->z = d_currentTransform[11];
}
|
4e957c6550f5e12c240ebff9677cfd523527dcd0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdint.h>
#include <unistd.h>
#include <png.h>
#include <hip/hip_runtime.h>
#include <math.h>
#define rel params[0]
#define img params[1]
#define scl params[2]
__device__ void writeHSV(uint8_t *pixel, int theta) {
unsigned char region, remainder, q, t;
region = theta / 43;
remainder = (theta - (region * 43)) * 6;
q = (255 * (255 - ((255 * remainder) >> 8))) >> 8;
t = (255 * (255 - ((255 * (255 - remainder)) >> 8))) >> 8;
switch (region) {
case 0:
*pixel++ = 255;
*pixel++ = t;
*pixel++ = 0;
return;
case 1:
*pixel++ = q;
*pixel++ = 255;
*pixel++ = 0;
return;
case 2:
*pixel++ = 0;
*pixel++ = 255;
*pixel++ = t;
return;
case 3:
*pixel++ = 0;
*pixel++ = q;
*pixel++ = 255;
return;
case 4:
*pixel++ = t;
*pixel++ = 0;
*pixel++ = 255;
return;
default:
*pixel++ = 255;
*pixel++ = 0;
*pixel++ = q;
return;
}
}
__global__ void euclid (uint8_t *gpu, double *params, int streamNumber ) {
int index, pos;
int c, t;
uint32_t x, y;
index = streamNumber * 65536 + threadIdx.x * 256;
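    // Each thread colours 256 consecutive pixels. Pixel coordinates are mapped to
    // fixed point with 20 fractional bits (scale 1048576 = 2^20) and the
    // subtraction-only Euclidean algorithm is run on them; the grey level is c/t,
    // the fraction of steps in which x was reduced, capped at 1000 iterations.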
for (pos = 0; pos < 256; pos++) {
x = (uint32_t) (((rel + 2.0) + (double) (.5 + (index % 1024)) * scl) * 1048576);
y = (uint32_t) (((img + 2.0) + (double) (.5 + (index / 1024)) * scl) * 1048576);
c = 0;
t = 1;
while (1) {
if (x > y) {
x -= y;
c++;
} else if (y > x) {
y -= x;
} else {
break;
}
t++;
if (t > 1000) break;
}
uint8_t *pixel = (gpu + index++ * 3);
*pixel++ = (255 * c) / t;
*pixel++ = (255 * c) / t;
*pixel++ = (255 * c) / t;
}
}
__global__ void mandelbrot (uint8_t *gpu, double *params, int streamNumber ) {
int index, c, pos;
double cr, ci, zr, zi, t;
index = streamNumber * 65536 + threadIdx.x * 256;
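    // Standard escape-time iteration of z <- z^2 + c with a 1000-iteration cap;
    // points that never escape are painted black, the rest get a hue from the
    // iteration count via writeHSV.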
for (pos = 0; pos < 256; pos++) {
c = 0;
cr = rel + (double) (.5 + (index % 1024)) * scl / 1024.0;
ci = img + (double) (.5 + (index / 1024)) * scl / 1024.0;
zr = cr;
zi = ci;
while (++c < 1000 && zr * zr + zi * zi < 4) {
t = zr;
zr = zr * zr - zi * zi + cr;
zi = 2 * t * zi + ci;
}
uint8_t *pixel = (gpu + index * 3);
if (c == 1000) {
*pixel++ = 0;
*pixel++ = 0;
*pixel++ = 0;
} else {
writeHSV(pixel, c);
}
index ++;
}
}
// GPU variables
double *gpu_params;
uint8_t *gpu;
// Host variables
hipStream_t streams[16];
double params[3];
png_byte ** row_pointers;
void (*kernel) (uint8_t *, double *, int);
// reads parameters from stdin and writes them to params array
// initializes rel, img, and scl macros
void readParams() {
rel = -2.0;
img = -2.0;
scl = 4.0;
char c = getchar();
switch (c) {
case 'm':
kernel = mandelbrot;
break;
default:
kernel = euclid;
}
while ((c = getchar()) != '@') {
scl /= 3.0;
switch (c) {
case '3':
case '6':
case '9':
rel += scl;
case '2':
case '5':
case '8':
rel += scl;
default:
break;
}
switch (c) {
case '7':
case '8':
case '9':
img += scl;
case '4':
case '5':
case '6':
img += scl;
default:
break;
}
}
}
// begins computation
void computeKernel() {
// setup params
hipMemcpy( gpu_params, params, 3 * sizeof(double), hipMemcpyHostToDevice);
// initialize streams
int i, r;
for (i = 0; i < 16; i++) {
hipStreamCreate((streams + i));
}
// execute kernels in the streams
for (i = 0; i < 16; i++) {
hipLaunchKernelGGL(( kernel), dim3(1), dim3(256), 0, streams[i], gpu, gpu_params, i );
}
// setup asynchronous memory copy after completion
for (i = 0; i < 16; i++) {
for (r = 0; r < 64; r++) {
hipMemcpyAsync(row_pointers[64 * i + r], (gpu + i * 65536 * 3 + r * 1024 * 3), sizeof(uint8_t) * 1024 * 3, hipMemcpyDeviceToHost, streams[i]);
}
}
hipDeviceSynchronize();
}
extern void writePngOutput();
int main(int argc, char **argv) {
// Initialize memory
hipMalloc( (void**) &gpu, 1024 * 1024 * sizeof(uint8_t) * 3 );
hipMalloc( (void**) &gpu_params, 3 * sizeof(double) );
row_pointers = (png_byte **) malloc (1024 * sizeof (png_byte *));
for (int y = 0; y < 1024; y++) {
row_pointers[y] = (png_byte *) malloc (sizeof (uint8_t) * 1024 * 3);
}
// do the process
while (1) {
readParams();
computeKernel();
writePngOutput();
}
}
size_t pngBufferFill = 0;
extern void writeFn(png_structp png_ptr, png_bytep data, uint32_t size);
extern void flushFn(png_structp png_ptr);
void writePngOutput() {
png_structp png_ptr = png_create_write_struct (PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
png_infop info_ptr = png_create_info_struct (png_ptr);
png_set_IHDR (png_ptr,
info_ptr,
1024, // width
1024, // height
8, // depth
PNG_COLOR_TYPE_RGB,
PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_DEFAULT,
PNG_FILTER_TYPE_DEFAULT);
png_set_write_fn(png_ptr, NULL, (png_rw_ptr) writeFn, (png_flush_ptr) flushFn);
png_init_io (png_ptr, stdout);
png_set_rows (png_ptr, info_ptr, row_pointers);
png_write_png (png_ptr, info_ptr, PNG_TRANSFORM_IDENTITY, NULL);
write(2, &pngBufferFill, 4);
pngBufferFill = 0;
png_destroy_write_struct (&png_ptr, &info_ptr);
}
void writeFn(png_structp png_ptr, png_bytep data, uint32_t size) {
write(1, data, size);
pngBufferFill += size;
}
void flushFn(png_structp png_ptr) {
fflush(stdout);
}
|
4e957c6550f5e12c240ebff9677cfd523527dcd0.cu
|
#include <stdint.h>
#include <unistd.h>
#include <png.h>
#include <cuda.h>
#include <math.h>
#define rel params[0]
#define img params[1]
#define scl params[2]
__device__ void writeHSV(uint8_t *pixel, int theta) {
unsigned char region, remainder, q, t;
region = theta / 43;
remainder = (theta - (region * 43)) * 6;
q = (255 * (255 - ((255 * remainder) >> 8))) >> 8;
t = (255 * (255 - ((255 * (255 - remainder)) >> 8))) >> 8;
switch (region) {
case 0:
*pixel++ = 255;
*pixel++ = t;
*pixel++ = 0;
return;
case 1:
*pixel++ = q;
*pixel++ = 255;
*pixel++ = 0;
return;
case 2:
*pixel++ = 0;
*pixel++ = 255;
*pixel++ = t;
return;
case 3:
*pixel++ = 0;
*pixel++ = q;
*pixel++ = 255;
return;
case 4:
*pixel++ = t;
*pixel++ = 0;
*pixel++ = 255;
return;
default:
*pixel++ = 255;
*pixel++ = 0;
*pixel++ = q;
return;
}
}
__global__ void euclid (uint8_t *gpu, double *params, int streamNumber ) {
int index, pos;
int c, t;
uint32_t x, y;
index = streamNumber * 65536 + threadIdx.x * 256;
for (pos = 0; pos < 256; pos++) {
x = (uint32_t) (((rel + 2.0) + (double) (.5 + (index % 1024)) * scl) * 1048576);
y = (uint32_t) (((img + 2.0) + (double) (.5 + (index / 1024)) * scl) * 1048576);
c = 0;
t = 1;
while (1) {
if (x > y) {
x -= y;
c++;
} else if (y > x) {
y -= x;
} else {
break;
}
t++;
if (t > 1000) break;
}
uint8_t *pixel = (gpu + index++ * 3);
*pixel++ = (255 * c) / t;
*pixel++ = (255 * c) / t;
*pixel++ = (255 * c) / t;
}
}
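// mandelbrot: standard escape-time iteration z -> z^2 + c starting from z = c,
// with a 1000-iteration cap and escape radius 2. Points that never escape are
// drawn black; escaping points are colored by iteration count via writeHSV.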
__global__ void mandelbrot (uint8_t *gpu, double *params, int streamNumber ) {
int index, c, pos;
double cr, ci, zr, zi, t;
index = streamNumber * 65536 + threadIdx.x * 256;
for (pos = 0; pos < 256; pos++) {
c = 0;
cr = rel + (double) (.5 + (index % 1024)) * scl / 1024.0;
ci = img + (double) (.5 + (index / 1024)) * scl / 1024.0;
zr = cr;
zi = ci;
while (++c < 1000 && zr * zr + zi * zi < 4) {
t = zr;
zr = zr * zr - zi * zi + cr;
zi = 2 * t * zi + ci;
}
uint8_t *pixel = (gpu + index * 3);
if (c == 1000) {
*pixel++ = 0;
*pixel++ = 0;
*pixel++ = 0;
} else {
writeHSV(pixel, c);
}
index ++;
}
}
// GPU variables
double *gpu_params;
uint8_t *gpu;
// Host variables
cudaStream_t streams[16];
double params[3];
png_byte ** row_pointers;
void (*kernel) (uint8_t *, double *, int);
// reads parameters from stdin and writes them to params array
// initializes rel, img, and scl macros
void readParams() {
rel = -2.0;
img = -2.0;
scl = 4.0;
char c = getchar();
switch (c) {
case 'm':
kernel = mandelbrot;
break;
default:
kernel = euclid;
}
while ((c = getchar()) != '@') {
scl /= 3.0;
switch (c) {
case '3':
case '6':
case '9':
rel += scl;
case '2':
case '5':
case '8':
rel += scl;
default:
break;
}
switch (c) {
case '7':
case '8':
case '9':
img += scl;
case '4':
case '5':
case '6':
img += scl;
default:
break;
}
}
}
// begins computation
void computeKernel() {
// setup params
cudaMemcpy( gpu_params, params, 3 * sizeof(double), cudaMemcpyHostToDevice);
// initialize streams
int i, r;
for (i = 0; i < 16; i++) {
cudaStreamCreate((streams + i));
}
// execute kernels in the streams
for (i = 0; i < 16; i++) {
kernel<<<1, 256, 0, streams[i]>>>( gpu, gpu_params, i );
}
// setup asynchronous memory copy after completion
for (i = 0; i < 16; i++) {
for (r = 0; r < 64; r++) {
cudaMemcpyAsync(row_pointers[64 * i + r], (gpu + i * 65536 * 3 + r * 1024 * 3), sizeof(uint8_t) * 1024 * 3, cudaMemcpyDeviceToHost, streams[i]);
}
}
cudaDeviceSynchronize();
}
extern void writePngOutput();
int main(int argc, char **argv) {
// Initialize memory
cudaMalloc( (void**) &gpu, 1024 * 1024 * sizeof(uint8_t) * 3 );
cudaMalloc( (void**) &gpu_params, 3 * sizeof(double) );
row_pointers = (png_byte **) malloc (1024 * sizeof (png_byte *));
for (int y = 0; y < 1024; y++) {
row_pointers[y] = (png_byte *) malloc (sizeof (uint8_t) * 1024 * 3);
}
// do the process
while (1) {
readParams();
computeKernel();
writePngOutput();
}
}
size_t pngBufferFill = 0;
extern void writeFn(png_structp png_ptr, png_bytep data, uint32_t size);
extern void flushFn(png_structp png_ptr);
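// writePngOutput: encodes the 1024x1024 RGB frame in row_pointers as a PNG and
// streams it to stdout through writeFn(); the accumulated byte count is then
// reported on stderr (fd 2). Note that only 4 bytes of pngBufferFill (a size_t)
// are written, i.e. the low 32 bits on a little-endian 64-bit host.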
void writePngOutput() {
png_structp png_ptr = png_create_write_struct (PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
png_infop info_ptr = png_create_info_struct (png_ptr);
png_set_IHDR (png_ptr,
info_ptr,
1024, // width
1024, // height
8, // depth
PNG_COLOR_TYPE_RGB,
PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_DEFAULT,
PNG_FILTER_TYPE_DEFAULT);
png_set_write_fn(png_ptr, NULL, (png_rw_ptr) writeFn, (png_flush_ptr) flushFn);
png_init_io (png_ptr, stdout);
png_set_rows (png_ptr, info_ptr, row_pointers);
png_write_png (png_ptr, info_ptr, PNG_TRANSFORM_IDENTITY, NULL);
write(2, &pngBufferFill, 4);
pngBufferFill = 0;
png_destroy_write_struct (&png_ptr, &info_ptr);
}
void writeFn(png_structp png_ptr, png_bytep data, uint32_t size) {
write(1, data, size);
pngBufferFill += size;
}
void flushFn(png_structp png_ptr) {
fflush(stdout);
}
|
7b0e26907f74558d02b25330f962645bc431d9cf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*This puppy is gonna go up on github since I'll be using
*it in my next project so before I forget...
*
*(c) Zachary Job
*All rights reserved, I am not liable for damages.
*Re-distributable and use with my permission contact me
*at [email protected]
*
*Presentable without modification including comments
*for educational purposes
*
*wattoken_kernel.cu
*4/15/2015
*
*This is designed for a minimum architecture of the
*GK104 specifications (specifically the k520).
*Otherwise go away.
*
*Tokenize the input buffer and update the global histogram
*/
#include <stdio.h>
#include "../Definitions/defs.h"
#include "../../../settings.h"
/*
*
* The most intense unrolling you may ever see, behold...
*
* The goal is to utilize EVERY drop of memory that is lbuffer
* >global to regs
* >process regs and move to shared
* >waterfall process
* >parallel reduction where their content will only survive if they are the first
* occurrence of a key from left to right in the buffer
* EXAMPLE... REMEMBER, high memory volume is required for effectiveness unlike here
* buffer of 8x4 w/ 16 threads
ENTIRE SH MEM IS PROCESSED
******** 24 threads check 24 elements each and modify their count
********
********
********
EX
*123****-> *123****
********-> ********
********-> ********
******15-> ******05
* >write shared to global with all threads, also using the SH 0 bucket to recklessly
* remove contention because life in the fast lane has caused me to lose my mind
*
*/
__global__ void wattoken_kernel(int *bufferLoc, int *cpuHist)
{
__shared__ int shmem[FEED_BY]; //The waterfall buffer where computed hashes are stored
char
trip; //the buffer flush indicator and the waterfall trip
int
idxL, j, i, //iterators and offset chunk into shared; j also holds the hash result
shchnk, count; //chunk offset into shared and the occurrence count
//shoffs;
/*
idxL = threadIdx.x * FDBY_LD;
//Fetch everything to memory in every other position
for(i = idxL; i < idxL + FDBY_LD; i++)
shmem[i] = bufferLoc[i];
__syncthreads();
*/
//unrolled - see above for comments
idxL = threadIdx.x * FDBY_LD;
shmem[idxL] = bufferLoc[idxL];
shmem[idxL + 1] = bufferLoc[idxL + 1];
shmem[idxL + 2] = bufferLoc[idxL + 2];
shmem[idxL + 3] = bufferLoc[idxL + 3];
shmem[idxL + 4] = bufferLoc[idxL + 4];
shmem[idxL + 5] = bufferLoc[idxL + 5];
shmem[idxL + 6] = bufferLoc[idxL + 6];
shmem[idxL + 7] = bufferLoc[idxL + 7];
shmem[idxL + 8] = bufferLoc[idxL + 8];
shmem[idxL + 9] = bufferLoc[idxL + 9];
shmem[idxL + 10] = bufferLoc[idxL + 10];
shmem[idxL + 11] = bufferLoc[idxL + 11];
shmem[idxL + 12] = bufferLoc[idxL + 12];
shmem[idxL + 13] = bufferLoc[idxL + 13];
shmem[idxL + 14] = bufferLoc[idxL + 14];
shmem[idxL + 15] = bufferLoc[idxL + 15];
shmem[idxL + 16] = bufferLoc[idxL + 16];
shmem[idxL + 17] = bufferLoc[idxL + 17];
shmem[idxL + 18] = bufferLoc[idxL + 18];
shmem[idxL + 19] = bufferLoc[idxL + 19];
shmem[idxL + 20] = bufferLoc[idxL + 20];
shmem[idxL + 21] = bufferLoc[idxL + 21];
shmem[idxL + 22] = bufferLoc[idxL + 22];
shmem[idxL + 23] = bufferLoc[idxL + 23];
shmem[idxL + 24] = bufferLoc[idxL + 24];
shmem[idxL + 25] = bufferLoc[idxL + 25];
shmem[idxL + 26] = bufferLoc[idxL + 26];
shmem[idxL + 27] = bufferLoc[idxL + 27];
shmem[idxL + 28] = bufferLoc[idxL + 28];
shmem[idxL + 29] = bufferLoc[idxL + 29];
shmem[idxL + 30] = bufferLoc[idxL + 30];
shmem[idxL + 31] = bufferLoc[idxL + 31];
shmem[idxL + 32] = bufferLoc[idxL + 32];
shmem[idxL + 33] = bufferLoc[idxL + 33];
shmem[idxL + 34] = bufferLoc[idxL + 34];
shmem[idxL + 35] = bufferLoc[idxL + 35];
shmem[idxL + 36] = bufferLoc[idxL + 36];
shmem[idxL + 37] = bufferLoc[idxL + 37];
shmem[idxL + 38] = bufferLoc[idxL + 38];
shmem[idxL + 39] = bufferLoc[idxL + 39];
shmem[idxL + 40] = bufferLoc[idxL + 40];
shmem[idxL + 41] = bufferLoc[idxL + 41];
shmem[idxL + 42] = bufferLoc[idxL + 42];
shmem[idxL + 43] = bufferLoc[idxL + 43];
shmem[idxL + 44] = bufferLoc[idxL + 44];
shmem[idxL + 45] = bufferLoc[idxL + 45];
shmem[idxL + 46] = bufferLoc[idxL + 46];
shmem[idxL + 47] = bufferLoc[idxL + 47];
__syncthreads();
for(shchnk = 0, trip = 1; shchnk < FEED_BY; shchnk += THREADS_PER_BUFF, trip = 1)
{
//index
idxL = threadIdx.x + shchnk;
//get value of index
j = shmem[idxL];
//occurrences
count = 0;
//Warp target code. Each element in the shared buffer is checked
//per thread with this calculation. Instances are shifted
//backwards as the priority potential lessens as you go
//down the buffer. Waterfall reduction is my best analogy
//via zeroing greater indexes of already existing values
//
// SEE FUNCTION COMMENT EXAMPLE
//
for(i = shchnk, trip = 1; i < THREADS_PER_BUFF + shchnk; i++)
count += (int)(shmem[i] == j), trip &= (char)((!((i < idxL) && shmem[i] == j)) || (i >= idxL));
//Flush if bad result
j *= trip;
count *= trip;
//Now the tricky part. Multiple blocks will be running. Four
//is the intended amount for the k520. Testing must be performed
//I may end up using only four feeders and have four cpu threads
//process the results into the main hashmap. However, this is
//a single test kernel. Once integrated these concerns will be
//dealt with
//The home histogram 0 position will be recklessly written. That
//is the point. This is meant to allow concurrency.
//
//cannot use j, could have been flushed
// Hash
j += (j << 12);
j ^= (j >> 22);
j += (j << 4);
j ^= (j >> 9);
j += (j << 10);
j ^= (j >> 2);
j += (j << 7);
j ^= (j >> 12);
j &= LANG_BITS_MSK;
if(trip == 1)
atomicAdd(&cpuHist[j], count);
}
//unrolled - see above for comments
/*
for(shchnk = 0, trip = 1; shchnk < FEED_BY; shchnk += THREADS_PER_BUFF, trip = 1)
{
idxL = threadIdx.x + shchnk;
j = shmem[idxL];
count = 0;
//0
count += (int)(shmem[shchnk] == j), trip &= (char)((!((shchnk < idxL) && shmem[shchnk] == j)) || shchnk >= idxL);
shoffs = 1 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 2 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 3 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 4 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 5 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 6 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 7 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 8 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 9 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 10 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 11 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 12 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 13 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 14 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 15 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 16 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 17 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 18 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 19 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 20 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 21 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 22 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 23 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 24 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 25 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 26 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 27 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 28 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 29 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 30 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 31 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 32 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 33 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 34 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 35 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 36 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 37 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 38 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 39 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 40 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 41 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 42 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 43 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 44 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 45 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 46 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 47 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 48 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 49 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 50 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 51 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 52 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 53 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 54 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 55 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 56 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 57 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 58 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 59 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 60 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 61 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 62 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 63 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 64 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 65 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 66 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 67 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 68 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 69 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 70 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 71 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 72 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 73 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 74 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 75 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 76 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 77 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 78 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 79 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 80 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 81 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 82 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 83 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 84 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 85 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 86 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 87 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 88 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 89 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 90 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 91 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 92 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 93 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 94 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 95 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 96 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 97 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 98 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 99 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 100 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 101 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 102 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 103 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 104 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 105 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 106 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 107 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 108 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 109 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 110 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 111 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 112 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 113 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 114 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 115 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 116 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 117 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 118 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 119 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 120 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 121 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 122 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 123 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 124 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 125 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 126 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 127 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 128 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 129 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 130 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 131 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 132 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 133 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 134 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 135 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 136 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 137 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 138 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 139 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 140 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 141 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 142 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 143 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 144 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 145 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 146 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 147 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 148 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 149 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 150 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 151 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 152 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 153 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 154 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 155 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 156 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 157 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 158 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 159 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 160 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 161 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 162 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 163 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 164 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 165 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 166 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 167 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 168 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 169 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 170 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 171 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 172 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 173 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 174 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 175 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 176 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 177 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 178 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 179 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 180 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 181 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 182 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 183 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 184 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 185 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 186 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 187 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 188 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 189 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 190 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 191 + shchnk;
count += (int)(shmem[shoffs] == j);
// Hash
j += (j << 12);
j ^= (j >> 22);
j += (j << 4);
j ^= (j >> 9);
j += (j << 10);
j ^= (j >> 2);
j += (j << 7);
j ^= (j >> 12);
j &= LANG_BITS_MSK;
j *= trip;
count *= trip;
if(trip == 1)
atomicAdd(&cpuHist[j], count);
}
*/
//end unroll - see above for comments
}
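// Host-side reference (illustrative sketch, not part of the original kernel):
// within each chunk of THREADS_PER_BUFF tokens, only the leftmost occurrence of
// a key survives the waterfall reduction, and it contributes the number of
// times the key appears in the whole chunk. LANG_BITS_MSK comes from the
// project headers included above.
static void chunkHistogramReference(const int *chunk, int n, int *hist)
{
int i, k;
for (i = 0; i < n; i++) {
int first = 1;
for (k = 0; k < i; k++)
if (chunk[k] == chunk[i]) { first = 0; break; }
if (!first)
continue;
int count = 0;
for (k = 0; k < n; k++)
count += (chunk[k] == chunk[i]);
int j = chunk[i];
// same mixing hash as the kernel
j += (j << 12); j ^= (j >> 22); j += (j << 4); j ^= (j >> 9);
j += (j << 10); j ^= (j >> 2); j += (j << 7); j ^= (j >> 12);
hist[j & LANG_BITS_MSK] += count;
}
}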
|
7b0e26907f74558d02b25330f962645bc431d9cf.cu
|
/*
*This puppy is gonna go up on github since I'll be using
*it in my next project so before I forget...
*
*(c) Zachary Job
*All rights reserved, I am not liable for damages.
*Re-distributable and use with my permission contact me
*at [email protected]
*
*Presentable without modification including comments
*for educational purposes
*
*wattoken_kernel.cu
*4/15/2015
*
*This is designed for a minimum architecture of the
*GK104 specifications (specifically the k520).
*Otherwise go away.
*
*Tokenize the input buffer and update the global histogram
*/
#include <stdio.h>
#include "../Definitions/defs.h"
#include "../../../settings.h"
/*
*
* The most intense unrolling you may ever see, behold...
*
* The goal is to utilize EVERY drop of memory that is lbuffer
* >global to regs
* >process regs and move to shared
* >waterfall process
* >parallel reduction where their content will only survive if they are the first
* occurrence of a key from left to right in the buffer
* EXAMPLE... REMEMBER, high memory volume is required for effectiveness unlike here
* buffer of 8x4 w/ 16 threads
ENTIRE SH MEM IS PROCESSED
******** 24 threads check 24 elements each and modify their count
********
********
********
EX
*123****-> *123****
********-> ********
********-> ********
******15-> ******05
* >write shared to global with all threads, also using the SH 0 bucket to recklessly
* remove contention because life in the fast lane has caused me to lose my mind
*
*/
__global__ void wattoken_kernel(int *bufferLoc, int *cpuHist)
{
__shared__ int shmem[FEED_BY]; //The waterfall buffer where computed hashes are stored
char
trip; //the buffer flush indicator and the waterfall trip
int
idxL, j, i, //iterators and offset chunk into shared; j also holds the hash result
shchnk, count; //chunk offset into shared and the occurrence count
//shoffs;
/*
idxL = threadIdx.x * FDBY_LD;
//Fetch everything to memory in every other position
for(i = idxL; i < idxL + FDBY_LD; i++)
shmem[i] = bufferLoc[i];
__syncthreads();
*/
//unrolled - see above for comments
idxL = threadIdx.x * FDBY_LD;
shmem[idxL] = bufferLoc[idxL];
shmem[idxL + 1] = bufferLoc[idxL + 1];
shmem[idxL + 2] = bufferLoc[idxL + 2];
shmem[idxL + 3] = bufferLoc[idxL + 3];
shmem[idxL + 4] = bufferLoc[idxL + 4];
shmem[idxL + 5] = bufferLoc[idxL + 5];
shmem[idxL + 6] = bufferLoc[idxL + 6];
shmem[idxL + 7] = bufferLoc[idxL + 7];
shmem[idxL + 8] = bufferLoc[idxL + 8];
shmem[idxL + 9] = bufferLoc[idxL + 9];
shmem[idxL + 10] = bufferLoc[idxL + 10];
shmem[idxL + 11] = bufferLoc[idxL + 11];
shmem[idxL + 12] = bufferLoc[idxL + 12];
shmem[idxL + 13] = bufferLoc[idxL + 13];
shmem[idxL + 14] = bufferLoc[idxL + 14];
shmem[idxL + 15] = bufferLoc[idxL + 15];
shmem[idxL + 16] = bufferLoc[idxL + 16];
shmem[idxL + 17] = bufferLoc[idxL + 17];
shmem[idxL + 18] = bufferLoc[idxL + 18];
shmem[idxL + 19] = bufferLoc[idxL + 19];
shmem[idxL + 20] = bufferLoc[idxL + 20];
shmem[idxL + 21] = bufferLoc[idxL + 21];
shmem[idxL + 22] = bufferLoc[idxL + 22];
shmem[idxL + 23] = bufferLoc[idxL + 23];
shmem[idxL + 24] = bufferLoc[idxL + 24];
shmem[idxL + 25] = bufferLoc[idxL + 25];
shmem[idxL + 26] = bufferLoc[idxL + 26];
shmem[idxL + 27] = bufferLoc[idxL + 27];
shmem[idxL + 28] = bufferLoc[idxL + 28];
shmem[idxL + 29] = bufferLoc[idxL + 29];
shmem[idxL + 30] = bufferLoc[idxL + 30];
shmem[idxL + 31] = bufferLoc[idxL + 31];
shmem[idxL + 32] = bufferLoc[idxL + 32];
shmem[idxL + 33] = bufferLoc[idxL + 33];
shmem[idxL + 34] = bufferLoc[idxL + 34];
shmem[idxL + 35] = bufferLoc[idxL + 35];
shmem[idxL + 36] = bufferLoc[idxL + 36];
shmem[idxL + 37] = bufferLoc[idxL + 37];
shmem[idxL + 38] = bufferLoc[idxL + 38];
shmem[idxL + 39] = bufferLoc[idxL + 39];
shmem[idxL + 40] = bufferLoc[idxL + 40];
shmem[idxL + 41] = bufferLoc[idxL + 41];
shmem[idxL + 42] = bufferLoc[idxL + 42];
shmem[idxL + 43] = bufferLoc[idxL + 43];
shmem[idxL + 44] = bufferLoc[idxL + 44];
shmem[idxL + 45] = bufferLoc[idxL + 45];
shmem[idxL + 46] = bufferLoc[idxL + 46];
shmem[idxL + 47] = bufferLoc[idxL + 47];
__syncthreads();
for(shchnk = 0, trip = 1; shchnk < FEED_BY; shchnk += THREADS_PER_BUFF, trip = 1)
{
//index
idxL = threadIdx.x + shchnk;
//get value of index
j = shmem[idxL];
//occurrences
count = 0;
//Warp target code. Each element in the shared buffer is checked
//per thread with this calculation. Instances are shifted
//backwards as the priority potential lessens as you go
//down the buffer. Waterfall reduction is my best analogy
//via zeroing greater indexes of already existing values
//
// SEE FUNCTION COMMENT EXAMPLE
//
for(i = shchnk, trip = 1; i < THREADS_PER_BUFF + shchnk; i++)
count += (int)(shmem[i] == j), trip &= (char)((!((i < idxL) && shmem[i] == j)) || (i >= idxL));
//Flush if bad result
j *= trip;
count *= trip;
//Now the tricky part. Multiple blocks will be running. Four
//is the intended amount for the k520. Testing must be performed
//I may end up using only four feeders and have four cpu threads
//process the results into the main hashmap. However, this is
//a single test kernel. Once integrated these concerns will be
//dealt with
//The home histogram 0 position will be recklessly written. That
//is the point. This is meant to allow concurrency.
//
//cannot use j, could have been flushed
// Hash
j += (j << 12);
j ^= (j >> 22);
j += (j << 4);
j ^= (j >> 9);
j += (j << 10);
j ^= (j >> 2);
j += (j << 7);
j ^= (j >> 12);
j &= LANG_BITS_MSK;
if(trip == 1)
atomicAdd(&cpuHist[j], count);
}
//unrolled - see above for comments
/*
for(shchnk = 0, trip = 1; shchnk < FEED_BY; shchnk += THREADS_PER_BUFF, trip = 1)
{
idxL = threadIdx.x + shchnk;
j = shmem[idxL];
count = 0;
//0
count += (int)(shmem[shchnk] == j), trip &= (char)((!((shchnk < idxL) && shmem[shchnk] == j)) || shchnk >= idxL);
shoffs = 1 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 2 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 3 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 4 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 5 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 6 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 7 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 8 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 9 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 10 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 11 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 12 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 13 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 14 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 15 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 16 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 17 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 18 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 19 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 20 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 21 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 22 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 23 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 24 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 25 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 26 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 27 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 28 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 29 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 30 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 31 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 32 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 33 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 34 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 35 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 36 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 37 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 38 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 39 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 40 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 41 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 42 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 43 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 44 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 45 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 46 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 47 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 48 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 49 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 50 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 51 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 52 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 53 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 54 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 55 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 56 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 57 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 58 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 59 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 60 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 61 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 62 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 63 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 64 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 65 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 66 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 67 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 68 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 69 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 70 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 71 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 72 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 73 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 74 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 75 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 76 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 77 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 78 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 79 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 80 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 81 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 82 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 83 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 84 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 85 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 86 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 87 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 88 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 89 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 90 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 91 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 92 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 93 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 94 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 95 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 96 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 97 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 98 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 99 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 100 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 101 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 102 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 103 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 104 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 105 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 106 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 107 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 108 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 109 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 110 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 111 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 112 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 113 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 114 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 115 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 116 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 117 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 118 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 119 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 120 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 121 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 122 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 123 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 124 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 125 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 126 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 127 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 128 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 129 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 130 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 131 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 132 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 133 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 134 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 135 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 136 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 137 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 138 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 139 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 140 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 141 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 142 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 143 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 144 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 145 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 146 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 147 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 148 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 149 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 150 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 151 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 152 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 153 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 154 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 155 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 156 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 157 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 158 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 159 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 160 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 161 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 162 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 163 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 164 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 165 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 166 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 167 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 168 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 169 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 170 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 171 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 172 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 173 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 174 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 175 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 176 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 177 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 178 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 179 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 180 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 181 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 182 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 183 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 184 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 185 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 186 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 187 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 188 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 189 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 190 + shchnk;
count += (int)(shmem[shoffs] == j), trip &= (char)((!((shoffs < idxL) && shmem[shoffs] == j)) || shoffs >= idxL);
shoffs = 191 + shchnk;
count += (int)(shmem[shoffs] == j);
// Hash
j += (j << 12);
j ^= (j >> 22);
j += (j << 4);
j ^= (j >> 9);
j += (j << 10);
j ^= (j >> 2);
j += (j << 7);
j ^= (j >> 12);
j &= LANG_BITS_MSK;
j *= trip;
count *= trip;
if(trip == 1)
atomicAdd(&cpuHist[j], count);
}
*/
//end unroll - see above for comments
}
|
728836f8b21e0c6f16a011b9faea68e98c2db930.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#define TILE_SIZE 16
__global__ void mysgemm(int m, int n, int k, const float *A, const float *B, float* C) {
/********************************************************************
*
* Compute C = A x B
* where A is a (m x k) matrix
* where B is a (k x n) matrix
* where C is a (m x n) matrix
*
* Use shared memory for tiling
*
********************************************************************/
// INSERT KERNEL CODE HERE
}
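// --- Hedged sketch (editor's addition, not part of the original template) ---
// The comment block above describes C = A x B with shared-memory tiling but
// deliberately leaves the kernel body empty. The kernel below is one minimal
// way such a tiled kernel could look; the name mysgemm_tiled_sketch and the
// assumption of column-major storage with leading dimensions m, k and m
// (as in cuBLAS-style sgemm templates) are assumptions, not the assignment's
// reference solution.
__global__ void mysgemm_tiled_sketch(int m, int n, int k, const float *A, const float *B, float *C) {
    __shared__ float As[TILE_SIZE][TILE_SIZE];
    __shared__ float Bs[TILE_SIZE][TILE_SIZE];
    int row = blockIdx.y * TILE_SIZE + threadIdx.y;   // row index into C (and A)
    int col = blockIdx.x * TILE_SIZE + threadIdx.x;   // column index into C (and B)
    float acc = 0.0f;
    // Walk the k dimension one TILE_SIZE-wide slab at a time.
    for (int t = 0; t < (k + TILE_SIZE - 1) / TILE_SIZE; ++t) {
        int aCol = t * TILE_SIZE + threadIdx.x;       // column of A loaded by this thread
        int bRow = t * TILE_SIZE + threadIdx.y;       // row of B loaded by this thread
        As[threadIdx.y][threadIdx.x] = (row < m && aCol < k) ? A[aCol * m + row] : 0.0f;
        Bs[threadIdx.y][threadIdx.x] = (bRow < k && col < n) ? B[col * k + bRow] : 0.0f;
        __syncthreads();
        for (int i = 0; i < TILE_SIZE; ++i)
            acc += As[threadIdx.y][i] * Bs[i][threadIdx.x];
        __syncthreads();
    }
    if (row < m && col < n)
        C[col * m + row] = acc;                       // column-major store, ldc assumed == m
}
// --- end of sketch ----------------------------------------------------------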
void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc)
{
if ((transa != 'N') && (transa != 'n')) {
printf("unsupported value of 'transa'\n");
return;
}
if ((transb != 'N') && (transb != 'n')) {
printf("unsupported value of 'transb'\n");
return;
}
if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) {
printf("unsupported value of alpha\n");
return;
}
if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) {
printf("unsupported value of beta\n");
return;
}
// Initialize thread block and kernel grid dimensions ---------------------
const unsigned int BLOCK_SIZE = TILE_SIZE;
//INSERT CODE HERE
// Invoke CUDA kernel -----------------------------------------------------
//INSERT CODE HERE
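    // --- Hedged sketch (editor's addition, not the original solution) -------
    // One plausible way to fill in the two INSERT markers above: a 2D grid of
    // BLOCK_SIZE x BLOCK_SIZE thread blocks covering the m x n output, and a
    // HIP-style launch. mysgemm_tiled_sketch is the illustrative kernel added
    // after mysgemm above; swap in mysgemm once its body is written.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((n + BLOCK_SIZE - 1) / BLOCK_SIZE,
                 (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
    hipLaunchKernelGGL(mysgemm_tiled_sketch, dimGrid, dimBlock, 0, 0,
                       m, n, k, A, B, C);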
}
|
728836f8b21e0c6f16a011b9faea68e98c2db930.cu
|
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#define TILE_SIZE 16
__global__ void mysgemm(int m, int n, int k, const float *A, const float *B, float* C) {
/********************************************************************
*
* Compute C = A x B
* where A is a (m x k) matrix
* where B is a (k x n) matrix
* where C is a (m x n) matrix
*
* Use shared memory for tiling
*
********************************************************************/
// INSERT KERNEL CODE HERE
}
void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc)
{
if ((transa != 'N') && (transa != 'n')) {
printf("unsupported value of 'transa'\n");
return;
}
if ((transb != 'N') && (transb != 'n')) {
printf("unsupported value of 'transb'\n");
return;
}
if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) {
printf("unsupported value of alpha\n");
return;
}
if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) {
printf("unsupported value of beta\n");
return;
}
// Initialize thread block and kernel grid dimensions ---------------------
const unsigned int BLOCK_SIZE = TILE_SIZE;
//INSERT CODE HERE
// Invoke CUDA kernel -----------------------------------------------------
//INSERT CODE HERE
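    // --- Hedged sketch (editor's addition, mirrors the note in the .hip file)
    // The same grid/block setup, expressed with the native CUDA triple-chevron
    // launch instead of hipLaunchKernelGGL. mysgemm still has an empty body in
    // this template; the tiled-kernel sketch shown in the .hip counterpart is
    // one possible way to complete it.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((n + BLOCK_SIZE - 1) / BLOCK_SIZE,
                 (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
    mysgemm<<<dimGrid, dimBlock>>>(m, n, k, A, B, C);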
}
|
9aa36f5b062e9ca6e6d2f69a31d905ee6b114662.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_advec_cell_kernel4_ydir;
int xdim0_advec_cell_kernel4_ydir_h = -1;
__constant__ int ydim0_advec_cell_kernel4_ydir;
int ydim0_advec_cell_kernel4_ydir_h = -1;
__constant__ int xdim1_advec_cell_kernel4_ydir;
int xdim1_advec_cell_kernel4_ydir_h = -1;
__constant__ int ydim1_advec_cell_kernel4_ydir;
int ydim1_advec_cell_kernel4_ydir_h = -1;
__constant__ int xdim2_advec_cell_kernel4_ydir;
int xdim2_advec_cell_kernel4_ydir_h = -1;
__constant__ int ydim2_advec_cell_kernel4_ydir;
int ydim2_advec_cell_kernel4_ydir_h = -1;
__constant__ int xdim3_advec_cell_kernel4_ydir;
int xdim3_advec_cell_kernel4_ydir_h = -1;
__constant__ int ydim3_advec_cell_kernel4_ydir;
int ydim3_advec_cell_kernel4_ydir_h = -1;
__constant__ int xdim4_advec_cell_kernel4_ydir;
int xdim4_advec_cell_kernel4_ydir_h = -1;
__constant__ int ydim4_advec_cell_kernel4_ydir;
int ydim4_advec_cell_kernel4_ydir_h = -1;
__constant__ int xdim5_advec_cell_kernel4_ydir;
int xdim5_advec_cell_kernel4_ydir_h = -1;
__constant__ int ydim5_advec_cell_kernel4_ydir;
int ydim5_advec_cell_kernel4_ydir_h = -1;
__constant__ int xdim6_advec_cell_kernel4_ydir;
int xdim6_advec_cell_kernel4_ydir_h = -1;
__constant__ int ydim6_advec_cell_kernel4_ydir;
int ydim6_advec_cell_kernel4_ydir_h = -1;
__constant__ int xdim7_advec_cell_kernel4_ydir;
int xdim7_advec_cell_kernel4_ydir_h = -1;
__constant__ int ydim7_advec_cell_kernel4_ydir;
int ydim7_advec_cell_kernel4_ydir_h = -1;
__constant__ int xdim8_advec_cell_kernel4_ydir;
int xdim8_advec_cell_kernel4_ydir_h = -1;
__constant__ int ydim8_advec_cell_kernel4_ydir;
int ydim8_advec_cell_kernel4_ydir_h = -1;
__constant__ int xdim9_advec_cell_kernel4_ydir;
int xdim9_advec_cell_kernel4_ydir_h = -1;
__constant__ int ydim9_advec_cell_kernel4_ydir;
int ydim9_advec_cell_kernel4_ydir_h = -1;
__constant__ int xdim10_advec_cell_kernel4_ydir;
int xdim10_advec_cell_kernel4_ydir_h = -1;
__constant__ int ydim10_advec_cell_kernel4_ydir;
int ydim10_advec_cell_kernel4_ydir_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
#undef OPS_ACC8
#undef OPS_ACC9
#undef OPS_ACC10
#define OPS_ACC0(x,y,z) (x+xdim0_advec_cell_kernel4_ydir*(y)+xdim0_advec_cell_kernel4_ydir*ydim0_advec_cell_kernel4_ydir*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_advec_cell_kernel4_ydir*(y)+xdim1_advec_cell_kernel4_ydir*ydim1_advec_cell_kernel4_ydir*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_advec_cell_kernel4_ydir*(y)+xdim2_advec_cell_kernel4_ydir*ydim2_advec_cell_kernel4_ydir*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_advec_cell_kernel4_ydir*(y)+xdim3_advec_cell_kernel4_ydir*ydim3_advec_cell_kernel4_ydir*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_advec_cell_kernel4_ydir*(y)+xdim4_advec_cell_kernel4_ydir*ydim4_advec_cell_kernel4_ydir*(z))
#define OPS_ACC5(x,y,z) (x+xdim5_advec_cell_kernel4_ydir*(y)+xdim5_advec_cell_kernel4_ydir*ydim5_advec_cell_kernel4_ydir*(z))
#define OPS_ACC6(x,y,z) (x+xdim6_advec_cell_kernel4_ydir*(y)+xdim6_advec_cell_kernel4_ydir*ydim6_advec_cell_kernel4_ydir*(z))
#define OPS_ACC7(x,y,z) (x+xdim7_advec_cell_kernel4_ydir*(y)+xdim7_advec_cell_kernel4_ydir*ydim7_advec_cell_kernel4_ydir*(z))
#define OPS_ACC8(x,y,z) (x+xdim8_advec_cell_kernel4_ydir*(y)+xdim8_advec_cell_kernel4_ydir*ydim8_advec_cell_kernel4_ydir*(z))
#define OPS_ACC9(x,y,z) (x+xdim9_advec_cell_kernel4_ydir*(y)+xdim9_advec_cell_kernel4_ydir*ydim9_advec_cell_kernel4_ydir*(z))
#define OPS_ACC10(x,y,z) (x+xdim10_advec_cell_kernel4_ydir*(y)+xdim10_advec_cell_kernel4_ydir*ydim10_advec_cell_kernel4_ydir*(z))
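// Editor's note (illustrative, not emitted by ops.py): each OPS_ACC macro
// flattens a relative (x,y,z) stencil offset into a 1D offset from the current
// element, assuming x-fastest storage. For example, with xdim2 = 100 and
// ydim2 = 80 (hypothetical sizes), the north neighbour used below as
// mass_flux_y[OPS_ACC2(0,1,0)] expands to mass_flux_y[0 + 100*1 + 100*80*0],
// i.e. the element exactly one x-row (100 doubles) ahead of the current one.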
//user function
__device__
inline void advec_cell_kernel4_ydir_gpu( double *density1, double *energy1,
const double *mass_flux_y, const double *vol_flux_y,
const double *pre_vol, const double *post_vol,
double *pre_mass, double *post_mass,
double *advec_vol, double *post_ener,
const double *ener_flux) {
pre_mass[OPS_ACC6(0,0,0)] = density1[OPS_ACC0(0,0,0)] * pre_vol[OPS_ACC4(0,0,0)];
post_mass[OPS_ACC7(0,0,0)] = pre_mass[OPS_ACC6(0,0,0)] + mass_flux_y[OPS_ACC2(0,0,0)] - mass_flux_y[OPS_ACC2(0,1,0)];
post_ener[OPS_ACC9(0,0,0)] = ( energy1[OPS_ACC1(0,0,0)] * pre_mass[OPS_ACC6(0,0,0)] + ener_flux[OPS_ACC10(0,0,0)] - ener_flux[OPS_ACC10(0,1,0)])/post_mass[OPS_ACC7(0,0,0)];
advec_vol[OPS_ACC8(0,0,0)] = pre_vol[OPS_ACC4(0,0,0)] + vol_flux_y[OPS_ACC3(0,0,0)] - vol_flux_y[OPS_ACC3(0,1,0)];
density1[OPS_ACC0(0,0,0)] = post_mass[OPS_ACC7(0,0,0)]/advec_vol[OPS_ACC8(0,0,0)];
energy1[OPS_ACC1(0,0,0)] = post_ener[OPS_ACC9(0,0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
#undef OPS_ACC8
#undef OPS_ACC9
#undef OPS_ACC10
__global__ void ops_advec_cell_kernel4_ydir(
double* __restrict arg0,
double* __restrict arg1,
const double* __restrict arg2,
const double* __restrict arg3,
const double* __restrict arg4,
const double* __restrict arg5,
double* __restrict arg6,
double* __restrict arg7,
double* __restrict arg8,
double* __restrict arg9,
const double* __restrict arg10,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_advec_cell_kernel4_ydir + idx_z * 1*1 * xdim0_advec_cell_kernel4_ydir * ydim0_advec_cell_kernel4_ydir;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_advec_cell_kernel4_ydir + idx_z * 1*1 * xdim1_advec_cell_kernel4_ydir * ydim1_advec_cell_kernel4_ydir;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_advec_cell_kernel4_ydir + idx_z * 1*1 * xdim2_advec_cell_kernel4_ydir * ydim2_advec_cell_kernel4_ydir;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_advec_cell_kernel4_ydir + idx_z * 1*1 * xdim3_advec_cell_kernel4_ydir * ydim3_advec_cell_kernel4_ydir;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_advec_cell_kernel4_ydir + idx_z * 1*1 * xdim4_advec_cell_kernel4_ydir * ydim4_advec_cell_kernel4_ydir;
arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_advec_cell_kernel4_ydir + idx_z * 1*1 * xdim5_advec_cell_kernel4_ydir * ydim5_advec_cell_kernel4_ydir;
arg6 += idx_x * 1*1 + idx_y * 1*1 * xdim6_advec_cell_kernel4_ydir + idx_z * 1*1 * xdim6_advec_cell_kernel4_ydir * ydim6_advec_cell_kernel4_ydir;
arg7 += idx_x * 1*1 + idx_y * 1*1 * xdim7_advec_cell_kernel4_ydir + idx_z * 1*1 * xdim7_advec_cell_kernel4_ydir * ydim7_advec_cell_kernel4_ydir;
arg8 += idx_x * 1*1 + idx_y * 1*1 * xdim8_advec_cell_kernel4_ydir + idx_z * 1*1 * xdim8_advec_cell_kernel4_ydir * ydim8_advec_cell_kernel4_ydir;
arg9 += idx_x * 1*1 + idx_y * 1*1 * xdim9_advec_cell_kernel4_ydir + idx_z * 1*1 * xdim9_advec_cell_kernel4_ydir * ydim9_advec_cell_kernel4_ydir;
arg10 += idx_x * 1*1 + idx_y * 1*1 * xdim10_advec_cell_kernel4_ydir + idx_z * 1*1 * xdim10_advec_cell_kernel4_ydir * ydim10_advec_cell_kernel4_ydir;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_cell_kernel4_ydir_gpu(arg0, arg1, arg2, arg3,
arg4, arg5, arg6, arg7, arg8,
arg9, arg10);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_cell_kernel4_ydir(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8,
ops_arg arg9, ops_arg arg10) {
#else
void ops_par_loop_advec_cell_kernel4_ydir_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
ops_arg arg8 = desc->args[8];
ops_arg arg9 = desc->args[9];
ops_arg arg10 = desc->args[10];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[11] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,11,range,116)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(116,"advec_cell_kernel4_ydir");
OPS_kernels[116].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
int xdim7 = args[7].dat->size[0];
int ydim7 = args[7].dat->size[1];
int xdim8 = args[8].dat->size[0];
int ydim8 = args[8].dat->size[1];
int xdim9 = args[9].dat->size[0];
int ydim9 = args[9].dat->size[1];
int xdim10 = args[10].dat->size[0];
int ydim10 = args[10].dat->size[1];
if (xdim0 != xdim0_advec_cell_kernel4_ydir_h || ydim0 != ydim0_advec_cell_kernel4_ydir_h || xdim1 != xdim1_advec_cell_kernel4_ydir_h || ydim1 != ydim1_advec_cell_kernel4_ydir_h || xdim2 != xdim2_advec_cell_kernel4_ydir_h || ydim2 != ydim2_advec_cell_kernel4_ydir_h || xdim3 != xdim3_advec_cell_kernel4_ydir_h || ydim3 != ydim3_advec_cell_kernel4_ydir_h || xdim4 != xdim4_advec_cell_kernel4_ydir_h || ydim4 != ydim4_advec_cell_kernel4_ydir_h || xdim5 != xdim5_advec_cell_kernel4_ydir_h || ydim5 != ydim5_advec_cell_kernel4_ydir_h || xdim6 != xdim6_advec_cell_kernel4_ydir_h || ydim6 != ydim6_advec_cell_kernel4_ydir_h || xdim7 != xdim7_advec_cell_kernel4_ydir_h || ydim7 != ydim7_advec_cell_kernel4_ydir_h || xdim8 != xdim8_advec_cell_kernel4_ydir_h || ydim8 != ydim8_advec_cell_kernel4_ydir_h || xdim9 != xdim9_advec_cell_kernel4_ydir_h || ydim9 != ydim9_advec_cell_kernel4_ydir_h || xdim10 != xdim10_advec_cell_kernel4_ydir_h || ydim10 != ydim10_advec_cell_kernel4_ydir_h) {
hipMemcpyToSymbol( xdim0_advec_cell_kernel4_ydir, &xdim0, sizeof(int) );
xdim0_advec_cell_kernel4_ydir_h = xdim0;
hipMemcpyToSymbol( ydim0_advec_cell_kernel4_ydir, &ydim0, sizeof(int) );
ydim0_advec_cell_kernel4_ydir_h = ydim0;
hipMemcpyToSymbol( xdim1_advec_cell_kernel4_ydir, &xdim1, sizeof(int) );
xdim1_advec_cell_kernel4_ydir_h = xdim1;
hipMemcpyToSymbol( ydim1_advec_cell_kernel4_ydir, &ydim1, sizeof(int) );
ydim1_advec_cell_kernel4_ydir_h = ydim1;
hipMemcpyToSymbol( xdim2_advec_cell_kernel4_ydir, &xdim2, sizeof(int) );
xdim2_advec_cell_kernel4_ydir_h = xdim2;
hipMemcpyToSymbol( ydim2_advec_cell_kernel4_ydir, &ydim2, sizeof(int) );
ydim2_advec_cell_kernel4_ydir_h = ydim2;
hipMemcpyToSymbol( xdim3_advec_cell_kernel4_ydir, &xdim3, sizeof(int) );
xdim3_advec_cell_kernel4_ydir_h = xdim3;
hipMemcpyToSymbol( ydim3_advec_cell_kernel4_ydir, &ydim3, sizeof(int) );
ydim3_advec_cell_kernel4_ydir_h = ydim3;
hipMemcpyToSymbol( xdim4_advec_cell_kernel4_ydir, &xdim4, sizeof(int) );
xdim4_advec_cell_kernel4_ydir_h = xdim4;
hipMemcpyToSymbol( ydim4_advec_cell_kernel4_ydir, &ydim4, sizeof(int) );
ydim4_advec_cell_kernel4_ydir_h = ydim4;
hipMemcpyToSymbol( xdim5_advec_cell_kernel4_ydir, &xdim5, sizeof(int) );
xdim5_advec_cell_kernel4_ydir_h = xdim5;
hipMemcpyToSymbol( ydim5_advec_cell_kernel4_ydir, &ydim5, sizeof(int) );
ydim5_advec_cell_kernel4_ydir_h = ydim5;
hipMemcpyToSymbol( xdim6_advec_cell_kernel4_ydir, &xdim6, sizeof(int) );
xdim6_advec_cell_kernel4_ydir_h = xdim6;
hipMemcpyToSymbol( ydim6_advec_cell_kernel4_ydir, &ydim6, sizeof(int) );
ydim6_advec_cell_kernel4_ydir_h = ydim6;
hipMemcpyToSymbol( xdim7_advec_cell_kernel4_ydir, &xdim7, sizeof(int) );
xdim7_advec_cell_kernel4_ydir_h = xdim7;
hipMemcpyToSymbol( ydim7_advec_cell_kernel4_ydir, &ydim7, sizeof(int) );
ydim7_advec_cell_kernel4_ydir_h = ydim7;
hipMemcpyToSymbol( xdim8_advec_cell_kernel4_ydir, &xdim8, sizeof(int) );
xdim8_advec_cell_kernel4_ydir_h = xdim8;
hipMemcpyToSymbol( ydim8_advec_cell_kernel4_ydir, &ydim8, sizeof(int) );
ydim8_advec_cell_kernel4_ydir_h = ydim8;
hipMemcpyToSymbol( xdim9_advec_cell_kernel4_ydir, &xdim9, sizeof(int) );
xdim9_advec_cell_kernel4_ydir_h = xdim9;
hipMemcpyToSymbol( ydim9_advec_cell_kernel4_ydir, &ydim9, sizeof(int) );
ydim9_advec_cell_kernel4_ydir_h = ydim9;
hipMemcpyToSymbol( xdim10_advec_cell_kernel4_ydir, &xdim10, sizeof(int) );
xdim10_advec_cell_kernel4_ydir_h = xdim10;
hipMemcpyToSymbol( ydim10_advec_cell_kernel4_ydir, &ydim10, sizeof(int) );
ydim10_advec_cell_kernel4_ydir_h = ydim10;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
int dat7 = (OPS_soa ? args[7].dat->type_size : args[7].dat->elem_size);
int dat8 = (OPS_soa ? args[8].dat->type_size : args[8].dat->elem_size);
int dat9 = (OPS_soa ? args[9].dat->type_size : args[9].dat->elem_size);
int dat10 = (OPS_soa ? args[10].dat->type_size : args[10].dat->elem_size);
char *p_a[11];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
int base7 = args[7].dat->base_offset +
dat7 * 1 * (start[0] * args[7].stencil->stride[0]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
(start[1] * args[7].stencil->stride[1]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
args[7].dat->size[1] *
(start[2] * args[7].stencil->stride[2]);
p_a[7] = (char *)args[7].data_d + base7;
int base8 = args[8].dat->base_offset +
dat8 * 1 * (start[0] * args[8].stencil->stride[0]);
base8 = base8+ dat8 *
args[8].dat->size[0] *
(start[1] * args[8].stencil->stride[1]);
base8 = base8+ dat8 *
args[8].dat->size[0] *
args[8].dat->size[1] *
(start[2] * args[8].stencil->stride[2]);
p_a[8] = (char *)args[8].data_d + base8;
int base9 = args[9].dat->base_offset +
dat9 * 1 * (start[0] * args[9].stencil->stride[0]);
base9 = base9+ dat9 *
args[9].dat->size[0] *
(start[1] * args[9].stencil->stride[1]);
base9 = base9+ dat9 *
args[9].dat->size[0] *
args[9].dat->size[1] *
(start[2] * args[9].stencil->stride[2]);
p_a[9] = (char *)args[9].data_d + base9;
int base10 = args[10].dat->base_offset +
dat10 * 1 * (start[0] * args[10].stencil->stride[0]);
base10 = base10+ dat10 *
args[10].dat->size[0] *
(start[1] * args[10].stencil->stride[1]);
base10 = base10+ dat10 *
args[10].dat->size[0] *
args[10].dat->size[1] *
(start[2] * args[10].stencil->stride[2]);
p_a[10] = (char *)args[10].data_d + base10;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 11);
ops_halo_exchanges(args,11,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[116].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_advec_cell_kernel4_ydir), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (double *)p_a[7],
(double *)p_a[8], (double *)p_a[9],
(double *)p_a[10],x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[116].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 11);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[6],range);
ops_set_halo_dirtybit3(&args[7],range);
ops_set_halo_dirtybit3(&args[8],range);
ops_set_halo_dirtybit3(&args[9],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[116].mpi_time += t2-t1;
OPS_kernels[116].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[116].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[116].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[116].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[116].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[116].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[116].transfer += ops_compute_transfer(dim, start, end, &arg6);
OPS_kernels[116].transfer += ops_compute_transfer(dim, start, end, &arg7);
OPS_kernels[116].transfer += ops_compute_transfer(dim, start, end, &arg8);
OPS_kernels[116].transfer += ops_compute_transfer(dim, start, end, &arg9);
OPS_kernels[116].transfer += ops_compute_transfer(dim, start, end, &arg10);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_cell_kernel4_ydir(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 116;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 116;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 11;
desc->args = (ops_arg*)malloc(11*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
desc->hash = ((desc->hash << 5) + desc->hash) + arg7.dat->index;
desc->args[8] = arg8;
desc->hash = ((desc->hash << 5) + desc->hash) + arg8.dat->index;
desc->args[9] = arg9;
desc->hash = ((desc->hash << 5) + desc->hash) + arg9.dat->index;
desc->args[10] = arg10;
desc->hash = ((desc->hash << 5) + desc->hash) + arg10.dat->index;
desc->function = ops_par_loop_advec_cell_kernel4_ydir_execute;
if (OPS_diags > 1) {
ops_timing_realloc(116,"advec_cell_kernel4_ydir");
}
ops_enqueue_kernel(desc);
}
#endif
|
9aa36f5b062e9ca6e6d2f69a31d905ee6b114662.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_advec_cell_kernel4_ydir;
int xdim0_advec_cell_kernel4_ydir_h = -1;
__constant__ int ydim0_advec_cell_kernel4_ydir;
int ydim0_advec_cell_kernel4_ydir_h = -1;
__constant__ int xdim1_advec_cell_kernel4_ydir;
int xdim1_advec_cell_kernel4_ydir_h = -1;
__constant__ int ydim1_advec_cell_kernel4_ydir;
int ydim1_advec_cell_kernel4_ydir_h = -1;
__constant__ int xdim2_advec_cell_kernel4_ydir;
int xdim2_advec_cell_kernel4_ydir_h = -1;
__constant__ int ydim2_advec_cell_kernel4_ydir;
int ydim2_advec_cell_kernel4_ydir_h = -1;
__constant__ int xdim3_advec_cell_kernel4_ydir;
int xdim3_advec_cell_kernel4_ydir_h = -1;
__constant__ int ydim3_advec_cell_kernel4_ydir;
int ydim3_advec_cell_kernel4_ydir_h = -1;
__constant__ int xdim4_advec_cell_kernel4_ydir;
int xdim4_advec_cell_kernel4_ydir_h = -1;
__constant__ int ydim4_advec_cell_kernel4_ydir;
int ydim4_advec_cell_kernel4_ydir_h = -1;
__constant__ int xdim5_advec_cell_kernel4_ydir;
int xdim5_advec_cell_kernel4_ydir_h = -1;
__constant__ int ydim5_advec_cell_kernel4_ydir;
int ydim5_advec_cell_kernel4_ydir_h = -1;
__constant__ int xdim6_advec_cell_kernel4_ydir;
int xdim6_advec_cell_kernel4_ydir_h = -1;
__constant__ int ydim6_advec_cell_kernel4_ydir;
int ydim6_advec_cell_kernel4_ydir_h = -1;
__constant__ int xdim7_advec_cell_kernel4_ydir;
int xdim7_advec_cell_kernel4_ydir_h = -1;
__constant__ int ydim7_advec_cell_kernel4_ydir;
int ydim7_advec_cell_kernel4_ydir_h = -1;
__constant__ int xdim8_advec_cell_kernel4_ydir;
int xdim8_advec_cell_kernel4_ydir_h = -1;
__constant__ int ydim8_advec_cell_kernel4_ydir;
int ydim8_advec_cell_kernel4_ydir_h = -1;
__constant__ int xdim9_advec_cell_kernel4_ydir;
int xdim9_advec_cell_kernel4_ydir_h = -1;
__constant__ int ydim9_advec_cell_kernel4_ydir;
int ydim9_advec_cell_kernel4_ydir_h = -1;
__constant__ int xdim10_advec_cell_kernel4_ydir;
int xdim10_advec_cell_kernel4_ydir_h = -1;
__constant__ int ydim10_advec_cell_kernel4_ydir;
int ydim10_advec_cell_kernel4_ydir_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
#undef OPS_ACC8
#undef OPS_ACC9
#undef OPS_ACC10
#define OPS_ACC0(x,y,z) (x+xdim0_advec_cell_kernel4_ydir*(y)+xdim0_advec_cell_kernel4_ydir*ydim0_advec_cell_kernel4_ydir*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_advec_cell_kernel4_ydir*(y)+xdim1_advec_cell_kernel4_ydir*ydim1_advec_cell_kernel4_ydir*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_advec_cell_kernel4_ydir*(y)+xdim2_advec_cell_kernel4_ydir*ydim2_advec_cell_kernel4_ydir*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_advec_cell_kernel4_ydir*(y)+xdim3_advec_cell_kernel4_ydir*ydim3_advec_cell_kernel4_ydir*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_advec_cell_kernel4_ydir*(y)+xdim4_advec_cell_kernel4_ydir*ydim4_advec_cell_kernel4_ydir*(z))
#define OPS_ACC5(x,y,z) (x+xdim5_advec_cell_kernel4_ydir*(y)+xdim5_advec_cell_kernel4_ydir*ydim5_advec_cell_kernel4_ydir*(z))
#define OPS_ACC6(x,y,z) (x+xdim6_advec_cell_kernel4_ydir*(y)+xdim6_advec_cell_kernel4_ydir*ydim6_advec_cell_kernel4_ydir*(z))
#define OPS_ACC7(x,y,z) (x+xdim7_advec_cell_kernel4_ydir*(y)+xdim7_advec_cell_kernel4_ydir*ydim7_advec_cell_kernel4_ydir*(z))
#define OPS_ACC8(x,y,z) (x+xdim8_advec_cell_kernel4_ydir*(y)+xdim8_advec_cell_kernel4_ydir*ydim8_advec_cell_kernel4_ydir*(z))
#define OPS_ACC9(x,y,z) (x+xdim9_advec_cell_kernel4_ydir*(y)+xdim9_advec_cell_kernel4_ydir*ydim9_advec_cell_kernel4_ydir*(z))
#define OPS_ACC10(x,y,z) (x+xdim10_advec_cell_kernel4_ydir*(y)+xdim10_advec_cell_kernel4_ydir*ydim10_advec_cell_kernel4_ydir*(z))
//user function
__device__
inline void advec_cell_kernel4_ydir_gpu( double *density1, double *energy1,
const double *mass_flux_y, const double *vol_flux_y,
const double *pre_vol, const double *post_vol,
double *pre_mass, double *post_mass,
double *advec_vol, double *post_ener,
const double *ener_flux) {
pre_mass[OPS_ACC6(0,0,0)] = density1[OPS_ACC0(0,0,0)] * pre_vol[OPS_ACC4(0,0,0)];
post_mass[OPS_ACC7(0,0,0)] = pre_mass[OPS_ACC6(0,0,0)] + mass_flux_y[OPS_ACC2(0,0,0)] - mass_flux_y[OPS_ACC2(0,1,0)];
post_ener[OPS_ACC9(0,0,0)] = ( energy1[OPS_ACC1(0,0,0)] * pre_mass[OPS_ACC6(0,0,0)] + ener_flux[OPS_ACC10(0,0,0)] - ener_flux[OPS_ACC10(0,1,0)])/post_mass[OPS_ACC7(0,0,0)];
advec_vol[OPS_ACC8(0,0,0)] = pre_vol[OPS_ACC4(0,0,0)] + vol_flux_y[OPS_ACC3(0,0,0)] - vol_flux_y[OPS_ACC3(0,1,0)];
density1[OPS_ACC0(0,0,0)] = post_mass[OPS_ACC7(0,0,0)]/advec_vol[OPS_ACC8(0,0,0)];
energy1[OPS_ACC1(0,0,0)] = post_ener[OPS_ACC9(0,0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
#undef OPS_ACC8
#undef OPS_ACC9
#undef OPS_ACC10
__global__ void ops_advec_cell_kernel4_ydir(
double* __restrict arg0,
double* __restrict arg1,
const double* __restrict arg2,
const double* __restrict arg3,
const double* __restrict arg4,
const double* __restrict arg5,
double* __restrict arg6,
double* __restrict arg7,
double* __restrict arg8,
double* __restrict arg9,
const double* __restrict arg10,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_advec_cell_kernel4_ydir + idx_z * 1*1 * xdim0_advec_cell_kernel4_ydir * ydim0_advec_cell_kernel4_ydir;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_advec_cell_kernel4_ydir + idx_z * 1*1 * xdim1_advec_cell_kernel4_ydir * ydim1_advec_cell_kernel4_ydir;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_advec_cell_kernel4_ydir + idx_z * 1*1 * xdim2_advec_cell_kernel4_ydir * ydim2_advec_cell_kernel4_ydir;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_advec_cell_kernel4_ydir + idx_z * 1*1 * xdim3_advec_cell_kernel4_ydir * ydim3_advec_cell_kernel4_ydir;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_advec_cell_kernel4_ydir + idx_z * 1*1 * xdim4_advec_cell_kernel4_ydir * ydim4_advec_cell_kernel4_ydir;
arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_advec_cell_kernel4_ydir + idx_z * 1*1 * xdim5_advec_cell_kernel4_ydir * ydim5_advec_cell_kernel4_ydir;
arg6 += idx_x * 1*1 + idx_y * 1*1 * xdim6_advec_cell_kernel4_ydir + idx_z * 1*1 * xdim6_advec_cell_kernel4_ydir * ydim6_advec_cell_kernel4_ydir;
arg7 += idx_x * 1*1 + idx_y * 1*1 * xdim7_advec_cell_kernel4_ydir + idx_z * 1*1 * xdim7_advec_cell_kernel4_ydir * ydim7_advec_cell_kernel4_ydir;
arg8 += idx_x * 1*1 + idx_y * 1*1 * xdim8_advec_cell_kernel4_ydir + idx_z * 1*1 * xdim8_advec_cell_kernel4_ydir * ydim8_advec_cell_kernel4_ydir;
arg9 += idx_x * 1*1 + idx_y * 1*1 * xdim9_advec_cell_kernel4_ydir + idx_z * 1*1 * xdim9_advec_cell_kernel4_ydir * ydim9_advec_cell_kernel4_ydir;
arg10 += idx_x * 1*1 + idx_y * 1*1 * xdim10_advec_cell_kernel4_ydir + idx_z * 1*1 * xdim10_advec_cell_kernel4_ydir * ydim10_advec_cell_kernel4_ydir;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_cell_kernel4_ydir_gpu(arg0, arg1, arg2, arg3,
arg4, arg5, arg6, arg7, arg8,
arg9, arg10);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_cell_kernel4_ydir(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8,
ops_arg arg9, ops_arg arg10) {
#else
void ops_par_loop_advec_cell_kernel4_ydir_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
ops_arg arg8 = desc->args[8];
ops_arg arg9 = desc->args[9];
ops_arg arg10 = desc->args[10];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[11] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,11,range,116)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(116,"advec_cell_kernel4_ydir");
OPS_kernels[116].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
int xdim7 = args[7].dat->size[0];
int ydim7 = args[7].dat->size[1];
int xdim8 = args[8].dat->size[0];
int ydim8 = args[8].dat->size[1];
int xdim9 = args[9].dat->size[0];
int ydim9 = args[9].dat->size[1];
int xdim10 = args[10].dat->size[0];
int ydim10 = args[10].dat->size[1];
if (xdim0 != xdim0_advec_cell_kernel4_ydir_h || ydim0 != ydim0_advec_cell_kernel4_ydir_h || xdim1 != xdim1_advec_cell_kernel4_ydir_h || ydim1 != ydim1_advec_cell_kernel4_ydir_h || xdim2 != xdim2_advec_cell_kernel4_ydir_h || ydim2 != ydim2_advec_cell_kernel4_ydir_h || xdim3 != xdim3_advec_cell_kernel4_ydir_h || ydim3 != ydim3_advec_cell_kernel4_ydir_h || xdim4 != xdim4_advec_cell_kernel4_ydir_h || ydim4 != ydim4_advec_cell_kernel4_ydir_h || xdim5 != xdim5_advec_cell_kernel4_ydir_h || ydim5 != ydim5_advec_cell_kernel4_ydir_h || xdim6 != xdim6_advec_cell_kernel4_ydir_h || ydim6 != ydim6_advec_cell_kernel4_ydir_h || xdim7 != xdim7_advec_cell_kernel4_ydir_h || ydim7 != ydim7_advec_cell_kernel4_ydir_h || xdim8 != xdim8_advec_cell_kernel4_ydir_h || ydim8 != ydim8_advec_cell_kernel4_ydir_h || xdim9 != xdim9_advec_cell_kernel4_ydir_h || ydim9 != ydim9_advec_cell_kernel4_ydir_h || xdim10 != xdim10_advec_cell_kernel4_ydir_h || ydim10 != ydim10_advec_cell_kernel4_ydir_h) {
cudaMemcpyToSymbol( xdim0_advec_cell_kernel4_ydir, &xdim0, sizeof(int) );
xdim0_advec_cell_kernel4_ydir_h = xdim0;
cudaMemcpyToSymbol( ydim0_advec_cell_kernel4_ydir, &ydim0, sizeof(int) );
ydim0_advec_cell_kernel4_ydir_h = ydim0;
cudaMemcpyToSymbol( xdim1_advec_cell_kernel4_ydir, &xdim1, sizeof(int) );
xdim1_advec_cell_kernel4_ydir_h = xdim1;
cudaMemcpyToSymbol( ydim1_advec_cell_kernel4_ydir, &ydim1, sizeof(int) );
ydim1_advec_cell_kernel4_ydir_h = ydim1;
cudaMemcpyToSymbol( xdim2_advec_cell_kernel4_ydir, &xdim2, sizeof(int) );
xdim2_advec_cell_kernel4_ydir_h = xdim2;
cudaMemcpyToSymbol( ydim2_advec_cell_kernel4_ydir, &ydim2, sizeof(int) );
ydim2_advec_cell_kernel4_ydir_h = ydim2;
cudaMemcpyToSymbol( xdim3_advec_cell_kernel4_ydir, &xdim3, sizeof(int) );
xdim3_advec_cell_kernel4_ydir_h = xdim3;
cudaMemcpyToSymbol( ydim3_advec_cell_kernel4_ydir, &ydim3, sizeof(int) );
ydim3_advec_cell_kernel4_ydir_h = ydim3;
cudaMemcpyToSymbol( xdim4_advec_cell_kernel4_ydir, &xdim4, sizeof(int) );
xdim4_advec_cell_kernel4_ydir_h = xdim4;
cudaMemcpyToSymbol( ydim4_advec_cell_kernel4_ydir, &ydim4, sizeof(int) );
ydim4_advec_cell_kernel4_ydir_h = ydim4;
cudaMemcpyToSymbol( xdim5_advec_cell_kernel4_ydir, &xdim5, sizeof(int) );
xdim5_advec_cell_kernel4_ydir_h = xdim5;
cudaMemcpyToSymbol( ydim5_advec_cell_kernel4_ydir, &ydim5, sizeof(int) );
ydim5_advec_cell_kernel4_ydir_h = ydim5;
cudaMemcpyToSymbol( xdim6_advec_cell_kernel4_ydir, &xdim6, sizeof(int) );
xdim6_advec_cell_kernel4_ydir_h = xdim6;
cudaMemcpyToSymbol( ydim6_advec_cell_kernel4_ydir, &ydim6, sizeof(int) );
ydim6_advec_cell_kernel4_ydir_h = ydim6;
cudaMemcpyToSymbol( xdim7_advec_cell_kernel4_ydir, &xdim7, sizeof(int) );
xdim7_advec_cell_kernel4_ydir_h = xdim7;
cudaMemcpyToSymbol( ydim7_advec_cell_kernel4_ydir, &ydim7, sizeof(int) );
ydim7_advec_cell_kernel4_ydir_h = ydim7;
cudaMemcpyToSymbol( xdim8_advec_cell_kernel4_ydir, &xdim8, sizeof(int) );
xdim8_advec_cell_kernel4_ydir_h = xdim8;
cudaMemcpyToSymbol( ydim8_advec_cell_kernel4_ydir, &ydim8, sizeof(int) );
ydim8_advec_cell_kernel4_ydir_h = ydim8;
cudaMemcpyToSymbol( xdim9_advec_cell_kernel4_ydir, &xdim9, sizeof(int) );
xdim9_advec_cell_kernel4_ydir_h = xdim9;
cudaMemcpyToSymbol( ydim9_advec_cell_kernel4_ydir, &ydim9, sizeof(int) );
ydim9_advec_cell_kernel4_ydir_h = ydim9;
cudaMemcpyToSymbol( xdim10_advec_cell_kernel4_ydir, &xdim10, sizeof(int) );
xdim10_advec_cell_kernel4_ydir_h = xdim10;
cudaMemcpyToSymbol( ydim10_advec_cell_kernel4_ydir, &ydim10, sizeof(int) );
ydim10_advec_cell_kernel4_ydir_h = ydim10;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
int dat7 = (OPS_soa ? args[7].dat->type_size : args[7].dat->elem_size);
int dat8 = (OPS_soa ? args[8].dat->type_size : args[8].dat->elem_size);
int dat9 = (OPS_soa ? args[9].dat->type_size : args[9].dat->elem_size);
int dat10 = (OPS_soa ? args[10].dat->type_size : args[10].dat->elem_size);
char *p_a[11];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
int base7 = args[7].dat->base_offset +
dat7 * 1 * (start[0] * args[7].stencil->stride[0]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
(start[1] * args[7].stencil->stride[1]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
args[7].dat->size[1] *
(start[2] * args[7].stencil->stride[2]);
p_a[7] = (char *)args[7].data_d + base7;
int base8 = args[8].dat->base_offset +
dat8 * 1 * (start[0] * args[8].stencil->stride[0]);
base8 = base8+ dat8 *
args[8].dat->size[0] *
(start[1] * args[8].stencil->stride[1]);
base8 = base8+ dat8 *
args[8].dat->size[0] *
args[8].dat->size[1] *
(start[2] * args[8].stencil->stride[2]);
p_a[8] = (char *)args[8].data_d + base8;
int base9 = args[9].dat->base_offset +
dat9 * 1 * (start[0] * args[9].stencil->stride[0]);
base9 = base9+ dat9 *
args[9].dat->size[0] *
(start[1] * args[9].stencil->stride[1]);
base9 = base9+ dat9 *
args[9].dat->size[0] *
args[9].dat->size[1] *
(start[2] * args[9].stencil->stride[2]);
p_a[9] = (char *)args[9].data_d + base9;
int base10 = args[10].dat->base_offset +
dat10 * 1 * (start[0] * args[10].stencil->stride[0]);
base10 = base10+ dat10 *
args[10].dat->size[0] *
(start[1] * args[10].stencil->stride[1]);
base10 = base10+ dat10 *
args[10].dat->size[0] *
args[10].dat->size[1] *
(start[2] * args[10].stencil->stride[2]);
p_a[10] = (char *)args[10].data_d + base10;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 11);
ops_halo_exchanges(args,11,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[116].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_advec_cell_kernel4_ydir<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (double *)p_a[7],
(double *)p_a[8], (double *)p_a[9],
(double *)p_a[10],x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[116].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 11);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[6],range);
ops_set_halo_dirtybit3(&args[7],range);
ops_set_halo_dirtybit3(&args[8],range);
ops_set_halo_dirtybit3(&args[9],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[116].mpi_time += t2-t1;
OPS_kernels[116].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[116].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[116].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[116].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[116].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[116].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[116].transfer += ops_compute_transfer(dim, start, end, &arg6);
OPS_kernels[116].transfer += ops_compute_transfer(dim, start, end, &arg7);
OPS_kernels[116].transfer += ops_compute_transfer(dim, start, end, &arg8);
OPS_kernels[116].transfer += ops_compute_transfer(dim, start, end, &arg9);
OPS_kernels[116].transfer += ops_compute_transfer(dim, start, end, &arg10);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_cell_kernel4_ydir(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 116;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 116;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 11;
desc->args = (ops_arg*)malloc(11*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
desc->hash = ((desc->hash << 5) + desc->hash) + arg7.dat->index;
desc->args[8] = arg8;
desc->hash = ((desc->hash << 5) + desc->hash) + arg8.dat->index;
desc->args[9] = arg9;
desc->hash = ((desc->hash << 5) + desc->hash) + arg9.dat->index;
desc->args[10] = arg10;
desc->hash = ((desc->hash << 5) + desc->hash) + arg10.dat->index;
desc->function = ops_par_loop_advec_cell_kernel4_ydir_execute;
if (OPS_diags > 1) {
ops_timing_realloc(116,"advec_cell_kernel4_ydir");
}
ops_enqueue_kernel(desc);
}
#endif
|
94fd9ff76e5795dc0e59fc9ee36659b50a4c0120.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "star3d3r-64x16-2-256_kernel.hu"
#define BENCH_DIM 3
#define BENCH_FPP 37
#define BENCH_RAD 3
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 7 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 3 - 3);
const AN5D_TYPE __c1Pad = (3);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 3 - 3);
const AN5D_TYPE __c2Pad = (3);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 3 - 3);
const AN5D_TYPE __c3Pad = (3);
#define __c3 c3
const AN5D_TYPE __halo1 = 3;
const AN5D_TYPE __halo2 = 3;
const AN5D_TYPE __halo3 = 3;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 4;
const AN5D_TYPE __side3Len = 52;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
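// worked example for this tile shape: __OlLen2 = __OlLen3 = 3*2 = 6, so
// __side2LenOl = 4 + 2*6 = 16, __side3LenOl = 52 + 2*6 = 64 and
// __blockSize = 16*64 = 1024 threads per block, matching the
// "64x16-2-256" encoded in the kernel header name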
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 10;
const AN5D_TYPE __side3Len = 58;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 10;
const AN5D_TYPE __side3Len = 58;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 10;
const AN5D_TYPE __side3Len = 58;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 10;
const AN5D_TYPE __side3Len = 58;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 10;
const AN5D_TYPE __side3Len = 58;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 10;
const AN5D_TYPE __side3Len = 58;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
0.25000f * A[t%2][i][j][k]
+ 0.04276f * A[t%2][i][j][k-3] + 0.04176f * A[t%2][i][j][k-2]
+ 0.04076f * A[t%2][i][j][k-1] + 0.04046f * A[t%2][i][j][k+1]
+ 0.04146f * A[t%2][i][j][k+2] + 0.04246f * A[t%2][i][j][k+3]
+ 0.04096f * A[t%2][i-1][j][k] + 0.04066f * A[t%2][i+1][j][k]
+ 0.04086f * A[t%2][i][j-1][k] + 0.04056f * A[t%2][i][j+1][k]
+ 0.04196f * A[t%2][i-2][j][k] + 0.04166f * A[t%2][i+2][j][k]
+ 0.04186f * A[t%2][i][j-2][k] + 0.04156f * A[t%2][i][j+2][k]
+ 0.04296f * A[t%2][i-3][j][k] + 0.04266f * A[t%2][i+3][j][k]
+ 0.04286f * A[t%2][i][j-3][k] + 0.04256f * A[t%2][i][j+3][k];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
94fd9ff76e5795dc0e59fc9ee36659b50a4c0120.cu
|
#include <assert.h>
#include <stdio.h>
#include "star3d3r-64x16-2-256_kernel.hu"
#define BENCH_DIM 3
#define BENCH_FPP 37
#define BENCH_RAD 3
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 7 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 3 - 3);
const AN5D_TYPE __c1Pad = (3);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 3 - 3);
const AN5D_TYPE __c2Pad = (3);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 3 - 3);
const AN5D_TYPE __c3Pad = (3);
#define __c3 c3
const AN5D_TYPE __halo1 = 3;
const AN5D_TYPE __halo2 = 3;
const AN5D_TYPE __halo3 = 3;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 4;
const AN5D_TYPE __side3Len = 52;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 10;
const AN5D_TYPE __side3Len = 58;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 10;
const AN5D_TYPE __side3Len = 58;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 10;
const AN5D_TYPE __side3Len = 58;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 10;
const AN5D_TYPE __side3Len = 58;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 10;
const AN5D_TYPE __side3Len = 58;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 10;
const AN5D_TYPE __side3Len = 58;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
0.25000f * A[t%2][i][j][k]
+ 0.04276f * A[t%2][i][j][k-3] + 0.04176f * A[t%2][i][j][k-2]
+ 0.04076f * A[t%2][i][j][k-1] + 0.04046f * A[t%2][i][j][k+1]
+ 0.04146f * A[t%2][i][j][k+2] + 0.04246f * A[t%2][i][j][k+3]
+ 0.04096f * A[t%2][i-1][j][k] + 0.04066f * A[t%2][i+1][j][k]
+ 0.04086f * A[t%2][i][j-1][k] + 0.04056f * A[t%2][i][j+1][k]
+ 0.04196f * A[t%2][i-2][j][k] + 0.04166f * A[t%2][i+2][j][k]
+ 0.04186f * A[t%2][i][j-2][k] + 0.04156f * A[t%2][i][j+2][k]
+ 0.04296f * A[t%2][i-3][j][k] + 0.04266f * A[t%2][i+3][j][k]
+ 0.04286f * A[t%2][i][j-3][k] + 0.04256f * A[t%2][i][j+3][k];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
5607f6775d6eb6713c524c27ea7bd7e6183ee96b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <cmath>
#include "multiply.h"
bool isSquare(int num){ return (floor (sqrt(num)) == sqrt(num));}
int main (int argc, char* argv[]){
//variables
int matDim, blockDim, threadDim;
// get inputs
if (argc < 4){
std::cout << "Not enough arguments. <<matrix dimension>> << block dimension>> << thread dimension>>" << std::endl;
return 1;
}
else{
matDim = atoi (argv [1]);
blockDim = atoi(argv [2]);
threadDim = atoi(argv [3]);
}
hipDeviceProp_t prop;
hipGetDeviceProperties( &prop, 0 );
// bounds checking
/*if ( matDim <=0 || matDim >= 32000){
std::cout << "Matrix dimension not valid. Must be between 0 and 32000." << std::endl;
return 1;
}*/
if ( blockDim <=0 || blockDim >= 25000 ){
std::cout << "Block dimension not valid. Must be between 0 and 25000." << std::endl;
return 1;
}
if ( threadDim <=0 || threadDim > sqrt(prop.maxThreadsPerBlock) ){
std::cout << "Thread dimension not valid. Must be between 0 and " << sqrt(prop.maxThreadsPerBlock) << "." << std::endl;
return 1;
}
/*if ( blockDim * threadDim != matDim){
std::cout << "Not enough/too many blocks and threads for given matrix dimensions" << std::endl;
return 1;
}*/
// initialize more variables
dim3 grid (blockDim, blockDim);
dim3 block (threadDim , threadDim );
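// launch-geometry example (assuming multiply() maps one thread to one output
// element, which is what the commented-out blockDim*threadDim == matDim check
// above implies): matDim = 64, blockDim = 4, threadDim = 16 gives a 4x4 grid of
// 16x16 thread blocks, i.e. exactly one thread per entry of the 64x64 MatC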
//create arrays
float *MatA, *MatB, *MatC;
//alloc memory
hipMallocManaged( (void**)&MatA, (float)pow(matDim, 2) * sizeof(float) );
hipMallocManaged( (void**)&MatB, (float)pow(matDim, 2) * sizeof(float) );
hipMallocManaged( (void**)&MatC, (float)pow(matDim, 2) * sizeof(float) );
for (int i=0; i < (int)pow(matDim, 2); i++) {
MatA[i] = (float) i;
MatB[i] = (float) i;
}
// begin timing
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord( start, 0 );
//multiply
hipLaunchKernelGGL(( multiply) , dim3(grid), dim3(block), 0, 0, MatA, MatB, MatC, matDim);
//end time
hipEventRecord( end, 0 );
hipEventSynchronize( end );
//for testing output
for (int i = 0; i < matDim; i++){
for (int j = 0; j < matDim; j++){
printf ("%.2f \t", MatC[(i*matDim)+j]);
//std::cout << MatC[(i*matDim)+j] << "\t";
}
std::cout << std::endl;
}
float elapsedTime;
hipEventElapsedTime( &elapsedTime, start, end );
std::cout << "Time: " << elapsedTime << " ms." << std::endl;
//dealloc memory
hipEventDestroy( start );
hipEventDestroy( end );
hipFree (MatA);
hipFree (MatB);
hipFree (MatC);
}
|
5607f6775d6eb6713c524c27ea7bd7e6183ee96b.cu
|
#include <iostream>
#include <stdio.h>
#include <cmath>
#include "multiply.h"
bool isSquare(int num){ return (floor (sqrt(num)) == sqrt(num));}
int main (int argc, char* argv[]){
//variables
int matDim, blockDim, threadDim;
// get inputs
if (argc < 4){
std::cout << "Not enough arguments. <<matrix dimension>> << block dimension>> << thread dimension>>" << std::endl;
return 1;
}
else{
matDim = atoi (argv [1]);
blockDim = atoi(argv [2]);
threadDim = atoi(argv [3]);
}
cudaDeviceProp prop;
cudaGetDeviceProperties( &prop, 0 );
// bounds checking
/*if ( matDim <=0 || matDim >= 32000){
std::cout << "Matrix dimension not valid. Must be between 0 and 32000." << std::endl;
return 1;
}*/
if ( blockDim <=0 || blockDim >= 25000 ){
std::cout << "Block dimension not valid. Must be between 0 and 25000." << std::endl;
return 1;
}
if ( threadDim <=0 || threadDim > sqrt(prop.maxThreadsPerBlock) ){
std::cout << "Thread dimension not valid. Must be between 0 and " << sqrt(prop.maxThreadsPerBlock) << "." << std::endl;
return 1;
}
/*if ( blockDim * threadDim != matDim){
std::cout << "Not enough/too many blocks and threads for given matrix dimensions" << std::endl;
return 1;
}*/
// initialize more variables
dim3 grid (blockDim, blockDim);
dim3 block (threadDim , threadDim );
//create arrays
float *MatA, *MatB, *MatC;
//alloc memory
cudaMallocManaged( (void**)&MatA, (float)pow(matDim, 2) * sizeof(float) );
cudaMallocManaged( (void**)&MatB, (float)pow(matDim, 2) * sizeof(float) );
cudaMallocManaged( (void**)&MatC, (float)pow(matDim, 2) * sizeof(float) );
for (int i=0; i < (int)pow(matDim, 2); i++) {
MatA[i] = (float) i;
MatB[i] = (float) i;
}
// begin timing
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord( start, 0 );
//multiply
multiply <<<grid, block>>> (MatA, MatB, MatC, matDim);
//end time
cudaEventRecord( end, 0 );
cudaEventSynchronize( end );
//for testing output
for (int i = 0; i < matDim; i++){
for (int j = 0; j < matDim; j++){
printf ("%.2f \t", MatC[(i*matDim)+j]);
//std::cout << MatC[(i*matDim)+j] << "\t";
}
std::cout << std::endl;
}
float elapsedTime;
cudaEventElapsedTime( &elapsedTime, start, end );
std::cout << "Time: " << elapsedTime << " ms." << std::endl;
//dealloc memory
cudaEventDestroy( start );
cudaEventDestroy( end );
cudaFree (MatA);
cudaFree (MatB);
cudaFree (MatC);
}
|
d5f9be451a84a7d6072cce8813a85fa425b8bd5a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_b1;
int xdim0_update_halo_kernel1_b1_h = -1;
__constant__ int xdim1_update_halo_kernel1_b1;
int xdim1_update_halo_kernel1_b1_h = -1;
__constant__ int xdim2_update_halo_kernel1_b1;
int xdim2_update_halo_kernel1_b1_h = -1;
__constant__ int xdim3_update_halo_kernel1_b1;
int xdim3_update_halo_kernel1_b1_h = -1;
__constant__ int xdim4_update_halo_kernel1_b1;
int xdim4_update_halo_kernel1_b1_h = -1;
__constant__ int xdim5_update_halo_kernel1_b1;
int xdim5_update_halo_kernel1_b1_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#define OPS_ACC0(x,y) (x+xdim0_update_halo_kernel1_b1*(y))
#define OPS_ACC1(x,y) (x+xdim1_update_halo_kernel1_b1*(y))
#define OPS_ACC2(x,y) (x+xdim2_update_halo_kernel1_b1*(y))
#define OPS_ACC3(x,y) (x+xdim3_update_halo_kernel1_b1*(y))
#define OPS_ACC4(x,y) (x+xdim4_update_halo_kernel1_b1*(y))
#define OPS_ACC5(x,y) (x+xdim5_update_halo_kernel1_b1*(y))
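//these macros turn a 2D stencil offset into a flat row-major index:
//OPS_ACCn(x,y) = x + y*xdimn_update_halo_kernel1_b1, where the xdim constants
//are copied to the device with hipMemcpyToSymbol in the host stub below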
//user function
__device__
inline void update_halo_kernel1_b1_gpu(double *density0,
double *energy0, double *energy1,
double *u, double *p,
double *sd , const int* fields) {
if(fields[FIELD_DENSITY] == 1) density0[OPS_ACC0(0,0)] = density0[OPS_ACC0(0,1)];
if(fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC1(0,0)] = energy0[OPS_ACC1(0,1)];
if(fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC2(0,0)] = energy1[OPS_ACC2(0,1)];
if(fields[FIELD_U] == 1) u[OPS_ACC3(0,0)] = u[OPS_ACC3(0,1)];
if(fields[FIELD_P] == 1) p[OPS_ACC4(0,0)] = p[OPS_ACC4(0,1)];
if(fields[FIELD_SD] == 1) sd[OPS_ACC5(0,0)] = sd[OPS_ACC5(0,1)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
__global__ void ops_update_halo_kernel1_b1(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
const int* __restrict arg6,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel1_b1;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel1_b1;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_update_halo_kernel1_b1;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_update_halo_kernel1_b1;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_update_halo_kernel1_b1;
arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_update_halo_kernel1_b1;
if (idx_x < size0 && idx_y < size1) {
update_halo_kernel1_b1_gpu(arg0, arg1, arg2, arg3,
arg4, arg5, arg6);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_b1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6) {
#else
void ops_par_loop_update_halo_kernel1_b1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[7] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,7,range,50)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(50,"update_halo_kernel1_b1");
OPS_kernels[50].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<2; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
if (xdim0 != xdim0_update_halo_kernel1_b1_h || xdim1 != xdim1_update_halo_kernel1_b1_h || xdim2 != xdim2_update_halo_kernel1_b1_h || xdim3 != xdim3_update_halo_kernel1_b1_h || xdim4 != xdim4_update_halo_kernel1_b1_h || xdim5 != xdim5_update_halo_kernel1_b1_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel1_b1, &xdim0, sizeof(int) );
xdim0_update_halo_kernel1_b1_h = xdim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel1_b1, &xdim1, sizeof(int) );
xdim1_update_halo_kernel1_b1_h = xdim1;
hipMemcpyToSymbol( xdim2_update_halo_kernel1_b1, &xdim2, sizeof(int) );
xdim2_update_halo_kernel1_b1_h = xdim2;
hipMemcpyToSymbol( xdim3_update_halo_kernel1_b1, &xdim3, sizeof(int) );
xdim3_update_halo_kernel1_b1_h = xdim3;
hipMemcpyToSymbol( xdim4_update_halo_kernel1_b1, &xdim4, sizeof(int) );
xdim4_update_halo_kernel1_b1_h = xdim4;
hipMemcpyToSymbol( xdim5_update_halo_kernel1_b1, &xdim5, sizeof(int) );
xdim5_update_halo_kernel1_b1_h = xdim5;
}
int *arg6h = (int *)arg6.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg6.data = OPS_consts_h + consts_bytes;
arg6.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg6.data)[d] = arg6h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
char *p_a[7];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
p_a[5] = (char *)args[5].data_d + base5;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 7);
ops_halo_exchanges(args,7,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[50].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel1_b1), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(int *)arg6.data_d,x_size, y_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[50].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 7);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[50].mpi_time += t2-t1;
OPS_kernels[50].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[50].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[50].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[50].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[50].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[50].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_b1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 50;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 50;
for ( int i=0; i<4; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 7;
desc->args = (ops_arg*)malloc(7*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg6.data,NUM_FIELDS*sizeof(int));
desc->args[6].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_b1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(50,"update_halo_kernel1_b1");
}
ops_enqueue_kernel(desc);
}
#endif
|
d5f9be451a84a7d6072cce8813a85fa425b8bd5a.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_b1;
int xdim0_update_halo_kernel1_b1_h = -1;
__constant__ int xdim1_update_halo_kernel1_b1;
int xdim1_update_halo_kernel1_b1_h = -1;
__constant__ int xdim2_update_halo_kernel1_b1;
int xdim2_update_halo_kernel1_b1_h = -1;
__constant__ int xdim3_update_halo_kernel1_b1;
int xdim3_update_halo_kernel1_b1_h = -1;
__constant__ int xdim4_update_halo_kernel1_b1;
int xdim4_update_halo_kernel1_b1_h = -1;
__constant__ int xdim5_update_halo_kernel1_b1;
int xdim5_update_halo_kernel1_b1_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#define OPS_ACC0(x,y) (x+xdim0_update_halo_kernel1_b1*(y))
#define OPS_ACC1(x,y) (x+xdim1_update_halo_kernel1_b1*(y))
#define OPS_ACC2(x,y) (x+xdim2_update_halo_kernel1_b1*(y))
#define OPS_ACC3(x,y) (x+xdim3_update_halo_kernel1_b1*(y))
#define OPS_ACC4(x,y) (x+xdim4_update_halo_kernel1_b1*(y))
#define OPS_ACC5(x,y) (x+xdim5_update_halo_kernel1_b1*(y))
//user function
__device__
inline void update_halo_kernel1_b1_gpu(double *density0,
double *energy0, double *energy1,
double *u, double *p,
double *sd , const int* fields) {
if(fields[FIELD_DENSITY] == 1) density0[OPS_ACC0(0,0)] = density0[OPS_ACC0(0,1)];
if(fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC1(0,0)] = energy0[OPS_ACC1(0,1)];
if(fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC2(0,0)] = energy1[OPS_ACC2(0,1)];
if(fields[FIELD_U] == 1) u[OPS_ACC3(0,0)] = u[OPS_ACC3(0,1)];
if(fields[FIELD_P] == 1) p[OPS_ACC4(0,0)] = p[OPS_ACC4(0,1)];
if(fields[FIELD_SD] == 1) sd[OPS_ACC5(0,0)] = sd[OPS_ACC5(0,1)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
__global__ void ops_update_halo_kernel1_b1(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
const int* __restrict arg6,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel1_b1;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel1_b1;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_update_halo_kernel1_b1;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_update_halo_kernel1_b1;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_update_halo_kernel1_b1;
arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_update_halo_kernel1_b1;
if (idx_x < size0 && idx_y < size1) {
update_halo_kernel1_b1_gpu(arg0, arg1, arg2, arg3,
arg4, arg5, arg6);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_b1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6) {
#else
void ops_par_loop_update_halo_kernel1_b1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[7] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,7,range,50)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(50,"update_halo_kernel1_b1");
OPS_kernels[50].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<2; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
if (xdim0 != xdim0_update_halo_kernel1_b1_h || xdim1 != xdim1_update_halo_kernel1_b1_h || xdim2 != xdim2_update_halo_kernel1_b1_h || xdim3 != xdim3_update_halo_kernel1_b1_h || xdim4 != xdim4_update_halo_kernel1_b1_h || xdim5 != xdim5_update_halo_kernel1_b1_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel1_b1, &xdim0, sizeof(int) );
xdim0_update_halo_kernel1_b1_h = xdim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel1_b1, &xdim1, sizeof(int) );
xdim1_update_halo_kernel1_b1_h = xdim1;
cudaMemcpyToSymbol( xdim2_update_halo_kernel1_b1, &xdim2, sizeof(int) );
xdim2_update_halo_kernel1_b1_h = xdim2;
cudaMemcpyToSymbol( xdim3_update_halo_kernel1_b1, &xdim3, sizeof(int) );
xdim3_update_halo_kernel1_b1_h = xdim3;
cudaMemcpyToSymbol( xdim4_update_halo_kernel1_b1, &xdim4, sizeof(int) );
xdim4_update_halo_kernel1_b1_h = xdim4;
cudaMemcpyToSymbol( xdim5_update_halo_kernel1_b1, &xdim5, sizeof(int) );
xdim5_update_halo_kernel1_b1_h = xdim5;
}
int *arg6h = (int *)arg6.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg6.data = OPS_consts_h + consts_bytes;
arg6.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg6.data)[d] = arg6h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
char *p_a[7];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
p_a[5] = (char *)args[5].data_d + base5;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 7);
ops_halo_exchanges(args,7,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[50].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0)
ops_update_halo_kernel1_b1<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(int *)arg6.data_d,x_size, y_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[50].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 7);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[50].mpi_time += t2-t1;
OPS_kernels[50].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[50].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[50].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[50].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[50].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[50].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_b1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 50;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 50;
for ( int i=0; i<4; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 7;
desc->args = (ops_arg*)malloc(7*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg6.data,NUM_FIELDS*sizeof(int));
desc->args[6].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_b1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(50,"update_halo_kernel1_b1");
}
ops_enqueue_kernel(desc);
}
#endif
|
2eb57f98d9e0d66d05cfb5c94107706999e0249b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hipblas.h>
#include <mpi.h>
extern "C" {
// #include <float/float32.h>
// #include <float/slapack.h>
#include <Rinternals.h>
#include <stdlib.h>
#include "../common.h"
#include "../mpi_utils.h"
#include "../nm.h"
}
#include "blas.hh"
#include "cu_utils.hh"
typedef struct {
hipblasHandle_t handle;
int m;
int n;
const double *__restrict__ x;
const int *__restrict__ y;
double *__restrict__ w;
double *__restrict__ work;
double *__restrict__ s;
MPI_Comm *__restrict__ comm;
} svm_param_t;
static inline double euc_norm_sq(hipblasHandle_t handle, const int n, const double *const __restrict__ x)
{
double norm;
hipblasStatus_t ret = hipblasDnrm2(handle, n, x, 1, &norm);
return norm;
}
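// note: despite the _sq suffix, hipblasDnrm2 returns the Euclidean norm itself
// (not its square) and the status in `ret` is never checked; svm_cost below uses
// the returned value as-is in its 0.5/m regularisation term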
__global__ static void hinge_loss_sum(double *s, const int m, const int *const __restrict__ y, const double *const __restrict__ work)
{
int tid = threadIdx.x;
int i = tid + blockIdx.x*blockDim.x;
if (i >= m)
return;
__shared__ double temp[TPB];
double tmp = 1.0 - y[i]*work[i];
if (tmp < 0.0)
temp[tid] = 0.0;
else
temp[tid] = tmp;
__syncthreads();
if (tid == 0)
{
double sum = 0.0;
for (int i=0; i<TPB; i++)
sum += temp[i];
atomicAdd(s, sum);
}
}
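// reduction sketch: each thread writes its hinge term max(0, 1 - y[i]*work[i]) into
// shared memory and thread 0 of every block adds the block-local sum to *s with one
// atomicAdd. Caveat: threads with i >= m return before __syncthreads() and never
// initialise temp[tid], so a ragged final block sums whatever those slots hold.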
static inline double svm_cost(hipblasHandle_t handle,
const int m, const int n, const double *const __restrict__ x,
const int *const __restrict__ y, const double *const __restrict__ w,
double *const __restrict__ s, double *const __restrict__ work,
const MPI_Comm *const __restrict__ comm)
{
int check;
double J;
double norm;
double s_cpu;
int nb = m / TPB;
if (m % TPB)
nb++;
// J_local = 1/m * sum(hinge_loss(1.0 - y * (x %*% w)))
norm = euc_norm_sq(handle, n, w);
mvm(handle, m, n, x, w, work);
hipMemset(s, 0, 1*sizeof(*s));
hipLaunchKernelGGL(( hinge_loss_sum), dim3(nb), dim3(TPB), 0, 0, s, m, y, work);
hipMemcpy(&s_cpu, s, sizeof(*s), hipMemcpyDeviceToHost);
J = ((double) 1.0/m) * s_cpu;
// J = allreduce(J_local) + 1/m * 0.5 * norm2(w)
check = MPI_Allreduce(MPI_IN_PLACE, &J, 1, MPI_DOUBLE, MPI_SUM, *comm);
MPI_CHECK(comm, check);
J += ((double) 1.0/m) * 0.5 * norm;
return J;
}
static inline void svm_nmwrap(int n, point_t *point, const void *arg)
{
const svm_param_t *args = (const svm_param_t*) arg;
hipMemcpy(args->w, point->x, n*sizeof(double), hipMemcpyHostToDevice);
point->fx = svm_cost(args->handle, args->m, n, args->x, args->y, args->w, args->s, args->work, args->comm);
hipMemcpy(point->x, args->w, n*sizeof(double), hipMemcpyDeviceToHost);
}
static inline void svm(const int m, const int n, const double *const __restrict__ x,
const int *const __restrict__ y, double *const __restrict__ w, MPI_Comm *const __restrict__ comm,
optimset_t *const __restrict__ optimset)
{
svm_param_t args;
point_t start, solution;
hipblasHandle_t handle;
hipblasStatus_t st = hipblasCreate(&handle);
if (st != HIPBLAS_STATUS_SUCCESS)
error("hipblasCreate() failed\n");
hipblasSetPointerMode(handle, HIPBLAS_POINTER_MODE_HOST);
double *x_gpu;
int *y_gpu;
double *w_gpu;
double *work_gpu;
double *s_gpu;
hipMalloc(&x_gpu, m*n*sizeof(*x_gpu));
hipMalloc(&y_gpu, m*sizeof(*y_gpu));
hipMalloc(&w_gpu, n*sizeof(*w_gpu));
hipMalloc(&work_gpu, m*sizeof(*work_gpu));
hipMalloc(&s_gpu, sizeof(*s_gpu));
if (x_gpu == NULL || y_gpu == NULL || w_gpu == NULL || work_gpu == NULL || s_gpu == NULL)
{
CUFREE(x_gpu);
CUFREE(y_gpu);
CUFREE(w_gpu);
CUFREE(work_gpu);
CUFREE(s_gpu);
error("Unable to allocate device memory");
}
hipMemcpy(x_gpu, x, m*n*sizeof(*x), hipMemcpyHostToDevice);
hipMemcpy(y_gpu, y, m*sizeof(*y), hipMemcpyHostToDevice);
start.x = w;
memset(w, 0, n*sizeof(*w));
args.handle = handle;
args.m = m;
args.n = n;
args.x = x_gpu;
args.y = y_gpu;
args.w = w_gpu;
args.s = s_gpu;
args.work = work_gpu;
args.comm = comm;
nelder_mead(n, &start, &solution, &svm_nmwrap, &args, optimset);
for (int i=0; i<n; i++)
w[i] = solution.x[i];
hipblasDestroy(handle);
hipFree(x_gpu);
hipFree(y_gpu);
hipFree(w_gpu);
hipFree(work_gpu);
free(solution.x);
}
extern "C" SEXP R_svm(SEXP x, SEXP y, SEXP maxiter, SEXP comm_)
{
SEXP ret, ret_names, w, niters;
optimset_t opts;
MPI_Comm *comm = get_mpi_comm_from_Robj(comm_);
const int m = nrows(x);
const int n = ncols(x);
PROTECT(ret = allocVector(VECSXP, 2));
PROTECT(ret_names = allocVector(STRSXP, 2));
PROTECT(w = allocVector(REALSXP, n));
PROTECT(niters = allocVector(INTSXP, 1));
SET_VECTOR_ELT(ret, 0, w);
SET_VECTOR_ELT(ret, 1, niters);
SET_STRING_ELT(ret_names, 0, mkChar("w"));
SET_STRING_ELT(ret_names, 1, mkChar("niters"));
setAttrib(ret, R_NamesSymbol, ret_names);
set_nm_opts(INTEGER(maxiter)[0], &opts);
svm(m, n, REAL(x), INTEGER(y), REAL(w), comm, &opts);
UNPROTECT(4);
return ret;
}
|
2eb57f98d9e0d66d05cfb5c94107706999e0249b.cu
|
#include <cublas_v2.h>
#include <mpi.h>
extern "C" {
// #include <float/float32.h>
// #include <float/slapack.h>
#include <Rinternals.h>
#include <stdlib.h>
#include "../common.h"
#include "../mpi_utils.h"
#include "../nm.h"
}
#include "blas.hh"
#include "cu_utils.hh"
typedef struct {
cublasHandle_t handle;
int m;
int n;
const double *__restrict__ x;
const int *__restrict__ y;
double *__restrict__ w;
double *__restrict__ work;
double *__restrict__ s;
MPI_Comm *__restrict__ comm;
} svm_param_t;
static inline double euc_norm_sq(cublasHandle_t handle, const int n, const double *const __restrict__ x)
{
double norm;
cublasStatus_t ret = cublasDnrm2(handle, n, x, 1, &norm);
return norm;
}
__global__ static void hinge_loss_sum(double *s, const int m, const int *const __restrict__ y, const double *const __restrict__ work)
{
int tid = threadIdx.x;
int i = tid + blockIdx.x*blockDim.x;
if (i >= m)
return;
__shared__ double temp[TPB];
double tmp = 1.0 - y[i]*work[i];
if (tmp < 0.0)
temp[tid] = 0.0;
else
temp[tid] = tmp;
__syncthreads();
if (tid == 0)
{
double sum = 0.0;
for (int i=0; i<TPB; i++)
sum += temp[i];
atomicAdd(s, sum);
}
}
static inline double svm_cost(cublasHandle_t handle,
const int m, const int n, const double *const __restrict__ x,
const int *const __restrict__ y, const double *const __restrict__ w,
double *const __restrict__ s, double *const __restrict__ work,
const MPI_Comm *const __restrict__ comm)
{
int check;
double J;
double norm;
double s_cpu;
int nb = m / TPB;
if (m % TPB)
nb++;
// J_local = 1/m * sum(hinge_loss(1.0 - y * (x %*% w)))
norm = euc_norm_sq(handle, n, w);
mvm(handle, m, n, x, w, work);
cudaMemset(s, 0, 1*sizeof(*s));
hinge_loss_sum<<<nb, TPB>>>(s, m, y, work);
cudaMemcpy(&s_cpu, s, sizeof(*s), cudaMemcpyDeviceToHost);
J = ((double) 1.0/m) * s_cpu;
// J = allreduce(J_local) + 1/m * 0.5 * norm2(w)
check = MPI_Allreduce(MPI_IN_PLACE, &J, 1, MPI_DOUBLE, MPI_SUM, *comm);
MPI_CHECK(comm, check);
J += ((double) 1.0/m) * 0.5 * norm;
return J;
}
static inline void svm_nmwrap(int n, point_t *point, const void *arg)
{
const svm_param_t *args = (const svm_param_t*) arg;
cudaMemcpy(args->w, point->x, n*sizeof(double), cudaMemcpyHostToDevice);
point->fx = svm_cost(args->handle, args->m, n, args->x, args->y, args->w, args->s, args->work, args->comm);
cudaMemcpy(point->x, args->w, n*sizeof(double), cudaMemcpyDeviceToHost);
}
static inline void svm(const int m, const int n, const double *const __restrict__ x,
const int *const __restrict__ y, double *const __restrict__ w, MPI_Comm *const __restrict__ comm,
optimset_t *const __restrict__ optimset)
{
svm_param_t args;
point_t start, solution;
cublasHandle_t handle;
cublasStatus_t st = cublasCreate_v2(&handle);
if (st != CUBLAS_STATUS_SUCCESS)
error("cublasCreate() failed\n");
cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST);
double *x_gpu;
int *y_gpu;
double *w_gpu;
double *work_gpu;
double *s_gpu;
cudaMalloc(&x_gpu, m*n*sizeof(*x_gpu));
cudaMalloc(&y_gpu, m*sizeof(*y_gpu));
cudaMalloc(&w_gpu, n*sizeof(*w_gpu));
cudaMalloc(&work_gpu, m*sizeof(*work_gpu));
cudaMalloc(&s_gpu, sizeof(*s_gpu));
if (x_gpu == NULL || y_gpu == NULL || w_gpu == NULL || work_gpu == NULL || s_gpu == NULL)
{
CUFREE(x_gpu);
CUFREE(y_gpu);
CUFREE(w_gpu);
CUFREE(work_gpu);
CUFREE(s_gpu);
error("Unable to allocate device memory");
}
cudaMemcpy(x_gpu, x, m*n*sizeof(*x), cudaMemcpyHostToDevice);
cudaMemcpy(y_gpu, y, m*sizeof(*y), cudaMemcpyHostToDevice);
start.x = w;
memset(w, 0, n*sizeof(*w));
args.handle = handle;
args.m = m;
args.n = n;
args.x = x_gpu;
args.y = y_gpu;
args.w = w_gpu;
args.s = s_gpu;
args.work = work_gpu;
args.comm = comm;
nelder_mead(n, &start, &solution, &svm_nmwrap, &args, optimset);
for (int i=0; i<n; i++)
w[i] = solution.x[i];
cublasDestroy_v2(handle);
cudaFree(x_gpu);
cudaFree(y_gpu);
cudaFree(w_gpu);
cudaFree(work_gpu);
free(solution.x);
}
extern "C" SEXP R_svm(SEXP x, SEXP y, SEXP maxiter, SEXP comm_)
{
SEXP ret, ret_names, w, niters;
optimset_t opts;
MPI_Comm *comm = get_mpi_comm_from_Robj(comm_);
const int m = nrows(x);
const int n = ncols(x);
PROTECT(ret = allocVector(VECSXP, 2));
PROTECT(ret_names = allocVector(STRSXP, 2));
PROTECT(w = allocVector(REALSXP, n));
PROTECT(niters = allocVector(INTSXP, 1));
SET_VECTOR_ELT(ret, 0, w);
SET_VECTOR_ELT(ret, 1, niters);
SET_STRING_ELT(ret_names, 0, mkChar("w"));
SET_STRING_ELT(ret_names, 1, mkChar("niters"));
setAttrib(ret, R_NamesSymbol, ret_names);
set_nm_opts(INTEGER(maxiter)[0], &opts);
svm(m, n, REAL(x), INTEGER(y), REAL(w), comm, &opts);
UNPROTECT(4);
return ret;
}
|
9677200f6767a6c6e2be6fe714899355fbc86fa6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/sparse_utils_kernel.h"
#include <thrust/execution_policy.h>
#include <thrust/remove.h>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_meta.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/sparse/common_shape.h"
namespace phi {
namespace sparse {
template <typename T>
inline __device__ bool DevIsZero(const T* data, const int64_t cols) {
const T zero = static_cast<T>(0);
// TODO(zhangkaihuo): check whether the data is zero in parallel when cols > 1
for (int64_t i = 0; i < cols; i++) {
if (data[i] != zero) {
return false;
}
}
return true;
}
template <typename T>
__global__ void GetNonZeroNums(const T* dense_data,
const int rows,
const int cols,
int* non_zero_num,
int* temp_indexs) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ int counter;
if (threadIdx.x == 0) counter = 0;
__syncthreads();
for (int i = tid; i < rows; i += gridDim.x * blockDim.x) {
int index = -1;
// TODO(zhangkaihuo): when cols=1, vectorization can be used
if (!DevIsZero(dense_data + i * cols, cols)) {
// use reductions?
atomicAdd(&counter, 1);
index = i;
}
temp_indexs[i] = index;
}
__syncthreads();
if (threadIdx.x == 0) {
atomicAdd(non_zero_num, counter);
}
}
template <typename T>
__global__ void GetNonZeroElementsAndIndices(const T* dense_data,
const int64_t sparse_dim,
const int64_t cols,
const int64_t* x_dims,
const int non_zero_num,
const int* indexs,
int64_t* indices,
T* sparse_data) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < non_zero_num; i += gridDim.x * blockDim.x) {
int64_t sparse_index = indexs[i];
int64_t x_index = sparse_index;
for (int64_t j = sparse_dim - 1; j >= 0; j--) {
indices[j * non_zero_num + i] = sparse_index % x_dims[j];
sparse_index /= x_dims[j];
}
for (int j = 0; j < cols; j++) {
sparse_data[i * cols + j] = dense_data[x_index * cols + j];
}
}
}
template <typename T, typename Context>
void DenseToSparseCooKernel(const Context& dev_ctx,
const DenseTensor& x,
const int64_t sparse_dim,
SparseCooTensor* out) {
const T* x_data = x.data<T>();
const auto& x_dims = x.dims();
PADDLE_ENFORCE_LE(sparse_dim,
x_dims.size(),
phi::errors::InvalidArgument(
"sparse_dim must be less than the size of x.dims()"));
PADDLE_ENFORCE_GT(
sparse_dim, 0, phi::errors::InvalidArgument("sparse_dim must be >0"));
auto dims_2d = flatten_to_2d(x_dims, sparse_dim);
const int rows = dims_2d[0];
const int cols = dims_2d[1];
DenseTensor nums = phi::Empty<int32_t>(dev_ctx, {1});
DenseTensor d_x_dims = phi::Empty<int64_t>(dev_ctx, {x_dims.size()});
// 1. get the number of non-zero elements and the indices of the non-zero elements
int* nums_ptr = nums.data<int>();
phi::backends::gpu::GpuMemsetAsync(
nums_ptr, 0, sizeof(int), dev_ctx.stream());
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, rows, 1);
DenseTensor temp_indexs = phi::Empty<int32_t>(dev_ctx, {rows});
int* temp_indexs_ptr = temp_indexs.data<int>();
hipLaunchKernelGGL(( GetNonZeroNums), dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
dev_ctx.stream(),
x_data, rows, cols, nums_ptr, temp_indexs_ptr);
#ifdef PADDLE_WITH_HIP
thrust::remove(thrust::hip::par.on(dev_ctx.stream()),
#else
thrust::remove(thrust::hip::par.on(dev_ctx.stream()),
#endif
temp_indexs_ptr,
temp_indexs_ptr + rows,
-1);
// 2. copy non_zero_num to host, copy x_dims to device
int non_zero_num = 0;
phi::backends::gpu::GpuMemcpyAsync(&non_zero_num,
nums_ptr,
sizeof(int),
gpuMemcpyDeviceToHost,
dev_ctx.stream());
phi::backends::gpu::GpuMemcpyAsync(d_x_dims.data<int64_t>(),
x_dims.Get(),
x_dims.size() * sizeof(x_dims[0]),
gpuMemcpyHostToDevice,
dev_ctx.stream());
dev_ctx.Wait(); // wait for the copies to finish
const auto values_dims =
phi::funcs::sparse::InferDenseDims(x_dims, sparse_dim, non_zero_num);
phi::DenseTensor indices = phi::Empty<int64_t>(
dev_ctx, {sparse_dim, static_cast<int64_t>(non_zero_num)});
int64_t* indices_data = indices.data<int64_t>();
phi::DenseTensor values;
values.Resize(values_dims);
T* sparse_data = dev_ctx.template Alloc<T>(&values);
// 3. compute the sparse indices and gather the values using indexs
config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, non_zero_num, 1);
hipLaunchKernelGGL(( GetNonZeroElementsAndIndices), dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
dev_ctx.stream(), x_data,
sparse_dim,
cols,
d_x_dims.data<int64_t>(),
non_zero_num,
temp_indexs_ptr,
indices_data,
sparse_data);
out->SetMember(indices, values, x_dims, true);
}
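/*
 * Added example (not part of the original source): for x_dims = [2, 3] and
 * sparse_dim = 1 the tensor is viewed as rows = 2, cols = 3. With
 * x = [[0, 0, 0], [4, 0, 5]], GetNonZeroNums writes temp_indexs = [-1, 1] and
 * counts one non-zero row, thrust::remove compacts temp_indexs to [1], and
 * GetNonZeroElementsAndIndices then produces indices = [[1]] and
 * values = [[4, 0, 5]].
 */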
template <typename IntT>
__global__ void GetBatchSizes(const IntT* crows,
const int rows,
const int batchs,
IntT* batch_sizes) {
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < batchs) {
batch_sizes[tid] = crows[tid * (rows + 1) + rows];
}
}
template <typename IntT>
__global__ void ConvertCsrCrowsToCooRows(const IntT* crows_ptr,
const IntT* crows_offsets,
IntT* rows_ptr,
IntT* batch_ptr,
const int rows) {
const int b = blockIdx.y;
const int64_t offset = crows_offsets ? crows_offsets[b] : 0;
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < rows; i += gridDim.x * blockDim.x) {
for (int j = crows_ptr[b * (rows + 1) + i];
j < crows_ptr[b * (rows + 1) + i + 1];
j++) {
rows_ptr[offset + j] = i;
if (batch_ptr) {
batch_ptr[offset + j] = b;
}
}
}
}
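/*
 * Added example (not part of the original source): for one batch with
 * rows = 4 and crows = [0, 2, 2, 3, 3], the kernel above expands the
 * compressed row pointers back into per-element row indices
 * rows_ptr = [0, 0, 2] (row 0 holds two non-zeros, row 2 holds one).
 */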
template <typename T, typename IntT>
void SparseCsrToCooGPUKernel(const GPUContext& dev_ctx,
const SparseCsrTensor& x,
SparseCooTensor* out) {
const DDim& x_dims = x.dims();
const int64_t non_zero_num = x.non_zero_cols().numel();
const auto& csr_crows = x.non_zero_crows();
const auto& csr_cols = x.non_zero_cols();
const auto& csr_values = x.non_zero_elements();
const IntT* csr_crows_data = csr_crows.data<IntT>();
const IntT* csr_cols_data = csr_cols.data<IntT>();
const T* csr_values_data = csr_values.data<T>();
int64_t sparse_dim = 2;
if (x_dims.size() == 3) {
sparse_dim = 3;
}
int batchs = x_dims.size() == 2 ? 1 : x_dims[0];
int rows = x_dims.size() == 2 ? x_dims[0] : x_dims[1];
DenseTensor indices = phi::Empty<IntT>(dev_ctx, {sparse_dim, non_zero_num});
DenseTensor values = phi::EmptyLike<T, GPUContext>(dev_ctx, csr_values);
DenseTensor offsets = phi::Empty<IntT>(dev_ctx, {batchs});
IntT* coo_indices = indices.data<IntT>();
IntT* batch_ptr = x_dims.size() == 2 ? nullptr : coo_indices;
IntT* coo_rows_data =
x_dims.size() == 2 ? coo_indices : batch_ptr + non_zero_num;
IntT* coo_cols_data = coo_rows_data + non_zero_num;
IntT* offsets_ptr = batchs == 1 ? nullptr : offsets.data<IntT>();
T* coo_values_data = values.data<T>();
if (batchs > 1) {
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, batchs, 1);
hipLaunchKernelGGL(( GetBatchSizes<IntT>), dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0, 0,
csr_crows_data, rows, batchs, offsets_ptr);
#ifdef PADDLE_WITH_HIP
thrust::exclusive_scan(thrust::hip::par.on(dev_ctx.stream()),
#else
thrust::exclusive_scan(thrust::hip::par.on(dev_ctx.stream()),
#endif
offsets_ptr,
offsets_ptr + batchs,
offsets_ptr);
}
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, rows, 1);
config.block_per_grid.y = batchs;
hipLaunchKernelGGL(( ConvertCsrCrowsToCooRows<IntT>)
, dim3(config.block_per_grid), dim3(config.thread_per_block.x), 0, 0,
csr_crows_data, offsets_ptr, coo_rows_data, batch_ptr, rows);
phi::backends::gpu::GpuMemcpyAsync(coo_cols_data,
csr_cols_data,
sizeof(IntT) * non_zero_num,
gpuMemcpyDeviceToDevice,
dev_ctx.stream());
phi::backends::gpu::GpuMemcpyAsync(coo_values_data,
csr_values_data,
sizeof(T) * non_zero_num,
gpuMemcpyDeviceToDevice,
dev_ctx.stream());
out->SetMember(indices, values, x_dims, true);
}
template <typename T, typename Context>
void SparseCsrToCooKernel(const Context& dev_ctx,
const SparseCsrTensor& x,
SparseCooTensor* out) {
PD_VISIT_INTEGRAL_TYPES(
x.non_zero_crows().dtype(), "SparseCsrToCooGPUKernel", ([&] {
SparseCsrToCooGPUKernel<T, data_t>(dev_ctx, x, out);
}));
}
template <typename IntT>
__global__ void GetBatchsOffset(const IntT* batchs_ptr,
const int batchs,
const int non_zero_num,
int* batchs_offset) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < non_zero_num; i += gridDim.x * blockDim.x) {
if (i == non_zero_num - 1 || batchs_ptr[i] != batchs_ptr[i + 1]) {
const int start = batchs_ptr[i];
const int end = i == non_zero_num - 1 ? batchs : batchs_ptr[i + 1];
for (int j = start; j < end; j++) {
batchs_offset[j] = i + 1;
}
}
}
}
template <typename IntT>
__global__ void ConvertCooRowsToCsrCrows(
const int* batchs_offset, // can be null if batchs = 1
const IntT* coo_rows_data,
IntT* csr_crows_data,
const int rows,
const int64_t non_zero_num) {
const int b = blockIdx.y;
int batch_non_zero_num =
batchs_offset == nullptr ? non_zero_num : batchs_offset[b];
IntT batch_start = 0;
if (b > 0) {
batch_start = batchs_offset[b - 1];
batch_non_zero_num -= batch_start;
}
const IntT* coo_rows_ptr = coo_rows_data + batch_start;
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < batch_non_zero_num; i += gridDim.x * blockDim.x) {
if (i == 0) {
for (IntT j = 0; j <= coo_rows_ptr[0]; j++) {
csr_crows_data[b * (rows + 1) + j] = 0;
}
} else {
for (IntT j = coo_rows_ptr[i - 1]; j < coo_rows_ptr[i]; j++) {
csr_crows_data[b * (rows + 1) + j + 1] = i;
}
}
if (i == batch_non_zero_num - 1) {
for (IntT i = coo_rows_ptr[batch_non_zero_num - 1] + 1; i < rows + 1;
i++) {
csr_crows_data[b * (rows + 1) + i] = batch_non_zero_num;
}
}
}
if (batch_non_zero_num == 0) {
for (int i = tid; i < rows + 1; i += gridDim.x * blockDim.x) {
csr_crows_data[b * (rows + 1) + i] = 0;
}
}
}
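/*
 * Added example (not part of the original source): this is the inverse of
 * ConvertCsrCrowsToCooRows above. For one batch with rows = 4 and sorted COO
 * row indices [0, 0, 2], the kernel writes crows = [0, 2, 2, 3, 3]:
 * crows[0] = 0; rows 1 and 2 both start after the two non-zeros of row 0, so
 * crows[1] = crows[2] = 2; the remaining entries are padded with the batch's
 * non-zero count 3.
 */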
template <typename T, typename IntT>
void SparseCooToCsrGPUKernel(const GPUContext& dev_ctx,
const SparseCooTensor& x,
SparseCsrTensor* out) {
const auto& x_dims = x.dims();
bool valid = x_dims.size() == 2 || x_dims.size() == 3;
PADDLE_ENFORCE_EQ(valid,
true,
phi::errors::InvalidArgument(
"SparseCsrTensor only support 2-D or 3-D matrix"));
const int64_t non_zero_num = x.nnz();
if (non_zero_num <= 0) return;
int batchs = x_dims.size() == 2 ? 1 : x_dims[0];
int rows = x_dims.size() == 2 ? x_dims[0] : x_dims[1];
phi::DenseTensor non_zero_crows =
phi::Empty<IntT>(dev_ctx, {batchs * (rows + 1)});
phi::DenseTensor non_zero_cols = phi::Empty<IntT>(dev_ctx, {non_zero_num});
phi::DenseTensor non_zero_elements =
phi::EmptyLike<T, GPUContext>(dev_ctx, x.non_zero_elements());
IntT* csr_crows_data = non_zero_crows.data<IntT>();
IntT* csr_cols_data = non_zero_cols.data<IntT>();
T* csr_values_data = non_zero_elements.data<T>();
const auto& coo_indices = x.non_zero_indices();
const auto& coo_values = x.non_zero_elements();
const IntT* batchs_ptr = coo_indices.data<IntT>();
const IntT* coo_rows_data =
x_dims.size() == 2 ? batchs_ptr : batchs_ptr + non_zero_num;
const IntT* coo_cols_data = coo_rows_data + non_zero_num;
const T* coo_values_data = coo_values.data<T>();
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, batchs, 1);
if (batchs > 1) {
auto config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, non_zero_num, 1);
phi::DenseTensor batchs_offset = phi::Empty<int>(dev_ctx, {batchs});
int* batchs_offset_ptr = batchs_offset.data<int>();
phi::funcs::SetConstant<GPUContext, int> set_zero;
// zero-initialize the offsets in case the leading batches have nnz == 0
set_zero(dev_ctx, &batchs_offset, static_cast<IntT>(0));
hipLaunchKernelGGL(( GetBatchsOffset<IntT>), dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
dev_ctx.stream(),
batchs_ptr, batchs, non_zero_num, batchs_offset_ptr);
config.block_per_grid.y = batchs;
hipLaunchKernelGGL(( ConvertCooRowsToCsrCrows<IntT>), dim3(config.block_per_grid),
dim3(config.thread_per_block.x),
0,
dev_ctx.stream(),
batchs_offset_ptr, coo_rows_data, csr_crows_data, rows, non_zero_num);
} else {
hipLaunchKernelGGL(( ConvertCooRowsToCsrCrows<IntT>), dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
dev_ctx.stream(),
nullptr, coo_rows_data, csr_crows_data, rows, non_zero_num);
}
phi::backends::gpu::GpuMemcpyAsync(csr_cols_data,
coo_cols_data,
sizeof(IntT) * non_zero_num,
gpuMemcpyDeviceToDevice,
dev_ctx.stream());
phi::backends::gpu::GpuMemcpyAsync(csr_values_data,
coo_values_data,
sizeof(T) * non_zero_num,
gpuMemcpyDeviceToDevice,
dev_ctx.stream());
out->SetMember(non_zero_crows, non_zero_cols, non_zero_elements, x_dims);
}
template <typename T, typename Context>
void SparseCooToCsrKernel(const Context& dev_ctx,
const SparseCooTensor& x,
SparseCsrTensor* out) {
PD_VISIT_INTEGRAL_TYPES(
x.non_zero_indices().dtype(), "SparseCooToCsrGPUKernel", ([&] {
SparseCooToCsrGPUKernel<T, data_t>(dev_ctx, x, out);
}));
}
template <typename ValueT, typename IndicesT>
__global__ void KernelSparseCooToDense(const IndicesT* indices,
const int64_t* sparse_offsets,
const ValueT* data,
ValueT* dense_data,
const IndicesT non_zero_num,
const int64_t base_offset,
const int64_t sparse_dim) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < non_zero_num; i += gridDim.x * blockDim.x) {
int64_t index = 0;
for (int j = 0; j < sparse_dim; j++) {
index += indices[j * non_zero_num + i] * sparse_offsets[j];
}
for (int j = 0; j < base_offset; j++) {
dense_data[index * base_offset + j] = data[i * base_offset + j];
}
}
}
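/*
 * Added example (not part of the original source): for dense_dims = [2, 3]
 * and sparse_dim = 2 the host code below builds sparse_offsets = [3, 1]
 * (row-major strides), so a non-zero with COO coordinates (1, 2) is scattered
 * to flat position 1*3 + 2*1 = 5 of the zero-initialized output; base_offset
 * covers any trailing dense dimensions of the values tensor.
 */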
template <typename T, typename IntT>
void SparseCooToDenseGPUKernel(const GPUContext& dev_ctx,
const SparseCooTensor& x,
DenseTensor* out) {
const auto non_zero_num = x.nnz();
const auto dense_dims = x.dims();
const auto indices = x.non_zero_indices();
const auto values = x.non_zero_elements();
const auto indices_dims = indices.dims();
int64_t sparse_dim = indices_dims[0];
if (indices_dims.size() == 1) {
sparse_dim = 1;
}
const int64_t dense_dim = values.dims().size() - 1;
const auto place = dev_ctx.GetPlace();
const T* x_data = values.data<T>();
*out = phi::Empty(dev_ctx,
phi::DenseTensorMeta(
x.dtype(), x.dims(), x.non_zero_elements().layout()));
T* out_data = out->data<T>();
int64_t base_offset = 1;
for (int64_t i = 0; i < dense_dim; i++) {
base_offset *= dense_dims[sparse_dim + i];
}
std::vector<int64_t> sparse_offsets(sparse_dim);
int64_t offset = 1;
for (int i = sparse_dim - 1; i >= 0; i--) {
sparse_offsets[i] = offset;
offset *= dense_dims[i];
}
DenseTensor d_sparse_offsets = Empty<int64_t>(dev_ctx, {sparse_dim});
phi::backends::gpu::GpuMemcpyAsync(d_sparse_offsets.data<int64_t>(),
sparse_offsets.data(),
sparse_dim * sizeof(int64_t),
gpuMemcpyHostToDevice,
dev_ctx.stream());
phi::backends::gpu::GpuMemsetAsync(
out_data, 0, sizeof(T) * out->numel(), dev_ctx.stream());
auto config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, non_zero_num, 1);
hipLaunchKernelGGL(( KernelSparseCooToDense<T, IntT>)
, dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
dev_ctx.stream(), indices.data<IntT>(),
d_sparse_offsets.data<int64_t>(),
x_data,
out_data,
non_zero_num,
base_offset,
sparse_dim);
}
template <typename T, typename Context>
void SparseCooToDenseKernel(const Context& dev_ctx,
const SparseCooTensor& x,
DenseTensor* out) {
PD_VISIT_INTEGRAL_TYPES(
x.non_zero_indices().dtype(), "SparseCooToDenseGPUKernel", ([&] {
SparseCooToDenseGPUKernel<T, data_t>(dev_ctx, x, out);
}));
}
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(dense_to_sparse_coo,
GPU,
ALL_LAYOUT,
phi::sparse::DenseToSparseCooKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
PD_REGISTER_KERNEL(sparse_csr_to_coo,
GPU,
ALL_LAYOUT,
phi::sparse::SparseCsrToCooKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
PD_REGISTER_KERNEL(sparse_coo_to_csr,
GPU,
ALL_LAYOUT,
phi::sparse::SparseCooToCsrKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
PD_REGISTER_KERNEL(dense_to_sparse_csr,
GPU,
ALL_LAYOUT,
phi::sparse::DenseToSparseCsrKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
PD_REGISTER_KERNEL(sparse_coo_to_dense,
GPU,
ALL_LAYOUT,
phi::sparse::SparseCooToDenseKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
PD_REGISTER_KERNEL(sparse_csr_to_dense,
GPU,
ALL_LAYOUT,
phi::sparse::SparseCsrToDenseKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
PD_REGISTER_KERNEL(coo_values,
GPU,
ALL_LAYOUT,
phi::sparse::CooValuesKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
PD_REGISTER_KERNEL(csr_values,
GPU,
ALL_LAYOUT,
phi::sparse::CsrValuesKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
PD_REGISTER_KERNEL(sparse_coo_tensor,
GPU,
ALL_LAYOUT,
phi::sparse::SparseCooTensorKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int16_t,
int,
int64_t) {}
|
9677200f6767a6c6e2be6fe714899355fbc86fa6.cu
|
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/sparse_utils_kernel.h"
#include <thrust/execution_policy.h>
#include <thrust/remove.h>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_meta.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/sparse/common_shape.h"
namespace phi {
namespace sparse {
template <typename T>
inline __device__ bool DevIsZero(const T* data, const int64_t cols) {
const T zero = static_cast<T>(0);
// TODO(zhangkaihuo): check whether the data is zero in parallel when cols > 1
for (int64_t i = 0; i < cols; i++) {
if (data[i] != zero) {
return false;
}
}
return true;
}
template <typename T>
__global__ void GetNonZeroNums(const T* dense_data,
const int rows,
const int cols,
int* non_zero_num,
int* temp_indexs) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ int counter;
if (threadIdx.x == 0) counter = 0;
__syncthreads();
for (int i = tid; i < rows; i += gridDim.x * blockDim.x) {
int index = -1;
// TODO(zhangkaihuo): when cols=1, vectorization can be used
if (!DevIsZero(dense_data + i * cols, cols)) {
// use reductions?
atomicAdd(&counter, 1);
index = i;
}
temp_indexs[i] = index;
}
__syncthreads();
if (threadIdx.x == 0) {
atomicAdd(non_zero_num, counter);
}
}
template <typename T>
__global__ void GetNonZeroElementsAndIndices(const T* dense_data,
const int64_t sparse_dim,
const int64_t cols,
const int64_t* x_dims,
const int non_zero_num,
const int* indexs,
int64_t* indices,
T* sparse_data) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < non_zero_num; i += gridDim.x * blockDim.x) {
int64_t sparse_index = indexs[i];
int64_t x_index = sparse_index;
for (int64_t j = sparse_dim - 1; j >= 0; j--) {
indices[j * non_zero_num + i] = sparse_index % x_dims[j];
sparse_index /= x_dims[j];
}
for (int j = 0; j < cols; j++) {
sparse_data[i * cols + j] = dense_data[x_index * cols + j];
}
}
}
template <typename T, typename Context>
void DenseToSparseCooKernel(const Context& dev_ctx,
const DenseTensor& x,
const int64_t sparse_dim,
SparseCooTensor* out) {
const T* x_data = x.data<T>();
const auto& x_dims = x.dims();
PADDLE_ENFORCE_LE(sparse_dim,
x_dims.size(),
phi::errors::InvalidArgument(
"sparse_dim must be less than the size of x.dims()"));
PADDLE_ENFORCE_GT(
sparse_dim, 0, phi::errors::InvalidArgument("sparse_dim must be >0"));
auto dims_2d = flatten_to_2d(x_dims, sparse_dim);
const int rows = dims_2d[0];
const int cols = dims_2d[1];
DenseTensor nums = phi::Empty<int32_t>(dev_ctx, {1});
DenseTensor d_x_dims = phi::Empty<int64_t>(dev_ctx, {x_dims.size()});
// 1. get the number of non-zero elements and the indices of the non-zero elements
int* nums_ptr = nums.data<int>();
phi::backends::gpu::GpuMemsetAsync(
nums_ptr, 0, sizeof(int), dev_ctx.stream());
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, rows, 1);
DenseTensor temp_indexs = phi::Empty<int32_t>(dev_ctx, {rows});
int* temp_indexs_ptr = temp_indexs.data<int>();
GetNonZeroNums<<<config.block_per_grid.x,
config.thread_per_block.x,
0,
dev_ctx.stream()>>>(
x_data, rows, cols, nums_ptr, temp_indexs_ptr);
#ifdef PADDLE_WITH_HIP
thrust::remove(thrust::hip::par.on(dev_ctx.stream()),
#else
thrust::remove(thrust::cuda::par.on(dev_ctx.stream()),
#endif
temp_indexs_ptr,
temp_indexs_ptr + rows,
-1);
// 2. copy non_zero_num to host, copy x_dims to device
int non_zero_num = 0;
phi::backends::gpu::GpuMemcpyAsync(&non_zero_num,
nums_ptr,
sizeof(int),
gpuMemcpyDeviceToHost,
dev_ctx.stream());
phi::backends::gpu::GpuMemcpyAsync(d_x_dims.data<int64_t>(),
x_dims.Get(),
x_dims.size() * sizeof(x_dims[0]),
gpuMemcpyHostToDevice,
dev_ctx.stream());
dev_ctx.Wait(); // wait for the copies to finish
const auto values_dims =
phi::funcs::sparse::InferDenseDims(x_dims, sparse_dim, non_zero_num);
phi::DenseTensor indices = phi::Empty<int64_t>(
dev_ctx, {sparse_dim, static_cast<int64_t>(non_zero_num)});
int64_t* indices_data = indices.data<int64_t>();
phi::DenseTensor values;
values.Resize(values_dims);
T* sparse_data = dev_ctx.template Alloc<T>(&values);
// 3. compute the sparse indices and gather the values using indexs
config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, non_zero_num, 1);
GetNonZeroElementsAndIndices<<<config.block_per_grid.x,
config.thread_per_block.x,
0,
dev_ctx.stream()>>>(x_data,
sparse_dim,
cols,
d_x_dims.data<int64_t>(),
non_zero_num,
temp_indexs_ptr,
indices_data,
sparse_data);
out->SetMember(indices, values, x_dims, true);
}
template <typename IntT>
__global__ void GetBatchSizes(const IntT* crows,
const int rows,
const int batchs,
IntT* batch_sizes) {
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < batchs) {
batch_sizes[tid] = crows[tid * (rows + 1) + rows];
}
}
template <typename IntT>
__global__ void ConvertCsrCrowsToCooRows(const IntT* crows_ptr,
const IntT* crows_offsets,
IntT* rows_ptr,
IntT* batch_ptr,
const int rows) {
const int b = blockIdx.y;
const int64_t offset = crows_offsets ? crows_offsets[b] : 0;
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < rows; i += gridDim.x * blockDim.x) {
for (int j = crows_ptr[b * (rows + 1) + i];
j < crows_ptr[b * (rows + 1) + i + 1];
j++) {
rows_ptr[offset + j] = i;
if (batch_ptr) {
batch_ptr[offset + j] = b;
}
}
}
}
template <typename T, typename IntT>
void SparseCsrToCooGPUKernel(const GPUContext& dev_ctx,
const SparseCsrTensor& x,
SparseCooTensor* out) {
const DDim& x_dims = x.dims();
const int64_t non_zero_num = x.non_zero_cols().numel();
const auto& csr_crows = x.non_zero_crows();
const auto& csr_cols = x.non_zero_cols();
const auto& csr_values = x.non_zero_elements();
const IntT* csr_crows_data = csr_crows.data<IntT>();
const IntT* csr_cols_data = csr_cols.data<IntT>();
const T* csr_values_data = csr_values.data<T>();
int64_t sparse_dim = 2;
if (x_dims.size() == 3) {
sparse_dim = 3;
}
int batchs = x_dims.size() == 2 ? 1 : x_dims[0];
int rows = x_dims.size() == 2 ? x_dims[0] : x_dims[1];
DenseTensor indices = phi::Empty<IntT>(dev_ctx, {sparse_dim, non_zero_num});
DenseTensor values = phi::EmptyLike<T, GPUContext>(dev_ctx, csr_values);
DenseTensor offsets = phi::Empty<IntT>(dev_ctx, {batchs});
IntT* coo_indices = indices.data<IntT>();
IntT* batch_ptr = x_dims.size() == 2 ? nullptr : coo_indices;
IntT* coo_rows_data =
x_dims.size() == 2 ? coo_indices : batch_ptr + non_zero_num;
IntT* coo_cols_data = coo_rows_data + non_zero_num;
IntT* offsets_ptr = batchs == 1 ? nullptr : offsets.data<IntT>();
T* coo_values_data = values.data<T>();
if (batchs > 1) {
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, batchs, 1);
GetBatchSizes<IntT><<<config.block_per_grid.x, config.thread_per_block.x>>>(
csr_crows_data, rows, batchs, offsets_ptr);
#ifdef PADDLE_WITH_HIP
thrust::exclusive_scan(thrust::hip::par.on(dev_ctx.stream()),
#else
thrust::exclusive_scan(thrust::cuda::par.on(dev_ctx.stream()),
#endif
offsets_ptr,
offsets_ptr + batchs,
offsets_ptr);
}
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, rows, 1);
config.block_per_grid.y = batchs;
ConvertCsrCrowsToCooRows<IntT>
<<<config.block_per_grid, config.thread_per_block.x>>>(
csr_crows_data, offsets_ptr, coo_rows_data, batch_ptr, rows);
phi::backends::gpu::GpuMemcpyAsync(coo_cols_data,
csr_cols_data,
sizeof(IntT) * non_zero_num,
gpuMemcpyDeviceToDevice,
dev_ctx.stream());
phi::backends::gpu::GpuMemcpyAsync(coo_values_data,
csr_values_data,
sizeof(T) * non_zero_num,
gpuMemcpyDeviceToDevice,
dev_ctx.stream());
out->SetMember(indices, values, x_dims, true);
}
template <typename T, typename Context>
void SparseCsrToCooKernel(const Context& dev_ctx,
const SparseCsrTensor& x,
SparseCooTensor* out) {
PD_VISIT_INTEGRAL_TYPES(
x.non_zero_crows().dtype(), "SparseCsrToCooGPUKernel", ([&] {
SparseCsrToCooGPUKernel<T, data_t>(dev_ctx, x, out);
}));
}
template <typename IntT>
__global__ void GetBatchsOffset(const IntT* batchs_ptr,
const int batchs,
const int non_zero_num,
int* batchs_offset) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < non_zero_num; i += gridDim.x * blockDim.x) {
if (i == non_zero_num - 1 || batchs_ptr[i] != batchs_ptr[i + 1]) {
const int start = batchs_ptr[i];
const int end = i == non_zero_num - 1 ? batchs : batchs_ptr[i + 1];
for (int j = start; j < end; j++) {
batchs_offset[j] = i + 1;
}
}
}
}
template <typename IntT>
__global__ void ConvertCooRowsToCsrCrows(
const int* batchs_offset, // can be null if batchs = 1
const IntT* coo_rows_data,
IntT* csr_crows_data,
const int rows,
const int64_t non_zero_num) {
const int b = blockIdx.y;
int batch_non_zero_num =
batchs_offset == nullptr ? non_zero_num : batchs_offset[b];
IntT batch_start = 0;
if (b > 0) {
batch_start = batchs_offset[b - 1];
batch_non_zero_num -= batch_start;
}
const IntT* coo_rows_ptr = coo_rows_data + batch_start;
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < batch_non_zero_num; i += gridDim.x * blockDim.x) {
if (i == 0) {
for (IntT j = 0; j <= coo_rows_ptr[0]; j++) {
csr_crows_data[b * (rows + 1) + j] = 0;
}
} else {
for (IntT j = coo_rows_ptr[i - 1]; j < coo_rows_ptr[i]; j++) {
csr_crows_data[b * (rows + 1) + j + 1] = i;
}
}
if (i == batch_non_zero_num - 1) {
for (IntT i = coo_rows_ptr[batch_non_zero_num - 1] + 1; i < rows + 1;
i++) {
csr_crows_data[b * (rows + 1) + i] = batch_non_zero_num;
}
}
}
if (batch_non_zero_num == 0) {
for (int i = tid; i < rows + 1; i += gridDim.x * blockDim.x) {
csr_crows_data[b * (rows + 1) + i] = 0;
}
}
}
template <typename T, typename IntT>
void SparseCooToCsrGPUKernel(const GPUContext& dev_ctx,
const SparseCooTensor& x,
SparseCsrTensor* out) {
const auto& x_dims = x.dims();
bool valid = x_dims.size() == 2 || x_dims.size() == 3;
PADDLE_ENFORCE_EQ(valid,
true,
phi::errors::InvalidArgument(
"SparseCsrTensor only support 2-D or 3-D matrix"));
const int64_t non_zero_num = x.nnz();
if (non_zero_num <= 0) return;
int batchs = x_dims.size() == 2 ? 1 : x_dims[0];
int rows = x_dims.size() == 2 ? x_dims[0] : x_dims[1];
phi::DenseTensor non_zero_crows =
phi::Empty<IntT>(dev_ctx, {batchs * (rows + 1)});
phi::DenseTensor non_zero_cols = phi::Empty<IntT>(dev_ctx, {non_zero_num});
phi::DenseTensor non_zero_elements =
phi::EmptyLike<T, GPUContext>(dev_ctx, x.non_zero_elements());
IntT* csr_crows_data = non_zero_crows.data<IntT>();
IntT* csr_cols_data = non_zero_cols.data<IntT>();
T* csr_values_data = non_zero_elements.data<T>();
const auto& coo_indices = x.non_zero_indices();
const auto& coo_values = x.non_zero_elements();
const IntT* batchs_ptr = coo_indices.data<IntT>();
const IntT* coo_rows_data =
x_dims.size() == 2 ? batchs_ptr : batchs_ptr + non_zero_num;
const IntT* coo_cols_data = coo_rows_data + non_zero_num;
const T* coo_values_data = coo_values.data<T>();
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, batchs, 1);
if (batchs > 1) {
auto config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, non_zero_num, 1);
phi::DenseTensor batchs_offset = phi::Empty<int>(dev_ctx, {batchs});
int* batchs_offset_ptr = batchs_offset.data<int>();
phi::funcs::SetConstant<GPUContext, int> set_zero;
// zero-initialize the offsets in case the leading batches have nnz == 0
set_zero(dev_ctx, &batchs_offset, static_cast<IntT>(0));
GetBatchsOffset<IntT><<<config.block_per_grid.x,
config.thread_per_block.x,
0,
dev_ctx.stream()>>>(
batchs_ptr, batchs, non_zero_num, batchs_offset_ptr);
config.block_per_grid.y = batchs;
ConvertCooRowsToCsrCrows<IntT><<<config.block_per_grid,
config.thread_per_block.x,
0,
dev_ctx.stream()>>>(
batchs_offset_ptr, coo_rows_data, csr_crows_data, rows, non_zero_num);
} else {
ConvertCooRowsToCsrCrows<IntT><<<config.block_per_grid.x,
config.thread_per_block.x,
0,
dev_ctx.stream()>>>(
nullptr, coo_rows_data, csr_crows_data, rows, non_zero_num);
}
phi::backends::gpu::GpuMemcpyAsync(csr_cols_data,
coo_cols_data,
sizeof(IntT) * non_zero_num,
gpuMemcpyDeviceToDevice,
dev_ctx.stream());
phi::backends::gpu::GpuMemcpyAsync(csr_values_data,
coo_values_data,
sizeof(T) * non_zero_num,
gpuMemcpyDeviceToDevice,
dev_ctx.stream());
out->SetMember(non_zero_crows, non_zero_cols, non_zero_elements, x_dims);
}
template <typename T, typename Context>
void SparseCooToCsrKernel(const Context& dev_ctx,
const SparseCooTensor& x,
SparseCsrTensor* out) {
PD_VISIT_INTEGRAL_TYPES(
x.non_zero_indices().dtype(), "SparseCooToCsrGPUKernel", ([&] {
SparseCooToCsrGPUKernel<T, data_t>(dev_ctx, x, out);
}));
}
template <typename ValueT, typename IndicesT>
__global__ void KernelSparseCooToDense(const IndicesT* indices,
const int64_t* sparse_offsets,
const ValueT* data,
ValueT* dense_data,
const IndicesT non_zero_num,
const int64_t base_offset,
const int64_t sparse_dim) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < non_zero_num; i += gridDim.x * blockDim.x) {
int64_t index = 0;
for (int j = 0; j < sparse_dim; j++) {
index += indices[j * non_zero_num + i] * sparse_offsets[j];
}
for (int j = 0; j < base_offset; j++) {
dense_data[index * base_offset + j] = data[i * base_offset + j];
}
}
}
template <typename T, typename IntT>
void SparseCooToDenseGPUKernel(const GPUContext& dev_ctx,
const SparseCooTensor& x,
DenseTensor* out) {
const auto non_zero_num = x.nnz();
const auto dense_dims = x.dims();
const auto indices = x.non_zero_indices();
const auto values = x.non_zero_elements();
const auto indices_dims = indices.dims();
int64_t sparse_dim = indices_dims[0];
if (indices_dims.size() == 1) {
sparse_dim = 1;
}
const int64_t dense_dim = values.dims().size() - 1;
const auto place = dev_ctx.GetPlace();
const T* x_data = values.data<T>();
*out = phi::Empty(dev_ctx,
phi::DenseTensorMeta(
x.dtype(), x.dims(), x.non_zero_elements().layout()));
T* out_data = out->data<T>();
int64_t base_offset = 1;
for (int64_t i = 0; i < dense_dim; i++) {
base_offset *= dense_dims[sparse_dim + i];
}
std::vector<int64_t> sparse_offsets(sparse_dim);
int64_t offset = 1;
for (int i = sparse_dim - 1; i >= 0; i--) {
sparse_offsets[i] = offset;
offset *= dense_dims[i];
}
DenseTensor d_sparse_offsets = Empty<int64_t>(dev_ctx, {sparse_dim});
phi::backends::gpu::GpuMemcpyAsync(d_sparse_offsets.data<int64_t>(),
sparse_offsets.data(),
sparse_dim * sizeof(int64_t),
gpuMemcpyHostToDevice,
dev_ctx.stream());
phi::backends::gpu::GpuMemsetAsync(
out_data, 0, sizeof(T) * out->numel(), dev_ctx.stream());
auto config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, non_zero_num, 1);
KernelSparseCooToDense<T, IntT>
<<<config.block_per_grid.x,
config.thread_per_block.x,
0,
dev_ctx.stream()>>>(indices.data<IntT>(),
d_sparse_offsets.data<int64_t>(),
x_data,
out_data,
non_zero_num,
base_offset,
sparse_dim);
}
template <typename T, typename Context>
void SparseCooToDenseKernel(const Context& dev_ctx,
const SparseCooTensor& x,
DenseTensor* out) {
PD_VISIT_INTEGRAL_TYPES(
x.non_zero_indices().dtype(), "SparseCooToDenseGPUKernel", ([&] {
SparseCooToDenseGPUKernel<T, data_t>(dev_ctx, x, out);
}));
}
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(dense_to_sparse_coo,
GPU,
ALL_LAYOUT,
phi::sparse::DenseToSparseCooKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
PD_REGISTER_KERNEL(sparse_csr_to_coo,
GPU,
ALL_LAYOUT,
phi::sparse::SparseCsrToCooKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
PD_REGISTER_KERNEL(sparse_coo_to_csr,
GPU,
ALL_LAYOUT,
phi::sparse::SparseCooToCsrKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
PD_REGISTER_KERNEL(dense_to_sparse_csr,
GPU,
ALL_LAYOUT,
phi::sparse::DenseToSparseCsrKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
PD_REGISTER_KERNEL(sparse_coo_to_dense,
GPU,
ALL_LAYOUT,
phi::sparse::SparseCooToDenseKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
PD_REGISTER_KERNEL(sparse_csr_to_dense,
GPU,
ALL_LAYOUT,
phi::sparse::SparseCsrToDenseKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
PD_REGISTER_KERNEL(coo_values,
GPU,
ALL_LAYOUT,
phi::sparse::CooValuesKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
PD_REGISTER_KERNEL(csr_values,
GPU,
ALL_LAYOUT,
phi::sparse::CsrValuesKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
PD_REGISTER_KERNEL(sparse_coo_tensor,
GPU,
ALL_LAYOUT,
phi::sparse::SparseCooTensorKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int16_t,
int,
int64_t) {}
|
0f77244c8226331ca3de0f0acfc005f7c61bbb7e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
//
// @author Yurii Shyrma ([email protected])
//
#include <helpers/PointersManager.h>
#include <math/templatemath.h>
#include <ops/declarable/helpers/convolutions.h>
namespace sd {
namespace ops {
//////////////////////////////////////////////////////////////////////////
template <typename T>
SD_KERNEL static void pooling3dCuda(const void* vx, const sd::LongType* xShapeInfo, void* vz,
const sd::LongType* zShapeInfo, const int kD, const int kH, const int kW,
const int sD, const int sH, const int sW, const int pD, const int pH, const int pW,
const int dD, const int dH, const int dW, const int poolingMode,
const int extraParam0) {
// x input is [bS, iC, iD, iH, iW]
// z output is [bS, iC, oD, oH, oW]
const T* x = reinterpret_cast<const T*>(vx);
T* z = reinterpret_cast<T*>(vz);
__shared__ int rank, kDeff, kHeff, kWeff, iD, iH, iW, kProd;
__shared__ sd::LongType zLen, *sharedMem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<sd::LongType*>(shmem);
zLen = shape::length(zShapeInfo);
rank = 5;
kDeff = kD + (kD - 1) * (dD - 1);
kHeff = kH + (kH - 1) * (dH - 1);
kWeff = kW + (kW - 1) * (dW - 1);
iD = xShapeInfo[3];
iH = xShapeInfo[4];
iW = xShapeInfo[5];
kProd = kD * kH * kW;
}
__syncthreads();
const auto zInd = threadIdx.x + blockIdx.x * blockDim.x;
if (zInd >= zLen) return;
auto coords = sharedMem + threadIdx.x * rank;
shape::index2coords(zInd, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
int dstart = coords[2] * sD - pD;
int hstart = coords[3] * sH - pH;
int wstart = coords[4] * sW - pW;
int dend = dstart + kDeff;
int hend = hstart + kHeff;
int wend = wstart + kWeff;
if (dstart < 0) dstart += dD * ((-dstart + dD - 1) / dD);
if (hstart < 0) hstart += dH * ((-hstart + dH - 1) / dH);
if (wstart < 0) wstart += dW * ((-wstart + dW - 1) / dW);
if (dend > iD) dend -= dD * ((dend - iD + dD - 1) / dD);
if (hend > iH) hend -= dH * ((hend - iH + dH - 1) / dH);
if (wend > iW) wend -= dW * ((wend - iW + dW - 1) / dW);
switch (poolingMode) {
/*** max ***/
case 0: {
T max = -DataTypeUtils::max<T>();
for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) {
for (coords[3] = hstart; coords[3] < hend; coords[3] += dH) {
for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) {
T val = x[shape::getOffset(xShapeInfo, coords)];
if (val > max) max = val;
}
}
}
z[zOffset] = max;
} break;
/*** avg ***/
case 1: {
T sum = static_cast<T>(0.);
for (coords[2] = dstart; coords[2] < dend; coords[2] += dD)
for (coords[3] = hstart; coords[3] < hend; coords[3] += dH)
for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) sum += x[shape::getOffset(xShapeInfo, coords)];
if (extraParam0 == 0) { // Exclude padding
sd::Unsigned a = (dend - dstart) / dD + ((dend - dstart) % dD == 0 ? 0 : 1);
sd::Unsigned b = (hend - hstart) / dH + ((hend - hstart) % dH == 0 ? 0 : 1);
sd::Unsigned c = (wend - wstart) / dW + ((wend - wstart) % dW == 0 ? 0 : 1);
sum /= static_cast<T>(
a * b * c); // /= sd::math::sd_ceil<double,T>(static_cast<double>(dend - dstart) /
// static_cast<double>(dD)) * sd::math::sd_ceil<double,T>(static_cast<double>(hend - hstart) /
// static_cast<double>(dH)) * sd::math::sd_ceil<double,T>(static_cast<double>(wend - wstart) /
// static_cast<double>(dW)); //Accounts for dilation
} else if (extraParam0 == 1) // Include padding
sum /= kProd;
z[zOffset] = sum;
} break;
/*** pnorm ***/
case 2: {
T sum = static_cast<T>(0.);
for (coords[2] = dstart; coords[2] < dend; coords[2] += dD)
for (coords[3] = hstart; coords[3] < hend; coords[3] += dH)
for (coords[4] = wstart; coords[4] < wend; coords[4] += dW)
sum += sd::math::sd_pow<T, T, T>(sd::math::sd_abs<T>(x[shape::getOffset(xShapeInfo, coords)]), extraParam0);
sum = sd::math::sd_pow<T, T, T>(sum, (T)1.f / extraParam0);
z[zOffset] = sum;
} break;
}
}
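/*
 * Added example (not part of the original source): for kD = kH = kW = 3 and
 * unit dilation, a window clipped to 2x3x3 at a border sums 18 input values.
 * With extraParam0 == 0 (exclude padding) the average divides by 18, while
 * with extraParam0 == 1 (include padding) it divides by kProd = 27.
 */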
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void pooling3dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const hipStream_t* stream, const void* vx, const sd::LongType* xShapeInfo, void* vz,
const sd::LongType* zShapeInfo, const int kD, const int kH, const int kW,
const int sD, const int sH, const int sW, const int pD, const int pH, const int pW,
const int dD, const int dH, const int dW, const int poolingMode,
const int extraParam0) {
hipLaunchKernelGGL(( pooling3dCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream,
vx, xShapeInfo, vz, zShapeInfo, kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0);
}
//////////////////////////////////////////////////////////////////////////
void ConvolutionUtils::pooling3d(sd::graph::Context& block, const NDArray& input, NDArray& output, const int kD,
const int kH, const int kW, const int sD, const int sH, const int sW, const int pD,
const int pH, const int pW, const int dD, const int dH, const int dW,
const int poolingMode, const int extraParam0) {
PointersManager manager(block.launchContext(), "pooling3d");
const int threadsPerBlock = SD_MAX_NUM_THREADS / 2;
const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = output.rankOf() * sizeof(sd::LongType) * threadsPerBlock + 128;
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(
input.dataType(), pooling3dCudaLauncher,
(blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.specialBuffer(),
input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), kD, kH, kW, sD, sH, sW, pD, pH, pW,
dD, dH, dW, poolingMode, extraParam0),
SD_FLOAT_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
manager.synchronize();
}
} // namespace ops
} // namespace sd
|
0f77244c8226331ca3de0f0acfc005f7c61bbb7e.cu
|
/*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
//
// @author Yurii Shyrma ([email protected])
//
#include <helpers/PointersManager.h>
#include <math/templatemath.h>
#include <ops/declarable/helpers/convolutions.h>
namespace sd {
namespace ops {
//////////////////////////////////////////////////////////////////////////
template <typename T>
SD_KERNEL static void pooling3dCuda(const void* vx, const sd::LongType* xShapeInfo, void* vz,
const sd::LongType* zShapeInfo, const int kD, const int kH, const int kW,
const int sD, const int sH, const int sW, const int pD, const int pH, const int pW,
const int dD, const int dH, const int dW, const int poolingMode,
const int extraParam0) {
// x input is [bS, iC, iD, iH, iW]
// z output is [bS, iC, oD, oH, oW]
const T* x = reinterpret_cast<const T*>(vx);
T* z = reinterpret_cast<T*>(vz);
__shared__ int rank, kDeff, kHeff, kWeff, iD, iH, iW, kProd;
__shared__ sd::LongType zLen, *sharedMem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<sd::LongType*>(shmem);
zLen = shape::length(zShapeInfo);
rank = 5;
kDeff = kD + (kD - 1) * (dD - 1);
kHeff = kH + (kH - 1) * (dH - 1);
kWeff = kW + (kW - 1) * (dW - 1);
iD = xShapeInfo[3];
iH = xShapeInfo[4];
iW = xShapeInfo[5];
kProd = kD * kH * kW;
}
__syncthreads();
const auto zInd = threadIdx.x + blockIdx.x * blockDim.x;
if (zInd >= zLen) return;
auto coords = sharedMem + threadIdx.x * rank;
shape::index2coords(zInd, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
int dstart = coords[2] * sD - pD;
int hstart = coords[3] * sH - pH;
int wstart = coords[4] * sW - pW;
int dend = dstart + kDeff;
int hend = hstart + kHeff;
int wend = wstart + kWeff;
if (dstart < 0) dstart += dD * ((-dstart + dD - 1) / dD);
if (hstart < 0) hstart += dH * ((-hstart + dH - 1) / dH);
if (wstart < 0) wstart += dW * ((-wstart + dW - 1) / dW);
if (dend > iD) dend -= dD * ((dend - iD + dD - 1) / dD);
if (hend > iH) hend -= dH * ((hend - iH + dH - 1) / dH);
if (wend > iW) wend -= dW * ((wend - iW + dW - 1) / dW);
switch (poolingMode) {
/*** max ***/
case 0: {
T max = -DataTypeUtils::max<T>();
for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) {
for (coords[3] = hstart; coords[3] < hend; coords[3] += dH) {
for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) {
T val = x[shape::getOffset(xShapeInfo, coords)];
if (val > max) max = val;
}
}
}
z[zOffset] = max;
} break;
/*** avg ***/
case 1: {
T sum = static_cast<T>(0.);
for (coords[2] = dstart; coords[2] < dend; coords[2] += dD)
for (coords[3] = hstart; coords[3] < hend; coords[3] += dH)
for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) sum += x[shape::getOffset(xShapeInfo, coords)];
if (extraParam0 == 0) { // Exclude padding
sd::Unsigned a = (dend - dstart) / dD + ((dend - dstart) % dD == 0 ? 0 : 1);
sd::Unsigned b = (hend - hstart) / dH + ((hend - hstart) % dH == 0 ? 0 : 1);
sd::Unsigned c = (wend - wstart) / dW + ((wend - wstart) % dW == 0 ? 0 : 1);
sum /= static_cast<T>(
a * b * c); // /= sd::math::sd_ceil<double,T>(static_cast<double>(dend - dstart) /
// static_cast<double>(dD)) * sd::math::sd_ceil<double,T>(static_cast<double>(hend - hstart) /
// static_cast<double>(dH)) * sd::math::sd_ceil<double,T>(static_cast<double>(wend - wstart) /
// static_cast<double>(dW)); //Accounts for dilation
} else if (extraParam0 == 1) // Include padding
sum /= kProd;
z[zOffset] = sum;
} break;
/*** pnorm ***/
case 2: {
T sum = static_cast<T>(0.);
for (coords[2] = dstart; coords[2] < dend; coords[2] += dD)
for (coords[3] = hstart; coords[3] < hend; coords[3] += dH)
for (coords[4] = wstart; coords[4] < wend; coords[4] += dW)
sum += sd::math::sd_pow<T, T, T>(sd::math::sd_abs<T>(x[shape::getOffset(xShapeInfo, coords)]), extraParam0);
sum = sd::math::sd_pow<T, T, T>(sum, (T)1.f / extraParam0);
z[zOffset] = sum;
} break;
}
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void pooling3dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const cudaStream_t* stream, const void* vx, const sd::LongType* xShapeInfo, void* vz,
const sd::LongType* zShapeInfo, const int kD, const int kH, const int kW,
const int sD, const int sH, const int sW, const int pD, const int pH, const int pW,
const int dD, const int dH, const int dW, const int poolingMode,
const int extraParam0) {
pooling3dCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(
vx, xShapeInfo, vz, zShapeInfo, kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0);
}
//////////////////////////////////////////////////////////////////////////
void ConvolutionUtils::pooling3d(sd::graph::Context& block, const NDArray& input, NDArray& output, const int kD,
const int kH, const int kW, const int sD, const int sH, const int sW, const int pD,
const int pH, const int pW, const int dD, const int dH, const int dW,
const int poolingMode, const int extraParam0) {
PointersManager manager(block.launchContext(), "pooling3d");
const int threadsPerBlock = SD_MAX_NUM_THREADS / 2;
const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = output.rankOf() * sizeof(sd::LongType) * threadsPerBlock + 128;
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(
input.dataType(), pooling3dCudaLauncher,
(blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.specialBuffer(),
input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), kD, kH, kW, sD, sH, sW, pD, pH, pW,
dD, dH, dW, poolingMode, extraParam0),
SD_FLOAT_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
manager.synchronize();
}
} // namespace ops
} // namespace sd
|
9d6b9af7ce1272d2ba8708cf2fc6bcfd5a4acf47.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zlobpcg_residuals.cu, normal z -> c, Tue Aug 30 09:38:42 2016
*/
#include "magmasparse_internal.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
#define PRECISION_c
// copied from scnrm2.cu in trunk/magmablas
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, magmaFloat_ptr x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
__global__ void
magma_clobpcg_res_kernel(
magma_int_t num_rows,
magma_int_t num_vecs,
magmaFloat_ptr evals,
magmaFloatComplex * X,
magmaFloatComplex * R,
magmaFloat_ptr res)
{
int row = blockIdx.x * blockDim.x + threadIdx.x; // global row index
if ( row < num_rows) {
for( int i=0; i < num_vecs; i++ ) {
R[row + i*num_rows] = R[row + i*num_rows]
+ MAGMA_C_MAKE( -evals[i], 0.0 )
* X[ row + i*num_rows ];
}
}
}
/*
magmablas_scnrm2_kernel(
int m,
magmaFloatComplex * da,
int ldda,
float * dxnorm )
{
const int i = threadIdx.x;
magmaFloatComplex_ptr dx = da + blockIdx.x * ldda;
__shared__ float sum[ BLOCK_SIZE ];
float re, lsum;
// get norm of dx
lsum = 0;
for( int j = i; j < m; j += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_C_REAL( dx[j] );
float im = MAGMA_C_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
if (i==0)
res[blockIdx.x] = sqrt(sum[0]);
}
*/
/**
Purpose
-------
For Block-LOBPCG, this routine computes the set of residuals.
R = Ax - x evalues
It replaces:
for(int i=0; i < n; i++) {
magma_caxpy(m, MAGMA_C_MAKE(-evalues[i],0),blockX+i*m,1,blockR+i*m,1);
}
The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
evalues magmaFloat_ptr
array of eigenvalues/approximations
@param[in]
X magmaFloatComplex_ptr
block of eigenvector approximations
@param[in]
R magmaFloatComplex_ptr
block of residuals
@param[in]
res magmaFloat_ptr
array of residuals
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_caux
********************************************************************/
extern "C" magma_int_t
magma_clobpcg_res(
magma_int_t num_rows,
magma_int_t num_vecs,
magmaFloat_ptr evalues,
magmaFloatComplex_ptr X,
magmaFloatComplex_ptr R,
magmaFloat_ptr res,
magma_queue_t queue )
{
// every thread handles one row
magma_int_t block_size = BLOCK_SIZE;
dim3 threads( block_size );
dim3 grid( magma_ceildiv( num_rows, block_size ) );
hipLaunchKernelGGL(( magma_clobpcg_res_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
num_rows, num_vecs, evalues, X, R, res );
return MAGMA_SUCCESS;
}
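/*
 * Added note (not part of the original source): per column j < num_vecs the
 * kernel above performs R(:,j) = R(:,j) - evalues[j] * X(:,j), so a single
 * call to magma_clobpcg_res() replaces the num_vecs-iteration magma_caxpy
 * loop quoted in the header comment; res itself is not written here and is
 * left for the caller to fill with the column norms of R.
 */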
|
9d6b9af7ce1272d2ba8708cf2fc6bcfd5a4acf47.cu
|
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zlobpcg_residuals.cu, normal z -> c, Tue Aug 30 09:38:42 2016
*/
#include "magmasparse_internal.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
#define PRECISION_c
// copied from scnrm2.cu in trunk/magmablas
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, magmaFloat_ptr x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
__global__ void
magma_clobpcg_res_kernel(
magma_int_t num_rows,
magma_int_t num_vecs,
magmaFloat_ptr evals,
magmaFloatComplex * X,
magmaFloatComplex * R,
magmaFloat_ptr res)
{
int row = blockIdx.x * blockDim.x + threadIdx.x; // global row index
if ( row < num_rows) {
for( int i=0; i < num_vecs; i++ ) {
R[row + i*num_rows] = R[row + i*num_rows]
+ MAGMA_C_MAKE( -evals[i], 0.0 )
* X[ row + i*num_rows ];
}
}
}
/*
magmablas_scnrm2_kernel(
int m,
magmaFloatComplex * da,
int ldda,
float * dxnorm )
{
const int i = threadIdx.x;
magmaFloatComplex_ptr dx = da + blockIdx.x * ldda;
__shared__ float sum[ BLOCK_SIZE ];
float re, lsum;
// get norm of dx
lsum = 0;
for( int j = i; j < m; j += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_C_REAL( dx[j] );
float im = MAGMA_C_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
if (i==0)
res[blockIdx.x] = sqrt(sum[0]);
}
*/
/**
Purpose
-------
For Block-LOBPCG, this routine computes the set of residuals.
R = Ax - x evalues
It replaces:
for(int i=0; i < n; i++) {
magma_caxpy(m, MAGMA_C_MAKE(-evalues[i],0),blockX+i*m,1,blockR+i*m,1);
}
The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
evalues magmaFloat_ptr
array of eigenvalues/approximations
@param[in]
X magmaFloatComplex_ptr
block of eigenvector approximations
@param[in,out]
R magmaFloatComplex_ptr
block of residuals
@param[in]
res magmaFloat_ptr
array of residuals
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_caux
********************************************************************/
extern "C" magma_int_t
magma_clobpcg_res(
magma_int_t num_rows,
magma_int_t num_vecs,
magmaFloat_ptr evalues,
magmaFloatComplex_ptr X,
magmaFloatComplex_ptr R,
magmaFloat_ptr res,
magma_queue_t queue )
{
// every thread handles one row
magma_int_t block_size = BLOCK_SIZE;
dim3 threads( block_size );
dim3 grid( magma_ceildiv( num_rows, block_size ) );
magma_clobpcg_res_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( num_rows, num_vecs, evalues, X, R, res );
return MAGMA_SUCCESS;
}
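/*
Illustrative call only (the device arrays d_evalues, d_X, d_R, d_res, the
sizes m and n, and the queue are assumed to have been set up by the caller,
and R is assumed to hold A*X on entry):
    magma_clobpcg_res( m, n, d_evalues, d_X, d_R, d_res, queue );
On exit R holds A*X - X*diag(evalues).
*/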
|
aad42b398aa17223c3cc0adc466d656e64aefaf4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2016 by Contributors
* \file permutohedral.cu
* \brief
* \author Junyuan Xie
*/
#include "./permutohedral-inl.h"
namespace mxnet {
namespace op {
namespace permutohedral {
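// The kernels below implement a permutohedral-lattice filter in the style of
// Adams et al., "Fast High-Dimensional Filtering Using the Permutohedral
// Lattice": init() embeds each input point into the lattice and computes its
// barycentric weights, splat() scatters values onto the enclosing lattice
// points, blur() convolves along each lattice axis, and slice() gathers the
// filtered values back at the input points.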
template<int key_size>
__global__ void init(CuHashTable<key_size> table,
const int n_elements,
const float *pos,
const float *scale,
Pair *matrix) {
float elevated[key_size+1];
int greedy[key_size+1];
int rank[key_size+1];
float barycentric[key_size+2];
short key[key_size];
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= n_elements) return;
float sm = 0;
for (int i = key_size; i > 0; i--) {
float cf = pos[(i-1)*n_elements + idx]*scale[i-1];
elevated[i] = sm - i*cf;
sm += cf;
}
elevated[0] = sm;
// find the closest zero-colored lattice point
// greedily search for the closest zero-colored lattice point
short sum = 0;
for (int i = 0; i <= key_size; i++) {
float v = elevated[i]*(1.0f/(key_size+1));
float up = ceilf(v) * (key_size+1);
float down = floorf(v) * (key_size+1);
if (up - elevated[i] < elevated[i] - down) {
greedy[i] = static_cast<short>(up);
} else {
greedy[i] = static_cast<short>(down);
}
sum += greedy[i];
}
sum /= key_size+1;
// sort differential to find the permutation between this simplex and the canonical one
for (int i = 0; i <= key_size; i++) {
rank[i] = 0;
for (int j = 0; j <= key_size; j++) {
if (elevated[i] - greedy[i] < elevated[j] - greedy[j] ||
(elevated[i] - greedy[i] == elevated[j] - greedy[j]
&& i > j)) {
rank[i]++;
}
}
}
if (sum > 0) { // sum too large, need to bring down the ones with the smallest differential
for (int i = 0; i <= key_size; i++) {
if (rank[i] >= key_size + 1 - sum) {
greedy[i] -= key_size+1;
rank[i] += sum - (key_size+1);
} else {
rank[i] += sum;
}
}
} else if (sum < 0) { // sum too small, need to bring up the ones with largest differential
for (int i = 0; i <= key_size; i++) {
if (rank[i] < -sum) {
greedy[i] += key_size+1;
rank[i] += (key_size+1) + sum;
} else {
rank[i] += sum;
}
}
}
// turn delta into barycentric coords
for (int i = 0; i <= key_size+1; i++) {
barycentric[i] = 0;
}
for (int i = 0; i <= key_size; i++) {
float delta = (elevated[i] - greedy[i]) * (1.0f/(key_size+1));
barycentric[key_size-rank[i]] += delta;
barycentric[key_size+1-rank[i]] -= delta;
}
barycentric[0] += 1.0f + barycentric[key_size+1];
for (int color = 0; color <= key_size; color++) {
// Compute the location of the lattice point explicitly (all but
// the last coordinate - it's redundant because they sum to zero)
for (int i = 0; i < key_size; i++) {
key[i] = greedy[i] + color;
if (rank[i] > key_size-color) key[i] -= (key_size+1);
}
Pair r;
r.index = table.insert(key, idx*(key_size+1)+color);
r.weight = barycentric[color];
matrix[idx*(key_size+1) + color] = r;
}
}
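// splat: every input element adds its value, scaled by the barycentric
// weight computed in init(), into the entries of its key_size+1 enclosing
// lattice points; with normalize an extra homogeneous coordinate of 1 is
// accumulated so that slice() can divide the weight sum back out.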
template<int key_size, bool normalize>
__global__ void splat(CuHashTable<key_size> table,
const int32_t n_elements,
const int32_t val_size,
float *data,
float *val,
Pair *matrix) {
const int idx = threadIdx.y + blockIdx.y * blockDim.y;
if (idx >= n_elements) return;
const int color = threadIdx.x;
Pair r = matrix[idx*(key_size+1)+color];
float *dst = val + r.index*val_size;
if (!normalize) {
for (int j = 0; j < val_size; j++) {
atomicAdd(dst+j, data[j*n_elements + idx]*r.weight);
}
} else {
for (int j = 0; j < val_size-1; j++) {
atomicAdd(dst+j, data[j*n_elements + idx]*r.weight);
}
atomicAdd(dst+val_size-1, 1.f*r.weight);
}
}
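// blur: one pass per lattice axis (color); every occupied lattice point adds
// 0.5x the value of each of its two neighbours along that axis, which
// approximates a Gaussian blur over the lattice.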
template<int key_size>
__global__ static void blur(CuHashTable<key_size> table,
const int32_t val_size,
const int32_t color,
float *val,
float *new_val,
Pair *matrix) {
short key[key_size+1];
short np[key_size+1];
short nm[key_size+1];
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= table.n_keys_) return;
// Check if I'm valid
if (matrix[idx].index != idx) return;
// find my key and the keys of my neighbours
for (int i = 0; i < key_size; i++) {
key[i] = table.keys_[idx*key_size+i];
np[i] = key[i]+1;
nm[i] = key[i]-1;
}
np[color] -= key_size+1;
nm[color] += key_size+1;
int offNp = table.find(np);
int offNm = table.find(nm);
float *valMe = val + val_size*idx;
float *valNp = val + val_size*offNp;
float *valNm = val + val_size*offNm;
float *valOut = new_val + val_size*idx;
for (int i = 0; i < val_size; i++) {
float o = valMe[i];
if (offNp >= 0) o += 0.5f*valNp[i];
if (offNm >= 0) o += 0.5f*valNm[i];
valOut[i] = o;
}
}
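// slice: each input element gathers the blurred lattice values back using its
// barycentric weights; alpha compensates for the gain of the blur passes, and
// in normalize mode the homogeneous coordinate accumulated by splat() is used
// to renormalize.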
template<int key_size, bool normalize, bool save>
__global__ void slice(CuHashTable<key_size> table,
const int32_t n_elements,
const int32_t val_size,
float *val,
float *out,
Pair *matrix,
float *norm) {
const float alpha = 1.0f / (1+powf(2, -key_size-1));
int32_t index[key_size+1];
float weight[key_size+1];
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_elements) return;
for (int i = 0; i <= key_size; ++i) {
Pair r = matrix[idx*(key_size+1) + i];
index[i] = r.index;
weight[i] = r.weight;
}
if (!normalize) {
for (int j = 0; j < val_size; ++j) {
float v = 0.0f;
for (int i = 0; i <= key_size; ++i) {
v += weight[i]*val[index[i]*val_size + j];
}
out[j*n_elements + idx] = v * alpha;
}
} else {
float n = 0.0f;
for (int i = 0; i <= key_size; ++i) {
n += weight[i]*val[index[i]*val_size + val_size - 1];
}
n = 1.0f/n;
for (int j = 0; j < val_size-1; ++j) {
float v = 0.0f;
for (int i = 0; i <= key_size; ++i) {
v += weight[i]*val[index[i]*val_size + j];
}
out[j*n_elements + idx] = v * n;
}
if (save)
norm[idx] = n;
}
}
template<int key_size, bool normalize>
__global__ void pos_grad_init(const int32_t n_elements, const int32_t val_size,
float *ograd, float *pos, float *data, float *out, float *norm, float *buf) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_elements) return;
float *f1 = buf;
float *f2 = f1 + key_size*val_size*n_elements;
float *f3 = f2 + val_size*n_elements;
float *f4 = f3 + key_size*val_size*n_elements;
float p[key_size];
for (int i = 0; i < key_size; ++i)
p[i] = pos[i*n_elements + idx];
float n;
if (normalize)
n = norm[idx];
float deltan = 0.f;
for (int j = 0; j < (normalize ? val_size - 1 : val_size); ++j) {
const int idx24 = j*n_elements + idx;
const float vj = data[idx24];
const float deltaj = normalize ? ograd[idx24]*n : ograd[idx24];
f2[idx24] = vj;
f4[idx24] = deltaj;
if (normalize)
deltan -= out[idx24]*deltaj;
for (int i = 0; i < key_size; ++i) {
const int idx13 = (i*val_size + j)*n_elements + idx;
f1[idx13] = p[i]*vj;
f3[idx13] = p[i]*deltaj;
}
}
if (normalize) {
const int idx24 = (val_size-1)*n_elements + idx;
const float vj = 1.f;
f2[idx24] = vj;
f4[idx24] = deltan;
for (int i = 0; i < key_size; ++i) {
const int idx13 = (i*val_size + val_size-1)*n_elements + idx;
f1[idx13] = p[i]*vj;
f3[idx13] = p[i]*deltan;
}
}
}
template<int key_size, bool normalize>
__global__ void pos_grad_reduce(const int32_t n_elements, const int32_t val_size,
float *ograd, float *pos, float *data, float *out,
float *norm, float *buf, float *pgrad) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_elements) return;
float *f1 = buf;
float *f2 = f1 + key_size*val_size*n_elements;
float *f3 = f2 + val_size*n_elements;
float *f4 = f3 + key_size*val_size*n_elements;
float p[key_size];
float pg[key_size];
for (int i = 0; i < key_size; ++i) {
p[i] = pos[i*n_elements + idx];
pg[i] = 0;
}
float n;
if (normalize)
n = norm[idx];
float deltan = 0.f;
for (int j = 0; j < (normalize ? val_size - 1 : val_size); ++j) {
const int idx24 = j*n_elements + idx;
const float vj = data[idx24];
const float deltaj = normalize ? ograd[idx24]*n : ograd[idx24];
if (normalize)
deltan -= out[idx24]*deltaj;
for (int i = 0; i < key_size; ++i) {
const int idx13 = (i*val_size + j)*n_elements + idx;
pg[i] += deltaj*f1[idx13] - deltaj*p[i]*f2[idx24]
+ vj*f3[idx13] - vj*p[i]*f4[idx24];
}
}
if (normalize) {
const int idx24 = (val_size-1)*n_elements + idx;
const float vj = 1.f;
for (int i = 0; i < key_size; ++i) {
const int idx13 = (i*val_size + val_size-1)*n_elements + idx;
pg[i] += deltan*f1[idx13] - deltan*p[i]*f2[idx24]
+ vj*f3[idx13] - vj*p[i]*f4[idx24];
}
}
for (int i = 0; i < key_size; ++i) {
pgrad[i*n_elements + idx] = pg[i];
}
}
}
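// GetTempSpace carves one temp-space request into the hash-table entries
// (2 ints per key), the lattice keys, two value buffers used as ping-pong
// storage by blur(), and the splat matrix of (index, weight) pairs.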
template<int key_size>
void CuPermutohedralOp<key_size>::GetTempSpace(const OpContext &ctx, int val_size) {
using namespace mshadow;
using namespace permutohedral;
Stream<gpu> *s = ctx.get_stream<gpu>();
Tensor<gpu, 1, uint8_t> tmp =
ctx.requested[kTemp].get_space_typed<gpu, 1, uint8_t>(
Shape1(n_keys_*2*sizeof(int32_t) +
n_keys_*key_size*sizeof(int16_t) +
n_keys_*val_size*sizeof(float) +
n_keys_*val_size*sizeof(float) +
n_keys_*sizeof(Pair)), s);
uint8_t *ptr = tmp.dptr_;
int32_t *entries = (int32_t*)ptr;
entries_ = Tensor<gpu, 1, int32_t>(entries, Shape1(n_keys_*2), s);
ptr += n_keys_*2*sizeof(int32_t);
int16_t *keys = (int16_t*)ptr;
keys_ = Tensor<gpu, 2, int16_t>(keys, Shape2(key_size, n_keys_), s);
ptr += n_keys_*key_size*sizeof(int16_t);
float *vals = (float*)ptr;
vals_ = Tensor<gpu, 2, float>(vals, Shape2(val_size, n_keys_), s);
ptr += n_keys_*val_size*sizeof(float);
float *new_vals = (float*)ptr;
new_vals_ = Tensor<gpu, 2, float>(new_vals, Shape2(val_size, n_keys_), s);
ptr += n_keys_*val_size*sizeof(float);
Pair *matrix = (Pair*)ptr;
matrix_ = Tensor<gpu, 1, Pair>(matrix, Shape1(n_keys_), s);
ptr += n_keys_*sizeof(Pair);
CHECK_EQ(ptr, tmp.dptr_ + tmp.shape_.Size());
}
template<int key_size>
void CuPermutohedralOp<key_size>::Filter(hipStream_t stream, permutohedral::CuHashTable<key_size> table, bool normalize, int val_size,
float *scale, float *data, float *pos, float *out, float *norm) {
using namespace permutohedral;
vals_ = 0;
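// splat launch: blockDim.x spans the key_size+1 lattice colors, blockDim.y
// packs lblock_/(key_size+1) elements per block, and the grid covers all
// n_elements_ along y.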
if (normalize) {
hipLaunchKernelGGL(( splat<key_size, true>), dim3(dim3(1, (n_elements_-1)/(lblock_/(key_size+1))+1, 1)), dim3(dim3(key_size+1, lblock_/(key_size+1), 1)), 0, stream,
table, n_elements_, val_size, data, vals_.dptr_, matrix_.dptr_);
} else {
hipLaunchKernelGGL(( splat<key_size, false>), dim3(dim3(1, (n_elements_-1)/(lblock_/(key_size+1))+1, 1)), dim3(dim3(key_size+1, lblock_/(key_size+1), 1)), 0, stream,
table, n_elements_, val_size, data, vals_.dptr_, matrix_.dptr_);
}
CHECK_EQ(hipGetLastError(), hipSuccess);
float *pval = vals_.dptr_;
float *pnew_val = new_vals_.dptr_;
for (int j = 0; j <= key_size; ++j) {
hipLaunchKernelGGL(( blur<key_size>), dim3(dim3((n_keys_-1)/lblock_+1, 1, 1)), dim3(dim3(lblock_, 1, 1)), 0, stream,
table, val_size, j, pval, pnew_val, matrix_.dptr_);
CHECK_EQ(hipGetLastError(), hipSuccess);
std::swap(pval, pnew_val);
}
if (normalize) {
if (norm == NULL) {
hipLaunchKernelGGL(( slice<key_size, true, false>), dim3(dim3(nblock_, 1, 1)), dim3(dim3(lblock_, 1, 1)), 0, stream,
table, n_elements_, val_size, pval, out, matrix_.dptr_, NULL);
} else {
hipLaunchKernelGGL(( slice<key_size, true, true>), dim3(dim3(nblock_, 1, 1)), dim3(dim3(lblock_, 1, 1)), 0, stream,
table, n_elements_, val_size, pval, out, matrix_.dptr_, norm);
}
} else {
hipLaunchKernelGGL(( slice<key_size, false, false>), dim3(dim3(nblock_, 1, 1)), dim3(dim3(lblock_, 1, 1)), 0, stream,
table, n_elements_, val_size, pval, out, matrix_.dptr_, NULL);
}
CHECK_EQ(hipGetLastError(), hipSuccess);
}
template<int key_size>
void CuPermutohedralOp<key_size>::Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_args) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace permutohedral;
Stream<gpu> *s = ctx.get_stream<gpu>();
hipStream_t stream = Stream<gpu>::GetStream(s);
Tensor<gpu, 1, float> scale = aux_args[kScale].get<gpu, 1, float>(s);
if (!init_) {
TShape data_shape = in_data[kData].shape_;
batch_size_ = data_shape[0];
data_size_ = data_shape[1];
if (param_.normalize) {
val_size_ = data_size_ + 1;
} else {
val_size_ = data_size_;
}
n_elements_ = data_shape.Size()/batch_size_/data_size_;
n_keys_ = n_elements_*(key_size+1);
CHECK_EQ(in_data[kPos].size(1), key_size);
lblock_ = cuda::kBaseThreadNum;
nblock_ = (n_elements_-1)/lblock_+1;
float cpu_scale[key_size];
for (int i = 0; i < key_size; i++) {
cpu_scale[i] = (key_size+1)*sqrtf((2.0/3.0)/((i+1)*(i+2)));
}
CHECK_EQ(hipMemcpyAsync((void*)scale.dptr_, (void*)cpu_scale, key_size*sizeof(float), hipMemcpyHostToDevice, stream), hipSuccess);
init_ = true;
}
Shape<3> shape = Shape3(batch_size_, data_size_, n_elements_);
Tensor<gpu, 3, float> in = in_data[kData].get_with_shape<gpu, 3, float>(shape, s);
Tensor<gpu, 3, float> out = out_data[kOut].get_with_shape<gpu, 3, float>(shape, s);
shape[1] = key_size;
Tensor<gpu, 3, float> pos = in_data[kPos].get_with_shape<gpu, 3, float>(shape, s);
shape[1] = 1;
Tensor<gpu, 3, float> norm = out_data[kNorm].get_with_shape<gpu, 3, float>(shape, s);
GetTempSpace(ctx, val_size_);
CuHashTable<key_size> table(n_keys_, entries_.dptr_, keys_.dptr_);
for (int i = 0; i < batch_size_; ++i) {
entries_ = -1;
hipLaunchKernelGGL(( init<key_size>), dim3(dim3(nblock_, 1, 1)), dim3(dim3(lblock_,1,1)), 0, stream,
table, n_elements_, pos.dptr_ + i*key_size*n_elements_, scale.dptr_, matrix_.dptr_);
CHECK_EQ(hipGetLastError(), hipSuccess);
Filter(stream, table, param_.normalize, val_size_,
scale.dptr_,
in.dptr_+i*data_size_*n_elements_,
pos.dptr_ + i*key_size*n_elements_,
out.dptr_ + i*data_size_*n_elements_,
norm.dptr_ + i*n_elements_);
}
}
template<int key_size>
void CuPermutohedralOp<key_size>::Backward(const OpContext &ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_args) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace permutohedral;
Stream<gpu> *s = ctx.get_stream<gpu>();
hipStream_t stream = Stream<gpu>::GetStream(s);
Tensor<gpu, 1, float> scale = aux_args[kScale].get<gpu, 1, float>(s);
Shape<3> shape = Shape3(batch_size_, data_size_, n_elements_);
Tensor<gpu, 3, float> out = out_data[kOut].get_with_shape<gpu, 3, float>(shape, s);
Tensor<gpu, 3, float> ograd = out_grad[kOut].get_with_shape<gpu, 3, float>(shape, s);
Tensor<gpu, 3, float> data = in_data[kData].get_with_shape<gpu, 3, float>(shape, s);
Tensor<gpu, 3, float> data_grad = in_grad[kData].get_with_shape<gpu, 3, float>(shape, s);
shape[1] = key_size;
Tensor<gpu, 3, float> pos = in_data[kPos].get_with_shape<gpu, 3, float>(shape, s);
Tensor<gpu, 3, float> pos_grad = in_grad[kPos].get_with_shape<gpu, 3, float>(shape, s);
shape[1] = 1;
Tensor<gpu, 3, float> norm = out_data[kNorm].get_with_shape<gpu, 3, float>(shape, s);
GetTempSpace(ctx, req[kPos] == kNullOp ? val_size_ : std::max(val_size_, 2*(key_size+1)*val_size_));
CuHashTable<key_size> table(n_keys_, entries_.dptr_, keys_.dptr_);
for (int i = 0; i < batch_size_; ++i) {
entries_ = -1;
hipLaunchKernelGGL(( init<key_size>), dim3(dim3(nblock_, 1, 1)), dim3(dim3(lblock_,1,1)), 0, stream,
table, n_elements_, pos.dptr_ + i*key_size*n_elements_, scale.dptr_, matrix_.dptr_);
CHECK_EQ(hipGetLastError(), hipSuccess);
if (req[kData] != kNullOp) {
CHECK(req[kData] != kAddTo);
Filter(stream, table, param_.normalize, val_size_,
scale.dptr_,
ograd.dptr_ + i*data_size_*n_elements_,
pos.dptr_ + i*key_size*n_elements_,
data_grad.dptr_ + i*data_size_*n_elements_,
norm.dptr_ + i*n_elements_);
}
if (req[kPos] != kNullOp) {
CHECK(req[kData] != kAddTo);
if (param_.normalize) {
hipLaunchKernelGGL(( pos_grad_init<key_size, true>), dim3(dim3(nblock_, 1, 1)), dim3(dim3(lblock_, 1, 1)), 0, stream,
n_elements_, val_size_,
ograd.dptr_ + i*data_size_*n_elements_,
pos.dptr_ + i*key_size*n_elements_,
data.dptr_ + i*data_size_*n_elements_,
out.dptr_ + i*data_size_*n_elements_,
norm.dptr_ + i*n_elements_,
new_vals_.dptr_);
} else {
hipLaunchKernelGGL(( pos_grad_init<key_size, false>), dim3(dim3(nblock_, 1, 1)), dim3(dim3(lblock_, 1, 1)), 0, stream,
n_elements_, val_size_,
ograd.dptr_ + i*data_size_*n_elements_,
pos.dptr_ + i*key_size*n_elements_,
data.dptr_ + i*data_size_*n_elements_,
out.dptr_ + i*data_size_*n_elements_,
NULL,
new_vals_.dptr_);
}
CHECK_EQ(hipGetLastError(), hipSuccess);
Filter(stream, table, false, 2*(key_size+1)*val_size_,
scale.dptr_,
new_vals_.dptr_,
pos.dptr_ + i*key_size*n_elements_,
key_size%2 ? new_vals_.dptr_ : vals_.dptr_,
NULL);
if (param_.normalize) {
hipLaunchKernelGGL(( pos_grad_reduce<key_size, true>), dim3(dim3(nblock_, 1, 1)), dim3(dim3(lblock_, 1, 1)), 0, stream,
n_elements_, val_size_,
ograd.dptr_ + i*data_size_*n_elements_,
pos.dptr_ + i*key_size*n_elements_,
data.dptr_ + i*data_size_*n_elements_,
out.dptr_ + i*data_size_*n_elements_,
norm.dptr_ + i*n_elements_,
key_size%2 ? new_vals_.dptr_ : vals_.dptr_,
pos_grad.dptr_ + i*key_size*n_elements_);
} else {
hipLaunchKernelGGL(( pos_grad_reduce<key_size, false>), dim3(dim3(nblock_, 1, 1)), dim3(dim3(lblock_, 1, 1)), 0, stream,
n_elements_, val_size_,
ograd.dptr_ + i*data_size_*n_elements_,
pos.dptr_ + i*key_size*n_elements_,
data.dptr_ + i*data_size_*n_elements_,
out.dptr_ + i*data_size_*n_elements_,
NULL,
key_size%2 ? new_vals_.dptr_ : vals_.dptr_,
pos_grad.dptr_ + i*key_size*n_elements_);
}
CHECK_EQ(hipGetLastError(), hipSuccess);
}
}
}
template<>
Operator *CreateOp<gpu>(PermutohedralParam param, int key_size) {
switch (key_size) {
case 2: return new CuPermutohedralOp<2>(param);
case 3: return new CuPermutohedralOp<3>(param);
case 4: return new CuPermutohedralOp<4>(param);
case 5: return new CuPermutohedralOp<5>(param);
case 6: return new CuPermutohedralOp<6>(param);
case 7: return new CuPermutohedralOp<7>(param);
case 8: return new CuPermutohedralOp<8>(param);
case 9: return new CuPermutohedralOp<9>(param);
case 10: return new CuPermutohedralOp<10>(param);
case 11: return new CuPermutohedralOp<11>(param);
case 12: return new CuPermutohedralOp<12>(param);
case 13: return new CuPermutohedralOp<13>(param);
case 14: return new CuPermutohedralOp<14>(param);
case 15: return new CuPermutohedralOp<15>(param);
case 16: return new CuPermutohedralOp<16>(param);
default:
LOG(FATAL) << "GPU not supported";
return NULL;
}
}
} // namespace op
} // namespace mxnet
|
aad42b398aa17223c3cc0adc466d656e64aefaf4.cu
|
/*!
* Copyright (c) 2016 by Contributors
* \file permutohedral.cu
* \brief
* \author Junyuan Xie
*/
#include "./permutohedral-inl.h"
namespace mxnet {
namespace op {
namespace permutohedral {
template<int key_size>
__global__ void init(CuHashTable<key_size> table,
const int n_elements,
const float *pos,
const float *scale,
Pair *matrix) {
float elevated[key_size+1];
int greedy[key_size+1];
int rank[key_size+1];
float barycentric[key_size+2];
short key[key_size];
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= n_elements) return;
float sm = 0;
for (int i = key_size; i > 0; i--) {
float cf = pos[(i-1)*n_elements + idx]*scale[i-1];
elevated[i] = sm - i*cf;
sm += cf;
}
elevated[0] = sm;
// find the closest zero-colored lattice point
// greedily search for the closest zero-colored lattice point
short sum = 0;
for (int i = 0; i <= key_size; i++) {
float v = elevated[i]*(1.0f/(key_size+1));
float up = ceilf(v) * (key_size+1);
float down = floorf(v) * (key_size+1);
if (up - elevated[i] < elevated[i] - down) {
greedy[i] = static_cast<short>(up);
} else {
greedy[i] = static_cast<short>(down);
}
sum += greedy[i];
}
sum /= key_size+1;
// sort differential to find the permutation between this simplex and the canonical one
for (int i = 0; i <= key_size; i++) {
rank[i] = 0;
for (int j = 0; j <= key_size; j++) {
if (elevated[i] - greedy[i] < elevated[j] - greedy[j] ||
(elevated[i] - greedy[i] == elevated[j] - greedy[j]
&& i > j)) {
rank[i]++;
}
}
}
if (sum > 0) { // sum too large, need to bring down the ones with the smallest differential
for (int i = 0; i <= key_size; i++) {
if (rank[i] >= key_size + 1 - sum) {
greedy[i] -= key_size+1;
rank[i] += sum - (key_size+1);
} else {
rank[i] += sum;
}
}
} else if (sum < 0) { // sum too small, need to bring up the ones with largest differential
for (int i = 0; i <= key_size; i++) {
if (rank[i] < -sum) {
greedy[i] += key_size+1;
rank[i] += (key_size+1) + sum;
} else {
rank[i] += sum;
}
}
}
// turn delta into barycentric coords
for (int i = 0; i <= key_size+1; i++) {
barycentric[i] = 0;
}
for (int i = 0; i <= key_size; i++) {
float delta = (elevated[i] - greedy[i]) * (1.0f/(key_size+1));
barycentric[key_size-rank[i]] += delta;
barycentric[key_size+1-rank[i]] -= delta;
}
barycentric[0] += 1.0f + barycentric[key_size+1];
for (int color = 0; color <= key_size; color++) {
// Compute the location of the lattice point explicitly (all but
// the last coordinate - it's redundant because they sum to zero)
for (int i = 0; i < key_size; i++) {
key[i] = greedy[i] + color;
if (rank[i] > key_size-color) key[i] -= (key_size+1);
}
Pair r;
r.index = table.insert(key, idx*(key_size+1)+color);
r.weight = barycentric[color];
matrix[idx*(key_size+1) + color] = r;
}
}
template<int key_size, bool normalize>
__global__ void splat(CuHashTable<key_size> table,
const int32_t n_elements,
const int32_t val_size,
float *data,
float *val,
Pair *matrix) {
const int idx = threadIdx.y + blockIdx.y * blockDim.y;
if (idx >= n_elements) return;
const int color = threadIdx.x;
Pair r = matrix[idx*(key_size+1)+color];
float *dst = val + r.index*val_size;
if (!normalize) {
for (int j = 0; j < val_size; j++) {
atomicAdd(dst+j, data[j*n_elements + idx]*r.weight);
}
} else {
for (int j = 0; j < val_size-1; j++) {
atomicAdd(dst+j, data[j*n_elements + idx]*r.weight);
}
atomicAdd(dst+val_size-1, 1.f*r.weight);
}
}
template<int key_size>
__global__ static void blur(CuHashTable<key_size> table,
const int32_t val_size,
const int32_t color,
float *val,
float *new_val,
Pair *matrix) {
short key[key_size+1];
short np[key_size+1];
short nm[key_size+1];
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= table.n_keys_) return;
// Check if I'm valid
if (matrix[idx].index != idx) return;
// find my key and the keys of my neighbours
for (int i = 0; i < key_size; i++) {
key[i] = table.keys_[idx*key_size+i];
np[i] = key[i]+1;
nm[i] = key[i]-1;
}
np[color] -= key_size+1;
nm[color] += key_size+1;
int offNp = table.find(np);
int offNm = table.find(nm);
float *valMe = val + val_size*idx;
float *valNp = val + val_size*offNp;
float *valNm = val + val_size*offNm;
float *valOut = new_val + val_size*idx;
for (int i = 0; i < val_size; i++) {
float o = valMe[i];
if (offNp >= 0) o += 0.5f*valNp[i];
if (offNm >= 0) o += 0.5f*valNm[i];
valOut[i] = o;
}
}
template<int key_size, bool normalize, bool save>
__global__ void slice(CuHashTable<key_size> table,
const int32_t n_elements,
const int32_t val_size,
float *val,
float *out,
Pair *matrix,
float *norm) {
const float alpha = 1.0f / (1+powf(2, -key_size-1));
int32_t index[key_size+1];
float weight[key_size+1];
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_elements) return;
for (int i = 0; i <= key_size; ++i) {
Pair r = matrix[idx*(key_size+1) + i];
index[i] = r.index;
weight[i] = r.weight;
}
if (!normalize) {
for (int j = 0; j < val_size; ++j) {
float v = 0.0f;
for (int i = 0; i <= key_size; ++i) {
v += weight[i]*val[index[i]*val_size + j];
}
out[j*n_elements + idx] = v * alpha;
}
} else {
float n = 0.0f;
for (int i = 0; i <= key_size; ++i) {
n += weight[i]*val[index[i]*val_size + val_size - 1];
}
n = 1.0f/n;
for (int j = 0; j < val_size-1; ++j) {
float v = 0.0f;
for (int i = 0; i <= key_size; ++i) {
v += weight[i]*val[index[i]*val_size + j];
}
out[j*n_elements + idx] = v * n;
}
if (save)
norm[idx] = n;
}
}
template<int key_size, bool normalize>
__global__ void pos_grad_init(const int32_t n_elements, const int32_t val_size,
float *ograd, float *pos, float *data, float *out, float *norm, float *buf) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_elements) return;
float *f1 = buf;
float *f2 = f1 + key_size*val_size*n_elements;
float *f3 = f2 + val_size*n_elements;
float *f4 = f3 + key_size*val_size*n_elements;
float p[key_size];
for (int i = 0; i < key_size; ++i)
p[i] = pos[i*n_elements + idx];
float n;
if (normalize)
n = norm[idx];
float deltan = 0.f;
for (int j = 0; j < (normalize ? val_size - 1 : val_size); ++j) {
const int idx24 = j*n_elements + idx;
const float vj = data[idx24];
const float deltaj = normalize ? ograd[idx24]*n : ograd[idx24];
f2[idx24] = vj;
f4[idx24] = deltaj;
if (normalize)
deltan -= out[idx24]*deltaj;
for (int i = 0; i < key_size; ++i) {
const int idx13 = (i*val_size + j)*n_elements + idx;
f1[idx13] = p[i]*vj;
f3[idx13] = p[i]*deltaj;
}
}
if (normalize) {
const int idx24 = (val_size-1)*n_elements + idx;
const float vj = 1.f;
f2[idx24] = vj;
f4[idx24] = deltan;
for (int i = 0; i < key_size; ++i) {
const int idx13 = (i*val_size + val_size-1)*n_elements + idx;
f1[idx13] = p[i]*vj;
f3[idx13] = p[i]*deltan;
}
}
}
template<int key_size, bool normalize>
__global__ void pos_grad_reduce(const int32_t n_elements, const int32_t val_size,
float *ograd, float *pos, float *data, float *out,
float *norm, float *buf, float *pgrad) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_elements) return;
float *f1 = buf;
float *f2 = f1 + key_size*val_size*n_elements;
float *f3 = f2 + val_size*n_elements;
float *f4 = f3 + key_size*val_size*n_elements;
float p[key_size];
float pg[key_size];
for (int i = 0; i < key_size; ++i) {
p[i] = pos[i*n_elements + idx];
pg[i] = 0;
}
float n;
if (normalize)
n = norm[idx];
float deltan = 0.f;
for (int j = 0; j < (normalize ? val_size - 1 : val_size); ++j) {
const int idx24 = j*n_elements + idx;
const float vj = data[idx24];
const float deltaj = normalize ? ograd[idx24]*n : ograd[idx24];
if (normalize)
deltan -= out[idx24]*deltaj;
for (int i = 0; i < key_size; ++i) {
const int idx13 = (i*val_size + j)*n_elements + idx;
pg[i] += deltaj*f1[idx13] - deltaj*p[i]*f2[idx24]
+ vj*f3[idx13] - vj*p[i]*f4[idx24];
}
}
if (normalize) {
const int idx24 = (val_size-1)*n_elements + idx;
const float vj = 1.f;
for (int i = 0; i < key_size; ++i) {
const int idx13 = (i*val_size + val_size-1)*n_elements + idx;
pg[i] += deltan*f1[idx13] - deltan*p[i]*f2[idx24]
+ vj*f3[idx13] - vj*p[i]*f4[idx24];
}
}
for (int i = 0; i < key_size; ++i) {
pgrad[i*n_elements + idx] = pg[i];
}
}
}
template<int key_size>
void CuPermutohedralOp<key_size>::GetTempSpace(const OpContext &ctx, int val_size) {
using namespace mshadow;
using namespace permutohedral;
Stream<gpu> *s = ctx.get_stream<gpu>();
Tensor<gpu, 1, uint8_t> tmp =
ctx.requested[kTemp].get_space_typed<gpu, 1, uint8_t>(
Shape1(n_keys_*2*sizeof(int32_t) +
n_keys_*key_size*sizeof(int16_t) +
n_keys_*val_size*sizeof(float) +
n_keys_*val_size*sizeof(float) +
n_keys_*sizeof(Pair)), s);
uint8_t *ptr = tmp.dptr_;
int32_t *entries = (int32_t*)ptr;
entries_ = Tensor<gpu, 1, int32_t>(entries, Shape1(n_keys_*2), s);
ptr += n_keys_*2*sizeof(int32_t);
int16_t *keys = (int16_t*)ptr;
keys_ = Tensor<gpu, 2, int16_t>(keys, Shape2(key_size, n_keys_), s);
ptr += n_keys_*key_size*sizeof(int16_t);
float *vals = (float*)ptr;
vals_ = Tensor<gpu, 2, float>(vals, Shape2(val_size, n_keys_), s);
ptr += n_keys_*val_size*sizeof(float);
float *new_vals = (float*)ptr;
new_vals_ = Tensor<gpu, 2, float>(new_vals, Shape2(val_size, n_keys_), s);
ptr += n_keys_*val_size*sizeof(float);
Pair *matrix = (Pair*)ptr;
matrix_ = Tensor<gpu, 1, Pair>(matrix, Shape1(n_keys_), s);
ptr += n_keys_*sizeof(Pair);
CHECK_EQ(ptr, tmp.dptr_ + tmp.shape_.Size());
}
template<int key_size>
void CuPermutohedralOp<key_size>::Filter(cudaStream_t stream, permutohedral::CuHashTable<key_size> table, bool normalize, int val_size,
float *scale, float *data, float *pos, float *out, float *norm) {
using namespace permutohedral;
vals_ = 0;
if (normalize) {
splat<key_size, true><<<dim3(1, (n_elements_-1)/(lblock_/(key_size+1))+1, 1), dim3(key_size+1, lblock_/(key_size+1), 1), 0, stream>>>(
table, n_elements_, val_size, data, vals_.dptr_, matrix_.dptr_);
} else {
splat<key_size, false><<<dim3(1, (n_elements_-1)/(lblock_/(key_size+1))+1, 1), dim3(key_size+1, lblock_/(key_size+1), 1), 0, stream>>>(
table, n_elements_, val_size, data, vals_.dptr_, matrix_.dptr_);
}
CHECK_EQ(cudaGetLastError(), cudaSuccess);
float *pval = vals_.dptr_;
float *pnew_val = new_vals_.dptr_;
for (int j = 0; j <= key_size; ++j) {
blur<key_size><<<dim3((n_keys_-1)/lblock_+1, 1, 1), dim3(lblock_, 1, 1), 0, stream>>>(
table, val_size, j, pval, pnew_val, matrix_.dptr_);
CHECK_EQ(cudaGetLastError(), cudaSuccess);
std::swap(pval, pnew_val);
}
if (normalize) {
if (norm == NULL) {
slice<key_size, true, false><<<dim3(nblock_, 1, 1), dim3(lblock_, 1, 1), 0, stream>>>(
table, n_elements_, val_size, pval, out, matrix_.dptr_, NULL);
} else {
slice<key_size, true, true><<<dim3(nblock_, 1, 1), dim3(lblock_, 1, 1), 0, stream>>>(
table, n_elements_, val_size, pval, out, matrix_.dptr_, norm);
}
} else {
slice<key_size, false, false><<<dim3(nblock_, 1, 1), dim3(lblock_, 1, 1), 0, stream>>>(
table, n_elements_, val_size, pval, out, matrix_.dptr_, NULL);
}
CHECK_EQ(cudaGetLastError(), cudaSuccess);
}
template<int key_size>
void CuPermutohedralOp<key_size>::Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_args) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace permutohedral;
Stream<gpu> *s = ctx.get_stream<gpu>();
cudaStream_t stream = Stream<gpu>::GetStream(s);
Tensor<gpu, 1, float> scale = aux_args[kScale].get<gpu, 1, float>(s);
if (!init_) {
TShape data_shape = in_data[kData].shape_;
batch_size_ = data_shape[0];
data_size_ = data_shape[1];
if (param_.normalize) {
val_size_ = data_size_ + 1;
} else {
val_size_ = data_size_;
}
n_elements_ = data_shape.Size()/batch_size_/data_size_;
n_keys_ = n_elements_*(key_size+1);
CHECK_EQ(in_data[kPos].size(1), key_size);
lblock_ = cuda::kBaseThreadNum;
nblock_ = (n_elements_-1)/lblock_+1;
float cpu_scale[key_size];
for (int i = 0; i < key_size; i++) {
cpu_scale[i] = (key_size+1)*sqrtf((2.0/3.0)/((i+1)*(i+2)));
}
CHECK_EQ(cudaMemcpyAsync((void*)scale.dptr_, (void*)cpu_scale, key_size*sizeof(float), cudaMemcpyHostToDevice, stream), cudaSuccess);
init_ = true;
}
Shape<3> shape = Shape3(batch_size_, data_size_, n_elements_);
Tensor<gpu, 3, float> in = in_data[kData].get_with_shape<gpu, 3, float>(shape, s);
Tensor<gpu, 3, float> out = out_data[kOut].get_with_shape<gpu, 3, float>(shape, s);
shape[1] = key_size;
Tensor<gpu, 3, float> pos = in_data[kPos].get_with_shape<gpu, 3, float>(shape, s);
shape[1] = 1;
Tensor<gpu, 3, float> norm = out_data[kNorm].get_with_shape<gpu, 3, float>(shape, s);
GetTempSpace(ctx, val_size_);
CuHashTable<key_size> table(n_keys_, entries_.dptr_, keys_.dptr_);
for (int i = 0; i < batch_size_; ++i) {
entries_ = -1;
init<key_size><<<dim3(nblock_, 1, 1), dim3(lblock_,1,1), 0, stream>>>(
table, n_elements_, pos.dptr_ + i*key_size*n_elements_, scale.dptr_, matrix_.dptr_);
CHECK_EQ(cudaGetLastError(), cudaSuccess);
Filter(stream, table, param_.normalize, val_size_,
scale.dptr_,
in.dptr_+i*data_size_*n_elements_,
pos.dptr_ + i*key_size*n_elements_,
out.dptr_ + i*data_size_*n_elements_,
norm.dptr_ + i*n_elements_);
}
}
template<int key_size>
void CuPermutohedralOp<key_size>::Backward(const OpContext &ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_args) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace permutohedral;
Stream<gpu> *s = ctx.get_stream<gpu>();
cudaStream_t stream = Stream<gpu>::GetStream(s);
Tensor<gpu, 1, float> scale = aux_args[kScale].get<gpu, 1, float>(s);
Shape<3> shape = Shape3(batch_size_, data_size_, n_elements_);
Tensor<gpu, 3, float> out = out_data[kOut].get_with_shape<gpu, 3, float>(shape, s);
Tensor<gpu, 3, float> ograd = out_grad[kOut].get_with_shape<gpu, 3, float>(shape, s);
Tensor<gpu, 3, float> data = in_data[kData].get_with_shape<gpu, 3, float>(shape, s);
Tensor<gpu, 3, float> data_grad = in_grad[kData].get_with_shape<gpu, 3, float>(shape, s);
shape[1] = key_size;
Tensor<gpu, 3, float> pos = in_data[kPos].get_with_shape<gpu, 3, float>(shape, s);
Tensor<gpu, 3, float> pos_grad = in_grad[kPos].get_with_shape<gpu, 3, float>(shape, s);
shape[1] = 1;
Tensor<gpu, 3, float> norm = out_data[kNorm].get_with_shape<gpu, 3, float>(shape, s);
GetTempSpace(ctx, req[kPos] == kNullOp ? val_size_ : std::max(val_size_, 2*(key_size+1)*val_size_));
CuHashTable<key_size> table(n_keys_, entries_.dptr_, keys_.dptr_);
for (int i = 0; i < batch_size_; ++i) {
entries_ = -1;
init<key_size><<<dim3(nblock_, 1, 1), dim3(lblock_,1,1), 0, stream>>>(
table, n_elements_, pos.dptr_ + i*key_size*n_elements_, scale.dptr_, matrix_.dptr_);
CHECK_EQ(cudaGetLastError(), cudaSuccess);
if (req[kData] != kNullOp) {
CHECK(req[kData] != kAddTo);
Filter(stream, table, param_.normalize, val_size_,
scale.dptr_,
ograd.dptr_ + i*data_size_*n_elements_,
pos.dptr_ + i*key_size*n_elements_,
data_grad.dptr_ + i*data_size_*n_elements_,
norm.dptr_ + i*n_elements_);
}
if (req[kPos] != kNullOp) {
CHECK(req[kData] != kAddTo);
if (param_.normalize) {
pos_grad_init<key_size, true><<<dim3(nblock_, 1, 1), dim3(lblock_, 1, 1), 0, stream>>>(
n_elements_, val_size_,
ograd.dptr_ + i*data_size_*n_elements_,
pos.dptr_ + i*key_size*n_elements_,
data.dptr_ + i*data_size_*n_elements_,
out.dptr_ + i*data_size_*n_elements_,
norm.dptr_ + i*n_elements_,
new_vals_.dptr_);
} else {
pos_grad_init<key_size, false><<<dim3(nblock_, 1, 1), dim3(lblock_, 1, 1), 0, stream>>>(
n_elements_, val_size_,
ograd.dptr_ + i*data_size_*n_elements_,
pos.dptr_ + i*key_size*n_elements_,
data.dptr_ + i*data_size_*n_elements_,
out.dptr_ + i*data_size_*n_elements_,
NULL,
new_vals_.dptr_);
}
CHECK_EQ(cudaGetLastError(), cudaSuccess);
Filter(stream, table, false, 2*(key_size+1)*val_size_,
scale.dptr_,
new_vals_.dptr_,
pos.dptr_ + i*key_size*n_elements_,
key_size%2 ? new_vals_.dptr_ : vals_.dptr_,
NULL);
if (param_.normalize) {
pos_grad_reduce<key_size, true><<<dim3(nblock_, 1, 1), dim3(lblock_, 1, 1), 0, stream>>>(
n_elements_, val_size_,
ograd.dptr_ + i*data_size_*n_elements_,
pos.dptr_ + i*key_size*n_elements_,
data.dptr_ + i*data_size_*n_elements_,
out.dptr_ + i*data_size_*n_elements_,
norm.dptr_ + i*n_elements_,
key_size%2 ? new_vals_.dptr_ : vals_.dptr_,
pos_grad.dptr_ + i*key_size*n_elements_);
} else {
pos_grad_reduce<key_size, false><<<dim3(nblock_, 1, 1), dim3(lblock_, 1, 1), 0, stream>>>(
n_elements_, val_size_,
ograd.dptr_ + i*data_size_*n_elements_,
pos.dptr_ + i*key_size*n_elements_,
data.dptr_ + i*data_size_*n_elements_,
out.dptr_ + i*data_size_*n_elements_,
NULL,
key_size%2 ? new_vals_.dptr_ : vals_.dptr_,
pos_grad.dptr_ + i*key_size*n_elements_);
}
CHECK_EQ(cudaGetLastError(), cudaSuccess);
}
}
}
template<>
Operator *CreateOp<gpu>(PermutohedralParam param, int key_size) {
switch (key_size) {
case 2: return new CuPermutohedralOp<2>(param);
case 3: return new CuPermutohedralOp<3>(param);
case 4: return new CuPermutohedralOp<4>(param);
case 5: return new CuPermutohedralOp<5>(param);
case 6: return new CuPermutohedralOp<6>(param);
case 7: return new CuPermutohedralOp<7>(param);
case 8: return new CuPermutohedralOp<8>(param);
case 9: return new CuPermutohedralOp<9>(param);
case 10: return new CuPermutohedralOp<10>(param);
case 11: return new CuPermutohedralOp<11>(param);
case 12: return new CuPermutohedralOp<12>(param);
case 13: return new CuPermutohedralOp<13>(param);
case 14: return new CuPermutohedralOp<14>(param);
case 15: return new CuPermutohedralOp<15>(param);
case 16: return new CuPermutohedralOp<16>(param);
default:
LOG(FATAL) << "GPU not supported";
return NULL;
}
}
} // namespace op
} // namespace mxnet
|
23a70d358d1eaf98a736758204794d1c38a9e518.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/amp/fp16_type_traits.h"
#include "paddle/fluid/operators/optimizers/adam_op.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
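// AdamKernelREG receives beta1^t and beta2^t by value (used when the
// beta-pow tensors live on the CPU); AdamKernelMEM below reads them through
// device pointers. Both perform the same dense Adam update.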
template <typename T, typename MT>
__global__ void AdamKernelREG(MT beta1, MT beta2, MT epsilon, MT beta1_pow_,
MT beta2_pow_, const MT* moment1, MT* moment1_out,
const MT* moment2, MT* moment2_out, const MT* lr_,
const T* grad, const T* param, T* param_out,
const MT* master_param, MT* master_param_out,
int ndim) {
MT lr = *lr_;
MT beta1_pow = beta1_pow_;
MT beta2_pow = beta2_pow_;
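// fold Adam's bias correction into the step size:
// lr_t = lr * sqrt(1 - beta2^t) / (1 - beta1^t)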
lr *= sqrt(static_cast<MT>(1.0) - beta2_pow) /
(static_cast<MT>(1.0) - beta1_pow);
int id = blockIdx.x * blockDim.x + threadIdx.x;
for (; id < ndim; id += gridDim.x * blockDim.x) {
MT p = master_param ? master_param[id] : static_cast<MT>(param[id]);
MT g = static_cast<MT>(grad[id]);
MT mom1 = moment1[id];
MT mom2 = moment2[id];
mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g;
mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g;
p -= lr * (mom1 /
(sqrt(mom2) + epsilon * sqrt(static_cast<MT>(1.0) - beta2_pow)));
moment1_out[id] = mom1;
moment2_out[id] = mom2;
param_out[id] = static_cast<T>(p);
if (master_param_out) {
master_param_out[id] = p;
}
}
}
template <typename T, typename MT>
__global__ void AdamKernelMEM(MT beta1, MT beta2, MT epsilon,
const MT* beta1_pow_, const MT* beta2_pow_,
const MT* moment1, MT* moment1_out,
const MT* moment2, MT* moment2_out, const MT* lr_,
const T* grad, const T* param, T* param_out,
const MT* master_param, MT* master_param_out,
int ndim) {
MT lr = *lr_;
MT beta1_pow = *beta1_pow_;
MT beta2_pow = *beta2_pow_;
lr *= sqrt(static_cast<MT>(1.0) - beta2_pow) /
(static_cast<MT>(1.0) - beta1_pow);
int id = blockIdx.x * blockDim.x + threadIdx.x;
for (; id < ndim; id += gridDim.x * blockDim.x) {
MT p = master_param ? master_param[id] : static_cast<MT>(param[id]);
MT g = static_cast<MT>(grad[id]);
MT mom1 = static_cast<MT>(moment1[id]);
MT mom2 = static_cast<MT>(moment2[id]);
mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g;
mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g;
p -= lr * (mom1 /
(sqrt(mom2) + epsilon * sqrt(static_cast<MT>(1.0) - beta2_pow)));
moment1_out[id] = mom1;
moment2_out[id] = mom2;
param_out[id] = static_cast<T>(p);
if (master_param_out) {
master_param_out[id] = p;
}
}
}
template <typename T>
__global__ void UpdateBetaPow(T beta1, T beta2, const T* beta1_pow_,
const T* beta2_pow_, T* beta1_pow_out,
T* beta2_pow_out) {
*beta1_pow_out = beta1 * beta1_pow_[0];
*beta2_pow_out = beta2 * beta2_pow_[0];
}
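// Sparse variant: rows_ lists the rows that actually carry gradients;
// BinarySearch maps each dense parameter index to its gradient row (or -1),
// and with lazy_mode parameters whose row is absent are skipped.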
template <typename T, typename MT>
__global__ void SparseAdamCUDAKernelREG(
MT beta1, MT beta2, MT epsilon, const MT beta1_pow, const MT beta2_pow,
const MT* mom1_, MT* mom1_out_, const MT* mom2_, MT* mom2_out_,
const MT* lr_, const T* grad_, const T* param_, T* param_out_,
const MT* master_param, MT* master_param_out, const int64_t* rows_,
int64_t row_numel, int64_t row_count, bool lazy_mode, int ndim) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
MT lr = *lr_;
lr *= sqrt(static_cast<MT>(1.0) - beta2_pow) /
(static_cast<MT>(1.0) - beta1_pow);
for (; id < ndim; id += blockDim.x * gridDim.x) {
auto row_idx =
math::BinarySearch<int64_t>(rows_, row_count, id / row_numel);
if (lazy_mode && row_idx < 0) {
return;
} else {
MT mom1 = mom1_[id];
MT mom2 = mom2_[id];
MT p = master_param ? master_param[id] : static_cast<MT>(param_[id]);
MT g = row_idx >= 0
? static_cast<MT>(grad_[row_idx * row_numel + id % row_numel])
: static_cast<MT>(0);
mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g;
mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g;
p -= lr * (mom1 / (sqrt(mom2) +
epsilon * sqrt(static_cast<MT>(1.0) - beta2_pow)));
// Write back to global memory
mom1_out_[id] = mom1;
mom2_out_[id] = mom2;
param_out_[id] = static_cast<T>(p);
if (master_param_out) {
master_param_out[id] = p;
}
}
}
}
template <typename T>
class AdamOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const auto* param_var = ctx.InputVar("Param");
PADDLE_ENFORCE_EQ(param_var->IsType<framework::LoDTensor>(), true,
platform::errors::InvalidArgument(
"The Var(%s)'s type should be LoDTensor, "
"but the received is %s",
ctx.InputNames("Param").front(),
framework::ToTypeName(param_var->Type())));
using paddle::framework::LoDTensor;
using MPDType = typename details::MPTypeTrait<T>::Type;
int64_t min_row_size_to_use_multithread =
ctx.Attr<int64_t>("min_row_size_to_use_multithread");
bool lazy_mode = ctx.Attr<bool>("lazy_mode");
bool use_global_beta_pow = ctx.Attr<bool>("use_global_beta_pow");
VLOG(4) << "use_global_beta_pow:" << use_global_beta_pow;
auto* param = ctx.Input<LoDTensor>("Param");
auto* grad_var = ctx.InputVar("Grad");
auto* mom1 = ctx.Input<LoDTensor>("Moment1");
auto* mom2 = ctx.Input<LoDTensor>("Moment2");
auto* lr = ctx.Input<LoDTensor>("LearningRate");
auto* beta1_pow = ctx.Input<LoDTensor>("Beta1Pow");
auto* beta2_pow = ctx.Input<LoDTensor>("Beta2Pow");
auto* param_out = ctx.Output<LoDTensor>("ParamOut");
auto* mom1_out = ctx.Output<LoDTensor>("Moment1Out");
auto* mom2_out = ctx.Output<LoDTensor>("Moment2Out");
auto* beta1_pow_out = ctx.Output<LoDTensor>("Beta1PowOut");
auto* beta2_pow_out = ctx.Output<LoDTensor>("Beta2PowOut");
bool skip_update = false;
if (ctx.HasInput("SkipUpdate")) {
auto* skip_update_tensor = ctx.Input<framework::Tensor>("SkipUpdate");
PADDLE_ENFORCE_EQ(skip_update_tensor->numel(), 1,
platform::errors::InvalidArgument(
"Input(SkipUpdate) size must be 1, but get %d",
skip_update_tensor->numel()));
std::vector<bool> skip_update_vec;
TensorToVector(*skip_update_tensor, ctx.device_context(),
&skip_update_vec);
skip_update = skip_update_vec[0];
}
// skip_update=true, just copy input to output, and TensorCopy will call
// mutable_data
if (skip_update) {
VLOG(4) << "Adam skip update";
framework::TensorCopy(
*param, ctx.GetPlace(),
ctx.template device_context<platform::DeviceContext>(), param_out);
framework::TensorCopy(
*mom1, ctx.GetPlace(),
ctx.template device_context<platform::DeviceContext>(), mom1_out);
framework::TensorCopy(
*mom2, ctx.GetPlace(),
ctx.template device_context<platform::DeviceContext>(), mom2_out);
framework::TensorCopy(
*beta1_pow, beta1_pow->place(),
ctx.template device_context<platform::DeviceContext>(),
beta1_pow_out);
framework::TensorCopy(
*beta2_pow, beta2_pow->place(),
ctx.template device_context<platform::DeviceContext>(),
beta2_pow_out);
return;
}
MPDType beta1 = static_cast<MPDType>(ctx.Attr<float>("beta1"));
if (ctx.HasInput("Beta1Tensor")) {
auto* beta1_tensor = ctx.Input<framework::Tensor>("Beta1Tensor");
PADDLE_ENFORCE_EQ(beta1_tensor->numel(), 1,
platform::errors::InvalidArgument(
"Input(Beta1Tensor) size must be 1, but get %d",
beta1_tensor->numel()));
beta1 = static_cast<MPDType>(GetAttrFromTensor(beta1_tensor));
}
MPDType beta2 = static_cast<MPDType>(ctx.Attr<float>("beta2"));
if (ctx.HasInput("Beta2Tensor")) {
auto* beta2_tensor = ctx.Input<framework::Tensor>("Beta2Tensor");
PADDLE_ENFORCE_EQ(beta2_tensor->numel(), 1,
platform::errors::InvalidArgument(
"Input(Beta2Tensor) size must be 1, but get %d",
beta2_tensor->numel()));
beta2 = static_cast<MPDType>(GetAttrFromTensor(beta2_tensor));
}
MPDType epsilon = static_cast<MPDType>(ctx.Attr<float>("epsilon"));
if (ctx.HasInput("EpsilonTensor")) {
auto* epsilon_tensor = ctx.Input<framework::Tensor>("EpsilonTensor");
PADDLE_ENFORCE_EQ(epsilon_tensor->numel(), 1,
platform::errors::InvalidArgument(
"Input(EpsilonTensor) size must be 1, but get %d",
epsilon_tensor->numel()));
epsilon = static_cast<MPDType>(GetAttrFromTensor(epsilon_tensor));
}
VLOG(3) << "beta1_pow.numel() : " << beta1_pow->numel()
<< "beta2_pow.numel() : " << beta2_pow->numel();
VLOG(3) << "param.numel(): " << param->numel();
PADDLE_ENFORCE_EQ(beta1_pow_out->numel(), 1,
platform::errors::InvalidArgument(
"beta1 pow output size should be 1, but received "
"value is:%d.",
beta1_pow_out->numel()));
PADDLE_ENFORCE_EQ(beta2_pow_out->numel(), 1,
platform::errors::InvalidArgument(
"beta2 pow output size should be 1, but received "
"value is:%d.",
beta2_pow_out->numel()));
const bool multi_precision = ctx.Attr<bool>("multi_precision");
const LoDTensor* master_param = nullptr;
LoDTensor* master_param_out = nullptr;
if (multi_precision) {
bool has_master =
ctx.HasInput("MasterParam") && ctx.HasOutput("MasterParamOut");
PADDLE_ENFORCE_EQ(has_master, true,
platform::errors::InvalidArgument(
"The Input(MasterParam) and Output(MasterParamOut) "
"should not be null when "
"the attr `multi_precision` is true"));
master_param = ctx.Input<LoDTensor>("MasterParam");
master_param_out = ctx.Output<LoDTensor>("MasterParamOut");
}
const MPDType* master_in_data =
multi_precision ? master_param->data<MPDType>() : nullptr;
MPDType* master_out_data =
multi_precision
? master_param_out->mutable_data<MPDType>(ctx.GetPlace())
: nullptr;
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
if (grad_var->IsType<framework::LoDTensor>()) {
auto* grad = ctx.Input<LoDTensor>("Grad");
// update param and moment
int threads = 512;
int blocks = (param->numel() + threads - 1) / threads;
if (beta1_pow->place() == platform::CPUPlace() &&
beta2_pow->place() == platform::CPUPlace()) {
// Compute with betapow in REG
hipLaunchKernelGGL(( AdamKernelREG<T, MPDType>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
beta1, beta2, epsilon, *beta1_pow->data<MPDType>(),
*beta2_pow->data<MPDType>(), mom1->data<MPDType>(),
mom1_out->mutable_data<MPDType>(ctx.GetPlace()),
mom2->data<MPDType>(),
mom2_out->mutable_data<MPDType>(ctx.GetPlace()),
lr->data<MPDType>(), grad->data<T>(), param->data<T>(),
param_out->mutable_data<T>(ctx.GetPlace()), master_in_data,
master_out_data, param->numel());
if (!use_global_beta_pow) {
// Cpu update
beta1_pow_out->mutable_data<MPDType>(platform::CPUPlace())[0] =
beta1 * beta1_pow->data<MPDType>()[0];
beta2_pow_out->mutable_data<MPDType>(platform::CPUPlace())[0] =
beta2 * beta2_pow->data<MPDType>()[0];
}
} else {
hipLaunchKernelGGL(( AdamKernelMEM<T, MPDType>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
beta1, beta2, epsilon, beta1_pow->data<MPDType>(),
beta2_pow->data<MPDType>(), mom1->data<MPDType>(),
mom1_out->mutable_data<MPDType>(ctx.GetPlace()),
mom2->data<MPDType>(),
mom2_out->mutable_data<MPDType>(ctx.GetPlace()),
lr->data<MPDType>(), grad->data<T>(), param->data<T>(),
param_out->mutable_data<T>(ctx.GetPlace()), master_in_data,
master_out_data, param->numel());
if (!use_global_beta_pow) {
// Update with gpu
hipLaunchKernelGGL(( UpdateBetaPow<MPDType>), dim3(1), dim3(32), 0, dev_ctx.stream(),
beta1, beta2, beta1_pow->data<MPDType>(),
beta2_pow->data<MPDType>(),
beta1_pow_out->mutable_data<MPDType>(ctx.GetPlace()),
beta2_pow_out->mutable_data<MPDType>(ctx.GetPlace()));
}
}
} else if (grad_var->IsType<framework::SelectedRows>()) {
auto* grad = ctx.Input<framework::SelectedRows>("Grad");
if (grad->rows().size() == 0) {
VLOG(3) << "grad row size is 0!!";
return;
}
std::vector<int64_t> cpu_rows(grad->rows().begin(), grad->rows().end());
bool is_strict_sorted = true;
for (size_t i = 1; i < cpu_rows.size(); ++i) {
if (cpu_rows[i - 1] >= cpu_rows[i]) {
is_strict_sorted = false;
break;
}
}
framework::SelectedRows tmp_grad_merge;
const framework::SelectedRows* grad_merge_ptr;
if (is_strict_sorted) {
grad_merge_ptr = grad;
} else {
// merge duplicated rows if any.
// The rows of grad_merge have been sorted inside MergeAdd functor
scatter::MergeAdd<platform::CUDADeviceContext, T> merge_func;
merge_func(ctx.template device_context<platform::CUDADeviceContext>(),
*grad, &tmp_grad_merge, true);
grad_merge_ptr = &tmp_grad_merge;
}
auto& grad_merge = *grad_merge_ptr;
auto& grad_tensor = grad_merge.value();
const T* grad_data = grad_tensor.template data<T>();
const int64_t* rows = grad_merge.rows().Data(ctx.GetPlace());
auto row_numel = grad_tensor.numel() / grad_merge.rows().size();
if (beta1_pow->place() == platform::CPUPlace() &&
beta2_pow->place() == platform::CPUPlace()) {
int threads = 512;
int ndim = param->numel();
int blocks = (ndim + threads - 1) / threads;
hipLaunchKernelGGL(( SparseAdamCUDAKernelREG<
T, MPDType>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
beta1, beta2, epsilon, *beta1_pow->data<MPDType>(),
*beta2_pow->data<MPDType>(), mom1->data<MPDType>(),
mom1_out->mutable_data<MPDType>(ctx.GetPlace()),
mom2->data<MPDType>(),
mom2_out->mutable_data<MPDType>(ctx.GetPlace()),
lr->data<MPDType>(), grad_data, param->data<T>(),
param_out->mutable_data<T>(ctx.GetPlace()), master_in_data,
master_out_data, rows, row_numel, grad_merge.rows().size(),
lazy_mode, ndim);
if (!use_global_beta_pow) {
// Update with cpu
beta1_pow_out->mutable_data<MPDType>(platform::CPUPlace())[0] =
beta1 * beta1_pow->data<MPDType>()[0];
beta2_pow_out->mutable_data<MPDType>(platform::CPUPlace())[0] =
beta2 * beta2_pow->data<MPDType>()[0];
}
} else {
SparseAdamFunctor<T, GPUAdam, MPDType> functor(
beta1, beta2, epsilon, beta1_pow->data<MPDType>(),
beta2_pow->data<MPDType>(), mom1->data<MPDType>(),
mom1_out->mutable_data<MPDType>(ctx.GetPlace()),
mom2->data<MPDType>(),
mom2_out->mutable_data<MPDType>(ctx.GetPlace()),
lr->data<MPDType>(), grad_data, param->data<T>(),
param_out->mutable_data<T>(ctx.GetPlace()), master_in_data,
master_out_data, rows, row_numel, grad_merge.rows().size(),
lazy_mode);
// FIXME(minqiyang): remove BinarySearch in GPU later
platform::ForRange<platform::CUDADeviceContext> for_range(
static_cast<const platform::CUDADeviceContext&>(
ctx.device_context()),
param->numel());
for_range(functor);
if (!use_global_beta_pow) {
// update beta1 and beta2
hipLaunchKernelGGL(( UpdateBetaPow<MPDType>), dim3(1), dim3(32), 0, dev_ctx.stream(),
beta1, beta2, beta1_pow->data<MPDType>(),
beta2_pow->data<MPDType>(),
beta1_pow_out->mutable_data<MPDType>(ctx.GetPlace()),
beta2_pow_out->mutable_data<MPDType>(ctx.GetPlace()));
}
}
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Variable type not supported by adam_op"));
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(adam, ops::AdamOpCUDAKernel<float>,
ops::AdamOpCUDAKernel<double>,
ops::AdamOpCUDAKernel<plat::float16>);
|
23a70d358d1eaf98a736758204794d1c38a9e518.cu
|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/amp/fp16_type_traits.h"
#include "paddle/fluid/operators/optimizers/adam_op.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
template <typename T, typename MT>
__global__ void AdamKernelREG(MT beta1, MT beta2, MT epsilon, MT beta1_pow_,
MT beta2_pow_, const MT* moment1, MT* moment1_out,
const MT* moment2, MT* moment2_out, const MT* lr_,
const T* grad, const T* param, T* param_out,
const MT* master_param, MT* master_param_out,
int ndim) {
MT lr = *lr_;
MT beta1_pow = beta1_pow_;
MT beta2_pow = beta2_pow_;
lr *= sqrt(static_cast<MT>(1.0) - beta2_pow) /
(static_cast<MT>(1.0) - beta1_pow);
int id = blockIdx.x * blockDim.x + threadIdx.x;
for (; id < ndim; id += gridDim.x * blockDim.x) {
MT p = master_param ? master_param[id] : static_cast<MT>(param[id]);
MT g = static_cast<MT>(grad[id]);
MT mom1 = moment1[id];
MT mom2 = moment2[id];
mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g;
mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g;
p -= lr * (mom1 /
(sqrt(mom2) + epsilon * sqrt(static_cast<MT>(1.0) - beta2_pow)));
moment1_out[id] = mom1;
moment2_out[id] = mom2;
param_out[id] = static_cast<T>(p);
if (master_param_out) {
master_param_out[id] = p;
}
}
}
template <typename T, typename MT>
__global__ void AdamKernelMEM(MT beta1, MT beta2, MT epsilon,
const MT* beta1_pow_, const MT* beta2_pow_,
const MT* moment1, MT* moment1_out,
const MT* moment2, MT* moment2_out, const MT* lr_,
const T* grad, const T* param, T* param_out,
const MT* master_param, MT* master_param_out,
int ndim) {
MT lr = *lr_;
MT beta1_pow = *beta1_pow_;
MT beta2_pow = *beta2_pow_;
lr *= sqrt(static_cast<MT>(1.0) - beta2_pow) /
(static_cast<MT>(1.0) - beta1_pow);
int id = blockIdx.x * blockDim.x + threadIdx.x;
for (; id < ndim; id += gridDim.x * blockDim.x) {
MT p = master_param ? master_param[id] : static_cast<MT>(param[id]);
MT g = static_cast<MT>(grad[id]);
MT mom1 = static_cast<MT>(moment1[id]);
MT mom2 = static_cast<MT>(moment2[id]);
mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g;
mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g;
p -= lr * (mom1 /
(sqrt(mom2) + epsilon * sqrt(static_cast<MT>(1.0) - beta2_pow)));
moment1_out[id] = mom1;
moment2_out[id] = mom2;
param_out[id] = static_cast<T>(p);
if (master_param_out) {
master_param_out[id] = p;
}
}
}
template <typename T>
__global__ void UpdateBetaPow(T beta1, T beta2, const T* beta1_pow_,
const T* beta2_pow_, T* beta1_pow_out,
T* beta2_pow_out) {
*beta1_pow_out = beta1 * beta1_pow_[0];
*beta2_pow_out = beta2 * beta2_pow_[0];
}
template <typename T, typename MT>
__global__ void SparseAdamCUDAKernelREG(
MT beta1, MT beta2, MT epsilon, const MT beta1_pow, const MT beta2_pow,
const MT* mom1_, MT* mom1_out_, const MT* mom2_, MT* mom2_out_,
const MT* lr_, const T* grad_, const T* param_, T* param_out_,
const MT* master_param, MT* master_param_out, const int64_t* rows_,
int64_t row_numel, int64_t row_count, bool lazy_mode, int ndim) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
MT lr = *lr_;
lr *= sqrt(static_cast<MT>(1.0) - beta2_pow) /
(static_cast<MT>(1.0) - beta1_pow);
for (; id < ndim; id += blockDim.x * gridDim.x) {
auto row_idx =
math::BinarySearch<int64_t>(rows_, row_count, id / row_numel);
if (lazy_mode && row_idx < 0) {
return;
} else {
MT mom1 = mom1_[id];
MT mom2 = mom2_[id];
MT p = master_param ? master_param[id] : static_cast<MT>(param_[id]);
MT g = row_idx >= 0
? static_cast<MT>(grad_[row_idx * row_numel + id % row_numel])
: static_cast<MT>(0);
mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g;
mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g;
p -= lr * (mom1 / (sqrt(mom2) +
epsilon * sqrt(static_cast<MT>(1.0) - beta2_pow)));
// Write back to global memory
mom1_out_[id] = mom1;
mom2_out_[id] = mom2;
param_out_[id] = static_cast<T>(p);
if (master_param_out) {
master_param_out[id] = p;
}
}
}
}
template <typename T>
class AdamOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const auto* param_var = ctx.InputVar("Param");
PADDLE_ENFORCE_EQ(param_var->IsType<framework::LoDTensor>(), true,
platform::errors::InvalidArgument(
"The Var(%s)'s type should be LoDTensor, "
"but the received is %s",
ctx.InputNames("Param").front(),
framework::ToTypeName(param_var->Type())));
using paddle::framework::LoDTensor;
using MPDType = typename details::MPTypeTrait<T>::Type;
int64_t min_row_size_to_use_multithread =
ctx.Attr<int64_t>("min_row_size_to_use_multithread");
bool lazy_mode = ctx.Attr<bool>("lazy_mode");
bool use_global_beta_pow = ctx.Attr<bool>("use_global_beta_pow");
VLOG(4) << "use_global_beta_pow:" << use_global_beta_pow;
auto* param = ctx.Input<LoDTensor>("Param");
auto* grad_var = ctx.InputVar("Grad");
auto* mom1 = ctx.Input<LoDTensor>("Moment1");
auto* mom2 = ctx.Input<LoDTensor>("Moment2");
auto* lr = ctx.Input<LoDTensor>("LearningRate");
auto* beta1_pow = ctx.Input<LoDTensor>("Beta1Pow");
auto* beta2_pow = ctx.Input<LoDTensor>("Beta2Pow");
auto* param_out = ctx.Output<LoDTensor>("ParamOut");
auto* mom1_out = ctx.Output<LoDTensor>("Moment1Out");
auto* mom2_out = ctx.Output<LoDTensor>("Moment2Out");
auto* beta1_pow_out = ctx.Output<LoDTensor>("Beta1PowOut");
auto* beta2_pow_out = ctx.Output<LoDTensor>("Beta2PowOut");
bool skip_update = false;
if (ctx.HasInput("SkipUpdate")) {
auto* skip_update_tensor = ctx.Input<framework::Tensor>("SkipUpdate");
PADDLE_ENFORCE_EQ(skip_update_tensor->numel(), 1,
platform::errors::InvalidArgument(
"Input(SkipUpdate) size must be 1, but get %d",
skip_update_tensor->numel()));
std::vector<bool> skip_update_vec;
TensorToVector(*skip_update_tensor, ctx.device_context(),
&skip_update_vec);
skip_update = skip_update_vec[0];
}
// skip_update=true, just copy input to output, and TensorCopy will call
// mutable_data
if (skip_update) {
VLOG(4) << "Adam skip update";
framework::TensorCopy(
*param, ctx.GetPlace(),
ctx.template device_context<platform::DeviceContext>(), param_out);
framework::TensorCopy(
*mom1, ctx.GetPlace(),
ctx.template device_context<platform::DeviceContext>(), mom1_out);
framework::TensorCopy(
*mom2, ctx.GetPlace(),
ctx.template device_context<platform::DeviceContext>(), mom2_out);
framework::TensorCopy(
*beta1_pow, beta1_pow->place(),
ctx.template device_context<platform::DeviceContext>(),
beta1_pow_out);
framework::TensorCopy(
*beta2_pow, beta2_pow->place(),
ctx.template device_context<platform::DeviceContext>(),
beta2_pow_out);
return;
}
MPDType beta1 = static_cast<MPDType>(ctx.Attr<float>("beta1"));
if (ctx.HasInput("Beta1Tensor")) {
auto* beta1_tensor = ctx.Input<framework::Tensor>("Beta1Tensor");
PADDLE_ENFORCE_EQ(beta1_tensor->numel(), 1,
platform::errors::InvalidArgument(
"Input(Beta1Tensor) size must be 1, but get %d",
beta1_tensor->numel()));
beta1 = static_cast<MPDType>(GetAttrFromTensor(beta1_tensor));
}
MPDType beta2 = static_cast<MPDType>(ctx.Attr<float>("beta2"));
if (ctx.HasInput("Beta2Tensor")) {
auto* beta2_tensor = ctx.Input<framework::Tensor>("Beta2Tensor");
PADDLE_ENFORCE_EQ(beta2_tensor->numel(), 1,
platform::errors::InvalidArgument(
"Input(Beta2Tensor) size must be 1, but get %d",
beta2_tensor->numel()));
beta2 = static_cast<MPDType>(GetAttrFromTensor(beta2_tensor));
}
MPDType epsilon = static_cast<MPDType>(ctx.Attr<float>("epsilon"));
if (ctx.HasInput("EpsilonTensor")) {
auto* epsilon_tensor = ctx.Input<framework::Tensor>("EpsilonTensor");
PADDLE_ENFORCE_EQ(epsilon_tensor->numel(), 1,
platform::errors::InvalidArgument(
"Input(EpsilonTensor) size must be 1, but get %d",
epsilon_tensor->numel()));
epsilon = static_cast<MPDType>(GetAttrFromTensor(epsilon_tensor));
}
VLOG(3) << "beta1_pow.numel() : " << beta1_pow->numel()
<< "beta2_pow.numel() : " << beta2_pow->numel();
VLOG(3) << "param.numel(): " << param->numel();
PADDLE_ENFORCE_EQ(beta1_pow_out->numel(), 1,
platform::errors::InvalidArgument(
"beta1 pow output size should be 1, but received "
"value is:%d.",
beta1_pow_out->numel()));
PADDLE_ENFORCE_EQ(beta2_pow_out->numel(), 1,
platform::errors::InvalidArgument(
"beta2 pow output size should be 1, but received "
"value is:%d.",
beta2_pow_out->numel()));
const bool multi_precision = ctx.Attr<bool>("multi_precision");
const LoDTensor* master_param = nullptr;
LoDTensor* master_param_out = nullptr;
if (multi_precision) {
bool has_master =
ctx.HasInput("MasterParam") && ctx.HasOutput("MasterParamOut");
PADDLE_ENFORCE_EQ(has_master, true,
platform::errors::InvalidArgument(
"The Input(MasterParam) and Output(MasterParamOut) "
"should not be null when "
"the attr `multi_precision` is true"));
master_param = ctx.Input<LoDTensor>("MasterParam");
master_param_out = ctx.Output<LoDTensor>("MasterParamOut");
}
const MPDType* master_in_data =
multi_precision ? master_param->data<MPDType>() : nullptr;
MPDType* master_out_data =
multi_precision
? master_param_out->mutable_data<MPDType>(ctx.GetPlace())
: nullptr;
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
if (grad_var->IsType<framework::LoDTensor>()) {
auto* grad = ctx.Input<LoDTensor>("Grad");
// update param and moment
int threads = 512;
int blocks = (param->numel() + threads - 1) / threads;
if (beta1_pow->place() == platform::CPUPlace() &&
beta2_pow->place() == platform::CPUPlace()) {
// Compute with betapow in REG
AdamKernelREG<T, MPDType><<<blocks, threads, 0, dev_ctx.stream()>>>(
beta1, beta2, epsilon, *beta1_pow->data<MPDType>(),
*beta2_pow->data<MPDType>(), mom1->data<MPDType>(),
mom1_out->mutable_data<MPDType>(ctx.GetPlace()),
mom2->data<MPDType>(),
mom2_out->mutable_data<MPDType>(ctx.GetPlace()),
lr->data<MPDType>(), grad->data<T>(), param->data<T>(),
param_out->mutable_data<T>(ctx.GetPlace()), master_in_data,
master_out_data, param->numel());
if (!use_global_beta_pow) {
// Cpu update
beta1_pow_out->mutable_data<MPDType>(platform::CPUPlace())[0] =
beta1 * beta1_pow->data<MPDType>()[0];
beta2_pow_out->mutable_data<MPDType>(platform::CPUPlace())[0] =
beta2 * beta2_pow->data<MPDType>()[0];
}
} else {
AdamKernelMEM<T, MPDType><<<blocks, threads, 0, dev_ctx.stream()>>>(
beta1, beta2, epsilon, beta1_pow->data<MPDType>(),
beta2_pow->data<MPDType>(), mom1->data<MPDType>(),
mom1_out->mutable_data<MPDType>(ctx.GetPlace()),
mom2->data<MPDType>(),
mom2_out->mutable_data<MPDType>(ctx.GetPlace()),
lr->data<MPDType>(), grad->data<T>(), param->data<T>(),
param_out->mutable_data<T>(ctx.GetPlace()), master_in_data,
master_out_data, param->numel());
if (!use_global_beta_pow) {
// Update with gpu
UpdateBetaPow<MPDType><<<1, 32, 0, dev_ctx.stream()>>>(
beta1, beta2, beta1_pow->data<MPDType>(),
beta2_pow->data<MPDType>(),
beta1_pow_out->mutable_data<MPDType>(ctx.GetPlace()),
beta2_pow_out->mutable_data<MPDType>(ctx.GetPlace()));
}
}
} else if (grad_var->IsType<framework::SelectedRows>()) {
auto* grad = ctx.Input<framework::SelectedRows>("Grad");
if (grad->rows().size() == 0) {
VLOG(3) << "grad row size is 0!!";
return;
}
std::vector<int64_t> cpu_rows(grad->rows().begin(), grad->rows().end());
bool is_strict_sorted = true;
for (size_t i = 1; i < cpu_rows.size(); ++i) {
if (cpu_rows[i - 1] >= cpu_rows[i]) {
is_strict_sorted = false;
break;
}
}
framework::SelectedRows tmp_grad_merge;
const framework::SelectedRows* grad_merge_ptr;
if (is_strict_sorted) {
grad_merge_ptr = grad;
} else {
// merge duplicated rows if any.
// The rows of grad_merge have been sorted inside MergeAdd functor
scatter::MergeAdd<platform::CUDADeviceContext, T> merge_func;
merge_func(ctx.template device_context<platform::CUDADeviceContext>(),
*grad, &tmp_grad_merge, true);
grad_merge_ptr = &tmp_grad_merge;
}
auto& grad_merge = *grad_merge_ptr;
auto& grad_tensor = grad_merge.value();
const T* grad_data = grad_tensor.template data<T>();
const int64_t* rows = grad_merge.rows().Data(ctx.GetPlace());
auto row_numel = grad_tensor.numel() / grad_merge.rows().size();
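      // row_numel is the number of parameter elements stored per sparse row, so dense
      // element id lives at grad_data[row_idx * row_numel + id % row_numel].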
if (beta1_pow->place() == platform::CPUPlace() &&
beta2_pow->place() == platform::CPUPlace()) {
int threads = 512;
int ndim = param->numel();
int blocks = (ndim + threads - 1) / threads;
SparseAdamCUDAKernelREG<
T, MPDType><<<blocks, threads, 0, dev_ctx.stream()>>>(
beta1, beta2, epsilon, *beta1_pow->data<MPDType>(),
*beta2_pow->data<MPDType>(), mom1->data<MPDType>(),
mom1_out->mutable_data<MPDType>(ctx.GetPlace()),
mom2->data<MPDType>(),
mom2_out->mutable_data<MPDType>(ctx.GetPlace()),
lr->data<MPDType>(), grad_data, param->data<T>(),
param_out->mutable_data<T>(ctx.GetPlace()), master_in_data,
master_out_data, rows, row_numel, grad_merge.rows().size(),
lazy_mode, ndim);
if (!use_global_beta_pow) {
// Update with cpu
beta1_pow_out->mutable_data<MPDType>(platform::CPUPlace())[0] =
beta1 * beta1_pow->data<MPDType>()[0];
beta2_pow_out->mutable_data<MPDType>(platform::CPUPlace())[0] =
beta2 * beta2_pow->data<MPDType>()[0];
}
} else {
SparseAdamFunctor<T, GPUAdam, MPDType> functor(
beta1, beta2, epsilon, beta1_pow->data<MPDType>(),
beta2_pow->data<MPDType>(), mom1->data<MPDType>(),
mom1_out->mutable_data<MPDType>(ctx.GetPlace()),
mom2->data<MPDType>(),
mom2_out->mutable_data<MPDType>(ctx.GetPlace()),
lr->data<MPDType>(), grad_data, param->data<T>(),
param_out->mutable_data<T>(ctx.GetPlace()), master_in_data,
master_out_data, rows, row_numel, grad_merge.rows().size(),
lazy_mode);
// FIXME(minqiyang): remove BinarySearch in GPU later
platform::ForRange<platform::CUDADeviceContext> for_range(
static_cast<const platform::CUDADeviceContext&>(
ctx.device_context()),
param->numel());
for_range(functor);
if (!use_global_beta_pow) {
// update beta1 and beta2
UpdateBetaPow<MPDType><<<1, 32, 0, dev_ctx.stream()>>>(
beta1, beta2, beta1_pow->data<MPDType>(),
beta2_pow->data<MPDType>(),
beta1_pow_out->mutable_data<MPDType>(ctx.GetPlace()),
beta2_pow_out->mutable_data<MPDType>(ctx.GetPlace()));
}
}
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Variable type not supported by adam_op"));
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(adam, ops::AdamOpCUDAKernel<float>,
ops::AdamOpCUDAKernel<double>,
ops::AdamOpCUDAKernel<plat::float16>);
|
ecf835412fbb5907b40fc22b6aa1d489d47366ba.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "common.h"
#include "im2col.h"
void THNN_CudaSpatialConvolutionMM_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, THCudaTensor *weight, THCudaTensor *bias, THCudaTensor *columns, THCudaTensor *ones, int kW, int kH, int dW, int dH, int padW, int padH) {
THCUNN_assertSameGPU(state, 5, input, output, weight, columns, ones);
if (bias) {
THCUNN_assertSameGPU(state, 2, weight, bias);
}
THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected");
THArgCheck(weight->nDimension == 2, 4, "weight tensor must be 2D (nOutputPlane,nInputPlane*kH*kW)");
THArgCheck(!bias || weight->size[0] == bias->size[0], 4, "nOutputPlane mismatch in weight and bias");
THArgCheck(kW > 0 && kH > 0, 8, "kernel size should be greater than zero");
THArgCheck(dW > 0 && dH > 0, 10, "stride should be greater than zero");
// Params:
int nInputPlane = weight->size[1]/(kH*kW);
int nOutputPlane = weight->size[0];
int batch = 1;
if (input->nDimension == 3) {
THArgCheck(input->size[0] == nInputPlane, 2, "input channels and nInputPlane dont match");
// Force batch
batch = 0;
THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]);
} else {
THArgCheck(input->size[1] == nInputPlane, 2, "input channels and nInputPlane dont match");
}
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long outputWidth = (inputWidth + 2*padW - kW) / dW + 1;
long outputHeight = (inputHeight + 2*padH - kH) / dH + 1;
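  // Standard convolution output size: out = (in + 2*pad - k) / stride + 1. For
  // example, a 32x32 input with kW = kH = 3, pad = 1 and stride = 1 keeps its
  // spatial size: (32 + 2*1 - 3) / 1 + 1 = 32.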
if (outputWidth < 1 || outputHeight < 1)
THError("Given input size: (%dx%dx%d). Calculated output size: (%dx%dx%d). Output size is too small",
nInputPlane,inputHeight,inputWidth,nOutputPlane,outputHeight,outputWidth);
// Batch size + input planes
long batchSize = input->size[0];
// Resize output
THCudaTensor_resize4d(state, output, batchSize, nOutputPlane, outputHeight, outputWidth);
// Resize temporary columns
THCudaTensor_resize2d(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth);
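  // The columns buffer holds one im2col column per output location:
  // (nInputPlane*kW*kH) rows x (outputHeight*outputWidth) columns, which turns the
  // convolution into a single dense GEMM against the 2D weight matrix.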
// Define a buffer of ones, for bias accumulation
// Note: this buffer can be shared with other modules, it only ever gets increased,
// and always contains ones.
if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCudaTensor_resize2d(state, ones, outputHeight, outputWidth);
THCudaTensor_fill(state, ones, 1);
}
// Helpers
THCudaTensor *input_n = THCudaTensor_new(state);
THCudaTensor *output_n = THCudaTensor_new(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
    // Matrix multiply per output:
THCudaTensor_select(state, input_n, input, 0, elt);
THCudaTensor_select(state, output_n, output, 0, elt);
// Do Bias first:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m_ = nOutputPlane;
long n_ = outputHeight * outputWidth;
long k_ = 1;
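    // With k_ == 1 the GEMM below is a rank-1 outer product, output_n = bias * ones^T,
    // which initialises every spatial position of each output plane to its bias value.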
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
if (bias) {
THCudaBlas_gemm(
state,
't', 'n',
n_, m_, k_,
1,
THCudaTensor_data(state, ones), k_,
THCudaTensor_data(state, bias), k_,
0,
THCudaTensor_data(state, output_n), n_
);
} else {
THCudaTensor_zero(state, output_n);
}
// Extract columns:
im2col(
THCState_getCurrentStream(state),
THCudaTensor_data(state, input_n),
nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW,
THCudaTensor_data(state, columns)
);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m = nOutputPlane;
long n = columns->size[1];
long k = nInputPlane*kH*kW;
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
THCudaBlas_gemm(
state,
'n', 'n',
n, m, k,
1,
THCudaTensor_data(state, columns), n,
THCudaTensor_data(state, weight), k,
1,
THCudaTensor_data(state, output_n), n
);
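    // In row-major terms the GEMM above computes output_n += weight * columns, with
    // weight of shape (nOutputPlane) x (nInputPlane*kH*kW) and columns of shape
    // (nInputPlane*kH*kW) x (outputHeight*outputWidth).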
}
// Free
THCudaTensor_free(state, input_n);
THCudaTensor_free(state, output_n);
// Resize output
if (batch == 0) {
THCudaTensor_resize3d(state, output, nOutputPlane, outputHeight, outputWidth);
THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth);
}
}
void THNN_CudaSpatialConvolutionMM_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *weight, THCudaTensor *gradColumns, THCudaTensor *ones, int kW, int kH, int dW, int dH, int padW, int padH) {
THCUNN_assertSameGPU(state, 5, input, gradOutput, weight,
gradColumns, gradInput);
THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected");
THArgCheck(weight->nDimension == 2, 4, "weight tensor must be 2D (nOutputPlane,nInputPlane*kH*kW)");
THArgCheck(kW > 0 && kH > 0, 9, "kernel size should be greater than zero");
THArgCheck(dW > 0 && dH > 0, 11, "stride should be greater than zero");
// Params
int nInputPlane = weight->size[1]/(kW*kH);
int nOutputPlane = weight->size[0];
int batch = 1;
if (input->nDimension == 3) {
// Force batch
batch = 0;
THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]);
THCudaTensor_resize4d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]);
}
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long outputWidth = (inputWidth + 2*padW - kW) / dW + 1;
long outputHeight = (inputHeight + 2*padH - kH) / dH + 1;
// Batch size + input planes
long batchSize = input->size[0];
// Resize output
THCudaTensor_resize4d(state, gradInput, batchSize, nInputPlane, inputHeight, inputWidth);
// Resize temporary columns
THCudaTensor_resize2d(state, gradColumns, nInputPlane*kW*kH, outputHeight*outputWidth);
// Helpers
THCudaTensor *gradInput_n = THCudaTensor_new(state);
THCudaTensor *gradOutput_n = THCudaTensor_new(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
    // Matrix multiply per sample:
THCudaTensor_select(state, gradInput_n, gradInput, 0, elt);
THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m = nInputPlane*kW*kH;
long n = gradColumns->size[1];
long k = nOutputPlane;
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
THCudaBlas_gemm(
state,
'n', 't',
n, m, k,
1,
THCudaTensor_data(state, gradOutput_n), n,
THCudaTensor_data(state, weight), m,
0,
THCudaTensor_data(state, gradColumns), n
);
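    // Row-major view of the GEMM above: gradColumns = weight^T * gradOutput_n, i.e. the
    // output-plane gradients are mapped back onto the im2col layout; col2im below then
    // scatter-adds the overlapping patches into gradInput_n.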
// Unpack columns back into input:
col2im(
THCState_getCurrentStream(state),
THCudaTensor_data(state, gradColumns),
nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW,
THCudaTensor_data(state, gradInput_n)
);
}
// Free
THCudaTensor_free(state, gradInput_n);
THCudaTensor_free(state, gradOutput_n);
// Resize output
if (batch == 0) {
THCudaTensor_resize3d(state, gradOutput, nOutputPlane, outputHeight, outputWidth);
THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth);
THCudaTensor_resize3d(state, gradInput, nInputPlane, inputHeight, inputWidth);
}
}
void THNN_CudaSpatialConvolutionMM_accGradParameters(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradWeight, THCudaTensor *gradBias, THCudaTensor *columns, THCudaTensor *ones, int kW, int kH, int dW, int dH, int padW, int padH, float scale) {
THCUNN_assertSameGPU(state, 5, input, gradOutput, gradWeight, columns, ones);
if (gradBias) {
THCUNN_assertSameGPU(state, 2, gradWeight, gradBias);
}
THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected");
THArgCheck(gradWeight->nDimension == 2, 4, "gradWeight tensor must be 2D (nOutputPlane,nInputPlane*kH*kW)");
THArgCheck(!gradBias || gradWeight->size[0] == gradBias->size[0], 4, "nOutputPlane mismatch in gradWeight and gradBias");
THArgCheck(kW > 0 && kH > 0, 8, "kernel size should be greater than zero");
THArgCheck(dW > 0 && dH > 0, 10, "stride should be greater than zero");
// Params
int nInputPlane = gradWeight->size[1]/(kW*kH);
int nOutputPlane = gradWeight->size[0];
int batch = 1;
if (input->nDimension == 3) {
// Force batch
batch = 0;
THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]);
THCudaTensor_resize4d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]);
}
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long outputWidth = (inputWidth + 2*padW - kW) / dW + 1;
long outputHeight = (inputHeight + 2*padH - kH) / dH + 1;
// Batch size + input planes
long batchSize = input->size[0];
// Define a buffer of ones, for bias accumulation
if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCudaTensor_resize2d(state, ones, outputHeight, outputWidth);
THCudaTensor_fill(state, ones, 1);
}
// Resize temporary columns
THCudaTensor_resize2d(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth);
// Helpers
THCudaTensor *input_n = THCudaTensor_new(state);
THCudaTensor *gradOutput_n = THCudaTensor_new(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
    // Matrix multiply per output:
THCudaTensor_select(state, input_n, input, 0, elt);
THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt);
// Extract columns:
im2col(
THCState_getCurrentStream(state),
THCudaTensor_data(state, input_n),
nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW,
THCudaTensor_data(state, columns)
);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m = nOutputPlane;
long n = nInputPlane*kW*kH;
long k = columns->size[1];
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
THCudaBlas_gemm(
state,
't', 'n',
n, m, k,
scale,
THCudaTensor_data(state, columns), k,
THCudaTensor_data(state, gradOutput_n), k,
1,
THCudaTensor_data(state, gradWeight), n
);
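    // Row-major view of the GEMM above: gradWeight += scale * gradOutput_n * columns^T,
    // accumulating this batch element's contribution to the
    // (nOutputPlane) x (nInputPlane*kW*kH) weight gradient.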
// Do Bias:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m_ = nOutputPlane;
long k_ = outputHeight * outputWidth;
// Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices)
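    // It computes gradBias[o] += scale * sum over all outputHeight*outputWidth
    // positions of gradOutput_n[o], using the ones vector as the summation weights.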
if (gradBias) {
THCudaBlas_gemv(
state,
't',
k_, m_,
scale,
THCudaTensor_data(state, gradOutput_n), k_,
THCudaTensor_data(state, ones), 1,
1,
THCudaTensor_data(state, gradBias), 1
);
}
}
// Free
THCudaTensor_free(state, input_n);
THCudaTensor_free(state, gradOutput_n);
// Resize
if (batch == 0) {
THCudaTensor_resize3d(state, gradOutput, nOutputPlane, outputHeight, outputWidth);
THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth);
}
}
|
ecf835412fbb5907b40fc22b6aa1d489d47366ba.cu
|
#include "THCUNN.h"
#include "common.h"
#include "im2col.h"
void THNN_CudaSpatialConvolutionMM_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, THCudaTensor *weight, THCudaTensor *bias, THCudaTensor *columns, THCudaTensor *ones, int kW, int kH, int dW, int dH, int padW, int padH) {
THCUNN_assertSameGPU(state, 5, input, output, weight, columns, ones);
if (bias) {
THCUNN_assertSameGPU(state, 2, weight, bias);
}
THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected");
THArgCheck(weight->nDimension == 2, 4, "weight tensor must be 2D (nOutputPlane,nInputPlane*kH*kW)");
THArgCheck(!bias || weight->size[0] == bias->size[0], 4, "nOutputPlane mismatch in weight and bias");
THArgCheck(kW > 0 && kH > 0, 8, "kernel size should be greater than zero");
THArgCheck(dW > 0 && dH > 0, 10, "stride should be greater than zero");
// Params:
int nInputPlane = weight->size[1]/(kH*kW);
int nOutputPlane = weight->size[0];
int batch = 1;
if (input->nDimension == 3) {
THArgCheck(input->size[0] == nInputPlane, 2, "input channels and nInputPlane dont match");
// Force batch
batch = 0;
THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]);
} else {
THArgCheck(input->size[1] == nInputPlane, 2, "input channels and nInputPlane dont match");
}
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long outputWidth = (inputWidth + 2*padW - kW) / dW + 1;
long outputHeight = (inputHeight + 2*padH - kH) / dH + 1;
if (outputWidth < 1 || outputHeight < 1)
THError("Given input size: (%dx%dx%d). Calculated output size: (%dx%dx%d). Output size is too small",
nInputPlane,inputHeight,inputWidth,nOutputPlane,outputHeight,outputWidth);
// Batch size + input planes
long batchSize = input->size[0];
// Resize output
THCudaTensor_resize4d(state, output, batchSize, nOutputPlane, outputHeight, outputWidth);
// Resize temporary columns
THCudaTensor_resize2d(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth);
// Define a buffer of ones, for bias accumulation
// Note: this buffer can be shared with other modules, it only ever gets increased,
// and always contains ones.
if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCudaTensor_resize2d(state, ones, outputHeight, outputWidth);
THCudaTensor_fill(state, ones, 1);
}
// Helpers
THCudaTensor *input_n = THCudaTensor_new(state);
THCudaTensor *output_n = THCudaTensor_new(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
    // Matrix multiply per output:
THCudaTensor_select(state, input_n, input, 0, elt);
THCudaTensor_select(state, output_n, output, 0, elt);
// Do Bias first:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m_ = nOutputPlane;
long n_ = outputHeight * outputWidth;
long k_ = 1;
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
if (bias) {
THCudaBlas_gemm(
state,
't', 'n',
n_, m_, k_,
1,
THCudaTensor_data(state, ones), k_,
THCudaTensor_data(state, bias), k_,
0,
THCudaTensor_data(state, output_n), n_
);
} else {
THCudaTensor_zero(state, output_n);
}
// Extract columns:
im2col(
THCState_getCurrentStream(state),
THCudaTensor_data(state, input_n),
nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW,
THCudaTensor_data(state, columns)
);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m = nOutputPlane;
long n = columns->size[1];
long k = nInputPlane*kH*kW;
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
THCudaBlas_gemm(
state,
'n', 'n',
n, m, k,
1,
THCudaTensor_data(state, columns), n,
THCudaTensor_data(state, weight), k,
1,
THCudaTensor_data(state, output_n), n
);
}
// Free
THCudaTensor_free(state, input_n);
THCudaTensor_free(state, output_n);
// Resize output
if (batch == 0) {
THCudaTensor_resize3d(state, output, nOutputPlane, outputHeight, outputWidth);
THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth);
}
}
void THNN_CudaSpatialConvolutionMM_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *weight, THCudaTensor *gradColumns, THCudaTensor *ones, int kW, int kH, int dW, int dH, int padW, int padH) {
THCUNN_assertSameGPU(state, 5, input, gradOutput, weight,
gradColumns, gradInput);
THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected");
THArgCheck(weight->nDimension == 2, 4, "weight tensor must be 2D (nOutputPlane,nInputPlane*kH*kW)");
THArgCheck(kW > 0 && kH > 0, 9, "kernel size should be greater than zero");
THArgCheck(dW > 0 && dH > 0, 11, "stride should be greater than zero");
// Params
int nInputPlane = weight->size[1]/(kW*kH);
int nOutputPlane = weight->size[0];
int batch = 1;
if (input->nDimension == 3) {
// Force batch
batch = 0;
THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]);
THCudaTensor_resize4d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]);
}
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long outputWidth = (inputWidth + 2*padW - kW) / dW + 1;
long outputHeight = (inputHeight + 2*padH - kH) / dH + 1;
// Batch size + input planes
long batchSize = input->size[0];
// Resize output
THCudaTensor_resize4d(state, gradInput, batchSize, nInputPlane, inputHeight, inputWidth);
// Resize temporary columns
THCudaTensor_resize2d(state, gradColumns, nInputPlane*kW*kH, outputHeight*outputWidth);
// Helpers
THCudaTensor *gradInput_n = THCudaTensor_new(state);
THCudaTensor *gradOutput_n = THCudaTensor_new(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
    // Matrix multiply per sample:
THCudaTensor_select(state, gradInput_n, gradInput, 0, elt);
THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m = nInputPlane*kW*kH;
long n = gradColumns->size[1];
long k = nOutputPlane;
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
THCudaBlas_gemm(
state,
'n', 't',
n, m, k,
1,
THCudaTensor_data(state, gradOutput_n), n,
THCudaTensor_data(state, weight), m,
0,
THCudaTensor_data(state, gradColumns), n
);
// Unpack columns back into input:
col2im(
THCState_getCurrentStream(state),
THCudaTensor_data(state, gradColumns),
nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW,
THCudaTensor_data(state, gradInput_n)
);
}
// Free
THCudaTensor_free(state, gradInput_n);
THCudaTensor_free(state, gradOutput_n);
// Resize output
if (batch == 0) {
THCudaTensor_resize3d(state, gradOutput, nOutputPlane, outputHeight, outputWidth);
THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth);
THCudaTensor_resize3d(state, gradInput, nInputPlane, inputHeight, inputWidth);
}
}
void THNN_CudaSpatialConvolutionMM_accGradParameters(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradWeight, THCudaTensor *gradBias, THCudaTensor *columns, THCudaTensor *ones, int kW, int kH, int dW, int dH, int padW, int padH, float scale) {
THCUNN_assertSameGPU(state, 5, input, gradOutput, gradWeight, columns, ones);
if (gradBias) {
THCUNN_assertSameGPU(state, 2, gradWeight, gradBias);
}
THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected");
THArgCheck(gradWeight->nDimension == 2, 4, "gradWeight tensor must be 2D (nOutputPlane,nInputPlane*kH*kW)");
THArgCheck(!gradBias || gradWeight->size[0] == gradBias->size[0], 4, "nOutputPlane mismatch in gradWeight and gradBias");
THArgCheck(kW > 0 && kH > 0, 8, "kernel size should be greater than zero");
THArgCheck(dW > 0 && dH > 0, 10, "stride should be greater than zero");
// Params
int nInputPlane = gradWeight->size[1]/(kW*kH);
int nOutputPlane = gradWeight->size[0];
int batch = 1;
if (input->nDimension == 3) {
// Force batch
batch = 0;
THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]);
THCudaTensor_resize4d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]);
}
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long outputWidth = (inputWidth + 2*padW - kW) / dW + 1;
long outputHeight = (inputHeight + 2*padH - kH) / dH + 1;
// Batch size + input planes
long batchSize = input->size[0];
// Define a buffer of ones, for bias accumulation
if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCudaTensor_resize2d(state, ones, outputHeight, outputWidth);
THCudaTensor_fill(state, ones, 1);
}
// Resize temporary columns
THCudaTensor_resize2d(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth);
// Helpers
THCudaTensor *input_n = THCudaTensor_new(state);
THCudaTensor *gradOutput_n = THCudaTensor_new(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
    // Matrix multiply per output:
THCudaTensor_select(state, input_n, input, 0, elt);
THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt);
// Extract columns:
im2col(
THCState_getCurrentStream(state),
THCudaTensor_data(state, input_n),
nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW,
THCudaTensor_data(state, columns)
);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m = nOutputPlane;
long n = nInputPlane*kW*kH;
long k = columns->size[1];
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
THCudaBlas_gemm(
state,
't', 'n',
n, m, k,
scale,
THCudaTensor_data(state, columns), k,
THCudaTensor_data(state, gradOutput_n), k,
1,
THCudaTensor_data(state, gradWeight), n
);
// Do Bias:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m_ = nOutputPlane;
long k_ = outputHeight * outputWidth;
// Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices)
if (gradBias) {
THCudaBlas_gemv(
state,
't',
k_, m_,
scale,
THCudaTensor_data(state, gradOutput_n), k_,
THCudaTensor_data(state, ones), 1,
1,
THCudaTensor_data(state, gradBias), 1
);
}
}
// Free
THCudaTensor_free(state, input_n);
THCudaTensor_free(state, gradOutput_n);
// Resize
if (batch == 0) {
THCudaTensor_resize3d(state, gradOutput, nOutputPlane, outputHeight, outputWidth);
THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth);
}
}
|
d04ad72f31c71e1865d7b4026d19c06e798335f0.hip
|
// !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
// Use half cell width?
//#define USE_HALF_CELL_WIDTH
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_shuffled_pos;
glm::vec3 *dev_shuffled_vel;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
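// A Wang/Jenkins-style integer hash: it turns (index * time) into a well-mixed seed
// so each boid below draws an independent pseudo-random starting position.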
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
 * CUDA kernel for scattering the boids at random positions within the starting area.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to hipFree in Boids::endSimulation.
hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
#ifdef USE_HALF_CELL_WIDTH
gridCellWidth = ::max(::max(rule1Distance, rule2Distance), rule3Distance);
#else
gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance);
#endif
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
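  // The grid now spans [-halfGridWidth, +halfGridWidth] on each axis, which fully
  // contains the wrapped simulation cube of half-extent scene_scale.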
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!");
hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!");
hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!");
hipMalloc((void**)&dev_shuffled_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_shuffled_pos failed!");
hipMalloc((void**)&dev_shuffled_vel, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_shuffled_vel failed!");
hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  hipLaunchKernelGGL(( kernCopyPositionsToVBO), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_pos, vbodptr_positions, scene_scale);
  hipLaunchKernelGGL(( kernCopyVelocitiesToVBO), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
glm::vec3 self_position = pos[iSelf];
int rule1_cnt = 0, rule3_cnt = 0;
glm::vec3 perceived_center(0.0f, 0.0f, 0.0f);
glm::vec3 c(0.0f, 0.0f, 0.0f);
glm::vec3 perceived_velocity(0.0f, 0.0f, 0.0f);
for (int i = 0; i < N; i++) {
if (i == iSelf) {
continue;
}
glm::vec3 target_position = pos[i];
float distance = glm::distance(self_position, target_position);
if (distance < rule1Distance) {
perceived_center += target_position;
rule1_cnt++;
}
if (distance < rule2Distance) {
c -= (target_position - self_position);
}
if (distance < rule3Distance) {
perceived_velocity += vel[i];
rule3_cnt++;
}
}
glm::vec3 velocity_change(0.0f, 0.0f, 0.0f);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (rule1_cnt > 0) {
perceived_center /= rule1_cnt;
velocity_change += (perceived_center - self_position) * rule1Scale;
}
// Rule 2: boids try to stay a distance d away from each other
velocity_change += c * rule2Scale;
// Rule 3: boids try to match the speed of surrounding boids
if (rule3_cnt > 0) {
perceived_velocity /= rule3_cnt;
velocity_change += perceived_velocity * rule3Scale;
}
return velocity_change;
}
/**
* TODO-1.2 implement basic flocking
 * For each of the `N` boids, compute a new velocity from the three flocking rules.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 velocity_change = computeVelocityChange(N, index, pos, vel1);
glm::vec3 new_velocity = vel1[index] + velocity_change;
// Clamp the speed
float speed = glm::length(new_velocity);
if (speed > maxSpeed) {
new_velocity = new_velocity / speed * maxSpeed;
}
// Record the new velocity into vel2. Question: why NOT vel1?
vel2[index] = new_velocity;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 gridIndex3D = glm::floor((pos[index] - gridMin) * inverseCellWidth);
int grid_index = gridIndex3Dto1D(gridIndex3D.x, gridIndex3D.y, gridIndex3D.z, gridResolution);
gridIndices[index] = grid_index;
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
indices[index] = index;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
int gridIndex = particleGridIndices[index];
  // A boid opens its cell when it is the first sorted entry or its grid index differs
  // from the previous one, and closes it when it is the last entry or the next grid
  // index differs. Testing both conditions for every index (instead of returning early
  // at the array boundaries) guarantees that cells touching either end of the sorted
  // array receive both a start and an end index.
  if (index == 0 || gridIndex != particleGridIndices[index - 1]) {
    gridCellStartIndices[gridIndex] = index;
  }
  if (index == N - 1 || gridIndex != particleGridIndices[index + 1]) {
    gridCellEndIndices[gridIndex] = index;
  }
}
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 relative_pos = (pos[index] - gridMin) * inverseCellWidth;
glm::vec3 gridIndex3D = glm::floor(relative_pos);
// - Identify which cells may contain neighbors. This isn't always 8.
#ifndef USE_HALF_CELL_WIDTH
int x_direction = (glm::round(relative_pos.x - gridIndex3D.x) == 1) ? 1 : -1;
int y_direction = (glm::round(relative_pos.y - gridIndex3D.y) == 1) ? 1 : -1;
int z_direction = (glm::round(relative_pos.z - gridIndex3D.z) == 1) ? 1 : -1;
#endif
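  // With cells twice the largest rule distance, neighbours can only lie in the 2x2x2
  // block of cells on the same side of the cell centre as this boid, so x/y/z_direction
  // select which half-space of adjacent cells to visit.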
glm::vec3 self_position = pos[index];
glm::vec3 perceived_center(0.0f, 0.0f, 0.0f);
glm::vec3 c(0.0f, 0.0f, 0.0f);
glm::vec3 perceived_velocity(0.0f, 0.0f, 0.0f);
int rule1_cnt = 0, rule3_cnt = 0;
#ifdef USE_HALF_CELL_WIDTH
for (int dx = -1; dx <= 1; dx++) {
int x_index = gridIndex3D.x + dx;
    // Valid cell indices run from 0 to gridResolution - 1, so reject gridResolution itself.
    if (x_index < 0 || x_index >= gridResolution) {
continue;
}
for (int dy = -1; dy <= 1; dy++) {
int y_index = gridIndex3D.y + dy;
      if (y_index < 0 || y_index >= gridResolution) {
continue;
}
for (int dz = -1; dz <= 1; dz++) {
int z_index = gridIndex3D.z + dz;
        if (z_index < 0 || z_index >= gridResolution) {
continue;
}
#else
for (int dx = 0; dx <= 1; dx++) {
int x_index = gridIndex3D.x + dx * x_direction;
    if (x_index < 0 || x_index >= gridResolution) {
continue;
}
for (int dy = 0; dy <= 1; dy++) {
int y_index = gridIndex3D.y + dy * y_direction;
      if (y_index < 0 || y_index >= gridResolution) {
continue;
}
for (int dz = 0; dz <= 1; dz++) {
int z_index = gridIndex3D.z + dz * z_direction;
        if (z_index < 0 || z_index >= gridResolution) {
continue;
}
#endif
int gridIndex1D = gridIndex3Dto1D(x_index, y_index, z_index, gridResolution);
// - For each cell, read the start/end indices in the boid pointer array.
int startIndex = gridCellStartIndices[gridIndex1D];
if (startIndex == -1) {
continue;
}
int endIndex = gridCellEndIndices[gridIndex1D];
for (int SortedIndex = startIndex; SortedIndex <= endIndex; SortedIndex++) {
int targetIndex = particleArrayIndices[SortedIndex];
if (targetIndex == index) {
continue;
}
glm::vec3 target_position = pos[targetIndex];
float distance = glm::distance(self_position, target_position);
if (distance < rule1Distance) {
perceived_center += target_position;
rule1_cnt++;
}
if (distance < rule2Distance) {
c -= (target_position - self_position);
}
if (distance < rule3Distance) {
perceived_velocity += vel1[targetIndex];
rule3_cnt++;
}
}
}
}
}
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
glm::vec3 velocity_change(0.0f, 0.0f, 0.0f);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (rule1_cnt > 0) {
perceived_center /= rule1_cnt;
velocity_change += (perceived_center - self_position) * rule1Scale;
}
// Rule 2: boids try to stay a distance d away from each other
velocity_change += c * rule2Scale;
// Rule 3: boids try to match the speed of surrounding boids
if (rule3_cnt > 0) {
perceived_velocity /= rule3_cnt;
velocity_change += perceived_velocity * rule3Scale;
}
// - Clamp the speed change before putting the new speed in vel2
glm::vec3 new_velocity = vel1[index] + velocity_change;
float speed = glm::length(new_velocity);
if (speed > maxSpeed) {
new_velocity = new_velocity / speed * maxSpeed;
}
vel2[index] = new_velocity;
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 relative_pos = (pos[index] - gridMin) * inverseCellWidth;
glm::vec3 gridIndex3D = glm::floor(relative_pos);
// - Identify which cells may contain neighbors. This isn't always 8.
#ifndef USE_HALF_CELL_WIDTH
int x_direction = (glm::round(relative_pos.x - gridIndex3D.x) == 1) ? 1 : -1;
int y_direction = (glm::round(relative_pos.y - gridIndex3D.y) == 1) ? 1 : -1;
int z_direction = (glm::round(relative_pos.z - gridIndex3D.z) == 1) ? 1 : -1;
#endif
glm::vec3 self_position = pos[index];
glm::vec3 perceived_center(0.0f, 0.0f, 0.0f);
glm::vec3 c(0.0f, 0.0f, 0.0f);
glm::vec3 perceived_velocity(0.0f, 0.0f, 0.0f);
int rule1_cnt = 0, rule3_cnt = 0;
#ifdef USE_HALF_CELL_WIDTH
for (int dx = -1; dx <= 1; dx++) {
int x_index = gridIndex3D.x + dx;
    // Valid cell indices run from 0 to gridResolution - 1, so reject gridResolution itself.
    if (x_index < 0 || x_index >= gridResolution) {
continue;
}
for (int dy = -1; dy <= 1; dy++) {
int y_index = gridIndex3D.y + dy;
      if (y_index < 0 || y_index >= gridResolution) {
continue;
}
for (int dz = -1; dz <= 1; dz++) {
int z_index = gridIndex3D.z + dz;
        if (z_index < 0 || z_index >= gridResolution) {
continue;
}
#else
for (int dx = 0; dx <= 1; dx++) {
int x_index = gridIndex3D.x + dx * x_direction;
    if (x_index < 0 || x_index >= gridResolution) {
continue;
}
for (int dy = 0; dy <= 1; dy++) {
int y_index = gridIndex3D.y + dy * y_direction;
      if (y_index < 0 || y_index >= gridResolution) {
continue;
}
for (int dz = 0; dz <= 1; dz++) {
int z_index = gridIndex3D.z + dz * z_direction;
        if (z_index < 0 || z_index >= gridResolution) {
continue;
}
#endif
        // Compute the flattened cell index outside the #ifdef so both configurations reach it.
        int gridIndex1D = gridIndex3Dto1D(x_index, y_index, z_index, gridResolution);
// - For each cell, read the start/end indices in the boid pointer array.
int startIndex = gridCellStartIndices[gridIndex1D];
if (startIndex == -1) {
continue;
}
int endIndex = gridCellEndIndices[gridIndex1D];
for (int targetIndex = startIndex; targetIndex <= endIndex; targetIndex++) {
if (targetIndex == index) {
continue;
}
glm::vec3 target_position = pos[targetIndex];
float distance = glm::distance(self_position, target_position);
if (distance < rule1Distance) {
perceived_center += target_position;
rule1_cnt++;
}
if (distance < rule2Distance) {
c -= (target_position - self_position);
}
if (distance < rule3Distance) {
perceived_velocity += vel1[targetIndex];
rule3_cnt++;
}
}
}
}
}
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
glm::vec3 velocity_change(0.0f, 0.0f, 0.0f);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (rule1_cnt > 0) {
perceived_center /= rule1_cnt;
velocity_change += (perceived_center - self_position) * rule1Scale;
}
// Rule 2: boids try to stay a distance d away from each other
velocity_change += c * rule2Scale;
// Rule 3: boids try to match the speed of surrounding boids
if (rule3_cnt > 0) {
perceived_velocity /= rule3_cnt;
velocity_change += perceived_velocity * rule3Scale;
}
// - Clamp the speed change before putting the new speed in vel2
glm::vec3 new_velocity = vel1[index] + velocity_change;
float speed = glm::length(new_velocity);
if (speed > maxSpeed) {
new_velocity = new_velocity / speed * maxSpeed;
}
vel2[index] = new_velocity;
}
__global__ void kernReshuffleParticleData(
int N, int *particleArrayIndices, glm::vec3 *pos, glm::vec3 *shuffled_pos,
glm::vec3 *vel, glm::vec3 *shuffled_vel) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
int pIndex = particleArrayIndices[index];
shuffled_pos[index] = pos[pIndex];
shuffled_vel[index] = vel[pIndex];
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
hipLaunchKernelGGL(( kernUpdateVelocityBruteForce), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_pos, dev_vel1, dev_vel2);
hipLaunchKernelGGL(( kernUpdatePos), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel2);
// TODO-1.2 ping-pong the velocity buffers
hipMemcpy(dev_vel1, dev_vel2, numObjects * sizeof(glm::vec3), hipMemcpyDeviceToDevice);
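  // Note: swapping the dev_vel1/dev_vel2 pointers would avoid this device-to-device
  // copy; the copy simply keeps dev_vel1 as the buffer that copyBoidsToVBO reads.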
}
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
hipLaunchKernelGGL(( kernComputeIndices), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0,
numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
thrust::device_ptr<int> dev_thrust_particleArrayIndices(dev_particleArrayIndices);
thrust::device_ptr<int> dev_thrust_particleGridIndices(dev_particleGridIndices);
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
dim3 fullBlocksPerGrid2((gridCellCount + blockSize - 1) / blockSize);
hipLaunchKernelGGL(( kernResetIntBuffer), dim3(fullBlocksPerGrid2), dim3(blockSize), 0, 0, gridCellCount, dev_gridCellStartIndices, -1);
hipLaunchKernelGGL(( kernResetIntBuffer), dim3(fullBlocksPerGrid2), dim3(blockSize), 0, 0, gridCellCount, dev_gridCellEndIndices, -1);
hipLaunchKernelGGL(( kernIdentifyCellStartEnd), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
// - Perform velocity updates using neighbor search
hipLaunchKernelGGL(( kernUpdateVelNeighborSearchScattered), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0,
numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices,
dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
// - Update positions
hipLaunchKernelGGL(( kernUpdatePos), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel2);
// - Ping-pong buffers as needed
hipMemcpy(dev_vel1, dev_vel2, numObjects * sizeof(glm::vec3), hipMemcpyDeviceToDevice);
}
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
hipLaunchKernelGGL(( kernComputeIndices), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0,
numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
thrust::device_ptr<int> dev_thrust_particleArrayIndices(dev_particleArrayIndices);
thrust::device_ptr<int> dev_thrust_particleGridIndices(dev_particleGridIndices);
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
dim3 fullBlocksPerGrid2((gridCellCount + blockSize - 1) / blockSize);
hipLaunchKernelGGL(( kernResetIntBuffer), dim3(fullBlocksPerGrid2), dim3(blockSize), 0, 0, gridCellCount, dev_gridCellStartIndices, -1);
hipLaunchKernelGGL(( kernResetIntBuffer), dim3(fullBlocksPerGrid2), dim3(blockSize), 0, 0, gridCellCount, dev_gridCellEndIndices, -1);
hipLaunchKernelGGL(( kernIdentifyCellStartEnd), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
hipLaunchKernelGGL(( kernReshuffleParticleData), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_particleArrayIndices,
dev_pos, dev_shuffled_pos, dev_vel1, dev_shuffled_vel);
hipMemcpy(dev_pos, dev_shuffled_pos, numObjects * sizeof(glm::vec3), hipMemcpyDeviceToDevice);
hipMemcpy(dev_vel1, dev_shuffled_vel, numObjects * sizeof(glm::vec3), hipMemcpyDeviceToDevice);
// - Perform velocity updates using neighbor search
hipLaunchKernelGGL(( kernUpdateVelNeighborSearchCoherent), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0,
numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices,
dev_gridCellEndIndices, dev_pos, dev_vel1, dev_vel2);
// - Update positions
hipLaunchKernelGGL(( kernUpdatePos), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel2);
// - Ping-pong buffers as needed
hipMemcpy(dev_vel1, dev_vel2, numObjects * sizeof(glm::vec3), hipMemcpyDeviceToDevice);
}
void Boids::endSimulation() {
hipFree(dev_vel1);
hipFree(dev_vel2);
hipFree(dev_pos);
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
hipFree(dev_particleArrayIndices);
hipFree(dev_particleGridIndices);
hipFree(dev_gridCellStartIndices);
hipFree(dev_gridCellEndIndices);
hipFree(dev_shuffled_pos);
hipFree(dev_shuffled_vel);
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
hipMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
hipMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
hipMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, hipMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
hipMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
hipMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
hipFree(dev_intKeys);
hipFree(dev_intValues);
checkCUDAErrorWithLine("hipFree failed!");
return;
}
|
d04ad72f31c71e1865d7b4026d19c06e798335f0.cu
|
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
// Use half cell width?
//#define USE_HALF_CELL_WIDTH
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
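// A minimal sketch of the ping-pong idea (illustrative only): each step reads the
// "current" velocities and writes the "next" ones, so no thread ever reads a value
// that another thread is halfway through updating:
//   kernUpdateVelocityBruteForce<<<grid, block>>>(N, dev_pos, dev_vel1, dev_vel2);
//   kernUpdatePos<<<grid, block>>>(N, dt, dev_pos, dev_vel2);
//   std::swap(dev_vel1, dev_vel2);   // or copy vel2 back into vel1, as done below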
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_shuffled_pos;
glm::vec3 *dev_shuffled_vel;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for scattering boids at random positions across the simulation space.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to cudaFree in Boids::endSimulation.
cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
#ifdef USE_HALF_CELL_WIDTH
gridCellWidth = std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
#else
gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
#endif
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
cudaMalloc((void**)&dev_shuffled_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_shuffled_pos failed!");
cudaMalloc((void**)&dev_shuffled_vel, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_shuffled_vel failed!");
cudaDeviceSynchronize();
}
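// Worked numbers for the default constants above (scene_scale = 100, rule
// distances of at most 5, USE_HALF_CELL_WIDTH not defined): gridCellWidth = 10,
// halfSideCount = 11, gridSideCount = 22, gridCellCount = 22^3 = 10648, and the
// zero-initialized global gridMinimum becomes (-110, -110, -110), so the grid
// covers the [-scene_scale, scene_scale] wrap-around range used by kernUpdatePos.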
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for the calls to the kernCopyPositionsToVBO and kernCopyVelocitiesToVBO CUDA kernels.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
cudaDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
glm::vec3 self_position = pos[iSelf];
int rule1_cnt = 0, rule3_cnt = 0;
glm::vec3 perceived_center(0.0f, 0.0f, 0.0f);
glm::vec3 c(0.0f, 0.0f, 0.0f);
glm::vec3 perceived_velocity(0.0f, 0.0f, 0.0f);
for (int i = 0; i < N; i++) {
if (i == iSelf) {
continue;
}
glm::vec3 target_position = pos[i];
float distance = glm::distance(self_position, target_position);
if (distance < rule1Distance) {
perceived_center += target_position;
rule1_cnt++;
}
if (distance < rule2Distance) {
c -= (target_position - self_position);
}
if (distance < rule3Distance) {
perceived_velocity += vel[i];
rule3_cnt++;
}
}
glm::vec3 velocity_change(0.0f, 0.0f, 0.0f);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (rule1_cnt > 0) {
perceived_center /= rule1_cnt;
velocity_change += (perceived_center - self_position) * rule1Scale;
}
// Rule 2: boids try to stay a distance d away from each other
velocity_change += c * rule2Scale;
// Rule 3: boids try to match the speed of surrounding boids
if (rule3_cnt > 0) {
perceived_velocity /= rule3_cnt;
velocity_change += perceived_velocity * rule3Scale;
}
return velocity_change;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, compute a new velocity from the three flocking rules and write it to `vel2`.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 velocity_change = computeVelocityChange(N, index, pos, vel1);
glm::vec3 new_velocity = vel1[index] + velocity_change;
// Clamp the speed
float speed = glm::length(new_velocity);
if (speed > maxSpeed) {
new_velocity = new_velocity / speed * maxSpeed;
}
// Record the new velocity into vel2. Question: why NOT vel1?
vel2[index] = new_velocity;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
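// A sketch of one answer to the LOOK-2.3 question: x varies fastest in the 1D
// index above, so visiting neighbor cells with x in the innermost loop touches
// consecutive entries of gridCellStartIndices/EndIndices:
//   for (int z = zMin; z <= zMax; z++)
//     for (int y = yMin; y <= yMax; y++)
//       for (int x = xMin; x <= xMax; x++)
//         /* use gridIndex3Dto1D(x, y, z, gridResolution) */;
// (zMin/zMax and friends are hypothetical neighborhood bounds, not variables
// defined in this file.)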
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 gridIndex3D = glm::floor((pos[index] - gridMin) * inverseCellWidth);
int grid_index = gridIndex3Dto1D(gridIndex3D.x, gridIndex3D.y, gridIndex3D.z, gridResolution);
gridIndices[index] = grid_index;
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
indices[index] = index;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
int gridIndex = particleGridIndices[index];
if (index == 0 || gridIndex != particleGridIndices[index - 1]) {
gridCellStartIndices[gridIndex] = index;
}
if (index == N - 1 || gridIndex != particleGridIndices[index + 1]) {
gridCellEndIndices[gridIndex] = index;
}
}
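// A small worked example of the bookkeeping above: with both buffers reset to -1
// and sorted particleGridIndices = [2, 2, 2, 6, 6, 9, 9] (N = 7), the kernel yields
//   gridCellStartIndices[2] = 0, gridCellEndIndices[2] = 2
//   gridCellStartIndices[6] = 3, gridCellEndIndices[6] = 4
//   gridCellStartIndices[9] = 5, gridCellEndIndices[9] = 6
// and every other cell keeps start = end = -1, which the neighbor search below
// treats as "empty".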
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 relative_pos = (pos[index] - gridMin) * inverseCellWidth;
glm::vec3 gridIndex3D = glm::floor(relative_pos);
// - Identify which cells may contain neighbors. This isn't always 8.
#ifndef USE_HALF_CELL_WIDTH
int x_direction = (glm::round(relative_pos.x - gridIndex3D.x) == 1) ? 1 : -1;
int y_direction = (glm::round(relative_pos.y - gridIndex3D.y) == 1) ? 1 : -1;
int z_direction = (glm::round(relative_pos.z - gridIndex3D.z) == 1) ? 1 : -1;
#endif
glm::vec3 self_position = pos[index];
glm::vec3 perceived_center(0.0f, 0.0f, 0.0f);
glm::vec3 c(0.0f, 0.0f, 0.0f);
glm::vec3 perceived_velocity(0.0f, 0.0f, 0.0f);
int rule1_cnt = 0, rule3_cnt = 0;
#ifdef USE_HALF_CELL_WIDTH
for (int dx = -1; dx <= 1; dx++) {
int x_index = gridIndex3D.x + dx;
if (x_index < 0 || x_index >= gridResolution) {
continue;
}
for (int dy = -1; dy <= 1; dy++) {
int y_index = gridIndex3D.y + dy;
if (y_index < 0 || y_index >= gridResolution) {
continue;
}
for (int dz = -1; dz <= 1; dz++) {
int z_index = gridIndex3D.z + dz;
if (z_index < 0 || z_index >= gridResolution) {
continue;
}
#else
for (int dx = 0; dx <= 1; dx++) {
int x_index = gridIndex3D.x + dx * x_direction;
if (x_index < 0 || x_index >= gridResolution) {
continue;
}
for (int dy = 0; dy <= 1; dy++) {
int y_index = gridIndex3D.y + dy * y_direction;
if (y_index < 0 || y_index >= gridResolution) {
continue;
}
for (int dz = 0; dz <= 1; dz++) {
int z_index = gridIndex3D.z + dz * z_direction;
if (z_index < 0 || z_index >= gridResolution) {
continue;
}
#endif
int gridIndex1D = gridIndex3Dto1D(x_index, y_index, z_index, gridResolution);
// - For each cell, read the start/end indices in the boid pointer array.
int startIndex = gridCellStartIndices[gridIndex1D];
if (startIndex == -1) {
continue;
}
int endIndex = gridCellEndIndices[gridIndex1D];
for (int SortedIndex = startIndex; SortedIndex <= endIndex; SortedIndex++) {
int targetIndex = particleArrayIndices[SortedIndex];
if (targetIndex == index) {
continue;
}
glm::vec3 target_position = pos[targetIndex];
float distance = glm::distance(self_position, target_position);
if (distance < rule1Distance) {
perceived_center += target_position;
rule1_cnt++;
}
if (distance < rule2Distance) {
c -= (target_position - self_position);
}
if (distance < rule3Distance) {
perceived_velocity += vel1[targetIndex];
rule3_cnt++;
}
}
}
}
}
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
glm::vec3 velocity_change(0.0f, 0.0f, 0.0f);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (rule1_cnt > 0) {
perceived_center /= rule1_cnt;
velocity_change += (perceived_center - self_position) * rule1Scale;
}
// Rule 2: boids try to stay a distance d away from each other
velocity_change += c * rule2Scale;
// Rule 3: boids try to match the speed of surrounding boids
if (rule3_cnt > 0) {
perceived_velocity /= rule3_cnt;
velocity_change += perceived_velocity * rule3Scale;
}
// - Clamp the speed change before putting the new speed in vel2
glm::vec3 new_velocity = vel1[index] + velocity_change;
float speed = glm::length(new_velocity);
if (speed > maxSpeed) {
new_velocity = new_velocity / speed * maxSpeed;
}
vel2[index] = new_velocity;
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 relative_pos = (pos[index] - gridMin) * inverseCellWidth;
glm::vec3 gridIndex3D = glm::floor(relative_pos);
// - Identify which cells may contain neighbors. This isn't always 8.
#ifndef USE_HALF_CELL_WIDTH
int x_direction = (glm::round(relative_pos.x - gridIndex3D.x) == 1) ? 1 : -1;
int y_direction = (glm::round(relative_pos.y - gridIndex3D.y) == 1) ? 1 : -1;
int z_direction = (glm::round(relative_pos.z - gridIndex3D.z) == 1) ? 1 : -1;
#endif
glm::vec3 self_position = pos[index];
glm::vec3 perceived_center(0.0f, 0.0f, 0.0f);
glm::vec3 c(0.0f, 0.0f, 0.0f);
glm::vec3 perceived_velocity(0.0f, 0.0f, 0.0f);
int rule1_cnt = 0, rule3_cnt = 0;
#ifdef USE_HALF_CELL_WIDTH
for (int dx = -1; dx <= 1; dx++) {
int x_index = gridIndex3D.x + dx;
if (x_index < 0 || x_index >= gridResolution) {
continue;
}
for (int dy = -1; dy <= 1; dy++) {
int y_index = gridIndex3D.y + dy;
if (y_index < 0 || y_index >= gridResolution) {
continue;
}
for (int dz = -1; dz <= 1; dz++) {
int z_index = gridIndex3D.z + dz;
if (z_index < 0 || z_index >= gridResolution) {
continue;
}
#else
for (int dx = 0; dx <= 1; dx++) {
int x_index = gridIndex3D.x + dx * x_direction;
if (x_index < 0 || x_index >= gridResolution) {
continue;
}
for (int dy = 0; dy <= 1; dy++) {
int y_index = gridIndex3D.y + dy * y_direction;
if (y_index < 0 || y_index >= gridResolution) {
continue;
}
for (int dz = 0; dz <= 1; dz++) {
int z_index = gridIndex3D.z + dz * z_direction;
if (z_index < 0 || z_index >= gridResolution) {
continue;
}
#endif
int gridIndex1D = gridIndex3Dto1D(x_index, y_index, z_index, gridResolution);
// - For each cell, read the start/end indices in the boid pointer array.
int startIndex = gridCellStartIndices[gridIndex1D];
if (startIndex == -1) {
continue;
}
int endIndex = gridCellEndIndices[gridIndex1D];
for (int targetIndex = startIndex; targetIndex <= endIndex; targetIndex++) {
if (targetIndex == index) {
continue;
}
glm::vec3 target_position = pos[targetIndex];
float distance = glm::distance(self_position, target_position);
if (distance < rule1Distance) {
perceived_center += target_position;
rule1_cnt++;
}
if (distance < rule2Distance) {
c -= (target_position - self_position);
}
if (distance < rule3Distance) {
perceived_velocity += vel1[targetIndex];
rule3_cnt++;
}
}
}
}
}
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
glm::vec3 velocity_change(0.0f, 0.0f, 0.0f);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (rule1_cnt > 0) {
perceived_center /= rule1_cnt;
velocity_change += (perceived_center - self_position) * rule1Scale;
}
// Rule 2: boids try to stay a distance d away from each other
velocity_change += c * rule2Scale;
// Rule 3: boids try to match the speed of surrounding boids
if (rule3_cnt > 0) {
perceived_velocity /= rule3_cnt;
velocity_change += perceived_velocity * rule3Scale;
}
// - Clamp the speed change before putting the new speed in vel2
glm::vec3 new_velocity = vel1[index] + velocity_change;
float speed = glm::length(new_velocity);
if (speed > maxSpeed) {
new_velocity = new_velocity / speed * maxSpeed;
}
vel2[index] = new_velocity;
}
__global__ void kernReshuffleParticleData(
int N, int *particleArrayIndices, glm::vec3 *pos, glm::vec3 *shuffled_pos,
glm::vec3 *vel, glm::vec3 *shuffled_vel) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
int pIndex = particleArrayIndices[index];
shuffled_pos[index] = pos[pIndex];
shuffled_vel[index] = vel[pIndex];
}
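// Note: this kernel is a gather keyed by the sorted order, i.e.
// shuffled[i] = data[particleArrayIndices[i]]. After the results are copied back
// in stepSimulationCoherentGrid, dev_pos/dev_vel1 are contiguous per grid cell,
// so gridCellStartIndices/EndIndices can index them directly without the extra
// indirection through dev_particleArrayIndices.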
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernUpdateVelocityBruteForce<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_pos, dev_vel1, dev_vel2);
kernUpdatePos<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel2);
// TODO-1.2 ping-pong the velocity buffers
cudaMemcpy(dev_vel1, dev_vel2, numObjects * sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
}
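// Note on the ping-pong above: copying vel2 back into vel1 keeps each buffer in a
// fixed role; an equivalent (and cheaper) alternative would be to swap the
// pointers instead, e.g.
//   std::swap(dev_vel1, dev_vel2);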
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernComputeIndices<<<fullBlocksPerGrid, blockSize>>>
(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
thrust::device_ptr<int> dev_thrust_particleArrayIndices(dev_particleArrayIndices);
thrust::device_ptr<int> dev_thrust_particleGridIndices(dev_particleGridIndices);
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
dim3 fullBlocksPerGrid2((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer<<<fullBlocksPerGrid2, blockSize>>>(gridCellCount, dev_gridCellStartIndices, -1);
kernResetIntBuffer<<<fullBlocksPerGrid2, blockSize>>>(gridCellCount, dev_gridCellEndIndices, -1);
kernIdentifyCellStartEnd<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
// - Perform velocity updates using neighbor search
kernUpdateVelNeighborSearchScattered<<<fullBlocksPerGrid, blockSize>>>
(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices,
dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
// - Update positions
kernUpdatePos<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel2);
// - Ping-pong buffers as needed
cudaMemcpy(dev_vel1, dev_vel2, numObjects * sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
}
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationScatteredGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernComputeIndices<<<fullBlocksPerGrid, blockSize>>>
(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
thrust::device_ptr<int> dev_thrust_particleArrayIndices(dev_particleArrayIndices);
thrust::device_ptr<int> dev_thrust_particleGridIndices(dev_particleGridIndices);
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
dim3 fullBlocksPerGrid2((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer<<<fullBlocksPerGrid2, blockSize>>>(gridCellCount, dev_gridCellStartIndices, -1);
kernResetIntBuffer<<<fullBlocksPerGrid2, blockSize>>>(gridCellCount, dev_gridCellEndIndices, -1);
kernIdentifyCellStartEnd<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
kernReshuffleParticleData<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_particleArrayIndices,
dev_pos, dev_shuffled_pos, dev_vel1, dev_shuffled_vel);
cudaMemcpy(dev_pos, dev_shuffled_pos, numObjects * sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
cudaMemcpy(dev_vel1, dev_shuffled_vel, numObjects * sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
// - Perform velocity updates using neighbor search
kernUpdateVelNeighborSearchCoherent<<<fullBlocksPerGrid, blockSize>>>
(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices,
dev_gridCellEndIndices, dev_pos, dev_vel1, dev_vel2);
// - Update positions
kernUpdatePos<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel2);
// - Ping-pong buffers as needed
cudaMemcpy(dev_vel1, dev_vel2, numObjects * sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
}
void Boids::endSimulation() {
cudaFree(dev_vel1);
cudaFree(dev_vel2);
cudaFree(dev_pos);
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
cudaFree(dev_particleArrayIndices);
cudaFree(dev_particleGridIndices);
cudaFree(dev_gridCellStartIndices);
cudaFree(dev_gridCellEndIndices);
cudaFree(dev_shuffled_pos);
cudaFree(dev_shuffled_vel);
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
cudaMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
cudaMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
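// With the keys above, the sorted output begins with the four 0-keys (their
// values are some permutation of 0, 2, 4, 7, since the sort is not stable),
// followed by key 1, the two 2-keys, then 3, 5, 6.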
// How to copy data back to the CPU side from the GPU
cudaMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
cudaFree(dev_intKeys);
cudaFree(dev_intValues);
checkCUDAErrorWithLine("cudaFree failed!");
return;
}
|
3ed7935fdd89460add6197873cd1d6d75a8ceddc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "common.h"
#include "im2col.h"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "generic/Im2Col.cu"
#include "THHGenerateFloatTypes.h"
|
3ed7935fdd89460add6197873cd1d6d75a8ceddc.cu
|
#include "THCUNN.h"
#include "common.h"
#include "im2col.h"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "generic/Im2Col.cu"
#include "THCGenerateFloatTypes.h"
|
1ad98260102346642b4ed691cb0f579d46068193.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "trackers/klttracker.h"
#include <cmath>
#include <fstream>
#include <string>
#include <opencv2/opencv.hpp>
#include <opencv2/gpu/gpu.hpp>
#include <hip/hip_runtime.h>
//#include <rocblas.h>
#include <hip/hip_runtime.h>
using namespace cv;
using namespace std;
#define PI 3.14159265
#define persA 0.06
#define persB 40
#define minDist 10
#define TOPK 5
//#define persA 0.01
//#define persB 20
//#define minDist 5
Mat corners,prePts,nextPts,status,eigenvec;
cv::gpu::GoodFeaturesToTrackDetector_GPU detector;
cv::gpu::PyrLKOpticalFlow tracker;
cv::gpu::GpuMat gpuGray, gpuPreGray, gpuCorners, gpuPrePts, gpuNextPts,gpuStatus,gpuEigenvec,gpuDenseX,gpuDenseY,gpuDenseXC,gpuDenseYC;
Mat denseX,denseY,denseRGB;
typedef struct
{
int i0, i1;
float correlation;
}ppair, p_ppair;
__device__ int d_pairN[1];
__device__ float maxr[1], maxg[1], maxb[1];
__device__ unsigned char *d_neighbor;
__device__ unsigned int* d_isnewmat, *d_netOrder;
__device__ float* d_distmat, *d_curvec, *d_group, *d_correlation, *d_netUpdate, *d_clrmap, *h_netUpdate, *d_crossDist,*d_topK;
__device__ ofv* d_ofvec;
__device__ ppair* d_pairvec, *h_pairvec;
__device__ unsigned char d_baseclr[6][3]=
{
{ 0, 255, 0 },
{ 0, 0, 255 },
{ 255, 255, 0 },
{ 255, 0, 255 },
{ 0, 255, 255 },
{ 255, 0, 0 },
};
__global__ void crossDist(unsigned int* dst,float* vertical,float* horizon,int h,int w)
{
int x = threadIdx.x,y=blockIdx.x;
if (x < w&&y<h)
{
float xv = vertical[y * 2], yv = vertical[y*2+1],xh=horizon[x*2],yh=horizon[x*2+1];
float dx = xv - xh, dy = yv - yh;
float dist = abs(dx) + abs(dy);
if (dist < minDist)
atomicAdd(dst + y, 1);
}
}
__global__ void searchNeighbor(unsigned char* d_neighbor, ofv* d_ofvec , ppair* d_pairvec,unsigned int* d_netOrder, int nFeatures)
{
int r = blockIdx.x, c = threadIdx.x;
if (r < c)
{
float dx = abs(d_ofvec[r].x1 - d_ofvec[c].x1), dy = abs(d_ofvec[r].y1 - d_ofvec[c].y1);
int yidx = d_ofvec[r].idx, xidx = d_ofvec[c].idx;
float dist = sqrt(dx*dx + dy*dy);
float xmid = (d_ofvec[r].x1 + d_ofvec[c].x1) / 2, ymid = (d_ofvec[r].y1 + d_ofvec[c].y1) / 2;
if (dx < ymid*(persA)+persB && dy < ymid*(persA*1.5) + persB*1.5)
{
d_neighbor[yidx*nFeatures + xidx] = 1;
d_neighbor[xidx*nFeatures + yidx] = 1;
float vx0 = d_ofvec[r].x1 - d_ofvec[r].x0, vx1 = d_ofvec[c].x1 - d_ofvec[c].x0,
vy0 = d_ofvec[r].y1 - d_ofvec[r].y0, vy1 = d_ofvec[c].y1 - d_ofvec[c].y0;
float norm0 = sqrt(vx0*vx0 + vy0*vy0), norm1 = sqrt(vx1*vx1 + vy1*vy1);
float cosine = (vx0*vx1 + vy0*vy1) / norm0 / norm1;
float cor = cosine / (dist / 10 + 0.1);
//if (cosine > 0.5
ppair tmppair;
tmppair.i0 = yidx, tmppair.i1 = xidx, tmppair.correlation = cor;
int arrpos = atomicAdd(d_pairN, 1);
memcpy(d_pairvec + arrpos, &tmppair,sizeof(ppair));
atomicAdd(d_netOrder + yidx,1);
atomicAdd(d_netOrder + xidx, 1);
}
/*
else
d_neighbor[yidx*nFeatures + xidx] = 0;
*/
}
}
__global__ void calUpdate(float* d_group, ppair* d_pairvec, float* d_netUpdate)
{
int nPair = gridDim.x,nFeatures = blockDim.x;
int ipair = blockIdx.x, idim = threadIdx.x;
int i0 = d_pairvec[ipair].i0, i1 = d_pairvec[ipair].i1;
float cor = d_pairvec[ipair].correlation;
//printf("%f\n", cor);
float update0 = d_group[i1*nFeatures + idim] * cor;
float update1 = d_group[i0*nFeatures + idim] * cor;
atomicAdd(d_netUpdate + i0*nFeatures + idim, update0);
atomicAdd(d_netUpdate + i1*nFeatures + idim, update1);
}
__global__ void updateNet(float* d_group, float* d_netUpdate,unsigned int* d_netOrder)
{
int idx = blockIdx.x, nFeatures = blockDim.x;
int dim = threadIdx.x;
int order = d_netOrder[idx];
if (order > 0)
{
float newval = d_netUpdate[idx*nFeatures+dim]/order;
float oldval = d_group[idx*nFeatures + dim];
d_group[idx*nFeatures + dim] = (oldval + newval) / 2;
}
}
__global__ void inCross(float* d_group,float* d_crossDist)
{
int nFeatures = blockDim.x;
int i0 = blockIdx.x, i1 = threadIdx.x;
float val = 0;
__shared__ float group0[1000];
// Stage the block-wide row cooperatively: each thread copies one element and the
// block synchronizes before use. The per-thread row (i1) is read directly from
// global memory; staging it in shared memory as well would race, since every
// thread in the block would overwrite the same buffer with a different row.
group0[i1] = d_group[i0*nFeatures + i1];
__syncthreads();
for (int i = 0; i < nFeatures; i++)
{
val += group0[i] * d_group[i1*nFeatures + i];
}
d_crossDist[i0*nFeatures + i1] = val;
}
__global__ void genClr(float* d_clrmap, float* d_group)
{
int nFeatures = blockDim.x;
int idx = blockIdx.x, idim = threadIdx.x;
unsigned char r = d_baseclr[idim % 6][0], g = d_baseclr[idim % 6][1], b = d_baseclr[idim % 6][2];
float val = d_group[idx*nFeatures + idim]/nFeatures;
atomicAdd(d_clrmap + idx * 3, r*val);
atomicAdd(d_clrmap + idx * 3+1, g*val);
atomicAdd(d_clrmap + idx * 3+2, b*val);
}
int KLTtracker::init(int bsize,int w,int h)
{
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6);
std::cout << "maxgridDim" << prop.maxGridSize[0] << "," << prop.maxGridSize[1] << "," << prop.maxGridSize[2] << std::endl;
std::cout<<"maxThreadsPerBlock:"<<prop.maxThreadsPerBlock<<std::endl;
std::cout << prop.major << "," << prop.minor << std::endl;
}
nFeatures = 1000;
nSearch=3000;
trackBuff = std::vector<FeatBuff>(nFeatures);
isTracking=new int[nFeatures];
for (int i=0;i<nFeatures;i++)
{
trackBuff[i].init(1,100);
isTracking[i]=0;
}
frame_width = w;
frame_height = h;
frameidx=0;
dirvec = new float[nFeatures];
memset(dirvec, 0, nFeatures*sizeof(float));
goodNewPts.init(1,nSearch);
detector= gpu::GoodFeaturesToTrackDetector_GPU(nSearch,0.0001,3,3);
tracker = gpu::PyrLKOpticalFlow();
tracker.winSize=Size(3,3);
tracker.maxLevel=3;
tracker.iters=10;
gpuGray=gpu::GpuMat(frame_height, frame_width, CV_8UC1 );
gpuPreGray=gpu::GpuMat(frame_height, frame_width, CV_8UC1 );
hipMalloc(&d_isnewmat, nSearch*sizeof(unsigned int));
h_isnewmat = (unsigned int*)malloc(nSearch*sizeof(unsigned int));
h_curvec = (float*)malloc(nFeatures*2*sizeof(float));
hipMalloc(&d_curvec, nFeatures * 2 * sizeof(float));
hipMalloc(&d_ofvec, nFeatures* sizeof(ofv));
ofvBuff.init(1, nFeatures);
hipMalloc(&d_neighbor, nFeatures*nFeatures);
h_neighbor = (unsigned char*)malloc(nFeatures*nFeatures);
memset(h_neighbor, 0, nFeatures*nFeatures);
hipMalloc(&d_group, nFeatures*nFeatures*sizeof(float));
h_group = (float *)malloc(nFeatures*nFeatures*sizeof(float));
hipMalloc(&d_netOrder, nFeatures*sizeof(unsigned int));
//hipMalloc(&d_correlation, nFeatures*nFeatures*sizeof(float));
hipMalloc(&d_pairvec, nFeatures*sizeof(ppair));
h_pairvec = (ppair *)malloc(nFeatures*sizeof(ppair));
hipMalloc(&d_netUpdate, nFeatures*nFeatures*sizeof(float));
h_netUpdate = (float*)malloc(nFeatures*nFeatures*sizeof(float));
h_pairN = 0;
hipMemcpyToSymbol(d_pairN, &h_pairN, sizeof(int));
hipMalloc(&d_crossDist, nFeatures*nFeatures*sizeof(float));
h_crossDist = (float *)malloc(nFeatures*nFeatures*sizeof(float));
hipMalloc(&d_clrmap, nFeatures * 3 * sizeof(float));
h_clrmap = (float*)malloc(nFeatures * 3 * sizeof(float));
//hipMalloc(&d_topK, nFeatures * TOPK * sizeof(float));
h_topK = (unsigned int*)malloc(nFeatures * TOPK * sizeof(unsigned int));
memset(h_topK, 0, nFeatures * TOPK * sizeof(unsigned int));
std::cout << "inited" << std::endl;
gt_inited = false;
return 1;
}
int KLTtracker::selfinit(unsigned char* framedata)
{
curframedata=framedata;
Mat curframe(frame_height,frame_width,CV_8UC1,framedata);
gpuGray.upload(curframe);
gpuPreGray.upload(curframe);
detector(gpuGray, gpuCorners);
gpuCorners.download(corners);
gpuCorners.copyTo(gpuPrePts);
for (int k = 0; k < nFeatures; k++)
{
Vec2f p = corners.at<Vec2f>(k);
pttmp.x = p[0];//(PntT)(p[0] + 0.5);
pttmp.y = p[1];//(PntT)(p[1]+ 0.5);
pttmp.t = frameidx;
trackBuff[k].updateAFrame(&pttmp);
isTracking[k]=1;
memset(h_group + k*nFeatures, 0, nFeatures*sizeof(float));
h_group[k*nFeatures + k] = 1;
h_curvec[k * 2] = trackBuff[k].cur_frame_ptr->x;
h_curvec[k * 2 + 1] = trackBuff[k].cur_frame_ptr->y;
}
return true;
}
bool KLTtracker::checkTrackMoving(FeatBuff &strk)
{
bool isTrkValid = true;
int Movelen=7,minlen=5,startidx=max(strk.len-Movelen,0);
if(strk.len>Movelen)
{
double maxdist = .0, dtmp = .0,totlen=.0;
FeatPts* aptr = strk.getPtr(startidx), *bptr = aptr;
PntT xa=aptr->x,ya=aptr->y,xb=strk.cur_frame_ptr->x,yb=strk.cur_frame_ptr->y;
double displc=sqrt( pow(xb-xa, 2.0) + pow(yb-ya, 2.0));
if((strk.len -startidx)*0.2>displc)
{
isTrkValid = false;
}
}
return isTrkValid;
}
int KLTtracker::updateAframe(unsigned char* framedata, unsigned char* rgbdata, int fidx)
{
frameidx=fidx;
curframedata=framedata;
gpuGray.copyTo(gpuPreGray);
//gpuPreGray.data = gpuGray.data;
Mat curframe(frame_height,frame_width,CV_8UC1,framedata);
gpuGray.upload(curframe);
tracker.sparse(gpuPreGray, gpuGray, gpuPrePts, gpuNextPts, gpuStatus, &gpuEigenvec);
gpuStatus.download(status);
gpuNextPts.download(nextPts);
gpuPrePts.download(prePts);
detector(gpuGray, gpuCorners);
gpuCorners.download(corners);
hipMemcpy(d_curvec, h_curvec, nFeatures*2*sizeof(float), hipMemcpyHostToDevice);
hipMemset(d_isnewmat, 0, nSearch*sizeof(unsigned int));
hipLaunchKernelGGL(( crossDist), dim3(nSearch), dim3(nFeatures), 0, 0, d_isnewmat, (float *)gpuCorners.data, d_curvec, nSearch, nFeatures);
hipMemcpy(h_isnewmat, d_isnewmat, nSearch*sizeof(unsigned int), hipMemcpyDeviceToHost);
goodNewPts.clear();
for(int i=0;i<nSearch;i++)
{
if (h_isnewmat[i] == 0)
{
goodNewPts.updateAFrame(&i);
}
}
ofvBuff.clear();
int addidx=0,counter=0;
for (int k = 0; k < nFeatures; k++)
{
int statusflag = status.at<int>(k);
Vec2f trkp = nextPts.at<Vec2f>(k);
bool lost=false;
if ( statusflag)
{
Vec2f pre=prePts.at<Vec2f>(k),cur=nextPts.at<Vec2f>(k);
int prex=trackBuff[k].cur_frame_ptr->x, prey=trackBuff[k].cur_frame_ptr->y;
pttmp.x = trkp[0];
pttmp.y = trkp[1];
pttmp.t = frameidx;
trackBuff[k].updateAFrame(&pttmp);
double trkdist=abs(prex-pttmp.x)+abs(prey-pttmp.y),ofdist=abs(pre[0]-cur[0])+abs(pre[1]-cur[1]);
dirvec[k] = 0.5*dirvec[k] + 0.5*sgn(pttmp.y-prey);
isTracking[k]=1;
bool isMoving=checkTrackMoving(trackBuff[k]);
if (!isMoving||(trackBuff[k].len>1 && trkdist>10))
{
lost=true;
isTracking[k]=0;
}
}
else
{
counter++;
lost=true;
isTracking[k]=0;
}
if(lost)
{
trackBuff[k].clear();
dirvec[k] = 0;
if(addidx<goodNewPts.len)
{
int newidx=*(goodNewPts.getPtr(addidx++));
Vec2f cnrp = corners.at<Vec2f>(newidx);
pttmp.x = cnrp[0];
pttmp.y = cnrp[1];
pttmp.t = frameidx;
trackBuff[k].updateAFrame(&pttmp);
nextPts.at<Vec2f>(k)=cnrp;
isTracking[k]=1;
memset(h_group + k*nFeatures, 0, nFeatures*sizeof(float));
h_group[k*nFeatures + k] = 1;
}
}
else
{
if (trackBuff[k].len > 8)
{
ofvtmp.x0 = trackBuff[k].getPtr(trackBuff[k].len - 5)->x;
ofvtmp.y0 = trackBuff[k].getPtr(trackBuff[k].len - 5)->y;
ofvtmp.x1 = trackBuff[k].cur_frame_ptr->x;
ofvtmp.y1 = trackBuff[k].cur_frame_ptr->y;
ofvtmp.len = trackBuff[k].len;
ofvtmp.idx = k;
ofvBuff.updateAFrame(&ofvtmp);
}
}
h_curvec[k * 2] = trackBuff[k].cur_frame_ptr->x;
h_curvec[k * 2 + 1] = trackBuff[k].cur_frame_ptr->y;
}
if (ofvBuff.len > 0)
{
h_pairN = 0;
hipMemset(d_ofvec, 0, nFeatures* sizeof(ofv));
hipMemcpy(d_ofvec, ofvBuff.data, ofvBuff.len*sizeof(ofv), hipMemcpyHostToDevice);
hipMemset(d_neighbor, 0, nFeatures*nFeatures);
hipMemset(d_netOrder, 0, nFeatures*sizeof(unsigned int));
hipMemcpyToSymbol(d_pairN, &h_pairN, sizeof(int));
searchNeighbor << <ofvBuff.len, ofvBuff.len >> >(d_neighbor, d_ofvec, d_pairvec,d_netOrder, nFeatures);
hipMemcpy(h_pairvec, d_pairvec, nFeatures*sizeof(ppair), hipMemcpyDeviceToHost);
hipMemcpy(h_neighbor, d_neighbor, nFeatures*nFeatures, hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(&h_pairN, d_pairN, sizeof(int));
hipMemcpy(d_group, h_group, nFeatures*nFeatures*sizeof(float), hipMemcpyHostToDevice);
std::cout << "h_pairN:" << h_pairN << std::endl;
//for (int i = 0; i < nFeatures; i++)
//{
//}
hipMemset(d_netUpdate, 0, nFeatures*nFeatures*sizeof(float));
hipLaunchKernelGGL(( calUpdate) , dim3(h_pairN), dim3(nFeatures), 0, 0, d_group, d_pairvec, d_netUpdate);
hipMemcpy(h_netUpdate, d_netUpdate, nFeatures*nFeatures*sizeof(float), hipMemcpyDeviceToHost);
hipLaunchKernelGGL(( updateNet) , dim3(nFeatures),dim3(nFeatures), 0, 0, d_group, d_netUpdate, d_netOrder);
hipMemcpy(h_group, d_group, nFeatures*nFeatures*sizeof(float), hipMemcpyDeviceToHost);
/*
float maxval;
for (int i = 0; i < nFeatures; i++)
{
for (int j = 0; j < nFeatures; j++)
{
int ind = (maxval>h_group[i*nFeatures + j]);
maxval = ind*maxval + (1 - ind)*h_group[i*nFeatures + j];
}
}
std::cout <<maxval<< std::endl;
*/
inCross << <nFeatures, nFeatures >> >(d_group, d_crossDist);
hipMemcpy(h_crossDist, d_crossDist, nFeatures*nFeatures*sizeof(float), hipMemcpyDeviceToHost);
for (int i = 0; i < nFeatures; i++)
{
std::cout << h_crossDist[i*nFeatures + i] << std::endl;
}
/*
std::cout << std::endl;
hipMemset(d_clrmap, 0, 3 * nFeatures*sizeof(float));
genClr << <nFeatures, nFeatures >> >(d_clrmap, d_group);
hipMemcpy(h_clrmap, d_clrmap, 3*nFeatures*sizeof(float), hipMemcpyDeviceToHost);
float maxr = 0, maxg = 0, maxb = 0;
for (int j = 0; j < nFeatures; j++)
{
int ind = (maxr>h_clrmap[j*3]);
maxr = ind*maxr + (1 - ind)*h_clrmap[j * 3];
ind = (maxg>h_clrmap[j * 3+1]);
maxg = ind*maxg + (1 - ind)*h_clrmap[j * 3+1];
ind = (maxb>h_clrmap[j * 3+2]);
maxb = ind*maxb + (1 - ind)*h_clrmap[j * 3+2];
}
for (int j = 0; j < nFeatures; j++)
{
h_clrmap[j * 3] /= maxr/255;
h_clrmap[j * 3+1] /= maxg/255;
h_clrmap[j * 3+2] /= maxb/255;
//std::cout << h_clrmap[j * 3] << "," << h_clrmap[j * 3 + 1] << "," << h_clrmap[j * 3 + 2] << "," << std::endl;
}
*/
}
gpuPrePts.upload(nextPts);
return 1;
}
|
1ad98260102346642b4ed691cb0f579d46068193.cu
|
#include "trackers/klttracker.h"
#include <cmath>
#include <fstream>
#include <string>
#include <opencv2/opencv.hpp>
#include <opencv2/gpu/gpu.hpp>
#include <cuda.h>
//#include <cublas.h>
#include <cuda_runtime.h>
using namespace cv;
using namespace std;
#define PI 3.14159265
#define persA 0.06
#define persB 40
#define minDist 10
#define TOPK 5
//#define persA 0.01
//#define persB 20
//#define minDist 5
Mat corners,prePts,nextPts,status,eigenvec;
cv::gpu::GoodFeaturesToTrackDetector_GPU detector;
cv::gpu::PyrLKOpticalFlow tracker;
cv::gpu::GpuMat gpuGray, gpuPreGray, gpuCorners, gpuPrePts, gpuNextPts,gpuStatus,gpuEigenvec,gpuDenseX,gpuDenseY,gpuDenseXC,gpuDenseYC;
Mat denseX,denseY,denseRGB;
typedef struct
{
int i0, i1;
float correlation;
}ppair, p_ppair;
__device__ int d_pairN[1];
__device__ float maxr[1], maxg[1], maxb[1];
__device__ unsigned char *d_neighbor;
__device__ unsigned int* d_isnewmat, *d_netOrder;
__device__ float* d_distmat, *d_curvec, *d_group, *d_correlation, *d_netUpdate, *d_clrmap, *h_netUpdate, *d_crossDist,*d_topK;
__device__ ofv* d_ofvec;
__device__ ppair* d_pairvec, *h_pairvec;
__device__ unsigned char d_baseclr[6][3]=
{
{ 0, 255, 0 },
{ 0, 0, 255 },
{ 255, 255, 0 },
{ 255, 0, 255 },
{ 0, 255, 255 },
{ 255, 0, 0 },
};
__global__ void crossDist(unsigned int* dst,float* vertical,float* horizon,int h,int w)
{
int x = threadIdx.x,y=blockIdx.x;
if (x < w&&y<h)
{
float xv = vertical[y * 2], yv = vertical[y*2+1],xh=horizon[x*2],yh=horizon[x*2+1];
float dx = xv - xh, dy = yv - yh;
float dist = abs(dx) + abs(dy);
if (dist < minDist)
atomicAdd(dst + y, 1);
}
}
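// Launch shape used in KLTtracker::updateAframe below: one block per detected
// corner (h = nSearch rows of `vertical`) and one thread per tracked feature
// (w = nFeatures columns of `horizon`), so dst[y] ends up counting how many
// existing tracks lie within minDist (L1 distance) of corner y; dst[y] == 0
// later marks corner y as a candidate new feature.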
__global__ void searchNeighbor(unsigned char* d_neighbor, ofv* d_ofvec , ppair* d_pairvec,unsigned int* d_netOrder, int nFeatures)
{
int r = blockIdx.x, c = threadIdx.x;
if (r < c)
{
float dx = abs(d_ofvec[r].x1 - d_ofvec[c].x1), dy = abs(d_ofvec[r].y1 - d_ofvec[c].y1);
int yidx = d_ofvec[r].idx, xidx = d_ofvec[c].idx;
float dist = sqrt(dx*dx + dy*dy);
float xmid = (d_ofvec[r].x1 + d_ofvec[c].x1) / 2, ymid = (d_ofvec[r].y1 + d_ofvec[c].y1) / 2;
if (dx < ymid*(persA)+persB && dy < ymid*(persA*1.5) + persB*1.5)
{
d_neighbor[yidx*nFeatures + xidx] = 1;
d_neighbor[xidx*nFeatures + yidx] = 1;
float vx0 = d_ofvec[r].x1 - d_ofvec[r].x0, vx1 = d_ofvec[c].x1 - d_ofvec[c].x0,
vy0 = d_ofvec[r].y1 - d_ofvec[r].y0, vy1 = d_ofvec[c].y1 - d_ofvec[c].y0;
float norm0 = sqrt(vx0*vx0 + vy0*vy0), norm1 = sqrt(vx1*vx1 + vy1*vy1);
float cosine = (vx0*vx1 + vy0*vy1) / norm0 / norm1;
float cor = cosine / (dist / 10 + 0.1);
//if (cosine > 0.5
ppair tmppair;
tmppair.i0 = yidx, tmppair.i1 = xidx, tmppair.correlation = cor;
int arrpos = atomicAdd(d_pairN, 1);
memcpy(d_pairvec + arrpos, &tmppair,sizeof(ppair));
atomicAdd(d_netOrder + yidx,1);
atomicAdd(d_netOrder + xidx, 1);
}
/*
else
d_neighbor[yidx*nFeatures + xidx] = 0;
*/
}
}
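// Each (r, c) thread with r < c compares two recent flow vectors: if their end
// points fall inside the perspective-scaled window tested above, both features
// are flagged as neighbors, a correlation-weighted pair is appended (the slot
// comes from an atomicAdd on the global pair counter d_pairN), and each
// feature's degree in d_netOrder is incremented.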
__global__ void calUpdate(float* d_group, ppair* d_pairvec, float* d_netUpdate)
{
int nPair = gridDim.x,nFeatures = blockDim.x;
int ipair = blockIdx.x, idim = threadIdx.x;
int i0 = d_pairvec[ipair].i0, i1 = d_pairvec[ipair].i1;
float cor = d_pairvec[ipair].correlation;
//printf("%f\n", cor);
float update0 = d_group[i1*nFeatures + idim] * cor;
float update1 = d_group[i0*nFeatures + idim] * cor;
atomicAdd(d_netUpdate + i0*nFeatures + idim, update0);
atomicAdd(d_netUpdate + i1*nFeatures + idim, update1);
}
__global__ void updateNet(float* d_group, float* d_netUpdate,unsigned int* d_netOrder)
{
int idx = blockIdx.x, nFeatures = blockDim.x;
int dim = threadIdx.x;
int order = d_netOrder[idx];
if (order > 0)
{
float newval = d_netUpdate[idx*nFeatures+dim]/order;
float oldval = d_group[idx*nFeatures + dim];
d_group[idx*nFeatures + dim] = (oldval + newval) / 2;
}
}
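// Taken together, calUpdate and updateNet perform one relaxation step of a
// simple label-propagation scheme over the pair graph: every pair accumulates
// its partner's group vector, scaled by the pair correlation, into d_netUpdate,
// and each feature with nonzero degree then blends the degree-normalized sum
// 50/50 with its previous group vector.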
__global__ void inCross(float* d_group,float* d_crossDist)
{
int nFeatures = blockDim.x;
int i0 = blockIdx.x, i1 = threadIdx.x;
float val = 0;
__shared__ float group0[1000];
// Stage the block-wide row cooperatively: each thread copies one element and the
// block synchronizes before use. The per-thread row (i1) is read directly from
// global memory; staging it in shared memory as well would race, since every
// thread in the block would overwrite the same buffer with a different row.
group0[i1] = d_group[i0*nFeatures + i1];
__syncthreads();
for (int i = 0; i < nFeatures; i++)
{
val += group0[i] * d_group[i1*nFeatures + i];
}
d_crossDist[i0*nFeatures + i1] = val;
}
__global__ void genClr(float* d_clrmap, float* d_group)
{
int nFeatures = blockDim.x;
int idx = blockIdx.x, idim = threadIdx.x;
unsigned char r = d_baseclr[idim % 6][0], g = d_baseclr[idim % 6][1], b = d_baseclr[idim % 6][2];
float val = d_group[idx*nFeatures + idim]/nFeatures;
atomicAdd(d_clrmap + idx * 3, r*val);
atomicAdd(d_clrmap + idx * 3+1, g*val);
atomicAdd(d_clrmap + idx * 3+2, b*val);
}
int KLTtracker::init(int bsize,int w,int h)
{
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6);
std::cout << "maxgridDim" << prop.maxGridSize[0] << "," << prop.maxGridSize[1] << "," << prop.maxGridSize[2] << std::endl;
std::cout<<"maxThreadsPerBlock:"<<prop.maxThreadsPerBlock<<std::endl;
std::cout << prop.major << "," << prop.minor << std::endl;
}
nFeatures = 1000;
nSearch=3000;
trackBuff = std::vector<FeatBuff>(nFeatures);
isTracking=new int[nFeatures];
for (int i=0;i<nFeatures;i++)
{
trackBuff[i].init(1,100);
isTracking[i]=0;
}
frame_width = w;
frame_height = h;
frameidx=0;
dirvec = new float[nFeatures];
memset(dirvec, 0, nFeatures*sizeof(float));
goodNewPts.init(1,nSearch);
detector= gpu::GoodFeaturesToTrackDetector_GPU(nSearch,0.0001,3,3);
tracker = gpu::PyrLKOpticalFlow();
tracker.winSize=Size(3,3);
tracker.maxLevel=3;
tracker.iters=10;
gpuGray=gpu::GpuMat(frame_height, frame_width, CV_8UC1 );
gpuPreGray=gpu::GpuMat(frame_height, frame_width, CV_8UC1 );
cudaMalloc(&d_isnewmat, nSearch*sizeof(unsigned int));
h_isnewmat = (unsigned int*)malloc(nSearch*sizeof(unsigned int));
h_curvec = (float*)malloc(nFeatures*2*sizeof(float));
cudaMalloc(&d_curvec, nFeatures * 2 * sizeof(float));
cudaMalloc(&d_ofvec, nFeatures* sizeof(ofv));
ofvBuff.init(1, nFeatures);
cudaMalloc(&d_neighbor, nFeatures*nFeatures);
h_neighbor = (unsigned char*)malloc(nFeatures*nFeatures);
memset(h_neighbor, 0, nFeatures*nFeatures);
cudaMalloc(&d_group, nFeatures*nFeatures*sizeof(float));
h_group = (float *)malloc(nFeatures*nFeatures*sizeof(float));
cudaMalloc(&d_netOrder, nFeatures*sizeof(unsigned int));
//cudaMalloc(&d_correlation, nFeatures*nFeatures*sizeof(float));
cudaMalloc(&d_pairvec, nFeatures*sizeof(ppair));
h_pairvec = (ppair *)malloc(nFeatures*sizeof(ppair));
cudaMalloc(&d_netUpdate, nFeatures*nFeatures*sizeof(float));
h_netUpdate = (float*)malloc(nFeatures*nFeatures*sizeof(float));
h_pairN = 0;
cudaMemcpyToSymbol(d_pairN, &h_pairN, sizeof(int));
cudaMalloc(&d_crossDist, nFeatures*nFeatures*sizeof(float));
h_crossDist = (float *)malloc(nFeatures*nFeatures*sizeof(float));
cudaMalloc(&d_clrmap, nFeatures * 3 * sizeof(float));
h_clrmap = (float*)malloc(nFeatures * 3 * sizeof(float));
//cudaMalloc(&d_topK, nFeatures * TOPK * sizeof(float));
h_topK = (unsigned int*)malloc(nFeatures * TOPK * sizeof(unsigned int));
memset(h_topK, 0, nFeatures * TOPK * sizeof(unsigned int));
std::cout << "inited" << std::endl;
gt_inited = false;
return 1;
}
int KLTtracker::selfinit(unsigned char* framedata)
{
curframedata=framedata;
Mat curframe(frame_height,frame_width,CV_8UC1,framedata);
gpuGray.upload(curframe);
gpuPreGray.upload(curframe);
detector(gpuGray, gpuCorners);
gpuCorners.download(corners);
gpuCorners.copyTo(gpuPrePts);
for (int k = 0; k < nFeatures; k++)
{
Vec2f p = corners.at<Vec2f>(k);
pttmp.x = p[0];//(PntT)(p[0] + 0.5);
pttmp.y = p[1];//(PntT)(p[1]+ 0.5);
pttmp.t = frameidx;
trackBuff[k].updateAFrame(&pttmp);
isTracking[k]=1;
memset(h_group + k*nFeatures, 0, nFeatures*sizeof(float));
h_group[k*nFeatures + k] = 1;
h_curvec[k * 2] = trackBuff[k].cur_frame_ptr->x;
h_curvec[k * 2 + 1] = trackBuff[k].cur_frame_ptr->y;
}
return true;
}
bool KLTtracker::checkTrackMoving(FeatBuff &strk)
{
bool isTrkValid = true;
int Movelen=7,minlen=5,startidx=max(strk.len-Movelen,0);
if(strk.len>Movelen)
{
double maxdist = .0, dtmp = .0,totlen=.0;
FeatPts* aptr = strk.getPtr(startidx), *bptr = aptr;
PntT xa=aptr->x,ya=aptr->y,xb=strk.cur_frame_ptr->x,yb=strk.cur_frame_ptr->y;
double displc=sqrt( pow(xb-xa, 2.0) + pow(yb-ya, 2.0));
if((strk.len -startidx)*0.2>displc)
{
isTrkValid = false;
}
}
return isTrkValid;
}
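// checkTrackMoving (above) rules a track "not moving" when its net displacement
// over roughly the last Movelen frames falls below about 0.2 pixels per frame;
// updateAframe then drops such tracks and reseeds the slot with a fresh corner.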
int KLTtracker::updateAframe(unsigned char* framedata, unsigned char* rgbdata, int fidx)
{
frameidx=fidx;
curframedata=framedata;
gpuGray.copyTo(gpuPreGray);
//gpuPreGray.data = gpuGray.data;
Mat curframe(frame_height,frame_width,CV_8UC1,framedata);
gpuGray.upload(curframe);
tracker.sparse(gpuPreGray, gpuGray, gpuPrePts, gpuNextPts, gpuStatus, &gpuEigenvec);
gpuStatus.download(status);
gpuNextPts.download(nextPts);
gpuPrePts.download(prePts);
detector(gpuGray, gpuCorners);
gpuCorners.download(corners);
cudaMemcpy(d_curvec, h_curvec, nFeatures*2*sizeof(float), cudaMemcpyHostToDevice);
cudaMemset(d_isnewmat, 0, nSearch*sizeof(unsigned int));
crossDist<<<nSearch, nFeatures>>>(d_isnewmat, (float *)gpuCorners.data, d_curvec, nSearch, nFeatures);
cudaMemcpy(h_isnewmat, d_isnewmat, nSearch*sizeof(unsigned int), cudaMemcpyDeviceToHost);
goodNewPts.clear();
for(int i=0;i<nSearch;i++)
{
if (h_isnewmat[i] == 0)
{
goodNewPts.updateAFrame(&i);
}
}
ofvBuff.clear();
int addidx=0,counter=0;
for (int k = 0; k < nFeatures; k++)
{
int statusflag = status.at<int>(k);
Vec2f trkp = nextPts.at<Vec2f>(k);
bool lost=false;
if ( statusflag)
{
Vec2f pre=prePts.at<Vec2f>(k),cur=nextPts.at<Vec2f>(k);
int prex=trackBuff[k].cur_frame_ptr->x, prey=trackBuff[k].cur_frame_ptr->y;
pttmp.x = trkp[0];
pttmp.y = trkp[1];
pttmp.t = frameidx;
trackBuff[k].updateAFrame(&pttmp);
double trkdist=abs(prex-pttmp.x)+abs(prey-pttmp.y),ofdist=abs(pre[0]-cur[0])+abs(pre[1]-cur[1]);
dirvec[k] = 0.5*dirvec[k] + 0.5*sgn(pttmp.y-prey);
isTracking[k]=1;
bool isMoving=checkTrackMoving(trackBuff[k]);
if (!isMoving||(trackBuff[k].len>1 && trkdist>10))
{
lost=true;
isTracking[k]=0;
}
}
else
{
counter++;
lost=true;
isTracking[k]=0;
}
if(lost)
{
trackBuff[k].clear();
dirvec[k] = 0;
if(addidx<goodNewPts.len)
{
int newidx=*(goodNewPts.getPtr(addidx++));
Vec2f cnrp = corners.at<Vec2f>(newidx);
pttmp.x = cnrp[0];
pttmp.y = cnrp[1];
pttmp.t = frameidx;
trackBuff[k].updateAFrame(&pttmp);
nextPts.at<Vec2f>(k)=cnrp;
isTracking[k]=1;
memset(h_group + k*nFeatures, 0, nFeatures*sizeof(float));
h_group[k*nFeatures + k] = 1;
}
}
else
{
if (trackBuff[k].len > 8)
{
ofvtmp.x0 = trackBuff[k].getPtr(trackBuff[k].len - 5)->x;
ofvtmp.y0 = trackBuff[k].getPtr(trackBuff[k].len - 5)->y;
ofvtmp.x1 = trackBuff[k].cur_frame_ptr->x;
ofvtmp.y1 = trackBuff[k].cur_frame_ptr->y;
ofvtmp.len = trackBuff[k].len;
ofvtmp.idx = k;
ofvBuff.updateAFrame(&ofvtmp);
}
}
h_curvec[k * 2] = trackBuff[k].cur_frame_ptr->x;
h_curvec[k * 2 + 1] = trackBuff[k].cur_frame_ptr->y;
}
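// Grouping stage, run only when fresh motion vectors are available:
// searchNeighbor builds candidate pairs from the motion vectors, calUpdate
// converts each pair into an update of the feature-group matrix, updateNet
// applies those updates to d_group, and inCross derives per-feature cross
// distances from the resulting groups.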
if (ofvBuff.len > 0)
{
h_pairN = 0;
cudaMemset(d_ofvec, 0, nFeatures* sizeof(ofv));
cudaMemcpy(d_ofvec, ofvBuff.data, ofvBuff.len*sizeof(ofv), cudaMemcpyHostToDevice);
cudaMemset(d_neighbor, 0, nFeatures*nFeatures);
cudaMemset(d_netOrder, 0, nFeatures*sizeof(unsigned int));
cudaMemcpyToSymbol(d_pairN, &h_pairN, sizeof(int));
searchNeighbor<<<ofvBuff.len, ofvBuff.len>>>(d_neighbor, d_ofvec, d_pairvec, d_netOrder, nFeatures);
cudaMemcpy(h_pairvec, d_pairvec, nFeatures*sizeof(ppair), cudaMemcpyDeviceToHost);
cudaMemcpy(h_neighbor, d_neighbor, nFeatures*nFeatures, cudaMemcpyDeviceToHost);
cudaMemcpyFromSymbol(&h_pairN, d_pairN, sizeof(int));
cudaMemcpy(d_group, h_group, nFeatures*nFeatures*sizeof(float), cudaMemcpyHostToDevice);
std::cout << "h_pairN:" << h_pairN << std::endl;
//for (int i = 0; i < nFeatures; i++)
//{
//}
cudaMemset(d_netUpdate, 0, nFeatures*nFeatures*sizeof(float));
calUpdate <<<h_pairN, nFeatures>>>(d_group, d_pairvec, d_netUpdate);
cudaMemcpy(h_netUpdate, d_netUpdate, nFeatures*nFeatures*sizeof(float), cudaMemcpyDeviceToHost);
updateNet <<<nFeatures,nFeatures>>>(d_group, d_netUpdate, d_netOrder);
cudaMemcpy(h_group, d_group, nFeatures*nFeatures*sizeof(float), cudaMemcpyDeviceToHost);
/*
float maxval;
for (int i = 0; i < nFeatures; i++)
{
for (int j = 0; j < nFeatures; j++)
{
int ind = (maxval>h_group[i*nFeatures + j]);
maxval = ind*maxval + (1 - ind)*h_group[i*nFeatures + j];
}
}
std::cout <<maxval<< std::endl;
*/
inCross<<<nFeatures, nFeatures>>>(d_group, d_crossDist);
cudaMemcpy(h_crossDist, d_crossDist, nFeatures*nFeatures*sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < nFeatures; i++)
{
std::cout << h_crossDist[i*nFeatures + i] << std::endl;
}
/*
std::cout << std::endl;
cudaMemset(d_clrmap, 0, 3 * nFeatures*sizeof(float));
genClr << <nFeatures, nFeatures >> >(d_clrmap, d_group);
cudaMemcpy(h_clrmap, d_clrmap, 3*nFeatures*sizeof(float), cudaMemcpyDeviceToHost);
float maxr = 0, maxg = 0, maxb = 0;
for (int j = 0; j < nFeatures; j++)
{
int ind = (maxr>h_clrmap[j*3]);
maxr = ind*maxr + (1 - ind)*h_clrmap[j * 3];
ind = (maxg>h_clrmap[j * 3+1]);
maxg = ind*maxg + (1 - ind)*h_clrmap[j * 3+1];
ind = (maxb>h_clrmap[j * 3+2]);
maxb = ind*maxb + (1 - ind)*h_clrmap[j * 3+2];
}
for (int j = 0; j < nFeatures; j++)
{
h_clrmap[j * 3] /= maxr/255;
h_clrmap[j * 3+1] /= maxg/255;
h_clrmap[j * 3+2] /= maxb/255;
//std::cout << h_clrmap[j * 3] << "," << h_clrmap[j * 3 + 1] << "," << h_clrmap[j * 3 + 2] << "," << std::endl;
}
*/
}
gpuPrePts.upload(nextPts);
return 1;
}
|
2c88ffcd960000d200699f7e6d90ff490ecdde80.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* .optix.cu - Copyright 2019 Utrecht University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file contains a minimal set of Optix functions. From here we will
dispatch program flow to our own functions that implement the path tracer.
*/
#include "../kernels/noerrors.h"
#include "helper_math.h"
// global include files
#include "../../RenderSystem/common_settings.h"
#include "../../RenderSystem/common_types.h"
#define OPTIX_CU // skip CUDAMaterial definition in core_settings.h; not needed here
#include "../core_settings.h"
// global path tracing parameters
extern "C" { __constant__ Params params; }
// tools
__device__ __inline__ uint WangHash( uint s ) { s = (s ^ 61) ^ (s >> 16), s *= 9, s = s ^ (s >> 4), s *= 0x27d4eb2d, s = s ^ (s >> 15); return s; }
__device__ __inline__ uint RandomInt( uint& s ) { s ^= s << 13, s ^= s >> 17, s ^= s << 5; return s; }
__device__ __inline__ float RandomFloat( uint& s ) { return RandomInt( s ) * 2.3283064365387e-10f; }
static __inline __device__ float blueNoiseSampler( int x, int y, int sampleIndex, int sampleDimension )
{
// Adapted from E. Heitz. Arguments:
// sampleIndex: 0..255
// sampleDimension: 0..255
x &= 127, y &= 127, sampleIndex &= 255, sampleDimension &= 255;
// xor index based on optimized ranking
int rankedSampleIndex = (sampleIndex ^ params.blueNoise[sampleDimension + (x + y * 128) * 8 + 65536 * 3]) & 255;
// fetch value in sequence
int value = params.blueNoise[sampleDimension + rankedSampleIndex * 256];
// if the dimension is optimized, xor sequence value based on optimized scrambling
value ^= params.blueNoise[(sampleDimension & 7) + (x + y * 128) * 8 + 65536];
// convert to float and return
return (0.5f + value) * (1.0f / 256.0f);
}
static __inline __device__ float3 RandomPointOnLens( const float r0, float r1 )
{
const float blade = (int)(r0 * 9);
float r2 = (r0 - blade * (1.0f / 9.0f)) * 9.0f;
float x1, y1, x2, y2;
__sincosf( blade * PI / 4.5f, &x1, &y1 );
__sincosf( (blade + 1.0f) * PI / 4.5f, &x2, &y2 );
if ((r1 + r2) > 1) r1 = 1.0f - r1, r2 = 1.0f - r2;
const float xr = x1 * r1 + x2 * r2;
const float yr = y1 * r1 + y2 * r2;
float4 posLens = params.posLensSize;
return make_float3( posLens ) + posLens.w * (params.right * xr + params.up * yr);
}
static __inline __device__ void generateEyeRay( float3& O, float3& D, const uint pixelIdx, const uint sampleIdx, uint& seed )
{
// random point on pixel and lens
int sx = pixelIdx % params.scrsize.x;
int sy = pixelIdx / params.scrsize.x;
float r0, r1, r2, r3;
if (sampleIdx < 256)
r0 = blueNoiseSampler( sx, sy, sampleIdx, 0 ),
r1 = blueNoiseSampler( sx, sy, sampleIdx, 1 ),
r2 = blueNoiseSampler( sx, sy, sampleIdx, 2 ),
r3 = blueNoiseSampler( sx, sy, sampleIdx, 3 );
else
r0 = RandomFloat( seed ), r1 = RandomFloat( seed ),
r2 = RandomFloat( seed ), r3 = RandomFloat( seed );
O = RandomPointOnLens( r2, r3 );
float3 posOnPixel;
if (params.distortion == 0)
{
const float u = ((float)sx + r0) * (1.0f / params.scrsize.x);
const float v = ((float)sy + r1) * (1.0f / params.scrsize.y);
posOnPixel = params.p1 + u * params.right + v * params.up;
}
else
{
const float tx = sx / (float)params.scrsize.x - 0.5f, ty = sy / (float)params.scrsize.y - 0.5f;
const float rr = tx * tx + ty * ty;
const float rq = sqrtf( rr ) * (1.0f + params.distortion * rr + params.distortion * rr * rr);
const float theta = atan2f( tx, ty );
const float bx = (sinf( theta ) * rq + 0.5f) * params.scrsize.x;
const float by = (cosf( theta ) * rq + 0.5f) * params.scrsize.y;
posOnPixel = params.p1 + (bx + r0) * (params.right / (float)params.scrsize.x) + (by + r1) * (params.up / (float)params.scrsize.y);
}
D = normalize( posOnPixel - O );
}
#if __CUDA_ARCH__ >= 700
#define THREADMASK __activemask() // volta, turing
#else
#define THREADMASK 0xffffffff // pascal, kepler, fermi
#endif
__device__ void setupPrimaryRay( const uint pathIdx, const uint stride )
{
const uint pixelIdx = pathIdx % (params.scrsize.x * params.scrsize.y);
const uint sampleIdx = pathIdx / (params.scrsize.x * params.scrsize.y) + params.pass;
uint seed = WangHash( pathIdx * 16789 + params.pass * 1791 );
// generate eye ray
float3 O, D;
generateEyeRay( O, D, pixelIdx, sampleIdx, seed );
// populate path state array
params.pathStates[pathIdx] = make_float4( O, __uint_as_float( (pathIdx << 8) + 1 /* S_SPECULAR in CUDA code */ ) );
params.pathStates[pathIdx + stride] = make_float4( D, 0 );
// trace eye ray
uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f );
optixTrace( params.bvhRoot, O, D, params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 );
params.hitData[pathIdx] = make_float4( __uint_as_float( u0 ), __uint_as_float( u1 ), __uint_as_float( u2 ), __uint_as_float( u3 ) );
}
__device__ void setupSecondaryRay( const uint rayIdx, const uint stride )
{
const float4 O4 = params.pathStates[rayIdx];
const float4 D4 = params.pathStates[rayIdx + stride];
float4 result = make_float4( 0, 0, __int_as_float( -1 ), 0 );
uint pixelIdx = __float_as_uint( O4.w ) >> 8;
uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f );
optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 );
params.hitData[rayIdx] = make_float4( __uint_as_float( u0 ), __uint_as_float( u1 ), __uint_as_float( u2 ), __uint_as_float( u3 ) );
}
__device__ void setupPhotonRay( const uint rayIdx )
{
const float4 O4 = params.pathStates[rayIdx * 3];
const float4 D4 = params.pathStates[rayIdx * 3 + 1];
float4 result = make_float4( 0, 0, __int_as_float( -1 ), 0 );
uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f );
optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 );
params.pathStates[rayIdx * 3 + 0].w = __uint_as_float( u3 /* intersection distance */ );
params.pathStates[rayIdx * 3 + 2].y = __uint_as_float( u1 /* inst_idx */ );
params.pathStates[rayIdx * 3 + 2].z = __uint_as_float( u2 /* prim_idx */ );
}
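// Trace a shadow ray for a light connection: origin, direction and potential
// contribution are stored in three slices of connectData spaced 2 * stride
// apart; the contribution E4 is accumulated only when nothing blocks the path.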
__device__ void generateShadowRay( const uint rayIdx, const uint stride )
{
const float4 O4 = params.connectData[rayIdx]; // O4
const float4 D4 = params.connectData[rayIdx + stride * 2]; // D4
// launch shadow ray
uint u0 = 1;
optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, D4.w, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, 1, 2, 1, u0 );
if (u0) return;
const float4 E4 = params.connectData[rayIdx + stride * 2 * 2]; // E4
const int pixelIdx = __float_as_int( E4.w );
if (pixelIdx < stride /* OptiX bug workaround? */) params.accumulator[pixelIdx] += make_float4( E4.x, E4.y, E4.z, 1 );
}
extern "C" __global__ void __raygen__rg()
{
const uint stride = params.scrsize.x * params.scrsize.y * params.scrsize.z;
const uint3 idx = optixGetLaunchIndex();
switch (params.phase)
{
case Params::SPAWN_PRIMARY: // primary rays
setupPrimaryRay( idx.x + idx.y * params.scrsize.x, stride );
break;
case Params::SPAWN_SHADOW: // shadow rays
generateShadowRay( idx.x + idx.y * params.scrsize.x, stride );
break;
case Params::SPAWN_SECONDARY:
setupSecondaryRay( idx.x + idx.y * params.scrsize.x, stride );
break;
case Params::SPAWN_PHOTONS:
setupPhotonRay( idx.x );
break;
}
}
extern "C" __global__ void __miss__occlusion()
{
optixSetPayload_0( 0u ); // a miss clears the occlusion flag; used instead of an anyhit program, as suggested by WillUsher.io.
}
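// Closest hit: report the intersection through the four payload registers -
// both triangle barycentrics packed as 16-bit fixed point in payload 0, the
// instance and primitive indices in payloads 1 and 2, and the hit distance
// (raw float bits) in payload 3.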
extern "C" __global__ void __closesthit__radiance()
{
const uint prim_idx = optixGetPrimitiveIndex();
const uint inst_idx = optixGetInstanceIndex();
const float2 bary = optixGetTriangleBarycentrics();
const float tmin = optixGetRayTmax();
optixSetPayload_0( (uint)(65535.0f * bary.x) + ((uint)(65535.0f * bary.y) << 16) );
optixSetPayload_1( inst_idx );
optixSetPayload_2( prim_idx );
optixSetPayload_3( __float_as_uint( tmin ) );
}
// EOF
|
2c88ffcd960000d200699f7e6d90ff490ecdde80.cu
|
/* .optix.cu - Copyright 2019 Utrecht University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file contains a minimal set of Optix functions. From here we will
dispatch program flow to our own functions that implement the path tracer.
*/
#include "../kernels/noerrors.h"
#include "helper_math.h"
// global include files
#include "../../RenderSystem/common_settings.h"
#include "../../RenderSystem/common_types.h"
#define OPTIX_CU // skip CUDAMaterial definition in core_settings.h; not needed here
#include "../core_settings.h"
// global path tracing parameters
extern "C" { __constant__ Params params; }
// tools
__device__ __inline__ uint WangHash( uint s ) { s = (s ^ 61) ^ (s >> 16), s *= 9, s = s ^ (s >> 4), s *= 0x27d4eb2d, s = s ^ (s >> 15); return s; }
__device__ __inline__ uint RandomInt( uint& s ) { s ^= s << 13, s ^= s >> 17, s ^= s << 5; return s; }
__device__ __inline__ float RandomFloat( uint& s ) { return RandomInt( s ) * 2.3283064365387e-10f; }
static __inline __device__ float blueNoiseSampler( int x, int y, int sampleIndex, int sampleDimension )
{
// Adapted from E. Heitz. Arguments:
// sampleIndex: 0..255
// sampleDimension: 0..255
x &= 127, y &= 127, sampleIndex &= 255, sampleDimension &= 255;
// xor index based on optimized ranking
int rankedSampleIndex = (sampleIndex ^ params.blueNoise[sampleDimension + (x + y * 128) * 8 + 65536 * 3]) & 255;
// fetch value in sequence
int value = params.blueNoise[sampleDimension + rankedSampleIndex * 256];
// if the dimension is optimized, xor sequence value based on optimized scrambling
value ^= params.blueNoise[(sampleDimension & 7) + (x + y * 128) * 8 + 65536];
// convert to float and return
return (0.5f + value) * (1.0f / 256.0f);
}
static __inline __device__ float3 RandomPointOnLens( const float r0, float r1 )
{
const float blade = (int)(r0 * 9);
float r2 = (r0 - blade * (1.0f / 9.0f)) * 9.0f;
float x1, y1, x2, y2;
__sincosf( blade * PI / 4.5f, &x1, &y1 );
__sincosf( (blade + 1.0f) * PI / 4.5f, &x2, &y2 );
if ((r1 + r2) > 1) r1 = 1.0f - r1, r2 = 1.0f - r2;
const float xr = x1 * r1 + x2 * r2;
const float yr = y1 * r1 + y2 * r2;
float4 posLens = params.posLensSize;
return make_float3( posLens ) + posLens.w * (params.right * xr + params.up * yr);
}
static __inline __device__ void generateEyeRay( float3& O, float3& D, const uint pixelIdx, const uint sampleIdx, uint& seed )
{
// random point on pixel and lens
int sx = pixelIdx % params.scrsize.x;
int sy = pixelIdx / params.scrsize.x;
float r0, r1, r2, r3;
if (sampleIdx < 256)
r0 = blueNoiseSampler( sx, sy, sampleIdx, 0 ),
r1 = blueNoiseSampler( sx, sy, sampleIdx, 1 ),
r2 = blueNoiseSampler( sx, sy, sampleIdx, 2 ),
r3 = blueNoiseSampler( sx, sy, sampleIdx, 3 );
else
r0 = RandomFloat( seed ), r1 = RandomFloat( seed ),
r2 = RandomFloat( seed ), r3 = RandomFloat( seed );
O = RandomPointOnLens( r2, r3 );
float3 posOnPixel;
if (params.distortion == 0)
{
const float u = ((float)sx + r0) * (1.0f / params.scrsize.x);
const float v = ((float)sy + r1) * (1.0f / params.scrsize.y);
posOnPixel = params.p1 + u * params.right + v * params.up;
}
else
{
const float tx = sx / (float)params.scrsize.x - 0.5f, ty = sy / (float)params.scrsize.y - 0.5f;
const float rr = tx * tx + ty * ty;
const float rq = sqrtf( rr ) * (1.0f + params.distortion * rr + params.distortion * rr * rr);
const float theta = atan2f( tx, ty );
const float bx = (sinf( theta ) * rq + 0.5f) * params.scrsize.x;
const float by = (cosf( theta ) * rq + 0.5f) * params.scrsize.y;
posOnPixel = params.p1 + (bx + r0) * (params.right / (float)params.scrsize.x) + (by + r1) * (params.up / (float)params.scrsize.y);
}
D = normalize( posOnPixel - O );
}
#if __CUDA_ARCH__ >= 700
#define THREADMASK __activemask() // volta, turing
#else
#define THREADMASK 0xffffffff // pascal, kepler, fermi
#endif
__device__ void setupPrimaryRay( const uint pathIdx, const uint stride )
{
const uint pixelIdx = pathIdx % (params.scrsize.x * params.scrsize.y);
const uint sampleIdx = pathIdx / (params.scrsize.x * params.scrsize.y) + params.pass;
uint seed = WangHash( pathIdx * 16789 + params.pass * 1791 );
// generate eye ray
float3 O, D;
generateEyeRay( O, D, pixelIdx, sampleIdx, seed );
// populate path state array
params.pathStates[pathIdx] = make_float4( O, __uint_as_float( (pathIdx << 8) + 1 /* S_SPECULAR in CUDA code */ ) );
params.pathStates[pathIdx + stride] = make_float4( D, 0 );
// trace eye ray
uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f );
optixTrace( params.bvhRoot, O, D, params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 );
params.hitData[pathIdx] = make_float4( __uint_as_float( u0 ), __uint_as_float( u1 ), __uint_as_float( u2 ), __uint_as_float( u3 ) );
}
__device__ void setupSecondaryRay( const uint rayIdx, const uint stride )
{
const float4 O4 = params.pathStates[rayIdx];
const float4 D4 = params.pathStates[rayIdx + stride];
float4 result = make_float4( 0, 0, __int_as_float( -1 ), 0 );
uint pixelIdx = __float_as_uint( O4.w ) >> 8;
uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f );
optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 );
params.hitData[rayIdx] = make_float4( __uint_as_float( u0 ), __uint_as_float( u1 ), __uint_as_float( u2 ), __uint_as_float( u3 ) );
}
__device__ void setupPhotonRay( const uint rayIdx )
{
const float4 O4 = params.pathStates[rayIdx * 3];
const float4 D4 = params.pathStates[rayIdx * 3 + 1];
float4 result = make_float4( 0, 0, __int_as_float( -1 ), 0 );
uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f );
optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 );
params.pathStates[rayIdx * 3 + 0].w = __uint_as_float( u3 /* intersection distance */ );
params.pathStates[rayIdx * 3 + 2].y = __uint_as_float( u1 /* inst_idx */ );
params.pathStates[rayIdx * 3 + 2].z = __uint_as_float( u2 /* prim_idx */ );
}
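// Trace a shadow ray for a light connection: origin, direction and potential
// contribution are stored in three slices of connectData spaced 2 * stride
// apart; the contribution E4 is accumulated only when nothing blocks the path.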
__device__ void generateShadowRay( const uint rayIdx, const uint stride )
{
const float4 O4 = params.connectData[rayIdx]; // O4
const float4 D4 = params.connectData[rayIdx + stride * 2]; // D4
// launch shadow ray
uint u0 = 1;
optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, D4.w, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, 1, 2, 1, u0 );
if (u0) return;
const float4 E4 = params.connectData[rayIdx + stride * 2 * 2]; // E4
const int pixelIdx = __float_as_int( E4.w );
if (pixelIdx < stride /* OptiX bug workaround? */) params.accumulator[pixelIdx] += make_float4( E4.x, E4.y, E4.z, 1 );
}
extern "C" __global__ void __raygen__rg()
{
const uint stride = params.scrsize.x * params.scrsize.y * params.scrsize.z;
const uint3 idx = optixGetLaunchIndex();
switch (params.phase)
{
case Params::SPAWN_PRIMARY: // primary rays
setupPrimaryRay( idx.x + idx.y * params.scrsize.x, stride );
break;
case Params::SPAWN_SHADOW: // shadow rays
generateShadowRay( idx.x + idx.y * params.scrsize.x, stride );
break;
case Params::SPAWN_SECONDARY:
setupSecondaryRay( idx.x + idx.y * params.scrsize.x, stride );
break;
case Params::SPAWN_PHOTONS:
setupPhotonRay( idx.x );
break;
}
}
extern "C" __global__ void __miss__occlusion()
{
optixSetPayload_0( 0u ); // a miss clears the occlusion flag; used instead of an anyhit program, as suggested by WillUsher.io.
}
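// Closest hit: report the intersection through the four payload registers -
// both triangle barycentrics packed as 16-bit fixed point in payload 0, the
// instance and primitive indices in payloads 1 and 2, and the hit distance
// (raw float bits) in payload 3.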
extern "C" __global__ void __closesthit__radiance()
{
const uint prim_idx = optixGetPrimitiveIndex();
const uint inst_idx = optixGetInstanceIndex();
const float2 bary = optixGetTriangleBarycentrics();
const float tmin = optixGetRayTmax();
optixSetPayload_0( (uint)(65535.0f * bary.x) + ((uint)(65535.0f * bary.y) << 16) );
optixSetPayload_1( inst_idx );
optixSetPayload_2( prim_idx );
optixSetPayload_3( __float_as_uint( tmin ) );
}
// EOF
|
b23eb84a7c55d799756b4a7b2dae8754005f2226.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <cuda_utils.cuh>
#include <distance/distance.cuh>
#include <iostream>
#include <score/scores.cuh>
#include <vector>
#include "test_utils.h"
namespace MLCommon {
namespace Score {
class TrustworthinessScoreTest : public ::testing::Test {
protected:
void basicTest() {
std::vector<float> X = {
5.6142087, 8.59787, -4.382763, -3.6452143, -5.8816037,
-0.6330313, 4.6920023, -0.79210913, 0.6106314, 2.1210914,
5.919943, -8.43784, -6.4819884, 0.41001374, -6.1052523,
-4.0825715, -5.314755, -2.834671, 5.751696, -6.5012555,
-0.4719201, -7.53353, 7.6789393, -1.4959852, -5.5977287,
-9.564147, 1.2902534, 3.559834, -6.7659483, 8.265964,
4.595404, 9.133477, -6.1553917, -6.319754, -2.9039452,
4.4150834, -3.094395, -4.426273, 9.584571, -5.64133,
6.6209483, 7.4044604, 3.9620576, 5.639907, 10.33007,
-0.8792053, 5.143776, -7.464049, 1.2448754, -5.6300974,
5.4518576, 4.119535, 6.749645, 7.627064, -7.2298336,
1.9681473, -6.9083176, 6.404673, 0.07186685, 9.0994835,
8.51037, -8.986389, 0.40534487, 2.115397, 4.086756,
1.2284287, -2.6272132, 0.06527536, -9.587425, -7.206078,
7.864875, 7.4397306, -6.9233336, -2.6643622, 3.3466153,
7.0408177, -3.6069896, -9.971769, 4.4075623, 7.9063697,
2.559074, 4.323717, 1.6867131, -1.1576937, -9.893141,
-3.251416, -7.4889135, -4.0588717, -2.73338, -7.4852257,
3.4460473, 9.759119, -5.4680476, -4.722435, -8.032619,
-1.4598992, 4.227361, 3.135568, 1.1950601, 1.1982028,
6.998856, -6.131138, -6.6921015, 0.5361224, -7.1213965,
-5.6104236, -7.2212887, -2.2710054, 8.544764, -6.0254574,
1.4582269, -5.5587835, 8.031556, -0.26328218, -5.2591386,
-9.262641, 2.8691363, 5.299787, -9.209455, 8.523085,
5.180329, 10.655528, -5.7171874, -6.7739563, -3.6306462,
4.067106, -1.5912259, -3.2345476, 8.042973, -3.6364832,
4.1242137, 9.886953, 5.4743724, 6.3058076, 9.369645,
-0.5175337, 4.9859877, -7.879498, 1.358422, -4.147944,
3.8984218, 5.894656, 6.4903927, 8.702036, -8.023722,
2.802145, -7.748032, 5.8461113, -0.34215945, 11.298865,
1.4107164, -9.949621, -1.6257563, -10.655836, 2.4528909,
1.1570255, 5.170669, 2.8398793, 7.1838694, 9.088459,
2.631155, 3.964414, 2.8769252, 0.04198391, -0.16993195,
3.6747139, -2.8377378, 6.1782537, 10.759618, -4.5642614,
-8.522967, 0.8614642, 6.623416, -1.029324, 5.5488334,
-7.804511, 2.128833, 7.9042315, 7.789576, -2.7944536,
0.72271067, -10.511495, -0.78634536, -10.661714, 2.9376361,
1.9148129, 6.22859, 0.26264945, 8.028384, 6.8743043,
0.9351067, 7.0690722, 4.2846055, 1.4134506, -0.18144785,
5.2778087, -1.7140163, 9.217541, 8.602799, -2.6537218,
-7.8377395, 1.1244944, 5.4540544, -0.38506773, 3.9885726,
-10.76455, 1.4440702, 9.136163, 6.664117, -5.7046547,
8.038592, -9.229767, -0.2799413, 3.6064725, 4.187257,
1.0516582, -2.0707326, -0.7615968, -8.561018, -3.7831352,
10.300297, 5.332594, -6.5880876, -4.2508664, 1.7985519,
5.7226253, -4.1223383, -9.6697855, 1.4885283, 7.524974,
1.7206005, 4.890457, 3.7264557, 0.4428284, -9.922455,
-4.250455, -6.4410596, -2.107994, -1.4109765, -6.1325397,
0.32883006, 6.0489736, 7.7257385, -8.281174, 1.0129383,
-10.792166, 8.378851, 10.802716, 9.848448, -9.188757,
1.3151443, 1.9971865, -2.521849, 4.3268294, -7.775683,
-2.2902298, 3.0824065, -7.17559, 9.6100855, 7.3965735,
-10.476525, 5.895973, -3.6974669, -7.6688933, 1.7354839,
-7.4045196, -1.7992063, -4.0394845, 5.2471714, -2.250571,
2.528036, -8.343515, -2.2374575, -10.019771, 0.73371273,
3.1853926, 2.7994921, 2.6637669, 7.620401, 7.515571,
0.68636256, 5.834537, 4.650282, -1.0362619, 0.4461701,
3.7870514, -4.1340904, 7.202998, 9.736904, -3.005512,
-8.920467, 1.1228397, 6.2598724, 1.2812365, 4.5442104,
-8.791537, 0.92113096, 8.464749, 8.359035, -4.3923397,
1.2252625, -10.1986475, -1.4409319, -10.013967, 3.9071581,
1.683064, 4.877419, 1.6570637, 9.559105, 7.3546534,
0.36635467, 5.220211, 4.6303267, 0.6601065, 0.16149978,
3.8818731, -3.4438233, 8.42085, 8.659159, -3.0935583,
-8.039611, 2.3060374, 5.134666, 1.0458113, 6.0190983,
-9.143728, 0.99048865, 9.210842, 6.670241, -5.9614363,
0.8747396, 7.078824, 8.067469, -10.314754, 0.45977542,
-9.28306, 9.1838665, 9.318644, 7.189082, -11.092555,
1.0320464, 3.882163, 0.10953151, 7.9029684, -6.9068265,
-1.3526366, 5.3996363, -8.430931, 11.452577, 6.39663,
-11.090514, 4.6662245, -3.1268113, -8.357452, 2.2276728,
-10.357126, -0.9291848, -3.4193344, 3.1289792, -2.5030103,
6.772719, 11.457757, -4.2125936, -6.684548, -4.7611327,
3.6960156, -2.3030636, -3.0591488, 10.452471, -4.1267314,
5.66614, 7.501461, 5.072407, 6.636537, 8.990381,
-0.2559256, 4.737867, -6.2149944, 2.535682, -5.5484023,
5.7113924, 3.4742818, 7.9915137, 7.0052586, -7.156467,
1.4354781, -8.286235, 5.7523417, -2.4175215, 9.678009,
0.05066403, -9.645226, -2.2658763, -9.518178, 4.493372,
2.3232365, 2.1659086, 0.42507997, 8.360246, 8.23535,
2.6878164, 5.236947, 3.4924245, -0.6089895, 0.8884741,
4.359464, -4.6073823, 7.83441, 8.958755, -3.4690795,
-9.182282, 1.2478025, 5.6311107, -1.2408862, 3.6316886,
-8.684654, 2.1078515, 7.2813864, 7.9265943, -3.6135032,
0.4571511, 8.493568, 10.496853, -7.432897, 0.8625995,
-9.607528, 7.2899456, 8.83158, 8.908199, -10.300263,
1.1451302, 3.7871468, -0.97040755, 5.7664757, -8.9688,
-2.146672, 5.9641485, -6.2908535, 10.126465, 6.1553903,
-12.066902, 6.301596, -5.0419583, -8.228695, 2.4879954,
-8.918582, -3.7434099, -4.1593685, 3.7431836, -1.1704745,
0.5524103, 9.109399, 9.571567, -11.209955, 1.2462777,
-9.554555, 9.091726, 11.477966, 7.630937, -10.450911,
1.9205878, 5.358983, -0.44546837, 6.7611346, -9.74753,
-0.5939732, 3.8892255, -6.437991, 10.294727, 5.6723895,
-10.7883, 6.192348, -5.293862, -10.811491, 1.0194173,
-7.074576, -3.192368, -2.5231771, 4.2791643, -0.53309685,
0.501366, 9.636625, 7.710316, -6.4219728, 1.0975566,
-8.218886, 6.9011984, 9.873679, 8.903804, -9.316832,
1.2404599, 4.9039655, 1.2272617, 4.541515, -5.2753224,
-3.2196746, 3.1303136, -7.285681, 9.041425, 5.6417427,
-9.93667, 5.7548947, -5.113397, -8.544622, 4.182665,
-7.7709813, -3.2810235, -3.312072, 3.8900535, -2.0604856,
6.709082, -8.461194, 1.2666026, 4.8770437, 2.6955879,
3.0340345, -1.1614609, -3.536341, -7.090382, -5.36146,
9.072544, 6.4554095, -4.4728956, -1.88395, 3.1095037,
8.782348, -3.316743, -8.65248, 1.6802986, 8.186188,
2.1783829, 4.931278, 4.158475, 1.4033595, -11.320101,
-3.7084908, -6.740436, -2.5555193, -1.0451177, -6.5569925,
0.82810307, 8.505919, 8.332857, -9.488569, -0.21588463,
-8.056692, 8.493993, 7.6401625, 8.812983, -9.377281,
2.4369764, 3.1766508, 0.6300803, 5.6666765, -7.913654,
-0.42301777, 4.506412, -7.8954244, 10.904591, 5.042256,
-9.626183, 8.347351, -3.605006, -7.923387, 1.1024277,
-8.705793, -2.5151258, -2.5066147, 4.0515003, -2.060757,
6.2635093, 8.286584, -6.0509276, -6.76452, -3.1158175,
1.6578803, -1.4608748, -1.24211, 8.151246, -4.2970877,
6.093071, 7.4911637, 4.51018, 4.8425875, 9.211085,
-2.4386222, 4.5830803, -5.6079445, 2.3713675, -4.0707507,
3.1787417, 5.462342, 6.915912, 6.3928423, -7.2970796,
5.0112796, -9.140893, 4.9990606, 0.38391754, 7.7088532,
1.9340848, 8.18833, 8.16617, -9.42086, -0.3388326,
-9.659727, 8.243045, 8.099073, 8.439428, -7.038694,
2.1077902, 3.3866816, -1.9975324, 7.4972878, -7.2525196,
-1.553731, 4.08758, -6.6922374, 9.50525, 4.026735,
-9.243538, 7.2740564, -3.9319072, -6.3228955, 1.6693478,
-7.923119, -3.7423058, -2.2813146, 5.3469067, -1.8285407,
3.3118162, 8.826356, -4.4641976, -6.4751124, -9.200089,
-2.519147, 4.225298, 2.4105988, -0.4344186, 0.53441775,
5.2836394, -8.2816105, -4.996147, -1.6870759, -7.8543897,
-3.9788852, -7.0346904, -3.1289773, 7.4567637, -5.6227813,
1.0709786, -8.866012, 8.427324, -1.1755563, -5.789216,
-8.197835, 5.3342214, 6.0646234, -6.8975716, 7.717031,
3.480355, 8.312151, -3.6645212, -3.0976524, -8.090359,
-1.9176173, 2.4257212, 1.9700835, 0.4098958, 2.1341088,
7.652741, -9.9595585, -5.989757, 0.10119354, -7.935407,
-5.792786, -5.22783, -4.318978, 5.414037, -6.4621663,
1.670883, -6.9224787, 8.696932, -2.0214002, -6.6681314,
-8.326418, 4.9049683, 5.4442496, -6.403739, 7.5822453,
7.0972915, -9.072851, -0.23897195, 1.7662339, 5.3096304,
1.983179, -2.222645, -0.34700772, -9.094717, -6.107907,
9.525174, 8.1550665, -5.6940084, -4.1636486, 1.7360662,
8.528821, -3.7299833, -9.341266, 2.608542, 9.108706,
0.7978509, 4.2488184, 2.454484, 0.9446999, -10.106636,
-3.8973773, -6.6566644, -4.5647273, -0.99837756, -6.568582,
9.324853, -7.9020953, 2.0910501, 2.2896829, 1.6790711,
1.3159255, -3.5258796, 1.8898442, -8.105812, -4.924962,
8.771129, 7.1202874, -5.991957, -3.4106019, 2.4450088,
7.796387, -3.055946, -7.8971434, 1.9856719, 9.001636,
1.8511922, 3.019749, 3.1227696, 0.4822102, -10.021213,
-3.530504, -6.225959, -3.0029628, -1.7881511, -7.3879776,
1.3925704, 9.499782, -3.7318087, -3.7074296, -7.7466836,
-1.5284524, 4.0535855, 3.112011, 0.10340207, -0.5429599,
6.67026, -9.155924, -4.924038, 0.64248866, -10.0103655,
-3.2742946, -4.850029, -3.6707063, 8.586258, -5.855605,
4.906918, -6.7813993, 7.9938135, -2.5473144, -5.688948,
-7.822478, 2.1421318, 4.66659, -9.701272, 9.549149,
0.8998125, -8.651497, -0.56899565, -8.639817, 2.3088377,
2.1264515, 3.2764478, 2.341989, 8.594338, 8.630639,
2.8440373, 6.2043204, 4.433932, 0.6320018, -1.8179281,
5.09452, -1.5741565, 8.153934, 8.744339, -3.6945698,
-8.883078, 1.5329908, 5.2745943, 0.44716078, 4.8809066,
-7.9594903, 1.134374, 9.233994, 6.5528665, -4.520542,
9.477355, -8.622195, -0.23191702, 2.0485356, 3.9379985,
1.5916302, -1.4516805, -0.0843819, -7.8554378, -5.88308,
7.999766, 6.2572145, -5.585321, -4.0097756, 0.42382592,
6.160884, -3.631315, -8.333449, 2.770595, 7.8495173,
3.3331623, 4.940415, 3.6207345, -0.037517, -11.034698,
-3.185103, -6.614664, -3.2177854, -2.0792234, -6.8879867,
7.821685, -8.455084, 1.0784642, 4.0033927, 2.7343264,
2.6052725, -4.1224284, -0.89305353, -6.8267674, -4.9715133,
8.880253, 5.6994023, -5.9695024, -4.9181266, 1.3017995,
7.972617, -3.9452884, -10.424556, 2.4504194, 6.21529,
0.93840516, 4.2070026, 6.159839, 0.91979957, -8.706724,
-4.317946, -6.6823545, -3.0388, -2.464262, -7.3716645,
1.3926703, 6.544412, -5.6251183, -5.122411, -8.622049,
-2.3905911, 3.9138813, 1.9779967, -0.05011125, 0.13310997,
7.229751, -9.742043, -8.08724, 1.2426697, -7.9230795,
-3.3162494, -7.129571, -3.5488048, 7.4701195, -5.2357526,
0.5917681, -6.272206, 6.342328, -2.909731, -4.991607,
-8.845513, 3.3228495, 7.033246, -7.8180246, 8.214469,
6.3910093, 9.185153, -6.20472, -7.713809, -3.8481297,
3.5579286, 0.7078448, -3.2893546, 7.384514, -4.448121,
3.0104196, 9.492943, 8.024847, 4.9114385, 9.965594,
-3.014036, 5.182494, -5.8806014, 2.5312455, -5.9926524,
4.474469, 6.3717875, 6.993105, 6.493093, -8.935534,
3.004074, -8.055647, 8.315765, -1.3026813, 8.250377,
0.02606229, 6.8508425, 9.655665, -7.0116496, -0.41060972,
-10.049198, 7.897801, 6.7791023, 8.3362, -9.821014,
2.491157, 3.5160472, -1.6228812, 7.398063, -8.769123,
-3.1743705, 3.2827861, -6.497855, 10.831924, 5.2761307,
-9.704417, 4.3817043, -3.9841619, -8.111647, 1.1883026,
-8.115312, -2.9240117, -5.8879666, 4.20928, -0.3587938,
6.935672, -10.177582, 0.48819053, 3.1250648, 2.9306343,
3.082544, -3.477687, -1.3768549, -7.4922366, -3.756631,
10.039836, 3.6670392, -5.9761434, -4.4728765, 3.244255,
7.027899, -2.3806512, -10.4100685, 1.605716, 7.7953773,
0.5408159, 1.7156523, 3.824097, -1.0604783, -10.142124,
-5.246805, -6.5283823, -4.579547, -2.42714, -6.709197,
2.7782338, 7.33353, -6.454507, -2.9929368, -7.8362985,
-2.695445, 2.4900775, 1.6682367, 0.4641757, -1.0495365,
6.9631333, -9.291356, -8.23837, -0.34263706, -8.275113,
-2.8454232, -5.0864096, -2.681942, 7.5450225, -6.2517986,
0.06810654, -6.470652, 4.9042645, -1.8369255, -6.6937943,
-7.9625087, 2.8510258, 6.180508, -8.282598, 7.919079,
1.4897474, 6.7217417, -4.2459426, -4.114431, -8.375707,
-2.143264, 5.6972933, 1.5574739, 0.39375135, 1.7930849,
5.1737595, -7.826241, -5.160268, -0.80433255, -7.839536,
-5.2620406, -5.4643164, -3.185536, 6.620315, -7.065227,
1.0524757, -6.125088, 5.7126627, -1.6161644, -3.852159,
-9.164279, 2.7005782, 5.946544, -8.468236, 8.2145405,
1.1035942, 6.590157, -4.0461283, -4.8090615, -7.6702685,
-2.1121511, 5.1147075, 1.6128504, 2.0064135, 1.0544407,
6.0038295, -7.8282537, -4.801278, 0.32349443, -8.0649805,
-4.372714, -5.61336, -5.21394, 8.176595, -5.4753284,
1.7800134, -8.267283, 7.2133374, -0.16594432, -6.317046,
-9.490406, 4.1261597, 5.473317, -7.7551675, 7.007468,
7.478628, -8.801905, 0.10975724, 3.5478222, 4.797803,
1.3825226, -3.357369, 0.99262005, -6.94877, -5.4781394,
9.632604, 5.7492557, -5.9014316, -3.1632116, 2.340859,
8.708098, -3.1255999, -8.848661, 4.5612836, 8.455157,
0.73460823, 4.112301, 4.392744, -0.30759293, -6.8036823,
-3.0331545, -8.269506, -2.82415, -0.9411246, -5.993506,
2.1618164, -8.716055, -0.7432543, -10.255819, 3.095418,
2.5131428, 4.752442, 0.9907621, 7.8279433, 7.85814,
0.50430876, 5.2840405, 4.457291, 0.03330028, -0.40692952,
3.9244103, -2.117118, 7.6977615, 8.759009, -4.2157164,
-9.136053, 3.247858, 4.668686, 0.76162136, 5.3833632,
-9.231471, 0.44309422, 8.380872, 6.7211227, -3.091507,
2.173508, -9.038242, -1.3666698, -9.819077, 0.37825826,
2.3898845, 4.2440815, 1.9161536, 7.24787, 6.9124637,
1.6238527, 5.1140285, 3.1935842, 1.02845, -1.1273454,
5.638998, -2.497932, 8.342559, 8.586319, -2.9069402,
-7.6387944, 3.5975037, 4.4115705, 0.41506064, 4.9078383,
-9.68327, 1.8159529, 9.744613, 8.40622, -4.495336,
9.244892, -8.789869, 1.3158468, 4.018167, 3.3922846,
2.652022, -2.7495477, 0.2528986, -8.268324, -6.004913,
10.428784, 6.6580734, -5.537176, -1.7177434, 2.7504628,
6.7735, -2.4454272, -9.998361, 2.9483433, 6.8266654,
2.3787718, 4.472637, 2.5871701, 0.7355365, -7.7027745,
-4.1879907, -7.172832, -4.1843605, -0.03646783, -5.419406,
6.958486, 11.011111, -7.1821184, -7.956423, -3.408451,
4.6850276, -2.348787, -4.398289, 6.9787564, -3.8324208,
5.967827, 8.433518, 4.660108, 5.5657144, 9.964243,
-1.3515275, 6.404833, -6.4805903, 2.4379845, -6.0816774,
1.752272, 5.3771873, 6.9613523, 6.9788294, -6.3894596,
3.7521114, -6.8034263, 6.4458385, -0.7233525, 10.512529,
4.362273, 9.231461, -6.3382263, -7.659, -3.461823,
4.71463, 0.17817476, -3.685746, 7.2962036, -4.6489477,
5.218017, 11.546999, 4.7218375, 6.8498397, 9.281103,
-3.900459, 6.844054, -7.0886965, -0.05019227, -8.233724,
5.5808983, 6.374517, 8.321048, 7.969449, -7.3478637,
1.4917561, -8.003144, 4.780668, -1.1981848, 7.753739,
2.0260844, -8.880096, -3.4258451, -7.141975, 1.9637157,
1.814725, 5.311151, 1.4831505, 7.8483663, 7.257948,
1.395786, 6.417756, 5.376912, 0.59505713, 0.00062552,
3.6634305, -4.159713, 7.3571978, 10.966816, -2.5419605,
-8.466229, 1.904205, 5.6338267, -0.52567476, 5.59736,
-8.361799, 0.5009981, 8.460681, 7.3891273, -3.5272243,
5.0552278, 9.921456, -7.69693, -7.286378, -1.9198836,
3.1666567, -2.5832257, -2.2445817, 9.888111, -5.076563,
5.677401, 7.497946, 5.662994, 5.414262, 8.566503,
-2.5530663, 7.1032815, -6.0612082, 1.3419591, -4.9595256,
4.3377542, 4.3790717, 6.793512, 8.383502, -7.1278043,
3.3240774, -9.379446, 6.838661, -0.81241214, 8.694813,
0.79141915, 7.632467, 8.575382, -8.533798, 0.28954387,
-7.5675836, 5.8653326, 8.97235, 7.1649346, -10.575289,
0.9359381, 5.02381, -0.5609511, 5.543464, -7.69131,
-2.1792977, 2.4729247, -6.1917787, 10.373678, 7.6549597,
-8.809486, 5.5657206, -3.3169382, -8.042887, 2.0874746,
-7.079005, -3.33398, -3.6843317, 4.0172358, -2.0754814,
1.1726758, 7.4618697, 6.9483604, -8.469206, 0.7401797,
-10.318176, 8.384557, 10.5476265, 9.146971, -9.250223,
0.6290606, 4.4941425, -0.7514017, 7.2271705, -8.309598,
-1.4761636, 4.0140634, -6.021102, 9.132852, 5.6610966,
-11.249811, 8.359293, -1.9445792, -7.7393436, -0.3931331,
-8.824441, -2.5995944, -2.5714035, 4.140213, -3.6863053,
5.517265, 9.020411, -4.9286127, -7.871219, -3.7446704,
2.5179656, -1.4543481, -2.2703636, 7.010597, -3.6436229,
6.753862, 7.4129915, 7.1406755, 5.653706, 9.5445175,
0.15698843, 4.761813, -7.698002, 1.6870106, -4.5410123,
4.171763, 5.3747005, 6.341021, 7.456738, -8.231657,
2.763487, -9.208167, 6.676799, -1.1957736, 10.062605,
4.0975976, 7.312957, -2.4981596, -2.9658387, -8.150425,
-2.1075552, 2.64375, 1.6636052, 1.1483809, 0.09276015,
5.8556347, -7.8481026, -5.9913163, -0.02840613, -9.937289,
-1.0486673, -5.2340155, -3.83912, 7.7165728, -8.409944,
0.80863273, -6.9119215, 7.5712357, 0.36031485, -6.056131,
-8.470033, 1.8678337, 3.0121377, -7.3096333, 8.205484,
5.262654, 8.774514, -4.7603083, -7.2096143, -4.437014,
3.6080024, -1.624254, -4.2787876, 8.880863, -4.8984556,
5.1782074, 9.944454, 3.911282, 3.5396595, 8.867042,
-1.2006199, 5.393288, -5.6455317, 0.7829499, -4.0338907,
2.479272, 6.5080743, 8.582535, 7.0097537, -6.9823785,
3.984318, -7.225381, 5.3135114, -1.0391048, 8.951443,
-0.70119005, -8.510742, -0.42949116, -10.9224825, 2.8176029,
1.6800792, 5.778404, 1.7269998, 7.1975236, 7.7258267,
2.7632928, 5.3399253, 3.4650044, 0.01971426, -1.6468811,
4.114996, -1.5110453, 6.8689218, 8.269899, -3.1568048,
-7.0344677, 1.2911975, 5.950357, 0.19028673, 4.657226,
-8.199647, 2.246055, 8.989509, 5.3101015, -4.2400866};
std::vector<float> X_embedded = {
-0.41849962, -0.53906363, 0.46958843, -0.35832694, -0.23779503,
-0.29751351, -0.01072748, -0.21353109, -0.54769957, -0.55086273,
0.37093949, -0.12714292, -0.06639574, -0.36098689, -0.13060696,
-0.07362658, -1.01205945, -0.39285606, 0.2864089, -0.32031146,
-0.19595343, 0.08900568, -0.04813879, -0.06563424, -0.42655188,
-0.69014251, 0.51459783, -0.1942696, -0.07767916, -0.6119386,
0.04813685, -0.22557008, -0.56890118, -0.60293794, 0.43429622,
-0.09240723, -0.00624062, -0.25800395, -0.1886092, 0.01655941,
-0.01961523, -0.14147359, 0.41414487, -0.8512944, -0.61199242,
-0.18586016, 0.14024924, -0.41635606, -0.02890144, 0.1065347,
0.39700791, -1.14060664, -0.95313865, 0.14416681, 0.17306046,
-0.53189689, -0.98987544, -0.67918193, 0.41787854, -0.20878236,
-0.06612862, 0.03502904, -0.03765266, -0.0980606, -0.00971657,
0.29432917, 0.36575687, -1.1645509, -0.89094597, 0.03718805,
0.2310573, -0.38345811, -0.10401925, -0.10653082, 0.38469055,
-0.88302094, -0.80197543, 0.03548668, 0.02775662, -0.54374295,
0.03379983, 0.00923623, 0.29320273, -1.05263519, -0.93360096,
0.03778313, 0.12360487, -0.56437284, 0.0644429, 0.33432651,
0.36450726, -1.22978747, -0.83822101, -0.18796451, 0.34888434,
-0.3801491, -0.45327303, -0.59747899, 0.39697698, -0.15616602,
-0.06159166, -0.40301991, -0.11725303, -0.11913263, -0.12406619,
-0.11227967, 0.43083835, -0.90535849, -0.81646025, 0.10012121,
-0.0141237, -0.63747931, 0.04805023, 0.34190539, 0.50725192,
-1.17861414, -0.74641538, -0.09333111, 0.27992678, -0.56214809,
0.04970971, 0.36249384, 0.57705611, -1.16913795, -0.69849908,
0.10957897, 0.27983218, -0.62088525, 0.0410459, 0.23973398,
0.40960434, -1.14183664, -0.83321381, 0.02149482, 0.21720445,
-0.49869928, -0.95655465, -0.51680422, 0.45761383, -0.08351214,
-0.12151554, 0.00819737, -0.20813803, -0.01055793, 0.25319234,
0.36154974, 0.1822421, -1.15837133, -0.92209691, -0.0501582,
0.08535917, -0.54003763, -1.08675635, -1.04009593, 0.09408128,
0.07009826, -0.01762833, -0.19180447, -0.18029785, -0.20342001,
0.04034991, 0.1814747, 0.36906669, -1.13532007, -0.8852452,
0.0782818, 0.16825101, -0.50301319, -0.29128098, -0.65341312,
0.51484352, -0.38758236, -0.22531103, -0.55021971, 0.10804344,
-0.3521522, -0.38849035, -0.74110794, 0.53761131, -0.25142813,
-0.1118066, -0.47453368, 0.06347904, -0.23796193, -1.02682328,
-0.47594091, 0.39515916, -0.2782529, -0.16566519, 0.08063579,
0.00810116, -0.06213913, -1.059654, -0.62496334, 0.53698546,
-0.11806234, 0.00356161, 0.11513405, -0.14213292, 0.04102662,
-0.36622161, -0.73686272, 0.48323864, -0.27338892, -0.14203401,
-0.41736352, 0.03332564, -0.21907479, -0.06396769, 0.01831361,
0.46263444, -1.01878166, -0.86486858, 0.17622118, -0.01249686,
-0.74530888, -0.9354887, -0.5027945, 0.38170099, -0.15547098,
0.00677824, -0.04677663, -0.13541745, 0.07253501, -0.97933143,
-0.58001202, 0.48235369, -0.18836913, -0.02430783, 0.07572441,
-0.08101331, 0.00630076, -0.16881248, -0.67989182, 0.46083611,
-0.43910736, -0.29321918, -0.38735861, 0.07669903, -0.29749861,
-0.40047669, -0.56722462, 0.33168188, -0.13118173, -0.06672747,
-0.56856316, -0.26269144, -0.14236671, 0.10651901, 0.4962585,
0.38848072, -1.06653547, -0.64079332, -0.47378591, 0.43195483,
-0.04856951, -0.9840439, -0.70610428, 0.34028092, -0.2089237,
-0.05382041, 0.01625874, -0.02080803, -0.12535211, -0.04146428,
-1.24533033, 0.48944879, 0.0578458, 0.26708388, -0.90321028,
0.35377088, -0.36791429, -0.35382384, -0.52748734, 0.42854419,
-0.31744713, -0.19174226, -0.39073724, -0.03258846, -0.19978228,
-0.36185205, -0.57412046, 0.43681973, -0.25414538, -0.12904905,
-0.46334973, -0.03123853, -0.11303604, -0.87073672, -0.45441297,
0.41825858, -0.25303507, -0.21845073, 0.10248682, -0.11045569,
-0.10002795, -0.00572806, 0.16519061, 0.42651513, -1.11417019,
-0.83789682, 0.02995787, 0.16843079, -0.53874511, 0.03056994,
0.17877036, 0.49632853, -1.03276777, -0.74778616, -0.03971953,
0.10907949, -0.67385727, -0.9523471, -0.56550741, 0.40409449,
-0.2703723, -0.10175014, 0.13605487, -0.06306008, -0.01768126,
-0.4749442, -0.56964815, 0.39389887, -0.19248079, -0.04161081,
-0.38728487, -0.20341556, -0.12656988, -0.35949609, -0.46137866,
0.28798422, -0.06603147, -0.04363992, -0.60343552, -0.23565227,
-0.10242701, -0.06792886, 0.09689897, 0.33259571, -0.98854214,
-0.84444433, 0.00673901, 0.13457057, -0.43145794, -0.51500046,
-0.50821936, 0.38000089, 0.0132636, 0.0580942, -0.40157595,
-0.11967677, 0.02549113, -0.10350953, 0.22918226, 0.40411913,
-1.05619383, -0.71218503, -0.02197581, 0.26422262, -0.34765676,
0.06601537, 0.21712676, 0.34723559, -1.20982027, -0.95646334,
0.00793948, 0.27620381, -0.43475035, -0.67326003, -0.6137197,
0.43724492, -0.17666136, -0.06591748, -0.18937394, -0.07400128,
-0.06881691, -0.5201112, -0.61088628, 0.4225319, -0.18969463,
-0.06921366, -0.33993208, -0.06990873, -0.10288513, -0.70659858,
-0.56003648, 0.46628812, -0.16090363, -0.0185108, -0.1431348,
-0.1128775, -0.0078648, -0.02323332, 0.04292452, 0.39291084,
-0.94897962, -0.63863206, -0.16546988, 0.23698957, -0.30633628};
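// X holds the high-dimensional input samples and X_embedded the matching
// low-dimensional embedding; the trustworthiness score computed below measures
// how well local neighborhoods of X are preserved in X_embedded.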
hipStream_t stream;
hipStreamCreate(&stream);
allocator.reset(new defaultDeviceAllocator);
float* d_X = (float*)allocator->allocate(X.size() * sizeof(float), stream);
float* d_X_embedded =
(float*)allocator->allocate(X_embedded.size() * sizeof(float), stream);
updateDevice(d_X, X.data(), X.size(), stream);
updateDevice(d_X_embedded, X_embedded.data(), X_embedded.size(), stream);
// euclidean test
score =
trustworthiness_score<float,
ML::Distance::DistanceType::EucUnexpandedL2Sqrt>(
d_X, d_X_embedded, 50, 30, 8, 5, allocator, stream);
allocator->deallocate(d_X, X.size() * sizeof(float), stream);
allocator->deallocate(d_X_embedded, X_embedded.size() * sizeof(float),
stream);
hipStreamDestroy(stream);
}
void SetUp() override { basicTest(); }
void TearDown() override {}
protected:
double score;
std::shared_ptr<deviceAllocator> allocator;
};
typedef TrustworthinessScoreTest TrustworthinessScoreTestF;
TEST_F(TrustworthinessScoreTestF, Result) {
ASSERT_TRUE(0.9374 < score && score < 0.9376);
}
}; // namespace Score
}; // namespace MLCommon
|
b23eb84a7c55d799756b4a7b2dae8754005f2226.cu
|
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <cuda_utils.cuh>
#include <distance/distance.cuh>
#include <iostream>
#include <score/scores.cuh>
#include <vector>
#include "test_utils.h"
namespace MLCommon {
namespace Score {
class TrustworthinessScoreTest : public ::testing::Test {
protected:
void basicTest() {
std::vector<float> X = {
5.6142087, 8.59787, -4.382763, -3.6452143, -5.8816037,
-0.6330313, 4.6920023, -0.79210913, 0.6106314, 2.1210914,
5.919943, -8.43784, -6.4819884, 0.41001374, -6.1052523,
-4.0825715, -5.314755, -2.834671, 5.751696, -6.5012555,
-0.4719201, -7.53353, 7.6789393, -1.4959852, -5.5977287,
-9.564147, 1.2902534, 3.559834, -6.7659483, 8.265964,
4.595404, 9.133477, -6.1553917, -6.319754, -2.9039452,
4.4150834, -3.094395, -4.426273, 9.584571, -5.64133,
6.6209483, 7.4044604, 3.9620576, 5.639907, 10.33007,
-0.8792053, 5.143776, -7.464049, 1.2448754, -5.6300974,
5.4518576, 4.119535, 6.749645, 7.627064, -7.2298336,
1.9681473, -6.9083176, 6.404673, 0.07186685, 9.0994835,
8.51037, -8.986389, 0.40534487, 2.115397, 4.086756,
1.2284287, -2.6272132, 0.06527536, -9.587425, -7.206078,
7.864875, 7.4397306, -6.9233336, -2.6643622, 3.3466153,
7.0408177, -3.6069896, -9.971769, 4.4075623, 7.9063697,
2.559074, 4.323717, 1.6867131, -1.1576937, -9.893141,
-3.251416, -7.4889135, -4.0588717, -2.73338, -7.4852257,
3.4460473, 9.759119, -5.4680476, -4.722435, -8.032619,
-1.4598992, 4.227361, 3.135568, 1.1950601, 1.1982028,
6.998856, -6.131138, -6.6921015, 0.5361224, -7.1213965,
-5.6104236, -7.2212887, -2.2710054, 8.544764, -6.0254574,
1.4582269, -5.5587835, 8.031556, -0.26328218, -5.2591386,
-9.262641, 2.8691363, 5.299787, -9.209455, 8.523085,
5.180329, 10.655528, -5.7171874, -6.7739563, -3.6306462,
4.067106, -1.5912259, -3.2345476, 8.042973, -3.6364832,
4.1242137, 9.886953, 5.4743724, 6.3058076, 9.369645,
-0.5175337, 4.9859877, -7.879498, 1.358422, -4.147944,
3.8984218, 5.894656, 6.4903927, 8.702036, -8.023722,
2.802145, -7.748032, 5.8461113, -0.34215945, 11.298865,
1.4107164, -9.949621, -1.6257563, -10.655836, 2.4528909,
1.1570255, 5.170669, 2.8398793, 7.1838694, 9.088459,
2.631155, 3.964414, 2.8769252, 0.04198391, -0.16993195,
3.6747139, -2.8377378, 6.1782537, 10.759618, -4.5642614,
-8.522967, 0.8614642, 6.623416, -1.029324, 5.5488334,
-7.804511, 2.128833, 7.9042315, 7.789576, -2.7944536,
0.72271067, -10.511495, -0.78634536, -10.661714, 2.9376361,
1.9148129, 6.22859, 0.26264945, 8.028384, 6.8743043,
0.9351067, 7.0690722, 4.2846055, 1.4134506, -0.18144785,
5.2778087, -1.7140163, 9.217541, 8.602799, -2.6537218,
-7.8377395, 1.1244944, 5.4540544, -0.38506773, 3.9885726,
-10.76455, 1.4440702, 9.136163, 6.664117, -5.7046547,
8.038592, -9.229767, -0.2799413, 3.6064725, 4.187257,
1.0516582, -2.0707326, -0.7615968, -8.561018, -3.7831352,
10.300297, 5.332594, -6.5880876, -4.2508664, 1.7985519,
5.7226253, -4.1223383, -9.6697855, 1.4885283, 7.524974,
1.7206005, 4.890457, 3.7264557, 0.4428284, -9.922455,
-4.250455, -6.4410596, -2.107994, -1.4109765, -6.1325397,
0.32883006, 6.0489736, 7.7257385, -8.281174, 1.0129383,
-10.792166, 8.378851, 10.802716, 9.848448, -9.188757,
1.3151443, 1.9971865, -2.521849, 4.3268294, -7.775683,
-2.2902298, 3.0824065, -7.17559, 9.6100855, 7.3965735,
-10.476525, 5.895973, -3.6974669, -7.6688933, 1.7354839,
-7.4045196, -1.7992063, -4.0394845, 5.2471714, -2.250571,
2.528036, -8.343515, -2.2374575, -10.019771, 0.73371273,
3.1853926, 2.7994921, 2.6637669, 7.620401, 7.515571,
0.68636256, 5.834537, 4.650282, -1.0362619, 0.4461701,
3.7870514, -4.1340904, 7.202998, 9.736904, -3.005512,
-8.920467, 1.1228397, 6.2598724, 1.2812365, 4.5442104,
-8.791537, 0.92113096, 8.464749, 8.359035, -4.3923397,
1.2252625, -10.1986475, -1.4409319, -10.013967, 3.9071581,
1.683064, 4.877419, 1.6570637, 9.559105, 7.3546534,
0.36635467, 5.220211, 4.6303267, 0.6601065, 0.16149978,
3.8818731, -3.4438233, 8.42085, 8.659159, -3.0935583,
-8.039611, 2.3060374, 5.134666, 1.0458113, 6.0190983,
-9.143728, 0.99048865, 9.210842, 6.670241, -5.9614363,
0.8747396, 7.078824, 8.067469, -10.314754, 0.45977542,
-9.28306, 9.1838665, 9.318644, 7.189082, -11.092555,
1.0320464, 3.882163, 0.10953151, 7.9029684, -6.9068265,
-1.3526366, 5.3996363, -8.430931, 11.452577, 6.39663,
-11.090514, 4.6662245, -3.1268113, -8.357452, 2.2276728,
-10.357126, -0.9291848, -3.4193344, 3.1289792, -2.5030103,
6.772719, 11.457757, -4.2125936, -6.684548, -4.7611327,
3.6960156, -2.3030636, -3.0591488, 10.452471, -4.1267314,
5.66614, 7.501461, 5.072407, 6.636537, 8.990381,
-0.2559256, 4.737867, -6.2149944, 2.535682, -5.5484023,
5.7113924, 3.4742818, 7.9915137, 7.0052586, -7.156467,
1.4354781, -8.286235, 5.7523417, -2.4175215, 9.678009,
0.05066403, -9.645226, -2.2658763, -9.518178, 4.493372,
2.3232365, 2.1659086, 0.42507997, 8.360246, 8.23535,
2.6878164, 5.236947, 3.4924245, -0.6089895, 0.8884741,
4.359464, -4.6073823, 7.83441, 8.958755, -3.4690795,
-9.182282, 1.2478025, 5.6311107, -1.2408862, 3.6316886,
-8.684654, 2.1078515, 7.2813864, 7.9265943, -3.6135032,
0.4571511, 8.493568, 10.496853, -7.432897, 0.8625995,
-9.607528, 7.2899456, 8.83158, 8.908199, -10.300263,
1.1451302, 3.7871468, -0.97040755, 5.7664757, -8.9688,
-2.146672, 5.9641485, -6.2908535, 10.126465, 6.1553903,
-12.066902, 6.301596, -5.0419583, -8.228695, 2.4879954,
-8.918582, -3.7434099, -4.1593685, 3.7431836, -1.1704745,
0.5524103, 9.109399, 9.571567, -11.209955, 1.2462777,
-9.554555, 9.091726, 11.477966, 7.630937, -10.450911,
1.9205878, 5.358983, -0.44546837, 6.7611346, -9.74753,
-0.5939732, 3.8892255, -6.437991, 10.294727, 5.6723895,
-10.7883, 6.192348, -5.293862, -10.811491, 1.0194173,
-7.074576, -3.192368, -2.5231771, 4.2791643, -0.53309685,
0.501366, 9.636625, 7.710316, -6.4219728, 1.0975566,
-8.218886, 6.9011984, 9.873679, 8.903804, -9.316832,
1.2404599, 4.9039655, 1.2272617, 4.541515, -5.2753224,
-3.2196746, 3.1303136, -7.285681, 9.041425, 5.6417427,
-9.93667, 5.7548947, -5.113397, -8.544622, 4.182665,
-7.7709813, -3.2810235, -3.312072, 3.8900535, -2.0604856,
6.709082, -8.461194, 1.2666026, 4.8770437, 2.6955879,
3.0340345, -1.1614609, -3.536341, -7.090382, -5.36146,
9.072544, 6.4554095, -4.4728956, -1.88395, 3.1095037,
8.782348, -3.316743, -8.65248, 1.6802986, 8.186188,
2.1783829, 4.931278, 4.158475, 1.4033595, -11.320101,
-3.7084908, -6.740436, -2.5555193, -1.0451177, -6.5569925,
0.82810307, 8.505919, 8.332857, -9.488569, -0.21588463,
-8.056692, 8.493993, 7.6401625, 8.812983, -9.377281,
2.4369764, 3.1766508, 0.6300803, 5.6666765, -7.913654,
-0.42301777, 4.506412, -7.8954244, 10.904591, 5.042256,
-9.626183, 8.347351, -3.605006, -7.923387, 1.1024277,
-8.705793, -2.5151258, -2.5066147, 4.0515003, -2.060757,
6.2635093, 8.286584, -6.0509276, -6.76452, -3.1158175,
1.6578803, -1.4608748, -1.24211, 8.151246, -4.2970877,
6.093071, 7.4911637, 4.51018, 4.8425875, 9.211085,
-2.4386222, 4.5830803, -5.6079445, 2.3713675, -4.0707507,
3.1787417, 5.462342, 6.915912, 6.3928423, -7.2970796,
5.0112796, -9.140893, 4.9990606, 0.38391754, 7.7088532,
1.9340848, 8.18833, 8.16617, -9.42086, -0.3388326,
-9.659727, 8.243045, 8.099073, 8.439428, -7.038694,
2.1077902, 3.3866816, -1.9975324, 7.4972878, -7.2525196,
-1.553731, 4.08758, -6.6922374, 9.50525, 4.026735,
-9.243538, 7.2740564, -3.9319072, -6.3228955, 1.6693478,
-7.923119, -3.7423058, -2.2813146, 5.3469067, -1.8285407,
3.3118162, 8.826356, -4.4641976, -6.4751124, -9.200089,
-2.519147, 4.225298, 2.4105988, -0.4344186, 0.53441775,
5.2836394, -8.2816105, -4.996147, -1.6870759, -7.8543897,
-3.9788852, -7.0346904, -3.1289773, 7.4567637, -5.6227813,
1.0709786, -8.866012, 8.427324, -1.1755563, -5.789216,
-8.197835, 5.3342214, 6.0646234, -6.8975716, 7.717031,
3.480355, 8.312151, -3.6645212, -3.0976524, -8.090359,
-1.9176173, 2.4257212, 1.9700835, 0.4098958, 2.1341088,
7.652741, -9.9595585, -5.989757, 0.10119354, -7.935407,
-5.792786, -5.22783, -4.318978, 5.414037, -6.4621663,
1.670883, -6.9224787, 8.696932, -2.0214002, -6.6681314,
-8.326418, 4.9049683, 5.4442496, -6.403739, 7.5822453,
7.0972915, -9.072851, -0.23897195, 1.7662339, 5.3096304,
1.983179, -2.222645, -0.34700772, -9.094717, -6.107907,
9.525174, 8.1550665, -5.6940084, -4.1636486, 1.7360662,
8.528821, -3.7299833, -9.341266, 2.608542, 9.108706,
0.7978509, 4.2488184, 2.454484, 0.9446999, -10.106636,
-3.8973773, -6.6566644, -4.5647273, -0.99837756, -6.568582,
9.324853, -7.9020953, 2.0910501, 2.2896829, 1.6790711,
1.3159255, -3.5258796, 1.8898442, -8.105812, -4.924962,
8.771129, 7.1202874, -5.991957, -3.4106019, 2.4450088,
7.796387, -3.055946, -7.8971434, 1.9856719, 9.001636,
1.8511922, 3.019749, 3.1227696, 0.4822102, -10.021213,
-3.530504, -6.225959, -3.0029628, -1.7881511, -7.3879776,
1.3925704, 9.499782, -3.7318087, -3.7074296, -7.7466836,
-1.5284524, 4.0535855, 3.112011, 0.10340207, -0.5429599,
6.67026, -9.155924, -4.924038, 0.64248866, -10.0103655,
-3.2742946, -4.850029, -3.6707063, 8.586258, -5.855605,
4.906918, -6.7813993, 7.9938135, -2.5473144, -5.688948,
-7.822478, 2.1421318, 4.66659, -9.701272, 9.549149,
0.8998125, -8.651497, -0.56899565, -8.639817, 2.3088377,
2.1264515, 3.2764478, 2.341989, 8.594338, 8.630639,
2.8440373, 6.2043204, 4.433932, 0.6320018, -1.8179281,
5.09452, -1.5741565, 8.153934, 8.744339, -3.6945698,
-8.883078, 1.5329908, 5.2745943, 0.44716078, 4.8809066,
-7.9594903, 1.134374, 9.233994, 6.5528665, -4.520542,
9.477355, -8.622195, -0.23191702, 2.0485356, 3.9379985,
1.5916302, -1.4516805, -0.0843819, -7.8554378, -5.88308,
7.999766, 6.2572145, -5.585321, -4.0097756, 0.42382592,
6.160884, -3.631315, -8.333449, 2.770595, 7.8495173,
3.3331623, 4.940415, 3.6207345, -0.037517, -11.034698,
-3.185103, -6.614664, -3.2177854, -2.0792234, -6.8879867,
7.821685, -8.455084, 1.0784642, 4.0033927, 2.7343264,
2.6052725, -4.1224284, -0.89305353, -6.8267674, -4.9715133,
8.880253, 5.6994023, -5.9695024, -4.9181266, 1.3017995,
7.972617, -3.9452884, -10.424556, 2.4504194, 6.21529,
0.93840516, 4.2070026, 6.159839, 0.91979957, -8.706724,
-4.317946, -6.6823545, -3.0388, -2.464262, -7.3716645,
1.3926703, 6.544412, -5.6251183, -5.122411, -8.622049,
-2.3905911, 3.9138813, 1.9779967, -0.05011125, 0.13310997,
7.229751, -9.742043, -8.08724, 1.2426697, -7.9230795,
-3.3162494, -7.129571, -3.5488048, 7.4701195, -5.2357526,
0.5917681, -6.272206, 6.342328, -2.909731, -4.991607,
-8.845513, 3.3228495, 7.033246, -7.8180246, 8.214469,
6.3910093, 9.185153, -6.20472, -7.713809, -3.8481297,
3.5579286, 0.7078448, -3.2893546, 7.384514, -4.448121,
3.0104196, 9.492943, 8.024847, 4.9114385, 9.965594,
-3.014036, 5.182494, -5.8806014, 2.5312455, -5.9926524,
4.474469, 6.3717875, 6.993105, 6.493093, -8.935534,
3.004074, -8.055647, 8.315765, -1.3026813, 8.250377,
0.02606229, 6.8508425, 9.655665, -7.0116496, -0.41060972,
-10.049198, 7.897801, 6.7791023, 8.3362, -9.821014,
2.491157, 3.5160472, -1.6228812, 7.398063, -8.769123,
-3.1743705, 3.2827861, -6.497855, 10.831924, 5.2761307,
-9.704417, 4.3817043, -3.9841619, -8.111647, 1.1883026,
-8.115312, -2.9240117, -5.8879666, 4.20928, -0.3587938,
6.935672, -10.177582, 0.48819053, 3.1250648, 2.9306343,
3.082544, -3.477687, -1.3768549, -7.4922366, -3.756631,
10.039836, 3.6670392, -5.9761434, -4.4728765, 3.244255,
7.027899, -2.3806512, -10.4100685, 1.605716, 7.7953773,
0.5408159, 1.7156523, 3.824097, -1.0604783, -10.142124,
-5.246805, -6.5283823, -4.579547, -2.42714, -6.709197,
2.7782338, 7.33353, -6.454507, -2.9929368, -7.8362985,
-2.695445, 2.4900775, 1.6682367, 0.4641757, -1.0495365,
6.9631333, -9.291356, -8.23837, -0.34263706, -8.275113,
-2.8454232, -5.0864096, -2.681942, 7.5450225, -6.2517986,
0.06810654, -6.470652, 4.9042645, -1.8369255, -6.6937943,
-7.9625087, 2.8510258, 6.180508, -8.282598, 7.919079,
1.4897474, 6.7217417, -4.2459426, -4.114431, -8.375707,
-2.143264, 5.6972933, 1.5574739, 0.39375135, 1.7930849,
5.1737595, -7.826241, -5.160268, -0.80433255, -7.839536,
-5.2620406, -5.4643164, -3.185536, 6.620315, -7.065227,
1.0524757, -6.125088, 5.7126627, -1.6161644, -3.852159,
-9.164279, 2.7005782, 5.946544, -8.468236, 8.2145405,
1.1035942, 6.590157, -4.0461283, -4.8090615, -7.6702685,
-2.1121511, 5.1147075, 1.6128504, 2.0064135, 1.0544407,
6.0038295, -7.8282537, -4.801278, 0.32349443, -8.0649805,
-4.372714, -5.61336, -5.21394, 8.176595, -5.4753284,
1.7800134, -8.267283, 7.2133374, -0.16594432, -6.317046,
-9.490406, 4.1261597, 5.473317, -7.7551675, 7.007468,
7.478628, -8.801905, 0.10975724, 3.5478222, 4.797803,
1.3825226, -3.357369, 0.99262005, -6.94877, -5.4781394,
9.632604, 5.7492557, -5.9014316, -3.1632116, 2.340859,
8.708098, -3.1255999, -8.848661, 4.5612836, 8.455157,
0.73460823, 4.112301, 4.392744, -0.30759293, -6.8036823,
-3.0331545, -8.269506, -2.82415, -0.9411246, -5.993506,
2.1618164, -8.716055, -0.7432543, -10.255819, 3.095418,
2.5131428, 4.752442, 0.9907621, 7.8279433, 7.85814,
0.50430876, 5.2840405, 4.457291, 0.03330028, -0.40692952,
3.9244103, -2.117118, 7.6977615, 8.759009, -4.2157164,
-9.136053, 3.247858, 4.668686, 0.76162136, 5.3833632,
-9.231471, 0.44309422, 8.380872, 6.7211227, -3.091507,
2.173508, -9.038242, -1.3666698, -9.819077, 0.37825826,
2.3898845, 4.2440815, 1.9161536, 7.24787, 6.9124637,
1.6238527, 5.1140285, 3.1935842, 1.02845, -1.1273454,
5.638998, -2.497932, 8.342559, 8.586319, -2.9069402,
-7.6387944, 3.5975037, 4.4115705, 0.41506064, 4.9078383,
-9.68327, 1.8159529, 9.744613, 8.40622, -4.495336,
9.244892, -8.789869, 1.3158468, 4.018167, 3.3922846,
2.652022, -2.7495477, 0.2528986, -8.268324, -6.004913,
10.428784, 6.6580734, -5.537176, -1.7177434, 2.7504628,
6.7735, -2.4454272, -9.998361, 2.9483433, 6.8266654,
2.3787718, 4.472637, 2.5871701, 0.7355365, -7.7027745,
-4.1879907, -7.172832, -4.1843605, -0.03646783, -5.419406,
6.958486, 11.011111, -7.1821184, -7.956423, -3.408451,
4.6850276, -2.348787, -4.398289, 6.9787564, -3.8324208,
5.967827, 8.433518, 4.660108, 5.5657144, 9.964243,
-1.3515275, 6.404833, -6.4805903, 2.4379845, -6.0816774,
1.752272, 5.3771873, 6.9613523, 6.9788294, -6.3894596,
3.7521114, -6.8034263, 6.4458385, -0.7233525, 10.512529,
4.362273, 9.231461, -6.3382263, -7.659, -3.461823,
4.71463, 0.17817476, -3.685746, 7.2962036, -4.6489477,
5.218017, 11.546999, 4.7218375, 6.8498397, 9.281103,
-3.900459, 6.844054, -7.0886965, -0.05019227, -8.233724,
5.5808983, 6.374517, 8.321048, 7.969449, -7.3478637,
1.4917561, -8.003144, 4.780668, -1.1981848, 7.753739,
2.0260844, -8.880096, -3.4258451, -7.141975, 1.9637157,
1.814725, 5.311151, 1.4831505, 7.8483663, 7.257948,
1.395786, 6.417756, 5.376912, 0.59505713, 0.00062552,
3.6634305, -4.159713, 7.3571978, 10.966816, -2.5419605,
-8.466229, 1.904205, 5.6338267, -0.52567476, 5.59736,
-8.361799, 0.5009981, 8.460681, 7.3891273, -3.5272243,
5.0552278, 9.921456, -7.69693, -7.286378, -1.9198836,
3.1666567, -2.5832257, -2.2445817, 9.888111, -5.076563,
5.677401, 7.497946, 5.662994, 5.414262, 8.566503,
-2.5530663, 7.1032815, -6.0612082, 1.3419591, -4.9595256,
4.3377542, 4.3790717, 6.793512, 8.383502, -7.1278043,
3.3240774, -9.379446, 6.838661, -0.81241214, 8.694813,
0.79141915, 7.632467, 8.575382, -8.533798, 0.28954387,
-7.5675836, 5.8653326, 8.97235, 7.1649346, -10.575289,
0.9359381, 5.02381, -0.5609511, 5.543464, -7.69131,
-2.1792977, 2.4729247, -6.1917787, 10.373678, 7.6549597,
-8.809486, 5.5657206, -3.3169382, -8.042887, 2.0874746,
-7.079005, -3.33398, -3.6843317, 4.0172358, -2.0754814,
1.1726758, 7.4618697, 6.9483604, -8.469206, 0.7401797,
-10.318176, 8.384557, 10.5476265, 9.146971, -9.250223,
0.6290606, 4.4941425, -0.7514017, 7.2271705, -8.309598,
-1.4761636, 4.0140634, -6.021102, 9.132852, 5.6610966,
-11.249811, 8.359293, -1.9445792, -7.7393436, -0.3931331,
-8.824441, -2.5995944, -2.5714035, 4.140213, -3.6863053,
5.517265, 9.020411, -4.9286127, -7.871219, -3.7446704,
2.5179656, -1.4543481, -2.2703636, 7.010597, -3.6436229,
6.753862, 7.4129915, 7.1406755, 5.653706, 9.5445175,
0.15698843, 4.761813, -7.698002, 1.6870106, -4.5410123,
4.171763, 5.3747005, 6.341021, 7.456738, -8.231657,
2.763487, -9.208167, 6.676799, -1.1957736, 10.062605,
4.0975976, 7.312957, -2.4981596, -2.9658387, -8.150425,
-2.1075552, 2.64375, 1.6636052, 1.1483809, 0.09276015,
5.8556347, -7.8481026, -5.9913163, -0.02840613, -9.937289,
-1.0486673, -5.2340155, -3.83912, 7.7165728, -8.409944,
0.80863273, -6.9119215, 7.5712357, 0.36031485, -6.056131,
-8.470033, 1.8678337, 3.0121377, -7.3096333, 8.205484,
5.262654, 8.774514, -4.7603083, -7.2096143, -4.437014,
3.6080024, -1.624254, -4.2787876, 8.880863, -4.8984556,
5.1782074, 9.944454, 3.911282, 3.5396595, 8.867042,
-1.2006199, 5.393288, -5.6455317, 0.7829499, -4.0338907,
2.479272, 6.5080743, 8.582535, 7.0097537, -6.9823785,
3.984318, -7.225381, 5.3135114, -1.0391048, 8.951443,
-0.70119005, -8.510742, -0.42949116, -10.9224825, 2.8176029,
1.6800792, 5.778404, 1.7269998, 7.1975236, 7.7258267,
2.7632928, 5.3399253, 3.4650044, 0.01971426, -1.6468811,
4.114996, -1.5110453, 6.8689218, 8.269899, -3.1568048,
-7.0344677, 1.2911975, 5.950357, 0.19028673, 4.657226,
-8.199647, 2.246055, 8.989509, 5.3101015, -4.2400866};
std::vector<float> X_embedded = {
-0.41849962, -0.53906363, 0.46958843, -0.35832694, -0.23779503,
-0.29751351, -0.01072748, -0.21353109, -0.54769957, -0.55086273,
0.37093949, -0.12714292, -0.06639574, -0.36098689, -0.13060696,
-0.07362658, -1.01205945, -0.39285606, 0.2864089, -0.32031146,
-0.19595343, 0.08900568, -0.04813879, -0.06563424, -0.42655188,
-0.69014251, 0.51459783, -0.1942696, -0.07767916, -0.6119386,
0.04813685, -0.22557008, -0.56890118, -0.60293794, 0.43429622,
-0.09240723, -0.00624062, -0.25800395, -0.1886092, 0.01655941,
-0.01961523, -0.14147359, 0.41414487, -0.8512944, -0.61199242,
-0.18586016, 0.14024924, -0.41635606, -0.02890144, 0.1065347,
0.39700791, -1.14060664, -0.95313865, 0.14416681, 0.17306046,
-0.53189689, -0.98987544, -0.67918193, 0.41787854, -0.20878236,
-0.06612862, 0.03502904, -0.03765266, -0.0980606, -0.00971657,
0.29432917, 0.36575687, -1.1645509, -0.89094597, 0.03718805,
0.2310573, -0.38345811, -0.10401925, -0.10653082, 0.38469055,
-0.88302094, -0.80197543, 0.03548668, 0.02775662, -0.54374295,
0.03379983, 0.00923623, 0.29320273, -1.05263519, -0.93360096,
0.03778313, 0.12360487, -0.56437284, 0.0644429, 0.33432651,
0.36450726, -1.22978747, -0.83822101, -0.18796451, 0.34888434,
-0.3801491, -0.45327303, -0.59747899, 0.39697698, -0.15616602,
-0.06159166, -0.40301991, -0.11725303, -0.11913263, -0.12406619,
-0.11227967, 0.43083835, -0.90535849, -0.81646025, 0.10012121,
-0.0141237, -0.63747931, 0.04805023, 0.34190539, 0.50725192,
-1.17861414, -0.74641538, -0.09333111, 0.27992678, -0.56214809,
0.04970971, 0.36249384, 0.57705611, -1.16913795, -0.69849908,
0.10957897, 0.27983218, -0.62088525, 0.0410459, 0.23973398,
0.40960434, -1.14183664, -0.83321381, 0.02149482, 0.21720445,
-0.49869928, -0.95655465, -0.51680422, 0.45761383, -0.08351214,
-0.12151554, 0.00819737, -0.20813803, -0.01055793, 0.25319234,
0.36154974, 0.1822421, -1.15837133, -0.92209691, -0.0501582,
0.08535917, -0.54003763, -1.08675635, -1.04009593, 0.09408128,
0.07009826, -0.01762833, -0.19180447, -0.18029785, -0.20342001,
0.04034991, 0.1814747, 0.36906669, -1.13532007, -0.8852452,
0.0782818, 0.16825101, -0.50301319, -0.29128098, -0.65341312,
0.51484352, -0.38758236, -0.22531103, -0.55021971, 0.10804344,
-0.3521522, -0.38849035, -0.74110794, 0.53761131, -0.25142813,
-0.1118066, -0.47453368, 0.06347904, -0.23796193, -1.02682328,
-0.47594091, 0.39515916, -0.2782529, -0.16566519, 0.08063579,
0.00810116, -0.06213913, -1.059654, -0.62496334, 0.53698546,
-0.11806234, 0.00356161, 0.11513405, -0.14213292, 0.04102662,
-0.36622161, -0.73686272, 0.48323864, -0.27338892, -0.14203401,
-0.41736352, 0.03332564, -0.21907479, -0.06396769, 0.01831361,
0.46263444, -1.01878166, -0.86486858, 0.17622118, -0.01249686,
-0.74530888, -0.9354887, -0.5027945, 0.38170099, -0.15547098,
0.00677824, -0.04677663, -0.13541745, 0.07253501, -0.97933143,
-0.58001202, 0.48235369, -0.18836913, -0.02430783, 0.07572441,
-0.08101331, 0.00630076, -0.16881248, -0.67989182, 0.46083611,
-0.43910736, -0.29321918, -0.38735861, 0.07669903, -0.29749861,
-0.40047669, -0.56722462, 0.33168188, -0.13118173, -0.06672747,
-0.56856316, -0.26269144, -0.14236671, 0.10651901, 0.4962585,
0.38848072, -1.06653547, -0.64079332, -0.47378591, 0.43195483,
-0.04856951, -0.9840439, -0.70610428, 0.34028092, -0.2089237,
-0.05382041, 0.01625874, -0.02080803, -0.12535211, -0.04146428,
-1.24533033, 0.48944879, 0.0578458, 0.26708388, -0.90321028,
0.35377088, -0.36791429, -0.35382384, -0.52748734, 0.42854419,
-0.31744713, -0.19174226, -0.39073724, -0.03258846, -0.19978228,
-0.36185205, -0.57412046, 0.43681973, -0.25414538, -0.12904905,
-0.46334973, -0.03123853, -0.11303604, -0.87073672, -0.45441297,
0.41825858, -0.25303507, -0.21845073, 0.10248682, -0.11045569,
-0.10002795, -0.00572806, 0.16519061, 0.42651513, -1.11417019,
-0.83789682, 0.02995787, 0.16843079, -0.53874511, 0.03056994,
0.17877036, 0.49632853, -1.03276777, -0.74778616, -0.03971953,
0.10907949, -0.67385727, -0.9523471, -0.56550741, 0.40409449,
-0.2703723, -0.10175014, 0.13605487, -0.06306008, -0.01768126,
-0.4749442, -0.56964815, 0.39389887, -0.19248079, -0.04161081,
-0.38728487, -0.20341556, -0.12656988, -0.35949609, -0.46137866,
0.28798422, -0.06603147, -0.04363992, -0.60343552, -0.23565227,
-0.10242701, -0.06792886, 0.09689897, 0.33259571, -0.98854214,
-0.84444433, 0.00673901, 0.13457057, -0.43145794, -0.51500046,
-0.50821936, 0.38000089, 0.0132636, 0.0580942, -0.40157595,
-0.11967677, 0.02549113, -0.10350953, 0.22918226, 0.40411913,
-1.05619383, -0.71218503, -0.02197581, 0.26422262, -0.34765676,
0.06601537, 0.21712676, 0.34723559, -1.20982027, -0.95646334,
0.00793948, 0.27620381, -0.43475035, -0.67326003, -0.6137197,
0.43724492, -0.17666136, -0.06591748, -0.18937394, -0.07400128,
-0.06881691, -0.5201112, -0.61088628, 0.4225319, -0.18969463,
-0.06921366, -0.33993208, -0.06990873, -0.10288513, -0.70659858,
-0.56003648, 0.46628812, -0.16090363, -0.0185108, -0.1431348,
-0.1128775, -0.0078648, -0.02323332, 0.04292452, 0.39291084,
-0.94897962, -0.63863206, -0.16546988, 0.23698957, -0.30633628};
cudaStream_t stream;
cudaStreamCreate(&stream);
allocator.reset(new defaultDeviceAllocator);
float* d_X = (float*)allocator->allocate(X.size() * sizeof(float), stream);
float* d_X_embedded =
(float*)allocator->allocate(X_embedded.size() * sizeof(float), stream);
updateDevice(d_X, X.data(), X.size(), stream);
updateDevice(d_X_embedded, X_embedded.data(), X_embedded.size(), stream);
// euclidean test
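  // arguments after the two device pointers are, in order, the sample count,
  // the original dimensionality, the embedded dimensionality and the
  // neighbourhood size (presumably n = 50, m = 30, d = 8, n_neighbors = 5 here;
  // X_embedded holds 50 * 8 = 400 values, which matches).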
score =
trustworthiness_score<float,
ML::Distance::DistanceType::EucUnexpandedL2Sqrt>(
d_X, d_X_embedded, 50, 30, 8, 5, allocator, stream);
allocator->deallocate(d_X, X.size() * sizeof(float), stream);
allocator->deallocate(d_X_embedded, X_embedded.size() * sizeof(float),
stream);
cudaStreamDestroy(stream);
}
void SetUp() override { basicTest(); }
void TearDown() override {}
protected:
double score;
std::shared_ptr<deviceAllocator> allocator;
};
typedef TrustworthinessScoreTest TrustworthinessScoreTestF;
TEST_F(TrustworthinessScoreTestF, Result) {
ASSERT_TRUE(0.9374 < score && score < 0.9376);
}
}; // namespace Score
}; // namespace MLCommon
|
e80a4b97d8ce90852857a9e4507601eeb58e5e5f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file dnn/src/cuda/conv_bias/chanwise/fwd_8x8x32.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "src/cuda/conv_bias/chanwise/kern.cuh"
#include <cassert>
#include <cstdio>
using namespace megdnn;
using namespace cuda;
using namespace conv_bias;
using namespace chanwise;
namespace {
__host__ __device__ void get_receptive_field_size(uint32_t OH, uint32_t OW,
uint32_t FH, uint32_t FW,
uint32_t SH, uint32_t SW,
uint32_t DH, uint32_t DW,
uint32_t* RH, uint32_t* RW) {
    // DFH = dilated FH, DFW = dilated FW
// RH = receptive field height, RW = receptive field width
uint32_t DFH = (FH - 1) * DH + 1, DFW = (FW - 1) * DW + 1;
*RH = ((OH - 1) * SH + 1) + DFH - 1;
*RW = ((OW - 1) * SW + 1) + DFW - 1;
}
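// e.g. with FH = FW = 3, SH = SW = 1 and DH = DW = 1, the 4x4 output tile needs
// a 6x6 receptive field: DFH = (3 - 1) * 1 + 1 = 3 and RH = ((4 - 1) * 1 + 1) + 3 - 1 = 6.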
// 32x4x4 threads
// assume that C is a multiple of 4
// F == 0: FH/FW should be retrieved from param
// F != 0: FH/FW should use F
template <uint32_t F>
__global__ void kern(int32_t* dst, const int8_t* src, const int8_t* flt,
Param param) {
    // each block processes 128 channels over a 4x4 spatial area.
uint32_t C = param.src_chl, IH = param.src_h, IW = param.src_w,
OH = param.out_h, OW = param.out_w, FH = F == 0 ? param.flt_h : F,
FW = F == 0 ? param.flt_w : F, PH = param.pad_h, PW = param.pad_w,
SH = param.stride_h, SW = param.stride_w, DH = param.dilation_h,
DW = param.dilation_w;
const uint32_t* src_32 = reinterpret_cast<const uint32_t*>(src);
const uint32_t* flt_32 = reinterpret_cast<const uint32_t*>(flt);
uint32_t bidx = blockIdx.x, bidy = blockIdx.y, bidz = blockIdx.z;
uint32_t c_beg = blockIdx.x * 128, c_end = min((blockIdx.x + 1) * 128, C),
c_cur = c_beg + threadIdx.x * 4;
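    // linear thread id within the 32x4x4 block: tid = tidx + 32 * tidy + 128 * tidz,
    // and tidyz = tidy + 4 * tidz indexes this thread's cell of the 4x4 spatial tile.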
uint32_t tidx = threadIdx.x, tidy = threadIdx.y, tidz = threadIdx.z,
tid = (tidx << 0) | (tidy << 5) | (tidz << 7),
tid_stride = 32 * 4 * 4, tidyz = (tidy << 0) | (tidz << 2),
tidyz_stride = 4 * 4;
uint32_t oh = bidz * 4 + tidz, ow = bidy * 4 + tidy;
uint32_t C_32 = C >> 2;
// calculate receptive field of 4x4 output pixels
uint32_t RH, RW;
get_receptive_field_size(4, 4, FH, FW, SH, SW, DH, DW, &RH, &RW);
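    // dynamic shared memory layout (byte offsets; sizes are set up in run_fwd_8x8x32):
    //   [0, 128*FH*FW)               raw filter staging, int8
    //   [128*FH*FW, 2*128*FH*FW)     filter transposed to (FH*FW, 128), int8
    //   next 128*RH*RW bytes         (RH, RW, 128) source tile, int8
    //   remainder                    4x4 output tile, int32, row stride 129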
extern __shared__ int8_t shared[];
int8_t* flt_shared_tmp = static_cast<int8_t*>(static_cast<void*>(shared));
uint32_t* flt_shared_tmp_32 = reinterpret_cast<uint32_t*>(flt_shared_tmp);
int8_t* flt_shared = static_cast<int8_t*>(
static_cast<void*>(shared + 128 * FH * FW * sizeof(int8_t)));
uint32_t* flt_shared_32 = reinterpret_cast<uint32_t*>(flt_shared);
int8_t* src_shared = static_cast<int8_t*>(
static_cast<void*>(shared + 128 * FH * FW * sizeof(int8_t) +
128 * FH * FW * sizeof(int8_t)));
uint32_t* src_shared_32 = reinterpret_cast<uint32_t*>(src_shared);
int32_t* dst_shared = static_cast<int32_t*>(static_cast<void*>(
shared + 128 * FH * FW * sizeof(int8_t) +
128 * FH * FW * sizeof(int8_t) + 128 * RH * RW * sizeof(int8_t)));
// read original filter to shared memory
    // the int8 element counts below (flt_offset, flt_amount) must be multiples of 4 here.
uint32_t flt_offset = c_beg * FH * FW;
uint32_t flt_offset_32 = flt_offset >> 2;
uint32_t flt_amount = (c_end - c_beg) * FH * FW;
uint32_t flt_amount_32 = flt_amount >> 2;
for (uint32_t id = tid; id < flt_amount_32; id += tid_stride) {
flt_shared_tmp_32[id] = flt_32[flt_offset_32 + id];
}
__syncthreads();
// transpose filter: (flt_amount, FH*FW) -> (FH*FW, 128)
// typical example: (128, 9) -> (9, 128)
for (uint32_t idyz = tidyz; idyz < FH * FW; idyz += tidyz_stride)
for (uint32_t idx = tidx; idx < 128; idx += 32) {
uint32_t from_idx = idx * FH * FW + idyz;
uint32_t to_idx = idx + idyz * 128;
if (from_idx < flt_amount) {
flt_shared[to_idx] = flt_shared_tmp[from_idx];
} else {
flt_shared[to_idx] = 0;
}
}
// no need to sync here
// __syncthreads();
// read (RH, RW, 128) src from global to shared
for (uint32_t rh = tidz; rh < RH; rh += 4)
for (uint32_t rw = tidy; rw < RW; rw += 4) {
uint32_t ih = bidz * 4 * SH + rh - PH;
uint32_t iw = bidy * 4 * SW + rw - PW;
uint32_t to_idx = (rh * RW + rw) * 32 + tidx;
uint32_t c_32 = bidx * 32 + tidx;
uint32_t from_idx = (ih * IW + iw) * C_32 + c_32;
if (ih < IH && iw < IW && c_32 < C_32) {
src_shared_32[to_idx] = src_32[from_idx];
} else {
src_shared_32[to_idx] = 0;
}
}
__syncthreads();
// do convolution
if (c_cur < c_end && oh < OH && ow < OW) {
int32_t dst0 = 0, dst1 = 0, dst2 = 0, dst3 = 0;
#pragma unroll
for (uint32_t fh = 0; fh < FH; ++fh)
#pragma unroll
for (uint32_t fw = 0; fw < FW; ++fw) {
uint32_t rh = tidz * SH + fh * DH, rw = tidy * SW + fw * DW;
uint32_t sval_32 = src_shared_32[(rh * RW + rw) * 32 + tidx];
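                // each packed 32-bit word holds four consecutive int8 channels
                // (low byte first); unpack them and accumulate one product per lane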
int32_t sval0 = int8_t((sval_32 >> 0) & 255),
sval1 = int8_t((sval_32 >> 8) & 255),
sval2 = int8_t((sval_32 >> 16) & 255),
sval3 = int8_t((sval_32 >> 24) & 255);
uint32_t fval_32 = flt_shared_32[(fh * FW + fw) * 32 + tidx];
int32_t fval0 = int8_t((fval_32 >> 0) & 255),
fval1 = int8_t((fval_32 >> 8) & 255),
fval2 = int8_t((fval_32 >> 16) & 255),
fval3 = int8_t((fval_32 >> 24) & 255);
dst0 += sval0 * fval0;
dst1 += sval1 * fval1;
dst2 += sval2 * fval2;
dst3 += sval3 * fval3;
}
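        // stage the four partial sums in shared memory; the row stride of 129
        // (not 128) avoids shared-memory bank conflicts on the write-back below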
dst_shared[tidyz * 129 + tidx * 4 + 0] = dst0;
dst_shared[tidyz * 129 + tidx * 4 + 1] = dst1;
dst_shared[tidyz * 129 + tidx * 4 + 2] = dst2;
dst_shared[tidyz * 129 + tidx * 4 + 3] = dst3;
}
__syncthreads();
if (oh < OH && ow < OW) {
#pragma unroll
for (uint32_t k = 0; k < 4; ++k) {
uint32_t c = c_beg + tidx + k * 32;
if (c < c_end) {
dst[(oh * OW + ow) * C + c] =
dst_shared[tidyz * 129 + tidx + k * 32];
}
}
}
}
} // anonymous namespace
void megdnn::cuda::conv_bias::chanwise::run_fwd_8x8x32(int32_t* dst,
const int8_t* src,
const int8_t* flt,
const Param& param,
hipStream_t stream) {
uint32_t N = param.batch, C = param.src_chl, IH = param.src_h,
IW = param.src_w, OH = param.out_h, OW = param.out_w,
FH = param.flt_h, FW = param.flt_w, SH = param.stride_h,
SW = param.stride_w, DH = param.dilation_h, DW = param.dilation_w;
dim3 threads(32, 4, 4);
dim3 blocks(DIVUP(C, 128), DIVUP(OW, 4), DIVUP(OH, 4));
// shared mem size: filter*2 + src + dst
// filter
uint32_t filter_shared_mem_size = 128 * FH * FW * sizeof(int8_t);
// src
uint32_t RH, RW;
get_receptive_field_size(4, 4, FH, FW, SH, SW, DH, DW, &RH, &RW);
uint32_t src_shared_mem_size = 128 * RH * RW * sizeof(int8_t);
// dst
// use 129 instead of 128 to avoid shared memory bank conflict
uint32_t dst_shared_mem_size = 129 * 4 * 4 * sizeof(int32_t);
uint32_t shared_mem_size = 2 * filter_shared_mem_size +
src_shared_mem_size + dst_shared_mem_size;
void (*kptr)(int32_t*, const int8_t*, const int8_t*, Param) = kern<0>;
if (FH == 1 && FW == 1)
kptr = kern<1>;
if (FH == 3 && FW == 3)
kptr = kern<3>;
if (FH == 5 && FW == 5)
kptr = kern<5>;
for (uint32_t n = 0; n < N; ++n) {
int32_t* dptr = dst + n * C * OH * OW;
const int8_t* sptr = src + n * C * IH * IW;
const int8_t* fptr = flt;
hipLaunchKernelGGL(( kptr), dim3(blocks), dim3(threads), shared_mem_size, stream, dptr, sptr, fptr,
param);
}
after_kernel_launch();
}
// vim: syntax=cpp.doxygen
|
e80a4b97d8ce90852857a9e4507601eeb58e5e5f.cu
|
/**
* \file dnn/src/cuda/conv_bias/chanwise/fwd_8x8x32.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "src/cuda/conv_bias/chanwise/kern.cuh"
#include <cassert>
#include <cstdio>
using namespace megdnn;
using namespace cuda;
using namespace conv_bias;
using namespace chanwise;
namespace {
__host__ __device__ void get_receptive_field_size(uint32_t OH, uint32_t OW,
uint32_t FH, uint32_t FW,
uint32_t SH, uint32_t SW,
uint32_t DH, uint32_t DW,
uint32_t* RH, uint32_t* RW) {
    // DFH = dilated FH, DFW = dilated FW
// RH = receptive field height, RW = receptive field width
uint32_t DFH = (FH - 1) * DH + 1, DFW = (FW - 1) * DW + 1;
*RH = ((OH - 1) * SH + 1) + DFH - 1;
*RW = ((OW - 1) * SW + 1) + DFW - 1;
}
// 32x4x4 threads
// assume that C is a multiple of 4
// F == 0: FH/FW should be retrieved from param
// F != 0: FH/FW should use F
template <uint32_t F>
__global__ void kern(int32_t* dst, const int8_t* src, const int8_t* flt,
Param param) {
    // each block processes 128 channels over a 4x4 spatial area.
uint32_t C = param.src_chl, IH = param.src_h, IW = param.src_w,
OH = param.out_h, OW = param.out_w, FH = F == 0 ? param.flt_h : F,
FW = F == 0 ? param.flt_w : F, PH = param.pad_h, PW = param.pad_w,
SH = param.stride_h, SW = param.stride_w, DH = param.dilation_h,
DW = param.dilation_w;
const uint32_t* src_32 = reinterpret_cast<const uint32_t*>(src);
const uint32_t* flt_32 = reinterpret_cast<const uint32_t*>(flt);
uint32_t bidx = blockIdx.x, bidy = blockIdx.y, bidz = blockIdx.z;
uint32_t c_beg = blockIdx.x * 128, c_end = min((blockIdx.x + 1) * 128, C),
c_cur = c_beg + threadIdx.x * 4;
uint32_t tidx = threadIdx.x, tidy = threadIdx.y, tidz = threadIdx.z,
tid = (tidx << 0) | (tidy << 5) | (tidz << 7),
tid_stride = 32 * 4 * 4, tidyz = (tidy << 0) | (tidz << 2),
tidyz_stride = 4 * 4;
uint32_t oh = bidz * 4 + tidz, ow = bidy * 4 + tidy;
uint32_t C_32 = C >> 2;
// calculate receptive field of 4x4 output pixels
uint32_t RH, RW;
get_receptive_field_size(4, 4, FH, FW, SH, SW, DH, DW, &RH, &RW);
extern __shared__ int8_t shared[];
int8_t* flt_shared_tmp = static_cast<int8_t*>(static_cast<void*>(shared));
uint32_t* flt_shared_tmp_32 = reinterpret_cast<uint32_t*>(flt_shared_tmp);
int8_t* flt_shared = static_cast<int8_t*>(
static_cast<void*>(shared + 128 * FH * FW * sizeof(int8_t)));
uint32_t* flt_shared_32 = reinterpret_cast<uint32_t*>(flt_shared);
int8_t* src_shared = static_cast<int8_t*>(
static_cast<void*>(shared + 128 * FH * FW * sizeof(int8_t) +
128 * FH * FW * sizeof(int8_t)));
uint32_t* src_shared_32 = reinterpret_cast<uint32_t*>(src_shared);
int32_t* dst_shared = static_cast<int32_t*>(static_cast<void*>(
shared + 128 * FH * FW * sizeof(int8_t) +
128 * FH * FW * sizeof(int8_t) + 128 * RH * RW * sizeof(int8_t)));
// read original filter to shared memory
    // the int8 element counts below (flt_offset, flt_amount) must be multiples of 4 here.
uint32_t flt_offset = c_beg * FH * FW;
uint32_t flt_offset_32 = flt_offset >> 2;
uint32_t flt_amount = (c_end - c_beg) * FH * FW;
uint32_t flt_amount_32 = flt_amount >> 2;
for (uint32_t id = tid; id < flt_amount_32; id += tid_stride) {
flt_shared_tmp_32[id] = flt_32[flt_offset_32 + id];
}
__syncthreads();
// transpose filter: (flt_amount, FH*FW) -> (FH*FW, 128)
// typical example: (128, 9) -> (9, 128)
for (uint32_t idyz = tidyz; idyz < FH * FW; idyz += tidyz_stride)
for (uint32_t idx = tidx; idx < 128; idx += 32) {
uint32_t from_idx = idx * FH * FW + idyz;
uint32_t to_idx = idx + idyz * 128;
if (from_idx < flt_amount) {
flt_shared[to_idx] = flt_shared_tmp[from_idx];
} else {
flt_shared[to_idx] = 0;
}
}
// no need to sync here
// __syncthreads();
// read (RH, RW, 128) src from global to shared
for (uint32_t rh = tidz; rh < RH; rh += 4)
for (uint32_t rw = tidy; rw < RW; rw += 4) {
uint32_t ih = bidz * 4 * SH + rh - PH;
uint32_t iw = bidy * 4 * SW + rw - PW;
uint32_t to_idx = (rh * RW + rw) * 32 + tidx;
uint32_t c_32 = bidx * 32 + tidx;
uint32_t from_idx = (ih * IW + iw) * C_32 + c_32;
if (ih < IH && iw < IW && c_32 < C_32) {
src_shared_32[to_idx] = src_32[from_idx];
} else {
src_shared_32[to_idx] = 0;
}
}
__syncthreads();
// do convolution
if (c_cur < c_end && oh < OH && ow < OW) {
int32_t dst0 = 0, dst1 = 0, dst2 = 0, dst3 = 0;
#pragma unroll
for (uint32_t fh = 0; fh < FH; ++fh)
#pragma unroll
for (uint32_t fw = 0; fw < FW; ++fw) {
uint32_t rh = tidz * SH + fh * DH, rw = tidy * SW + fw * DW;
uint32_t sval_32 = src_shared_32[(rh * RW + rw) * 32 + tidx];
int32_t sval0 = int8_t((sval_32 >> 0) & 255),
sval1 = int8_t((sval_32 >> 8) & 255),
sval2 = int8_t((sval_32 >> 16) & 255),
sval3 = int8_t((sval_32 >> 24) & 255);
uint32_t fval_32 = flt_shared_32[(fh * FW + fw) * 32 + tidx];
int32_t fval0 = int8_t((fval_32 >> 0) & 255),
fval1 = int8_t((fval_32 >> 8) & 255),
fval2 = int8_t((fval_32 >> 16) & 255),
fval3 = int8_t((fval_32 >> 24) & 255);
dst0 += sval0 * fval0;
dst1 += sval1 * fval1;
dst2 += sval2 * fval2;
dst3 += sval3 * fval3;
}
dst_shared[tidyz * 129 + tidx * 4 + 0] = dst0;
dst_shared[tidyz * 129 + tidx * 4 + 1] = dst1;
dst_shared[tidyz * 129 + tidx * 4 + 2] = dst2;
dst_shared[tidyz * 129 + tidx * 4 + 3] = dst3;
}
__syncthreads();
if (oh < OH && ow < OW) {
#pragma unroll
for (uint32_t k = 0; k < 4; ++k) {
uint32_t c = c_beg + tidx + k * 32;
if (c < c_end) {
dst[(oh * OW + ow) * C + c] =
dst_shared[tidyz * 129 + tidx + k * 32];
}
}
}
}
} // anonymous namespace
void megdnn::cuda::conv_bias::chanwise::run_fwd_8x8x32(int32_t* dst,
const int8_t* src,
const int8_t* flt,
const Param& param,
cudaStream_t stream) {
uint32_t N = param.batch, C = param.src_chl, IH = param.src_h,
IW = param.src_w, OH = param.out_h, OW = param.out_w,
FH = param.flt_h, FW = param.flt_w, SH = param.stride_h,
SW = param.stride_w, DH = param.dilation_h, DW = param.dilation_w;
dim3 threads(32, 4, 4);
dim3 blocks(DIVUP(C, 128), DIVUP(OW, 4), DIVUP(OH, 4));
// shared mem size: filter*2 + src + dst
// filter
uint32_t filter_shared_mem_size = 128 * FH * FW * sizeof(int8_t);
// src
uint32_t RH, RW;
get_receptive_field_size(4, 4, FH, FW, SH, SW, DH, DW, &RH, &RW);
uint32_t src_shared_mem_size = 128 * RH * RW * sizeof(int8_t);
// dst
// use 129 instead of 128 to avoid shared memory bank conflict
uint32_t dst_shared_mem_size = 129 * 4 * 4 * sizeof(int32_t);
uint32_t shared_mem_size = 2 * filter_shared_mem_size +
src_shared_mem_size + dst_shared_mem_size;
void (*kptr)(int32_t*, const int8_t*, const int8_t*, Param) = kern<0>;
if (FH == 1 && FW == 1)
kptr = kern<1>;
if (FH == 3 && FW == 3)
kptr = kern<3>;
if (FH == 5 && FW == 5)
kptr = kern<5>;
for (uint32_t n = 0; n < N; ++n) {
int32_t* dptr = dst + n * C * OH * OW;
const int8_t* sptr = src + n * C * IH * IW;
const int8_t* fptr = flt;
kptr<<<blocks, threads, shared_mem_size, stream>>>(dptr, sptr, fptr,
param);
}
after_kernel_launch();
}
// vim: syntax=cpp.doxygen
|
2b62e7ac48d83dc5e7bf79774955f1b3d6dd4753.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <accelerate_cuda.h>
extern "C" __global__ void generate(const Int64 shIn0_2, const Int64 shIn0_1, const Int64 shIn0_0, const double* __restrict__ arrIn0_0, const Int64 shIn1_2, const Int64 shIn1_1, const Int64 shIn1_0, const double* __restrict__ arrIn1_23, const double* __restrict__ arrIn1_22, const double* __restrict__ arrIn1_21, const double* __restrict__ arrIn1_20, const double* __restrict__ arrIn1_19, const double* __restrict__ arrIn1_18, const double* __restrict__ arrIn1_17, const double* __restrict__ arrIn1_16, const double* __restrict__ arrIn1_15, const double* __restrict__ arrIn1_14, const double* __restrict__ arrIn1_13, const double* __restrict__ arrIn1_12, const double* __restrict__ arrIn1_11, const double* __restrict__ arrIn1_10, const double* __restrict__ arrIn1_9, const double* __restrict__ arrIn1_8, const double* __restrict__ arrIn1_7, const double* __restrict__ arrIn1_6, const double* __restrict__ arrIn1_5, const double* __restrict__ arrIn1_4, const double* __restrict__ arrIn1_3, const double* __restrict__ arrIn1_2, const double* __restrict__ arrIn1_1, const double* __restrict__ arrIn1_0, const Int64 shIn2_2, const Int64 shIn2_1, const Int64 shIn2_0, const double* __restrict__ arrIn2_2, const double* __restrict__ arrIn2_1, const double* __restrict__ arrIn2_0, const double* __restrict__ arrIn3_0, const Int64 shOut_2, const Int64 shOut_1, const Int64 shOut_0, double* __restrict__ arrOut_2, double* __restrict__ arrOut_1, double* __restrict__ arrOut_0)
{
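  // Accelerate-generated kernel: each thread of the grid-stride loop decodes its
  // flat index into (sh2, sh1, sh0), sums the 2x2x2 neighbourhood toward lower
  // indices from the 24 component arrays of arrIn1, normalises by arrIn0, zeroes
  // each component on the first plane of its axis, scales by the scalar
  // arrIn3_0[0], adds the running value from arrIn2 and flushes magnitudes below
  // 1e-7 to zero before writing the three outputs.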
const int shapeSize = shOut_2 * (shOut_1 * shOut_0);
const int gridSize = blockDim.x * gridDim.x;
int ix;
for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) {
const Int64 tmp_0 = ix;
const Int64 tmp_1 = tmp_0 / shOut_0;
const Int64 tmp_2 = tmp_1 / shOut_1;
const Int64 sh2 = tmp_2 % shOut_2;
const Int64 sh1 = tmp_1 % shOut_1;
const Int64 sh0 = tmp_0 % shOut_0;
const Int64 v0 = (sh2 * shIn2_1 + sh1) * shIn2_0 + sh0;
const double v1 = arrIn2_2[v0];
const double v2 = arrIn2_1[v0];
const double v3 = arrIn2_0[v0];
const Int64 v4 = (Int64) 0;
const Int64 v5 = (Int64) -1 + sh1;
const Int64 v6 = (Int64) -1 + sh2;
const Word8 v7 = v4 <= v6 && (v6 < shIn1_0 && (v4 <= v5 && (v5 < shIn1_0 && (v4 <= sh0 && sh0 < shIn1_0))));
double lv90;
double lv91;
double lv92;
if (v7) {
const Int64 v8 = (v6 * shIn1_1 + v5) * shIn1_0 + sh0;
lv92 = arrIn1_2[v8];
lv91 = arrIn1_1[v8];
lv90 = arrIn1_0[v8];
} else {
lv92 = 0.0;
lv91 = 0.0;
lv90 = 0.0;
}
const Int64 v10 = (Int64) -1 + sh0;
const Int64 v11 = (Int64) -1 + sh1;
const Int64 v12 = (Int64) -1 + sh2;
const Word8 v13 = v4 <= v12 && (v12 < shIn1_0 && (v4 <= v11 && (v11 < shIn1_0 && (v4 <= v10 && v10 < shIn1_0))));
double lv150;
double lv151;
double lv152;
if (v13) {
const Int64 v14 = (v12 * shIn1_1 + v11) * shIn1_0 + v10;
lv152 = arrIn1_5[v14];
lv151 = arrIn1_4[v14];
lv150 = arrIn1_3[v14];
} else {
lv152 = 0.0;
lv151 = 0.0;
lv150 = 0.0;
}
const Int64 v16 = (Int64) -1 + sh0;
const Int64 v17 = (Int64) -1 + sh2;
const Word8 v18 = v4 <= v17 && (v17 < shIn1_0 && (v4 <= sh1 && (sh1 < shIn1_0 && (v4 <= v16 && v16 < shIn1_0))));
double lv200;
double lv201;
double lv202;
if (v18) {
const Int64 v19 = (v17 * shIn1_1 + sh1) * shIn1_0 + v16;
lv202 = arrIn1_8[v19];
lv201 = arrIn1_7[v19];
lv200 = arrIn1_6[v19];
} else {
lv202 = 0.0;
lv201 = 0.0;
lv200 = 0.0;
}
const Int64 v21 = (Int64) -1 + sh2;
const Word8 v22 = v4 <= v21 && (v21 < shIn1_0 && (v4 <= sh1 && (sh1 < shIn1_0 && (v4 <= sh0 && sh0 < shIn1_0))));
double lv240;
double lv241;
double lv242;
if (v22) {
const Int64 v23 = (v21 * shIn1_1 + sh1) * shIn1_0 + sh0;
lv242 = arrIn1_11[v23];
lv241 = arrIn1_10[v23];
lv240 = arrIn1_9[v23];
} else {
lv242 = 0.0;
lv241 = 0.0;
lv240 = 0.0;
}
const Int64 v25 = (Int64) -1 + sh1;
const Word8 v26 = v4 <= sh2 && (sh2 < shIn1_0 && (v4 <= v25 && (v25 < shIn1_0 && (v4 <= sh0 && sh0 < shIn1_0))));
double lv280;
double lv281;
double lv282;
if (v26) {
const Int64 v27 = (sh2 * shIn1_1 + v25) * shIn1_0 + sh0;
lv282 = arrIn1_14[v27];
lv281 = arrIn1_13[v27];
lv280 = arrIn1_12[v27];
} else {
lv282 = 0.0;
lv281 = 0.0;
lv280 = 0.0;
}
const Int64 v29 = (Int64) -1 + sh0;
const Int64 v30 = (Int64) -1 + sh1;
const Word8 v31 = v4 <= sh2 && (sh2 < shIn1_0 && (v4 <= v30 && (v30 < shIn1_0 && (v4 <= v29 && v29 < shIn1_0))));
double lv330;
double lv331;
double lv332;
if (v31) {
const Int64 v32 = (sh2 * shIn1_1 + v30) * shIn1_0 + v29;
lv332 = arrIn1_17[v32];
lv331 = arrIn1_16[v32];
lv330 = arrIn1_15[v32];
} else {
lv332 = 0.0;
lv331 = 0.0;
lv330 = 0.0;
}
const Word8 v34 = v4 <= sh2 && (sh2 < shIn1_0 && (v4 <= sh1 && (sh1 < shIn1_0 && (v4 <= sh0 && sh0 < shIn1_0))));
double lv360;
double lv361;
double lv362;
if (v34) {
const Int64 v35 = (sh2 * shIn1_1 + sh1) * shIn1_0 + sh0;
lv362 = arrIn1_23[v35];
lv361 = arrIn1_22[v35];
lv360 = arrIn1_21[v35];
} else {
lv362 = 0.0;
lv361 = 0.0;
lv360 = 0.0;
}
const Int64 v37 = (Int64) -1 + sh0;
const Word8 v38 = v4 <= sh2 && (sh2 < shIn1_0 && (v4 <= sh1 && (sh1 < shIn1_0 && (v4 <= v37 && v37 < shIn1_0))));
double lv400;
double lv401;
double lv402;
if (v38) {
const Int64 v39 = (sh2 * shIn1_1 + sh1) * shIn1_0 + v37;
lv402 = arrIn1_20[v39];
lv401 = arrIn1_19[v39];
lv400 = arrIn1_18[v39];
} else {
lv402 = 0.0;
lv401 = 0.0;
lv400 = 0.0;
}
const double v41 = lv362 + lv402;
const double v42 = lv361 + lv401;
const double v43 = lv360 + lv400;
const double v44 = v41 + lv332;
const double v45 = v42 + lv331;
const double v46 = v43 + lv330;
const double v47 = v44 + lv282;
const double v48 = v45 + lv281;
const double v49 = v46 + lv280;
const double v50 = v47 + lv242;
const double v51 = v48 + lv241;
const double v52 = v49 + lv240;
const double v53 = v50 + lv202;
const double v54 = v51 + lv201;
const double v55 = v52 + lv200;
const double v56 = v53 + lv152;
const double v57 = v54 + lv151;
const double v58 = v55 + lv150;
const double v59 = v56 + lv92;
const double v60 = v57 + lv91;
const double v61 = v58 + lv90;
const Int64 v62 = (sh2 * shIn0_1 + sh1) * shIn0_0 + sh0;
const double v63 = arrIn0_0[v62];
const double v64 = v59 / v63;
const double v65 = v60 / v63;
const double v66 = v61 / v63;
const Word8 v67 = v4 == sh0;
double lv680;
if (v67) {
lv680 = 0.0;
} else {
lv680 = v64;
}
const Word8 v69 = v4 == sh1;
double lv700;
if (v69) {
lv700 = 0.0;
} else {
lv700 = v65;
}
const Word8 v71 = v4 == sh2;
double lv720;
if (v71) {
lv720 = 0.0;
} else {
lv720 = v66;
}
const Int64 v73 = 0;
const double v74 = arrIn3_0[v73];
const double v75 = lv680 * v74;
const double v76 = lv700 * v74;
const double v77 = lv720 * v74;
const double v78 = v1 + v75;
const double v79 = v2 + v76;
const double v80 = v3 + v77;
const Word8 v81 = fabs(v78) < 1.0e-7;
double lv820;
if (v81) {
lv820 = 0.0;
} else {
lv820 = v78;
}
const Word8 v83 = fabs(v79) < 1.0e-7;
double lv840;
if (v83) {
lv840 = 0.0;
} else {
lv840 = v79;
}
const Word8 v85 = fabs(v80) < 1.0e-7;
double lv860;
if (v85) {
lv860 = 0.0;
} else {
lv860 = v80;
}
arrOut_2[ix] = lv820;
arrOut_1[ix] = lv840;
arrOut_0[ix] = lv860;
}
}
|
2b62e7ac48d83dc5e7bf79774955f1b3d6dd4753.cu
|
#include <accelerate_cuda.h>
extern "C" __global__ void generate(const Int64 shIn0_2, const Int64 shIn0_1, const Int64 shIn0_0, const double* __restrict__ arrIn0_0, const Int64 shIn1_2, const Int64 shIn1_1, const Int64 shIn1_0, const double* __restrict__ arrIn1_23, const double* __restrict__ arrIn1_22, const double* __restrict__ arrIn1_21, const double* __restrict__ arrIn1_20, const double* __restrict__ arrIn1_19, const double* __restrict__ arrIn1_18, const double* __restrict__ arrIn1_17, const double* __restrict__ arrIn1_16, const double* __restrict__ arrIn1_15, const double* __restrict__ arrIn1_14, const double* __restrict__ arrIn1_13, const double* __restrict__ arrIn1_12, const double* __restrict__ arrIn1_11, const double* __restrict__ arrIn1_10, const double* __restrict__ arrIn1_9, const double* __restrict__ arrIn1_8, const double* __restrict__ arrIn1_7, const double* __restrict__ arrIn1_6, const double* __restrict__ arrIn1_5, const double* __restrict__ arrIn1_4, const double* __restrict__ arrIn1_3, const double* __restrict__ arrIn1_2, const double* __restrict__ arrIn1_1, const double* __restrict__ arrIn1_0, const Int64 shIn2_2, const Int64 shIn2_1, const Int64 shIn2_0, const double* __restrict__ arrIn2_2, const double* __restrict__ arrIn2_1, const double* __restrict__ arrIn2_0, const double* __restrict__ arrIn3_0, const Int64 shOut_2, const Int64 shOut_1, const Int64 shOut_0, double* __restrict__ arrOut_2, double* __restrict__ arrOut_1, double* __restrict__ arrOut_0)
{
const int shapeSize = shOut_2 * (shOut_1 * shOut_0);
const int gridSize = blockDim.x * gridDim.x;
int ix;
for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) {
const Int64 tmp_0 = ix;
const Int64 tmp_1 = tmp_0 / shOut_0;
const Int64 tmp_2 = tmp_1 / shOut_1;
const Int64 sh2 = tmp_2 % shOut_2;
const Int64 sh1 = tmp_1 % shOut_1;
const Int64 sh0 = tmp_0 % shOut_0;
const Int64 v0 = (sh2 * shIn2_1 + sh1) * shIn2_0 + sh0;
const double v1 = arrIn2_2[v0];
const double v2 = arrIn2_1[v0];
const double v3 = arrIn2_0[v0];
const Int64 v4 = (Int64) 0;
const Int64 v5 = (Int64) -1 + sh1;
const Int64 v6 = (Int64) -1 + sh2;
const Word8 v7 = v4 <= v6 && (v6 < shIn1_0 && (v4 <= v5 && (v5 < shIn1_0 && (v4 <= sh0 && sh0 < shIn1_0))));
double lv90;
double lv91;
double lv92;
if (v7) {
const Int64 v8 = (v6 * shIn1_1 + v5) * shIn1_0 + sh0;
lv92 = arrIn1_2[v8];
lv91 = arrIn1_1[v8];
lv90 = arrIn1_0[v8];
} else {
lv92 = 0.0;
lv91 = 0.0;
lv90 = 0.0;
}
const Int64 v10 = (Int64) -1 + sh0;
const Int64 v11 = (Int64) -1 + sh1;
const Int64 v12 = (Int64) -1 + sh2;
const Word8 v13 = v4 <= v12 && (v12 < shIn1_0 && (v4 <= v11 && (v11 < shIn1_0 && (v4 <= v10 && v10 < shIn1_0))));
double lv150;
double lv151;
double lv152;
if (v13) {
const Int64 v14 = (v12 * shIn1_1 + v11) * shIn1_0 + v10;
lv152 = arrIn1_5[v14];
lv151 = arrIn1_4[v14];
lv150 = arrIn1_3[v14];
} else {
lv152 = 0.0;
lv151 = 0.0;
lv150 = 0.0;
}
const Int64 v16 = (Int64) -1 + sh0;
const Int64 v17 = (Int64) -1 + sh2;
const Word8 v18 = v4 <= v17 && (v17 < shIn1_0 && (v4 <= sh1 && (sh1 < shIn1_0 && (v4 <= v16 && v16 < shIn1_0))));
double lv200;
double lv201;
double lv202;
if (v18) {
const Int64 v19 = (v17 * shIn1_1 + sh1) * shIn1_0 + v16;
lv202 = arrIn1_8[v19];
lv201 = arrIn1_7[v19];
lv200 = arrIn1_6[v19];
} else {
lv202 = 0.0;
lv201 = 0.0;
lv200 = 0.0;
}
const Int64 v21 = (Int64) -1 + sh2;
const Word8 v22 = v4 <= v21 && (v21 < shIn1_0 && (v4 <= sh1 && (sh1 < shIn1_0 && (v4 <= sh0 && sh0 < shIn1_0))));
double lv240;
double lv241;
double lv242;
if (v22) {
const Int64 v23 = (v21 * shIn1_1 + sh1) * shIn1_0 + sh0;
lv242 = arrIn1_11[v23];
lv241 = arrIn1_10[v23];
lv240 = arrIn1_9[v23];
} else {
lv242 = 0.0;
lv241 = 0.0;
lv240 = 0.0;
}
const Int64 v25 = (Int64) -1 + sh1;
const Word8 v26 = v4 <= sh2 && (sh2 < shIn1_0 && (v4 <= v25 && (v25 < shIn1_0 && (v4 <= sh0 && sh0 < shIn1_0))));
double lv280;
double lv281;
double lv282;
if (v26) {
const Int64 v27 = (sh2 * shIn1_1 + v25) * shIn1_0 + sh0;
lv282 = arrIn1_14[v27];
lv281 = arrIn1_13[v27];
lv280 = arrIn1_12[v27];
} else {
lv282 = 0.0;
lv281 = 0.0;
lv280 = 0.0;
}
const Int64 v29 = (Int64) -1 + sh0;
const Int64 v30 = (Int64) -1 + sh1;
const Word8 v31 = v4 <= sh2 && (sh2 < shIn1_0 && (v4 <= v30 && (v30 < shIn1_0 && (v4 <= v29 && v29 < shIn1_0))));
double lv330;
double lv331;
double lv332;
if (v31) {
const Int64 v32 = (sh2 * shIn1_1 + v30) * shIn1_0 + v29;
lv332 = arrIn1_17[v32];
lv331 = arrIn1_16[v32];
lv330 = arrIn1_15[v32];
} else {
lv332 = 0.0;
lv331 = 0.0;
lv330 = 0.0;
}
const Word8 v34 = v4 <= sh2 && (sh2 < shIn1_0 && (v4 <= sh1 && (sh1 < shIn1_0 && (v4 <= sh0 && sh0 < shIn1_0))));
double lv360;
double lv361;
double lv362;
if (v34) {
const Int64 v35 = (sh2 * shIn1_1 + sh1) * shIn1_0 + sh0;
lv362 = arrIn1_23[v35];
lv361 = arrIn1_22[v35];
lv360 = arrIn1_21[v35];
} else {
lv362 = 0.0;
lv361 = 0.0;
lv360 = 0.0;
}
const Int64 v37 = (Int64) -1 + sh0;
const Word8 v38 = v4 <= sh2 && (sh2 < shIn1_0 && (v4 <= sh1 && (sh1 < shIn1_0 && (v4 <= v37 && v37 < shIn1_0))));
double lv400;
double lv401;
double lv402;
if (v38) {
const Int64 v39 = (sh2 * shIn1_1 + sh1) * shIn1_0 + v37;
lv402 = arrIn1_20[v39];
lv401 = arrIn1_19[v39];
lv400 = arrIn1_18[v39];
} else {
lv402 = 0.0;
lv401 = 0.0;
lv400 = 0.0;
}
const double v41 = lv362 + lv402;
const double v42 = lv361 + lv401;
const double v43 = lv360 + lv400;
const double v44 = v41 + lv332;
const double v45 = v42 + lv331;
const double v46 = v43 + lv330;
const double v47 = v44 + lv282;
const double v48 = v45 + lv281;
const double v49 = v46 + lv280;
const double v50 = v47 + lv242;
const double v51 = v48 + lv241;
const double v52 = v49 + lv240;
const double v53 = v50 + lv202;
const double v54 = v51 + lv201;
const double v55 = v52 + lv200;
const double v56 = v53 + lv152;
const double v57 = v54 + lv151;
const double v58 = v55 + lv150;
const double v59 = v56 + lv92;
const double v60 = v57 + lv91;
const double v61 = v58 + lv90;
const Int64 v62 = (sh2 * shIn0_1 + sh1) * shIn0_0 + sh0;
const double v63 = arrIn0_0[v62];
const double v64 = v59 / v63;
const double v65 = v60 / v63;
const double v66 = v61 / v63;
const Word8 v67 = v4 == sh0;
double lv680;
if (v67) {
lv680 = 0.0;
} else {
lv680 = v64;
}
const Word8 v69 = v4 == sh1;
double lv700;
if (v69) {
lv700 = 0.0;
} else {
lv700 = v65;
}
const Word8 v71 = v4 == sh2;
double lv720;
if (v71) {
lv720 = 0.0;
} else {
lv720 = v66;
}
const Int64 v73 = 0;
const double v74 = arrIn3_0[v73];
const double v75 = lv680 * v74;
const double v76 = lv700 * v74;
const double v77 = lv720 * v74;
const double v78 = v1 + v75;
const double v79 = v2 + v76;
const double v80 = v3 + v77;
const Word8 v81 = fabs(v78) < 1.0e-7;
double lv820;
if (v81) {
lv820 = 0.0;
} else {
lv820 = v78;
}
const Word8 v83 = fabs(v79) < 1.0e-7;
double lv840;
if (v83) {
lv840 = 0.0;
} else {
lv840 = v79;
}
const Word8 v85 = fabs(v80) < 1.0e-7;
double lv860;
if (v85) {
lv860 = 0.0;
} else {
lv860 = v80;
}
arrOut_2[ix] = lv820;
arrOut_1[ix] = lv840;
arrOut_0[ix] = lv860;
}
}
|
196d5c4ef709147fc924e473b248bf2e7415d1e9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
#include "shared.h"
extern "C" __global__ void postProcess(const float4* accumBuffer, uint32_t imageSizeX, uint32_t imageSizeY, uint32_t numAccumFrames,
float4* outputBuffer) {
uint32_t ipx = blockDim.x * blockIdx.x + threadIdx.x;
uint32_t ipy = blockDim.y * blockIdx.y + threadIdx.y;
if (ipx >= imageSizeX || ipy >= imageSizeY)
return;
uint32_t idx = ipy * imageSizeX + ipx;
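    // average the accumulated radiance over the frame count, then apply a
    // simple exponential tone map (1 - e^-x) per channel before output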
float3 pix = getXYZ(accumBuffer[idx]) / (float)numAccumFrames;
pix.x = 1 - ::exp(-pix.x);
pix.y = 1 - ::exp(-pix.y);
pix.z = 1 - ::exp(-pix.z);
outputBuffer[idx] = make_float4(pix, 1.0f);
}
|
196d5c4ef709147fc924e473b248bf2e7415d1e9.cu
|
#pragma once
#include "shared.h"
extern "C" __global__ void postProcess(const float4* accumBuffer, uint32_t imageSizeX, uint32_t imageSizeY, uint32_t numAccumFrames,
float4* outputBuffer) {
uint32_t ipx = blockDim.x * blockIdx.x + threadIdx.x;
uint32_t ipy = blockDim.y * blockIdx.y + threadIdx.y;
if (ipx >= imageSizeX || ipy >= imageSizeY)
return;
uint32_t idx = ipy * imageSizeX + ipx;
float3 pix = getXYZ(accumBuffer[idx]) / (float)numAccumFrames;
pix.x = 1 - std::exp(-pix.x);
pix.y = 1 - std::exp(-pix.y);
pix.z = 1 - std::exp(-pix.z);
outputBuffer[idx] = make_float4(pix, 1.0f);
}
|
047c2b8b581e377611f25be5a0bb50a8171b1789.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// GroupNorm op in Caffe2 for GPU
// Written by Kaiming He
// Improved by Xiaomeng Yang
// see https://arxiv.org/abs/1803.08494
// This is a stand-alone op: Y = gamma * (X - mu) / sig + beta
// ------------------------------------------------------------------
#include "caffe2/operators/group_norm_op.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
#include "caffe2/utils/math/reduce.cuh"
namespace caffe2 {
namespace {
template <typename T>
__global__ void ComputeFusedParamsCUDAKernel(
const int N,
const int G,
const int K,
const T* mu,
const T* rsig,
const T* gamma,
const T* beta,
T* scale,
T* bias);
template <>
__global__ void ComputeFusedParamsCUDAKernel<float>(
const int N,
const int G,
const int K,
const float* mu,
const float* rsig,
const float* gamma,
const float* beta,
float* scale,
float* bias) {
const int C = G * K;
const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (index < N * C) {
const int ng = index / K;
const int c = index % C;
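    // fused affine parameters: scale = gamma * rsig and
    // bias = beta - scale * mu, so that Y = scale * X + bias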
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
const float scale_val = __ldg(gamma + c) * __ldg(rsig + ng);
scale[index] = scale_val;
bias[index] = fmaf(-scale_val, __ldg(mu + ng), __ldg(beta + c));
#else
const float scale_val = gamma[c] * rsig[ng];
scale[index] = scale_val;
bias[index] = fmaf(-scale_val, mu[ng], beta[c]);
#endif
}
}
template <typename T, StorageOrder kOrder>
__global__ void GroupNormForwardCUDAKernel(
const int N,
const int C,
const int HxW,
const T* X,
const T* scale,
const T* bias,
T* Y);
template <>
__global__ void GroupNormForwardCUDAKernel<float, StorageOrder::NCHW>(
const int N,
const int C,
const int HxW,
const float* X,
const float* scale,
const float* bias,
float* Y) {
const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (index < N * C * HxW) {
const int nc = index / HxW;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
Y[index] = fmaf(__ldg(X + index), __ldg(scale + nc), __ldg(bias + nc));
#else
Y[index] = fmaf(X[index], scale[nc], bias[nc]);
#endif
}
}
template <>
__global__ void GroupNormForwardCUDAKernel<float, StorageOrder::NHWC>(
const int N,
const int C,
const int HxW,
const float* X,
const float* scale,
const float* bias,
float* Y) {
const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (index < N * C * HxW) {
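    // NHWC: recover n = index / (HxW * C) and c = index % C, so nc addresses the
    // per-(n, c) scale/bias produced by ComputeFusedParamsCUDAKernel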
const int nc = index / (HxW * C) * C + index % C;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
Y[index] = fmaf(__ldg(X + index), __ldg(scale + nc), __ldg(bias + nc));
#else
Y[index] = fmaf(X[index], scale[nc], bias[nc]);
#endif
}
}
template <typename T>
__global__ void ComputeInternalGradientsNCHWCUDAKernel(
const int HxW,
const T* dY,
const T* X,
T* ds,
T* db) {
__shared__ typename BlockReduce<T>::TempStorage ds_storage;
__shared__ typename BlockReduce<T>::TempStorage db_storage;
const int nc = blockIdx.x;
T ds_sum = 0;
T db_sum = 0;
for (int i = threadIdx.x; i < HxW; i += blockDim.x) {
const int index = nc * HxW + i;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
ds_sum += __ldg(dY + index) * __ldg(X + index);
db_sum += __ldg(dY + index);
#else
ds_sum += dY[index] * X[index];
db_sum += dY[index];
#endif
}
ds_sum = BlockReduce<T>(ds_storage).Sum(ds_sum);
db_sum = BlockReduce<T>(db_storage).Sum(db_sum);
if (threadIdx.x == 0) {
ds[nc] = ds_sum;
db[nc] = db_sum;
}
}
// Math:
// Y = gamma * (X - mu) * rsig + beta
// let s = gamma * rsig
// let b = beta - gamma * mu * rsig
// Y = s * X + b
// let n = K * HxW
// dL/dX = dL/dY * dY/dX = dL/dY * (d(s * X)/dX + db/dX)
// d(s * X)/dX = s + X * ds/dX = s + gamma * X * drsig/dX
// db/dX = -gamma * u * drsig/dX - gamma * rsig * dmu/dX
// drsig/dX = -rsig^3 * (X - mu) / n
// dmu/dX = 1 / n
template <typename T>
__global__ void ComputeYGradientScaleCUDAKernel(
const int N,
const int G,
const int K,
const T* rsig,
const T* gamma,
T* dY_scale) {
const int C = G * K;
const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (index < N * C) {
const int ng = index / K;
const int c = index % C;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
dY_scale[index] = __ldg(gamma + c) * __ldg(rsig + ng);
#else
dY_scale[index] = gamma[c] * rsig[ng];
#endif
}
}
template <typename T>
__global__ void ComputeXScaleAndBiasCUDAKernel(
const int G,
const int K,
const T alpha,
const T* ds,
const T* db,
const T* mu,
const T* rsig,
const T* gamma,
T* X_scale,
T* bias);
template <>
__global__ void ComputeXScaleAndBiasCUDAKernel<float>(
const int G,
const int K,
const float alpha,
const float* ds,
const float* db,
const float* mu,
const float* rsig,
const float* gamma,
float* X_scale,
float* bias) {
__shared__ typename BlockReduce<float>::TempStorage ds_storage;
__shared__ typename BlockReduce<float>::TempStorage db_storage;
const int n = blockIdx.x;
const int g = blockIdx.y;
const int ng = n * G + g;
float ds_sum = 0;
float db_sum = 0;
for (int i = threadIdx.x; i < K; i += blockDim.x) {
const int index = ng * K + i;
const int c = g * K + i;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
ds_sum += __ldg(ds + index) * __ldg(gamma + c);
db_sum += __ldg(db + index) * __ldg(gamma + c);
#else
ds_sum += ds[index] * gamma[c];
db_sum += db[index] * gamma[c];
#endif
}
ds_sum = BlockReduce<float>(ds_storage).Sum(ds_sum);
db_sum = BlockReduce<float>(db_storage).Sum(db_sum);
if (threadIdx.x == 0) {
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
const float x = fmaf(db_sum, __ldg(mu + ng), -ds_sum) *
math::utils::Cube<float>(__ldg(rsig + ng)) * alpha;
X_scale[ng] = x;
bias[ng] = -fmaf(x, __ldg(mu + ng), db_sum * __ldg(rsig + ng) * alpha);
#else
const float x = fmaf(db_sum, mu[ng], -ds_sum) *
math::utils::Cube<float>(rsig[ng]) * alpha;
X_scale[ng] = x;
bias[ng] = -fmaf(x, mu[ng], db_sum * rsig[ng] * alpha);
#endif
}
}
template <typename T, StorageOrder kOrder>
__global__ void GroupNormBackwardCUDAKernel(
const int N,
const int G,
const int K,
const int HxW,
const T* dY_scale,
const T* dY,
const T* X_scale,
const T* X,
const T* bias,
T* dX);
template <>
__global__ void GroupNormBackwardCUDAKernel<float, StorageOrder::NCHW>(
const int N,
const int G,
const int K,
const int HxW,
const float* dY_scale,
const float* dY,
const float* X_scale,
const float* X,
const float* bias,
float* dX) {
const int C = G * K;
const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (index < N * C * HxW) {
const int nc = index / HxW;
const int ng = nc / K;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
dX[index] = fmaf(
__ldg(dY_scale + nc),
__ldg(dY + index),
fmaf(__ldg(X_scale + ng), __ldg(X + index), __ldg(bias + ng)));
#else
dX[index] =
fmaf(dY_scale[nc], dY[index], fmaf(X_scale[ng], X[index], bias[ng]));
#endif
}
}
template <>
__global__ void GroupNormBackwardCUDAKernel<float, StorageOrder::NHWC>(
const int N,
const int G,
const int K,
const int HxW,
const float* dY_scale,
const float* dY,
const float* X_scale,
const float* X,
const float* bias,
float* dX) {
const int C = G * K;
const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (index < N * C * HxW) {
const int nc = index / (HxW * C) * C + index % C;
const int ng = nc / K;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
dX[index] = fmaf(
__ldg(dY_scale + nc),
__ldg(dY + index),
fmaf(__ldg(X_scale + ng), __ldg(X + index), __ldg(bias + ng)));
#else
dX[index] =
fmaf(dY_scale[nc], dY[index], fmaf(X_scale[ng], X[index], bias[ng]));
#endif
}
}
template <typename T>
__global__ void GammaBetaBackwardCUDAKernel(
const int N,
const int G,
const int K,
const T* ds,
const T* db,
const T* mu,
const T* rsig,
T* dgamma,
T* dbeta);
template <>
__global__ void GammaBetaBackwardCUDAKernel<float>(
const int N,
const int G,
const int K,
const float* ds,
const float* db,
const float* mu,
const float* rsig,
float* dgamma,
float* dbeta) {
__shared__ typename BlockReduce<float>::TempStorage dg_storage;
__shared__ typename BlockReduce<float>::TempStorage db_storage;
const int C = G * K;
const int g = blockIdx.x;
const int k = blockIdx.y;
const int c = g * K + k;
float dg_sum = 0;
float db_sum = 0;
for (int i = threadIdx.x; i < N; i += blockDim.x) {
const int nc = i * C + c;
const int ng = i * G + g;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
dg_sum += fmaf(-__ldg(db + nc), __ldg(mu + ng), __ldg(ds + nc)) *
__ldg(rsig + ng);
db_sum += __ldg(db + nc);
#else
dg_sum += fmaf(-db[nc], mu[ng], ds[nc]) * rsig[ng];
db_sum += db[nc];
#endif
}
dg_sum = BlockReduce<float>(dg_storage).Sum(dg_sum);
db_sum = BlockReduce<float>(db_storage).Sum(db_sum);
if (threadIdx.x == 0) {
dgamma[c] = dg_sum;
dbeta[c] = db_sum;
}
}
} // namespace
template <>
void GroupNormOp<float, CUDAContext>::ComputeFusedParams(
const int N,
const int G,
const int K,
const float* mu,
const float* rsig,
const float* gamma,
const float* beta,
float* scale,
float* bias) {
const int M = math::DivUp(N * G * K, CAFFE_CUDA_NUM_THREADS);
hipLaunchKernelGGL(( ComputeFusedParamsCUDAKernel<float>)
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
N, G, K, mu, rsig, gamma, beta, scale, bias);
}
template <>
void GroupNormOp<float, CUDAContext>::GroupNormForwardNCHW(
const int N,
const int C,
const int HxW,
const float* X,
const float* scale,
const float* bias,
float* Y) {
const int M = math::DivUp(N * C * HxW, CAFFE_CUDA_NUM_THREADS);
hipLaunchKernelGGL(( GroupNormForwardCUDAKernel<float, StorageOrder::NCHW>)
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
N, C, HxW, X, scale, bias, Y);
}
template <>
void GroupNormOp<float, CUDAContext>::GroupNormForwardNHWC(
const int N,
const int C,
const int HxW,
const float* X,
const float* scale,
const float* bias,
float* Y) {
const int M = math::DivUp(N * C * HxW, CAFFE_CUDA_NUM_THREADS);
hipLaunchKernelGGL(( GroupNormForwardCUDAKernel<float, StorageOrder::NHWC>)
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
N, C, HxW, X, scale, bias, Y);
}
// Math:
// let: s = gamma * rsig
// let: b = beta - mu * gamma * rsig
// then: Y = s * X + b
template <>
bool GroupNormGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW(
const int N,
const int G,
const int K,
const int HxW,
const float* dY_data,
const float* X_data,
const float* mu_data,
const float* rsig_data,
const float* gamma_data,
float* dX_data,
float* dgamma_data,
float* dbeta_data) {
const int C = G * K;
ReinitializeTensor(&ds_, {N, C}, at::dtype<float>().device(CUDA));
ReinitializeTensor(&db_, {N, C}, at::dtype<float>().device(CUDA));
ReinitializeTensor(&dY_scale_, {N, C}, at::dtype<float>().device(CUDA));
ReinitializeTensor(&X_scale_, {N, G}, at::dtype<float>().device(CUDA));
ReinitializeTensor(&bias_, {N, G}, at::dtype<float>().device(CUDA));
float* ds_data = ds_.mutable_data<float>();
float* db_data = db_.mutable_data<float>();
float* dY_scale_data = dY_scale_.mutable_data<float>();
float* X_scale_data = X_scale_.mutable_data<float>();
float* bias_data = bias_.mutable_data<float>();
hipLaunchKernelGGL(( ComputeInternalGradientsNCHWCUDAKernel<float>)
, dim3(N * C), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
HxW, dY_data, X_data, ds_data, db_data);
// Computes dL/dX.
int M = math::DivUp(N * C, CAFFE_CUDA_NUM_THREADS);
hipLaunchKernelGGL(( ComputeYGradientScaleCUDAKernel<float>)
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
N, G, K, rsig_data, gamma_data, dY_scale_data);
hipLaunchKernelGGL(( ComputeXScaleAndBiasCUDAKernel<float>)
, dim3(dim3(N, G)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
G,
K,
1.0f / static_cast<float>(K * HxW),
ds_data,
db_data,
mu_data,
rsig_data,
gamma_data,
X_scale_data,
bias_data);
M = math::DivUp(N * C * HxW, CAFFE_CUDA_NUM_THREADS);
hipLaunchKernelGGL(( GroupNormBackwardCUDAKernel<float, StorageOrder::NCHW>)
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
N,
G,
K,
HxW,
dY_scale_data,
dY_data,
X_scale_data,
X_data,
bias_data,
dX_data);
// Computes dL/dgamma and dL/dbeta.
hipLaunchKernelGGL(( GammaBetaBackwardCUDAKernel<
float>), dim3(dim3(G, K)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
N, G, K, ds_data, db_data, mu_data, rsig_data, dgamma_data, dbeta_data);
return true;
}
template <>
bool GroupNormGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC(
const int N,
const int G,
const int K,
const int HxW,
const float* dY_data,
const float* X_data,
const float* mu_data,
const float* rsig_data,
const float* gamma_data,
float* dX_data,
float* dgamma_data,
float* dbeta_data) {
const int C = G * K;
ReinitializeTensor(&ds_, {N, C}, at::dtype<float>().device(CUDA));
ReinitializeTensor(&db_, {N, C}, at::dtype<float>().device(CUDA));
ReinitializeTensor(&dY_scale_, {N, C}, at::dtype<float>().device(CUDA));
ReinitializeTensor(&X_scale_, {N, G}, at::dtype<float>().device(CUDA));
ReinitializeTensor(&bias_, {N, G}, at::dtype<float>().device(CUDA));
ReinitializeTensor(&ones_, {HxW}, at::dtype<float>().device(CUDA));
float* ds_data = ds_.mutable_data<float>();
float* db_data = db_.mutable_data<float>();
float* dY_scale_data = dY_scale_.mutable_data<float>();
float* X_scale_data = X_scale_.mutable_data<float>();
float* bias_data = bias_.mutable_data<float>();
float* ones_data = ones_.mutable_data<float>();
math::Set<float, CUDAContext>(HxW, 1.0f, ones_data, &context_);
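  // dX_data is used as scratch for the elementwise product dY * X; the two
  // strided-batched GEMMs below multiply by the ones vector to reduce over the
  // HxW spatial axis, giving ds = sum(dY * X) and db = sum(dY) per (n, c).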
math::Mul<float, CUDAContext>(
N * C * HxW, dY_data, X_data, dX_data, &context_);
math::GemmStridedBatched<float, CUDAContext>(
CblasTrans,
CblasNoTrans,
N,
C,
1,
HxW,
1.0f,
dX_data,
C * HxW,
ones_data,
0,
0.0f,
ds_data,
C,
&context_);
math::GemmStridedBatched<float, CUDAContext>(
CblasTrans,
CblasNoTrans,
N,
C,
1,
HxW,
1.0f,
dY_data,
C * HxW,
ones_data,
0,
0.0f,
db_data,
C,
&context_);
// Computes dL/dX.
int M = math::DivUp(N * C, CAFFE_CUDA_NUM_THREADS);
hipLaunchKernelGGL(( ComputeYGradientScaleCUDAKernel<float>)
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
N, G, K, rsig_data, gamma_data, dY_scale_data);
hipLaunchKernelGGL(( ComputeXScaleAndBiasCUDAKernel<float>)
, dim3(dim3(N, G)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
G,
K,
1.0f / static_cast<float>(K * HxW),
ds_data,
db_data,
mu_data,
rsig_data,
gamma_data,
X_scale_data,
bias_data);
M = math::DivUp(N * C * HxW, CAFFE_CUDA_NUM_THREADS);
hipLaunchKernelGGL(( GroupNormBackwardCUDAKernel<float, StorageOrder::NHWC>)
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
N,
G,
K,
HxW,
dY_scale_data,
dY_data,
X_scale_data,
X_data,
bias_data,
dX_data);
// Computes dL/dgamma and dL/dbeta.
hipLaunchKernelGGL(( GammaBetaBackwardCUDAKernel<
float>), dim3(dim3(G, K)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
N, G, K, ds_data, db_data, mu_data, rsig_data, dgamma_data, dbeta_data);
return true;
}
REGISTER_CUDA_OPERATOR(GroupNorm, GroupNormOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
GroupNormGradient,
GroupNormGradientOp<float, CUDAContext>);
} // namespace caffe2
|
047c2b8b581e377611f25be5a0bb50a8171b1789.cu
|
// ------------------------------------------------------------------
// GroupNorm op in Caffe2 for GPU
// Written by Kaiming He
// Improved by Xiaomeng Yang
// see https://arxiv.org/abs/1803.08494
// This is a stand-alone op: Y = gamma * (X - mu) / sig + beta
// ------------------------------------------------------------------
#include "caffe2/operators/group_norm_op.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
#include "caffe2/utils/math/reduce.cuh"
namespace caffe2 {
namespace {
template <typename T>
__global__ void ComputeFusedParamsCUDAKernel(
const int N,
const int G,
const int K,
const T* mu,
const T* rsig,
const T* gamma,
const T* beta,
T* scale,
T* bias);
template <>
__global__ void ComputeFusedParamsCUDAKernel<float>(
const int N,
const int G,
const int K,
const float* mu,
const float* rsig,
const float* gamma,
const float* beta,
float* scale,
float* bias) {
const int C = G * K;
const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (index < N * C) {
const int ng = index / K;
const int c = index % C;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
const float scale_val = __ldg(gamma + c) * __ldg(rsig + ng);
scale[index] = scale_val;
bias[index] = fmaf(-scale_val, __ldg(mu + ng), __ldg(beta + c));
#else
const float scale_val = gamma[c] * rsig[ng];
scale[index] = scale_val;
bias[index] = fmaf(-scale_val, mu[ng], beta[c]);
#endif
}
}
template <typename T, StorageOrder kOrder>
__global__ void GroupNormForwardCUDAKernel(
const int N,
const int C,
const int HxW,
const T* X,
const T* scale,
const T* bias,
T* Y);
template <>
__global__ void GroupNormForwardCUDAKernel<float, StorageOrder::NCHW>(
const int N,
const int C,
const int HxW,
const float* X,
const float* scale,
const float* bias,
float* Y) {
const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (index < N * C * HxW) {
const int nc = index / HxW;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
Y[index] = fmaf(__ldg(X + index), __ldg(scale + nc), __ldg(bias + nc));
#else
Y[index] = fmaf(X[index], scale[nc], bias[nc]);
#endif
}
}
template <>
__global__ void GroupNormForwardCUDAKernel<float, StorageOrder::NHWC>(
const int N,
const int C,
const int HxW,
const float* X,
const float* scale,
const float* bias,
float* Y) {
const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (index < N * C * HxW) {
const int nc = index / (HxW * C) * C + index % C;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
Y[index] = fmaf(__ldg(X + index), __ldg(scale + nc), __ldg(bias + nc));
#else
Y[index] = fmaf(X[index], scale[nc], bias[nc]);
#endif
}
}
template <typename T>
__global__ void ComputeInternalGradientsNCHWCUDAKernel(
const int HxW,
const T* dY,
const T* X,
T* ds,
T* db) {
__shared__ typename BlockReduce<T>::TempStorage ds_storage;
__shared__ typename BlockReduce<T>::TempStorage db_storage;
const int nc = blockIdx.x;
T ds_sum = 0;
T db_sum = 0;
for (int i = threadIdx.x; i < HxW; i += blockDim.x) {
const int index = nc * HxW + i;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
ds_sum += __ldg(dY + index) * __ldg(X + index);
db_sum += __ldg(dY + index);
#else
ds_sum += dY[index] * X[index];
db_sum += dY[index];
#endif
}
ds_sum = BlockReduce<T>(ds_storage).Sum(ds_sum);
db_sum = BlockReduce<T>(db_storage).Sum(db_sum);
if (threadIdx.x == 0) {
ds[nc] = ds_sum;
db[nc] = db_sum;
}
}
// Math:
// Y = gamma * (X - mu) * rsig + beta
// let s = gamma * rsig
// let b = beta - gamma * mu * rsig
// Y = s * X + b
// let n = K * HxW
// dL/dX = dL/dY * dY/dX = dL/dY * (d(s * X)/dX + db/dX)
// d(s * X)/dX = s + X * ds/dX = s + gamma * X * drsig/dX
// db/dX = -gamma * u * drsig/dX - gamma * rsig * dmu/dX
// drsig/dX = -rsig^3 * (X - mu) / n
// dmu/dX = 1 / n
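// Combining the terms above (a sketch of the algebra, stated to match the
// kernels below rather than re-derived independently): per element,
//   dL/dX = dY_scale * dY + X_scale * X + bias
// where dY_scale = gamma * rsig is computed per (n, c) by
// ComputeYGradientScaleCUDAKernel, and X_scale / bias are the per (n, g)
// coefficients that ComputeXScaleAndBiasCUDAKernel builds from the reduced
// sums ds and db.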
template <typename T>
__global__ void ComputeYGradientScaleCUDAKernel(
const int N,
const int G,
const int K,
const T* rsig,
const T* gamma,
T* dY_scale) {
const int C = G * K;
const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (index < N * C) {
const int ng = index / K;
const int c = index % C;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
dY_scale[index] = __ldg(gamma + c) * __ldg(rsig + ng);
#else
dY_scale[index] = gamma[c] * rsig[ng];
#endif
}
}
template <typename T>
__global__ void ComputeXScaleAndBiasCUDAKernel(
const int G,
const int K,
const T alpha,
const T* ds,
const T* db,
const T* mu,
const T* rsig,
const T* gamma,
T* X_scale,
T* bias);
template <>
__global__ void ComputeXScaleAndBiasCUDAKernel<float>(
const int G,
const int K,
const float alpha,
const float* ds,
const float* db,
const float* mu,
const float* rsig,
const float* gamma,
float* X_scale,
float* bias) {
__shared__ typename BlockReduce<float>::TempStorage ds_storage;
__shared__ typename BlockReduce<float>::TempStorage db_storage;
const int n = blockIdx.x;
const int g = blockIdx.y;
const int ng = n * G + g;
float ds_sum = 0;
float db_sum = 0;
for (int i = threadIdx.x; i < K; i += blockDim.x) {
const int index = ng * K + i;
const int c = g * K + i;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
ds_sum += __ldg(ds + index) * __ldg(gamma + c);
db_sum += __ldg(db + index) * __ldg(gamma + c);
#else
ds_sum += ds[index] * gamma[c];
db_sum += db[index] * gamma[c];
#endif
}
ds_sum = BlockReduce<float>(ds_storage).Sum(ds_sum);
db_sum = BlockReduce<float>(db_storage).Sum(db_sum);
if (threadIdx.x == 0) {
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
const float x = fmaf(db_sum, __ldg(mu + ng), -ds_sum) *
math::utils::Cube<float>(__ldg(rsig + ng)) * alpha;
X_scale[ng] = x;
bias[ng] = -fmaf(x, __ldg(mu + ng), db_sum * __ldg(rsig + ng) * alpha);
#else
const float x = fmaf(db_sum, mu[ng], -ds_sum) *
math::utils::Cube<float>(rsig[ng]) * alpha;
X_scale[ng] = x;
bias[ng] = -fmaf(x, mu[ng], db_sum * rsig[ng] * alpha);
#endif
}
}
template <typename T, StorageOrder kOrder>
__global__ void GroupNormBackwardCUDAKernel(
const int N,
const int G,
const int K,
const int HxW,
const T* dY_scale,
const T* dY,
const T* X_scale,
const T* X,
const T* bias,
T* dX);
template <>
__global__ void GroupNormBackwardCUDAKernel<float, StorageOrder::NCHW>(
const int N,
const int G,
const int K,
const int HxW,
const float* dY_scale,
const float* dY,
const float* X_scale,
const float* X,
const float* bias,
float* dX) {
const int C = G * K;
const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (index < N * C * HxW) {
const int nc = index / HxW;
const int ng = nc / K;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
dX[index] = fmaf(
__ldg(dY_scale + nc),
__ldg(dY + index),
fmaf(__ldg(X_scale + ng), __ldg(X + index), __ldg(bias + ng)));
#else
dX[index] =
fmaf(dY_scale[nc], dY[index], fmaf(X_scale[ng], X[index], bias[ng]));
#endif
}
}
template <>
__global__ void GroupNormBackwardCUDAKernel<float, StorageOrder::NHWC>(
const int N,
const int G,
const int K,
const int HxW,
const float* dY_scale,
const float* dY,
const float* X_scale,
const float* X,
const float* bias,
float* dX) {
const int C = G * K;
const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (index < N * C * HxW) {
const int nc = index / (HxW * C) * C + index % C;
const int ng = nc / K;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
dX[index] = fmaf(
__ldg(dY_scale + nc),
__ldg(dY + index),
fmaf(__ldg(X_scale + ng), __ldg(X + index), __ldg(bias + ng)));
#else
dX[index] =
fmaf(dY_scale[nc], dY[index], fmaf(X_scale[ng], X[index], bias[ng]));
#endif
}
}
template <typename T>
__global__ void GammaBetaBackwardCUDAKernel(
const int N,
const int G,
const int K,
const T* ds,
const T* db,
const T* mu,
const T* rsig,
T* dgamma,
T* dbeta);
template <>
__global__ void GammaBetaBackwardCUDAKernel<float>(
const int N,
const int G,
const int K,
const float* ds,
const float* db,
const float* mu,
const float* rsig,
float* dgamma,
float* dbeta) {
__shared__ typename BlockReduce<float>::TempStorage dg_storage;
__shared__ typename BlockReduce<float>::TempStorage db_storage;
const int C = G * K;
const int g = blockIdx.x;
const int k = blockIdx.y;
const int c = g * K + k;
float dg_sum = 0;
float db_sum = 0;
for (int i = threadIdx.x; i < N; i += blockDim.x) {
const int nc = i * C + c;
const int ng = i * G + g;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
dg_sum += fmaf(-__ldg(db + nc), __ldg(mu + ng), __ldg(ds + nc)) *
__ldg(rsig + ng);
db_sum += __ldg(db + nc);
#else
dg_sum += fmaf(-db[nc], mu[ng], ds[nc]) * rsig[ng];
db_sum += db[nc];
#endif
}
dg_sum = BlockReduce<float>(dg_storage).Sum(dg_sum);
db_sum = BlockReduce<float>(db_storage).Sum(db_sum);
if (threadIdx.x == 0) {
dgamma[c] = dg_sum;
dbeta[c] = db_sum;
}
}
} // namespace
template <>
void GroupNormOp<float, CUDAContext>::ComputeFusedParams(
const int N,
const int G,
const int K,
const float* mu,
const float* rsig,
const float* gamma,
const float* beta,
float* scale,
float* bias) {
const int M = math::DivUp(N * G * K, CAFFE_CUDA_NUM_THREADS);
ComputeFusedParamsCUDAKernel<float>
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
N, G, K, mu, rsig, gamma, beta, scale, bias);
}
template <>
void GroupNormOp<float, CUDAContext>::GroupNormForwardNCHW(
const int N,
const int C,
const int HxW,
const float* X,
const float* scale,
const float* bias,
float* Y) {
const int M = math::DivUp(N * C * HxW, CAFFE_CUDA_NUM_THREADS);
GroupNormForwardCUDAKernel<float, StorageOrder::NCHW>
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
N, C, HxW, X, scale, bias, Y);
}
template <>
void GroupNormOp<float, CUDAContext>::GroupNormForwardNHWC(
const int N,
const int C,
const int HxW,
const float* X,
const float* scale,
const float* bias,
float* Y) {
const int M = math::DivUp(N * C * HxW, CAFFE_CUDA_NUM_THREADS);
GroupNormForwardCUDAKernel<float, StorageOrder::NHWC>
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
N, C, HxW, X, scale, bias, Y);
}
// Math:
// let: s = gamma * rsig
// let: b = beta - mu * gamma * rsig
// then: Y = s * X + b
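// A short sketch tying this to the code: ComputeFusedParamsCUDAKernel above
// materializes exactly these fused parameters, scale[n, c] = gamma[c] * rsig[n, g]
// and bias[n, c] = beta[c] - scale[n, c] * mu[n, g], so the forward pass is a single
// fused multiply-add per element; the gradient kernels below differentiate through
// this fused form.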
template <>
bool GroupNormGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW(
const int N,
const int G,
const int K,
const int HxW,
const float* dY_data,
const float* X_data,
const float* mu_data,
const float* rsig_data,
const float* gamma_data,
float* dX_data,
float* dgamma_data,
float* dbeta_data) {
const int C = G * K;
ReinitializeTensor(&ds_, {N, C}, at::dtype<float>().device(CUDA));
ReinitializeTensor(&db_, {N, C}, at::dtype<float>().device(CUDA));
ReinitializeTensor(&dY_scale_, {N, C}, at::dtype<float>().device(CUDA));
ReinitializeTensor(&X_scale_, {N, G}, at::dtype<float>().device(CUDA));
ReinitializeTensor(&bias_, {N, G}, at::dtype<float>().device(CUDA));
float* ds_data = ds_.mutable_data<float>();
float* db_data = db_.mutable_data<float>();
float* dY_scale_data = dY_scale_.mutable_data<float>();
float* X_scale_data = X_scale_.mutable_data<float>();
float* bias_data = bias_.mutable_data<float>();
ComputeInternalGradientsNCHWCUDAKernel<float>
<<<N * C, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
HxW, dY_data, X_data, ds_data, db_data);
// Computes dL/dX.
int M = math::DivUp(N * C, CAFFE_CUDA_NUM_THREADS);
ComputeYGradientScaleCUDAKernel<float>
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
N, G, K, rsig_data, gamma_data, dY_scale_data);
ComputeXScaleAndBiasCUDAKernel<float>
<<<dim3(N, G), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
G,
K,
1.0f / static_cast<float>(K * HxW),
ds_data,
db_data,
mu_data,
rsig_data,
gamma_data,
X_scale_data,
bias_data);
M = math::DivUp(N * C * HxW, CAFFE_CUDA_NUM_THREADS);
GroupNormBackwardCUDAKernel<float, StorageOrder::NCHW>
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
N,
G,
K,
HxW,
dY_scale_data,
dY_data,
X_scale_data,
X_data,
bias_data,
dX_data);
// Computes dL/dgamma and dL/dbeta.
GammaBetaBackwardCUDAKernel<
float><<<dim3(G, K), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
N, G, K, ds_data, db_data, mu_data, rsig_data, dgamma_data, dbeta_data);
return true;
}
template <>
bool GroupNormGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC(
const int N,
const int G,
const int K,
const int HxW,
const float* dY_data,
const float* X_data,
const float* mu_data,
const float* rsig_data,
const float* gamma_data,
float* dX_data,
float* dgamma_data,
float* dbeta_data) {
const int C = G * K;
ReinitializeTensor(&ds_, {N, C}, at::dtype<float>().device(CUDA));
ReinitializeTensor(&db_, {N, C}, at::dtype<float>().device(CUDA));
ReinitializeTensor(&dY_scale_, {N, C}, at::dtype<float>().device(CUDA));
ReinitializeTensor(&X_scale_, {N, G}, at::dtype<float>().device(CUDA));
ReinitializeTensor(&bias_, {N, G}, at::dtype<float>().device(CUDA));
ReinitializeTensor(&ones_, {HxW}, at::dtype<float>().device(CUDA));
float* ds_data = ds_.mutable_data<float>();
float* db_data = db_.mutable_data<float>();
float* dY_scale_data = dY_scale_.mutable_data<float>();
float* X_scale_data = X_scale_.mutable_data<float>();
float* bias_data = bias_.mutable_data<float>();
float* ones_data = ones_.mutable_data<float>();
math::Set<float, CUDAContext>(HxW, 1.0f, ones_data, &context_);
math::Mul<float, CUDAContext>(
N * C * HxW, dY_data, X_data, dX_data, &context_);
math::GemmStridedBatched<float, CUDAContext>(
CblasTrans,
CblasNoTrans,
N,
C,
1,
HxW,
1.0f,
dX_data,
C * HxW,
ones_data,
0,
0.0f,
ds_data,
C,
&context_);
math::GemmStridedBatched<float, CUDAContext>(
CblasTrans,
CblasNoTrans,
N,
C,
1,
HxW,
1.0f,
dY_data,
C * HxW,
ones_data,
0,
0.0f,
db_data,
C,
&context_);
// Computes dL/dX.
int M = math::DivUp(N * C, CAFFE_CUDA_NUM_THREADS);
ComputeYGradientScaleCUDAKernel<float>
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
N, G, K, rsig_data, gamma_data, dY_scale_data);
ComputeXScaleAndBiasCUDAKernel<float>
<<<dim3(N, G), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
G,
K,
1.0f / static_cast<float>(K * HxW),
ds_data,
db_data,
mu_data,
rsig_data,
gamma_data,
X_scale_data,
bias_data);
M = math::DivUp(N * C * HxW, CAFFE_CUDA_NUM_THREADS);
GroupNormBackwardCUDAKernel<float, StorageOrder::NHWC>
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
N,
G,
K,
HxW,
dY_scale_data,
dY_data,
X_scale_data,
X_data,
bias_data,
dX_data);
// Computes dL/dgamma and dL/dbeta.
GammaBetaBackwardCUDAKernel<
float><<<dim3(G, K), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
N, G, K, ds_data, db_data, mu_data, rsig_data, dgamma_data, dbeta_data);
return true;
}
REGISTER_CUDA_OPERATOR(GroupNorm, GroupNormOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
GroupNormGradient,
GroupNormGradientOp<float, CUDAContext>);
} // namespace caffe2
|
5ecd0ea5c5dcff84fc8be45ebfc18b99b0c1eb76.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "shared.h"
#include <cstdio>
#include <cmath>
#include <thrust/complex.h>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
extern "C"{
__global__ void ghmatecd_kernel(
int cone[],
FREAL cx[],
FREAL cy[],
FREAL cz[],
FREAL cxm[],
FREAL cym[],
FREAL czm[],
thrust::complex<FREAL> zh[],
thrust::complex<FREAL> zg[],
FREAL rn[][3],
thrust::complex<FREAL> zge,
thrust::complex<FREAL> zcs,
thrust::complex<FREAL> zcp,
FREAL fr,
FREAL gi[],
FREAL ome[],
FREAL c1,
FREAL c2,
FREAL c3,
FREAL c4,
// int npg,
int n,
int nbe,
int dim_cone,
int column_pad,
int fast_singular,
int* ret
)
{
const int ig = threadIdx.x;
const int jg = threadIdx.y;
const int ii = blockIdx.y;
const int jj = blockIdx.x + column_pad;
const int npg = blockDim.y;
const int tid = npg*threadIdx.y + threadIdx.x;
const int lane = tid % 32;
const int warp = tid / 32;
const int num_warps = (npg*npg + 31)/32;
const int lane_x = lane % 3;
const int lane_y = lane / 3;
const int gelem_pad = 3*3*num_warps;
extern __shared__ thrust::complex<FREAL> s_[];
int i, j;
const FREAL pi = 3.141592654;
FREAL p[4][2], f[4];
FREAL xj[3][2];
__shared__ FREAL co[3][4];
__shared__ FREAL rn_cached[4];
FREAL g1, g2, p1, p2, p12, sp, sm, rp, rm, det;
FREAL cxg, cyg, czg, cxp, cyp, czp;
FREAL j1, j2, j3;
FREAL r1, r2, r3, r, drn, rd[3];
thrust::complex<FREAL> zwi, zc0, zc1, zc2, zkp, zks, zzp, zzs, zezp, zezs,
zp2, zs2, zfhi, zcappa, zfhidr, zcappadr, zaa, zbb, zcc;
thrust::complex<FREAL> zhi, zgi;
FREAL zhi_real, zhi_imag, zgi_real, zgi_imag;
int iii, jjj;
#define zhelem(i, j, k) s_[3*num_warps*(i) + num_warps*(j) + (k)]
#define zgelem(i, j, k) (s_ + gelem_pad)[3*num_warps*(i) + num_warps*(j) + (k)]
if (threadIdx.x < 4 && threadIdx.y == 0)
{
co[0][threadIdx.x] = cx[cone[dim_cone*threadIdx.x + jj] - 1];
co[1][threadIdx.x] = cy[cone[dim_cone*threadIdx.x + jj] - 1];
co[2][threadIdx.x] = cz[cone[dim_cone*threadIdx.x + jj] - 1];
//Note that the column dimension of rn is 3, but we are accessing the element
//at position 4. This can lead to a segfault; however, it yields an improvement
//of ~100ms in the kernel if we make this change.
rn_cached[threadIdx.x] = rn[jj][threadIdx.x];
}
__syncthreads();
cxp = cxm[ii];
cyp = cym[ii];
czp = czm[ii];
g2 = gi[jg];
p2 = ome[jg];
sp = 1 + g2;
sm = 1 - g2;
p[0][0] = -0.25*sm;
p[1][0] = 0.25*sm;
p[2][0] = 0.25*sp;
p[3][0] = -0.25*sp;
g1 = gi[ig];
p1 = ome[ig];
rp = 1 + g1;
rm = 1 - g1;
f[0] = 0.25*rm*sm;
f[1] = 0.25*rp*sm;
f[2] = 0.25*rp*sp;
f[3] = 0.25*rm*sp;
p[0][1] = -0.25*rm;
p[1][1] = -0.25*rp;
p[2][1] = 0.25*rp;
p[3][1] = 0.25*rm;
for (iii = 0; iii < 2; ++iii)
{
for (jjj = 0; jjj < 3; ++jjj)
{
xj[jjj][iii] = p[0][iii]*co[jjj][0] + p[1][iii]*co[jjj][1]+ p[2][iii]*co[jjj][2] + p[3][iii]*co[jjj][3];
}
}
j1 = xj[1][0]*xj[2][1]-xj[1][1]*xj[2][0];
j2 = xj[0][1]*xj[2][0]-xj[0][0]*xj[2][1];
j3 = xj[0][0]*xj[1][1]-xj[0][1]*xj[1][0];
det = sqrt(j1*j1 + j2*j2 + j3*j3);
if (det < 1e-5)
{
*ret = 1;
return;
}
cxg = 0;
cyg = 0;
czg = 0;
for (iii = 0; iii < 4; ++iii)
{
cxg = cxg + co[0][iii]*f[iii];
cyg = cyg + co[1][iii]*f[iii];
czg = czg + co[2][iii]*f[iii];
}
r1 = cxg - cxp;
r2 = cyg - cyp;
r3 = czg - czp;
r = sqrt(r1*r1 + r2*r2 + r3*r3);
drn = (r1*rn_cached[0] + r2*rn_cached[1] + r3*rn_cached[2])/r;
rd[0] = r1/r;
rd[1] = r2/r;
rd[2] = r3/r;
zwi = thrust::complex<FREAL>(0, fr);
zc0 = ((FREAL) 1.0)/(((FREAL) 4.)*(pi)*(zge));
zc1 = ((zcp)/(zcs))*((zcp)/(zcs));
zc2 = ((zcs)/(zcp))*((zcs)/(zcp));
zkp = -zwi/(zcp);
zks = -zwi/(zcs);
zzp = zkp*r;
zzs = zks*r;
zezp= exp(zzp);
zezs= exp(zzs);
zp2 = zzp*zzp;
zs2 = zzs*zzs;
zfhi = (((FREAL) 1.) + ((FREAL) 1.)/zs2 - ((FREAL) 1.)/zzs)*zezs/r - zc2*(((FREAL) 1.)/zp2 - ((FREAL) 1.)/zzp)*zezp/r;
zcappa = (((FREAL) 1.) + ((FREAL) 3.)/zs2 - ((FREAL) 3.f)/zzs)*zezs/r - zc2*(((FREAL) 1.) + ((FREAL) 3.)/zp2 - ((FREAL) 3.)/zzp)*zezp/r;
zfhidr = (((FREAL) -2.)+ zzs + ((FREAL) 3.)/zzs - ((FREAL) 3.)/zs2)*zezs/(r*r) - zc2*(((FREAL) -1.) + ((FREAL) 3.)/zzp - ((FREAL) 3.)/zp2)*zezp/(r*r);
zcappadr= (zzs - ((FREAL) 4.) + ((FREAL) 9.f)/zzs - ((FREAL) 9.)/zs2)*zezs/(r*r) - zc2*(zzp - ((FREAL) 4.) + ((FREAL) 9.)/zzp - ((FREAL) 9.f)/zp2)*zezp/(r*r);
zaa = zfhidr-zcappa/r;
zbb = ((FREAL) 4.)*zcappa/r -((FREAL) 2.)*zcappadr;
zcc = (zc1-((FREAL) 2.))*(zaa + ((FREAL) 0.5)*zbb-((FREAL) 3.0)*zcappa/r)-((FREAL) 2.0)*zcappa/r;
p12 = p1*p2*det;
for (j = 0; j < 3; ++j)
{ for (i = 0; i < 3; ++i)
{
zgi = (zc0*(zfhi*delta[j][i] - zcappa*rd[j]*rd[i]));
zhi = (((FREAL) 1.0)/(((FREAL) 4.0)*pi))*((zaa*(drn*delta[j][i] +
rd[j]*rn_cached[i])) + rd[i]*rd[j]*drn*zbb +
rd[i]*rn_cached[j]*zcc);
if (ii == jj && fast_singular)
{
zgi = zgi - (c1/r)*(c2*delta[j][i] + rd[i]*rd[j]);
zhi = zhi - (c3/(r*r))*(drn*(c4*delta[j][i] + ((FREAL) 3.0)*rd[i]*rd[j]) + c4*(rd[j]*rn_cached[i] - rd[i]*rn_cached[j]));
}
zgi = zgi*p12;
zgi_real = zgi.real();
zgi_imag = zgi.imag();
zhi = zhi*p12;
zhi_real = zhi.real();
zhi_imag = zhi.imag();
for (int offset = 16; offset > 0; offset = offset/2)
{ zgi_real += shfl(zgi_real, offset);
zgi_imag += shfl(zgi_imag, offset);
zhi_real += shfl(zhi_real, offset);
zhi_imag += shfl(zhi_imag, offset);
}
if (lane == 0)
{
zhelem(j, i, warp) = thrust::complex<FREAL>(zhi_real, zhi_imag);
zgelem(j, i, warp) = thrust::complex<FREAL>(zgi_real, zgi_imag);
}
}
}
__syncthreads();
int index = 3*blockIdx.y + (3*nbe)*3*blockIdx.x + lane_x + (3*nbe)*lane_y;
if (num_warps == 1)
{
if (lane < 9)
{
// No need for reduction.
zg[index] = zgelem(lane_y, lane_x, 0);
zh[index] = zhelem(lane_y, lane_x, 0);
}
}
else
{
switch (warp)
{
case 0:
if (lane < 9)
zg[index] = thrust::reduce(thrust::seq, &zgelem(lane_y, lane_x, 0), &zgelem(lane_y, lane_x, num_warps));
break;
case 1:
if (lane < 9)
zh[index] = thrust::reduce(thrust::seq, &zhelem(lane_y, lane_x, 0), &zhelem(lane_y, lane_x, num_warps));
break;
}
}
}
void cuda_ghmatecd_(int* nbe,
int* npg,
int* n,
int* np,
thrust::complex<FREAL>* zge,
thrust::complex<FREAL>* zcs,
thrust::complex<FREAL>* zcp,
FREAL* c1,
FREAL* c2,
FREAL* c3,
FREAL* c4,
FREAL* fr,
FREAL* zhest_,
FREAL* zgest_,
thrust::complex<FREAL>* zgp_,
thrust::complex<FREAL>* zhp_,
int* fast_singular,
int* status
)
{
size_t column_size = 2*(3*(*nbe))*sizeof(thrust::complex<FREAL>);
int num_of_warps = ((*npg)*(*npg) + 31)/32;
int shared_mem_size = 2*3*3*num_of_warps*sizeof(thrust::complex<FREAL>);
hipError_t error;
thrust::complex<FREAL>* device_zh;
thrust::complex<FREAL>* device_zg;
int* device_return_status;
int return_status;
/* Cast the parameters back to their original types */
// FREAL (*zhest)[3*(*nbe)] = (FREAL (*)[3*(*nbe)]) zhest_;
// FREAL (*zgest)[3*(*nbe)] = (FREAL (*)[3*(*nbe)]) zgest_;
thrust::complex<FREAL> (*zgp)[3*(*nbe)] = (thrust::complex<FREAL> (*)[3*(*nbe)]) zgp_;
thrust::complex<FREAL> (*zhp)[3*(*nbe)] = (thrust::complex<FREAL> (*)[3*(*nbe)]) zhp_;
int i, iterations, width;
dim3 threadsPerBlock(*npg,*npg);
error = hipMalloc(&device_return_status, sizeof(int));
cuda_assert(error);
width = largest_possible_width(column_size, *nbe, &iterations);
error = hipMalloc(&device_zh, (3*(*nbe))*(3*(width))*sizeof(thrust::complex<FREAL>));
cuda_assert(error);
error = hipMalloc(&device_zg, (3*(*nbe))*(3*(width))*sizeof(thrust::complex<FREAL>));
cuda_assert(error);
for (i = 0; i < iterations; ++i)
{
int starting_column = width*i;
// if (starting_column + width > *n)
// width = *n - starting_column;
if (starting_column + width > *nbe)
width = *nbe - starting_column;
dim3 numBlocks(width, *nbe);
error = hipMemset(device_return_status, 0, sizeof(int));
cuda_assert(error);
error = hipMemset(device_zh, 0, (3*(*nbe))*(3*(width))*sizeof(thrust::complex<FREAL>));
cuda_assert(error);
error = hipMemset(device_zg, 0, (3*(*nbe))*(3*(width))*sizeof(thrust::complex<FREAL>));
cuda_assert(error);
hipDeviceSynchronize();
hipLaunchKernelGGL(( ghmatecd_kernel), dim3(numBlocks), dim3(threadsPerBlock), shared_mem_size, 0,
device_cone,
device_cx,
device_cy,
device_cz,
device_cxm,
device_cym,
device_czm,
device_zh,
device_zg,
(FREAL (*)[3]) device_etas,
*zge,
*zcs,
*zcp,
*fr,
device_gi,
device_ome,
*c1,
*c2,
*c3,
*c4,
// *npg,
*n,
*nbe,
*n,
starting_column,
*fast_singular,
device_return_status
);
hipDeviceSynchronize();
error = hipMemcpy(&return_status, device_return_status, sizeof(int), hipMemcpyDeviceToHost);
cuda_assert(error);
if (return_status != 0)
{
fputs("Matriz Singular\n", stderr);
}
error = hipMemcpy(&zhp[3*starting_column], device_zh, (3*(*nbe))*(3*(width))*sizeof(thrust::complex<FREAL>), hipMemcpyDeviceToHost);
cuda_assert(error);
error = hipMemcpy(&zgp[3*starting_column], device_zg, (3*(*nbe))*(3*(width))*sizeof(thrust::complex<FREAL>), hipMemcpyDeviceToHost);
cuda_assert(error);
}
error = hipFree(device_zh);
cuda_assert(error);
error = hipFree(device_zg);
cuda_assert(error);
*status = return_status;
error = hipFree(device_return_status);
cuda_assert(error);
}
}
|
5ecd0ea5c5dcff84fc8be45ebfc18b99b0c1eb76.cu
|
#include "shared.h"
#include <cstdio>
#include <cmath>
#include <thrust/complex.h>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
extern "C"{
__global__ void ghmatecd_kernel(
int cone[],
FREAL cx[],
FREAL cy[],
FREAL cz[],
FREAL cxm[],
FREAL cym[],
FREAL czm[],
thrust::complex<FREAL> zh[],
thrust::complex<FREAL> zg[],
FREAL rn[][3],
thrust::complex<FREAL> zge,
thrust::complex<FREAL> zcs,
thrust::complex<FREAL> zcp,
FREAL fr,
FREAL gi[],
FREAL ome[],
FREAL c1,
FREAL c2,
FREAL c3,
FREAL c4,
// int npg,
int n,
int nbe,
int dim_cone,
int column_pad,
int fast_singular,
int* ret
)
{
const int ig = threadIdx.x;
const int jg = threadIdx.y;
const int ii = blockIdx.y;
const int jj = blockIdx.x + column_pad;
const int npg = blockDim.y;
const int tid = npg*threadIdx.y + threadIdx.x;
const int lane = tid % 32;
const int warp = tid / 32;
const int num_warps = (npg*npg + 31)/32;
const int lane_x = lane % 3;
const int lane_y = lane / 3;
const int gelem_pad = 3*3*num_warps;
extern __shared__ thrust::complex<FREAL> s_[];
int i, j;
const FREAL pi = 3.141592654;
FREAL p[4][2], f[4];
FREAL xj[3][2];
__shared__ FREAL co[3][4];
__shared__ FREAL rn_cached[4];
FREAL g1, g2, p1, p2, p12, sp, sm, rp, rm, det;
FREAL cxg, cyg, czg, cxp, cyp, czp;
FREAL j1, j2, j3;
FREAL r1, r2, r3, r, drn, rd[3];
thrust::complex<FREAL> zwi, zc0, zc1, zc2, zkp, zks, zzp, zzs, zezp, zezs,
zp2, zs2, zfhi, zcappa, zfhidr, zcappadr, zaa, zbb, zcc;
thrust::complex<FREAL> zhi, zgi;
FREAL zhi_real, zhi_imag, zgi_real, zgi_imag;
int iii, jjj;
#define zhelem(i, j, k) s_[3*num_warps*(i) + num_warps*(j) + (k)]
#define zgelem(i, j, k) (s_ + gelem_pad)[3*num_warps*(i) + num_warps*(j) + (k)]
if (threadIdx.x < 4 && threadIdx.y == 0)
{
co[0][threadIdx.x] = cx[cone[dim_cone*threadIdx.x + jj] - 1];
co[1][threadIdx.x] = cy[cone[dim_cone*threadIdx.x + jj] - 1];
co[2][threadIdx.x] = cz[cone[dim_cone*threadIdx.x + jj] - 1];
//Note that the column dimension of rn is 3, but we are accessing the element
//at position 4. This can lead to a segfault; however, it yields an improvement
//of ~100ms in the kernel if we make this change.
rn_cached[threadIdx.x] = rn[jj][threadIdx.x];
}
__syncthreads();
cxp = cxm[ii];
cyp = cym[ii];
czp = czm[ii];
g2 = gi[jg];
p2 = ome[jg];
sp = 1 + g2;
sm = 1 - g2;
p[0][0] = -0.25*sm;
p[1][0] = 0.25*sm;
p[2][0] = 0.25*sp;
p[3][0] = -0.25*sp;
g1 = gi[ig];
p1 = ome[ig];
rp = 1 + g1;
rm = 1 - g1;
f[0] = 0.25*rm*sm;
f[1] = 0.25*rp*sm;
f[2] = 0.25*rp*sp;
f[3] = 0.25*rm*sp;
p[0][1] = -0.25*rm;
p[1][1] = -0.25*rp;
p[2][1] = 0.25*rp;
p[3][1] = 0.25*rm;
for (iii = 0; iii < 2; ++iii)
{
for (jjj = 0; jjj < 3; ++jjj)
{
xj[jjj][iii] = p[0][iii]*co[jjj][0] + p[1][iii]*co[jjj][1]+ p[2][iii]*co[jjj][2] + p[3][iii]*co[jjj][3];
}
}
j1 = xj[1][0]*xj[2][1]-xj[1][1]*xj[2][0];
j2 = xj[0][1]*xj[2][0]-xj[0][0]*xj[2][1];
j3 = xj[0][0]*xj[1][1]-xj[0][1]*xj[1][0];
det = sqrt(j1*j1 + j2*j2 + j3*j3);
if (det < 1e-5)
{
*ret = 1;
return;
}
cxg = 0;
cyg = 0;
czg = 0;
for (iii = 0; iii < 4; ++iii)
{
cxg = cxg + co[0][iii]*f[iii];
cyg = cyg + co[1][iii]*f[iii];
czg = czg + co[2][iii]*f[iii];
}
r1 = cxg - cxp;
r2 = cyg - cyp;
r3 = czg - czp;
r = sqrt(r1*r1 + r2*r2 + r3*r3);
drn = (r1*rn_cached[0] + r2*rn_cached[1] + r3*rn_cached[2])/r;
rd[0] = r1/r;
rd[1] = r2/r;
rd[2] = r3/r;
zwi = thrust::complex<FREAL>(0, fr);
zc0 = ((FREAL) 1.0)/(((FREAL) 4.)*(pi)*(zge));
zc1 = ((zcp)/(zcs))*((zcp)/(zcs));
zc2 = ((zcs)/(zcp))*((zcs)/(zcp));
zkp = -zwi/(zcp);
zks = -zwi/(zcs);
zzp = zkp*r;
zzs = zks*r;
zezp= exp(zzp);
zezs= exp(zzs);
zp2 = zzp*zzp;
zs2 = zzs*zzs;
zfhi = (((FREAL) 1.) + ((FREAL) 1.)/zs2 - ((FREAL) 1.)/zzs)*zezs/r - zc2*(((FREAL) 1.)/zp2 - ((FREAL) 1.)/zzp)*zezp/r;
zcappa = (((FREAL) 1.) + ((FREAL) 3.)/zs2 - ((FREAL) 3.f)/zzs)*zezs/r - zc2*(((FREAL) 1.) + ((FREAL) 3.)/zp2 - ((FREAL) 3.)/zzp)*zezp/r;
zfhidr = (((FREAL) -2.)+ zzs + ((FREAL) 3.)/zzs - ((FREAL) 3.)/zs2)*zezs/(r*r) - zc2*(((FREAL) -1.) + ((FREAL) 3.)/zzp - ((FREAL) 3.)/zp2)*zezp/(r*r);
zcappadr= (zzs - ((FREAL) 4.) + ((FREAL) 9.f)/zzs - ((FREAL) 9.)/zs2)*zezs/(r*r) - zc2*(zzp - ((FREAL) 4.) + ((FREAL) 9.)/zzp - ((FREAL) 9.f)/zp2)*zezp/(r*r);
zaa = zfhidr-zcappa/r;
zbb = ((FREAL) 4.)*zcappa/r -((FREAL) 2.)*zcappadr;
zcc = (zc1-((FREAL) 2.))*(zaa + ((FREAL) 0.5)*zbb-((FREAL) 3.0)*zcappa/r)-((FREAL) 2.0)*zcappa/r;
p12 = p1*p2*det;
for (j = 0; j < 3; ++j)
{ for (i = 0; i < 3; ++i)
{
zgi = (zc0*(zfhi*delta[j][i] - zcappa*rd[j]*rd[i]));
zhi = (((FREAL) 1.0)/(((FREAL) 4.0)*pi))*((zaa*(drn*delta[j][i] +
rd[j]*rn_cached[i])) + rd[i]*rd[j]*drn*zbb +
rd[i]*rn_cached[j]*zcc);
if (ii == jj && fast_singular)
{
zgi = zgi - (c1/r)*(c2*delta[j][i] + rd[i]*rd[j]);
zhi = zhi - (c3/(r*r))*(drn*(c4*delta[j][i] + ((FREAL) 3.0)*rd[i]*rd[j]) + c4*(rd[j]*rn_cached[i] - rd[i]*rn_cached[j]));
}
zgi = zgi*p12;
zgi_real = zgi.real();
zgi_imag = zgi.imag();
zhi = zhi*p12;
zhi_real = zhi.real();
zhi_imag = zhi.imag();
for (int offset = 16; offset > 0; offset = offset/2)
{ zgi_real += shfl(zgi_real, offset);
zgi_imag += shfl(zgi_imag, offset);
zhi_real += shfl(zhi_real, offset);
zhi_imag += shfl(zhi_imag, offset);
}
if (lane == 0)
{
zhelem(j, i, warp) = thrust::complex<FREAL>(zhi_real, zhi_imag);
zgelem(j, i, warp) = thrust::complex<FREAL>(zgi_real, zgi_imag);
}
}
}
__syncthreads();
int index = 3*blockIdx.y + (3*nbe)*3*blockIdx.x + lane_x + (3*nbe)*lane_y;
if (num_warps == 1)
{
if (lane < 9)
{
// No need for reduction.
zg[index] = zgelem(lane_y, lane_x, 0);
zh[index] = zhelem(lane_y, lane_x, 0);
}
}
else
{
switch (warp)
{
case 0:
if (lane < 9)
zg[index] = thrust::reduce(thrust::seq, &zgelem(lane_y, lane_x, 0), &zgelem(lane_y, lane_x, num_warps));
break;
case 1:
if (lane < 9)
zh[index] = thrust::reduce(thrust::seq, &zhelem(lane_y, lane_x, 0), &zhelem(lane_y, lane_x, num_warps));
break;
}
}
}
void cuda_ghmatecd_(int* nbe,
int* npg,
int* n,
int* np,
thrust::complex<FREAL>* zge,
thrust::complex<FREAL>* zcs,
thrust::complex<FREAL>* zcp,
FREAL* c1,
FREAL* c2,
FREAL* c3,
FREAL* c4,
FREAL* fr,
FREAL* zhest_,
FREAL* zgest_,
thrust::complex<FREAL>* zgp_,
thrust::complex<FREAL>* zhp_,
int* fast_singular,
int* status
)
{
size_t column_size = 2*(3*(*nbe))*sizeof(thrust::complex<FREAL>);
int num_of_warps = ((*npg)*(*npg) + 31)/32;
int shared_mem_size = 2*3*3*num_of_warps*sizeof(thrust::complex<FREAL>);
cudaError_t error;
thrust::complex<FREAL>* device_zh;
thrust::complex<FREAL>* device_zg;
int* device_return_status;
int return_status;
/* Cast the parameters back to their original types */
// FREAL (*zhest)[3*(*nbe)] = (FREAL (*)[3*(*nbe)]) zhest_;
// FREAL (*zgest)[3*(*nbe)] = (FREAL (*)[3*(*nbe)]) zgest_;
thrust::complex<FREAL> (*zgp)[3*(*nbe)] = (thrust::complex<FREAL> (*)[3*(*nbe)]) zgp_;
thrust::complex<FREAL> (*zhp)[3*(*nbe)] = (thrust::complex<FREAL> (*)[3*(*nbe)]) zhp_;
int i, iterations, width;
dim3 threadsPerBlock(*npg,*npg);
error = cudaMalloc(&device_return_status, sizeof(int));
cuda_assert(error);
width = largest_possible_width(column_size, *nbe, &iterations);
error = cudaMalloc(&device_zh, (3*(*nbe))*(3*(width))*sizeof(thrust::complex<FREAL>));
cuda_assert(error);
error = cudaMalloc(&device_zg, (3*(*nbe))*(3*(width))*sizeof(thrust::complex<FREAL>));
cuda_assert(error);
for (i = 0; i < iterations; ++i)
{
int starting_column = width*i;
// if (starting_column + width > *n)
// width = *n - starting_column;
if (starting_column + width > *nbe)
width = *nbe - starting_column;
dim3 numBlocks(width, *nbe);
error = cudaMemset(device_return_status, 0, sizeof(int));
cuda_assert(error);
error = cudaMemset(device_zh, 0, (3*(*nbe))*(3*(width))*sizeof(thrust::complex<FREAL>));
cuda_assert(error);
error = cudaMemset(device_zg, 0, (3*(*nbe))*(3*(width))*sizeof(thrust::complex<FREAL>));
cuda_assert(error);
cudaDeviceSynchronize();
ghmatecd_kernel<<<numBlocks, threadsPerBlock, shared_mem_size>>>(
device_cone,
device_cx,
device_cy,
device_cz,
device_cxm,
device_cym,
device_czm,
device_zh,
device_zg,
(FREAL (*)[3]) device_etas,
*zge,
*zcs,
*zcp,
*fr,
device_gi,
device_ome,
*c1,
*c2,
*c3,
*c4,
// *npg,
*n,
*nbe,
*n,
starting_column,
*fast_singular,
device_return_status
);
cudaDeviceSynchronize();
error = cudaMemcpy(&return_status, device_return_status, sizeof(int), cudaMemcpyDeviceToHost);
cuda_assert(error);
if (return_status != 0)
{
fputs("Matriz Singular\n", stderr);
}
error = cudaMemcpy(&zhp[3*starting_column], device_zh, (3*(*nbe))*(3*(width))*sizeof(thrust::complex<FREAL>), cudaMemcpyDeviceToHost);
cuda_assert(error);
error = cudaMemcpy(&zgp[3*starting_column], device_zg, (3*(*nbe))*(3*(width))*sizeof(thrust::complex<FREAL>), cudaMemcpyDeviceToHost);
cuda_assert(error);
}
error = cudaFree(device_zh);
cuda_assert(error);
error = cudaFree(device_zg);
cuda_assert(error);
*status = return_status;
error = cudaFree(device_return_status);
cuda_assert(error);
}
}
|
3267f047dbbc2f2e2bb118acc12607677d9ee7aa.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* @file BatchNormLayer_device.cu
* @date 2017-01-25
* @author moonhoen lee
* @brief
* @details
*/
#include "hip/hip_runtime.h"
#include "BatchNormLayer.h"
#include "Network.h"
#include "SysLog.h"
#include "StdOutLog.h"
#include "ColdLog.h"
#include "Perf.h"
#include "MathFunctions.h"
#include "PropMgmt.h"
#include "Update.h"
#include "Updater.h"
#include "Donator.h"
#define BATCHCONDLAYER_LOG 0
using namespace std;
// FIXME: the kernel functions should be modified to run faster,
// e.g. by keeping intermediate results in memory.
///////////////////////////////////////////////////////////////////////////////////////////
// GPU Kernels
template <typename Dtype>
__global__ void FillValues(Dtype *vec, int size, Dtype value)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size)
return;
vec[idx] = value;
}
template <typename Dtype>
__global__ void CalcMean(const Dtype *input, int depth, int batchCount, Dtype *mean)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= depth)
return;
for (int i = 0 ; i < batchCount; i++) {
int index = i * depth + idx;
mean[idx] += input[index];
}
mean[idx] = mean[idx] / (Dtype)batchCount;
}
template <typename Dtype>
__global__ void CalcVariance(const Dtype *input, const Dtype* mean, int depth, int batchCount,
Dtype *variance)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= depth)
return;
for (int i = 0 ; i < batchCount; i++) {
int index = i * depth + idx;
variance[idx] += (input[index] - mean[idx]) * (input[index] - mean[idx]);
}
variance[idx] = variance[idx] / (Dtype)batchCount;
}
template <typename Dtype>
__global__ void Normalize(const Dtype *input, const Dtype* mean, const Dtype* variance,
const Dtype* gamma, const Dtype* beta, int depth, int batchCount, Dtype epsilon,
Dtype* normInput, Dtype* output)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int count = depth * batchCount;
if (idx >= count)
return;
int curDepth = idx % depth;
Dtype denominator = sqrtf(variance[curDepth] + epsilon);
normInput[idx] = (input[idx] - mean[curDepth]) / denominator;
output[idx] = normInput[idx] * gamma[curDepth] + beta[curDepth];
}
#define USE_SIMPLE_MOVING_AVERAGE 1
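// A note on the two modes of IncrementalMean below (an observation about the
// existing code, not a documented contract): with USE_SIMPLE_MOVING_AVERAGE the
// global statistics are tracked as an exponential moving average with weights
// 0.99 / 0.01; otherwise they are a running cumulative mean weighted by `counter`.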
template <typename Dtype>
__global__ void IncrementalMean(const Dtype *input, int depth, const Dtype counter,
Dtype* output)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= depth)
return;
#if USE_SIMPLE_MOVING_AVERAGE
output[idx] = 0.99 * output[idx] + 0.01 * input[idx];
#else
output[idx] = ((counter - 1.0) * output[idx] + input[idx]) / counter;
#endif
}
template <typename Dtype>
__global__ void Inference(const Dtype *input, const Dtype *globalMean,
const Dtype *globalVar, const Dtype *gamma, const Dtype *beta, int depth,
int batchCount, const Dtype counter, Dtype epsilon, Dtype* output)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= depth)
return;
Dtype varFactor = 1.0;
if (counter > 1.1) {
varFactor = (Dtype)counter / ((Dtype)counter - 1.0);
}
Dtype sqrtVariance = sqrtf(globalVar[idx] * varFactor + epsilon);
for (int i = 0 ; i < batchCount; i++) {
int index = i * depth + idx;
output[index] = input[index] * gamma[idx] / sqrtVariance + beta[idx] -
gamma[idx] * globalMean[idx] / sqrtVariance;
}
}
template <typename Dtype>
__global__ void ComputeNormInputGrad(const Dtype *outputGrads, const Dtype *gammas, int depth,
int batchCount, Dtype* normInputGrads)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int count = depth * batchCount;
if (idx >= count)
return;
int curDepth = idx % depth;
normInputGrads[idx] = outputGrads[idx] * gammas[curDepth];
}
template <typename Dtype>
__global__ void ComputeVarianceGrad(const Dtype* normInputGrad, const Dtype *inputData,
const Dtype *mean, const Dtype *variance, Dtype epsilon, int depth, int batchCount,
Dtype* varianceGrad)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= depth)
return;
varianceGrad[idx] = 0;
Dtype poweredVar = (-0.5) * pow((variance[idx] + epsilon), -1.5);
for (int i = 0; i < batchCount; i++) {
int index = i * depth + idx;
varianceGrad[idx] += normInputGrad[index] * (inputData[index] - mean[idx]) *
poweredVar;
}
}
template <typename Dtype>
__global__ void ComputeMeanGrad(const Dtype *normInputGrads, const Dtype *vars,
const Dtype *varGrads, const Dtype* inputData, const Dtype* means, int depth,
int batchCount, Dtype epsilon, Dtype* meanGrads)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= depth)
return;
meanGrads[idx] = 0;
Dtype sqrtVar = (-1) / sqrtf(vars[idx] + epsilon);
Dtype varGradFactor = varGrads[idx] * (-2) / (Dtype)batchCount;
for (int i = 0; i < batchCount; i++) {
int index = i * depth + idx;
meanGrads[idx] += normInputGrads[index] * sqrtVar +
varGradFactor * (inputData[index] - means[idx]);
}
}
template <typename Dtype>
__global__ void ComputeInputGrad(const Dtype *normInputGrads, const Dtype *vars,
const Dtype *varGrads, const Dtype* inputData, const Dtype* means, const Dtype* meanGrads,
int depth, int batchCount, Dtype epsilon, Dtype* inputGrads)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= depth)
return;
Dtype sqrtVar = sqrtf(vars[idx] + epsilon);
Dtype varGradFactor = varGrads[idx] * 2 / (Dtype)batchCount;
Dtype meanFactor = meanGrads[idx] / (Dtype)batchCount;
for (int i = 0; i < batchCount; i++) {
int index = i * depth + idx;
inputGrads[index] = normInputGrads[index] / sqrtVar +
varGradFactor * (inputData[index] - means[idx]) + meanFactor;
}
}
template <typename Dtype>
__global__ void ComputeScaleGrad(const Dtype *normInputs, const Dtype *outputGrads,
int depth, int batchCount, Dtype* gammaGrads)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= depth)
return;
gammaGrads[idx] = 0;
for (int i = 0; i < batchCount; i++) {
int index = i * depth + idx;
gammaGrads[idx] += outputGrads[index] * normInputs[index];
}
}
template <typename Dtype>
__global__ void ComputeShiftGrad(const Dtype *outputGrads, int depth, int batchCount,
Dtype* betaGrads)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= depth)
return;
betaGrads[idx] = 0;
for (int i = 0; i < batchCount; i++) {
int index = i * depth + idx;
betaGrads[idx] += outputGrads[index];
}
}
template<typename Dtype>
BatchNormLayer<Dtype>::~BatchNormLayer() {
if (SLPROP_BASE(receive)) {
Donator<Dtype>::releaseReceiver(SLPROP_BASE(donatorID));
} else {
Util::clearVector(this->_params);
Util::clearVector(this->_paramsHistory);
Util::clearVector(this->_paramsHistory2);
}
SASSERT0(this->normInputSet != NULL);
SFREE(this->normInputSet);
this->updateParams.clear();
}
template <typename Dtype>
void BatchNormLayer<Dtype>::update() {
const uint32_t size = this->depth;
const Dtype regScale = SNPROP(weightDecay);
const Dtype learnScale = Update<float>::calcLearningRate();
const Dtype beta1 = SNPROP(beta1);
const Dtype beta2 = SNPROP(beta2);
SLPROP(BatchNorm, decayedBeta1) *= beta1;
SLPROP(BatchNorm, decayedBeta2) *= beta2;
UpdateContext context = Update<Dtype>::makeContext(size, regScale, learnScale);
SASSUME0(this->updateParams.size() == 2);
this->updateParams[Gamma].context = context;
this->updateParams[Beta].context = context;
Updater::updateParams(this->updateParams);
}
template <typename Dtype>
void BatchNormLayer<Dtype>::feedforward() {
reshape();
struct timespec startTime;
SPERF_START(BATCHNORM_LAYER_FWTIME, &startTime);
// FIXME: currently implemented as CPU-style code. Change it to proper GPU code.
// (1) compute the mini-batch mean.
const vector<uint32_t>& inputShape = this->_inputData[0]->getShape();
int batchCount = inputShape[0];
const Dtype* inputData = this->_inputData[0]->device_data();
Dtype* outputData = this->_outputData[0]->mutable_device_data();
if (SLPROP(BatchNorm, train)) {
Dtype* means = this->meanSet->mutable_device_data();
Dtype* vars = this->varSet->mutable_device_data();
// (1) initialize the mean and variance buffers used for this mini-batch.
hipLaunchKernelGGL(( FillValues), dim3(SOOOA_GET_BLOCKS(this->depth)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
means, this->depth, 0.0f);
hipLaunchKernelGGL(( FillValues), dim3(SOOOA_GET_BLOCKS(this->depth)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
vars, this->depth, 0.0f);
// (2) compute the mini-batch mean.
hipLaunchKernelGGL(( CalcMean), dim3(SOOOA_GET_BLOCKS(this->depth)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
inputData, this->depth, batchCount, means);
// (3) compute the mini-batch variance.
hipLaunchKernelGGL(( CalcVariance), dim3(SOOOA_GET_BLOCKS(this->depth)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
inputData, means, this->depth, batchCount, vars);
// (4) normalize
Dtype* normInputs = this->normInputSet->mutable_device_data();
const Dtype* gammas = this->_params[ParamType::Gamma]->device_data();
const Dtype* betas = this->_params[ParamType::Beta]->device_data();
hipLaunchKernelGGL(( Normalize), dim3(SOOOA_GET_BLOCKS(this->depth * batchCount)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
inputData, means, vars, gammas, betas, this->depth, batchCount,
(Dtype)SLPROP(BatchNorm, epsilon), normInputs, outputData);
// (5) update the global meanSets and varianceSets.
Dtype* counter = this->_params[ParamType::GlobalCount]->mutable_host_data();
counter[0] += 1;
Dtype* globalMeans = this->_params[ParamType::GlobalMean]->mutable_device_data();
Dtype* globalVars = this->_params[ParamType::GlobalVar]->mutable_device_data();
hipLaunchKernelGGL(( IncrementalMean), dim3(SOOOA_GET_BLOCKS(this->depth)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
means, this->depth, counter[0], globalMeans);
hipLaunchKernelGGL(( IncrementalMean), dim3(SOOOA_GET_BLOCKS(this->depth)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
vars, this->depth, counter[0], globalVars);
} else {
const Dtype* counter = this->_params[ParamType::GlobalCount]->host_data();
SASSERT((counter[0] > 0), "need train before inference");
const Dtype* globalMeans = this->_params[ParamType::GlobalMean]->device_data();
const Dtype* globalVars = this->_params[ParamType::GlobalVar]->device_data();
const Dtype* gammas = this->_params[ParamType::Gamma]->device_data();
const Dtype* betas = this->_params[ParamType::Beta]->device_data();
hipLaunchKernelGGL(( Inference), dim3(SOOOA_GET_BLOCKS(this->depth)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
inputData, globalMeans, globalVars, gammas, betas, this->depth, batchCount,
counter[0], (Dtype)SLPROP(BatchNorm, epsilon), outputData);
}
SPERF_END(BATCHNORM_LAYER_FWTIME, startTime);
}
template <typename Dtype>
void BatchNormLayer<Dtype>::reshape() {
if (!Layer<Dtype>::_adjustInputShape()) {
const uint32_t count = Util::vecCountByAxis(this->_inputShape[0], 1);
const uint32_t inputDataCount = this->_inputData[0]->getCountByAxis(1);
assert(count == inputDataCount);
}
if (!Layer<Dtype>::_isInputShapeChanged(0))
return;
const vector<uint32_t>& inputShape = this->_inputData[0]->getShape();
uint32_t batches = inputShape[0];
uint32_t channels = inputShape[1];
uint32_t rows = inputShape[2];
uint32_t cols = inputShape[3];
uint32_t depth = this->_inputData[0]->getCountByAxis(1);
this->_inputShape[0] = {batches, channels, rows, cols};
this->_outputData[0]->reshape({batches, channels, rows, cols});
STDOUT_COND_LOG(BATCHCONDLAYER_LOG,
"<%s> layer' input-0 has reshaped as: %dx%dx%dx%d\n",
SLPROP_BASE(name).c_str(), batches, channels, rows, cols);
STDOUT_COND_LOG(BATCHCONDLAYER_LOG,
"<%s> layer' output-0 has reshaped as: %dx%dx%dx%d\n",
SLPROP_BASE(name).c_str(), batches, channels, rows, cols);
// Set up the parameters used by Batch Normalization (first call only).
if (this->depth == 0) {
this->depth = depth;
Optimizer opt = (Optimizer)SNPROP(optimizer);
int paramHistoryDataCount = Update<Dtype>::getParamHistoryDataCount(opt);
this->_params[ParamType::Gamma]->reshape({1, channels, rows, cols});
this->_params[ParamType::Beta]->reshape({1, channels, rows, cols});
this->_params[ParamType::GlobalMean]->reshape({1, channels, rows, cols});
this->_params[ParamType::GlobalVar]->reshape({1, channels, rows, cols});
this->_params[ParamType::GlobalCount]->reshape({1, 1, 1, 1});
if (paramHistoryDataCount >= 1) {
this->_paramsHistory[ParamType::Gamma]->reshape({1, channels, rows, cols});
this->_paramsHistory[ParamType::Beta]->reshape({1, channels, rows, cols});
this->_paramsHistory[ParamType::GlobalMean]->reshape({1, channels, rows, cols});
this->_paramsHistory[ParamType::GlobalVar]->reshape({1, channels, rows, cols});
this->_paramsHistory[ParamType::GlobalCount]->reshape({1, 1, 1, 1});
}
if (paramHistoryDataCount >= 2) {
this->_paramsHistory2[ParamType::Gamma]->reshape({1, channels, rows, cols});
this->_paramsHistory2[ParamType::Beta]->reshape({1, channels, rows, cols});
this->_paramsHistory2[ParamType::GlobalMean]->reshape({1, channels, rows, cols});
this->_paramsHistory2[ParamType::GlobalVar]->reshape({1, channels, rows, cols});
this->_paramsHistory2[ParamType::GlobalCount]->reshape({1, 1, 1, 1});
}
this->meanSet->reshape({1, channels, rows, cols});
this->varSet->reshape({1, channels, rows, cols});
this->normInputSet->reshape({batches, channels, rows, cols});
// FIXME: ..
Dtype* gammas = this->_params[ParamType::Gamma]->mutable_device_data();
hipLaunchKernelGGL(( FillValues), dim3(SOOOA_GET_BLOCKS(this->depth)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
gammas, this->depth, 1.0f);
this->_paramsInitialized[ParamType::Gamma] = true;
Dtype* betas = this->_params[ParamType::Beta]->mutable_device_data();
hipLaunchKernelGGL(( FillValues), dim3(SOOOA_GET_BLOCKS(this->depth)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
betas, this->depth, 0.0f);
this->_paramsInitialized[ParamType::Beta] = true;
Dtype* globalMeans = this->_params[ParamType::GlobalMean]->mutable_device_data();
hipLaunchKernelGGL(( FillValues), dim3(SOOOA_GET_BLOCKS(this->depth)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
globalMeans, this->depth, 0.0f);
this->_paramsInitialized[ParamType::GlobalMean] = true;
Dtype* globalVars = this->_params[ParamType::GlobalVar]->mutable_device_data();
hipLaunchKernelGGL(( FillValues), dim3(SOOOA_GET_BLOCKS(this->depth)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
globalVars, this->depth, 1.0f);
this->_paramsInitialized[ParamType::GlobalVar] = true;
Dtype* globalCounts = this->_params[ParamType::GlobalCount]->mutable_device_data();
hipLaunchKernelGGL(( FillValues), dim3(SOOOA_GET_BLOCKS(this->depth)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
globalCounts, this->depth, 0.0f);
this->_paramsInitialized[ParamType::GlobalCount] = true;
} else {
SASSERT0(this->depth == depth);
}
if (this->updateParams.size() == 0) {
UpdateParam upGamma;
upGamma.paramType = Gamma;
upGamma.paramDataPtr = (void*)this->_params[Gamma];
upGamma.paramHis1Ptr = (void*)this->_paramsHistory[Gamma];
upGamma.paramHis2Ptr = (void*)this->_paramsHistory2[Gamma];
this->updateParams.push_back(upGamma);
UpdateParam upBeta;
upBeta.paramType = Beta;
upBeta.paramDataPtr = (void*)this->_params[Beta];
upBeta.paramHis1Ptr = (void*)this->_paramsHistory[Beta];
upBeta.paramHis2Ptr = (void*)this->_paramsHistory2[Beta];
this->updateParams.push_back(upBeta);
}
}
template <typename Dtype>
void BatchNormLayer<Dtype>::computeNormInputGrad() {
const vector<uint32_t>& inputShape = this->_inputData[0]->getShape();
int batchCount = inputShape[0];
const Dtype* outputGrads = this->_outputData[0]->device_grad();
Dtype* normInputGrads = this->normInputSet->mutable_device_grad();
const Dtype* gammas = this->_params[ParamType::Gamma]->device_data();
hipLaunchKernelGGL(( ComputeNormInputGrad), dim3(SOOOA_GET_BLOCKS(this->depth * batchCount)),
dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
outputGrads, gammas, this->depth, batchCount, normInputGrads);
}
template <typename Dtype>
void BatchNormLayer<Dtype>::computeVarianceGrad() {
const vector<uint32_t>& inputShape = this->_inputData[0]->getShape();
int batchCount = inputShape[0];
const Dtype* inputData = this->_inputData[0]->device_data();
Dtype* varGrads = this->varSet->mutable_device_grad();
const Dtype* normInputGrads = this->normInputSet->device_grad();
const Dtype* means = this->meanSet->device_data();
const Dtype* vars = this->varSet->device_data();
hipLaunchKernelGGL(( ComputeVarianceGrad), dim3(SOOOA_GET_BLOCKS(this->depth)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
normInputGrads, inputData, means, vars, (Dtype)SLPROP(BatchNorm, epsilon), depth, batchCount,
varGrads);
}
template <typename Dtype>
void BatchNormLayer<Dtype>::computeMeanGrad() {
const vector<uint32_t>& inputShape = this->_inputData[0]->getShape();
int batchCount = inputShape[0];
const Dtype* inputData = this->_inputData[0]->device_data();
Dtype* meanGrads = this->meanSet->mutable_device_grad();
const Dtype* normInputGrads = this->normInputSet->device_grad();
const Dtype* vars = this->varSet->device_data();
const Dtype* varGrads = this->varSet->device_grad();
const Dtype* means = this->meanSet->device_data();
hipLaunchKernelGGL(( ComputeMeanGrad), dim3(SOOOA_GET_BLOCKS(this->depth)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
normInputGrads, vars, varGrads, inputData, means, depth, batchCount,
(Dtype)SLPROP(BatchNorm, epsilon), meanGrads);
}
template <typename Dtype>
void BatchNormLayer<Dtype>::computeInputGrad() {
const vector<uint32_t>& inputShape = this->_inputData[0]->getShape();
int batchCount = inputShape[0];
const Dtype* inputData = this->_inputData[0]->device_data();
Dtype* inputGrads = this->_inputData[0]->mutable_device_grad();
const Dtype* normInputGrads = this->normInputSet->device_grad();
const Dtype* vars = this->varSet->device_data();
const Dtype* varGrads = this->varSet->device_grad();
const Dtype* means = this->meanSet->device_data();
const Dtype* meanGrads = this->meanSet->device_grad();
hipLaunchKernelGGL(( ComputeInputGrad), dim3(SOOOA_GET_BLOCKS(this->depth)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
normInputGrads, vars, varGrads, inputData, means, meanGrads, depth, batchCount,
(Dtype)SLPROP(BatchNorm, epsilon), inputGrads);
}
template <typename Dtype>
void BatchNormLayer<Dtype>::computeScaleGrad() {
const vector<uint32_t>& inputShape = this->_inputData[0]->getShape();
int batchCount = inputShape[0];
const Dtype* outputGrads = this->_outputData[0]->device_grad();;
Dtype* gammaGrads = this->_params[ParamType::Gamma]->mutable_device_grad();
const Dtype* normInputs = this->normInputSet->device_data();
hipLaunchKernelGGL(( ComputeScaleGrad), dim3(SOOOA_GET_BLOCKS(this->depth)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
normInputs, outputGrads, depth, batchCount, gammaGrads);
}
template <typename Dtype>
void BatchNormLayer<Dtype>::computeShiftGrad() {
const vector<uint32_t>& inputShape = this->_inputData[0]->getShape();
int batchCount = inputShape[0];
const Dtype* outputGrads = this->_outputData[0]->device_grad();
Dtype* betaGrads = this->_params[ParamType::Beta]->mutable_device_grad();
hipLaunchKernelGGL(( ComputeShiftGrad), dim3(SOOOA_GET_BLOCKS(this->depth)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
outputGrads, depth, batchCount, betaGrads);
}
template <typename Dtype>
void BatchNormLayer<Dtype>::backpropagation() {
struct timespec startTime;
SPERF_START(BATCHNORM_LAYER_BWTIME, &startTime);
/*
* Consider a simple network layer:
*
*  <<<< ith layer >>>>                                  <<<< i+1th layer >>>>
*   ..... Xi  --Norm-->  ^Xi  --(gamma_i * ^Xi + beta_i)-->  Yi (=Xi+1) ........
*   .....  O ----------- O --------------------------------- O ........
*                                                  dL/dYi is already computed
*
* ( Xi  = input of the ith layer, Norm = normalization,
*   ^Xi = normalized input of the ith layer, gamma_i = scale factor, beta_i = shift factor,
*   Yi  = output of the ith layer and input of the i+1th layer,
*   L   = loss, dL/dYi = gradient coming from the i+1th layer )
*
* The BatchNormLayer learns gamma_i and beta_i, so it needs dL/dgamma_i and dL/dbeta_i.
* It also has to pass dL/dXi down to the previous layer.
*
* (For the derivation see the batch normalization paper,
*  https://arxiv.org/abs/1502.03167.)
*/
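/* A sketch of the quantities the steps below assemble (using the notation above;
* the kernels named in parentheses contain the exact code):
*   dL/dgamma_i = sum over the batch of dL/dYi * ^Xi   (ComputeScaleGrad)
*   dL/dbeta_i  = sum over the batch of dL/dYi         (ComputeShiftGrad)
* dL/dXi is built from dL/d^Xi plus the variance and mean gradients of
* steps (2) and (3).
*/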
// (1) dL/d^Xi = dL/dYi * gamma_i
computeNormInputGrad();
// (2) dL/dSquaredSigma
computeVarianceGrad();
// (3) dL/dMean
computeMeanGrad();
// (4) dL/dXi
computeInputGrad();
// (5) dL/dgamma_i
computeScaleGrad();
// (6) dL/dbeta_i
computeShiftGrad();
SPERF_END(BATCHNORM_LAYER_BWTIME, startTime);
}
template <typename Dtype>
void BatchNormLayer<Dtype>::applyChanges(LearnableLayer<Dtype> *targetLayer) {
return;
}
template <typename Dtype>
void BatchNormLayer<Dtype>::syncParams(LearnableLayer<Dtype> *targetLayer) {
return;
}
template BatchNormLayer<float>::~BatchNormLayer();
template void BatchNormLayer<float>::reshape();
template void BatchNormLayer<float>::update();
template void BatchNormLayer<float>::feedforward();
template void BatchNormLayer<float>::backpropagation();
template void BatchNormLayer<float>::applyChanges(LearnableLayer<float> *targetLayer);
template void BatchNormLayer<float>::syncParams(LearnableLayer<float> *targetLayer);
|
3267f047dbbc2f2e2bb118acc12607677d9ee7aa.cu
|
/**
* @file BatchNormLayer_device.cu
* @date 2017-01-25
* @author moonhoen lee
* @brief
* @details
*/
#include "cuda_runtime.h"
#include "BatchNormLayer.h"
#include "Network.h"
#include "SysLog.h"
#include "StdOutLog.h"
#include "ColdLog.h"
#include "Perf.h"
#include "MathFunctions.h"
#include "PropMgmt.h"
#include "Update.h"
#include "Updater.h"
#include "Donator.h"
#define BATCHCONDLAYER_LOG 0
using namespace std;
// FIXME: the kernel functions should be modified to run faster,
// e.g. by keeping intermediate results in memory.
///////////////////////////////////////////////////////////////////////////////////////////
// GPU Kernels
template <typename Dtype>
__global__ void FillValues(Dtype *vec, int size, Dtype value)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size)
return;
vec[idx] = value;
}
template <typename Dtype>
__global__ void CalcMean(const Dtype *input, int depth, int batchCount, Dtype *mean)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= depth)
return;
for (int i = 0 ; i < batchCount; i++) {
int index = i * depth + idx;
mean[idx] += input[index];
}
mean[idx] = mean[idx] / (Dtype)batchCount;
}
template <typename Dtype>
__global__ void CalcVariance(const Dtype *input, const Dtype* mean, int depth, int batchCount,
Dtype *variance)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= depth)
return;
for (int i = 0 ; i < batchCount; i++) {
int index = i * depth + idx;
variance[idx] += (input[index] - mean[idx]) * (input[index] - mean[idx]);
}
variance[idx] = variance[idx] / (Dtype)batchCount;
}
template <typename Dtype>
__global__ void Normalize(const Dtype *input, const Dtype* mean, const Dtype* variance,
const Dtype* gamma, const Dtype* beta, int depth, int batchCount, Dtype epsilon,
Dtype* normInput, Dtype* output)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int count = depth * batchCount;
if (idx >= count)
return;
int curDepth = idx % depth;
Dtype denominator = sqrtf(variance[curDepth] + epsilon);
normInput[idx] = (input[idx] - mean[curDepth]) / denominator;
output[idx] = normInput[idx] * gamma[curDepth] + beta[curDepth];
}
#define USE_SIMPLE_MOVING_AVERAGE 1
template <typename Dtype>
__global__ void IncrementalMean(const Dtype *input, int depth, const Dtype counter,
Dtype* output)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= depth)
return;
#if USE_SIMPLE_MOVING_AVERAGE
output[idx] = 0.99 * output[idx] + 0.01 * input[idx];
#else
output[idx] = ((counter - 1.0) * output[idx] + input[idx]) / counter;
#endif
}
template <typename Dtype>
__global__ void Inference(const Dtype *input, const Dtype *globalMean,
const Dtype *globalVar, const Dtype *gamma, const Dtype *beta, int depth,
int batchCount, const Dtype counter, Dtype epsilon, Dtype* output)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= depth)
return;
Dtype varFactor = 1.0;
if (counter > 1.1) {
varFactor = (Dtype)counter / ((Dtype)counter - 1.0);
}
Dtype sqrtVariance = sqrtf(globalVar[idx] * varFactor + epsilon);
for (int i = 0 ; i < batchCount; i++) {
int index = i * depth + idx;
output[index] = input[index] * gamma[idx] / sqrtVariance + beta[idx] -
gamma[idx] * globalMean[idx] / sqrtVariance;
}
}
template <typename Dtype>
__global__ void ComputeNormInputGrad(const Dtype *outputGrads, const Dtype *gammas, int depth,
int batchCount, Dtype* normInputGrads)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int count = depth * batchCount;
if (idx >= count)
return;
int curDepth = idx % depth;
normInputGrads[idx] = outputGrads[idx] * gammas[curDepth];
}
template <typename Dtype>
__global__ void ComputeVarianceGrad(const Dtype* normInputGrad, const Dtype *inputData,
const Dtype *mean, const Dtype *variance, Dtype epsilon, int depth, int batchCount,
Dtype* varianceGrad)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= depth)
return;
varianceGrad[idx] = 0;
Dtype poweredVar = (-0.5) * pow((variance[idx] + epsilon), -1.5);
for (int i = 0; i < batchCount; i++) {
int index = i * depth + idx;
varianceGrad[idx] += normInputGrad[index] * (inputData[index] - mean[idx]) *
poweredVar;
}
}
template <typename Dtype>
__global__ void ComputeMeanGrad(const Dtype *normInputGrads, const Dtype *vars,
const Dtype *varGrads, const Dtype* inputData, const Dtype* means, int depth,
int batchCount, Dtype epsilon, Dtype* meanGrads)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= depth)
return;
meanGrads[idx] = 0;
Dtype sqrtVar = (-1) / sqrtf(vars[idx] + epsilon);
Dtype varGradFactor = varGrads[idx] * (-2) / (Dtype)batchCount;
for (int i = 0; i < batchCount; i++) {
int index = i * depth + idx;
meanGrads[idx] += normInputGrads[index] * sqrtVar +
varGradFactor * (inputData[index] - means[idx]);
}
}
template <typename Dtype>
__global__ void ComputeInputGrad(const Dtype *normInputGrads, const Dtype *vars,
const Dtype *varGrads, const Dtype* inputData, const Dtype* means, const Dtype* meanGrads,
int depth, int batchCount, Dtype epsilon, Dtype* inputGrads)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= depth)
return;
Dtype sqrtVar = sqrtf(vars[idx] + epsilon);
Dtype varGradFactor = varGrads[idx] * 2 / (Dtype)batchCount;
Dtype meanFactor = meanGrads[idx] / (Dtype)batchCount;
for (int i = 0; i < batchCount; i++) {
int index = i * depth + idx;
inputGrads[index] = normInputGrads[index] / sqrtVar +
varGradFactor * (inputData[index] - means[idx]) + meanFactor;
}
}
template <typename Dtype>
__global__ void ComputeScaleGrad(const Dtype *normInputs, const Dtype *outputGrads,
int depth, int batchCount, Dtype* gammaGrads)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= depth)
return;
gammaGrads[idx] = 0;
for (int i = 0; i < batchCount; i++) {
int index = i * depth + idx;
gammaGrads[idx] += outputGrads[index] * normInputs[index];
}
}
template <typename Dtype>
__global__ void ComputeShiftGrad(const Dtype *outputGrads, int depth, int batchCount,
Dtype* betaGrads)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= depth)
return;
betaGrads[idx] = 0;
for (int i = 0; i < batchCount; i++) {
int index = i * depth + idx;
betaGrads[idx] += outputGrads[index];
}
}
template<typename Dtype>
BatchNormLayer<Dtype>::~BatchNormLayer() {
if (SLPROP_BASE(receive)) {
Donator<Dtype>::releaseReceiver(SLPROP_BASE(donatorID));
} else {
Util::clearVector(this->_params);
Util::clearVector(this->_paramsHistory);
Util::clearVector(this->_paramsHistory2);
}
SASSERT0(this->normInputSet != NULL);
SFREE(this->normInputSet);
this->updateParams.clear();
}
template <typename Dtype>
void BatchNormLayer<Dtype>::update() {
const uint32_t size = this->depth;
const Dtype regScale = SNPROP(weightDecay);
const Dtype learnScale = Update<float>::calcLearningRate();
const Dtype beta1 = SNPROP(beta1);
const Dtype beta2 = SNPROP(beta2);
SLPROP(BatchNorm, decayedBeta1) *= beta1;
SLPROP(BatchNorm, decayedBeta2) *= beta2;
UpdateContext context = Update<Dtype>::makeContext(size, regScale, learnScale);
SASSUME0(this->updateParams.size() == 2);
this->updateParams[Gamma].context = context;
this->updateParams[Beta].context = context;
Updater::updateParams(this->updateParams);
}
template <typename Dtype>
void BatchNormLayer<Dtype>::feedforward() {
reshape();
struct timespec startTime;
SPERF_START(BATCHNORM_LAYER_FWTIME, &startTime);
// FIXME: currently implemented as CPU code. Change it to GPU code.
// (1) Compute the mini-batch mean.
const vector<uint32_t>& inputShape = this->_inputData[0]->getShape();
int batchCount = inputShape[0];
const Dtype* inputData = this->_inputData[0]->device_data();
Dtype* outputData = this->_outputData[0]->mutable_device_data();
if (SLPROP(BatchNorm, train)) {
Dtype* means = this->meanSet->mutable_device_data();
Dtype* vars = this->varSet->mutable_device_data();
// (1) Initialize the mean and variance used for this mini-batch.
FillValues<<<SOOOA_GET_BLOCKS(this->depth), SOOOA_CUDA_NUM_THREADS>>>(
means, this->depth, 0.0f);
FillValues<<<SOOOA_GET_BLOCKS(this->depth), SOOOA_CUDA_NUM_THREADS>>>(
vars, this->depth, 0.0f);
// (2) Compute the mini-batch mean.
CalcMean<<<SOOOA_GET_BLOCKS(this->depth), SOOOA_CUDA_NUM_THREADS>>>(
inputData, this->depth, batchCount, means);
// (3) Compute the mini-batch variance.
CalcVariance<<<SOOOA_GET_BLOCKS(this->depth), SOOOA_CUDA_NUM_THREADS>>>(
inputData, means, this->depth, batchCount, vars);
// (4) normalize
Dtype* normInputs = this->normInputSet->mutable_device_data();
const Dtype* gammas = this->_params[ParamType::Gamma]->device_data();
const Dtype* betas = this->_params[ParamType::Beta]->device_data();
Normalize<<<SOOOA_GET_BLOCKS(this->depth * batchCount), SOOOA_CUDA_NUM_THREADS>>>(
inputData, means, vars, gammas, betas, this->depth, batchCount,
(Dtype)SLPROP(BatchNorm, epsilon), normInputs, outputData);
// (5) Update the global mean and variance sets.
Dtype* counter = this->_params[ParamType::GlobalCount]->mutable_host_data();
counter[0] += 1;
Dtype* globalMeans = this->_params[ParamType::GlobalMean]->mutable_device_data();
Dtype* globalVars = this->_params[ParamType::GlobalVar]->mutable_device_data();
IncrementalMean<<<SOOOA_GET_BLOCKS(this->depth), SOOOA_CUDA_NUM_THREADS>>>(
means, this->depth, counter[0], globalMeans);
IncrementalMean<<<SOOOA_GET_BLOCKS(this->depth), SOOOA_CUDA_NUM_THREADS>>>(
vars, this->depth, counter[0], globalVars);
} else {
const Dtype* counter = this->_params[ParamType::GlobalCount]->host_data();
SASSERT((counter[0] > 0), "the layer must be trained before running inference");
const Dtype* globalMeans = this->_params[ParamType::GlobalMean]->device_data();
const Dtype* globalVars = this->_params[ParamType::GlobalVar]->device_data();
const Dtype* gammas = this->_params[ParamType::Gamma]->device_data();
const Dtype* betas = this->_params[ParamType::Beta]->device_data();
Inference<<<SOOOA_GET_BLOCKS(this->depth), SOOOA_CUDA_NUM_THREADS>>>(
inputData, globalMeans, globalVars, gammas, betas, this->depth, batchCount,
counter[0], (Dtype)SLPROP(BatchNorm, epsilon), outputData);
}
SPERF_END(BATCHNORM_LAYER_FWTIME, startTime);
}
template <typename Dtype>
void BatchNormLayer<Dtype>::reshape() {
if (!Layer<Dtype>::_adjustInputShape()) {
const uint32_t count = Util::vecCountByAxis(this->_inputShape[0], 1);
const uint32_t inputDataCount = this->_inputData[0]->getCountByAxis(1);
assert(count == inputDataCount);
}
if (!Layer<Dtype>::_isInputShapeChanged(0))
return;
const vector<uint32_t>& inputShape = this->_inputData[0]->getShape();
uint32_t batches = inputShape[0];
uint32_t channels = inputShape[1];
uint32_t rows = inputShape[2];
uint32_t cols = inputShape[3];
uint32_t depth = this->_inputData[0]->getCountByAxis(1);
this->_inputShape[0] = {batches, channels, rows, cols};
this->_outputData[0]->reshape({batches, channels, rows, cols});
STDOUT_COND_LOG(BATCHCONDLAYER_LOG,
"<%s> layer's input-0 has been reshaped to: %dx%dx%dx%d\n",
SLPROP_BASE(name).c_str(), batches, channels, rows, cols);
STDOUT_COND_LOG(BATCHCONDLAYER_LOG,
"<%s> layer's output-0 has been reshaped to: %dx%dx%dx%d\n",
SLPROP_BASE(name).c_str(), batches, channels, rows, cols);
// Allocate memory for the structures required by the batch normalization pass.
if (this->depth == 0) {
this->depth = depth;
Optimizer opt = (Optimizer)SNPROP(optimizer);
int paramHistoryDataCount = Update<Dtype>::getParamHistoryDataCount(opt);
this->_params[ParamType::Gamma]->reshape({1, channels, rows, cols});
this->_params[ParamType::Beta]->reshape({1, channels, rows, cols});
this->_params[ParamType::GlobalMean]->reshape({1, channels, rows, cols});
this->_params[ParamType::GlobalVar]->reshape({1, channels, rows, cols});
this->_params[ParamType::GlobalCount]->reshape({1, 1, 1, 1});
if (paramHistoryDataCount >= 1) {
this->_paramsHistory[ParamType::Gamma]->reshape({1, channels, rows, cols});
this->_paramsHistory[ParamType::Beta]->reshape({1, channels, rows, cols});
this->_paramsHistory[ParamType::GlobalMean]->reshape({1, channels, rows, cols});
this->_paramsHistory[ParamType::GlobalVar]->reshape({1, channels, rows, cols});
this->_paramsHistory[ParamType::GlobalCount]->reshape({1, 1, 1, 1});
}
if (paramHistoryDataCount >= 2) {
this->_paramsHistory2[ParamType::Gamma]->reshape({1, channels, rows, cols});
this->_paramsHistory2[ParamType::Beta]->reshape({1, channels, rows, cols});
this->_paramsHistory2[ParamType::GlobalMean]->reshape({1, channels, rows, cols});
this->_paramsHistory2[ParamType::GlobalVar]->reshape({1, channels, rows, cols});
this->_paramsHistory2[ParamType::GlobalCount]->reshape({1, 1, 1, 1});
}
this->meanSet->reshape({1, channels, rows, cols});
this->varSet->reshape({1, channels, rows, cols});
this->normInputSet->reshape({batches, channels, rows, cols});
// FIXME: there may be a better initialization scheme.
Dtype* gammas = this->_params[ParamType::Gamma]->mutable_device_data();
FillValues<<<SOOOA_GET_BLOCKS(this->depth), SOOOA_CUDA_NUM_THREADS>>>(
gammas, this->depth, 1.0f);
this->_paramsInitialized[ParamType::Gamma] = true;
Dtype* betas = this->_params[ParamType::Beta]->mutable_device_data();
FillValues<<<SOOOA_GET_BLOCKS(this->depth), SOOOA_CUDA_NUM_THREADS>>>(
betas, this->depth, 0.0f);
this->_paramsInitialized[ParamType::Beta] = true;
Dtype* globalMeans = this->_params[ParamType::GlobalMean]->mutable_device_data();
FillValues<<<SOOOA_GET_BLOCKS(this->depth), SOOOA_CUDA_NUM_THREADS>>>(
globalMeans, this->depth, 0.0f);
this->_paramsInitialized[ParamType::GlobalMean] = true;
Dtype* globalVars = this->_params[ParamType::GlobalVar]->mutable_device_data();
FillValues<<<SOOOA_GET_BLOCKS(this->depth), SOOOA_CUDA_NUM_THREADS>>>(
globalVars, this->depth, 1.0f);
this->_paramsInitialized[ParamType::GlobalVar] = true;
Dtype* globalCounts = this->_params[ParamType::GlobalCount]->mutable_device_data();
// GlobalCount was reshaped to {1, 1, 1, 1}, so clear only its single element
// (filling this->depth elements here would write past the end of the buffer).
FillValues<<<SOOOA_GET_BLOCKS(1), SOOOA_CUDA_NUM_THREADS>>>(
globalCounts, 1, 0.0f);
this->_paramsInitialized[ParamType::GlobalCount] = true;
} else {
SASSERT0(this->depth == depth);
}
if (this->updateParams.size() == 0) {
UpdateParam upGamma;
upGamma.paramType = Gamma;
upGamma.paramDataPtr = (void*)this->_params[Gamma];
upGamma.paramHis1Ptr = (void*)this->_paramsHistory[Gamma];
upGamma.paramHis2Ptr = (void*)this->_paramsHistory2[Gamma];
this->updateParams.push_back(upGamma);
UpdateParam upBeta;
upBeta.paramType = Beta;
upBeta.paramDataPtr = (void*)this->_params[Beta];
upBeta.paramHis1Ptr = (void*)this->_paramsHistory[Beta];
upBeta.paramHis2Ptr = (void*)this->_paramsHistory2[Beta];
this->updateParams.push_back(upBeta);
}
}
template <typename Dtype>
void BatchNormLayer<Dtype>::computeNormInputGrad() {
const vector<uint32_t>& inputShape = this->_inputData[0]->getShape();
int batchCount = inputShape[0];
const Dtype* outputGrads = this->_outputData[0]->device_grad();
Dtype* normInputGrads = this->normInputSet->mutable_device_grad();
const Dtype* gammas = this->_params[ParamType::Gamma]->device_data();
ComputeNormInputGrad<<<SOOOA_GET_BLOCKS(this->depth * batchCount),
SOOOA_CUDA_NUM_THREADS>>>(
outputGrads, gammas, this->depth, batchCount, normInputGrads);
}
template <typename Dtype>
void BatchNormLayer<Dtype>::computeVarianceGrad() {
const vector<uint32_t>& inputShape = this->_inputData[0]->getShape();
int batchCount = inputShape[0];
const Dtype* inputData = this->_inputData[0]->device_data();
Dtype* varGrads = this->varSet->mutable_device_grad();
const Dtype* normInputGrads = this->normInputSet->device_grad();
const Dtype* means = this->meanSet->device_data();
const Dtype* vars = this->varSet->device_data();
ComputeVarianceGrad<<<SOOOA_GET_BLOCKS(this->depth), SOOOA_CUDA_NUM_THREADS>>>(
normInputGrads, inputData, means, vars, (Dtype)SLPROP(BatchNorm, epsilon), depth, batchCount,
varGrads);
}
template <typename Dtype>
void BatchNormLayer<Dtype>::computeMeanGrad() {
const vector<uint32_t>& inputShape = this->_inputData[0]->getShape();
int batchCount = inputShape[0];
const Dtype* inputData = this->_inputData[0]->device_data();
Dtype* meanGrads = this->meanSet->mutable_device_grad();
const Dtype* normInputGrads = this->normInputSet->device_grad();
const Dtype* vars = this->varSet->device_data();
const Dtype* varGrads = this->varSet->device_grad();
const Dtype* means = this->meanSet->device_data();
ComputeMeanGrad<<<SOOOA_GET_BLOCKS(this->depth), SOOOA_CUDA_NUM_THREADS>>>(
normInputGrads, vars, varGrads, inputData, means, depth, batchCount,
(Dtype)SLPROP(BatchNorm, epsilon), meanGrads);
}
template <typename Dtype>
void BatchNormLayer<Dtype>::computeInputGrad() {
const vector<uint32_t>& inputShape = this->_inputData[0]->getShape();
int batchCount = inputShape[0];
const Dtype* inputData = this->_inputData[0]->device_data();
Dtype* inputGrads = this->_inputData[0]->mutable_device_grad();
const Dtype* normInputGrads = this->normInputSet->device_grad();
const Dtype* vars = this->varSet->device_data();
const Dtype* varGrads = this->varSet->device_grad();
const Dtype* means = this->meanSet->device_data();
const Dtype* meanGrads = this->meanSet->device_grad();
ComputeInputGrad<<<SOOOA_GET_BLOCKS(this->depth), SOOOA_CUDA_NUM_THREADS>>>(
normInputGrads, vars, varGrads, inputData, means, meanGrads, depth, batchCount,
(Dtype)SLPROP(BatchNorm, epsilon), inputGrads);
}
template <typename Dtype>
void BatchNormLayer<Dtype>::computeScaleGrad() {
const vector<uint32_t>& inputShape = this->_inputData[0]->getShape();
int batchCount = inputShape[0];
const Dtype* outputGrads = this->_outputData[0]->device_grad();
Dtype* gammaGrads = this->_params[ParamType::Gamma]->mutable_device_grad();
const Dtype* normInputs = this->normInputSet->device_data();
ComputeScaleGrad<<<SOOOA_GET_BLOCKS(this->depth), SOOOA_CUDA_NUM_THREADS>>>(
normInputs, outputGrads, depth, batchCount, gammaGrads);
}
template <typename Dtype>
void BatchNormLayer<Dtype>::computeShiftGrad() {
const vector<uint32_t>& inputShape = this->_inputData[0]->getShape();
int batchCount = inputShape[0];
const Dtype* outputGrads = this->_outputData[0]->device_grad();
Dtype* betaGrads = this->_params[ParamType::Beta]->mutable_device_grad();
ComputeShiftGrad<<<SOOOA_GET_BLOCKS(this->depth), SOOOA_CUDA_NUM_THREADS>>>(
outputGrads, depth, batchCount, betaGrads);
}
template <typename Dtype>
void BatchNormLayer<Dtype>::backpropagation() {
struct timespec startTime;
SPERF_START(BATCHNORM_LAYER_BWTIME, &startTime);
/*
* Assume a simple network layer like the one below.
*
* <<<< ith layer >>>> <<<< i+1th layer >>>>
* ..... Xi Norm ^Xi γi * ^Xi + βi Yi (=Xi+1) ........
* ..... O --------- O --------------------- O ........
* dL/dYi is already computed
*
* (※ Xi = input of the ith layer, Norm = normalization
* ^Xi = intermediate value of the ith layer, γi = scale factor, βi = shift factor
* Yi = output of the ith layer, which is also the input of the i+1th layer
* L = loss, dL/dYi = the gradient already computed in the i+1th layer)
*
* The BatchNormLayer has to learn γi and βi, so dL/dγi and dL/dβi must be computed.
* In addition, dL/dXi is needed to pass the gradient down to the lower layer.
*
* The paper (https://arxiv.org/abs/1502.03167) gives a formula for each of these
* quantities, so the computation below follows them.
*/
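/*
* For reference, the gradients from the paper that the kernels above implement
* (m = batchCount, μ = mini-batch mean, σ² = mini-batch variance, ε = epsilon):
* dL/d^Xi = dL/dYi * γi
* dL/dσ² = Σ_i dL/d^Xi * (Xi - μ) * (-1/2) * (σ² + ε)^(-3/2)
* dL/dμ = Σ_i dL/d^Xi * (-1/√(σ² + ε)) + dL/dσ² * (Σ_i -2(Xi - μ)) / m
* dL/dXi = dL/d^Xi / √(σ² + ε) + dL/dσ² * 2(Xi - μ) / m + dL/dμ / m
* dL/dγi = Σ_i dL/dYi * ^Xi
* dL/dβi = Σ_i dL/dYi
*/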
// (1) dL/d^Xi = dL/dYi * γi
computeNormInputGrad();
// (2) dL/dSquaredSigma
computeVarianceGrad();
// (3) dL/dMean
computeMeanGrad();
// (4) dL/dXi
computeInputGrad();
// (5) dL/dγi
computeScaleGrad();
// (6) dL/dβi
computeShiftGrad();
SPERF_END(BATCHNORM_LAYER_BWTIME, startTime);
}
template <typename Dtype>
void BatchNormLayer<Dtype>::applyChanges(LearnableLayer<Dtype> *targetLayer) {
return;
}
template <typename Dtype>
void BatchNormLayer<Dtype>::syncParams(LearnableLayer<Dtype> *targetLayer) {
return;
}
template BatchNormLayer<float>::~BatchNormLayer();
template void BatchNormLayer<float>::reshape();
template void BatchNormLayer<float>::update();
template void BatchNormLayer<float>::feedforward();
template void BatchNormLayer<float>::backpropagation();
template void BatchNormLayer<float>::applyChanges(LearnableLayer<float> *targetLayer);
template void BatchNormLayer<float>::syncParams(LearnableLayer<float> *targetLayer);
|
3130c85132e54efea0e51a8f674cdcc309524b73.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_vector_types.h>
#include <hip/hip_runtime.h>
#define EPS2 1.0E-9
__device__ float3
bodyBodyInteraction(float4 bi, float4 bj, float3 ai)
{
float3 r;
// r_ij [3 FLOPS]
r.x = bj.x - bi.x;
r.y = bj.y - bi.y;
r.z = bj.z - bi.z;
// distSqr = dot(r_ij, r_ij) + EPS^2 [6 FLOPS]
float distSqr = r.x * r.x + r.y * r.y + r.z * r.z + EPS2;
// invDistCube =1/distSqr^(3/2) [4 FLOPS (2 mul, 1 sqrt, 1 inv)]
float distSixth = distSqr * distSqr * distSqr;
float invDistCube = 1.0f/sqrtf(distSixth);
// s = m_j * invDistCube [1 FLOP]
float s = bj.w * invDistCube;
//a_i= a_i+s*r_ij[6FLOPS]
ai.x += r.x * s;
ai.y += r.y * s;
ai.z += r.z * s;
printf("ai.x : %f\n", ai.x);
return ai;
}
// __device__ float3
// tile_calculation(float4 myPosition, float3 accel)
// {
// int i;
// extern __shared__ float4 shPosition[];
// for (i = 0; i < blockDim.x; i++) {
// accel = bodyBodyInteraction(myPosition, shPosition[i], accel);
// }
// return accel;
// }
__global__ void
calculate_forces(void *devX, void *devA, int N)
{
extern __shared__ float4 shPosition[];
float4 *globalX = (float4 *)devX;
float4 *globalA = (float4 *)devA;
float4 myPosition;
int i, tile;
float3 acc = {0.0f, 0.0f, 0.0f};
int gtid = blockIdx.x * blockDim.x + threadIdx.x;
int p = blockDim.x;
myPosition = globalX[gtid];
for (i = 0, tile = 0; i < N; i += p, tile++) {
int idx = tile * blockDim.x + threadIdx.x;
shPosition[threadIdx.x] = globalX[idx];
__syncthreads();
// use a separate inner index so the outer tiling loop variable i is not clobbered
for (int j = 0; j < blockDim.x; j++) {
acc = bodyBodyInteraction(myPosition, shPosition[j], acc);
printf("%f\n", globalX[j].x);
}
__syncthreads();
}
// Save the result in global memory for the integration step.
float4 acc4 = {acc.x, acc.y, acc.z, 0.0f};
globalA[gtid] = acc4;
}
|
3130c85132e54efea0e51a8f674cdcc309524b73.cu
|
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <vector_types.h>
#include <cuda_runtime.h>
#define EPS2 1.0E-9
__device__ float3
bodyBodyInteraction(float4 bi, float4 bj, float3 ai)
{
float3 r;
// r_ij [3 FLOPS]
r.x = bj.x - bi.x;
r.y = bj.y - bi.y;
r.z = bj.z - bi.z;
// distSqr = dot(r_ij, r_ij) + EPS^2 [6 FLOPS]
float distSqr = r.x * r.x + r.y * r.y + r.z * r.z + EPS2;
// invDistCube =1/distSqr^(3/2) [4 FLOPS (2 mul, 1 sqrt, 1 inv)]
float distSixth = distSqr * distSqr * distSqr;
float invDistCube = 1.0f/sqrtf(distSixth);
// s = m_j * invDistCube [1 FLOP]
float s = bj.w * invDistCube;
//a_i= a_i+s*r_ij[6FLOPS]
ai.x += r.x * s;
ai.y += r.y * s;
ai.z += r.z * s;
printf("ai.x : %f\n", ai.x);
return ai;
}
// __device__ float3
// tile_calculation(float4 myPosition, float3 accel)
// {
// int i;
// extern __shared__ float4 shPosition[];
// for (i = 0; i < blockDim.x; i++) {
// accel = bodyBodyInteraction(myPosition, shPosition[i], accel);
// }
// return accel;
// }
__global__ void
calculate_forces(void *devX, void *devA, int N)
{
extern __shared__ float4 shPosition[];
float4 *globalX = (float4 *)devX;
float4 *globalA = (float4 *)devA;
float4 myPosition;
int i, tile;
float3 acc = {0.0f, 0.0f, 0.0f};
int gtid = blockIdx.x * blockDim.x + threadIdx.x;
int p = blockDim.x;
myPosition = globalX[gtid];
for (i = 0, tile = 0; i < N; i += p, tile++) {
int idx = tile * blockDim.x + threadIdx.x;
shPosition[threadIdx.x] = globalX[idx];
__syncthreads();
// use a separate inner index so the outer tiling loop variable i is not clobbered
for (int j = 0; j < blockDim.x; j++) {
acc = bodyBodyInteraction(myPosition, shPosition[j], acc);
printf("%f\n", globalX[j].x);
}
__syncthreads();
}
// Save the result in global memory for the integration step.
float4 acc4 = {acc.x, acc.y, acc.z, 0.0f};
globalA[gtid] = acc4;
}
|
aa9ab7c58aa57361bcacad0f143c4b05a95dae32.hip
|
// !!! This is a file automatically generated by hipify!!!
//sys
#include <cmath>
#include <stdio.h>
#include <cassert>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <string.h>
//my
#include "detect.h"
#define NV_CUDA_CHECK(status) \
{ \
if (status != 0) \
{ \
std::cout << "Cuda failure: " << hipGetErrorString(status) << " in file " << __FILE__ \
<< " at line " << __LINE__ << std::endl; \
abort(); \
} \
}
namespace nvinfer1
{
Detect::Detect(const void* data, size_t length)
{
const char *d = reinterpret_cast<const char*>(data), *a = d;
read(d,_n_anchor);
read(d,_n_classes);
read(d,_n_grid_h);
read(d, _n_grid_w);
read(d, _n_output_size);
//printf("anchor:%d,classes:%d,gh:%d,gw:%d,size:%d\n", _n_anchor, _n_classes, _n_grid_h, _n_grid_w, _n_output_size);
assert(d == a + length);
}
Detect::Detect(const uint32_t n_anchor_, const uint32_t n_classes_,
const uint32_t n_grid_h_, const uint32_t n_grid_w_/*,
const uint32_t &n_stride_h_, const uint32_t &n_stride_w_*/):
_n_anchor(n_anchor_),
_n_classes(n_classes_),
_n_grid_h(n_grid_h_),
_n_grid_w(n_grid_w_)
{
_n_output_size = (5 + _n_classes)*_n_anchor*_n_grid_h*_n_grid_w;
}
inline __device__ float sigmoidGPU(const float& x)
{
return 1.0f / (1.0f + __expf(-x));
}
__global__ void gpu_detect_layer(const float *input_,
float* output_,
const uint32_t n_grid_h_,
const uint32_t n_grid_w_,
const uint32_t n_classes_,
const uint32_t n_anchor_)
{
uint32_t x_id = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t y_id = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t z_id = blockIdx.z * blockDim.z + threadIdx.z;
if ((x_id >= n_grid_w_) || (y_id >= n_grid_h_) || (z_id >= n_anchor_))
{
return;
}
// printf("grid_h:%d,grid_w:%d,class:%d,anchor:%d\n", n_grid_h_, n_grid_w_, n_classes_, n_anchor_);
const int numGridCells = n_grid_h_ * n_grid_w_;
const int bbindex = y_id * n_grid_w_ + x_id;
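// Decode each anchor's box prediction: x/y offsets as 2*sigmoid(t) - 0.5 and
// w/h as (2*sigmoid(t))^2, followed by plain sigmoids for the objectness and
// class scores; this is the box decoding used by YOLOv5-style models.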
output_[bbindex + numGridCells * (z_id * (5 + n_classes_) + 0)]
= 2.f * sigmoidGPU(input_[bbindex + numGridCells * (z_id * (5 + n_classes_) + 0)])-0.5f;
output_[bbindex + numGridCells * (z_id * (5 + n_classes_) + 1)]
= 2.f * sigmoidGPU(input_[bbindex + numGridCells * (z_id * (5 + n_classes_) + 1)])-0.5f;
float w = 2.f * sigmoidGPU(input_[bbindex + numGridCells * (z_id * (5 + n_classes_) + 2)]);
output_[bbindex + numGridCells * (z_id * (5 + n_classes_) + 2)]
= w*w;
float h = 2.f* sigmoidGPU(input_[bbindex + numGridCells * (z_id * (5 + n_classes_) + 3)]);
output_[bbindex + numGridCells * (z_id * (5 + n_classes_) + 3)]
= h*h;
output_[bbindex + numGridCells * (z_id * (5 + n_classes_) + 4)]
= sigmoidGPU(input_[bbindex + numGridCells * (z_id * (5 + n_classes_) + 4)]);
for (uint32_t i = 0; i < n_classes_; ++i)
{
output_[bbindex + numGridCells * (z_id * (5 + n_classes_) + (5 + i))]
= sigmoidGPU(input_[bbindex + numGridCells * (z_id * (5 + n_classes_) + (5 + i))]);
}
}
hipError_t cuda_detect_layer(const void* input_,
void* output_,
const uint32_t& batch_size_,
const uint32_t& grid_h_,
const uint32_t& grid_w_,
const uint32_t& n_classes_,
const uint32_t& n_anchor_,
uint64_t n_output_size_,
hipStream_t stream_)
{
dim3 threads_per_block(16, 16, 4);
dim3 number_of_blocks((grid_w_ / threads_per_block.x) + 1,
(grid_h_ / threads_per_block.y) + 1,
(n_anchor_ / threads_per_block.z) + 1);
for (int batch = 0; batch < batch_size_; ++batch)
{
gpu_detect_layer << <number_of_blocks, threads_per_block, 0, stream_ >> >(
reinterpret_cast<const float*>(input_) + (batch * n_output_size_),
reinterpret_cast<float*>(output_) + (batch * n_output_size_),
grid_h_,
grid_w_,
n_classes_,
n_anchor_);
}
return hipGetLastError();
}
int Detect::enqueue(int batchSize,
const void* const* inputs,
void* const* outputs,
void* workspace,
hipStream_t stream) noexcept
{
NV_CUDA_CHECK(cuda_detect_layer(inputs[0], outputs[0], batchSize, _n_grid_h, _n_grid_w, _n_classes, _n_anchor, _n_output_size, stream));
return 0;
}
int Detect::enqueue(int batchSize,
const void* const* inputs,
void** outputs,
void* workspace,
hipStream_t stream) noexcept
{
return enqueue(batchSize, inputs, (void* const*)outputs, workspace, stream);
}
bool Detect::supportsFormat(DataType type, PluginFormat format) const noexcept
{
return (type == DataType::kFLOAT && format == PluginFormat::kLINEAR);
}
void Detect::configureWithFormat(const Dims* inputDims, int nbInputs, const Dims* outputDims, int nbOutputs, DataType type, PluginFormat format, int maxBatchSize) noexcept
{
}
size_t Detect::getSerializationSize() const noexcept
{
return sizeof(_n_anchor) + sizeof(_n_classes) + sizeof(_n_grid_h) + sizeof(_n_grid_w)
+ sizeof(_n_output_size);
}
void Detect::serialize(void *buffer) const noexcept
{
char *d = static_cast<char*>(buffer), *a = d;
write(d,_n_anchor);
write(d, _n_classes);
write(d, _n_grid_h);
write(d, _n_grid_w);
write(d, _n_output_size);
assert(d == a + getSerializationSize());
}
void Detect::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
IPluginV2* Detect::clone() const noexcept
{
Detect *p = new Detect(_n_anchor,_n_classes,_n_grid_h,_n_grid_w);
p->setPluginNamespace(_s_plugin_namespace.c_str());
return p;
}
//
PluginFieldCollection DetectPluginCreator::_fc{};
std::vector<PluginField> DetectPluginCreator::_vec_plugin_attributes;
DetectPluginCreator::DetectPluginCreator()
{
_vec_plugin_attributes.clear();
_fc.nbFields = _vec_plugin_attributes.size();
_fc.fields = _vec_plugin_attributes.data();
}
const char* DetectPluginCreator::getPluginName() const noexcept
{
return "DETECT_TRT";
}
const char* DetectPluginCreator::getPluginVersion() const noexcept
{
return "1.0";
}
const PluginFieldCollection* DetectPluginCreator::getFieldNames() noexcept
{
return &_fc;
}
IPluginV2* DetectPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) noexcept
{
Detect* obj = new Detect();
obj->setPluginNamespace(_s_name_space.c_str());
return obj;
}
IPluginV2* DetectPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) noexcept
{
Detect* obj = new Detect(serialData, serialLength);
obj->setPluginNamespace(_s_name_space.c_str());
return obj;
}
void DetectPluginCreator::setPluginNamespace(const char* libNamespace) noexcept
{
_s_name_space = libNamespace;
}
const char* DetectPluginCreator::getPluginNamespace() const noexcept
{
return _s_name_space.c_str();
}
}//end namespace nvinfer1
|
aa9ab7c58aa57361bcacad0f143c4b05a95dae32.cu
|
//sys
#include <cmath>
#include <stdio.h>
#include <cassert>
#include <iostream>
#include <cuda_runtime.h>
#include <cuda.h>
#include <stdint.h>
#include <string.h>
//my
#include "detect.h"
#define NV_CUDA_CHECK(status) \
{ \
if (status != 0) \
{ \
std::cout << "Cuda failure: " << cudaGetErrorString(status) << " in file " << __FILE__ \
<< " at line " << __LINE__ << std::endl; \
abort(); \
} \
}
namespace nvinfer1
{
Detect::Detect(const void* data, size_t length)
{
const char *d = reinterpret_cast<const char*>(data), *a = d;
read(d,_n_anchor);
read(d,_n_classes);
read(d,_n_grid_h);
read(d, _n_grid_w);
read(d, _n_output_size);
//printf("anchor:%d,classes:%d,gh:%d,gw:%d,size:%d\n", _n_anchor, _n_classes, _n_grid_h, _n_grid_w, _n_output_size);
assert(d == a + length);
}
Detect::Detect(const uint32_t n_anchor_, const uint32_t n_classes_,
const uint32_t n_grid_h_, const uint32_t n_grid_w_/*,
const uint32_t &n_stride_h_, const uint32_t &n_stride_w_*/):
_n_anchor(n_anchor_),
_n_classes(n_classes_),
_n_grid_h(n_grid_h_),
_n_grid_w(n_grid_w_)
{
_n_output_size = (5 + _n_classes)*_n_anchor*_n_grid_h*_n_grid_w;
}
inline __device__ float sigmoidGPU(const float& x)
{
return 1.0f / (1.0f + __expf(-x));
}
__global__ void gpu_detect_layer(const float *input_,
float* output_,
const uint32_t n_grid_h_,
const uint32_t n_grid_w_,
const uint32_t n_classes_,
const uint32_t n_anchor_)
{
uint32_t x_id = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t y_id = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t z_id = blockIdx.z * blockDim.z + threadIdx.z;
if ((x_id >= n_grid_w_) || (y_id >= n_grid_h_) || (z_id >= n_anchor_))
{
return;
}
// printf("grid_h:%d,grid_w:%d,class:%d,anchor:%d\n", n_grid_h_, n_grid_w_, n_classes_, n_anchor_);
const int numGridCells = n_grid_h_ * n_grid_w_;
const int bbindex = y_id * n_grid_w_ + x_id;
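// Decode each anchor's box prediction: x/y offsets as 2*sigmoid(t) - 0.5 and
// w/h as (2*sigmoid(t))^2, followed by plain sigmoids for the objectness and
// class scores; this is the box decoding used by YOLOv5-style models.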
output_[bbindex + numGridCells * (z_id * (5 + n_classes_) + 0)]
= 2.f * sigmoidGPU(input_[bbindex + numGridCells * (z_id * (5 + n_classes_) + 0)])-0.5f;
output_[bbindex + numGridCells * (z_id * (5 + n_classes_) + 1)]
= 2.f * sigmoidGPU(input_[bbindex + numGridCells * (z_id * (5 + n_classes_) + 1)])-0.5f;
float w = 2.f * sigmoidGPU(input_[bbindex + numGridCells * (z_id * (5 + n_classes_) + 2)]);
output_[bbindex + numGridCells * (z_id * (5 + n_classes_) + 2)]
= w*w;
float h = 2.f* sigmoidGPU(input_[bbindex + numGridCells * (z_id * (5 + n_classes_) + 3)]);
output_[bbindex + numGridCells * (z_id * (5 + n_classes_) + 3)]
= h*h;
output_[bbindex + numGridCells * (z_id * (5 + n_classes_) + 4)]
= sigmoidGPU(input_[bbindex + numGridCells * (z_id * (5 + n_classes_) + 4)]);
for (uint32_t i = 0; i < n_classes_; ++i)
{
output_[bbindex + numGridCells * (z_id * (5 + n_classes_) + (5 + i))]
= sigmoidGPU(input_[bbindex + numGridCells * (z_id * (5 + n_classes_) + (5 + i))]);
}
}
cudaError_t cuda_detect_layer(const void* input_,
void* output_,
const uint32_t& batch_size_,
const uint32_t& grid_h_,
const uint32_t& grid_w_,
const uint32_t& n_classes_,
const uint32_t& n_anchor_,
uint64_t n_output_size_,
cudaStream_t stream_)
{
dim3 threads_per_block(16, 16, 4);
dim3 number_of_blocks((grid_w_ / threads_per_block.x) + 1,
(grid_h_ / threads_per_block.y) + 1,
(n_anchor_ / threads_per_block.z) + 1);
for (int batch = 0; batch < batch_size_; ++batch)
{
gpu_detect_layer << <number_of_blocks, threads_per_block, 0, stream_ >> >(
reinterpret_cast<const float*>(input_) + (batch * n_output_size_),
reinterpret_cast<float*>(output_) + (batch * n_output_size_),
grid_h_,
grid_w_,
n_classes_,
n_anchor_);
}
return cudaGetLastError();
}
int Detect::enqueue(int batchSize,
const void* const* inputs,
void* const* outputs,
void* workspace,
cudaStream_t stream) noexcept
{
NV_CUDA_CHECK(cuda_detect_layer(inputs[0], outputs[0], batchSize, _n_grid_h, _n_grid_w, _n_classes, _n_anchor, _n_output_size, stream));
return 0;
}
int Detect::enqueue(int batchSize,
const void* const* inputs,
void** outputs,
void* workspace,
cudaStream_t stream) noexcept
{
return enqueue(batchSize, inputs, (void* const*)outputs, workspace, stream);
}
bool Detect::supportsFormat(DataType type, PluginFormat format) const noexcept
{
return (type == DataType::kFLOAT && format == PluginFormat::kLINEAR);
}
void Detect::configureWithFormat(const Dims* inputDims, int nbInputs, const Dims* outputDims, int nbOutputs, DataType type, PluginFormat format, int maxBatchSize) noexcept
{
}
size_t Detect::getSerializationSize() const noexcept
{
return sizeof(_n_anchor) + sizeof(_n_classes) + sizeof(_n_grid_h) + sizeof(_n_grid_w)
+ sizeof(_n_output_size);
}
void Detect::serialize(void *buffer) const noexcept
{
char *d = static_cast<char*>(buffer), *a = d;
write(d,_n_anchor);
write(d, _n_classes);
write(d, _n_grid_h);
write(d, _n_grid_w);
write(d, _n_output_size);
assert(d == a + getSerializationSize());
}
void Detect::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
IPluginV2* Detect::clone() const noexcept
{
Detect *p = new Detect(_n_anchor,_n_classes,_n_grid_h,_n_grid_w);
p->setPluginNamespace(_s_plugin_namespace.c_str());
return p;
}
//
PluginFieldCollection DetectPluginCreator::_fc{};
std::vector<PluginField> DetectPluginCreator::_vec_plugin_attributes;
DetectPluginCreator::DetectPluginCreator()
{
_vec_plugin_attributes.clear();
_fc.nbFields = _vec_plugin_attributes.size();
_fc.fields = _vec_plugin_attributes.data();
}
const char* DetectPluginCreator::getPluginName() const noexcept
{
return "DETECT_TRT";
}
const char* DetectPluginCreator::getPluginVersion() const noexcept
{
return "1.0";
}
const PluginFieldCollection* DetectPluginCreator::getFieldNames() noexcept
{
return &_fc;
}
IPluginV2* DetectPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) noexcept
{
Detect* obj = new Detect();
obj->setPluginNamespace(_s_name_space.c_str());
return obj;
}
IPluginV2* DetectPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) noexcept
{
Detect* obj = new Detect(serialData, serialLength);
obj->setPluginNamespace(_s_name_space.c_str());
return obj;
}
void DetectPluginCreator::setPluginNamespace(const char* libNamespace) noexcept
{
_s_name_space = libNamespace;
}
const char* DetectPluginCreator::getPluginNamespace() const noexcept
{
return _s_name_space.c_str();
}
}//end namespace nvinfer1
|
7c667fe9d52109cae2b15ef93e07dbb3e896c684.hip
|
// !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=32 --gridDim=1
#include <hip/hip_runtime.h>
__global__ void race (int* A)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = blockDim.x * bid + tid;
int temp = 10;
A[idx] = temp;
temp = A[idx + 1];
}
|
7c667fe9d52109cae2b15ef93e07dbb3e896c684.cu
|
//pass
//--blockDim=32 --gridDim=1
#include <cuda.h>
__global__ void race (int* A)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = blockDim.x * bid + tid;
int temp = 10;
A[idx] = temp;
temp = A[idx + 1];
}
|
09db71c90529e7be1cf0fdf252853dace74400f3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <thrust/device_vector.h>
//64
#define N 64
//32 // Threads per block
#define TPB 32
__device__ float scale(int i, int n)
{
return ((float)i) / (n - 1);
}
__device__ float distance(float x1, float x2)
{
return sqrt((x2 - x1) * (x2 - x1));
}
// __global__ void distanceKernel( float *d_out, float ref, int len )
// {
// const int i = blockIdx.x*blockDim.x + threadIdx.x;
// const float x = scale( i, len );
// d_out[i] = distance( x, ref );
// printf( "i = %2d: dist from %f to %f is %f.\n", i, ref, x, d_out[i] );
// }
__global__ void distanceKernel(float *d_out, float ref, int len)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const float x = scale(i, len);
printf("Hello from block %2d (%2d), thread %2d\n", blockIdx.x, blockDim.x, threadIdx.x);
// printf( "asdf" );
for (int j = 0; j < 1000000; ++j)
{
d_out[i] = distance(x, ref);
// printf( "i = %2d: dist from %f to %f is %f.\n", i, ref, x, d_out[i] );
}
}
int main()
{
const float ref = 0.5f;
const int repeat = 1;
printf("Hello World\n");
// Pointer for an array of floats (initialized to zero - null)
float *d_out = 0;
// Allocate device memory to store the output array
hipMalloc(&d_out, N * sizeof(float));
// Launch kernel to compute and store distance values
for (int i = 0; i < repeat; ++i)
{
// distanceKernel<<<N/TPB, TPB>>>( d_out, ref, N );
hipLaunchKernelGGL(( distanceKernel), dim3(2), dim3(32), 0, 0, d_out, ref, N);
}
// Free Memory
hipFree(d_out);
printf("Bye\n");
return 0;
}
|
09db71c90529e7be1cf0fdf252853dace74400f3.cu
|
#include <stdio.h>
#include <thrust/device_vector.h>
//64
#define N 64
//32 // Threads per block
#define TPB 32
__device__ float scale(int i, int n)
{
return ((float)i) / (n - 1);
}
__device__ float distance(float x1, float x2)
{
return sqrt((x2 - x1) * (x2 - x1));
}
// __global__ void distanceKernel( float *d_out, float ref, int len )
// {
// const int i = blockIdx.x*blockDim.x + threadIdx.x;
// const float x = scale( i, len );
// d_out[i] = distance( x, ref );
// printf( "i = %2d: dist from %f to %f is %f.\n", i, ref, x, d_out[i] );
// }
__global__ void distanceKernel(float *d_out, float ref, int len)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const float x = scale(i, len);
printf("Hello from block %2d (%2d), thread %2d\n", blockIdx.x, blockDim.x, threadIdx.x);
// printf( "asdf" );
for (int j = 0; j < 1000000; ++j)
{
d_out[i] = distance(x, ref);
// printf( "i = %2d: dist from %f to %f is %f.\n", i, ref, x, d_out[i] );
}
}
int main()
{
const float ref = 0.5f;
const int repeat = 1;
printf("Hello World\n");
// Pointer for an array of floats (initialized to zero - null)
float *d_out = 0;
// Allocate device memory to store the output array
cudaMalloc(&d_out, N * sizeof(float));
// Launch kernel to compute and store distance values
for (int i = 0; i < repeat; ++i)
{
// distanceKernel<<<N/TPB, TPB>>>( d_out, ref, N );
distanceKernel<<<2, 32>>>(d_out, ref, N);
}
// Free Memory
cudaFree(d_out);
printf("Bye\n");
return 0;
}
|
e758bf609331d940a1f2bbc37fbb381be0d12b26.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel4_plus_2_b;
int xdim0_update_halo_kernel4_plus_2_b_h = -1;
__constant__ int ydim0_update_halo_kernel4_plus_2_b;
int ydim0_update_halo_kernel4_plus_2_b_h = -1;
__constant__ int xdim1_update_halo_kernel4_plus_2_b;
int xdim1_update_halo_kernel4_plus_2_b_h = -1;
__constant__ int ydim1_update_halo_kernel4_plus_2_b;
int ydim1_update_halo_kernel4_plus_2_b_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel4_plus_2_b * (y) + \
xdim0_update_halo_kernel4_plus_2_b * ydim0_update_halo_kernel4_plus_2_b * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel4_plus_2_b * (y) + \
xdim1_update_halo_kernel4_plus_2_b * ydim1_update_halo_kernel4_plus_2_b * \
(z))
// user function
__device__
inline void
update_halo_kernel4_plus_2_b_gpu(double *vol_flux_y, double *mass_flux_y,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Y] == 1)
vol_flux_y[OPS_ACC0(0, 0, 0)] = vol_flux_y[OPS_ACC0(-2, 0, 0)];
if (fields[FIELD_MASS_FLUX_Y] == 1)
mass_flux_y[OPS_ACC1(0, 0, 0)] = mass_flux_y[OPS_ACC1(-2, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel4_plus_2_b(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel4_plus_2_b +
idx_z * 1 * 1 * xdim0_update_halo_kernel4_plus_2_b *
ydim0_update_halo_kernel4_plus_2_b;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel4_plus_2_b +
idx_z * 1 * 1 * xdim1_update_halo_kernel4_plus_2_b *
ydim1_update_halo_kernel4_plus_2_b;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel4_plus_2_b_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel4_plus_2_b(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel4_plus_2_b_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 79))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(79, "update_halo_kernel4_plus_2_b");
OPS_kernels[79].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel4_plus_2_b_h ||
ydim0 != ydim0_update_halo_kernel4_plus_2_b_h ||
xdim1 != xdim1_update_halo_kernel4_plus_2_b_h ||
ydim1 != ydim1_update_halo_kernel4_plus_2_b_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel4_plus_2_b, &xdim0, sizeof(int));
xdim0_update_halo_kernel4_plus_2_b_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel4_plus_2_b, &ydim0, sizeof(int));
ydim0_update_halo_kernel4_plus_2_b_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel4_plus_2_b, &xdim1, sizeof(int));
xdim1_update_halo_kernel4_plus_2_b_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel4_plus_2_b, &ydim1, sizeof(int));
ydim1_update_halo_kernel4_plus_2_b_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[79].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel4_plus_2_b), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[79].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[79].mpi_time += t2 - t1;
OPS_kernels[79].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[79].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel4_plus_2_b(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 79;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 79;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel4_plus_2_b_execute;
if (OPS_diags > 1) {
ops_timing_realloc(79, "update_halo_kernel4_plus_2_b");
}
ops_enqueue_kernel(desc);
}
#endif
|
e758bf609331d940a1f2bbc37fbb381be0d12b26.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel4_plus_2_b;
int xdim0_update_halo_kernel4_plus_2_b_h = -1;
__constant__ int ydim0_update_halo_kernel4_plus_2_b;
int ydim0_update_halo_kernel4_plus_2_b_h = -1;
__constant__ int xdim1_update_halo_kernel4_plus_2_b;
int xdim1_update_halo_kernel4_plus_2_b_h = -1;
__constant__ int ydim1_update_halo_kernel4_plus_2_b;
int ydim1_update_halo_kernel4_plus_2_b_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel4_plus_2_b * (y) + \
xdim0_update_halo_kernel4_plus_2_b * ydim0_update_halo_kernel4_plus_2_b * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel4_plus_2_b * (y) + \
xdim1_update_halo_kernel4_plus_2_b * ydim1_update_halo_kernel4_plus_2_b * \
(z))
// user function
__device__
inline void
update_halo_kernel4_plus_2_b_gpu(double *vol_flux_y, double *mass_flux_y,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Y] == 1)
vol_flux_y[OPS_ACC0(0, 0, 0)] = vol_flux_y[OPS_ACC0(-2, 0, 0)];
if (fields[FIELD_MASS_FLUX_Y] == 1)
mass_flux_y[OPS_ACC1(0, 0, 0)] = mass_flux_y[OPS_ACC1(-2, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel4_plus_2_b(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel4_plus_2_b +
idx_z * 1 * 1 * xdim0_update_halo_kernel4_plus_2_b *
ydim0_update_halo_kernel4_plus_2_b;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel4_plus_2_b +
idx_z * 1 * 1 * xdim1_update_halo_kernel4_plus_2_b *
ydim1_update_halo_kernel4_plus_2_b;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel4_plus_2_b_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel4_plus_2_b(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel4_plus_2_b_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 79))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(79, "update_halo_kernel4_plus_2_b");
OPS_kernels[79].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel4_plus_2_b_h ||
ydim0 != ydim0_update_halo_kernel4_plus_2_b_h ||
xdim1 != xdim1_update_halo_kernel4_plus_2_b_h ||
ydim1 != ydim1_update_halo_kernel4_plus_2_b_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel4_plus_2_b, &xdim0, sizeof(int));
xdim0_update_halo_kernel4_plus_2_b_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel4_plus_2_b, &ydim0, sizeof(int));
ydim0_update_halo_kernel4_plus_2_b_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel4_plus_2_b, &xdim1, sizeof(int));
xdim1_update_halo_kernel4_plus_2_b_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel4_plus_2_b, &ydim1, sizeof(int));
ydim1_update_halo_kernel4_plus_2_b_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[79].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel4_plus_2_b<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[79].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[79].mpi_time += t2 - t1;
OPS_kernels[79].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[79].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel4_plus_2_b(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 79;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 79;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel4_plus_2_b_execute;
if (OPS_diags > 1) {
ops_timing_realloc(79, "update_halo_kernel4_plus_2_b");
}
ops_enqueue_kernel(desc);
}
#endif
|
c567a6aae7386a71f6a48a1c075db90d6efba249.hip
|
// !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/opencv_modules.hpp"
#ifndef HAVE_OPENCV_CUDEV
#error "opencv_cudev is required"
#else
#include "opencv2/core/cuda.hpp"
#include "opencv2/cudev.hpp"
using namespace cv;
using namespace cv::cuda;
using namespace cv::cudev;
namespace
{
class DefaultAllocator : public GpuMat::Allocator
{
public:
bool allocate(GpuMat* mat, int rows, int cols, size_t elemSize);
void free(GpuMat* mat);
};
bool DefaultAllocator::allocate(GpuMat* mat, int rows, int cols, size_t elemSize)
{
if (rows > 1 && cols > 1)
{
CV_CUDEV_SAFE_CALL( hipMallocPitch(&mat->data, &mat->step, elemSize * cols, rows) );
}
else
{
// Single row or single column must be continuous
CV_CUDEV_SAFE_CALL( hipMalloc(&mat->data, elemSize * cols * rows) );
mat->step = elemSize * cols;
}
mat->refcount = (int*) fastMalloc(sizeof(int));
return true;
}
void DefaultAllocator::free(GpuMat* mat)
{
hipFree(mat->datastart);
fastFree(mat->refcount);
}
DefaultAllocator cudaDefaultAllocator;
GpuMat::Allocator* g_defaultAllocator = &cudaDefaultAllocator;
}
GpuMat::Allocator* cv::cuda::GpuMat::defaultAllocator()
{
return g_defaultAllocator;
}
void cv::cuda::GpuMat::setDefaultAllocator(Allocator* allocator)
{
CV_Assert( allocator != 0 );
g_defaultAllocator = allocator;
}
/////////////////////////////////////////////////////
/// create
void cv::cuda::GpuMat::create(int _rows, int _cols, int _type)
{
CV_DbgAssert( _rows >= 0 && _cols >= 0 );
_type &= Mat::TYPE_MASK;
if (rows == _rows && cols == _cols && type() == _type && data)
return;
if (data)
release();
if (_rows > 0 && _cols > 0)
{
flags = Mat::MAGIC_VAL + _type;
rows = _rows;
cols = _cols;
const size_t esz = elemSize();
bool allocSuccess = allocator->allocate(this, rows, cols, esz);
if (!allocSuccess)
{
// custom allocator fails, try default allocator
allocator = defaultAllocator();
allocSuccess = allocator->allocate(this, rows, cols, esz);
CV_Assert( allocSuccess );
}
if (esz * cols == step)
flags |= Mat::CONTINUOUS_FLAG;
int64 _nettosize = static_cast<int64>(step) * rows;
size_t nettosize = static_cast<size_t>(_nettosize);
datastart = data;
dataend = data + nettosize;
if (refcount)
*refcount = 1;
}
}
/////////////////////////////////////////////////////
/// release
void cv::cuda::GpuMat::release()
{
CV_DbgAssert( allocator != 0 );
if (refcount && CV_XADD(refcount, -1) == 1)
allocator->free(this);
dataend = data = datastart = 0;
step = rows = cols = 0;
refcount = 0;
}
/////////////////////////////////////////////////////
/// upload
void cv::cuda::GpuMat::upload(InputArray arr)
{
Mat mat = arr.getMat();
CV_DbgAssert( !mat.empty() );
create(mat.size(), mat.type());
CV_CUDEV_SAFE_CALL( hipMemcpy2D(data, step, mat.data, mat.step, cols * elemSize(), rows, hipMemcpyHostToDevice) );
}
void cv::cuda::GpuMat::upload(InputArray arr, Stream& _stream)
{
Mat mat = arr.getMat();
CV_DbgAssert( !mat.empty() );
create(mat.size(), mat.type());
hipStream_t stream = StreamAccessor::getStream(_stream);
CV_CUDEV_SAFE_CALL( hipMemcpy2DAsync(data, step, mat.data, mat.step, cols * elemSize(), rows, hipMemcpyHostToDevice, stream) );
}
/////////////////////////////////////////////////////
/// download
void cv::cuda::GpuMat::download(OutputArray _dst) const
{
CV_DbgAssert( !empty() );
_dst.create(size(), type());
Mat dst = _dst.getMat();
CV_CUDEV_SAFE_CALL( hipMemcpy2D(dst.data, dst.step, data, step, cols * elemSize(), rows, hipMemcpyDeviceToHost) );
}
void cv::cuda::GpuMat::download(OutputArray _dst, Stream& _stream) const
{
CV_DbgAssert( !empty() );
_dst.create(size(), type());
Mat dst = _dst.getMat();
hipStream_t stream = StreamAccessor::getStream(_stream);
CV_CUDEV_SAFE_CALL( hipMemcpy2DAsync(dst.data, dst.step, data, step, cols * elemSize(), rows, hipMemcpyDeviceToHost, stream) );
}
/////////////////////////////////////////////////////
/// copyTo
void cv::cuda::GpuMat::copyTo(OutputArray _dst) const
{
CV_DbgAssert( !empty() );
_dst.create(size(), type());
GpuMat dst = _dst.getGpuMat();
CV_CUDEV_SAFE_CALL( hipMemcpy2D(dst.data, dst.step, data, step, cols * elemSize(), rows, hipMemcpyDeviceToDevice) );
}
void cv::cuda::GpuMat::copyTo(OutputArray _dst, Stream& _stream) const
{
CV_DbgAssert( !empty() );
_dst.create(size(), type());
GpuMat dst = _dst.getGpuMat();
hipStream_t stream = StreamAccessor::getStream(_stream);
CV_CUDEV_SAFE_CALL( hipMemcpy2DAsync(dst.data, dst.step, data, step, cols * elemSize(), rows, hipMemcpyDeviceToDevice, stream) );
}
namespace
{
template <size_t size> struct CopyToPolicy : DefaultTransformPolicy
{
};
template <> struct CopyToPolicy<4> : DefaultTransformPolicy
{
enum {
shift = 2
};
};
template <> struct CopyToPolicy<8> : DefaultTransformPolicy
{
enum {
shift = 1
};
};
template <typename T>
void copyWithMask(const GpuMat& src, const GpuMat& dst, const GpuMat& mask, Stream& stream)
{
gridTransformUnary_< CopyToPolicy<sizeof(typename VecTraits<T>::elem_type)> >(globPtr<T>(src), globPtr<T>(dst), identity<T>(), globPtr<uchar>(mask), stream);
}
}
void cv::cuda::GpuMat::copyTo(OutputArray _dst, InputArray _mask, Stream& stream) const
{
CV_DbgAssert( !empty() );
CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );
GpuMat mask = _mask.getGpuMat();
CV_DbgAssert( size() == mask.size() && mask.depth() == CV_8U && (mask.channels() == 1 || mask.channels() == channels()) );
uchar* data0 = _dst.getGpuMat().data;
_dst.create(size(), type());
GpuMat dst = _dst.getGpuMat();
// do not leave dst uninitialized
if (dst.data != data0)
dst.setTo(Scalar::all(0), stream);
typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, const GpuMat& mask, Stream& stream);
static const func_t funcs[9][4] =
{
{0,0,0,0},
{copyWithMask<uchar>, copyWithMask<uchar2>, copyWithMask<uchar3>, copyWithMask<uchar4>},
{copyWithMask<ushort>, copyWithMask<ushort2>, copyWithMask<ushort3>, copyWithMask<ushort4>},
{0,0,0,0},
{copyWithMask<int>, copyWithMask<int2>, copyWithMask<int3>, copyWithMask<int4>},
{0,0,0,0},
{0,0,0,0},
{0,0,0,0},
{copyWithMask<double>, copyWithMask<double2>, copyWithMask<double3>, copyWithMask<double4>}
};
if (mask.channels() == channels())
{
const func_t func = funcs[elemSize1()][0];
CV_DbgAssert( func != 0 );
func(reshape(1), dst.reshape(1), mask.reshape(1), stream);
}
else
{
const func_t func = funcs[elemSize1()][channels() - 1];
CV_DbgAssert( func != 0 );
func(*this, dst, mask, stream);
}
}
/////////////////////////////////////////////////////
/// setTo
namespace
{
template <typename T>
void setToWithOutMask(const GpuMat& mat, Scalar _scalar, Stream& stream)
{
Scalar_<typename VecTraits<T>::elem_type> scalar = _scalar;
gridTransformUnary(constantPtr(VecTraits<T>::make(scalar.val), mat.rows, mat.cols), globPtr<T>(mat), identity<T>(), stream);
}
template <typename T>
void setToWithMask(const GpuMat& mat, const GpuMat& mask, Scalar _scalar, Stream& stream)
{
Scalar_<typename VecTraits<T>::elem_type> scalar = _scalar;
gridTransformUnary(constantPtr(VecTraits<T>::make(scalar.val), mat.rows, mat.cols), globPtr<T>(mat), identity<T>(), globPtr<uchar>(mask), stream);
}
}
GpuMat& cv::cuda::GpuMat::setTo(Scalar value, Stream& stream)
{
CV_DbgAssert( !empty() );
CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );
if (value[0] == 0.0 && value[1] == 0.0 && value[2] == 0.0 && value[3] == 0.0)
{
// Zero fill
if (stream)
CV_CUDEV_SAFE_CALL( hipMemset2DAsync(data, step, 0, cols * elemSize(), rows, StreamAccessor::getStream(stream)) );
else
CV_CUDEV_SAFE_CALL( hipMemset2D(data, step, 0, cols * elemSize(), rows) );
return *this;
}
if (depth() == CV_8U)
{
const int cn = channels();
if (cn == 1
|| (cn == 2 && value[0] == value[1])
|| (cn == 3 && value[0] == value[1] && value[0] == value[2])
|| (cn == 4 && value[0] == value[1] && value[0] == value[2] && value[0] == value[3]))
{
const int val = cv::saturate_cast<uchar>(value[0]);
if (stream)
CV_CUDEV_SAFE_CALL( hipMemset2DAsync(data, step, val, cols * elemSize(), rows, StreamAccessor::getStream(stream)) );
else
CV_CUDEV_SAFE_CALL( hipMemset2D(data, step, val, cols * elemSize(), rows) );
return *this;
}
}
typedef void (*func_t)(const GpuMat& mat, Scalar scalar, Stream& stream);
static const func_t funcs[7][4] =
{
{setToWithOutMask<uchar>,setToWithOutMask<uchar2>,setToWithOutMask<uchar3>,setToWithOutMask<uchar4>},
{setToWithOutMask<schar>,setToWithOutMask<char2>,setToWithOutMask<char3>,setToWithOutMask<char4>},
{setToWithOutMask<ushort>,setToWithOutMask<ushort2>,setToWithOutMask<ushort3>,setToWithOutMask<ushort4>},
{setToWithOutMask<short>,setToWithOutMask<short2>,setToWithOutMask<short3>,setToWithOutMask<short4>},
{setToWithOutMask<int>,setToWithOutMask<int2>,setToWithOutMask<int3>,setToWithOutMask<int4>},
{setToWithOutMask<float>,setToWithOutMask<float2>,setToWithOutMask<float3>,setToWithOutMask<float4>},
{setToWithOutMask<double>,setToWithOutMask<double2>,setToWithOutMask<double3>,setToWithOutMask<double4>}
};
funcs[depth()][channels() - 1](*this, value, stream);
return *this;
}
GpuMat& cv::cuda::GpuMat::setTo(Scalar value, InputArray _mask, Stream& stream)
{
CV_DbgAssert( !empty() );
CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );
GpuMat mask = _mask.getGpuMat();
if (mask.empty())
{
return setTo(value, stream);
}
CV_DbgAssert( size() == mask.size() && mask.type() == CV_8UC1 );
typedef void (*func_t)(const GpuMat& mat, const GpuMat& mask, Scalar scalar, Stream& stream);
static const func_t funcs[7][4] =
{
{setToWithMask<uchar>,setToWithMask<uchar2>,setToWithMask<uchar3>,setToWithMask<uchar4>},
{setToWithMask<schar>,setToWithMask<char2>,setToWithMask<char3>,setToWithMask<char4>},
{setToWithMask<ushort>,setToWithMask<ushort2>,setToWithMask<ushort3>,setToWithMask<ushort4>},
{setToWithMask<short>,setToWithMask<short2>,setToWithMask<short3>,setToWithMask<short4>},
{setToWithMask<int>,setToWithMask<int2>,setToWithMask<int3>,setToWithMask<int4>},
{setToWithMask<float>,setToWithMask<float2>,setToWithMask<float3>,setToWithMask<float4>},
{setToWithMask<double>,setToWithMask<double2>,setToWithMask<double3>,setToWithMask<double4>}
};
funcs[depth()][channels() - 1](*this, mask, value, stream);
return *this;
}
/////////////////////////////////////////////////////
/// convertTo
namespace
{
template <typename T> struct ConvertToPolicy : DefaultTransformPolicy
{
};
template <> struct ConvertToPolicy<double> : DefaultTransformPolicy
{
enum {
shift = 1
};
};
template <typename T, typename D>
void convertToNoScale(const GpuMat& src, const GpuMat& dst, Stream& stream)
{
typedef typename VecTraits<T>::elem_type src_elem_type;
typedef typename VecTraits<D>::elem_type dst_elem_type;
typedef typename LargerType<src_elem_type, float>::type larger_elem_type;
typedef typename LargerType<float, dst_elem_type>::type scalar_type;
gridTransformUnary_< ConvertToPolicy<scalar_type> >(globPtr<T>(src), globPtr<D>(dst), saturate_cast_func<T, D>(), stream);
}
template <typename T, typename D, typename S> struct Convertor : unary_function<T, D>
{
S alpha;
S beta;
__device__ __forceinline__ D operator ()(typename TypeTraits<T>::parameter_type src) const
{
return cudev::saturate_cast<D>(alpha * src + beta);
}
};
template <typename T, typename D>
void convertToScale(const GpuMat& src, const GpuMat& dst, double alpha, double beta, Stream& stream)
{
typedef typename VecTraits<T>::elem_type src_elem_type;
typedef typename VecTraits<D>::elem_type dst_elem_type;
typedef typename LargerType<src_elem_type, float>::type larger_elem_type;
typedef typename LargerType<float, dst_elem_type>::type scalar_type;
Convertor<T, D, scalar_type> op;
op.alpha = cv::saturate_cast<scalar_type>(alpha);
op.beta = cv::saturate_cast<scalar_type>(beta);
gridTransformUnary_< ConvertToPolicy<scalar_type> >(globPtr<T>(src), globPtr<D>(dst), op, stream);
}
}
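// Illustrative note, not part of the original source: the Convertor functor above
// applies dst = saturate_cast<D>(alpha * src + beta) element-wise. As a hedged
// example (variable names are hypothetical), rescaling a CV_32F image holding
// values in [0,1] to CV_8U with alpha = 255 and beta = 0 maps src = 0.25f to
// saturate_cast<uchar>(63.75f) == 64, and anything above 1.0f saturates to 255:
//
//   GpuMat src32f(480, 640, CV_32FC1); // assumed to already hold data in [0,1]
//   GpuMat dst8u;
//   src32f.convertTo(dst8u, CV_8U, 255.0, 0.0, Stream::Null());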
void cv::cuda::GpuMat::convertTo(OutputArray _dst, int rtype, Stream& stream) const
{
if (rtype < 0)
rtype = type();
else
rtype = CV_MAKE_TYPE(CV_MAT_DEPTH(rtype), channels());
const int sdepth = depth();
const int ddepth = CV_MAT_DEPTH(rtype);
if (sdepth == ddepth)
{
if (stream)
copyTo(_dst, stream);
else
copyTo(_dst);
return;
}
CV_DbgAssert( sdepth <= CV_64F && ddepth <= CV_64F );
GpuMat src = *this;
_dst.create(size(), rtype);
GpuMat dst = _dst.getGpuMat();
typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, Stream& stream);
static const func_t funcs[7][7] =
{
{0, convertToNoScale<uchar, schar>, convertToNoScale<uchar, ushort>, convertToNoScale<uchar, short>, convertToNoScale<uchar, int>, convertToNoScale<uchar, float>, convertToNoScale<uchar, double>},
{convertToNoScale<schar, uchar>, 0, convertToNoScale<schar, ushort>, convertToNoScale<schar, short>, convertToNoScale<schar, int>, convertToNoScale<schar, float>, convertToNoScale<schar, double>},
{convertToNoScale<ushort, uchar>, convertToNoScale<ushort, schar>, 0, convertToNoScale<ushort, short>, convertToNoScale<ushort, int>, convertToNoScale<ushort, float>, convertToNoScale<ushort, double>},
{convertToNoScale<short, uchar>, convertToNoScale<short, schar>, convertToNoScale<short, ushort>, 0, convertToNoScale<short, int>, convertToNoScale<short, float>, convertToNoScale<short, double>},
{convertToNoScale<int, uchar>, convertToNoScale<int, schar>, convertToNoScale<int, ushort>, convertToNoScale<int, short>, 0, convertToNoScale<int, float>, convertToNoScale<int, double>},
{convertToNoScale<float, uchar>, convertToNoScale<float, schar>, convertToNoScale<float, ushort>, convertToNoScale<float, short>, convertToNoScale<float, int>, 0, convertToNoScale<float, double>},
{convertToNoScale<double, uchar>, convertToNoScale<double, schar>, convertToNoScale<double, ushort>, convertToNoScale<double, short>, convertToNoScale<double, int>, convertToNoScale<double, float>, 0}
};
funcs[sdepth][ddepth](reshape(1), dst.reshape(1), stream);
}
void cv::cuda::GpuMat::convertTo(OutputArray _dst, int rtype, double alpha, double beta, Stream& stream) const
{
if (rtype < 0)
rtype = type();
else
rtype = CV_MAKETYPE(CV_MAT_DEPTH(rtype), channels());
const int sdepth = depth();
const int ddepth = CV_MAT_DEPTH(rtype);
GpuMat src = *this;
_dst.create(size(), rtype);
GpuMat dst = _dst.getGpuMat();
typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, double alpha, double beta, Stream& stream);
static const func_t funcs[7][7] =
{
{convertToScale<uchar, uchar>, convertToScale<uchar, schar>, convertToScale<uchar, ushort>, convertToScale<uchar, short>, convertToScale<uchar, int>, convertToScale<uchar, float>, convertToScale<uchar, double>},
{convertToScale<schar, uchar>, convertToScale<schar, schar>, convertToScale<schar, ushort>, convertToScale<schar, short>, convertToScale<schar, int>, convertToScale<schar, float>, convertToScale<schar, double>},
{convertToScale<ushort, uchar>, convertToScale<ushort, schar>, convertToScale<ushort, ushort>, convertToScale<ushort, short>, convertToScale<ushort, int>, convertToScale<ushort, float>, convertToScale<ushort, double>},
{convertToScale<short, uchar>, convertToScale<short, schar>, convertToScale<short, ushort>, convertToScale<short, short>, convertToScale<short, int>, convertToScale<short, float>, convertToScale<short, double>},
{convertToScale<int, uchar>, convertToScale<int, schar>, convertToScale<int, ushort>, convertToScale<int, short>, convertToScale<int, int>, convertToScale<int, float>, convertToScale<int, double>},
{convertToScale<float, uchar>, convertToScale<float, schar>, convertToScale<float, ushort>, convertToScale<float, short>, convertToScale<float, int>, convertToScale<float, float>, convertToScale<float, double>},
{convertToScale<double, uchar>, convertToScale<double, schar>, convertToScale<double, ushort>, convertToScale<double, short>, convertToScale<double, int>, convertToScale<double, float>, convertToScale<double, double>}
};
funcs[sdepth][ddepth](reshape(1), dst.reshape(1), alpha, beta, stream);
}
#endif
|
c567a6aae7386a71f6a48a1c075db90d6efba249.cu
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/opencv_modules.hpp"
#ifndef HAVE_OPENCV_CUDEV
#error "opencv_cudev is required"
#else
#include "opencv2/core/cuda.hpp"
#include "opencv2/cudev.hpp"
using namespace cv;
using namespace cv::cuda;
using namespace cv::cudev;
namespace
{
class DefaultAllocator : public GpuMat::Allocator
{
public:
bool allocate(GpuMat* mat, int rows, int cols, size_t elemSize);
void free(GpuMat* mat);
};
bool DefaultAllocator::allocate(GpuMat* mat, int rows, int cols, size_t elemSize)
{
if (rows > 1 && cols > 1)
{
CV_CUDEV_SAFE_CALL( cudaMallocPitch(&mat->data, &mat->step, elemSize * cols, rows) );
}
else
{
// Single row or single column must be continuous
CV_CUDEV_SAFE_CALL( cudaMalloc(&mat->data, elemSize * cols * rows) );
mat->step = elemSize * cols;
}
mat->refcount = (int*) fastMalloc(sizeof(int));
return true;
}
void DefaultAllocator::free(GpuMat* mat)
{
cudaFree(mat->datastart);
fastFree(mat->refcount);
}
DefaultAllocator cudaDefaultAllocator;
GpuMat::Allocator* g_defaultAllocator = &cudaDefaultAllocator;
}
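// Illustrative note, not part of the original source: DefaultAllocator::allocate
// uses cudaMallocPitch for genuinely 2D matrices, so GpuMat::step may be padded
// beyond cols * elemSize() and the matrix is then non-continuous. A minimal sketch
// of the same idea outside GpuMat (sizes here are hypothetical):
//
//   void* devPtr = 0;
//   size_t pitch = 0; // chosen by the driver, pitch >= widthInBytes
//   cudaMallocPitch(&devPtr, &pitch, 3 * 640 /* widthInBytes */, 480 /* rows */);
//   // row r starts at (char*)devPtr + r * pitch, not at r * widthInBytes
//   cudaFree(devPtr);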
GpuMat::Allocator* cv::cuda::GpuMat::defaultAllocator()
{
return g_defaultAllocator;
}
void cv::cuda::GpuMat::setDefaultAllocator(Allocator* allocator)
{
CV_Assert( allocator != 0 );
g_defaultAllocator = allocator;
}
/////////////////////////////////////////////////////
/// create
void cv::cuda::GpuMat::create(int _rows, int _cols, int _type)
{
CV_DbgAssert( _rows >= 0 && _cols >= 0 );
_type &= Mat::TYPE_MASK;
if (rows == _rows && cols == _cols && type() == _type && data)
return;
if (data)
release();
if (_rows > 0 && _cols > 0)
{
flags = Mat::MAGIC_VAL + _type;
rows = _rows;
cols = _cols;
const size_t esz = elemSize();
bool allocSuccess = allocator->allocate(this, rows, cols, esz);
if (!allocSuccess)
{
// custom allocator fails, try default allocator
allocator = defaultAllocator();
allocSuccess = allocator->allocate(this, rows, cols, esz);
CV_Assert( allocSuccess );
}
if (esz * cols == step)
flags |= Mat::CONTINUOUS_FLAG;
int64 _nettosize = static_cast<int64>(step) * rows;
size_t nettosize = static_cast<size_t>(_nettosize);
datastart = data;
dataend = data + nettosize;
if (refcount)
*refcount = 1;
}
}
/////////////////////////////////////////////////////
/// release
void cv::cuda::GpuMat::release()
{
CV_DbgAssert( allocator != 0 );
if (refcount && CV_XADD(refcount, -1) == 1)
allocator->free(this);
dataend = data = datastart = 0;
step = rows = cols = 0;
refcount = 0;
}
/////////////////////////////////////////////////////
/// upload
void cv::cuda::GpuMat::upload(InputArray arr)
{
Mat mat = arr.getMat();
CV_DbgAssert( !mat.empty() );
create(mat.size(), mat.type());
CV_CUDEV_SAFE_CALL( cudaMemcpy2D(data, step, mat.data, mat.step, cols * elemSize(), rows, cudaMemcpyHostToDevice) );
}
void cv::cuda::GpuMat::upload(InputArray arr, Stream& _stream)
{
Mat mat = arr.getMat();
CV_DbgAssert( !mat.empty() );
create(mat.size(), mat.type());
cudaStream_t stream = StreamAccessor::getStream(_stream);
CV_CUDEV_SAFE_CALL( cudaMemcpy2DAsync(data, step, mat.data, mat.step, cols * elemSize(), rows, cudaMemcpyHostToDevice, stream) );
}
/////////////////////////////////////////////////////
/// download
void cv::cuda::GpuMat::download(OutputArray _dst) const
{
CV_DbgAssert( !empty() );
_dst.create(size(), type());
Mat dst = _dst.getMat();
CV_CUDEV_SAFE_CALL( cudaMemcpy2D(dst.data, dst.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToHost) );
}
void cv::cuda::GpuMat::download(OutputArray _dst, Stream& _stream) const
{
CV_DbgAssert( !empty() );
_dst.create(size(), type());
Mat dst = _dst.getMat();
cudaStream_t stream = StreamAccessor::getStream(_stream);
CV_CUDEV_SAFE_CALL( cudaMemcpy2DAsync(dst.data, dst.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToHost, stream) );
}
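// Illustrative round-trip sketch, not part of the original source (names are
// hypothetical): upload() copies a host Mat to the device with cudaMemcpy2D,
// honoring both steps, and download() copies it back:
//
//   Mat h_src = Mat::ones(480, 640, CV_8UC1), h_dst;
//   GpuMat d_img;
//   d_img.upload(h_src);   // host -> device
//   d_img.download(h_dst); // device -> host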
/////////////////////////////////////////////////////
/// copyTo
void cv::cuda::GpuMat::copyTo(OutputArray _dst) const
{
CV_DbgAssert( !empty() );
_dst.create(size(), type());
GpuMat dst = _dst.getGpuMat();
CV_CUDEV_SAFE_CALL( cudaMemcpy2D(dst.data, dst.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToDevice) );
}
void cv::cuda::GpuMat::copyTo(OutputArray _dst, Stream& _stream) const
{
CV_DbgAssert( !empty() );
_dst.create(size(), type());
GpuMat dst = _dst.getGpuMat();
cudaStream_t stream = StreamAccessor::getStream(_stream);
CV_CUDEV_SAFE_CALL( cudaMemcpy2DAsync(dst.data, dst.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToDevice, stream) );
}
namespace
{
template <size_t size> struct CopyToPolicy : DefaultTransformPolicy
{
};
template <> struct CopyToPolicy<4> : DefaultTransformPolicy
{
enum {
shift = 2
};
};
template <> struct CopyToPolicy<8> : DefaultTransformPolicy
{
enum {
shift = 1
};
};
template <typename T>
void copyWithMask(const GpuMat& src, const GpuMat& dst, const GpuMat& mask, Stream& stream)
{
gridTransformUnary_< CopyToPolicy<sizeof(typename VecTraits<T>::elem_type)> >(globPtr<T>(src), globPtr<T>(dst), identity<T>(), globPtr<uchar>(mask), stream);
}
}
void cv::cuda::GpuMat::copyTo(OutputArray _dst, InputArray _mask, Stream& stream) const
{
CV_DbgAssert( !empty() );
CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );
GpuMat mask = _mask.getGpuMat();
CV_DbgAssert( size() == mask.size() && mask.depth() == CV_8U && (mask.channels() == 1 || mask.channels() == channels()) );
uchar* data0 = _dst.getGpuMat().data;
_dst.create(size(), type());
GpuMat dst = _dst.getGpuMat();
// do not leave dst uninitialized
if (dst.data != data0)
dst.setTo(Scalar::all(0), stream);
typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, const GpuMat& mask, Stream& stream);
static const func_t funcs[9][4] =
{
{0,0,0,0},
{copyWithMask<uchar>, copyWithMask<uchar2>, copyWithMask<uchar3>, copyWithMask<uchar4>},
{copyWithMask<ushort>, copyWithMask<ushort2>, copyWithMask<ushort3>, copyWithMask<ushort4>},
{0,0,0,0},
{copyWithMask<int>, copyWithMask<int2>, copyWithMask<int3>, copyWithMask<int4>},
{0,0,0,0},
{0,0,0,0},
{0,0,0,0},
{copyWithMask<double>, copyWithMask<double2>, copyWithMask<double3>, copyWithMask<double4>}
};
if (mask.channels() == channels())
{
const func_t func = funcs[elemSize1()][0];
CV_DbgAssert( func != 0 );
func(reshape(1), dst.reshape(1), mask.reshape(1), stream);
}
else
{
const func_t func = funcs[elemSize1()][channels() - 1];
CV_DbgAssert( func != 0 );
func(*this, dst, mask, stream);
}
}
/////////////////////////////////////////////////////
/// setTo
namespace
{
template <typename T>
void setToWithOutMask(const GpuMat& mat, Scalar _scalar, Stream& stream)
{
Scalar_<typename VecTraits<T>::elem_type> scalar = _scalar;
gridTransformUnary(constantPtr(VecTraits<T>::make(scalar.val), mat.rows, mat.cols), globPtr<T>(mat), identity<T>(), stream);
}
template <typename T>
void setToWithMask(const GpuMat& mat, const GpuMat& mask, Scalar _scalar, Stream& stream)
{
Scalar_<typename VecTraits<T>::elem_type> scalar = _scalar;
gridTransformUnary(constantPtr(VecTraits<T>::make(scalar.val), mat.rows, mat.cols), globPtr<T>(mat), identity<T>(), globPtr<uchar>(mask), stream);
}
}
GpuMat& cv::cuda::GpuMat::setTo(Scalar value, Stream& stream)
{
CV_DbgAssert( !empty() );
CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );
if (value[0] == 0.0 && value[1] == 0.0 && value[2] == 0.0 && value[3] == 0.0)
{
// Zero fill
if (stream)
CV_CUDEV_SAFE_CALL( cudaMemset2DAsync(data, step, 0, cols * elemSize(), rows, StreamAccessor::getStream(stream)) );
else
CV_CUDEV_SAFE_CALL( cudaMemset2D(data, step, 0, cols * elemSize(), rows) );
return *this;
}
if (depth() == CV_8U)
{
const int cn = channels();
if (cn == 1
|| (cn == 2 && value[0] == value[1])
|| (cn == 3 && value[0] == value[1] && value[0] == value[2])
|| (cn == 4 && value[0] == value[1] && value[0] == value[2] && value[0] == value[3]))
{
const int val = cv::saturate_cast<uchar>(value[0]);
if (stream)
CV_CUDEV_SAFE_CALL( cudaMemset2DAsync(data, step, val, cols * elemSize(), rows, StreamAccessor::getStream(stream)) );
else
CV_CUDEV_SAFE_CALL( cudaMemset2D(data, step, val, cols * elemSize(), rows) );
return *this;
}
}
typedef void (*func_t)(const GpuMat& mat, Scalar scalar, Stream& stream);
static const func_t funcs[7][4] =
{
{setToWithOutMask<uchar>,setToWithOutMask<uchar2>,setToWithOutMask<uchar3>,setToWithOutMask<uchar4>},
{setToWithOutMask<schar>,setToWithOutMask<char2>,setToWithOutMask<char3>,setToWithOutMask<char4>},
{setToWithOutMask<ushort>,setToWithOutMask<ushort2>,setToWithOutMask<ushort3>,setToWithOutMask<ushort4>},
{setToWithOutMask<short>,setToWithOutMask<short2>,setToWithOutMask<short3>,setToWithOutMask<short4>},
{setToWithOutMask<int>,setToWithOutMask<int2>,setToWithOutMask<int3>,setToWithOutMask<int4>},
{setToWithOutMask<float>,setToWithOutMask<float2>,setToWithOutMask<float3>,setToWithOutMask<float4>},
{setToWithOutMask<double>,setToWithOutMask<double2>,setToWithOutMask<double3>,setToWithOutMask<double4>}
};
funcs[depth()][channels() - 1](*this, value, stream);
return *this;
}
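// Illustrative note, not part of the original source: setTo picks the cheapest
// path it can. A hedged sketch, assuming a CV_8UC3 GpuMat m and the null stream:
//
//   m.setTo(Scalar::all(0), Stream::Null());  // all-zero value       -> cudaMemset2D zero fill
//   m.setTo(Scalar::all(7), Stream::Null());  // equal CV_8U channels -> cudaMemset2D with byte 7
//   m.setTo(Scalar(1, 2, 3), Stream::Null()); // general case         -> setToWithOutMask kernel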
GpuMat& cv::cuda::GpuMat::setTo(Scalar value, InputArray _mask, Stream& stream)
{
CV_DbgAssert( !empty() );
CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );
GpuMat mask = _mask.getGpuMat();
if (mask.empty())
{
return setTo(value, stream);
}
CV_DbgAssert( size() == mask.size() && mask.type() == CV_8UC1 );
typedef void (*func_t)(const GpuMat& mat, const GpuMat& mask, Scalar scalar, Stream& stream);
static const func_t funcs[7][4] =
{
{setToWithMask<uchar>,setToWithMask<uchar2>,setToWithMask<uchar3>,setToWithMask<uchar4>},
{setToWithMask<schar>,setToWithMask<char2>,setToWithMask<char3>,setToWithMask<char4>},
{setToWithMask<ushort>,setToWithMask<ushort2>,setToWithMask<ushort3>,setToWithMask<ushort4>},
{setToWithMask<short>,setToWithMask<short2>,setToWithMask<short3>,setToWithMask<short4>},
{setToWithMask<int>,setToWithMask<int2>,setToWithMask<int3>,setToWithMask<int4>},
{setToWithMask<float>,setToWithMask<float2>,setToWithMask<float3>,setToWithMask<float4>},
{setToWithMask<double>,setToWithMask<double2>,setToWithMask<double3>,setToWithMask<double4>}
};
funcs[depth()][channels() - 1](*this, mask, value, stream);
return *this;
}
/////////////////////////////////////////////////////
/// convertTo
namespace
{
template <typename T> struct ConvertToPolicy : DefaultTransformPolicy
{
};
template <> struct ConvertToPolicy<double> : DefaultTransformPolicy
{
enum {
shift = 1
};
};
template <typename T, typename D>
void convertToNoScale(const GpuMat& src, const GpuMat& dst, Stream& stream)
{
typedef typename VecTraits<T>::elem_type src_elem_type;
typedef typename VecTraits<D>::elem_type dst_elem_type;
typedef typename LargerType<src_elem_type, float>::type larger_elem_type;
typedef typename LargerType<float, dst_elem_type>::type scalar_type;
gridTransformUnary_< ConvertToPolicy<scalar_type> >(globPtr<T>(src), globPtr<D>(dst), saturate_cast_func<T, D>(), stream);
}
template <typename T, typename D, typename S> struct Convertor : unary_function<T, D>
{
S alpha;
S beta;
__device__ __forceinline__ D operator ()(typename TypeTraits<T>::parameter_type src) const
{
return cudev::saturate_cast<D>(alpha * src + beta);
}
};
template <typename T, typename D>
void convertToScale(const GpuMat& src, const GpuMat& dst, double alpha, double beta, Stream& stream)
{
typedef typename VecTraits<T>::elem_type src_elem_type;
typedef typename VecTraits<D>::elem_type dst_elem_type;
typedef typename LargerType<src_elem_type, float>::type larger_elem_type;
typedef typename LargerType<float, dst_elem_type>::type scalar_type;
Convertor<T, D, scalar_type> op;
op.alpha = cv::saturate_cast<scalar_type>(alpha);
op.beta = cv::saturate_cast<scalar_type>(beta);
gridTransformUnary_< ConvertToPolicy<scalar_type> >(globPtr<T>(src), globPtr<D>(dst), op, stream);
}
}
void cv::cuda::GpuMat::convertTo(OutputArray _dst, int rtype, Stream& stream) const
{
if (rtype < 0)
rtype = type();
else
rtype = CV_MAKE_TYPE(CV_MAT_DEPTH(rtype), channels());
const int sdepth = depth();
const int ddepth = CV_MAT_DEPTH(rtype);
if (sdepth == ddepth)
{
if (stream)
copyTo(_dst, stream);
else
copyTo(_dst);
return;
}
CV_DbgAssert( sdepth <= CV_64F && ddepth <= CV_64F );
GpuMat src = *this;
_dst.create(size(), rtype);
GpuMat dst = _dst.getGpuMat();
typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, Stream& stream);
static const func_t funcs[7][7] =
{
{0, convertToNoScale<uchar, schar>, convertToNoScale<uchar, ushort>, convertToNoScale<uchar, short>, convertToNoScale<uchar, int>, convertToNoScale<uchar, float>, convertToNoScale<uchar, double>},
{convertToNoScale<schar, uchar>, 0, convertToNoScale<schar, ushort>, convertToNoScale<schar, short>, convertToNoScale<schar, int>, convertToNoScale<schar, float>, convertToNoScale<schar, double>},
{convertToNoScale<ushort, uchar>, convertToNoScale<ushort, schar>, 0, convertToNoScale<ushort, short>, convertToNoScale<ushort, int>, convertToNoScale<ushort, float>, convertToNoScale<ushort, double>},
{convertToNoScale<short, uchar>, convertToNoScale<short, schar>, convertToNoScale<short, ushort>, 0, convertToNoScale<short, int>, convertToNoScale<short, float>, convertToNoScale<short, double>},
{convertToNoScale<int, uchar>, convertToNoScale<int, schar>, convertToNoScale<int, ushort>, convertToNoScale<int, short>, 0, convertToNoScale<int, float>, convertToNoScale<int, double>},
{convertToNoScale<float, uchar>, convertToNoScale<float, schar>, convertToNoScale<float, ushort>, convertToNoScale<float, short>, convertToNoScale<float, int>, 0, convertToNoScale<float, double>},
{convertToNoScale<double, uchar>, convertToNoScale<double, schar>, convertToNoScale<double, ushort>, convertToNoScale<double, short>, convertToNoScale<double, int>, convertToNoScale<double, float>, 0}
};
funcs[sdepth][ddepth](reshape(1), dst.reshape(1), stream);
}
void cv::cuda::GpuMat::convertTo(OutputArray _dst, int rtype, double alpha, double beta, Stream& stream) const
{
if (rtype < 0)
rtype = type();
else
rtype = CV_MAKETYPE(CV_MAT_DEPTH(rtype), channels());
const int sdepth = depth();
const int ddepth = CV_MAT_DEPTH(rtype);
GpuMat src = *this;
_dst.create(size(), rtype);
GpuMat dst = _dst.getGpuMat();
typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, double alpha, double beta, Stream& stream);
static const func_t funcs[7][7] =
{
{convertToScale<uchar, uchar>, convertToScale<uchar, schar>, convertToScale<uchar, ushort>, convertToScale<uchar, short>, convertToScale<uchar, int>, convertToScale<uchar, float>, convertToScale<uchar, double>},
{convertToScale<schar, uchar>, convertToScale<schar, schar>, convertToScale<schar, ushort>, convertToScale<schar, short>, convertToScale<schar, int>, convertToScale<schar, float>, convertToScale<schar, double>},
{convertToScale<ushort, uchar>, convertToScale<ushort, schar>, convertToScale<ushort, ushort>, convertToScale<ushort, short>, convertToScale<ushort, int>, convertToScale<ushort, float>, convertToScale<ushort, double>},
{convertToScale<short, uchar>, convertToScale<short, schar>, convertToScale<short, ushort>, convertToScale<short, short>, convertToScale<short, int>, convertToScale<short, float>, convertToScale<short, double>},
{convertToScale<int, uchar>, convertToScale<int, schar>, convertToScale<int, ushort>, convertToScale<int, short>, convertToScale<int, int>, convertToScale<int, float>, convertToScale<int, double>},
{convertToScale<float, uchar>, convertToScale<float, schar>, convertToScale<float, ushort>, convertToScale<float, short>, convertToScale<float, int>, convertToScale<float, float>, convertToScale<float, double>},
{convertToScale<double, uchar>, convertToScale<double, schar>, convertToScale<double, ushort>, convertToScale<double, short>, convertToScale<double, int>, convertToScale<double, float>, convertToScale<double, double>}
};
funcs[sdepth][ddepth](reshape(1), dst.reshape(1), alpha, beta, stream);
}
#endif
|
80a51d331ac9950e472c484af55ad8580c8fcd87.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlobpcg_maxpy.cu normal z -> d, Tue Sep 2 12:38:33 2014
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
__global__ void
magma_dlobpcg_maxpy_kernel( magma_int_t num_rows,
magma_int_t num_vecs,
double *X,
double *Y){
int row = blockIdx.x * blockDim.x + threadIdx.x; // global row index
if( row<num_rows ){
for( int i=0; i<num_vecs; i++ ){
Y[ row + i*num_rows ] += X[ row + i*num_rows ];
}
}
}
/**
Purpose
-------
This routine computes an axpy for an mxn matrix:
Y = X + Y
It replaces:
magma_daxpy(m*n, c_one, Y, 1, X, 1);
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
X = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param
num_rows magma_int_t
number of rows
@param
num_vecs magma_int_t
number of vectors
@param
X double*
input vector X
@param
Y double*
input/output vector Y
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dlobpcg_maxpy( magma_int_t num_rows,
magma_int_t num_vecs,
double *X,
double *Y){
// every thread handles one row
magma_int_t block_size = BLOCK_SIZE;
dim3 block( block_size );
dim3 grid( (num_rows+block_size-1)/block_size );
hipLaunchKernelGGL(( magma_dlobpcg_maxpy_kernel), dim3(grid), dim3(block), 0, magma_stream ,
num_rows, num_vecs, X, Y );
return MAGMA_SUCCESS;
}
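// Illustrative usage sketch, not part of the original source (pointer names are
// hypothetical): with d_X and d_Y already holding num_vecs device vectors of
// num_rows elements each, the call below performs Y = X + Y element-wise, the
// in-place equivalent of a single large daxpy over all num_rows*num_vecs entries:
//
//   magma_dlobpcg_maxpy( num_rows, num_vecs, d_X, d_Y );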
|
80a51d331ac9950e472c484af55ad8580c8fcd87.cu
|
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlobpcg_maxpy.cu normal z -> d, Tue Sep 2 12:38:33 2014
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
__global__ void
magma_dlobpcg_maxpy_kernel( magma_int_t num_rows,
magma_int_t num_vecs,
double *X,
double *Y){
int row = blockIdx.x * blockDim.x + threadIdx.x; // global row index
if( row<num_rows ){
for( int i=0; i<num_vecs; i++ ){
Y[ row + i*num_rows ] += X[ row + i*num_rows ];
}
}
}
/**
Purpose
-------
This routine computes an axpy for an mxn matrix:
Y = X + Y
It replaces:
magma_daxpy(m*n, c_one, Y, 1, X, 1);
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
X = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param
num_rows magma_int_t
number of rows
@param
num_vecs magma_int_t
number of vectors
@param
X double*
input vector X
@param
Y double*
input/output vector Y
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dlobpcg_maxpy( magma_int_t num_rows,
magma_int_t num_vecs,
double *X,
double *Y){
// every thread handles one row
magma_int_t block_size = BLOCK_SIZE;
dim3 block( block_size );
dim3 grid( (num_rows+block_size-1)/block_size );
magma_dlobpcg_maxpy_kernel<<< grid, block, 0, magma_stream >>>
( num_rows, num_vecs, X, Y );
return MAGMA_SUCCESS;
}
|
39b5021f8ed1be26940d75eab79a3f9ef4121af0.hip
|
// !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=64 --gridDim=64 --equality-abstraction --no-inline
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
#define N 2
__global__ void foo(int* p) {
__shared__ int A[10];
int* x;
x = p;
assert(*p <2);
x[0] = 0;
x = A;
x[0] = 0;
}
int main(){
int *b;
int *dev_b;
b = (int*)malloc(N*sizeof(int));
for (int i = 0; i < N; ++i){
b[i] = i+1;
printf("%d; ", b[i]);
}
printf("\n");
hipMalloc((void**)&dev_b, N*sizeof(int));
hipMemcpy(dev_b, b, N*sizeof(int), hipMemcpyHostToDevice);
//foo<<<1,N>>>(dev_b);
ESBMC_verify_kernel(foo,1,N,dev_b);
hipMemcpy(b, dev_b, N*sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0; i < N; ++i){
printf("%d; ", b[i]);
}
assert(b[0]==0);
free(b);
hipFree(dev_b);
}
|
39b5021f8ed1be26940d75eab79a3f9ef4121af0.cu
|
//pass
//--blockDim=64 --gridDim=64 --equality-abstraction --no-inline
#include "cuda.h"
#include <stdio.h>
#include <assert.h>
#define N 2
__global__ void foo(int* p) {
__shared__ int A[10];
int* x;
x = p;
assert(*p <2);
x[0] = 0;
x = A;
x[0] = 0;
}
int main(){
int *b;
int *dev_b;
b = (int*)malloc(N*sizeof(int));
for (int i = 0; i < N; ++i){
b[i] = i+1;
printf("%d; ", b[i]);
}
printf("\n");
cudaMalloc((void**)&dev_b, N*sizeof(int));
cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice);
//foo<<<1,N>>>(dev_b);
ESBMC_verify_kernel(foo,1,N,dev_b);
cudaMemcpy(b, dev_b, N*sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < N; ++i){
printf("%d; ", b[i]);
}
assert(b[0]==0);
free(b);
cudaFree(dev_b);
}
|
5c1689e60952e4fde1f769e76c0295d4e59c277f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// buildTree.cu
//
// Created by John Robinson on 7/15/15.
// Copyright (c) 2015 John Robinson. All rights reserved.
/*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//
/*
* The partitioning algorithm uses an approach based on the following:
* "Efficient Stream Compaction on Wide SIMD Many-Core Architectures"
* by Markus Billeter, Ola Olsson, Ulf Assarsson
* http://www.cse.chalmers.se/~uffe/streamcompaction.pdf
*/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
using std::setprecision;
using namespace std;
#include <assert.h>
#include <helper_cuda.h>
#include <sm_30_intrinsics.h>
#include "buildKdTree_common.h"
#include "Gpu.h"
__device__ KdCoord superKeyCompareB(const KdCoord *a, const KdCoord *b, const sint p, const sint dim)
{
KdCoord diff = a[p] - b[p];
for (sint i = 1; diff == 0 && i < dim; i++) {
sint r = i + p;
r = (r < dim) ? r : r - dim;
diff = a[r] - b[r];
}
return diff;
}
__device__ KdCoord superKeyComparePD(const KdCoord ap, const KdCoord bp, const KdCoord *a, const KdCoord *b, const sint p, const sint dim)
{
KdCoord diff = ap - bp;
for (sint i = 1; diff == 0 && i < dim; i++) {
sint r = i + p;
r = (r < dim) ? r : r - dim;
diff = a[r] - b[r];
}
return diff;
}
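// Illustrative note, not part of the original source: the super key starts the
// comparison at dimension p and wraps around to break ties. For dim = 3, p = 1,
// a = (5, 2, 9) and b = (7, 2, 9): a[1]-b[1] == 0, a[2]-b[2] == 0, then
// a[0]-b[0] == -2, so superKeyCompareB(a, b, 1, 3) returns -2 and a sorts before b
// even though the two points tie in dimensions 1 and 2.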
/*
 * Copy one warp's segment of a reference array, coalescing the global writes.
 *
 * calling parameters:
 *
 * refout - pointer to the destination reference array
 * refin - pointer to the source reference array
 * segSize - the number of references this warp copies
 * numTuples - the total number of tuples being processed
 */
__device__ void cuWarpCopyRef(refIdx_t refout[], refIdx_t refin[], sint segSize, const sint numTuples) {
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint thrdIdx = (pos & (warpSize-1));
uint warpsPerBlock = (SHARED_SIZE_LIMIT/(2*warpSize));
uint warpIndex = ((pos - thrdIdx)/warpSize);
refIdx_t ref;
if (segSize < warpSize*200) { //The copy is small, so do a simple unaligned copy and return
for (sint j = 0; j+thrdIdx < segSize; j += warpSize){
refout[j+thrdIdx] = refin[j+thrdIdx];
}
return;
}
// allocate the shared memory that will be used for coalescing of writes.
__shared__ refIdx_t s_ref[SHARED_SIZE_LIMIT];
__shared__ uint s_tag[SHARED_SIZE_LIMIT];
// allocate the input and output counter
uint outCnt, oldOutCnt;
uint inCnt;
// Calculate the base index for this warp in the shared memory array
// SHARED_SIZE_LIMIT/(2*warpSize) is the number of warps per block
// so the warp in block index is the mod of warpIndex by the num warps in block.
uint sharedBase = 2 * warpSize * (warpIndex % warpsPerBlock);
uint sharedAddrMask = (2*warpSize)-1;
// clear the dirty tags
s_tag[sharedBase + thrdIdx] = 0;
s_tag[sharedBase + warpSize + thrdIdx] = 0;
// come up with warpSize word aligned base write address
// first calculate the warp aligned write address below the starting address
refIdx_t* refptr = (refIdx_t*)((ulong)refout & ~((warpSize*sizeof(refIdx_t)) -1));
// initialize the output counter to be relative to the warpSize aligned write buffers
outCnt = int(refout - refptr);
refout = refptr;
// Do the first reads to align the input pointers to warpSize word boundary
// First calculate the warp aligned read address below the starting address
refptr = (refIdx_t*) ((ulong)refin & ~((warpSize*sizeof(refIdx_t)) -1));
// Calculate the input counter
inCnt = warpSize + refptr - refin;
// then read the words from the input only up to the next warpSize Boundary
// and write to shared memory as indexed by the output counter
if (thrdIdx < inCnt) {
ref = refin[thrdIdx];
s_ref[sharedBase + ((outCnt + thrdIdx) & sharedAddrMask)] = ref;
s_tag[sharedBase + ((outCnt + thrdIdx) & sharedAddrMask)] = 1;
}
// Increment the aligned input pointer
refin = refptr + warpSize;
// Update the output counters
oldOutCnt = outCnt;
outCnt += inCnt;
// If the last read crossed the boundary of the coalescing buffers, write out the valid words in the old buffer
if (((oldOutCnt ^ outCnt) & warpSize) != 0) {
if (s_tag[sharedBase + (oldOutCnt & warpSize) + thrdIdx] == 1) {
refout[(oldOutCnt & ~(warpSize-1)) + thrdIdx] = s_ref[sharedBase + (oldOutCnt & warpSize) + thrdIdx];
s_tag[sharedBase + (oldOutCnt & warpSize) + thrdIdx] = 0;
}
} else { // Else read another warp's worth to prime the buffer
ref = refin[thrdIdx];
s_ref[sharedBase + ((outCnt + thrdIdx) & sharedAddrMask)] = ref;
s_tag[sharedBase + ((outCnt + thrdIdx) & sharedAddrMask)] = 1;
oldOutCnt = outCnt;
outCnt += warpSize;
if (((oldOutCnt ^ outCnt) & warpSize) != 0) {
if (s_tag[sharedBase + (oldOutCnt & warpSize) + thrdIdx] == 1) {
refout[(oldOutCnt & ~(warpSize-1)) + thrdIdx] = s_ref[sharedBase + (oldOutCnt & warpSize) + thrdIdx];
s_tag[sharedBase + (oldOutCnt & warpSize) + thrdIdx] = 0;
}
}
// Increment the input counter
inCnt += warpSize;
// Increment the aligned input pointer
refin += warpSize;
}
// OK, input pointer is now at a warpSize addr boundary and the coalesce buffer has been primed.
// Time to go into the main loop. The loop will count through the remaining inputs
while (inCnt < segSize) {
if (inCnt+thrdIdx < segSize) {
ref = refin[thrdIdx];
s_ref[sharedBase + ((outCnt + thrdIdx) & sharedAddrMask)] = ref;
s_tag[sharedBase + ((outCnt + thrdIdx) & sharedAddrMask)] = 1;
}
oldOutCnt = outCnt;
outCnt += inCnt+warpSize <= segSize ? warpSize : segSize - inCnt;
if (((oldOutCnt ^ outCnt) & warpSize) != 0) {
if (s_tag[sharedBase + (oldOutCnt & warpSize) + thrdIdx] == 1) {
refout[(oldOutCnt & ~((warpSize)-1)) + thrdIdx] = s_ref[sharedBase + (oldOutCnt & warpSize) + thrdIdx];
s_tag[sharedBase + (oldOutCnt & warpSize) + thrdIdx] = 0;
}
}
// Increment the input counter
inCnt += warpSize;
// Increment the aligned input pointer
refin += warpSize;
}
// Write out the final buffer
if (s_tag[sharedBase + (outCnt & warpSize) + thrdIdx] == 1) {
refout[(outCnt & ~(warpSize-1)) + thrdIdx] = s_ref[sharedBase + (outCnt & warpSize) + thrdIdx];
s_tag[sharedBase + (outCnt & warpSize) + thrdIdx] = 0;
}
}
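// Illustrative note, not part of the original source: cuWarpCopyRef aligns the
// destination pointer down to a warpSize-word boundary and stages words in a
// 2*warpSize-entry shared buffer, so that (almost) every global store is a full,
// aligned warp-wide transaction. For example, if refout starts 5 words past an
// alignment boundary, outCnt starts at 5, incoming words land in buffer slots
// beginning at 5, and a whole warpSize-word line is flushed each time outCnt
// crosses a multiple of warpSize; the s_tag bits mark which slots hold valid data.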
__global__ void cuPartitionRemoveGaps(refIdx_t refoutx[], refIdx_t refinxLT[], refIdx_t refinxGT[], uint segLengthsLT[],
uint segLengthsGT[], const sint startIn, const sint endIn, sint level) {
uint pos = (blockIdx.x * blockDim.x + threadIdx.x);
uint allWarps = gridDim.x * blockDim.x / warpSize;
uint thrdIdx = (pos & (warpSize-1));
uint warpIndex = ((pos - thrdIdx)/warpSize);
uint start = startIn;
uint end = endIn;
uint mid;
for (uint i = 0; i < level; i++) {
mid = start + ((end - start)>>1);
if (warpIndex & (allWarps >> (i+1))) {
start = mid + 1;
} else {
end = mid -1;
}
}
mid = start + ((end - start)>>1);
sint partSize = end-start+1;
uint segSize = (partSize + (allWarps>>level) - 1) / (allWarps>>level);
uint segStart = start + segSize * (warpIndex - (warpIndex & ~((allWarps >> level) - 1)));
// do the simple slow implementation first
// get the seg start and seg size from the segLengths array written by the partition functions
// sum up the lengths of all of the segments at a lower index than this segment
// start at the base of the warp group. Do the LT data copy first
uint segStartOut = start;
if (thrdIdx == 0) {
for (uint i = (warpIndex & ~((allWarps >> level) - 1)); i < warpIndex; i++)
segStartOut += segLengthsLT[i];
segSize = segLengthsLT[warpIndex];
}
// Copy to the other threads in the warp.
segStartOut = __shfl(segStartOut, 0);
segSize = __shfl(segSize, 0);
// and do the copy.
cuWarpCopyRef(refoutx+segStartOut, refinxLT+segStart, segSize, partSize);
// Check to see that the partitioned data did not exceed its half of the output array.
sint partitionCount = segStartOut + segLengthsLT[warpIndex];
if (partitionCount > (mid)) {
return; //TODO should add an assert here;
}
// do the copy again for the gt data
segStartOut = mid+1;
if (thrdIdx == 0) {
for (uint i = (warpIndex & ~((allWarps >> level) - 1)); i < warpIndex; i++)
segStartOut += segLengthsGT[i];
segSize = segLengthsGT[warpIndex];
}
// Copy to the other threads in the warp.
segStartOut = __shfl(segStartOut, 0);
segSize = __shfl(segSize, 0);
// and do the copy.
cuWarpCopyRef(refoutx+segStartOut, refinxGT+segStart, segSize, partSize);
// Check to see that the partitioned data did not exceed its half of the output array.
partitionCount = segStartOut + segLengthsGT[warpIndex];
if (partitionCount > (end+1)) {
return; //TODO should add an assert here;
}
}
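// Illustrative note, not part of the original source: the start/end loop near the
// top of cuPartitionRemoveGaps (and the similar loops in the partition kernels
// below) walks the implicit binary tree of partitions using the high bits of
// warpIndex. A hedged worked example with allWarps = 8, level = 2, startIn = 0,
// endIn = 14:
//   warpIndex 0,1 (bits 00) -> [0,2]    warpIndex 2,3 (bits 01) -> [4,6]
//   warpIndex 4,5 (bits 10) -> [8,10]   warpIndex 6,7 (bits 11) -> [12,14]
// Elements 3, 7 and 11 are the medians already fixed at shallower levels, and the
// two warps sharing a range split it via segSize/segStart.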
#define SIMPLE_COPY
#ifdef SIMPLE_COPY
__global__ void cuCopyRef(refIdx_t refout[], refIdx_t refin[], const sint numRefs) {
uint allThreads = gridDim.x * blockDim.x; // Total number of warps started
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
for (sint i = pos; i<numRefs; i += allThreads)
refout[i] = refin[i];
}
#else
__global__ void cuCopyRef(refIdx_t refoutx[], refIdx_t refinx[], const sint numTuples) {
uint allWarps = gridDim.x * blockDim.x / warpSize; // Total number of warps started
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint thrdIdx = (pos & (warpSize-1));
// uint warpsPerBlock = (SHARED_SIZE_LIMIT/(2*warpSize));
uint warpIndex = ((pos - thrdIdx)/warpSize);
uint segSize = (numTuples + allWarps - 1) / (allWarps);
// calculate the base addrs of the global memory input and output arrays.
uint segStart = warpIndex * segSize;
if (segStart + segSize > numTuples) {
segSize = numTuples - segStart;
}
cuWarpCopyRef(refoutx + segStart, refinx + segStart, segSize, numTuples);
}
#endif
#define PART_SIZE_GT_SUB_PART_SIZE -1
#define PART_FINISH_DELTA_TOO_LARGE -2
__device__ void cuSmallPartition( const __restrict__ KdCoord coords[],
refIdx_t refoutxLT[], refIdx_t refoutxGT[], refIdx_t refinx[],
const refIdx_t divRef, const sint p, const sint dim, uint segSizex,
const uint subWarpSize)
{
uint pos = (blockIdx.x * blockDim.x + threadIdx.x); // thread ID
uint thrdIdx = (pos & (warpSize-1)); // Index within the warp
uint subWarpIdx = thrdIdx / subWarpSize; // subWarp index within the warp
uint subThrdIdx = thrdIdx - subWarpIdx * subWarpSize; // Thread index within the subWarp
uint subWarpMask = ((1<<subWarpSize)-1) << subWarpIdx * subWarpSize; // subWarp Mask
uint segSize;
uint outCntLT = 0;
uint outCntGT = 0;
segSize = segSizex;
refIdx_t* refin = refinx;
refIdx_t* refoutLT = refoutxLT;
refIdx_t* refoutGT = refoutxGT;
KdCoord divVal = coords[divRef*dim+p];
KdCoord cmp = 0;
uint maskGEme = ((1 << thrdIdx) - 1);
uint ref;
if (subThrdIdx < segSize) {// inside the segment?
ref = refin[subThrdIdx];
// do the compare
KdCoord val = coords[ref*dim+p];
cmp = superKeyComparePD(val, divVal, coords+ref*dim, coords+divRef*dim, p, dim);
} else {
cmp = 0; // Use cmp == 0 to exclude data outside the segment
}
refin += warpSize;
// Write out the less than indices
uint shflMask = __ballot(cmp<0) & subWarpMask;
if (cmp < 0) {
// Calculate the address which is determined by the number of kept values less than this thread.
sint wrtIdx = __popc(shflMask & maskGEme);
refoutLT[(outCntLT + wrtIdx)] = ref;
}
// Update the output counter
outCntLT += __popc(shflMask);
// Write out the greater than values
shflMask = __ballot(cmp>0) & subWarpMask;
if (cmp > 0) {
// Calculate the address which is determined by the number of kept values less than this thread.
sint wrtIdx = __popc(shflMask & maskGEme);
refoutGT[(outCntGT + wrtIdx)] = ref;
}
// Update the output counter
outCntGT += __popc(shflMask);
}
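// Illustrative note, not part of the original source: the __ballot / __popc pair in
// cuSmallPartition above is a warp-level stream compaction. A hedged worked example
// for one 8-lane sub-warp (the first sub-warp of the warp) where cmp < 0 only on
// lanes 2, 5 and 7:
//   shflMask = __ballot(cmp < 0)              -> 0b10100100
//   lane 2: __popc(shflMask & 0b00000011) = 0 -> writes slot outCntLT + 0
//   lane 5: __popc(shflMask & 0b00011111) = 1 -> writes slot outCntLT + 1
//   lane 7: __popc(shflMask & 0b01111111) = 2 -> writes slot outCntLT + 2
// and outCntLT then advances by __popc(shflMask) == 3, so the kept references are
// packed densely without atomics.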
__global__ void cuPartitionShort( KdNode kdNodes[], const __restrict__ KdCoord coords[],
refIdx_t refoutx[], refIdx_t refinx[], refIdx_t refp[],
const sint p, const sint dim,
refIdx_t midRefs[], refIdx_t lastMidRefs[],
sint startIn, sint endIn,
const sint level, const sint logNumSubWarps, const sint logSubWarpSize,
sint* d_partitionError)
{
uint pos = (blockIdx.x * blockDim.x + threadIdx.x); // This thread's position in all threads
uint subWarpSize = 1<<logSubWarpSize;
uint allSubWarps = gridDim.x * blockDim.x / subWarpSize; // Total number of subWarps started
uint subThrdIdx = (pos & (subWarpSize-1)); // this threads position in the subWarp.
uint subWarpIndex = (pos - subThrdIdx)/(subWarpSize); // this subWarps position in all threads
uint loopLevels = level-logNumSubWarps; // log of the number of iterations to be done
// This first loop iterates over the partition regions when there are more partition regions than threads.
// Note that if there are more warps than partition regions (level < logNumWarps) the iteration count will be 0
// and start and end will be untouched.
for (uint loop = 0; loop < (1<<loopLevels); loop++) {
uint start = startIn; // local copy of start
uint end = endIn; // local copy of end
uint mid; // mid between start and end
// This loop determines the start and end of the current iteration over the partitions.
for (uint k = 1; k <= loopLevels; k++) {
mid = start + ((end - start)>>1);
if (loop & (1 << (loopLevels - k )))
{ start = mid + 1; } else { end = mid -1; }
}
// Now calculate the start, end, and mid using the iterative method for this warp's partition segment.
for (uint i = 0; i < (logNumSubWarps); i++) {
mid = start + ((end - start)>>1);
if (subWarpIndex & (allSubWarps >> (i+1)))
{ start = mid + 1; } else { end = mid -1; }
}
if((end - start + 1) > subWarpSize) {
*d_partitionError = PART_SIZE_GT_SUB_PART_SIZE;
}
mid = start + ((end - start)>>1);
// Calculate the size of the partition segment that this warp will partition.
sint partSize = end - start + 1; // number of references to partition
// get the reference to the coordinate that will be partitioned against.
refIdx_t midRef = refp[mid];
cuSmallPartition( coords, // pointer to coordinate array
refoutx+start, // pointer to the beginning of the output ref array for this subdivision
refoutx+mid+1, // pointer to the beginning of the output ref array for this subdivision
refinx+start, // pointer to the beginning of the input ref array for this subdivision
midRef, // reference to the coordinate against which the arrays will be partitioned
p, dim, // which dimension is being used for partitioning and the number of dimensions
partSize, // The size of the segment that each warp will partition
subWarpSize // size of the subwarp
);
// if this thread is the 0th thread and this warp is the starting warp in a warp group, save off the mid point.
if (subThrdIdx == 0 ) {
uint mra = subWarpIndex + loop * allSubWarps;
midRefs[mra] = midRef;
if (lastMidRefs != NULL) {
if (mra & 1) { // odd or even?
kdNodes[lastMidRefs[mra>>1]].gtChild = midRef;
} else {
kdNodes[lastMidRefs[mra>>1]].ltChild = midRef;
}
}
}
}
}
__device__ void cuSinglePartition( const __restrict__ KdCoord coords[], refIdx_t refoutxLT[], refIdx_t refoutxGT[], refIdx_t refinx[],
const refIdx_t divRef, const sint p, const sint dim, uint segSizex, uint segLengthsLT[], uint segLengthsGT[],
const sint numTuples, uint warpGroupSize)
{
uint pos = (blockIdx.x * blockDim.x + threadIdx.x);
uint thrdIdx = (pos & (warpSize-1));
uint warpsPerBlock = (SHARED_SIZE_LIMIT/(2*warpSize));
uint warpIndex = ((pos - thrdIdx)/warpSize) % warpGroupSize;
uint segSize;
uint outCntLT = 0;
uint outCntGT = 0;
uint oldOutCntLT;
uint oldOutCntGT;
refIdx_t ref;
// Calculate the base addrs of the global memory input and output arrays.
uint segStart = warpIndex * segSizex;
if (segStart + segSizex > numTuples) {
segSize = numTuples - segStart;
} else segSize = segSizex;
refIdx_t* refin = refinx + segStart;
refIdx_t* refoutLT = refoutxLT + segStart;
refIdx_t* refoutGT = refoutxGT + segStart;
// Allocate the shared memory that will be used for coalescing of writes.
__shared__ refIdx_t s_refLT[SHARED_SIZE_LIMIT];
__shared__ refIdx_t s_refGT[SHARED_SIZE_LIMIT];
KdCoord divVal = coords[divRef*dim+p];
// Calculate the base index for this warp in the shared memory array
// SHARED_SIZE_LIMIT/(2*warpSize) is the number of warps per block
// so the warp in block index is the mod of warpIndex by the num warps in block.
uint sharedBase = 2 * warpSize * (((pos - thrdIdx)/warpSize) % warpsPerBlock);
uint sharedAddrMask = (2*warpSize)-1;
KdCoord cmp = 0;
uint maskGEme = ((1 << thrdIdx) - 1);
// Now start looping
for (sint j = 0; j < segSize; j += warpSize){
if (j+thrdIdx < segSize) {
// s_ref[sharedBase + ((outCntLT + thrdIdx) & sharedAddrMask)] = ref = refin[thrdIdx];
ref = refin[thrdIdx];
// Do the compare
KdCoord val = coords[ref*dim+p];
cmp = superKeyComparePD(val, divVal, coords+ref*dim, coords+divRef*dim, p, dim);
// First check for compare failure
} else {
cmp = 0; // Use cmp == 0 to exclude data outside the segment
}
refin += warpSize;
// Write out the less than indices
uint shflMask = __ballot(cmp<0);
if (cmp < 0) {
// Calculate the address which is determined by the number of kept values less than this thread.
sint wrtIdx = __popc(shflMask & maskGEme);
s_refLT[sharedBase + ((outCntLT + wrtIdx) & sharedAddrMask)] = ref;
}
// Update the output counter but keep an old value so it's known where to write the output.
oldOutCntLT = outCntLT;
outCntLT += __popc(shflMask);
// If the write spilled into the other buffer in shared memory write buffer indicated by old count.
if (((oldOutCntLT ^ outCntLT) & warpSize) != 0) {
refoutLT[(oldOutCntLT & ~(warpSize-1)) + thrdIdx] = s_refLT[sharedBase + (oldOutCntLT & warpSize) + thrdIdx];
}
// Write out the greater than values
shflMask = __ballot(cmp>0);
if (cmp > 0) {
// Calculate the address which is determined by the number of kept values less than this thread.
sint wrtIdx = __popc(shflMask & maskGEme);
s_refGT[sharedBase + ((outCntGT + wrtIdx) & sharedAddrMask)] = ref;
}
// Update the output counter but keep an old value so it's known where to write the output.
oldOutCntGT = outCntGT;
outCntGT += __popc(shflMask);
// If the write spilled into the other buffer in shared memory write buffer indicated by old count.
if (((oldOutCntGT ^ outCntGT) & warpSize) != 0) {
refoutGT[(oldOutCntGT & ~(warpSize-1)) + thrdIdx] = s_refGT[sharedBase + (oldOutCntGT & warpSize) + thrdIdx];
}
}
// Write out the final LT buffer
if ((outCntLT & (warpSize-1)) > thrdIdx) {
refoutLT[(outCntLT & ~(warpSize-1)) + thrdIdx] = s_refLT[sharedBase + (outCntLT & warpSize) + thrdIdx];
}
// write out the final GT buffer
if ((outCntGT & (warpSize-1)) > thrdIdx) {
refoutGT[(outCntGT & ~(warpSize-1)) + thrdIdx] = s_refGT[sharedBase + (outCntGT & warpSize) + thrdIdx];
}
// And finally store the number of LT writes that were done by this warp
if (thrdIdx == 0 && segLengthsLT != NULL) segLengthsLT[warpIndex] = outCntLT;
// And finally store the number of GT writes that were done by this warp
if (thrdIdx == 0 && segLengthsGT != NULL) segLengthsGT[warpIndex] = outCntGT;
}
__global__ void cuPartitionLWTP( KdNode kdNodes[], const __restrict__ KdCoord coords[],
refIdx_t refoutx[], refIdx_t refinx[], refIdx_t refp[],
const sint p, const sint dim,
refIdx_t midRefs[], refIdx_t lastMidRefs[],
sint startIn, sint endIn, const sint level, const sint logNumWarps)
{
uint pos = (blockIdx.x * blockDim.x + threadIdx.x); // This thread's position in all threads
uint allWarps = gridDim.x * blockDim.x / warpSize; // Total number of warps started
uint thrdIdx = (pos & (warpSize-1)); // this threads position in the warp.
uint warpIndex = ((pos - thrdIdx)/warpSize); // this warps position in all threads
uint loopLevels = level-logNumWarps; // log of the number of iterations to be done
// This first loop iterates over the partition regions when there are more partition regions than threads.
// Note that if there are more warps than partition regions (level < logNumWarps) the iteration count will be 0
// and start and end will be untouched.
for (uint loop = 0; loop < (1<<loopLevels); loop++) {
uint start = startIn; // local copy of start
uint end = endIn; // local copy of end
uint mid; // mid between start and end
// This loop determines the start and end of the current iteration over the partitions.
for (uint k = 1; k <= loopLevels; k++) {
mid = start + ((end - start)>>1);
if (loop & (1 << (loopLevels - k ))) {
start = mid + 1;
} else {
end = mid -1;
}
}
// Now calculate the start, end, and mid using the iterative method for this warp's partition segment.
for (uint i = 0; i < logNumWarps; i++) {
mid = start + ((end - start)>>1);
if (warpIndex & (allWarps >> (i+1))) {
start = mid + 1;
} else {
end = mid -1;
}
}
mid = start + ((end - start)>>1);
// Calculate the size of the partition segment that this warp will partition.
sint partSize = end - start + 1; // number of references to partition
// get the reference to the coordinate that will be partitioned against.
refIdx_t midRef = refp[mid];
cuSinglePartition( coords, // pointer to coordinate array
refoutx+start, // pointer to the beginning of the output ref array for this subdivision
refoutx+mid+1, // pointer to the beginning of the output ref array for this subdivision
refinx+start, // pointer to the beginning of the input ref array for this subdivision
midRef, // reference to the coordinate against which the arrays will be partitioned
p, dim, // which dimension is being used for partitioning and the number of dimensions
partSize,// The size of the segment that each warp will partition
NULL, // pointer to where the resulting segment lengths for this subdivision will be put
NULL, // pointer to where the resulting segment lengths for this subdivision will be put
partSize, // total length for all partitions. This bounds the partition so no overflow.
1 // number of warps being applied to partitioning this subdivision
);
// if this thread is the 0th thread and this warp is the starting warp in a warp group, save off the mid point.
if (thrdIdx == 0 ) {
uint mra = warpIndex+loop*allWarps;
midRefs[mra] = midRef;
if (lastMidRefs != NULL) {
if (mra & 1) { // odd or even?
kdNodes[lastMidRefs[mra>>1]].gtChild = midRef;
} else {
kdNodes[lastMidRefs[mra>>1]].ltChild = midRef;
}
}
}
}
}
__global__ void cuPartition( KdNode kdNodes[], const __restrict__ KdCoord coords[],
refIdx_t refoutxLT[], refIdx_t refoutxGT[],
refIdx_t refinx[], refIdx_t refp[],
const sint p, const sint dim,
uint segLengthsLT[], uint segLengthsGT[],
refIdx_t midRefs[], refIdx_t lastMidRefs[],
const sint startIn, const sint endIn, const sint level)
{
uint pos = (blockIdx.x * blockDim.x + threadIdx.x);
uint allWarps = gridDim.x * blockDim.x / warpSize;
uint thrdIdx = (pos & (warpSize-1));
uint warpIndex = ((pos - thrdIdx)/warpSize);
uint start = startIn;
uint end = endIn;
uint mid;
for (uint i = 0; i < level; i++) {
mid = start + ((end - start)>>1);
if (warpIndex & (allWarps >> (i+1))) {
start = mid + 1;
} else {
end = mid -1;
}
}
mid = start + ((end - start)>>1);
sint partSize = end-start+1;
uint segSize = (partSize + (allWarps>>level) - 1) / (allWarps>>level);
refIdx_t midRef = refp[mid];
cuSinglePartition( coords, // pointer to coordinate array
refoutxLT+start, // pointer to the beginning of the output ref array for this subdivision
refoutxGT+start, // pointer to the beginning of the output ref array for this subdivision
refinx+start, // pointer to the beginning of the input ref array for this subdivision
midRef, // reference to the coordinate against which the arrays will be partitioned
p, dim, // which dimension is being used for partitioning and the number of dimensions
segSize, // The size of the segment that each warp will partition
segLengthsLT+(warpIndex & ~((allWarps >> level) - 1)), // pointer to where the resulting segment lengths for this subdivision will be put
segLengthsGT+(warpIndex & ~((allWarps >> level) - 1)), // pointer to where the resulting segment lengths for this subdivision will be put
partSize, // total length of the partition for all warps.
(allWarps>>level) // number of warps being applied to partitioning this subdivision
);
// if this thread is the 0th thread and this warp is the starting warp in a warp group, save off the mid point.
// if (thrdIdx == 0 && (warpIndex & ((allWarps >> (level+1)) - 1)) == 0) midRefs[warpIndex >> (level-1)] = midRef;
if (thrdIdx == 0 ){
uint mra = warpIndex/(allWarps>>level);
midRefs[mra] = midRef;
if (lastMidRefs != NULL) {
if (mra & 1) { // odd or even?
kdNodes[lastMidRefs[mra>>1]].gtChild = midRef;
} else {
kdNodes[lastMidRefs[mra>>1]].ltChild = midRef;
}
}
}
}
__global__ void cuPartitionLast(KdNode kdNodes[], refIdx_t refp[], refIdx_t midRefs[], refIdx_t lastMidRefs[],
const sint startIn, const sint endIn, const sint level,
sint* d_partitionError)
{
uint pos = (blockIdx.x * blockDim.x + threadIdx.x);
uint allWarps = gridDim.x * blockDim.x;
uint start = startIn;
uint end = endIn;
uint mid;
refIdx_t midRef = -1;
for (uint i = 0; i < level; i++) {
mid = start + ((end - start)>>1);
if (pos & (allWarps >> (i+1))) {
start = mid + 1;
} else {
end = mid -1;
}
}
if (end - start > 2){
// set an error condition. Indicates that not enough partition loops were done.
*d_partitionError = PART_FINISH_DELTA_TOO_LARGE;
} else if (end - start == 2) {
mid = start + ((end - start)>>1);
midRef = refp[mid];
kdNodes[midRef].gtChild = refp[end];
kdNodes[midRef].ltChild = refp[start];
} else if (end - start == 1) {
midRef = refp[start];
kdNodes[midRef].gtChild = refp[end];
} else if (end - start == 0) {
midRef = refp[start];
}
if (midRef != -1){
midRefs[pos] = midRef;
if (pos & 1) { // odd or even?
kdNodes[lastMidRefs[pos>>1]].gtChild = midRef;
} else {
kdNodes[lastMidRefs[pos>>1]].ltChild = midRef;
}
}
}
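/*
* Host-side build helpers. d_midRefs[] holds two buffers that are ping-ponged
* between levels: level L writes its pivot references into d_midRefs[L % 2]
* while reading the previous level's pivots from d_midRefs[(L-1) % 2]. Entry
* mra of the current level is attached to its parent lastMidRefs[mra>>1] as
* gtChild when mra is odd and as ltChild when it is even, which is how the
* kernels above stitch the kd-tree together level by level.
*/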
void Gpu::initBuildKdTree() {
uint numWarps = numBlocks*numThreads/32;
#pragma omp critical (launchLock)
{
setDevice();
// Create the arrays that store the length of each warp's output segment
checkCudaErrors(hipMalloc((void **)&d_segLengthsLT, numWarps * sizeof(uint)));
checkCudaErrors(hipMalloc((void **)&d_segLengthsGT, numWarps * sizeof(uint)));
// Allocate the arrays to store the midpoint references for this level
checkCudaErrors(hipMalloc((void **)&d_midRefs[0], num * sizeof(refIdx_t)));
checkCudaErrors(hipMalloc((void **)&d_midRefs[1], num * sizeof(refIdx_t)));
}
}
void Gpu::closeBuildKdTree() {
syncGPU();
#pragma omp critical (launchLock)
{
setDevice();
// Free the arrays that store the length of each warp's output segment
checkCudaErrors(hipFree(d_segLengthsLT));
checkCudaErrors(hipFree(d_segLengthsGT));
// Free the arrays to store the midpoint references for this level
checkCudaErrors(hipFree(d_midRefs[0]));
checkCudaErrors(hipFree(d_midRefs[1]));
}
}
void Gpu::partitionDim(KdNode d_kdNodes[], const KdCoord d_coords[], refIdx_t* l_references[],
const sint p, const sint dim, const sint numTuples, const sint level, const sint numThreads) {
uint numWarps = numThreads/32;
uint logNumWarps = (uint)std::log2((float)numWarps);
uint logNumTuples = (uint)ceil(std::log2((float)numTuples));
// This portion sets up the thread and block size to work with small numbers of threads
// This is only useful for debug situations.
sint numBlocks;
sint numThrdPerBlk;
if (numThreads >= SHARED_SIZE_LIMIT/2) {
numBlocks = numThreads/(SHARED_SIZE_LIMIT/2);
numThrdPerBlk = SHARED_SIZE_LIMIT/2;
} else {
numBlocks = 1;
numThrdPerBlk = numThreads;
}
refIdx_t* thisMidRefs = d_midRefs[level % 2]; // Find out if this is an odd or even level
refIdx_t* lastMidRefs = d_midRefs[(level-1) % 2]; // Find out if this is an odd or even level
if (level == 0) {
lastMidRefs = NULL; // On the first pass null out the pointer to the last level because there isn't one.
}
//#define PRINT_TIME
#ifdef PRINT_TIME
float time;
hipEvent_t t_start, t_stop;
checkCudaErrors(hipEventCreate(&t_start));
checkCudaErrors(hipEventCreate(&t_stop));
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipEventRecord(t_start));
#endif
// Create pointers to the arrays that store the length of each warp's output segment, used in
// the compaction part of the partitioning functions. Only needed when there are more
// warps than there are partition segments. Need 1 array for the GT side, 1 array for the LT side.
// When the number of partitions to be performed is less than the number of
// warps, the partition kernel will apply multiple warps to each partition.
// This is the case where the segLength arrays are needed.
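// Kernel selection summary (added for clarity): when level < logNumWarps there
// are more warps than partitions, so cuPartition spreads several warps across
// each partition and cuPartitionRemoveGaps compacts the per-warp outputs.
// Otherwise, while each partition is still larger than a warp (roughly
// logNumTuples - level > 5, i.e. more than 32 references per partition),
// cuPartitionLWTP gives one warp to each partition; once a partition fits
// inside a sub-warp, cuPartitionShort finishes it with a single warp-vote pass.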
sint loopLevels;
sint remLevels;
if (level < logNumWarps){
loopLevels = 0;
remLevels = level;
sint start = 0;
sint end = numTuples-1;
for (sint thisDim = 1; thisDim < dim; thisDim++) { // Partition every dimension except the p partition.
sint r = thisDim + p;
r = (r >= dim) ? r-dim : r;
#pragma omp critical (launchLock)
{
setDevice();
hipLaunchKernelGGL(( cuPartition), dim3(numBlocks), dim3(numThrdPerBlk), 0, stream, d_kdNodes, d_coords, //pointer to coordinates
l_references[dim], l_references[dim+1], // pointers to the LT and GT partition output arrays
l_references[r], l_references[p], // pointers to the partitioned and primary array
p, dim, // axis and number of dimensions
d_segLengthsLT, d_segLengthsGT, // used by the partition kernel to store segment sizes
thisMidRefs, // array of this level's midpoint references
lastMidRefs, // array of the last level's midpoint references
start, end, remLevels); // start and end of the data and sub level.
checkCudaErrors(hipGetLastError());
// Do the copy to close up the gaps, lt to the lower half, gt to the upper half
hipLaunchKernelGGL(( cuPartitionRemoveGaps), dim3(numBlocks), dim3(numThrdPerBlk), 0, stream, l_references[r], l_references[dim], l_references[dim+1],
d_segLengthsLT, d_segLengthsGT, start, end, remLevels);
}
}
} else {
if ((logNumTuples - level > 5)) {
loopLevels = (level-logNumWarps);
remLevels = logNumWarps;
loopLevels = 0;
remLevels = level;
for (sint loop = 0; loop < (1<<loopLevels); loop++) {
sint start = 0;
sint end = numTuples-1;
uint mid;
for (int k=1; k<=loopLevels; k++) {
mid = start + (end - start)/2;
if (loop & (1 << loopLevels-k))
start = mid + 1;
else
end = mid - 1;
}
for (sint thisDim = 1; thisDim < dim; thisDim++) { // partition every dimension except the p partition.
sint r = thisDim + p;
r = (r >= dim) ? r-dim : r;
#pragma omp critical (launchLock)
{
setDevice();
hipLaunchKernelGGL(( cuPartitionLWTP), dim3(numBlocks), dim3(numThrdPerBlk), 0, stream, d_kdNodes, d_coords, //pointer to coordinates
l_references[dim], // pointers to the LT and GT partition output arrays
l_references[r], l_references[p], // pointers to the partitioned and primary array
p, dim, // axis and number of dimensions
thisMidRefs+loop*numWarps, // array of this midpoint references
lastMidRefs+loop*numWarps/2, // array of last midpoint references
start, end, remLevels, logNumWarps); // start and end of the data and sub level.
checkCudaErrors(hipGetLastError());
// do the copy to close up the gaps, lt to the lower half, gt to the upper half
hipLaunchKernelGGL(( cuCopyRef), dim3(numBlocks), dim3(numThrdPerBlk), 0, stream, l_references[r]+start, l_references[dim]+start, end - start + 1);
}
}
}
} else {
#define CHECK_FOR_ERRORS
#ifdef CHECK_FOR_ERRORS
sint partitionError = 0;
// hipMemcpyToSymbol(*d_partitionError,
// &partitionError,
// sizeof(partitionError),
// 0,hipMemcpyHostToDevice);
checkCudaErrors(hipMemcpy(d_partitionError, &partitionError, sizeof(sint), hipMemcpyHostToDevice));
#endif
sint logSubWarpSize = logNumTuples - level; // Should never be bigger than 32
sint logNumSubWarps = logNumWarps + 5 - logSubWarpSize;
sint start = 0;
sint end = numTuples-1;
for (sint thisDim = 1; thisDim < dim; thisDim++) { // Partition every dimension except the p partition.
sint r = thisDim + p;
r = (r >= dim) ? r-dim : r;
#pragma omp critical (launchLock)
{
setDevice();
hipLaunchKernelGGL(( cuPartitionShort), dim3(numBlocks), dim3(numThrdPerBlk), 0, stream, d_kdNodes, d_coords, //pointer to coordinates
l_references[dim], // pointers to the LT and GT partition output arrays
l_references[r], l_references[p], // pointers to the partitioned and primary array
p, dim, // axis and number of dimensions
thisMidRefs, // array of this level's midpoint references
lastMidRefs, // array of the last level's midpoint references
start, end, // start and end of the data
level, logNumSubWarps, logSubWarpSize, d_partitionError); // sub level.
checkCudaErrors(hipGetLastError());
// Do the copy to close up the gaps, lt to the lower half, gt to the upper half
hipLaunchKernelGGL(( cuCopyRef), dim3(numBlocks), dim3(numThrdPerBlk), 0, stream, l_references[r]+start, l_references[dim]+start, end - start + 1);
}
checkCudaErrors(hipGetLastError());
#ifdef CHECK_FOR_ERRORS
// hipMemcpyFromSymbolAsync(&partitionError,
// *d_partitionError,
// sizeof(partitionError),
// 0,hipMemcpyDeviceToHost, stream);
checkCudaErrors(hipMemcpyAsync(&partitionError, d_partitionError, sizeof(sint), hipMemcpyDeviceToHost, stream));
if (partitionError == PART_SIZE_GT_SUB_PART_SIZE ) {
cout << "Error in partition size vs sub warp size on level " << level << endl;
exit(1);
}
#endif
}
}
}
#ifdef PRINT_TIME
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipEventRecord(t_stop));
checkCudaErrors(hipEventSynchronize(t_stop));
checkCudaErrors(hipEventElapsedTime(&time, t_start, t_stop));
printf ("Partition took %f seconds\n",time/1000.0);
checkCudaErrors(hipEventDestroy(t_start));
checkCudaErrors(hipEventDestroy(t_stop));
#endif
// Get the mid values back but only on the first dimension processed of level 0. This will be the root node
if (level == 0) {
checkCudaErrors(hipMemcpyAsync(&rootNode, d_midRefs[0], sizeof(refIdx_t), hipMemcpyDeviceToHost, stream));
}
if (-1 == rootNode) {
cout << " Build Tree Error: Failure in assembly" << endl;
}
return;
}
void Gpu::partitionDimLast(KdNode d_kdNodes[], const KdCoord coord[], refIdx_t* l_references[],
const sint p, const sint dim, const sint numTuples, const sint level, const sint numThreads) {
uint numWarps = numThreads;
uint logNumWarps = (uint)std::log2((float)numWarps);
sint loopLevels;
sint remLevels;
if (logNumWarps < level){
loopLevels = (level-logNumWarps);
remLevels = logNumWarps;
} else {
loopLevels = 0;
remLevels = level;
}
sint numBlocks;
sint numThrdPerBlk;
if (numThreads >= SHARED_SIZE_LIMIT/2) {
numBlocks = numThreads/(SHARED_SIZE_LIMIT/2);
numThrdPerBlk = SHARED_SIZE_LIMIT/2;
} else {
numBlocks = 1;
numThrdPerBlk = numThreads;
}
refIdx_t* thisMidRefs = d_midRefs[level % 2]; // Find out if this is an odd or even level
refIdx_t* lastMidRefs = d_midRefs[(level-1) % 2]; // Find out if this is an odd or even level
#ifdef PRINT_TIME
float time;
hipEvent_t t_start, t_stop;
checkCudaErrors(hipEventCreate(&t_start));
checkCudaErrors(hipEventCreate(&t_stop));
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipEventRecord(t_start));
#endif
for (sint loop = 0; loop < (1<<loopLevels); loop++) {
sint start = 0;
sint end = numTuples-1;
sint mid;
for (sint k=1; k<=loopLevels; k++) {
mid = start + (end - start)/2;
if (loop & (1 << loopLevels-k))
start = mid + 1;
else
end = mid - 1;
}
for (sint thisDim = 1; thisDim < 2; thisDim++) { // Only a single pass is needed here; cuPartitionLast works from the primary reference array.
sint r = thisDim + p;
r = (r >= dim) ? r-dim : r;
#ifdef CHECK_FOR_ERRORS
sint partitionError = 0;
// hipMemcpyToSymbol(*d_partitionError, &partitionError,
// sizeof(partitionError), 0,hipMemcpyHostToDevice);
checkCudaErrors(hipMemcpy(d_partitionError, &partitionError, sizeof(sint), hipMemcpyHostToDevice));
#endif
#pragma omp critical (launchLock)
{
setDevice();
hipLaunchKernelGGL(( cuPartitionLast), dim3(numBlocks), dim3(numThrdPerBlk), 0, stream, d_kdNodes, // pointer to kdnode array.
l_references[p], // Reference array for primary
thisMidRefs+loop*numThreads, // mid reference array for current level
lastMidRefs+loop*numThreads/2, // mid reference array for last level
start, end, remLevels, d_partitionError); // Address range and more levels.
checkCudaErrors(hipGetLastError());
}
#ifdef CHECK_FOR_ERRORS
// hipMemcpyFromSymbol(&partitionError, *d_partitionError,
// sizeof(partitionError), 0,hipMemcpyDeviceToHost);
checkCudaErrors(hipMemcpyAsync(&partitionError, d_partitionError, sizeof(sint), hipMemcpyDeviceToHost, stream));
if (partitionError == PART_FINISH_DELTA_TOO_LARGE ) {
cout << "Error in last partition pass. Probably due to insufficient number of partiion passes, level = " << level << endl;
exit(1);
}
#endif
}
}
// checkCudaErrors(hipGetLastError());
#ifdef PRINT_TIME
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipEventRecord(t_stop));
checkCudaErrors(hipEventSynchronize(t_stop));
checkCudaErrors(hipEventElapsedTime(&time, t_start, t_stop));
printf ("Partition took %f seconds\n",time/1000.0);
checkCudaErrors(hipEventDestroy(t_start));
checkCudaErrors(hipEventDestroy(t_stop));
#endif
return;
}
uint Gpu::copyRef(refIdx_t refout[], refIdx_t refin[], uint numTuples, sint numThreads){
// This portion sets up the thread and block size to work with small numbers of threads
// This is only useful for debug situations.
sint numBlocks;
sint numThrdPerBlk;
if (numThreads >= SHARED_SIZE_LIMIT/2) {
numBlocks = numThreads/(SHARED_SIZE_LIMIT/2);
numThrdPerBlk = SHARED_SIZE_LIMIT/2;
} else {
numBlocks = 1;
numThrdPerBlk = numThreads;
}
hipLaunchKernelGGL(( cuCopyRef), dim3(numBlocks), dim3(numThrdPerBlk), 0, stream, refout, refin, numTuples);
return 0;
}
|
5c1689e60952e4fde1f769e76c0295d4e59c277f.cu
|
//
// buildTree.cu
//
// Created by John Robinson on 7/15/15.
// Copyright (c) 2015 John Robinson. All rights reserved.
/*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//
/*
* The partitioning algorithm uses an approach based on the following:
* "Efficient Stream Compaction on Wide SIMD Many-Core Architectures"
* by Markus Billeter, Ola Olsson, Ulf Assarsson
* http://www.cse.chalmers.se/~uffe/streamcompaction.pdf
*/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
using std::setprecision;
using namespace std;
#include <assert.h>
#include <helper_cuda.h>
#include <sm_30_intrinsics.h>
#include "buildKdTree_common.h"
#include "Gpu.h"
__device__ KdCoord superKeyCompareB(const KdCoord *a, const KdCoord *b, const sint p, const sint dim)
{
KdCoord diff = a[p] - b[p];
for (sint i = 1; diff == 0 && i < dim; i++) {
sint r = i + p;
r = (r < dim) ? r : r - dim;
diff = a[r] - b[r];
}
return diff;
}
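/*
* Both comparators order references by a "super key": the coordinate of the
* leading dimension p, with ties broken by cycling through the remaining
* dimensions. For example (illustrative, dim = 3, p = 1), two points are
* compared on y first, then z, then x, so equal y values never yield an
* ambiguous ordering unless the points are identical in every dimension.
*/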
__device__ KdCoord superKeyComparePD(const KdCoord ap, const KdCoord bp, const KdCoord *a, const KdCoord *b, const sint p, const sint dim)
{
KdCoord diff = ap - bp;
for (sint i = 1; diff == 0 && i < dim; i++) {
sint r = i + p;
r = (r < dim) ? r : r - dim;
diff = a[r] - b[r];
}
return diff;
}
/*
* Copy a segment of one reference array into another using a single warp.
*
* calling parameters:
*
* refout - pointer to the destination reference segment
* refin - pointer to the source reference segment
* segSize - the number of references to copy
* numTuples - the total number of references (not used by the copy itself)
*
* Writes are staged through shared memory so that global stores are issued as
* warpSize-aligned, fully coalesced transactions.
*/
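/*
* Illustrative note (added; the numbers are an example, not from the original
* source): if refout begins 5 words past a warpSize-aligned address, outCnt
* starts at 5 and refout is rewound to that aligned address. Staged words
* accumulate in the 2*warpSize shared window at slot (outCnt + lane) mod 64,
* and each time outCnt crosses a multiple of warpSize the just-filled 32-word
* half of the window is flushed as a single aligned store; s_tag guards slots
* that were never written so the ragged first and last transfers stay correct.
*/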
__device__ void cuWarpCopyRef(refIdx_t refout[], refIdx_t refin[], sint segSize, const sint numTuples) {
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint thrdIdx = (pos & (warpSize-1));
uint warpsPerBlock = (SHARED_SIZE_LIMIT/(2*warpSize));
uint warpIndex = ((pos - thrdIdx)/warpSize);
refIdx_t ref;
if (segSize < warpSize*200) { //The copy is small, so do a simple unaligned copy and return
for (sint j = 0; j+thrdIdx < segSize; j += warpSize){
refout[j+thrdIdx] = refin[j+thrdIdx];
}
return;
}
// allocate the shared memory that will be used for coalescing of writes.
__shared__ refIdx_t s_ref[SHARED_SIZE_LIMIT];
__shared__ uint s_tag[SHARED_SIZE_LIMIT];
// allocate the input and output counter
uint outCnt, oldOutCnt;
uint inCnt;
// Calculate the base index for this warp in the shared memory array
// SHARED_SIZE_LIMIT/(2*warpSize) is the number of warps per block
// so the warp in block index is the mod of warpIndex by the num warps in block.
uint sharedBase = 2 * warpSize * (warpIndex % warpsPerBlock);
uint sharedAddrMask = (2*warpSize)-1;
// clear the dirty tags
s_tag[sharedBase + thrdIdx] = 0;
s_tag[sharedBase + warpSize + thrdIdx] = 0;
// come up with warpSize word aligned base write address
// first calculate the warp aligned read address below the starting address
refIdx_t* refptr = (refIdx_t*)((ulong)refout & ~((warpSize*sizeof(refIdx_t)) -1));
// initialize the output counter to be relative to the warpSize aligned write buffers
outCnt = int(refout - refptr);
refout = refptr;
// Do the first reads to align the input pointers to warpSize word boundary
// First calculate the warp aligned read address below the starting address
refptr = (refIdx_t*) ((ulong)refin & ~((warpSize*sizeof(refIdx_t)) -1));
// Calculate the input counter
inCnt = warpSize + refptr - refin;
// then read the words from the input only up to the next warpSize Boundary
// and write to shared memory as indexed by the output counter
if (thrdIdx < inCnt) {
ref = refin[thrdIdx];
s_ref[sharedBase + ((outCnt + thrdIdx) & sharedAddrMask)] = ref;
s_tag[sharedBase + ((outCnt + thrdIdx) & sharedAddrMask)] = 1;
}
// Increment the aligned input pointer
refin = refptr + warpSize;
// Update the output counters
oldOutCnt = outCnt;
outCnt += inCnt;
// If the last read crossed the boundary of the coalescing buffers, write out the valid words in the old buffer
if (((oldOutCnt ^ outCnt) & warpSize) != 0) {
if (s_tag[sharedBase + (oldOutCnt & warpSize) + thrdIdx] == 1) {
refout[(oldOutCnt & ~(warpSize-1)) + thrdIdx] = s_ref[sharedBase + (oldOutCnt & warpSize) + thrdIdx];
s_tag[sharedBase + (oldOutCnt & warpSize) + thrdIdx] = 0;
}
} else { // Else read another warp's worth to prime the buffer
ref = refin[thrdIdx];
s_ref[sharedBase + ((outCnt + thrdIdx) & sharedAddrMask)] = ref;
s_tag[sharedBase + ((outCnt + thrdIdx) & sharedAddrMask)] = 1;
oldOutCnt = outCnt;
outCnt += warpSize;
if (((oldOutCnt ^ outCnt) & warpSize) != 0) {
if (s_tag[sharedBase + (oldOutCnt & warpSize) + thrdIdx] == 1) {
refout[(oldOutCnt & ~(warpSize-1)) + thrdIdx] = s_ref[sharedBase + (oldOutCnt & warpSize) + thrdIdx];
s_tag[sharedBase + (oldOutCnt & warpSize) + thrdIdx] = 0;
}
}
// Increment the input counter
inCnt += warpSize;
// Increment the aligned input pointer
refin += warpSize;
}
// OK, input pointer is now at a warpSize addr boundary and the coalesce buffer has been primed.
// Time to go into the main loop. The loop will count through the remaining inputs
while (inCnt < segSize) {
if (inCnt+thrdIdx < segSize) {
ref = refin[thrdIdx];
s_ref[sharedBase + ((outCnt + thrdIdx) & sharedAddrMask)] = ref;
s_tag[sharedBase + ((outCnt + thrdIdx) & sharedAddrMask)] = 1;
}
oldOutCnt = outCnt;
outCnt += inCnt+warpSize <= segSize ? warpSize : segSize - inCnt;
if (((oldOutCnt ^ outCnt) & warpSize) != 0) {
if (s_tag[sharedBase + (oldOutCnt & warpSize) + thrdIdx] == 1) {
refout[(oldOutCnt & ~((warpSize)-1)) + thrdIdx] = s_ref[sharedBase + (oldOutCnt & warpSize) + thrdIdx];
s_tag[sharedBase + (oldOutCnt & warpSize) + thrdIdx] = 0;
}
}
// Increment the input counter
inCnt += warpSize;
// Increment the aligned input pointer
refin += warpSize;
}
// Write out the final buffer
if (s_tag[sharedBase + (outCnt & warpSize) + thrdIdx] == 1) {
refout[(outCnt & ~(warpSize-1)) + thrdIdx] = s_ref[sharedBase + (outCnt & warpSize) + thrdIdx];
s_tag[sharedBase + (outCnt & warpSize) + thrdIdx] = 0;
}
}
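/*
* Note on cuPartitionRemoveGaps (below): lane 0 of each warp sums
* segLengthsLT[] over the warps earlier in its warp group to find where its
* own LT block begins, broadcasts that offset with __shfl, and cuWarpCopyRef
* then packs the block into the low half of the output segment; the GT blocks
* are packed the same way starting at mid+1, which closes the gaps left by
* the per-warp partitioning pass.
*/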
__global__ void cuPartitionRemoveGaps(refIdx_t refoutx[], refIdx_t refinxLT[], refIdx_t refinxGT[], uint segLengthsLT[],
uint segLengthsGT[], const sint startIn, const sint endIn, sint level) {
uint pos = (blockIdx.x * blockDim.x + threadIdx.x);
uint allWarps = gridDim.x * blockDim.x / warpSize;
uint thrdIdx = (pos & (warpSize-1));
uint warpIndex = ((pos - thrdIdx)/warpSize);
uint start = startIn;
uint end = endIn;
uint mid;
for (uint i = 0; i < level; i++) {
mid = start + ((end - start)>>1);
if (warpIndex & (allWarps >> (i+1))) {
start = mid + 1;
} else {
end = mid -1;
}
}
mid = start + ((end - start)>>1);
sint partSize = end-start+1;
uint segSize = (partSize + (allWarps>>level) - 1) / (allWarps>>level);
uint segStart = start + segSize * (warpIndex - (warpIndex & ~((allWarps >> level) - 1)));
// do the simple slow implementation first
// get the seg start and seg size from the segLengths array written by the partition functions
// sum up the lengths of all of the segments at a lower index than this segment
// start at the base of the warp group. Do the LT data copy first
uint segStartOut = start;
if (thrdIdx == 0) {
for (uint i = (warpIndex & ~((allWarps >> level) - 1)); i < warpIndex; i++)
segStartOut += segLengthsLT[i];
segSize = segLengthsLT[warpIndex];
}
// Copy to the other threads in the warp.
segStartOut = __shfl(segStartOut, 0);
segSize = __shfl(segSize, 0);
// and do the copy.
cuWarpCopyRef(refoutx+segStartOut, refinxLT+segStart, segSize, partSize);
// Check to see that the partitioned data did not exceed its half of the output array.
sint partitionCount = segStartOut + segLengthsLT[warpIndex];
if (partitionCount > (mid)) {
return; //TODO should add an assert here;
}
// do the copy again for the gt data
segStartOut = mid+1;
if (thrdIdx == 0) {
for (uint i = (warpIndex & ~((allWarps >> level) - 1)); i < warpIndex; i++)
segStartOut += segLengthsGT[i];
segSize = segLengthsGT[warpIndex];
}
// Copy to the other threads in the warp.
segStartOut = __shfl(segStartOut, 0);
segSize = __shfl(segSize, 0);
// and do the copy.
cuWarpCopyRef(refoutx+segStartOut, refinxGT+segStart, segSize, partSize);
// Check to see that the partitioned data did not exceed its half of the output array.
partitionCount = segStartOut + segLengthsGT[warpIndex];
if (partitionCount > (end+1)) {
return; //TODO should add an assert here;
}
}
#define SIMPLE_COPY
#ifdef SIMPLE_COPY
__global__ void cuCopyRef(refIdx_t refout[], refIdx_t refin[], const sint numRefs) {
uint allThreads = gridDim.x * blockDim.x; // Total number of warps started
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
for (sint i = pos; i<numRefs; i += allThreads)
refout[i] = refin[i];
}
#else
__global__ void cuCopyRef(refIdx_t refoutx[], refIdx_t refinx[], const sint numTuples) {
uint allWarps = gridDim.x * blockDim.x / warpSize; // Total number of warps started
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint thrdIdx = (pos & (warpSize-1));
// uint warpsPerBlock = (SHARED_SIZE_LIMIT/(2*warpSize));
uint warpIndex = ((pos - thrdIdx)/warpSize);
uint segSize = (numTuples + allWarps - 1) / (allWarps);
// calculate the base addrs of the global memory input and output arrays.
uint segStart = warpIndex * segSize;
if (segStart + segSize > numTuples) {
segSize = numTuples - segStart;
}
cuWarpCopyRef(refoutx + segStart, refinx + segStart, segSize, numTuples);
}
#endif
#define PART_SIZE_GT_SUB_PART_SIZE -1
#define PART_FINISH_DELTA_TOO_LARGE -2
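/*
* Note on cuSmallPartition (below): compaction is done with warp votes.
* __ballot collects one bit per lane whose key falls on the LT (or GT) side,
* and each keeping lane stores at index __popc(ballot & maskGEme), the count
* of keeping lanes below it. Illustrative example (a sub-warp of 8 starting
* at lane 0): if lanes 1, 4 and 5 compare less-than, the ballot is 0b00110010
* and those lanes write to refoutLT[0], refoutLT[1] and refoutLT[2], so the
* survivors are packed densely without atomics.
*/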
__device__ void cuSmallPartition( const __restrict__ KdCoord coords[],
refIdx_t refoutxLT[], refIdx_t refoutxGT[], refIdx_t refinx[],
const refIdx_t divRef, const sint p, const sint dim, uint segSizex,
const uint subWarpSize)
{
uint pos = (blockIdx.x * blockDim.x + threadIdx.x); // thread ID
uint thrdIdx = (pos & (warpSize-1)); // Index within the warp
uint subWarpIdx = thrdIdx / subWarpSize; // subWarp index within the warp
uint subThrdIdx = thrdIdx - subWarpIdx * subWarpSize; // Thread index within the subWarp
uint subWarpMask = ((1<<subWarpSize)-1) << subWarpIdx * subWarpSize; // subWarp Mask
uint segSize;
uint outCntLT = 0;
uint outCntGT = 0;
segSize = segSizex;
refIdx_t* refin = refinx;
refIdx_t* refoutLT = refoutxLT;
refIdx_t* refoutGT = refoutxGT;
KdCoord divVal = coords[divRef*dim+p];
KdCoord cmp = 0;
uint maskGEme = ((1 << thrdIdx) - 1);
uint ref;
if (subThrdIdx < segSize) {// inside the segment?
ref = refin[subThrdIdx];
// do the compare
KdCoord val = coords[ref*dim+p];
cmp = superKeyComparePD(val, divVal, coords+ref*dim, coords+divRef*dim, p, dim);
} else {
cmp = 0; // Use cmp == 0 to exclude data outside the segment
}
refin += warpSize;
// Write out the less than indices
uint shflMask = __ballot(cmp<0) & subWarpMask;
if (cmp < 0) {
// Calculate the address which is determined by the number of kept values less than this thread.
sint wrtIdx = __popc(shflMask & maskGEme);
refoutLT[(outCntLT + wrtIdx)] = ref;
}
// Update the output counter
outCntLT += __popc(shflMask);
// Write out the greater than values
shflMask = __ballot(cmp>0) & subWarpMask;
if (cmp > 0) {
// Calculate the address which is determined by the number of kept values less than this thread.
sint wrtIdx = __popc(shflMask & maskGEme);
refoutGT[(outCntGT + wrtIdx)] = ref;
}
// Update the output counter
outCntGT += __popc(shflMask);
}
__global__ void cuPartitionShort( KdNode kdNodes[], const __restrict__ KdCoord coords[],
refIdx_t refoutx[], refIdx_t refinx[], refIdx_t refp[],
const sint p, const sint dim,
refIdx_t midRefs[], refIdx_t lastMidRefs[],
sint startIn, sint endIn,
const sint level, const sint logNumSubWarps, const sint logSubWarpSize,
sint* d_partitionError)
{
uint pos = (blockIdx.x * blockDim.x + threadIdx.x); // This thread's position in all threads
uint subWarpSize = 1<<logSubWarpSize;
uint allSubWarps = gridDim.x * blockDim.x / subWarpSize; // Total number of subWarps started
uint subThrdIdx = (pos & (subWarpSize-1)); // this threads position in the subWarp.
uint subWarpIndex = (pos - subThrdIdx)/(subWarpSize); // this subWarps position in all threads
uint loopLevels = level-logNumSubWarps; // log of the number of iterations to be done
// This first loop iterates over the partition regions when there are more partition regions than threads.
// Note that if there are more warps than partition regions (level < logNumSubWarps) the iteration count will be 0
// and start and end will be untouched.
for (uint loop = 0; loop < (1<<loopLevels); loop++) {
uint start = startIn; // local copy of start
uint end = endIn; // local copy of end
uint mid; // mid between start and end
// This loop determines the start and end of the current iteration over the partitions.
for (uint k = 1; k <= loopLevels; k++) {
mid = start + ((end - start)>>1);
if (loop & (1 << (loopLevels - k )))
{ start = mid + 1; } else { end = mid -1; }
}
// Now calculate the start, end, and mid using the iterative method for this warp's partition segment.
for (uint i = 0; i < (logNumSubWarps); i++) {
mid = start + ((end - start)>>1);
if (subWarpIndex & (allSubWarps >> (i+1)))
{ start = mid + 1; } else { end = mid -1; }
}
if((end - start + 1) > subWarpSize) {
*d_partitionError = PART_SIZE_GT_SUB_PART_SIZE;
}
mid = start + ((end - start)>>1);
// Calculate the size of the partition segment that this warp will partition.
sint partSize = end - start + 1; // number of references to partition
// get the reference to the coordinate that will be partitioned against.
refIdx_t midRef = refp[mid];
cuSmallPartition( coords, // pointer to coordinate array
refoutx+start, // pointer to the beginning of the output ref array for this subdivision
refoutx+mid+1, // pointer to the beginning of the output ref array for this subdivision
refinx+start, // pointer to the beginning of the input ref array for this subdivision
midRef, // reference to the coordinate against which the arrays will be partitioned
p, dim, // which dimension is being used for partitioning and the number of dimensions
partSize, // The size of the segment that each warp will partition
subWarpSize // size of the subwarp
);
// if this thread is the 0th thread and this warp is the starting warp in a warp group, save off the mid point.
if (subThrdIdx == 0 ) {
uint mra = subWarpIndex + loop * allSubWarps;
midRefs[mra] = midRef;
if (lastMidRefs != NULL) {
if (mra & 1) { // odd or even?
kdNodes[lastMidRefs[mra>>1]].gtChild = midRef;
} else {
kdNodes[lastMidRefs[mra>>1]].ltChild = midRef;
}
}
}
}
}
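/*
* Note on cuSinglePartition (below): it layers shared-memory staging on top
* of the same ballot/popc compaction so global writes stay coalesced. Each
* warp owns a 2*warpSize window of s_refLT/s_refGT, survivors are appended at
* (outCnt & sharedAddrMask), and whenever outCnt crosses a warpSize boundary
* the completed half of the window is flushed as one aligned 32-word store.
* The partially filled halves are drained after the loop and the per-warp
* counts are recorded in segLengthsLT/GT for the later gap-removal pass.
*/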
__device__ void cuSinglePartition( const __restrict__ KdCoord coords[], refIdx_t refoutxLT[], refIdx_t refoutxGT[], refIdx_t refinx[],
const refIdx_t divRef, const sint p, const sint dim, uint segSizex, uint segLengthsLT[], uint segLengthsGT[],
const sint numTuples, uint warpGroupSize)
{
uint pos = (blockIdx.x * blockDim.x + threadIdx.x);
uint thrdIdx = (pos & (warpSize-1));
uint warpsPerBlock = (SHARED_SIZE_LIMIT/(2*warpSize));
uint warpIndex = ((pos - thrdIdx)/warpSize) % warpGroupSize;
uint segSize;
uint outCntLT = 0;
uint outCntGT = 0;
uint oldOutCntLT;
uint oldOutCntGT;
refIdx_t ref;
// Calculate the base addrs of the global memory input and output arrays.
uint segStart = warpIndex * segSizex;
if (segStart + segSizex > numTuples) {
segSize = numTuples - segStart;
} else segSize = segSizex;
refIdx_t* refin = refinx + segStart;
refIdx_t* refoutLT = refoutxLT + segStart;
refIdx_t* refoutGT = refoutxGT + segStart;
// Allocate the shared memory that will be used for coalescing of writes.
__shared__ refIdx_t s_refLT[SHARED_SIZE_LIMIT];
__shared__ refIdx_t s_refGT[SHARED_SIZE_LIMIT];
KdCoord divVal = coords[divRef*dim+p];
// Calculate the base index for this warp in the shared memory array
// SHARED_SIZE_LIMIT/(2*warpSize) is the number of warps per block
// so the warp in block index is the mod of warpIndex by the num warps in block.
uint sharedBase = 2 * warpSize * (((pos - thrdIdx)/warpSize) % warpsPerBlock);
uint sharedAddrMask = (2*warpSize)-1;
KdCoord cmp = 0;
uint maskGEme = ((1 << thrdIdx) - 1);
// Now start looping
for (sint j = 0; j < segSize; j += warpSize){
if (j+thrdIdx < segSize) {
// s_ref[sharedBase + ((outCntLT + thrdIdx) & sharedAddrMask)] = ref = refin[thrdIdx];
ref = refin[thrdIdx];
// Do the compare
KdCoord val = coords[ref*dim+p];
cmp = superKeyComparePD(val, divVal, coords+ref*dim, coords+divRef*dim, p, dim);
// First check for compare failure
} else {
cmp = 0; // Use cmp == 0 to exclude data outside the segment
}
refin += warpSize;
// Write out the less than indices
uint shflMask = __ballot(cmp<0);
if (cmp < 0) {
// Calculate the address which is determined by the number of kept values less than this thread.
sint wrtIdx = __popc(shflMask & maskGEme);
s_refLT[sharedBase + ((outCntLT + wrtIdx) & sharedAddrMask)] = ref;
}
// Update the output counter but keep an old value so it's known where to write the output.
oldOutCntLT = outCntLT;
outCntLT += __popc(shflMask);
// If the write spilled into the other half of the shared-memory buffer, write out the half indicated by the old count.
if (((oldOutCntLT ^ outCntLT) & warpSize) != 0) {
refoutLT[(oldOutCntLT & ~(warpSize-1)) + thrdIdx] = s_refLT[sharedBase + (oldOutCntLT & warpSize) + thrdIdx];
}
// Write out the greater than values
shflMask = __ballot(cmp>0);
if (cmp > 0) {
// Calculate the address which is determined by the number of kept values less than this thread.
sint wrtIdx = __popc(shflMask & maskGEme);
s_refGT[sharedBase + ((outCntGT + wrtIdx) & sharedAddrMask)] = ref;
}
// Update the output counter but keep an old value so it's known where to write the output.
oldOutCntGT = outCntGT;
outCntGT += __popc(shflMask);
// If the write spilled into the other half of the shared-memory buffer, write out the half indicated by the old count.
if (((oldOutCntGT ^ outCntGT) & warpSize) != 0) {
refoutGT[(oldOutCntGT & ~(warpSize-1)) + thrdIdx] = s_refGT[sharedBase + (oldOutCntGT & warpSize) + thrdIdx];
}
}
// Write out the final LT buffer
if ((outCntLT & (warpSize-1)) > thrdIdx) {
refoutLT[(outCntLT & ~(warpSize-1)) + thrdIdx] = s_refLT[sharedBase + (outCntLT & warpSize) + thrdIdx];
}
// write out the final GT buffer
if ((outCntGT & (warpSize-1)) > thrdIdx) {
refoutGT[(outCntGT & ~(warpSize-1)) + thrdIdx] = s_refGT[sharedBase + (outCntGT & warpSize) + thrdIdx];
}
// And finally store the number of LT writes that were done by this warp
if (thrdIdx == 0 && segLengthsLT != NULL) segLengthsLT[warpIndex] = outCntLT;
// And finally store the number of GT writes that were done by this warp
if (thrdIdx == 0 && segLengthsGT != NULL) segLengthsGT[warpIndex] = outCntGT;
}
__global__ void cuPartitionLWTP( KdNode kdNodes[], const __restrict__ KdCoord coords[],
refIdx_t refoutx[], refIdx_t refinx[], refIdx_t refp[],
const sint p, const sint dim,
refIdx_t midRefs[], refIdx_t lastMidRefs[],
sint startIn, sint endIn, const sint level, const sint logNumWarps)
{
uint pos = (blockIdx.x * blockDim.x + threadIdx.x); // This thread's position in all threads
uint allWarps = gridDim.x * blockDim.x / warpSize; // Total number of warps started
uint thrdIdx = (pos & (warpSize-1)); // this threads position in the warp.
uint warpIndex = ((pos - thrdIdx)/warpSize); // this warps position in all threads
uint loopLevels = level-logNumWarps; // log of the number of iterations to be done
// This first loop iterates over the partition regions when there are more partition regions than threads.
// Note that if there are more warps than partition regions (level < logNumWarps) the iteration count will be 0
// and start and end will be untouched.
for (uint loop = 0; loop < (1<<loopLevels); loop++) {
uint start = startIn; // local copy of start
uint end = endIn; // local copy of end
uint mid; // mid between start and end
// This loop determines the start and end of the current iteration over the partitions.
for (uint k = 1; k <= loopLevels; k++) {
mid = start + ((end - start)>>1);
if (loop & (1 << (loopLevels - k ))) {
start = mid + 1;
} else {
end = mid -1;
}
}
// Now calculate the start, end, and mid using the iterative method for this warp's partition segment.
for (uint i = 0; i < logNumWarps; i++) {
mid = start + ((end - start)>>1);
if (warpIndex & (allWarps >> (i+1))) {
start = mid + 1;
} else {
end = mid -1;
}
}
mid = start + ((end - start)>>1);
// Calculate the size of the partition segment that this warp will partition.
sint partSize = end - start + 1; // number of references to partition
// get the reference to the coordinate that will be partitioned against.
refIdx_t midRef = refp[mid];
cuSinglePartition( coords, // pointer to coordinate array
refoutx+start, // pointer to the beginning of the output ref array for this subdivision
refoutx+mid+1, // pointer to the beginning of the output ref array for this subdivision
refinx+start, // pointer to the beginning of the input ref array for this subdivision
midRef, // reference to the coordinate against which the arrays will be partitioned
p, dim, // which dimension is being used for partitioning and the number of dimensions
partSize,// The size of the segment that each warp will partition
NULL, // pointer to where the resulting segment lengths for this subdivision will be put
NULL, // pointer to where the resulting segment lengths for this subdivision will be put
partSize, // total length for all partitions. This bounds the partition so no overflow.
1 // number of warps being applied to partitioning this subdivision
);
// if this thread is the 0th thread and this warp is the starting warp in a warp group, save off the mid point.
if (thrdIdx == 0 ) {
uint mra = warpIndex+loop*allWarps;
midRefs[mra] = midRef;
if (lastMidRefs != NULL) {
if (mra & 1) { // odd or even?
kdNodes[lastMidRefs[mra>>1]].gtChild = midRef;
} else {
kdNodes[lastMidRefs[mra>>1]].ltChild = midRef;
}
}
}
}
}
__global__ void cuPartition( KdNode kdNodes[], const __restrict__ KdCoord coords[],
refIdx_t refoutxLT[], refIdx_t refoutxGT[],
refIdx_t refinx[], refIdx_t refp[],
const sint p, const sint dim,
uint segLengthsLT[], uint segLengthsGT[],
refIdx_t midRefs[], refIdx_t lastMidRefs[],
const sint startIn, const sint endIn, const sint level)
{
uint pos = (blockIdx.x * blockDim.x + threadIdx.x);
uint allWarps = gridDim.x * blockDim.x / warpSize;
uint thrdIdx = (pos & (warpSize-1));
uint warpIndex = ((pos - thrdIdx)/warpSize);
uint start = startIn;
uint end = endIn;
uint mid;
for (uint i = 0; i < level; i++) {
mid = start + ((end - start)>>1);
if (warpIndex & (allWarps >> (i+1))) {
start = mid + 1;
} else {
end = mid -1;
}
}
mid = start + ((end - start)>>1);
sint partSize = end-start+1;
uint segSize = (partSize + (allWarps>>level) - 1) / (allWarps>>level);
refIdx_t midRef = refp[mid];
cuSinglePartition( coords, // pointer to coordinate array
refoutxLT+start, // pointer to the beginning of the output ref array for this subdivision
refoutxGT+start, // pointer to the beginning of the output ref array for this subdivision
refinx+start, // pointer to the beginning of the input ref array for this subdivision
midRef, // reference to the coordinate against which the arrays will be partitioned
p, dim, // which dimension is being used for partitioning and the number of dimensions
segSize, // The size of the segment that each warp will partition
segLengthsLT+(warpIndex & ~((allWarps >> level) - 1)), // pointer to where the resulting segment lengths for this subdivision will be put
segLengthsGT+(warpIndex & ~((allWarps >> level) - 1)), // pointer to where the resulting segment lengths for this subdivision will be put
partSize, // total length of the partition for all warps.
(allWarps>>level) // number of warps being applied to partitioning this subdivision
);
// if this thread is the 0th thread and this warp is the starting warp in a warp group, save off the mid point.
// if (thrdIdx == 0 && (warpIndex & ((allWarps >> (level+1)) - 1)) == 0) midRefs[warpIndex >> (level-1)] = midRef;
if (thrdIdx == 0 ){
uint mra = warpIndex/(allWarps>>level);
midRefs[mra] = midRef;
if (lastMidRefs != NULL) {
if (mra & 1) { // odd or even?
kdNodes[lastMidRefs[mra>>1]].gtChild = midRef;
} else {
kdNodes[lastMidRefs[mra>>1]].ltChild = midRef;
}
}
}
}
__global__ void cuPartitionLast(KdNode kdNodes[], refIdx_t refp[], refIdx_t midRefs[], refIdx_t lastMidRefs[],
const sint startIn, const sint endIn, const sint level,
sint* d_partitionError)
{
uint pos = (blockIdx.x * blockDim.x + threadIdx.x);
uint allWarps = gridDim.x * blockDim.x;
uint start = startIn;
uint end = endIn;
uint mid;
refIdx_t midRef = -1;
for (uint i = 0; i < level; i++) {
mid = start + ((end - start)>>1);
if (pos & (allWarps >> (i+1))) {
start = mid + 1;
} else {
end = mid -1;
}
}
if (end - start > 2){
// set an error condition. Indicates that not enough partition loops were done.
*d_partitionError = PART_FINISH_DELTA_TOO_LARGE;
} else if (end - start == 2) {
mid = start + ((end - start)>>1);
midRef = refp[mid];
kdNodes[midRef].gtChild = refp[end];
kdNodes[midRef].ltChild = refp[start];
} else if (end - start == 1) {
midRef = refp[start];
kdNodes[midRef].gtChild = refp[end];
} else if (end - start == 0) {
midRef = refp[start];
}
if (midRef != -1){
midRefs[pos] = midRef;
if (pos & 1) { // odd or even?
kdNodes[lastMidRefs[pos>>1]].gtChild = midRef;
} else {
kdNodes[lastMidRefs[pos>>1]].ltChild = midRef;
}
}
}
void Gpu::initBuildKdTree() {
uint numWarps = numBlocks*numThreads/32;
#pragma omp critical (launchLock)
{
setDevice();
// Create the arrays that store the length of each warp's output segment
checkCudaErrors(cudaMalloc((void **)&d_segLengthsLT, numWarps * sizeof(uint)));
checkCudaErrors(cudaMalloc((void **)&d_segLengthsGT, numWarps * sizeof(uint)));
// Allocate the arrays to store the midpoint references for this level
checkCudaErrors(cudaMalloc((void **)&d_midRefs[0], num * sizeof(refIdx_t)));
checkCudaErrors(cudaMalloc((void **)&d_midRefs[1], num * sizeof(refIdx_t)));
}
}
void Gpu::closeBuildKdTree() {
syncGPU();
#pragma omp critical (launchLock)
{
setDevice();
// Free the arrays that store the length of each warp's output segment
checkCudaErrors(cudaFree(d_segLengthsLT));
checkCudaErrors(cudaFree(d_segLengthsGT));
// Free the arrays to store the midpoint references for this level
checkCudaErrors(cudaFree(d_midRefs[0]));
checkCudaErrors(cudaFree(d_midRefs[1]));
}
}
void Gpu::partitionDim(KdNode d_kdNodes[], const KdCoord d_coords[], refIdx_t* l_references[],
const sint p, const sint dim, const sint numTuples, const sint level, const sint numThreads) {
uint numWarps = numThreads/32;
uint logNumWarps = (uint)std::log2((float)numWarps);
uint logNumTuples = (uint)ceil(std::log2((float)numTuples));
// This portion sets up the thread and block size to work with small numbers of threads
// This is only useful for debug situations.
sint numBlocks;
sint numThrdPerBlk;
if (numThreads >= SHARED_SIZE_LIMIT/2) {
numBlocks = numThreads/(SHARED_SIZE_LIMIT/2);
numThrdPerBlk = SHARED_SIZE_LIMIT/2;
} else {
numBlocks = 1;
numThrdPerBlk = numThreads;
}
refIdx_t* thisMidRefs = d_midRefs[level % 2]; // Find out if this is an odd or even level
refIdx_t* lastMidRefs = d_midRefs[(level-1) % 2]; // Find out if this is an odd or even level
if (level == 0) {
lastMidRefs = NULL; // On the first pass null out the pointer to the last level because there isn't one.
}
//#define PRINT_TIME
#ifdef PRINT_TIME
float time;
cudaEvent_t t_start, t_stop;
checkCudaErrors(cudaEventCreate(&t_start));
checkCudaErrors(cudaEventCreate(&t_stop));
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaEventRecord(t_start));
#endif
// Create pointers to the arrays that store the length of each warp's output segment, used in
// the compaction part of the partitioning functions. Only needed when there are more
// warps than there are partition segments. Need 1 array for the GT side, 1 array for the LT side.
// When the number of partitions to be performed is less than the number of
// warps, the partition kernel will apply multiple warps to each partition.
// This is the case where the segLength arrays are needed.
sint loopLevels;
sint remLevels;
if (level < logNumWarps){
loopLevels = 0;
remLevels = level;
sint start = 0;
sint end = numTuples-1;
for (sint thisDim = 1; thisDim < dim; thisDim++) { // Partition every dimension except the p partition.
sint r = thisDim + p;
r = (r >= dim) ? r-dim : r;
#pragma omp critical (launchLock)
{
setDevice();
cuPartition<<<numBlocks, numThrdPerBlk, 0, stream>>>(d_kdNodes, d_coords, //pointer to coordinates
l_references[dim], l_references[dim+1], // pointers to the LT and GT partition output arrays
l_references[r], l_references[p], // pointers to the partitioned and primary array
p, dim, // axis and number of dimensions
d_segLengthsLT, d_segLengthsGT, // used by the partition kernel to store segment sizes
thisMidRefs, // array of this level's midpoint references
lastMidRefs, // array of the last level's midpoint references
start, end, remLevels); // start and end of the data and sub level.
checkCudaErrors(cudaGetLastError());
// Do the copy to close up the gaps, lt to the lower half, gt to the upper half
cuPartitionRemoveGaps<<<numBlocks, numThrdPerBlk, 0, stream>>>(l_references[r], l_references[dim], l_references[dim+1],
d_segLengthsLT, d_segLengthsGT, start, end, remLevels);
}
}
} else {
if ((logNumTuples - level > 5)) {
loopLevels = (level-logNumWarps);
remLevels = logNumWarps;
loopLevels = 0;
remLevels = level;
for (sint loop = 0; loop < (1<<loopLevels); loop++) {
sint start = 0;
sint end = numTuples-1;
uint mid;
for (int k=1; k<=loopLevels; k++) {
mid = start + (end - start)/2;
if (loop & (1 << loopLevels-k))
start = mid + 1;
else
end = mid - 1;
}
for (sint thisDim = 1; thisDim < dim; thisDim++) { // partition every dimension except the p partition.
sint r = thisDim + p;
r = (r >= dim) ? r-dim : r;
#pragma omp critical (launchLock)
{
setDevice();
cuPartitionLWTP<<<numBlocks, numThrdPerBlk, 0, stream>>>(d_kdNodes, d_coords, //pointer to coordinates
l_references[dim], // pointers to the LT and GT partition output arrays
l_references[r], l_references[p], // pointers to the partitioned and primary array
p, dim, // axis and number of dimensions
thisMidRefs+loop*numWarps, // array of this midpoint references
lastMidRefs+loop*numWarps/2, // array of last midpoint references
start, end, remLevels, logNumWarps); // start and end of the data and sub level.
checkCudaErrors(cudaGetLastError());
// do the copy to close up the gaps, lt to the lower half, gt to the upper half
cuCopyRef<<<numBlocks, numThrdPerBlk, 0, stream>>>(l_references[r]+start, l_references[dim]+start, end - start + 1);
}
}
}
} else {
#define CHECK_FOR_ERRORS
#ifdef CHECK_FOR_ERRORS
sint partitionError = 0;
// cudaMemcpyToSymbol(*d_partitionError,
// &partitionError,
// sizeof(partitionError),
// 0,cudaMemcpyHostToDevice);
checkCudaErrors(cudaMemcpy(d_partitionError, &partitionError, sizeof(sint), cudaMemcpyHostToDevice));
#endif
sint logSubWarpSize = logNumTuples - level; // Should never be bigger than 32
sint logNumSubWarps = logNumWarps + 5 - logSubWarpSize;
sint start = 0;
sint end = numTuples-1;
for (sint thisDim = 1; thisDim < dim; thisDim++) { // Partition every dimension except the p partition.
sint r = thisDim + p;
r = (r >= dim) ? r-dim : r;
#pragma omp critical (launchLock)
{
setDevice();
cuPartitionShort<<<numBlocks, numThrdPerBlk, 0, stream>>>(d_kdNodes, d_coords, //pointer to coordinates
l_references[dim], // pointers to the LT and GT partition output arrays
l_references[r], l_references[p], // pointers to the partitioned and primary array
p, dim, // axis and number of dimensions
thisMidRefs, // array of this level's midpoint references
lastMidRefs, // array of the last level's midpoint references
start, end, // start and end of the data
level, logNumSubWarps, logSubWarpSize, d_partitionError); // sub level.
checkCudaErrors(cudaGetLastError());
// Do the copy to close up the gaps, lt to the lower half, gt to the upper half
cuCopyRef<<<numBlocks, numThrdPerBlk, 0, stream>>>(l_references[r]+start, l_references[dim]+start, end - start + 1);
}
checkCudaErrors(cudaGetLastError());
#ifdef CHECK_FOR_ERRORS
// cudaMemcpyFromSymbolAsync(&partitionError,
// *d_partitionError,
// sizeof(partitionError),
// 0,cudaMemcpyDeviceToHost, stream);
checkCudaErrors(cudaMemcpyAsync(&partitionError, d_partitionError, sizeof(sint), cudaMemcpyDeviceToHost, stream));
if (partitionError == PART_SIZE_GT_SUB_PART_SIZE ) {
cout << "Error in partition size vs sub warp size on level " << level << endl;
exit(1);
}
#endif
}
}
}
#ifdef PRINT_TIME
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaEventRecord(t_stop));
checkCudaErrors(cudaEventSynchronize(t_stop));
checkCudaErrors(cudaEventElapsedTime(&time, t_start, t_stop));
printf ("Partition took %f seconds\n",time/1000.0);
checkCudaErrors(cudaEventDestroy(t_start));
checkCudaErrors(cudaEventDestroy(t_stop));
#endif
// Get the mid values back but only on the first dimension processed of level 0. This will be the root node
if (level == 0) {
checkCudaErrors(cudaMemcpyAsync(&rootNode, d_midRefs[0], sizeof(refIdx_t), cudaMemcpyDeviceToHost, stream));
}
if (-1 == rootNode) {
cout << " Build Tree Error: Failure in assembly" << endl;
}
return;
}
void Gpu::partitionDimLast(KdNode d_kdNodes[], const KdCoord coord[], refIdx_t* l_references[],
const sint p, const sint dim, const sint numTuples, const sint level, const sint numThreads) {
uint numWarps = numThreads;
uint logNumWarps = (uint)std::log2((float)numWarps);
sint loopLevels;
sint remLevels;
if (logNumWarps < level){
loopLevels = (level-logNumWarps);
remLevels = logNumWarps;
} else {
loopLevels = 0;
remLevels = level;
}
sint numBlocks;
sint numThrdPerBlk;
if (numThreads >= SHARED_SIZE_LIMIT/2) {
numBlocks = numThreads/(SHARED_SIZE_LIMIT/2);
numThrdPerBlk = SHARED_SIZE_LIMIT/2;
} else {
numBlocks = 1;
numThrdPerBlk = numThreads;
}
refIdx_t* thisMidRefs = d_midRefs[level % 2]; // Find out if this is an odd or even level
refIdx_t* lastMidRefs = d_midRefs[(level-1) % 2]; // Find out if this is an odd or even level
#ifdef PRINT_TIME
float time;
cudaEvent_t t_start, t_stop;
checkCudaErrors(cudaEventCreate(&t_start));
checkCudaErrors(cudaEventCreate(&t_stop));
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaEventRecord(t_start));
#endif
for (sint loop = 0; loop < (1<<loopLevels); loop++) {
sint start = 0;
sint end = numTuples-1;
sint mid;
for (sint k=1; k<=loopLevels; k++) {
mid = start + (end - start)/2;
if (loop & (1 << loopLevels-k))
start = mid + 1;
else
end = mid - 1;
}
for (sint thisDim = 1; thisDim < 2; thisDim++) { // Only a single pass is needed here; cuPartitionLast works from the primary reference array.
sint r = thisDim + p;
r = (r >= dim) ? r-dim : r;
#ifdef CHECK_FOR_ERRORS
sint partitionError = 0;
// cudaMemcpyToSymbol(*d_partitionError, &partitionError,
// sizeof(partitionError), 0,cudaMemcpyHostToDevice);
checkCudaErrors(cudaMemcpy(d_partitionError, &partitionError, sizeof(sint), cudaMemcpyHostToDevice));
#endif
#pragma omp critical (launchLock)
{
setDevice();
cuPartitionLast<<<numBlocks, numThrdPerBlk, 0, stream>>>(d_kdNodes, // pointer to kdnode array.
l_references[p], // Reference array for primary
thisMidRefs+loop*numThreads, // mid reference array for current level
lastMidRefs+loop*numThreads/2, // mid reference array for last level
start, end, remLevels, d_partitionError); // Address range and more levels.
checkCudaErrors(cudaGetLastError());
}
#ifdef CHECK_FOR_ERRORS
// cudaMemcpyFromSymbol(&partitionError, *d_partitionError,
// sizeof(partitionError), 0,cudaMemcpyDeviceToHost);
checkCudaErrors(cudaMemcpyAsync(&partitionError, d_partitionError, sizeof(sint), cudaMemcpyDeviceToHost, stream));
if (partitionError == PART_FINISH_DELTA_TOO_LARGE ) {
cout << "Error in last partition pass. Probably due to insufficient number of partiion passes, level = " << level << endl;
exit(1);
}
#endif
}
}
// checkCudaErrors(cudaGetLastError());
#ifdef PRINT_TIME
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaEventRecord(t_stop));
checkCudaErrors(cudaEventSynchronize(t_stop));
checkCudaErrors(cudaEventElapsedTime(&time, t_start, t_stop));
printf ("Partition took %f seconds\n",time/1000.0);
checkCudaErrors(cudaEventDestroy(t_start));
checkCudaErrors(cudaEventDestroy(t_stop));
#endif
return;
}
uint Gpu::copyRef(refIdx_t refout[], refIdx_t refin[], uint numTuples, sint numThreads){
// This portion sets up the thread and block size to work with small numbers of threads
// This is only useful for debug situations.
sint numBlocks;
sint numThrdPerBlk;
if (numThreads >= SHARED_SIZE_LIMIT/2) {
numBlocks = numThreads/(SHARED_SIZE_LIMIT/2);
numThrdPerBlk = SHARED_SIZE_LIMIT/2;
} else {
numBlocks = 1;
numThrdPerBlk = numThreads;
}
cuCopyRef<<<numBlocks, numThrdPerBlk, 0, stream>>>(refout, refin, numTuples);
return 0;
}
|
10dc632ff24b8dcaf5c13272709fff14047b3004.hip
|
// !!! This is a file automatically generated by hipify!!!
/***********************************************************************************
This work is COMS 4824 Architecture Final Project, advised by Professor Martha Kim.
"A Walk on the Hyperthreaded Side:
an Implementation and Analysis of Translating Virtual Addresses on a GPU"
Copyright (c) 2017 Columbia University.
All rights reserved.
Created by Serena Liu and Raphael Norwitz.
AWS EC2 instance configuration:
--Instance type: g2.2xlarge
--AMI Id: NVIDIA CUDA 7.5 Toolkit on Amazon Linux-0ce7aca3-5b96-4ff4-8396-05245687380a-ami-52420645.3 (ami-52f7b345)
CUDA examples used:
https://raw.githubusercontent.com/berkeley-scf/gpu-workshop-2016/master/kernelExample.cu
Software Usage:
--compile: nvcc page_walk.cu -o page_walk.out
--output: ./page_walk.out -n <total addresses> <intermediate table parameters>
(eg, ./page_walk.out -n 400 2 3 2 4)
************************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <sys/time.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdint.h>
#define BLOCK_D1 512
#define BLOCK_D2 1
#define BLOCK_D3 1
#define MAX_LEVELS 20
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
static int max_table;
struct trans_thread {
void **curr_table;
unsigned long cuda_result;
int offset[MAX_LEVELS];
int curr;
int max;
};
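/*
* A trans_thread describes one address translation: curr_table points at the
* top-level table, offset[] holds the index to use at each level, and max is
* the number of levels. Illustrative two-level walk: the walker follows
* curr_table[offset[0]] down to the leaf table and returns the value stored
* there at offset[1], i.e. one pointer dereference per intermediate level and
* a value read at the last level.
*/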
//function called by CUDA kernel
__host__ __device__ int translate_cpu(struct trans_thread *trans) {
unsigned long *gpu_ptr = (unsigned long *) trans->curr_table;
while(trans->curr < trans->max-1) {
gpu_ptr = (unsigned long *) gpu_ptr[trans->offset[trans->curr]];
trans->curr++;
}
return (int) *((int *) gpu_ptr + trans->offset[trans->curr]);
// ((void *) trans->curr_table + trans->offset[trans->max-1]);
}
//CPU counterpart
int translate_cpu2(struct trans_thread *trans) {
while(trans->curr < trans->max-1) {
trans->curr_table = (void **) trans->curr_table[trans->offset[trans->curr]];
trans->curr++;
}
return (int) *((int *) trans->curr_table + trans->offset[trans->curr]);
// ((void *) trans->curr_table + trans->offset[trans->max-1]);
}
// CUDA kernel: gpu_run_time<<<gridSize, blockSize>>>(d_new_threads, total_addresses);
__global__ void gpu_run_time(struct trans_thread *trans, int addresses) {
// id of the block
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
// size of each block (within grid of blocks)
int blocksize = blockDim.x * blockDim.y * blockDim.z;
// id of thread in a given block
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
// assign overall id/index of the thread
int idx = myblock * blocksize + subthread;
if(idx < addresses) {
translate_cpu(&trans[idx]);
}
}
// CPU analog for speed comparison
float cpu_run_time(struct trans_thread *trans, int addresses) {
for(int i = 0; i < addresses; i++) {
translate_cpu2(&trans[i]);
}
return 0;
}
/* --------------------------- host code ------------------------------*/
double read_timer() {
struct timeval end;
gettimeofday( &end, NULL );
return end.tv_sec+1.e-6*end.tv_usec;
}
//sequential phase 1: construct the table
int construct_table(void *table, int *levels, int num_levels) {
int i, j, level_size = 1;
void **table_ptr = (void **) table;
unsigned long **level_ptr;
// set intermediate addresses of table
for(i = 0; i < num_levels-1; i++)
{
level_size *= levels[i];
level_ptr = (unsigned long **) table + level_size + (((unsigned long *)table_ptr - (unsigned long *) table)/(sizeof(unsigned long *)));
fprintf(stderr, "level_size: %d, level_ptr: %d, table_ptr: %d\n", level_size, (level_ptr- (unsigned long **) table) / sizeof(void *), (unsigned long **) table_ptr - (unsigned long **) table);
for(j = 0; j < level_size; j++) {
table_ptr[j] = level_ptr + ((j)*levels[i+1]);
}
table_ptr += level_size;
}
assert((intptr_t )table_ptr - (intptr_t )table < max_table);
// set last level of page table to garbage;
for(i = 0; i < level_size * levels[num_levels-1]; i++) {
*table_ptr = (unsigned long *) i;
table_ptr++;
}
assert((intptr_t )table_ptr - (intptr_t )table == max_table);
// return number of entries at the lowest level of the page table
return levels[num_levels-1] * level_size;
}
//sequential phase 2: generate struct trans_thread for each address
struct trans_thread *gen_addresses(int num_addr, int levels, int *level_sizes, void **pgd)
{
int i,j;
struct trans_thread *new_threads = (struct trans_thread *)malloc(sizeof(struct trans_thread) * num_addr);
if (!new_threads){
fprintf(stderr, "malloc failed: %d\n", strerror(errno));
exit(1);
}
for(i = 0; i < num_addr; i++)
{
new_threads[i].curr_table = pgd;
new_threads[i].max = levels;
new_threads[i].curr = 0;
for(j = 0; j < levels; j++) {
new_threads[i].offset[j] =
rand() % level_sizes[j];
}
}
return new_threads;
}
////////////////////////////////////////////////////////////////////////////////
//
// Main Program
//
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
void **d_pg_table; //device page table
int i, j, table_size = 0, level_size = 1,
total_addresses, table_lowest_addresses,
levels = argc-3;
int level_sizes[levels];
//struct trans_thread *sample;
struct trans_thread *h_new_threads; //host
struct trans_thread *d_new_threads; //device
hipError_t cudaStat;
//start of the program
printf("===============================================================================\n");
srand(time(NULL));
// get number of pointers in contiguous page table
for(i = 1, j =0; i < argc; i++) {
if ( !strcmp(argv[i], "-n")) {
total_addresses = atoi(argv[++i]);
continue;
}
level_size *= atoi(argv[i]);
level_sizes[j++] = atoi(argv[i]);
table_size += level_size;
}
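// Worked example from the usage line above: "./page_walk.out -n 400 2 3 2 4" gives
// total_addresses = 400, levels = 4, level_sizes = {2, 3, 2, 4}, and
// table_size = 2 + 6 + 12 + 48 = 68 pointer slots.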
//tuning the parameters of the device
// fixed block size (512 x1x1 threads)
const dim3 blockSize(BLOCK_D1, BLOCK_D2, BLOCK_D3);
// determine number of blocks we need for a given problem size
int tmp = ceil(pow((double) total_addresses / BLOCK_D1, 0.5));
printf("Grid dimension is %i x %i\n", tmp, tmp);
//grid size
dim3 gridSize(tmp, tmp, 1);
//total number of threads
int nthreads = BLOCK_D1*BLOCK_D2*BLOCK_D3*tmp*tmp;
//threads number legitimacy check
if (nthreads < total_addresses){
printf("\n============ NOT ENOUGH THREADS TO COVER total addresses=%d ===============\n\n",total_addresses);
} else {
printf("Launching %d threads (total_addresses=%d)\n", nthreads, total_addresses);
}
// allocate host memory
max_table = table_size * sizeof(void *); //total size of page table
void **pg_table = (void **) malloc(sizeof(void *) * table_size);
if (!pg_table) {
fprintf(stderr, "host memory allocation failed: %d\n", strerror(errno));
exit(1);
}
else {
printf ("host memory allocation succeeded.\n");
}
// allocate device memory
cudaStat = hipMalloc(&d_pg_table, sizeof(void *) * table_size);
if(cudaStat != hipSuccess) {
printf ("device memory allocation failed.\n");
return EXIT_FAILURE;
}
else {
printf ("device memory allocation succeeded.\n");
}
/* --------------------------- sequential code on CPU; phase 1 --------------------------------------*/
//number of entries at the lowest level of the page table
//number of translatable addresses
printf ("now construct the page table on the host.\n");
table_lowest_addresses = construct_table(pg_table, level_sizes, levels);
fprintf(stderr, "number of translatable addresses: %d\n", table_lowest_addresses);
fprintf(stderr, "total size of page table: %d\n", max_table);
hipDeviceSynchronize();
double tInit = read_timer();
/* --------------------------- copy the page table from CPU to the GPU ------------------------------*/
cudaStat = hipMemcpy(d_pg_table, pg_table, sizeof(void *) * table_size, hipMemcpyHostToDevice);
printf("Memory Copy for page table from Host to Device");
if (cudaStat != hipSuccess){
printf("failed.\n");
return EXIT_FAILURE;
} else {
printf("successful.\n");
}
hipDeviceSynchronize();
double tTransferToGPU_pgtable = read_timer();
/* --------------------------- sequential code on CPU; phase 2 ---------------------------------------*/
h_new_threads = gen_addresses(total_addresses, levels, level_sizes, pg_table);
hipDeviceSynchronize();
double tInit2 = read_timer();
/* ------------------------- copy the trans_threads from CPU to the GPU ------------------------------*/
cudaStat = hipMalloc( (void**) &d_new_threads, sizeof(struct trans_thread) * total_addresses) ;
if (cudaStat != hipSuccess){
printf("device memory allocation for d_new_threads failed.\n");
return EXIT_FAILURE;
} else {
printf("device memory allocation for d_new_threads succeeded.\n");
}
cudaStat = hipMemcpy( d_new_threads, h_new_threads, sizeof(struct trans_thread) * total_addresses, hipMemcpyHostToDevice);
printf("Memory Copy h_new_threads from Host to Device");
if (cudaStat != hipSuccess){
printf("failed.\n");
return EXIT_FAILURE;
} else {
printf(" successful.\n");
}
cudaCheckErrors("hipMemcpy h_new_threads fail");
hipDeviceSynchronize();
double tTransferToGPU_threads = read_timer();
/* ----------------------------------- parallel code on GPU: kernel -----------------------------------*/
hipLaunchKernelGGL(( gpu_run_time), dim3(gridSize), dim3(blockSize), 0, 0, d_new_threads, total_addresses);
hipError_t cudaerr = hipDeviceSynchronize();
if (cudaerr != hipSuccess){
printf("kernel launch failed with error \"%s\".\n",
hipGetErrorString(cudaerr));
} else {
printf("kernel launch success!\n");
}
hipDeviceSynchronize();
double gpu_time = read_timer();
printf("GPU done!\n");
/* --------------------------------- sequential code on CPU; phase 3 ----------------------------------*/
printf("now do calculation on CPU for comparison!\n");
cpu_run_time(h_new_threads, total_addresses);
double cpu_time = read_timer();
fprintf(stderr, "The CPU took %lu microseconds to compute %d addresses. ""For a table of depth %d.\n", cpu_time - gpu_time , total_addresses, levels);
/* --------------------------------- sequential code on CPU; phase 4 ----------------------------------*/
printf("Timing results for n = %d\n", total_addresses);
//printf("page table Transfer to GPU time: %f\n", tTransferToGPU_pgtable - tInit);
//printf("threads Transfer to GPU time: %f\n", tTransferToGPU_threads - tInit2);
printf("Calculation time (GPU): %f\n", gpu_time - tTransferToGPU_threads);
//printf("Transfer from GPU time: %f\n", tTransferFromGPU - gpu_time);
printf("Calculation time (CPU): %f\n", cpu_time - gpu_time);
printf("Freeing memory...\n");
printf("====================================================\n");
free(pg_table);
free(h_new_threads);
hipFree(d_pg_table);
hipFree(d_new_threads);
return 0;
}
|
10dc632ff24b8dcaf5c13272709fff14047b3004.cu
|
/***********************************************************************************
This work is COMS 4824 Architecture Final Project, advised by Professor Martha Kim.
"A Walk on the Hyperthreaded Side:
an Implementation and Analysis of Translating Virtual Addresses on a GPU"
Copyright (c) 2017 Columbia University.
All rights reserved.
Created by Serena Liu and Raphael Norwitz.
AWS EC2 instance configuration:
--Instance type: g2.2xlarge
--AMI Id: NVIDIA CUDA 7.5 Toolkit on Amazon Linux-0ce7aca3-5b96-4ff4-8396-05245687380a-ami-52420645.3 (ami-52f7b345)
CUDA examples used:
https://raw.githubusercontent.com/berkeley-scf/gpu-workshop-2016/master/kernelExample.cu
Software Usage:
--compile: nvcc page_walk.cu -o page_walk.out
--output: ./page_walk.out -n <total addresses> <intermediate table parameters>
(e.g., ./page_walk.out -n 400 2 3 2 4)
************************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <sys/time.h>
#include <time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdint.h>
#define BLOCK_D1 512
#define BLOCK_D2 1
#define BLOCK_D3 1
#define MAX_LEVELS 20
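// Error-checking helper: call cudaCheckErrors("context") after CUDA API calls or kernel
// launches to report the most recent runtime error with file/line information and abort.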
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
static int max_table;
struct trans_thread {
void **curr_table;
unsigned long cuda_result;
int offset[MAX_LEVELS];
int curr;
int max;
};
//function called by CUDA kernel
__host__ __device__ int translate_cpu(struct trans_thread *trans) {
unsigned long *gpu_ptr = (unsigned long *) trans->curr_table;
while(trans->curr < trans->max-1) {
gpu_ptr = (unsigned long *) gpu_ptr[trans->offset[trans->curr]];
trans->curr++;
}
return (int) *((int *) gpu_ptr + trans->offset[trans->curr]);
// ((void *) trans->curr_table + trans->offset[trans->max-1]);
}
//CPU counterpart
int translate_cpu2(struct trans_thread *trans) {
while(trans->curr < trans->max-1) {
trans->curr_table = (void **) trans->curr_table[trans->offset[trans->curr]];
trans->curr++;
}
return (int) *((int *) trans->curr_table + trans->offset[trans->curr]);
// ((void *) trans->curr_table + trans->offset[trans->max-1]);
}
// CUDA kernel: gpu_run_time<<<gridSize, blockSize>>>(d_new_threads, total_addresses);
__global__ void gpu_run_time(struct trans_thread *trans, int addresses) {
// id of the block
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
// size of each block (within grid of blocks)
int blocksize = blockDim.x * blockDim.y * blockDim.z;
// id of thread in a given block
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
// assign overall id/index of the thread
int idx = myblock * blocksize + subthread;
if(idx < addresses) {
translate_cpu(&trans[idx]);
}
}
// CPU analog for speed comparison
float cpu_run_time(struct trans_thread *trans, int addresses) {
for(int i = 0; i < addresses; i++) {
translate_cpu2(&trans[i]);
}
return 0;
}
/* --------------------------- host code ------------------------------*/
double read_timer() {
struct timeval end;
gettimeofday( &end, NULL );
return end.tv_sec+1.e-6*end.tv_usec;
}
//sequential phase 1: construct the table
int construct_table(void *table, int *levels, int num_levels) {
int i, j, level_size = 1;
void **table_ptr = (void **) table;
unsigned long **level_ptr;
// set intermediate addresses of table
for(i = 0; i < num_levels-1; i++)
{
level_size *= levels[i];
level_ptr = (unsigned long **) table + level_size + ((unsigned long **) table_ptr - (unsigned long **) table);
fprintf(stderr, "level_size: %d, level_ptr: %ld, table_ptr: %ld\n", level_size, (long) (level_ptr - (unsigned long **) table), (long) ((unsigned long **) table_ptr - (unsigned long **) table));
for(j = 0; j < level_size; j++) {
table_ptr[j] = level_ptr + ((j)*levels[i+1]);
}
table_ptr += level_size;
}
assert((intptr_t )table_ptr - (intptr_t )table < max_table);
// set last level of page table to garbage;
for(i = 0; i < level_size * levels[num_levels-1]; i++) {
*table_ptr = (unsigned long *) i;
table_ptr++;
}
assert((intptr_t )table_ptr - (intptr_t )table == max_table);
// return number of entries at the lowest level of the page table
return levels[num_levels-1] * level_size;
}
//sequential phase 2: generate struct trans_thread for each address
struct trans_thread *gen_addresses(int num_addr, int levels, int *level_sizes, void **pgd)
{
int i,j;
struct trans_thread *new_threads = (struct trans_thread *)malloc(sizeof(struct trans_thread) * num_addr);
if (!new_threads){
fprintf(stderr, "malloc failed: %d\n", strerror(errno));
exit(1);
}
for(i = 0; i < num_addr; i++)
{
new_threads[i].curr_table = pgd;
new_threads[i].max = levels;
new_threads[i].curr = 0;
for(j = 0; j < levels; j++) {
new_threads[i].offset[j] =
rand() % level_sizes[j];
}
}
return new_threads;
}
////////////////////////////////////////////////////////////////////////////////
//
// Main Program
//
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
void **d_pg_table; //device page table
int i, j, table_size = 0, level_size = 1,
total_addresses, table_lowest_addresses,
levels = argc-3;
int level_sizes[levels];
//struct trans_thread *sample;
struct trans_thread *h_new_threads; //host
struct trans_thread *d_new_threads; //device
cudaError_t cudaStat;
//start of the program
printf("===============================================================================\n");
srand(time(NULL));
// get number of pointers in contiguous page table
for(i = 1, j =0; i < argc; i++) {
if ( !strcmp(argv[i], "-n")) {
total_addresses = atoi(argv[++i]);
continue;
}
level_size *= atoi(argv[i]);
level_sizes[j++] = atoi(argv[i]);
table_size += level_size;
}
//tuning the parameters of the device
// fixed block size (512 x1x1 threads)
const dim3 blockSize(BLOCK_D1, BLOCK_D2, BLOCK_D3);
// determine number of blocks we need for a given problem size
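// e.g. with "-n 400" and BLOCK_D1 = 512, tmp = ceil(sqrt(400/512.0)) = 1, so a 1 x 1
// grid of one 512-thread block covers all 400 addresses.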
int tmp = ceil(pow((double) total_addresses / BLOCK_D1, 0.5));
printf("Grid dimension is %i x %i\n", tmp, tmp);
//grid size
dim3 gridSize(tmp, tmp, 1);
//total number of threads
int nthreads = BLOCK_D1*BLOCK_D2*BLOCK_D3*tmp*tmp;
//threads number legitimacy check
if (nthreads < total_addresses){
printf("\n============ NOT ENOUGH THREADS TO COVER total addresses=%d ===============\n\n",total_addresses);
} else {
printf("Launching %d threads (total_addresses=%d)\n", nthreads, total_addresses);
}
// allocate host memory
max_table = table_size * sizeof(void *); //total size of page table
void **pg_table = (void **) malloc(sizeof(void *) * table_size);
if (!pg_table) {
fprintf(stderr, "host memory allocation failed: %d\n", strerror(errno));
exit(1);
}
else {
printf ("host memory allocation succeeded.\n");
}
// allocate device memory
cudaStat = cudaMalloc(&d_pg_table, sizeof(void *) * table_size);
if(cudaStat != cudaSuccess) {
printf ("device memory allocation failed.\n");
return EXIT_FAILURE;
}
else {
printf ("device memory allocation succeeded.\n");
}
/* --------------------------- sequential code on CPU; phase 1 --------------------------------------*/
//number of entries at the lowest level of the page table
//number of translatable addresses
printf ("now construct the page table on the host.\n");
table_lowest_addresses = construct_table(pg_table, level_sizes, levels);
fprintf(stderr, "number of translatable addresses: %d\n", table_lowest_addresses);
fprintf(stderr, "total size of page table: %d\n", max_table);
cudaDeviceSynchronize();
double tInit = read_timer();
/* --------------------------- copy the page table from CPU to the GPU ------------------------------*/
cudaStat = cudaMemcpy(d_pg_table, pg_table, sizeof(void *) * table_size, cudaMemcpyHostToDevice);
printf("Memory Copy for page table from Host to Device");
if (cudaStat != cudaSuccess){
printf("failed.\n");
return EXIT_FAILURE;
} else {
printf("successful.\n");
}
cudaDeviceSynchronize();
double tTransferToGPU_pgtable = read_timer();
/* --------------------------- sequential code on CPU; phase 2 ---------------------------------------*/
h_new_threads = gen_addresses(total_addresses, levels, level_sizes, pg_table);
cudaDeviceSynchronize();
double tInit2 = read_timer();
/* ------------------------- copy the trans_threads from CPU to the GPU ------------------------------*/
cudaStat = cudaMalloc( (void**) &d_new_threads, sizeof(struct trans_thread) * total_addresses) ;
if (cudaStat != cudaSuccess){
printf("device memory allocation for d_new_threads failed.\n");
return EXIT_FAILURE;
} else {
printf("device memory allocation for d_new_threads succeeded.\n");
}
cudaStat = cudaMemcpy( d_new_threads, h_new_threads, sizeof(struct trans_thread) * total_addresses, cudaMemcpyHostToDevice);
printf("Memory Copy h_new_threads from Host to Device");
if (cudaStat != cudaSuccess){
printf("failed.\n");
return EXIT_FAILURE;
} else {
printf(" successful.\n");
}
cudaCheckErrors("cudaMemcpy h_new_threads fail");
cudaDeviceSynchronize();
double tTransferToGPU_threads = read_timer();
/* ----------------------------------- parallel code on GPU: kernel -----------------------------------*/
gpu_run_time<<<gridSize, blockSize>>>(d_new_threads, total_addresses);
cudaError_t cudaerr = cudaDeviceSynchronize();
if (cudaerr != cudaSuccess){
printf("kernel launch failed with error \"%s\".\n",
cudaGetErrorString(cudaerr));
} else {
printf("kernel launch success!\n");
}
cudaDeviceSynchronize();
double gpu_time = read_timer();
printf("GPU done!\n");
/* --------------------------------- sequential code on CPU; phase 3 ----------------------------------*/
printf("now do calculation on CPU for comparison!\n");
cpu_run_time(h_new_threads, total_addresses);
double cpu_time = read_timer();
fprintf(stderr, "The CPU took %lu microseconds to compute %d addresses. ""For a table of depth %d.\n", cpu_time - gpu_time , total_addresses, levels);
/* --------------------------------- sequential code on CPU; phase 4 ----------------------------------*/
printf("Timing results for n = %d\n", total_addresses);
//printf("page table Transfer to GPU time: %f\n", tTransferToGPU_pgtable - tInit);
//printf("threads Transfer to GPU time: %f\n", tTransferToGPU_threads - tInit2);
printf("Calculation time (GPU): %f\n", gpu_time - tTransferToGPU_threads);
//printf("Transfer from GPU time: %f\n", tTransferFromGPU - gpu_time);
printf("Calculation time (CPU): %f\n", cpu_time - gpu_time);
printf("Freeing memory...\n");
printf("====================================================\n");
free(pg_table);
free(h_new_threads);
cudaFree(d_pg_table);
cudaFree(d_new_threads);
return 0;
}
|
b53af52f184759928065cb45d2aa5867b22e915c.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "network_tester_cuda.h"
#include "neural_network_cuda_exception.h"
#include "layer_testing_schema_factory.h"
#include "cuda_linear_buffer_device.h"
#include "cuda_linear_buffer_host.h"
#include "util_cuda.h"
#include "cuda_event.h"
#include "unsupervised_data_reader_async_helper.h"
#include <hip/hip_runtime.h>
#include <boost/format.hpp>
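// Unpacks byte-encoded neurons to floats on the device: each thread converts one uchar4
// (4 input bytes) into a float4 scaled by 1/255, keeping loads and stores vectorized.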
__global__ void convert_compacted_to_raw_kernel(
const uchar4 * __restrict input,
float4 * __restrict output,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
uchar4 inp = input[elem_id];
float4 val;
val.x = inp.x * (1.0F / 255.0F);
val.y = inp.y * (1.0F / 255.0F);
val.z = inp.z * (1.0F / 255.0F);
val.w = inp.w * (1.0F / 255.0F);
output[elem_id] = val;
}
}
namespace nnforge
{
namespace cuda
{
network_tester_cuda::network_tester_cuda(
network_schema_smart_ptr schema,
cuda_running_configuration_const_smart_ptr cuda_config)
: network_tester(schema)
, cuda_config(cuda_config)
{
const const_layer_list& layer_list = *schema;
for(const_layer_list::const_iterator it = layer_list.begin(); it != layer_list.end(); ++it)
{
testing_schemas.push_back(single_layer_testing_schema_factory::get_const_instance().create_testing_schema_layer(*it, cuda_config));
}
setup_network_cuda();
for(const_layer_testing_schema_list::const_iterator it = testing_schemas.begin(); it != testing_schemas.end(); ++it)
{
schema_data.push_back((*it)->get_schema_buffers());
}
}
network_tester_cuda::~network_tester_cuda()
{
}
void network_tester_cuda::setup_network_cuda()
{
command_stream = cuda_stream_smart_ptr(new cuda_stream());
data_stream = cuda_stream_smart_ptr(new cuda_stream());
}
// The method is called when client calls set_data. The data is guaranteed to be compatible with schema
void network_tester_cuda::actual_set_data(network_data_smart_ptr data)
{
host_net_data = data;
update_data();
}
void network_tester_cuda::update_data()
{
net_data.clear();
if (tester_list.empty() || (host_net_data == 0))
return;
for(int i = 0; i < std::min(host_net_data->size(), tester_list.size()); ++i)
{
std::vector<const_cuda_linear_buffer_device_smart_ptr> device_data = tester_list[i]->get_data(host_net_data->at(i));
net_data.push_back(device_data);
}
}
// The method is called when client calls set_input_configuration_specific and the convolution specific configuration is modified.
// The layer_config_list is guaranteed to be compatible with schema
void network_tester_cuda::layer_config_list_modified()
{
tester_list.clear();
layer_configuration_specific_list::const_iterator it_conf = layer_config_list.begin();
for(const_layer_testing_schema_list::const_iterator it = testing_schemas.begin(); it != testing_schemas.end(); ++it, ++it_conf)
{
tester_list.push_back(
(*it)->create_tester(
*it_conf,
*(it_conf + 1)));
}
update_data();
}
void network_tester_cuda::update_buffers_configuration_testing(buffer_cuda_size_configuration& buffer_configuration) const
{
for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = net_data.begin(); it != net_data.end(); ++it)
for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2)
buffer_configuration.add_constant_buffer((*it2)->get_size());
for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = schema_data.begin(); it != schema_data.end(); ++it)
for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2)
buffer_configuration.add_constant_buffer((*it2)->get_size());
for(std::vector<layer_tester_cuda_smart_ptr>::const_iterator it = tester_list.begin(); it != tester_list.end(); ++it)
(*it)->update_buffer_configuration(buffer_configuration);
}
output_neuron_value_set_smart_ptr network_tester_cuda::actual_run(unsupervised_data_reader& reader)
{
reader.reset();
layer_configuration_specific input_configuration = reader.get_input_configuration();
unsigned int input_neuron_count = layer_config_list.begin()->get_neuron_count();
unsigned int input_neuron_count_per_feature_map = layer_config_list.begin()->get_neuron_count_per_feature_map();
unsigned int output_neuron_count = (layer_config_list.end() - 1)->get_neuron_count();
unsigned int entry_count = reader.get_entry_count();
neuron_data_type::input_type type_code = reader.get_input_type();
size_t input_neuron_elem_size = reader.get_input_neuron_elem_size();
output_neuron_value_set_smart_ptr predicted_output_neuron_value_set(new output_neuron_value_set(entry_count, output_neuron_count));
buffer_cuda_size_configuration buffers_config;
update_buffers_configuration_testing(buffers_config);
buffers_config.add_per_entry_buffer(input_neuron_count * input_neuron_elem_size); // input
buffers_config.add_per_entry_buffer(input_neuron_count * input_neuron_elem_size); // input
buffers_config.add_per_entry_buffer(input_neuron_count * sizeof(float)); // converted input
buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // output
buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // output
unsigned int max_entry_count = std::min<unsigned int>(cuda_config->get_max_entry_count(buffers_config), entry_count);
cuda_linear_buffer_device_smart_ptr input_buf[2] =
{
cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * max_entry_count * input_neuron_elem_size)),
cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * max_entry_count * input_neuron_elem_size)),
};
cuda_linear_buffer_device_smart_ptr output_buf[2] =
{
cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(output_neuron_count * max_entry_count * sizeof(float))),
cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(output_neuron_count * max_entry_count * sizeof(float))),
};
cuda_linear_buffer_device_smart_ptr input_converted_buf(new cuda_linear_buffer_device(input_neuron_count * max_entry_count * sizeof(float)));
cuda_linear_buffer_device_smart_ptr output_buffer = input_converted_buf;
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > > input_and_additional_buffers_pack;
for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it)
{
std::vector<cuda_linear_buffer_device_smart_ptr> additional_buffers = (*it)->allocate_additional_buffers(max_entry_count);
input_and_additional_buffers_pack.push_back(std::make_pair(output_buffer, additional_buffers));
output_buffer = (*it)->get_output_buffer(output_buffer, additional_buffers);
}
cuda_linear_buffer_host_smart_ptr input_host_buf(new cuda_linear_buffer_host(input_neuron_count * max_entry_count * input_neuron_elem_size));
unsigned char * input = *input_host_buf;
cuda_linear_buffer_host_smart_ptr output_predicted_host_buf(new cuda_linear_buffer_host(output_neuron_count * max_entry_count * sizeof(float)));
float * output_predicted = *output_predicted_host_buf;
unsigned int current_data_slot = 0;
unsigned int current_command_slot = 1;
unsigned int entries_available_for_copy_in_count = entry_count;
unsigned int entries_available_for_processing_count = 0;
unsigned int entries_available_for_copy_out_count = 0;
unsigned int entries_processed_count = 0;
cuda_event output_copied_event;
cuda_event data_processed_event;
cuda_event input_copied_event;
int power_of_two_spinup = 3;
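// Two-slot pipeline: each iteration the data stream reads the next chunk into
// input_buf[current_data_slot] and drains results from output_buf[current_data_slot]
// (written in the previous iteration), while the command stream converts and processes
// input_buf[current_command_slot]; the slots swap at the bottom of the loop.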
while((entries_available_for_copy_in_count > 0) || (entries_available_for_processing_count > 0) || (entries_available_for_copy_out_count > 0))
{
unsupervised_data_reader_async_helper async_reader;
if (entries_available_for_copy_in_count > 0)
{
unsigned int entries_to_read_count = std::min<unsigned int>(std::max(max_entry_count >> power_of_two_spinup, 1U), entries_available_for_copy_in_count);
async_reader.fun = unsupervised_data_reader_functor(
entries_to_read_count,
&reader,
input,
*(input_buf[current_data_slot]),
*data_stream);
async_reader.start();
power_of_two_spinup = (power_of_two_spinup > 0) ? (power_of_two_spinup - 1) : 0;
}
if (entries_available_for_processing_count > 0)
{
// Convert input
if (type_code == neuron_data_type::type_byte)
{
int elem_count = (input_neuron_count * entries_available_for_processing_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
hipLaunchKernelGGL(( convert_compacted_to_raw_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, *command_stream,
*input_buf[current_command_slot],
*input_converted_buf,
elem_count);
}
else if (type_code == neuron_data_type::type_float)
{
cuda_safe_call(hipMemcpyAsync(
*input_converted_buf,
*input_buf[current_command_slot],
input_neuron_count * entries_available_for_processing_count * sizeof(float),
hipMemcpyDeviceToDevice,
*command_stream));
}
else throw neural_network_exception((boost::format("actual_run cannot handle input neurons of type %1%") % type_code).str());
// Run ann
{
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > >::iterator input_and_additional_buffers_pack_it = input_and_additional_buffers_pack.begin();
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator net_data_it = net_data.begin();
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = schema_data.begin();
for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it, ++input_and_additional_buffers_pack_it, ++net_data_it, ++schema_data_it)
{
(*it)->enqueue_test(
*command_stream,
*schema_data_it,
*net_data_it,
input_and_additional_buffers_pack_it->first,
input_and_additional_buffers_pack_it->second,
entries_available_for_processing_count);
}
}
// Copy output
{
cuda_safe_call(hipMemcpyAsync(
*output_buf[current_command_slot],
*output_buffer,
output_neuron_count * entries_available_for_processing_count * sizeof(float),
hipMemcpyDeviceToDevice,
*command_stream));
}
if (cuda_config->is_flush_required())
{
cuda_safe_call(hipEventRecord(data_processed_event, *command_stream));
hipEventQuery(data_processed_event);
}
}
unsigned int entries_read_count = 0;
if (entries_available_for_copy_in_count > 0)
entries_read_count = async_reader.wait();
if (entries_available_for_copy_out_count > 0)
{
cuda_safe_call(hipMemcpyAsync(
output_predicted,
*(output_buf[current_data_slot]),
entries_available_for_copy_out_count * output_neuron_count * sizeof(float),
hipMemcpyDeviceToHost,
*data_stream));
cuda_safe_call(hipStreamSynchronize(*data_stream));
const float * predicted_it = output_predicted;
for(std::vector<std::vector<float> >::iterator it = predicted_output_neuron_value_set->neuron_value_list.begin() + entries_processed_count;
it != predicted_output_neuron_value_set->neuron_value_list.begin() + entries_processed_count + entries_available_for_copy_out_count;
it++, predicted_it += output_neuron_count)
{
std::vector<float>& value_list = *it;
std::copy(predicted_it, predicted_it + output_neuron_count, value_list.begin());
}
entries_processed_count += entries_available_for_copy_out_count;
}
cuda_safe_call(hipStreamSynchronize(*data_stream));
cuda_safe_call(hipStreamSynchronize(*command_stream));
entries_available_for_copy_out_count = entries_available_for_processing_count;
entries_available_for_processing_count = entries_read_count;
entries_available_for_copy_in_count -= entries_read_count;
current_data_slot = 1 - current_data_slot;
current_command_slot = 1 - current_command_slot;
}
return predicted_output_neuron_value_set;
}
std::vector<layer_configuration_specific_snapshot_smart_ptr> network_tester_cuda::actual_get_snapshot(
const void * input,
neuron_data_type::input_type type_code)
{
std::vector<layer_configuration_specific_snapshot_smart_ptr> res;
unsigned int input_neuron_count = layer_config_list.begin()->get_neuron_count();
unsigned int input_neuron_count_per_feature_map = layer_config_list.begin()->get_neuron_count_per_feature_map();
unsigned int output_neuron_count = (layer_config_list.end() - 1)->get_neuron_count();
size_t input_neuron_elem_size = neuron_data_type::get_input_size(type_code);
cuda_linear_buffer_device_smart_ptr input_buf(new cuda_linear_buffer_device(input_neuron_count * input_neuron_elem_size));
cuda_linear_buffer_device_smart_ptr input_converted_buf(new cuda_linear_buffer_device(input_neuron_count * sizeof(float)));
cuda_linear_buffer_device_smart_ptr output_buffer = input_converted_buf;
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > > input_and_additional_buffers_pack;
std::vector<cuda_linear_buffer_device_smart_ptr> output_buffer_list;
output_buffer_list.push_back(output_buffer);
for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it)
{
std::vector<cuda_linear_buffer_device_smart_ptr> additional_buffers = (*it)->allocate_additional_buffers(1);
input_and_additional_buffers_pack.push_back(std::make_pair(output_buffer, additional_buffers));
output_buffer = (*it)->get_output_buffer(output_buffer, additional_buffers);
output_buffer_list.push_back(output_buffer);
}
// Copy input
{
cuda_safe_call(hipMemcpyAsync(
*input_buf,
input,
input_neuron_count * input_neuron_elem_size,
hipMemcpyHostToDevice,
*command_stream));
}
// Convert input
if (type_code == neuron_data_type::type_byte)
{
int elem_count = (input_neuron_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
hipLaunchKernelGGL(( convert_compacted_to_raw_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, *command_stream,
*input_buf,
*input_converted_buf,
elem_count);
}
else if (type_code == neuron_data_type::type_float)
{
cuda_safe_call(hipMemcpyAsync(*input_converted_buf, *input_buf, input_neuron_count * sizeof(float), hipMemcpyDeviceToDevice, *command_stream));
}
else throw neural_network_exception((boost::format("actual_get_snapshot cannot handle input neurons of type %1%") % type_code).str());
{
layer_configuration_specific_snapshot_smart_ptr input_elem(new layer_configuration_specific_snapshot(layer_config_list[0]));
res.push_back(input_elem);
cuda_safe_call(hipMemcpyAsync(
&(*(input_elem->data.begin())),
*output_buffer_list[0],
input_elem->data.size() * sizeof(float),
hipMemcpyDeviceToHost,
*command_stream));
}
// Run ann
{
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > >::iterator input_and_additional_buffers_pack_it = input_and_additional_buffers_pack.begin();
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator net_data_it = net_data.begin();
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = schema_data.begin();
int layer_id = 0;
int output_buffer_id = 1;
for(
std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin();
it != tester_list.end();
++it, ++input_and_additional_buffers_pack_it, ++net_data_it, ++schema_data_it, ++layer_id, ++output_buffer_id)
{
(*it)->enqueue_test(
*command_stream,
*schema_data_it,
*net_data_it,
input_and_additional_buffers_pack_it->first,
input_and_additional_buffers_pack_it->second,
1);
layer_configuration_specific_snapshot_smart_ptr new_elem(new layer_configuration_specific_snapshot(layer_config_list[layer_id + 1]));
res.push_back(new_elem);
cuda_safe_call(hipMemcpyAsync(
&(*(new_elem->data.begin())),
*output_buffer_list[output_buffer_id],
new_elem->data.size() * sizeof(float),
hipMemcpyDeviceToHost,
*command_stream));
}
}
cuda_safe_call(hipStreamSynchronize(*command_stream));
return res;
}
layer_configuration_specific_snapshot_smart_ptr network_tester_cuda::actual_run(
const void * input,
neuron_data_type::input_type type_code)
{
layer_configuration_specific_snapshot_smart_ptr res(new layer_configuration_specific_snapshot(layer_config_list[layer_config_list.size() - 1]));
unsigned int input_neuron_count = layer_config_list.begin()->get_neuron_count();
unsigned int input_neuron_count_per_feature_map = layer_config_list.begin()->get_neuron_count_per_feature_map();
unsigned int output_neuron_count = (layer_config_list.end() - 1)->get_neuron_count();
size_t input_neuron_elem_size = neuron_data_type::get_input_size(type_code);
cuda_linear_buffer_device_smart_ptr input_buf(new cuda_linear_buffer_device(input_neuron_count * input_neuron_elem_size));
cuda_linear_buffer_device_smart_ptr input_converted_buf(new cuda_linear_buffer_device(input_neuron_count * sizeof(float)));
cuda_linear_buffer_device_smart_ptr output_buffer = input_converted_buf;
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > > input_and_additional_buffers_pack;
for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it)
{
std::vector<cuda_linear_buffer_device_smart_ptr> additional_buffers = (*it)->allocate_additional_buffers(1);
input_and_additional_buffers_pack.push_back(std::make_pair(output_buffer, additional_buffers));
output_buffer = (*it)->get_output_buffer(output_buffer, additional_buffers);
}
// Copy input
{
cuda_safe_call(hipMemcpyAsync(
*input_buf,
input,
input_neuron_count * input_neuron_elem_size,
hipMemcpyHostToDevice,
*command_stream));
}
// Convert input
if (type_code == neuron_data_type::type_byte)
{
int elem_count = (input_neuron_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
hipLaunchKernelGGL(( convert_compacted_to_raw_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, *command_stream,
*input_buf,
*input_converted_buf,
elem_count);
}
else if (type_code == neuron_data_type::type_float)
{
cuda_safe_call(hipMemcpyAsync(*input_converted_buf, *input_buf, input_neuron_count * sizeof(float), hipMemcpyDeviceToDevice, *command_stream));
}
else throw neural_network_exception((boost::format("actual_run cannot handle input neurons of type %1%") % type_code).str());
// Run ann
{
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > >::iterator input_and_additional_buffers_pack_it = input_and_additional_buffers_pack.begin();
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator net_data_it = net_data.begin();
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = schema_data.begin();
for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it, ++input_and_additional_buffers_pack_it, ++net_data_it, ++schema_data_it)
{
(*it)->enqueue_test(
*command_stream,
*schema_data_it,
*net_data_it,
input_and_additional_buffers_pack_it->first,
input_and_additional_buffers_pack_it->second,
1);
}
}
// Copy output
{
cuda_safe_call(hipMemcpyAsync(
&(*(res->data.begin())),
*output_buffer,
output_neuron_count * sizeof(float),
hipMemcpyDeviceToHost,
*command_stream));
}
cuda_safe_call(hipStreamSynchronize(*command_stream));
return res;
}
}
}
|
b53af52f184759928065cb45d2aa5867b22e915c.cu
|
/*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "network_tester_cuda.h"
#include "neural_network_cuda_exception.h"
#include "layer_testing_schema_factory.h"
#include "cuda_linear_buffer_device.h"
#include "cuda_linear_buffer_host.h"
#include "util_cuda.h"
#include "cuda_event.h"
#include "unsupervised_data_reader_async_helper.h"
#include <cuda_runtime.h>
#include <boost/format.hpp>
__global__ void convert_compacted_to_raw_kernel(
const uchar4 * __restrict input,
float4 * __restrict output,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
uchar4 inp = input[elem_id];
float4 val;
val.x = inp.x * (1.0F / 255.0F);
val.y = inp.y * (1.0F / 255.0F);
val.z = inp.z * (1.0F / 255.0F);
val.w = inp.w * (1.0F / 255.0F);
output[elem_id] = val;
}
}
namespace nnforge
{
namespace cuda
{
network_tester_cuda::network_tester_cuda(
network_schema_smart_ptr schema,
cuda_running_configuration_const_smart_ptr cuda_config)
: network_tester(schema)
, cuda_config(cuda_config)
{
const const_layer_list& layer_list = *schema;
for(const_layer_list::const_iterator it = layer_list.begin(); it != layer_list.end(); ++it)
{
testing_schemas.push_back(single_layer_testing_schema_factory::get_const_instance().create_testing_schema_layer(*it, cuda_config));
}
setup_network_cuda();
for(const_layer_testing_schema_list::const_iterator it = testing_schemas.begin(); it != testing_schemas.end(); ++it)
{
schema_data.push_back((*it)->get_schema_buffers());
}
}
network_tester_cuda::~network_tester_cuda()
{
}
void network_tester_cuda::setup_network_cuda()
{
command_stream = cuda_stream_smart_ptr(new cuda_stream());
data_stream = cuda_stream_smart_ptr(new cuda_stream());
}
// The method is called when client calls set_data. The data is guaranteed to be compatible with schema
void network_tester_cuda::actual_set_data(network_data_smart_ptr data)
{
host_net_data = data;
update_data();
}
void network_tester_cuda::update_data()
{
net_data.clear();
if (tester_list.empty() || (host_net_data == 0))
return;
for(int i = 0; i < std::min(host_net_data->size(), tester_list.size()); ++i)
{
std::vector<const_cuda_linear_buffer_device_smart_ptr> device_data = tester_list[i]->get_data(host_net_data->at(i));
net_data.push_back(device_data);
}
}
// The method is called when client calls set_input_configuration_specific and the convolution specific configuration is modified.
// The layer_config_list is guaranteed to be compatible with schema
void network_tester_cuda::layer_config_list_modified()
{
tester_list.clear();
layer_configuration_specific_list::const_iterator it_conf = layer_config_list.begin();
for(const_layer_testing_schema_list::const_iterator it = testing_schemas.begin(); it != testing_schemas.end(); ++it, ++it_conf)
{
tester_list.push_back(
(*it)->create_tester(
*it_conf,
*(it_conf + 1)));
}
update_data();
}
void network_tester_cuda::update_buffers_configuration_testing(buffer_cuda_size_configuration& buffer_configuration) const
{
for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = net_data.begin(); it != net_data.end(); ++it)
for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2)
buffer_configuration.add_constant_buffer((*it2)->get_size());
for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = schema_data.begin(); it != schema_data.end(); ++it)
for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2)
buffer_configuration.add_constant_buffer((*it2)->get_size());
for(std::vector<layer_tester_cuda_smart_ptr>::const_iterator it = tester_list.begin(); it != tester_list.end(); ++it)
(*it)->update_buffer_configuration(buffer_configuration);
}
output_neuron_value_set_smart_ptr network_tester_cuda::actual_run(unsupervised_data_reader& reader)
{
reader.reset();
layer_configuration_specific input_configuration = reader.get_input_configuration();
unsigned int input_neuron_count = layer_config_list.begin()->get_neuron_count();
unsigned int input_neuron_count_per_feature_map = layer_config_list.begin()->get_neuron_count_per_feature_map();
unsigned int output_neuron_count = (layer_config_list.end() - 1)->get_neuron_count();
unsigned int entry_count = reader.get_entry_count();
neuron_data_type::input_type type_code = reader.get_input_type();
size_t input_neuron_elem_size = reader.get_input_neuron_elem_size();
output_neuron_value_set_smart_ptr predicted_output_neuron_value_set(new output_neuron_value_set(entry_count, output_neuron_count));
buffer_cuda_size_configuration buffers_config;
update_buffers_configuration_testing(buffers_config);
buffers_config.add_per_entry_buffer(input_neuron_count * input_neuron_elem_size); // input
buffers_config.add_per_entry_buffer(input_neuron_count * input_neuron_elem_size); // input
buffers_config.add_per_entry_buffer(input_neuron_count * sizeof(float)); // converted input
buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // output
buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // output
unsigned int max_entry_count = std::min<unsigned int>(cuda_config->get_max_entry_count(buffers_config), entry_count);
cuda_linear_buffer_device_smart_ptr input_buf[2] =
{
cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * max_entry_count * input_neuron_elem_size)),
cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * max_entry_count * input_neuron_elem_size)),
};
cuda_linear_buffer_device_smart_ptr output_buf[2] =
{
cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(output_neuron_count * max_entry_count * sizeof(float))),
cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(output_neuron_count * max_entry_count * sizeof(float))),
};
cuda_linear_buffer_device_smart_ptr input_converted_buf(new cuda_linear_buffer_device(input_neuron_count * max_entry_count * sizeof(float)));
cuda_linear_buffer_device_smart_ptr output_buffer = input_converted_buf;
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > > input_and_additional_buffers_pack;
for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it)
{
std::vector<cuda_linear_buffer_device_smart_ptr> additional_buffers = (*it)->allocate_additional_buffers(max_entry_count);
input_and_additional_buffers_pack.push_back(std::make_pair(output_buffer, additional_buffers));
output_buffer = (*it)->get_output_buffer(output_buffer, additional_buffers);
}
cuda_linear_buffer_host_smart_ptr input_host_buf(new cuda_linear_buffer_host(input_neuron_count * max_entry_count * input_neuron_elem_size));
unsigned char * input = *input_host_buf;
cuda_linear_buffer_host_smart_ptr output_predicted_host_buf(new cuda_linear_buffer_host(output_neuron_count * max_entry_count * sizeof(float)));
float * output_predicted = *output_predicted_host_buf;
unsigned int current_data_slot = 0;
unsigned int current_command_slot = 1;
unsigned int entries_available_for_copy_in_count = entry_count;
unsigned int entries_available_for_processing_count = 0;
unsigned int entries_available_for_copy_out_count = 0;
unsigned int entries_processed_count = 0;
cuda_event output_copied_event;
cuda_event data_processed_event;
cuda_event input_copied_event;
int power_of_two_spinup = 3;
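// power_of_two_spinup = 3 makes the first read max_entry_count/8, then /4, /2, and
// finally full max_entry_count chunks, so GPU processing can start before a whole
// batch has been loaded from the reader.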
while((entries_available_for_copy_in_count > 0) || (entries_available_for_processing_count > 0) || (entries_available_for_copy_out_count > 0))
{
unsupervised_data_reader_async_helper async_reader;
if (entries_available_for_copy_in_count > 0)
{
unsigned int entries_to_read_count = std::min<unsigned int>(std::max(max_entry_count >> power_of_two_spinup, 1U), entries_available_for_copy_in_count);
async_reader.fun = unsupervised_data_reader_functor(
entries_to_read_count,
&reader,
input,
*(input_buf[current_data_slot]),
*data_stream);
async_reader.start();
power_of_two_spinup = (power_of_two_spinup > 0) ? (power_of_two_spinup - 1) : 0;
}
if (entries_available_for_processing_count > 0)
{
// Convert input
if (type_code == neuron_data_type::type_byte)
{
int elem_count = (input_neuron_count * entries_available_for_processing_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
convert_compacted_to_raw_kernel<<<kernel_dims.first, kernel_dims.second, 0, *command_stream>>>(
*input_buf[current_command_slot],
*input_converted_buf,
elem_count);
}
else if (type_code == neuron_data_type::type_float)
{
cuda_safe_call(cudaMemcpyAsync(
*input_converted_buf,
*input_buf[current_command_slot],
input_neuron_count * entries_available_for_processing_count * sizeof(float),
cudaMemcpyDeviceToDevice,
*command_stream));
}
else throw neural_network_exception((boost::format("actual_run cannot handle input neurons of type %1%") % type_code).str());
// Run ann
{
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > >::iterator input_and_additional_buffers_pack_it = input_and_additional_buffers_pack.begin();
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator net_data_it = net_data.begin();
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = schema_data.begin();
for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it, ++input_and_additional_buffers_pack_it, ++net_data_it, ++schema_data_it)
{
(*it)->enqueue_test(
*command_stream,
*schema_data_it,
*net_data_it,
input_and_additional_buffers_pack_it->first,
input_and_additional_buffers_pack_it->second,
entries_available_for_processing_count);
}
}
// Copy output
{
cuda_safe_call(cudaMemcpyAsync(
*output_buf[current_command_slot],
*output_buffer,
output_neuron_count * entries_available_for_processing_count * sizeof(float),
cudaMemcpyDeviceToDevice,
*command_stream));
}
if (cuda_config->is_flush_required())
{
cuda_safe_call(cudaEventRecord(data_processed_event, *command_stream));
cudaEventQuery(data_processed_event);
}
}
unsigned int entries_read_count = 0;
if (entries_available_for_copy_in_count > 0)
entries_read_count = async_reader.wait();
if (entries_available_for_copy_out_count > 0)
{
cuda_safe_call(cudaMemcpyAsync(
output_predicted,
*(output_buf[current_data_slot]),
entries_available_for_copy_out_count * output_neuron_count * sizeof(float),
cudaMemcpyDeviceToHost,
*data_stream));
cuda_safe_call(cudaStreamSynchronize(*data_stream));
const float * predicted_it = output_predicted;
for(std::vector<std::vector<float> >::iterator it = predicted_output_neuron_value_set->neuron_value_list.begin() + entries_processed_count;
it != predicted_output_neuron_value_set->neuron_value_list.begin() + entries_processed_count + entries_available_for_copy_out_count;
it++, predicted_it += output_neuron_count)
{
std::vector<float>& value_list = *it;
std::copy(predicted_it, predicted_it + output_neuron_count, value_list.begin());
}
entries_processed_count += entries_available_for_copy_out_count;
}
cuda_safe_call(cudaStreamSynchronize(*data_stream));
cuda_safe_call(cudaStreamSynchronize(*command_stream));
entries_available_for_copy_out_count = entries_available_for_processing_count;
entries_available_for_processing_count = entries_read_count;
entries_available_for_copy_in_count -= entries_read_count;
current_data_slot = 1 - current_data_slot;
current_command_slot = 1 - current_command_slot;
}
return predicted_output_neuron_value_set;
}
std::vector<layer_configuration_specific_snapshot_smart_ptr> network_tester_cuda::actual_get_snapshot(
const void * input,
neuron_data_type::input_type type_code)
{
std::vector<layer_configuration_specific_snapshot_smart_ptr> res;
unsigned int input_neuron_count = layer_config_list.begin()->get_neuron_count();
unsigned int input_neuron_count_per_feature_map = layer_config_list.begin()->get_neuron_count_per_feature_map();
unsigned int output_neuron_count = (layer_config_list.end() - 1)->get_neuron_count();
size_t input_neuron_elem_size = neuron_data_type::get_input_size(type_code);
cuda_linear_buffer_device_smart_ptr input_buf(new cuda_linear_buffer_device(input_neuron_count * input_neuron_elem_size));
cuda_linear_buffer_device_smart_ptr input_converted_buf(new cuda_linear_buffer_device(input_neuron_count * sizeof(float)));
cuda_linear_buffer_device_smart_ptr output_buffer = input_converted_buf;
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > > input_and_additional_buffers_pack;
std::vector<cuda_linear_buffer_device_smart_ptr> output_buffer_list;
output_buffer_list.push_back(output_buffer);
for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it)
{
std::vector<cuda_linear_buffer_device_smart_ptr> additional_buffers = (*it)->allocate_additional_buffers(1);
input_and_additional_buffers_pack.push_back(std::make_pair(output_buffer, additional_buffers));
output_buffer = (*it)->get_output_buffer(output_buffer, additional_buffers);
output_buffer_list.push_back(output_buffer);
}
// Copy input
{
cuda_safe_call(cudaMemcpyAsync(
*input_buf,
input,
input_neuron_count * input_neuron_elem_size,
cudaMemcpyHostToDevice,
*command_stream));
}
// Convert input
if (type_code == neuron_data_type::type_byte)
{
int elem_count = (input_neuron_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
convert_compacted_to_raw_kernel<<<kernel_dims.first, kernel_dims.second, 0, *command_stream>>>(
*input_buf,
*input_converted_buf,
elem_count);
}
else if (type_code == neuron_data_type::type_float)
{
cuda_safe_call(cudaMemcpyAsync(*input_converted_buf, *input_buf, input_neuron_count * sizeof(float), cudaMemcpyDeviceToDevice, *command_stream));
}
else throw neural_network_exception((boost::format("actual_get_snapshot cannot handle input neurons of type %1%") % type_code).str());
{
layer_configuration_specific_snapshot_smart_ptr input_elem(new layer_configuration_specific_snapshot(layer_config_list[0]));
res.push_back(input_elem);
cuda_safe_call(cudaMemcpyAsync(
&(*(input_elem->data.begin())),
*output_buffer_list[0],
input_elem->data.size() * sizeof(float),
cudaMemcpyDeviceToHost,
*command_stream));
}
// Run ann
{
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > >::iterator input_and_additional_buffers_pack_it = input_and_additional_buffers_pack.begin();
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator net_data_it = net_data.begin();
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = schema_data.begin();
int layer_id = 0;
int output_buffer_id = 1;
for(
std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin();
it != tester_list.end();
++it, ++input_and_additional_buffers_pack_it, ++net_data_it, ++schema_data_it, ++layer_id, ++output_buffer_id)
{
(*it)->enqueue_test(
*command_stream,
*schema_data_it,
*net_data_it,
input_and_additional_buffers_pack_it->first,
input_and_additional_buffers_pack_it->second,
1);
layer_configuration_specific_snapshot_smart_ptr new_elem(new layer_configuration_specific_snapshot(layer_config_list[layer_id + 1]));
res.push_back(new_elem);
cuda_safe_call(cudaMemcpyAsync(
&(*(new_elem->data.begin())),
*output_buffer_list[output_buffer_id],
new_elem->data.size() * sizeof(float),
cudaMemcpyDeviceToHost,
*command_stream));
}
}
cuda_safe_call(cudaStreamSynchronize(*command_stream));
return res;
}
layer_configuration_specific_snapshot_smart_ptr network_tester_cuda::actual_run(
const void * input,
neuron_data_type::input_type type_code)
{
layer_configuration_specific_snapshot_smart_ptr res(new layer_configuration_specific_snapshot(layer_config_list[layer_config_list.size() - 1]));
unsigned int input_neuron_count = layer_config_list.begin()->get_neuron_count();
unsigned int input_neuron_count_per_feature_map = layer_config_list.begin()->get_neuron_count_per_feature_map();
unsigned int output_neuron_count = (layer_config_list.end() - 1)->get_neuron_count();
size_t input_neuron_elem_size = neuron_data_type::get_input_size(type_code);
cuda_linear_buffer_device_smart_ptr input_buf(new cuda_linear_buffer_device(input_neuron_count * input_neuron_elem_size));
cuda_linear_buffer_device_smart_ptr input_converted_buf(new cuda_linear_buffer_device(input_neuron_count * sizeof(float)));
cuda_linear_buffer_device_smart_ptr output_buffer = input_converted_buf;
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > > input_and_additional_buffers_pack;
for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it)
{
std::vector<cuda_linear_buffer_device_smart_ptr> additional_buffers = (*it)->allocate_additional_buffers(1);
input_and_additional_buffers_pack.push_back(std::make_pair(output_buffer, additional_buffers));
output_buffer = (*it)->get_output_buffer(output_buffer, additional_buffers);
}
// Copy input
{
cuda_safe_call(cudaMemcpyAsync(
*input_buf,
input,
input_neuron_count * input_neuron_elem_size,
cudaMemcpyHostToDevice,
*command_stream));
}
// Convert input
if (type_code == neuron_data_type::type_byte)
{
int elem_count = (input_neuron_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
convert_compacted_to_raw_kernel<<<kernel_dims.first, kernel_dims.second, 0, *command_stream>>>(
*input_buf,
*input_converted_buf,
elem_count);
}
else if (type_code == neuron_data_type::type_float)
{
cuda_safe_call(cudaMemcpyAsync(*input_converted_buf, *input_buf, input_neuron_count * sizeof(float), cudaMemcpyDeviceToDevice, *command_stream));
}
else throw neural_network_exception((boost::format("actual_run cannot handle input neurons of type %1%") % type_code).str());
// Run ann
{
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > >::iterator input_and_additional_buffers_pack_it = input_and_additional_buffers_pack.begin();
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator net_data_it = net_data.begin();
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = schema_data.begin();
for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it, ++input_and_additional_buffers_pack_it, ++net_data_it, ++schema_data_it)
{
(*it)->enqueue_test(
*command_stream,
*schema_data_it,
*net_data_it,
input_and_additional_buffers_pack_it->first,
input_and_additional_buffers_pack_it->second,
1);
}
}
// Copy output
{
cuda_safe_call(cudaMemcpyAsync(
&(*(res->data.begin())),
*output_buffer,
output_neuron_count * sizeof(float),
cudaMemcpyDeviceToHost,
*command_stream));
}
cuda_safe_call(cudaStreamSynchronize(*command_stream));
return res;
}
}
}
|
035d54344c3ac52dce2b2ca40ac78837f07600a4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating-point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating-point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
__global__ void vec_y1 (int n, double *result, double *x)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
if (id < n)
{
result[id] = y1(x[id]);
}
}
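// Example launch (illustrative; d_result and d_x are assumed device buffers):
// dim3 block(256);
// dim3 grid((n + block.x - 1) / block.x);
// hipLaunchKernelGGL(vec_y1, grid, block, 0, 0, n, d_result, d_x);
// With a 1-D launch like this, idy is always 0 and id reduces to idx, covering all n elements.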
|
035d54344c3ac52dce2b2ca40ac78837f07600a4.cu
|
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating-point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating-point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
__global__ void vec_y1 (int n, double *result, double *x)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
if (id < n)
{
result[id] = y1(x[id]);
}
}
|
d890d3f5861080d5fbc572793cd911d805de2460.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
//thrust::device_vector<int> dev_idata(idata, idata + n), dev_odata(n);
timer().startGpuTimer();
// TODO use `thrust::exclusive_scan`
// example: for device_vectors dv_in and dv_out:
// thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
//thrust::exclusive_scan(dev_idata.begin(), dev_idata.end(), dev_odata.begin());
thrust::exclusive_scan(idata, idata + n, odata);
timer().endGpuTimer();
//thrust::copy(dev_odata.begin(), dev_odata.end(), odata);
}
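// Worked example (illustrative): for n = 4 and idata = {3, 1, 4, 1}, an exclusive scan
// writes odata = {0, 3, 4, 8}; each output element is the sum of all preceding inputs,
// with the identity value 0 in the first slot. Note (assumption about Thrust dispatch):
// with raw pointers and no explicit execution policy, the call above takes the host path,
// which is likely why the device_vector variant is left commented out.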
}
}
|
d890d3f5861080d5fbc572793cd911d805de2460.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
//thrust::device_vector<int> dev_idata(idata, idata + n), dev_odata(n);
timer().startGpuTimer();
// TODO use `thrust::exclusive_scan`
// example: for device_vectors dv_in and dv_out:
// thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
//thrust::exclusive_scan(dev_idata.begin(), dev_idata.end(), dev_odata.begin());
thrust::exclusive_scan(idata, idata + n, odata);
timer().endGpuTimer();
//thrust::copy(dev_odata.begin(), dev_odata.end(), odata);
}
}
}
|
4c0dd0409977d4c3ba883d93e0758d58c338d831.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "global.h"
#include "consts.h"
#include "film.h"
#include "camera.h"
#include "thread.h"
#include "vector.h"
#include "_lightcuts.h"
#include "obj_object.h"
#include "gpu_util.h"
#include "gpu/geometry_gpu.h"
#include "gpu/_lightcuts_gpu.h"
#include "IL/ilut.h"
ILuint nCurrImg = 1;
#include <time.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
///
Ray *_hostRays = NULL;
Ray_gpu *_deviceRays = NULL;
//
// Engine..
//
Scene scene;
volatile bool bIsRunning = false;
void global_init()
{
// DevIL init
//
ilInit();
ilutRenderer(ILUT_OPENGL);
ilutEnable(ILUT_OPENGL_CONV);
//ilOriginFunc(IL_ORIGIN_UPPER_LEFT);
//ilEnable(IL_ORIGIN_SET);
ilGenImages(1, &nCurrImg);
ilBindImage(nCurrImg);
//
srand(clock());
}
void global_destroy()
{
if(bLCEnabled)
{
deleteLightTree();
if(bGPUEnabled)
{
releaseLightcutsParam();
}
}
gpu_destroy();
end_thread();
// DevIL finalization
ilDeleteImages(1, &nCurrImg);
}
|
4c0dd0409977d4c3ba883d93e0758d58c338d831.cu
|
#include "global.h"
#include "consts.h"
#include "film.h"
#include "camera.h"
#include "thread.h"
#include "vector.h"
#include "_lightcuts.h"
#include "obj_object.h"
#include "gpu_util.h"
#include "gpu/geometry_gpu.h"
#include "gpu/_lightcuts_gpu.h"
#include "IL/ilut.h"
ILuint nCurrImg = 1;
#include <time.h>
#include <stdlib.h>
#include <cuda_runtime.h>
///
Ray *_hostRays = NULL;
Ray_gpu *_deviceRays = NULL;
//
// Engine..
//
Scene scene;
volatile bool bIsRunning = false;
void global_init()
{
// DevIL init
//
ilInit();
ilutRenderer(ILUT_OPENGL);
ilutEnable(ILUT_OPENGL_CONV);
//ilOriginFunc(IL_ORIGIN_UPPER_LEFT);
//ilEnable(IL_ORIGIN_SET);
ilGenImages(1, &nCurrImg);
ilBindImage(nCurrImg);
//
srand(clock());
}
void global_destroy()
{
if(bLCEnabled)
{
deleteLightTree();
if(bGPUEnabled)
{
releaseLightcutsParam();
}
}
gpu_destroy();
end_thread();
// DevIL finalization
ilDeleteImages(1, &nCurrImg);
}
|
bd2ac1abe4c447fcdf5f2b8086bc147eefe9d083.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "SpMat.h"
#include "utils.h"
#include "stdio.h"
//method for calculating number of non-zero elements in a row (DEPRECATED)
__global__ void nnz_in_row(const double* data_partial, const int n, const int cols, int* nnz) {
int i = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
if(n-1 < i)
return;
if(abs(data_partial[i]) > ZERO)
atomicAdd(nnz, 1);
}
//method for computing the cumulative sum of non-zero elements along the rows (DEPRECATED)
__global__ void cum_sum(const int * rowNnz, const int rows, int * cumsum) {
int idx = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
if(rows < idx)
return;
if(idx == rows-1)
cumsum[0] = 0;
else
cumsum[idx+1] = rowNnz[idx];
for(int stride = 1; stride < rows; stride*=2) {
__syncthreads();
if(stride < idx)
cumsum[idx] = cumsum[idx] + cumsum[idx-stride];
}
__syncthreads();
if(idx == rows)
cumsum[idx] = cumsum[idx] + rowNnz[idx-1];
}
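// Intended result (illustrative): for rows = 3 and rowNnz = {2, 0, 3}, the kernel is meant
// to produce cumsum = {0, 2, 2, 5}; cumsum[i] is the number of non-zeros in all rows before
// row i, and cumsum[rows] is the total nnz, i.e. exactly the CSR rowPtr array built below.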
//method for extracting column index and corresponding value from each row (DEPRECATED)
__global__ void get_ind_val(const double* data_partial, const int n, const int cols, int * colInd, double * val, int& nnz) {
int i = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
if(n-1 < i)
return;
if(!(abs(data_partial[i]) > ZERO))
return;
int my_ind;
my_ind = atomicSub(&nnz, 1) - 1;
colInd[my_ind] = i;
val[my_ind] = data_partial[i];
}
// This constructor is DEPRECATED
// it basically loads the data batch-wise to the GPU and computes the CSR representation of the matrix
SpMat::SpMat(int rows, int cols, double * data) : rows(rows), cols(cols) {
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(dimBlock.x*dimBlock.y / (cols*BULK_SIZE) + 1);
double * data_partial;
int *RowNonzero;
int *nnz_elem;
hipMalloc(&nnz_elem, sizeof(int));
hipMalloc(&data_partial, cols * BULK_SIZE * sizeof(double));
hipMalloc(&RowNonzero, rows*sizeof(int));
int i_it_num = rows % BULK_SIZE == 0 ? rows/BULK_SIZE : rows/BULK_SIZE + 1;
int j_it_num, elem_num;
for(int i = 0; i < i_it_num; i++) {
elem_num = (BULK_SIZE*cols) * (i+1) < rows*cols ? (BULK_SIZE*cols) : rows*cols - (BULK_SIZE*cols)*(i);
hipMemcpy(data_partial, data + (BULK_SIZE*cols) * i, elem_num * sizeof(double), hipMemcpyDefault);
j_it_num = BULK_SIZE*(i+1) < rows ? BULK_SIZE : rows - BULK_SIZE*i;
for (int j = 0; j < j_it_num; j++)
hipLaunchKernelGGL(( nnz_in_row), dim3(dimGrid), dim3(dimBlock), 0, 0, data_partial + cols*j, cols, cols, (RowNonzero + j) + BULK_SIZE*i);
}
hipMalloc(&rowPtr, (rows + 1) * sizeof(int));
hipLaunchKernelGGL(( cum_sum), dim3(dimGrid), dim3(dimBlock), 0, 0, RowNonzero, rows, rowPtr);
hipMemcpy(&nnz,rowPtr + rows,sizeof(int),hipMemcpyDefault);
hipMalloc(&colInd, (nnz) * sizeof(int));
hipMalloc(&val, (nnz) * sizeof(double));
printf("Matrix has %d Non-Zero Elements\n",nnz);
int offset, row_num;
for(int i = 0; i < i_it_num; i++) {
elem_num = (BULK_SIZE*cols) * (i+1) < rows*cols ? (BULK_SIZE*cols) : rows*cols - (BULK_SIZE*cols)*(i);
hipMemcpy(data_partial, data + (BULK_SIZE*cols) * i, elem_num * sizeof(double), hipMemcpyDefault);
j_it_num = BULK_SIZE*(i+1) < rows ? BULK_SIZE : rows - BULK_SIZE*i;
for (int j = 0; j < j_it_num; j++) {
row_num = j + BULK_SIZE*i;
hipMemcpy(&offset, rowPtr + row_num, sizeof(int), hipMemcpyDefault);
hipLaunchKernelGGL(( get_ind_val), dim3(dimGrid), dim3(dimBlock), 0, 0, data_partial + cols*j, cols, cols, colInd + offset, val + offset, RowNonzero[row_num]);
}
}
CUDAFREE(RowNonzero);
CUDAFREE(data_partial);
}
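// CSR refresher (illustrative example, not taken from any original data): the 3x3 matrix
// [5 0 0]
// [0 8 3]
// [0 0 6]
// is stored as
// val = {5, 8, 3, 6}
// colInd = {0, 1, 2, 2}
// rowPtr = {0, 1, 3, 4} (rowPtr[i+1] - rowPtr[i] is the number of non-zeros in row i)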
//kernel for calculating the dot product between x and A and storing the results in y (DEPRECATED)
__global__ void dot_kernel( const int * rowPtr, const int * colInd, const double* val,
const double* x, double* y, int row_num, int col_num, double * y_nnz){
int idx = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
if(rowPtr[row_num] - 1 < idx)
return;
// each thread has to determine its row number (which was a bad idea)
int row;
for (int i = 0; i < row_num; i++)
if((idx >= rowPtr[i] && idx < rowPtr[i+1]) && rowPtr[i] != rowPtr[i+1]) {
row = i;
}
// each thread has to calculate its value
y_nnz[idx] = x[colInd[idx]]*val[idx];
if(idx != rowPtr[row])
return;
//first thread of a row sums all entries in the row and stores the result in the corresponding entry in y
int n = rowPtr[row+1] - rowPtr[row];
for(int i = 0; i < n; i++)
y[row] += y_nnz[idx+i];
}
// method that calculates dot product between this matrix and GPUVector x and stores results to GPUVector y
void SpMat::dot(const GPUVector & x,GPUVector & y ) {
assert(x.n == cols);
size_t buffer_size;
double h_one = 1.0;
const double h_zero = 0.0;
// calculate buffer size
cusparseStat = cusparseCsrmvEx_bufferSize(cusparseH,
CUSPARSE_ALG_MERGE_PATH,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
rows,
cols,
nnz,
&h_one, HIP_R_64F,
descrA,
val, HIP_R_64F,
rowPtr,
colInd,
x.elements, HIP_R_64F,
&h_zero, HIP_R_64F,
y.elements, HIP_R_64F,
HIP_R_64F,
&buffer_size);
assert(HIPSPARSE_STATUS_SUCCESS == cusparseStat);
// allocate buffer for calculation
void* buffer;
hipMalloc ((void**)&buffer, buffer_size);
cusparseStat = cusparseCsrmvEx(cusparseH,
CUSPARSE_ALG_MERGE_PATH,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
rows,
cols,
nnz,
&h_one, HIP_R_64F,
descrA,
val, HIP_R_64F,
rowPtr,
colInd,
x.elements, HIP_R_64F,
&h_zero, HIP_R_64F,
y.elements, HIP_R_64F,
HIP_R_64F,
buffer);
assert(HIPSPARSE_STATUS_SUCCESS == cusparseStat);
CUDAFREE(buffer);
}
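// Note: cusparseCsrmvEx computes y = alpha * op(A) * x + beta * y; with alpha = h_one = 1.0
// and beta = h_zero = 0.0 as passed above, the call reduces to the plain product y = A * x.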
// star operator for calling the dot product
GPUVector SpMat::operator*(const GPUVector &b) {
GPUVector y(b.handle, rows);
dot(b,y);
return y;
}
// kernel for calculating the number of nnz elements in each column of matrix A
__global__ void transpose_row_nnz(const int * colInd, int cols, int nnz, int* colNnz, int* cumsum) {
int idx = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
if(nnz - 1 < idx)
return;
// increment number of elements in your column
atomicAdd(colNnz + colInd[idx],1);
if(cols + 1 < idx)
return;
if(idx == cols-1)
cumsum[0] = 0;
else
cumsum[idx+1] = colNnz[idx];
// use prefix-sum approach from lecture to compute cumulative sum
for(int stride = 1; stride < cols; stride*=2) {
__syncthreads();
if(stride < idx)
cumsum[idx] = cumsum[idx] + cumsum[idx-stride];
}
__syncthreads();
if(idx == cols)
cumsum[idx] = cumsum[idx] + colNnz[idx-1];
}
// kernel for transposing the entries in matrix A
__global__ void transpose_kernel( const int* rowPtr, const int * colInd, const double* val,
int * rowInd, double* trans_val,
int row_num, int col_num, int nnz, int* colNnz, const int* colPtr) {
int idx = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
if(nnz < idx)
return;
// each thread has to find its row
int row;
for (int i = 0; i < row_num; i++)
if((idx >= rowPtr[i] && idx < rowPtr[i+1]) && rowPtr[i] != rowPtr[i+1]) {
row = i;
}
int my_ind = atomicSub(colNnz + colInd[idx], 1) - 1;
rowInd[colPtr[colInd[idx]] + my_ind] = row;
trans_val[colPtr[colInd[idx]] + my_ind] = val[idx];
}
SpMat SpMat::transpose() {
size_t buffer_size = 0;
// preallocate all required pointers
int* cscColPtr = NULL;
int* cscRowInd = NULL;
double* cscVal = NULL;
hipMalloc(&cscColPtr, (cols+1)*sizeof(int));
hipMalloc(&cscRowInd, nnz*sizeof(int));
hipMalloc(&cscVal, nnz*sizeof(double));
// calculate the required space
cusparseStat = hipsparseCsr2cscEx2_bufferSize(cusparseH,
rows,
cols,
nnz,
val,
rowPtr,
colInd,
cscVal,
cscColPtr,
cscRowInd,
HIP_R_64F,
HIPSPARSE_ACTION_NUMERIC,
HIPSPARSE_INDEX_BASE_ZERO,
HIPSPARSE_CSR2CSC_ALG1,
&buffer_size);
assert(HIPSPARSE_STATUS_SUCCESS == cusparseStat);
// load the buffer
void* buffer = NULL;
hipMalloc ((void**)&buffer, buffer_size);
// execute transposition in the buffer
cusparseStat = hipsparseCsr2cscEx2(cusparseH,
rows,
cols,
nnz,
val,
rowPtr,
colInd,
cscVal,
cscColPtr,
cscRowInd,
HIP_R_64F,
HIPSPARSE_ACTION_NUMERIC,
HIPSPARSE_INDEX_BASE_ZERO,
HIPSPARSE_CSR2CSC_ALG1,
buffer);
assert(HIPSPARSE_STATUS_SUCCESS == cusparseStat);
//create new matrix with the pointers and return it
SpMat mat(cscColPtr,cscRowInd,cscVal, cols, rows, nnz, cusparseH);
CUDAFREE(buffer);
return mat;
}
// destructor for constructed arrays
SpMat::~SpMat() {
CUDAFREE(rowPtr);
CUDAFREE(colInd);
CUDAFREE(val);
}
|
bd2ac1abe4c447fcdf5f2b8086bc147eefe9d083.cu
|
#include "SpMat.h"
#include "utils.h"
#include "stdio.h"
//method for calculating number of non-zero elements in a row (DEPRECATED)
__global__ void nnz_in_row(const double* data_partial, const int n, const int cols, int* nnz) {
int i = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
if(n-1 < i)
return;
if(abs(data_partial[i]) > ZERO)
atomicAdd(nnz, 1);
}
//method for computing the cumulative sum of non-zero elements along the rows (DEPRECATED)
__global__ void cum_sum(const int * rowNnz, const int rows, int * cumsum) {
int idx = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
if(rows < idx)
return;
if(idx == rows-1)
cumsum[0] = 0;
else
cumsum[idx+1] = rowNnz[idx];
for(int stride = 1; stride < rows; stride*=2) {
__syncthreads();
if(stride < idx)
cumsum[idx] = cumsum[idx] + cumsum[idx-stride];
}
__syncthreads();
if(idx == rows)
cumsum[idx] = cumsum[idx] + rowNnz[idx-1];
}
//method for extracting column index and corresponding value from each row (DEPRECATED)
__global__ void get_ind_val(const double* data_partial, const int n, const int cols, int * colInd, double * val, int& nnz) {
int i = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
if(n-1 < i)
return;
if(!(abs(data_partial[i]) > ZERO))
return;
int my_ind;
my_ind = atomicSub(&nnz, 1) - 1;
colInd[my_ind] = i;
val[my_ind] = data_partial[i];
}
// This constructor is DEPRECATED
// it basically loads the data batch-wise to the GPU and computes the CSR representation of the matrix
SpMat::SpMat(int rows, int cols, double * data) : rows(rows), cols(cols) {
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(dimBlock.x*dimBlock.y / (cols*BULK_SIZE) + 1);
double * data_partial;
int *RowNonzero;
int *nnz_elem;
cudaMalloc(&nnz_elem, sizeof(int));
cudaMalloc(&data_partial, cols * BULK_SIZE * sizeof(double));
cudaMalloc(&RowNonzero, rows*sizeof(int));
int i_it_num = rows % BULK_SIZE == 0 ? rows/BULK_SIZE : rows/BULK_SIZE + 1;
int j_it_num, elem_num;
for(int i = 0; i < i_it_num; i++) {
elem_num = (BULK_SIZE*cols) * (i+1) < rows*cols ? (BULK_SIZE*cols) : rows*cols - (BULK_SIZE*cols)*(i);
cudaMemcpy(data_partial, data + (BULK_SIZE*cols) * i, elem_num * sizeof(double), cudaMemcpyDefault);
j_it_num = BULK_SIZE*(i+1) < rows ? BULK_SIZE : rows - BULK_SIZE*i;
for (int j = 0; j < j_it_num; j++)
nnz_in_row<<<dimGrid, dimBlock>>>(data_partial + cols*j, cols, cols, (RowNonzero + j) + BULK_SIZE*i);
}
cudaMalloc(&rowPtr, (rows + 1) * sizeof(int));
cum_sum<<<dimGrid, dimBlock>>>(RowNonzero, rows, rowPtr);
cudaMemcpy(&nnz,rowPtr + rows,sizeof(int),cudaMemcpyDefault);
cudaMalloc(&colInd, (nnz) * sizeof(int));
cudaMalloc(&val, (nnz) * sizeof(double));
printf("Matrix has %d Non-Zero Elements\n",nnz);
int offset, row_num;
for(int i = 0; i < i_it_num; i++) {
elem_num = (BULK_SIZE*cols) * (i+1) < rows*cols ? (BULK_SIZE*cols) : rows*cols - (BULK_SIZE*cols)*(i);
cudaMemcpy(data_partial, data + (BULK_SIZE*cols) * i, elem_num * sizeof(double), cudaMemcpyDefault);
j_it_num = BULK_SIZE*(i+1) < rows ? BULK_SIZE : rows - BULK_SIZE*i;
for (int j = 0; j < j_it_num; j++) {
row_num = j + BULK_SIZE*i;
cudaMemcpy(&offset, rowPtr + row_num, sizeof(int), cudaMemcpyDefault);
get_ind_val<<<dimGrid, dimBlock>>>(data_partial + cols*j, cols, cols, colInd + offset, val + offset, RowNonzero[row_num]);
}
}
CUDAFREE(RowNonzero);
CUDAFREE(data_partial);
}
//kernel for calculating the dot product between x and A and storing the results in y (DEPRECATED)
__global__ void dot_kernel( const int * rowPtr, const int * colInd, const double* val,
const double* x, double* y, int row_num, int col_num, double * y_nnz){
int idx = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
if(rowPtr[row_num] - 1 < idx)
return;
// each thread has to determine its row number (which was a bad idea)
int row;
for (int i = 0; i < row_num; i++)
if((idx >= rowPtr[i] && idx < rowPtr[i+1]) && rowPtr[i] != rowPtr[i+1]) {
row = i;
}
// each thread has to calculate its value
y_nnz[idx] = x[colInd[idx]]*val[idx];
if(idx != rowPtr[row])
return;
//first thread of a row sums all entries in the row and stores the result in the corresponding entry in y
int n = rowPtr[row+1] - rowPtr[row];
for(int i = 0; i < n; i++)
y[row] += y_nnz[idx+i];
}
// method that calculates dot product between this matrix and GPUVector x and stores results to GPUVector y
void SpMat::dot(const GPUVector & x,GPUVector & y ) {
assert(x.n == cols);
size_t buffer_size;
double h_one = 1.0;
const double h_zero = 0.0;
// calculate buffer size
cusparseStat = cusparseCsrmvEx_bufferSize(cusparseH,
CUSPARSE_ALG_MERGE_PATH,
CUSPARSE_OPERATION_NON_TRANSPOSE,
rows,
cols,
nnz,
&h_one, CUDA_R_64F,
descrA,
val, CUDA_R_64F,
rowPtr,
colInd,
x.elements, CUDA_R_64F,
&h_zero, CUDA_R_64F,
y.elements, CUDA_R_64F,
CUDA_R_64F,
&buffer_size);
assert(CUSPARSE_STATUS_SUCCESS == cusparseStat);
// allocate buffer for calculation
void* buffer;
cudaMalloc ((void**)&buffer, buffer_size);
cusparseStat = cusparseCsrmvEx(cusparseH,
CUSPARSE_ALG_MERGE_PATH,
CUSPARSE_OPERATION_NON_TRANSPOSE,
rows,
cols,
nnz,
&h_one, CUDA_R_64F,
descrA,
val, CUDA_R_64F,
rowPtr,
colInd,
x.elements, CUDA_R_64F,
&h_zero, CUDA_R_64F,
y.elements, CUDA_R_64F,
CUDA_R_64F,
buffer);
assert(CUSPARSE_STATUS_SUCCESS == cusparseStat);
CUDAFREE(buffer);
}
// star operator for calling the dot product
GPUVector SpMat::operator*(const GPUVector &b) {
GPUVector y(b.handle, rows);
dot(b,y);
return y;
}
// kernel for calculating the number of nnz elements in each column of matrix A
__global__ void transpose_row_nnz(const int * colInd, int cols, int nnz, int* colNnz, int* cumsum) {
int idx = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
if(nnz - 1 < idx)
return;
// increment number of elements in your column
atomicAdd(colNnz + colInd[idx],1);
if(cols + 1 < idx)
return;
if(idx == cols-1)
cumsum[0] = 0;
else
cumsum[idx+1] = colNnz[idx];
// use prefix-sum approach from lecture to compute cumulative sum
for(int stride = 1; stride < cols; stride*=2) {
__syncthreads();
if(stride < idx)
cumsum[idx] = cumsum[idx] + cumsum[idx-stride];
}
__syncthreads();
if(idx == cols)
cumsum[idx] = cumsum[idx] + colNnz[idx-1];
}
// kernel for transposing the entries in matrix A
__global__ void transpose_kernel( const int* rowPtr, const int * colInd, const double* val,
int * rowInd, double* trans_val,
int row_num, int col_num, int nnz, int* colNnz, const int* colPtr) {
int idx = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
if(nnz < idx)
return;
// each thread has to find its row
int row;
for (int i = 0; i < row_num; i++)
if((idx >= rowPtr[i] && idx < rowPtr[i+1]) && rowPtr[i] != rowPtr[i+1]) {
row = i;
}
int my_ind = atomicSub(colNnz + colInd[idx], 1) - 1;
rowInd[colPtr[colInd[idx]] + my_ind] = row;
trans_val[colPtr[colInd[idx]] + my_ind] = val[idx];
}
SpMat SpMat::transpose() {
size_t buffer_size = 0;
// preallocate all required pointers
int* cscColPtr = NULL;
int* cscRowInd = NULL;
double* cscVal = NULL;
cudaMalloc(&cscColPtr, (cols+1)*sizeof(int));
cudaMalloc(&cscRowInd, nnz*sizeof(int));
cudaMalloc(&cscVal, nnz*sizeof(double));
// calculate the required space
cusparseStat = cusparseCsr2cscEx2_bufferSize(cusparseH,
rows,
cols,
nnz,
val,
rowPtr,
colInd,
cscVal,
cscColPtr,
cscRowInd,
CUDA_R_64F,
CUSPARSE_ACTION_NUMERIC,
CUSPARSE_INDEX_BASE_ZERO,
CUSPARSE_CSR2CSC_ALG1,
&buffer_size);
assert(CUSPARSE_STATUS_SUCCESS == cusparseStat);
// load the buffer
void* buffer = NULL;
cudaMalloc ((void**)&buffer, buffer_size);
// execute transposition in the buffer
cusparseStat = cusparseCsr2cscEx2(cusparseH,
rows,
cols,
nnz,
val,
rowPtr,
colInd,
cscVal,
cscColPtr,
cscRowInd,
CUDA_R_64F,
CUSPARSE_ACTION_NUMERIC,
CUSPARSE_INDEX_BASE_ZERO,
CUSPARSE_CSR2CSC_ALG1,
buffer);
assert(CUSPARSE_STATUS_SUCCESS == cusparseStat);
//create new matrix with the pointers and return it
SpMat mat(cscColPtr,cscRowInd,cscVal, cols, rows, nnz, cusparseH);
CUDAFREE(buffer);
return mat;
}
// destructor for constructed arrays
SpMat::~SpMat() {
CUDAFREE(rowPtr);
CUDAFREE(colInd);
CUDAFREE(val);
}
|
7a6a07b0e6828562a9420cd5e6052348cbd6ea51.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include <hip/hip_runtime_api.h>
#include <mpi.h>
#include <cudf/table/table.hpp>
#include <cudf/types.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include "../src/communicator.h"
#include "../src/distributed_join.cuh"
#include "../src/error.cuh"
#include "../src/generate_table.cuh"
#include "../src/registered_memory_resource.hpp"
#include "../src/topology.cuh"
static std::string key_type = "int64_t";
static std::string payload_type = "int64_t";
static cudf::size_type BUILD_TABLE_NROWS_EACH_RANK = 100'000'000;
static cudf::size_type PROBE_TABLE_NROWS_EACH_RANK = 100'000'000;
static double SELECTIVITY = 0.3;
static bool IS_BUILD_TABLE_KEY_UNIQUE = true;
static int OVER_DECOMPOSITION_FACTOR = 1;
static std::string COMMUNICATOR_NAME = "UCX";
static std::string REGISTRATION_METHOD = "preregistered";
static int64_t COMMUNICATOR_BUFFER_SIZE = 1'600'000'000LL;
static bool COMPRESSION = false;
void parse_command_line_arguments(int argc, char *argv[])
{
for (int iarg = 0; iarg < argc; iarg++) {
if (!strcmp(argv[iarg], "--key-type")) { key_type = argv[iarg + 1]; }
if (!strcmp(argv[iarg], "--payload-type")) { payload_type = argv[iarg + 1]; }
if (!strcmp(argv[iarg], "--build-table-nrows")) {
BUILD_TABLE_NROWS_EACH_RANK = atoi(argv[iarg + 1]);
}
if (!strcmp(argv[iarg], "--probe-table-nrows")) {
PROBE_TABLE_NROWS_EACH_RANK = atoi(argv[iarg + 1]);
}
if (!strcmp(argv[iarg], "--selectivity")) { SELECTIVITY = atof(argv[iarg + 1]); }
if (!strcmp(argv[iarg], "--duplicate-build-keys")) { IS_BUILD_TABLE_KEY_UNIQUE = false; }
if (!strcmp(argv[iarg], "--over-decomposition-factor")) {
OVER_DECOMPOSITION_FACTOR = atoi(argv[iarg + 1]);
}
if (!strcmp(argv[iarg], "--communicator")) { COMMUNICATOR_NAME = argv[iarg + 1]; }
if (!strcmp(argv[iarg], "--compression")) { COMPRESSION = true; }
if (!strcmp(argv[iarg], "--registration-method")) { REGISTRATION_METHOD = argv[iarg + 1]; }
}
}
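// Example invocation (illustrative; the executable name and MPI launcher usage are assumed):
// mpirun -n 4 ./distributed_join_benchmark --key-type int32_t --payload-type int32_t \
// --build-table-nrows 1000000 --probe-table-nrows 1000000 --selectivity 0.3 \
// --communicator UCX --registration-method preregistered --compression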
void report_configuration()
{
MPI_CALL(MPI_Barrier(MPI_COMM_WORLD));
int mpi_rank;
int mpi_size;
MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank));
MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD, &mpi_size));
if (mpi_rank != 0) return;
std::cout << "========== Parameters ==========" << std::endl;
std::cout << std::boolalpha;
std::cout << "Key type: " << key_type << std::endl;
std::cout << "Payload type: " << payload_type << std::endl;
std::cout << "Number of rows in the build table: "
<< static_cast<uint64_t>(BUILD_TABLE_NROWS_EACH_RANK) * mpi_size / 1e6 << " million"
<< std::endl;
std::cout << "Number of rows in the probe table: "
<< static_cast<uint64_t>(PROBE_TABLE_NROWS_EACH_RANK) * mpi_size / 1e6 << " million"
<< std::endl;
std::cout << "Selectivity: " << SELECTIVITY << std::endl;
std::cout << "Keys in build table are unique: " << IS_BUILD_TABLE_KEY_UNIQUE << std::endl;
std::cout << "Over-decomposition factor: " << OVER_DECOMPOSITION_FACTOR << std::endl;
std::cout << "Communicator: " << COMMUNICATOR_NAME << std::endl;
if (COMMUNICATOR_NAME == "UCX")
std::cout << "Registration method: " << REGISTRATION_METHOD << std::endl;
std::cout << "Compression: " << COMPRESSION << std::endl;
std::cout << "================================" << std::endl;
}
int main(int argc, char *argv[])
{
/* Initialize topology */
setup_topology(argc, argv);
/* Parse command line arguments */
parse_command_line_arguments(argc, argv);
report_configuration();
cudf::size_type RAND_MAX_VAL =
std::max(BUILD_TABLE_NROWS_EACH_RANK, PROBE_TABLE_NROWS_EACH_RANK) * 2;
/* Initialize communicator and memory pool */
int mpi_rank;
int mpi_size;
MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank));
MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD, &mpi_size));
Communicator *communicator{nullptr};
// `registered_mr` holds a reference to the registered memory resource, and is *nullptr* if a
// registered memory resource is not used.
registered_memory_resource *registered_mr{nullptr};
// pool_mr needs to live on the heap because, for registered memory resources, the memory pool
// needs to be deallocated before UCX cleanup, which can be achieved by calling the destructor
// of `pool_mr`.
rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> *pool_mr{nullptr};
setup_memory_pool_and_communicator(communicator,
registered_mr,
pool_mr,
COMMUNICATOR_NAME,
REGISTRATION_METHOD,
COMMUNICATOR_BUFFER_SIZE);
/* Warmup nvcomp */
if (COMPRESSION) { warmup_nvcomp(); }
/* Generate build table and probe table on each rank */
std::unique_ptr<cudf::table> left;
std::unique_ptr<cudf::table> right;
#define generate_tables(KEY_T, PAYLOAD_T) \
{ \
std::tie(left, right) = \
generate_tables_distributed<KEY_T, PAYLOAD_T>(BUILD_TABLE_NROWS_EACH_RANK, \
PROBE_TABLE_NROWS_EACH_RANK, \
SELECTIVITY, \
RAND_MAX_VAL, \
IS_BUILD_TABLE_KEY_UNIQUE, \
communicator); \
}
#define generate_tables_key_type(KEY_T) \
{ \
if (payload_type == "int64_t") { \
generate_tables(KEY_T, int64_t) \
} else if (payload_type == "int32_t") { \
generate_tables(KEY_T, int32_t) \
} else { \
throw std::runtime_error("Unknown payload type"); \
} \
}
if (key_type == "int64_t") {
generate_tables_key_type(int64_t)
} else if (key_type == "int32_t") {
generate_tables_key_type(int32_t)
} else {
throw std::runtime_error("Unknown key type");
}
/* Distributed join */
CUDA_RT_CALL(hipDeviceSynchronize());
MPI_Barrier(MPI_COMM_WORLD);
hipProfilerStart();
double start = MPI_Wtime();
std::unique_ptr<cudf::table> join_result =
distributed_inner_join(left->view(),
right->view(),
{0},
{0},
{std::pair<cudf::size_type, cudf::size_type>(0, 0)},
communicator,
OVER_DECOMPOSITION_FACTOR,
COMPRESSION);
MPI_Barrier(MPI_COMM_WORLD);
double stop = MPI_Wtime();
hipProfilerStop();
if (mpi_rank == 0) { std::cout << "Elapsed time (s) " << stop - start << std::endl; }
/* Cleanup */
left.reset();
right.reset();
join_result.reset();
CUDA_RT_CALL(hipDeviceSynchronize());
destroy_memory_pool_and_communicator(
communicator, registered_mr, pool_mr, COMMUNICATOR_NAME, REGISTRATION_METHOD);
MPI_CALL(MPI_Finalize());
return 0;
}
|
7a6a07b0e6828562a9420cd5e6052348cbd6ea51.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include <cuda_profiler_api.h>
#include <mpi.h>
#include <cudf/table/table.hpp>
#include <cudf/types.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include "../src/communicator.h"
#include "../src/distributed_join.cuh"
#include "../src/error.cuh"
#include "../src/generate_table.cuh"
#include "../src/registered_memory_resource.hpp"
#include "../src/topology.cuh"
static std::string key_type = "int64_t";
static std::string payload_type = "int64_t";
static cudf::size_type BUILD_TABLE_NROWS_EACH_RANK = 100'000'000;
static cudf::size_type PROBE_TABLE_NROWS_EACH_RANK = 100'000'000;
static double SELECTIVITY = 0.3;
static bool IS_BUILD_TABLE_KEY_UNIQUE = true;
static int OVER_DECOMPOSITION_FACTOR = 1;
static std::string COMMUNICATOR_NAME = "UCX";
static std::string REGISTRATION_METHOD = "preregistered";
static int64_t COMMUNICATOR_BUFFER_SIZE = 1'600'000'000LL;
static bool COMPRESSION = false;
void parse_command_line_arguments(int argc, char *argv[])
{
for (int iarg = 0; iarg < argc; iarg++) {
if (!strcmp(argv[iarg], "--key-type")) { key_type = argv[iarg + 1]; }
if (!strcmp(argv[iarg], "--payload-type")) { payload_type = argv[iarg + 1]; }
if (!strcmp(argv[iarg], "--build-table-nrows")) {
BUILD_TABLE_NROWS_EACH_RANK = atoi(argv[iarg + 1]);
}
if (!strcmp(argv[iarg], "--probe-table-nrows")) {
PROBE_TABLE_NROWS_EACH_RANK = atoi(argv[iarg + 1]);
}
if (!strcmp(argv[iarg], "--selectivity")) { SELECTIVITY = atof(argv[iarg + 1]); }
if (!strcmp(argv[iarg], "--duplicate-build-keys")) { IS_BUILD_TABLE_KEY_UNIQUE = false; }
if (!strcmp(argv[iarg], "--over-decomposition-factor")) {
OVER_DECOMPOSITION_FACTOR = atoi(argv[iarg + 1]);
}
if (!strcmp(argv[iarg], "--communicator")) { COMMUNICATOR_NAME = argv[iarg + 1]; }
if (!strcmp(argv[iarg], "--compression")) { COMPRESSION = true; }
if (!strcmp(argv[iarg], "--registration-method")) { REGISTRATION_METHOD = argv[iarg + 1]; }
}
}
void report_configuration()
{
MPI_CALL(MPI_Barrier(MPI_COMM_WORLD));
int mpi_rank;
int mpi_size;
MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank));
MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD, &mpi_size));
if (mpi_rank != 0) return;
std::cout << "========== Parameters ==========" << std::endl;
std::cout << std::boolalpha;
std::cout << "Key type: " << key_type << std::endl;
std::cout << "Payload type: " << payload_type << std::endl;
std::cout << "Number of rows in the build table: "
<< static_cast<uint64_t>(BUILD_TABLE_NROWS_EACH_RANK) * mpi_size / 1e6 << " million"
<< std::endl;
std::cout << "Number of rows in the probe table: "
<< static_cast<uint64_t>(PROBE_TABLE_NROWS_EACH_RANK) * mpi_size / 1e6 << " million"
<< std::endl;
std::cout << "Selectivity: " << SELECTIVITY << std::endl;
std::cout << "Keys in build table are unique: " << IS_BUILD_TABLE_KEY_UNIQUE << std::endl;
std::cout << "Over-decomposition factor: " << OVER_DECOMPOSITION_FACTOR << std::endl;
std::cout << "Communicator: " << COMMUNICATOR_NAME << std::endl;
if (COMMUNICATOR_NAME == "UCX")
std::cout << "Registration method: " << REGISTRATION_METHOD << std::endl;
std::cout << "Compression: " << COMPRESSION << std::endl;
std::cout << "================================" << std::endl;
}
int main(int argc, char *argv[])
{
/* Initialize topology */
setup_topology(argc, argv);
/* Parse command line arguments */
parse_command_line_arguments(argc, argv);
report_configuration();
cudf::size_type RAND_MAX_VAL =
std::max(BUILD_TABLE_NROWS_EACH_RANK, PROBE_TABLE_NROWS_EACH_RANK) * 2;
/* Initialize communicator and memory pool */
int mpi_rank;
int mpi_size;
MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank));
MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD, &mpi_size));
Communicator *communicator{nullptr};
// `registered_mr` holds a reference to the registered memory resource, and is *nullptr* if a
// registered memory resource is not used.
registered_memory_resource *registered_mr{nullptr};
// pool_mr needs to live on the heap because, for registered memory resources, the memory pool
// needs to be deallocated before UCX cleanup, which can be achieved by calling the destructor
// of `pool_mr`.
rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> *pool_mr{nullptr};
setup_memory_pool_and_communicator(communicator,
registered_mr,
pool_mr,
COMMUNICATOR_NAME,
REGISTRATION_METHOD,
COMMUNICATOR_BUFFER_SIZE);
/* Warmup nvcomp */
if (COMPRESSION) { warmup_nvcomp(); }
/* Generate build table and probe table on each rank */
std::unique_ptr<cudf::table> left;
std::unique_ptr<cudf::table> right;
#define generate_tables(KEY_T, PAYLOAD_T) \
{ \
std::tie(left, right) = \
generate_tables_distributed<KEY_T, PAYLOAD_T>(BUILD_TABLE_NROWS_EACH_RANK, \
PROBE_TABLE_NROWS_EACH_RANK, \
SELECTIVITY, \
RAND_MAX_VAL, \
IS_BUILD_TABLE_KEY_UNIQUE, \
communicator); \
}
#define generate_tables_key_type(KEY_T) \
{ \
if (payload_type == "int64_t") { \
generate_tables(KEY_T, int64_t) \
} else if (payload_type == "int32_t") { \
generate_tables(KEY_T, int32_t) \
} else { \
throw std::runtime_error("Unknown payload type"); \
} \
}
if (key_type == "int64_t") {
generate_tables_key_type(int64_t)
} else if (key_type == "int32_t") {
generate_tables_key_type(int32_t)
} else {
throw std::runtime_error("Unknown key type");
}
/* Distributed join */
CUDA_RT_CALL(cudaDeviceSynchronize());
MPI_Barrier(MPI_COMM_WORLD);
cudaProfilerStart();
double start = MPI_Wtime();
std::unique_ptr<cudf::table> join_result =
distributed_inner_join(left->view(),
right->view(),
{0},
{0},
{std::pair<cudf::size_type, cudf::size_type>(0, 0)},
communicator,
OVER_DECOMPOSITION_FACTOR,
COMPRESSION);
MPI_Barrier(MPI_COMM_WORLD);
double stop = MPI_Wtime();
cudaProfilerStop();
if (mpi_rank == 0) { std::cout << "Elapsed time (s) " << stop - start << std::endl; }
/* Cleanup */
left.reset();
right.reset();
join_result.reset();
CUDA_RT_CALL(cudaDeviceSynchronize());
destroy_memory_pool_and_communicator(
communicator, registered_mr, pool_mr, COMMUNICATOR_NAME, REGISTRATION_METHOD);
MPI_CALL(MPI_Finalize());
return 0;
}
|
2ee2bb075630d8e3410b3003df568c822255d0e4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "SceNodes.h"
__constant__ double sceInterPara[5];
__constant__ double sceIntraPara[5];
// parameter set for cells that are going to divide
__constant__ double sceIntraParaDiv[5];
__constant__ double sceDivProPara;
__constant__ double sceCartPara[5];
__constant__ double sceInterDiffPara[5];
__constant__ double sceProfilePara[7];
__constant__ double sceECMPara[5];
__constant__ double sceDiffPara[5];
__constant__ double cartGrowDirVec[3];
__constant__ uint ProfilebeginPos;
__constant__ uint ECMbeginPos;
__constant__ uint cellNodeBeginPos;
__constant__ uint nodeCountPerECM;
__constant__ uint nodeCountPerCell;
//
__constant__ uint cellNodeBeginPos_M;
__constant__ uint allNodeCountPerCell_M;
__constant__ uint membrThreshold_M;
__constant__ double sceInterBPara_M[5];
__constant__ int sceInterBPara_Jones_On_M ; //Ali
__constant__ double sceInterBPara_Jones_M[3] ; //Ali
__constant__ double sceIntnlBPara_M[5];
__constant__ double sceIntraPara_M[5];
__constant__ double sceIntraParaDiv_M[5];
__constant__ double growthPrgrCriVal_M;
__constant__ double maxAdhBondLen_M;
__constant__ double minAdhBondLen_M;
__constant__ double bondStiff_M;
__constant__ double bondStiff_Mitotic;
__constant__ double bondAdhCriLen_M;
// #define DebugMode
// This template method expands an input sequence by
// replicating each element a variable number of times. For example,
//
// expand([2,2,2],[A,B,C]) -> [A,A,B,B,C,C]
// expand([3,0,1],[A,B,C]) -> [A,A,A,C]
// expand([1,3,2],[A,B,C]) -> [A,B,B,B,C,C]
//
// The element counts are assumed to be non-negative integers
template<typename InputIterator1, typename InputIterator2,
typename OutputIterator>
OutputIterator expand(InputIterator1 first1, InputIterator1 last1,
InputIterator2 first2, OutputIterator output) {
typedef typename thrust::iterator_difference<InputIterator1>::type difference_type;
difference_type input_size = thrust::distance(first1, last1);
difference_type output_size = thrust::reduce(first1, last1);
// scan the counts to obtain output offsets for each input element
thrust::device_vector<difference_type> output_offsets(input_size, 0);
thrust::exclusive_scan(first1, last1, output_offsets.begin());
// scatter the nonzero counts into their corresponding output positions
thrust::device_vector<difference_type> output_indices(output_size, 0);
thrust::scatter_if(thrust::counting_iterator<difference_type>(0),
thrust::counting_iterator<difference_type>(input_size),
output_offsets.begin(), first1, output_indices.begin());
// compute max-scan over the output indices, filling in the holes
thrust::inclusive_scan(output_indices.begin(), output_indices.end(),
output_indices.begin(), thrust::maximum<difference_type>());
// gather input values according to index array (output = first2[output_indices])
OutputIterator output_end = output;
thrust::advance(output_end, output_size);
thrust::gather(output_indices.begin(), output_indices.end(), first2,
output);
// return output + output_size
thrust::advance(output, output_size);
return output;
}
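// Example usage (assumed, following the semantics documented above): with
// counts = {2, 0, 3} and values = {A, B, C}, the caller first sizes the output range to
// thrust::reduce(counts.begin(), counts.end()) == 5 elements and then calls
// expand(counts.begin(), counts.end(), values.begin(), out.begin());
// which fills out with {A, A, C, C, C}.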
SceNodes::SceNodes() {
readDomainPara();
}
void SceNodes::readDomainPara() {
domainPara.minX = globalConfigVars.getConfigValue("DOMAIN_XMIN").toDouble();
domainPara.maxX = globalConfigVars.getConfigValue("DOMAIN_XMAX").toDouble();
domainPara.minY = globalConfigVars.getConfigValue("DOMAIN_YMIN").toDouble();
domainPara.maxY = globalConfigVars.getConfigValue("DOMAIN_YMAX").toDouble();
//domainPara.minZ = globalConfigVars.getConfigValue("DOMAIN_ZMIN").toDouble();
//domainPara.maxZ = globalConfigVars.getConfigValue("DOMAIN_ZMAX").toDouble();
domainPara.gridSpacing = getMaxEffectiveRange();
domainPara.XBucketSize = (domainPara.maxX - domainPara.minX)
/ domainPara.gridSpacing + 1;
domainPara.YBucketSize = (domainPara.maxY - domainPara.minY)
/ domainPara.gridSpacing + 1;
//domainPara.ZBucketSize = (domainPara.maxZ - domainPara.minZ)
// / domainPara.gridSpacing + 1;
}
void SceNodes::readMechPara() {
double U0 =
globalConfigVars.getConfigValue("InterCell_U0_Original").toDouble()
/ globalConfigVars.getConfigValue("InterCell_U0_DivFactor").toDouble();
double V0 =
globalConfigVars.getConfigValue("InterCell_V0_Original").toDouble()
/ globalConfigVars.getConfigValue("InterCell_V0_DivFactor").toDouble();
double k1 =
globalConfigVars.getConfigValue("InterCell_k1_Original").toDouble()
/ globalConfigVars.getConfigValue("InterCell_k1_DivFactor").toDouble();
double k2 =
globalConfigVars.getConfigValue("InterCell_k2_Original").toDouble()
/ globalConfigVars.getConfigValue("InterCell_k2_DivFactor").toDouble();
mechPara.sceInterParaCPU[0] = U0;
mechPara.sceInterParaCPU[1] = V0;
mechPara.sceInterParaCPU[2] = k1;
mechPara.sceInterParaCPU[3] = k2;
double interLinkEffectiveRange;
if (controlPara.simuType != Disc_M) {
interLinkEffectiveRange = globalConfigVars.getConfigValue(
"InterCellLinkEffectRange").toDouble();
mechPara.sceInterParaCPU[4] = interLinkEffectiveRange;
}
double U0_Intra =
globalConfigVars.getConfigValue("IntraCell_U0_Original").toDouble()
/ globalConfigVars.getConfigValue("IntraCell_U0_DivFactor").toDouble();
double V0_Intra =
globalConfigVars.getConfigValue("IntraCell_V0_Original").toDouble()
/ globalConfigVars.getConfigValue("IntraCell_V0_DivFactor").toDouble();
double k1_Intra =
globalConfigVars.getConfigValue("IntraCell_k1_Original").toDouble()
/ globalConfigVars.getConfigValue("IntraCell_k1_DivFactor").toDouble();
double k2_Intra =
globalConfigVars.getConfigValue("IntraCell_k2_Original").toDouble()
/ globalConfigVars.getConfigValue("IntraCell_k2_DivFactor").toDouble();
mechPara.sceIntraParaCPU[0] = U0_Intra;
mechPara.sceIntraParaCPU[1] = V0_Intra;
mechPara.sceIntraParaCPU[2] = k1_Intra;
mechPara.sceIntraParaCPU[3] = k2_Intra;
double intraLinkEffectiveRange;
if (controlPara.simuType != Disc_M) {
intraLinkEffectiveRange = globalConfigVars.getConfigValue(
"IntraCellLinkEffectRange").toDouble();
mechPara.sceIntraParaCPU[4] = intraLinkEffectiveRange;
}
if (controlPara.simuType == Disc) {
double U0_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_U0_Original").toDouble()
/ globalConfigVars.getConfigValue(
"IntraCell_U0_Div_DivFactor").toDouble();
double V0_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_V0_Original").toDouble()
/ globalConfigVars.getConfigValue(
"IntraCell_V0_Div_DivFactor").toDouble();
double k1_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_k1_Original").toDouble()
/ globalConfigVars.getConfigValue(
"IntraCell_k1_Div_DivFactor").toDouble();
double k2_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_k2_Original").toDouble()
/ globalConfigVars.getConfigValue(
"IntraCell_k2_Div_DivFactor").toDouble();
double growthProgressThreshold = globalConfigVars.getConfigValue(
"GrowthProgressThreshold").toDouble();
mechPara.sceIntraParaDivCPU[0] = U0_Intra_Div;
mechPara.sceIntraParaDivCPU[1] = V0_Intra_Div;
mechPara.sceIntraParaDivCPU[2] = k1_Intra_Div;
mechPara.sceIntraParaDivCPU[3] = k2_Intra_Div;
mechPara.sceIntraParaDivCPU[4] = growthProgressThreshold;
}
}
SceNodes::SceNodes(uint totalBdryNodeCount, uint maxProfileNodeCount,
uint maxCartNodeCount, uint maxTotalECMCount, uint maxNodeInECM,
uint maxTotalCellCount, uint maxNodeInCell, bool isStab) {
initControlPara(isStab);
readDomainPara();
uint maxTotalNodeCount;
if (controlPara.simuType != Disc_M) {
initNodeAllocPara(totalBdryNodeCount, maxProfileNodeCount,
maxCartNodeCount, maxTotalECMCount, maxNodeInECM,
maxTotalCellCount, maxNodeInCell);
maxTotalNodeCount = totalBdryNodeCount + maxProfileNodeCount
+ maxCartNodeCount + allocPara.maxTotalECMNodeCount
+ allocPara.maxTotalCellNodeCount;
} else {
uint maxEpiNodeCount = globalConfigVars.getConfigValue(
"MaxEpiNodeCountPerCell").toInt();
uint maxInternalNodeCount = globalConfigVars.getConfigValue(
"MaxAllNodeCountPerCell").toInt() - maxEpiNodeCount;
initNodeAllocPara_M(totalBdryNodeCount, maxTotalCellCount,
maxEpiNodeCount, maxInternalNodeCount);
maxTotalNodeCount = allocPara_M.maxTotalNodeCount;
}
allocSpaceForNodes(maxTotalNodeCount);
thrust::host_vector<SceNodeType> hostTmpVector(maxTotalNodeCount);
thrust::host_vector<bool> hostTmpVector2(maxTotalNodeCount);
thrust::host_vector<int> hostTmpVector3(maxTotalNodeCount);
if (controlPara.simuType != Disc_M) {
for (int i = 0; i < maxTotalNodeCount; i++) {
if (i < allocPara.startPosProfile) {
hostTmpVector[i] = Boundary;
hostTmpVector3[i] = 0;
} else if (i < allocPara.startPosCart) {
hostTmpVector[i] = Profile;
hostTmpVector3[i] = 0;
} else if (i < allocPara.startPosECM) {
hostTmpVector[i] = Cart;
hostTmpVector3[i] = 0;
} else if (i < allocPara.startPosCells) {
hostTmpVector[i] = ECM;
hostTmpVector3[i] = (i - allocPara.startPosECM)
/ allocPara.maxNodePerECM;
} else {
// all initialized as FNM
hostTmpVector[i] = FNM;
hostTmpVector3[i] = (i - allocPara.startPosCells)
/ allocPara.maxNodeOfOneCell;
}
hostTmpVector2[i] = false;
}
} else {
for (uint i = 0; i < maxTotalNodeCount; i++) {
if (i < allocPara_M.bdryNodeCount) {
hostTmpVector[i] = Boundary;
hostTmpVector3[i] = 0;
} else {
uint tmp = i - allocPara_M.bdryNodeCount;
uint cellRank = tmp / allocPara_M.bdryNodeCount;
uint nodeRank = tmp % allocPara_M.bdryNodeCount;
if (nodeRank < allocPara_M.maxMembrNodePerCell) {
hostTmpVector[i] = CellMembr;
} else {
hostTmpVector[i] = CellIntnl;
}
hostTmpVector3[i] = cellRank;
}
hostTmpVector2[i] = false;
}
}
infoVecs.nodeCellType = hostTmpVector;
infoVecs.nodeIsActive = hostTmpVector2;
infoVecs.nodeCellRank = hostTmpVector3;
std::cout << " I am in SceNodes constructor with long input which includes copyParaToGPUConstMem function " << endl ;
copyParaToGPUConstMem();
}
SceNodes::SceNodes(uint maxTotalCellCount, uint maxAllNodePerCell) {
//initControlPara (isStab);
int simuTypeConfigValue =
globalConfigVars.getConfigValue("SimulationType").toInt();
controlPara.simuType = parseTypeFromConfig(simuTypeConfigValue);
readDomainPara();
uint maxTotalNodeCount = maxTotalCellCount * maxAllNodePerCell;
uint maxMembrNodeCountPerCell = globalConfigVars.getConfigValue(
"MaxMembrNodeCountPerCell").toInt();
uint maxIntnlNodeCountPerCell = globalConfigVars.getConfigValue(
"MaxIntnlNodeCountPerCell").toInt();
initNodeAllocPara_M(0, maxTotalCellCount, maxMembrNodeCountPerCell,
maxIntnlNodeCountPerCell);
std::cout << " Number of boundary nodes = " << allocPara_M.bdryNodeCount
<< std::endl;
std::cout << " Max number of cells in domain = "
<< allocPara_M.maxCellCount << std::endl;
std::cout << " Max all nodes per cell = "
<< allocPara_M.maxAllNodePerCell << std::endl;
std::cout << " Max membrane node per cell= "
<< allocPara_M.maxMembrNodePerCell << std::endl;
std::cout << " Max internal node per cell= "
<< allocPara_M.maxIntnlNodePerCell << std::endl;
std::cout << " Max total number of nodes in domain = "
<< allocPara_M.maxTotalNodeCount << std::endl;
allocSpaceForNodes(maxTotalNodeCount);
thrust::host_vector<SceNodeType> hostTmpVector(maxTotalNodeCount);
thrust::host_vector<bool> hostTmpVector2(maxTotalNodeCount);
uint nodeRank;
for (uint i = 0; i < maxTotalNodeCount; i++) {
if (i < allocPara_M.bdryNodeCount) {
hostTmpVector[i] = Boundary;
} else {
uint tmp = i - allocPara_M.bdryNodeCount;
nodeRank = tmp % allocPara_M.maxAllNodePerCell;
if (nodeRank < allocPara_M.maxMembrNodePerCell) {
hostTmpVector[i] = CellMembr;
//std::cout << "0";
} else {
hostTmpVector[i] = CellIntnl;
//std::cout << "1";
}
}
hostTmpVector2[i] = false;
if (nodeRank == 0) {
//std::cout << std::endl;
}
}
//std::cout << "finished" << std::endl;
//std::cout.flush();
infoVecs.nodeCellType = hostTmpVector;
infoVecs.nodeIsActive = hostTmpVector2;
thrust::host_vector<int> bondVec(maxTotalNodeCount, -1);
infoVecs.nodeAdhereIndex = bondVec;
infoVecs.membrIntnlIndex = bondVec;
infoVecs.nodeAdhIndxHostCopy = bondVec;
//std::cout << "copy finished!" << std::endl;
//std::cout.flush();
copyParaToGPUConstMem_M();
std::cout << " I am in SceNodes constructor with short input which includes copyParaToGPUConstMem_M function " << endl ;
//std::cout << "at the end" << std::endl;
//std::cout.flush();
}
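// Copies the SCE (subcellular element) potential parameters and node-layout
// offsets read on the host into GPU __constant__ memory, so the force kernels
// below can access them without extra global-memory traffic.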
void SceNodes::copyParaToGPUConstMem() {
readMechPara();
hipMemcpyToSymbol(sceInterPara, mechPara.sceInterParaCPU,
5 * sizeof(double));
hipMemcpyToSymbol(sceIntraPara, mechPara.sceIntraParaCPU,
5 * sizeof(double));
hipMemcpyToSymbol(sceIntraParaDiv, mechPara.sceIntraParaDivCPU,
5 * sizeof(double));
hipMemcpyToSymbol(ProfilebeginPos, &allocPara.startPosProfile,
sizeof(uint));
hipMemcpyToSymbol(ECMbeginPos, &allocPara.startPosECM, sizeof(uint));
hipMemcpyToSymbol(cellNodeBeginPos, &allocPara.startPosCells,
sizeof(uint));
hipMemcpyToSymbol(nodeCountPerECM, &allocPara.maxNodePerECM, sizeof(uint));
hipMemcpyToSymbol(nodeCountPerCell, &allocPara.maxNodeOfOneCell,
sizeof(uint));
hipMemcpyToSymbol(sceCartPara, mechPara.sceCartParaCPU,
5 * sizeof(double));
hipMemcpyToSymbol(sceProfilePara, mechPara.sceProfileParaCPU,
7 * sizeof(double));
hipMemcpyToSymbol(sceInterDiffPara, mechPara.sceInterDiffParaCPU,
5 * sizeof(double));
hipMemcpyToSymbol(sceECMPara, mechPara.sceECMParaCPU, 5 * sizeof(double));
}
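// Same idea as copyParaToGPUConstMem(), but for the membrane/internal-node
// (Disc_M) model: adhesion bond parameters, the mitotic bond stiffness, the
// growth-progress criterion, and the inter/intra SCE parameter sets
// (including the optional Lennard-Jones inter-membrane interaction).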
void SceNodes::copyParaToGPUConstMem_M() {
readParas_M();
hipMemcpyToSymbol(cellNodeBeginPos_M, &allocPara_M.bdryNodeCount,
sizeof(uint));
hipMemcpyToSymbol(allNodeCountPerCell_M, &allocPara_M.maxAllNodePerCell,
sizeof(uint));
hipMemcpyToSymbol(membrThreshold_M, &allocPara_M.maxMembrNodePerCell,
sizeof(uint));
hipMemcpyToSymbol(bondAdhCriLen_M, &mechPara_M.bondAdhCriLenCPU_M,
sizeof(double));
hipMemcpyToSymbol(bondStiff_M, &mechPara_M.bondStiffCPU_M, sizeof(double));
hipMemcpyToSymbol(bondStiff_Mitotic, &mechPara_M.bondStiffCPU_Mitotic, sizeof(double));//Ali June 16
hipMemcpyToSymbol(growthPrgrCriVal_M, &mechPara_M.growthPrgrCriValCPU_M,
sizeof(double));
hipMemcpyToSymbol(maxAdhBondLen_M, &mechPara_M.maxAdhBondLenCPU_M,
sizeof(double));
hipMemcpyToSymbol(minAdhBondLen_M, &mechPara_M.minAdhBondLenCPU_M,
sizeof(double));
hipMemcpyToSymbol(sceInterBPara_M, mechPara_M.sceInterBParaCPU_M,
5 * sizeof(double));
hipMemcpyToSymbol(sceInterBPara_Jones_On_M, &mechPara_M.sceInterBParaCPU_Jones_On_M,
sizeof(int)); //Ali
hipMemcpyToSymbol(sceInterBPara_Jones_M, mechPara_M.sceInterBParaCPU_Jones_M,
3 * sizeof(double)); //Ali
hipMemcpyToSymbol(sceIntnlBPara_M, mechPara_M.sceIntnlBParaCPU_M,
5 * sizeof(double));
hipMemcpyToSymbol(sceIntraPara_M, mechPara_M.sceIntraParaCPU_M,
5 * sizeof(double));
hipMemcpyToSymbol(sceIntraParaDiv_M, mechPara_M.sceIntraParaDivCPU_M,
5 * sizeof(double));
}
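// Sets up the uniform bucket grid used for neighbor search. Each axis is
// divided into buckets of width domainBucketSize; e.g. a domain of [0, 50]
// with spacing 2 gives (50 - 0) / 2 + 1 = 26 buckets per axis. keyBegin and
// keyEnd later hold, for every bucket, the range of sorted entries that
// belong to it (see findBucketBounds).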
void SceNodes::initDimension(double domainMinX, double domainMaxX,
double domainMinY, double domainMaxY, double domainBucketSize) {
domainPara.minX = domainMinX;
domainPara.maxX = domainMaxX;
domainPara.minY = domainMinY;
domainPara.maxY = domainMaxY;
domainPara.gridSpacing = domainBucketSize;
domainPara.XBucketSize = (domainPara.maxX - domainPara.minX)
/ domainPara.gridSpacing + 1;
domainPara.YBucketSize = (domainPara.maxY - domainPara.minY)
/ domainPara.gridSpacing + 1;
domainPara.totalBucketCount = domainPara.XBucketSize
* domainPara.YBucketSize;
auxVecs.keyBegin.resize(domainPara.totalBucketCount);
auxVecs.keyEnd.resize(domainPara.totalBucketCount);
}
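// CPU-side pass over the bucket data structure: for every node, walk the
// entries of its (neighbor-extended) bucket range and collect each unordered
// candidate pair exactly once (node1 < node2). Used here for animation
// output (see obtainAnimationData below), not for the force computation.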
std::vector<std::pair<uint, uint> > SceNodes::obtainPossibleNeighborPairs() {
std::vector<std::pair<uint, uint> > result;
thrust::host_vector<uint> keyBeginCPU = auxVecs.keyBegin;
thrust::host_vector<uint> keyEndCPU = auxVecs.keyEnd;
thrust::host_vector<uint> bucketKeysCPU = auxVecs.bucketKeys;
thrust::host_vector<uint> bucketValuesCPU = auxVecs.bucketValues;
thrust::host_vector<uint> bucketValuesExtendedCPU =
auxVecs.bucketValuesIncludingNeighbor;
uint iterationCounter = 0;
int size = bucketKeysCPU.size();
for (int i = 0; i < size; i++) {
for (int j = keyBeginCPU[bucketKeysCPU[i]];
j < keyEndCPU[bucketKeysCPU[i]]; j++) {
int node1 = bucketValuesCPU[i];
int node2 = bucketValuesExtendedCPU[j];
if (node1 >= node2) {
continue;
} else {
result.push_back(std::make_pair<uint, uint>(node1, node2));
}
iterationCounter++;
}
}
return result;
}
void SceNodes::readParas_M() {
//////////////////////
//// Block 1 /////////
//////////////////////
double U0_InterB =
globalConfigVars.getConfigValue("SceInterB_U0").toDouble();
double V0_InterB =
globalConfigVars.getConfigValue("SceInterB_V0").toDouble();
double k1_InterB =
globalConfigVars.getConfigValue("SceInterB_k1").toDouble();
double k2_InterB =
globalConfigVars.getConfigValue("SceInterB_k2").toDouble();
double interBEffectiveRange = globalConfigVars.getConfigValue(
"InterBEffectiveRange").toDouble();
mechPara_M.sceInterBParaCPU_M[0] = U0_InterB;
mechPara_M.sceInterBParaCPU_M[1] = V0_InterB;
mechPara_M.sceInterBParaCPU_M[2] = k1_InterB;
mechPara_M.sceInterBParaCPU_M[3] = k2_InterB;
mechPara_M.sceInterBParaCPU_M[4] = interBEffectiveRange;
//Ali
//////////////////////
//// Block 1.5 /////////
//////////////////////
	int On_InterB_Jones =
			globalConfigVars.getConfigValue("SceInterB_Jones_On").toInt();
double eps_InterB_Jones =
globalConfigVars.getConfigValue("SceInterB_Jones_eps").toDouble();
double sig_InterB_Jones =
globalConfigVars.getConfigValue("SceInterB_Jones_sig").toDouble();
double interBEffectiveRange_Jones = globalConfigVars.getConfigValue(
"InterBEffectiveRange_Jones").toDouble();
mechPara_M.sceInterBParaCPU_Jones_On_M = On_InterB_Jones;
mechPara_M.sceInterBParaCPU_Jones_M[0] = eps_InterB_Jones;
mechPara_M.sceInterBParaCPU_Jones_M[1] = sig_InterB_Jones;
mechPara_M.sceInterBParaCPU_Jones_M[2] = interBEffectiveRange_Jones;
//Ali
//////////////////////
//// Block 2 /////////
//////////////////////
double U0_IntnlB =
globalConfigVars.getConfigValue("SceIntnlB_U0").toDouble();
double V0_IntnlB =
globalConfigVars.getConfigValue("SceIntnlB_V0").toDouble();
double k1_IntnlB =
globalConfigVars.getConfigValue("SceIntnlB_k1").toDouble();
double k2_IntnlB =
globalConfigVars.getConfigValue("SceIntnlB_k2").toDouble();
double intnlBEffectiveRange = globalConfigVars.getConfigValue(
"IntnlBEffectRange").toDouble();
mechPara_M.sceIntnlBParaCPU_M[0] = U0_IntnlB;
mechPara_M.sceIntnlBParaCPU_M[1] = V0_IntnlB;
mechPara_M.sceIntnlBParaCPU_M[2] = k1_IntnlB;
mechPara_M.sceIntnlBParaCPU_M[3] = k2_IntnlB;
mechPara_M.sceIntnlBParaCPU_M[4] = intnlBEffectiveRange;
//////////////////////
//// Block 3 /////////
//////////////////////
double U0_Intra =
globalConfigVars.getConfigValue("IntraCell_U0").toDouble();
double V0_Intra =
globalConfigVars.getConfigValue("IntraCell_V0").toDouble();
double k1_Intra =
globalConfigVars.getConfigValue("IntraCell_k1").toDouble();
double k2_Intra =
globalConfigVars.getConfigValue("IntraCell_k2").toDouble();
double intraLinkEffectiveRange = globalConfigVars.getConfigValue(
"IntraEffectRange").toDouble();
mechPara_M.sceIntraParaCPU_M[0] = U0_Intra;
mechPara_M.sceIntraParaCPU_M[1] = V0_Intra;
mechPara_M.sceIntraParaCPU_M[2] = k1_Intra;
mechPara_M.sceIntraParaCPU_M[3] = k2_Intra;
mechPara_M.sceIntraParaCPU_M[4] = intraLinkEffectiveRange;
//////////////////////
//// Block 4 /////////
//////////////////////
double U0_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_U0_Div").toDouble();
double V0_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_V0_Div").toDouble();
double k1_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_k1_Div").toDouble();
double k2_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_k2_Div").toDouble();
double intraDivEffectiveRange = globalConfigVars.getConfigValue(
"IntraDivEffectRange").toDouble();
mechPara_M.sceIntraParaDivCPU_M[0] = U0_Intra_Div;
mechPara_M.sceIntraParaDivCPU_M[1] = V0_Intra_Div;
mechPara_M.sceIntraParaDivCPU_M[2] = k1_Intra_Div;
mechPara_M.sceIntraParaDivCPU_M[3] = k2_Intra_Div;
mechPara_M.sceIntraParaDivCPU_M[4] = intraDivEffectiveRange;
//////////////////////
//// Block 5 /////////
//////////////////////
double bondAdhCriLen =
globalConfigVars.getConfigValue("BondAdhCriLen").toDouble();
mechPara_M.bondAdhCriLenCPU_M = bondAdhCriLen;
double bondStiff = globalConfigVars.getConfigValue("BondStiff").toDouble();
mechPara_M.bondStiffCPU_M = bondStiff;
//Ali June 16
double bondStiff_Mitotic = globalConfigVars.getConfigValue("BondStiff_Mitotic").toDouble();
mechPara_M.bondStiffCPU_Mitotic = bondStiff_Mitotic;
double growthPrgrCriVal = globalConfigVars.getConfigValue(
"GrowthPrgrCriVal").toDouble();
mechPara_M.growthPrgrCriValCPU_M = growthPrgrCriVal;
double maxAdhBondLen =
globalConfigVars.getConfigValue("MaxAdhBondLen").toDouble();
mechPara_M.maxAdhBondLenCPU_M = maxAdhBondLen;
double minAdhBondLen =
globalConfigVars.getConfigValue("MinAdhBondLen").toDouble();
mechPara_M.minAdhBondLenCPU_M = minAdhBondLen;
}
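// Cheap sanity check: a NaN anywhere in the active X coordinates propagates
// through the reduction, so a NaN sum means at least one node position has
// been corrupted.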
void SceNodes::debugNAN() {
uint totalActiveNodeC = allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
double res = thrust::reduce(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocX.begin() + totalActiveNodeC);
if (isnan(res)) {
std::cout << "fatal error! NAN found" << std::endl;
std::cout.flush();
exit(0);
}
}
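// Disc_M variant of the pair collection above; it additionally decodes cell
// and node ranks from the global node index and keeps only pairs of internal
// nodes that belong to the same cell.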
std::vector<std::pair<uint, uint> > SceNodes::obtainPossibleNeighborPairs_M() {
std::vector<std::pair<uint, uint> > result;
thrust::host_vector<uint> keyBeginCPU = auxVecs.keyBegin;
thrust::host_vector<uint> keyEndCPU = auxVecs.keyEnd;
thrust::host_vector<uint> bucketKeysCPU = auxVecs.bucketKeys;
thrust::host_vector<uint> bucketValuesCPU = auxVecs.bucketValues;
thrust::host_vector<uint> bucketValuesExtendedCPU =
auxVecs.bucketValuesIncludingNeighbor;
uint iterationCounter = 0;
uint maxNodePerCell = allocPara_M.maxAllNodePerCell;
uint offSet = allocPara_M.bdryNodeCount;
uint memThreshold = allocPara_M.maxMembrNodePerCell;
int size = bucketKeysCPU.size();
int node1, node2, cellRank1, cellRank2, nodeRank1, nodeRank2;
for (int i = 0; i < size; i++) {
for (int j = keyBeginCPU[bucketKeysCPU[i]];
j < keyEndCPU[bucketKeysCPU[i]]; j++) {
node1 = bucketValuesCPU[i];
node2 = bucketValuesExtendedCPU[j];
if (node1 >= node2) {
continue;
} else {
cellRank1 = (node1 - offSet) / maxNodePerCell;
nodeRank1 = (node1 - offSet) % maxNodePerCell;
cellRank2 = (node2 - offSet) / maxNodePerCell;
nodeRank2 = (node2 - offSet) % maxNodePerCell;
if (nodeRank1 >= memThreshold && nodeRank2 >= memThreshold
&& cellRank1 == cellRank2) {
result.push_back(std::make_pair<uint, uint>(node1, node2));
}
}
iterationCounter++;
}
}
return result;
}
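// Copies the initial node positions segment by segment into the global node
// arrays, following the fixed memory layout: boundary, profile, Cart, ECM,
// then FNM cells immediately followed by MX cells.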
void SceNodes::initValues(std::vector<CVector>& initBdryCellNodePos,
std::vector<CVector>& initProfileNodePos,
std::vector<CVector>& initCartNodePos,
std::vector<CVector>& initECMNodePos,
std::vector<CVector>& initFNMCellNodePos,
std::vector<CVector>& initMXCellNodePos) {
uint FNMNodeCount = initFNMCellNodePos.size();
uint MXNodeCount = initMXCellNodePos.size();
uint beginAddressOfProfile = allocPara.startPosProfile;
uint beginAddressOfCart = allocPara.startPosCart;
	// find the beginning position of ECM.
uint beginAddressOfECM = allocPara.startPosECM;
	// find the beginning position of FNM cells.
uint beginAddressOfFNM = allocPara.startPosCells;
	// find the beginning position of MX cells.
uint beginAddressOfMX = beginAddressOfFNM + FNMNodeCount;
std::vector<double> initBdryCellNodePosX = getArrayXComp(
initBdryCellNodePos);
thrust::copy(initBdryCellNodePosX.begin(), initBdryCellNodePosX.end(),
infoVecs.nodeLocX.begin());
std::vector<double> initBdryCellNodePosY = getArrayYComp(
initBdryCellNodePos);
thrust::copy(initBdryCellNodePosY.begin(), initBdryCellNodePosY.end(),
infoVecs.nodeLocY.begin());
// copy x and y position of nodes of Profile to actual node position.
std::vector<double> initProfileNodePosX = getArrayXComp(initProfileNodePos);
thrust::copy(initProfileNodePosX.begin(), initProfileNodePosX.end(),
infoVecs.nodeLocX.begin() + beginAddressOfProfile);
std::vector<double> initProfileNodePosY = getArrayYComp(initProfileNodePos);
thrust::copy(initProfileNodePosY.begin(), initProfileNodePosY.end(),
infoVecs.nodeLocY.begin() + beginAddressOfProfile);
	// copy x and y position of nodes of Cart to actual node position.
std::vector<double> initCartNodePosX = getArrayXComp(initCartNodePos);
thrust::copy(initCartNodePosX.begin(), initCartNodePosX.end(),
infoVecs.nodeLocX.begin() + beginAddressOfCart);
std::vector<double> initCartNodePosY = getArrayYComp(initCartNodePos);
thrust::copy(initCartNodePosY.begin(), initCartNodePosY.end(),
infoVecs.nodeLocY.begin() + beginAddressOfCart);
// copy x and y position of nodes of ECM to actual node position.
std::vector<double> initECMNodePosX = getArrayXComp(initECMNodePos);
thrust::copy(initECMNodePosX.begin(), initECMNodePosX.end(),
infoVecs.nodeLocX.begin() + beginAddressOfECM);
std::vector<double> initECMNodePosY = getArrayYComp(initECMNodePos);
thrust::copy(initECMNodePosY.begin(), initECMNodePosY.end(),
infoVecs.nodeLocY.begin() + beginAddressOfECM);
for (int i = 0; i < initECMNodePosX.size(); i++) {
assert(infoVecs.nodeLocX[i + beginAddressOfECM] == initECMNodePosX[i]);
assert(!isnan(initECMNodePosX[i]));
}
// copy x and y position of nodes of FNM cells to actual node position.
std::vector<double> initFNMCellNodePosX = getArrayXComp(initFNMCellNodePos);
thrust::copy(initFNMCellNodePosX.begin(), initFNMCellNodePosX.end(),
infoVecs.nodeLocX.begin() + beginAddressOfFNM);
std::vector<double> initFNMCellNodePosY = getArrayYComp(initFNMCellNodePos);
thrust::copy(initFNMCellNodePosY.begin(), initFNMCellNodePosY.end(),
infoVecs.nodeLocY.begin() + beginAddressOfFNM);
thrust::fill(infoVecs.nodeCellType.begin() + beginAddressOfFNM,
infoVecs.nodeCellType.begin() + beginAddressOfMX, FNM);
// copy x and y position of nodes of MX cells to actual node position.
std::vector<double> initMXCellNodePosX = getArrayXComp(initMXCellNodePos);
thrust::copy(initMXCellNodePosX.begin(), initMXCellNodePosX.end(),
infoVecs.nodeLocX.begin() + beginAddressOfMX);
std::vector<double> initMXCellNodePosY = getArrayYComp(initMXCellNodePos);
thrust::copy(initMXCellNodePosY.begin(), initMXCellNodePosY.end(),
infoVecs.nodeLocY.begin() + beginAddressOfMX);
thrust::fill(infoVecs.nodeCellType.begin() + beginAddressOfMX,
infoVecs.nodeCellType.begin() + beginAddressOfMX + MXNodeCount, MX);
}
void SceNodes::initValues_M(std::vector<bool>& initIsActive,
std::vector<CVector>& initCellNodePos,
std::vector<SceNodeType>& nodeTypes) {
std::vector<double> initCellNodePosX = getArrayXComp(initCellNodePos);
std::vector<double> initCellNodePosY = getArrayYComp(initCellNodePos);
thrust::copy(initCellNodePosX.begin(), initCellNodePosX.end(),
infoVecs.nodeLocX.begin() + allocPara_M.bdryNodeCount);
thrust::copy(initCellNodePosY.begin(), initCellNodePosY.end(),
infoVecs.nodeLocY.begin() + allocPara_M.bdryNodeCount);
thrust::copy(nodeTypes.begin(), nodeTypes.end(),
infoVecs.nodeCellType.begin() + allocPara_M.bdryNodeCount);
thrust::copy(initIsActive.begin(), initIsActive.end(),
infoVecs.nodeIsActive.begin() + allocPara_M.bdryNodeCount);
}
VtkAnimationData SceNodes::obtainAnimationData(AnimationCriteria aniCri) {
VtkAnimationData vtkData;
std::vector<std::pair<uint, uint> > pairs = obtainPossibleNeighborPairs();
cout << "size of potential pairs = " << pairs.size() << endl;
	// unordered_map would be more efficient than map, but it is a C++11 feature
	// and C++11 seems to be incompatible with Thrust.
	IndexMap locIndexToAniIndexMap;
	// We don't need to copy the entire nodeLocX array;
	// copying only the first half would be sufficient.
thrust::host_vector<double> hostTmpVectorLocX = infoVecs.nodeLocX;
thrust::host_vector<double> hostTmpVectorLocY = infoVecs.nodeLocY;
thrust::host_vector<double> hostTmpVectorLocZ = infoVecs.nodeLocZ;
thrust::host_vector<double> hostTmpVectorForceX;
thrust::host_vector<double> hostTmpVectorForceY;
thrust::host_vector<double> hostTmpVectorForceZ;
thrust::host_vector<double> hostTmpVectorVelVal;
assert(hostTmpVectorLocX.size() == hostTmpVectorLocY.size());
assert(hostTmpVectorLocY.size() == hostTmpVectorLocZ.size());
thrust::host_vector<SceNodeType> hostTmpVectorNodeType =
infoVecs.nodeCellType;
thrust::host_vector<uint> hostTmpVectorNodeRank = infoVecs.nodeCellRank;
thrust::host_vector<double> hostTmpVectorNodeStress;
if (aniCri.animationType != CellType) {
hostTmpVectorForceX = infoVecs.nodeInterForceX;
hostTmpVectorForceY = infoVecs.nodeInterForceY;
hostTmpVectorForceZ = infoVecs.nodeInterForceZ;
assert(hostTmpVectorForceX.size() == hostTmpVectorLocX.size());
assert(hostTmpVectorForceX.size() == hostTmpVectorForceY.size());
assert(hostTmpVectorForceX.size() == hostTmpVectorForceZ.size());
uint vecSize = hostTmpVectorForceX.size();
hostTmpVectorVelVal.resize(vecSize);
for (uint i = 0; i < vecSize; i++) {
hostTmpVectorVelVal[i] = sqrt(
hostTmpVectorForceX[i] * hostTmpVectorForceX[i]
+ hostTmpVectorForceY[i] * hostTmpVectorForceY[i]
+ hostTmpVectorForceZ[i] * hostTmpVectorForceZ[i]);
}
}
if (aniCri.animationType == Force) {
vtkData.isArrowIncluded = true;
} else {
vtkData.isArrowIncluded = false;
}
uint curIndex = 0;
for (uint i = 0; i < pairs.size(); i++) {
uint node1Index = pairs[i].first;
uint node2Index = pairs[i].second;
double node1X = hostTmpVectorLocX[node1Index];
double node1Y = hostTmpVectorLocY[node1Index];
double node1Z = hostTmpVectorLocZ[node1Index];
SceNodeType node1T = hostTmpVectorNodeType[node1Index];
uint node1R = hostTmpVectorNodeRank[node1Index];
double node2X = hostTmpVectorLocX[node2Index];
double node2Y = hostTmpVectorLocY[node2Index];
double node2Z = hostTmpVectorLocZ[node2Index];
SceNodeType node2T = hostTmpVectorNodeType[node2Index];
uint node2R = hostTmpVectorNodeRank[node2Index];
if (aniCri.isPairQualify(node1Index, node2Index, node1X, node1Y, node1Z,
node1T, node1R, node2X, node2Y, node2Z, node2T, node2R)) {
IndexMap::iterator it = locIndexToAniIndexMap.find(pairs[i].first);
if (it == locIndexToAniIndexMap.end()) {
locIndexToAniIndexMap.insert(
std::pair<uint, uint>(pairs[i].first, curIndex));
curIndex++;
PointAniData ptAniData;
if (aniCri.animationType == ForceAbsVal) {
ptAniData.colorScale = hostTmpVectorVelVal[node1Index];
} else if (aniCri.animationType == Force) {
ptAniData.colorScale = hostTmpVectorVelVal[node1Index];
if (hostTmpVectorVelVal[node1Index] > aniCri.threshold) {
ptAniData.dir.x = hostTmpVectorForceX[node1Index]
/ hostTmpVectorVelVal[node1Index]
* aniCri.arrowLength;
ptAniData.dir.y = hostTmpVectorForceY[node1Index]
/ hostTmpVectorVelVal[node1Index]
* aniCri.arrowLength;
ptAniData.dir.z = hostTmpVectorForceZ[node1Index]
/ hostTmpVectorVelVal[node1Index]
* aniCri.arrowLength;
} else {
ptAniData.dir.x = 0;
ptAniData.dir.y = 0;
ptAniData.dir.z = 0;
}
} else {
ptAniData.colorScale = nodeTypeToScale(node1T);
}
ptAniData.pos = CVector(node1X, node1Y, node1Z);
vtkData.pointsAniData.push_back(ptAniData);
}
it = locIndexToAniIndexMap.find(pairs[i].second);
if (it == locIndexToAniIndexMap.end()) {
locIndexToAniIndexMap.insert(
std::pair<uint, uint>(pairs[i].second, curIndex));
curIndex++;
PointAniData ptAniData;
if (aniCri.animationType == ForceAbsVal) {
ptAniData.colorScale = hostTmpVectorVelVal[node2Index];
} else if (aniCri.animationType == Force) {
ptAniData.colorScale = hostTmpVectorVelVal[node2Index];
if (hostTmpVectorVelVal[node2Index] > aniCri.threshold) {
ptAniData.dir.x = hostTmpVectorForceX[node2Index]
/ hostTmpVectorVelVal[node2Index]
* aniCri.arrowLength;
ptAniData.dir.y = hostTmpVectorForceY[node2Index]
/ hostTmpVectorVelVal[node2Index]
* aniCri.arrowLength;
ptAniData.dir.z = hostTmpVectorForceZ[node2Index]
/ hostTmpVectorVelVal[node2Index]
* aniCri.arrowLength;
} else {
ptAniData.dir.x = 0;
ptAniData.dir.y = 0;
ptAniData.dir.z = 0;
}
} else {
ptAniData.colorScale = nodeTypeToScale(node2T);
}
ptAniData.pos = CVector(node2X, node2Y, node2Z);
vtkData.pointsAniData.push_back(ptAniData);
}
it = locIndexToAniIndexMap.find(pairs[i].first);
uint aniIndex1 = it->second;
it = locIndexToAniIndexMap.find(pairs[i].second);
uint aniIndex2 = it->second;
LinkAniData linkData;
linkData.node1Index = aniIndex1;
linkData.node2Index = aniIndex2;
vtkData.linksAniData.push_back(linkData);
}
}
uint profileStartIndex = allocPara.startPosProfile;
uint profileEndIndex = profileStartIndex
+ allocPara.currentActiveProfileNodeCount;
for (uint i = profileStartIndex; i < profileEndIndex; i++) {
PointAniData ptAniData;
ptAniData.pos = CVector(hostTmpVectorLocX[i], hostTmpVectorLocY[i],
hostTmpVectorLocZ[i]);
if (aniCri.animationType == ForceAbsVal) {
ptAniData.colorScale = hostTmpVectorVelVal[i];
} else if (aniCri.animationType == Force) {
ptAniData.colorScale = hostTmpVectorVelVal[i];
if (hostTmpVectorVelVal[i] > aniCri.threshold) {
ptAniData.dir.x = hostTmpVectorForceX[i]
/ hostTmpVectorVelVal[i] * aniCri.arrowLength;
ptAniData.dir.y = hostTmpVectorForceY[i]
/ hostTmpVectorVelVal[i] * aniCri.arrowLength;
ptAniData.dir.z = hostTmpVectorForceZ[i]
/ hostTmpVectorVelVal[i] * aniCri.arrowLength;
}
} else {
ptAniData.colorScale = nodeTypeToScale(hostTmpVectorNodeType[i]);
}
vtkData.pointsAniData.push_back(ptAniData);
LinkAniData linkData;
linkData.node1Index = curIndex;
linkData.node2Index = curIndex + 1;
if (i != profileEndIndex - 1) {
vtkData.linksAniData.push_back(linkData);
}
curIndex++;
}
uint cartStartIndex = allocPara.startPosCart;
uint cartEndIndex = cartStartIndex + allocPara.maxCartNodeCount;
for (uint i = cartStartIndex; i < cartEndIndex; i++) {
bool isActive = infoVecs.nodeIsActive[i];
if (!isActive) {
continue;
}
PointAniData ptAniData;
ptAniData.pos = CVector(hostTmpVectorLocX[i], hostTmpVectorLocY[i],
hostTmpVectorLocZ[i]);
if (aniCri.animationType == ForceAbsVal) {
ptAniData.colorScale = hostTmpVectorVelVal[i];
} else if (aniCri.animationType == Force) {
ptAniData.colorScale = hostTmpVectorVelVal[i];
if (hostTmpVectorVelVal[i] > aniCri.threshold) {
ptAniData.dir.x = hostTmpVectorForceX[i]
/ hostTmpVectorVelVal[i] * aniCri.arrowLength;
ptAniData.dir.y = hostTmpVectorForceY[i]
/ hostTmpVectorVelVal[i] * aniCri.arrowLength;
ptAniData.dir.z = hostTmpVectorForceZ[i]
/ hostTmpVectorVelVal[i] * aniCri.arrowLength;
}
} else {
ptAniData.colorScale = nodeTypeToScale(hostTmpVectorNodeType[i]);
}
vtkData.pointsAniData.push_back(ptAniData);
bool isNextActive;
if (i == cartEndIndex - 1) {
isNextActive = false;
} else {
isNextActive = infoVecs.nodeIsActive[i + 1];
}
if (isNextActive) {
LinkAniData linkData;
linkData.node1Index = curIndex;
linkData.node2Index = curIndex + 1;
vtkData.linksAniData.push_back(linkData);
}
curIndex++;
}
return vtkData;
}
// TODO
VtkAnimationData SceNodes::obtainAnimationData_M(AnimationCriteria aniCri) {
VtkAnimationData vtkData;
std::vector<std::pair<uint, uint> > pairs = obtainPossibleNeighborPairs_M();
cout << "size of potential pairs = " << pairs.size() << endl;
	// unordered_map would be more efficient than map, but it is a C++11 feature
	// and C++11 seems to be incompatible with Thrust.
	IndexMap locIndexToAniIndexMap;
	// We don't need to copy the entire nodeLocX array;
	// copying only the first half would be sufficient.
thrust::host_vector<double> hostTmpVectorLocX = infoVecs.nodeLocX;
thrust::host_vector<double> hostTmpVectorLocY = infoVecs.nodeLocY;
thrust::host_vector<bool> hostIsActiveVec = infoVecs.nodeIsActive;
thrust::host_vector<int> hostBondVec = infoVecs.nodeAdhereIndex;
thrust::host_vector<double> hostMembrTenMag = infoVecs.membrTensionMag;
thrust::host_vector<SceNodeType> hostTmpVectorNodeType =
infoVecs.nodeCellType;
uint activeCellCount = allocPara_M.currentActiveCellCount;
uint maxNodePerCell = allocPara_M.maxAllNodePerCell;
uint maxMemNodePerCell = allocPara_M.maxMembrNodePerCell;
uint beginIndx = allocPara_M.bdryNodeCount;
//uint endIndx = beginIndx + activeCellCount * maxNodePerCell;
//uint cellRank1, nodeRank1, cellRank2, nodeRank2;
uint index1;
int index2;
std::vector<BondInfo> bondInfoVec;
for (uint i = 0; i < activeCellCount; i++) {
for (uint j = 0; j < maxMemNodePerCell; j++) {
index1 = beginIndx + i * maxNodePerCell + j;
if (hostIsActiveVec[index1] == true) {
index2 = hostBondVec[index1];
if (index2 > index1 && index2 != -1) {
BondInfo bond;
bond.cellRank1 = i;
bond.pos1 = CVector(hostTmpVectorLocX[index1],
hostTmpVectorLocY[index1], 0);
bond.cellRank2 = (index2 - beginIndx) / maxNodePerCell;
bond.pos2 = CVector(hostTmpVectorLocX[index2],
hostTmpVectorLocY[index2], 0);
bondInfoVec.push_back(bond);
}
}
}
}
vtkData.bondsInfo = bondInfoVec;
uint curIndex = 0;
for (uint i = 0; i < pairs.size(); i++) {
uint node1Index = pairs[i].first;
uint node2Index = pairs[i].second;
double node1X = hostTmpVectorLocX[node1Index];
double node1Y = hostTmpVectorLocY[node1Index];
double node2X = hostTmpVectorLocX[node2Index];
double node2Y = hostTmpVectorLocY[node2Index];
if (aniCri.isPairQualify_M(node1X, node1Y, node2X, node2Y)) {
IndexMap::iterator it = locIndexToAniIndexMap.find(pairs[i].first);
if (it == locIndexToAniIndexMap.end()) {
locIndexToAniIndexMap.insert(
std::pair<uint, uint>(pairs[i].first, curIndex));
curIndex++;
PointAniData ptAniData;
//ptAniData.colorScale = nodeTypeToScale(
// hostTmpVectorNodeType[node1Index]);
ptAniData.colorScale = -1;
ptAniData.colorScale2 = -1;//AAMIRI
ptAniData.pos = CVector(node1X, node1Y, 0);
vtkData.pointsAniData.push_back(ptAniData);
}
it = locIndexToAniIndexMap.find(pairs[i].second);
if (it == locIndexToAniIndexMap.end()) {
locIndexToAniIndexMap.insert(
std::pair<uint, uint>(pairs[i].second, curIndex));
curIndex++;
PointAniData ptAniData;
//ptAniData.colorScale = nodeTypeToScale(
// hostTmpVectorNodeType[node1Index]);
ptAniData.colorScale = -1;
ptAniData.colorScale2 = -1;//AAMIRI
ptAniData.pos = CVector(node2X, node2Y, 0);
vtkData.pointsAniData.push_back(ptAniData);
}
it = locIndexToAniIndexMap.find(pairs[i].first);
uint aniIndex1 = it->second;
it = locIndexToAniIndexMap.find(pairs[i].second);
uint aniIndex2 = it->second;
LinkAniData linkData;
linkData.node1Index = aniIndex1;
linkData.node2Index = aniIndex2;
vtkData.linksAniData.push_back(linkData);
}
}
return vtkData;
}
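// Computes, for every bucket index b, the half-open range
// [keyBegin[b], keyEnd[b]) of entries in the sorted, neighbor-extended key
// array that carry key b. For example, sorted keys [0,0,1,3,3,3] with four
// buckets give keyBegin = [0,2,3,3] and keyEnd = [2,3,3,6].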
void SceNodes::findBucketBounds() {
thrust::counting_iterator<unsigned int> search_begin(0);
thrust::lower_bound(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.end(), search_begin,
search_begin + domainPara.totalBucketCount,
auxVecs.keyBegin.begin());
thrust::upper_bound(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.end(), search_begin,
search_begin + domainPara.totalBucketCount, auxVecs.keyEnd.begin());
}
void SceNodes::findBucketBounds_M() {
thrust::counting_iterator<uint> search_begin(0);
thrust::lower_bound(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExtProc_M, search_begin,
search_begin + domainPara.totalBucketCount,
auxVecs.keyBegin.begin());
thrust::upper_bound(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExtProc_M, search_begin,
search_begin + domainPara.totalBucketCount, auxVecs.keyEnd.begin());
}
void SceNodes::findBucketBounds3D() {
thrust::counting_iterator<uint> search_begin(0);
thrust::lower_bound(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExtProc_M, search_begin,
search_begin + domainPara.totalBucketCount,
auxVecs.keyBegin.begin());
thrust::upper_bound(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExtProc_M, search_begin,
search_begin + domainPara.totalBucketCount, auxVecs.keyEnd.begin());
}
void SceNodes::prepareSceForceComputation() {
buildBuckets2D();
extendBuckets2D();
findBucketBounds();
}
void SceNodes::prepareSceForceComputation_M() {
buildBuckets2D_M();
extendBuckets2D_M();
findBucketBounds_M();
}
void SceNodes::prepareSceForceComputation3D() {
buildBuckets3D();
extendBuckets3D();
findBucketBounds3D();
}
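// Appends the node data of newly divided cells right after the currently
// active cell region, using a single zip_iterator copy over positions,
// activity flags and cell types, then increases the active cell count.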
void SceNodes::addNewlyDividedCells(
thrust::device_vector<double> &nodeLocXNewCell,
thrust::device_vector<double> &nodeLocYNewCell,
thrust::device_vector<double> &nodeLocZNewCell,
thrust::device_vector<bool> &nodeIsActiveNewCell,
thrust::device_vector<SceNodeType> &nodeCellTypeNewCell) {
// data validation
uint nodesSize = nodeLocXNewCell.size();
assert(nodesSize % allocPara.maxNodeOfOneCell == 0);
uint addCellCount = nodesSize / allocPara.maxNodeOfOneCell;
	// position at which we will add the newly divided cells.
uint shiftStartPosNewCell = allocPara.startPosCells
+ allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell;
thrust::copy(
thrust::make_zip_iterator(
thrust::make_tuple(nodeLocXNewCell.begin(),
nodeLocYNewCell.begin(), nodeLocZNewCell.begin(),
nodeIsActiveNewCell.begin(),
nodeCellTypeNewCell.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(nodeLocXNewCell.end(),
nodeLocYNewCell.end(), nodeLocZNewCell.end(),
nodeIsActiveNewCell.end(),
nodeCellTypeNewCell.end())),
thrust::make_zip_iterator(
thrust::make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(),
infoVecs.nodeCellType.begin()))
+ shiftStartPosNewCell);
// total number of cells has increased.
allocPara.currentActiveCellCount = allocPara.currentActiveCellCount
+ addCellCount;
}
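// Spatial hashing step 1: map every active node to the index of the grid
// bucket containing it (pointToBucketIndex2D), then sort node indices by
// bucket key. Inactive or out-of-range nodes receive the key UINT_MAX and
// are dropped from the end of the sorted arrays.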
void SceNodes::buildBuckets2D() {
int totalActiveNodes;
if (controlPara.simuType != Disc_M) {
totalActiveNodes = allocPara.startPosCells
+ allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell;
} else {
totalActiveNodes = allocPara_M.bdryNodeCount
+ allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
}
auxVecs.bucketKeys.resize(totalActiveNodes);
auxVecs.bucketValues.resize(totalActiveNodes);
thrust::counting_iterator<uint> countingIterBegin(0);
thrust::counting_iterator<uint> countingIterEnd(totalActiveNodes);
// takes counting iterator and coordinates
// return tuple of keys and values
// transform the points to their bucket indices
thrust::transform(
make_zip_iterator(
make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(), countingIterBegin)),
make_zip_iterator(
make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(), countingIterBegin))
+ totalActiveNodes,
make_zip_iterator(
make_tuple(auxVecs.bucketKeys.begin(),
auxVecs.bucketValues.begin())),
pointToBucketIndex2D(domainPara.minX, domainPara.maxX,
domainPara.minY, domainPara.maxY, domainPara.gridSpacing));
// sort the points by their bucket index
thrust::sort_by_key(auxVecs.bucketKeys.begin(), auxVecs.bucketKeys.end(),
auxVecs.bucketValues.begin());
// for those nodes that are inactive, key value of UINT_MAX will be returned.
	// we need to remove those keys along with their values.
int numberOfOutOfRange = thrust::count(auxVecs.bucketKeys.begin(),
auxVecs.bucketKeys.end(), UINT_MAX);
auxVecs.bucketKeys.erase(auxVecs.bucketKeys.end() - numberOfOutOfRange,
auxVecs.bucketKeys.end());
auxVecs.bucketValues.erase(auxVecs.bucketValues.end() - numberOfOutOfRange,
auxVecs.bucketValues.end());
}
void SceNodes::buildBuckets2D_M() {
int totalActiveNodes = allocPara_M.bdryNodeCount
+ allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
thrust::counting_iterator<uint> iBegin(0);
// takes counting iterator and coordinates
// return tuple of keys and values
// transform the points to their bucket indices
thrust::transform(
make_zip_iterator(
make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(), iBegin)),
make_zip_iterator(
make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(), iBegin))
+ totalActiveNodes,
make_zip_iterator(
make_tuple(auxVecs.bucketKeys.begin(),
auxVecs.bucketValues.begin())),
pointToBucketIndex2D(domainPara.minX, domainPara.maxX,
domainPara.minY, domainPara.maxY, domainPara.gridSpacing));
// sort the points by their bucket index
thrust::sort_by_key(auxVecs.bucketKeys.begin(),
auxVecs.bucketKeys.begin() + totalActiveNodes,
auxVecs.bucketValues.begin());
// for those nodes that are inactive, key value of UINT_MAX will be returned.
	// we need to remove those keys along with their values.
int numberOfOutOfRange = thrust::count(auxVecs.bucketKeys.begin(),
auxVecs.bucketKeys.begin() + totalActiveNodes, UINT_MAX);
endIndx_M = totalActiveNodes - numberOfOutOfRange;
}
void SceNodes::buildBuckets3D() {
int totalActiveNodes = allocPara_M.bdryNodeCount
+ allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
thrust::counting_iterator<uint> iBegin(0);
// takes counting iterator and coordinates
// return tuple of keys and values
// transform the points to their bucket indices
thrust::transform(
make_zip_iterator(
make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(), iBegin)),
make_zip_iterator(
make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(), iBegin))
+ totalActiveNodes,
make_zip_iterator(
make_tuple(auxVecs.bucketKeys.begin(),
auxVecs.bucketValues.begin())),
BucketIndexer3D(domainPara.minX, domainPara.maxX, domainPara.minY,
domainPara.maxY, domainPara.minZ, domainPara.maxZ,
domainPara.gridSpacing));
// sort the points by their bucket index
thrust::sort_by_key(auxVecs.bucketKeys.begin(),
auxVecs.bucketKeys.begin() + totalActiveNodes,
auxVecs.bucketValues.begin());
// for those nodes that are inactive, key value of UINT_MAX will be returned.
	// we need to remove those keys along with their values.
int numberOfOutOfRange = thrust::count(auxVecs.bucketKeys.begin(),
auxVecs.bucketKeys.begin() + totalActiveNodes, UINT_MAX);
endIndx_M = totalActiveNodes - numberOfOutOfRange;
}
__device__
double computeDist(double &xPos, double &yPos, double &zPos, double &xPos2,
double &yPos2, double &zPos2) {
return sqrt(
(xPos - xPos2) * (xPos - xPos2) + (yPos - yPos2) * (yPos - yPos2)
+ (zPos - zPos2) * (zPos - zPos2));
}
__device__
double computeDist2D(double &xPos, double &yPos, double &xPos2, double &yPos2) {
return sqrt(
(xPos - xPos2) * (xPos - xPos2) + (yPos - yPos2) * (yPos - yPos2));
}
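// The pairwise interaction helpers below all use the same two-term
// exponential (Morse-like) force law
//   F(r) = -U0/k1 * exp(-r/k1) + V0/k2 * exp(-r/k2),
// cut off beyond an effective range stored in the last slot of each
// parameter array in constant memory; the resulting force is applied along
// the line connecting the two nodes.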
__device__
void calculateAndAddECMForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceECMPara[4]) {
forceValue = 0;
} else {
forceValue = -sceECMPara[0] / sceECMPara[2]
* exp(-linkLength / sceECMPara[2])
+ sceECMPara[1] / sceECMPara[3]
* exp(-linkLength / sceECMPara[3]);
if (forceValue > 0) {
//forceValue = 0;
forceValue = forceValue * 0.3;
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
__device__
void calculateAndAddProfileForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
forceValue = -sceProfilePara[5] * (linkLength - sceProfilePara[6]);
if (linkLength > 1.0e-12) {
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
}
__device__
void calculateAndAddIntraForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue;
if (linkLength > sceIntraPara[4]) {
forceValue = 0;
} else {
forceValue = -sceIntraPara[0] / sceIntraPara[2]
* exp(-linkLength / sceIntraPara[2])
+ sceIntraPara[1] / sceIntraPara[3]
* exp(-linkLength / sceIntraPara[3]);
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
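// Division-aware intra-cell force: when growth progress exceeds the
// criterion, each parameter is blended between the normal and the division
// parameter set according to growth progress (e.g. U0_eff = p*U0_div +
// (1-p)*U0), so the intra-cell potential changes gradually as the cell
// approaches division.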
__device__
void calAndAddIntraForceDiv(double& xPos, double& yPos, double& zPos,
double& xPos2, double& yPos2, double& zPos2, double& growPro,
double& xRes, double& yRes, double& zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue;
if (linkLength > sceIntraPara[4]) {
forceValue = 0;
} else {
if (growPro > sceIntraParaDiv[4]) {
double intraPara0 = growPro * (sceIntraParaDiv[0])
+ (1.0 - growPro) * sceIntraPara[0];
double intraPara1 = growPro * (sceIntraParaDiv[1])
+ (1.0 - growPro) * sceIntraPara[1];
double intraPara2 = growPro * (sceIntraParaDiv[2])
+ (1.0 - growPro) * sceIntraPara[2];
double intraPara3 = growPro * (sceIntraParaDiv[3])
+ (1.0 - growPro) * sceIntraPara[3];
forceValue = -intraPara0 / intraPara2
* exp(-linkLength / intraPara2)
+ intraPara1 / intraPara3 * exp(-linkLength / intraPara3);
} else {
forceValue = -sceIntraPara[0] / sceIntraPara[2]
* exp(-linkLength / sceIntraPara[2])
+ sceIntraPara[1] / sceIntraPara[3]
* exp(-linkLength / sceIntraPara[3]);
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
__device__
void calAndAddIntraDiv_M(double& xPos, double& yPos, double& xPos2,
double& yPos2, double& growPro, double& xRes, double& yRes) {
double linkLength = computeDist2D(xPos, yPos, xPos2, yPos2);
double forceValue;
if (growPro > growthPrgrCriVal_M) {
if (linkLength > sceIntraParaDiv_M[4]) {
forceValue = 0;
} else {
double percent = (growPro - growthPrgrCriVal_M)
/ (1.0 - growthPrgrCriVal_M);
double intraPara0 = percent * (sceIntraParaDiv_M[0])
+ (1.0 - percent) * sceIntraPara_M[0];
double intraPara1 = percent * (sceIntraParaDiv_M[1])
+ (1.0 - percent) * sceIntraPara_M[1];
double intraPara2 = percent * (sceIntraParaDiv_M[2])
+ (1.0 - percent) * sceIntraPara_M[2];
double intraPara3 = percent * (sceIntraParaDiv_M[3])
+ (1.0 - percent) * sceIntraPara_M[3];
forceValue = -intraPara0 / intraPara2
* exp(-linkLength / intraPara2)
+ intraPara1 / intraPara3 * exp(-linkLength / intraPara3);
}
} else {
if (linkLength > sceIntraPara_M[4]) {
forceValue = 0;
} else {
forceValue = -sceIntraPara_M[0] / sceIntraPara_M[2]
* exp(-linkLength / sceIntraPara_M[2])
+ sceIntraPara_M[1] / sceIntraPara_M[3]
* exp(-linkLength / sceIntraPara_M[3]);
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
}
__device__
void calAndAddIntraB_M(double& xPos, double& yPos, double& xPos2, double& yPos2,
double& xRes, double& yRes) {
double linkLength = computeDist2D(xPos, yPos, xPos2, yPos2);
double forceValue;
if (linkLength > sceIntnlBPara_M[4]) {
forceValue = 0;
} else {
forceValue = -sceIntnlBPara_M[0] / sceIntnlBPara_M[2]
* exp(-linkLength / sceIntnlBPara_M[2])
+ sceIntnlBPara_M[1] / sceIntnlBPara_M[3]
* exp(-linkLength / sceIntnlBPara_M[3]);
}
//if (forceValue > 0) {
// forceValue = 0;
//}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
}
__device__
void calAndAddInter_M(double& xPos, double& yPos, double& xPos2, double& yPos2,
double& xRes, double& yRes) {
double linkLength = computeDist2D(xPos, yPos, xPos2, yPos2);
double forceValue;
if (linkLength > sceInterBPara_M[4]) {
forceValue = 0;
} else {
forceValue = -sceInterBPara_M[0] / sceInterBPara_M[2]
* exp(-linkLength / sceInterBPara_M[2])
+ sceInterBPara_M[1] / sceInterBPara_M[3]
* exp(-linkLength / sceInterBPara_M[3]);
if (forceValue > 0) {
forceValue = 0;
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
}
//Ali
__device__
void calAndAddInter_M2(double& xPos, double& yPos, double& xPos2, double& yPos2,
double& xRes, double& yRes) {
double linkLength = computeDist2D(xPos, yPos, xPos2, yPos2);
double forceValue;
if (linkLength > sceInterBPara_Jones_M[2]) {
forceValue = 0;
} else {
		forceValue = 24 * sceInterBPara_Jones_M[0] / linkLength
				* pow(sceInterBPara_Jones_M[1] / linkLength, 6)
				* (1.0 - 2 * pow(sceInterBPara_Jones_M[1] / linkLength, 6));
if (forceValue > 0) {
forceValue = 0;
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
}
//Ali
__device__
void calculateAndAddInterForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceInterPara[4]) {
forceValue = 0;
} else {
forceValue = -sceInterPara[0] / sceInterPara[2]
* exp(-linkLength / sceInterPara[2])
+ sceInterPara[1] / sceInterPara[3]
* exp(-linkLength / sceInterPara[3]);
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
__device__
void calAndAddInterForceDisc(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes, double& interForceX, double& interForceY,
double& interForceZ) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceInterPara[4]) {
forceValue = 0;
} else {
forceValue = -sceInterPara[0] / sceInterPara[2]
* exp(-linkLength / sceInterPara[2])
+ sceInterPara[1] / sceInterPara[3]
* exp(-linkLength / sceInterPara[3]);
}
double fX = forceValue * (xPos2 - xPos) / linkLength;
double fY = forceValue * (yPos2 - yPos) / linkLength;
double fZ = forceValue * (zPos2 - zPos) / linkLength;
xRes = xRes + fX;
yRes = yRes + fY;
zRes = zRes + fZ;
interForceX = interForceX + fX;
interForceY = interForceY + fY;
interForceZ = interForceZ + fZ;
}
__device__
void calculateAndAddCartForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceCartPara[4]) {
forceValue = 0;
} else {
forceValue = -sceCartPara[0] / sceCartPara[2]
* exp(-linkLength / sceCartPara[2])
+ sceCartPara[1] / sceCartPara[3]
* exp(-linkLength / sceCartPara[3]);
if (linkLength > 1.0e-12) {
//double dotProduct = (xPos2 - xPos) / linkLength * cartGrowDirVec[0]
// + (yPos2 - yPos) / linkLength * cartGrowDirVec[1]
// + (zPos2 - zPos) / linkLength * cartGrowDirVec[2];
//forceValue = forceValue * dotProduct;
			// this is just a temporary solution -- the direction should not be fixed.
xRes = xRes - forceValue * cartGrowDirVec[0];
yRes = yRes - forceValue * cartGrowDirVec[1];
zRes = zRes - forceValue * cartGrowDirVec[2];
//xRes = xRes + forceValue * (xPos2 - xPos);
//yRes = yRes + forceValue * (yPos2 - yPos);
//zRes = zRes + forceValue * (zPos2 - zPos);
}
if (forceValue > 0) {
//forceValue = forceValue * 0.01;
forceValue = 0;
//xRes = xRes + forceValue * (xPos2 - xPos);
//yRes = yRes + forceValue * (yPos2 - yPos);
//zRes = zRes + forceValue * (zPos2 - zPos);
}
}
}
__device__
void calculateAndAddDiffInterCellForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceInterDiffPara[4]) {
forceValue = 0;
} else {
forceValue = -sceInterDiffPara[0] / sceInterDiffPara[2]
* exp(-linkLength / sceInterDiffPara[2])
+ sceInterDiffPara[1] / sceInterDiffPara[3]
* exp(-linkLength / sceInterDiffPara[3]);
if (forceValue > 0) {
//forceValue = 0;
forceValue = forceValue * 0.2;
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
__device__
void calculateAndAddInterForceDiffType(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceInterPara[4]) {
forceValue = 0;
} else {
forceValue = -sceInterPara[0] / sceInterPara[2]
* exp(-linkLength / sceInterPara[2])
+ sceInterPara[1] / sceInterPara[3]
* exp(-linkLength / sceInterPara[3]);
if (forceValue > 0) {
//forceValue = 0;
forceValue = forceValue * 0.3;
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
__device__ bool bothNodesCellNode(uint nodeGlobalRank1, uint nodeGlobalRank2,
uint cellNodesThreshold) {
if (nodeGlobalRank1 < cellNodesThreshold
&& nodeGlobalRank2 < cellNodesThreshold) {
return true;
} else {
return false;
}
}
__device__ bool isSameCell(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if (nodeGlobalRank1 < cellNodeBeginPos
|| nodeGlobalRank2 < cellNodeBeginPos) {
return false;
}
if ((nodeGlobalRank1 - cellNodeBeginPos) / nodeCountPerCell
== (nodeGlobalRank2 - cellNodeBeginPos) / nodeCountPerCell) {
return true;
} else {
return false;
}
}
//Ali
__device__
bool Is_Lennard_Jones() {
if (sceInterBPara_Jones_On_M==1) {
return true ;
}
else {
return false ;
}
}
__device__
bool isSameCell_m(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if (nodeGlobalRank1 < cellNodeBeginPos_M
|| nodeGlobalRank2 < cellNodeBeginPos_M) {
return false;
}
if ((nodeGlobalRank1 - cellNodeBeginPos_M) / allNodeCountPerCell_M
== (nodeGlobalRank2 - cellNodeBeginPos_M) / allNodeCountPerCell_M) {
return true;
} else {
return false;
}
}
__device__
bool bothInternal(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if (nodeGlobalRank1 < cellNodeBeginPos_M
|| nodeGlobalRank2 < cellNodeBeginPos_M) {
return false;
}
uint nodeRank1 = (nodeGlobalRank1 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
uint nodeRank2 = (nodeGlobalRank2 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
if (nodeRank1 >= membrThreshold_M && nodeRank2 >= membrThreshold_M) {
return true;
} else {
return false;
}
}
__device__
bool bothMembr(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if (nodeGlobalRank1 < cellNodeBeginPos_M
|| nodeGlobalRank2 < cellNodeBeginPos_M) {
return false;
}
uint nodeRank1 = (nodeGlobalRank1 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
uint nodeRank2 = (nodeGlobalRank2 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
if (nodeRank1 < membrThreshold_M && nodeRank2 < membrThreshold_M) {
return true;
} else {
return false;
}
}
__device__
bool bothMembrDiffCell(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if (nodeGlobalRank1 < cellNodeBeginPos_M
|| nodeGlobalRank2 < cellNodeBeginPos_M) {
return false;
}
uint cellRank1 = (nodeGlobalRank1 - cellNodeBeginPos_M)
/ allNodeCountPerCell_M;
uint cellRank2 = (nodeGlobalRank2 - cellNodeBeginPos_M)
/ allNodeCountPerCell_M;
if (cellRank1 == cellRank2) {
return false;
}
uint nodeRank1 = (nodeGlobalRank1 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
uint nodeRank2 = (nodeGlobalRank2 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
if (nodeRank1 < membrThreshold_M && nodeRank2 < membrThreshold_M) {
return true;
} else {
return false;
}
}
//AAMIRI
/*
__device__
bool isNodeOnMembrane(uint nodeGlobalRank) {
uint nodeRank = (nodeGlobalRank - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
if (nodeGlobalRank >= cellNodeBeginPos_M && nodeRank < membrThreshold_M){
return true;
} else{
return false;
}
}
*/
__device__
bool sameCellMemIntnl(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if (nodeGlobalRank1 < cellNodeBeginPos_M
|| nodeGlobalRank2 < cellNodeBeginPos_M) {
return false;
}
uint cellRank1 = (nodeGlobalRank1 - cellNodeBeginPos_M)
/ allNodeCountPerCell_M;
uint cellRank2 = (nodeGlobalRank2 - cellNodeBeginPos_M)
/ allNodeCountPerCell_M;
if (cellRank1 != cellRank2) {
return false;
}
uint nodeRank1 = (nodeGlobalRank1 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
uint nodeRank2 = (nodeGlobalRank2 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
if ((nodeRank1 < membrThreshold_M && nodeRank2 >= membrThreshold_M)
|| (nodeRank2 < membrThreshold_M && nodeRank1 >= membrThreshold_M)) {
return true;
} else {
return false;
}
}
__device__ bool isSameECM(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if ((nodeGlobalRank1 - ECMbeginPos) / nodeCountPerECM
== (nodeGlobalRank2 - ECMbeginPos) / nodeCountPerECM) {
return true;
} else {
return false;
}
}
__device__ bool isNeighborECMNodes(uint nodeGlobalRank1, uint nodeGlobalRank2) {
// this means that two nodes are from the same ECM
if ((nodeGlobalRank1 - ECMbeginPos) / nodeCountPerECM
== (nodeGlobalRank2 - ECMbeginPos) / nodeCountPerECM) {
// this means that two nodes are actually close to each other
		// the check is written this way to avoid unsigned integer underflow.
if ((nodeGlobalRank1 > nodeGlobalRank2
&& nodeGlobalRank1 - nodeGlobalRank2 == 1)
|| (nodeGlobalRank2 > nodeGlobalRank1
&& nodeGlobalRank2 - nodeGlobalRank1 == 1)) {
return true;
}
}
return false;
}
__device__ bool isNeighborProfileNodes(uint nodeGlobalRank1,
uint nodeGlobalRank2) {
if ((nodeGlobalRank1 > nodeGlobalRank2
&& nodeGlobalRank1 - nodeGlobalRank2 == 1)
|| (nodeGlobalRank2 > nodeGlobalRank1
&& nodeGlobalRank2 - nodeGlobalRank1 == 1)) {
return true;
}
return false;
}
__device__ bool ofSameType(uint cellType1, uint cellType2) {
if (cellType1 == cellType2) {
return true;
} else {
return false;
}
}
__device__ bool bothCellNodes(SceNodeType &type1, SceNodeType &type2) {
if ((type1 == MX || type1 == FNM) && (type2 == MX || type2 == FNM)) {
return true;
} else {
return false;
}
}
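// Adhesion candidate search: keeps the closest candidate node found so far
// within bondAdhCriLen_M, updating the stored index and distance whenever a
// nearer node is encountered.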
__device__
void attemptToAdhere(bool& isSuccess, uint& index, double& dist,
uint& nodeRank2, double& xPos1, double& yPos1, double& xPos2,
double& yPos2) {
double length = computeDist2D(xPos1, yPos1, xPos2, yPos2);
if (length <= bondAdhCriLen_M) {
if (isSuccess) {
if (length < dist) {
dist = length;
index = nodeRank2;
}
} else {
isSuccess = true;
index = nodeRank2;
dist = length;
}
}
}
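// Adhesion spring: if the bond stretches beyond maxAdhBondLen_M it is broken
// (adhereIndex reset to -1); between minAdhBondLen_M and maxAdhBondLen_M a
// linear restoring force of magnitude
//   (L - Lmin) * (bondStiff_M * alpha + bondStiff_Mitotic * (1 - alpha))
// pulls the node toward its adhesion partner, where alpha is the mitotic
// weighting coefficient (see getMitoticAdhCoef below).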
__device__
void handleAdhesionForce_M(int& adhereIndex, double& xPos, double& yPos,
double& curAdherePosX, double& curAdherePosY, double& xRes,
double& yRes, double& alpha) {
double curLen = computeDist2D(xPos, yPos, curAdherePosX, curAdherePosY);
if (curLen > maxAdhBondLen_M) {
adhereIndex = -1;
return;
} else {
if (curLen > minAdhBondLen_M) {
double forceValue = (curLen - minAdhBondLen_M) * (bondStiff_M * alpha + bondStiff_Mitotic * (1.0-alpha) );
xRes = xRes + forceValue * (curAdherePosX - xPos) / curLen;
yRes = yRes + forceValue * (curAdherePosY - yPos) / curLen;
}
}
}
//Ali June 16
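// Returns the adhesion weighting coefficient: alpha = 1 while both cells are
// below the growth-progress criterion, and decreases linearly toward 0 as the
// (average) growth progress of the involved cell(s) approaches 1, shifting
// the effective stiffness from bondStiff_M toward bondStiff_Mitotic during
// mitosis.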
__device__
double getMitoticAdhCoef(double& growProg, double& growProgNeigh){
double alpha = 1.0;
if (growProg > growthPrgrCriVal_M && growProgNeigh > growthPrgrCriVal_M){
alpha = 1.0 - ( 0.5*(growProg+growProgNeigh)-growthPrgrCriVal_M )/(1.0 - growthPrgrCriVal_M);
// adhSkipped = true;
}
else if (growProg > growthPrgrCriVal_M){
alpha = 1.0 - (growProg-growthPrgrCriVal_M)/(1.0 - growthPrgrCriVal_M);
// adhSkipped = true;
}
else if (growProgNeigh > growthPrgrCriVal_M){
alpha = 1.0 - (growProgNeigh-growthPrgrCriVal_M)/(1.0 - growthPrgrCriVal_M);
// adhSkipped = true;
}
return alpha;
}
__device__
void calculateForceBetweenLinkNodes(double &xLoc, double &yLoc, double &zLoc,
double &xLocLeft, double &yLocLeft, double &zLocLeft, double &xLocRight,
double &yLocRight, double &zLocRight, double &xVel, double &yVel,
double &zVel) {
double linkLengthLeft = computeDist(xLoc, yLoc, zLoc, xLocLeft, yLocLeft,
zLocLeft);
double forceValueLeft = sceProfilePara[5]
* (linkLengthLeft - sceProfilePara[6]);
xVel = xVel + forceValueLeft * (xLocLeft - xLoc) / linkLengthLeft;
yVel = yVel + forceValueLeft * (yLocLeft - yLoc) / linkLengthLeft;
zVel = zVel + forceValueLeft * (zLocLeft - zLoc) / linkLengthLeft;
double linkLengthRight = computeDist(xLoc, yLoc, zLoc, xLocRight, yLocRight,
zLocRight);
double forceValueRight = sceProfilePara[5]
* (linkLengthRight - sceProfilePara[6]);
xVel = xVel + forceValueRight * (xLocRight - xLoc) / linkLengthRight;
yVel = yVel + forceValueRight * (yLocRight - yLoc) / linkLengthRight;
zVel = zVel + forceValueRight * (zLocRight - zLoc) / linkLengthRight;
}
__device__
void handleSceForceNodesBasic(uint& nodeRank1, uint& nodeRank2, double& xPos,
double& yPos, double& zPos, double& xPos2, double& yPos2, double& zPos2,
double& xRes, double& yRes, double& zRes, double* _nodeLocXAddress,
double* _nodeLocYAddress, double* _nodeLocZAddress) {
if (isSameCell(nodeRank1, nodeRank2)) {
calculateAndAddIntraForce(xPos, yPos, zPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], _nodeLocZAddress[nodeRank2], xRes,
yRes, zRes);
} else {
calculateAndAddInterForce(xPos, yPos, zPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], _nodeLocZAddress[nodeRank2], xRes,
yRes, zRes);
}
}
__device__
void handleSceForceNodesDisc(uint& nodeRank1, uint& nodeRank2, double& xPos,
double& yPos, double& zPos, double& xPos2, double& yPos2, double& zPos2,
double& xRes, double& yRes, double& zRes, double& interForceX,
double& interForceY, double& interForceZ, double* _nodeLocXAddress,
double* _nodeLocYAddress, double* _nodeLocZAddress,
double* _nodeGrowProAddr) {
if (isSameCell(nodeRank1, nodeRank2)) {
calAndAddIntraForceDiv(xPos, yPos, zPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], _nodeLocZAddress[nodeRank2],
_nodeGrowProAddr[nodeRank2], xRes, yRes, zRes);
} else {
calAndAddInterForceDisc(xPos, yPos, zPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], _nodeLocZAddress[nodeRank2], xRes,
yRes, zRes, interForceX, interForceY, interForceZ);
}
}
__device__
void handleSceForceNodesDisc_M(uint& nodeRank1, uint& nodeRank2, double& xPos,
double& yPos, double& xPos2, double& yPos2, double& xRes, double& yRes,
double* _nodeLocXAddress, double* _nodeLocYAddress,
double* _nodeGrowProAddr) {
if (isSameCell_m(nodeRank1, nodeRank2)) {
if (bothInternal(nodeRank1, nodeRank2)) {
// both nodes are internal type.
calAndAddIntraDiv_M(xPos, yPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], _nodeGrowProAddr[nodeRank2],
xRes, yRes);
} else if (bothMembr(nodeRank1, nodeRank2)) {
			// both nodes are membrane (epithelium) type; no SCE force applied.
// nothing to do here.
} else {
			// one node is membrane (epithelium) type, the other is internal type.
calAndAddIntraB_M(xPos, yPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], xRes, yRes);
}
} else {
if (bothMembr(nodeRank1, nodeRank2)) {
calAndAddInter_M(xPos, yPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], xRes, yRes);
}
}
}
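// Spatial hashing step 2: every (bucket key, node index) entry is replicated
// 9 times (the bucket itself plus its 8 neighbors in 2D); NeighborFunctor2D
// rewrites each copy's key to the corresponding neighbor bucket and marks
// copies that fall outside the grid with UINT_MAX so they can be sorted to
// the end and discarded. The result is the "extended" bucket list searched
// when looking for interaction partners.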
void SceNodes::extendBuckets2D() {
static const uint extensionFactor2D = 9;
uint valuesCount = auxVecs.bucketValues.size();
auxVecs.bucketKeysExpanded.resize(valuesCount * extensionFactor2D);
auxVecs.bucketValuesIncludingNeighbor.resize(
valuesCount * extensionFactor2D);
/**
* beginning of constant iterator
*/
thrust::constant_iterator<uint> first(extensionFactor2D);
/**
* end of constant iterator.
	 * the plus sign only indicates movement of position, not addition of value.
	 * e.g. if the movement is 5 and the iterator is initialized with 9,
	 * the resulting range is [9,9,9,9,9].
*/
thrust::constant_iterator<uint> last = first + valuesCount;
expand(first, last,
make_zip_iterator(
make_tuple(auxVecs.bucketKeys.begin(),
auxVecs.bucketValues.begin())),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketValuesIncludingNeighbor.begin())));
thrust::counting_iterator<uint> countingBegin(0);
thrust::counting_iterator<uint> countingEnd = countingBegin
+ valuesCount * extensionFactor2D;
thrust::transform(
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.end(), countingEnd)),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)),
NeighborFunctor2D(domainPara.XBucketSize, domainPara.YBucketSize));
int numberOfOutOfRange = thrust::count(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.end(), UINT_MAX);
int sizeBeforeShrink = auxVecs.bucketKeysExpanded.size();
int numberInsideRange = sizeBeforeShrink - numberOfOutOfRange;
thrust::sort_by_key(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.end(),
auxVecs.bucketValuesIncludingNeighbor.begin());
auxVecs.bucketKeysExpanded.erase(
auxVecs.bucketKeysExpanded.begin() + numberInsideRange,
auxVecs.bucketKeysExpanded.end());
auxVecs.bucketValuesIncludingNeighbor.erase(
auxVecs.bucketValuesIncludingNeighbor.begin() + numberInsideRange,
auxVecs.bucketValuesIncludingNeighbor.end());
}
void SceNodes::extendBuckets2D_M() {
endIndxExt_M = endIndx_M * 9;
/**
* beginning of constant iterator
*/
thrust::constant_iterator<uint> first(9);
/**
* end of constant iterator.
	 * the plus sign only indicates movement of position, not addition of value.
	 * e.g. if the movement is 5 and the iterator is initialized with 9,
	 * the resulting range is [9,9,9,9,9].
*/
thrust::constant_iterator<uint> last = first + endIndx_M;
expand(first, last,
make_zip_iterator(
make_tuple(auxVecs.bucketKeys.begin(),
auxVecs.bucketValues.begin())),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketValuesIncludingNeighbor.begin())));
thrust::counting_iterator<uint> countingBegin(0);
thrust::transform(
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)) + endIndxExt_M,
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)),
NeighborFunctor2D(domainPara.XBucketSize, domainPara.YBucketSize));
int numberOfOutOfRange = thrust::count(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExt_M, UINT_MAX);
endIndxExtProc_M = endIndxExt_M - numberOfOutOfRange;
thrust::sort_by_key(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExt_M,
auxVecs.bucketValuesIncludingNeighbor.begin());
}
void SceNodes::extendBuckets3D() {
endIndxExt_M = endIndx_M * 27;
/**
* beginning of constant iterator
*/
thrust::constant_iterator<uint> first(27);
/**
* end of constant iterator.
	 * The plus sign only indicates an advance of the iterator position, not an
	 * addition to its value; e.g. advancing an iterator initialized with 27 by
	 * five positions yields the dereferenced sequence [27,27,27,27,27].
*/
thrust::constant_iterator<uint> last = first + endIndx_M; // this is NOT numerical addition!
expand(first, last,
make_zip_iterator(
make_tuple(auxVecs.bucketKeys.begin(),
auxVecs.bucketValues.begin())),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketValuesIncludingNeighbor.begin())));
thrust::counting_iterator<uint> countingBegin(0);
thrust::transform(
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)) + endIndxExt_M,
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)),
NgbrFunc3D(domainPara.XBucketSize, domainPara.YBucketSize,
domainPara.ZBucketSize));
int numberOfOutOfRange = thrust::count(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExt_M, UINT_MAX);
endIndxExtProc_M = endIndxExt_M - numberOfOutOfRange;
thrust::sort_by_key(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExt_M,
auxVecs.bucketValuesIncludingNeighbor.begin());
}
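/**
 * For every node, the transform below zips together the neighbor-range bounds
 * (keyBegin/keyEnd looked up through the node's bucket key), the node index and
 * the node position, and passes the tuple to AddSceForceBasic. The functor
 * scans the candidate neighbors through the raw device pointers captured here
 * and writes the accumulated force into the node velocity vectors.
 */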
void SceNodes::applySceForcesBasic() {
uint* valueAddress = thrust::raw_pointer_cast(
&auxVecs.bucketValuesIncludingNeighbor[0]);
double* nodeLocXAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocX[0]);
double* nodeLocYAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocY[0]);
double* nodeLocZAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocZ[0]);
thrust::transform(
make_zip_iterator(
make_tuple(
make_permutation_iterator(auxVecs.keyBegin.begin(),
auxVecs.bucketKeys.begin()),
make_permutation_iterator(auxVecs.keyEnd.begin(),
auxVecs.bucketKeys.begin()),
auxVecs.bucketValues.begin(),
make_permutation_iterator(infoVecs.nodeLocX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeLocY.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeLocZ.begin(),
auxVecs.bucketValues.begin()))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(auxVecs.keyBegin.begin(),
auxVecs.bucketKeys.end()),
make_permutation_iterator(auxVecs.keyEnd.begin(),
auxVecs.bucketKeys.end()),
auxVecs.bucketValues.end(),
make_permutation_iterator(infoVecs.nodeLocX.begin(),
auxVecs.bucketValues.end()),
make_permutation_iterator(infoVecs.nodeLocY.begin(),
auxVecs.bucketValues.end()),
make_permutation_iterator(infoVecs.nodeLocZ.begin(),
auxVecs.bucketValues.end()))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(infoVecs.nodeVelX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeVelY.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeVelZ.begin(),
auxVecs.bucketValues.begin()))),
AddSceForceBasic(valueAddress, nodeLocXAddress, nodeLocYAddress,
nodeLocZAddress));
}
void SceNodes::applySceForcesDisc() {
uint* valueAddress = thrust::raw_pointer_cast(
&auxVecs.bucketValuesIncludingNeighbor[0]);
double* nodeLocXAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocX[0]);
double* nodeLocYAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocY[0]);
double* nodeLocZAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocZ[0]);
double* nodeGrowProAddr = thrust::raw_pointer_cast(
&infoVecs.nodeGrowPro[0]);
thrust::transform(
make_zip_iterator(
make_tuple(
make_permutation_iterator(auxVecs.keyBegin.begin(),
auxVecs.bucketKeys.begin()),
make_permutation_iterator(auxVecs.keyEnd.begin(),
auxVecs.bucketKeys.begin()),
auxVecs.bucketValues.begin(),
make_permutation_iterator(infoVecs.nodeLocX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeLocY.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeLocZ.begin(),
auxVecs.bucketValues.begin()))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(auxVecs.keyBegin.begin(),
auxVecs.bucketKeys.end()),
make_permutation_iterator(auxVecs.keyEnd.begin(),
auxVecs.bucketKeys.end()),
auxVecs.bucketValues.end(),
make_permutation_iterator(infoVecs.nodeLocX.begin(),
auxVecs.bucketValues.end()),
make_permutation_iterator(infoVecs.nodeLocY.begin(),
auxVecs.bucketValues.end()),
make_permutation_iterator(infoVecs.nodeLocZ.begin(),
auxVecs.bucketValues.end()))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(infoVecs.nodeVelX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeVelY.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeVelZ.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(
infoVecs.nodeInterForceX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(
infoVecs.nodeInterForceY.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(
infoVecs.nodeInterForceZ.begin(),
auxVecs.bucketValues.begin()))),
AddSceForceDisc(valueAddress, nodeLocXAddress, nodeLocYAddress,
nodeLocZAddress, nodeGrowProAddr));
}
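/**
 * Disc_M variant of the force computation: operates in 2D only, is bounded by
 * the endIndx_M active entries, and additionally hands the adhesion-index,
 * membrane/internal-index and growth-progress arrays to the functor so that
 * membrane and internal nodes can be treated differently.
 */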
void SceNodes::applySceForcesDisc_M() {
uint* valueAddress = thrust::raw_pointer_cast(
&auxVecs.bucketValuesIncludingNeighbor[0]);
double* nodeLocXAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocX[0]);
double* nodeLocYAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocY[0]);
int* nodeAdhIdxAddress = thrust::raw_pointer_cast(
&infoVecs.nodeAdhereIndex[0]);
int* membrIntnlAddress = thrust::raw_pointer_cast(
&infoVecs.membrIntnlIndex[0]);
double* nodeGrowProAddr = thrust::raw_pointer_cast(
&infoVecs.nodeGrowPro[0]);
thrust::transform(
make_zip_iterator(
make_tuple(
make_permutation_iterator(auxVecs.keyBegin.begin(),
auxVecs.bucketKeys.begin()),
make_permutation_iterator(auxVecs.keyEnd.begin(),
auxVecs.bucketKeys.begin()),
auxVecs.bucketValues.begin(),
make_permutation_iterator(infoVecs.nodeLocX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeLocY.begin(),
auxVecs.bucketValues.begin()))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(auxVecs.keyBegin.begin(),
auxVecs.bucketKeys.begin() + endIndx_M),
make_permutation_iterator(auxVecs.keyEnd.begin(),
auxVecs.bucketKeys.begin() + endIndx_M),
auxVecs.bucketValues.end(),
make_permutation_iterator(infoVecs.nodeLocX.begin(),
auxVecs.bucketValues.begin() + endIndx_M),
make_permutation_iterator(infoVecs.nodeLocY.begin(),
auxVecs.bucketValues.begin() + endIndx_M))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(infoVecs.nodeVelX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeVelY.begin(),
auxVecs.bucketValues.begin()))),
AddForceDisc_M(valueAddress, nodeLocXAddress, nodeLocYAddress,
nodeAdhIdxAddress, membrIntnlAddress, nodeGrowProAddr));
}
const SceDomainPara& SceNodes::getDomainPara() const {
return domainPara;
}
void SceNodes::setDomainPara(const SceDomainPara& domainPara) {
this->domainPara = domainPara;
}
const NodeAllocPara& SceNodes::getAllocPara() const {
return allocPara;
}
void SceNodes::setAllocPara(const NodeAllocPara& allocPara) {
this->allocPara = allocPara;
}
const NodeAuxVecs& SceNodes::getAuxVecs() const {
return auxVecs;
}
void SceNodes::setAuxVecs(const NodeAuxVecs& auxVecs) {
this->auxVecs = auxVecs;
}
NodeInfoVecs& SceNodes::getInfoVecs() {
return infoVecs;
}
std::vector<std::vector<int> > SceNodes::obtainLabelMatrix(
PixelizePara& pixelPara) {
std::vector<std::vector<int> > result;
std::vector<NodeWithLabel> nodeLabels;
ResAnalysisHelper resHelper;
resHelper.setPixelPara(pixelPara);
thrust::host_vector<double> hostTmpVectorLocX = infoVecs.nodeLocX;
thrust::host_vector<double> hostTmpVectorLocY = infoVecs.nodeLocY;
thrust::host_vector<double> hostTmpVectorLocZ = infoVecs.nodeLocZ;
thrust::host_vector<SceNodeType> hostTmpVectorNodeType =
infoVecs.nodeCellType;
thrust::host_vector<uint> hostTmpVectorNodeRank = infoVecs.nodeCellRank;
thrust::host_vector<uint> hostTmpVectorIsActive = infoVecs.nodeIsActive;
uint startIndex = allocPara.startPosCells;
uint endIndex = startIndex
+ allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell;
for (uint i = startIndex; i < endIndex; i++) {
if (hostTmpVectorIsActive[i] == true) {
NodeWithLabel nodeLabel;
nodeLabel.cellRank = hostTmpVectorNodeRank[i];
nodeLabel.position = CVector(hostTmpVectorLocX[i],
hostTmpVectorLocY[i], hostTmpVectorLocZ[i]);
nodeLabels.push_back(nodeLabel);
}
}
result = resHelper.outputLabelMatrix(nodeLabels);
return result;
}
void SceNodes::initControlPara(bool isStab) {
int simuTypeConfigValue =
globalConfigVars.getConfigValue("SimulationType").toInt();
controlPara.simuType = parseTypeFromConfig(simuTypeConfigValue);
controlPara.controlSwitchs.outputBmpImg = globalConfigVars.getSwitchState(
"Switch_OutputBMP");
controlPara.controlSwitchs.outputLabelMatrix =
globalConfigVars.getSwitchState("Switch_OutputLabelMatrix");
controlPara.controlSwitchs.outputStat = globalConfigVars.getSwitchState(
"Switch_OutputStat");
controlPara.controlSwitchs.outputVtkFile = globalConfigVars.getSwitchState(
"Switch_OutputVtk");
if (isStab) {
controlPara.controlSwitchs.stab = ON;
} else {
controlPara.controlSwitchs.stab = OFF;
}
}
void SceNodes::sceForcesPerfTesting() {
prepareSceForceComputation();
applySceForcesBasic();
}
void SceNodes::sceForcesPerfTesting_M() {
prepareSceForceComputation_M();
applySceForcesBasic_M();
}
void SceNodes::applySceForcesBasic_M() {
}
void SceNodes::sceForcesDisc() {
prepareSceForceComputation();
applySceForcesDisc();
}
void SceNodes::sceForcesDisc_M() {
#ifdef DebugMode
hipEvent_t start1, start2, start3, stop;
float elapsedTime1, elapsedTime2, elapsedTime3;
hipEventCreate(&start1);
hipEventCreate(&start2);
hipEventCreate(&start3);
hipEventCreate(&stop);
hipEventRecord(start1, 0);
#endif
cout << " confirm --- 1 ---" << endl;
cout.flush();
prepareSceForceComputation_M();
#ifdef DebugMode
hipEventRecord(start2, 0);
hipEventSynchronize(start2);
hipEventElapsedTime(&elapsedTime1, start1, start2);
#endif
cout << " --- 2 ---" << endl;
cout.flush();
applySceForcesDisc_M();
#ifdef DebugMode
hipEventRecord(start3, 0);
hipEventSynchronize(start3);
hipEventElapsedTime(&elapsedTime2, start2, start3);
#endif
cout << " --- 3 ---" << endl;
cout.flush();
processMembrAdh_M();
cout << " --- 4 ---" << endl;
cout.flush();
copyExtForces_M();//AAMIRI
#ifdef DebugMode
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime3, start3, stop);
std::cout << "time spent in Node logic: " << elapsedTime1 << " "
<< elapsedTime2 << " " << elapsedTime3 << std::endl;
#endif
}
double SceNodes::getMaxEffectiveRange() {
int simuTypeConfigValue =
globalConfigVars.getConfigValue("SimulationType").toInt();
SimulationType type = parseTypeFromConfig(simuTypeConfigValue);
if (type != Disc_M) {
double interLinkEffectiveRange = globalConfigVars.getConfigValue(
"InterCellLinkEffectRange").toDouble();
double maxEffectiveRange = interLinkEffectiveRange;
double intraLinkEffectiveRange = globalConfigVars.getConfigValue(
"IntraCellLinkEffectRange").toDouble();
if (intraLinkEffectiveRange > maxEffectiveRange) {
maxEffectiveRange = intraLinkEffectiveRange;
}
double cartEffectiveRange = 0;
// cartilage effective range does not apply for other types of simulation.
try {
cartEffectiveRange = globalConfigVars.getConfigValue(
"CartForceEffectiveRange").toDouble();
} catch (SceException &exce) {
}
if (cartEffectiveRange > maxEffectiveRange) {
maxEffectiveRange = cartEffectiveRange;
}
return maxEffectiveRange;
} else {
double membrMembrEffRange = globalConfigVars.getConfigValue(
"InterBEffectiveRange").toDouble();
double membrIntnlEffRange = globalConfigVars.getConfigValue(
"IntnlBEffectRange").toDouble();
double intnlIntnlEffRange = globalConfigVars.getConfigValue(
"IntraEffectRange").toDouble();
double intnlDivEffRange = globalConfigVars.getConfigValue(
"IntraDivEffectRange").toDouble();
double maxEffRange = 0;
std::vector<double> ranges;
ranges.push_back(membrMembrEffRange);
		// the remaining ranges are currently excluded from the max computation:
//ranges.push_back(membrIntnlEffRange);
//ranges.push_back(intnlIntnlEffRange);
//ranges.push_back(intnlDivEffRange);
maxEffRange = *std::max_element(ranges.begin(), ranges.end());
return maxEffRange;
}
}
void SceNodes::setInfoVecs(const NodeInfoVecs& infoVecs) {
this->infoVecs = infoVecs;
}
void SceNodes::allocSpaceForNodes(uint maxTotalNodeCount) {
infoVecs.nodeLocX.resize(maxTotalNodeCount);
infoVecs.nodeLocY.resize(maxTotalNodeCount);
infoVecs.nodeLocZ.resize(maxTotalNodeCount);
infoVecs.nodeVelX.resize(maxTotalNodeCount);
infoVecs.nodeVelY.resize(maxTotalNodeCount);
infoVecs.nodeVelZ.resize(maxTotalNodeCount);
infoVecs.nodeF_MI_M_x.resize(maxTotalNodeCount); //Ali
infoVecs.nodeF_MI_M_y.resize(maxTotalNodeCount); //Ali
infoVecs.nodeF_MI_M_T.resize(maxTotalNodeCount); //Ali
infoVecs.nodeF_MI_M_N.resize(maxTotalNodeCount); //Ali
infoVecs.nodeVelTangent.resize(maxTotalNodeCount);//AAMIRI
infoVecs.nodeVelNormal.resize(maxTotalNodeCount);//AAMIRI
infoVecs.nodeCurvature.resize(maxTotalNodeCount, 0.0);//AAMIRI
infoVecs.nodeExtForceX.resize(maxTotalNodeCount);//AAMIRI
infoVecs.nodeExtForceY.resize(maxTotalNodeCount);//AAMIRI
infoVecs.nodeExtForceTangent.resize(maxTotalNodeCount);//AAMIRI
infoVecs.nodeExtForceNormal.resize(maxTotalNodeCount);//AAMIRI
infoVecs.nodeMaxForce.resize(maxTotalNodeCount);
infoVecs.nodeCellType.resize(maxTotalNodeCount);
infoVecs.nodeCellRank.resize(maxTotalNodeCount);
infoVecs.nodeIsActive.resize(maxTotalNodeCount);
if (controlPara.simuType == Disc
|| controlPara.simuType == SingleCellTest) {
infoVecs.nodeGrowPro.resize(maxTotalNodeCount);
infoVecs.nodeInterForceX.resize(maxTotalNodeCount);
infoVecs.nodeInterForceY.resize(maxTotalNodeCount);
infoVecs.nodeInterForceZ.resize(maxTotalNodeCount);
}
if (controlPara.simuType == Disc_M) {
infoVecs.nodeAdhereIndex.resize(maxTotalNodeCount);
infoVecs.nodeAdhIndxHostCopy.resize(maxTotalNodeCount);
infoVecs.membrIntnlIndex.resize(maxTotalNodeCount);
infoVecs.nodeGrowPro.resize(maxTotalNodeCount);
infoVecs.membrTensionMag.resize(maxTotalNodeCount, 0);
infoVecs.membrTenMagRi.resize(maxTotalNodeCount, 0);
infoVecs.membrDistToRi.resize(maxTotalNodeCount, 0);//AAMIRI
infoVecs.membrLinkRiMidX.resize(maxTotalNodeCount, 0);
infoVecs.membrLinkRiMidY.resize(maxTotalNodeCount, 0);
infoVecs.membrBendLeftX.resize(maxTotalNodeCount, 0);
infoVecs.membrBendLeftY.resize(maxTotalNodeCount, 0);
infoVecs.membrBendRightX.resize(maxTotalNodeCount, 0);
infoVecs.membrBendRightY.resize(maxTotalNodeCount, 0);
auxVecs.bucketKeys.resize(maxTotalNodeCount);
auxVecs.bucketValues.resize(maxTotalNodeCount);
auxVecs.bucketKeysExpanded.resize(maxTotalNodeCount * 9);
auxVecs.bucketValuesIncludingNeighbor.resize(maxTotalNodeCount * 9);
}
}
void SceNodes::initNodeAllocPara(uint totalBdryNodeCount,
uint maxProfileNodeCount, uint maxCartNodeCount, uint maxTotalECMCount,
uint maxNodeInECM, uint maxTotalCellCount, uint maxNodeInCell) {
allocPara.maxCellCount = maxTotalCellCount;
allocPara.maxNodeOfOneCell = maxNodeInCell;
allocPara.maxNodePerECM = maxNodeInECM;
allocPara.maxECMCount = maxTotalECMCount;
allocPara.maxProfileNodeCount = maxProfileNodeCount;
allocPara.maxCartNodeCount = maxCartNodeCount;
allocPara.currentActiveProfileNodeCount = 0;
allocPara.currentActiveCartNodeCount = 0;
allocPara.BdryNodeCount = totalBdryNodeCount;
allocPara.currentActiveCellCount = 0;
allocPara.maxTotalECMNodeCount = allocPara.maxECMCount
* allocPara.maxNodePerECM;
allocPara.currentActiveECM = 0;
allocPara.maxTotalCellNodeCount = maxTotalCellCount
* allocPara.maxNodeOfOneCell;
allocPara.startPosProfile = totalBdryNodeCount;
allocPara.startPosCart = allocPara.startPosProfile
+ allocPara.maxProfileNodeCount;
allocPara.startPosECM = allocPara.startPosCart + allocPara.maxCartNodeCount;
allocPara.startPosCells = allocPara.startPosECM
+ allocPara.maxTotalECMNodeCount;
}
void SceNodes::initNodeAllocPara_M(uint totalBdryNodeCount,
uint maxTotalCellCount, uint maxEpiNodePerCell,
uint maxInternalNodePerCell) {
allocPara_M.bdryNodeCount = totalBdryNodeCount;
allocPara_M.currentActiveCellCount = 0;
allocPara_M.maxCellCount = maxTotalCellCount;
allocPara_M.maxAllNodePerCell = maxEpiNodePerCell + maxInternalNodePerCell;
allocPara_M.maxMembrNodePerCell = maxEpiNodePerCell;
allocPara_M.maxIntnlNodePerCell = maxInternalNodePerCell;
allocPara_M.maxTotalNodeCount = allocPara_M.maxAllNodePerCell
* allocPara_M.maxCellCount;
}
void SceNodes::removeNodes(int cellRank, vector<uint> &removeSeq) {
uint cellBeginIndex = allocPara.startPosCells
+ cellRank * allocPara.maxNodeOfOneCell;
uint cellEndIndex = cellBeginIndex + allocPara.maxNodeOfOneCell;
thrust::host_vector<double> cellXCoords(allocPara.maxNodeOfOneCell);
thrust::host_vector<double> cellYCoords(allocPara.maxNodeOfOneCell);
thrust::copy(infoVecs.nodeLocX.begin() + cellBeginIndex,
infoVecs.nodeLocX.begin() + cellEndIndex, cellXCoords.begin());
thrust::copy(infoVecs.nodeLocY.begin() + cellBeginIndex,
infoVecs.nodeLocY.begin() + cellEndIndex, cellYCoords.begin());
vector<bool> isRemove(allocPara.maxNodeOfOneCell, false);
/*
std::cout << "before, X: [";
for (uint i = 0; i < allocPara.maxNodeOfOneCell; i++) {
std::cout << cellXCoords[i] << " ";
}
std::cout << "]" << endl;
std::cout << "before, Y: [";
for (uint i = 0; i < allocPara.maxNodeOfOneCell; i++) {
std::cout << cellYCoords[i] << " ";
}
std::cout << "]" << endl;
*/
for (uint i = 0; i < removeSeq.size(); i++) {
isRemove[removeSeq[i]] = true;
}
thrust::host_vector<double> cellXRemoved(allocPara.maxNodeOfOneCell);
thrust::host_vector<double> cellYRemoved(allocPara.maxNodeOfOneCell);
uint curIndex = 0;
for (uint i = 0; i < allocPara.maxNodeOfOneCell; i++) {
if (isRemove[i] == false) {
cellXRemoved[curIndex] = cellXCoords[i];
cellYRemoved[curIndex] = cellYCoords[i];
curIndex++;
}
}
/*
std::cout << "after, X: [";
for (uint i = 0; i < allocPara.maxNodeOfOneCell; i++) {
std::cout << cellXRemoved[i] << " ";
}
std::cout << "]" << endl;
std::cout << "after, Y: [";
for (uint i = 0; i < allocPara.maxNodeOfOneCell; i++) {
std::cout << cellYRemoved[i] << " ";
}
std::cout << "]" << endl;
*/
thrust::copy(cellXRemoved.begin(), cellXRemoved.end(),
infoVecs.nodeLocX.begin() + cellBeginIndex);
thrust::copy(cellYRemoved.begin(), cellYRemoved.end(),
infoVecs.nodeLocY.begin() + cellBeginIndex);
}
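/**
 * Membrane adhesion post-processing: keep a host copy of the adhesion indices,
 * apply the adhesion forces on the device, then adjust indices of pairs that
 * are no longer valid.
 */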
void SceNodes::processMembrAdh_M() {
keepAdhIndxCopyInHost_M();
applyMembrAdh_M();
removeInvalidPairs_M();
}
void SceNodes::keepAdhIndxCopyInHost_M() {
uint maxTotalNode = allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
thrust::copy(infoVecs.nodeAdhereIndex.begin(),
infoVecs.nodeAdhereIndex.begin() + maxTotalNode,
infoVecs.nodeAdhIndxHostCopy.begin());
}
void SceNodes::removeInvalidPairs_M() {
int* nodeAdhIdxAddress = thrust::raw_pointer_cast(
&infoVecs.nodeAdhereIndex[0]);
uint curActiveNodeCt = allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
thrust::counting_iterator<int> iBegin(0);
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(iBegin,
infoVecs.nodeAdhereIndex.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(iBegin,
infoVecs.nodeAdhereIndex.begin()))
+ curActiveNodeCt, infoVecs.nodeAdhereIndex.begin(),
AdjustAdh(nodeAdhIdxAddress));
}
void SceNodes::applyMembrAdh_M() {
thrust::counting_iterator<uint> iBegin(0);
uint maxTotalNode = allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
double* nodeLocXAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocX[0]);
double* nodeLocYAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocY[0]);
double* nodeGrowProAddr = thrust::raw_pointer_cast(
&infoVecs.nodeGrowPro[0]);
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(infoVecs.nodeIsActive.begin(),
infoVecs.nodeAdhereIndex.begin(), iBegin,
infoVecs.nodeVelX.begin(),
infoVecs.nodeVelY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(infoVecs.nodeIsActive.begin(),
infoVecs.nodeAdhereIndex.begin(), iBegin,
infoVecs.nodeVelX.begin(),
infoVecs.nodeVelY.begin())) + maxTotalNode,
thrust::make_zip_iterator(
thrust::make_tuple(infoVecs.nodeVelX.begin(),
infoVecs.nodeVelY.begin())),
ApplyAdh(nodeLocXAddress, nodeLocYAddress, nodeGrowProAddr));
}
//AAMIRI
void SceNodes::copyExtForces_M(){
thrust::copy(infoVecs.nodeVelX.begin(), infoVecs.nodeVelX.end(),
infoVecs.nodeExtForceX.begin());
thrust::copy(infoVecs.nodeVelY.begin(), infoVecs.nodeVelY.end(),
infoVecs.nodeExtForceY.begin());
}
/******************************************************************************
 * 2ee2bb075630d8e3410b3003df568c822255d0e4.cu
 ******************************************************************************/
#include "SceNodes.h"
__constant__ double sceInterPara[5];
__constant__ double sceIntraPara[5];
// parameter set for cells that are going to divide
__constant__ double sceIntraParaDiv[5];
__constant__ double sceDivProPara;
__constant__ double sceCartPara[5];
__constant__ double sceInterDiffPara[5];
__constant__ double sceProfilePara[7];
__constant__ double sceECMPara[5];
__constant__ double sceDiffPara[5];
__constant__ double cartGrowDirVec[3];
__constant__ uint ProfilebeginPos;
__constant__ uint ECMbeginPos;
__constant__ uint cellNodeBeginPos;
__constant__ uint nodeCountPerECM;
__constant__ uint nodeCountPerCell;
//
__constant__ uint cellNodeBeginPos_M;
__constant__ uint allNodeCountPerCell_M;
__constant__ uint membrThreshold_M;
__constant__ double sceInterBPara_M[5];
__constant__ int sceInterBPara_Jones_On_M ; //Ali
__constant__ double sceInterBPara_Jones_M[3] ; //Ali
__constant__ double sceIntnlBPara_M[5];
__constant__ double sceIntraPara_M[5];
__constant__ double sceIntraParaDiv_M[5];
__constant__ double growthPrgrCriVal_M;
__constant__ double maxAdhBondLen_M;
__constant__ double minAdhBondLen_M;
__constant__ double bondStiff_M;
__constant__ double bondStiff_Mitotic;
__constant__ double bondAdhCriLen_M;
// #define DebugMode
// This template method expands an input sequence by
// replicating each element a variable number of times. For example,
//
// expand([2,2,2],[A,B,C]) -> [A,A,B,B,C,C]
// expand([3,0,1],[A,B,C]) -> [A,A,A,C]
// expand([1,3,2],[A,B,C]) -> [A,B,B,B,C,C]
//
// The element counts are assumed to be non-negative integers
template<typename InputIterator1, typename InputIterator2,
typename OutputIterator>
OutputIterator expand(InputIterator1 first1, InputIterator1 last1,
InputIterator2 first2, OutputIterator output) {
typedef typename thrust::iterator_difference<InputIterator1>::type difference_type;
difference_type input_size = thrust::distance(first1, last1);
difference_type output_size = thrust::reduce(first1, last1);
// scan the counts to obtain output offsets for each input element
thrust::device_vector<difference_type> output_offsets(input_size, 0);
thrust::exclusive_scan(first1, last1, output_offsets.begin());
// scatter the nonzero counts into their corresponding output positions
thrust::device_vector<difference_type> output_indices(output_size, 0);
thrust::scatter_if(thrust::counting_iterator<difference_type>(0),
thrust::counting_iterator<difference_type>(input_size),
output_offsets.begin(), first1, output_indices.begin());
// compute max-scan over the output indices, filling in the holes
thrust::inclusive_scan(output_indices.begin(), output_indices.end(),
output_indices.begin(), thrust::maximum<difference_type>());
// gather input values according to index array (output = first2[output_indices])
OutputIterator output_end = output;
thrust::advance(output_end, output_size);
thrust::gather(output_indices.begin(), output_indices.end(), first2,
output);
// return output + output_size
thrust::advance(output, output_size);
return output;
}
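// Illustrative sketch only: this helper is not called anywhere in the simulation
// and its name is our own; it simply shows how expand() is driven by a
// constant_iterator, mirroring the way extendBuckets2D()/extendBuckets3D()
// replicate every (bucketKey, bucketValue) pair 9 or 27 times before the copies
// are remapped to neighboring buckets.
static void expandByConstantFactorSketch(thrust::device_vector<uint>& keys,
		thrust::device_vector<uint>& values, uint factor,
		thrust::device_vector<uint>& keysExpanded,
		thrust::device_vector<uint>& valuesExpanded) {
	// every input element is replicated 'factor' times
	keysExpanded.resize(keys.size() * factor);
	valuesExpanded.resize(values.size() * factor);
	thrust::constant_iterator<uint> countBegin(factor);
	thrust::constant_iterator<uint> countEnd = countBegin + keys.size();
	expand(countBegin, countEnd,
			thrust::make_zip_iterator(
					thrust::make_tuple(keys.begin(), values.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(keysExpanded.begin(),
							valuesExpanded.begin())));
}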
SceNodes::SceNodes() {
readDomainPara();
}
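// Note: the bucket grid spacing below is set to the maximum effective
// interaction range, so any two nodes close enough to interact always lie in
// the same bucket or in directly adjacent buckets, and the neighbor search
// only has to visit the surrounding 3x3 (or 3x3x3) block of buckets.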
void SceNodes::readDomainPara() {
domainPara.minX = globalConfigVars.getConfigValue("DOMAIN_XMIN").toDouble();
domainPara.maxX = globalConfigVars.getConfigValue("DOMAIN_XMAX").toDouble();
domainPara.minY = globalConfigVars.getConfigValue("DOMAIN_YMIN").toDouble();
domainPara.maxY = globalConfigVars.getConfigValue("DOMAIN_YMAX").toDouble();
//domainPara.minZ = globalConfigVars.getConfigValue("DOMAIN_ZMIN").toDouble();
//domainPara.maxZ = globalConfigVars.getConfigValue("DOMAIN_ZMAX").toDouble();
domainPara.gridSpacing = getMaxEffectiveRange();
domainPara.XBucketSize = (domainPara.maxX - domainPara.minX)
/ domainPara.gridSpacing + 1;
domainPara.YBucketSize = (domainPara.maxY - domainPara.minY)
/ domainPara.gridSpacing + 1;
//domainPara.ZBucketSize = (domainPara.maxZ - domainPara.minZ)
// / domainPara.gridSpacing + 1;
}
void SceNodes::readMechPara() {
double U0 =
globalConfigVars.getConfigValue("InterCell_U0_Original").toDouble()
/ globalConfigVars.getConfigValue("InterCell_U0_DivFactor").toDouble();
double V0 =
globalConfigVars.getConfigValue("InterCell_V0_Original").toDouble()
/ globalConfigVars.getConfigValue("InterCell_V0_DivFactor").toDouble();
double k1 =
globalConfigVars.getConfigValue("InterCell_k1_Original").toDouble()
/ globalConfigVars.getConfigValue("InterCell_k1_DivFactor").toDouble();
double k2 =
globalConfigVars.getConfigValue("InterCell_k2_Original").toDouble()
/ globalConfigVars.getConfigValue("InterCell_k2_DivFactor").toDouble();
mechPara.sceInterParaCPU[0] = U0;
mechPara.sceInterParaCPU[1] = V0;
mechPara.sceInterParaCPU[2] = k1;
mechPara.sceInterParaCPU[3] = k2;
double interLinkEffectiveRange;
if (controlPara.simuType != Disc_M) {
interLinkEffectiveRange = globalConfigVars.getConfigValue(
"InterCellLinkEffectRange").toDouble();
mechPara.sceInterParaCPU[4] = interLinkEffectiveRange;
}
double U0_Intra =
globalConfigVars.getConfigValue("IntraCell_U0_Original").toDouble()
/ globalConfigVars.getConfigValue("IntraCell_U0_DivFactor").toDouble();
double V0_Intra =
globalConfigVars.getConfigValue("IntraCell_V0_Original").toDouble()
/ globalConfigVars.getConfigValue("IntraCell_V0_DivFactor").toDouble();
double k1_Intra =
globalConfigVars.getConfigValue("IntraCell_k1_Original").toDouble()
/ globalConfigVars.getConfigValue("IntraCell_k1_DivFactor").toDouble();
double k2_Intra =
globalConfigVars.getConfigValue("IntraCell_k2_Original").toDouble()
/ globalConfigVars.getConfigValue("IntraCell_k2_DivFactor").toDouble();
mechPara.sceIntraParaCPU[0] = U0_Intra;
mechPara.sceIntraParaCPU[1] = V0_Intra;
mechPara.sceIntraParaCPU[2] = k1_Intra;
mechPara.sceIntraParaCPU[3] = k2_Intra;
double intraLinkEffectiveRange;
if (controlPara.simuType != Disc_M) {
intraLinkEffectiveRange = globalConfigVars.getConfigValue(
"IntraCellLinkEffectRange").toDouble();
mechPara.sceIntraParaCPU[4] = intraLinkEffectiveRange;
}
if (controlPara.simuType == Disc) {
double U0_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_U0_Original").toDouble()
/ globalConfigVars.getConfigValue(
"IntraCell_U0_Div_DivFactor").toDouble();
double V0_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_V0_Original").toDouble()
/ globalConfigVars.getConfigValue(
"IntraCell_V0_Div_DivFactor").toDouble();
double k1_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_k1_Original").toDouble()
/ globalConfigVars.getConfigValue(
"IntraCell_k1_Div_DivFactor").toDouble();
double k2_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_k2_Original").toDouble()
/ globalConfigVars.getConfigValue(
"IntraCell_k2_Div_DivFactor").toDouble();
double growthProgressThreshold = globalConfigVars.getConfigValue(
"GrowthProgressThreshold").toDouble();
mechPara.sceIntraParaDivCPU[0] = U0_Intra_Div;
mechPara.sceIntraParaDivCPU[1] = V0_Intra_Div;
mechPara.sceIntraParaDivCPU[2] = k1_Intra_Div;
mechPara.sceIntraParaDivCPU[3] = k2_Intra_Div;
mechPara.sceIntraParaDivCPU[4] = growthProgressThreshold;
}
}
SceNodes::SceNodes(uint totalBdryNodeCount, uint maxProfileNodeCount,
uint maxCartNodeCount, uint maxTotalECMCount, uint maxNodeInECM,
uint maxTotalCellCount, uint maxNodeInCell, bool isStab) {
initControlPara(isStab);
readDomainPara();
uint maxTotalNodeCount;
if (controlPara.simuType != Disc_M) {
initNodeAllocPara(totalBdryNodeCount, maxProfileNodeCount,
maxCartNodeCount, maxTotalECMCount, maxNodeInECM,
maxTotalCellCount, maxNodeInCell);
maxTotalNodeCount = totalBdryNodeCount + maxProfileNodeCount
+ maxCartNodeCount + allocPara.maxTotalECMNodeCount
+ allocPara.maxTotalCellNodeCount;
} else {
uint maxEpiNodeCount = globalConfigVars.getConfigValue(
"MaxEpiNodeCountPerCell").toInt();
uint maxInternalNodeCount = globalConfigVars.getConfigValue(
"MaxAllNodeCountPerCell").toInt() - maxEpiNodeCount;
initNodeAllocPara_M(totalBdryNodeCount, maxTotalCellCount,
maxEpiNodeCount, maxInternalNodeCount);
maxTotalNodeCount = allocPara_M.maxTotalNodeCount;
}
allocSpaceForNodes(maxTotalNodeCount);
thrust::host_vector<SceNodeType> hostTmpVector(maxTotalNodeCount);
thrust::host_vector<bool> hostTmpVector2(maxTotalNodeCount);
thrust::host_vector<int> hostTmpVector3(maxTotalNodeCount);
if (controlPara.simuType != Disc_M) {
		for (uint i = 0; i < maxTotalNodeCount; i++) {
if (i < allocPara.startPosProfile) {
hostTmpVector[i] = Boundary;
hostTmpVector3[i] = 0;
} else if (i < allocPara.startPosCart) {
hostTmpVector[i] = Profile;
hostTmpVector3[i] = 0;
} else if (i < allocPara.startPosECM) {
hostTmpVector[i] = Cart;
hostTmpVector3[i] = 0;
} else if (i < allocPara.startPosCells) {
hostTmpVector[i] = ECM;
hostTmpVector3[i] = (i - allocPara.startPosECM)
/ allocPara.maxNodePerECM;
} else {
// all initialized as FNM
hostTmpVector[i] = FNM;
hostTmpVector3[i] = (i - allocPara.startPosCells)
/ allocPara.maxNodeOfOneCell;
}
hostTmpVector2[i] = false;
}
} else {
for (uint i = 0; i < maxTotalNodeCount; i++) {
if (i < allocPara_M.bdryNodeCount) {
hostTmpVector[i] = Boundary;
hostTmpVector3[i] = 0;
} else {
uint tmp = i - allocPara_M.bdryNodeCount;
				// rank within the cell-node region: divide by the number of
				// nodes per cell (as in the other constructor), not by the
				// boundary node count.
				uint cellRank = tmp / allocPara_M.maxAllNodePerCell;
				uint nodeRank = tmp % allocPara_M.maxAllNodePerCell;
if (nodeRank < allocPara_M.maxMembrNodePerCell) {
hostTmpVector[i] = CellMembr;
} else {
hostTmpVector[i] = CellIntnl;
}
hostTmpVector3[i] = cellRank;
}
hostTmpVector2[i] = false;
}
}
infoVecs.nodeCellType = hostTmpVector;
infoVecs.nodeIsActive = hostTmpVector2;
infoVecs.nodeCellRank = hostTmpVector3;
std::cout << " I am in SceNodes constructor with long input which includes copyParaToGPUConstMem function " << endl ;
copyParaToGPUConstMem();
}
SceNodes::SceNodes(uint maxTotalCellCount, uint maxAllNodePerCell) {
//initControlPara (isStab);
int simuTypeConfigValue =
globalConfigVars.getConfigValue("SimulationType").toInt();
controlPara.simuType = parseTypeFromConfig(simuTypeConfigValue);
readDomainPara();
uint maxTotalNodeCount = maxTotalCellCount * maxAllNodePerCell;
uint maxMembrNodeCountPerCell = globalConfigVars.getConfigValue(
"MaxMembrNodeCountPerCell").toInt();
uint maxIntnlNodeCountPerCell = globalConfigVars.getConfigValue(
"MaxIntnlNodeCountPerCell").toInt();
initNodeAllocPara_M(0, maxTotalCellCount, maxMembrNodeCountPerCell,
maxIntnlNodeCountPerCell);
std::cout << " Number of boundary nodes = " << allocPara_M.bdryNodeCount
<< std::endl;
std::cout << " Max number of cells in domain = "
<< allocPara_M.maxCellCount << std::endl;
std::cout << " Max all nodes per cell = "
<< allocPara_M.maxAllNodePerCell << std::endl;
std::cout << " Max membrane node per cell= "
<< allocPara_M.maxMembrNodePerCell << std::endl;
std::cout << " Max internal node per cell= "
<< allocPara_M.maxIntnlNodePerCell << std::endl;
std::cout << " Max total number of nodes in domain = "
<< allocPara_M.maxTotalNodeCount << std::endl;
allocSpaceForNodes(maxTotalNodeCount);
thrust::host_vector<SceNodeType> hostTmpVector(maxTotalNodeCount);
thrust::host_vector<bool> hostTmpVector2(maxTotalNodeCount);
	uint nodeRank = 0; // initialized so the check below never reads an indeterminate value
for (uint i = 0; i < maxTotalNodeCount; i++) {
if (i < allocPara_M.bdryNodeCount) {
hostTmpVector[i] = Boundary;
} else {
uint tmp = i - allocPara_M.bdryNodeCount;
nodeRank = tmp % allocPara_M.maxAllNodePerCell;
if (nodeRank < allocPara_M.maxMembrNodePerCell) {
hostTmpVector[i] = CellMembr;
//std::cout << "0";
} else {
hostTmpVector[i] = CellIntnl;
//std::cout << "1";
}
}
hostTmpVector2[i] = false;
if (nodeRank == 0) {
//std::cout << std::endl;
}
}
//std::cout << "finished" << std::endl;
//std::cout.flush();
infoVecs.nodeCellType = hostTmpVector;
infoVecs.nodeIsActive = hostTmpVector2;
thrust::host_vector<int> bondVec(maxTotalNodeCount, -1);
infoVecs.nodeAdhereIndex = bondVec;
infoVecs.membrIntnlIndex = bondVec;
infoVecs.nodeAdhIndxHostCopy = bondVec;
//std::cout << "copy finished!" << std::endl;
//std::cout.flush();
copyParaToGPUConstMem_M();
std::cout << " I am in SceNodes constructor with short input which includes copyParaToGPUConstMem_M function " << endl ;
//std::cout << "at the end" << std::endl;
//std::cout.flush();
}
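// The two routines below read the mechanical parameters on the host and copy
// them into the __constant__ device symbols declared at the top of this file
// via cudaMemcpyToSymbol, so the force kernels can read them without taking
// them as arguments.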
void SceNodes::copyParaToGPUConstMem() {
readMechPara();
cudaMemcpyToSymbol(sceInterPara, mechPara.sceInterParaCPU,
5 * sizeof(double));
cudaMemcpyToSymbol(sceIntraPara, mechPara.sceIntraParaCPU,
5 * sizeof(double));
cudaMemcpyToSymbol(sceIntraParaDiv, mechPara.sceIntraParaDivCPU,
5 * sizeof(double));
cudaMemcpyToSymbol(ProfilebeginPos, &allocPara.startPosProfile,
sizeof(uint));
cudaMemcpyToSymbol(ECMbeginPos, &allocPara.startPosECM, sizeof(uint));
cudaMemcpyToSymbol(cellNodeBeginPos, &allocPara.startPosCells,
sizeof(uint));
cudaMemcpyToSymbol(nodeCountPerECM, &allocPara.maxNodePerECM, sizeof(uint));
cudaMemcpyToSymbol(nodeCountPerCell, &allocPara.maxNodeOfOneCell,
sizeof(uint));
cudaMemcpyToSymbol(sceCartPara, mechPara.sceCartParaCPU,
5 * sizeof(double));
cudaMemcpyToSymbol(sceProfilePara, mechPara.sceProfileParaCPU,
7 * sizeof(double));
cudaMemcpyToSymbol(sceInterDiffPara, mechPara.sceInterDiffParaCPU,
5 * sizeof(double));
cudaMemcpyToSymbol(sceECMPara, mechPara.sceECMParaCPU, 5 * sizeof(double));
}
void SceNodes::copyParaToGPUConstMem_M() {
readParas_M();
cudaMemcpyToSymbol(cellNodeBeginPos_M, &allocPara_M.bdryNodeCount,
sizeof(uint));
cudaMemcpyToSymbol(allNodeCountPerCell_M, &allocPara_M.maxAllNodePerCell,
sizeof(uint));
cudaMemcpyToSymbol(membrThreshold_M, &allocPara_M.maxMembrNodePerCell,
sizeof(uint));
cudaMemcpyToSymbol(bondAdhCriLen_M, &mechPara_M.bondAdhCriLenCPU_M,
sizeof(double));
cudaMemcpyToSymbol(bondStiff_M, &mechPara_M.bondStiffCPU_M, sizeof(double));
cudaMemcpyToSymbol(bondStiff_Mitotic, &mechPara_M.bondStiffCPU_Mitotic, sizeof(double));//Ali June 16
cudaMemcpyToSymbol(growthPrgrCriVal_M, &mechPara_M.growthPrgrCriValCPU_M,
sizeof(double));
cudaMemcpyToSymbol(maxAdhBondLen_M, &mechPara_M.maxAdhBondLenCPU_M,
sizeof(double));
cudaMemcpyToSymbol(minAdhBondLen_M, &mechPara_M.minAdhBondLenCPU_M,
sizeof(double));
cudaMemcpyToSymbol(sceInterBPara_M, mechPara_M.sceInterBParaCPU_M,
5 * sizeof(double));
cudaMemcpyToSymbol(sceInterBPara_Jones_On_M, &mechPara_M.sceInterBParaCPU_Jones_On_M,
sizeof(int)); //Ali
cudaMemcpyToSymbol(sceInterBPara_Jones_M, mechPara_M.sceInterBParaCPU_Jones_M,
3 * sizeof(double)); //Ali
cudaMemcpyToSymbol(sceIntnlBPara_M, mechPara_M.sceIntnlBParaCPU_M,
5 * sizeof(double));
cudaMemcpyToSymbol(sceIntraPara_M, mechPara_M.sceIntraParaCPU_M,
5 * sizeof(double));
cudaMemcpyToSymbol(sceIntraParaDiv_M, mechPara_M.sceIntraParaDivCPU_M,
5 * sizeof(double));
}
void SceNodes::initDimension(double domainMinX, double domainMaxX,
double domainMinY, double domainMaxY, double domainBucketSize) {
domainPara.minX = domainMinX;
domainPara.maxX = domainMaxX;
domainPara.minY = domainMinY;
domainPara.maxY = domainMaxY;
domainPara.gridSpacing = domainBucketSize;
domainPara.XBucketSize = (domainPara.maxX - domainPara.minX)
/ domainPara.gridSpacing + 1;
domainPara.YBucketSize = (domainPara.maxY - domainPara.minY)
/ domainPara.gridSpacing + 1;
domainPara.totalBucketCount = domainPara.XBucketSize
* domainPara.YBucketSize;
auxVecs.keyBegin.resize(domainPara.totalBucketCount);
auxVecs.keyEnd.resize(domainPara.totalBucketCount);
}
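// Host-side helper used by obtainAnimationData(): copies the bucket data
// structures back to the CPU and enumerates every candidate pair
// (node1 < node2) found in each node's neighborhood range.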
std::vector<std::pair<uint, uint> > SceNodes::obtainPossibleNeighborPairs() {
std::vector<std::pair<uint, uint> > result;
thrust::host_vector<uint> keyBeginCPU = auxVecs.keyBegin;
thrust::host_vector<uint> keyEndCPU = auxVecs.keyEnd;
thrust::host_vector<uint> bucketKeysCPU = auxVecs.bucketKeys;
thrust::host_vector<uint> bucketValuesCPU = auxVecs.bucketValues;
thrust::host_vector<uint> bucketValuesExtendedCPU =
auxVecs.bucketValuesIncludingNeighbor;
uint iterationCounter = 0;
int size = bucketKeysCPU.size();
for (int i = 0; i < size; i++) {
for (int j = keyBeginCPU[bucketKeysCPU[i]];
j < keyEndCPU[bucketKeysCPU[i]]; j++) {
int node1 = bucketValuesCPU[i];
int node2 = bucketValuesExtendedCPU[j];
if (node1 >= node2) {
continue;
} else {
result.push_back(std::make_pair<uint, uint>(node1, node2));
}
iterationCounter++;
}
}
return result;
}
void SceNodes::readParas_M() {
//////////////////////
//// Block 1 /////////
//////////////////////
double U0_InterB =
globalConfigVars.getConfigValue("SceInterB_U0").toDouble();
double V0_InterB =
globalConfigVars.getConfigValue("SceInterB_V0").toDouble();
double k1_InterB =
globalConfigVars.getConfigValue("SceInterB_k1").toDouble();
double k2_InterB =
globalConfigVars.getConfigValue("SceInterB_k2").toDouble();
double interBEffectiveRange = globalConfigVars.getConfigValue(
"InterBEffectiveRange").toDouble();
mechPara_M.sceInterBParaCPU_M[0] = U0_InterB;
mechPara_M.sceInterBParaCPU_M[1] = V0_InterB;
mechPara_M.sceInterBParaCPU_M[2] = k1_InterB;
mechPara_M.sceInterBParaCPU_M[3] = k2_InterB;
mechPara_M.sceInterBParaCPU_M[4] = interBEffectiveRange;
//Ali
//////////////////////
//// Block 1.5 /////////
//////////////////////
	int On_InterB_Jones =
			globalConfigVars.getConfigValue("SceInterB_Jones_On").toInt();
double eps_InterB_Jones =
globalConfigVars.getConfigValue("SceInterB_Jones_eps").toDouble();
double sig_InterB_Jones =
globalConfigVars.getConfigValue("SceInterB_Jones_sig").toDouble();
double interBEffectiveRange_Jones = globalConfigVars.getConfigValue(
"InterBEffectiveRange_Jones").toDouble();
mechPara_M.sceInterBParaCPU_Jones_On_M = On_InterB_Jones;
mechPara_M.sceInterBParaCPU_Jones_M[0] = eps_InterB_Jones;
mechPara_M.sceInterBParaCPU_Jones_M[1] = sig_InterB_Jones;
mechPara_M.sceInterBParaCPU_Jones_M[2] = interBEffectiveRange_Jones;
//Ali
//////////////////////
//// Block 2 /////////
//////////////////////
double U0_IntnlB =
globalConfigVars.getConfigValue("SceIntnlB_U0").toDouble();
double V0_IntnlB =
globalConfigVars.getConfigValue("SceIntnlB_V0").toDouble();
double k1_IntnlB =
globalConfigVars.getConfigValue("SceIntnlB_k1").toDouble();
double k2_IntnlB =
globalConfigVars.getConfigValue("SceIntnlB_k2").toDouble();
double intnlBEffectiveRange = globalConfigVars.getConfigValue(
"IntnlBEffectRange").toDouble();
mechPara_M.sceIntnlBParaCPU_M[0] = U0_IntnlB;
mechPara_M.sceIntnlBParaCPU_M[1] = V0_IntnlB;
mechPara_M.sceIntnlBParaCPU_M[2] = k1_IntnlB;
mechPara_M.sceIntnlBParaCPU_M[3] = k2_IntnlB;
mechPara_M.sceIntnlBParaCPU_M[4] = intnlBEffectiveRange;
//////////////////////
//// Block 3 /////////
//////////////////////
double U0_Intra =
globalConfigVars.getConfigValue("IntraCell_U0").toDouble();
double V0_Intra =
globalConfigVars.getConfigValue("IntraCell_V0").toDouble();
double k1_Intra =
globalConfigVars.getConfigValue("IntraCell_k1").toDouble();
double k2_Intra =
globalConfigVars.getConfigValue("IntraCell_k2").toDouble();
double intraLinkEffectiveRange = globalConfigVars.getConfigValue(
"IntraEffectRange").toDouble();
mechPara_M.sceIntraParaCPU_M[0] = U0_Intra;
mechPara_M.sceIntraParaCPU_M[1] = V0_Intra;
mechPara_M.sceIntraParaCPU_M[2] = k1_Intra;
mechPara_M.sceIntraParaCPU_M[3] = k2_Intra;
mechPara_M.sceIntraParaCPU_M[4] = intraLinkEffectiveRange;
//////////////////////
//// Block 4 /////////
//////////////////////
double U0_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_U0_Div").toDouble();
double V0_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_V0_Div").toDouble();
double k1_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_k1_Div").toDouble();
double k2_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_k2_Div").toDouble();
double intraDivEffectiveRange = globalConfigVars.getConfigValue(
"IntraDivEffectRange").toDouble();
mechPara_M.sceIntraParaDivCPU_M[0] = U0_Intra_Div;
mechPara_M.sceIntraParaDivCPU_M[1] = V0_Intra_Div;
mechPara_M.sceIntraParaDivCPU_M[2] = k1_Intra_Div;
mechPara_M.sceIntraParaDivCPU_M[3] = k2_Intra_Div;
mechPara_M.sceIntraParaDivCPU_M[4] = intraDivEffectiveRange;
//////////////////////
//// Block 5 /////////
//////////////////////
double bondAdhCriLen =
globalConfigVars.getConfigValue("BondAdhCriLen").toDouble();
mechPara_M.bondAdhCriLenCPU_M = bondAdhCriLen;
double bondStiff = globalConfigVars.getConfigValue("BondStiff").toDouble();
mechPara_M.bondStiffCPU_M = bondStiff;
//Ali June 16
double bondStiff_Mitotic = globalConfigVars.getConfigValue("BondStiff_Mitotic").toDouble();
mechPara_M.bondStiffCPU_Mitotic = bondStiff_Mitotic;
double growthPrgrCriVal = globalConfigVars.getConfigValue(
"GrowthPrgrCriVal").toDouble();
mechPara_M.growthPrgrCriValCPU_M = growthPrgrCriVal;
double maxAdhBondLen =
globalConfigVars.getConfigValue("MaxAdhBondLen").toDouble();
mechPara_M.maxAdhBondLenCPU_M = maxAdhBondLen;
double minAdhBondLen =
globalConfigVars.getConfigValue("MinAdhBondLen").toDouble();
mechPara_M.minAdhBondLenCPU_M = minAdhBondLen;
}
void SceNodes::debugNAN() {
uint totalActiveNodeC = allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
double res = thrust::reduce(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocX.begin() + totalActiveNodeC);
if (isnan(res)) {
std::cout << "fatal error! NAN found" << std::endl;
std::cout.flush();
exit(0);
}
}
std::vector<std::pair<uint, uint> > SceNodes::obtainPossibleNeighborPairs_M() {
std::vector<std::pair<uint, uint> > result;
thrust::host_vector<uint> keyBeginCPU = auxVecs.keyBegin;
thrust::host_vector<uint> keyEndCPU = auxVecs.keyEnd;
thrust::host_vector<uint> bucketKeysCPU = auxVecs.bucketKeys;
thrust::host_vector<uint> bucketValuesCPU = auxVecs.bucketValues;
thrust::host_vector<uint> bucketValuesExtendedCPU =
auxVecs.bucketValuesIncludingNeighbor;
uint iterationCounter = 0;
uint maxNodePerCell = allocPara_M.maxAllNodePerCell;
uint offSet = allocPara_M.bdryNodeCount;
uint memThreshold = allocPara_M.maxMembrNodePerCell;
int size = bucketKeysCPU.size();
int node1, node2, cellRank1, cellRank2, nodeRank1, nodeRank2;
for (int i = 0; i < size; i++) {
for (int j = keyBeginCPU[bucketKeysCPU[i]];
j < keyEndCPU[bucketKeysCPU[i]]; j++) {
node1 = bucketValuesCPU[i];
node2 = bucketValuesExtendedCPU[j];
if (node1 >= node2) {
continue;
} else {
cellRank1 = (node1 - offSet) / maxNodePerCell;
nodeRank1 = (node1 - offSet) % maxNodePerCell;
cellRank2 = (node2 - offSet) / maxNodePerCell;
nodeRank2 = (node2 - offSet) % maxNodePerCell;
if (nodeRank1 >= memThreshold && nodeRank2 >= memThreshold
&& cellRank1 == cellRank2) {
result.push_back(std::make_pair<uint, uint>(node1, node2));
}
}
iterationCounter++;
}
}
return result;
}
void SceNodes::initValues(std::vector<CVector>& initBdryCellNodePos,
std::vector<CVector>& initProfileNodePos,
std::vector<CVector>& initCartNodePos,
std::vector<CVector>& initECMNodePos,
std::vector<CVector>& initFNMCellNodePos,
std::vector<CVector>& initMXCellNodePos) {
uint FNMNodeCount = initFNMCellNodePos.size();
uint MXNodeCount = initMXCellNodePos.size();
uint beginAddressOfProfile = allocPara.startPosProfile;
uint beginAddressOfCart = allocPara.startPosCart;
	// find the beginning position of ECM.
uint beginAddressOfECM = allocPara.startPosECM;
	// find the beginning position of FNM cells.
uint beginAddressOfFNM = allocPara.startPosCells;
	// find the beginning position of MX cells.
uint beginAddressOfMX = beginAddressOfFNM + FNMNodeCount;
std::vector<double> initBdryCellNodePosX = getArrayXComp(
initBdryCellNodePos);
thrust::copy(initBdryCellNodePosX.begin(), initBdryCellNodePosX.end(),
infoVecs.nodeLocX.begin());
std::vector<double> initBdryCellNodePosY = getArrayYComp(
initBdryCellNodePos);
thrust::copy(initBdryCellNodePosY.begin(), initBdryCellNodePosY.end(),
infoVecs.nodeLocY.begin());
// copy x and y position of nodes of Profile to actual node position.
std::vector<double> initProfileNodePosX = getArrayXComp(initProfileNodePos);
thrust::copy(initProfileNodePosX.begin(), initProfileNodePosX.end(),
infoVecs.nodeLocX.begin() + beginAddressOfProfile);
std::vector<double> initProfileNodePosY = getArrayYComp(initProfileNodePos);
thrust::copy(initProfileNodePosY.begin(), initProfileNodePosY.end(),
infoVecs.nodeLocY.begin() + beginAddressOfProfile);
	// copy x and y position of nodes of Cart to actual node position.
std::vector<double> initCartNodePosX = getArrayXComp(initCartNodePos);
thrust::copy(initCartNodePosX.begin(), initCartNodePosX.end(),
infoVecs.nodeLocX.begin() + beginAddressOfCart);
std::vector<double> initCartNodePosY = getArrayYComp(initCartNodePos);
thrust::copy(initCartNodePosY.begin(), initCartNodePosY.end(),
infoVecs.nodeLocY.begin() + beginAddressOfCart);
// copy x and y position of nodes of ECM to actual node position.
std::vector<double> initECMNodePosX = getArrayXComp(initECMNodePos);
thrust::copy(initECMNodePosX.begin(), initECMNodePosX.end(),
infoVecs.nodeLocX.begin() + beginAddressOfECM);
std::vector<double> initECMNodePosY = getArrayYComp(initECMNodePos);
thrust::copy(initECMNodePosY.begin(), initECMNodePosY.end(),
infoVecs.nodeLocY.begin() + beginAddressOfECM);
for (int i = 0; i < initECMNodePosX.size(); i++) {
assert(infoVecs.nodeLocX[i + beginAddressOfECM] == initECMNodePosX[i]);
assert(!isnan(initECMNodePosX[i]));
}
// copy x and y position of nodes of FNM cells to actual node position.
std::vector<double> initFNMCellNodePosX = getArrayXComp(initFNMCellNodePos);
thrust::copy(initFNMCellNodePosX.begin(), initFNMCellNodePosX.end(),
infoVecs.nodeLocX.begin() + beginAddressOfFNM);
std::vector<double> initFNMCellNodePosY = getArrayYComp(initFNMCellNodePos);
thrust::copy(initFNMCellNodePosY.begin(), initFNMCellNodePosY.end(),
infoVecs.nodeLocY.begin() + beginAddressOfFNM);
thrust::fill(infoVecs.nodeCellType.begin() + beginAddressOfFNM,
infoVecs.nodeCellType.begin() + beginAddressOfMX, FNM);
// copy x and y position of nodes of MX cells to actual node position.
std::vector<double> initMXCellNodePosX = getArrayXComp(initMXCellNodePos);
thrust::copy(initMXCellNodePosX.begin(), initMXCellNodePosX.end(),
infoVecs.nodeLocX.begin() + beginAddressOfMX);
std::vector<double> initMXCellNodePosY = getArrayYComp(initMXCellNodePos);
thrust::copy(initMXCellNodePosY.begin(), initMXCellNodePosY.end(),
infoVecs.nodeLocY.begin() + beginAddressOfMX);
thrust::fill(infoVecs.nodeCellType.begin() + beginAddressOfMX,
infoVecs.nodeCellType.begin() + beginAddressOfMX + MXNodeCount, MX);
}
void SceNodes::initValues_M(std::vector<bool>& initIsActive,
std::vector<CVector>& initCellNodePos,
std::vector<SceNodeType>& nodeTypes) {
std::vector<double> initCellNodePosX = getArrayXComp(initCellNodePos);
std::vector<double> initCellNodePosY = getArrayYComp(initCellNodePos);
thrust::copy(initCellNodePosX.begin(), initCellNodePosX.end(),
infoVecs.nodeLocX.begin() + allocPara_M.bdryNodeCount);
thrust::copy(initCellNodePosY.begin(), initCellNodePosY.end(),
infoVecs.nodeLocY.begin() + allocPara_M.bdryNodeCount);
thrust::copy(nodeTypes.begin(), nodeTypes.end(),
infoVecs.nodeCellType.begin() + allocPara_M.bdryNodeCount);
thrust::copy(initIsActive.begin(), initIsActive.end(),
infoVecs.nodeIsActive.begin() + allocPara_M.bdryNodeCount);
}
VtkAnimationData SceNodes::obtainAnimationData(AnimationCriteria aniCri) {
VtkAnimationData vtkData;
std::vector<std::pair<uint, uint> > pairs = obtainPossibleNeighborPairs();
cout << "size of potential pairs = " << pairs.size() << endl;
// unordered_map is more efficient than map, but it is a c++ 11 feature
// and c++ 11 seems to be incompatible with Thrust.
IndexMap locIndexToAniIndexMap;
	// We don't have to copy the entire nodeLocX array;
	// copying only the first half would be sufficient.
thrust::host_vector<double> hostTmpVectorLocX = infoVecs.nodeLocX;
thrust::host_vector<double> hostTmpVectorLocY = infoVecs.nodeLocY;
thrust::host_vector<double> hostTmpVectorLocZ = infoVecs.nodeLocZ;
thrust::host_vector<double> hostTmpVectorForceX;
thrust::host_vector<double> hostTmpVectorForceY;
thrust::host_vector<double> hostTmpVectorForceZ;
thrust::host_vector<double> hostTmpVectorVelVal;
assert(hostTmpVectorLocX.size() == hostTmpVectorLocY.size());
assert(hostTmpVectorLocY.size() == hostTmpVectorLocZ.size());
thrust::host_vector<SceNodeType> hostTmpVectorNodeType =
infoVecs.nodeCellType;
thrust::host_vector<uint> hostTmpVectorNodeRank = infoVecs.nodeCellRank;
thrust::host_vector<double> hostTmpVectorNodeStress;
if (aniCri.animationType != CellType) {
hostTmpVectorForceX = infoVecs.nodeInterForceX;
hostTmpVectorForceY = infoVecs.nodeInterForceY;
hostTmpVectorForceZ = infoVecs.nodeInterForceZ;
assert(hostTmpVectorForceX.size() == hostTmpVectorLocX.size());
assert(hostTmpVectorForceX.size() == hostTmpVectorForceY.size());
assert(hostTmpVectorForceX.size() == hostTmpVectorForceZ.size());
uint vecSize = hostTmpVectorForceX.size();
hostTmpVectorVelVal.resize(vecSize);
for (uint i = 0; i < vecSize; i++) {
hostTmpVectorVelVal[i] = sqrt(
hostTmpVectorForceX[i] * hostTmpVectorForceX[i]
+ hostTmpVectorForceY[i] * hostTmpVectorForceY[i]
+ hostTmpVectorForceZ[i] * hostTmpVectorForceZ[i]);
}
}
if (aniCri.animationType == Force) {
vtkData.isArrowIncluded = true;
} else {
vtkData.isArrowIncluded = false;
}
uint curIndex = 0;
for (uint i = 0; i < pairs.size(); i++) {
uint node1Index = pairs[i].first;
uint node2Index = pairs[i].second;
double node1X = hostTmpVectorLocX[node1Index];
double node1Y = hostTmpVectorLocY[node1Index];
double node1Z = hostTmpVectorLocZ[node1Index];
SceNodeType node1T = hostTmpVectorNodeType[node1Index];
uint node1R = hostTmpVectorNodeRank[node1Index];
double node2X = hostTmpVectorLocX[node2Index];
double node2Y = hostTmpVectorLocY[node2Index];
double node2Z = hostTmpVectorLocZ[node2Index];
SceNodeType node2T = hostTmpVectorNodeType[node2Index];
uint node2R = hostTmpVectorNodeRank[node2Index];
if (aniCri.isPairQualify(node1Index, node2Index, node1X, node1Y, node1Z,
node1T, node1R, node2X, node2Y, node2Z, node2T, node2R)) {
IndexMap::iterator it = locIndexToAniIndexMap.find(pairs[i].first);
if (it == locIndexToAniIndexMap.end()) {
locIndexToAniIndexMap.insert(
std::pair<uint, uint>(pairs[i].first, curIndex));
curIndex++;
PointAniData ptAniData;
if (aniCri.animationType == ForceAbsVal) {
ptAniData.colorScale = hostTmpVectorVelVal[node1Index];
} else if (aniCri.animationType == Force) {
ptAniData.colorScale = hostTmpVectorVelVal[node1Index];
if (hostTmpVectorVelVal[node1Index] > aniCri.threshold) {
ptAniData.dir.x = hostTmpVectorForceX[node1Index]
/ hostTmpVectorVelVal[node1Index]
* aniCri.arrowLength;
ptAniData.dir.y = hostTmpVectorForceY[node1Index]
/ hostTmpVectorVelVal[node1Index]
* aniCri.arrowLength;
ptAniData.dir.z = hostTmpVectorForceZ[node1Index]
/ hostTmpVectorVelVal[node1Index]
* aniCri.arrowLength;
} else {
ptAniData.dir.x = 0;
ptAniData.dir.y = 0;
ptAniData.dir.z = 0;
}
} else {
ptAniData.colorScale = nodeTypeToScale(node1T);
}
ptAniData.pos = CVector(node1X, node1Y, node1Z);
vtkData.pointsAniData.push_back(ptAniData);
}
it = locIndexToAniIndexMap.find(pairs[i].second);
if (it == locIndexToAniIndexMap.end()) {
locIndexToAniIndexMap.insert(
std::pair<uint, uint>(pairs[i].second, curIndex));
curIndex++;
PointAniData ptAniData;
if (aniCri.animationType == ForceAbsVal) {
ptAniData.colorScale = hostTmpVectorVelVal[node2Index];
} else if (aniCri.animationType == Force) {
ptAniData.colorScale = hostTmpVectorVelVal[node2Index];
if (hostTmpVectorVelVal[node2Index] > aniCri.threshold) {
ptAniData.dir.x = hostTmpVectorForceX[node2Index]
/ hostTmpVectorVelVal[node2Index]
* aniCri.arrowLength;
ptAniData.dir.y = hostTmpVectorForceY[node2Index]
/ hostTmpVectorVelVal[node2Index]
* aniCri.arrowLength;
ptAniData.dir.z = hostTmpVectorForceZ[node2Index]
/ hostTmpVectorVelVal[node2Index]
* aniCri.arrowLength;
} else {
ptAniData.dir.x = 0;
ptAniData.dir.y = 0;
ptAniData.dir.z = 0;
}
} else {
ptAniData.colorScale = nodeTypeToScale(node2T);
}
ptAniData.pos = CVector(node2X, node2Y, node2Z);
vtkData.pointsAniData.push_back(ptAniData);
}
it = locIndexToAniIndexMap.find(pairs[i].first);
uint aniIndex1 = it->second;
it = locIndexToAniIndexMap.find(pairs[i].second);
uint aniIndex2 = it->second;
LinkAniData linkData;
linkData.node1Index = aniIndex1;
linkData.node2Index = aniIndex2;
vtkData.linksAniData.push_back(linkData);
}
}
uint profileStartIndex = allocPara.startPosProfile;
uint profileEndIndex = profileStartIndex
+ allocPara.currentActiveProfileNodeCount;
for (uint i = profileStartIndex; i < profileEndIndex; i++) {
PointAniData ptAniData;
ptAniData.pos = CVector(hostTmpVectorLocX[i], hostTmpVectorLocY[i],
hostTmpVectorLocZ[i]);
if (aniCri.animationType == ForceAbsVal) {
ptAniData.colorScale = hostTmpVectorVelVal[i];
} else if (aniCri.animationType == Force) {
ptAniData.colorScale = hostTmpVectorVelVal[i];
if (hostTmpVectorVelVal[i] > aniCri.threshold) {
ptAniData.dir.x = hostTmpVectorForceX[i]
/ hostTmpVectorVelVal[i] * aniCri.arrowLength;
ptAniData.dir.y = hostTmpVectorForceY[i]
/ hostTmpVectorVelVal[i] * aniCri.arrowLength;
ptAniData.dir.z = hostTmpVectorForceZ[i]
/ hostTmpVectorVelVal[i] * aniCri.arrowLength;
}
} else {
ptAniData.colorScale = nodeTypeToScale(hostTmpVectorNodeType[i]);
}
vtkData.pointsAniData.push_back(ptAniData);
LinkAniData linkData;
linkData.node1Index = curIndex;
linkData.node2Index = curIndex + 1;
if (i != profileEndIndex - 1) {
vtkData.linksAniData.push_back(linkData);
}
curIndex++;
}
uint cartStartIndex = allocPara.startPosCart;
uint cartEndIndex = cartStartIndex + allocPara.maxCartNodeCount;
for (uint i = cartStartIndex; i < cartEndIndex; i++) {
bool isActive = infoVecs.nodeIsActive[i];
if (!isActive) {
continue;
}
PointAniData ptAniData;
ptAniData.pos = CVector(hostTmpVectorLocX[i], hostTmpVectorLocY[i],
hostTmpVectorLocZ[i]);
if (aniCri.animationType == ForceAbsVal) {
ptAniData.colorScale = hostTmpVectorVelVal[i];
} else if (aniCri.animationType == Force) {
ptAniData.colorScale = hostTmpVectorVelVal[i];
if (hostTmpVectorVelVal[i] > aniCri.threshold) {
ptAniData.dir.x = hostTmpVectorForceX[i]
/ hostTmpVectorVelVal[i] * aniCri.arrowLength;
ptAniData.dir.y = hostTmpVectorForceY[i]
/ hostTmpVectorVelVal[i] * aniCri.arrowLength;
ptAniData.dir.z = hostTmpVectorForceZ[i]
/ hostTmpVectorVelVal[i] * aniCri.arrowLength;
}
} else {
ptAniData.colorScale = nodeTypeToScale(hostTmpVectorNodeType[i]);
}
vtkData.pointsAniData.push_back(ptAniData);
bool isNextActive;
if (i == cartEndIndex - 1) {
isNextActive = false;
} else {
isNextActive = infoVecs.nodeIsActive[i + 1];
}
if (isNextActive) {
LinkAniData linkData;
linkData.node1Index = curIndex;
linkData.node2Index = curIndex + 1;
vtkData.linksAniData.push_back(linkData);
}
curIndex++;
}
return vtkData;
}
// TODO
VtkAnimationData SceNodes::obtainAnimationData_M(AnimationCriteria aniCri) {
VtkAnimationData vtkData;
std::vector<std::pair<uint, uint> > pairs = obtainPossibleNeighborPairs_M();
cout << "size of potential pairs = " << pairs.size() << endl;
// unordered_map is more efficient than map, but it is a c++ 11 feature
// and c++ 11 seems to be incompatible with Thrust.
IndexMap locIndexToAniIndexMap;
	// We don't have to copy the entire nodeLocX array;
	// copying only the first half would be sufficient.
thrust::host_vector<double> hostTmpVectorLocX = infoVecs.nodeLocX;
thrust::host_vector<double> hostTmpVectorLocY = infoVecs.nodeLocY;
thrust::host_vector<bool> hostIsActiveVec = infoVecs.nodeIsActive;
thrust::host_vector<int> hostBondVec = infoVecs.nodeAdhereIndex;
thrust::host_vector<double> hostMembrTenMag = infoVecs.membrTensionMag;
thrust::host_vector<SceNodeType> hostTmpVectorNodeType =
infoVecs.nodeCellType;
uint activeCellCount = allocPara_M.currentActiveCellCount;
uint maxNodePerCell = allocPara_M.maxAllNodePerCell;
uint maxMemNodePerCell = allocPara_M.maxMembrNodePerCell;
uint beginIndx = allocPara_M.bdryNodeCount;
//uint endIndx = beginIndx + activeCellCount * maxNodePerCell;
//uint cellRank1, nodeRank1, cellRank2, nodeRank2;
uint index1;
int index2;
std::vector<BondInfo> bondInfoVec;
for (uint i = 0; i < activeCellCount; i++) {
for (uint j = 0; j < maxMemNodePerCell; j++) {
index1 = beginIndx + i * maxNodePerCell + j;
if (hostIsActiveVec[index1] == true) {
index2 = hostBondVec[index1];
if (index2 != -1 && static_cast<uint>(index2) > index1) {
BondInfo bond;
bond.cellRank1 = i;
bond.pos1 = CVector(hostTmpVectorLocX[index1],
hostTmpVectorLocY[index1], 0);
bond.cellRank2 = (index2 - beginIndx) / maxNodePerCell;
bond.pos2 = CVector(hostTmpVectorLocX[index2],
hostTmpVectorLocY[index2], 0);
bondInfoVec.push_back(bond);
}
}
}
}
vtkData.bondsInfo = bondInfoVec;
uint curIndex = 0;
for (uint i = 0; i < pairs.size(); i++) {
uint node1Index = pairs[i].first;
uint node2Index = pairs[i].second;
double node1X = hostTmpVectorLocX[node1Index];
double node1Y = hostTmpVectorLocY[node1Index];
double node2X = hostTmpVectorLocX[node2Index];
double node2Y = hostTmpVectorLocY[node2Index];
if (aniCri.isPairQualify_M(node1X, node1Y, node2X, node2Y)) {
IndexMap::iterator it = locIndexToAniIndexMap.find(pairs[i].first);
if (it == locIndexToAniIndexMap.end()) {
locIndexToAniIndexMap.insert(
std::pair<uint, uint>(pairs[i].first, curIndex));
curIndex++;
PointAniData ptAniData;
//ptAniData.colorScale = nodeTypeToScale(
// hostTmpVectorNodeType[node1Index]);
ptAniData.colorScale = -1;
ptAniData.colorScale2 = -1;//AAMIRI
ptAniData.pos = CVector(node1X, node1Y, 0);
vtkData.pointsAniData.push_back(ptAniData);
}
it = locIndexToAniIndexMap.find(pairs[i].second);
if (it == locIndexToAniIndexMap.end()) {
locIndexToAniIndexMap.insert(
std::pair<uint, uint>(pairs[i].second, curIndex));
curIndex++;
PointAniData ptAniData;
//ptAniData.colorScale = nodeTypeToScale(
// hostTmpVectorNodeType[node1Index]);
ptAniData.colorScale = -1;
ptAniData.colorScale2 = -1;//AAMIRI
ptAniData.pos = CVector(node2X, node2Y, 0);
vtkData.pointsAniData.push_back(ptAniData);
}
it = locIndexToAniIndexMap.find(pairs[i].first);
uint aniIndex1 = it->second;
it = locIndexToAniIndexMap.find(pairs[i].second);
uint aniIndex2 = it->second;
LinkAniData linkData;
linkData.node1Index = aniIndex1;
linkData.node2Index = aniIndex2;
vtkData.linksAniData.push_back(linkData);
}
}
return vtkData;
}
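/*
 * Note on the bucket-bound searches below: bucketKeysExpanded is sorted by
 * bucket key, so for every bucket id b, lower_bound/upper_bound give the
 * half-open range [keyBegin[b], keyEnd[b]) of expanded entries that fall into
 * bucket b. Small worked example (illustrative numbers): if the sorted keys
 * are {0,0,1,3,3,3} and totalBucketCount = 4, then keyBegin = {0,2,3,3} and
 * keyEnd = {2,3,3,6}; bucket 2 is empty because keyBegin[2] == keyEnd[2].
 */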
void SceNodes::findBucketBounds() {
thrust::counting_iterator<unsigned int> search_begin(0);
thrust::lower_bound(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.end(), search_begin,
search_begin + domainPara.totalBucketCount,
auxVecs.keyBegin.begin());
thrust::upper_bound(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.end(), search_begin,
search_begin + domainPara.totalBucketCount, auxVecs.keyEnd.begin());
}
void SceNodes::findBucketBounds_M() {
thrust::counting_iterator<uint> search_begin(0);
thrust::lower_bound(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExtProc_M, search_begin,
search_begin + domainPara.totalBucketCount,
auxVecs.keyBegin.begin());
thrust::upper_bound(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExtProc_M, search_begin,
search_begin + domainPara.totalBucketCount, auxVecs.keyEnd.begin());
}
void SceNodes::findBucketBounds3D() {
thrust::counting_iterator<uint> search_begin(0);
thrust::lower_bound(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExtProc_M, search_begin,
search_begin + domainPara.totalBucketCount,
auxVecs.keyBegin.begin());
thrust::upper_bound(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExtProc_M, search_begin,
search_begin + domainPara.totalBucketCount, auxVecs.keyEnd.begin());
}
void SceNodes::prepareSceForceComputation() {
buildBuckets2D();
extendBuckets2D();
findBucketBounds();
}
void SceNodes::prepareSceForceComputation_M() {
buildBuckets2D_M();
extendBuckets2D_M();
findBucketBounds_M();
}
void SceNodes::prepareSceForceComputation3D() {
buildBuckets3D();
extendBuckets3D();
findBucketBounds3D();
}
void SceNodes::addNewlyDividedCells(
thrust::device_vector<double> &nodeLocXNewCell,
thrust::device_vector<double> &nodeLocYNewCell,
thrust::device_vector<double> &nodeLocZNewCell,
thrust::device_vector<bool> &nodeIsActiveNewCell,
thrust::device_vector<SceNodeType> &nodeCellTypeNewCell) {
// data validation
uint nodesSize = nodeLocXNewCell.size();
assert(nodesSize % allocPara.maxNodeOfOneCell == 0);
uint addCellCount = nodesSize / allocPara.maxNodeOfOneCell;
// position where we will add the newly divided cells.
uint shiftStartPosNewCell = allocPara.startPosCells
+ allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell;
thrust::copy(
thrust::make_zip_iterator(
thrust::make_tuple(nodeLocXNewCell.begin(),
nodeLocYNewCell.begin(), nodeLocZNewCell.begin(),
nodeIsActiveNewCell.begin(),
nodeCellTypeNewCell.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(nodeLocXNewCell.end(),
nodeLocYNewCell.end(), nodeLocZNewCell.end(),
nodeIsActiveNewCell.end(),
nodeCellTypeNewCell.end())),
thrust::make_zip_iterator(
thrust::make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(),
infoVecs.nodeCellType.begin()))
+ shiftStartPosNewCell);
// total number of cells has increased.
allocPara.currentActiveCellCount = allocPara.currentActiveCellCount
+ addCellCount;
}
void SceNodes::buildBuckets2D() {
int totalActiveNodes;
if (controlPara.simuType != Disc_M) {
totalActiveNodes = allocPara.startPosCells
+ allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell;
} else {
totalActiveNodes = allocPara_M.bdryNodeCount
+ allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
}
auxVecs.bucketKeys.resize(totalActiveNodes);
auxVecs.bucketValues.resize(totalActiveNodes);
thrust::counting_iterator<uint> countingIterBegin(0);
thrust::counting_iterator<uint> countingIterEnd(totalActiveNodes);
// takes a counting iterator and the node coordinates and
// returns a tuple of bucket keys and values, i.e.
// transforms the points into their bucket indices
thrust::transform(
make_zip_iterator(
make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(), countingIterBegin)),
make_zip_iterator(
make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(), countingIterBegin))
+ totalActiveNodes,
make_zip_iterator(
make_tuple(auxVecs.bucketKeys.begin(),
auxVecs.bucketValues.begin())),
pointToBucketIndex2D(domainPara.minX, domainPara.maxX,
domainPara.minY, domainPara.maxY, domainPara.gridSpacing));
// sort the points by their bucket index
thrust::sort_by_key(auxVecs.bucketKeys.begin(), auxVecs.bucketKeys.end(),
auxVecs.bucketValues.begin());
// for inactive nodes a key value of UINT_MAX is returned;
// we need to remove those keys along with their values.
int numberOfOutOfRange = thrust::count(auxVecs.bucketKeys.begin(),
auxVecs.bucketKeys.end(), UINT_MAX);
auxVecs.bucketKeys.erase(auxVecs.bucketKeys.end() - numberOfOutOfRange,
auxVecs.bucketKeys.end());
auxVecs.bucketValues.erase(auxVecs.bucketValues.end() - numberOfOutOfRange,
auxVecs.bucketValues.end());
}
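/*
 * A minimal sketch of the bucket-index mapping assumed by the buildBuckets
 * functions (the actual logic lives in the pointToBucketIndex2D /
 * BucketIndexer3D functors, which are defined elsewhere):
 *   xIdx = (uint)((x - minX) / gridSpacing);
 *   yIdx = (uint)((y - minY) / gridSpacing);
 *   key  = yIdx * XBucketSize + xIdx;   // UINT_MAX for inactive nodes
 * The exact formula may differ; this only documents the intent of the
 * transform calls in these functions.
 */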
void SceNodes::buildBuckets2D_M() {
int totalActiveNodes = allocPara_M.bdryNodeCount
+ allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
thrust::counting_iterator<uint> iBegin(0);
// takes a counting iterator and the node coordinates and
// returns a tuple of bucket keys and values, i.e.
// transforms the points into their bucket indices
thrust::transform(
make_zip_iterator(
make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(), iBegin)),
make_zip_iterator(
make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(), iBegin))
+ totalActiveNodes,
make_zip_iterator(
make_tuple(auxVecs.bucketKeys.begin(),
auxVecs.bucketValues.begin())),
pointToBucketIndex2D(domainPara.minX, domainPara.maxX,
domainPara.minY, domainPara.maxY, domainPara.gridSpacing));
// sort the points by their bucket index
thrust::sort_by_key(auxVecs.bucketKeys.begin(),
auxVecs.bucketKeys.begin() + totalActiveNodes,
auxVecs.bucketValues.begin());
// for inactive nodes a key value of UINT_MAX is returned;
// we need to remove those keys along with their values.
int numberOfOutOfRange = thrust::count(auxVecs.bucketKeys.begin(),
auxVecs.bucketKeys.begin() + totalActiveNodes, UINT_MAX);
endIndx_M = totalActiveNodes - numberOfOutOfRange;
}
void SceNodes::buildBuckets3D() {
int totalActiveNodes = allocPara_M.bdryNodeCount
+ allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
thrust::counting_iterator<uint> iBegin(0);
// takes a counting iterator and the node coordinates and
// returns a tuple of bucket keys and values, i.e.
// transforms the points into their bucket indices
thrust::transform(
make_zip_iterator(
make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(), iBegin)),
make_zip_iterator(
make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(), iBegin))
+ totalActiveNodes,
make_zip_iterator(
make_tuple(auxVecs.bucketKeys.begin(),
auxVecs.bucketValues.begin())),
BucketIndexer3D(domainPara.minX, domainPara.maxX, domainPara.minY,
domainPara.maxY, domainPara.minZ, domainPara.maxZ,
domainPara.gridSpacing));
// sort the points by their bucket index
thrust::sort_by_key(auxVecs.bucketKeys.begin(),
auxVecs.bucketKeys.begin() + totalActiveNodes,
auxVecs.bucketValues.begin());
// for inactive nodes a key value of UINT_MAX is returned;
// we need to remove those keys along with their values.
int numberOfOutOfRange = thrust::count(auxVecs.bucketKeys.begin(),
auxVecs.bucketKeys.begin() + totalActiveNodes, UINT_MAX);
endIndx_M = totalActiveNodes - numberOfOutOfRange;
}
__device__
double computeDist(double &xPos, double &yPos, double &zPos, double &xPos2,
double &yPos2, double &zPos2) {
return sqrt(
(xPos - xPos2) * (xPos - xPos2) + (yPos - yPos2) * (yPos - yPos2)
+ (zPos - zPos2) * (zPos - zPos2));
}
__device__
double computeDist2D(double &xPos, double &yPos, double &xPos2, double &yPos2) {
return sqrt(
(xPos - xPos2) * (xPos - xPos2) + (yPos - yPos2) * (yPos - yPos2));
}
__device__
void calculateAndAddECMForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceECMPara[4]) {
forceValue = 0;
} else {
forceValue = -sceECMPara[0] / sceECMPara[2]
* exp(-linkLength / sceECMPara[2])
+ sceECMPara[1] / sceECMPara[3]
* exp(-linkLength / sceECMPara[3]);
if (forceValue > 0) {
//forceValue = 0;
forceValue = forceValue * 0.3;
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
__device__
void calculateAndAddProfileForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
forceValue = -sceProfilePara[5] * (linkLength - sceProfilePara[6]);
if (linkLength > 1.0e-12) {
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
}
__device__
void calculateAndAddIntraForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue;
if (linkLength > sceIntraPara[4]) {
forceValue = 0;
} else {
forceValue = -sceIntraPara[0] / sceIntraPara[2]
* exp(-linkLength / sceIntraPara[2])
+ sceIntraPara[1] / sceIntraPara[3]
* exp(-linkLength / sceIntraPara[3]);
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
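/*
 * The intra-cell force above follows the two-exponential SCE form
 *   F(r) = -U0/xi0 * exp(-r/xi0) + U1/xi1 * exp(-r/xi1),
 * where (U0, U1, xi0, xi1) = sceIntraPara[0..3] and sceIntraPara[4] is the
 * cutoff distance. A negative F(r) pushes the two nodes apart (short range);
 * a positive F(r) pulls them together (longer range, below the cutoff).
 */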
__device__
void calAndAddIntraForceDiv(double& xPos, double& yPos, double& zPos,
double& xPos2, double& yPos2, double& zPos2, double& growPro,
double& xRes, double& yRes, double& zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue;
if (linkLength > sceIntraPara[4]) {
forceValue = 0;
} else {
if (growPro > sceIntraParaDiv[4]) {
double intraPara0 = growPro * (sceIntraParaDiv[0])
+ (1.0 - growPro) * sceIntraPara[0];
double intraPara1 = growPro * (sceIntraParaDiv[1])
+ (1.0 - growPro) * sceIntraPara[1];
double intraPara2 = growPro * (sceIntraParaDiv[2])
+ (1.0 - growPro) * sceIntraPara[2];
double intraPara3 = growPro * (sceIntraParaDiv[3])
+ (1.0 - growPro) * sceIntraPara[3];
forceValue = -intraPara0 / intraPara2
* exp(-linkLength / intraPara2)
+ intraPara1 / intraPara3 * exp(-linkLength / intraPara3);
} else {
forceValue = -sceIntraPara[0] / sceIntraPara[2]
* exp(-linkLength / sceIntraPara[2])
+ sceIntraPara[1] / sceIntraPara[3]
* exp(-linkLength / sceIntraPara[3]);
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
__device__
void calAndAddIntraDiv_M(double& xPos, double& yPos, double& xPos2,
double& yPos2, double& growPro, double& xRes, double& yRes) {
double linkLength = computeDist2D(xPos, yPos, xPos2, yPos2);
double forceValue;
if (growPro > growthPrgrCriVal_M) {
if (linkLength > sceIntraParaDiv_M[4]) {
forceValue = 0;
} else {
double percent = (growPro - growthPrgrCriVal_M)
/ (1.0 - growthPrgrCriVal_M);
double intraPara0 = percent * (sceIntraParaDiv_M[0])
+ (1.0 - percent) * sceIntraPara_M[0];
double intraPara1 = percent * (sceIntraParaDiv_M[1])
+ (1.0 - percent) * sceIntraPara_M[1];
double intraPara2 = percent * (sceIntraParaDiv_M[2])
+ (1.0 - percent) * sceIntraPara_M[2];
double intraPara3 = percent * (sceIntraParaDiv_M[3])
+ (1.0 - percent) * sceIntraPara_M[3];
forceValue = -intraPara0 / intraPara2
* exp(-linkLength / intraPara2)
+ intraPara1 / intraPara3 * exp(-linkLength / intraPara3);
}
} else {
if (linkLength > sceIntraPara_M[4]) {
forceValue = 0;
} else {
forceValue = -sceIntraPara_M[0] / sceIntraPara_M[2]
* exp(-linkLength / sceIntraPara_M[2])
+ sceIntraPara_M[1] / sceIntraPara_M[3]
* exp(-linkLength / sceIntraPara_M[3]);
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
}
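/*
 * Worked example for the interpolation above (illustrative value for the
 * criterion, the real one comes from the configuration): with
 * growthPrgrCriVal_M = 0.9 and growPro = 0.95,
 * percent = (0.95 - 0.9) / (1.0 - 0.9) = 0.5, so every parameter is the
 * midpoint between its normal value (sceIntraPara_M) and its division value
 * (sceIntraParaDiv_M).
 */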
__device__
void calAndAddIntraB_M(double& xPos, double& yPos, double& xPos2, double& yPos2,
double& xRes, double& yRes) {
double linkLength = computeDist2D(xPos, yPos, xPos2, yPos2);
double forceValue;
if (linkLength > sceIntnlBPara_M[4]) {
forceValue = 0;
} else {
forceValue = -sceIntnlBPara_M[0] / sceIntnlBPara_M[2]
* exp(-linkLength / sceIntnlBPara_M[2])
+ sceIntnlBPara_M[1] / sceIntnlBPara_M[3]
* exp(-linkLength / sceIntnlBPara_M[3]);
}
//if (forceValue > 0) {
// forceValue = 0;
//}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
}
__device__
void calAndAddInter_M(double& xPos, double& yPos, double& xPos2, double& yPos2,
double& xRes, double& yRes) {
double linkLength = computeDist2D(xPos, yPos, xPos2, yPos2);
double forceValue;
if (linkLength > sceInterBPara_M[4]) {
forceValue = 0;
} else {
forceValue = -sceInterBPara_M[0] / sceInterBPara_M[2]
* exp(-linkLength / sceInterBPara_M[2])
+ sceInterBPara_M[1] / sceInterBPara_M[3]
* exp(-linkLength / sceInterBPara_M[3]);
if (forceValue > 0) {
forceValue = 0;
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
}
//Ali
__device__
void calAndAddInter_M2(double& xPos, double& yPos, double& xPos2, double& yPos2,
double& xRes, double& yRes) {
double linkLength = computeDist2D(xPos, yPos, xPos2, yPos2);
double forceValue;
if (linkLength > sceInterBPara_Jones_M[2]) {
forceValue = 0;
} else {
forceValue = 24 * sceInterBPara_Jones_M[0] / linkLength
* pow(sceInterBPara_Jones_M[1] / linkLength, 6)
* (1.0 - 2 * pow(sceInterBPara_Jones_M[1] / linkLength, 6));
if (forceValue > 0) {
forceValue = 0;
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
}
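/*
 * calAndAddInter_M2 implements the radial derivative of a 12-6
 * Lennard-Jones potential with epsilon = sceInterBPara_Jones_M[0] and
 * sigma = sceInterBPara_Jones_M[1]:
 *   F(r) = 24 * eps / r * (sigma/r)^6 * (1 - 2 * (sigma/r)^6).
 * In this sign convention positive values are attractive and are clipped to
 * zero, so only the repulsive branch is applied; sceInterBPara_Jones_M[2]
 * is the cutoff distance.
 */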
//Ali
__device__
void calculateAndAddInterForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceInterPara[4]) {
forceValue = 0;
} else {
forceValue = -sceInterPara[0] / sceInterPara[2]
* exp(-linkLength / sceInterPara[2])
+ sceInterPara[1] / sceInterPara[3]
* exp(-linkLength / sceInterPara[3]);
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
__device__
void calAndAddInterForceDisc(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes, double& interForceX, double& interForceY,
double& interForceZ) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceInterPara[4]) {
forceValue = 0;
} else {
forceValue = -sceInterPara[0] / sceInterPara[2]
* exp(-linkLength / sceInterPara[2])
+ sceInterPara[1] / sceInterPara[3]
* exp(-linkLength / sceInterPara[3]);
}
double fX = forceValue * (xPos2 - xPos) / linkLength;
double fY = forceValue * (yPos2 - yPos) / linkLength;
double fZ = forceValue * (zPos2 - zPos) / linkLength;
xRes = xRes + fX;
yRes = yRes + fY;
zRes = zRes + fZ;
interForceX = interForceX + fX;
interForceY = interForceY + fY;
interForceZ = interForceZ + fZ;
}
__device__
void calculateAndAddCartForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceCartPara[4]) {
forceValue = 0;
} else {
forceValue = -sceCartPara[0] / sceCartPara[2]
* exp(-linkLength / sceCartPara[2])
+ sceCartPara[1] / sceCartPara[3]
* exp(-linkLength / sceCartPara[3]);
if (linkLength > 1.0e-12) {
//double dotProduct = (xPos2 - xPos) / linkLength * cartGrowDirVec[0]
// + (yPos2 - yPos) / linkLength * cartGrowDirVec[1]
// + (zPos2 - zPos) / linkLength * cartGrowDirVec[2];
//forceValue = forceValue * dotProduct;
// this is just a temporary solution -- the direction should not be fixed.
xRes = xRes - forceValue * cartGrowDirVec[0];
yRes = yRes - forceValue * cartGrowDirVec[1];
zRes = zRes - forceValue * cartGrowDirVec[2];
//xRes = xRes + forceValue * (xPos2 - xPos);
//yRes = yRes + forceValue * (yPos2 - yPos);
//zRes = zRes + forceValue * (zPos2 - zPos);
}
if (forceValue > 0) {
//forceValue = forceValue * 0.01;
forceValue = 0;
//xRes = xRes + forceValue * (xPos2 - xPos);
//yRes = yRes + forceValue * (yPos2 - yPos);
//zRes = zRes + forceValue * (zPos2 - zPos);
}
}
}
__device__
void calculateAndAddDiffInterCellForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceInterDiffPara[4]) {
forceValue = 0;
} else {
forceValue = -sceInterDiffPara[0] / sceInterDiffPara[2]
* exp(-linkLength / sceInterDiffPara[2])
+ sceInterDiffPara[1] / sceInterDiffPara[3]
* exp(-linkLength / sceInterDiffPara[3]);
if (forceValue > 0) {
//forceValue = 0;
forceValue = forceValue * 0.2;
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
__device__
void calculateAndAddInterForceDiffType(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceInterPara[4]) {
forceValue = 0;
} else {
forceValue = -sceInterPara[0] / sceInterPara[2]
* exp(-linkLength / sceInterPara[2])
+ sceInterPara[1] / sceInterPara[3]
* exp(-linkLength / sceInterPara[3]);
if (forceValue > 0) {
//forceValue = 0;
forceValue = forceValue * 0.3;
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
__device__ bool bothNodesCellNode(uint nodeGlobalRank1, uint nodeGlobalRank2,
uint cellNodesThreshold) {
if (nodeGlobalRank1 < cellNodesThreshold
&& nodeGlobalRank2 < cellNodesThreshold) {
return true;
} else {
return false;
}
}
__device__ bool isSameCell(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if (nodeGlobalRank1 < cellNodeBeginPos
|| nodeGlobalRank2 < cellNodeBeginPos) {
return false;
}
if ((nodeGlobalRank1 - cellNodeBeginPos) / nodeCountPerCell
== (nodeGlobalRank2 - cellNodeBeginPos) / nodeCountPerCell) {
return true;
} else {
return false;
}
}
//Ali
__device__
bool Is_Lennard_Jones() {
return (sceInterBPara_Jones_On_M == 1);
}
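/*
 * Index layout assumed by the helper predicates below (consistent with the
 * "_M" allocation scheme used elsewhere in this file): node i of cell c is
 * stored at globalRank = cellNodeBeginPos_M + c * allNodeCountPerCell_M + i,
 * with membrane nodes occupying ranks 0 .. membrThreshold_M - 1 of each cell
 * and internal nodes occupying the remaining ranks.
 */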
__device__
bool isSameCell_m(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if (nodeGlobalRank1 < cellNodeBeginPos_M
|| nodeGlobalRank2 < cellNodeBeginPos_M) {
return false;
}
if ((nodeGlobalRank1 - cellNodeBeginPos_M) / allNodeCountPerCell_M
== (nodeGlobalRank2 - cellNodeBeginPos_M) / allNodeCountPerCell_M) {
return true;
} else {
return false;
}
}
__device__
bool bothInternal(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if (nodeGlobalRank1 < cellNodeBeginPos_M
|| nodeGlobalRank2 < cellNodeBeginPos_M) {
return false;
}
uint nodeRank1 = (nodeGlobalRank1 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
uint nodeRank2 = (nodeGlobalRank2 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
if (nodeRank1 >= membrThreshold_M && nodeRank2 >= membrThreshold_M) {
return true;
} else {
return false;
}
}
__device__
bool bothMembr(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if (nodeGlobalRank1 < cellNodeBeginPos_M
|| nodeGlobalRank2 < cellNodeBeginPos_M) {
return false;
}
uint nodeRank1 = (nodeGlobalRank1 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
uint nodeRank2 = (nodeGlobalRank2 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
if (nodeRank1 < membrThreshold_M && nodeRank2 < membrThreshold_M) {
return true;
} else {
return false;
}
}
__device__
bool bothMembrDiffCell(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if (nodeGlobalRank1 < cellNodeBeginPos_M
|| nodeGlobalRank2 < cellNodeBeginPos_M) {
return false;
}
uint cellRank1 = (nodeGlobalRank1 - cellNodeBeginPos_M)
/ allNodeCountPerCell_M;
uint cellRank2 = (nodeGlobalRank2 - cellNodeBeginPos_M)
/ allNodeCountPerCell_M;
if (cellRank1 == cellRank2) {
return false;
}
uint nodeRank1 = (nodeGlobalRank1 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
uint nodeRank2 = (nodeGlobalRank2 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
if (nodeRank1 < membrThreshold_M && nodeRank2 < membrThreshold_M) {
return true;
} else {
return false;
}
}
//AAMIRI
/*
__device__
bool isNodeOnMembrane(uint nodeGlobalRank) {
uint nodeRank = (nodeGlobalRank - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
if (nodeGlobalRank >= cellNodeBeginPos_M && nodeRank < membrThreshold_M){
return true;
} else{
return false;
}
}
*/
__device__
bool sameCellMemIntnl(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if (nodeGlobalRank1 < cellNodeBeginPos_M
|| nodeGlobalRank2 < cellNodeBeginPos_M) {
return false;
}
uint cellRank1 = (nodeGlobalRank1 - cellNodeBeginPos_M)
/ allNodeCountPerCell_M;
uint cellRank2 = (nodeGlobalRank2 - cellNodeBeginPos_M)
/ allNodeCountPerCell_M;
if (cellRank1 != cellRank2) {
return false;
}
uint nodeRank1 = (nodeGlobalRank1 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
uint nodeRank2 = (nodeGlobalRank2 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
if ((nodeRank1 < membrThreshold_M && nodeRank2 >= membrThreshold_M)
|| (nodeRank2 < membrThreshold_M && nodeRank1 >= membrThreshold_M)) {
return true;
} else {
return false;
}
}
__device__ bool isSameECM(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if ((nodeGlobalRank1 - ECMbeginPos) / nodeCountPerECM
== (nodeGlobalRank2 - ECMbeginPos) / nodeCountPerECM) {
return true;
} else {
return false;
}
}
__device__ bool isNeighborECMNodes(uint nodeGlobalRank1, uint nodeGlobalRank2) {
// this means that two nodes are from the same ECM
if ((nodeGlobalRank1 - ECMbeginPos) / nodeCountPerECM
== (nodeGlobalRank2 - ECMbeginPos) / nodeCountPerECM) {
// this means that the two nodes are adjacent in index;
// the two-branch check below is needed because the indices are unsigned.
if ((nodeGlobalRank1 > nodeGlobalRank2
&& nodeGlobalRank1 - nodeGlobalRank2 == 1)
|| (nodeGlobalRank2 > nodeGlobalRank1
&& nodeGlobalRank2 - nodeGlobalRank1 == 1)) {
return true;
}
}
return false;
}
__device__ bool isNeighborProfileNodes(uint nodeGlobalRank1,
uint nodeGlobalRank2) {
if ((nodeGlobalRank1 > nodeGlobalRank2
&& nodeGlobalRank1 - nodeGlobalRank2 == 1)
|| (nodeGlobalRank2 > nodeGlobalRank1
&& nodeGlobalRank2 - nodeGlobalRank1 == 1)) {
return true;
}
return false;
}
__device__ bool ofSameType(uint cellType1, uint cellType2) {
if (cellType1 == cellType2) {
return true;
} else {
return false;
}
}
__device__ bool bothCellNodes(SceNodeType &type1, SceNodeType &type2) {
if ((type1 == MX || type1 == FNM) && (type2 == MX || type2 == FNM)) {
return true;
} else {
return false;
}
}
__device__
void attemptToAdhere(bool& isSuccess, uint& index, double& dist,
uint& nodeRank2, double& xPos1, double& yPos1, double& xPos2,
double& yPos2) {
double length = computeDist2D(xPos1, yPos1, xPos2, yPos2);
if (length <= bondAdhCriLen_M) {
if (isSuccess) {
if (length < dist) {
dist = length;
index = nodeRank2;
}
} else {
isSuccess = true;
index = nodeRank2;
dist = length;
}
}
}
__device__
void handleAdhesionForce_M(int& adhereIndex, double& xPos, double& yPos,
double& curAdherePosX, double& curAdherePosY, double& xRes,
double& yRes, double& alpha) {
double curLen = computeDist2D(xPos, yPos, curAdherePosX, curAdherePosY);
if (curLen > maxAdhBondLen_M) {
adhereIndex = -1;
return;
} else {
if (curLen > minAdhBondLen_M) {
double forceValue = (curLen - minAdhBondLen_M) * (bondStiff_M * alpha + bondStiff_Mitotic * (1.0-alpha) );
xRes = xRes + forceValue * (curAdherePosX - xPos) / curLen;
yRes = yRes + forceValue * (curAdherePosY - yPos) / curLen;
}
}
}
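/*
 * Adhesion model above: if the bond is stretched beyond maxAdhBondLen_M it
 * is broken (adhereIndex reset to -1); between minAdhBondLen_M and
 * maxAdhBondLen_M it acts as a linear spring with rest length
 * minAdhBondLen_M, whose stiffness is blended between bondStiff_M and
 * bondStiff_Mitotic by the mitotic coefficient alpha (see getMitoticAdhCoef
 * below); below minAdhBondLen_M no force is applied.
 */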
//Ali June 16
__device__
double getMitoticAdhCoef(double& growProg, double& growProgNeigh){
double alpha = 1.0;
if (growProg > growthPrgrCriVal_M && growProgNeigh > growthPrgrCriVal_M){
alpha = 1.0 - ( 0.5*(growProg+growProgNeigh)-growthPrgrCriVal_M )/(1.0 - growthPrgrCriVal_M);
// adhSkipped = true;
}
else if (growProg > growthPrgrCriVal_M){
alpha = 1.0 - (growProg-growthPrgrCriVal_M)/(1.0 - growthPrgrCriVal_M);
// adhSkipped = true;
}
else if (growProgNeigh > growthPrgrCriVal_M){
alpha = 1.0 - (growProgNeigh-growthPrgrCriVal_M)/(1.0 - growthPrgrCriVal_M);
// adhSkipped = true;
}
return alpha;
}
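/*
 * Example (illustrative numbers): with growthPrgrCriVal_M = 0.9, if both
 * cells have growth progress 0.95, alpha = 1 - (0.95 - 0.9)/(1 - 0.9) = 0.5,
 * so the adhesion stiffness is an even mix of bondStiff_M and
 * bondStiff_Mitotic; if neither cell has passed the criterion, alpha stays 1.
 */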
__device__
void calculateForceBetweenLinkNodes(double &xLoc, double &yLoc, double &zLoc,
double &xLocLeft, double &yLocLeft, double &zLocLeft, double &xLocRight,
double &yLocRight, double &zLocRight, double &xVel, double &yVel,
double &zVel) {
double linkLengthLeft = computeDist(xLoc, yLoc, zLoc, xLocLeft, yLocLeft,
zLocLeft);
double forceValueLeft = sceProfilePara[5]
* (linkLengthLeft - sceProfilePara[6]);
xVel = xVel + forceValueLeft * (xLocLeft - xLoc) / linkLengthLeft;
yVel = yVel + forceValueLeft * (yLocLeft - yLoc) / linkLengthLeft;
zVel = zVel + forceValueLeft * (zLocLeft - zLoc) / linkLengthLeft;
double linkLengthRight = computeDist(xLoc, yLoc, zLoc, xLocRight, yLocRight,
zLocRight);
double forceValueRight = sceProfilePara[5]
* (linkLengthRight - sceProfilePara[6]);
xVel = xVel + forceValueRight * (xLocRight - xLoc) / linkLengthRight;
yVel = yVel + forceValueRight * (yLocRight - yLoc) / linkLengthRight;
zVel = zVel + forceValueRight * (zLocRight - zLoc) / linkLengthRight;
}
__device__
void handleSceForceNodesBasic(uint& nodeRank1, uint& nodeRank2, double& xPos,
double& yPos, double& zPos, double& xPos2, double& yPos2, double& zPos2,
double& xRes, double& yRes, double& zRes, double* _nodeLocXAddress,
double* _nodeLocYAddress, double* _nodeLocZAddress) {
if (isSameCell(nodeRank1, nodeRank2)) {
calculateAndAddIntraForce(xPos, yPos, zPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], _nodeLocZAddress[nodeRank2], xRes,
yRes, zRes);
} else {
calculateAndAddInterForce(xPos, yPos, zPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], _nodeLocZAddress[nodeRank2], xRes,
yRes, zRes);
}
}
__device__
void handleSceForceNodesDisc(uint& nodeRank1, uint& nodeRank2, double& xPos,
double& yPos, double& zPos, double& xPos2, double& yPos2, double& zPos2,
double& xRes, double& yRes, double& zRes, double& interForceX,
double& interForceY, double& interForceZ, double* _nodeLocXAddress,
double* _nodeLocYAddress, double* _nodeLocZAddress,
double* _nodeGrowProAddr) {
if (isSameCell(nodeRank1, nodeRank2)) {
calAndAddIntraForceDiv(xPos, yPos, zPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], _nodeLocZAddress[nodeRank2],
_nodeGrowProAddr[nodeRank2], xRes, yRes, zRes);
} else {
calAndAddInterForceDisc(xPos, yPos, zPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], _nodeLocZAddress[nodeRank2], xRes,
yRes, zRes, interForceX, interForceY, interForceZ);
}
}
__device__
void handleSceForceNodesDisc_M(uint& nodeRank1, uint& nodeRank2, double& xPos,
double& yPos, double& xPos2, double& yPos2, double& xRes, double& yRes,
double* _nodeLocXAddress, double* _nodeLocYAddress,
double* _nodeGrowProAddr) {
if (isSameCell_m(nodeRank1, nodeRank2)) {
if (bothInternal(nodeRank1, nodeRank2)) {
// both nodes are internal type.
calAndAddIntraDiv_M(xPos, yPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], _nodeGrowProAddr[nodeRank2],
xRes, yRes);
} else if (bothMembr(nodeRank1, nodeRank2)) {
// both nodes are epithelium (membrane) type; no SCE force applied.
// nothing to do here.
} else {
// one node is epithelium (membrane) type and the other is internal type.
calAndAddIntraB_M(xPos, yPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], xRes, yRes);
}
} else {
if (bothMembr(nodeRank1, nodeRank2)) {
calAndAddInter_M(xPos, yPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], xRes, yRes);
}
}
}
void SceNodes::extendBuckets2D() {
static const uint extensionFactor2D = 9;
uint valuesCount = auxVecs.bucketValues.size();
auxVecs.bucketKeysExpanded.resize(valuesCount * extensionFactor2D);
auxVecs.bucketValuesIncludingNeighbor.resize(
valuesCount * extensionFactor2D);
/**
* beginning of constant iterator
*/
thrust::constant_iterator<uint> first(extensionFactor2D);
/**
* end of constant iterator.
* the plus sign only indicates movement of position, not value.
* e.g. if the movement is 5 and the iterator was initialized with 9,
* the resulting range is [9,9,9,9,9].
*/
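/*
 * Sketch of the expand step below: with bucketKeys = {4, 7} and
 * bucketValues = {0, 1}, expand(first, last, ...) writes each (key, value)
 * pair 9 times into the expanded arrays; the NeighborFunctor2D transform
 * that follows then rewrites the i-th copy of each key into one of the 9
 * neighboring bucket ids (or UINT_MAX if that neighbor lies outside the
 * grid), so every node ends up listed once per neighboring bucket.
 */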
thrust::constant_iterator<uint> last = first + valuesCount;
expand(first, last,
make_zip_iterator(
make_tuple(auxVecs.bucketKeys.begin(),
auxVecs.bucketValues.begin())),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketValuesIncludingNeighbor.begin())));
thrust::counting_iterator<uint> countingBegin(0);
thrust::counting_iterator<uint> countingEnd = countingBegin
+ valuesCount * extensionFactor2D;
thrust::transform(
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.end(), countingEnd)),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)),
NeighborFunctor2D(domainPara.XBucketSize, domainPara.YBucketSize));
int numberOfOutOfRange = thrust::count(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.end(), UINT_MAX);
int sizeBeforeShrink = auxVecs.bucketKeysExpanded.size();
int numberInsideRange = sizeBeforeShrink - numberOfOutOfRange;
thrust::sort_by_key(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.end(),
auxVecs.bucketValuesIncludingNeighbor.begin());
auxVecs.bucketKeysExpanded.erase(
auxVecs.bucketKeysExpanded.begin() + numberInsideRange,
auxVecs.bucketKeysExpanded.end());
auxVecs.bucketValuesIncludingNeighbor.erase(
auxVecs.bucketValuesIncludingNeighbor.begin() + numberInsideRange,
auxVecs.bucketValuesIncludingNeighbor.end());
}
void SceNodes::extendBuckets2D_M() {
endIndxExt_M = endIndx_M * 9;
/**
* beginning of constant iterator
*/
thrust::constant_iterator<uint> first(9);
/**
* end of constant iterator.
* the plus sign only indicates movement of position, not value.
* e.g. if the movement is 5 and the iterator was initialized with 9,
* the resulting range is [9,9,9,9,9].
*/
thrust::constant_iterator<uint> last = first + endIndx_M;
expand(first, last,
make_zip_iterator(
make_tuple(auxVecs.bucketKeys.begin(),
auxVecs.bucketValues.begin())),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketValuesIncludingNeighbor.begin())));
thrust::counting_iterator<uint> countingBegin(0);
thrust::transform(
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)) + endIndxExt_M,
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)),
NeighborFunctor2D(domainPara.XBucketSize, domainPara.YBucketSize));
int numberOfOutOfRange = thrust::count(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExt_M, UINT_MAX);
endIndxExtProc_M = endIndxExt_M - numberOfOutOfRange;
thrust::sort_by_key(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExt_M,
auxVecs.bucketValuesIncludingNeighbor.begin());
}
void SceNodes::extendBuckets3D() {
endIndxExt_M = endIndx_M * 27;
/**
* beginning of constant iterator
*/
thrust::constant_iterator<uint> first(27);
/**
* end of constant iterator.
* the plus sign only indicates movement of position, not value.
* e.g. if the movement is 5 and the iterator was initialized with 27,
* the resulting range is [27,27,27,27,27].
*/
thrust::constant_iterator<uint> last = first + endIndx_M; // this is NOT numerical addition!
expand(first, last,
make_zip_iterator(
make_tuple(auxVecs.bucketKeys.begin(),
auxVecs.bucketValues.begin())),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketValuesIncludingNeighbor.begin())));
thrust::counting_iterator<uint> countingBegin(0);
thrust::transform(
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)) + endIndxExt_M,
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)),
NgbrFunc3D(domainPara.XBucketSize, domainPara.YBucketSize,
domainPara.ZBucketSize));
int numberOfOutOfRange = thrust::count(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExt_M, UINT_MAX);
endIndxExtProc_M = endIndxExt_M - numberOfOutOfRange;
thrust::sort_by_key(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExt_M,
auxVecs.bucketValuesIncludingNeighbor.begin());
}
void SceNodes::applySceForcesBasic() {
uint* valueAddress = thrust::raw_pointer_cast(
&auxVecs.bucketValuesIncludingNeighbor[0]);
double* nodeLocXAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocX[0]);
double* nodeLocYAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocY[0]);
double* nodeLocZAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocZ[0]);
thrust::transform(
make_zip_iterator(
make_tuple(
make_permutation_iterator(auxVecs.keyBegin.begin(),
auxVecs.bucketKeys.begin()),
make_permutation_iterator(auxVecs.keyEnd.begin(),
auxVecs.bucketKeys.begin()),
auxVecs.bucketValues.begin(),
make_permutation_iterator(infoVecs.nodeLocX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeLocY.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeLocZ.begin(),
auxVecs.bucketValues.begin()))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(auxVecs.keyBegin.begin(),
auxVecs.bucketKeys.end()),
make_permutation_iterator(auxVecs.keyEnd.begin(),
auxVecs.bucketKeys.end()),
auxVecs.bucketValues.end(),
make_permutation_iterator(infoVecs.nodeLocX.begin(),
auxVecs.bucketValues.end()),
make_permutation_iterator(infoVecs.nodeLocY.begin(),
auxVecs.bucketValues.end()),
make_permutation_iterator(infoVecs.nodeLocZ.begin(),
auxVecs.bucketValues.end()))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(infoVecs.nodeVelX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeVelY.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeVelZ.begin(),
auxVecs.bucketValues.begin()))),
AddSceForceBasic(valueAddress, nodeLocXAddress, nodeLocYAddress,
nodeLocZAddress));
}
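/*
 * Pattern shared by applySceForcesBasic above and the Disc variants below:
 * for each node (identified by bucketValues), the zipped permutation
 * iterators gather the [keyBegin, keyEnd) range of its own bucket together
 * with the node's coordinates; the force functor is then expected to walk
 * bucketValuesIncludingNeighbor over that range, accumulate pairwise forces
 * against every candidate neighbor, and write the result into the node's
 * velocity entries. The functor internals are defined elsewhere and are only
 * summarized here.
 */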
void SceNodes::applySceForcesDisc() {
uint* valueAddress = thrust::raw_pointer_cast(
&auxVecs.bucketValuesIncludingNeighbor[0]);
double* nodeLocXAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocX[0]);
double* nodeLocYAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocY[0]);
double* nodeLocZAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocZ[0]);
double* nodeGrowProAddr = thrust::raw_pointer_cast(
&infoVecs.nodeGrowPro[0]);
thrust::transform(
make_zip_iterator(
make_tuple(
make_permutation_iterator(auxVecs.keyBegin.begin(),
auxVecs.bucketKeys.begin()),
make_permutation_iterator(auxVecs.keyEnd.begin(),
auxVecs.bucketKeys.begin()),
auxVecs.bucketValues.begin(),
make_permutation_iterator(infoVecs.nodeLocX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeLocY.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeLocZ.begin(),
auxVecs.bucketValues.begin()))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(auxVecs.keyBegin.begin(),
auxVecs.bucketKeys.end()),
make_permutation_iterator(auxVecs.keyEnd.begin(),
auxVecs.bucketKeys.end()),
auxVecs.bucketValues.end(),
make_permutation_iterator(infoVecs.nodeLocX.begin(),
auxVecs.bucketValues.end()),
make_permutation_iterator(infoVecs.nodeLocY.begin(),
auxVecs.bucketValues.end()),
make_permutation_iterator(infoVecs.nodeLocZ.begin(),
auxVecs.bucketValues.end()))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(infoVecs.nodeVelX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeVelY.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeVelZ.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(
infoVecs.nodeInterForceX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(
infoVecs.nodeInterForceY.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(
infoVecs.nodeInterForceZ.begin(),
auxVecs.bucketValues.begin()))),
AddSceForceDisc(valueAddress, nodeLocXAddress, nodeLocYAddress,
nodeLocZAddress, nodeGrowProAddr));
}
void SceNodes::applySceForcesDisc_M() {
uint* valueAddress = thrust::raw_pointer_cast(
&auxVecs.bucketValuesIncludingNeighbor[0]);
double* nodeLocXAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocX[0]);
double* nodeLocYAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocY[0]);
int* nodeAdhIdxAddress = thrust::raw_pointer_cast(
&infoVecs.nodeAdhereIndex[0]);
int* membrIntnlAddress = thrust::raw_pointer_cast(
&infoVecs.membrIntnlIndex[0]);
double* nodeGrowProAddr = thrust::raw_pointer_cast(
&infoVecs.nodeGrowPro[0]);
thrust::transform(
make_zip_iterator(
make_tuple(
make_permutation_iterator(auxVecs.keyBegin.begin(),
auxVecs.bucketKeys.begin()),
make_permutation_iterator(auxVecs.keyEnd.begin(),
auxVecs.bucketKeys.begin()),
auxVecs.bucketValues.begin(),
make_permutation_iterator(infoVecs.nodeLocX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeLocY.begin(),
auxVecs.bucketValues.begin()))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(auxVecs.keyBegin.begin(),
auxVecs.bucketKeys.begin() + endIndx_M),
make_permutation_iterator(auxVecs.keyEnd.begin(),
auxVecs.bucketKeys.begin() + endIndx_M),
auxVecs.bucketValues.end(),
make_permutation_iterator(infoVecs.nodeLocX.begin(),
auxVecs.bucketValues.begin() + endIndx_M),
make_permutation_iterator(infoVecs.nodeLocY.begin(),
auxVecs.bucketValues.begin() + endIndx_M))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(infoVecs.nodeVelX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeVelY.begin(),
auxVecs.bucketValues.begin()))),
AddForceDisc_M(valueAddress, nodeLocXAddress, nodeLocYAddress,
nodeAdhIdxAddress, membrIntnlAddress, nodeGrowProAddr));
}
const SceDomainPara& SceNodes::getDomainPara() const {
return domainPara;
}
void SceNodes::setDomainPara(const SceDomainPara& domainPara) {
this->domainPara = domainPara;
}
const NodeAllocPara& SceNodes::getAllocPara() const {
return allocPara;
}
void SceNodes::setAllocPara(const NodeAllocPara& allocPara) {
this->allocPara = allocPara;
}
const NodeAuxVecs& SceNodes::getAuxVecs() const {
return auxVecs;
}
void SceNodes::setAuxVecs(const NodeAuxVecs& auxVecs) {
this->auxVecs = auxVecs;
}
NodeInfoVecs& SceNodes::getInfoVecs() {
return infoVecs;
}
std::vector<std::vector<int> > SceNodes::obtainLabelMatrix(
PixelizePara& pixelPara) {
std::vector<std::vector<int> > result;
std::vector<NodeWithLabel> nodeLabels;
ResAnalysisHelper resHelper;
resHelper.setPixelPara(pixelPara);
thrust::host_vector<double> hostTmpVectorLocX = infoVecs.nodeLocX;
thrust::host_vector<double> hostTmpVectorLocY = infoVecs.nodeLocY;
thrust::host_vector<double> hostTmpVectorLocZ = infoVecs.nodeLocZ;
thrust::host_vector<SceNodeType> hostTmpVectorNodeType =
infoVecs.nodeCellType;
thrust::host_vector<uint> hostTmpVectorNodeRank = infoVecs.nodeCellRank;
thrust::host_vector<uint> hostTmpVectorIsActive = infoVecs.nodeIsActive;
uint startIndex = allocPara.startPosCells;
uint endIndex = startIndex
+ allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell;
for (uint i = startIndex; i < endIndex; i++) {
if (hostTmpVectorIsActive[i] == true) {
NodeWithLabel nodeLabel;
nodeLabel.cellRank = hostTmpVectorNodeRank[i];
nodeLabel.position = CVector(hostTmpVectorLocX[i],
hostTmpVectorLocY[i], hostTmpVectorLocZ[i]);
nodeLabels.push_back(nodeLabel);
}
}
result = resHelper.outputLabelMatrix(nodeLabels);
return result;
}
void SceNodes::initControlPara(bool isStab) {
int simuTypeConfigValue =
globalConfigVars.getConfigValue("SimulationType").toInt();
controlPara.simuType = parseTypeFromConfig(simuTypeConfigValue);
controlPara.controlSwitchs.outputBmpImg = globalConfigVars.getSwitchState(
"Switch_OutputBMP");
controlPara.controlSwitchs.outputLabelMatrix =
globalConfigVars.getSwitchState("Switch_OutputLabelMatrix");
controlPara.controlSwitchs.outputStat = globalConfigVars.getSwitchState(
"Switch_OutputStat");
controlPara.controlSwitchs.outputVtkFile = globalConfigVars.getSwitchState(
"Switch_OutputVtk");
if (isStab) {
controlPara.controlSwitchs.stab = ON;
} else {
controlPara.controlSwitchs.stab = OFF;
}
}
void SceNodes::sceForcesPerfTesting() {
prepareSceForceComputation();
applySceForcesBasic();
}
void SceNodes::sceForcesPerfTesting_M() {
prepareSceForceComputation_M();
applySceForcesBasic_M();
}
void SceNodes::applySceForcesBasic_M() {
}
void SceNodes::sceForcesDisc() {
prepareSceForceComputation();
applySceForcesDisc();
}
void SceNodes::sceForcesDisc_M() {
#ifdef DebugMode
hipEvent_t start1, start2, start3, stop;
float elapsedTime1, elapsedTime2, elapsedTime3;
hipEventCreate(&start1);
hipEventCreate(&start2);
hipEventCreate(&start3);
hipEventCreate(&stop);
hipEventRecord(start1, 0);
#endif
cout << " confirm --- 1 ---" << endl;
cout.flush();
prepareSceForceComputation_M();
#ifdef DebugMode
hipEventRecord(start2, 0);
hipEventSynchronize(start2);
hipEventElapsedTime(&elapsedTime1, start1, start2);
#endif
cout << " --- 2 ---" << endl;
cout.flush();
applySceForcesDisc_M();
#ifdef DebugMode
hipEventRecord(start3, 0);
hipEventSynchronize(start3);
hipEventElapsedTime(&elapsedTime2, start2, start3);
#endif
cout << " --- 3 ---" << endl;
cout.flush();
processMembrAdh_M();
cout << " --- 4 ---" << endl;
cout.flush();
copyExtForces_M();//AAMIRI
#ifdef DebugMode
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime3, start3, stop);
std::cout << "time spent in Node logic: " << elapsedTime1 << " "
<< elapsedTime2 << " " << elapsedTime3 << std::endl;
#endif
}
double SceNodes::getMaxEffectiveRange() {
int simuTypeConfigValue =
globalConfigVars.getConfigValue("SimulationType").toInt();
SimulationType type = parseTypeFromConfig(simuTypeConfigValue);
if (type != Disc_M) {
double interLinkEffectiveRange = globalConfigVars.getConfigValue(
"InterCellLinkEffectRange").toDouble();
double maxEffectiveRange = interLinkEffectiveRange;
double intraLinkEffectiveRange = globalConfigVars.getConfigValue(
"IntraCellLinkEffectRange").toDouble();
if (intraLinkEffectiveRange > maxEffectiveRange) {
maxEffectiveRange = intraLinkEffectiveRange;
}
double cartEffectiveRange = 0;
// cartilage effective range does not apply for other types of simulation.
try {
cartEffectiveRange = globalConfigVars.getConfigValue(
"CartForceEffectiveRange").toDouble();
} catch (SceException &exce) {
}
if (cartEffectiveRange > maxEffectiveRange) {
maxEffectiveRange = cartEffectiveRange;
}
return maxEffectiveRange;
} else {
double membrMembrEffRange = globalConfigVars.getConfigValue(
"InterBEffectiveRange").toDouble();
double membrIntnlEffRange = globalConfigVars.getConfigValue(
"IntnlBEffectRange").toDouble();
double intnlIntnlEffRange = globalConfigVars.getConfigValue(
"IntraEffectRange").toDouble();
double intnlDivEffRange = globalConfigVars.getConfigValue(
"IntraDivEffectRange").toDouble();
double maxEffRange = 0;
std::vector<double> ranges;
ranges.push_back(membrMembrEffRange);
// the remaining effective ranges below are currently excluded:
//ranges.push_back(membrIntnlEffRange);
//ranges.push_back(intnlIntnlEffRange);
//ranges.push_back(intnlDivEffRange);
maxEffRange = *std::max_element(ranges.begin(), ranges.end());
return maxEffRange;
}
}
void SceNodes::setInfoVecs(const NodeInfoVecs& infoVecs) {
this->infoVecs = infoVecs;
}
void SceNodes::allocSpaceForNodes(uint maxTotalNodeCount) {
infoVecs.nodeLocX.resize(maxTotalNodeCount);
infoVecs.nodeLocY.resize(maxTotalNodeCount);
infoVecs.nodeLocZ.resize(maxTotalNodeCount);
infoVecs.nodeVelX.resize(maxTotalNodeCount);
infoVecs.nodeVelY.resize(maxTotalNodeCount);
infoVecs.nodeVelZ.resize(maxTotalNodeCount);
infoVecs.nodeF_MI_M_x.resize(maxTotalNodeCount); //Ali
infoVecs.nodeF_MI_M_y.resize(maxTotalNodeCount); //Ali
infoVecs.nodeF_MI_M_T.resize(maxTotalNodeCount); //Ali
infoVecs.nodeF_MI_M_N.resize(maxTotalNodeCount); //Ali
infoVecs.nodeVelTangent.resize(maxTotalNodeCount);//AAMIRI
infoVecs.nodeVelNormal.resize(maxTotalNodeCount);//AAMIRI
infoVecs.nodeCurvature.resize(maxTotalNodeCount, 0.0);//AAMIRI
infoVecs.nodeExtForceX.resize(maxTotalNodeCount);//AAMIRI
infoVecs.nodeExtForceY.resize(maxTotalNodeCount);//AAMIRI
infoVecs.nodeExtForceTangent.resize(maxTotalNodeCount);//AAMIRI
infoVecs.nodeExtForceNormal.resize(maxTotalNodeCount);//AAMIRI
infoVecs.nodeMaxForce.resize(maxTotalNodeCount);
infoVecs.nodeCellType.resize(maxTotalNodeCount);
infoVecs.nodeCellRank.resize(maxTotalNodeCount);
infoVecs.nodeIsActive.resize(maxTotalNodeCount);
if (controlPara.simuType == Disc
|| controlPara.simuType == SingleCellTest) {
infoVecs.nodeGrowPro.resize(maxTotalNodeCount);
infoVecs.nodeInterForceX.resize(maxTotalNodeCount);
infoVecs.nodeInterForceY.resize(maxTotalNodeCount);
infoVecs.nodeInterForceZ.resize(maxTotalNodeCount);
}
if (controlPara.simuType == Disc_M) {
infoVecs.nodeAdhereIndex.resize(maxTotalNodeCount);
infoVecs.nodeAdhIndxHostCopy.resize(maxTotalNodeCount);
infoVecs.membrIntnlIndex.resize(maxTotalNodeCount);
infoVecs.nodeGrowPro.resize(maxTotalNodeCount);
infoVecs.membrTensionMag.resize(maxTotalNodeCount, 0);
infoVecs.membrTenMagRi.resize(maxTotalNodeCount, 0);
infoVecs.membrDistToRi.resize(maxTotalNodeCount, 0);//AAMIRI
infoVecs.membrLinkRiMidX.resize(maxTotalNodeCount, 0);
infoVecs.membrLinkRiMidY.resize(maxTotalNodeCount, 0);
infoVecs.membrBendLeftX.resize(maxTotalNodeCount, 0);
infoVecs.membrBendLeftY.resize(maxTotalNodeCount, 0);
infoVecs.membrBendRightX.resize(maxTotalNodeCount, 0);
infoVecs.membrBendRightY.resize(maxTotalNodeCount, 0);
auxVecs.bucketKeys.resize(maxTotalNodeCount);
auxVecs.bucketValues.resize(maxTotalNodeCount);
auxVecs.bucketKeysExpanded.resize(maxTotalNodeCount * 9);
auxVecs.bucketValuesIncludingNeighbor.resize(maxTotalNodeCount * 9);
}
}
void SceNodes::initNodeAllocPara(uint totalBdryNodeCount,
uint maxProfileNodeCount, uint maxCartNodeCount, uint maxTotalECMCount,
uint maxNodeInECM, uint maxTotalCellCount, uint maxNodeInCell) {
allocPara.maxCellCount = maxTotalCellCount;
allocPara.maxNodeOfOneCell = maxNodeInCell;
allocPara.maxNodePerECM = maxNodeInECM;
allocPara.maxECMCount = maxTotalECMCount;
allocPara.maxProfileNodeCount = maxProfileNodeCount;
allocPara.maxCartNodeCount = maxCartNodeCount;
allocPara.currentActiveProfileNodeCount = 0;
allocPara.currentActiveCartNodeCount = 0;
allocPara.BdryNodeCount = totalBdryNodeCount;
allocPara.currentActiveCellCount = 0;
allocPara.maxTotalECMNodeCount = allocPara.maxECMCount
* allocPara.maxNodePerECM;
allocPara.currentActiveECM = 0;
allocPara.maxTotalCellNodeCount = maxTotalCellCount
* allocPara.maxNodeOfOneCell;
allocPara.startPosProfile = totalBdryNodeCount;
allocPara.startPosCart = allocPara.startPosProfile
+ allocPara.maxProfileNodeCount;
allocPara.startPosECM = allocPara.startPosCart + allocPara.maxCartNodeCount;
allocPara.startPosCells = allocPara.startPosECM
+ allocPara.maxTotalECMNodeCount;
}
void SceNodes::initNodeAllocPara_M(uint totalBdryNodeCount,
uint maxTotalCellCount, uint maxEpiNodePerCell,
uint maxInternalNodePerCell) {
allocPara_M.bdryNodeCount = totalBdryNodeCount;
allocPara_M.currentActiveCellCount = 0;
allocPara_M.maxCellCount = maxTotalCellCount;
allocPara_M.maxAllNodePerCell = maxEpiNodePerCell + maxInternalNodePerCell;
allocPara_M.maxMembrNodePerCell = maxEpiNodePerCell;
allocPara_M.maxIntnlNodePerCell = maxInternalNodePerCell;
allocPara_M.maxTotalNodeCount = allocPara_M.maxAllNodePerCell
* allocPara_M.maxCellCount;
}
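/*
 * Resulting layout example (illustrative numbers): with bdryNodeCount = 0,
 * maxEpiNodePerCell = 100 and maxInternalNodePerCell = 24,
 * maxAllNodePerCell = 124, and node j of cell c lives at global index
 * bdryNodeCount + c * 124 + j, membrane nodes first (j < 100) followed by
 * internal nodes.
 */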
void SceNodes::removeNodes(int cellRank, vector<uint> &removeSeq) {
uint cellBeginIndex = allocPara.startPosCells
+ cellRank * allocPara.maxNodeOfOneCell;
uint cellEndIndex = cellBeginIndex + allocPara.maxNodeOfOneCell;
thrust::host_vector<double> cellXCoords(allocPara.maxNodeOfOneCell);
thrust::host_vector<double> cellYCoords(allocPara.maxNodeOfOneCell);
thrust::copy(infoVecs.nodeLocX.begin() + cellBeginIndex,
infoVecs.nodeLocX.begin() + cellEndIndex, cellXCoords.begin());
thrust::copy(infoVecs.nodeLocY.begin() + cellBeginIndex,
infoVecs.nodeLocY.begin() + cellEndIndex, cellYCoords.begin());
vector<bool> isRemove(allocPara.maxNodeOfOneCell, false);
/*
std::cout << "before, X: [";
for (uint i = 0; i < allocPara.maxNodeOfOneCell; i++) {
std::cout << cellXCoords[i] << " ";
}
std::cout << "]" << endl;
std::cout << "before, Y: [";
for (uint i = 0; i < allocPara.maxNodeOfOneCell; i++) {
std::cout << cellYCoords[i] << " ";
}
std::cout << "]" << endl;
*/
for (uint i = 0; i < removeSeq.size(); i++) {
isRemove[removeSeq[i]] = true;
}
thrust::host_vector<double> cellXRemoved(allocPara.maxNodeOfOneCell);
thrust::host_vector<double> cellYRemoved(allocPara.maxNodeOfOneCell);
uint curIndex = 0;
for (uint i = 0; i < allocPara.maxNodeOfOneCell; i++) {
if (isRemove[i] == false) {
cellXRemoved[curIndex] = cellXCoords[i];
cellYRemoved[curIndex] = cellYCoords[i];
curIndex++;
}
}
/*
std::cout << "after, X: [";
for (uint i = 0; i < allocPara.maxNodeOfOneCell; i++) {
std::cout << cellXRemoved[i] << " ";
}
std::cout << "]" << endl;
std::cout << "after, Y: [";
for (uint i = 0; i < allocPara.maxNodeOfOneCell; i++) {
std::cout << cellYRemoved[i] << " ";
}
std::cout << "]" << endl;
*/
thrust::copy(cellXRemoved.begin(), cellXRemoved.end(),
infoVecs.nodeLocX.begin() + cellBeginIndex);
thrust::copy(cellYRemoved.begin(), cellYRemoved.end(),
infoVecs.nodeLocY.begin() + cellBeginIndex);
}
void SceNodes::processMembrAdh_M() {
keepAdhIndxCopyInHost_M();
applyMembrAdh_M();
removeInvalidPairs_M();
}
void SceNodes::keepAdhIndxCopyInHost_M() {
uint maxTotalNode = allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
thrust::copy(infoVecs.nodeAdhereIndex.begin(),
infoVecs.nodeAdhereIndex.begin() + maxTotalNode,
infoVecs.nodeAdhIndxHostCopy.begin());
}
void SceNodes::removeInvalidPairs_M() {
int* nodeAdhIdxAddress = thrust::raw_pointer_cast(
&infoVecs.nodeAdhereIndex[0]);
uint curActiveNodeCt = allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
thrust::counting_iterator<int> iBegin(0);
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(iBegin,
infoVecs.nodeAdhereIndex.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(iBegin,
infoVecs.nodeAdhereIndex.begin()))
+ curActiveNodeCt, infoVecs.nodeAdhereIndex.begin(),
AdjustAdh(nodeAdhIdxAddress));
}
void SceNodes::applyMembrAdh_M() {
thrust::counting_iterator<uint> iBegin(0);
uint maxTotalNode = allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
double* nodeLocXAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocX[0]);
double* nodeLocYAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocY[0]);
double* nodeGrowProAddr = thrust::raw_pointer_cast(
&infoVecs.nodeGrowPro[0]);
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(infoVecs.nodeIsActive.begin(),
infoVecs.nodeAdhereIndex.begin(), iBegin,
infoVecs.nodeVelX.begin(),
infoVecs.nodeVelY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(infoVecs.nodeIsActive.begin(),
infoVecs.nodeAdhereIndex.begin(), iBegin,
infoVecs.nodeVelX.begin(),
infoVecs.nodeVelY.begin())) + maxTotalNode,
thrust::make_zip_iterator(
thrust::make_tuple(infoVecs.nodeVelX.begin(),
infoVecs.nodeVelY.begin())),
ApplyAdh(nodeLocXAddress, nodeLocYAddress, nodeGrowProAddr));
}
//AAMIRI
void SceNodes::copyExtForces_M(){
thrust::copy(infoVecs.nodeVelX.begin(), infoVecs.nodeVelX.end(),
infoVecs.nodeExtForceX.begin());
thrust::copy(infoVecs.nodeVelY.begin(), infoVecs.nodeVelY.end(),
infoVecs.nodeExtForceY.begin());
}