| hip_filename | hip_content | cuda_filename | cuda_content |
|---|---|---|---|
| stringlengths 5–84 | stringlengths 79–9.69M | stringlengths 4–83 | stringlengths 19–9.69M |
408cb2885a8a9071d67d9636fbf058b21107c4e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "thinning.h"
#include "cuda_includes.h"
#include "neighbor.cuh"
#include "attachment.cuh"
#include "clique.cuh"
#include "thinning_details.cuh"
namespace thin
{
void initDevice()
{
nb::initDevice();
attach::initDevice();
clique::initDevice();
details::_setDeviceInited();
}
void shutdownDevice()
{
clique::shutdownDevice();
attach::shutdownDevice();
nb::shutdownDevice();
}
static unsigned _numThreads = 128U;
void setNumThreadsPerBlock(unsigned num) { _numThreads = num; }
unsigned numThreadsPerBlock() { return _numThreads; }
void isthmusSymmetricThinning(const std::vector<IjkType>& compactIjkVec,/* const std::vector<ObjIdType>& voxelIdVec,*/ std::vector<IjkType>& D_XK, const IjkType& size3D, int maxIter)
{
// using namespace clique;
using namespace details;
namespace cp = clique::_private;
DevDataPack::InitParams packInitParams;
packInitParams.arrSize = compactIjkVec.size();
packInitParams.size3D = size3D;
packInitParams.useBirth = false;
packInitParams.useVoxelID = false;
DevDataPack thinData(packInitParams);
thinData.alloc();
checkCudaErrors(hipMemset(thinData.recBitsArr, 0x01, sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(hipMemset(thinData.A_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(hipMemset(thinData.B_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
// IjkType* d_compactIjkArr;
// checkCudaErrors(hipMalloc(&(thinData.compactIjkArr), sizeof(IjkType) * thinData.arrSize));
checkCudaErrors(hipMemcpy(thinData.compactIjkArr, compactIjkVec.data(), sizeof(IjkType) * thinData.arrSize, hipMemcpyHostToDevice));
unsigned curIter = 1;
unsigned lastIterSize = thinData.arrSize;
dim3 threadsDim(_numThreads, 1U, 1U);
dim3 blocksDim((thinData.arrSize + threadsDim.x - 1U) / threadsDim.x, 1U, 1U);
while (blocksDim.x > 32768U)
{
blocksDim.x /= 2;
blocksDim.y *= 2;
}
while ((maxIter < 0) || (maxIter > 0 && curIter <= maxIter))
{
std::cout << "Current iteration: " << curIter
<< ", size: " << lastIterSize << std::endl;
clique::crucialIsthmus(thinData, blocksDim, threadsDim);
unsigned curIterSize = cp::_countBit(thinData.recBitsArr, thinData.arrSize, REC_BIT_Y);
if (curIterSize == lastIterSize) break;
hipLaunchKernelGGL(( cp::_assignKern), dim3(blocksDim), dim3(threadsDim), 0, 0, thinData.recBitsArr, thinData.arrSize, REC_BIT_Y, REC_BIT_X);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( cp::_unionKern), dim3(blocksDim), dim3(threadsDim), 0, 0, thinData.recBitsArr, thinData.recBitsArr, thinData.arrSize, REC_BIT_Z, REC_BIT_K);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
thinData.arrSize = cp::_shrinkArrs(thinData, blocksDim, threadsDim);
assert(thinData.arrSize == curIterSize);
// To-Do:
// 1. clean up the d_A/B_recBitsArr accordingly
// 2. re-calculate blocksDim
checkCudaErrors(hipFree(thinData.A_recBitsArr));
checkCudaErrors(hipFree(thinData.B_recBitsArr));
checkCudaErrors(hipMalloc(&(thinData.A_recBitsArr), sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(hipMalloc(&(thinData.B_recBitsArr), sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(hipMemset(thinData.A_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(hipMemset(thinData.B_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
blocksDim.x = (thinData.arrSize + threadsDim.x - 1U) / threadsDim.x;
blocksDim.y = 1U;
while (blocksDim.x > 32768U)
{
blocksDim.x /= 2;
blocksDim.y *= 2;
}
lastIterSize = curIterSize;
++curIter;
}
D_XK.clear();
D_XK.resize(thinData.arrSize);
checkCudaErrors(hipMemcpy(D_XK.data(), thinData.compactIjkArr, sizeof(IjkType) * thinData.arrSize, hipMemcpyDeviceToHost));
thinData.dispose();
}
void persistenceIsthmusThinningCore(details::DevDataPack& thinData, unsigned curIter, unsigned p, int maxIter)
{
using namespace details;
namespace cp = clique::_private;
unsigned lastIterSize = thinData.arrSize;
dim3 threadsDim(_numThreads, 1U, 1U);
dim3 blocksDim((thinData.arrSize + threadsDim.x - 1U) / threadsDim.x, 1U, 1U);
auto TIMER1 = std::chrono::high_resolution_clock::now();
auto TIMER2 = std::chrono::high_resolution_clock::now();
auto TIMER3 = std::chrono::high_resolution_clock::now();
while (blocksDim.x > 32768U)
{
blocksDim.x /= 2;
blocksDim.y *= 2;
}
while ((maxIter < 0) || (maxIter > 0 && curIter <= maxIter))
{
std::cout << "Current iteration: " << curIter
<< ", size: " << lastIterSize << std::endl;
TIMER2 = std::chrono::high_resolution_clock::now();
// crucialIsthmus(grid3D, Kset, D_XK, I_XK1);
// crucialIsthmusCUDA(compactFlatIjkVec, flatMngr, recBitsVec, numThreads);
TIMER3 = std::chrono::high_resolution_clock::now();
clique::crucialIsthmus(thinData, blocksDim, threadsDim);
TIMER_END(">>> persistenceIsthmusThinningCore::crucialIsthmus()", TIMER3);
unsigned curIterSize = cp::_countBit(thinData.recBitsArr, thinData.arrSize, REC_BIT_Y);
if (curIterSize == lastIterSize) break;
TIMER3 = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( cp::_assignKern), dim3(blocksDim), dim3(threadsDim), 0, 0, thinData.recBitsArr, thinData.arrSize, REC_BIT_Y, REC_BIT_X);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
TIMER_END(">>> persistenceIsthmusThinningCore::_assignKern()", TIMER3);
TIMER3 = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( cp::_updateBirthKern), dim3(blocksDim), dim3(threadsDim), 0, 0, thinData.birthArr, thinData.recBitsArr, thinData.arrSize, curIter);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
TIMER_END(">>> persistenceIsthmusThinningCore::_updateBirthKern()", TIMER3);
TIMER3 = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( cp::_unionKsetByBirth), dim3(blocksDim), dim3(threadsDim), 0, 0, thinData.recBitsArr, thinData.birthArr, thinData.arrSize, curIter, p);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
TIMER_END(">>> persistenceIsthmusThinningCore::_unionKsetByBirth()", TIMER3);
thinData.arrSize = cp::_shrinkArrs(thinData, blocksDim, threadsDim);
assert(thinData.arrSize == curIterSize);
// To-Do:
// 1. clean up the d_A/B_recBitsArr accordingly
// 2. re-calculate blocksDim
TIMER3 = std::chrono::high_resolution_clock::now();
checkCudaErrors(hipFree(thinData.A_recBitsArr));
checkCudaErrors(hipFree(thinData.B_recBitsArr));
TIMER_END(">>> persistenceIsthmusThinningCore::hipFree(A&B)", TIMER3);
TIMER3 = std::chrono::high_resolution_clock::now();
checkCudaErrors(hipMalloc(&(thinData.A_recBitsArr), sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(hipMalloc(&(thinData.B_recBitsArr), sizeof(RecBitsType) * thinData.arrSize));
TIMER_END(">>> persistenceIsthmusThinningCore::hipMalloc(A&B)", TIMER3);
TIMER3 = std::chrono::high_resolution_clock::now();
checkCudaErrors(hipMemset(thinData.A_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(hipMemset(thinData.B_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
TIMER_END(">>> persistenceIsthmusThinningCore::hipMemset(A&B)", TIMER3);
TIMER3 = std::chrono::high_resolution_clock::now();
blocksDim.x = (thinData.arrSize + threadsDim.x - 1U) / threadsDim.x;
blocksDim.y = 1U;
while (blocksDim.x > 32768U)
{
blocksDim.x /= 2;
blocksDim.y *= 2;
}
TIMER_END(">>> persistenceIsthmusThinningCore::while blocks", TIMER3);
lastIterSize = curIterSize;
++curIter;
TIMER_END(">>> persistenceIsthmusThinningCore::while iter", TIMER2);
}
TIMER_END(">>> persistenceIsthmusThinningCore::while all", TIMER1);
}
void persistenceIsthmusThinning(const std::vector<IjkType>& compactIjkVec, const std::vector<ObjIdType>& voxelIdVec, std::vector<IjkType>& D_XK,
const IjkType& size3D, unsigned p, int maxIter)
{
// using namespace clique;
using namespace details;
namespace cp = clique::_private;
// ThinningData thinData(compactIjkVec.size(), size3D);
DevDataPack::InitParams packInitParams;
packInitParams.arrSize = compactIjkVec.size();
packInitParams.size3D = size3D;
packInitParams.useBirth = true;
packInitParams.useVoxelID = voxelIdVec.size() > 0;
DevDataPack thinData(packInitParams);
thinData.alloc();
checkCudaErrors(hipMemset(thinData.recBitsArr, 0x01, sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(hipMemset(thinData.A_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(hipMemset(thinData.B_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(hipMemcpy(thinData.compactIjkArr, compactIjkVec.data(), sizeof(IjkType) * thinData.arrSize, hipMemcpyHostToDevice));
if (thinData.useVoxelID())
{
checkCudaErrors(hipMemcpy(thinData.voxelIdArr, voxelIdVec.data(), sizeof(ObjIdType) * thinData.arrSize, hipMemcpyHostToDevice));
}
checkCudaErrors(hipMemset(thinData.birthArr, 0, sizeof(unsigned) * thinData.arrSize));
unsigned curIter = 0;
persistenceIsthmusThinningCore(thinData, curIter, p, maxIter);
D_XK.clear();
D_XK.resize(thinData.arrSize);
checkCudaErrors(hipMemcpy(D_XK.data(), thinData.compactIjkArr, sizeof(IjkType) * thinData.arrSize, hipMemcpyDeviceToHost));
thinData.dispose();
}
void persistenceIsthmusThinning(const std::vector<IjkType>& compactIjkVec, std::vector<IjkType>& D_XK, const IjkType& size3D, unsigned p, int maxIter)
{
std::vector<ObjIdType> fakeVoxelIdVec;
persistenceIsthmusThinning(compactIjkVec, fakeVoxelIdVec, D_XK, size3D, p, maxIter);
}
void oneChunkThinning(details::DevDataPack& thinData, unsigned curIter, unsigned dim,
unsigned p, const dim3& blocksDim, const dim3& threadsDim)
{
using namespace thin::clique;
namespace cp = thin::clique::_private;
using namespace details;
if (dim == 3U)
{
// Y <- K
hipLaunchKernelGGL(( cp::_assignKern), dim3(blocksDim), dim3(threadsDim), 0, 0, thinData.recBitsArr, thinData.arrSize, REC_BIT_K, REC_BIT_Y);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
// Z <- {}
hipLaunchKernelGGL(( cp::_clearKern), dim3(blocksDim), dim3(threadsDim), 0, 0, thinData.recBitsArr, thinData.arrSize, REC_BIT_Z);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
dimCrucialIsthmus<D3CliqueChecker>(thinData, blocksDim, threadsDim);
}
else if (dim == 2U)
{
dimCrucialIsthmus<D2CliqueChecker>(thinData, blocksDim, threadsDim);
}
else if (dim == 1U)
{
dimCrucialIsthmus<D1CliqueChecker>(thinData, blocksDim, threadsDim);
}
else if (dim == 0)
{
dimCrucialIsthmus<D0CliqueChecker>(thinData, blocksDim, threadsDim);
hipLaunchKernelGGL(( cp::_assignKern), dim3(blocksDim), dim3(threadsDim), 0, 0, thinData.recBitsArr, thinData.arrSize, REC_BIT_Y, REC_BIT_X);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( cp::_updateBirthKern), dim3(blocksDim), dim3(threadsDim), 0, 0, thinData.birthArr, thinData.recBitsArr, thinData.arrSize, curIter);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( cp::_unionKsetByBirth), dim3(blocksDim), dim3(threadsDim), 0, 0, thinData.recBitsArr, thinData.birthArr, thinData.arrSize, curIter, p);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( cp::_clearKern), dim3(blocksDim), dim3(threadsDim), 0, 0, thinData.recBitsArr, thinData.arrSize, REC_BIT_Y);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( cp::_clearKern), dim3(blocksDim), dim3(threadsDim), 0, 0, thinData.recBitsArr, thinData.arrSize, REC_BIT_Z);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMemset(thinData.A_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(hipMemset(thinData.B_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
}
}
}; // namespace thin;
| 408cb2885a8a9071d67d9636fbf058b21107c4e3.cu | #include "thinning.h"
#include "cuda_includes.h"
#include "neighbor.cuh"
#include "attachment.cuh"
#include "clique.cuh"
#include "thinning_details.cuh"
namespace thin
{
void initDevice()
{
nb::initDevice();
attach::initDevice();
clique::initDevice();
details::_setDeviceInited();
}
void shutdownDevice()
{
clique::shutdownDevice();
attach::shutdownDevice();
nb::shutdownDevice();
}
static unsigned _numThreads = 128U;
void setNumThreadsPerBlock(unsigned num) { _numThreads = num; }
unsigned numThreadsPerBlock() { return _numThreads; }
void isthmusSymmetricThinning(const std::vector<IjkType>& compactIjkVec,/* const std::vector<ObjIdType>& voxelIdVec,*/ std::vector<IjkType>& D_XK, const IjkType& size3D, int maxIter)
{
// using namespace clique;
using namespace details;
namespace cp = clique::_private;
DevDataPack::InitParams packInitParams;
packInitParams.arrSize = compactIjkVec.size();
packInitParams.size3D = size3D;
packInitParams.useBirth = false;
packInitParams.useVoxelID = false;
DevDataPack thinData(packInitParams);
thinData.alloc();
checkCudaErrors(cudaMemset(thinData.recBitsArr, 0x01, sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(cudaMemset(thinData.A_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(cudaMemset(thinData.B_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
// IjkType* d_compactIjkArr;
// checkCudaErrors(cudaMalloc(&(thinData.compactIjkArr), sizeof(IjkType) * thinData.arrSize));
checkCudaErrors(cudaMemcpy(thinData.compactIjkArr, compactIjkVec.data(), sizeof(IjkType) * thinData.arrSize, cudaMemcpyHostToDevice));
unsigned curIter = 1;
unsigned lastIterSize = thinData.arrSize;
dim3 threadsDim(_numThreads, 1U, 1U);
dim3 blocksDim((thinData.arrSize + threadsDim.x - 1U) / threadsDim.x, 1U, 1U);
while (blocksDim.x > 32768U)
{
blocksDim.x /= 2;
blocksDim.y *= 2;
}
while ((maxIter < 0) || (maxIter > 0 && curIter <= maxIter))
{
std::cout << "Current iteration: " << curIter
<< ", size: " << lastIterSize << std::endl;
clique::crucialIsthmus(thinData, blocksDim, threadsDim);
unsigned curIterSize = cp::_countBit(thinData.recBitsArr, thinData.arrSize, REC_BIT_Y);
if (curIterSize == lastIterSize) break;
cp::_assignKern<<<blocksDim, threadsDim>>>(thinData.recBitsArr, thinData.arrSize, REC_BIT_Y, REC_BIT_X);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
cp::_unionKern<<<blocksDim, threadsDim>>>(thinData.recBitsArr, thinData.recBitsArr, thinData.arrSize, REC_BIT_Z, REC_BIT_K);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
thinData.arrSize = cp::_shrinkArrs(thinData, blocksDim, threadsDim);
assert(thinData.arrSize == curIterSize);
// To-Do:
// 1. clean up the d_A/B_recBitsArr accordingly
// 2. re-calculate blocksDim
checkCudaErrors(cudaFree(thinData.A_recBitsArr));
checkCudaErrors(cudaFree(thinData.B_recBitsArr));
checkCudaErrors(cudaMalloc(&(thinData.A_recBitsArr), sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(cudaMalloc(&(thinData.B_recBitsArr), sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(cudaMemset(thinData.A_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(cudaMemset(thinData.B_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
blocksDim.x = (thinData.arrSize + threadsDim.x - 1U) / threadsDim.x;
blocksDim.y = 1U;
while (blocksDim.x > 32768U)
{
blocksDim.x /= 2;
blocksDim.y *= 2;
}
lastIterSize = curIterSize;
++curIter;
}
D_XK.clear();
D_XK.resize(thinData.arrSize);
checkCudaErrors(cudaMemcpy(D_XK.data(), thinData.compactIjkArr, sizeof(IjkType) * thinData.arrSize, cudaMemcpyDeviceToHost));
thinData.dispose();
}
void persistenceIsthmusThinningCore(details::DevDataPack& thinData, unsigned curIter, unsigned p, int maxIter)
{
using namespace details;
namespace cp = clique::_private;
unsigned lastIterSize = thinData.arrSize;
dim3 threadsDim(_numThreads, 1U, 1U);
dim3 blocksDim((thinData.arrSize + threadsDim.x - 1U) / threadsDim.x, 1U, 1U);
auto TIMER1 = std::chrono::high_resolution_clock::now();
auto TIMER2 = std::chrono::high_resolution_clock::now();
auto TIMER3 = std::chrono::high_resolution_clock::now();
while (blocksDim.x > 32768U)
{
blocksDim.x /= 2;
blocksDim.y *= 2;
}
while ((maxIter < 0) || (maxIter > 0 && curIter <= maxIter))
{
std::cout << "Current iteration: " << curIter
<< ", size: " << lastIterSize << std::endl;
TIMER2 = std::chrono::high_resolution_clock::now();
// crucialIsthmus(grid3D, Kset, D_XK, I_XK1);
// crucialIsthmusCUDA(compactFlatIjkVec, flatMngr, recBitsVec, numThreads);
TIMER3 = std::chrono::high_resolution_clock::now();
clique::crucialIsthmus(thinData, blocksDim, threadsDim);
TIMER_END(">>> persistenceIsthmusThinningCore::crucialIsthmus()", TIMER3);
unsigned curIterSize = cp::_countBit(thinData.recBitsArr, thinData.arrSize, REC_BIT_Y);
if (curIterSize == lastIterSize) break;
TIMER3 = std::chrono::high_resolution_clock::now();
cp::_assignKern<<<blocksDim, threadsDim>>>(thinData.recBitsArr, thinData.arrSize, REC_BIT_Y, REC_BIT_X);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
TIMER_END(">>> persistenceIsthmusThinningCore::_assignKern()", TIMER3);
TIMER3 = std::chrono::high_resolution_clock::now();
cp::_updateBirthKern<<<blocksDim, threadsDim>>>(thinData.birthArr, thinData.recBitsArr, thinData.arrSize, curIter);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
TIMER_END(">>> persistenceIsthmusThinningCore::_updateBirthKern()", TIMER3);
TIMER3 = std::chrono::high_resolution_clock::now();
cp::_unionKsetByBirth<<<blocksDim, threadsDim>>>(thinData.recBitsArr, thinData.birthArr, thinData.arrSize, curIter, p);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
TIMER_END(">>> persistenceIsthmusThinningCore::_unionKsetByBirth()", TIMER3);
thinData.arrSize = cp::_shrinkArrs(thinData, blocksDim, threadsDim);
assert(thinData.arrSize == curIterSize);
// To-Do:
// 1. clean up the d_A/B_recBitsArr accordingly
// 2. re-calculate blocksDim
TIMER3 = std::chrono::high_resolution_clock::now();
checkCudaErrors(cudaFree(thinData.A_recBitsArr));
checkCudaErrors(cudaFree(thinData.B_recBitsArr));
TIMER_END(">>> persistenceIsthmusThinningCore::cudaFree(A&B)", TIMER3);
TIMER3 = std::chrono::high_resolution_clock::now();
checkCudaErrors(cudaMalloc(&(thinData.A_recBitsArr), sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(cudaMalloc(&(thinData.B_recBitsArr), sizeof(RecBitsType) * thinData.arrSize));
TIMER_END(">>> persistenceIsthmusThinningCore::cudaMalloc(A&B)", TIMER3);
TIMER3 = std::chrono::high_resolution_clock::now();
checkCudaErrors(cudaMemset(thinData.A_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(cudaMemset(thinData.B_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
TIMER_END(">>> persistenceIsthmusThinningCore::cudaMemset(A&B)", TIMER3);
TIMER3 = std::chrono::high_resolution_clock::now();
blocksDim.x = (thinData.arrSize + threadsDim.x - 1U) / threadsDim.x;
blocksDim.y = 1U;
while (blocksDim.x > 32768U)
{
blocksDim.x /= 2;
blocksDim.y *= 2;
}
TIMER_END(">>> persistenceIsthmusThinningCore::while blocks", TIMER3);
lastIterSize = curIterSize;
++curIter;
TIMER_END(">>> persistenceIsthmusThinningCore::while iter", TIMER2);
}
TIMER_END(">>> persistenceIsthmusThinningCore::while all", TIMER1);
}
void persistenceIsthmusThinning(const std::vector<IjkType>& compactIjkVec, const std::vector<ObjIdType>& voxelIdVec, std::vector<IjkType>& D_XK,
const IjkType& size3D, unsigned p, int maxIter)
{
// using namespace clique;
using namespace details;
namespace cp = clique::_private;
// ThinningData thinData(compactIjkVec.size(), size3D);
DevDataPack::InitParams packInitParams;
packInitParams.arrSize = compactIjkVec.size();
packInitParams.size3D = size3D;
packInitParams.useBirth = true;
packInitParams.useVoxelID = voxelIdVec.size() > 0;
DevDataPack thinData(packInitParams);
thinData.alloc();
checkCudaErrors(cudaMemset(thinData.recBitsArr, 0x01, sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(cudaMemset(thinData.A_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(cudaMemset(thinData.B_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(cudaMemcpy(thinData.compactIjkArr, compactIjkVec.data(), sizeof(IjkType) * thinData.arrSize, cudaMemcpyHostToDevice));
if (thinData.useVoxelID())
{
checkCudaErrors(cudaMemcpy(thinData.voxelIdArr, voxelIdVec.data(), sizeof(ObjIdType) * thinData.arrSize, cudaMemcpyHostToDevice));
}
checkCudaErrors(cudaMemset(thinData.birthArr, 0, sizeof(unsigned) * thinData.arrSize));
unsigned curIter = 0;
persistenceIsthmusThinningCore(thinData, curIter, p, maxIter);
D_XK.clear();
D_XK.resize(thinData.arrSize);
checkCudaErrors(cudaMemcpy(D_XK.data(), thinData.compactIjkArr, sizeof(IjkType) * thinData.arrSize, cudaMemcpyDeviceToHost));
thinData.dispose();
}
void persistenceIsthmusThinning(const std::vector<IjkType>& compactIjkVec, std::vector<IjkType>& D_XK, const IjkType& size3D, unsigned p, int maxIter)
{
std::vector<ObjIdType> fakeVoxelIdVec;
persistenceIsthmusThinning(compactIjkVec, fakeVoxelIdVec, D_XK, size3D, p, maxIter);
}
void oneChunkThinning(details::DevDataPack& thinData, unsigned curIter, unsigned dim,
unsigned p, const dim3& blocksDim, const dim3& threadsDim)
{
using namespace thin::clique;
namespace cp = thin::clique::_private;
using namespace details;
if (dim == 3U)
{
// Y <- K
cp::_assignKern<<<blocksDim, threadsDim>>>(thinData.recBitsArr, thinData.arrSize, REC_BIT_K, REC_BIT_Y);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
// Z <- {}
cp::_clearKern<<<blocksDim, threadsDim>>>(thinData.recBitsArr, thinData.arrSize, REC_BIT_Z);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
dimCrucialIsthmus<D3CliqueChecker>(thinData, blocksDim, threadsDim);
}
else if (dim == 2U)
{
dimCrucialIsthmus<D2CliqueChecker>(thinData, blocksDim, threadsDim);
}
else if (dim == 1U)
{
dimCrucialIsthmus<D1CliqueChecker>(thinData, blocksDim, threadsDim);
}
else if (dim == 0)
{
dimCrucialIsthmus<D0CliqueChecker>(thinData, blocksDim, threadsDim);
cp::_assignKern<<<blocksDim, threadsDim>>>(thinData.recBitsArr, thinData.arrSize, REC_BIT_Y, REC_BIT_X);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
cp::_updateBirthKern<<<blocksDim, threadsDim>>>(thinData.birthArr, thinData.recBitsArr, thinData.arrSize, curIter);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
cp::_unionKsetByBirth<<<blocksDim, threadsDim>>>(thinData.recBitsArr, thinData.birthArr, thinData.arrSize, curIter, p);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
cp::_clearKern<<<blocksDim, threadsDim>>>(thinData.recBitsArr, thinData.arrSize, REC_BIT_Y);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
cp::_clearKern<<<blocksDim, threadsDim>>>(thinData.recBitsArr, thinData.arrSize, REC_BIT_Z);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaMemset(thinData.A_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(cudaMemset(thinData.B_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
}
}
}; // namespace thin;
|
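The pair above illustrates the two main rewrites hipify applies to the thinning code: `cuda*` runtime calls are renamed to their `hip*` counterparts, and every triple-chevron kernel launch is expanded into the `hipLaunchKernelGGL` macro with explicit `dim3` grid/block arguments plus zero shared-memory and stream parameters. A minimal sketch of that transformation is shown below; the `scaleKern` kernel and `scaleOnDevice` helper are hypothetical and used only for illustration, not part of the dataset.

```cuda
#include <cuda_runtime.h>   // hipify rewrites this include to "hip/hip_runtime.h"

// Hypothetical kernel, only to show how the launch syntax is translated.
__global__ void scaleKern(float *data, unsigned n, float s)
{
    unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= s;
}

void scaleOnDevice(float *d_data, unsigned n, float s)
{
    dim3 threadsDim(128U, 1U, 1U);
    dim3 blocksDim((n + threadsDim.x - 1U) / threadsDim.x, 1U, 1U);

    // CUDA form, as in the .cu column:
    scaleKern<<<blocksDim, threadsDim>>>(d_data, n, s);

    // HIP form emitted by hipify, as in the .hip column:
    //   hipLaunchKernelGGL((scaleKern), dim3(blocksDim), dim3(threadsDim), 0, 0, d_data, n, s);

    cudaDeviceSynchronize();   // becomes hipDeviceSynchronize()
}
```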
3658cc072e97cf3e581a96bd874052b77a160b1a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
hipError_t addWithCuda(int *c, const int *a, const int *b, size_t size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Parallel Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, size_t size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
| 3658cc072e97cf3e581a96bd874052b77a160b1a.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Parallel Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
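The vector-add pair above keeps an identical structure and only swaps API names (`cudaError_t`/`hipError_t`, `cudaMalloc`/`hipMalloc`, `cudaMemcpy`/`hipMemcpy`, and so on), checking each call with a manual `if (cudaStatus != cudaSuccess)` block. The first pair instead relies on a `checkCudaErrors`-style helper; a minimal sketch of such a macro is given below. The macro name and the abort-on-error policy are assumptions for illustration, not part of the dataset.

```cuda
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

// Minimal error-check macro in the spirit of checkCudaErrors from the CUDA samples.
#define CHECK_CUDA(call)                                              \
    do {                                                              \
        cudaError_t err__ = (call);                                   \
        if (err__ != cudaSuccess) {                                   \
            fprintf(stderr, "CUDA error %s at %s:%d\n",               \
                    cudaGetErrorString(err__), __FILE__, __LINE__);   \
            exit(EXIT_FAILURE);                                       \
        }                                                             \
    } while (0)

// Usage, replacing the manual checks in addWithCuda:
//   CHECK_CUDA(cudaMalloc((void**)&dev_c, size * sizeof(int)));
//   CHECK_CUDA(cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice));
```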
e6712acd2eb1a22b6a6d62fa6c1e853d11bf617d.hip | // !!! This is a file automatically generated by hipify!!!
//
// This code performs 3D cone beam CT forwards and backwards projection
//
#include "conebeam_projection.h"
#include "float3x3.h"
#include "hoCuNDArray_math.h"
#include "vector_td.h"
#include "cuNDArray_elemwise.h"
#include "cuNDArray_operators.h"
#include "cuNDArray_utils.h"
#include "cuNFFT.h"
#include "check_CUDA.h"
#include "GPUTimer.h"
#include "cudaDeviceManager.h"
#include "hoNDArray_fileio.h"
#include "setup_grid.h"
#include <hip/hip_runtime_api.h>
#include <math_constants.h>
#include <hipfft.h>
#include <iostream>
#include <cmath>
#include <algorithm>
#include <vector>
#define PS_ORIGIN_CENTERING
#define IS_ORIGIN_CENTERING
//#define FLIP_Z_AXIS
// Read the projection/image data respectively as a texture (for input)
// - taking advantage of the cache and hardware interpolation
//
#define NORMALIZED_TC 1
static texture<float, 3, hipReadModeElementType>
image_tex( NORMALIZED_TC, hipFilterModeLinear, hipAddressModeBorder );
static texture<float, hipTextureType2DLayered, hipReadModeElementType>
projections_tex( NORMALIZED_TC, hipFilterModeLinear, hipAddressModeBorder );
namespace Gadgetron
{
// Utility to convert from degrees to radians
//
static inline __host__ __device__
float degrees2radians(float degree) {
return degree * (CUDART_PI_F/180.0f);
}
// Utilities for filtering in frequency space
//
static boost::shared_ptr< cuNDArray<float_complext> > cb_fft( cuNDArray<float> *data )
{
if( data == 0x0 )
throw std::runtime_error("CB FFT : illegal input pointer provided");
std::vector<size_t> in_dims = *data->get_dimensions();
std::vector<size_t> out_dims;
out_dims.push_back((in_dims[0]>>1)+1);
out_dims.push_back(in_dims[1]);
out_dims.push_back(in_dims[2]);
boost::shared_ptr< cuNDArray<float_complext> > result( new cuNDArray<float_complext>(&out_dims) );
hipfftHandle plan;
if( hipfftPlanMany( &plan, 1, (int*)(&in_dims[0]), 0x0, 1, in_dims[0], 0x0, 1, out_dims[0], HIPFFT_R2C, in_dims[1]*in_dims[2] ) != HIPFFT_SUCCESS) {
throw std::runtime_error("CB FFT plan failed");
}
if( hipfftExecR2C( plan, data->get_data_ptr(), (cuFloatComplex*) result->get_data_ptr() ) != HIPFFT_SUCCESS ) {
throw std::runtime_error("CB FFT execute failed");;
}
if( hipfftDestroy(plan) != HIPFFT_SUCCESS) {
throw std::runtime_error("CB FFT failed to destroy plan");
}
return result;
}
static void cb_ifft( cuNDArray<float_complext> *in_data, cuNDArray<float> *out_data )
{
if( in_data == 0x0 || out_data == 0x0 )
throw std::runtime_error("CB FFT : illegal input or output pointer provided");
std::vector<size_t> in_dims = *in_data->get_dimensions();
std::vector<size_t> out_dims = *out_data->get_dimensions();
hipfftHandle plan;
if( hipfftPlanMany( &plan, 1, (int*)(&out_dims[0]), 0x0, 1, in_dims[0], 0x0, 1, out_dims[0], HIPFFT_C2R, in_dims[1]*in_dims[2] ) != HIPFFT_SUCCESS) {
throw std::runtime_error("CB iFFT plan failed");
}
if( hipfftExecC2R( plan, (cuFloatComplex*) in_data->get_data_ptr(), out_data->get_data_ptr() ) != HIPFFT_SUCCESS ) {
throw std::runtime_error("CB iFFT execute failed");;
}
if( hipfftDestroy(plan) != HIPFFT_SUCCESS) {
throw std::runtime_error("CB iFFT failed to destroy plan");
}
*out_data /= float(out_dims[0]);
}
//
// Redundancy correction for short scan mode
// - i.e. for less than a full rotation of data
//
// See "Optimal short scan convolution reconstruction for fanbeam CT", Dennis Parker, Med. Phys. 9(2) 1982
// and (for the implementation) "Parker weights revisited", Wesarg et al, Med. Phys. 29(3) 2002.
//
static __device__ const float epsilon = 0.001f;
static __inline__ __device__ float S( float beta )
{
if( beta <= -0.5f ) return 0.0f;
else if( beta > -0.5f && beta < 0.5f ) return 0.5f*(1.0f+sinf(CUDART_PI_F*beta));
else /*if( beta >= 0.5f )*/ return 1.0f;
}
static __inline__ __device__ float B( float alpha, float delta )
{
return 2.0f*(delta-alpha)+epsilon;
}
static __inline__ __device__ float b( float alpha, float delta )
{
const float q = 0.1f; // with q=1 this formulae reduce to conventional Parker weights
return q*B(alpha, delta);
}
__global__ void
redundancy_correct_kernel( float *projections,
const float * __restrict__ angles,
uintd3 dims, // Dimensions of the projections array
float delta // The half-fan angle
)
{
const unsigned int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x;
const unsigned int num_elements = prod(dims);
if( idx < num_elements ){
const float in = projections[idx];
const uintd3 co = idx_to_co<3>( idx, dims );
const float tan_delta = tanf(delta);
const float alpha = -atanf((float(co[0])/float(dims[0])-0.5f)*2.0f*tan_delta);
const float beta = degrees2radians(angles[co[2]]);
float omega = 0.5f*(S(beta/b(alpha, delta)-0.5f)+
S((beta+2.0f*(alpha-delta)-epsilon)/b(alpha, delta)+0.5f)-
S((beta-CUDART_PI_F+2.0f*alpha)/b(-alpha, delta)-0.5f)-
S((beta-CUDART_PI_F-2.0f*delta-epsilon)/b(-alpha, delta)+0.5f));
projections[idx] = in*omega;
}
}
void
redundancy_correct( cuNDArray<float> *projections,
float *angles_DevPtr,
float delta // The half-fan angle in radians
)
{
//
// Validate the input
//
if( projections == 0x0 ){
throw std::runtime_error("Error: redundancy_correct: illegal array pointer provided");
}
if( projections->get_number_of_dimensions() != 3 ){
throw std::runtime_error("Error: redundancy_correct: projections array must be three-dimensional");
}
const size_t projection_res_x = projections->get_size(0);
const size_t projection_res_y = projections->get_size(1);
const size_t num_projections = projections->get_size(2);
uintd3 dims(projection_res_x, projection_res_y, num_projections);
// Launch kernel
//
dim3 dimBlock, dimGrid;
setup_grid( prod(dims), &dimBlock, &dimGrid );
hipLaunchKernelGGL(( redundancy_correct_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, projections->get_data_ptr(), angles_DevPtr, dims, delta );
CHECK_FOR_CUDA_ERROR();
}
/***
* Redundancy (or offset) correction from Wang. Med. Phys 2002, doi: 10.1118/1.1489043
*/
__global__ static void
offset_correct_kernel( float *projections,
const floatd2 * __restrict__ offsets,
uintd3 dims, // Dimensions of the projections array
floatd2 phys_dims, // Physical dimensions in mm
float SAD, // Source origin distance
float SDD // Source detector distance
)
{
const unsigned int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x;
const unsigned int num_elements = prod(dims);
if( idx < num_elements ){
const uintd3 co = idx_to_co<3>( idx, dims );
const floatd2 offset = offsets[co[2]];
const float t = phys_dims[0]*(float(co[0])/(float(dims[0]))-0.5f)+offset[0];
const float omega = phys_dims[0]/2.0f-fabs(offset[0]);
//const float omega = phys_dims[0]*float(co[0])/(2.0f*float(dims[0]));
if( fabs(t) <= fabs(omega) ){
//float w = 0.5*sinf(CUDART_PI_F*atanf(t/SDD)/(2.0f*atanf(omega/SDD)))+0.5;
float sqrt_w = sinf(CUDART_PI_F*(t+omega)/(4.0f*omega));
float w = sqrt_w*sqrt_w;
projections[idx] *= w;
}
}
}
static void
offset_correct( cuNDArray<float> *projections,
floatd2* offsets, // Ptr to cuda array
floatd2 phys_dims,
float SAD, // Source origin distance
float SDD // Source detector distance
)
{
//
// Validate the input
//
if( projections == 0x0 ){
throw std::runtime_error("Error: offset_correct: illegal array pointer provided");
}
if( projections->get_number_of_dimensions() != 3 ){
throw std::runtime_error("Error: offset_correct: projections array must be three-dimensional");
}
const size_t projection_res_x = projections->get_size(0);
const size_t projection_res_y = projections->get_size(1);
const size_t num_projections = projections->get_size(2);
uintd3 dims(projection_res_x, projection_res_y, num_projections);
// Launch kernel
//
dim3 dimBlock, dimGrid;
setup_grid( prod(dims), &dimBlock, &dimGrid );
hipLaunchKernelGGL(( offset_correct_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, projections->get_data_ptr(), offsets, dims, phys_dims, SAD, SDD );
CHECK_FOR_CUDA_ERROR();
}
/***
* Redundancy (or offset) correction from Wang. Med. Phys 2002, doi: 10.1118/1.1489043
*/
__global__ static void
offset_correct_kernel_sqrt( float *projections,
const floatd2 * __restrict__ offsets,
uintd3 dims, // Dimensions of the projections array
floatd2 phys_dims, // Physical dimensions in mm
float SAD, // Source origin distance
float SDD // Source detector distance
)
{
const unsigned int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x;
const unsigned int num_elements = prod(dims);
if( idx < num_elements ){
const uintd3 co = idx_to_co<3>( idx, dims );
const floatd2 offset = offsets[co[2]];
const float t = phys_dims[0]*(float(co[0])/(float(dims[0]))-0.5f)+offset[0];
const float omega = phys_dims[0]/2.0f-fabs(offset[0]);
//const float omega = phys_dims[0]*float(co[0])/(2.0f*float(dims[0]));
if( fabs(t) <= fabs(omega) ){
//float w = 0.5*sinf(CUDART_PI_F*atanf(t/SDD)/(2.0f*atanf(omega/SDD)))+0.5;
float sqrt_w = sinf(CUDART_PI_F*(t+omega)/(4.0f*omega));
projections[idx] *= sqrt_w;
}
}
}
static void
offset_correct_sqrt( cuNDArray<float> *projections,
floatd2* offsets, // Ptr to cuda array
floatd2 phys_dims,
float SAD, // Source origin distance
float SDD // Source detector distance
)
{
//
// Validate the input
//
if( projections == 0x0 ){
throw std::runtime_error("Error: offset_correct: illegal array pointer provided");
}
if( projections->get_number_of_dimensions() != 3 ){
throw std::runtime_error("Error: offset_correct: projections array must be three-dimensional");
}
const size_t projection_res_x = projections->get_size(0);
const size_t projection_res_y = projections->get_size(1);
const size_t num_projections = projections->get_size(2);
uintd3 dims(projection_res_x, projection_res_y, num_projections);
// Launch kernel
//
dim3 dimBlock, dimGrid;
setup_grid( prod(dims), &dimBlock, &dimGrid );
hipLaunchKernelGGL(( offset_correct_kernel_sqrt), dim3(dimGrid), dim3(dimBlock) , 0, 0, projections->get_data_ptr(), offsets, dims, phys_dims, SAD, SDD );
CHECK_FOR_CUDA_ERROR();
}
void apply_offset_correct(hoCuNDArray<float>* projections,std::vector<floatd2>& offsets, floatd2 ps_dims_in_mm, float SDD, float SAD){
std::vector<size_t> dims = *projections->get_dimensions();
size_t projection_size = dims[0]*dims[1];
thrust::device_vector<floatd2> offsets_devVec(offsets);
//Calculate number of projections we can fit on device, rounded to nearest MB
size_t batch_size = (1024)*(cudaDeviceManager::Instance()->getFreeMemory()/(1024*projection_size*sizeof(float)));
size_t remaining = dims[2];
for (unsigned int i = 0; i < dims[2]/(batch_size+1)+1; i++){
std::vector<size_t> projection_dims = dims;
projection_dims[2] = ::min(remaining,batch_size);
//Make a view of the batch of projections
hoCuNDArray<float> projections_view(projection_dims,projections->get_data_ptr()+batch_size*i);
cuNDArray<float> cu_projections(projections_view); //Copy to device
floatd2* cu_offsets = thrust::raw_pointer_cast(&offsets_devVec[i*batch_size]);
offset_correct_sqrt(&cu_projections,cu_offsets,ps_dims_in_mm,SAD,SDD);
hipMemcpy(projections_view.get_data_ptr(),cu_projections.get_data_ptr(),cu_projections.get_number_of_bytes(),hipMemcpyDeviceToHost);
remaining -= batch_size;
}
}
//
// Forwards projection
//
__global__ void
conebeam_forwards_projection_kernel( float * __restrict__ projections,
float * __restrict__ angles,
floatd2 *offsets,
floatd3 is_dims_in_pixels,
floatd3 is_dims_in_mm,
intd2 ps_dims_in_pixels_int,
floatd2 ps_dims_in_mm,
int num_projections,
float SDD,
float SAD,
int num_samples_per_ray )
{
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x;
const int num_elements = prod(ps_dims_in_pixels_int)*num_projections;
if( idx < num_elements){
const intd3 co = idx_to_co<3>( idx, intd3(ps_dims_in_pixels_int[0], ps_dims_in_pixels_int[1], num_projections) );
// Projection space dimensions and spacing
//
const floatd2 ps_dims_in_pixels = floatd2(ps_dims_in_pixels_int[0], ps_dims_in_pixels_int[1]);
const floatd2 ps_spacing = ps_dims_in_mm / ps_dims_in_pixels;
// Determine projection angle and rotation matrix
//
const float angle = angles[co[2]];
const float3x3 rotation = calcRotationMatrixAroundZ(degrees2radians(angle));
// Find start and end point for the line integral (image space)
//
floatd3 startPoint = floatd3(0.0f, -SAD, 0.0f);
startPoint = mul(rotation, startPoint);
// Projection plate indices
//
#ifdef PS_ORIGIN_CENTERING
const floatd2 ps_pc = floatd2(co[0], co[1]) + floatd2(0.5);
#else
const floatd2 ps_pc = floatd2(co[0], co[1]);
#endif
// Convert the projection plate coordinates into image space,
// - local to the plate in metric units
// - including half-fan and sag correction
//
const floatd2 proj_coords = (ps_pc / ps_dims_in_pixels - 0.5f) * ps_dims_in_mm + offsets[co[2]];
// Define the end point for the line integrals
//
const float ADD = SDD - SAD; // in mm.
floatd3 endPoint = floatd3(proj_coords[0], ADD, proj_coords[1]);
endPoint = mul(rotation, endPoint);
// Find direction vector of the line integral
//
floatd3 dir = endPoint-startPoint;
// Perform integration only inside the bounding cylinder of the image volume
//
const floatd3 vec_over_dir = (is_dims_in_mm-startPoint)/dir;
const floatd3 vecdiff_over_dir = (-is_dims_in_mm-startPoint)/dir;
const floatd3 start = amin(vecdiff_over_dir, vec_over_dir);
const floatd3 end = amax(vecdiff_over_dir, vec_over_dir);
float a1 = fmax(max(start),0.0f);
float aend = fmin(min(end),1.0f);
startPoint += a1*dir;
const float sampling_distance = norm((aend-a1)*dir)/num_samples_per_ray;
// Now perform conversion of the line integral start/end into voxel coordinates
//
startPoint /= is_dims_in_mm;
#ifdef FLIP_Z_AXIS
startPoint[2] *= -1.0f;
#endif
startPoint += 0.5f;
dir /= is_dims_in_mm;
#ifdef FLIP_Z_AXIS
dir[2] *= -1.0f;
#endif
dir /= float(num_samples_per_ray); // now in step size units
//
// Perform line integration
//
float result = 0.0f;
for ( int sampleIndex = 0; sampleIndex<num_samples_per_ray; sampleIndex++) {
#ifndef IS_ORIGIN_CENTERING
floatd3 samplePoint = startPoint+dir*float(sampleIndex) + floatd3(0.5f)/is_dims_in_pixels;
#else
floatd3 samplePoint = startPoint+dir*float(sampleIndex);
#endif
// Accumulate result
//
result += tex3D( image_tex, samplePoint[0], samplePoint[1], samplePoint[2] );
}
// Output (normalized to the length of the ray)
//
projections[idx] = result*sampling_distance;
}
}
//
// Forwards projection of a 3D volume onto a set of (binned) projections
//
void
conebeam_forwards_projection( hoCuNDArray<float> *projections,
hoCuNDArray<float> *image,
std::vector<float> angles,
std::vector<floatd2> offsets,
std::vector<unsigned int> indices,
int projections_per_batch,
float samples_per_pixel,
floatd3 is_dims_in_mm,
floatd2 ps_dims_in_mm,
float SDD,
float SAD)
{
//
// Validate the input
//
if( projections == 0x0 || image == 0x0 ){
throw std::runtime_error("Error: conebeam_forwards_projection: illegal array pointer provided");
}
if( projections->get_number_of_dimensions() != 3 ){
throw std::runtime_error("Error: conebeam_forwards_projection: projections array must be three-dimensional");
}
if( image->get_number_of_dimensions() != 3 ){
throw std::runtime_error("Error: conebeam_forwards_projection: image array must be three-dimensional");
}
if( projections->get_size(2) != angles.size() || projections->get_size(2) != offsets.size() ) {
throw std::runtime_error("Error: conebeam_forwards_projection: inconsistent sizes of input arrays/vectors");
}
int projection_res_x = projections->get_size(0);
int projection_res_y = projections->get_size(1);
int num_projections_in_bin = indices.size();
int num_projections_in_all_bins = projections->get_size(2);
int matrix_size_x = image->get_size(0);
int matrix_size_y = image->get_size(1);
int matrix_size_z = image->get_size(2);
hoCuNDArray<float> *int_projections = projections;
if( projections_per_batch > num_projections_in_bin )
projections_per_batch = num_projections_in_bin;
int num_batches = (num_projections_in_bin+projections_per_batch-1) / projections_per_batch;
// Build texture from input image
//
hipFuncSetCacheConfig(conebeam_forwards_projection_kernel, hipFuncCachePreferL1);
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
hipExtent extent;
extent.width = matrix_size_x;
extent.height = matrix_size_y;
extent.depth = matrix_size_z;
hipMemcpy3DParms cpy_params = {0};
cpy_params.kind = hipMemcpyHostToDevice;
cpy_params.extent = extent;
hipArray *image_array;
hipMalloc3DArray(&image_array, &channelDesc, extent);
CHECK_FOR_CUDA_ERROR();
cpy_params.dstArray = image_array;
cpy_params.srcPtr = make_hipPitchedPtr
((void*)image->get_data_ptr(), extent.width*sizeof(float), extent.width, extent.height);
hipMemcpy3D(&cpy_params);
CHECK_FOR_CUDA_ERROR();
hipBindTextureToArray(image_tex, image_array, channelDesc);
CHECK_FOR_CUDA_ERROR();
// Allocate the angles, offsets and projections in device memory
//
float *projections_DevPtr, *projections_DevPtr2;
hipMalloc( (void**) &projections_DevPtr, projection_res_x*projection_res_y*projections_per_batch*sizeof(float));
hipMalloc( (void**) &projections_DevPtr2, projection_res_x*projection_res_y*projections_per_batch*sizeof(float));
hipStream_t mainStream, indyStream;
hipStreamCreate(&mainStream);
hipStreamCreate(&indyStream);
std::vector<float> angles_vec;
std::vector<floatd2> offsets_vec;
for( int p=0; p<indices.size(); p++ ){
int from_id = indices[p];
if( from_id >= num_projections_in_all_bins ) {
throw std::runtime_error("Error: conebeam_forwards_projection: illegal index in bin");
}
angles_vec.push_back(angles[from_id]);
offsets_vec.push_back(offsets[from_id]);
}
thrust::device_vector<float> angles_devVec(angles_vec);
thrust::device_vector<floatd2> offsets_devVec(offsets_vec);
//
// Iterate over the batches
//
for (unsigned int batch=0; batch<num_batches; batch++ ){
int from_projection = batch * projections_per_batch;
int to_projection = (batch+1) * projections_per_batch;
if (to_projection > num_projections_in_bin)
to_projection = num_projections_in_bin;
int projections_in_batch = to_projection-from_projection;
// Block/grid configuration
//
dim3 dimBlock, dimGrid;
setup_grid( projection_res_x*projection_res_y*projections_in_batch, &dimBlock, &dimGrid );
// Launch kernel
//
floatd3 is_dims_in_pixels(matrix_size_x, matrix_size_y, matrix_size_z);
intd2 ps_dims_in_pixels(projection_res_x, projection_res_y);
float* raw_angles = thrust::raw_pointer_cast(&angles_devVec[from_projection]);
floatd2* raw_offsets = thrust::raw_pointer_cast(&offsets_devVec[from_projection]);
hipLaunchKernelGGL(( conebeam_forwards_projection_kernel), dim3(dimGrid), dim3(dimBlock), 0, mainStream ,
projections_DevPtr, raw_angles, raw_offsets,
is_dims_in_pixels, is_dims_in_mm, ps_dims_in_pixels, ps_dims_in_mm,
projections_in_batch, SDD, SAD, samples_per_pixel*float(matrix_size_x) );
// If not initial batch, start copying the old stuff
//
int p = from_projection;
while( p<to_projection) {
int num_sequential_projections = 1;
while( p+num_sequential_projections < to_projection &&
indices[p+num_sequential_projections]==(indices[p+num_sequential_projections-1]+1) ){
num_sequential_projections++;
}
int to_id = indices[p];
int size = projection_res_x*projection_res_y;
hipMemcpyAsync( int_projections->get_data_ptr()+to_id*size,
projections_DevPtr+(p-from_projection)*size,
size*num_sequential_projections*sizeof(float),
hipMemcpyDeviceToHost, mainStream);
p += num_sequential_projections;
}
std::swap(projections_DevPtr, projections_DevPtr2);
std::swap(mainStream, indyStream);
}
hipFree(projections_DevPtr2);
hipFree(projections_DevPtr);
hipFreeArray(image_array);
CUDA_CALL(hipStreamDestroy(indyStream));
CUDA_CALL(hipStreamDestroy(mainStream));
CHECK_FOR_CUDA_ERROR();
}
template <bool FBP> __global__ void
conebeam_backwards_projection_kernel( float * __restrict__ image,
const float * __restrict__ angles,
floatd2 *offsets,
intd3 is_dims_in_pixels_int,
floatd3 is_dims_in_mm,
floatd2 ps_dims_in_pixels,
floatd2 ps_dims_in_mm,
int num_projections_in_batch,
float num_projections_in_bin,
float SDD,
float SAD,
bool accumulate )
{
// Image voxel to backproject into (pixel coordinate and index)
//
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x;
const int num_elements = prod(is_dims_in_pixels_int);
if( idx < num_elements ){
const intd3 co = idx_to_co<3>(idx, is_dims_in_pixels_int);
#ifdef IS_ORIGIN_CENTERING
const floatd3 is_pc = floatd3(co[0], co[1], co[2]) + floatd3(0.5);
#else
const floatd3 is_pc = floatd3(co[0], co[1], co[2]);
#endif
// Normalized image space coordinate [-0.5, 0.5[
//
const floatd3 is_dims_in_pixels(is_dims_in_pixels_int[0],is_dims_in_pixels_int[1],is_dims_in_pixels_int[2]);
#ifdef FLIP_Z_AXIS
floatd3 is_nc = is_pc / is_dims_in_pixels - floatd3(0.5f);
is_nc[2] *= -1.0f;
#else
const floatd3 is_nc = is_pc / is_dims_in_pixels - floatd3(0.5f);
#endif
// Image space coordinate in metric units
//
const floatd3 pos = is_nc * is_dims_in_mm;
// Read the existing output value for accumulation at this point.
// The cost of this fetch is hidden by the loop
const float incoming = (accumulate) ? image[idx] : 0.0f;
// Backprojection loop
//
float result = 0.0f;
for( int projection = 0; projection < num_projections_in_batch; projection++ ) {
// Projection angle
//
const float angle = degrees2radians(angles[projection]);
// Projection rotation matrix
//
const float3x3 inverseRotation = calcRotationMatrixAroundZ(-angle);
// Rotated image coordinate (local to the projection's coordinate system)
//
const floatd3 pos_proj = mul(inverseRotation, pos);
// Project the image position onto the projection plate.
// Account for half-fan and sag offsets.
//
const floatd3 startPoint = floatd3(0.0f, -SAD, 0.0f);
floatd3 dir = pos_proj - startPoint;
dir = dir / dir[1];
const floatd3 endPoint = startPoint + dir * SDD;
const floatd2 endPoint2d = floatd2(endPoint[0], endPoint[2]) - offsets[projection];
// Convert metric projection coordinates into pixel coordinates
//
#ifndef PS_ORIGIN_CENTERING
floatd2 ps_pc = ((endPoint2d / ps_dims_in_mm) + floatd2(0.5f)) + floatd2(0.5f)/ps_dims_in_pixels;
//floatd2 ps_pc = ((endPoint2d / ps_dims_in_mm) + floatd2(0.5f)) * ps_dims_in_pixels + floatd2(0.5f);
#else
floatd2 ps_pc = ((endPoint2d / ps_dims_in_mm) + floatd2(0.5f));
#endif
// Apply filter (filtered backprojection mode only)
//
float weight = 1.0;
if( FBP ){
// Equation 3.59, page 96 and equation 10.2, page 386
// in Computed Tomography 2nd edition, Jiang Hsieh
//
const float xx = pos[0];
const float yy = pos[1];
const float beta = angle;
const float r = hypotf(xx,yy);
const float phi = atan2f(yy,xx);
const float D = SAD;
const float ym = r*sinf(beta-phi);
const float U = (D+ym)/D;
weight = 1.0f/(U*U);
}
// Read the projection data (bilinear interpolation enabled) and accumulate
//
result += weight * tex2DLayered( projections_tex, ps_pc[0], ps_pc[1], projection );
}
// Output normalized image
//
image[idx] = incoming + result / num_projections_in_bin;
}
}
//
// Backprojection
//
template <bool FBP>
void conebeam_backwards_projection( hoCuNDArray<float> *projections,
hoCuNDArray<float> *image,
std::vector<float> angles,
std::vector<floatd2> offsets,
std::vector<unsigned int> indices,
int projections_per_batch,
intd3 is_dims_in_pixels,
floatd3 is_dims_in_mm,
floatd2 ps_dims_in_mm,
float SDD,
float SAD,
bool short_scan,
bool use_offset_correction,
bool accumulate,
cuNDArray<float> *cosine_weights,
cuNDArray<float> *frequency_filter
)
{
//
// Validate the input
//
if( projections == 0x0 || image == 0x0 ){
throw std::runtime_error("Error: conebeam_backwards_projection: illegal array pointer provided");
}
if( projections->get_number_of_dimensions() != 3 ){
throw std::runtime_error("Error: conebeam_backwards_projection: projections array must be three-dimensional");
}
if( image->get_number_of_dimensions() != 3 ){
throw std::runtime_error("Error: conebeam_backwards_projection: image array must be three-dimensional");
}
if( projections->get_size(2) != angles.size() || projections->get_size(2) != offsets.size() ) {
throw std::runtime_error("Error: conebeam_backwards_projection: inconsistent sizes of input arrays/vectors");
}
if( FBP && !(cosine_weights && frequency_filter) ){
throw std::runtime_error("Error: conebeam_backwards_projection: for _filtered_ backprojection both cosine weights and a filter must be provided");
}
// Some utility variables
//
int matrix_size_x = image->get_size(0);
int matrix_size_y = image->get_size(1);
int matrix_size_z = image->get_size(2);
floatd3 is_dims(matrix_size_x, matrix_size_y, matrix_size_z);
int num_image_elements = matrix_size_x*matrix_size_y*matrix_size_z;
int projection_res_x = projections->get_size(0);
int projection_res_y = projections->get_size(1);
floatd2 ps_dims_in_pixels(projection_res_x, projection_res_y);
int num_projections_in_all_bins = projections->get_size(2);
int num_projections_in_bin = indices.size();
if( projections_per_batch > num_projections_in_bin )
projections_per_batch = num_projections_in_bin;
int num_batches = (num_projections_in_bin+projections_per_batch-1) / projections_per_batch;
// Allocate device memory for the backprojection result
//
boost::shared_ptr< cuNDArray<float> > image_device;
if( accumulate ){
image_device = boost::shared_ptr< cuNDArray<float> >(new cuNDArray<float>(image));
}
else{
image_device = boost::shared_ptr< cuNDArray<float> >(new cuNDArray<float>(image->get_dimensions().get()));
}
// Allocate the angles, offsets and projections in device memory
//
float *projections_DevPtr, *projections_DevPtr2;
hipMalloc( (void**) &projections_DevPtr, projection_res_x*projection_res_y*projections_per_batch*sizeof(float));
hipMalloc( (void**) &projections_DevPtr2, projection_res_x*projection_res_y*projections_per_batch*sizeof(float));
hipStream_t mainStream, indyStream;
hipStreamCreate(&mainStream);
hipStreamCreate(&indyStream);
std::vector<float> angles_vec;
std::vector<floatd2> offsets_vec;
for( int p=0; p<indices.size(); p++ ){
int from_id = indices[p];
if( from_id >= num_projections_in_all_bins ) {
throw std::runtime_error("Error: conebeam_backwards_projection: illegal index in bin");
}
angles_vec.push_back(angles[from_id]);
offsets_vec.push_back(offsets[from_id]);
}
thrust::device_vector<float> angles_devVec(angles_vec);
thrust::device_vector<floatd2> offsets_devVec(offsets_vec);
// From/to for the first batch
// - to enable working streams...
//
int from_projection = 0;
int to_projection = projections_per_batch;
if (to_projection > num_projections_in_bin )
to_projection = num_projections_in_bin;
int projections_in_batch = to_projection-from_projection;
std::vector<size_t> dims;
dims.push_back(projection_res_x);
dims.push_back(projection_res_y);
dims.push_back(projections_in_batch);
std::vector<size_t> dims_next;
cuNDArray<float> *projections_batch = new cuNDArray<float>(&dims, projections_DevPtr);
// Upload first projections batch adhering to the binning.
// Be sure to copy sequentially numbered projections in one copy operation.
//
{
int p = from_projection;
while( p<to_projection ) {
int num_sequential_projections = 1;
while( p+num_sequential_projections < to_projection &&
indices[p+num_sequential_projections]==(indices[p+num_sequential_projections-1]+1) ){
num_sequential_projections++;
}
int from_id = indices[p];
int size = projection_res_x*projection_res_y;
hipMemcpyAsync( projections_batch->get_data_ptr()+(p-from_projection)*size,
projections->get_data_ptr()+from_id*size,
size*num_sequential_projections*sizeof(float), hipMemcpyHostToDevice, mainStream );
CHECK_FOR_CUDA_ERROR();
p += num_sequential_projections;
}
}
//
// Iterate over batches
//
for( int batch = 0; batch < num_batches; batch++ ) {
from_projection = batch * projections_per_batch;
to_projection = (batch+1) * projections_per_batch;
if (to_projection > num_projections_in_bin )
to_projection = num_projections_in_bin;
projections_in_batch = to_projection-from_projection;
float* raw_angles = thrust::raw_pointer_cast(&angles_devVec[from_projection]);
floatd2* raw_offsets = thrust::raw_pointer_cast(&offsets_devVec[from_projection]);
if( FBP ){
// Apply cosine weighting : "SDD / sqrt(SDD*SDD + u*u + v*v)"
// - with (u,v) positions given in metric units on a virtual detector at the origin
//
*projections_batch *= *cosine_weights;
// Redundancy correct
// - for short scan mode
//
if( short_scan ){
float delta = std::atan(ps_dims_in_mm[0]/(2.0f*SDD));
redundancy_correct( projections_batch, raw_angles, delta );
}
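// For instance, a (hypothetical) 400 mm wide detector with SDD = 1500 mm gives
// delta = atan(400/3000) ~ 0.13 rad ~ 7.6 degrees as the half-fan angle.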
// Apply frequency filter
// - use zero padding to avoid the cyclic boundary conditions induced by the fft
//
std::vector<size_t> batch_dims = *projections_batch->get_dimensions();
uint64d3 pad_dims(batch_dims[0]<<1, batch_dims[1], batch_dims[2]);
boost::shared_ptr< cuNDArray<float> > padded_projections = pad<float,3>( pad_dims, projections_batch );
boost::shared_ptr< cuNDArray<complext<float> > > complex_projections = cb_fft( padded_projections.get() );
*complex_projections *= *frequency_filter;
cb_ifft( complex_projections.get(), padded_projections.get() );
uint64d3 crop_offsets(batch_dims[0]>>1, 0, 0);
crop<float,3>( crop_offsets, padded_projections.get(), projections_batch );
// Apply offset correction
// - for half fan mode, sag correction etc.
//
if (use_offset_correction)
offset_correct( projections_batch, raw_offsets, ps_dims_in_mm, SAD, SDD );
} else if (use_offset_correction)
offset_correct_sqrt( projections_batch, raw_offsets, ps_dims_in_mm, SAD, SDD );
// Build array for input texture
//
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
hipExtent extent;
extent.width = projection_res_x;
extent.height = projection_res_y;
extent.depth = projections_in_batch;
hipArray *projections_array;
hipMalloc3DArray( &projections_array, &channelDesc, extent, hipArrayLayered );
CHECK_FOR_CUDA_ERROR();
hipMemcpy3DParms cpy_params = {0};
cpy_params.extent = extent;
cpy_params.dstArray = projections_array;
cpy_params.kind = hipMemcpyDeviceToDevice;
cpy_params.srcPtr =
make_hipPitchedPtr( (void*)projections_batch->get_data_ptr(), projection_res_x*sizeof(float),
projection_res_x, projection_res_y );
hipMemcpy3DAsync( &cpy_params, mainStream );
CHECK_FOR_CUDA_ERROR();
hipBindTextureToArray( projections_tex, projections_array, channelDesc );
CHECK_FOR_CUDA_ERROR();
// Upload projections for the next batch
// - to enable streaming
//
if( batch < num_batches-1 ){ // for using multiple streams to hide the cost of the uploads
int from_projection_next = (batch+1) * projections_per_batch;
int to_projection_next = (batch+2) * projections_per_batch;
if (to_projection_next > num_projections_in_bin )
to_projection_next = num_projections_in_bin;
int projections_in_batch_next = to_projection_next-from_projection_next;
// printf("batch: %03i, handling projections: %03i - %03i, angles: %.2f - %.2f\n",
// batch+1, from_projection_next, to_projection_next-1, angles[from_projection_next], angles[to_projection_next-1]);
// Allocate device memory for projections and upload
//
dims_next.clear();
dims_next.push_back(projection_res_x);
dims_next.push_back(projection_res_y);
dims_next.push_back(projections_in_batch_next);
cuNDArray<float> projections_batch_next(&dims, projections_DevPtr2);
// Upload projections adhering to the binning.
// Be sure to copy sequentially numbered projections in one copy operation.
//
int p = from_projection_next;
while( p<to_projection_next ) {
int num_sequential_projections = 1;
while( p+num_sequential_projections < to_projection_next &&
indices[p+num_sequential_projections]==(indices[p+num_sequential_projections-1]+1) ){
num_sequential_projections++;
}
int from_id = indices[p];
int size = projection_res_x*projection_res_y;
hipMemcpyAsync( projections_batch_next.get_data_ptr()+(p-from_projection_next)*size,
projections->get_data_ptr()+from_id*size,
size*num_sequential_projections*sizeof(float), hipMemcpyHostToDevice, indyStream );
CHECK_FOR_CUDA_ERROR();
p += num_sequential_projections;
}
}
// Define dimensions of grid/blocks.
//
dim3 dimBlock, dimGrid;
setup_grid( matrix_size_x*matrix_size_y*matrix_size_z, &dimBlock, &dimGrid );
// Invoke kernel
//
hipFuncSetCacheConfig(conebeam_backwards_projection_kernel<FBP>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( conebeam_backwards_projection_kernel<FBP>), dim3(dimGrid), dim3(dimBlock), 0, mainStream ,
image_device->get_data_ptr(), raw_angles, raw_offsets,
is_dims_in_pixels, is_dims_in_mm, ps_dims_in_pixels, ps_dims_in_mm,
projections_in_batch, num_projections_in_bin, SDD, SAD, (batch==0) ? accumulate : true );
CHECK_FOR_CUDA_ERROR();
// Cleanup
//
hipUnbindTexture(projections_tex);
hipFreeArray(projections_array);
CHECK_FOR_CUDA_ERROR();
std::swap(projections_DevPtr, projections_DevPtr2);
std::swap(mainStream, indyStream);
delete projections_batch;
if( batch < num_batches-1 )
projections_batch = new cuNDArray<float>(&dims_next, projections_DevPtr);
}
// Copy result from device to host
//
hipMemcpy( image->get_data_ptr(), image_device->get_data_ptr(),
num_image_elements*sizeof(float), hipMemcpyDeviceToHost );
CHECK_FOR_CUDA_ERROR();
hipFree(projections_DevPtr2);
hipFree(projections_DevPtr);
CUDA_CALL(hipStreamDestroy(indyStream));
CUDA_CALL(hipStreamDestroy(mainStream));
CHECK_FOR_CUDA_ERROR();
}
// Template instantiations
//
template void conebeam_backwards_projection<false>
( hoCuNDArray<float>*, hoCuNDArray<float>*, std::vector<float>, std::vector<floatd2>, std::vector<unsigned int>,
int, intd3, floatd3, floatd2, float, float, bool, bool, bool, cuNDArray<float>*, cuNDArray<float>* );
template void conebeam_backwards_projection<true>
( hoCuNDArray<float>*, hoCuNDArray<float>*, std::vector<float>, std::vector<floatd2>, std::vector<unsigned int>,
int, intd3, floatd3, floatd2, float, float, bool, bool, bool, cuNDArray<float>*, cuNDArray<float>* );
}
| e6712acd2eb1a22b6a6d62fa6c1e853d11bf617d.cu | //
// This code performs 3D cone beam CT forwards and backwards projection
//
#include "conebeam_projection.h"
#include "float3x3.h"
#include "hoCuNDArray_math.h"
#include "vector_td.h"
#include "cuNDArray_elemwise.h"
#include "cuNDArray_operators.h"
#include "cuNDArray_utils.h"
#include "cuNFFT.h"
#include "check_CUDA.h"
#include "GPUTimer.h"
#include "cudaDeviceManager.h"
#include "hoNDArray_fileio.h"
#include "setup_grid.h"
#include <cuda_runtime_api.h>
#include <math_constants.h>
#include <cufft.h>
#include <iostream>
#include <cmath>
#include <algorithm>
#include <vector>
#define PS_ORIGIN_CENTERING
#define IS_ORIGIN_CENTERING
//#define FLIP_Z_AXIS
// Read the projection/image data respectively as a texture (for input)
// - taking advantage of the cache and hardware interpolation
//
#define NORMALIZED_TC 1
static texture<float, 3, cudaReadModeElementType>
image_tex( NORMALIZED_TC, cudaFilterModeLinear, cudaAddressModeBorder );
static texture<float, cudaTextureType2DLayered, cudaReadModeElementType>
projections_tex( NORMALIZED_TC, cudaFilterModeLinear, cudaAddressModeBorder );
namespace Gadgetron
{
// Utility to convert from degrees to radians
//
static inline __host__ __device__
float degrees2radians(float degree) {
return degree * (CUDART_PI_F/180.0f);
}
// Utilities for filtering in frequency space
//
static boost::shared_ptr< cuNDArray<float_complext> > cb_fft( cuNDArray<float> *data )
{
if( data == 0x0 )
throw std::runtime_error("CB FFT : illegal input pointer provided");
std::vector<size_t> in_dims = *data->get_dimensions();
std::vector<size_t> out_dims;
out_dims.push_back((in_dims[0]>>1)+1);
out_dims.push_back(in_dims[1]);
out_dims.push_back(in_dims[2]);
boost::shared_ptr< cuNDArray<float_complext> > result( new cuNDArray<float_complext>(&out_dims) );
cufftHandle plan;
if( cufftPlanMany( &plan, 1, (int*)(&in_dims[0]), 0x0, 1, in_dims[0], 0x0, 1, out_dims[0], CUFFT_R2C, in_dims[1]*in_dims[2] ) != CUFFT_SUCCESS) {
throw std::runtime_error("CB FFT plan failed");
}
if( cufftExecR2C( plan, data->get_data_ptr(), (cuFloatComplex*) result->get_data_ptr() ) != CUFFT_SUCCESS ) {
throw std::runtime_error("CB FFT execute failed");
}
if( cufftDestroy(plan) != CUFFT_SUCCESS) {
throw std::runtime_error("CB FFT failed to destroy plan");
}
return result;
}
static void cb_ifft( cuNDArray<float_complext> *in_data, cuNDArray<float> *out_data )
{
if( in_data == 0x0 || out_data == 0x0 )
throw std::runtime_error("CB FFT : illegal input or output pointer provided");
std::vector<size_t> in_dims = *in_data->get_dimensions();
std::vector<size_t> out_dims = *out_data->get_dimensions();
cufftHandle plan;
if( cufftPlanMany( &plan, 1, (int*)(&out_dims[0]), 0x0, 1, in_dims[0], 0x0, 1, out_dims[0], CUFFT_C2R, in_dims[1]*in_dims[2] ) != CUFFT_SUCCESS) {
throw std::runtime_error("CB iFFT plan failed");
}
if( cufftExecC2R( plan, (cuFloatComplex*) in_data->get_data_ptr(), out_data->get_data_ptr() ) != CUFFT_SUCCESS ) {
throw std::runtime_error("CB iFFT execute failed");
}
if( cufftDestroy(plan) != CUFFT_SUCCESS) {
throw std::runtime_error("CB iFFT failed to destroy plan");
}
*out_data /= float(out_dims[0]);
}
//
// Redundancy correction for short scan mode
// - i.e. for less than a full rotation of data
//
// See "Optimal short scan convolution reconstruction for fanbeam CT", Dennis Parker, Med. Phys. 9(2) 1982
// and (for the implementation) "Parker weights revisited", Wesarg et al, Med. Phys. 29(3) 2002.
//
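// In short, as implemented below: S(beta) is a raised-sine step from 0 to 1 over beta in
// [-0.5, 0.5], B(alpha, delta) = 2*(delta - alpha) + epsilon is the angular width of the
// doubly-measured region, and b(alpha, delta) = q*B(alpha, delta) with q = 0.1 narrows the
// blending interval (q = 1 would recover the classic Parker weights).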
static __device__ const float epsilon = 0.001f;
static __inline__ __device__ float S( float beta )
{
if( beta <= -0.5f ) return 0.0f;
else if( beta > -0.5f && beta < 0.5f ) return 0.5f*(1.0f+sinf(CUDART_PI_F*beta));
else /*if( beta >= 0.5f )*/ return 1.0f;
}
static __inline__ __device__ float B( float alpha, float delta )
{
return 2.0f*(delta-alpha)+epsilon;
}
static __inline__ __device__ float b( float alpha, float delta )
{
const float q = 0.1f; // with q=1 this formula reduces to the conventional Parker weights
return q*B(alpha, delta);
}
__global__ void
redundancy_correct_kernel( float *projections,
const float * __restrict__ angles,
uintd3 dims, // Dimensions of the projections array
float delta // The half-fan angle
)
{
const unsigned int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x;
const unsigned int num_elements = prod(dims);
if( idx < num_elements ){
const float in = projections[idx];
const uintd3 co = idx_to_co<3>( idx, dims );
const float tan_delta = tanf(delta);
const float alpha = -atanf((float(co[0])/float(dims[0])-0.5f)*2.0f*tan_delta);
const float beta = degrees2radians(angles[co[2]]);
float omega = 0.5f*(S(beta/b(alpha, delta)-0.5f)+
S((beta+2.0f*(alpha-delta)-epsilon)/b(alpha, delta)+0.5f)-
S((beta-CUDART_PI_F+2.0f*alpha)/b(-alpha, delta)-0.5f)-
S((beta-CUDART_PI_F-2.0f*delta-epsilon)/b(-alpha, delta)+0.5f));
projections[idx] = in*omega;
}
}
void
redundancy_correct( cuNDArray<float> *projections,
float *angles_DevPtr,
float delta // The half-fan angle in radians
)
{
//
// Validate the input
//
if( projections == 0x0 ){
throw std::runtime_error("Error: redundancy_correct: illegal array pointer provided");
}
if( projections->get_number_of_dimensions() != 3 ){
throw std::runtime_error("Error: redundancy_correct: projections array must be three-dimensional");
}
const size_t projection_res_x = projections->get_size(0);
const size_t projection_res_y = projections->get_size(1);
const size_t num_projections = projections->get_size(2);
uintd3 dims(projection_res_x, projection_res_y, num_projections);
// Launch kernel
//
dim3 dimBlock, dimGrid;
setup_grid( prod(dims), &dimBlock, &dimGrid );
redundancy_correct_kernel<<< dimGrid, dimBlock >>>( projections->get_data_ptr(), angles_DevPtr, dims, delta );
CHECK_FOR_CUDA_ERROR();
}
/***
* Redundancy (or offset) correction from Wang, Med. Phys. 2002, doi: 10.1118/1.1489043
*/
__global__ static void
offset_correct_kernel( float *projections,
const floatd2 * __restrict__ offsets,
uintd3 dims, // Dimensions of the projections array
floatd2 phys_dims, // Physical dimensions in mm
float SAD, // Source origin distance
float SDD // Source detector distance
)
{
const unsigned int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x;
const unsigned int num_elements = prod(dims);
if( idx < num_elements ){
const uintd3 co = idx_to_co<3>( idx, dims );
const floatd2 offset = offsets[co[2]];
const float t = phys_dims[0]*(float(co[0])/(float(dims[0]))-0.5f)+offset[0];
const float omega = phys_dims[0]/2.0f-fabs(offset[0]);
//const float omega = phys_dims[0]*float(co[0])/(2.0f*float(dims[0]));
if( fabs(t) <= fabs(omega) ){
//float w = 0.5*sinf(CUDART_PI_F*atanf(t/SDD)/(2.0f*atanf(omega/SDD)))+0.5;
float sqrt_w = sinf(CUDART_PI_F*(t+omega)/(4.0f*omega));
float w = sqrt_w*sqrt_w;
projections[idx] *= w;
}
}
}
static void
offset_correct( cuNDArray<float> *projections,
floatd2* offsets, // Ptr to cuda array
floatd2 phys_dims,
float SAD, // Source origin distance
float SDD // Source detector distance
)
{
//
// Validate the input
//
if( projections == 0x0 ){
throw std::runtime_error("Error: offset_correct: illegal array pointer provided");
}
if( projections->get_number_of_dimensions() != 3 ){
throw std::runtime_error("Error: offset_correct: projections array must be three-dimensional");
}
const size_t projection_res_x = projections->get_size(0);
const size_t projection_res_y = projections->get_size(1);
const size_t num_projections = projections->get_size(2);
uintd3 dims(projection_res_x, projection_res_y, num_projections);
// Launch kernel
//
dim3 dimBlock, dimGrid;
setup_grid( prod(dims), &dimBlock, &dimGrid );
offset_correct_kernel<<< dimGrid, dimBlock >>>( projections->get_data_ptr(), offsets, dims, phys_dims, SAD, SDD );
CHECK_FOR_CUDA_ERROR();
}
/***
* Redundancy (or offset) correction from Wang, Med. Phys. 2002, doi: 10.1118/1.1489043
*/
__global__ static void
offset_correct_kernel_sqrt( float *projections,
const floatd2 * __restrict__ offsets,
uintd3 dims, // Dimensions of the projections array
floatd2 phys_dims, // Physical dimensions in mm
float SAD, // Source origin distance
float SDD // Source detector distance
)
{
const unsigned int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x;
const unsigned int num_elements = prod(dims);
if( idx < num_elements ){
const uintd3 co = idx_to_co<3>( idx, dims );
const floatd2 offset = offsets[co[2]];
const float t = phys_dims[0]*(float(co[0])/(float(dims[0]))-0.5f)+offset[0];
const float omega = phys_dims[0]/2.0f-fabs(offset[0]);
//const float omega = phys_dims[0]*float(co[0])/(2.0f*float(dims[0]));
if( fabs(t) <= fabs(omega) ){
//float w = 0.5*sinf(CUDART_PI_F*atanf(t/SDD)/(2.0f*atanf(omega/SDD)))+0.5;
float sqrt_w = sinf(CUDART_PI_F*(t+omega)/(4.0f*omega));
projections[idx] *= sqrt_w;
}
}
}
static void
offset_correct_sqrt( cuNDArray<float> *projections,
floatd2* offsets, // Ptr to cuda array
floatd2 phys_dims,
float SAD, // Source origin distance
float SDD // Source detector distance
)
{
//
// Validate the input
//
if( projections == 0x0 ){
throw std::runtime_error("Error: offset_correct: illegal array pointer provided");
}
if( projections->get_number_of_dimensions() != 3 ){
throw std::runtime_error("Error: offset_correct: projections array must be three-dimensional");
}
const size_t projection_res_x = projections->get_size(0);
const size_t projection_res_y = projections->get_size(1);
const size_t num_projections = projections->get_size(2);
uintd3 dims(projection_res_x, projection_res_y, num_projections);
// Launch kernel
//
dim3 dimBlock, dimGrid;
setup_grid( prod(dims), &dimBlock, &dimGrid );
offset_correct_kernel_sqrt<<< dimGrid, dimBlock >>>( projections->get_data_ptr(), offsets, dims, phys_dims, SAD, SDD );
CHECK_FOR_CUDA_ERROR();
}
void apply_offset_correct(hoCuNDArray<float>* projections,std::vector<floatd2>& offsets, floatd2 ps_dims_in_mm, float SDD, float SAD){
std::vector<size_t> dims = *projections->get_dimensions();
size_t projection_size = dims[0]*dims[1];
thrust::device_vector<floatd2> offsets_devVec(offsets);
//Calculate number of projections we can fit on device, rounded to nearest MB
size_t batch_size = (1024)*(cudaDeviceManager::Instance()->getFreeMemory()/(1024*projection_size*sizeof(float)));
size_t remaining = dims[2];
for (unsigned int i = 0; i < dims[2]/(batch_size+1)+1; i++){
std::vector<size_t> projection_dims = dims;
projection_dims[2] = std::min(remaining,batch_size);
//Make a view of the batch of projections
hoCuNDArray<float> projections_view(projection_dims,projections->get_data_ptr()+batch_size*i);
cuNDArray<float> cu_projections(projections_view); //Copy to device
floatd2* cu_offsets = thrust::raw_pointer_cast(&offsets_devVec[i*batch_size]);
offset_correct_sqrt(&cu_projections,cu_offsets,ps_dims_in_mm,SAD,SDD);
cudaMemcpy(projections_view.get_data_ptr(),cu_projections.get_data_ptr(),cu_projections.get_number_of_bytes(),cudaMemcpyDeviceToHost);
remaining -= batch_size;
}
}
//
// Forwards projection
//
__global__ void
conebeam_forwards_projection_kernel( float * __restrict__ projections,
float * __restrict__ angles,
floatd2 *offsets,
floatd3 is_dims_in_pixels,
floatd3 is_dims_in_mm,
intd2 ps_dims_in_pixels_int,
floatd2 ps_dims_in_mm,
int num_projections,
float SDD,
float SAD,
int num_samples_per_ray )
{
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x;
const int num_elements = prod(ps_dims_in_pixels_int)*num_projections;
if( idx < num_elements){
const intd3 co = idx_to_co<3>( idx, intd3(ps_dims_in_pixels_int[0], ps_dims_in_pixels_int[1], num_projections) );
// Projection space dimensions and spacing
//
const floatd2 ps_dims_in_pixels = floatd2(ps_dims_in_pixels_int[0], ps_dims_in_pixels_int[1]);
const floatd2 ps_spacing = ps_dims_in_mm / ps_dims_in_pixels;
// Determine projection angle and rotation matrix
//
const float angle = angles[co[2]];
const float3x3 rotation = calcRotationMatrixAroundZ(degrees2radians(angle));
// Find start and end point for the line integral (image space)
//
floatd3 startPoint = floatd3(0.0f, -SAD, 0.0f);
startPoint = mul(rotation, startPoint);
// Projection plate indices
//
#ifdef PS_ORIGIN_CENTERING
const floatd2 ps_pc = floatd2(co[0], co[1]) + floatd2(0.5);
#else
const floatd2 ps_pc = floatd2(co[0], co[1]);
#endif
// Convert the projection plate coordinates into image space,
// - local to the plate in metric units
// - including half-fan and sag correction
//
const floatd2 proj_coords = (ps_pc / ps_dims_in_pixels - 0.5f) * ps_dims_in_mm + offsets[co[2]];
// Define the end point for the line integrals
//
const float ADD = SDD - SAD; // in mm.
floatd3 endPoint = floatd3(proj_coords[0], ADD, proj_coords[1]);
endPoint = mul(rotation, endPoint);
// Find direction vector of the line integral
//
floatd3 dir = endPoint-startPoint;
// Perform integration only inside the bounding cylinder of the image volume
//
const floatd3 vec_over_dir = (is_dims_in_mm-startPoint)/dir;
const floatd3 vecdiff_over_dir = (-is_dims_in_mm-startPoint)/dir;
const floatd3 start = amin(vecdiff_over_dir, vec_over_dir);
const floatd3 end = amax(vecdiff_over_dir, vec_over_dir);
float a1 = fmax(max(start),0.0f);
float aend = fmin(min(end),1.0f);
startPoint += a1*dir;
const float sampling_distance = norm((aend-a1)*dir)/num_samples_per_ray;
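// The clipping above is a slab test: with the ray parametrized as p(a) = startPoint + a*dir,
// a in [0,1], each axis yields entry/exit parameters (+/-is_dims_in_mm - startPoint)/dir;
// a1 is the largest per-axis entry (clamped to 0) and aend the smallest per-axis exit
// (clamped to 1), restricting the integration to the axis-aligned bounding box.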
// Now perform conversion of the line integral start/end into voxel coordinates
//
startPoint /= is_dims_in_mm;
#ifdef FLIP_Z_AXIS
startPoint[2] *= -1.0f;
#endif
startPoint += 0.5f;
dir /= is_dims_in_mm;
#ifdef FLIP_Z_AXIS
dir[2] *= -1.0f;
#endif
dir /= float(num_samples_per_ray); // now in step size units
//
// Perform line integration
//
float result = 0.0f;
for ( int sampleIndex = 0; sampleIndex<num_samples_per_ray; sampleIndex++) {
#ifndef IS_ORIGIN_CENTERING
floatd3 samplePoint = startPoint+dir*float(sampleIndex) + floatd3(0.5f)/is_dims_in_pixels;
#else
floatd3 samplePoint = startPoint+dir*float(sampleIndex);
#endif
// Accumulate result
//
result += tex3D( image_tex, samplePoint[0], samplePoint[1], samplePoint[2] );
}
// Output (normalized to the length of the ray)
//
projections[idx] = result*sampling_distance;
}
}
//
// Forwards projection of a 3D volume onto a set of (binned) projections
//
void
conebeam_forwards_projection( hoCuNDArray<float> *projections,
hoCuNDArray<float> *image,
std::vector<float> angles,
std::vector<floatd2> offsets,
std::vector<unsigned int> indices,
int projections_per_batch,
float samples_per_pixel,
floatd3 is_dims_in_mm,
floatd2 ps_dims_in_mm,
float SDD,
float SAD)
{
//
// Validate the input
//
if( projections == 0x0 || image == 0x0 ){
throw std::runtime_error("Error: conebeam_forwards_projection: illegal array pointer provided");
}
if( projections->get_number_of_dimensions() != 3 ){
throw std::runtime_error("Error: conebeam_forwards_projection: projections array must be three-dimensional");
}
if( image->get_number_of_dimensions() != 3 ){
throw std::runtime_error("Error: conebeam_forwards_projection: image array must be three-dimensional");
}
if( projections->get_size(2) != angles.size() || projections->get_size(2) != offsets.size() ) {
throw std::runtime_error("Error: conebeam_forwards_projection: inconsistent sizes of input arrays/vectors");
}
int projection_res_x = projections->get_size(0);
int projection_res_y = projections->get_size(1);
int num_projections_in_bin = indices.size();
int num_projections_in_all_bins = projections->get_size(2);
int matrix_size_x = image->get_size(0);
int matrix_size_y = image->get_size(1);
int matrix_size_z = image->get_size(2);
hoCuNDArray<float> *int_projections = projections;
if( projections_per_batch > num_projections_in_bin )
projections_per_batch = num_projections_in_bin;
int num_batches = (num_projections_in_bin+projections_per_batch-1) / projections_per_batch;
// Build texture from input image
//
cudaFuncSetCacheConfig(conebeam_forwards_projection_kernel, cudaFuncCachePreferL1);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaExtent extent;
extent.width = matrix_size_x;
extent.height = matrix_size_y;
extent.depth = matrix_size_z;
cudaMemcpy3DParms cpy_params = {0};
cpy_params.kind = cudaMemcpyHostToDevice;
cpy_params.extent = extent;
cudaArray *image_array;
cudaMalloc3DArray(&image_array, &channelDesc, extent);
CHECK_FOR_CUDA_ERROR();
cpy_params.dstArray = image_array;
cpy_params.srcPtr = make_cudaPitchedPtr
((void*)image->get_data_ptr(), extent.width*sizeof(float), extent.width, extent.height);
cudaMemcpy3D(&cpy_params);
CHECK_FOR_CUDA_ERROR();
cudaBindTextureToArray(image_tex, image_array, channelDesc);
CHECK_FOR_CUDA_ERROR();
// Allocate the angles, offsets and projections in device memory
//
float *projections_DevPtr, *projections_DevPtr2;
cudaMalloc( (void**) &projections_DevPtr, projection_res_x*projection_res_y*projections_per_batch*sizeof(float));
cudaMalloc( (void**) &projections_DevPtr2, projection_res_x*projection_res_y*projections_per_batch*sizeof(float));
cudaStream_t mainStream, indyStream;
cudaStreamCreate(&mainStream);
cudaStreamCreate(&indyStream);
std::vector<float> angles_vec;
std::vector<floatd2> offsets_vec;
for( int p=0; p<indices.size(); p++ ){
int from_id = indices[p];
if( from_id >= num_projections_in_all_bins ) {
throw std::runtime_error("Error: conebeam_forwards_projection: illegal index in bin");
}
angles_vec.push_back(angles[from_id]);
offsets_vec.push_back(offsets[from_id]);
}
thrust::device_vector<float> angles_devVec(angles_vec);
thrust::device_vector<floatd2> offsets_devVec(offsets_vec);
//
// Iterate over the batches
//
for (unsigned int batch=0; batch<num_batches; batch++ ){
int from_projection = batch * projections_per_batch;
int to_projection = (batch+1) * projections_per_batch;
if (to_projection > num_projections_in_bin)
to_projection = num_projections_in_bin;
int projections_in_batch = to_projection-from_projection;
// Block/grid configuration
//
dim3 dimBlock, dimGrid;
setup_grid( projection_res_x*projection_res_y*projections_in_batch, &dimBlock, &dimGrid );
// Launch kernel
//
floatd3 is_dims_in_pixels(matrix_size_x, matrix_size_y, matrix_size_z);
intd2 ps_dims_in_pixels(projection_res_x, projection_res_y);
float* raw_angles = thrust::raw_pointer_cast(&angles_devVec[from_projection]);
floatd2* raw_offsets = thrust::raw_pointer_cast(&offsets_devVec[from_projection]);
conebeam_forwards_projection_kernel<<< dimGrid, dimBlock, 0, mainStream >>>
( projections_DevPtr, raw_angles, raw_offsets,
is_dims_in_pixels, is_dims_in_mm, ps_dims_in_pixels, ps_dims_in_mm,
projections_in_batch, SDD, SAD, samples_per_pixel*float(matrix_size_x) );
// Copy this batch's output projections back to the host (async on the kernel's stream)
//
int p = from_projection;
while( p<to_projection) {
int num_sequential_projections = 1;
while( p+num_sequential_projections < to_projection &&
indices[p+num_sequential_projections]==(indices[p+num_sequential_projections-1]+1) ){
num_sequential_projections++;
}
int to_id = indices[p];
int size = projection_res_x*projection_res_y;
cudaMemcpyAsync( int_projections->get_data_ptr()+to_id*size,
projections_DevPtr+(p-from_projection)*size,
size*num_sequential_projections*sizeof(float),
cudaMemcpyDeviceToHost, mainStream);
p += num_sequential_projections;
}
std::swap(projections_DevPtr, projections_DevPtr2);
std::swap(mainStream, indyStream);
}
cudaFree(projections_DevPtr2);
cudaFree(projections_DevPtr);
cudaFreeArray(image_array);
CUDA_CALL(cudaStreamDestroy(indyStream));
CUDA_CALL(cudaStreamDestroy(mainStream));
CHECK_FOR_CUDA_ERROR();
}
template <bool FBP> __global__ void
conebeam_backwards_projection_kernel( float * __restrict__ image,
const float * __restrict__ angles,
floatd2 *offsets,
intd3 is_dims_in_pixels_int,
floatd3 is_dims_in_mm,
floatd2 ps_dims_in_pixels,
floatd2 ps_dims_in_mm,
int num_projections_in_batch,
float num_projections_in_bin,
float SDD,
float SAD,
bool accumulate )
{
// Image voxel to backproject into (pixel coordinate and index)
//
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x;
const int num_elements = prod(is_dims_in_pixels_int);
if( idx < num_elements ){
const intd3 co = idx_to_co<3>(idx, is_dims_in_pixels_int);
#ifdef IS_ORIGIN_CENTERING
const floatd3 is_pc = floatd3(co[0], co[1], co[2]) + floatd3(0.5);
#else
const floatd3 is_pc = floatd3(co[0], co[1], co[2]);
#endif
// Normalized image space coordinate [-0.5, 0.5[
//
const floatd3 is_dims_in_pixels(is_dims_in_pixels_int[0],is_dims_in_pixels_int[1],is_dims_in_pixels_int[2]);
#ifdef FLIP_Z_AXIS
floatd3 is_nc = is_pc / is_dims_in_pixels - floatd3(0.5f);
is_nc[2] *= -1.0f;
#else
const floatd3 is_nc = is_pc / is_dims_in_pixels - floatd3(0.5f);
#endif
// Image space coordinate in metric units
//
const floatd3 pos = is_nc * is_dims_in_mm;
// Read the existing output value for accumulation at this point.
// The cost of this fetch is hidden by the loop
const float incoming = (accumulate) ? image[idx] : 0.0f;
// Backprojection loop
//
float result = 0.0f;
for( int projection = 0; projection < num_projections_in_batch; projection++ ) {
// Projection angle
//
const float angle = degrees2radians(angles[projection]);
// Projection rotation matrix
//
const float3x3 inverseRotation = calcRotationMatrixAroundZ(-angle);
// Rotated image coordinate (local to the projection's coordinate system)
//
const floatd3 pos_proj = mul(inverseRotation, pos);
// Project the image position onto the projection plate.
// Account for half-fan and sag offsets.
//
const floatd3 startPoint = floatd3(0.0f, -SAD, 0.0f);
floatd3 dir = pos_proj - startPoint;
dir = dir / dir[1];
const floatd3 endPoint = startPoint + dir * SDD;
const floatd2 endPoint2d = floatd2(endPoint[0], endPoint[2]) - offsets[projection];
// Convert metric projection coordinates into pixel coordinates
//
#ifndef PS_ORIGIN_CENTERING
floatd2 ps_pc = ((endPoint2d / ps_dims_in_mm) + floatd2(0.5f)) + floatd2(0.5f)/ps_dims_in_pixels;
//floatd2 ps_pc = ((endPoint2d / ps_dims_in_mm) + floatd2(0.5f)) * ps_dims_in_pixels + floatd2(0.5f);
#else
floatd2 ps_pc = ((endPoint2d / ps_dims_in_mm) + floatd2(0.5f));
#endif
// Apply filter (filtered backprojection mode only)
//
float weight = 1.0;
if( FBP ){
// Equation 3.59, page 96 and equation 10.2, page 386
// in Computed Tomography 2nd edition, Jiang Hsieh
//
const float xx = pos[0];
const float yy = pos[1];
const float beta = angle;
const float r = hypotf(xx,yy);
const float phi = atan2f(yy,xx);
const float D = SAD;
const float ym = r*sinf(beta-phi);
const float U = (D+ym)/D;
weight = 1.0f/(U*U);
}
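// Sanity check of the weighting: at the iso-center (r = 0) we get ym = 0, hence U = 1 and
// weight = 1; ym > 0 gives U > 1 and a 1/U^2 down-weighting, ym < 0 an up-weighting,
// consistent with the fan-beam backprojection weight referenced above.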
// Read the projection data (bilinear interpolation enabled) and accumulate
//
result += weight * tex2DLayered( projections_tex, ps_pc[0], ps_pc[1], projection );
}
// Output normalized image
//
image[idx] = incoming + result / num_projections_in_bin;
}
}
//
// Backprojection
//
template <bool FBP>
void conebeam_backwards_projection( hoCuNDArray<float> *projections,
hoCuNDArray<float> *image,
std::vector<float> angles,
std::vector<floatd2> offsets,
std::vector<unsigned int> indices,
int projections_per_batch,
intd3 is_dims_in_pixels,
floatd3 is_dims_in_mm,
floatd2 ps_dims_in_mm,
float SDD,
float SAD,
bool short_scan,
bool use_offset_correction,
bool accumulate,
cuNDArray<float> *cosine_weights,
cuNDArray<float> *frequency_filter
)
{
//
// Validate the input
//
if( projections == 0x0 || image == 0x0 ){
throw std::runtime_error("Error: conebeam_backwards_projection: illegal array pointer provided");
}
if( projections->get_number_of_dimensions() != 3 ){
throw std::runtime_error("Error: conebeam_backwards_projection: projections array must be three-dimensional");
}
if( image->get_number_of_dimensions() != 3 ){
throw std::runtime_error("Error: conebeam_backwards_projection: image array must be three-dimensional");
}
if( projections->get_size(2) != angles.size() || projections->get_size(2) != offsets.size() ) {
throw std::runtime_error("Error: conebeam_backwards_projection: inconsistent sizes of input arrays/vectors");
}
if( FBP && !(cosine_weights && frequency_filter) ){
throw std::runtime_error("Error: conebeam_backwards_projection: for _filtered_ backprojection both cosine weights and a filter must be provided");
}
// Some utility variables
//
int matrix_size_x = image->get_size(0);
int matrix_size_y = image->get_size(1);
int matrix_size_z = image->get_size(2);
floatd3 is_dims(matrix_size_x, matrix_size_y, matrix_size_z);
int num_image_elements = matrix_size_x*matrix_size_y*matrix_size_z;
int projection_res_x = projections->get_size(0);
int projection_res_y = projections->get_size(1);
floatd2 ps_dims_in_pixels(projection_res_x, projection_res_y);
int num_projections_in_all_bins = projections->get_size(2);
int num_projections_in_bin = indices.size();
if( projections_per_batch > num_projections_in_bin )
projections_per_batch = num_projections_in_bin;
int num_batches = (num_projections_in_bin+projections_per_batch-1) / projections_per_batch;
// Allocate device memory for the backprojection result
//
boost::shared_ptr< cuNDArray<float> > image_device;
if( accumulate ){
image_device = boost::shared_ptr< cuNDArray<float> >(new cuNDArray<float>(image));
}
else{
image_device = boost::shared_ptr< cuNDArray<float> >(new cuNDArray<float>(image->get_dimensions().get()));
}
// Allocate the angles, offsets and projections in device memory
//
float *projections_DevPtr, *projections_DevPtr2;
cudaMalloc( (void**) &projections_DevPtr, projection_res_x*projection_res_y*projections_per_batch*sizeof(float));
cudaMalloc( (void**) &projections_DevPtr2, projection_res_x*projection_res_y*projections_per_batch*sizeof(float));
cudaStream_t mainStream, indyStream;
cudaStreamCreate(&mainStream);
cudaStreamCreate(&indyStream);
std::vector<float> angles_vec;
std::vector<floatd2> offsets_vec;
for( int p=0; p<indices.size(); p++ ){
int from_id = indices[p];
if( from_id >= num_projections_in_all_bins ) {
throw std::runtime_error("Error: conebeam_backwards_projection: illegal index in bin");
}
angles_vec.push_back(angles[from_id]);
offsets_vec.push_back(offsets[from_id]);
}
thrust::device_vector<float> angles_devVec(angles_vec);
thrust::device_vector<floatd2> offsets_devVec(offsets_vec);
// From/to for the first batch
// - computed up front so the first projections batch can be uploaded before entering the batch loop (stream overlap)
//
int from_projection = 0;
int to_projection = projections_per_batch;
if (to_projection > num_projections_in_bin )
to_projection = num_projections_in_bin;
int projections_in_batch = to_projection-from_projection;
std::vector<size_t> dims;
dims.push_back(projection_res_x);
dims.push_back(projection_res_y);
dims.push_back(projections_in_batch);
std::vector<size_t> dims_next;
cuNDArray<float> *projections_batch = new cuNDArray<float>(&dims, projections_DevPtr);
// Upload first projections batch adhering to the binning.
// Be sure to copy sequentially numbered projections in one copy operation.
//
{
int p = from_projection;
while( p<to_projection ) {
int num_sequential_projections = 1;
while( p+num_sequential_projections < to_projection &&
indices[p+num_sequential_projections]==(indices[p+num_sequential_projections-1]+1) ){
num_sequential_projections++;
}
int from_id = indices[p];
int size = projection_res_x*projection_res_y;
cudaMemcpyAsync( projections_batch->get_data_ptr()+(p-from_projection)*size,
projections->get_data_ptr()+from_id*size,
size*num_sequential_projections*sizeof(float), cudaMemcpyHostToDevice, mainStream );
CHECK_FOR_CUDA_ERROR();
p += num_sequential_projections;
}
}
//
// Iterate over batches
//
for( int batch = 0; batch < num_batches; batch++ ) {
from_projection = batch * projections_per_batch;
to_projection = (batch+1) * projections_per_batch;
if (to_projection > num_projections_in_bin )
to_projection = num_projections_in_bin;
projections_in_batch = to_projection-from_projection;
float* raw_angles = thrust::raw_pointer_cast(&angles_devVec[from_projection]);
floatd2* raw_offsets = thrust::raw_pointer_cast(&offsets_devVec[from_projection]);
if( FBP ){
// Apply cosine weighting : "SDD / sqrt(SDD*SDD + u*u + v*v)"
// - with (u,v) positions given in metric units on a virtual detector at the origin
//
*projections_batch *= *cosine_weights;
// Redundancy correct
// - for short scan mode
//
if( short_scan ){
float delta = std::atan(ps_dims_in_mm[0]/(2.0f*SDD));
redundancy_correct( projections_batch, raw_angles, delta );
}
// Apply frequency filter
// - use zero padding to avoid the cyclic boundary conditions induced by the fft
//
std::vector<size_t> batch_dims = *projections_batch->get_dimensions();
uint64d3 pad_dims(batch_dims[0]<<1, batch_dims[1], batch_dims[2]);
boost::shared_ptr< cuNDArray<float> > padded_projections = pad<float,3>( pad_dims, projections_batch );
boost::shared_ptr< cuNDArray<complext<float> > > complex_projections = cb_fft( padded_projections.get() );
*complex_projections *= *frequency_filter;
cb_ifft( complex_projections.get(), padded_projections.get() );
uint64d3 crop_offsets(batch_dims[0]>>1, 0, 0);
crop<float,3>( crop_offsets, padded_projections.get(), projections_batch );
// Apply offset correction
// - for half fan mode, sag correction etc.
//
if (use_offset_correction)
offset_correct( projections_batch, raw_offsets, ps_dims_in_mm, SAD, SDD );
} else if (use_offset_correction)
offset_correct_sqrt( projections_batch, raw_offsets, ps_dims_in_mm, SAD, SDD );
// Build array for input texture
//
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaExtent extent;
extent.width = projection_res_x;
extent.height = projection_res_y;
extent.depth = projections_in_batch;
cudaArray *projections_array;
cudaMalloc3DArray( &projections_array, &channelDesc, extent, cudaArrayLayered );
CHECK_FOR_CUDA_ERROR();
cudaMemcpy3DParms cpy_params = {0};
cpy_params.extent = extent;
cpy_params.dstArray = projections_array;
cpy_params.kind = cudaMemcpyDeviceToDevice;
cpy_params.srcPtr =
make_cudaPitchedPtr( (void*)projections_batch->get_data_ptr(), projection_res_x*sizeof(float),
projection_res_x, projection_res_y );
cudaMemcpy3DAsync( &cpy_params, mainStream );
CHECK_FOR_CUDA_ERROR();
cudaBindTextureToArray( projections_tex, projections_array, channelDesc );
CHECK_FOR_CUDA_ERROR();
// Upload projections for the next batch
// - to enable streaming
//
if( batch < num_batches-1 ){ // for using multiple streams to hide the cost of the uploads
int from_projection_next = (batch+1) * projections_per_batch;
int to_projection_next = (batch+2) * projections_per_batch;
if (to_projection_next > num_projections_in_bin )
to_projection_next = num_projections_in_bin;
int projections_in_batch_next = to_projection_next-from_projection_next;
// printf("batch: %03i, handling projections: %03i - %03i, angles: %.2f - %.2f\n",
// batch+1, from_projection_next, to_projection_next-1, angles[from_projection_next], angles[to_projection_next-1]);
// Allocate device memory for projections and upload
//
dims_next.clear();
dims_next.push_back(projection_res_x);
dims_next.push_back(projection_res_y);
dims_next.push_back(projections_in_batch_next);
cuNDArray<float> projections_batch_next(&dims, projections_DevPtr2);
// Upload projections adhering to the binning.
// Be sure to copy sequentially numbered projections in one copy operation.
//
int p = from_projection_next;
while( p<to_projection_next ) {
int num_sequential_projections = 1;
while( p+num_sequential_projections < to_projection_next &&
indices[p+num_sequential_projections]==(indices[p+num_sequential_projections-1]+1) ){
num_sequential_projections++;
}
int from_id = indices[p];
int size = projection_res_x*projection_res_y;
cudaMemcpyAsync( projections_batch_next.get_data_ptr()+(p-from_projection_next)*size,
projections->get_data_ptr()+from_id*size,
size*num_sequential_projections*sizeof(float), cudaMemcpyHostToDevice, indyStream );
CHECK_FOR_CUDA_ERROR();
p += num_sequential_projections;
}
}
// Define dimensions of grid/blocks.
//
dim3 dimBlock, dimGrid;
setup_grid( matrix_size_x*matrix_size_y*matrix_size_z, &dimBlock, &dimGrid );
// Invoke kernel
//
cudaFuncSetCacheConfig(conebeam_backwards_projection_kernel<FBP>, cudaFuncCachePreferL1);
conebeam_backwards_projection_kernel<FBP><<< dimGrid, dimBlock, 0, mainStream >>>
( image_device->get_data_ptr(), raw_angles, raw_offsets,
is_dims_in_pixels, is_dims_in_mm, ps_dims_in_pixels, ps_dims_in_mm,
projections_in_batch, num_projections_in_bin, SDD, SAD, (batch==0) ? accumulate : true );
CHECK_FOR_CUDA_ERROR();
// Cleanup
//
cudaUnbindTexture(projections_tex);
cudaFreeArray(projections_array);
CHECK_FOR_CUDA_ERROR();
std::swap(projections_DevPtr, projections_DevPtr2);
std::swap(mainStream, indyStream);
delete projections_batch;
if( batch < num_batches-1 )
projections_batch = new cuNDArray<float>(&dims_next, projections_DevPtr);
}
// Copy result from device to host
//
cudaMemcpy( image->get_data_ptr(), image_device->get_data_ptr(),
num_image_elements*sizeof(float), cudaMemcpyDeviceToHost );
CHECK_FOR_CUDA_ERROR();
cudaFree(projections_DevPtr2);
cudaFree(projections_DevPtr);
CUDA_CALL(cudaStreamDestroy(indyStream));
CUDA_CALL(cudaStreamDestroy(mainStream));
CHECK_FOR_CUDA_ERROR();
}
// Template instantiations
//
template void conebeam_backwards_projection<false>
( hoCuNDArray<float>*, hoCuNDArray<float>*, std::vector<float>, std::vector<floatd2>, std::vector<unsigned int>,
int, intd3, floatd3, floatd2, float, float, bool, bool, bool, cuNDArray<float>*, cuNDArray<float>* );
template void conebeam_backwards_projection<true>
( hoCuNDArray<float>*, hoCuNDArray<float>*, std::vector<float>, std::vector<floatd2>, std::vector<unsigned int>,
int, intd3, floatd3, floatd2, float, float, bool, bool, bool, cuNDArray<float>*, cuNDArray<float>* );
}
|
acededd09ff9389f5e6547cccef6b961383e167f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "distconv/cudnn/batchnorm.hpp"
#include "distconv/util/util_mpi.hpp"
#include "distconv/tensor/algorithms_cuda.hpp"
#include <type_traits>
using distconv::tensor::LocaleMPI;
using distconv::tensor::HIPAllocator;
template <typename DataType>
using Tensor = distconv::tensor::Tensor<DataType, LocaleMPI, HIPAllocator>;
namespace distconv {
namespace batchnorm {
// To reduce round-off divergence with the default LBANN code, use the
// same thread mapping and reduction method as LBANN
#if 0
template <typename DataType, typename Allocator>
void channel_sums_and_sqsums(
int num_current_local_samples,
Tensor4<DataType, Allocator> &input,
Tensor4<DataType, Allocator> &sums,
Tensor4<DataType, Allocator> &sqsums,
hipStream_t stream) {
// Clear GPU memory
DISTCONV_CHECK_CUDA(hipMemsetAsync(
sums.get_buffer(), 0,
sums.get_local_pitched_size() * sizeof(DataType),
stream));
DISTCONV_CHECK_CUDA(hipMemsetAsync(
sqsums.get_buffer(), 0,
sqsums.get_local_pitched_size() * sizeof(DataType),
stream));
auto reduction_region = input.get_local_shape();
reduction_region[-1] = num_current_local_samples;
tensor::TransformReduceSum(input, reduction_region,
sums, [] __device__(DataType x) { return x; },
sqsums, [] __device__(DataType x) { return x * x; },
stream);
}
#else
template <int ND, typename DataType, int BLOCK_SIZE>
__global__ void channel_sums_and_sqsums_kernel(const DataType *input,
DataType *sums, DataType *sqsums,
tensor::Array<ND> shape,
tensor::Array<ND> input_strides) {
__shared__ DataType shared_sums[BLOCK_SIZE];
__shared__ DataType shared_sqsums[BLOCK_SIZE];
const int tid = threadIdx.x;
const index_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const int ch_idx = blockIdx.y;
const int num_channels = shape[get_channel_dim()];
const int num_samples = shape[get_sample_dim()];
DataType sum = DataType(0);
DataType sqsum = DataType(0);
const index_t channel_size = shape.get_size() / num_channels / num_samples;
if (gidx < channel_size) {
index_t offset = gidx;
index_t input_offset = 0;
for (int d = 0; d < ND -2; ++d) {
int idx = offset % shape[d];
input_offset += idx * input_strides[d];
offset /= shape[d];
}
input_offset += ch_idx * input_strides[-2];
for (int s = 0; s < num_samples; ++s) {
const DataType x = input[input_offset];
sum += x;
sqsum += x * x;
input_offset += input_strides[-1];
}
}
shared_sums[tid] = sum;
shared_sqsums[tid] = sqsum;
// Compute channel sum with shared memory reduction
// TODO: unroll loops
for(int stride = BLOCK_SIZE / 2; stride > 0; stride /= 2) {
__syncthreads();
if(tid < stride) {
shared_sums[tid] += shared_sums[tid + stride];
shared_sqsums[tid] += shared_sqsums[tid + stride];
}
}
// Output channel sum to global memory
if(tid == 0) {
atomicAdd(&sums[ch_idx], shared_sums[0]);
atomicAdd(&sqsums[ch_idx], shared_sqsums[0]);
}
}
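// Note on the reduction above: the shared-memory loop halves the active threads each
// iteration (e.g. with BLOCK_SIZE = 8 the strides are 4, 2, 1), leaving the block-wide
// partial sums in element 0; thread 0 then merges them into the per-channel global
// accumulators with one atomicAdd per buffer.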
template <int ND, typename Tensor>
void channel_sums_and_sqsums(int num_samples, const Tensor &input, Tensor &sums,
Tensor &sqsums, hipStream_t stream,
const std::vector<bool> &reduction_dims,
bool reduce) {
using DataType = typename Tensor::data_type;
// Clear GPU memory
DISTCONV_CHECK_CUDA(hipMemsetAsync(
sums.get_buffer(), 0,
sums.get_local_pitched_size() * sizeof(DataType),
stream));
DISTCONV_CHECK_CUDA(hipMemsetAsync(
sqsums.get_buffer(), 0,
sqsums.get_local_pitched_size() * sizeof(DataType),
stream));
const int num_channels = input.get_local_shape()[get_channel_dim()];
constexpr int block_size = 256;
dim3 block_dim(block_size);
index_t channel_size = input.get_local_size() / num_channels / num_samples;
dim3 grid_dim((channel_size + block_size - 1) / block_size,
num_channels);
auto input_strides = input.get_strides();
auto shape = input.get_local_shape();
shape[get_sample_dim()] = num_samples;
// CUDA grid dimension limitation
assert_always(num_channels < 65535);
// Do not contribute to the accumulation if the local tensor is not
// a split root.
if (input.get_local_size() > 0 && input.is_split_root()) {
hipLaunchKernelGGL(( channel_sums_and_sqsums_kernel<ND, DataType, block_size>)
, dim3(grid_dim), dim3(block_dim), 0, stream,
input.get_const_base_ptr(),
sums.get_base_ptr(),
sqsums.get_base_ptr(),
shape, input_strides);
}
if (reduce) {
// TODO: only global reduction is supported.
DISTCONV_CHECK_CUDA(hipStreamSynchronize(stream));
sums.allreduce_shared_regions();
sqsums.allreduce_shared_regions();
}
}
#endif
#define INSTANTIATE_CHANNEL_SUMS_AND_SQSUMS(ND, TYPE) \
template void \
channel_sums_and_sqsums<ND, Tensor<TYPE>>( \
int num_samples, \
const Tensor<TYPE> &input, Tensor<TYPE> &sums, \
Tensor<TYPE> &sqsums, hipStream_t stream, \
const std::vector<bool> &reduction_dims, \
bool reduce);
INSTANTIATE_CHANNEL_SUMS_AND_SQSUMS(4, float)
INSTANTIATE_CHANNEL_SUMS_AND_SQSUMS(4, double)
INSTANTIATE_CHANNEL_SUMS_AND_SQSUMS(5, float)
INSTANTIATE_CHANNEL_SUMS_AND_SQSUMS(5, double)
#undef INSTANTIATE_CHANNEL_SUMS_AND_SQSUMS
template <typename DataType>
struct sums_to_statistics_functor {
index_t m_num_per_sum;
DataType m_decay;
sums_to_statistics_functor(index_t num_per_sum, DataType decay):
m_num_per_sum(num_per_sum),
m_decay(decay) {}
__device__ void operator()(DataType &global_mean, DataType &global_var,
DataType &running_mean, DataType &running_var) {
const DataType mean = global_mean / m_num_per_sum;
const DataType sqmean = global_var / m_num_per_sum;
DataType var = sqmean - mean * mean;
var = var > DataType(0) ? var : DataType(0);
var *= m_num_per_sum / (m_num_per_sum - DataType(1));
global_mean = mean;
global_var = var;
running_mean = m_decay * running_mean + (DataType(1) - m_decay) * mean;
running_var = m_decay * running_var + (DataType(1) - m_decay) * var;
}
};
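// In other words, with N = m_num_per_sum contributions per channel the functor computes
// mean = sum(x)/N, var = max(sum(x^2)/N - mean^2, 0) * N/(N-1) (unbiased estimate), and
// updates the running statistics as running = decay*running + (1 - decay)*batch_value.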
template <int ND, typename TensorType>
void sums_to_statistics(index_t num_per_sum, typename TensorType::data_type decay,
TensorType &global_mean, TensorType &global_var,
TensorType &running_mean, TensorType &running_var,
hipStream_t stream) {
using DataType = typename TensorType::data_type;
if (num_per_sum > 0) {
tensor::Transform(
global_mean, global_var, running_mean, running_var,
sums_to_statistics_functor<DataType>(num_per_sum, decay),
stream);
} else {
// Fill global_var with 1. Do the same thing as the corresponding LBANN code.
tensor::Transform(
global_var,
[] __device__ (DataType &global_var) {
global_var = DataType(1);
}, stream);
}
}
#define INSTANTIATE_SUMS_TO_STATISTICS(ND, TYPE) \
template \
void sums_to_statistics<ND, Tensor<TYPE>>( \
index_t num_per_sum, TYPE decay, \
Tensor<TYPE> &global_mean, Tensor<TYPE> &global_var, \
Tensor<TYPE> &running_mean, Tensor<TYPE> &running_var, \
hipStream_t stream);
INSTANTIATE_SUMS_TO_STATISTICS(4, float)
INSTANTIATE_SUMS_TO_STATISTICS(4, double)
INSTANTIATE_SUMS_TO_STATISTICS(5, float)
INSTANTIATE_SUMS_TO_STATISTICS(5, double)
#undef INSTANTIATE_SUMS_TO_STATISTICS
__device__ inline float rsqrt(float x) {
return rsqrtf(x);
}
template <int ND, typename DataType>
void __global__ batch_normalization_kernel(const DataType *input,
const DataType *global_mean,
const DataType *global_var,
const DataType *global_scale,
const DataType *global_bias,
DataType *output,
DataType epsilon,
tensor::Array<ND> shape,
tensor::Array<ND> input_strides,
tensor::Array<ND> output_strides) {
const int ch_idx = blockIdx.y;
const int num_channels = shape[get_channel_dim()];
const int num_samples = shape[get_sample_dim()];
const DataType mean = global_mean[ch_idx];
const DataType var = global_var[ch_idx];
const DataType scale = global_scale[ch_idx];
const DataType bias = global_bias[ch_idx];
const DataType inv_stdev = rsqrt(var + epsilon);
const index_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const index_t channel_size = shape.get_size() / num_channels / num_samples;
if (gidx < channel_size) {
index_t offset = gidx;
index_t input_offset = 0, output_offset = 0;
for (int d = 0; d < ND - 2; ++d) {
int idx = offset % shape[d];
input_offset += idx * input_strides[d];
output_offset += idx * output_strides[d];
offset /= shape[d];
}
input_offset += ch_idx * input_strides[-2];
output_offset += ch_idx * output_strides[-2];
for (int s = 0; s < num_samples; ++s) {
const DataType x = input[input_offset];
DataType xhat = (x - mean) * inv_stdev;
DataType y = scale * xhat + bias;
output[output_offset] = y;
input_offset += input_strides[-1];
output_offset += output_strides[-1];
}
}
}
template <int ND, typename TensorType>
void batch_normalization(int num_samples, const TensorType &input,
const TensorType &mean, const TensorType &var,
const TensorType &scale, const TensorType &bias,
TensorType &output, typename TensorType::data_type epsilon,
hipStream_t stream) {
// local tensors can be empty
if (output.get_local_size() == 0) return;
const int num_channels = input.get_local_shape()[get_channel_dim()];
constexpr int block_size = 256;
dim3 block_dim(block_size);
index_t channel_size = input.get_local_size() / num_channels / num_samples;
dim3 grid_dim((channel_size + block_size - 1) / block_size,
num_channels);
tensor::Array<ND> input_strides = input.get_strides();
tensor::Array<ND> output_strides = output.get_strides();
// CUDA grid dimension limitation
assert_always(num_channels < 65535);
tensor::Array<ND> shape = input.get_local_shape();
shape[get_sample_dim()] = num_samples;
hipLaunchKernelGGL(( batch_normalization_kernel), dim3(grid_dim), dim3(block_dim), 0, stream,
input.get_const_base_ptr(),
mean.get_const_base_ptr(),
var.get_const_base_ptr(),
scale.get_const_base_ptr(),
bias.get_const_base_ptr(),
output.get_base_ptr(),
epsilon, shape,
input_strides, output_strides);
}
#define INSTANTIATE_BATCH_NORMALIZATION(ND, TYPE) \
template \
void batch_normalization<ND, Tensor<TYPE>>( \
int num_samples, \
const Tensor<TYPE> &input, const Tensor<TYPE> &mean, \
const Tensor<TYPE> &var, const Tensor<TYPE> &scale, \
const Tensor<TYPE> &bias, Tensor<TYPE> &output, \
TYPE epsilon, hipStream_t stream);
INSTANTIATE_BATCH_NORMALIZATION(4, float)
INSTANTIATE_BATCH_NORMALIZATION(4, double)
INSTANTIATE_BATCH_NORMALIZATION(5, float)
INSTANTIATE_BATCH_NORMALIZATION(5, double)
#undef INSTANTIATE_BATCH_NORMALIZATION
template <int ND, typename DataType, int BLOCK_SIZE>
void __global__ backprop1_kernel(const DataType *input,
const DataType *d_output,
const DataType *global_mean,
const DataType *global_var,
const DataType *global_scale,
DataType *global_dscale, DataType *global_dbias,
DataType *global_dmean, DataType *global_dvar,
DataType epsilon, tensor::Array<ND> shape,
tensor::Array<ND> input_strides,
tensor::Array<ND> d_output_strides) {
__shared__ DataType shared_dscale[BLOCK_SIZE];
__shared__ DataType shared_dbias[BLOCK_SIZE];
__shared__ DataType shared_dmean[BLOCK_SIZE];
__shared__ DataType shared_dvar[BLOCK_SIZE];
const int tid = threadIdx.x;
const index_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const int ch_idx = blockIdx.y;
const int num_channels = shape[get_channel_dim()];
const int num_samples = shape[get_sample_dim()];
const DataType mean = global_mean[ch_idx];
const DataType var = global_var[ch_idx];
const DataType scale = global_scale[ch_idx];
const DataType inv_stdev = rsqrt(var + epsilon);
const DataType dvar_factor = inv_stdev * inv_stdev * inv_stdev / 2;
DataType dscale = DataType(0);
DataType dbias = DataType(0);
DataType dmean = DataType(0);
DataType dvar = DataType(0);
const index_t channel_size = shape.get_size() / num_channels / num_samples;
if (gidx < channel_size) {
index_t offset = gidx;
index_t input_offset = 0, d_output_offset = 0;
for (int d = 0; d < ND -2; ++d) {
int idx = offset % shape[d];
input_offset += idx * input_strides[d];
d_output_offset += idx * d_output_strides[d];
offset /= shape[d];
}
input_offset += ch_idx * input_strides[-2];
d_output_offset += ch_idx * d_output_strides[-2];
for (int sample_idx = 0; sample_idx < num_samples; ++sample_idx) {
const DataType x = input[input_offset];
const DataType xhat = (x - mean) * inv_stdev;
const DataType dy = d_output[d_output_offset];
dscale += dy * xhat;
dbias += dy;
const DataType dxhat = dy * scale;
dmean += - dxhat * inv_stdev;
dvar += - dxhat * (x - mean) * dvar_factor;
input_offset += input_strides[-1];
d_output_offset += d_output_strides[-1];
}
}
shared_dscale[tid] = dscale;
shared_dbias[tid] = dbias;
shared_dmean[tid] = dmean;
shared_dvar[tid] = dvar;
for(int stride = BLOCK_SIZE / 2; stride > 0; stride /= 2) {
__syncthreads();
if(tid < stride) {
shared_dscale[tid] += shared_dscale[tid + stride];
shared_dbias[tid] += shared_dbias[tid + stride];
shared_dmean[tid] += shared_dmean[tid + stride];
shared_dvar[tid] += shared_dvar[tid + stride];
}
}
// Output channel sum to global memory
if (tid == 0) {
atomicAdd(&global_dscale[ch_idx], shared_dscale[0]);
atomicAdd(&global_dbias[ch_idx], shared_dbias[0]);
atomicAdd(&global_dmean[ch_idx], shared_dmean[0]);
atomicAdd(&global_dvar[ch_idx], shared_dvar[0]);
}
}
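// Summarizing the accumulation above, per channel:
// dscale = sum_i dy_i*xhat_i, dbias = sum_i dy_i,
// dmean = -sum_i dy_i*scale/sqrt(var + eps),
// dvar = -sum_i dy_i*scale*(x_i - mean) / (2*(var + eps)^(3/2)),
// reduced with the same shared-memory tree + atomicAdd pattern as the forward sums.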
template <int ND, typename TensorType>
void backprop1(int num_samples, const TensorType &input,
const TensorType &d_output, const TensorType &mean,
const TensorType &var, const TensorType &scale,
TensorType &scale_gradient, TensorType &bias_gradient,
TensorType &mean_gradient, TensorType &var_gradient,
typename TensorType::data_type epsilon, hipStream_t stream) {
using DataType = typename TensorType::data_type;
DISTCONV_CHECK_CUDA(hipMemsetAsync(
scale_gradient.get_buffer(), 0,
scale_gradient.get_local_pitched_size() * sizeof(DataType),
stream));
DISTCONV_CHECK_CUDA(hipMemsetAsync(
bias_gradient.get_buffer(), 0,
bias_gradient.get_local_pitched_size() * sizeof(DataType),
stream));
DISTCONV_CHECK_CUDA(hipMemsetAsync(
mean_gradient.get_buffer(), 0,
mean_gradient.get_local_pitched_size() * sizeof(DataType),
stream));
DISTCONV_CHECK_CUDA(hipMemsetAsync(
var_gradient.get_buffer(), 0,
var_gradient.get_local_pitched_size() * sizeof(DataType),
stream));
if (input.get_local_size() == 0 || !input.is_split_root()) {
return;
}
const auto input_strides = input.get_strides();
const auto d_output_strides = d_output.get_strides();
const int num_channels = input.get_local_shape()[get_channel_dim()];
// CUDA grid dimension limitation
assert_always(num_channels < 65535);
constexpr int block_size = 256;
dim3 block_dim(block_size);
auto shape = input.get_local_shape();
shape[get_sample_dim()] = num_samples;
index_t channel_size = input.get_local_size() / num_channels / num_samples;
dim3 grid_dim((channel_size + block_size - 1) / block_size,
num_channels);
hipLaunchKernelGGL(( backprop1_kernel<ND, DataType, block_size>), dim3(grid_dim), dim3(block_dim), 0, stream,
input.get_const_base_ptr(),
d_output.get_const_base_ptr(),
mean.get_const_base_ptr(),
var.get_const_base_ptr(),
scale.get_const_base_ptr(),
scale_gradient.get_base_ptr(),
bias_gradient.get_base_ptr(),
mean_gradient.get_base_ptr(),
var_gradient.get_base_ptr(),
epsilon, shape,
input_strides, d_output_strides);
}
#define INSTANTIATE_BACKPROP1(ND, TYPE) \
template \
void backprop1<ND, Tensor<TYPE>>( \
int num_samples, \
const Tensor<TYPE> &input, const Tensor<TYPE> &d_output, \
const Tensor<TYPE> &mean, const Tensor<TYPE> &var, \
const Tensor<TYPE> &scale, Tensor<TYPE> &scale_gradient, \
Tensor<TYPE> &bias_gradient, Tensor<TYPE> &mean_gradient, \
Tensor<TYPE> &var_gradient, TYPE epsilon, \
hipStream_t stream);
INSTANTIATE_BACKPROP1(4, float)
INSTANTIATE_BACKPROP1(4, double)
INSTANTIATE_BACKPROP1(5, float)
INSTANTIATE_BACKPROP1(5, double)
#undef INSTANTIATE_BACKPROP1
template <int ND, typename DataType>
void __global__ backprop2_kernel(const DataType *input,
const DataType *d_output,
const DataType *global_mean,
const DataType *global_var,
const DataType *global_scale,
const DataType *global_dmean,
const DataType *global_dvar,
DataType *d_input, DataType epsilon,
index_t num_per_sum,
tensor::Array<ND> shape,
tensor::Array<ND> input_strides,
tensor::Array<ND> d_output_strides,
tensor::Array<ND> d_input_strides) {
const index_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const int ch_idx = blockIdx.y;
const int num_channels = shape[get_channel_dim()];
const int num_samples = shape[-1];
const DataType mean = global_mean[ch_idx];
const DataType var = global_var[ch_idx];
const DataType scale = global_scale[ch_idx];
const DataType dmean = global_dmean[ch_idx];
const DataType dvar = global_dvar[ch_idx];
const DataType inv_stdev = rsqrt(var + epsilon);
const DataType dmean_term = dmean / num_per_sum;
const DataType dvar_term = dvar * 2 / (num_per_sum - 1);
const index_t channel_size = shape.get_size() / num_channels / num_samples;
if (gidx < channel_size) {
index_t offset = gidx;
index_t input_offset = 0, d_output_offset = 0, d_input_offset = 0;
for (int d = 0; d < ND - 2; ++d) {
int idx = offset % shape[d];
input_offset += idx * input_strides[d];
d_output_offset += idx * d_output_strides[d];
d_input_offset += idx * d_input_strides[d];
offset /= shape[d];
}
input_offset += ch_idx * input_strides[-2];
d_output_offset += ch_idx * d_output_strides[-2];
d_input_offset += ch_idx * d_input_strides[-2];
for (int s = 0; s < num_samples; ++s) {
const DataType x = input[input_offset];
const DataType dy = d_output[d_output_offset];
const DataType dxhat = dy * scale;
DataType dx = dxhat * inv_stdev;
dx += dmean_term;
dx += dvar_term * (x - mean);
d_input[d_input_offset] = dx;
input_offset += input_strides[-1];
d_output_offset += d_output_strides[-1];
d_input_offset += d_input_strides[-1];
}
}
}
template <int ND, typename TensorType>
void backprop2(index_t num_samples, index_t num_per_sum,
const TensorType &input, const TensorType &d_output,
const TensorType &mean, const TensorType &var,
const TensorType &scale, const TensorType &mean_gradient,
const TensorType &var_gradient, TensorType &d_input,
typename TensorType::data_type epsilon, hipStream_t stream) {
using DataType = typename TensorType::data_type;
if (d_input.get_local_size() == 0) return;
const int num_channels = input.get_local_shape()[get_channel_dim()];
constexpr int block_size = 256;
dim3 block_dim(block_size);
index_t channel_size = input.get_local_size() / num_channels / num_samples;
dim3 grid_dim((channel_size + block_size - 1) / block_size,
num_channels);
auto input_strides = input.get_strides();
auto d_output_strides = d_output.get_strides();
auto d_input_strides = d_input.get_strides();
auto shape = input.get_local_shape();
shape[get_sample_dim()] = num_samples;
// CUDA grid dimension limitation
assert_always(num_channels < 65535);
hipLaunchKernelGGL(( backprop2_kernel<ND, DataType>), dim3(grid_dim), dim3(block_dim), 0, stream,
input.get_const_base_ptr(),
d_output.get_const_base_ptr(),
mean.get_const_base_ptr(),
var.get_const_base_ptr(),
scale.get_const_base_ptr(),
mean_gradient.get_const_base_ptr(),
var_gradient.get_const_base_ptr(),
d_input.get_base_ptr(),
epsilon, num_per_sum, shape,
input_strides, d_output_strides, d_input_strides);
}
#define INSTANTIATE_BACKPROP2(ND, TYPE) \
template \
void backprop2<ND, Tensor<TYPE>>( \
index_t num_samples, index_t num_per_sum, \
const Tensor<TYPE> &input, const Tensor<TYPE> &d_output, \
const Tensor<TYPE> &mean, const Tensor<TYPE> &var, \
const Tensor<TYPE> &scale, const Tensor<TYPE> &mean_gradient, \
const Tensor<TYPE> &var_gradient, Tensor<TYPE> &d_input, \
TYPE epsilon, hipStream_t stream);
INSTANTIATE_BACKPROP2(4, float)
INSTANTIATE_BACKPROP2(4, double)
INSTANTIATE_BACKPROP2(5, float)
INSTANTIATE_BACKPROP2(5, double)
#undef INSTANTIATE_BACKPROP2
} // namespace batchnorm
} // namespace distconv
| acededd09ff9389f5e6547cccef6b961383e167f.cu | #include "distconv/cudnn/batchnorm.hpp"
#include "distconv/util/util_mpi.hpp"
#include "distconv/tensor/algorithms_cuda.hpp"
#include <type_traits>
using distconv::tensor::LocaleMPI;
using distconv::tensor::CUDAAllocator;
template <typename DataType>
using Tensor = distconv::tensor::Tensor<DataType, LocaleMPI, CUDAAllocator>;
namespace distconv {
namespace batchnorm {
// To reduce round-off divergence with the default LBANN code, use the
// same thread mapping and reduction method as LBANN
#if 0
template <typename DataType, typename Allocator>
void channel_sums_and_sqsums(
int num_current_local_samples,
Tensor4<DataType, Allocator> &input,
Tensor4<DataType, Allocator> &sums,
Tensor4<DataType, Allocator> &sqsums,
cudaStream_t stream) {
// Clear GPU memory
DISTCONV_CHECK_CUDA(cudaMemsetAsync(
sums.get_buffer(), 0,
sums.get_local_pitched_size() * sizeof(DataType),
stream));
DISTCONV_CHECK_CUDA(cudaMemsetAsync(
sqsums.get_buffer(), 0,
sqsums.get_local_pitched_size() * sizeof(DataType),
stream));
auto reduction_region = input.get_local_shape();
reduction_region[-1] = num_current_local_samples;
tensor::TransformReduceSum(input, reduction_region,
sums, [] __device__(DataType x) { return x; },
sqsums, [] __device__(DataType x) { return x * x; },
stream);
}
#else
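// One grid column (blockIdx.y) per channel: each thread accumulates the sum
// and sum of squares over the sample dimension for one spatial position, a
// shared-memory tree reduction combines the per-thread partials, and one
// atomicAdd per block folds the block result into the per-channel totals.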
template <int ND, typename DataType, int BLOCK_SIZE>
__global__ void channel_sums_and_sqsums_kernel(const DataType *input,
DataType *sums, DataType *sqsums,
tensor::Array<ND> shape,
tensor::Array<ND> input_strides) {
__shared__ DataType shared_sums[BLOCK_SIZE];
__shared__ DataType shared_sqsums[BLOCK_SIZE];
const int tid = threadIdx.x;
const index_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const int ch_idx = blockIdx.y;
const int num_channels = shape[get_channel_dim()];
const int num_samples = shape[get_sample_dim()];
DataType sum = DataType(0);
DataType sqsum = DataType(0);
const index_t channel_size = shape.get_size() / num_channels / num_samples;
if (gidx < channel_size) {
index_t offset = gidx;
index_t input_offset = 0;
for (int d = 0; d < ND -2; ++d) {
int idx = offset % shape[d];
input_offset += idx * input_strides[d];
offset /= shape[d];
}
input_offset += ch_idx * input_strides[-2];
for (int s = 0; s < num_samples; ++s) {
const DataType x = input[input_offset];
sum += x;
sqsum += x * x;
input_offset += input_strides[-1];
}
}
shared_sums[tid] = sum;
shared_sqsums[tid] = sqsum;
// Compute channel sum with shared memory reduction
// TODO: unroll loops
for(int stride = BLOCK_SIZE / 2; stride > 0; stride /= 2) {
__syncthreads();
if(tid < stride) {
shared_sums[tid] += shared_sums[tid + stride];
shared_sqsums[tid] += shared_sqsums[tid + stride];
}
}
// Output channel sum to global memory
if(tid == 0) {
atomicAdd(&sums[ch_idx], shared_sums[0]);
atomicAdd(&sqsums[ch_idx], shared_sqsums[0]);
}
}
template <int ND, typename Tensor>
void channel_sums_and_sqsums(int num_samples, const Tensor &input, Tensor &sums,
Tensor &sqsums, cudaStream_t stream,
const std::vector<bool> &reduction_dims,
bool reduce) {
using DataType = typename Tensor::data_type;
// Clear GPU memory
DISTCONV_CHECK_CUDA(cudaMemsetAsync(
sums.get_buffer(), 0,
sums.get_local_pitched_size() * sizeof(DataType),
stream));
DISTCONV_CHECK_CUDA(cudaMemsetAsync(
sqsums.get_buffer(), 0,
sqsums.get_local_pitched_size() * sizeof(DataType),
stream));
const int num_channels = input.get_local_shape()[get_channel_dim()];
constexpr int block_size = 256;
dim3 block_dim(block_size);
index_t channel_size = input.get_local_size() / num_channels / num_samples;
dim3 grid_dim((channel_size + block_size - 1) / block_size,
num_channels);
auto input_strides = input.get_strides();
auto shape = input.get_local_shape();
shape[get_sample_dim()] = num_samples;
// CUDA grid dimension limitation
assert_always(num_channels < 65535);
// Do not contribute to the accumulation if the local tensor is not
// a split root.
if (input.get_local_size() > 0 && input.is_split_root()) {
channel_sums_and_sqsums_kernel<ND, DataType, block_size>
<<<grid_dim, block_dim, 0, stream>>>(
input.get_const_base_ptr(),
sums.get_base_ptr(),
sqsums.get_base_ptr(),
shape, input_strides);
}
if (reduce) {
// TODO: only global reduction is supported.
DISTCONV_CHECK_CUDA(cudaStreamSynchronize(stream));
sums.allreduce_shared_regions();
sqsums.allreduce_shared_regions();
}
}
#endif
#define INSTANTIATE_CHANNEL_SUMS_AND_SQSUMS(ND, TYPE) \
template void \
channel_sums_and_sqsums<ND, Tensor<TYPE>>( \
int num_samples, \
const Tensor<TYPE> &input, Tensor<TYPE> &sums, \
Tensor<TYPE> &sqsums, cudaStream_t stream, \
const std::vector<bool> &reduction_dims, \
bool reduce);
INSTANTIATE_CHANNEL_SUMS_AND_SQSUMS(4, float)
INSTANTIATE_CHANNEL_SUMS_AND_SQSUMS(4, double)
INSTANTIATE_CHANNEL_SUMS_AND_SQSUMS(5, float)
INSTANTIATE_CHANNEL_SUMS_AND_SQSUMS(5, double)
#undef INSTANTIATE_CHANNEL_SUMS_AND_SQSUMS
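// Turns the accumulated per-channel sums and sums of squares into the batch
// mean and unbiased variance, and folds both into the running statistics
// using the given decay factor.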
template <typename DataType>
struct sums_to_statistics_functor {
index_t m_num_per_sum;
DataType m_decay;
sums_to_statistics_functor(index_t num_per_sum, DataType decay):
m_num_per_sum(num_per_sum),
m_decay(decay) {}
__device__ void operator()(DataType &global_mean, DataType &global_var,
DataType &running_mean, DataType &running_var) {
const DataType mean = global_mean / m_num_per_sum;
const DataType sqmean = global_var / m_num_per_sum;
    DataType var = sqmean - mean * mean;
var = var > DataType(0) ? var : DataType(0);
var *= m_num_per_sum / (m_num_per_sum - DataType(1));
global_mean = mean;
global_var = var;
running_mean = m_decay * running_mean + (DataType(1) - m_decay) * mean;
running_var = m_decay * running_var + (DataType(1) - m_decay) * var;
}
};
template <int ND, typename TensorType>
void sums_to_statistics(index_t num_per_sum, typename TensorType::data_type decay,
TensorType &global_mean, TensorType &global_var,
TensorType &running_mean, TensorType &running_var,
cudaStream_t stream) {
using DataType = typename TensorType::data_type;
if (num_per_sum > 0) {
tensor::Transform(
global_mean, global_var, running_mean, running_var,
sums_to_statistics_functor<DataType>(num_per_sum, decay),
stream);
} else {
// Fill global_var with 1. Do the same thing as the corresponding LBANN code.
tensor::Transform(
global_var,
[] __device__ (DataType &global_var) {
global_var = DataType(1);
}, stream);
}
}
#define INSTANTIATE_SUMS_TO_STATISTICS(ND, TYPE) \
template \
void sums_to_statistics<ND, Tensor<TYPE>>( \
index_t num_per_sum, TYPE decay, \
Tensor<TYPE> &global_mean, Tensor<TYPE> &global_var, \
Tensor<TYPE> &running_mean, Tensor<TYPE> &running_var, \
cudaStream_t stream);
INSTANTIATE_SUMS_TO_STATISTICS(4, float)
INSTANTIATE_SUMS_TO_STATISTICS(4, double)
INSTANTIATE_SUMS_TO_STATISTICS(5, float)
INSTANTIATE_SUMS_TO_STATISTICS(5, double)
#undef INSTANTIATE_SUMS_TO_STATISTICS
__device__ inline float rsqrt(float x) {
return rsqrtf(x);
}
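// Forward pass: y = scale * (x - mean) * rsqrt(var + epsilon) + bias, applied
// per channel with the same channel-per-blockIdx.y mapping as the other kernels.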
template <int ND, typename DataType>
void __global__ batch_normalization_kernel(const DataType *input,
const DataType *global_mean,
const DataType *global_var,
const DataType *global_scale,
const DataType *global_bias,
DataType *output,
DataType epsilon,
tensor::Array<ND> shape,
tensor::Array<ND> input_strides,
tensor::Array<ND> output_strides) {
const int ch_idx = blockIdx.y;
const int num_channels = shape[get_channel_dim()];
const int num_samples = shape[get_sample_dim()];
const DataType mean = global_mean[ch_idx];
const DataType var = global_var[ch_idx];
const DataType scale = global_scale[ch_idx];
const DataType bias = global_bias[ch_idx];
const DataType inv_stdev = rsqrt(var + epsilon);
const index_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const index_t channel_size = shape.get_size() / num_channels / num_samples;
if (gidx < channel_size) {
index_t offset = gidx;
index_t input_offset = 0, output_offset = 0;
for (int d = 0; d < ND - 2; ++d) {
int idx = offset % shape[d];
input_offset += idx * input_strides[d];
output_offset += idx * output_strides[d];
offset /= shape[d];
}
input_offset += ch_idx * input_strides[-2];
output_offset += ch_idx * output_strides[-2];
for (int s = 0; s < num_samples; ++s) {
const DataType x = input[input_offset];
DataType xhat = (x - mean) * inv_stdev;
DataType y = scale * xhat + bias;
output[output_offset] = y;
input_offset += input_strides[-1];
output_offset += output_strides[-1];
}
}
}
template <int ND, typename TensorType>
void batch_normalization(int num_samples, const TensorType &input,
const TensorType &mean, const TensorType &var,
const TensorType &scale, const TensorType &bias,
TensorType &output, typename TensorType::data_type epsilon,
cudaStream_t stream) {
// local tensors can be empty
if (output.get_local_size() == 0) return;
const int num_channels = input.get_local_shape()[get_channel_dim()];
constexpr int block_size = 256;
dim3 block_dim(block_size);
index_t channel_size = input.get_local_size() / num_channels / num_samples;
dim3 grid_dim((channel_size + block_size - 1) / block_size,
num_channels);
tensor::Array<ND> input_strides = input.get_strides();
tensor::Array<ND> output_strides = output.get_strides();
// CUDA grid dimension limitation
assert_always(num_channels < 65535);
tensor::Array<ND> shape = input.get_local_shape();
shape[get_sample_dim()] = num_samples;
batch_normalization_kernel<<<grid_dim, block_dim, 0, stream>>>(
input.get_const_base_ptr(),
mean.get_const_base_ptr(),
var.get_const_base_ptr(),
scale.get_const_base_ptr(),
bias.get_const_base_ptr(),
output.get_base_ptr(),
epsilon, shape,
input_strides, output_strides);
}
#define INSTANTIATE_BATCH_NORMALIZATION(ND, TYPE) \
template \
void batch_normalization<ND, Tensor<TYPE>>( \
int num_samples, \
const Tensor<TYPE> &input, const Tensor<TYPE> &mean, \
const Tensor<TYPE> &var, const Tensor<TYPE> &scale, \
const Tensor<TYPE> &bias, Tensor<TYPE> &output, \
TYPE epsilon, cudaStream_t stream);
INSTANTIATE_BATCH_NORMALIZATION(4, float)
INSTANTIATE_BATCH_NORMALIZATION(4, double)
INSTANTIATE_BATCH_NORMALIZATION(5, float)
INSTANTIATE_BATCH_NORMALIZATION(5, double)
#undef INSTANTIATE_BATCH_NORMALIZATION
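// First backward pass: accumulates the per-channel scale and bias gradients
// together with dmean and dvar, using a shared-memory block reduction followed
// by one atomicAdd per block and per quantity.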
template <int ND, typename DataType, int BLOCK_SIZE>
void __global__ backprop1_kernel(const DataType *input,
const DataType *d_output,
const DataType *global_mean,
const DataType *global_var,
const DataType *global_scale,
DataType *global_dscale, DataType *global_dbias,
DataType *global_dmean, DataType *global_dvar,
DataType epsilon, tensor::Array<ND> shape,
tensor::Array<ND> input_strides,
tensor::Array<ND> d_output_strides) {
__shared__ DataType shared_dscale[BLOCK_SIZE];
__shared__ DataType shared_dbias[BLOCK_SIZE];
__shared__ DataType shared_dmean[BLOCK_SIZE];
__shared__ DataType shared_dvar[BLOCK_SIZE];
const int tid = threadIdx.x;
const index_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const int ch_idx = blockIdx.y;
const int num_channels = shape[get_channel_dim()];
const int num_samples = shape[get_sample_dim()];
const DataType mean = global_mean[ch_idx];
const DataType var = global_var[ch_idx];
const DataType scale = global_scale[ch_idx];
const DataType inv_stdev = rsqrt(var + epsilon);
const DataType dvar_factor = inv_stdev * inv_stdev * inv_stdev / 2;
DataType dscale = DataType(0);
DataType dbias = DataType(0);
DataType dmean = DataType(0);
DataType dvar = DataType(0);
const index_t channel_size = shape.get_size() / num_channels / num_samples;
if (gidx < channel_size) {
index_t offset = gidx;
index_t input_offset = 0, d_output_offset = 0;
for (int d = 0; d < ND -2; ++d) {
int idx = offset % shape[d];
input_offset += idx * input_strides[d];
d_output_offset += idx * d_output_strides[d];
offset /= shape[d];
}
input_offset += ch_idx * input_strides[-2];
d_output_offset += ch_idx * d_output_strides[-2];
for (int sample_idx = 0; sample_idx < num_samples; ++sample_idx) {
const DataType x = input[input_offset];
const DataType xhat = (x - mean) * inv_stdev;
const DataType dy = d_output[d_output_offset];
dscale += dy * xhat;
dbias += dy;
const DataType dxhat = dy * scale;
dmean += - dxhat * inv_stdev;
dvar += - dxhat * (x - mean) * dvar_factor;
input_offset += input_strides[-1];
d_output_offset += d_output_strides[-1];
}
}
shared_dscale[tid] = dscale;
shared_dbias[tid] = dbias;
shared_dmean[tid] = dmean;
shared_dvar[tid] = dvar;
for(int stride = BLOCK_SIZE / 2; stride > 0; stride /= 2) {
__syncthreads();
if(tid < stride) {
shared_dscale[tid] += shared_dscale[tid + stride];
shared_dbias[tid] += shared_dbias[tid + stride];
shared_dmean[tid] += shared_dmean[tid + stride];
shared_dvar[tid] += shared_dvar[tid + stride];
}
}
// Output channel sum to global memory
if (tid == 0) {
atomicAdd(&global_dscale[ch_idx], shared_dscale[0]);
atomicAdd(&global_dbias[ch_idx], shared_dbias[0]);
atomicAdd(&global_dmean[ch_idx], shared_dmean[0]);
atomicAdd(&global_dvar[ch_idx], shared_dvar[0]);
}
}
template <int ND, typename TensorType>
void backprop1(int num_samples, const TensorType &input,
const TensorType &d_output, const TensorType &mean,
const TensorType &var, const TensorType &scale,
TensorType &scale_gradient, TensorType &bias_gradient,
TensorType &mean_gradient, TensorType &var_gradient,
typename TensorType::data_type epsilon, cudaStream_t stream) {
using DataType = typename TensorType::data_type;
DISTCONV_CHECK_CUDA(cudaMemsetAsync(
scale_gradient.get_buffer(), 0,
scale_gradient.get_local_pitched_size() * sizeof(DataType),
stream));
DISTCONV_CHECK_CUDA(cudaMemsetAsync(
bias_gradient.get_buffer(), 0,
bias_gradient.get_local_pitched_size() * sizeof(DataType),
stream));
DISTCONV_CHECK_CUDA(cudaMemsetAsync(
mean_gradient.get_buffer(), 0,
mean_gradient.get_local_pitched_size() * sizeof(DataType),
stream));
DISTCONV_CHECK_CUDA(cudaMemsetAsync(
var_gradient.get_buffer(), 0,
var_gradient.get_local_pitched_size() * sizeof(DataType),
stream));
if (input.get_local_size() == 0 || !input.is_split_root()) {
return;
}
const auto input_strides = input.get_strides();
const auto d_output_strides = d_output.get_strides();
const int num_channels = input.get_local_shape()[get_channel_dim()];
// CUDA grid dimension limitation
assert_always(num_channels < 65535);
constexpr int block_size = 256;
dim3 block_dim(block_size);
auto shape = input.get_local_shape();
shape[get_sample_dim()] = num_samples;
index_t channel_size = input.get_local_size() / num_channels / num_samples;
dim3 grid_dim((channel_size + block_size - 1) / block_size,
num_channels);
backprop1_kernel<ND, DataType, block_size><<<grid_dim, block_dim, 0, stream>>>(
input.get_const_base_ptr(),
d_output.get_const_base_ptr(),
mean.get_const_base_ptr(),
var.get_const_base_ptr(),
scale.get_const_base_ptr(),
scale_gradient.get_base_ptr(),
bias_gradient.get_base_ptr(),
mean_gradient.get_base_ptr(),
var_gradient.get_base_ptr(),
epsilon, shape,
input_strides, d_output_strides);
}
#define INSTANTIATE_BACKPROP1(ND, TYPE) \
template \
void backprop1<ND, Tensor<TYPE>>( \
int num_samples, \
const Tensor<TYPE> &input, const Tensor<TYPE> &d_output, \
const Tensor<TYPE> &mean, const Tensor<TYPE> &var, \
const Tensor<TYPE> &scale, Tensor<TYPE> &scale_gradient, \
Tensor<TYPE> &bias_gradient, Tensor<TYPE> &mean_gradient, \
Tensor<TYPE> &var_gradient, TYPE epsilon, \
cudaStream_t stream);
INSTANTIATE_BACKPROP1(4, float)
INSTANTIATE_BACKPROP1(4, double)
INSTANTIATE_BACKPROP1(5, float)
INSTANTIATE_BACKPROP1(5, double)
#undef INSTANTIATE_BACKPROP1
template <int ND, typename DataType>
void __global__ backprop2_kernel(const DataType *input,
const DataType *d_output,
const DataType *global_mean,
const DataType *global_var,
const DataType *global_scale,
const DataType *global_dmean,
const DataType *global_dvar,
DataType *d_input, DataType epsilon,
index_t num_per_sum,
tensor::Array<ND> shape,
tensor::Array<ND> input_strides,
tensor::Array<ND> d_output_strides,
tensor::Array<ND> d_input_strides) {
const index_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const int ch_idx = blockIdx.y;
const int num_channels = shape[get_channel_dim()];
  const int num_samples = shape[get_sample_dim()];
const DataType mean = global_mean[ch_idx];
const DataType var = global_var[ch_idx];
const DataType scale = global_scale[ch_idx];
const DataType dmean = global_dmean[ch_idx];
const DataType dvar = global_dvar[ch_idx];
const DataType inv_stdev = rsqrt(var + epsilon);
const DataType dmean_term = dmean / num_per_sum;
const DataType dvar_term = dvar * 2 / (num_per_sum - 1);
const index_t channel_size = shape.get_size() / num_channels / num_samples;
if (gidx < channel_size) {
index_t offset = gidx;
index_t input_offset = 0, d_output_offset = 0, d_input_offset = 0;
for (int d = 0; d < ND - 2; ++d) {
int idx = offset % shape[d];
input_offset += idx * input_strides[d];
d_output_offset += idx * d_output_strides[d];
d_input_offset += idx * d_input_strides[d];
offset /= shape[d];
}
input_offset += ch_idx * input_strides[-2];
d_output_offset += ch_idx * d_output_strides[-2];
d_input_offset += ch_idx * d_input_strides[-2];
for (int s = 0; s < num_samples; ++s) {
const DataType x = input[input_offset];
const DataType dy = d_output[d_output_offset];
const DataType dxhat = dy * scale;
DataType dx = dxhat * inv_stdev;
dx += dmean_term;
dx += dvar_term * (x - mean);
d_input[d_input_offset] = dx;
input_offset += input_strides[-1];
d_output_offset += d_output_strides[-1];
d_input_offset += d_input_strides[-1];
}
}
}
template <int ND, typename TensorType>
void backprop2(index_t num_samples, index_t num_per_sum,
const TensorType &input, const TensorType &d_output,
const TensorType &mean, const TensorType &var,
const TensorType &scale, const TensorType &mean_gradient,
const TensorType &var_gradient, TensorType &d_input,
typename TensorType::data_type epsilon, cudaStream_t stream) {
using DataType = typename TensorType::data_type;
if (d_input.get_local_size() == 0) return;
const int num_channels = input.get_local_shape()[get_channel_dim()];
constexpr int block_size = 256;
dim3 block_dim(block_size);
index_t channel_size = input.get_local_size() / num_channels / num_samples;
dim3 grid_dim((channel_size + block_size - 1) / block_size,
num_channels);
auto input_strides = input.get_strides();
auto d_output_strides = d_output.get_strides();
auto d_input_strides = d_input.get_strides();
auto shape = input.get_local_shape();
shape[get_sample_dim()] = num_samples;
// CUDA grid dimension limitation
assert_always(num_channels < 65535);
backprop2_kernel<ND, DataType><<<grid_dim, block_dim, 0, stream>>>(
input.get_const_base_ptr(),
d_output.get_const_base_ptr(),
mean.get_const_base_ptr(),
var.get_const_base_ptr(),
scale.get_const_base_ptr(),
mean_gradient.get_const_base_ptr(),
var_gradient.get_const_base_ptr(),
d_input.get_base_ptr(),
epsilon, num_per_sum, shape,
input_strides, d_output_strides, d_input_strides);
}
#define INSTANTIATE_BACKPROP2(ND, TYPE) \
template \
void backprop2<ND, Tensor<TYPE>>( \
index_t num_samples, index_t num_per_sum, \
const Tensor<TYPE> &input, const Tensor<TYPE> &d_output, \
const Tensor<TYPE> &mean, const Tensor<TYPE> &var, \
const Tensor<TYPE> &scale, const Tensor<TYPE> &mean_gradient, \
const Tensor<TYPE> &var_gradient, Tensor<TYPE> &d_input, \
TYPE epsilon, cudaStream_t stream);
INSTANTIATE_BACKPROP2(4, float)
INSTANTIATE_BACKPROP2(4, double)
INSTANTIATE_BACKPROP2(5, float)
INSTANTIATE_BACKPROP2(5, double)
#undef INSTANTIATE_BACKPROP2
} // namespace batchnorm
} // namespace distconv
|
e306c18bc1248e8b865f335a497ecacb28cf8fa4.hip | // !!! This is a file automatically generated by hipify!!!
#include "XLib.hpp"
const int SIZE = (1 << 27);
const int BLOCKDIM = 256;
int main() {
int* devInput;
hipMalloc(&devInput, SIZE * sizeof(int));
//Timer<DEVICE> TM;
//TM.start();
//xlib::fill<<<SIZE / BLOCKDIM, BLOCKDIM>>>(devInput, 1024, 131072, 1, 1024);
/*TM.getTime("fill1");
CUDA_ERROR("A")
TM.start();
cuda_util::fill2<<<SIZE / BLOCKDIM, BLOCKDIM>>>(devInput, 1024, 131072, 1, 1024);
TM.getTime("fill2");
CUDA_ERROR("B")*/
}
| e306c18bc1248e8b865f335a497ecacb28cf8fa4.cu | #include "XLib.hpp"
const int SIZE = (1 << 27);
const int BLOCKDIM = 256;
int main() {
int* devInput;
cudaMalloc(&devInput, SIZE * sizeof(int));
//Timer<DEVICE> TM;
//TM.start();
//xlib::fill<<<SIZE / BLOCKDIM, BLOCKDIM>>>(devInput, 1024, 131072, 1, 1024);
/*TM.getTime("fill1");
CUDA_ERROR("A")
TM.start();
cuda_util::fill2<<<SIZE / BLOCKDIM, BLOCKDIM>>>(devInput, 1024, 131072, 1, 1024);
TM.getTime("fill2");
CUDA_ERROR("B")*/
}
|
14f36756133b6e7846de40c33244cf944190c219.hip | // !!! This is a file automatically generated by hipify!!!
/*
* nvbio
* Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// nvBWT.cu
//
#define NVBIO_CUDA_DEBUG
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <vector>
#include <algorithm>
#include <crc/crc.h>
#include <nvbio/basic/console.h>
#include <nvbio/basic/exceptions.h>
#include <nvbio/basic/bnt.h>
#include <nvbio/basic/numbers.h>
#include <nvbio/basic/timer.h>
#include <nvbio/basic/packedstream.h>
#include <nvbio/basic/thrust_view.h>
#include <nvbio/basic/dna.h>
#include <nvbio/basic/exceptions.h>
#include <nvbio/basic/cuda/arch.h>
#include <nvbio/fmindex/bwt.h>
#include <nvbio/fasta/fasta.h>
#include <nvbio/io/fmindex/fmindex.h>
#include <nvbio/sufsort/sufsort.h>
#include "filelist.h"
// PAC File Type
enum PacType { BPAC = 0, WPAC = 1 };
using namespace nvbio;
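// ASCII -> 2-bit nucleotide lookup: 'A'/'C'/'G'/'T' (either case) map to 0..3,
// '-' maps to 5, and every other character maps to 4 (ambiguous base).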
unsigned char nst_nt4_table[256] = {
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5 /*'-'*/, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
};
#define RAND 0
#define RAND48 1
#if (GENERATOR == RAND) || ((GENERATOR == RAND48) && defined(WIN32))
// generate random base pairs using rand()
inline void srand_bp(const unsigned int s) { srand(s); }
inline float frand() { return float(rand()) / float(RAND_MAX); }
inline uint8 rand_bp() { return uint8( frand() * 4 ) & 3; }
#elif (GENERATOR == RAND48)
// generate random base pairs using rand48()
inline void srand_bp(const unsigned int s) { srand48(s); }
inline uint8 rand_bp() { return uint8( drand48() * 4 ) & 3; }
#endif
struct Counter
{
Counter() : m_size(0), m_reads(0) {}
void begin_read() { m_reads++; }
void end_read() {}
void id(const uint8 c) {}
void read(const uint8 c) { m_size++; }
uint64 m_size;
uint32 m_reads;
};
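// Second-pass consumer: packs each base into the 2-bit output stream, replaces
// ambiguous symbols with random bases, tracks per-symbol frequencies, and
// records contiguous runs of ambiguous characters (Ns) as "holes" in the BNT
// annotation data.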
template <typename stream_type>
struct Writer
{
Writer(stream_type stream, const uint32 reads, const uint64 max_size) :
m_max_size(max_size), m_size(0), m_stream( stream )
{
m_bntseq.seed = 11;
m_bntseq.anns_data.resize( reads );
m_bntseq.anns_info.resize( reads );
srand_bp( m_bntseq.seed );
for (uint32 i = 0; i < 4; ++i)
m_freq[i] = 0;
}
void begin_read()
{
BNTAnnData& ann_data = m_bntseq.anns_data[ m_bntseq.n_seqs ];
ann_data.len = 0;
ann_data.gi = 0;
ann_data.offset = m_size;
ann_data.n_ambs = 0;
BNTAnnInfo& ann_info = m_bntseq.anns_info[ m_bntseq.n_seqs ];
ann_info.anno = "null";
m_lasts = 0;
}
void end_read()
{
m_bntseq.n_seqs++;
}
void id(const uint8 c)
{
m_bntseq.anns_info[ m_bntseq.n_seqs ].name.push_back(char(c));
}
void read(const uint8 s)
{
if (m_size < m_max_size)
{
const uint8 c = nst_nt4_table[s];
const uint8 sc = c < 4 ? c : rand_bp();
m_stream[ m_size ] = sc;
// keep track of the symbol frequencies
++m_freq[sc];
if (c >= 4) // we have an N
{
if (m_lasts == s) // contiguous N
{
// increment length of the last hole
++m_bntseq.ambs.back().len;
}
else
{
// beginning of a new hole
BNTAmb amb;
amb.len = 1;
amb.offset = m_size;
amb.amb = s;
m_bntseq.ambs.push_back( amb );
++m_bntseq.anns_data[ m_bntseq.n_seqs ].n_ambs;
++m_bntseq.n_holes;
}
}
// save last symbol
m_lasts = s;
// update sequence length
BNTAnnData& ann_data = m_bntseq.anns_data[ m_bntseq.n_seqs ];
ann_data.len++;
}
m_bntseq.l_pac++;
m_size++;
}
uint64 m_max_size;
uint64 m_size;
stream_type m_stream;
BNTSeq m_bntseq;
uint8 m_lasts;
uint32 m_freq[4];
};
template <typename StreamType>
bool save_stream(FILE* output_file, const uint64 seq_words, const StreamType* stream)
{
for (uint64 words = 0; words < seq_words; words += 1024)
{
const uint32 n_words = (uint32)nvbio::min( uint64(1024u), uint64(seq_words - words) );
if (fwrite( stream + words, sizeof(StreamType), n_words, output_file ) != n_words)
return false;
}
return true;
}
//
// .wpac file
//
void save_wpac(const uint32 seq_length, const uint32* string_storage, const char* pac_name)
{
log_info(stderr, "\nwriting \"%s\"... started\n", pac_name);
const uint32 seq_words = util::divide_ri( seq_length, 16 );
FILE* output_file = fopen( pac_name, "wb" );
if (output_file == NULL)
{
log_error(stderr, " could not open output file \"%s\"!\n", pac_name );
exit(1);
}
// write the sequence length as a uint64
const uint64 len = seq_length;
fwrite( &len, sizeof(len), 1u, output_file );
// save the uint32 stream
if (save_stream( output_file, seq_words, string_storage ) == false)
{
log_error(stderr, " writing failed!\n");
exit(1);
}
fclose( output_file );
log_info(stderr, "writing \"%s\"... done\n", pac_name);
}
//
// .pac file
//
void save_bpac(const uint32 seq_length, const uint32* string_storage, const char* pac_name)
{
typedef PackedStream<const uint32*,uint8,2,true,int64> stream_type;
typedef PackedStream< uint8*, uint8,2,true,int64> pac_stream_type;
log_info(stderr, "\nwriting \"%s\"... started\n", pac_name);
const uint32 bps_per_byte = 4u;
const uint64 seq_bytes = (seq_length + bps_per_byte - 1u) / bps_per_byte;
FILE* output_file = fopen( pac_name, "wb" );
if (output_file == NULL)
{
log_error(stderr, " could not open output file \"%s\"!\n", pac_name );
exit(1);
}
// copy the uint32 packed stream into a uint8 pac stream
thrust::host_vector<uint8> pac_storage( seq_bytes );
pac_stream_type pac_string( nvbio::plain_view( pac_storage ) );
stream_type string( string_storage );
for (uint32 i = 0; i < seq_length; ++i)
pac_string[i] = string[i];
// save the uint8 stream
if (save_stream( output_file, seq_bytes, nvbio::raw_pointer( pac_storage ) ) == false)
{
log_error(stderr, " writing failed!\n");
exit(1);
}
// the following code makes the pac file size always (l_pac/4+1+1)
if (seq_length % 4 == 0)
{
const uint8 ct = 0;
fwrite( &ct, 1, 1, output_file );
}
{
const uint8 ct = seq_length % 4;
fwrite( &ct, 1, 1, output_file );
}
fclose( output_file );
log_info(stderr, "writing \"%s\"... done\n", pac_name);
}
//
// .pac | .wpac file
//
void save_pac(const uint32 seq_length, const uint32* string_storage, const char* pac_name, const PacType pac_type)
{
if (pac_type == BPAC)
save_bpac( seq_length, string_storage, pac_name );
else
save_wpac( seq_length, string_storage, pac_name );
}
//
// .bwt file
//
void save_bwt(const uint32 seq_length, const uint32 seq_words, const uint32 primary, const uint32* cumFreq, const uint32* h_bwt_storage, const char* bwt_name)
{
log_info(stderr, "\nwriting \"%s\"... started\n", bwt_name);
FILE* output_file = fopen( bwt_name, "wb" );
if (output_file == NULL)
{
log_error(stderr, " could not open output file \"%s\"!\n", bwt_name );
exit(1);
}
fwrite( &primary, sizeof(uint32), 1, output_file );
fwrite( cumFreq, sizeof(uint32), 4, output_file );
if (save_stream( output_file, seq_words, h_bwt_storage ) == false)
{
log_error(stderr, " writing failed!\n");
exit(1);
}
fclose( output_file );
log_info(stderr, "writing \"%s\"... done\n", bwt_name);
}
//
// .sa file
//
void save_ssa(const uint32 seq_length, const uint32 sa_intv, const uint32 ssa_len, const uint32 primary, const uint32* cumFreq, const uint32* h_ssa, const char* sa_name)
{
log_info(stderr, "\nwriting \"%s\"... started\n", sa_name);
FILE* output_file = fopen( sa_name, "wb" );
if (output_file == NULL)
{
log_error(stderr, " could not open output file \"%s\"!\n", sa_name );
exit(1);
}
fwrite( &primary, sizeof(uint32), 1u, output_file );
    fwrite( cumFreq,    sizeof(uint32), 4u, output_file );
fwrite( &sa_intv, sizeof(uint32), 1u, output_file );
fwrite( &seq_length, sizeof(uint32), 1u, output_file );
fwrite( &h_ssa[1], sizeof(uint32), ssa_len-1, output_file );
fclose( output_file );
log_info(stderr, "writing \"%s\"... done\n", sa_name);
}
int build(
const char* input_name,
const char* output_name,
const char* pac_name,
const char* rpac_name,
const char* bwt_name,
const char* rbwt_name,
const char* sa_name,
const char* rsa_name,
const uint64 max_length,
const PacType pac_type,
const bool compute_crc)
{
std::vector<std::string> sortednames;
list_files(input_name, sortednames);
uint32 n_inputs = (uint32)sortednames.size();
log_info(stderr, "\ncounting bps... started\n");
// count entire sequence length
Counter counter;
for (uint32 i = 0; i < n_inputs; ++i)
{
log_info(stderr, " counting \"%s\"\n", sortednames[i].c_str());
FASTA_inc_reader fasta( sortednames[i].c_str() );
if (fasta.valid() == false)
{
log_error(stderr, " unable to open file\n");
exit(1);
}
while (fasta.read( 1024, counter ) == 1024);
}
log_info(stderr, "counting bps... done\n");
const uint64 seq_length = nvbio::min( (uint64)counter.m_size, (uint64)max_length );
const uint32 bps_per_word = sizeof(uint32)*4u;
const uint64 seq_words = (seq_length + bps_per_word - 1u) / bps_per_word;
log_info(stderr, "\nstats:\n");
log_info(stderr, " reads : %u\n", counter.m_reads );
log_info(stderr, " sequence length : %llu bps (%.1f MB)\n",
seq_length,
float(seq_words*sizeof(uint32))/float(1024*1024));
log_info(stderr, " buffer size : %.1f MB\n",
2*seq_words*sizeof(uint32)/1.0e6f );
const uint32 sa_intv = nvbio::io::FMIndexData::SA_INT;
const uint32 ssa_len = (seq_length + sa_intv) / sa_intv;
// allocate the actual storage
thrust::host_vector<uint32> h_string_storage( seq_words+1 );
thrust::host_vector<uint32> h_bwt_storage( seq_words+1 );
thrust::host_vector<uint32> h_ssa( ssa_len );
typedef PackedStream<const uint32*,uint8,io::FMIndexData::BWT_BITS,io::FMIndexData::BWT_BIG_ENDIAN> const_stream_type;
typedef PackedStream< uint32*,uint8,io::FMIndexData::BWT_BITS,io::FMIndexData::BWT_BIG_ENDIAN> stream_type;
stream_type h_string( nvbio::plain_view( h_string_storage ) );
uint32 cumFreq[4] = { 0, 0, 0, 0 };
log_info(stderr, "\nbuffering bps... started\n");
// read all files
{
Writer<stream_type> writer( h_string, counter.m_reads, seq_length );
for (uint32 i = 0; i < n_inputs; ++i)
{
log_info(stderr, " buffering \"%s\"\n", sortednames[i].c_str());
FASTA_inc_reader fasta( sortednames[i].c_str() );
if (fasta.valid() == false)
{
log_error(stderr, " unable to open file!\n");
exit(1);
}
while (fasta.read( 1024, writer ) == 1024);
}
save_bns( writer.m_bntseq, output_name );
// compute the cumulative symbol frequencies
cumFreq[0] = writer.m_freq[0];
cumFreq[1] = writer.m_freq[1] + cumFreq[0];
cumFreq[2] = writer.m_freq[2] + cumFreq[1];
cumFreq[3] = writer.m_freq[3] + cumFreq[2];
if (cumFreq[3] != seq_length)
{
log_error(stderr, " mismatching symbol frequencies!\n");
log_error(stderr, " (%u, %u, %u, %u)\n", cumFreq[0], cumFreq[1], cumFreq[2], cumFreq[3]);
exit(1);
}
}
log_info(stderr, "buffering bps... done\n");
if (compute_crc)
{
const uint32 crc = crcCalc( h_string, uint32(seq_length) );
log_info(stderr, " crc: %u\n", crc);
}
try
{
BWTParams params;
uint32 primary;
thrust::device_vector<uint32> d_string_storage( h_string_storage );
thrust::device_vector<uint32> d_bwt_storage( seq_words+1 );
const_stream_type d_string( nvbio::plain_view( d_string_storage ) );
stream_type d_bwt( nvbio::plain_view( d_bwt_storage ) );
Timer timer;
log_info(stderr, "\nbuilding forward BWT... started\n");
timer.start();
{
StringBWTSSAHandler<const_stream_type,stream_type,uint32*> output(
seq_length, // string length
d_string, // string
sa_intv, // SSA sampling interval
d_bwt, // output bwt iterator
nvbio::plain_view( h_ssa ) ); // output ssa iterator
cuda::blockwise_suffix_sort(
seq_length,
d_string,
output,
¶ms );
// remove the dollar symbol
output.remove_dollar();
primary = output.primary();
}
timer.stop();
log_info(stderr, "building forward BWT... done: %um:%us\n", uint32(timer.seconds()/60), uint32(timer.seconds())%60);
log_info(stderr, " primary: %u\n", primary);
// save everything to disk
{
// copy to the host
thrust::copy( d_bwt_storage.begin(),
d_bwt_storage.begin() + seq_words,
h_bwt_storage.begin() );
if (compute_crc)
{
const_stream_type h_bwt( nvbio::plain_view( h_bwt_storage ) );
const uint32 crc = crcCalc( h_bwt, uint32(seq_length) );
log_info(stderr, " crc: %u\n", crc);
}
save_pac( seq_length, nvbio::plain_view( h_string_storage ), pac_name, pac_type );
save_bwt( seq_length, seq_words, primary, cumFreq, nvbio::plain_view( h_bwt_storage ), bwt_name );
save_ssa( seq_length, sa_intv, ssa_len, primary, cumFreq, nvbio::plain_view( h_ssa ), sa_name );
}
// reverse the string in h_string_storage
{
// reuse the bwt storage to build the reverse
uint32* h_rbase_stream = nvbio::plain_view( h_bwt_storage );
stream_type h_rstring( h_rbase_stream );
// reverse the string
for (uint32 i = 0; i < seq_length; ++i)
h_rstring[i] = h_string[ seq_length - i - 1u ];
// and now swap the vectors
h_bwt_storage.swap( h_string_storage );
h_string = stream_type( nvbio::plain_view( h_string_storage ) );
// and copy back the new string to the device
d_string_storage = h_string_storage;
}
log_info(stderr, "\nbuilding reverse BWT... started\n");
timer.start();
{
StringBWTSSAHandler<const_stream_type,stream_type,uint32*> output(
seq_length, // string length
d_string, // string
sa_intv, // SSA sampling interval
d_bwt, // output bwt iterator
nvbio::plain_view( h_ssa ) ); // output ssa iterator
cuda::blockwise_suffix_sort(
seq_length,
d_string,
output,
¶ms );
// remove the dollar symbol
output.remove_dollar();
primary = output.primary();
}
timer.stop();
log_info(stderr, "building reverse BWT... done: %um:%us\n", uint32(timer.seconds()/60), uint32(timer.seconds())%60);
log_info(stderr, " primary: %u\n", primary);
// save everything to disk
{
// copy to the host
thrust::copy( d_bwt_storage.begin(),
d_bwt_storage.begin() + seq_words,
h_bwt_storage.begin() );
if (compute_crc)
{
const_stream_type h_bwt( nvbio::plain_view( h_bwt_storage ) );
const uint32 crc = crcCalc( h_bwt, uint32(seq_length) );
log_info(stderr, " crc: %u\n", crc);
}
save_pac( seq_length, nvbio::plain_view( h_string_storage ), rpac_name, pac_type );
save_bwt( seq_length, seq_words, primary, cumFreq, nvbio::plain_view( h_bwt_storage ), rbwt_name );
save_ssa( seq_length, sa_intv, ssa_len, primary, cumFreq, nvbio::plain_view( h_ssa ), rsa_name );
}
}
catch (nvbio::cuda_error e)
{
log_error(stderr, "caught a nvbio::cuda_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (nvbio::bad_alloc e)
{
log_error(stderr, "caught a nvbio::bad_alloc exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (nvbio::logic_error e)
{
log_error(stderr, "caught a nvbio::logic_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (nvbio::runtime_error e)
{
log_error(stderr, "caught a nvbio::runtime_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (std::bad_alloc e)
{
log_error(stderr, "caught a std::bad_alloc exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (std::logic_error e)
{
log_error(stderr, "caught a std::logic_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (std::runtime_error e)
{
log_error(stderr, "caught a std::runtime_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (...)
{
log_error(stderr,"unknown exception caught!\n");
exit(1);
}
return 0;
}
int main(int argc, char* argv[])
{
crcInit();
if (argc < 2)
{
log_info(stderr, "please specify input and output file names, e.g:\n");
log_info(stderr, " nvBWT [options] myinput.*.fa output-prefix\n");
log_info(stderr, " options:\n");
log_info(stderr, " -v | --verbosity select verbosity\n");
log_info(stderr, " -m | --max-length clamp input to max_length\n");
log_info(stderr, " -b | --byte-packing output byte packed .pac\n");
log_info(stderr, " -w | --word-packing output word packed .wpac\n");
log_info(stderr, " -c | --crc compute crcs\n");
log_info(stderr, " -d | --device cuda device\n");
exit(0);
}
const char* file_names[2] = { NULL, NULL };
uint64 max_length = uint64(-1);
PacType pac_type = BPAC;
bool crc = false;
int cuda_device = -1;
uint32 n_files = 0;
for (int32 i = 1; i < argc; ++i)
{
const char* arg = argv[i];
if ((strcmp( arg, "-m" ) == 0) ||
(strcmp( arg, "--max-length" ) == 0))
{
max_length = atoi( argv[++i] );
}
else if ((strcmp( argv[i], "-v" ) == 0) ||
(strcmp( argv[i], "-verbosity" ) == 0) ||
(strcmp( argv[i], "--verbosity" ) == 0))
{
set_verbosity( Verbosity( atoi( argv[++i] ) ) );
}
else if ((strcmp( arg, "-b" ) == 0) ||
(strcmp( arg, "--byte-packing" ) == 0))
{
pac_type = BPAC;
}
else if ((strcmp( arg, "-w" ) == 0) ||
(strcmp( arg, "--word-packing" ) == 0))
{
pac_type = WPAC;
}
else if ((strcmp( arg, "-c" ) == 0) ||
(strcmp( arg, "--crc" ) == 0))
{
crc = true;
}
else if ((strcmp( arg, "-d" ) == 0) ||
(strcmp( arg, "--device" ) == 0))
{
cuda_device = atoi( argv[++i] );
}
else
file_names[ n_files++ ] = argv[i];
}
const char* input_name = file_names[0];
const char* output_name = file_names[1];
std::string pac_string = std::string( output_name ) + (pac_type == BPAC ? ".pac" : ".wpac");
const char* pac_name = pac_string.c_str();
std::string rpac_string = std::string( output_name ) + (pac_type == BPAC ? ".rpac" : ".rwpac");
const char* rpac_name = rpac_string.c_str();
std::string bwt_string = std::string( output_name ) + ".bwt";
const char* bwt_name = bwt_string.c_str();
std::string rbwt_string = std::string( output_name ) + ".rbwt";
const char* rbwt_name = rbwt_string.c_str();
std::string sa_string = std::string( output_name ) + ".sa";
const char* sa_name = sa_string.c_str();
std::string rsa_string = std::string( output_name ) + ".rsa";
const char* rsa_name = rsa_string.c_str();
log_info(stderr, "max length : %lld\n", max_length);
log_info(stderr, "input : \"%s\"\n", input_name);
log_info(stderr, "output : \"%s\"\n", output_name);
try
{
int device_count;
hipGetDeviceCount(&device_count);
cuda::check_error("cuda-check");
log_verbose(stderr, " cuda devices : %d\n", device_count);
// inspect and select cuda devices
if (device_count)
{
if (cuda_device == -1)
{
int best_device = 0;
hipDeviceProp_t best_device_prop;
hipGetDeviceProperties( &best_device_prop, best_device );
for (int device = 0; device < device_count; ++device)
{
hipDeviceProp_t device_prop;
hipGetDeviceProperties( &device_prop, device );
log_verbose(stderr, " device %d has compute capability %d.%d\n", device, device_prop.major, device_prop.minor);
log_verbose(stderr, " SM count : %u\n", device_prop.multiProcessorCount);
log_verbose(stderr, " SM clock rate : %u Mhz\n", device_prop.clockRate / 1000);
log_verbose(stderr, " memory clock rate : %.1f Ghz\n", float(device_prop.memoryClockRate) * 1.0e-6f);
if (device_prop.major >= best_device_prop.major &&
device_prop.minor >= best_device_prop.minor)
{
best_device_prop = device_prop;
best_device = device;
}
}
cuda_device = best_device;
}
log_verbose(stderr, " chosen device %d\n", cuda_device);
{
hipDeviceProp_t device_prop;
hipGetDeviceProperties( &device_prop, cuda_device );
log_verbose(stderr, " device name : %s\n", device_prop.name);
log_verbose(stderr, " compute capability : %d.%d\n", device_prop.major, device_prop.minor);
}
hipSetDevice( cuda_device );
}
size_t free, total;
hipMemGetInfo(&free, &total);
NVBIO_CUDA_DEBUG_STATEMENT( log_info(stderr,"device mem : total: %.1f GB, free: %.1f GB\n", float(total)/float(1024*1024*1024), float(free)/float(1024*1024*1024)) );
cuda::check_error("cuda-memory-check");
return build( input_name, output_name, pac_name, rpac_name, bwt_name, rbwt_name, sa_name, rsa_name, max_length, pac_type, crc );
}
catch (nvbio::cuda_error e)
{
log_error(stderr, "caught a nvbio::cuda_error exception:\n");
log_error(stderr, " %s\n", e.what());
return 1;
}
catch (nvbio::bad_alloc e)
{
log_error(stderr, "caught a nvbio::bad_alloc exception:\n");
log_error(stderr, " %s\n", e.what());
return 1;
}
catch (nvbio::logic_error e)
{
log_error(stderr, "caught a nvbio::logic_error exception:\n");
log_error(stderr, " %s\n", e.what());
return 1;
}
catch (nvbio::runtime_error e)
{
log_error(stderr, "caught a nvbio::runtime_error exception:\n");
log_error(stderr, " %s\n", e.what());
return 1;
}
catch (thrust::system::system_error e)
{
log_error(stderr, "caught a thrust::system_error exception:\n");
log_error(stderr, " %s\n", e.what());
return 1;
}
catch (std::bad_alloc e)
{
log_error(stderr, "caught a std::bad_alloc exception:\n");
log_error(stderr, " %s\n", e.what());
return 1;
}
catch (std::logic_error e)
{
log_error(stderr, "caught a std::logic_error exception:\n");
log_error(stderr, " %s\n", e.what());
return 1;
}
catch (std::runtime_error e)
{
log_error(stderr, "caught a std::runtime_error exception:\n");
log_error(stderr, " %s\n", e.what());
return 1;
}
catch (...)
{
log_error(stderr, "caught an unknown exception!\n");
return 1;
}
}
| 14f36756133b6e7846de40c33244cf944190c219.cu | /*
* nvbio
* Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// nvBWT.cu
//
#define NVBIO_CUDA_DEBUG
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <vector>
#include <algorithm>
#include <crc/crc.h>
#include <nvbio/basic/console.h>
#include <nvbio/basic/exceptions.h>
#include <nvbio/basic/bnt.h>
#include <nvbio/basic/numbers.h>
#include <nvbio/basic/timer.h>
#include <nvbio/basic/packedstream.h>
#include <nvbio/basic/thrust_view.h>
#include <nvbio/basic/dna.h>
#include <nvbio/basic/exceptions.h>
#include <nvbio/basic/cuda/arch.h>
#include <nvbio/fmindex/bwt.h>
#include <nvbio/fasta/fasta.h>
#include <nvbio/io/fmindex/fmindex.h>
#include <nvbio/sufsort/sufsort.h>
#include "filelist.h"
// PAC File Type
enum PacType { BPAC = 0, WPAC = 1 };
using namespace nvbio;
unsigned char nst_nt4_table[256] = {
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5 /*'-'*/, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
};
#define RAND 0
#define RAND48 1
#if (GENERATOR == RAND) || ((GENERATOR == RAND48) && defined(WIN32))
// generate random base pairs using rand()
inline void srand_bp(const unsigned int s) { srand(s); }
inline float frand() { return float(rand()) / float(RAND_MAX); }
inline uint8 rand_bp() { return uint8( frand() * 4 ) & 3; }
#elif (GENERATOR == RAND48)
// generate random base pairs using rand48()
inline void srand_bp(const unsigned int s) { srand48(s); }
inline uint8 rand_bp() { return uint8( drand48() * 4 ) & 3; }
#endif
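// First pass over the FASTA inputs: counts reads and total base pairs only,
// so the packed string and BWT buffers can be sized before anything is stored.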
struct Counter
{
Counter() : m_size(0), m_reads(0) {}
void begin_read() { m_reads++; }
void end_read() {}
void id(const uint8 c) {}
void read(const uint8 c) { m_size++; }
uint64 m_size;
uint32 m_reads;
};
template <typename stream_type>
struct Writer
{
Writer(stream_type stream, const uint32 reads, const uint64 max_size) :
m_max_size(max_size), m_size(0), m_stream( stream )
{
m_bntseq.seed = 11;
m_bntseq.anns_data.resize( reads );
m_bntseq.anns_info.resize( reads );
srand_bp( m_bntseq.seed );
for (uint32 i = 0; i < 4; ++i)
m_freq[i] = 0;
}
void begin_read()
{
BNTAnnData& ann_data = m_bntseq.anns_data[ m_bntseq.n_seqs ];
ann_data.len = 0;
ann_data.gi = 0;
ann_data.offset = m_size;
ann_data.n_ambs = 0;
BNTAnnInfo& ann_info = m_bntseq.anns_info[ m_bntseq.n_seqs ];
ann_info.anno = "null";
m_lasts = 0;
}
void end_read()
{
m_bntseq.n_seqs++;
}
void id(const uint8 c)
{
m_bntseq.anns_info[ m_bntseq.n_seqs ].name.push_back(char(c));
}
void read(const uint8 s)
{
if (m_size < m_max_size)
{
const uint8 c = nst_nt4_table[s];
const uint8 sc = c < 4 ? c : rand_bp();
m_stream[ m_size ] = sc;
// keep track of the symbol frequencies
++m_freq[sc];
if (c >= 4) // we have an N
{
if (m_lasts == s) // contiguous N
{
// increment length of the last hole
++m_bntseq.ambs.back().len;
}
else
{
// beginning of a new hole
BNTAmb amb;
amb.len = 1;
amb.offset = m_size;
amb.amb = s;
m_bntseq.ambs.push_back( amb );
++m_bntseq.anns_data[ m_bntseq.n_seqs ].n_ambs;
++m_bntseq.n_holes;
}
}
// save last symbol
m_lasts = s;
// update sequence length
BNTAnnData& ann_data = m_bntseq.anns_data[ m_bntseq.n_seqs ];
ann_data.len++;
}
m_bntseq.l_pac++;
m_size++;
}
uint64 m_max_size;
uint64 m_size;
stream_type m_stream;
BNTSeq m_bntseq;
uint8 m_lasts;
uint32 m_freq[4];
};
template <typename StreamType>
bool save_stream(FILE* output_file, const uint64 seq_words, const StreamType* stream)
{
for (uint64 words = 0; words < seq_words; words += 1024)
{
const uint32 n_words = (uint32)nvbio::min( uint64(1024u), uint64(seq_words - words) );
if (fwrite( stream + words, sizeof(StreamType), n_words, output_file ) != n_words)
return false;
}
return true;
}
//
// .wpac file
//
void save_wpac(const uint32 seq_length, const uint32* string_storage, const char* pac_name)
{
log_info(stderr, "\nwriting \"%s\"... started\n", pac_name);
const uint32 seq_words = util::divide_ri( seq_length, 16 );
FILE* output_file = fopen( pac_name, "wb" );
if (output_file == NULL)
{
log_error(stderr, " could not open output file \"%s\"!\n", pac_name );
exit(1);
}
// write the sequence length as a uint64
const uint64 len = seq_length;
fwrite( &len, sizeof(len), 1u, output_file );
// save the uint32 stream
if (save_stream( output_file, seq_words, string_storage ) == false)
{
log_error(stderr, " writing failed!\n");
exit(1);
}
fclose( output_file );
log_info(stderr, "writing \"%s\"... done\n", pac_name);
}
//
// .pac file
//
void save_bpac(const uint32 seq_length, const uint32* string_storage, const char* pac_name)
{
typedef PackedStream<const uint32*,uint8,2,true,int64> stream_type;
typedef PackedStream< uint8*, uint8,2,true,int64> pac_stream_type;
log_info(stderr, "\nwriting \"%s\"... started\n", pac_name);
const uint32 bps_per_byte = 4u;
const uint64 seq_bytes = (seq_length + bps_per_byte - 1u) / bps_per_byte;
FILE* output_file = fopen( pac_name, "wb" );
if (output_file == NULL)
{
log_error(stderr, " could not open output file \"%s\"!\n", pac_name );
exit(1);
}
// copy the uint32 packed stream into a uint8 pac stream
thrust::host_vector<uint8> pac_storage( seq_bytes );
pac_stream_type pac_string( nvbio::plain_view( pac_storage ) );
stream_type string( string_storage );
for (uint32 i = 0; i < seq_length; ++i)
pac_string[i] = string[i];
// save the uint8 stream
if (save_stream( output_file, seq_bytes, nvbio::raw_pointer( pac_storage ) ) == false)
{
log_error(stderr, " writing failed!\n");
exit(1);
}
// the following code makes the pac file size always (l_pac/4+1+1)
if (seq_length % 4 == 0)
{
const uint8 ct = 0;
fwrite( &ct, 1, 1, output_file );
}
{
const uint8 ct = seq_length % 4;
fwrite( &ct, 1, 1, output_file );
}
fclose( output_file );
log_info(stderr, "writing \"%s\"... done\n", pac_name);
}
//
// .pac | .wpac file
//
void save_pac(const uint32 seq_length, const uint32* string_storage, const char* pac_name, const PacType pac_type)
{
if (pac_type == BPAC)
save_bpac( seq_length, string_storage, pac_name );
else
save_wpac( seq_length, string_storage, pac_name );
}
//
// .bwt file
//
void save_bwt(const uint32 seq_length, const uint32 seq_words, const uint32 primary, const uint32* cumFreq, const uint32* h_bwt_storage, const char* bwt_name)
{
log_info(stderr, "\nwriting \"%s\"... started\n", bwt_name);
FILE* output_file = fopen( bwt_name, "wb" );
if (output_file == NULL)
{
log_error(stderr, " could not open output file \"%s\"!\n", bwt_name );
exit(1);
}
fwrite( &primary, sizeof(uint32), 1, output_file );
fwrite( cumFreq, sizeof(uint32), 4, output_file );
if (save_stream( output_file, seq_words, h_bwt_storage ) == false)
{
log_error(stderr, " writing failed!\n");
exit(1);
}
fclose( output_file );
log_info(stderr, "writing \"%s\"... done\n", bwt_name);
}
//
// .sa file
//
void save_ssa(const uint32 seq_length, const uint32 sa_intv, const uint32 ssa_len, const uint32 primary, const uint32* cumFreq, const uint32* h_ssa, const char* sa_name)
{
log_info(stderr, "\nwriting \"%s\"... started\n", sa_name);
FILE* output_file = fopen( sa_name, "wb" );
if (output_file == NULL)
{
log_error(stderr, " could not open output file \"%s\"!\n", sa_name );
exit(1);
}
fwrite( &primary, sizeof(uint32), 1u, output_file );
    fwrite( cumFreq,    sizeof(uint32), 4u, output_file );
fwrite( &sa_intv, sizeof(uint32), 1u, output_file );
fwrite( &seq_length, sizeof(uint32), 1u, output_file );
fwrite( &h_ssa[1], sizeof(uint32), ssa_len-1, output_file );
fclose( output_file );
log_info(stderr, "writing \"%s\"... done\n", sa_name);
}
int build(
const char* input_name,
const char* output_name,
const char* pac_name,
const char* rpac_name,
const char* bwt_name,
const char* rbwt_name,
const char* sa_name,
const char* rsa_name,
const uint64 max_length,
const PacType pac_type,
const bool compute_crc)
{
std::vector<std::string> sortednames;
list_files(input_name, sortednames);
uint32 n_inputs = (uint32)sortednames.size();
log_info(stderr, "\ncounting bps... started\n");
// count entire sequence length
Counter counter;
for (uint32 i = 0; i < n_inputs; ++i)
{
log_info(stderr, " counting \"%s\"\n", sortednames[i].c_str());
FASTA_inc_reader fasta( sortednames[i].c_str() );
if (fasta.valid() == false)
{
log_error(stderr, " unable to open file\n");
exit(1);
}
while (fasta.read( 1024, counter ) == 1024);
}
log_info(stderr, "counting bps... done\n");
const uint64 seq_length = nvbio::min( (uint64)counter.m_size, (uint64)max_length );
const uint32 bps_per_word = sizeof(uint32)*4u;
const uint64 seq_words = (seq_length + bps_per_word - 1u) / bps_per_word;
log_info(stderr, "\nstats:\n");
log_info(stderr, " reads : %u\n", counter.m_reads );
log_info(stderr, " sequence length : %llu bps (%.1f MB)\n",
seq_length,
float(seq_words*sizeof(uint32))/float(1024*1024));
log_info(stderr, " buffer size : %.1f MB\n",
2*seq_words*sizeof(uint32)/1.0e6f );
const uint32 sa_intv = nvbio::io::FMIndexData::SA_INT;
const uint32 ssa_len = (seq_length + sa_intv) / sa_intv;
// allocate the actual storage
thrust::host_vector<uint32> h_string_storage( seq_words+1 );
thrust::host_vector<uint32> h_bwt_storage( seq_words+1 );
thrust::host_vector<uint32> h_ssa( ssa_len );
typedef PackedStream<const uint32*,uint8,io::FMIndexData::BWT_BITS,io::FMIndexData::BWT_BIG_ENDIAN> const_stream_type;
typedef PackedStream< uint32*,uint8,io::FMIndexData::BWT_BITS,io::FMIndexData::BWT_BIG_ENDIAN> stream_type;
stream_type h_string( nvbio::plain_view( h_string_storage ) );
uint32 cumFreq[4] = { 0, 0, 0, 0 };
log_info(stderr, "\nbuffering bps... started\n");
// read all files
{
Writer<stream_type> writer( h_string, counter.m_reads, seq_length );
for (uint32 i = 0; i < n_inputs; ++i)
{
log_info(stderr, " buffering \"%s\"\n", sortednames[i].c_str());
FASTA_inc_reader fasta( sortednames[i].c_str() );
if (fasta.valid() == false)
{
log_error(stderr, " unable to open file!\n");
exit(1);
}
while (fasta.read( 1024, writer ) == 1024);
}
save_bns( writer.m_bntseq, output_name );
// compute the cumulative symbol frequencies
cumFreq[0] = writer.m_freq[0];
cumFreq[1] = writer.m_freq[1] + cumFreq[0];
cumFreq[2] = writer.m_freq[2] + cumFreq[1];
cumFreq[3] = writer.m_freq[3] + cumFreq[2];
if (cumFreq[3] != seq_length)
{
log_error(stderr, " mismatching symbol frequencies!\n");
log_error(stderr, " (%u, %u, %u, %u)\n", cumFreq[0], cumFreq[1], cumFreq[2], cumFreq[3]);
exit(1);
}
}
log_info(stderr, "buffering bps... done\n");
if (compute_crc)
{
const uint32 crc = crcCalc( h_string, uint32(seq_length) );
log_info(stderr, " crc: %u\n", crc);
}
try
{
BWTParams params;
uint32 primary;
thrust::device_vector<uint32> d_string_storage( h_string_storage );
thrust::device_vector<uint32> d_bwt_storage( seq_words+1 );
const_stream_type d_string( nvbio::plain_view( d_string_storage ) );
stream_type d_bwt( nvbio::plain_view( d_bwt_storage ) );
Timer timer;
log_info(stderr, "\nbuilding forward BWT... started\n");
timer.start();
{
StringBWTSSAHandler<const_stream_type,stream_type,uint32*> output(
seq_length, // string length
d_string, // string
sa_intv, // SSA sampling interval
d_bwt, // output bwt iterator
nvbio::plain_view( h_ssa ) ); // output ssa iterator
cuda::blockwise_suffix_sort(
seq_length,
d_string,
output,
&params );
// remove the dollar symbol
output.remove_dollar();
primary = output.primary();
}
timer.stop();
log_info(stderr, "building forward BWT... done: %um:%us\n", uint32(timer.seconds()/60), uint32(timer.seconds())%60);
log_info(stderr, " primary: %u\n", primary);
// save everything to disk
{
// copy to the host
thrust::copy( d_bwt_storage.begin(),
d_bwt_storage.begin() + seq_words,
h_bwt_storage.begin() );
if (compute_crc)
{
const_stream_type h_bwt( nvbio::plain_view( h_bwt_storage ) );
const uint32 crc = crcCalc( h_bwt, uint32(seq_length) );
log_info(stderr, " crc: %u\n", crc);
}
save_pac( seq_length, nvbio::plain_view( h_string_storage ), pac_name, pac_type );
save_bwt( seq_length, seq_words, primary, cumFreq, nvbio::plain_view( h_bwt_storage ), bwt_name );
save_ssa( seq_length, sa_intv, ssa_len, primary, cumFreq, nvbio::plain_view( h_ssa ), sa_name );
}
// reverse the string in h_string_storage
{
// reuse the bwt storage to build the reverse
uint32* h_rbase_stream = nvbio::plain_view( h_bwt_storage );
stream_type h_rstring( h_rbase_stream );
// reverse the string
for (uint32 i = 0; i < seq_length; ++i)
h_rstring[i] = h_string[ seq_length - i - 1u ];
// and now swap the vectors
h_bwt_storage.swap( h_string_storage );
h_string = stream_type( nvbio::plain_view( h_string_storage ) );
// and copy back the new string to the device
d_string_storage = h_string_storage;
}
log_info(stderr, "\nbuilding reverse BWT... started\n");
timer.start();
{
StringBWTSSAHandler<const_stream_type,stream_type,uint32*> output(
seq_length, // string length
d_string, // string
sa_intv, // SSA sampling interval
d_bwt, // output bwt iterator
nvbio::plain_view( h_ssa ) ); // output ssa iterator
cuda::blockwise_suffix_sort(
seq_length,
d_string,
output,
&params );
// remove the dollar symbol
output.remove_dollar();
primary = output.primary();
}
timer.stop();
log_info(stderr, "building reverse BWT... done: %um:%us\n", uint32(timer.seconds()/60), uint32(timer.seconds())%60);
log_info(stderr, " primary: %u\n", primary);
// save everything to disk
{
// copy to the host
thrust::copy( d_bwt_storage.begin(),
d_bwt_storage.begin() + seq_words,
h_bwt_storage.begin() );
if (compute_crc)
{
const_stream_type h_bwt( nvbio::plain_view( h_bwt_storage ) );
const uint32 crc = crcCalc( h_bwt, uint32(seq_length) );
log_info(stderr, " crc: %u\n", crc);
}
save_pac( seq_length, nvbio::plain_view( h_string_storage ), rpac_name, pac_type );
save_bwt( seq_length, seq_words, primary, cumFreq, nvbio::plain_view( h_bwt_storage ), rbwt_name );
save_ssa( seq_length, sa_intv, ssa_len, primary, cumFreq, nvbio::plain_view( h_ssa ), rsa_name );
}
}
catch (nvbio::cuda_error e)
{
log_error(stderr, "caught a nvbio::cuda_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (nvbio::bad_alloc e)
{
log_error(stderr, "caught a nvbio::bad_alloc exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (nvbio::logic_error e)
{
log_error(stderr, "caught a nvbio::logic_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (nvbio::runtime_error e)
{
log_error(stderr, "caught a nvbio::runtime_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (std::bad_alloc e)
{
log_error(stderr, "caught a std::bad_alloc exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (std::logic_error e)
{
log_error(stderr, "caught a std::logic_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (std::runtime_error e)
{
log_error(stderr, "caught a std::runtime_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (...)
{
log_error(stderr,"unknown exception caught!\n");
exit(1);
}
return 0;
}
int main(int argc, char* argv[])
{
crcInit();
if (argc < 2)
{
log_info(stderr, "please specify input and output file names, e.g:\n");
log_info(stderr, " nvBWT [options] myinput.*.fa output-prefix\n");
log_info(stderr, " options:\n");
log_info(stderr, " -v | --verbosity select verbosity\n");
log_info(stderr, " -m | --max-length clamp input to max_length\n");
log_info(stderr, " -b | --byte-packing output byte packed .pac\n");
log_info(stderr, " -w | --word-packing output word packed .wpac\n");
log_info(stderr, " -c | --crc compute crcs\n");
log_info(stderr, " -d | --device cuda device\n");
exit(0);
}
const char* file_names[2] = { NULL, NULL };
uint64 max_length = uint64(-1);
PacType pac_type = BPAC;
bool crc = false;
int cuda_device = -1;
uint32 n_files = 0;
for (int32 i = 1; i < argc; ++i)
{
const char* arg = argv[i];
if ((strcmp( arg, "-m" ) == 0) ||
(strcmp( arg, "--max-length" ) == 0))
{
max_length = atoi( argv[++i] );
}
else if ((strcmp( argv[i], "-v" ) == 0) ||
(strcmp( argv[i], "-verbosity" ) == 0) ||
(strcmp( argv[i], "--verbosity" ) == 0))
{
set_verbosity( Verbosity( atoi( argv[++i] ) ) );
}
else if ((strcmp( arg, "-b" ) == 0) ||
(strcmp( arg, "--byte-packing" ) == 0))
{
pac_type = BPAC;
}
else if ((strcmp( arg, "-w" ) == 0) ||
(strcmp( arg, "--word-packing" ) == 0))
{
pac_type = WPAC;
}
else if ((strcmp( arg, "-c" ) == 0) ||
(strcmp( arg, "--crc" ) == 0))
{
crc = true;
}
else if ((strcmp( arg, "-d" ) == 0) ||
(strcmp( arg, "--device" ) == 0))
{
cuda_device = atoi( argv[++i] );
}
else
file_names[ n_files++ ] = argv[i];
}
const char* input_name = file_names[0];
const char* output_name = file_names[1];
std::string pac_string = std::string( output_name ) + (pac_type == BPAC ? ".pac" : ".wpac");
const char* pac_name = pac_string.c_str();
std::string rpac_string = std::string( output_name ) + (pac_type == BPAC ? ".rpac" : ".rwpac");
const char* rpac_name = rpac_string.c_str();
std::string bwt_string = std::string( output_name ) + ".bwt";
const char* bwt_name = bwt_string.c_str();
std::string rbwt_string = std::string( output_name ) + ".rbwt";
const char* rbwt_name = rbwt_string.c_str();
std::string sa_string = std::string( output_name ) + ".sa";
const char* sa_name = sa_string.c_str();
std::string rsa_string = std::string( output_name ) + ".rsa";
const char* rsa_name = rsa_string.c_str();
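// file names derived from the output prefix: .pac/.wpac and .rpac/.rwpac for the
// packed forward/reverse sequence, .bwt/.rbwt for the two BWTs, and .sa/.rsa for
// the sampled suffix arrays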
log_info(stderr, "max length : %lld\n", max_length);
log_info(stderr, "input : \"%s\"\n", input_name);
log_info(stderr, "output : \"%s\"\n", output_name);
try
{
int device_count;
cudaGetDeviceCount(&device_count);
cuda::check_error("cuda-check");
log_verbose(stderr, " cuda devices : %d\n", device_count);
// inspect and select cuda devices
if (device_count)
{
if (cuda_device == -1)
{
int best_device = 0;
cudaDeviceProp best_device_prop;
cudaGetDeviceProperties( &best_device_prop, best_device );
for (int device = 0; device < device_count; ++device)
{
cudaDeviceProp device_prop;
cudaGetDeviceProperties( &device_prop, device );
log_verbose(stderr, " device %d has compute capability %d.%d\n", device, device_prop.major, device_prop.minor);
log_verbose(stderr, " SM count : %u\n", device_prop.multiProcessorCount);
log_verbose(stderr, " SM clock rate : %u Mhz\n", device_prop.clockRate / 1000);
log_verbose(stderr, " memory clock rate : %.1f Ghz\n", float(device_prop.memoryClockRate) * 1.0e-6f);
if (device_prop.major >= best_device_prop.major &&
device_prop.minor >= best_device_prop.minor)
{
best_device_prop = device_prop;
best_device = device;
}
}
cuda_device = best_device;
}
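// note: the scan above keeps the last device whose compute capability is >= the
// current best in both major and minor; SM count and clock rates are logged but
// play no part in the choice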
log_verbose(stderr, " chosen device %d\n", cuda_device);
{
cudaDeviceProp device_prop;
cudaGetDeviceProperties( &device_prop, cuda_device );
log_verbose(stderr, " device name : %s\n", device_prop.name);
log_verbose(stderr, " compute capability : %d.%d\n", device_prop.major, device_prop.minor);
}
cudaSetDevice( cuda_device );
}
size_t free, total;
cudaMemGetInfo(&free, &total);
NVBIO_CUDA_DEBUG_STATEMENT( log_info(stderr,"device mem : total: %.1f GB, free: %.1f GB\n", float(total)/float(1024*1024*1024), float(free)/float(1024*1024*1024)) );
cuda::check_error("cuda-memory-check");
return build( input_name, output_name, pac_name, rpac_name, bwt_name, rbwt_name, sa_name, rsa_name, max_length, pac_type, crc );
}
catch (nvbio::cuda_error e)
{
log_error(stderr, "caught a nvbio::cuda_error exception:\n");
log_error(stderr, " %s\n", e.what());
return 1;
}
catch (nvbio::bad_alloc e)
{
log_error(stderr, "caught a nvbio::bad_alloc exception:\n");
log_error(stderr, " %s\n", e.what());
return 1;
}
catch (nvbio::logic_error e)
{
log_error(stderr, "caught a nvbio::logic_error exception:\n");
log_error(stderr, " %s\n", e.what());
return 1;
}
catch (nvbio::runtime_error e)
{
log_error(stderr, "caught a nvbio::runtime_error exception:\n");
log_error(stderr, " %s\n", e.what());
return 1;
}
catch (thrust::system::system_error e)
{
log_error(stderr, "caught a thrust::system_error exception:\n");
log_error(stderr, " %s\n", e.what());
return 1;
}
catch (std::bad_alloc e)
{
log_error(stderr, "caught a std::bad_alloc exception:\n");
log_error(stderr, " %s\n", e.what());
return 1;
}
catch (std::logic_error e)
{
log_error(stderr, "caught a std::logic_error exception:\n");
log_error(stderr, " %s\n", e.what());
return 1;
}
catch (std::runtime_error e)
{
log_error(stderr, "caught a std::runtime_error exception:\n");
log_error(stderr, " %s\n", e.what());
return 1;
}
catch (...)
{
log_error(stderr, "caught an unknown exception!\n");
return 1;
}
}
|
14e198b921441085d39a0901859bb6f347cc251b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void square(float* d_out, float* d_in) {
int idx = threadIdx.x;
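// indexing uses threadIdx.x only, so a launch is expected to use a single
// 1-D block; one launch covers at most blockDim.x elements of d_in/d_out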
float f = d_in[idx];
d_out[idx] = f * f;
} | 14e198b921441085d39a0901859bb6f347cc251b.cu | #include "includes.h"
__global__ void square(float* d_out, float* d_in) {
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f;
} |
2622dc610ee85272ac2b0d9c7084f2cac3d35eb0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <conv_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
void randomInit(float*, int);
void printDiff(float*, float*, int, int);
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int, unsigned int);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
runTest(argc, argv);
CUT_EXIT(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest(int argc, char** argv)
{
CUT_DEVICE_INIT(argc, argv);
// set seed for rand()
srand(2006);
// allocate host memory for matrices A and B
unsigned int size_A = WIDTH_A * HEIGHT_A;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*) malloc(mem_size_A);
unsigned int size_B = (WIDTH_B<WIDTH_PADDING?WIDTH_PADDING:WIDTH_B) * HEIGHT_B;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*) malloc(mem_size_B);
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
for (int i=0; i<WIDTH_A; i++) {
for (int j=0; j<HEIGHT_A; j++) {
if (i<WIDTH_B||j<HEIGHT_B) {
h_A[j*WIDTH_A+i] = 0.0f;
}
}
}
// allocate device memory
float* d_A;
CUDA_SAFE_CALL(hipMalloc((void**) &d_A, mem_size_A));
float* d_B;
CUDA_SAFE_CALL(hipMalloc((void**) &d_B, mem_size_B));
// copy host memory to device
CUDA_SAFE_CALL(hipMemcpy(d_A, h_A, mem_size_A,
hipMemcpyHostToDevice) );
CUDA_SAFE_CALL(hipMemcpy(d_B, h_B, mem_size_B,
hipMemcpyHostToDevice) );
// allocate device memory for result
unsigned int size_C = WIDTH_C * HEIGHT_C;
unsigned int mem_size_C = sizeof(float) * size_C;
// allocate host memory for the result
float* h_C = (float*) malloc(mem_size_C);
// create and start timer
unsigned int timer = 0;
// compute reference solution
float* reference = (float*) malloc(mem_size_C);
computeGold(reference, h_A, h_B, WIDTH_INPUT, HEIGHT_INPUT, HEIGHT_B, WIDTH_B);
CUTBoolean res;
{
free(h_C);
h_C = (float*) malloc(mem_size_C);
float* d_C;
CUDA_SAFE_CALL(hipMalloc((void**) &d_C, mem_size_C));
// setup execution parameters
dim3 threads(16, 16);
dim3 grid(WIDTH_C / threads.x, HEIGHT_C / threads.y);
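// each 16x16 block computes one 16x16 tile of C; the integer division assumes
// WIDTH_C and HEIGHT_C are multiples of 16 (conv_kernel.cu is not shown here,
// so this is inferred from the launch shape)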
CUT_SAFE_CALL(cutCreateTimer(&timer));
hipDeviceSynchronize();
CUT_SAFE_CALL(cutStartTimer(timer));
// execute the kernel
hipLaunchKernelGGL(( conv_naive), dim3(grid), dim3(threads) , 0, 0, d_A, d_B, d_C, WIDTH_A, HEIGHT_A, HEIGHT_B, WIDTH_B);
// stop and destroy timer
hipDeviceSynchronize();
CUT_SAFE_CALL(cutStopTimer(timer));
// check if kernel execution generated an error
CUT_CHECK_ERROR("Kernel execution failed");
// copy result from device to host
CUDA_SAFE_CALL(hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost) );
printf("conv_naive Processing time: %f (ms), %f Gflops \n", cutGetTimerValue(timer), 2000.0*HEIGHT_B*WIDTH_B*HEIGHT_INPUT*WIDTH_INPUT/cutGetTimerValue(timer)/1024/1024/1024);
CUT_SAFE_CALL(cutDeleteTimer(timer));
CUDA_SAFE_CALL(hipFree(d_C));
}
// check result
res = cutCompareL2fe(reference, h_C, size_C, 1e-6f);
printf("Test %s \n", (1 == res) ? "PASSED" : "FAILED");
{
free(h_C);
h_C = (float*) malloc(mem_size_C);
float* d_C;
CUDA_SAFE_CALL(hipMalloc((void**) &d_C, mem_size_C));
// setup execution parameters
dim3 threads(256, 1);
dim3 grid(WIDTH_C / threads.x, HEIGHT_C / threads.y /8);
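// 256x1 threads with HEIGHT_C / 8 blocks in y suggests each conv_opt block
// covers 8 output rows; this is inferred from the launch shape, the kernel
// itself lives in conv_kernel.cu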
CUT_SAFE_CALL(cutCreateTimer(&timer));
hipDeviceSynchronize();
CUT_SAFE_CALL(cutStartTimer(timer));
// execute the kernel
hipLaunchKernelGGL(( conv_opt), dim3(grid), dim3(threads) , 0, 0, d_A, d_B, d_C, WIDTH_A, HEIGHT_A, HEIGHT_B, WIDTH_B);
// stop and destroy timer
hipDeviceSynchronize();
CUT_SAFE_CALL(cutStopTimer(timer));
// check if kernel execution generated an error
CUT_CHECK_ERROR("Kernel execution failed");
// copy result from device to host
CUDA_SAFE_CALL(hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost) );
printf("conv_opt Processing time: %f (ms), %f Gflops \n", cutGetTimerValue(timer), 2000.0*HEIGHT_B*WIDTH_B*HEIGHT_INPUT*WIDTH_INPUT/cutGetTimerValue(timer)/1024/1024/1024);
CUT_SAFE_CALL(cutDeleteTimer(timer));
CUDA_SAFE_CALL(hipFree(d_C));
}
// check result
res = cutCompareL2fe(reference, h_C, size_C, 1e-6f);
printf("Test %s \n", (1 == res) ? "PASSED" : "FAILED");
// clean up memory
free(h_A);
free(h_B);
free(h_C);
free(reference);
CUDA_SAFE_CALL(hipFree(d_A));
CUDA_SAFE_CALL(hipFree(d_B));
}
// Allocates a matrix with random float entries.
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
void printDiff(float *data1, float *data2, int width, int height)
{
int i,j,k;
int error_count=0;
for (j=0; j<height; j++) {
for (i=0; i<width; i++) {
k = j*width+i;
if (data1[k] != data2[k]) {
printf("diff(%d,%d) CPU=%4.4f, GPU=%4.4f n", i,j, data1[k], data2[k]);
error_count++;
}
}
}
printf(" nTotal Errors = %d n", error_count);
}
| 2622dc610ee85272ac2b0d9c7084f2cac3d35eb0.cu |
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <conv_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
void randomInit(float*, int);
void printDiff(float*, float*, int, int);
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int, unsigned int);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
runTest(argc, argv);
CUT_EXIT(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest(int argc, char** argv)
{
CUT_DEVICE_INIT(argc, argv);
// set seed for rand()
srand(2006);
// allocate host memory for matrices A and B
unsigned int size_A = WIDTH_A * HEIGHT_A;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*) malloc(mem_size_A);
unsigned int size_B = (WIDTH_B<WIDTH_PADDING?WIDTH_PADDING:WIDTH_B) * HEIGHT_B;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*) malloc(mem_size_B);
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
for (int i=0; i<WIDTH_A; i++) {
for (int j=0; j<HEIGHT_A; j++) {
if (i<WIDTH_B||j<HEIGHT_B) {
h_A[j*WIDTH_A+i] = 0.0f;
}
}
}
// allocate device memory
float* d_A;
CUDA_SAFE_CALL(cudaMalloc((void**) &d_A, mem_size_A));
float* d_B;
CUDA_SAFE_CALL(cudaMalloc((void**) &d_B, mem_size_B));
// copy host memory to device
CUDA_SAFE_CALL(cudaMemcpy(d_A, h_A, mem_size_A,
cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL(cudaMemcpy(d_B, h_B, mem_size_B,
cudaMemcpyHostToDevice) );
// allocate device memory for result
unsigned int size_C = WIDTH_C * HEIGHT_C;
unsigned int mem_size_C = sizeof(float) * size_C;
// allocate host memory for the result
float* h_C = (float*) malloc(mem_size_C);
// create and start timer
unsigned int timer = 0;
// compute reference solution
float* reference = (float*) malloc(mem_size_C);
computeGold(reference, h_A, h_B, WIDTH_INPUT, HEIGHT_INPUT, HEIGHT_B, WIDTH_B);
CUTBoolean res;
{
free(h_C);
h_C = (float*) malloc(mem_size_C);
float* d_C;
CUDA_SAFE_CALL(cudaMalloc((void**) &d_C, mem_size_C));
// setup execution parameters
dim3 threads(16, 16);
dim3 grid(WIDTH_C / threads.x, HEIGHT_C / threads.y);
CUT_SAFE_CALL(cutCreateTimer(&timer));
cudaThreadSynchronize();
CUT_SAFE_CALL(cutStartTimer(timer));
// execute the kernel
conv_naive<<< grid, threads >>>(d_A, d_B, d_C, WIDTH_A, HEIGHT_A, HEIGHT_B, WIDTH_B);
// stop and destroy timer
cudaThreadSynchronize();
CUT_SAFE_CALL(cutStopTimer(timer));
// check if kernel execution generated an error
CUT_CHECK_ERROR("Kernel execution failed");
// copy result from device to host
CUDA_SAFE_CALL(cudaMemcpy(h_C, d_C, mem_size_C,
cudaMemcpyDeviceToHost) );
printf("conv_naive Processing time: %f (ms), %f Gflops \n", cutGetTimerValue(timer), 2000.0*HEIGHT_B*WIDTH_B*HEIGHT_INPUT*WIDTH_INPUT/cutGetTimerValue(timer)/1024/1024/1024);
CUT_SAFE_CALL(cutDeleteTimer(timer));
CUDA_SAFE_CALL(cudaFree(d_C));
}
// check result
res = cutCompareL2fe(reference, h_C, size_C, 1e-6f);
printf("Test %s \n", (1 == res) ? "PASSED" : "FAILED");
{
free(h_C);
h_C = (float*) malloc(mem_size_C);
float* d_C;
CUDA_SAFE_CALL(cudaMalloc((void**) &d_C, mem_size_C));
// setup execution parameters
dim3 threads(256, 1);
dim3 grid(WIDTH_C / threads.x, HEIGHT_C / threads.y /8);
CUT_SAFE_CALL(cutCreateTimer(&timer));
cudaThreadSynchronize();
CUT_SAFE_CALL(cutStartTimer(timer));
// execute the kernel
conv_opt<<< grid, threads >>>(d_A, d_B, d_C, WIDTH_A, HEIGHT_A, HEIGHT_B, WIDTH_B);
// stop and destroy timer
cudaThreadSynchronize();
CUT_SAFE_CALL(cutStopTimer(timer));
// check if kernel execution generated an error
CUT_CHECK_ERROR("Kernel execution failed");
// copy result from device to host
CUDA_SAFE_CALL(cudaMemcpy(h_C, d_C, mem_size_C,
cudaMemcpyDeviceToHost) );
printf("conv_opt Processing time: %f (ms), %f Gflops \n", cutGetTimerValue(timer), 2000.0*HEIGHT_B*WIDTH_B*HEIGHT_INPUT*WIDTH_INPUT/cutGetTimerValue(timer)/1024/1024/1024);
CUT_SAFE_CALL(cutDeleteTimer(timer));
CUDA_SAFE_CALL(cudaFree(d_C));
}
// check result
res = cutCompareL2fe(reference, h_C, size_C, 1e-6f);
printf("Test %s \n", (1 == res) ? "PASSED" : "FAILED");
// clean up memory
free(h_A);
free(h_B);
free(h_C);
free(reference);
CUDA_SAFE_CALL(cudaFree(d_A));
CUDA_SAFE_CALL(cudaFree(d_B));
}
// Allocates a matrix with random float entries.
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
void printDiff(float *data1, float *data2, int width, int height)
{
int i,j,k;
int error_count=0;
for (j=0; j<height; j++) {
for (i=0; i<width; i++) {
k = j*width+i;
if (data1[k] != data2[k]) {
printf("diff(%d,%d) CPU=%4.4f, GPU=%4.4f n", i,j, data1[k], data2[k]);
error_count++;
}
}
}
printf(" nTotal Errors = %d n", error_count);
}
|
19883f308ef024c0beead04e865aebd74dc6fd3b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Main.cuh"
extern double *Dens_d;
extern double ScalingFactor;
extern int NRAD, NSEC, size_grid;
extern dim3 dimGrid2, dimBlock2;
__host__ void MultiplyPolarGridbyConstant (double *Dens)
{
hipLaunchKernelGGL(( MultiplyPolarGridbyConstantKernel), dim3(dimGrid2), dim3(dimBlock2), 0, 0, Dens_d, NRAD, NSEC, ScalingFactor);
gpuErrchk(hipDeviceSynchronize());
}
| 19883f308ef024c0beead04e865aebd74dc6fd3b.cu | #include "Main.cuh"
extern double *Dens_d;
extern double ScalingFactor;
extern int NRAD, NSEC, size_grid;
extern dim3 dimGrid2, dimBlock2;
__host__ void MultiplyPolarGridbyConstant (double *Dens)
{
MultiplyPolarGridbyConstantKernel<<<dimGrid2, dimBlock2>>>(Dens_d, NRAD, NSEC, ScalingFactor);
gpuErrchk(cudaDeviceSynchronize());
}
|
2f28bbd82a06e40f70d1df27f08a20a81e5f1fa1.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef _CUDA_CU
#define _CUDA_CU
#include "Header.cuh"
#include "hip/hip_runtime_api.h"
#include "device_launch_parameters.h";
#include "DXF.h"
#include "Voxel.h"
#include <stdio.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include <chrono>
using std::chrono::duration_cast;
using std::chrono::microseconds;
typedef std::chrono::steady_clock the_clock;
//
// Interpolate between two nodes
__device__ void AddInterpolatedVertex(unsigned int voxelIter,
unsigned int vertexIter,
float* devVertexArray,
float* devNodePositionArray,
float* devNodeDensityArray,
unsigned int node1,
unsigned int node2,
float devDensityCutoff)
{
if (abs(devDensityCutoff - devNodeDensityArray[(voxelIter * 8) + node1]) < 0.00001f)
{
devVertexArray[(vertexIter * 3)] = devNodePositionArray[(voxelIter * (8 * 3)) + (node1 * 3)];
devVertexArray[(vertexIter * 3) + 1] = devNodePositionArray[(voxelIter * (8 * 3)) + (node1 * 3) + 1];
devVertexArray[(vertexIter * 3) + 2] = devNodePositionArray[(voxelIter * (8 * 3)) + (node1 * 3) + 2];
return;
}
if (abs(devDensityCutoff - devNodeDensityArray[(voxelIter * 8) + node2]) < 0.00001f)
{
devVertexArray[(vertexIter * 3)] = devNodePositionArray[(voxelIter * (8 * 3)) + (node2 * 3)];
devVertexArray[(vertexIter * 3) + 1] = devNodePositionArray[(voxelIter * (8 * 3)) + (node2 * 3) + 1];
devVertexArray[(vertexIter * 3) + 2] = devNodePositionArray[(voxelIter * (8 * 3)) + (node2 * 3) + 2];
return;
}
if (abs(devNodeDensityArray[(voxelIter * 8) + node1] - devNodeDensityArray[(voxelIter * 8) + node2]) < 0.00001f)
{
devVertexArray[(vertexIter * 3)] = devNodePositionArray[(voxelIter * (8 * 3)) + (node1 * 3)];
devVertexArray[(vertexIter * 3) + 1] = devNodePositionArray[(voxelIter * (8 * 3)) + (node1 * 3) + 1];
devVertexArray[(vertexIter * 3) + 2] = devNodePositionArray[(voxelIter * (8 * 3)) + (node1 * 3) + 2];
return;
}
float mu = (devDensityCutoff - devNodeDensityArray[(voxelIter * 8) + node1]) / (devNodeDensityArray[(voxelIter * 8) + node2] - devNodeDensityArray[(voxelIter * 8) + node1]);
devVertexArray[(vertexIter * 3)] = devNodePositionArray[(voxelIter * (8 * 3)) + (node1 * 3)] + mu * (devNodePositionArray[(voxelIter * (8 * 3)) + (node2 * 3)] - devNodePositionArray[(voxelIter * (8 * 3)) + (node1 * 3)]);
devVertexArray[(vertexIter * 3) + 1] = devNodePositionArray[(voxelIter * (8 * 3)) + (node1 * 3) + 1] + mu * (devNodePositionArray[(voxelIter * (8 * 3)) + (node2 * 3) + 1] - devNodePositionArray[(voxelIter * (8 * 3)) + (node1 * 3) + 1]);
devVertexArray[(vertexIter * 3) + 2] = devNodePositionArray[(voxelIter * (8 * 3)) + (node1 * 3) + 2] + mu * (devNodePositionArray[(voxelIter * (8 * 3)) + (node2 * 3) + 2] - devNodePositionArray[(voxelIter * (8 * 3)) + (node1 * 3) + 2]);
}
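// mu above is the usual marching-cubes linear interpolation factor: the vertex is
// placed where the density along the node1->node2 edge crosses devDensityCutoff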
__global__ void March(float* devDensityCutoff,
int* devVoxelArraySize,
float* devNodeDensityArray,
float* devNodePositionArray,
int* devEdgeTable,
int* devTriTable,
float* devAllVertices,
int* devVertCounter)
{
int voxelIter = (blockIdx.x * blockDim.x) + threadIdx.x;
if(voxelIter < *devVoxelArraySize)
{
unsigned char voxelByte = 0x00;
if (devNodeDensityArray[(voxelIter * 8) + 1] < *devDensityCutoff) voxelByte |= 1;
if (devNodeDensityArray[(voxelIter * 8) + 5] < *devDensityCutoff) voxelByte |= 2;
if (devNodeDensityArray[(voxelIter * 8) + 4] < *devDensityCutoff) voxelByte |= 4;
if (devNodeDensityArray[(voxelIter * 8) + 0] < *devDensityCutoff) voxelByte |= 8;
if (devNodeDensityArray[(voxelIter * 8) + 3] < *devDensityCutoff) voxelByte |= 16;
if (devNodeDensityArray[(voxelIter * 8) + 7] < *devDensityCutoff) voxelByte |= 32;
if (devNodeDensityArray[(voxelIter * 8) + 6] < *devDensityCutoff) voxelByte |= 64;
if (devNodeDensityArray[(voxelIter * 8) + 2] < *devDensityCutoff) voxelByte |= 128;
if (devEdgeTable[voxelByte] != 0)
{
float vertexArray[12 * 3]; // one xyz triple for each of the 12 possible edge vertices
if (devEdgeTable[voxelByte] & 1) // AND operator
AddInterpolatedVertex(voxelIter, 0, vertexArray, devNodePositionArray, devNodeDensityArray, 1, 5, *devDensityCutoff);
if (devEdgeTable[voxelByte] & 2)
AddInterpolatedVertex(voxelIter, 1, vertexArray, devNodePositionArray, devNodeDensityArray, 5, 4, *devDensityCutoff);
if (devEdgeTable[voxelByte] & 4)
AddInterpolatedVertex(voxelIter, 2, vertexArray, devNodePositionArray, devNodeDensityArray, 4, 0, *devDensityCutoff);
if (devEdgeTable[voxelByte] & 8)
AddInterpolatedVertex(voxelIter, 3, vertexArray, devNodePositionArray, devNodeDensityArray, 0, 1, *devDensityCutoff);
if (devEdgeTable[voxelByte] & 16)
AddInterpolatedVertex(voxelIter, 4, vertexArray, devNodePositionArray, devNodeDensityArray, 3, 7, *devDensityCutoff);
if (devEdgeTable[voxelByte] & 32)
AddInterpolatedVertex(voxelIter, 5, vertexArray, devNodePositionArray, devNodeDensityArray, 7, 6, *devDensityCutoff);
if (devEdgeTable[voxelByte] & 64)
AddInterpolatedVertex(voxelIter, 6, vertexArray, devNodePositionArray, devNodeDensityArray, 6, 2, *devDensityCutoff);
if (devEdgeTable[voxelByte] & 128)
AddInterpolatedVertex(voxelIter, 7, vertexArray, devNodePositionArray, devNodeDensityArray, 2, 3, *devDensityCutoff);
if (devEdgeTable[voxelByte] & 256)
AddInterpolatedVertex(voxelIter, 8, vertexArray, devNodePositionArray, devNodeDensityArray, 1, 3, *devDensityCutoff);
if (devEdgeTable[voxelByte] & 512)
AddInterpolatedVertex(voxelIter, 9, vertexArray, devNodePositionArray, devNodeDensityArray, 5, 7, *devDensityCutoff);
if (devEdgeTable[voxelByte] & 1024)
AddInterpolatedVertex(voxelIter, 10, vertexArray, devNodePositionArray, devNodeDensityArray, 4, 6, *devDensityCutoff);
if (devEdgeTable[voxelByte] & 2048)
AddInterpolatedVertex(voxelIter, 11, vertexArray, devNodePositionArray, devNodeDensityArray, 0, 2, *devDensityCutoff);
for (int vertIter = 0; devTriTable[(voxelByte * 16) + vertIter] != -1; vertIter += 3)
{
devAllVertices[(voxelIter * (3 * 15)) + ((vertIter + 0) * 3) + 0] = vertexArray[(devTriTable[(voxelByte * 16) + (vertIter)] * 3) + 0];
devAllVertices[(voxelIter * (3 * 15)) + ((vertIter + 0) * 3) + 1] = vertexArray[(devTriTable[(voxelByte * 16) + (vertIter)] * 3) + 1];
devAllVertices[(voxelIter * (3 * 15)) + ((vertIter + 0) * 3) + 2] = vertexArray[(devTriTable[(voxelByte * 16) + (vertIter)] * 3) + 2];
devAllVertices[(voxelIter * (3 * 15)) + ((vertIter + 1) * 3) + 0] = vertexArray[(devTriTable[(voxelByte * 16) + (vertIter + 1)] * 3) + 0];
devAllVertices[(voxelIter * (3 * 15)) + ((vertIter + 1) * 3) + 1] = vertexArray[(devTriTable[(voxelByte * 16) + (vertIter + 1)] * 3) + 1];
devAllVertices[(voxelIter * (3 * 15)) + ((vertIter + 1) * 3) + 2] = vertexArray[(devTriTable[(voxelByte * 16) + (vertIter + 1)] * 3) + 2];
devAllVertices[(voxelIter * (3 * 15)) + ((vertIter + 2) * 3) + 0] = vertexArray[(devTriTable[(voxelByte * 16) + (vertIter + 2)] * 3) + 0];
devAllVertices[(voxelIter * (3 * 15)) + ((vertIter + 2) * 3) + 1] = vertexArray[(devTriTable[(voxelByte * 16) + (vertIter + 2)] * 3) + 1];
devAllVertices[(voxelIter * (3 * 15)) + ((vertIter + 2) * 3) + 2] = vertexArray[(devTriTable[(voxelByte * 16) + (vertIter + 2)] * 3) + 2];
atomicAdd(&devVertCounter[0], 3);
}
}
}
}
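// every voxel owns a fixed 15-vertex (5-triangle) slot in devAllVertices; unused
// slots stay zero and are compacted later with thrust::copy_if(is_non_zero), so a
// legitimate coordinate exactly equal to 0.0f would also be dropped there, while
// devVertCounter accumulates the real vertex count via atomicAdd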
struct is_non_zero
{
__host__ __device__ bool operator()(const float x)
{
return x != 0;
}
};
void RunMarchingCubes(float densityCutoff,
Voxel* voxelArray,
int voxelArraySize,
XMFLOAT3* allVertices,
int* allIndices,
int& vertCounter,
int* triTable,
int* edgeTable,
double& nodeParseTime,
double& mallocTime,
double& memcpyTime,
double& marchTime,
double& compactTime,
double& freeTime)
{
the_clock::time_point p1 = the_clock::now();
//
// Create and Load intermediate arrays
float* nodeDensityArray = new float[8 * voxelArraySize];
float* nodePositionArray = new float[3 * 8 * voxelArraySize];
for (int i = 0; i < voxelArraySize; ++i)
{
for (int j = 0; j < 8; ++j)
{
nodeDensityArray[(i * 8) + j] = voxelArray[i].getNode(j)->density;
nodePositionArray[(i * (8 * 3)) + (j * 3) + 0] = voxelArray[i].getNode(j)->position.x;
nodePositionArray[(i * (8 * 3)) + (j * 3) + 1] = voxelArray[i].getNode(j)->position.y;
nodePositionArray[(i * (8 * 3)) + (j * 3) + 2] = voxelArray[i].getNode(j)->position.z;
}
}
the_clock::time_point p2 = the_clock::now();
nodeParseTime = duration_cast<microseconds>(p2 - p1).count();
float* devDensityCutoff = 0;
int* devVoxelArraySize = 0;
float* devNodeDensityArray = 0;
float* devNodePositionArray = 0;
int* devEdgeTable = 0;
int* devTriTable = 0;
float* devAllVertices = 0;
int* devVertCounter = 0;
hipError_t cudaStatus;
//
// Malloc
cudaStatus = hipMallocManaged((void**)&devDensityCutoff, sizeof(float));
if (cudaStatus != hipSuccess)
fprintf(stderr, "hipMalloc failed!");
cudaStatus = hipMallocManaged((void**)&devVoxelArraySize, sizeof(int));
if (cudaStatus != hipSuccess)
fprintf(stderr, "hipMalloc failed!");
cudaStatus = hipMallocManaged((void**)&devNodeDensityArray, 8 * voxelArraySize * sizeof(float));
if (cudaStatus != hipSuccess)
fprintf(stderr, "hipMalloc failed!");
cudaStatus = hipMallocManaged((void**)&devNodePositionArray, 3 * 8 * voxelArraySize * sizeof(float));
if (cudaStatus != hipSuccess)
fprintf(stderr, "hipMalloc failed!");
cudaStatus = hipMallocManaged((void**)&devEdgeTable, 256 * sizeof(int));
if (cudaStatus != hipSuccess)
fprintf(stderr, "hipMalloc failed!");
cudaStatus = hipMallocManaged((void**)&devTriTable, 256 * 16 * sizeof(int));
if (cudaStatus != hipSuccess)
fprintf(stderr, "hipMalloc failed!");
cudaStatus = hipMallocManaged((void**)&devVertCounter, sizeof(int));
if (cudaStatus != hipSuccess)
fprintf(stderr, "hipMalloc failed!");
cudaStatus = hipMallocManaged((void**)&devAllVertices, 15 * 3 * voxelArraySize * sizeof(float));
if (cudaStatus != hipSuccess)
fprintf(stderr, "hipMalloc failed!");
the_clock::time_point p3 = the_clock::now();
mallocTime = duration_cast<microseconds>(p3 - p2).count();
//
// Initialise arrays with values
cudaStatus = hipMemcpy(devDensityCutoff, &densityCutoff, sizeof(float), hipMemcpyHostToDevice);
if(cudaStatus != hipSuccess)
fprintf(stderr, "hipMemcpy failed!");
cudaStatus = hipMemcpy(devVoxelArraySize, &voxelArraySize, sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
fprintf(stderr, "hipMemcpy failed!");
cudaStatus = hipMemcpy(devNodeDensityArray, nodeDensityArray, 8 * voxelArraySize * sizeof(float), hipMemcpyHostToDevice);
if(cudaStatus != hipSuccess)
fprintf(stderr, "hipMemcpy failed!");
cudaStatus = hipMemcpy(devNodePositionArray, nodePositionArray, 3 * 8 * voxelArraySize * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
fprintf(stderr, "hipMemcpy failed!");
cudaStatus = hipMemcpy(devEdgeTable, edgeTable, 256 * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
fprintf(stderr, "hipMemcpy failed!");
cudaStatus = hipMemcpy(devTriTable, triTable, 256 * 16 * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
fprintf(stderr, "hipMemcpy failed!");
//
// Delete intermediate dynamic arrays
delete[] nodeDensityArray;
nodeDensityArray = 0;
delete[] nodePositionArray;
nodePositionArray = 0;
the_clock::time_point p4 = the_clock::now();
memcpyTime = duration_cast<microseconds>(p4 - p3).count();
//
// Optimise thread hierarchies
int numThreads = voxelArraySize % 32 == 0 ? voxelArraySize : ((voxelArraySize / 32) + 1.0f) * 32;
int numBlocks = 1;
if (numThreads > 1024)
{
numBlocks = numThreads % 1024 == 0 ? (numThreads / 1024) : (numThreads / 1024) + 1;
numThreads = numThreads / numBlocks;
}
dim3 blocks(numBlocks);
dim3 threads(numThreads);
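// launch shape: round the voxel count up to a multiple of 32 (warp size) and
// split into blocks of at most 1024 threads; the voxelIter < *devVoxelArraySize
// guard in the kernel makes the padding threads a no-op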
//
// Run
March << <blocks, threads>> > (devDensityCutoff,
devVoxelArraySize,
devNodeDensityArray,
devNodePositionArray,
devEdgeTable,
devTriTable,
devAllVertices,
devVertCounter);
//
// Check error
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess)
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
//
// Sync
hipDeviceSynchronize();
the_clock::time_point p5 = the_clock::now();
marchTime = duration_cast<microseconds>(p5 - p4).count();
//
// Compact verts and indices
if (cudaStatus == hipSuccess)
{
thrust::device_vector<float> t_devAllVertices(devAllVertices, devAllVertices + (voxelArraySize * 3 * 15));
thrust::device_vector<float> t_compactAllVertices(voxelArraySize * 3 * 15, 0);
thrust::copy_if(thrust::device, t_devAllVertices.begin(), t_devAllVertices.end(), t_compactAllVertices.begin(),is_non_zero());
thrust::copy(t_compactAllVertices.begin(), t_compactAllVertices.end(), devAllVertices);
cudaStatus = hipMemcpy(allVertices, devAllVertices, 15 * 3 * voxelArraySize * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess)
fprintf(stderr, "hipMemcpy failed!");
cudaStatus = hipMemcpy(&vertCounter, devVertCounter, sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess)
fprintf(stderr, "hipMemcpy failed!");
thrust::sequence(thrust::host, allIndices, allIndices + vertCounter, 0);
}
the_clock::time_point p6 = the_clock::now();
compactTime = duration_cast<microseconds>(p6 - p5).count();
//
// Free
hipFree(devDensityCutoff);
hipFree(devVoxelArraySize);
hipFree(devNodeDensityArray);
hipFree(devNodePositionArray);
hipFree(devEdgeTable);
hipFree(devTriTable);
hipFree(devAllVertices);
hipFree(devVertCounter);
the_clock::time_point p7 = the_clock::now();
freeTime = duration_cast<microseconds>(p7 - p6).count();
}
#endif | 2f28bbd82a06e40f70d1df27f08a20a81e5f1fa1.cu | #ifndef _CUDA_CU
#define _CUDA_CU
#include "Header.cuh"
#include "cuda_runtime_api.h"
#include "device_launch_parameters.h";
#include "DXF.h"
#include "Voxel.h"
#include <stdio.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include <chrono>
using std::chrono::duration_cast;
using std::chrono::microseconds;
typedef std::chrono::steady_clock the_clock;
//
// Interpolate between two nodes
__device__ void AddInterpolatedVertex(unsigned int voxelIter,
unsigned int vertexIter,
float* devVertexArray,
float* devNodePositionArray,
float* devNodeDensityArray,
unsigned int node1,
unsigned int node2,
float devDensityCutoff)
{
if (abs(devDensityCutoff - devNodeDensityArray[(voxelIter * 8) + node1]) < 0.00001f)
{
devVertexArray[(vertexIter * 3)] = devNodePositionArray[(voxelIter * (8 * 3)) + (node1 * 3)];
devVertexArray[(vertexIter * 3) + 1] = devNodePositionArray[(voxelIter * (8 * 3)) + (node1 * 3) + 1];
devVertexArray[(vertexIter * 3) + 2] = devNodePositionArray[(voxelIter * (8 * 3)) + (node1 * 3) + 2];
return;
}
if (abs(devDensityCutoff - devNodeDensityArray[(voxelIter * 8) + node2]) < 0.00001f)
{
devVertexArray[(vertexIter * 3)] = devNodePositionArray[(voxelIter * (8 * 3)) + (node2 * 3)];
devVertexArray[(vertexIter * 3) + 1] = devNodePositionArray[(voxelIter * (8 * 3)) + (node2 * 3) + 1];
devVertexArray[(vertexIter * 3) + 2] = devNodePositionArray[(voxelIter * (8 * 3)) + (node2 * 3) + 2];
return;
}
if (abs(devNodeDensityArray[(voxelIter * 8) + node1] - devNodeDensityArray[(voxelIter * 8) + node2]) < 0.00001f)
{
devVertexArray[(vertexIter * 3)] = devNodePositionArray[(voxelIter * (8 * 3)) + (node1 * 3)];
devVertexArray[(vertexIter * 3) + 1] = devNodePositionArray[(voxelIter * (8 * 3)) + (node1 * 3) + 1];
devVertexArray[(vertexIter * 3) + 2] = devNodePositionArray[(voxelIter * (8 * 3)) + (node1 * 3) + 2];
return;
}
float mu = (devDensityCutoff - devNodeDensityArray[(voxelIter * 8) + node1]) / (devNodeDensityArray[(voxelIter * 8) + node2] - devNodeDensityArray[(voxelIter * 8) + node1]);
devVertexArray[(vertexIter * 3)] = devNodePositionArray[(voxelIter * (8 * 3)) + (node1 * 3)] + mu * (devNodePositionArray[(voxelIter * (8 * 3)) + (node2 * 3)] - devNodePositionArray[(voxelIter * (8 * 3)) + (node1 * 3)]);
devVertexArray[(vertexIter * 3) + 1] = devNodePositionArray[(voxelIter * (8 * 3)) + (node1 * 3) + 1] + mu * (devNodePositionArray[(voxelIter * (8 * 3)) + (node2 * 3) + 1] - devNodePositionArray[(voxelIter * (8 * 3)) + (node1 * 3) + 1]);
devVertexArray[(vertexIter * 3) + 2] = devNodePositionArray[(voxelIter * (8 * 3)) + (node1 * 3) + 2] + mu * (devNodePositionArray[(voxelIter * (8 * 3)) + (node2 * 3) + 2] - devNodePositionArray[(voxelIter * (8 * 3)) + (node1 * 3) + 2]);
}
__global__ void March(float* devDensityCutoff,
int* devVoxelArraySize,
float* devNodeDensityArray,
float* devNodePositionArray,
int* devEdgeTable,
int* devTriTable,
float* devAllVertices,
int* devVertCounter)
{
int voxelIter = (blockIdx.x * blockDim.x) + threadIdx.x;
if(voxelIter < *devVoxelArraySize)
{
unsigned char voxelByte = 0x00;
if (devNodeDensityArray[(voxelIter * 8) + 1] < *devDensityCutoff) voxelByte |= 1;
if (devNodeDensityArray[(voxelIter * 8) + 5] < *devDensityCutoff) voxelByte |= 2;
if (devNodeDensityArray[(voxelIter * 8) + 4] < *devDensityCutoff) voxelByte |= 4;
if (devNodeDensityArray[(voxelIter * 8) + 0] < *devDensityCutoff) voxelByte |= 8;
if (devNodeDensityArray[(voxelIter * 8) + 3] < *devDensityCutoff) voxelByte |= 16;
if (devNodeDensityArray[(voxelIter * 8) + 7] < *devDensityCutoff) voxelByte |= 32;
if (devNodeDensityArray[(voxelIter * 8) + 6] < *devDensityCutoff) voxelByte |= 64;
if (devNodeDensityArray[(voxelIter * 8) + 2] < *devDensityCutoff) voxelByte |= 128;
if (devEdgeTable[voxelByte] != 0)
{
float vertexArray[12 * 3]; // one xyz triple for each of the 12 possible edge vertices
if (devEdgeTable[voxelByte] & 1) // AND operator
AddInterpolatedVertex(voxelIter, 0, vertexArray, devNodePositionArray, devNodeDensityArray, 1, 5, *devDensityCutoff);
if (devEdgeTable[voxelByte] & 2)
AddInterpolatedVertex(voxelIter, 1, vertexArray, devNodePositionArray, devNodeDensityArray, 5, 4, *devDensityCutoff);
if (devEdgeTable[voxelByte] & 4)
AddInterpolatedVertex(voxelIter, 2, vertexArray, devNodePositionArray, devNodeDensityArray, 4, 0, *devDensityCutoff);
if (devEdgeTable[voxelByte] & 8)
AddInterpolatedVertex(voxelIter, 3, vertexArray, devNodePositionArray, devNodeDensityArray, 0, 1, *devDensityCutoff);
if (devEdgeTable[voxelByte] & 16)
AddInterpolatedVertex(voxelIter, 4, vertexArray, devNodePositionArray, devNodeDensityArray, 3, 7, *devDensityCutoff);
if (devEdgeTable[voxelByte] & 32)
AddInterpolatedVertex(voxelIter, 5, vertexArray, devNodePositionArray, devNodeDensityArray, 7, 6, *devDensityCutoff);
if (devEdgeTable[voxelByte] & 64)
AddInterpolatedVertex(voxelIter, 6, vertexArray, devNodePositionArray, devNodeDensityArray, 6, 2, *devDensityCutoff);
if (devEdgeTable[voxelByte] & 128)
AddInterpolatedVertex(voxelIter, 7, vertexArray, devNodePositionArray, devNodeDensityArray, 2, 3, *devDensityCutoff);
if (devEdgeTable[voxelByte] & 256)
AddInterpolatedVertex(voxelIter, 8, vertexArray, devNodePositionArray, devNodeDensityArray, 1, 3, *devDensityCutoff);
if (devEdgeTable[voxelByte] & 512)
AddInterpolatedVertex(voxelIter, 9, vertexArray, devNodePositionArray, devNodeDensityArray, 5, 7, *devDensityCutoff);
if (devEdgeTable[voxelByte] & 1024)
AddInterpolatedVertex(voxelIter, 10, vertexArray, devNodePositionArray, devNodeDensityArray, 4, 6, *devDensityCutoff);
if (devEdgeTable[voxelByte] & 2048)
AddInterpolatedVertex(voxelIter, 11, vertexArray, devNodePositionArray, devNodeDensityArray, 0, 2, *devDensityCutoff);
for (int vertIter = 0; devTriTable[(voxelByte * 16) + vertIter] != -1; vertIter += 3)
{
devAllVertices[(voxelIter * (3 * 15)) + ((vertIter + 0) * 3) + 0] = vertexArray[(devTriTable[(voxelByte * 16) + (vertIter)] * 3) + 0];
devAllVertices[(voxelIter * (3 * 15)) + ((vertIter + 0) * 3) + 1] = vertexArray[(devTriTable[(voxelByte * 16) + (vertIter)] * 3) + 1];
devAllVertices[(voxelIter * (3 * 15)) + ((vertIter + 0) * 3) + 2] = vertexArray[(devTriTable[(voxelByte * 16) + (vertIter)] * 3) + 2];
devAllVertices[(voxelIter * (3 * 15)) + ((vertIter + 1) * 3) + 0] = vertexArray[(devTriTable[(voxelByte * 16) + (vertIter + 1)] * 3) + 0];
devAllVertices[(voxelIter * (3 * 15)) + ((vertIter + 1) * 3) + 1] = vertexArray[(devTriTable[(voxelByte * 16) + (vertIter + 1)] * 3) + 1];
devAllVertices[(voxelIter * (3 * 15)) + ((vertIter + 1) * 3) + 2] = vertexArray[(devTriTable[(voxelByte * 16) + (vertIter + 1)] * 3) + 2];
devAllVertices[(voxelIter * (3 * 15)) + ((vertIter + 2) * 3) + 0] = vertexArray[(devTriTable[(voxelByte * 16) + (vertIter + 2)] * 3) + 0];
devAllVertices[(voxelIter * (3 * 15)) + ((vertIter + 2) * 3) + 1] = vertexArray[(devTriTable[(voxelByte * 16) + (vertIter + 2)] * 3) + 1];
devAllVertices[(voxelIter * (3 * 15)) + ((vertIter + 2) * 3) + 2] = vertexArray[(devTriTable[(voxelByte * 16) + (vertIter + 2)] * 3) + 2];
atomicAdd(&devVertCounter[0], 3);
}
}
}
}
struct is_non_zero
{
__host__ __device__ bool operator()(const float x)
{
return x != 0;
}
};
void RunMarchingCubes(float densityCutoff,
Voxel* voxelArray,
int voxelArraySize,
XMFLOAT3* allVertices,
int* allIndices,
int& vertCounter,
int* triTable,
int* edgeTable,
double& nodeParseTime,
double& mallocTime,
double& memcpyTime,
double& marchTime,
double& compactTime,
double& freeTime)
{
the_clock::time_point p1 = the_clock::now();
//
// Create and Load intermediate arrays
float* nodeDensityArray = new float[8 * voxelArraySize];
float* nodePositionArray = new float[3 * 8 * voxelArraySize];
for (int i = 0; i < voxelArraySize; ++i)
{
for (int j = 0; j < 8; ++j)
{
nodeDensityArray[(i * 8) + j] = voxelArray[i].getNode(j)->density;
nodePositionArray[(i * (8 * 3)) + (j * 3) + 0] = voxelArray[i].getNode(j)->position.x;
nodePositionArray[(i * (8 * 3)) + (j * 3) + 1] = voxelArray[i].getNode(j)->position.y;
nodePositionArray[(i * (8 * 3)) + (j * 3) + 2] = voxelArray[i].getNode(j)->position.z;
}
}
the_clock::time_point p2 = the_clock::now();
nodeParseTime = duration_cast<microseconds>(p2 - p1).count();
float* devDensityCutoff = 0;
int* devVoxelArraySize = 0;
float* devNodeDensityArray = 0;
float* devNodePositionArray = 0;
int* devEdgeTable = 0;
int* devTriTable = 0;
float* devAllVertices = 0;
int* devVertCounter = 0;
cudaError_t cudaStatus;
//
// Malloc
cudaStatus = cudaMallocManaged((void**)&devDensityCutoff, sizeof(float));
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaMalloc failed!");
cudaStatus = cudaMallocManaged((void**)&devVoxelArraySize, sizeof(int));
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaMalloc failed!");
cudaStatus = cudaMallocManaged((void**)&devNodeDensityArray, 8 * voxelArraySize * sizeof(float));
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaMalloc failed!");
cudaStatus = cudaMallocManaged((void**)&devNodePositionArray, 3 * 8 * voxelArraySize * sizeof(float));
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaMalloc failed!");
cudaStatus = cudaMallocManaged((void**)&devEdgeTable, 256 * sizeof(int));
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaMalloc failed!");
cudaStatus = cudaMallocManaged((void**)&devTriTable, 256 * 16 * sizeof(int));
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaMalloc failed!");
cudaStatus = cudaMallocManaged((void**)&devVertCounter, sizeof(int));
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaMalloc failed!");
cudaStatus = cudaMallocManaged((void**)&devAllVertices, 15 * 3 * voxelArraySize * sizeof(float));
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaMalloc failed!");
the_clock::time_point p3 = the_clock::now();
mallocTime = duration_cast<microseconds>(p3 - p2).count();
//
// Initialise arrays with values
cudaStatus = cudaMemcpy(devDensityCutoff, &densityCutoff, sizeof(float), cudaMemcpyHostToDevice);
if(cudaStatus != cudaSuccess)
fprintf(stderr, "cudaMemcpy failed!");
cudaStatus = cudaMemcpy(devVoxelArraySize, &voxelArraySize, sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaMemcpy failed!");
cudaStatus = cudaMemcpy(devNodeDensityArray, nodeDensityArray, 8 * voxelArraySize * sizeof(float), cudaMemcpyHostToDevice);
if(cudaStatus != cudaSuccess)
fprintf(stderr, "cudaMemcpy failed!");
cudaStatus = cudaMemcpy(devNodePositionArray, nodePositionArray, 3 * 8 * voxelArraySize * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaMemcpy failed!");
cudaStatus = cudaMemcpy(devEdgeTable, edgeTable, 256 * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaMemcpy failed!");
cudaStatus = cudaMemcpy(devTriTable, triTable, 256 * 16 * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaMemcpy failed!");
//
// Delete intermediate dynamic arrays
delete[] nodeDensityArray;
nodeDensityArray = 0;
delete[] nodePositionArray;
nodePositionArray = 0;
the_clock::time_point p4 = the_clock::now();
memcpyTime = duration_cast<microseconds>(p4 - p3).count();
//
// Optimise thread hierarchies
int numThreads = voxelArraySize % 32 == 0 ? voxelArraySize : ((voxelArraySize / 32) + 1.0f) * 32;
int numBlocks = 1;
if (numThreads > 1024)
{
numBlocks = numThreads % 1024 == 0 ? (numThreads / 1024) : (numThreads / 1024) + 1;
numThreads = numThreads / numBlocks;
}
dim3 blocks(numBlocks);
dim3 threads(numThreads);
//
// Run
March << <blocks, threads>> > (devDensityCutoff,
devVoxelArraySize,
devNodeDensityArray,
devNodePositionArray,
devEdgeTable,
devTriTable,
devAllVertices,
devVertCounter);
//
// Check error
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
//
// Sync
cudaDeviceSynchronize();
the_clock::time_point p5 = the_clock::now();
marchTime = duration_cast<microseconds>(p5 - p4).count();
//
// Compact verts and indices
if (cudaStatus == cudaSuccess)
{
thrust::device_vector<float> t_devAllVertices(devAllVertices, devAllVertices + (voxelArraySize * 3 * 15));
thrust::device_vector<float> t_compactAllVertices(voxelArraySize * 3 * 15, 0);
thrust::copy_if(thrust::device, t_devAllVertices.begin(), t_devAllVertices.end(), t_compactAllVertices.begin(),is_non_zero());
thrust::copy(t_compactAllVertices.begin(), t_compactAllVertices.end(), devAllVertices);
cudaStatus = cudaMemcpy(allVertices, devAllVertices, 15 * 3 * voxelArraySize * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaMemcpy failed!");
cudaStatus = cudaMemcpy(&vertCounter, devVertCounter, sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaMemcpy failed!");
thrust::sequence(thrust::host, allIndices, allIndices + vertCounter, 0);
}
the_clock::time_point p6 = the_clock::now();
compactTime = duration_cast<microseconds>(p6 - p5).count();
//
// Free
cudaFree(devDensityCutoff);
cudaFree(devVoxelArraySize);
cudaFree(devNodeDensityArray);
cudaFree(devNodePositionArray);
cudaFree(devEdgeTable);
cudaFree(devTriTable);
cudaFree(devAllVertices);
cudaFree(devVertCounter);
the_clock::time_point p7 = the_clock::now();
freeTime = duration_cast<microseconds>(p7 - p6).count();
}
#endif |
a6072540a251506d14ff4364eacac28b84b9e029.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* SmoothL1LossLayer.cpp
*
* Created on: Nov 23, 2016
* Author: jkim
*/
#include <vector>
#include "SmoothL1LossLayer.h"
#include "MathFunctions.h"
#include "PropMgmt.h"
#include "SysLog.h"
#include "MemoryMgmt.h"
#define SMOOTHL1LOSSLAYER_LOG 0
using namespace std;
template <typename Dtype>
int SmoothL1LossLayer<Dtype>::INNER_ID = 11010;
template <typename Dtype>
SmoothL1LossLayer<Dtype>::SmoothL1LossLayer()
: SmoothL1LossLayer(NULL) {}
template <typename Dtype>
SmoothL1LossLayer<Dtype>::SmoothL1LossLayer(_SmoothL1LossPropLayer* prop)
: LossLayer<Dtype>(),
diff("diff"),
errors("errors"),
ones("ones") {
this->type = Layer<Dtype>::SmoothL1Loss;
if (prop) {
this->prop = NULL;
SNEW(this->prop, _SmoothL1LossPropLayer);
SASSUME0(this->prop != NULL);
*(this->prop) = *(prop);
} else {
this->prop = NULL;
}
}
template <typename Dtype>
SmoothL1LossLayer<Dtype>::~SmoothL1LossLayer() {
if (this->prop != NULL)
SFREE(this->prop);
}
template <typename Dtype>
void SmoothL1LossLayer<Dtype>::reshape() {
bool adjusted = Layer<Dtype>::_adjustInputShape();
if (adjusted) {
this->hasWeights = (this->_inputData.size() >= 3);
if (this->hasWeights) {
SASSERT(this->_inputData.size() == 4,
"If weights are used, must specify both inside and outside weights");
}
this->_outputData[0]->reshape({1, 1, 1, 1});
this->_outputData[0]->mutable_host_grad()[0] = GET_PROP(prop, Loss, lossWeight);
#if SMOOTHL1LOSSLAYER_LOG
printf("<%s> layer' output-0 has reshaped as: %dx%dx%dx%d\n",
GET_PROP(prop, Loss, name).c_str(), 1, 1, 1, 1);
#endif
}
const uint32_t inputSize = this->_inputData.size();
for (uint32_t i = 0; i < inputSize; i++) {
if (!Layer<Dtype>::_isInputShapeChanged(i))
continue;
const vector<uint32_t>& inputDataShape = this->_inputData[i]->getShape();
this->_inputShape[i] = inputDataShape;
// rpn_bbox_pred
if (i == 0) {
this->diff.reshape(inputDataShape);
this->errors.reshape(inputDataShape);
// vector of ones used to sum
this->ones.reshape(inputDataShape);
this->ones.reset_host_data(false, 1.0f);
}
// rpn_bbox_targets
else if (i == 1) {
// XXX: FullyConnectedLayer output (batches, 1, rows, 1) ,
// bbox_targets shape
if (this->_inputData[0]->getShape() != this->_inputData[1]->getShape()) {
this->_inputData[1]->reshape({this->_inputData[1]->getShape(2), 1,
this->_inputData[1]->getShape(3), 1});
assert(this->_inputData[0]->getShape() == this->_inputData[1]->getShape());
}
//assert(this->_inputData[0]->channels() == this->_inputData[1]->channels());
//assert(this->_inputData[0]->height() == this->_inputData[1]->height());
//assert(this->_inputData[0]->width() == this->_inputData[1]->width());
}
// rpn_bbox_inside_weights
else if (i == 2) {
if (this->hasWeights) {
if (this->_inputData[0]->getShape() != this->_inputData[2]->getShape()) {
this->_inputData[2]->reshape({this->_inputData[2]->getShape(2), 1,
this->_inputData[2]->getShape(3), 1});
assert(this->_inputData[0]->getShape() ==
this->_inputData[2]->getShape());
}
//assert(this->_inputData[0]->channels() == this->_inputData[2]->channels());
//assert(this->_inputData[0]->height() == this->_inputData[2]->height());
//assert(this->_inputData[0]->width() == this->_inputData[2]->width());
}
}
// rpn_bbox_outside_weights
else if (i == 3) {
if (this->hasWeights) {
if (this->_inputData[0]->getShape() != this->_inputData[3]->getShape()) {
this->_inputData[3]->reshape({this->_inputData[3]->getShape(2), 1,
this->_inputData[3]->getShape(3), 1});
assert(this->_inputData[0]->getShape() ==
this->_inputData[3]->getShape());
}
//assert(this->_inputData[0]->channels() == this->_inputData[3]->channels());
//assert(this->_inputData[0]->height() == this->_inputData[3]->height());
//assert(this->_inputData[0]->width() == this->_inputData[3]->width());
}
}
}
}
template <typename Dtype>
__global__ void SmoothL1Forward(const uint32_t n, const Dtype* in, Dtype* out,
Dtype sigma2) {
// f(x) = 0.5 * (sigma2 * x)^2 if |x| < 1 / sigma2 / sigma2
// |x| - 0.5 / sigma2 / sigma2 otherwise
CUDA_KERNEL_LOOP(index, n) {
Dtype val = in[index];
Dtype abs_val = abs(val);
if (abs_val < 1.0 / sigma2) {
out[index] = 0.5 * val * val * sigma2;
} else {
out[index] = abs_val - 0.5 / sigma2;
}
}
}
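// this is the Huber loss with transition point 1/sigma^2: quadratic near zero,
// linear beyond it, with value and slope matching at |x| = 1/sigma^2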
template <typename Dtype>
void SmoothL1LossLayer<Dtype>::feedforward() {
reshape();
const uint32_t count = this->_inputData[0]->getCount();
// prediction (inputData[0]) - target (inputData[1]) => diff
soooa_gpu_sub(
count,
this->_inputData[0]->device_data(),
this->_inputData[1]->device_data(),
this->diff.mutable_device_data()); // d := b0 - b1
#if SMOOTHL1LOSSLAYER_LOG
this->_printOn();
this->_inputData[0]->print_data();
this->_inputData[1]->print_data();
this->diff.print_data();
this->_printOff();
#endif
if (this->hasWeights) {
#if SMOOTHL1LOSSLAYER_LOG
this->_printOn();
this->_inputData[2]->print_data();
this->diff.print_data();
this->_printOff();
#endif
// apply "inside" weights
soooa_gpu_mul(
count,
this->_inputData[2]->device_data(),
this->diff.device_data(),
this->diff.mutable_device_data()); // d := w_in * (b0 - b1)
#if SMOOTHL1LOSSLAYER_LOG
this->_printOn();
this->diff.print_data();
this->_printOff();
#endif
}
// smoothL1Forward
const float sigma2 = GET_PROP(prop, SmoothL1Loss, sigma) * GET_PROP(prop, SmoothL1Loss, sigma);
hipLaunchKernelGGL(( SmoothL1Forward<Dtype>), dim3(SOOOA_GET_BLOCKS(count)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
count, this->diff.device_data(), this->errors.mutable_device_data(), sigma2);
CUDA_POST_KERNEL_CHECK;
#if SMOOTHL1LOSSLAYER_LOG
this->_printOn();
this->diff.print_data();
this->errors.print_data();
this->_printOff();
#endif
if (this->hasWeights) {
#if SMOOTHL1LOSSLAYER_LOG
this->_printOn();
this->_inputData[3]->print_data();
this->errors.print_data();
this->_printOff();
#endif
// apply "outside" weights
soooa_gpu_mul(
count,
this->_inputData[3]->device_data(),
this->errors.device_data(),
this->errors.mutable_device_data()); // d := w_out * SmoothL1(w_in * (b0 - b1))
#if SMOOTHL1LOSSLAYER_LOG
this->_printOn();
this->errors.print_data();
this->_printOff();
#endif
}
const uint32_t firstAxis = GET_PROP(prop, SmoothL1Loss, firstAxis);
const float lossWeight = GET_PROP(prop, Loss, lossWeight);
Dtype loss;
soooa_gpu_dot(count, this->ones.device_data(), this->errors.device_data(), &loss);
this->_outputData[0]->mutable_host_data()[0] = loss * Dtype(lossWeight) /
this->_inputData[0]->getShape(firstAxis);
//this->_outputData[0]->mutable_host_data()[0] = loss * Dtype(this->lossWeight);
//cout << "smoothl1loss: " << this->_outputData[0]->host_data()[0] << endl;
}
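// net effect: loss = lossWeight / N * sum_i w_out_i * SmoothL1(w_in_i * (pred_i - target_i)),
// where N is the size of the axis selected by firstAxis (typically the batch dimension)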
template <typename Dtype>
__global__ void SmoothL1Backward(const uint32_t n, const Dtype* in, Dtype* out,
Dtype sigma2) {
// f'(x) = sigma2 * sigma2 * x if |x| < 1 / sigma2 / sigma2
// = sign(x) otherwise
CUDA_KERNEL_LOOP(index, n) {
Dtype val = in[index];
Dtype abs_val = abs(val);
if (abs_val < 1.0 / sigma2) {
out[index] = sigma2 * val;
} else {
out[index] = (Dtype(0) < val) - (val < Dtype(0));
}
}
}
template <typename Dtype>
void SmoothL1LossLayer<Dtype>::backpropagation() {
// after forwards, diff holds w_in * (b0 - b1)
const float sigma2 = GET_PROP(prop, SmoothL1Loss, sigma) * GET_PROP(prop, SmoothL1Loss, sigma);
const uint32_t count = this->diff.getCount();
hipLaunchKernelGGL(( SmoothL1Backward<Dtype>), dim3(SOOOA_GET_BLOCKS(count)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
count, this->diff.device_data(), this->diff.mutable_device_data(), sigma2);
CUDA_POST_KERNEL_CHECK;
const vector<bool> propDown = GET_PROP(prop, SmoothL1Loss, propDown);
const uint32_t firstAxis = GET_PROP(prop, SmoothL1Loss, firstAxis);
for (uint32_t i = 0; i < 2; i++) {
if (propDown[i]) {
const Dtype sign = (i == 0) ? 1 : -1;
// XXX: caffe, top[0]->cpu_diff()[0] set
// 1 1.0f
//const Dtype alpha = sign * this->_outputData[0]->host_grad()[0] /
// this->_inputData[i]->batches();
const Dtype alpha = sign * GET_PROP(prop, Loss, lossWeight) /
this->_inputData[i]->getShape(firstAxis);
soooa_gpu_axpby(
count,
alpha,
this->diff.device_data(),
Dtype(0),
this->_inputData[i]->mutable_device_grad());
//this->_printOn();
//this->_inputData[i]->print_grad({}, false, -1);
//this->_printOff();
if (this->hasWeights) {
// Scale by "inside" weight
soooa_gpu_mul(
count,
this->_inputData[2]->device_data(),
this->_inputData[i]->device_grad(),
this->_inputData[i]->mutable_device_grad());
// Scale by "outside" weight
soooa_gpu_mul(
count,
this->_inputData[3]->device_data(),
this->_inputData[i]->device_grad(),
this->_inputData[i]->mutable_device_grad());
}
}
}
/*
if (GET_PROP(prop, SmoothL1Loss, name) == "rpn_loss_bbox") {
this->_printOn();
this->_inputData[i]->print_grad({}, false);
this->_printOff();
}
*/
}
template <typename Dtype>
Dtype SmoothL1LossLayer<Dtype>::cost() {
return this->_outputData[0]->host_data()[0];
}
/****************************************************************************
* layer callback functions
****************************************************************************/
template<typename Dtype>
void* SmoothL1LossLayer<Dtype>::initLayer() {
SmoothL1LossLayer* layer = NULL;
SNEW(layer, SmoothL1LossLayer<Dtype>);
SASSUME0(layer != NULL);
return (void*)layer;
}
template<typename Dtype>
void SmoothL1LossLayer<Dtype>::destroyLayer(void* instancePtr) {
SmoothL1LossLayer<Dtype>* layer = (SmoothL1LossLayer<Dtype>*)instancePtr;
SDELETE(layer);
}
template<typename Dtype>
void SmoothL1LossLayer<Dtype>::setInOutTensor(void* instancePtr, void* tensorPtr,
bool isInput, int index) {
if (isInput) {
SASSERT0(index < 4);
} else {
SASSERT0(index == 0);
}
SmoothL1LossLayer<Dtype>* layer = (SmoothL1LossLayer<Dtype>*)instancePtr;
if (isInput) {
SASSERT0(layer->_inputData.size() == index);
layer->_inputData.push_back((Data<Dtype>*)tensorPtr);
} else {
SASSERT0(layer->_outputData.size() == index);
layer->_outputData.push_back((Data<Dtype>*)tensorPtr);
}
}
template<typename Dtype>
bool SmoothL1LossLayer<Dtype>::allocLayerTensors(void* instancePtr) {
SmoothL1LossLayer<Dtype>* layer = (SmoothL1LossLayer<Dtype>*)instancePtr;
layer->reshape();
return true;
}
template<typename Dtype>
void SmoothL1LossLayer<Dtype>::forwardTensor(void* instancePtr, int miniBatchIdx) {
SmoothL1LossLayer<Dtype>* layer = (SmoothL1LossLayer<Dtype>*)instancePtr;
layer->feedforward();
}
template<typename Dtype>
void SmoothL1LossLayer<Dtype>::backwardTensor(void* instancePtr) {
SmoothL1LossLayer<Dtype>* layer = (SmoothL1LossLayer<Dtype>*)instancePtr;
layer->backpropagation();
}
template<typename Dtype>
void SmoothL1LossLayer<Dtype>::learnTensor(void* instancePtr) {
SASSERT0(false);
}
template<typename Dtype>
bool SmoothL1LossLayer<Dtype>::checkShape(vector<TensorShape> inputShape,
vector<TensorShape> &outputShape) {
if (inputShape.size() != 4)
return false;
TensorShape outputShape1;
outputShape1.N = 1;
outputShape1.C = 1;
outputShape1.H = 1;
outputShape1.W = 1;
outputShape.push_back(outputShape1);
return true;
}
template<typename Dtype>
uint64_t SmoothL1LossLayer<Dtype>::calcGPUSize(vector<TensorShape> inputShape) {
const size_t inputCount = tensorCount(inputShape[0]);
uint64_t size = 0;
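	// one term per internal buffer (diff, errors, ones), each sized from the first input;
	// the factor of 2 presumably covers data plus grad storage for each buffer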
size += ALIGNUP(sizeof(Dtype) * inputCount, SPARAM(CUDA_MEMPAGE_SIZE)) * 2UL;
size += ALIGNUP(sizeof(Dtype) * inputCount, SPARAM(CUDA_MEMPAGE_SIZE)) * 2UL;
size += ALIGNUP(sizeof(Dtype) * inputCount, SPARAM(CUDA_MEMPAGE_SIZE)) * 2UL;
return size;
}
template class SmoothL1LossLayer<float>;
| a6072540a251506d14ff4364eacac28b84b9e029.cu | /*
* SmoothL1LossLayer.cpp
*
* Created on: Nov 23, 2016
* Author: jkim
*/
#include <vector>
#include "SmoothL1LossLayer.h"
#include "MathFunctions.h"
#include "PropMgmt.h"
#include "SysLog.h"
#include "MemoryMgmt.h"
#define SMOOTHL1LOSSLAYER_LOG 0
using namespace std;
template <typename Dtype>
int SmoothL1LossLayer<Dtype>::INNER_ID = 11010;
template <typename Dtype>
SmoothL1LossLayer<Dtype>::SmoothL1LossLayer()
: SmoothL1LossLayer(NULL) {}
template <typename Dtype>
SmoothL1LossLayer<Dtype>::SmoothL1LossLayer(_SmoothL1LossPropLayer* prop)
: LossLayer<Dtype>(),
diff("diff"),
errors("errors"),
ones("ones") {
this->type = Layer<Dtype>::SmoothL1Loss;
if (prop) {
this->prop = NULL;
SNEW(this->prop, _SmoothL1LossPropLayer);
SASSUME0(this->prop != NULL);
*(this->prop) = *(prop);
} else {
this->prop = NULL;
}
}
template <typename Dtype>
SmoothL1LossLayer<Dtype>::~SmoothL1LossLayer() {
if (this->prop != NULL)
SFREE(this->prop);
}
template <typename Dtype>
void SmoothL1LossLayer<Dtype>::reshape() {
bool adjusted = Layer<Dtype>::_adjustInputShape();
if (adjusted) {
this->hasWeights = (this->_inputData.size() >= 3);
if (this->hasWeights) {
SASSERT(this->_inputData.size() == 4,
"If weights are used, must specify both inside and outside weights");
}
this->_outputData[0]->reshape({1, 1, 1, 1});
this->_outputData[0]->mutable_host_grad()[0] = GET_PROP(prop, Loss, lossWeight);
#if SMOOTHL1LOSSLAYER_LOG
printf("<%s> layer' output-0 has reshaped as: %dx%dx%dx%d\n",
GET_PROP(prop, Loss, name).c_str(), 1, 1, 1, 1);
#endif
}
const uint32_t inputSize = this->_inputData.size();
for (uint32_t i = 0; i < inputSize; i++) {
if (!Layer<Dtype>::_isInputShapeChanged(i))
continue;
const vector<uint32_t>& inputDataShape = this->_inputData[i]->getShape();
this->_inputShape[i] = inputDataShape;
// rpn_bbox_pred
if (i == 0) {
this->diff.reshape(inputDataShape);
this->errors.reshape(inputDataShape);
// vector of ones used to sum
this->ones.reshape(inputDataShape);
this->ones.reset_host_data(false, 1.0f);
}
// rpn_bbox_targets
else if (i == 1) {
			// XXX: reflect the current (batches, 1, rows, 1) output layout of FullyConnectedLayer
			// by forcing bbox_targets to be reshaped to match
if (this->_inputData[0]->getShape() != this->_inputData[1]->getShape()) {
this->_inputData[1]->reshape({this->_inputData[1]->getShape(2), 1,
this->_inputData[1]->getShape(3), 1});
assert(this->_inputData[0]->getShape() == this->_inputData[1]->getShape());
}
//assert(this->_inputData[0]->channels() == this->_inputData[1]->channels());
//assert(this->_inputData[0]->height() == this->_inputData[1]->height());
//assert(this->_inputData[0]->width() == this->_inputData[1]->width());
}
// rpn_bbox_inside_weights
else if (i == 2) {
if (this->hasWeights) {
if (this->_inputData[0]->getShape() != this->_inputData[2]->getShape()) {
this->_inputData[2]->reshape({this->_inputData[2]->getShape(2), 1,
this->_inputData[2]->getShape(3), 1});
assert(this->_inputData[0]->getShape() ==
this->_inputData[2]->getShape());
}
//assert(this->_inputData[0]->channels() == this->_inputData[2]->channels());
//assert(this->_inputData[0]->height() == this->_inputData[2]->height());
//assert(this->_inputData[0]->width() == this->_inputData[2]->width());
}
}
// rpn_bbox_outside_weights
else if (i == 3) {
if (this->hasWeights) {
if (this->_inputData[0]->getShape() != this->_inputData[3]->getShape()) {
this->_inputData[3]->reshape({this->_inputData[3]->getShape(2), 1,
this->_inputData[3]->getShape(3), 1});
assert(this->_inputData[0]->getShape() ==
this->_inputData[3]->getShape());
}
//assert(this->_inputData[0]->channels() == this->_inputData[3]->channels());
//assert(this->_inputData[0]->height() == this->_inputData[3]->height());
//assert(this->_inputData[0]->width() == this->_inputData[3]->width());
}
}
}
}
template <typename Dtype>
__global__ void SmoothL1Forward(const uint32_t n, const Dtype* in, Dtype* out,
Dtype sigma2) {
// f(x) = 0.5 * (sigma2 * x)^2 if |x| < 1 / sigma2 / sigma2
// |x| - 0.5 / sigma2 / sigma2 otherwise
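	// both branches give 0.5 / sigma2 at |x| = 1 / sigma2, so the loss is continuous there;
	// e.g. with sigma = 3 (sigma2 = 9) the quadratic zone is |x| < 1/9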
CUDA_KERNEL_LOOP(index, n) {
Dtype val = in[index];
Dtype abs_val = abs(val);
if (abs_val < 1.0 / sigma2) {
out[index] = 0.5 * val * val * sigma2;
} else {
out[index] = abs_val - 0.5 / sigma2;
}
}
}
template <typename Dtype>
void SmoothL1LossLayer<Dtype>::feedforward() {
reshape();
const uint32_t count = this->_inputData[0]->getCount();
// prediction (inputData[0]) - target (inputData[1]) => diff
soooa_gpu_sub(
count,
this->_inputData[0]->device_data(),
this->_inputData[1]->device_data(),
this->diff.mutable_device_data()); // d := b0 - b1
#if SMOOTHL1LOSSLAYER_LOG
this->_printOn();
this->_inputData[0]->print_data();
this->_inputData[1]->print_data();
this->diff.print_data();
this->_printOff();
#endif
if (this->hasWeights) {
#if SMOOTHL1LOSSLAYER_LOG
this->_printOn();
this->_inputData[2]->print_data();
this->diff.print_data();
this->_printOff();
#endif
// apply "inside" weights
soooa_gpu_mul(
count,
this->_inputData[2]->device_data(),
this->diff.device_data(),
this->diff.mutable_device_data()); // d := w_in * (b0 - b1)
#if SMOOTHL1LOSSLAYER_LOG
this->_printOn();
this->diff.print_data();
this->_printOff();
#endif
}
// smoothL1Forward
const float sigma2 = GET_PROP(prop, SmoothL1Loss, sigma) * GET_PROP(prop, SmoothL1Loss, sigma);
SmoothL1Forward<Dtype><<<SOOOA_GET_BLOCKS(count), SOOOA_CUDA_NUM_THREADS>>>(
count, this->diff.device_data(), this->errors.mutable_device_data(), sigma2);
CUDA_POST_KERNEL_CHECK;
#if SMOOTHL1LOSSLAYER_LOG
this->_printOn();
this->diff.print_data();
this->errors.print_data();
this->_printOff();
#endif
if (this->hasWeights) {
#if SMOOTHL1LOSSLAYER_LOG
this->_printOn();
this->_inputData[3]->print_data();
this->errors.print_data();
this->_printOff();
#endif
// apply "outside" weights
soooa_gpu_mul(
count,
this->_inputData[3]->device_data(),
this->errors.device_data(),
this->errors.mutable_device_data()); // d := w_out * SmoothL1(w_in * (b0 - b1))
#if SMOOTHL1LOSSLAYER_LOG
this->_printOn();
this->errors.print_data();
this->_printOff();
#endif
}
const uint32_t firstAxis = GET_PROP(prop, SmoothL1Loss, firstAxis);
const float lossWeight = GET_PROP(prop, Loss, lossWeight);
Dtype loss;
soooa_gpu_dot(count, this->ones.device_data(), this->errors.device_data(), &loss);
this->_outputData[0]->mutable_host_data()[0] = loss * Dtype(lossWeight) /
this->_inputData[0]->getShape(firstAxis);
//this->_outputData[0]->mutable_host_data()[0] = loss * Dtype(this->lossWeight);
//cout << "smoothl1loss: " << this->_outputData[0]->host_data()[0] << endl;
}
template <typename Dtype>
__global__ void SmoothL1Backward(const uint32_t n, const Dtype* in, Dtype* out,
Dtype sigma2) {
// f'(x) = sigma2 * sigma2 * x if |x| < 1 / sigma2 / sigma2
// = sign(x) otherwise
CUDA_KERNEL_LOOP(index, n) {
Dtype val = in[index];
Dtype abs_val = abs(val);
if (abs_val < 1.0 / sigma2) {
out[index] = sigma2 * val;
} else {
out[index] = (Dtype(0) < val) - (val < Dtype(0));
}
}
}
template <typename Dtype>
void SmoothL1LossLayer<Dtype>::backpropagation() {
// after forwards, diff holds w_in * (b0 - b1)
const float sigma2 = GET_PROP(prop, SmoothL1Loss, sigma) * GET_PROP(prop, SmoothL1Loss, sigma);
const uint32_t count = this->diff.getCount();
SmoothL1Backward<Dtype><<<SOOOA_GET_BLOCKS(count), SOOOA_CUDA_NUM_THREADS>>>(
count, this->diff.device_data(), this->diff.mutable_device_data(), sigma2);
CUDA_POST_KERNEL_CHECK;
const vector<bool> propDown = GET_PROP(prop, SmoothL1Loss, propDown);
const uint32_t firstAxis = GET_PROP(prop, SmoothL1Loss, firstAxis);
for (uint32_t i = 0; i < 2; i++) {
if (propDown[i]) {
const Dtype sign = (i == 0) ? 1 : -1;
			// XXX: could not find where caffe sets top[0]->cpu_diff()[0]; since it currently holds
			// a plain 1 rather than a special value, it is replaced with the constant 1.0f
//const Dtype alpha = sign * this->_outputData[0]->host_grad()[0] /
// this->_inputData[i]->batches();
const Dtype alpha = sign * GET_PROP(prop, Loss, lossWeight) /
this->_inputData[i]->getShape(firstAxis);
soooa_gpu_axpby(
count,
alpha,
this->diff.device_data(),
Dtype(0),
this->_inputData[i]->mutable_device_grad());
//this->_printOn();
//this->_inputData[i]->print_grad({}, false, -1);
//this->_printOff();
if (this->hasWeights) {
// Scale by "inside" weight
soooa_gpu_mul(
count,
this->_inputData[2]->device_data(),
this->_inputData[i]->device_grad(),
this->_inputData[i]->mutable_device_grad());
// Scale by "outside" weight
soooa_gpu_mul(
count,
this->_inputData[3]->device_data(),
this->_inputData[i]->device_grad(),
this->_inputData[i]->mutable_device_grad());
}
}
}
/*
if (GET_PROP(prop, SmoothL1Loss, name) == "rpn_loss_bbox") {
this->_printOn();
this->_inputData[i]->print_grad({}, false);
this->_printOff();
}
*/
}
template <typename Dtype>
Dtype SmoothL1LossLayer<Dtype>::cost() {
return this->_outputData[0]->host_data()[0];
}
/****************************************************************************
* layer callback functions
****************************************************************************/
template<typename Dtype>
void* SmoothL1LossLayer<Dtype>::initLayer() {
SmoothL1LossLayer* layer = NULL;
SNEW(layer, SmoothL1LossLayer<Dtype>);
SASSUME0(layer != NULL);
return (void*)layer;
}
template<typename Dtype>
void SmoothL1LossLayer<Dtype>::destroyLayer(void* instancePtr) {
SmoothL1LossLayer<Dtype>* layer = (SmoothL1LossLayer<Dtype>*)instancePtr;
SDELETE(layer);
}
template<typename Dtype>
void SmoothL1LossLayer<Dtype>::setInOutTensor(void* instancePtr, void* tensorPtr,
bool isInput, int index) {
if (isInput) {
SASSERT0(index < 4);
} else {
SASSERT0(index == 0);
}
SmoothL1LossLayer<Dtype>* layer = (SmoothL1LossLayer<Dtype>*)instancePtr;
if (isInput) {
SASSERT0(layer->_inputData.size() == index);
layer->_inputData.push_back((Data<Dtype>*)tensorPtr);
} else {
SASSERT0(layer->_outputData.size() == index);
layer->_outputData.push_back((Data<Dtype>*)tensorPtr);
}
}
template<typename Dtype>
bool SmoothL1LossLayer<Dtype>::allocLayerTensors(void* instancePtr) {
SmoothL1LossLayer<Dtype>* layer = (SmoothL1LossLayer<Dtype>*)instancePtr;
layer->reshape();
return true;
}
template<typename Dtype>
void SmoothL1LossLayer<Dtype>::forwardTensor(void* instancePtr, int miniBatchIdx) {
SmoothL1LossLayer<Dtype>* layer = (SmoothL1LossLayer<Dtype>*)instancePtr;
layer->feedforward();
}
template<typename Dtype>
void SmoothL1LossLayer<Dtype>::backwardTensor(void* instancePtr) {
SmoothL1LossLayer<Dtype>* layer = (SmoothL1LossLayer<Dtype>*)instancePtr;
layer->backpropagation();
}
template<typename Dtype>
void SmoothL1LossLayer<Dtype>::learnTensor(void* instancePtr) {
SASSERT0(false);
}
template<typename Dtype>
bool SmoothL1LossLayer<Dtype>::checkShape(vector<TensorShape> inputShape,
vector<TensorShape> &outputShape) {
if (inputShape.size() != 4)
return false;
TensorShape outputShape1;
outputShape1.N = 1;
outputShape1.C = 1;
outputShape1.H = 1;
outputShape1.W = 1;
outputShape.push_back(outputShape1);
return true;
}
template<typename Dtype>
uint64_t SmoothL1LossLayer<Dtype>::calcGPUSize(vector<TensorShape> inputShape) {
const size_t inputCount = tensorCount(inputShape[0]);
uint64_t size = 0;
size += ALIGNUP(sizeof(Dtype) * inputCount, SPARAM(CUDA_MEMPAGE_SIZE)) * 2UL;
size += ALIGNUP(sizeof(Dtype) * inputCount, SPARAM(CUDA_MEMPAGE_SIZE)) * 2UL;
size += ALIGNUP(sizeof(Dtype) * inputCount, SPARAM(CUDA_MEMPAGE_SIZE)) * 2UL;
return size;
}
template class SmoothL1LossLayer<float>;
|
eb0f63270510c8b0c7ffd45cc80488d59fcfdedd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Based on the work of Andrew Krepps
#include <iostream>
#include <random>
#include <chrono>
#include <stdio.h>
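// Shift used by the Caesar cipher kernels below: each capital letter is moved back by
// CYPHER_OFFSET positions, wrapping around within 'A'..'Z'.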
static const int CYPHER_OFFSET = 3;
__global__ void add(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] + b[thread_idx];
}
__global__ void subtract(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] - b[thread_idx];
}
__global__ void mult(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] * b[thread_idx];
}
__global__ void mod(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] % b[thread_idx];
}
__global__ void caesarCypher(char * textToEncrypt, const int offset)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
char c;
c = textToEncrypt[thread_idx] - offset;
	// This assumes the input array contains only capital letters
if (c < 'A')
{
c += 'Z' - 'A' + 1;
}
else if (c > 'Z')
{
c -= 'Z' - 'A' + 1;
}
textToEncrypt[thread_idx] = c;
}
void hostAdd(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] + b[i];
}
}
void hostSub(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] - b[i];
}
}
void hostMult(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] * b[i];
}
}
void hostMod(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
// Protect against divide by 0.
// cuda code catches this error and sets result to 0 by default.
if (b[i] == 0)
{
c[i] = 0;
}
else
{
c[i] = a[i] % b[i];
}
}
}
void hostCaesarCypher(char * textToEncrypt, const int offset, const int size)
{
for (int i = 0; i < size; ++i)
{
char c;
c = textToEncrypt[i] - offset;
		// This assumes the input array contains only capital letters
if (c < 'A')
{
c += 'Z' - 'A' + 1;
}
else if (c > 'Z')
{
c -= 'Z' - 'A' + 1;
}
textToEncrypt[i] = c;
}
}
void executeHostTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int a[totalThreads], b[totalThreads], c[totalThreads];
	// Create a random number generator that will produce values from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
// Add all of the numbers c[i] = a[i] + b[i];
hostAdd(a,b,c, totalThreads);
// Subtract all of the numbers c[i] = a[i] - b[i];
hostSub(a,b,c, totalThreads);
// Multiply all of the numbers c[i] = a[i] * b[i];
hostMult(a,b,c, totalThreads);
// Mod all of the numbers c[i] = a[i] % b[i];
hostMod(a,b,c, totalThreads);
// Allocate space for a character array.
int minChar = 'A';
int maxChar = 'Z';
std::uniform_int_distribution<int> charDist(minChar, maxChar);
char textToEncrypt[totalThreads];
for (int i = 0; i < totalThreads; ++i)
{
textToEncrypt[i] = charDist(gen);
}
	hostCaesarCypher(textToEncrypt, CYPHER_OFFSET, totalThreads);
}
void executeGPUTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int a[totalThreads], b[totalThreads], c[totalThreads];
int *gpu_a, *gpu_b, *gpu_c;
hipMalloc((void**)&gpu_a, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_b, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_c, totalThreads * sizeof(int));
	// Create a random number generator that will produce values from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
hipMemcpy(gpu_a, a, totalThreads * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(gpu_b, b, totalThreads * sizeof(int), hipMemcpyHostToDevice);
// Add all of the numbers c[i] = a[i] + b[i];
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, gpu_a,gpu_b,gpu_c);
hipMemcpy(c, gpu_c, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
// Subtract all of the numbers c[i] = a[i] - b[i];
hipLaunchKernelGGL(( subtract), dim3(numBlocks), dim3(blockSize), 0, 0, gpu_a,gpu_b,gpu_c);
hipMemcpy(c, gpu_c, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
// Multiply all of the numbers c[i] = a[i] * b[i];
hipLaunchKernelGGL(( mult), dim3(numBlocks), dim3(blockSize), 0, 0, gpu_a,gpu_b,gpu_c);
hipMemcpy(c, gpu_c, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
// Mod all of the numbers c[i] = a[i] % b[i];
hipLaunchKernelGGL(( mod), dim3(numBlocks), dim3(blockSize), 0, 0, gpu_a,gpu_b,gpu_c);
hipMemcpy(c, gpu_c, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipFree(gpu_a);
hipFree(gpu_b);
hipFree(gpu_c);
// Allocate space for a character array.
int minChar = 'A';
int maxChar = 'Z';
std::uniform_int_distribution<int> charDist(minChar, maxChar);
char textToEncrypt[totalThreads];
for (int i = 0; i < totalThreads; ++i)
{
textToEncrypt[i] = charDist(gen);
}
char * gpuTextToEncrypt;
hipMalloc((void**)&gpuTextToEncrypt, totalThreads * sizeof(char));
	hipMemcpy(gpuTextToEncrypt, textToEncrypt, totalThreads * sizeof(char), hipMemcpyHostToDevice);
	hipLaunchKernelGGL(( caesarCypher), dim3(numBlocks), dim3(blockSize), 0, 0, gpuTextToEncrypt, CYPHER_OFFSET);
	hipMemcpy(textToEncrypt, gpuTextToEncrypt, totalThreads*sizeof(char), hipMemcpyDeviceToHost);
	hipFree(gpuTextToEncrypt);
}
void printArray(const int * const arr, const int xSize, const int ySize)
{
for (size_t i = 0; i < xSize; ++i)
{
for(size_t j = 0; j < ySize; ++j)
{
std::cout << arr[i * ySize + j] << " ";
}
std::cout << '\n';
}
std::cout << std::flush;
}
int main(int argc, char** argv)
{
// read command line arguments
int totalThreads = 256;
int blockSize = 256;
if (argc >= 2) {
totalThreads = atoi(argv[1]);
}
if (argc >= 3) {
blockSize = atoi(argv[2]);
}
int numBlocks = totalThreads/blockSize;
// validate command line arguments
if (totalThreads % blockSize != 0) {
++numBlocks;
totalThreads = numBlocks*blockSize;
printf("Warning: Total thread count is not evenly divisible by the block size\n");
printf("The total number of threads will be rounded up to %d\n", totalThreads);
}
auto startTime = std::chrono::system_clock::now();
executeHostTest(totalThreads, blockSize, numBlocks);
auto endTime = std::chrono::system_clock::now();
std::chrono::duration<double> totalTime = endTime-startTime;
std::cout << "Host execution took: " << totalTime.count() << " seconds." << std::endl;
startTime = std::chrono::system_clock::now();
executeGPUTest(totalThreads, blockSize, numBlocks);
endTime = std::chrono::system_clock::now();
totalTime = endTime-startTime;
std::cout << "GPU execution took: " << totalTime.count() << " seconds." << std::endl;
return 0;
}
| eb0f63270510c8b0c7ffd45cc80488d59fcfdedd.cu | //Based on the work of Andrew Krepps
#include <iostream>
#include <random>
#include <chrono>
#include <stdio.h>
static const int CYPHER_OFFSET = 3;
__global__ void add(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] + b[thread_idx];
}
__global__ void subtract(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] - b[thread_idx];
}
__global__ void mult(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] * b[thread_idx];
}
__global__ void mod(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] % b[thread_idx];
}
__global__ void caesarCypher(char * textToEncrypt, const int offset)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
char c;
c = textToEncrypt[thread_idx] - offset;
	// This assumes the input array contains only capital letters
if (c < 'A')
{
c += 'Z' - 'A' + 1;
}
else if (c > 'Z')
{
c -= 'Z' - 'A' + 1;
}
textToEncrypt[thread_idx] = c;
}
void hostAdd(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] + b[i];
}
}
void hostSub(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] - b[i];
}
}
void hostMult(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] * b[i];
}
}
void hostMod(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
// Protect against divide by 0.
// cuda code catches this error and sets result to 0 by default.
if (b[i] == 0)
{
c[i] = 0;
}
else
{
c[i] = a[i] % b[i];
}
}
}
void hostCaesarCypher(char * textToEncrypt, const int offset, const int size)
{
for (int i = 0; i < size; ++i)
{
char c;
c = textToEncrypt[i] - offset;
		// This assumes the input array contains only capital letters
if (c < 'A')
{
c += 'Z' - 'A' + 1;
}
else if (c > 'Z')
{
c -= 'Z' - 'A' + 1;
}
textToEncrypt[i] = c;
}
}
void executeHostTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int a[totalThreads], b[totalThreads], c[totalThreads];
	// Create a random number generator that will produce values from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
// Add all of the numbers c[i] = a[i] + b[i];
hostAdd(a,b,c, totalThreads);
// Subtract all of the numbers c[i] = a[i] - b[i];
hostSub(a,b,c, totalThreads);
// Multiply all of the numbers c[i] = a[i] * b[i];
hostMult(a,b,c, totalThreads);
// Mod all of the numbers c[i] = a[i] % b[i];
hostMod(a,b,c, totalThreads);
// Allocate space for a character array.
int minChar = 'A';
int maxChar = 'Z';
std::uniform_int_distribution<int> charDist(minChar, maxChar);
char textToEncrypt[totalThreads];
for (int i = 0; i < totalThreads; ++i)
{
textToEncrypt[i] = charDist(gen);
}
	hostCaesarCypher(textToEncrypt, CYPHER_OFFSET, totalThreads);
}
void executeGPUTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int a[totalThreads], b[totalThreads], c[totalThreads];
int *gpu_a, *gpu_b, *gpu_c;
cudaMalloc((void**)&gpu_a, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_b, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_c, totalThreads * sizeof(int));
	// Create a random number generator that will produce values from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
cudaMemcpy(gpu_a, a, totalThreads * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_b, b, totalThreads * sizeof(int), cudaMemcpyHostToDevice);
// Add all of the numbers c[i] = a[i] + b[i];
add<<<numBlocks, blockSize>>>(gpu_a,gpu_b,gpu_c);
cudaMemcpy(c, gpu_c, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
// Subtract all of the numbers c[i] = a[i] - b[i];
subtract<<<numBlocks, blockSize>>>(gpu_a,gpu_b,gpu_c);
cudaMemcpy(c, gpu_c, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
// Multiply all of the numbers c[i] = a[i] * b[i];
mult<<<numBlocks, blockSize>>>(gpu_a,gpu_b,gpu_c);
cudaMemcpy(c, gpu_c, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
// Mod all of the numbers c[i] = a[i] % b[i];
mod<<<numBlocks, blockSize>>>(gpu_a,gpu_b,gpu_c);
cudaMemcpy(c, gpu_c, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(gpu_a);
cudaFree(gpu_b);
cudaFree(gpu_c);
// Allocate space for a character array.
int minChar = 'A';
int maxChar = 'Z';
std::uniform_int_distribution<int> charDist(minChar, maxChar);
char textToEncrypt[totalThreads];
for (int i = 0; i < totalThreads; ++i)
{
textToEncrypt[i] = charDist(gen);
}
char * gpuTextToEncrypt;
cudaMalloc((void**)&gpuTextToEncrypt, totalThreads * sizeof(char));
	cudaMemcpy(gpuTextToEncrypt, textToEncrypt, totalThreads * sizeof(char), cudaMemcpyHostToDevice);
	caesarCypher<<<numBlocks, blockSize>>>(gpuTextToEncrypt, CYPHER_OFFSET);
	cudaMemcpy(textToEncrypt, gpuTextToEncrypt, totalThreads*sizeof(char), cudaMemcpyDeviceToHost);
	cudaFree(gpuTextToEncrypt);
}
void printArray(const int * const arr, const int xSize, const int ySize)
{
for (size_t i = 0; i < xSize; ++i)
{
for(size_t j = 0; j < ySize; ++j)
{
std::cout << arr[i * ySize + j] << " ";
}
std::cout << '\n';
}
std::cout << std::flush;
}
int main(int argc, char** argv)
{
// read command line arguments
int totalThreads = 256;
int blockSize = 256;
if (argc >= 2) {
totalThreads = atoi(argv[1]);
}
if (argc >= 3) {
blockSize = atoi(argv[2]);
}
int numBlocks = totalThreads/blockSize;
// validate command line arguments
if (totalThreads % blockSize != 0) {
++numBlocks;
totalThreads = numBlocks*blockSize;
printf("Warning: Total thread count is not evenly divisible by the block size\n");
printf("The total number of threads will be rounded up to %d\n", totalThreads);
}
auto startTime = std::chrono::system_clock::now();
executeHostTest(totalThreads, blockSize, numBlocks);
auto endTime = std::chrono::system_clock::now();
std::chrono::duration<double> totalTime = endTime-startTime;
std::cout << "Host execution took: " << totalTime.count() << " seconds." << std::endl;
startTime = std::chrono::system_clock::now();
executeGPUTest(totalThreads, blockSize, numBlocks);
endTime = std::chrono::system_clock::now();
totalTime = endTime-startTime;
std::cout << "GPU execution took: " << totalTime.count() << " seconds." << std::endl;
return 0;
}
|
0df807804bf4a24cdf127ea555ded5e8c48a6329.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2018-2020, Michael P. Howard
// Copyright (c) 2021, Auburn University
// This file is part of the azplugins project, released under the Modified BSD License.
/*!
* \file TwoStepLangevinFlowGPU.cu
* \brief Definition of kernel drivers and kernels for TwoStepLangevinFlowGPU
*/
#include "TwoStepLangevinFlowGPU.cuh"
#include "FlowFields.h"
namespace azplugins
{
namespace gpu
{
namespace kernel
{
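//! First half of a velocity-Verlet style update for the Langevin flow integrator: drift the
//! positions by dt using the half-kicked velocities, wrap them back into the box, and apply the
//! first velocity half-kick; friction, noise, and the flow field enter in langevin_flow_step2.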
__global__ void langevin_flow_step1(Scalar4 *d_pos,
int3 *d_image,
Scalar4 *d_vel,
const Scalar3 *d_accel,
const unsigned int *d_group,
const BoxDim box,
const unsigned int N,
const Scalar dt)
{
const unsigned int grp_idx = blockDim.x * blockIdx.x + threadIdx.x;
if (grp_idx >= N) return;
const unsigned int idx = d_group[grp_idx];
// position
const Scalar4 postype = d_pos[idx];
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
unsigned int type = __scalar_as_int(postype.w);
// velocity
const Scalar4 velmass = d_vel[idx];
Scalar3 vel = make_scalar3(velmass.x, velmass.y, velmass.z);
Scalar mass = velmass.w;
// acceleration
const Scalar3 accel = d_accel[idx];
// update position and wrap
pos += (vel + Scalar(0.5) * dt * accel) * dt;
int3 image = d_image[idx];
box.wrap(pos,image);
// update velocity
vel += Scalar(0.5) * dt * accel;
d_pos[idx] = make_scalar4(pos.x, pos.y, pos.z, __int_as_scalar(type));
d_vel[idx] = make_scalar4(vel.x, vel.y, vel.z, mass);
d_image[idx] = image;
}
} // end namespace kernel
hipError_t langevin_flow_step1(Scalar4 *d_pos,
int3 *d_image,
Scalar4 *d_vel,
const Scalar3 *d_accel,
const unsigned int *d_group,
const BoxDim& box,
const unsigned int N,
const Scalar dt,
const unsigned int block_size)
{
if (N == 0) return hipSuccess;
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)kernel::langevin_flow_step1);
max_block_size = attr.maxThreadsPerBlock;
}
const int run_block_size = min(block_size, max_block_size);
hipLaunchKernelGGL(( kernel::langevin_flow_step1), dim3(N/run_block_size+1), dim3(run_block_size), 0, 0, d_pos,
d_image,
d_vel,
d_accel,
d_group,
box,
N,
dt);
return hipSuccess;
}
//! Explicit instantiation of ConstantFlow integrator
template hipError_t langevin_flow_step2<azplugins::ConstantFlow>(Scalar4 *d_vel,
Scalar3 *d_accel,
const Scalar4 *d_pos,
const Scalar4 *d_net_force,
const unsigned int *d_tag,
const unsigned int *d_group,
const Scalar *d_diameter,
const Scalar lambda,
const Scalar *d_gamma,
const unsigned int ntypes,
const azplugins::ConstantFlow& flow_field,
const unsigned int N,
const Scalar dt,
const Scalar T,
const unsigned int timestep,
const unsigned int seed,
bool noiseless,
bool use_lambda,
const unsigned int block_size);
//! Explicit instantiation of ParabolicFlow integrator
template hipError_t langevin_flow_step2<azplugins::ParabolicFlow>(Scalar4 *d_vel,
Scalar3 *d_accel,
const Scalar4 *d_pos,
const Scalar4 *d_net_force,
const unsigned int *d_tag,
const unsigned int *d_group,
const Scalar *d_diameter,
const Scalar lambda,
const Scalar *d_gamma,
const unsigned int ntypes,
const azplugins::ParabolicFlow& flow_field,
const unsigned int N,
const Scalar dt,
const Scalar T,
const unsigned int timestep,
const unsigned int seed,
bool noiseless,
bool use_lambda,
const unsigned int block_size);
//! Explicit instantiation of QuiescentFluid integrator
template hipError_t langevin_flow_step2<azplugins::QuiescentFluid>(Scalar4 *d_vel,
Scalar3 *d_accel,
const Scalar4 *d_pos,
const Scalar4 *d_net_force,
const unsigned int *d_tag,
const unsigned int *d_group,
const Scalar *d_diameter,
const Scalar lambda,
const Scalar *d_gamma,
const unsigned int ntypes,
const azplugins::QuiescentFluid& flow_field,
const unsigned int N,
const Scalar dt,
const Scalar T,
const unsigned int timestep,
const unsigned int seed,
bool noiseless,
bool use_lambda,
const unsigned int block_size);
} // end namespace gpu
} // end namespace azplugins
| 0df807804bf4a24cdf127ea555ded5e8c48a6329.cu | // Copyright (c) 2018-2020, Michael P. Howard
// Copyright (c) 2021, Auburn University
// This file is part of the azplugins project, released under the Modified BSD License.
/*!
* \file TwoStepLangevinFlowGPU.cu
* \brief Definition of kernel drivers and kernels for TwoStepLangevinFlowGPU
*/
#include "TwoStepLangevinFlowGPU.cuh"
#include "FlowFields.h"
namespace azplugins
{
namespace gpu
{
namespace kernel
{
__global__ void langevin_flow_step1(Scalar4 *d_pos,
int3 *d_image,
Scalar4 *d_vel,
const Scalar3 *d_accel,
const unsigned int *d_group,
const BoxDim box,
const unsigned int N,
const Scalar dt)
{
const unsigned int grp_idx = blockDim.x * blockIdx.x + threadIdx.x;
if (grp_idx >= N) return;
const unsigned int idx = d_group[grp_idx];
// position
const Scalar4 postype = d_pos[idx];
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
unsigned int type = __scalar_as_int(postype.w);
// velocity
const Scalar4 velmass = d_vel[idx];
Scalar3 vel = make_scalar3(velmass.x, velmass.y, velmass.z);
Scalar mass = velmass.w;
// acceleration
const Scalar3 accel = d_accel[idx];
// update position and wrap
pos += (vel + Scalar(0.5) * dt * accel) * dt;
int3 image = d_image[idx];
box.wrap(pos,image);
// update velocity
vel += Scalar(0.5) * dt * accel;
d_pos[idx] = make_scalar4(pos.x, pos.y, pos.z, __int_as_scalar(type));
d_vel[idx] = make_scalar4(vel.x, vel.y, vel.z, mass);
d_image[idx] = image;
}
} // end namespace kernel
cudaError_t langevin_flow_step1(Scalar4 *d_pos,
int3 *d_image,
Scalar4 *d_vel,
const Scalar3 *d_accel,
const unsigned int *d_group,
const BoxDim& box,
const unsigned int N,
const Scalar dt,
const unsigned int block_size)
{
if (N == 0) return cudaSuccess;
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)kernel::langevin_flow_step1);
max_block_size = attr.maxThreadsPerBlock;
}
const int run_block_size = min(block_size, max_block_size);
kernel::langevin_flow_step1<<<N/run_block_size+1, run_block_size>>>(d_pos,
d_image,
d_vel,
d_accel,
d_group,
box,
N,
dt);
return cudaSuccess;
}
//! Explicit instantiation of ConstantFlow integrator
template cudaError_t langevin_flow_step2<azplugins::ConstantFlow>(Scalar4 *d_vel,
Scalar3 *d_accel,
const Scalar4 *d_pos,
const Scalar4 *d_net_force,
const unsigned int *d_tag,
const unsigned int *d_group,
const Scalar *d_diameter,
const Scalar lambda,
const Scalar *d_gamma,
const unsigned int ntypes,
const azplugins::ConstantFlow& flow_field,
const unsigned int N,
const Scalar dt,
const Scalar T,
const unsigned int timestep,
const unsigned int seed,
bool noiseless,
bool use_lambda,
const unsigned int block_size);
//! Explicit instantiation of ParabolicFlow integrator
template cudaError_t langevin_flow_step2<azplugins::ParabolicFlow>(Scalar4 *d_vel,
Scalar3 *d_accel,
const Scalar4 *d_pos,
const Scalar4 *d_net_force,
const unsigned int *d_tag,
const unsigned int *d_group,
const Scalar *d_diameter,
const Scalar lambda,
const Scalar *d_gamma,
const unsigned int ntypes,
const azplugins::ParabolicFlow& flow_field,
const unsigned int N,
const Scalar dt,
const Scalar T,
const unsigned int timestep,
const unsigned int seed,
bool noiseless,
bool use_lambda,
const unsigned int block_size);
//! Explicit instantiation of QuiescentFluid integrator
template cudaError_t langevin_flow_step2<azplugins::QuiescentFluid>(Scalar4 *d_vel,
Scalar3 *d_accel,
const Scalar4 *d_pos,
const Scalar4 *d_net_force,
const unsigned int *d_tag,
const unsigned int *d_group,
const Scalar *d_diameter,
const Scalar lambda,
const Scalar *d_gamma,
const unsigned int ntypes,
const azplugins::QuiescentFluid& flow_field,
const unsigned int N,
const Scalar dt,
const Scalar T,
const unsigned int timestep,
const unsigned int seed,
bool noiseless,
bool use_lambda,
const unsigned int block_size);
} // end namespace gpu
} // end namespace azplugins
|
20354d41aca0c6d0ebdb7e46bff2207c596ac67c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/zmergecg.cu normal z -> s, Tue Feb 9 16:05:43 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_s
// These routines merge multiple kernels from smergecg into one
// for a description see
// "Reformulated Conjugate Gradient for the Energy-Aware
// Solution of Linear Systems on GPUs (ICPP '13)
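// e.g. each fused SpMV kernel below computes z = A*d and, in the same pass, starts the reduction
// of the dot product d^T*z needed for the CG coefficients, writing one partial sum per thread
// block into vtmp; the reduce kernels then collapse those partial sums.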
// accelerated reduction for one vector
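// pattern: each thread accumulates a grid-strided partial sum into shared memory, a tree
// reduction halves the active threads down to one warp, and the last 32 entries are finished
// warp-synchronously through a volatile pointer (real precisions) or with explicit barriers
// (complex precisions); the per-block result is stored in vtmp2[blockIdx.x].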
__global__ void
magma_scgreduce_kernel_spmv1(
int Gs,
int n,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_S_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_S_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// accelerated reduction for two vectors
__global__ void
magma_scgreduce_kernel_spmv2(
int Gs,
int n,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_S_ZERO;
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_S_ZERO;
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
// computes the SpMV using CSR and the first step of the reduction
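// one thread per row: thread i walks its row segment [drowptr[i], drowptr[i+1]), writes
// z[i] = (A*d)[i], and seeds the shared-memory reduction with d[i] * z[i].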
__global__ void
magma_scgmerge_spmvcsr_kernel(
int n,
float * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
float * d,
float * z,
float * vtmp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0);
if( i<n ) {
float dot = MAGMA_S_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * d[ dcolind[j] ];
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELL and the first step of the reduction
__global__ void
magma_scgmerge_spmvell_kernel(
int n,
int num_cols_per_row,
float * dval,
magma_index_t * dcolind,
float * d,
float * z,
float * vtmp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0);
if(i < n ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int k = 0; k < num_cols_per_row; k++ ) {
int col = dcolind [ n * k + i ];
float val = dval [ n * k + i ];
if( val != 0)
dot += val * d[ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELLPACK and the first step of the reduction
__global__ void
magma_scgmerge_spmvellpack_kernel(
int n,
int num_cols_per_row,
float * dval,
magma_index_t * dcolind,
float * d,
float * z,
float * vtmp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0);
if(i < n ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int k = 0; k < num_cols_per_row; k++ ) {
int col = dcolind [ num_cols_per_row * i + k ];
float val = dval [ num_cols_per_row * i + k ];
if( val != 0)
dot += val * d[ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using SELL alignment 1 and the first step of the reduction
__global__ void
magma_scgmerge_spmvell_kernelb1(
int n,
int blocksize,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * d,
float * z,
float * vtmp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0);
int idx = threadIdx.x; // local row
int bdx = blockIdx.x; // global block index
int row = bdx * 256 + idx; // global row index
// int lblocksize = ( row + blocksize < num_rows) ? blocksize : ( num_rows - blocksize * (row/blocksize) );
int lrow = threadIdx.x%blocksize; // local row;
if( row < n ) {
int offset = drowptr[ row/blocksize ];
int border = (drowptr[ row/blocksize+1 ]-offset)/blocksize;
float dot = MAGMA_S_MAKE(0.0, 0.0);
        for ( int k = 0; k < border; k++) {
            int col = dcolind [ offset+ blocksize * k + lrow ];
            float val = dval[ offset+ blocksize * k + lrow ];
dot = dot + val * d [ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
/*
if(i < n ) {
int offset = drowptr[ blockIdx.x ];
int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int k = 0; k < border; k++){
int col = dcolind [ offset+ blocksize * k + threadIdx.x ];
float val = dval[offset+ blocksize * k + threadIdx.x];
if( val != 0){
dot += val*d[col];
}
}
//float dot = MAGMA_S_MAKE(0.0, 0.0);
//for ( int k = 0; k < num_cols_per_row; k++ ) {
// int col = dcolind [ n * k + i ];
// float val = dval [ n * k + i ];
// if( val != 0)
// dot += val * d[ col ];
//}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}*/
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELLRT 8 threads per row
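// T threads cooperate on one row: each thread handles ceil(rowlength/T) entries and the per-row
// partial sums are folded together in shared memory before one thread stores z[i].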
__global__ void
magma_scgmerge_spmvellpackrt_kernel_8(
int n,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
float * d,
float * z,
float * vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ float shared[];
if(i < n ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//float val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
float val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 4 ) {
shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// computes the SpMV using ELLRT, 16 threads per row
__global__ void
magma_scgmerge_spmvellpackrt_kernel_16(
int n,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
float * d,
float * z,
float * vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ float shared[];
if(i < n ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//float val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
float val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 8 ) {
shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// computes the SpMV using ELLRT, 32 threads per row
__global__ void
magma_scgmerge_spmvellpackrt_kernel_32(
int n,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
float * d,
float * z,
float * vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ float shared[];
if(i < n ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//float val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
float val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 16 ) {
shared[idb]+=shared[idb+16];
if( idp < 8 ) shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// additional kernel necessary to compute first reduction step
__global__ void
magma_scgmerge_spmvellpackrt_kernel2(
int n,
float * z,
float * d,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = ( i < n ) ? z[i]*d[i] : MAGMA_S_MAKE(0.0, 0.0);
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using SELLC
__global__ void
magma_scgmerge_spmvsellc_kernel(
int num_rows,
int blocksize,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * d,
float * z,
float * vtmp)
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int offset = drowptr[ blockIdx.x ];
int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0);
if(i < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n ++) {
int col = dcolind [offset+ blocksize * n + Idx ];
float val = dval[offset+ blocksize * n + Idx];
if( val != 0) {
dot=dot+val*d[col];
}
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
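// SELL-P layout: rows are packed in slices of blocksize rows, each slice padded to a multiple
// of blocksize*T nonzeros, drowptr stores the slice offsets, and T threads per row accumulate
// partial products that are folded in shared memory.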
__global__ void
magma_scgmerge_spmvsellpt_kernel_8(
int num_rows,
int blocksize,
int T,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * d,
float * z)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ) {
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
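// Note: in the SELL-P kernels above and below, the thread block is two-dimensional:
// threadIdx.x walks the rows of one slice (blocksize of them) and threadIdx.y
// provides T threads per row. The _8, _16 and _32 variants differ only in the depth
// of the shared-memory reduction that combines the T partial sums of each row
// before the result is written to z.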
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_scgmerge_spmvsellpt_kernel_16(
int num_rows,
int blocksize,
int T,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * d,
float * z)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ) {
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_scgmerge_spmvsellpt_kernel_32(
int num_rows,
int blocksize,
int T,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * d,
float * z)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ) {
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// kernel to handle scalars
__global__ void // rho = beta/tmp; gamma = beta;
magma_scg_rhokernel(
float * skp ) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ) {
float tmp = skp[1];
skp[3] = tmp/skp[4];
skp[2] = tmp;
}
}
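// Note: as far as can be inferred from the kernels in this file, the skp[] scalar
// workspace is laid out as follows (an informal reading, not an authoritative
// interface description):
// skp[0] = alpha (= beta/gamma, set by magma_scg_alphabetakernel)
// skp[1] = beta (the freshly reduced dot product: <r,r>, or <r,h> in the
// preconditioned variants; copied in by the host wrappers)
// skp[2] = gamma (the previous beta, saved by magma_scg_rhokernel)
// skp[3] = rho (= beta/tmp, set by magma_scg_rhokernel)
// skp[4] = tmp (= <d, A d>, reduced by the merged SpMV)
// skp[6] = <r,r> in the preconditioned variants (copied in by xrbeta2/jcgmerge)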
/**
Purpose
-------
Merges the first SpMV using different formats with the dot product
and the computation of rho
Arguments
---------
@param[in]
A magma_s_matrix
input matrix
@param[in]
d1 magmaFloat_ptr
temporary vector
@param[in]
d2 magmaFloat_ptr
temporary vector
@param[in]
dd magmaFloat_ptr
input vector d
@param[out]
dz magmaFloat_ptr
output vector z
@param[out]
skp magmaFloat_ptr
array for parameters ( skp[3]=rho )
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_scgmerge_spmv1(
magma_s_matrix A,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr dd,
magmaFloat_ptr dz,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( A.num_rows, local_block_size ) );
dim3 Gs_next;
int Ms = local_block_size * sizeof( float );
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR )
hipLaunchKernelGGL(( magma_scgmerge_spmvcsr_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
A.num_rows, A.dval, A.drow, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_ELLPACKT )
hipLaunchKernelGGL(( magma_scgmerge_spmvellpack_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_ELL )
hipLaunchKernelGGL(( magma_scgmerge_spmvell_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_CUCSR ) {
hipsparseHandle_t cusparseHandle = 0;
hipsparseMatDescr_t descr = 0;
float c_one = MAGMA_S_ONE;
float c_zero = MAGMA_S_ZERO;
hipsparseCreate( &cusparseHandle );
hipsparseSetStream( cusparseHandle, queue->cuda_stream() );
hipsparseCreateMatDescr( &descr );
hipsparseSetMatType( descr, HIPSPARSE_MATRIX_TYPE_GENERAL );
hipsparseSetMatIndexBase( descr, HIPSPARSE_INDEX_BASE_ZERO );
hipsparseScsrmv( cusparseHandle,HIPSPARSE_OPERATION_NON_TRANSPOSE,
A.num_rows, A.num_cols, A.nnz, &c_one, descr,
A.dval, A.drow, A.dcol, dd, &c_zero, dz );
hipsparseDestroyMatDescr( descr );
hipsparseDestroy( cusparseHandle );
cusparseHandle = 0;
descr = 0;
hipLaunchKernelGGL(( magma_scgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
A.num_rows, dz, dd, d1 );
}
else if ( A.storage_type == Magma_SELLP && A.alignment == 1 ) {
hipLaunchKernelGGL(( magma_scgmerge_spmvell_kernelb1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
A.num_rows, A.blocksize,
A.dval, A.dcol, A.drow, dd, dz, d1 );
}
else if ( A.storage_type == Magma_SELLP && A.alignment > 1) {
int num_threadssellp = A.blocksize*A.alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threadssellp > 256 )
printf("error: too much shared memory requested.\n");
dim3 block( A.blocksize, A.alignment, 1);
int dimgrid1 = int( sqrt( float( A.numblocks )));
int dimgrid2 = magma_ceildiv( A.numblocks, dimgrid1 );
dim3 gridsellp( dimgrid1, dimgrid2, 1);
int Mssellp = num_threadssellp * sizeof( float );
if ( A.alignment == 8)
hipLaunchKernelGGL(( magma_scgmerge_spmvsellpt_kernel_8)
, dim3(gridsellp), dim3(block), Mssellp, queue->cuda_stream() ,
A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else if ( A.alignment == 16)
hipLaunchKernelGGL(( magma_scgmerge_spmvsellpt_kernel_16)
, dim3(gridsellp), dim3(block), Mssellp, queue->cuda_stream() ,
A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else if ( A.alignment == 32)
hipLaunchKernelGGL(( magma_scgmerge_spmvsellpt_kernel_32)
, dim3(gridsellp), dim3(block), Mssellp, queue->cuda_stream() ,
A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else
printf("error: alignment not supported.\n");
// in case of using SELLP, we can't efficiently merge the
// dot product and the first reduction loop into the SpMV kernel
// as the SpMV grid would result in low occupancy.
hipLaunchKernelGGL(( magma_scgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
A.num_rows, dz, dd, d1 );
}
else if ( A.storage_type == Magma_ELLRT ) {
// in case of using ELLRT, we need a different grid, assigning
// threads_per_row processors to each row
// the block size is num_threads
// fixed values
int num_blocks = magma_ceildiv( A.num_rows, A.blocksize );
int num_threads = A.alignment*A.blocksize;
int real_row_length = magma_roundup( A.max_nnz_row, A.alignment );
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
int dimgrid1 = int( sqrt( float( num_blocks )));
int dimgrid2 = magma_ceildiv( num_blocks, dimgrid1 );
dim3 gridellrt( dimgrid1, dimgrid2, 1);
int Mellrt = A.alignment * A.blocksize * sizeof( float );
// printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms);
if ( A.alignment == 32 ) {
hipLaunchKernelGGL(( magma_scgmerge_spmvellpackrt_kernel_32)
, dim3(gridellrt), dim3(num_threads) , Mellrt, queue->cuda_stream() ,
A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else if ( A.alignment == 16 ) {
hipLaunchKernelGGL(( magma_scgmerge_spmvellpackrt_kernel_16)
, dim3(gridellrt), dim3(num_threads) , Mellrt, queue->cuda_stream() ,
A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else if ( A.alignment == 8 ) {
hipLaunchKernelGGL(( magma_scgmerge_spmvellpackrt_kernel_8)
, dim3(gridellrt), dim3(num_threads) , Mellrt, queue->cuda_stream() ,
A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else {
printf("error: alignment %d not supported.\n", int(A.alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
// in case of using ELLRT, we can't efficiently merge the
// dot product and the first reduction loop into the SpMV kernel
// as the SpMV grid would result in low occupancy.
hipLaunchKernelGGL(( magma_scgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
A.num_rows, dz, dd, d1 );
}
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_scgreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, A.num_rows, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_scopyvector( 1, aux1, 1, skp+4, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_scg_rhokernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
// updates x and r and computes the first part of the dot product r*r
__global__ void
magma_scgmerge_xrbeta_kernel(
int n,
float * x,
float * r,
float * d,
float * z,
float * skp,
float * vtmp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
float rho = skp[3];
float mrho = MAGMA_S_MAKE( -1.0, 0.0)*rho;
temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0);
if( i<n ) {
x[i] += rho * d[i];
r[i] += mrho * z[i];
temp[ Idx ] = r[i] * r[i];
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// kernel to handle scalars
__global__ void //alpha = beta / gamma
magma_scg_alphabetakernel(
float * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ) {
float tmp1 = skp[1];
skp[0] = tmp1/skp[2];
//printf("beta=%e\n", MAGMA_S_REAL(tmp1));
}
}
// update search Krylov vector d
__global__ void
magma_scg_d_kernel(
int n,
float * skp,
float * r,
float * d )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
float alpha = skp[0];
if( i<n ) {
d[i] = r[i] + alpha * d[i];
}
}
/**
Purpose
-------
Merges the update of r and x with the dot product and then performs
the update of the Krylov vector d
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaFloat_ptr
temporary vector
@param[in]
d2 magmaFloat_ptr
temporary vector
@param[in,out]
dx magmaFloat_ptr
input vector x
@param[in,out]
dr magmaFloat_ptr
input/output vector r
@param[in]
dd magmaFloat_ptr
input vector d
@param[in]
dz magmaFloat_ptr
input vector z
@param[in]
skp magmaFloat_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_ssygpuk
********************************************************************/
extern "C" magma_int_t
magma_scgmerge_xrbeta(
magma_int_t n,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr dx,
magmaFloat_ptr dr,
magmaFloat_ptr dd,
magmaFloat_ptr dz,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( float );
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
hipLaunchKernelGGL(( magma_scgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(),
n, dx, dr, dd, dz, skp, d1);
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_scgreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_scopyvector( 1, aux1, 1, skp+1, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_scg_alphabetakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
dim3 Bs3( local_block_size );
dim3 Gs3( magma_ceildiv( n, local_block_size ) );
hipLaunchKernelGGL(( magma_scg_d_kernel), dim3(Gs3), dim3(Bs3), 0, queue->cuda_stream(), n, skp, dr, dd );
return MAGMA_SUCCESS;
}
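// Note: taken together, the two host wrappers above implement one iteration of the
// merged (unpreconditioned) CG update. A minimal sketch, assuming the matrix A, the
// vectors dd, dz, dx, dr, the reduction buffers d1/d2 and the scalar array skp have
// already been allocated and initialized on the device (hypothetical setup not
// shown):
//
// for( magma_int_t iter = 0; iter < maxiter; iter++ ) {
// // z = A*d, skp[4] = <d,z>, skp[3] = rho
// magma_scgmerge_spmv1( A, d1, d2, dd, dz, skp, queue );
// // x += rho*d, r -= rho*z, skp[1] = <r,r>, skp[0] = alpha, d = r + alpha*d
// magma_scgmerge_xrbeta( A.num_rows, d1, d2, dx, dr, dd, dz, skp, queue );
// // convergence test on skp[1] omitted here
// }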
/* -------------------------------------------------------------------------- */
// updates x and r
__global__ void
magma_spcgmerge_xrbeta_kernel(
int n,
float * x,
float * r,
float * d,
float * z,
float * skp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
float rho = skp[3];
float mrho = MAGMA_S_MAKE( -1.0, 0.0)*rho;
if( i<n ) {
x[i] += rho * d[i];
r[i] += mrho * z[i];
}
}
// dot product for multiple vectors
__global__ void
magma_smsdot_one_kernel_1(
int n,
float * v0,
float * w0,
float * vtmp)
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// 1 vector pair v(i)/w(i)
temp[ Idx ] = ( i < n ) ?
v0[ i ] * w0[ i ] : MAGMA_S_ZERO;
temp[ Idx + blockDim.x ] = ( i < n ) ?
v0[ i ] * v0[ i ] : MAGMA_S_ZERO;
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
vtmp[ blockIdx.x+n ] = temp[ blockDim.x ];
}
}
/**
Purpose
-------
Updates the iterate x and the residual r (first part of the merged preconditioned
CG update; the dot products and the update of the Krylov vector d follow in
magma_spcgmerge_xrbeta2)
Arguments
---------
@param[in]
n int
dimension n
@param[in,out]
dx magmaFloat_ptr
input vector x
@param[in,out]
dr magmaFloat_ptr
input/output vector r
@param[in]
dd magmaFloat_ptr
input vector d
@param[in]
dz magmaFloat_ptr
input vector z
@param[in]
skp magmaFloat_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_ssygpuk
********************************************************************/
extern "C" magma_int_t
magma_spcgmerge_xrbeta1(
magma_int_t n,
magmaFloat_ptr dx,
magmaFloat_ptr dr,
magmaFloat_ptr dd,
magmaFloat_ptr dz,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
hipLaunchKernelGGL(( magma_spcgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream(),
n, dx, dr, dd, dz, skp );
return MAGMA_SUCCESS;
}
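// Note: for the preconditioned CG variant the merged update is split in two:
// magma_spcgmerge_xrbeta1 above only applies the x/r updates, so that the
// preconditioner can be applied to the new residual in between, and
// magma_spcgmerge_xrbeta2 below then performs the dot products and the update of
// the Krylov vector d.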
/* -------------------------------------------------------------------------- */
/**
Purpose
-------
Computes the dot products <r,h> and <r,r> and then performs
the update of the Krylov vector d (second part of the merged preconditioned CG update)
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaFloat_ptr
temporary vector
@param[in]
d2 magmaFloat_ptr
temporary vector
@param[in]
dh magmaFloat_ptr
input vector h
@param[in]
dr magmaFloat_ptr
input/output vector r
@param[in]
dd magmaFloat_ptr
input vector d
@param[in]
skp magmaFloat_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_ssygpuk
********************************************************************/
extern "C" magma_int_t
magma_spcgmerge_xrbeta2(
magma_int_t n,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr dh,
magmaFloat_ptr dr,
magmaFloat_ptr dd,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 4*local_block_size * sizeof( float );
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
hipLaunchKernelGGL(( magma_smsdot_one_kernel_1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(),
n, dr, dh, d1);
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_scgreduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_scopyvector( 1, aux1, 1, skp+1, 1, queue );
magma_scopyvector( 1, aux1+n, 1, skp+6, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_scg_alphabetakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
dim3 Bs3( local_block_size );
dim3 Gs3( magma_ceildiv( n, local_block_size ) );
hipLaunchKernelGGL(( magma_scg_d_kernel), dim3(Gs3), dim3(Bs3), 0, queue->cuda_stream(), n, skp, dh, dd );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
// updates x and r
__global__ void
magma_sjcgmerge_xrbeta_kernel(
int n,
float * diag,
float * x,
float * r,
float * d,
float * z,
float * h,
float * vtmp,
float * skp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
float rho = skp[3];
float mrho = MAGMA_S_MAKE( -1.0, 0.0)*rho;
if( i<n ) {
x[i] += rho * d[i];
r[i] += mrho * z[i];
h[i] = r[i] * diag[i];
}
__syncthreads();
temp[ Idx ] = ( i < n ) ?
h[ i ] * r[ i ] : MAGMA_S_ZERO;
temp[ Idx + blockDim.x ] = ( i < n ) ?
r[ i ] * r[ i ] : MAGMA_S_ZERO;
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
vtmp[ blockIdx.x+n ] = temp[ blockDim.x ];
}
}
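// Note: this kernel fuses the Jacobi-preconditioned variant into one pass: it
// updates x and r, applies the diagonal scaling diag (presumably the inverted
// matrix diagonal) via h[i] = r[i] * diag[i], and starts the block-wise reduction
// of both <h,r> and <r,r> in one shared-memory buffer.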
/**
Purpose
-------
Merges the update of r and x with the application of the Jacobi preconditioner
(h = diag .* r), the dot products <h,r> and <r,r>, and the update of the Krylov vector d
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaFloat_ptr
temporary vector
@param[in]
d2 magmaFloat_ptr
temporary vector
@param[in]
dh magmaFloat_ptr
input vector h
@param[in]
dr magmaFloat_ptr
input/output vector r
@param[in]
skp magmaFloat_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_ssygpuk
********************************************************************/
extern "C" magma_int_t
magma_sjcgmerge_xrbeta(
magma_int_t n,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr diag,
magmaFloat_ptr dx,
magmaFloat_ptr dr,
magmaFloat_ptr dd,
magmaFloat_ptr dz,
magmaFloat_ptr dh,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 4*local_block_size * sizeof( float );
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
hipLaunchKernelGGL(( magma_sjcgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
n, diag, dx, dr, dd, dz, dh, d1, skp );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_scgreduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() ,
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_scopyvector( 1, aux1, 1, skp+1, 1, queue );
magma_scopyvector( 1, aux1+n, 1, skp+6, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_scg_alphabetakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
dim3 Bs3( local_block_size );
dim3 Gs3( magma_ceildiv( n, local_block_size ) );
hipLaunchKernelGGL(( magma_scg_d_kernel), dim3(Gs3), dim3(Bs3), 0, queue->cuda_stream(), n, skp, dh, dd );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
| 20354d41aca0c6d0ebdb7e46bff2207c596ac67c.cu |
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/zmergecg.cu normal z -> s, Tue Feb 9 16:05:43 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_s
// These routines merge multiple kernels from smergecg into one
// for a description see
// "Reformulated Conjugate Gradient for the Energy-Aware
// Solution of Linear Systems on GPUs (ICPP '13)
// accelerated reduction for one vector
__global__ void
magma_scgreduce_kernel_spmv1(
int Gs,
int n,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_S_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_S_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
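// Illustrative sketch (not part of MAGMA): the shared-memory reduction idiom used
// throughout this file, isolated into a minimal standalone kernel. It assumes a
// block size of 256 threads, like the host wrappers in this file, and produces one
// partial sum per block. The warp-synchronous volatile tail mirrors the PRECISION_s
// branches above; on architectures with independent thread scheduling (Volta and
// newer), __syncwarp() between the steps would be required for strict correctness.
__global__ void
example_block_sum_kernel(
int n,
const float * in,
float * partial )
{
__shared__ float temp[ 256 ];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
// load one element per thread, padding with zero past the end of the array
temp[ Idx ] = ( i < n ) ? in[ i ] : 0.0f;
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
if ( Idx < 32 ) {
// warp-synchronous tail
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
if ( Idx == 0 ) {
partial[ blockIdx.x ] = temp[ 0 ];
}
}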
// accelerated reduction for two vectors
__global__ void
magma_scgreduce_kernel_spmv2(
int Gs,
int n,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_S_ZERO;
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_S_ZERO;
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
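// Note: magma_scgreduce_kernel_spmv2 above reduces two dot products at once; the
// input buffer vtmp is laid out as two concatenated length-n segments (first
// product in vtmp[0..n-1], second in vtmp[n..2n-1]), and the shared-memory buffer
// is split the same way via the j*blockSize offsets.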
// computes the SpMV using CSR and the first step of the reduction
__global__ void
magma_scgmerge_spmvcsr_kernel(
int n,
float * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
float * d,
float * z,
float * vtmp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0);
if( i<n ) {
float dot = MAGMA_S_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * d[ dcolind[j] ];
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELL and the first step of the reduction
__global__ void
magma_scgmerge_spmvell_kernel(
int n,
int num_cols_per_row,
float * dval,
magma_index_t * dcolind,
float * d,
float * z,
float * vtmp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0);
if(i < n ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int k = 0; k < num_cols_per_row; k++ ) {
int col = dcolind [ n * k + i ];
float val = dval [ n * k + i ];
if( val != 0)
dot += val * d[ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELLPACK and the first step of the reduction
__global__ void
magma_scgmerge_spmvellpack_kernel(
int n,
int num_cols_per_row,
float * dval,
magma_index_t * dcolind,
float * d,
float * z,
float * vtmp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0);
if(i < n ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int k = 0; k < num_cols_per_row; k++ ) {
int col = dcolind [ num_cols_per_row * i + k ];
float val = dval [ num_cols_per_row * i + k ];
if( val != 0)
dot += val * d[ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
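// Note: the two kernels above differ only in the memory layout of the padded
// matrix: the ELL kernel indexes dval[ n * k + i ] (column-major slices, giving
// coalesced loads across the threads of a warp), while the ELLPACK kernel indexes
// dval[ num_cols_per_row * i + k ] (row-major, one contiguous padded row per
// matrix row).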
// computes the SpMV using SELL alignment 1 and the first step of the reduction
__global__ void
magma_scgmerge_spmvell_kernelb1(
int n,
int blocksize,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * d,
float * z,
float * vtmp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0);
int idx = threadIdx.x; // local row
int bdx = blockIdx.x; // global block index
int row = bdx * 256 + idx; // global row index
// int lblocksize = ( row + blocksize < num_rows) ? blocksize : ( num_rows - blocksize * (row/blocksize) );
int lrow = threadIdx.x%blocksize; // local row;
if( row < n ) {
int offset = drowptr[ row/blocksize ];
int border = (drowptr[ row/blocksize+1 ]-offset)/blocksize;
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int k = 0; k < border; k++) { // loop index renamed from n to avoid shadowing the row count parameter
int col = dcolind [ offset+ blocksize * k + lrow ];
float val = dval[ offset+ blocksize * k + lrow ];
dot = dot + val * d [ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
/*
if(i < n ) {
int offset = drowptr[ blockIdx.x ];
int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int k = 0; k < border; k++){
int col = dcolind [ offset+ blocksize * k + threadIdx.x ];
float val = dval[offset+ blocksize * k + threadIdx.x];
if( val != 0){
dot += val*d[col];
}
}
//float dot = MAGMA_S_MAKE(0.0, 0.0);
//for ( int k = 0; k < num_cols_per_row; k++ ) {
// int col = dcolind [ n * k + i ];
// float val = dval [ n * k + i ];
// if( val != 0)
// dot += val * d[ col ];
//}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}*/
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELLRT 8 threads per row
__global__ void
magma_scgmerge_spmvellpackrt_kernel_8(
int n,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
float * d,
float * z,
float * vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ float shared[];
if(i < n ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//float val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
float val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 4 ) {
shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// computes the SpMV using ELLRT 16 threads per row
__global__ void
magma_scgmerge_spmvellpackrt_kernel_16(
int n,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
float * d,
float * z,
float * vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ float shared[];
if(i < n ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//float val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
float val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 8 ) {
shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// computes the SpMV using ELLRT 32 threads per row
__global__ void
magma_scgmerge_spmvellpackrt_kernel_32(
int n,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
float * d,
float * z,
float * vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ float shared[];
if(i < n ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//float val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
float val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 16 ) {
shared[idb]+=shared[idb+16];
if( idp < 8 ) shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// additional kernel necessary to compute first reduction step
__global__ void
magma_scgmerge_spmvellpackrt_kernel2(
int n,
float * z,
float * d,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = ( i < n ) ? z[i]*d[i] : MAGMA_S_MAKE(0.0, 0.0);
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
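// Note: this helper exists because the ELLRT and SELL-P SpMV kernels above use
// row-blocked 2D grids that do not match the 1D reduction grid; the dot product
// <d,z> is therefore started here in a separate pass over the result vector (see
// also the remarks inside magma_scgmerge_spmv1 below).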
// computes the SpMV using SELLC
__global__ void
magma_scgmerge_spmvsellc_kernel(
int num_rows,
int blocksize,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * d,
float * z,
float * vtmp)
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int offset = drowptr[ blockIdx.x ];
int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0);
if(i < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n ++) {
int col = dcolind [offset+ blocksize * n + Idx ];
float val = dval[offset+ blocksize * n + Idx];
if( val != 0) {
dot=dot+val*d[col];
}
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_scgmerge_spmvsellpt_kernel_8(
int num_rows,
int blocksize,
int T,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * d,
float * z)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ) {
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_scgmerge_spmvsellpt_kernel_16(
int num_rows,
int blocksize,
int T,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * d,
float * z)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ) {
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_scgmerge_spmvsellpt_kernel_32(
int num_rows,
int blocksize,
int T,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * d,
float * z)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ) {
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// kernel to handle scalars
__global__ void // rho = beta/tmp; gamma = beta;
magma_scg_rhokernel(
float * skp ) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ) {
float tmp = skp[1];
skp[3] = tmp/skp[4];
skp[2] = tmp;
}
}
/**
Purpose
-------
Merges the first SpMV using different formats with the dot product
and the computation of rho
Arguments
---------
@param[in]
A magma_s_matrix
input matrix
@param[in]
d1 magmaFloat_ptr
temporary vector
@param[in]
d2 magmaFloat_ptr
temporary vector
@param[in]
dd magmaFloat_ptr
input vector d
@param[out]
dz magmaFloat_ptr
output vector z
@param[out]
skp magmaFloat_ptr
array for parameters ( skp[3]=rho )
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_scgmerge_spmv1(
magma_s_matrix A,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr dd,
magmaFloat_ptr dz,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( A.num_rows, local_block_size ) );
dim3 Gs_next;
int Ms = local_block_size * sizeof( float );
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR )
magma_scgmerge_spmvcsr_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( A.num_rows, A.dval, A.drow, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_ELLPACKT )
magma_scgmerge_spmvellpack_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_ELL )
magma_scgmerge_spmvell_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_CUCSR ) {
cusparseHandle_t cusparseHandle = 0;
cusparseMatDescr_t descr = 0;
float c_one = MAGMA_S_ONE;
float c_zero = MAGMA_S_ZERO;
cusparseCreate( &cusparseHandle );
cusparseSetStream( cusparseHandle, queue->cuda_stream() );
cusparseCreateMatDescr( &descr );
cusparseSetMatType( descr, CUSPARSE_MATRIX_TYPE_GENERAL );
cusparseSetMatIndexBase( descr, CUSPARSE_INDEX_BASE_ZERO );
cusparseScsrmv( cusparseHandle,CUSPARSE_OPERATION_NON_TRANSPOSE,
A.num_rows, A.num_cols, A.nnz, &c_one, descr,
A.dval, A.drow, A.dcol, dd, &c_zero, dz );
cusparseDestroyMatDescr( descr );
cusparseDestroy( cusparseHandle );
cusparseHandle = 0;
descr = 0;
magma_scgmerge_spmvellpackrt_kernel2<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( A.num_rows, dz, dd, d1 );
}
else if ( A.storage_type == Magma_SELLP && A.alignment == 1 ) {
magma_scgmerge_spmvell_kernelb1<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( A.num_rows, A.blocksize,
A.dval, A.dcol, A.drow, dd, dz, d1 );
}
else if ( A.storage_type == Magma_SELLP && A.alignment > 1) {
int num_threadssellp = A.blocksize*A.alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threadssellp > 256 )
printf("error: too much shared memory requested.\n");
dim3 block( A.blocksize, A.alignment, 1);
int dimgrid1 = int( sqrt( float( A.numblocks )));
int dimgrid2 = magma_ceildiv( A.numblocks, dimgrid1 );
dim3 gridsellp( dimgrid1, dimgrid2, 1);
int Mssellp = num_threadssellp * sizeof( float );
if ( A.alignment == 8)
magma_scgmerge_spmvsellpt_kernel_8
<<< gridsellp, block, Mssellp, queue->cuda_stream() >>>
( A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else if ( A.alignment == 16)
magma_scgmerge_spmvsellpt_kernel_16
<<< gridsellp, block, Mssellp, queue->cuda_stream() >>>
( A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else if ( A.alignment == 32)
magma_scgmerge_spmvsellpt_kernel_32
<<< gridsellp, block, Mssellp, queue->cuda_stream() >>>
( A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else
printf("error: alignment not supported.\n");
// in case of using SELLP, we can't efficiently merge the
// dot product and the first reduction loop into the SpMV kernel
// as the SpMV grid would result in low occupancy.
magma_scgmerge_spmvellpackrt_kernel2<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( A.num_rows, dz, dd, d1 );
}
else if ( A.storage_type == Magma_ELLRT ) {
// in case of using ELLRT, we need a different grid, assigning
// threads_per_row processors to each row
// the block size is num_threads
// fixed values
int num_blocks = magma_ceildiv( A.num_rows, A.blocksize );
int num_threads = A.alignment*A.blocksize;
int real_row_length = magma_roundup( A.max_nnz_row, A.alignment );
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
int dimgrid1 = int( sqrt( float( num_blocks )));
int dimgrid2 = magma_ceildiv( num_blocks, dimgrid1 );
dim3 gridellrt( dimgrid1, dimgrid2, 1);
int Mellrt = A.alignment * A.blocksize * sizeof( float );
// printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms);
if ( A.alignment == 32 ) {
magma_scgmerge_spmvellpackrt_kernel_32
<<< gridellrt, num_threads , Mellrt, queue->cuda_stream() >>>
( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else if ( A.alignment == 16 ) {
magma_scgmerge_spmvellpackrt_kernel_16
<<< gridellrt, num_threads , Mellrt, queue->cuda_stream() >>>
( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else if ( A.alignment == 8 ) {
magma_scgmerge_spmvellpackrt_kernel_8
<<< gridellrt, num_threads , Mellrt, queue->cuda_stream() >>>
( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else {
printf("error: alignment %d not supported.\n", int(A.alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
// in case of using ELLRT, we can't efficiently merge the
// dot product and the first reduction loop into the SpMV kernel
// as the SpMV grid would result in low occupancy.
magma_scgmerge_spmvellpackrt_kernel2<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( A.num_rows, dz, dd, d1 );
}
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_scgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
( Gs.x, A.num_rows, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_scopyvector( 1, aux1, 1, skp+4, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_scg_rhokernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
// updates x and r and computes the first part of the dot product r*r
__global__ void
magma_scgmerge_xrbeta_kernel(
int n,
float * x,
float * r,
float * d,
float * z,
float * skp,
float * vtmp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
float rho = skp[3];
float mrho = MAGMA_S_MAKE( -1.0, 0.0)*rho;
temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0);
if( i<n ) {
x[i] += rho * d[i];
r[i] += mrho * z[i];
temp[ Idx ] = r[i] * r[i];
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// kernel to handle scalars
__global__ void //alpha = beta / gamma
magma_scg_alphabetakernel(
float * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ) {
float tmp1 = skp[1];
skp[0] = tmp1/skp[2];
//printf("beta=%e\n", MAGMA_S_REAL(tmp1));
}
}
// update search Krylov vector d
__global__ void
magma_scg_d_kernel(
int n,
float * skp,
float * r,
float * d )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
float alpha = skp[0];
if( i<n ) {
d[i] = r[i] + alpha * d[i];
}
}
/**
Purpose
-------
    Merges the update of r and x with the dot product and then performs
    the update of the Krylov vector d
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaFloat_ptr
temporary vector
@param[in]
d2 magmaFloat_ptr
temporary vector
@param[in,out]
dx magmaFloat_ptr
                input/output vector x
@param[in,out]
dr magmaFloat_ptr
input/output vector r
@param[in]
dd magmaFloat_ptr
input vector d
@param[in]
dz magmaFloat_ptr
input vector z
@param[in]
skp magmaFloat_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_ssygpuk
********************************************************************/
extern "C" magma_int_t
magma_scgmerge_xrbeta(
magma_int_t n,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr dx,
magmaFloat_ptr dr,
magmaFloat_ptr dd,
magmaFloat_ptr dz,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( float );
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
magma_scgmerge_xrbeta_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>>
( n, dx, dr, dd, dz, skp, d1);
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_scgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_scopyvector( 1, aux1, 1, skp+1, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_scg_alphabetakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
dim3 Bs3( local_block_size );
dim3 Gs3( magma_ceildiv( n, local_block_size ) );
magma_scg_d_kernel<<< Gs3, Bs3, 0, queue->cuda_stream()>>>( n, skp, dr, dd );
return MAGMA_SUCCESS;
}
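// A call sketch derived from the code above (formal parameter names only, not a worked
// MAGMA example):
//   magma_scgmerge_xrbeta( n, d1, d2, dx, dr, dd, dz, skp, queue );
// On return, skp[1] holds the reduced dot product (r,r), skp[0] = skp[1]/skp[2], and
// dd has been overwritten with r + skp[0]*d.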
/* -------------------------------------------------------------------------- */
// updates x and r
__global__ void
magma_spcgmerge_xrbeta_kernel(
int n,
float * x,
float * r,
float * d,
float * z,
float * skp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
float rho = skp[3];
float mrho = MAGMA_S_MAKE( -1.0, 0.0)*rho;
if( i<n ) {
x[i] += rho * d[i];
r[i] += mrho * z[i];
}
}
// dot product for multiple vectors
__global__ void
magma_smsdot_one_kernel_1(
int n,
float * v0,
float * w0,
float * vtmp)
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// 1 vectors v(i)/w(i)
temp[ Idx ] = ( i < n ) ?
v0[ i ] * w0[ i ] : MAGMA_S_ZERO;
temp[ Idx + blockDim.x ] = ( i < n ) ?
v0[ i ] * v0[ i ] : MAGMA_S_ZERO;
__syncthreads();
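    // Shared memory layout: temp[0 .. blockDim.x) accumulates v0*w0 and
    // temp[blockDim.x .. 2*blockDim.x) accumulates v0*v0; both segments are reduced
    // in lockstep below.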
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
vtmp[ blockIdx.x+n ] = temp[ blockDim.x ];
}
}
/**
Purpose
-------
    Merges the update of r and x; the dot products and the update of the
    Krylov vector d are handled separately in magma_spcgmerge_xrbeta2
Arguments
---------
@param[in]
n int
dimension n
@param[in,out]
dx magmaFloat_ptr
                input/output vector x
@param[in,out]
dr magmaFloat_ptr
input/output vector r
@param[in]
dd magmaFloat_ptr
input vector d
@param[in]
dz magmaFloat_ptr
input vector z
@param[in]
skp magmaFloat_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_ssygpuk
********************************************************************/
extern "C" magma_int_t
magma_spcgmerge_xrbeta1(
magma_int_t n,
magmaFloat_ptr dx,
magmaFloat_ptr dr,
magmaFloat_ptr dd,
magmaFloat_ptr dz,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
magma_spcgmerge_xrbeta_kernel<<< Gs, Bs, 0, queue->cuda_stream()>>>
( n, dx, dr, dd, dz, skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
/**
Purpose
-------
    Computes the dot products (r,h) and (r,r), updates the scalars in skp,
    and then performs the update of the Krylov vector d
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaFloat_ptr
temporary vector
@param[in]
d2 magmaFloat_ptr
temporary vector
    @param[in]
    dh          magmaFloat_ptr
                input vector h
    @param[in]
    dr          magmaFloat_ptr
                input vector r
    @param[in,out]
    dd          magmaFloat_ptr
                input/output Krylov vector d
@param[in]
skp magmaFloat_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_ssygpuk
********************************************************************/
extern "C" magma_int_t
magma_spcgmerge_xrbeta2(
magma_int_t n,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr dh,
magmaFloat_ptr dr,
magmaFloat_ptr dd,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 4*local_block_size * sizeof( float );
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
magma_smsdot_one_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream()>>>
( n, dr, dh, d1);
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_scgreduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_scopyvector( 1, aux1, 1, skp+1, 1, queue );
magma_scopyvector( 1, aux1+n, 1, skp+6, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_scg_alphabetakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
dim3 Bs3( local_block_size );
dim3 Gs3( magma_ceildiv( n, local_block_size ) );
magma_scg_d_kernel<<< Gs3, Bs3, 0, queue->cuda_stream()>>>( n, skp, dh, dd );
return MAGMA_SUCCESS;
}
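// Post-conditions derived from the code above (not from external documentation):
//   skp[1] <- sum_i r[i]*h[i],  skp[6] <- sum_i r[i]*r[i],
//   skp[0] <- skp[1]/skp[2],    dd[i] <- h[i] + skp[0]*d[i]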
/* -------------------------------------------------------------------------- */
// updates x and r
__global__ void
magma_sjcgmerge_xrbeta_kernel(
int n,
float * diag,
float * x,
float * r,
float * d,
float * z,
float * h,
float * vtmp,
float * skp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
float rho = skp[3];
float mrho = MAGMA_S_MAKE( -1.0, 0.0)*rho;
if( i<n ) {
x[i] += rho * d[i];
r[i] += mrho * z[i];
h[i] = r[i] * diag[i];
}
__syncthreads();
temp[ Idx ] = ( i < n ) ?
h[ i ] * r[ i ] : MAGMA_S_ZERO;
temp[ Idx + blockDim.x ] = ( i < n ) ?
r[ i ] * r[ i ] : MAGMA_S_ZERO;
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
vtmp[ blockIdx.x+n ] = temp[ blockDim.x ];
}
}
/**
Purpose
-------
    Merges the update of r and x with the application of the Jacobi
    preconditioner (h = diag .* r) and the dot products, and then performs
    the update of the Krylov vector d
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaFloat_ptr
temporary vector
@param[in]
d2 magmaFloat_ptr
temporary vector
    @param[in]
    diag        magmaFloat_ptr
                diagonal preconditioner values (applied element-wise as h = diag .* r)
    @param[in,out]
    dx          magmaFloat_ptr
                input/output vector x
    @param[in,out]
    dr          magmaFloat_ptr
                input/output vector r
    @param[in,out]
    dd          magmaFloat_ptr
                input/output Krylov vector d
    @param[in]
    dz          magmaFloat_ptr
                input vector z
    @param[in,out]
    dh          magmaFloat_ptr
                input/output vector h
    @param[in]
    skp         magmaFloat_ptr
                array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_ssygpuk
********************************************************************/
extern "C" magma_int_t
magma_sjcgmerge_xrbeta(
magma_int_t n,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr diag,
magmaFloat_ptr dx,
magmaFloat_ptr dr,
magmaFloat_ptr dd,
magmaFloat_ptr dz,
magmaFloat_ptr dh,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 4*local_block_size * sizeof( float );
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
magma_sjcgmerge_xrbeta_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( n, diag, dx, dr, dd, dz, dh, d1, skp );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_scgreduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_scopyvector( 1, aux1, 1, skp+1, 1, queue );
magma_scopyvector( 1, aux1+n, 1, skp+6, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_scg_alphabetakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
dim3 Bs3( local_block_size );
dim3 Gs3( magma_ceildiv( n, local_block_size ) );
magma_scg_d_kernel<<< Gs3, Bs3, 0, queue->cuda_stream()>>>( n, skp, dh, dd );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
|
702045048bdbd0c78ab2776d7452493e562ef544.hip | // !!! This is a file automatically generated by hipify!!!
// =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Milad Rakhsha, Arman Pazouki
// =============================================================================
//
// Implementation of FSI system that includes all subclasses for proximity and
// force calculation, and time integration.
//
// =============================================================================
#include "chrono_fsi/ChSystemFsi_impl.cuh"
namespace chrono {
namespace fsi {
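// Equality predicate for thrust::reduce_by_key below: two markers belong to the same
// component if the .w entry of rhoPresMu (which stores the marker/phase type code) matches.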
struct sphTypeCompEqual {
__host__ __device__ bool operator()(const Real4& o1, const Real4& o2) { return o1.w == o2.w; }
};
//---------------------------------------------------------------------------------------
zipIterSphD SphMarkerDataD::iterator() {
return thrust::make_zip_iterator(thrust::make_tuple(posRadD.begin(), velMasD.begin(), rhoPresMuD.begin(),
tauXxYyZzD.begin(), tauXyXzYzD.begin()));
}
void SphMarkerDataD::resize(size_t s) {
posRadD.resize(s);
velMasD.resize(s);
rhoPresMuD.resize(s);
tauXxYyZzD.resize(s);
tauXyXzYzD.resize(s);
}
//---------------------------------------------------------------------------------------
zipIterSphH SphMarkerDataH::iterator() {
return thrust::make_zip_iterator(thrust::make_tuple(posRadH.begin(), velMasH.begin(), rhoPresMuH.begin(),
tauXxYyZzH.begin(), tauXyXzYzH.begin()));
}
// resize
void SphMarkerDataH::resize(size_t s) {
posRadH.resize(s);
velMasH.resize(s);
rhoPresMuH.resize(s);
tauXxYyZzH.resize(s);
tauXyXzYzH.resize(s);
}
//---------------------------------------------------------------------------------------
zipIterRigidD FsiBodiesDataD::iterator() {
return thrust::make_zip_iterator(thrust::make_tuple(posRigid_fsiBodies_D.begin(),
velMassRigid_fsiBodies_D.begin(),
accRigid_fsiBodies_D.begin(),
q_fsiBodies_D.begin(),
omegaVelLRF_fsiBodies_D.begin(),
omegaAccLRF_fsiBodies_D.begin()));
}
void FsiBodiesDataD::resize(size_t s) {
posRigid_fsiBodies_D.resize(s);
velMassRigid_fsiBodies_D.resize(s);
accRigid_fsiBodies_D.resize(s);
q_fsiBodies_D.resize(s);
omegaVelLRF_fsiBodies_D.resize(s);
omegaAccLRF_fsiBodies_D.resize(s);
}
void FsiShellsDataH::resize(size_t s) {
posFlex_fsiBodies_nA_H.resize(s);
posFlex_fsiBodies_nB_H.resize(s);
posFlex_fsiBodies_nC_H.resize(s);
posFlex_fsiBodies_nD_H.resize(s);
velFlex_fsiBodies_nA_H.resize(s);
velFlex_fsiBodies_nB_H.resize(s);
velFlex_fsiBodies_nC_H.resize(s);
velFlex_fsiBodies_nD_H.resize(s);
accFlex_fsiBodies_nA_H.resize(s);
accFlex_fsiBodies_nB_H.resize(s);
accFlex_fsiBodies_nC_H.resize(s);
accFlex_fsiBodies_nD_H.resize(s);
}
void FsiShellsDataD::resize(size_t s) {
posFlex_fsiBodies_nA_D.resize(s);
posFlex_fsiBodies_nB_D.resize(s);
posFlex_fsiBodies_nC_D.resize(s);
posFlex_fsiBodies_nD_D.resize(s);
velFlex_fsiBodies_nA_D.resize(s);
velFlex_fsiBodies_nB_D.resize(s);
velFlex_fsiBodies_nC_D.resize(s);
velFlex_fsiBodies_nD_D.resize(s);
accFlex_fsiBodies_nA_D.resize(s);
accFlex_fsiBodies_nB_D.resize(s);
accFlex_fsiBodies_nC_D.resize(s);
accFlex_fsiBodies_nD_D.resize(s);
}
void FsiMeshDataH::resize(size_t s) {
pos_fsi_fea_H.resize(s);
vel_fsi_fea_H.resize(s);
acc_fsi_fea_H.resize(s);
}
void FsiMeshDataD::resize(size_t s) {
pos_fsi_fea_D.resize(s);
vel_fsi_fea_D.resize(s);
acc_fsi_fea_D.resize(s);
}
void FsiBodiesDataD::CopyFromH(const FsiBodiesDataH& other) {
thrust::copy(other.posRigid_fsiBodies_H.begin(), other.posRigid_fsiBodies_H.end(),
posRigid_fsiBodies_D.begin());
thrust::copy(other.velMassRigid_fsiBodies_H.begin(), other.velMassRigid_fsiBodies_H.end(),
velMassRigid_fsiBodies_D.begin());
thrust::copy(other.accRigid_fsiBodies_H.begin(), other.accRigid_fsiBodies_H.end(),
accRigid_fsiBodies_D.begin());
thrust::copy(other.q_fsiBodies_H.begin(), other.q_fsiBodies_H.end(),
q_fsiBodies_D.begin());
thrust::copy(other.omegaVelLRF_fsiBodies_H.begin(), other.omegaVelLRF_fsiBodies_H.end(),
omegaVelLRF_fsiBodies_D.begin());
thrust::copy(other.omegaAccLRF_fsiBodies_H.begin(), other.omegaAccLRF_fsiBodies_H.end(),
omegaAccLRF_fsiBodies_D.begin());
}
void FsiShellsDataD::CopyFromH(const FsiShellsDataH& other) {
thrust::copy(other.posFlex_fsiBodies_nA_H.begin(), other.posFlex_fsiBodies_nA_H.end(),
posFlex_fsiBodies_nA_D.begin());
thrust::copy(other.posFlex_fsiBodies_nB_H.begin(), other.posFlex_fsiBodies_nB_H.end(),
posFlex_fsiBodies_nB_D.begin());
thrust::copy(other.posFlex_fsiBodies_nC_H.begin(), other.posFlex_fsiBodies_nC_H.end(),
posFlex_fsiBodies_nC_D.begin());
thrust::copy(other.posFlex_fsiBodies_nD_H.begin(), other.posFlex_fsiBodies_nD_H.end(),
posFlex_fsiBodies_nD_D.begin());
thrust::copy(other.velFlex_fsiBodies_nA_H.begin(), other.velFlex_fsiBodies_nA_H.end(),
velFlex_fsiBodies_nA_D.begin());
thrust::copy(other.velFlex_fsiBodies_nB_H.begin(), other.velFlex_fsiBodies_nB_H.end(),
velFlex_fsiBodies_nB_D.begin());
thrust::copy(other.velFlex_fsiBodies_nC_H.begin(), other.velFlex_fsiBodies_nC_H.end(),
velFlex_fsiBodies_nC_D.begin());
thrust::copy(other.velFlex_fsiBodies_nD_H.begin(), other.velFlex_fsiBodies_nD_H.end(),
velFlex_fsiBodies_nD_D.begin());
thrust::copy(other.accFlex_fsiBodies_nA_H.begin(), other.accFlex_fsiBodies_nA_H.end(),
accFlex_fsiBodies_nA_D.begin());
thrust::copy(other.accFlex_fsiBodies_nB_H.begin(), other.accFlex_fsiBodies_nB_H.end(),
accFlex_fsiBodies_nB_D.begin());
thrust::copy(other.accFlex_fsiBodies_nC_H.begin(), other.accFlex_fsiBodies_nC_H.end(),
accFlex_fsiBodies_nC_D.begin());
thrust::copy(other.accFlex_fsiBodies_nD_H.begin(), other.accFlex_fsiBodies_nD_H.end(),
accFlex_fsiBodies_nD_D.begin());
}
void FsiMeshDataD::CopyFromH(const FsiMeshDataH& other) {
thrust::copy(other.pos_fsi_fea_H.begin(), other.pos_fsi_fea_H.end(), pos_fsi_fea_D.begin());
thrust::copy(other.vel_fsi_fea_H.begin(), other.vel_fsi_fea_H.end(), vel_fsi_fea_D.begin());
thrust::copy(other.acc_fsi_fea_H.begin(), other.acc_fsi_fea_H.end(), acc_fsi_fea_D.begin());
}
FsiBodiesDataD& FsiBodiesDataD::operator=(const FsiBodiesDataD& other) {
if (this == &other) {
return *this;
}
thrust::copy(other.posRigid_fsiBodies_D.begin(), other.posRigid_fsiBodies_D.end(),
posRigid_fsiBodies_D.begin());
thrust::copy(other.velMassRigid_fsiBodies_D.begin(), other.velMassRigid_fsiBodies_D.end(),
velMassRigid_fsiBodies_D.begin());
thrust::copy(other.accRigid_fsiBodies_D.begin(), other.accRigid_fsiBodies_D.end(),
accRigid_fsiBodies_D.begin());
thrust::copy(other.q_fsiBodies_D.begin(), other.q_fsiBodies_D.end(),
q_fsiBodies_D.begin());
thrust::copy(other.omegaVelLRF_fsiBodies_D.begin(), other.omegaVelLRF_fsiBodies_D.end(),
omegaVelLRF_fsiBodies_D.begin());
thrust::copy(other.omegaAccLRF_fsiBodies_D.begin(), other.omegaAccLRF_fsiBodies_D.end(),
omegaAccLRF_fsiBodies_D.begin());
return *this;
}
FsiShellsDataD& FsiShellsDataD::operator=(const FsiShellsDataD& other) {
if (this == &other) {
return *this;
}
thrust::copy(other.posFlex_fsiBodies_nA_D.begin(), other.posFlex_fsiBodies_nA_D.end(),
posFlex_fsiBodies_nA_D.begin());
thrust::copy(other.posFlex_fsiBodies_nB_D.begin(), other.posFlex_fsiBodies_nB_D.end(),
posFlex_fsiBodies_nB_D.begin());
thrust::copy(other.posFlex_fsiBodies_nC_D.begin(), other.posFlex_fsiBodies_nC_D.end(),
posFlex_fsiBodies_nC_D.begin());
thrust::copy(other.posFlex_fsiBodies_nD_D.begin(), other.posFlex_fsiBodies_nD_D.end(),
posFlex_fsiBodies_nD_D.begin());
thrust::copy(other.velFlex_fsiBodies_nA_D.begin(), other.velFlex_fsiBodies_nA_D.end(),
velFlex_fsiBodies_nA_D.begin());
thrust::copy(other.velFlex_fsiBodies_nB_D.begin(), other.velFlex_fsiBodies_nB_D.end(),
velFlex_fsiBodies_nB_D.begin());
thrust::copy(other.velFlex_fsiBodies_nC_D.begin(), other.velFlex_fsiBodies_nC_D.end(),
velFlex_fsiBodies_nC_D.begin());
thrust::copy(other.velFlex_fsiBodies_nD_D.begin(), other.velFlex_fsiBodies_nD_D.end(),
velFlex_fsiBodies_nD_D.begin());
    thrust::copy(other.accFlex_fsiBodies_nA_D.begin(), other.accFlex_fsiBodies_nA_D.end(),
                 accFlex_fsiBodies_nA_D.begin());
thrust::copy(other.accFlex_fsiBodies_nB_D.begin(), other.accFlex_fsiBodies_nB_D.end(),
accFlex_fsiBodies_nB_D.begin());
thrust::copy(other.accFlex_fsiBodies_nC_D.begin(), other.accFlex_fsiBodies_nC_D.end(),
accFlex_fsiBodies_nC_D.begin());
thrust::copy(other.accFlex_fsiBodies_nD_D.begin(), other.accFlex_fsiBodies_nD_D.end(),
accFlex_fsiBodies_nD_D.begin());
return *this;
}
FsiMeshDataD& FsiMeshDataD::operator=(const FsiMeshDataD& other) {
if (this == &other) {
return *this;
}
thrust::copy(other.pos_fsi_fea_D.begin(), other.pos_fsi_fea_D.end(), pos_fsi_fea_D.begin());
thrust::copy(other.vel_fsi_fea_D.begin(), other.vel_fsi_fea_D.end(), vel_fsi_fea_D.begin());
thrust::copy(other.acc_fsi_fea_D.begin(), other.acc_fsi_fea_D.end(), acc_fsi_fea_D.begin());
return *this;
}
//---------------------------------------------------------------------------------------
zipIterRigidH FsiBodiesDataH::iterator() {
return thrust::make_zip_iterator(
thrust::make_tuple(posRigid_fsiBodies_H.begin(), velMassRigid_fsiBodies_H.begin(), accRigid_fsiBodies_H.begin(),
q_fsiBodies_H.begin(), omegaVelLRF_fsiBodies_H.begin(), omegaAccLRF_fsiBodies_H.begin()));
}
void FsiBodiesDataH::resize(size_t s) {
posRigid_fsiBodies_H.resize(s);
velMassRigid_fsiBodies_H.resize(s);
accRigid_fsiBodies_H.resize(s);
q_fsiBodies_H.resize(s);
omegaVelLRF_fsiBodies_H.resize(s);
omegaAccLRF_fsiBodies_H.resize(s);
}
//---------------------------------------------------------------------------------------
void ProximityDataD::resize(size_t numAllMarkers) {
gridMarkerHashD.resize(numAllMarkers);
gridMarkerIndexD.resize(numAllMarkers);
mapOriginalToSorted.resize(numAllMarkers);
}
//---------------------------------------------------------------------------------------
ChronoBodiesDataH::ChronoBodiesDataH(size_t s) {
resize(s);
}
ChronoShellsDataH::ChronoShellsDataH(size_t s) {
resize(s);
}
ChronoMeshDataH::ChronoMeshDataH(size_t s) {
resize(s);
}
zipIterChronoBodiesH ChronoBodiesDataH::iterator() {
return thrust::make_zip_iterator(thrust::make_tuple(
pos_ChSystemH.begin(), vel_ChSystemH.begin(),
acc_ChSystemH.begin(), quat_ChSystemH.begin(),
omegaVelGRF_ChSystemH.begin(), omegaAccGRF_ChSystemH.begin()));
}
void ChronoBodiesDataH::resize(size_t s) {
pos_ChSystemH.resize(s);
vel_ChSystemH.resize(s);
acc_ChSystemH.resize(s);
quat_ChSystemH.resize(s);
omegaVelGRF_ChSystemH.resize(s);
omegaAccGRF_ChSystemH.resize(s);
}
void ChronoShellsDataH::resize(size_t s) {
posFlex_ChSystemH_nA_H.resize(s);
posFlex_ChSystemH_nB_H.resize(s);
posFlex_ChSystemH_nC_H.resize(s);
posFlex_ChSystemH_nD_H.resize(s);
velFlex_ChSystemH_nA_H.resize(s);
velFlex_ChSystemH_nB_H.resize(s);
velFlex_ChSystemH_nC_H.resize(s);
velFlex_ChSystemH_nD_H.resize(s);
accFlex_ChSystemH_nA_H.resize(s);
accFlex_ChSystemH_nB_H.resize(s);
accFlex_ChSystemH_nC_H.resize(s);
accFlex_ChSystemH_nD_H.resize(s);
}
void ChronoMeshDataH::resize(size_t s) {
posFlex_ChSystemH_H.resize(s);
velFlex_ChSystemH_H.resize(s);
accFlex_ChSystemH_H.resize(s);
}
//---------------------------------------------------------------------------------------
ChSystemFsi_impl::ChSystemFsi_impl() {
numObjects = chrono_types::make_shared<NumberOfObjects>();
InitNumObjects();
sphMarkersD1 = chrono_types::make_shared<SphMarkerDataD>();
sphMarkersD2 = chrono_types::make_shared<SphMarkerDataD>();
sortedSphMarkersD = chrono_types::make_shared<SphMarkerDataD>();
sphMarkersH = chrono_types::make_shared<SphMarkerDataH>();
fsiBodiesD1 = chrono_types::make_shared<FsiBodiesDataD>();
fsiBodiesD2 = chrono_types::make_shared<FsiBodiesDataD>();
fsiBodiesH = chrono_types::make_shared<FsiBodiesDataH>();
fsiMeshD = chrono_types::make_shared<FsiMeshDataD>();
fsiMeshH = chrono_types::make_shared<FsiMeshDataH>();
fsiGeneralData = chrono_types::make_shared<FsiGeneralData>();
markersProximityD = chrono_types::make_shared<ProximityDataD>();
}
ChSystemFsi_impl::~ChSystemFsi_impl() {}
void ChSystemFsi_impl::AddSphMarker(Real4 pos, Real4 rhoPresMu, Real3 vel, Real3 tauXxYyZz, Real3 tauXyXzYz) {
sphMarkersH->posRadH.push_back(pos);
sphMarkersH->velMasH.push_back(vel);
sphMarkersH->rhoPresMuH.push_back(rhoPresMu);
sphMarkersH->tauXyXzYzH.push_back(tauXyXzYz);
sphMarkersH->tauXxYyZzH.push_back(tauXxYyZz);
}
void ChSystemFsi_impl::ArrangeDataManager() {
thrust::host_vector<Real4> dummyRhoPresMuH = sphMarkersH->rhoPresMuH;
dummyRhoPresMuH.clear();
}
void ChSystemFsi_impl::InitNumObjects() {
numObjects->numRigidBodies = 0; /* Number of rigid bodies */
    numObjects->numFlexBodies1D = 0;     /* Number of 1D flexible bodies */
    numObjects->numFlexBodies2D = 0;     /* Number of 2D flexible bodies */
numObjects->numFlexNodes = 0; /* Number of FE nodes*/
numObjects->numGhostMarkers = 0;
numObjects->numHelperMarkers = 0;
numObjects->numFluidMarkers = 0; /* Number of fluid SPH markers*/
numObjects->numBoundaryMarkers = 0; /* Number of boundary SPH markers */
numObjects->startRigidMarkers = 0; /* */
numObjects->startFlexMarkers = 0; /* */
numObjects->numRigid_SphMarkers = 0; /* */
numObjects->numFlex_SphMarkers = 0; /* */
numObjects->numAllMarkers = 0; /* Total number of SPH markers */
}
void ChSystemFsi_impl::CalcNumObjects() {
InitNumObjects();
size_t rSize = fsiGeneralData->referenceArray.size();
bool flagRigid = false;
bool flagFlex = false;
std::cout << "ChSystemFsi_impl::CalcNumObjects" << std::endl;
for (size_t i = 0; i < rSize; i++) {
int4 rComp4 = fsiGeneralData->referenceArray[i];
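        // referenceArray entry layout (set in ConstructReferenceArray): x = index of the
        // first marker of the component, y = one past the last index, z = marker type
        // code, w = phase type.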
int numMerkers = rComp4.y - rComp4.x;
switch (rComp4.z) {
case -3:
numObjects->numHelperMarkers += numMerkers;
std::cout << "Added " << numMerkers << " helper particles\n";
break;
case -2:
numObjects->numGhostMarkers += numMerkers;
std::cout << "Added " << numMerkers << " ghost particles\n";
break;
case -1:
numObjects->numFluidMarkers += numMerkers;
std::cout << "Added " << numMerkers << " fluid particles\n";
break;
case 0:
numObjects->numBoundaryMarkers += numMerkers;
std::cout << "Added " << numMerkers << " boundary particles\n";
break;
case 1:
numObjects->numRigid_SphMarkers += numMerkers;
std::cout << "Added " << numMerkers << " rigid particles\n";
numObjects->numRigidBodies++;
flagRigid = true;
break;
case 2:
numObjects->numFlex_SphMarkers += numMerkers;
std::cout << "Added " << numMerkers << " 1D flexible particles\n";
numObjects->numFlexBodies1D++;
flagFlex = true;
break;
case 3:
numObjects->numFlex_SphMarkers += numMerkers;
std::cout << "Added " << numMerkers << " 2D flexible particles\n";
numObjects->numFlexBodies2D++;
flagFlex = true;
break;
default:
std::cout << "Error! particle type not defined! Thrown from CalcNumObjects\n";
break;
}
}
std::cout << "numObjects->numFlexNodes = " << numObjects->numFlexNodes << std::endl;
std::cout << "numObjects->numGhostMarkers = " << numObjects->numGhostMarkers << std::endl;
numObjects->numFluidMarkers += numObjects->numGhostMarkers + numObjects->numHelperMarkers;
numObjects->numAllMarkers = numObjects->numFluidMarkers
+ numObjects->numBoundaryMarkers
+ numObjects->numRigid_SphMarkers
+ numObjects->numFlex_SphMarkers;
numObjects->startRigidMarkers =
(flagRigid) ? (numObjects->numFluidMarkers + numObjects->numBoundaryMarkers)
: numObjects->numAllMarkers;
numObjects->startFlexMarkers =
(flagFlex) ? (numObjects->numFluidMarkers + numObjects->numBoundaryMarkers + numObjects->numRigid_SphMarkers)
: numObjects->numAllMarkers;
printf("Number of Helper particles = %zd\n",numObjects->numHelperMarkers);
printf("Number of Ghost particles = %zd\n",numObjects->numGhostMarkers);
printf("Number of Fluid particles = %zd\n",numObjects->numFluidMarkers);
printf("Number of Boundary particles = %zd\n",numObjects->numBoundaryMarkers);
printf("Number of Rigid particles = %zd\n",numObjects->numRigid_SphMarkers);
printf("Number of Flexible particles = %zd\n",numObjects->numFlex_SphMarkers);
printf("Total number particles = %zd\n",numObjects->numAllMarkers);
printf("Rigid particles start at = %zd\n",numObjects->startRigidMarkers);
printf("Flexible particles start at = %zd\n",numObjects->startFlexMarkers);
}
void ChSystemFsi_impl::ConstructReferenceArray() {
// ArrangeDataManager();
CalcNumObjects();
// determine the number of each component
if (numObjects->numAllMarkers != sphMarkersH->rhoPresMuH.size()) {
printf(
"\nChSystemFsi_impl::ConstructReferenceArray() numObjects->numAllMarkers=%zd, "
"sphMarkersH->rhoPresMuH.size()=%zd\n",
numObjects->numAllMarkers, sphMarkersH->rhoPresMuH.size());
throw std::runtime_error("Error! numObjects wrong! thrown from ConstructReferenceArray !\n");
}
thrust::host_vector<int> numComponentMarkers(numObjects->numAllMarkers);
thrust::fill(numComponentMarkers.begin(), numComponentMarkers.end(), 1);
thrust::host_vector<Real4> dummyRhoPresMuH = sphMarkersH->rhoPresMuH;
thrust::copy(sphMarkersH->rhoPresMuH.begin(), sphMarkersH->rhoPresMuH.end(), dummyRhoPresMuH.begin());
size_t numberOfComponents =
(thrust::reduce_by_key(dummyRhoPresMuH.begin(), dummyRhoPresMuH.end(), numComponentMarkers.begin(),
dummyRhoPresMuH.begin(), numComponentMarkers.begin(), sphTypeCompEqual()))
.first -
dummyRhoPresMuH.begin();
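    // reduce_by_key with sphTypeCompEqual collapses runs of consecutive markers sharing
    // the same type code (rhoPresMu.w), so the iterator distance computed above equals
    // the number of contiguous components.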
printf("Number of particle types = %zd\n", numberOfComponents);
fsiGeneralData->referenceArray.resize(numberOfComponents);
dummyRhoPresMuH.resize(numberOfComponents);
numComponentMarkers.resize(numberOfComponents);
int savedNumber = 0;
for (size_t i = 0; i < numberOfComponents; i++) {
int compType = (int)::floor(dummyRhoPresMuH[i].w + .1);
int phaseType = -1;
if (compType <= -2) {
phaseType = -1;
} else if (compType == -1) {
phaseType = -1;
} else if (compType == 0) {
phaseType = 0;
} else if (compType == 1) {
phaseType = 1;
} else if (compType == 2) {
phaseType = 1; // For Cable Elements
} else if (compType == 3) {
phaseType = 2; // For Shell Elements
} else {
phaseType = 1;
}
fsiGeneralData->referenceArray[i] = mI4(savedNumber, savedNumber + numComponentMarkers[i], compType, phaseType);
savedNumber += numComponentMarkers[i];
}
dummyRhoPresMuH.clear();
numComponentMarkers.clear();
printf("Reference array \n");
for (size_t i = 0; i < fsiGeneralData->referenceArray.size(); i++) {
int4 num = fsiGeneralData->referenceArray[i];
printf("%d %d %d %d \n", num.x, num.y, num.z, num.w);
}
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChSystemFsi_impl::ResizeDataManager(int numNodes) {
ConstructReferenceArray();
if (numObjects->numAllMarkers != sphMarkersH->rhoPresMuH.size()) {
throw std::runtime_error("Error! numObjects wrong! thrown from FinalizeDataManager !\n");
}
numObjects->numFlexNodes = numNodes;
sphMarkersD1->resize(numObjects->numAllMarkers);
sphMarkersD2->resize(numObjects->numAllMarkers);
sortedSphMarkersD->resize(numObjects->numAllMarkers);
sphMarkersH->resize(numObjects->numAllMarkers);
markersProximityD->resize(numObjects->numAllMarkers);
fsiGeneralData->derivVelRhoD.resize(numObjects->numAllMarkers);
fsiGeneralData->derivVelRhoD_old.resize(numObjects->numAllMarkers);
fsiGeneralData->derivTauXxYyZzD.resize(numObjects->numAllMarkers);
fsiGeneralData->derivTauXyXzYzD.resize(numObjects->numAllMarkers);
fsiGeneralData->vel_XSPH_D.resize(numObjects->numAllMarkers);
fsiGeneralData->vis_vel_SPH_D.resize(numObjects->numAllMarkers, mR3(1e-20));
fsiGeneralData->sr_tau_I_mu_i.resize(numObjects->numAllMarkers, mR4(1e-20));
printf("fsiData->ResizeDataManager (sphMarkersH)...\n");
// Arman: implement this in one shot function in class
thrust::copy(sphMarkersH->posRadH.begin(), sphMarkersH->posRadH.end(), sphMarkersD1->posRadD.begin());
thrust::copy(sphMarkersH->velMasH.begin(), sphMarkersH->velMasH.end(), sphMarkersD1->velMasD.begin());
thrust::copy(sphMarkersH->rhoPresMuH.begin(), sphMarkersH->rhoPresMuH.end(), sphMarkersD1->rhoPresMuD.begin());
thrust::copy(sphMarkersH->tauXxYyZzH.begin(), sphMarkersH->tauXxYyZzH.end(), sphMarkersD1->tauXxYyZzD.begin());
thrust::copy(sphMarkersH->tauXyXzYzH.begin(), sphMarkersH->tauXyXzYzH.end(), sphMarkersD1->tauXyXzYzD.begin());
printf("fsiData->ResizeDataManager (sphMarkersD)...\n");
thrust::copy(sphMarkersD1->posRadD.begin(), sphMarkersD1->posRadD.end(), sphMarkersD2->posRadD.begin());
thrust::copy(sphMarkersD1->velMasD.begin(), sphMarkersD1->velMasD.end(), sphMarkersD2->velMasD.begin());
thrust::copy(sphMarkersD1->rhoPresMuD.begin(), sphMarkersD1->rhoPresMuD.end(), sphMarkersD2->rhoPresMuD.begin());
thrust::copy(sphMarkersD1->tauXxYyZzD.begin(), sphMarkersD1->tauXxYyZzD.end(), sphMarkersD2->tauXxYyZzD.begin());
thrust::copy(sphMarkersD1->tauXyXzYzD.begin(), sphMarkersD1->tauXyXzYzD.end(), sphMarkersD2->tauXyXzYzD.begin());
printf("fsiData->ResizeDataManager (Rigid)...\n");
// copy rigids
fsiBodiesD1->resize(numObjects->numRigidBodies);
fsiBodiesD2->resize(numObjects->numRigidBodies);
fsiBodiesH->resize(numObjects->numRigidBodies);
fsiGeneralData->rigid_FSI_ForcesD.resize(numObjects->numRigidBodies);
fsiGeneralData->rigid_FSI_TorquesD.resize(numObjects->numRigidBodies);
fsiGeneralData->rigidIdentifierD.resize(numObjects->numRigid_SphMarkers);
fsiGeneralData->rigidSPH_MeshPos_LRF_D.resize(numObjects->numRigid_SphMarkers);
fsiGeneralData->FlexSPH_MeshPos_LRF_D.resize(numObjects->numFlex_SphMarkers);
fsiGeneralData->FlexSPH_MeshPos_LRF_H.resize(numObjects->numFlex_SphMarkers);
printf("fsiData->ResizeDataManager (Flex)...\n");
fsiGeneralData->FlexIdentifierD.resize(numObjects->numFlex_SphMarkers);
if (fsiGeneralData->CableElementsNodesH.size() != numObjects->numFlexBodies1D) {
printf("******************************************************************************\n");
printf("******************************************************************************\n");
printf("******************************Be Careful**************************************\n");
printf("There might be 1D Flexible bodies in Chrono that are not a part of ChSystemFSI\n");
printf("I am going to transfer nodal data for such elements back and forth although they\n");
        printf("are not part of FSI calculation. If you want to have some 1D elements that are \n");
printf("inside the ChSystem mesh but not FSI system, you can ignore this warning ...\n");
printf("******************************************************************************\n");
printf("******************************************************************************\n");
printf("******************************************************************************\n");
fsiGeneralData->CableElementsNodes.resize(fsiGeneralData->CableElementsNodesH.size());
} else
fsiGeneralData->CableElementsNodes.resize(numObjects->numFlexBodies1D);
fsiGeneralData->ShellElementsNodes.resize(numObjects->numFlexBodies2D);
printf("numObjects->numFlexBodies1D = %zd, numObjects->numFlexBodies2D = %zd\n",
numObjects->numFlexBodies1D, numObjects->numFlexBodies2D);
printf("fsiGeneralData->CableElementsNodesH.size() = %zd\n", fsiGeneralData->CableElementsNodesH.size());
printf("fsiGeneralData->ShellElementsNodesH.size() = %zd\n", fsiGeneralData->ShellElementsNodesH.size());
thrust::copy(fsiGeneralData->CableElementsNodesH.begin(), fsiGeneralData->CableElementsNodesH.end(),
fsiGeneralData->CableElementsNodes.begin());
thrust::copy(fsiGeneralData->ShellElementsNodesH.begin(), fsiGeneralData->ShellElementsNodesH.end(),
fsiGeneralData->ShellElementsNodes.begin());
fsiMeshD->resize(numObjects->numFlexNodes);
fsiMeshH->resize(numObjects->numFlexNodes);
fsiGeneralData->Flex_FSI_ForcesD.resize(numObjects->numFlexNodes);
}
} // end namespace fsi
} // end namespace chrono
| 702045048bdbd0c78ab2776d7452493e562ef544.cu | // =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Milad Rakhsha, Arman Pazouki
// =============================================================================
//
// Implementation of FSI system that includes all subclasses for proximity and
// force calculation, and time integration.
//
// =============================================================================
#include "chrono_fsi/ChSystemFsi_impl.cuh"
namespace chrono {
namespace fsi {
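// Equality predicate for thrust::reduce_by_key below: two markers belong to the same
// component if the .w entry of rhoPresMu (which stores the marker/phase type code) matches.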
struct sphTypeCompEqual {
__host__ __device__ bool operator()(const Real4& o1, const Real4& o2) { return o1.w == o2.w; }
};
//---------------------------------------------------------------------------------------
zipIterSphD SphMarkerDataD::iterator() {
return thrust::make_zip_iterator(thrust::make_tuple(posRadD.begin(), velMasD.begin(), rhoPresMuD.begin(),
tauXxYyZzD.begin(), tauXyXzYzD.begin()));
}
void SphMarkerDataD::resize(size_t s) {
posRadD.resize(s);
velMasD.resize(s);
rhoPresMuD.resize(s);
tauXxYyZzD.resize(s);
tauXyXzYzD.resize(s);
}
//---------------------------------------------------------------------------------------
zipIterSphH SphMarkerDataH::iterator() {
return thrust::make_zip_iterator(thrust::make_tuple(posRadH.begin(), velMasH.begin(), rhoPresMuH.begin(),
tauXxYyZzH.begin(), tauXyXzYzH.begin()));
}
// resize
void SphMarkerDataH::resize(size_t s) {
posRadH.resize(s);
velMasH.resize(s);
rhoPresMuH.resize(s);
tauXxYyZzH.resize(s);
tauXyXzYzH.resize(s);
}
//---------------------------------------------------------------------------------------
zipIterRigidD FsiBodiesDataD::iterator() {
return thrust::make_zip_iterator(thrust::make_tuple(posRigid_fsiBodies_D.begin(),
velMassRigid_fsiBodies_D.begin(),
accRigid_fsiBodies_D.begin(),
q_fsiBodies_D.begin(),
omegaVelLRF_fsiBodies_D.begin(),
omegaAccLRF_fsiBodies_D.begin()));
}
void FsiBodiesDataD::resize(size_t s) {
posRigid_fsiBodies_D.resize(s);
velMassRigid_fsiBodies_D.resize(s);
accRigid_fsiBodies_D.resize(s);
q_fsiBodies_D.resize(s);
omegaVelLRF_fsiBodies_D.resize(s);
omegaAccLRF_fsiBodies_D.resize(s);
}
void FsiShellsDataH::resize(size_t s) {
posFlex_fsiBodies_nA_H.resize(s);
posFlex_fsiBodies_nB_H.resize(s);
posFlex_fsiBodies_nC_H.resize(s);
posFlex_fsiBodies_nD_H.resize(s);
velFlex_fsiBodies_nA_H.resize(s);
velFlex_fsiBodies_nB_H.resize(s);
velFlex_fsiBodies_nC_H.resize(s);
velFlex_fsiBodies_nD_H.resize(s);
accFlex_fsiBodies_nA_H.resize(s);
accFlex_fsiBodies_nB_H.resize(s);
accFlex_fsiBodies_nC_H.resize(s);
accFlex_fsiBodies_nD_H.resize(s);
}
void FsiShellsDataD::resize(size_t s) {
posFlex_fsiBodies_nA_D.resize(s);
posFlex_fsiBodies_nB_D.resize(s);
posFlex_fsiBodies_nC_D.resize(s);
posFlex_fsiBodies_nD_D.resize(s);
velFlex_fsiBodies_nA_D.resize(s);
velFlex_fsiBodies_nB_D.resize(s);
velFlex_fsiBodies_nC_D.resize(s);
velFlex_fsiBodies_nD_D.resize(s);
accFlex_fsiBodies_nA_D.resize(s);
accFlex_fsiBodies_nB_D.resize(s);
accFlex_fsiBodies_nC_D.resize(s);
accFlex_fsiBodies_nD_D.resize(s);
}
void FsiMeshDataH::resize(size_t s) {
pos_fsi_fea_H.resize(s);
vel_fsi_fea_H.resize(s);
acc_fsi_fea_H.resize(s);
}
void FsiMeshDataD::resize(size_t s) {
pos_fsi_fea_D.resize(s);
vel_fsi_fea_D.resize(s);
acc_fsi_fea_D.resize(s);
}
void FsiBodiesDataD::CopyFromH(const FsiBodiesDataH& other) {
thrust::copy(other.posRigid_fsiBodies_H.begin(), other.posRigid_fsiBodies_H.end(),
posRigid_fsiBodies_D.begin());
thrust::copy(other.velMassRigid_fsiBodies_H.begin(), other.velMassRigid_fsiBodies_H.end(),
velMassRigid_fsiBodies_D.begin());
thrust::copy(other.accRigid_fsiBodies_H.begin(), other.accRigid_fsiBodies_H.end(),
accRigid_fsiBodies_D.begin());
thrust::copy(other.q_fsiBodies_H.begin(), other.q_fsiBodies_H.end(),
q_fsiBodies_D.begin());
thrust::copy(other.omegaVelLRF_fsiBodies_H.begin(), other.omegaVelLRF_fsiBodies_H.end(),
omegaVelLRF_fsiBodies_D.begin());
thrust::copy(other.omegaAccLRF_fsiBodies_H.begin(), other.omegaAccLRF_fsiBodies_H.end(),
omegaAccLRF_fsiBodies_D.begin());
}
void FsiShellsDataD::CopyFromH(const FsiShellsDataH& other) {
thrust::copy(other.posFlex_fsiBodies_nA_H.begin(), other.posFlex_fsiBodies_nA_H.end(),
posFlex_fsiBodies_nA_D.begin());
thrust::copy(other.posFlex_fsiBodies_nB_H.begin(), other.posFlex_fsiBodies_nB_H.end(),
posFlex_fsiBodies_nB_D.begin());
thrust::copy(other.posFlex_fsiBodies_nC_H.begin(), other.posFlex_fsiBodies_nC_H.end(),
posFlex_fsiBodies_nC_D.begin());
thrust::copy(other.posFlex_fsiBodies_nD_H.begin(), other.posFlex_fsiBodies_nD_H.end(),
posFlex_fsiBodies_nD_D.begin());
thrust::copy(other.velFlex_fsiBodies_nA_H.begin(), other.velFlex_fsiBodies_nA_H.end(),
velFlex_fsiBodies_nA_D.begin());
thrust::copy(other.velFlex_fsiBodies_nB_H.begin(), other.velFlex_fsiBodies_nB_H.end(),
velFlex_fsiBodies_nB_D.begin());
thrust::copy(other.velFlex_fsiBodies_nC_H.begin(), other.velFlex_fsiBodies_nC_H.end(),
velFlex_fsiBodies_nC_D.begin());
thrust::copy(other.velFlex_fsiBodies_nD_H.begin(), other.velFlex_fsiBodies_nD_H.end(),
velFlex_fsiBodies_nD_D.begin());
thrust::copy(other.accFlex_fsiBodies_nA_H.begin(), other.accFlex_fsiBodies_nA_H.end(),
accFlex_fsiBodies_nA_D.begin());
thrust::copy(other.accFlex_fsiBodies_nB_H.begin(), other.accFlex_fsiBodies_nB_H.end(),
accFlex_fsiBodies_nB_D.begin());
thrust::copy(other.accFlex_fsiBodies_nC_H.begin(), other.accFlex_fsiBodies_nC_H.end(),
accFlex_fsiBodies_nC_D.begin());
thrust::copy(other.accFlex_fsiBodies_nD_H.begin(), other.accFlex_fsiBodies_nD_H.end(),
accFlex_fsiBodies_nD_D.begin());
}
void FsiMeshDataD::CopyFromH(const FsiMeshDataH& other) {
thrust::copy(other.pos_fsi_fea_H.begin(), other.pos_fsi_fea_H.end(), pos_fsi_fea_D.begin());
thrust::copy(other.vel_fsi_fea_H.begin(), other.vel_fsi_fea_H.end(), vel_fsi_fea_D.begin());
thrust::copy(other.acc_fsi_fea_H.begin(), other.acc_fsi_fea_H.end(), acc_fsi_fea_D.begin());
}
FsiBodiesDataD& FsiBodiesDataD::operator=(const FsiBodiesDataD& other) {
if (this == &other) {
return *this;
}
thrust::copy(other.posRigid_fsiBodies_D.begin(), other.posRigid_fsiBodies_D.end(),
posRigid_fsiBodies_D.begin());
thrust::copy(other.velMassRigid_fsiBodies_D.begin(), other.velMassRigid_fsiBodies_D.end(),
velMassRigid_fsiBodies_D.begin());
thrust::copy(other.accRigid_fsiBodies_D.begin(), other.accRigid_fsiBodies_D.end(),
accRigid_fsiBodies_D.begin());
thrust::copy(other.q_fsiBodies_D.begin(), other.q_fsiBodies_D.end(),
q_fsiBodies_D.begin());
thrust::copy(other.omegaVelLRF_fsiBodies_D.begin(), other.omegaVelLRF_fsiBodies_D.end(),
omegaVelLRF_fsiBodies_D.begin());
thrust::copy(other.omegaAccLRF_fsiBodies_D.begin(), other.omegaAccLRF_fsiBodies_D.end(),
omegaAccLRF_fsiBodies_D.begin());
return *this;
}
FsiShellsDataD& FsiShellsDataD::operator=(const FsiShellsDataD& other) {
if (this == &other) {
return *this;
}
thrust::copy(other.posFlex_fsiBodies_nA_D.begin(), other.posFlex_fsiBodies_nA_D.end(),
posFlex_fsiBodies_nA_D.begin());
thrust::copy(other.posFlex_fsiBodies_nB_D.begin(), other.posFlex_fsiBodies_nB_D.end(),
posFlex_fsiBodies_nB_D.begin());
thrust::copy(other.posFlex_fsiBodies_nC_D.begin(), other.posFlex_fsiBodies_nC_D.end(),
posFlex_fsiBodies_nC_D.begin());
thrust::copy(other.posFlex_fsiBodies_nD_D.begin(), other.posFlex_fsiBodies_nD_D.end(),
posFlex_fsiBodies_nD_D.begin());
thrust::copy(other.velFlex_fsiBodies_nA_D.begin(), other.velFlex_fsiBodies_nA_D.end(),
velFlex_fsiBodies_nA_D.begin());
thrust::copy(other.velFlex_fsiBodies_nB_D.begin(), other.velFlex_fsiBodies_nB_D.end(),
velFlex_fsiBodies_nB_D.begin());
thrust::copy(other.velFlex_fsiBodies_nC_D.begin(), other.velFlex_fsiBodies_nC_D.end(),
velFlex_fsiBodies_nC_D.begin());
thrust::copy(other.velFlex_fsiBodies_nD_D.begin(), other.velFlex_fsiBodies_nD_D.end(),
velFlex_fsiBodies_nD_D.begin());
    thrust::copy(other.accFlex_fsiBodies_nA_D.begin(), other.accFlex_fsiBodies_nA_D.end(),
                 accFlex_fsiBodies_nA_D.begin());
thrust::copy(other.accFlex_fsiBodies_nB_D.begin(), other.accFlex_fsiBodies_nB_D.end(),
accFlex_fsiBodies_nB_D.begin());
thrust::copy(other.accFlex_fsiBodies_nC_D.begin(), other.accFlex_fsiBodies_nC_D.end(),
accFlex_fsiBodies_nC_D.begin());
thrust::copy(other.accFlex_fsiBodies_nD_D.begin(), other.accFlex_fsiBodies_nD_D.end(),
accFlex_fsiBodies_nD_D.begin());
return *this;
}
FsiMeshDataD& FsiMeshDataD::operator=(const FsiMeshDataD& other) {
if (this == &other) {
return *this;
}
thrust::copy(other.pos_fsi_fea_D.begin(), other.pos_fsi_fea_D.end(), pos_fsi_fea_D.begin());
thrust::copy(other.vel_fsi_fea_D.begin(), other.vel_fsi_fea_D.end(), vel_fsi_fea_D.begin());
thrust::copy(other.acc_fsi_fea_D.begin(), other.acc_fsi_fea_D.end(), acc_fsi_fea_D.begin());
return *this;
}
//---------------------------------------------------------------------------------------
zipIterRigidH FsiBodiesDataH::iterator() {
return thrust::make_zip_iterator(
thrust::make_tuple(posRigid_fsiBodies_H.begin(), velMassRigid_fsiBodies_H.begin(), accRigid_fsiBodies_H.begin(),
q_fsiBodies_H.begin(), omegaVelLRF_fsiBodies_H.begin(), omegaAccLRF_fsiBodies_H.begin()));
}
void FsiBodiesDataH::resize(size_t s) {
posRigid_fsiBodies_H.resize(s);
velMassRigid_fsiBodies_H.resize(s);
accRigid_fsiBodies_H.resize(s);
q_fsiBodies_H.resize(s);
omegaVelLRF_fsiBodies_H.resize(s);
omegaAccLRF_fsiBodies_H.resize(s);
}
//---------------------------------------------------------------------------------------
void ProximityDataD::resize(size_t numAllMarkers) {
gridMarkerHashD.resize(numAllMarkers);
gridMarkerIndexD.resize(numAllMarkers);
mapOriginalToSorted.resize(numAllMarkers);
}
//---------------------------------------------------------------------------------------
ChronoBodiesDataH::ChronoBodiesDataH(size_t s) {
resize(s);
}
ChronoShellsDataH::ChronoShellsDataH(size_t s) {
resize(s);
}
ChronoMeshDataH::ChronoMeshDataH(size_t s) {
resize(s);
}
zipIterChronoBodiesH ChronoBodiesDataH::iterator() {
return thrust::make_zip_iterator(thrust::make_tuple(
pos_ChSystemH.begin(), vel_ChSystemH.begin(),
acc_ChSystemH.begin(), quat_ChSystemH.begin(),
omegaVelGRF_ChSystemH.begin(), omegaAccGRF_ChSystemH.begin()));
}
void ChronoBodiesDataH::resize(size_t s) {
pos_ChSystemH.resize(s);
vel_ChSystemH.resize(s);
acc_ChSystemH.resize(s);
quat_ChSystemH.resize(s);
omegaVelGRF_ChSystemH.resize(s);
omegaAccGRF_ChSystemH.resize(s);
}
void ChronoShellsDataH::resize(size_t s) {
posFlex_ChSystemH_nA_H.resize(s);
posFlex_ChSystemH_nB_H.resize(s);
posFlex_ChSystemH_nC_H.resize(s);
posFlex_ChSystemH_nD_H.resize(s);
velFlex_ChSystemH_nA_H.resize(s);
velFlex_ChSystemH_nB_H.resize(s);
velFlex_ChSystemH_nC_H.resize(s);
velFlex_ChSystemH_nD_H.resize(s);
accFlex_ChSystemH_nA_H.resize(s);
accFlex_ChSystemH_nB_H.resize(s);
accFlex_ChSystemH_nC_H.resize(s);
accFlex_ChSystemH_nD_H.resize(s);
}
void ChronoMeshDataH::resize(size_t s) {
posFlex_ChSystemH_H.resize(s);
velFlex_ChSystemH_H.resize(s);
accFlex_ChSystemH_H.resize(s);
}
//---------------------------------------------------------------------------------------
ChSystemFsi_impl::ChSystemFsi_impl() {
numObjects = chrono_types::make_shared<NumberOfObjects>();
InitNumObjects();
sphMarkersD1 = chrono_types::make_shared<SphMarkerDataD>();
sphMarkersD2 = chrono_types::make_shared<SphMarkerDataD>();
sortedSphMarkersD = chrono_types::make_shared<SphMarkerDataD>();
sphMarkersH = chrono_types::make_shared<SphMarkerDataH>();
fsiBodiesD1 = chrono_types::make_shared<FsiBodiesDataD>();
fsiBodiesD2 = chrono_types::make_shared<FsiBodiesDataD>();
fsiBodiesH = chrono_types::make_shared<FsiBodiesDataH>();
fsiMeshD = chrono_types::make_shared<FsiMeshDataD>();
fsiMeshH = chrono_types::make_shared<FsiMeshDataH>();
fsiGeneralData = chrono_types::make_shared<FsiGeneralData>();
markersProximityD = chrono_types::make_shared<ProximityDataD>();
}
ChSystemFsi_impl::~ChSystemFsi_impl() {}
void ChSystemFsi_impl::AddSphMarker(Real4 pos, Real4 rhoPresMu, Real3 vel, Real3 tauXxYyZz, Real3 tauXyXzYz) {
sphMarkersH->posRadH.push_back(pos);
sphMarkersH->velMasH.push_back(vel);
sphMarkersH->rhoPresMuH.push_back(rhoPresMu);
sphMarkersH->tauXyXzYzH.push_back(tauXyXzYz);
sphMarkersH->tauXxYyZzH.push_back(tauXxYyZz);
}
void ChSystemFsi_impl::ArrangeDataManager() {
thrust::host_vector<Real4> dummyRhoPresMuH = sphMarkersH->rhoPresMuH;
dummyRhoPresMuH.clear();
}
void ChSystemFsi_impl::InitNumObjects() {
numObjects->numRigidBodies = 0; /* Number of rigid bodies */
    numObjects->numFlexBodies1D = 0;     /* Number of 1D flexible bodies */
    numObjects->numFlexBodies2D = 0;     /* Number of 2D flexible bodies */
numObjects->numFlexNodes = 0; /* Number of FE nodes*/
numObjects->numGhostMarkers = 0;
numObjects->numHelperMarkers = 0;
numObjects->numFluidMarkers = 0; /* Number of fluid SPH markers*/
numObjects->numBoundaryMarkers = 0; /* Number of boundary SPH markers */
numObjects->startRigidMarkers = 0; /* */
numObjects->startFlexMarkers = 0; /* */
numObjects->numRigid_SphMarkers = 0; /* */
numObjects->numFlex_SphMarkers = 0; /* */
numObjects->numAllMarkers = 0; /* Total number of SPH markers */
}
void ChSystemFsi_impl::CalcNumObjects() {
InitNumObjects();
size_t rSize = fsiGeneralData->referenceArray.size();
bool flagRigid = false;
bool flagFlex = false;
std::cout << "ChSystemFsi_impl::CalcNumObjects" << std::endl;
for (size_t i = 0; i < rSize; i++) {
int4 rComp4 = fsiGeneralData->referenceArray[i];
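        // referenceArray entry layout (set in ConstructReferenceArray): x = index of the
        // first marker of the component, y = one past the last index, z = marker type
        // code, w = phase type.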
int numMerkers = rComp4.y - rComp4.x;
switch (rComp4.z) {
case -3:
numObjects->numHelperMarkers += numMerkers;
std::cout << "Added " << numMerkers << " helper particles\n";
break;
case -2:
numObjects->numGhostMarkers += numMerkers;
std::cout << "Added " << numMerkers << " ghost particles\n";
break;
case -1:
numObjects->numFluidMarkers += numMerkers;
std::cout << "Added " << numMerkers << " fluid particles\n";
break;
case 0:
numObjects->numBoundaryMarkers += numMerkers;
std::cout << "Added " << numMerkers << " boundary particles\n";
break;
case 1:
numObjects->numRigid_SphMarkers += numMerkers;
std::cout << "Added " << numMerkers << " rigid particles\n";
numObjects->numRigidBodies++;
flagRigid = true;
break;
case 2:
numObjects->numFlex_SphMarkers += numMerkers;
std::cout << "Added " << numMerkers << " 1D flexible particles\n";
numObjects->numFlexBodies1D++;
flagFlex = true;
break;
case 3:
numObjects->numFlex_SphMarkers += numMerkers;
std::cout << "Added " << numMerkers << " 2D flexible particles\n";
numObjects->numFlexBodies2D++;
flagFlex = true;
break;
default:
std::cout << "Error! particle type not defined! Thrown from CalcNumObjects\n";
break;
}
}
std::cout << "numObjects->numFlexNodes = " << numObjects->numFlexNodes << std::endl;
std::cout << "numObjects->numGhostMarkers = " << numObjects->numGhostMarkers << std::endl;
numObjects->numFluidMarkers += numObjects->numGhostMarkers + numObjects->numHelperMarkers;
numObjects->numAllMarkers = numObjects->numFluidMarkers
+ numObjects->numBoundaryMarkers
+ numObjects->numRigid_SphMarkers
+ numObjects->numFlex_SphMarkers;
numObjects->startRigidMarkers =
(flagRigid) ? (numObjects->numFluidMarkers + numObjects->numBoundaryMarkers)
: numObjects->numAllMarkers;
numObjects->startFlexMarkers =
(flagFlex) ? (numObjects->numFluidMarkers + numObjects->numBoundaryMarkers + numObjects->numRigid_SphMarkers)
: numObjects->numAllMarkers;
printf("Number of Helper particles = %zd\n",numObjects->numHelperMarkers);
printf("Number of Ghost particles = %zd\n",numObjects->numGhostMarkers);
printf("Number of Fluid particles = %zd\n",numObjects->numFluidMarkers);
printf("Number of Boundary particles = %zd\n",numObjects->numBoundaryMarkers);
printf("Number of Rigid particles = %zd\n",numObjects->numRigid_SphMarkers);
printf("Number of Flexible particles = %zd\n",numObjects->numFlex_SphMarkers);
printf("Total number particles = %zd\n",numObjects->numAllMarkers);
printf("Rigid particles start at = %zd\n",numObjects->startRigidMarkers);
printf("Flexible particles start at = %zd\n",numObjects->startFlexMarkers);
}
void ChSystemFsi_impl::ConstructReferenceArray() {
// ArrangeDataManager();
CalcNumObjects();
// determine the number of each component
if (numObjects->numAllMarkers != sphMarkersH->rhoPresMuH.size()) {
printf(
"\nChSystemFsi_impl::ConstructReferenceArray() numObjects->numAllMarkers=%zd, "
"sphMarkersH->rhoPresMuH.size()=%zd\n",
numObjects->numAllMarkers, sphMarkersH->rhoPresMuH.size());
throw std::runtime_error("Error! numObjects wrong! thrown from ConstructReferenceArray !\n");
}
thrust::host_vector<int> numComponentMarkers(numObjects->numAllMarkers);
thrust::fill(numComponentMarkers.begin(), numComponentMarkers.end(), 1);
thrust::host_vector<Real4> dummyRhoPresMuH = sphMarkersH->rhoPresMuH;
thrust::copy(sphMarkersH->rhoPresMuH.begin(), sphMarkersH->rhoPresMuH.end(), dummyRhoPresMuH.begin());
size_t numberOfComponents =
(thrust::reduce_by_key(dummyRhoPresMuH.begin(), dummyRhoPresMuH.end(), numComponentMarkers.begin(),
dummyRhoPresMuH.begin(), numComponentMarkers.begin(), sphTypeCompEqual()))
.first -
dummyRhoPresMuH.begin();
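    // reduce_by_key with sphTypeCompEqual collapses runs of consecutive markers sharing
    // the same type code (rhoPresMu.w), so the iterator distance computed above equals
    // the number of contiguous components.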
printf("Number of particle types = %zd\n", numberOfComponents);
fsiGeneralData->referenceArray.resize(numberOfComponents);
dummyRhoPresMuH.resize(numberOfComponents);
numComponentMarkers.resize(numberOfComponents);
int savedNumber = 0;
for (size_t i = 0; i < numberOfComponents; i++) {
int compType = (int)std::floor(dummyRhoPresMuH[i].w + .1);
int phaseType = -1;
if (compType <= -2) {
phaseType = -1;
} else if (compType == -1) {
phaseType = -1;
} else if (compType == 0) {
phaseType = 0;
} else if (compType == 1) {
phaseType = 1;
} else if (compType == 2) {
phaseType = 1; // For Cable Elements
} else if (compType == 3) {
phaseType = 2; // For Shell Elements
} else {
phaseType = 1;
}
fsiGeneralData->referenceArray[i] = mI4(savedNumber, savedNumber + numComponentMarkers[i], compType, phaseType);
savedNumber += numComponentMarkers[i];
}
dummyRhoPresMuH.clear();
numComponentMarkers.clear();
printf("Reference array \n");
for (size_t i = 0; i < fsiGeneralData->referenceArray.size(); i++) {
int4 num = fsiGeneralData->referenceArray[i];
printf("%d %d %d %d \n", num.x, num.y, num.z, num.w);
}
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChSystemFsi_impl::ResizeDataManager(int numNodes) {
ConstructReferenceArray();
if (numObjects->numAllMarkers != sphMarkersH->rhoPresMuH.size()) {
throw std::runtime_error("Error! numObjects wrong! thrown from FinalizeDataManager !\n");
}
numObjects->numFlexNodes = numNodes;
sphMarkersD1->resize(numObjects->numAllMarkers);
sphMarkersD2->resize(numObjects->numAllMarkers);
sortedSphMarkersD->resize(numObjects->numAllMarkers);
sphMarkersH->resize(numObjects->numAllMarkers);
markersProximityD->resize(numObjects->numAllMarkers);
fsiGeneralData->derivVelRhoD.resize(numObjects->numAllMarkers);
fsiGeneralData->derivVelRhoD_old.resize(numObjects->numAllMarkers);
fsiGeneralData->derivTauXxYyZzD.resize(numObjects->numAllMarkers);
fsiGeneralData->derivTauXyXzYzD.resize(numObjects->numAllMarkers);
fsiGeneralData->vel_XSPH_D.resize(numObjects->numAllMarkers);
fsiGeneralData->vis_vel_SPH_D.resize(numObjects->numAllMarkers, mR3(1e-20));
fsiGeneralData->sr_tau_I_mu_i.resize(numObjects->numAllMarkers, mR4(1e-20));
printf("fsiData->ResizeDataManager (sphMarkersH)...\n");
// Arman: implement this in one shot function in class
thrust::copy(sphMarkersH->posRadH.begin(), sphMarkersH->posRadH.end(), sphMarkersD1->posRadD.begin());
thrust::copy(sphMarkersH->velMasH.begin(), sphMarkersH->velMasH.end(), sphMarkersD1->velMasD.begin());
thrust::copy(sphMarkersH->rhoPresMuH.begin(), sphMarkersH->rhoPresMuH.end(), sphMarkersD1->rhoPresMuD.begin());
thrust::copy(sphMarkersH->tauXxYyZzH.begin(), sphMarkersH->tauXxYyZzH.end(), sphMarkersD1->tauXxYyZzD.begin());
thrust::copy(sphMarkersH->tauXyXzYzH.begin(), sphMarkersH->tauXyXzYzH.end(), sphMarkersD1->tauXyXzYzD.begin());
printf("fsiData->ResizeDataManager (sphMarkersD)...\n");
thrust::copy(sphMarkersD1->posRadD.begin(), sphMarkersD1->posRadD.end(), sphMarkersD2->posRadD.begin());
thrust::copy(sphMarkersD1->velMasD.begin(), sphMarkersD1->velMasD.end(), sphMarkersD2->velMasD.begin());
thrust::copy(sphMarkersD1->rhoPresMuD.begin(), sphMarkersD1->rhoPresMuD.end(), sphMarkersD2->rhoPresMuD.begin());
thrust::copy(sphMarkersD1->tauXxYyZzD.begin(), sphMarkersD1->tauXxYyZzD.end(), sphMarkersD2->tauXxYyZzD.begin());
thrust::copy(sphMarkersD1->tauXyXzYzD.begin(), sphMarkersD1->tauXyXzYzD.end(), sphMarkersD2->tauXyXzYzD.begin());
printf("fsiData->ResizeDataManager (Rigid)...\n");
// copy rigids
fsiBodiesD1->resize(numObjects->numRigidBodies);
fsiBodiesD2->resize(numObjects->numRigidBodies);
fsiBodiesH->resize(numObjects->numRigidBodies);
fsiGeneralData->rigid_FSI_ForcesD.resize(numObjects->numRigidBodies);
fsiGeneralData->rigid_FSI_TorquesD.resize(numObjects->numRigidBodies);
fsiGeneralData->rigidIdentifierD.resize(numObjects->numRigid_SphMarkers);
fsiGeneralData->rigidSPH_MeshPos_LRF_D.resize(numObjects->numRigid_SphMarkers);
fsiGeneralData->FlexSPH_MeshPos_LRF_D.resize(numObjects->numFlex_SphMarkers);
fsiGeneralData->FlexSPH_MeshPos_LRF_H.resize(numObjects->numFlex_SphMarkers);
printf("fsiData->ResizeDataManager (Flex)...\n");
fsiGeneralData->FlexIdentifierD.resize(numObjects->numFlex_SphMarkers);
if (fsiGeneralData->CableElementsNodesH.size() != numObjects->numFlexBodies1D) {
printf("******************************************************************************\n");
printf("******************************************************************************\n");
printf("******************************Be Careful**************************************\n");
printf("There might be 1D Flexible bodies in Chrono that are not a part of ChSystemFSI\n");
printf("I am going to transfer nodal data for such elements back and forth although they\n");
printf("are not part of FSI calculation. If you want to have some 1D element that are \n");
printf("inside the ChSystem mesh but not FSI system, you can ignore this warning ...\n");
printf("******************************************************************************\n");
printf("******************************************************************************\n");
printf("******************************************************************************\n");
fsiGeneralData->CableElementsNodes.resize(fsiGeneralData->CableElementsNodesH.size());
} else
fsiGeneralData->CableElementsNodes.resize(numObjects->numFlexBodies1D);
fsiGeneralData->ShellElementsNodes.resize(numObjects->numFlexBodies2D);
printf("numObjects->numFlexBodies1D = %zd, numObjects->numFlexBodies2D = %zd\n",
numObjects->numFlexBodies1D, numObjects->numFlexBodies2D);
printf("fsiGeneralData->CableElementsNodesH.size() = %zd\n", fsiGeneralData->CableElementsNodesH.size());
printf("fsiGeneralData->ShellElementsNodesH.size() = %zd\n", fsiGeneralData->ShellElementsNodesH.size());
thrust::copy(fsiGeneralData->CableElementsNodesH.begin(), fsiGeneralData->CableElementsNodesH.end(),
fsiGeneralData->CableElementsNodes.begin());
thrust::copy(fsiGeneralData->ShellElementsNodesH.begin(), fsiGeneralData->ShellElementsNodesH.end(),
fsiGeneralData->ShellElementsNodes.begin());
fsiMeshD->resize(numObjects->numFlexNodes);
fsiMeshH->resize(numObjects->numFlexNodes);
fsiGeneralData->Flex_FSI_ForcesD.resize(numObjects->numFlexNodes);
}
} // end namespace fsi
} // end namespace chrono
|
dbf515e3c9beea90dc9c0a25678b08970286b9d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda_runtime.h>
#include<stdlib.h>
__global__ void reduce_kernel(float *d_out,float *d_in)
{
//Size of shared memory is set by third parameter of kernel launch
extern __shared__ float shared_array[];
int globalThreadId = threadIdx.x + blockDim.x*blockIdx.x;
int blockThreadId = threadIdx.x;
shared_array[blockThreadId] = d_in[globalThreadId];
int s;
//Sync to ensure full shared_array is loaded
__syncthreads();
//Actual reduction operation
for(s = blockDim.x/2;s>0;s/=2)
{
if(blockThreadId<s)
shared_array[blockThreadId]+=shared_array[blockThreadId+s];
__syncthreads();
}
//Output of reduction is written to first index of global memory
if(blockThreadId==0)
d_out[blockIdx.x] = shared_array[0];
}
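//Two-stage sum: the first launch reduces each 256-element block into one partial sum,
//the second launch reduces the per-block partial sums; assumes array_size is a multiple
//of 256 and that the resulting block count is a power of two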
void reduce(float *d_in,float *d_intermediate,float *d_out,int array_size)
{
int threads = 256;
int blocks = array_size/threads;
hipLaunchKernelGGL(( reduce_kernel), dim3(blocks),dim3(threads),threads*sizeof(float), 0, d_intermediate,d_in);
//Results of all blocks are stored in one block, which has to be reduced
threads = blocks;
blocks = 1;
hipLaunchKernelGGL(( reduce_kernel), dim3(blocks),dim3(threads),threads*sizeof(float), 0, d_out,d_intermediate);
}
int main()
{
int deviceCount;
hipGetDeviceCount(&deviceCount);
if(!deviceCount){
fprintf(stderr,"No devices supporting cuda\n");
exit(EXIT_FAILURE);
}
int deviceId = 0;
hipSetDevice(deviceId);
const int ARRAY_SIZE = 1024;
const int ARRAY_BYTES = ARRAY_SIZE*sizeof(float);
float h_in[ARRAY_SIZE];
int i;
for(i=0;i<ARRAY_SIZE;i++)
h_in[i]=i;
float *d_in,*d_out,*d_intermediate;
hipMalloc((void**)&d_in,ARRAY_BYTES);
hipMalloc((void**)&d_intermediate,ARRAY_BYTES);
hipMalloc((void**)&d_out,sizeof(float));
hipMemcpy(d_in,h_in,ARRAY_BYTES,hipMemcpyHostToDevice);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
//Launch the reduce kernel
hipEventRecord(start,0);
reduce(d_in,d_intermediate,d_out,ARRAY_SIZE);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,start,stop);
float h_out;
hipMemcpy(&h_out,d_out,sizeof(float),hipMemcpyDeviceToHost);
printf("Sum of all array elements is %f\nElapsed time is %f\n",h_out,elapsedTime);
hipFree(d_in);
hipFree(d_intermediate);
hipFree(d_out);
return 0;
} | dbf515e3c9beea90dc9c0a25678b08970286b9d6.cu | #include<stdio.h>
#include<cuda_runtime.h>
#include<stdlib.h>
__global__ void reduce_kernel(float *d_out,float *d_in)
{
//Size of shared memory is set by third parameter of kernel launch
extern __shared__ float shared_array[];
int globalThreadId = threadIdx.x + blockDim.x*blockIdx.x;
int blockThreadId = threadIdx.x;
shared_array[blockThreadId] = d_in[globalThreadId];
int s;
//Sync to ensure full shared_array is loaded
__syncthreads();
//Actual reduction operation
for(s = blockDim.x/2;s>0;s/=2)
{
if(blockThreadId<s)
shared_array[blockThreadId]+=shared_array[blockThreadId+s];
__syncthreads();
}
//Output of reduction is written to first index of global memory
if(blockThreadId==0)
d_out[blockIdx.x] = shared_array[0];
}
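//Two-stage sum: the first launch reduces each 256-element block into one partial sum,
//the second launch reduces the per-block partial sums; assumes array_size is a multiple
//of 256 and that the resulting block count is a power of two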
void reduce(float *d_in,float *d_intermediate,float *d_out,int array_size)
{
int threads = 256;
int blocks = array_size/threads;
reduce_kernel<<<blocks,threads,threads*sizeof(float)>>>(d_intermediate,d_in);
//Results of all blocks are stored in one block, which has to be reduced
threads = blocks;
blocks = 1;
reduce_kernel<<<blocks,threads,threads*sizeof(float)>>>(d_out,d_intermediate);
}
int main()
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if(!deviceCount){
fprintf(stderr,"No devices supporting cuda\n");
exit(EXIT_FAILURE);
}
int deviceId = 0;
cudaSetDevice(deviceId);
const int ARRAY_SIZE = 1024;
const int ARRAY_BYTES = ARRAY_SIZE*sizeof(float);
float h_in[ARRAY_SIZE];
int i;
for(i=0;i<ARRAY_SIZE;i++)
h_in[i]=i;
float *d_in,*d_out,*d_intermediate;
cudaMalloc((void**)&d_in,ARRAY_BYTES);
cudaMalloc((void**)&d_intermediate,ARRAY_BYTES);
cudaMalloc((void**)&d_out,sizeof(float));
cudaMemcpy(d_in,h_in,ARRAY_BYTES,cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//Launch the reduce kernel
cudaEventRecord(start,0);
reduce(d_in,d_intermediate,d_out,ARRAY_SIZE);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,start,stop);
float h_out;
cudaMemcpy(&h_out,d_out,sizeof(float),cudaMemcpyDeviceToHost);
printf("Sum of all array elements is %f\nElapsed time is %f\n",h_out,elapsedTime);
cudaFree(d_in);
cudaFree(d_intermediate);
cudaFree(d_out);
return 0;
} |
f11865cd93ed3f4e7fbcca1e0070a5f80c1aafa9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define N 1024
__global__ void saxpy(float *d_x, float *d_y){
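	// each thread updates one element: y[i] = 2*x[i] + y[i]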
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) d_y[tid] = d_x[tid] * 2.0f + d_y[tid];
}
int main(){
float *h_y, *h_x;
float *d_y, *d_x;
int memSize = sizeof(float) * N;
h_y = (float*) malloc(memSize);
h_x = (float*) malloc(memSize);
hipMalloc((void**)&d_x, memSize);
hipMalloc((void**)&d_y, memSize);
for (int i = 0; i < N; ++i) {
h_x[i] = h_y[i] = 1.0f;
}
hipMemcpy(d_x, h_x, memSize, hipMemcpyHostToDevice);
hipMemcpy(d_y, h_y, memSize, hipMemcpyHostToDevice);
dim3 block(N / 256);
dim3 thread(256);
hipLaunchKernelGGL(( saxpy), dim3(block), dim3(thread) , 0, 0, d_x, d_y);
hipMemcpy(h_x, d_x, memSize, hipMemcpyDeviceToHost);
hipMemcpy(h_y, d_y, memSize, hipMemcpyDeviceToHost);
for (int i = 0; i < N; ++i) {
printf("%f\n", h_y[i]);
}
free(h_y);
free(h_x);
hipFree(d_x);
hipFree(d_y);
} | f11865cd93ed3f4e7fbcca1e0070a5f80c1aafa9.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define N 1024
__global__ void saxpy(float *d_x, float *d_y){
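	// each thread updates one element: y[i] = 2*x[i] + y[i]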
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) d_y[tid] = d_x[tid] * 2.0f + d_y[tid];
}
int main(){
float *h_y, *h_x;
float *d_y, *d_x;
int memSize = sizeof(float) * N;
h_y = (float*) malloc(memSize);
h_x = (float*) malloc(memSize);
cudaMalloc((void**)&d_x, memSize);
cudaMalloc((void**)&d_y, memSize);
for (int i = 0; i < N; ++i) {
h_x[i] = h_y[i] = 1.0f;
}
cudaMemcpy(d_x, h_x, memSize, cudaMemcpyHostToDevice);
cudaMemcpy(d_y, h_y, memSize, cudaMemcpyHostToDevice);
dim3 block(N / 256);
dim3 thread(256);
saxpy<<< block, thread >>>(d_x, d_y);
cudaMemcpy(h_x, d_x, memSize, cudaMemcpyDeviceToHost);
cudaMemcpy(h_y, d_y, memSize, cudaMemcpyDeviceToHost);
for (int i = 0; i < N; ++i) {
printf("%f\n", h_y[i]);
}
free(h_y);
free(h_x);
cudaFree(d_x);
cudaFree(d_y);
} |
71fe22e7e212990b09b87111fa0ef96bbcab857c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#define N 4
#define T 2
__global__ void sum_matrix(int** mat1, int** ddmat1, int** mat2, int** ddmat2, int** mat3, int** ddmat3, int n, int m){
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
	//element-wise sum; skip threads that fall outside the n x m matrix
	if(x < m && y < n)
		mat3[y][x] = mat1[y][x] + mat2[y][x];
}
void create(int**&mat,int n){
mat = (int **)malloc(sizeof(int*)*n);
int i;
for(i=0;i<n;i++){
mat[i] = (int*)malloc(sizeof(int)*n);
}
}
void create2(int** & mat,int n, int m){
mat = (int** )malloc(sizeof(int*)*n);
mat[0] = (int* )malloc(sizeof(int)*n*m);
int i;
for(i=0;i<n;i++){
mat[i] = (*mat+i*m);
}
}
void fill(int** mat,int n){
int i,j;
for(i=0;i<n;i++){
for(j=0;j<n;j++)
mat[i][j] = rand()%10;
}
}
void fill_zero(int** mat,int n, int value=0){
int i,j;
for(i=0;i<n;i++)
for(j=0;j<n;j++)
mat[i][j] = value;
}
void print(int** mat,int n){
int i,j;
for(i=0;i<n;i++){
for(j=0;j<n;j++)
printf("%d",mat[i][j]);
printf("\n");
}
}
/*
void create_matrix(int**&mat, int**&h_mat, int**&d_mat, int n, int m){
int size_n=sizeof(int)*n;
int size_m=sizeof(int)*m;
h_mat = (int**)malloc(size_n);
int i;
for(i=0;i<n;i++){
printf(">>>>>\n");
hipMalloc((void**)& h_mat[i],size_n);
hipMemcpy(h_mat[i],&mat[i][0],size_m,hipMemcpyHostToDevice);
}
hipMalloc((void*** )& d_mat,size_n);
hipMemcpy(d_mat,h_mat,size_n,hipMemcpyHostToDevice);
}*/
int main(){
int n = N;
int m = N;
int** mat_a;
create(mat_a,n);
fill(mat_a,n);
print(mat_a,n);
printf("//////////////////\n");
int** mat_b;
create(mat_b,n);
fill(mat_b,n);
print(mat_b,n);
printf("//////////////////\n");
int** mat_c;
create(mat_c,n);
fill_zero(mat_c,n,-1);
print(mat_c,n);
printf("//////////////////\n");
int ** h_mat_a; int ** d_mat_a; int ** dd_mat_a;
int ** h_mat_b; int ** d_mat_b; int ** dd_mat_b;
int ** h_mat_c; int ** d_mat_c; int ** dd_mat_c;
int i;
///////////////////////
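	//h_mat_a holds the device row pointers on the host; d_mat_a and dd_mat_a are device
	//copies of that pointer array, so the kernel can index mat[i][j] directly
	//(the same layout is built for mat_b and mat_c below)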
h_mat_a = (int** )malloc(sizeof(int*)*n);
for(i=0;i<n;i++){
printf(">>>>>\n");
hipMalloc((void** )& h_mat_a[i], sizeof(int)*m);
hipMemcpy(h_mat_a[i],&mat_a[i][0],sizeof(int)*m,hipMemcpyHostToDevice);
}
hipMalloc((void*** )& d_mat_a,sizeof(int*)*n);
	hipMemcpy(d_mat_a,h_mat_a,sizeof(int*)*n,hipMemcpyHostToDevice);
hipMalloc((void*** )& dd_mat_a,sizeof(int*)*n);
	hipMemcpy(dd_mat_a,h_mat_a,sizeof(int*)*n,hipMemcpyHostToDevice);
///
h_mat_b = (int** )malloc(sizeof(int*)*n);
for(i=0;i<n;i++){
printf(">>>>>\n");
hipMalloc((void** )& h_mat_b[i], sizeof(int)*m);
hipMemcpy(h_mat_b[i],&mat_b[i][0],sizeof(int)*m,hipMemcpyHostToDevice);
}
hipMalloc((void*** )& d_mat_b,sizeof(int*)*n);
	hipMemcpy(d_mat_b,h_mat_b,sizeof(int*)*n,hipMemcpyHostToDevice);
hipMalloc((void*** )& dd_mat_b,sizeof(int*)*n);
	hipMemcpy(dd_mat_b,h_mat_b,sizeof(int*)*n,hipMemcpyHostToDevice);
///
h_mat_c = (int** )malloc(sizeof(int*)*n);
for(i=0;i<n;i++){
printf(">>>>>\n");
hipMalloc((void** )& h_mat_c[i], sizeof(int)*m);
hipMemcpy(h_mat_c[i],&mat_c[i][0],sizeof(int)*m,hipMemcpyHostToDevice);
}
hipMalloc((void*** )& d_mat_c,sizeof(int*)*n);
	hipMemcpy(d_mat_c,h_mat_c,sizeof(int*)*n,hipMemcpyHostToDevice);
hipMalloc((void*** )& dd_mat_c,sizeof(int*)*n);
	hipMemcpy(dd_mat_c,h_mat_c,sizeof(int*)*n,hipMemcpyHostToDevice);
////////////////77
//create_matrix(mat_a,h_mat_a,d_mat_a,n,m);
//create_matrix(mat_b,h_mat_b,d_mat_b,n,m);
//create_matrix(mat_c,h_mat_c,d_mat_c,n,m);
dim3 grid(ceil(N/T),ceil(N/T),1);
dim3 blockNum(T,T,1);
//int size = sizeof(int)*n*n;
hipLaunchKernelGGL(( sum_matrix), dim3(grid),dim3(blockNum), 0, 0, d_mat_a,dd_mat_a,d_mat_b,dd_mat_b,d_mat_c,dd_mat_c,n,m);
for(i=0;i<n;i++){
hipMemcpy(&mat_c[i][0],h_mat_c[i],sizeof(int)*m,hipMemcpyDeviceToHost);
}
printf("///////CCCCCC///////////\n");
print(mat_c,n);
return 0;
} | 71fe22e7e212990b09b87111fa0ef96bbcab857c.cu | #include <stdlib.h>
#include <stdio.h>
#define N 4
#define T 2
__global__ void sum_matrix(int** mat1, int** ddmat1, int** mat2, int** ddmat2, int** mat3, int** ddmat3, int n, int m){
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
	//element-wise sum; skip threads that fall outside the n x m matrix
	if(x < m && y < n)
		mat3[y][x] = mat1[y][x] + mat2[y][x];
}
void create(int**&mat,int n){
mat = (int **)malloc(sizeof(int*)*n);
int i;
for(i=0;i<n;i++){
mat[i] = (int*)malloc(sizeof(int)*n);
}
}
void create2(int** & mat,int n, int m){
mat = (int** )malloc(sizeof(int*)*n);
mat[0] = (int* )malloc(sizeof(int)*n*m);
int i;
for(i=0;i<n;i++){
mat[i] = (*mat+i*m);
}
}
void fill(int** mat,int n){
int i,j;
for(i=0;i<n;i++){
for(j=0;j<n;j++)
mat[i][j] = rand()%10;
}
}
void fill_zero(int** mat,int n, int value=0){
int i,j;
for(i=0;i<n;i++)
for(j=0;j<n;j++)
mat[i][j] = value;
}
void print(int** mat,int n){
int i,j;
for(i=0;i<n;i++){
for(j=0;j<n;j++)
printf("%d",mat[i][j]);
printf("\n");
}
}
/*
void create_matrix(int**&mat, int**&h_mat, int**&d_mat, int n, int m){
int size_n=sizeof(int)*n;
int size_m=sizeof(int)*m;
h_mat = (int**)malloc(size_n);
int i;
for(i=0;i<n;i++){
printf(">>>>>\n");
cudaMalloc((void**)& h_mat[i],size_n);
cudaMemcpy(h_mat[i],&mat[i][0],size_m,cudaMemcpyHostToDevice);
}
cudaMalloc((void*** )& d_mat,size_n);
cudaMemcpy(d_mat,h_mat,size_n,cudaMemcpyHostToDevice);
}*/
int main(){
int n = N;
int m = N;
int** mat_a;
create(mat_a,n);
fill(mat_a,n);
print(mat_a,n);
printf("//////////////////\n");
int** mat_b;
create(mat_b,n);
fill(mat_b,n);
print(mat_b,n);
printf("//////////////////\n");
int** mat_c;
create(mat_c,n);
fill_zero(mat_c,n,-1);
print(mat_c,n);
printf("//////////////////\n");
int ** h_mat_a; int ** d_mat_a; int ** dd_mat_a;
int ** h_mat_b; int ** d_mat_b; int ** dd_mat_b;
int ** h_mat_c; int ** d_mat_c; int ** dd_mat_c;
int i;
///////////////////////
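	//h_mat_a holds the device row pointers on the host; d_mat_a and dd_mat_a are device
	//copies of that pointer array, so the kernel can index mat[i][j] directly
	//(the same layout is built for mat_b and mat_c below)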
h_mat_a = (int** )malloc(sizeof(int*)*n);
for(i=0;i<n;i++){
printf(">>>>>\n");
cudaMalloc((void** )& h_mat_a[i], sizeof(int)*m);
cudaMemcpy(h_mat_a[i],&mat_a[i][0],sizeof(int)*m,cudaMemcpyHostToDevice);
}
cudaMalloc((void*** )& d_mat_a,sizeof(int*)*n);
	cudaMemcpy(d_mat_a,h_mat_a,sizeof(int*)*n,cudaMemcpyHostToDevice);
cudaMalloc((void*** )& dd_mat_a,sizeof(int*)*n);
	cudaMemcpy(dd_mat_a,h_mat_a,sizeof(int*)*n,cudaMemcpyHostToDevice);
///
h_mat_b = (int** )malloc(sizeof(int*)*n);
for(i=0;i<n;i++){
printf(">>>>>\n");
cudaMalloc((void** )& h_mat_b[i], sizeof(int)*m);
cudaMemcpy(h_mat_b[i],&mat_b[i][0],sizeof(int)*m,cudaMemcpyHostToDevice);
}
cudaMalloc((void*** )& d_mat_b,sizeof(int*)*n);
	cudaMemcpy(d_mat_b,h_mat_b,sizeof(int*)*n,cudaMemcpyHostToDevice);
cudaMalloc((void*** )& dd_mat_b,sizeof(int*)*n);
	cudaMemcpy(dd_mat_b,h_mat_b,sizeof(int*)*n,cudaMemcpyHostToDevice);
///
h_mat_c = (int** )malloc(sizeof(int*)*n);
for(i=0;i<n;i++){
printf(">>>>>\n");
cudaMalloc((void** )& h_mat_c[i], sizeof(int)*m);
cudaMemcpy(h_mat_c[i],&mat_c[i][0],sizeof(int)*m,cudaMemcpyHostToDevice);
}
cudaMalloc((void*** )& d_mat_c,sizeof(int*)*n);
	cudaMemcpy(d_mat_c,h_mat_c,sizeof(int*)*n,cudaMemcpyHostToDevice);
cudaMalloc((void*** )& dd_mat_c,sizeof(int*)*n);
	cudaMemcpy(dd_mat_c,h_mat_c,sizeof(int*)*n,cudaMemcpyHostToDevice);
////////////////77
//create_matrix(mat_a,h_mat_a,d_mat_a,n,m);
//create_matrix(mat_b,h_mat_b,d_mat_b,n,m);
//create_matrix(mat_c,h_mat_c,d_mat_c,n,m);
dim3 grid(ceil(N/T),ceil(N/T),1);
dim3 blockNum(T,T,1);
//int size = sizeof(int)*n*n;
sum_matrix<<<grid,blockNum>>>(d_mat_a,dd_mat_a,d_mat_b,dd_mat_b,d_mat_c,dd_mat_c,n,m);
for(i=0;i<n;i++){
cudaMemcpy(&mat_c[i][0],h_mat_c[i],sizeof(int)*m,cudaMemcpyDeviceToHost);
}
printf("///////CCCCCC///////////\n");
print(mat_c,n);
return 0;
} |
7506f5a1035f360ed3c8026dd31de5a2533b2afa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <ctime>
#define N 50000
const int threads_per_block = 256;
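// each thread accumulates a partial dot product over a grid-stride loop and stores it in shared memory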
__global__
void dot_gpu(float *a, float *b, float *c) {
__shared__
float cache[threads_per_block];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
float temp = 0;
while (tid < N) {
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
  cache[cacheIndex] = temp;
  __syncthreads();
  // tree-reduce the per-thread partial sums within the block (blockDim.x must be a power of two)
  for (int stride = blockDim.x / 2; stride > 0; stride /= 2) {
    if (cacheIndex < stride)
      cache[cacheIndex] += cache[cacheIndex + stride];
    __syncthreads();
  }
  // thread 0 writes this block's partial dot product
  if (cacheIndex == 0)
    c[blockIdx.x] = cache[0];
}
int main(int argc, char *argv[]) {
std::clock_t start_t;
double duration;
start_t = std::clock();
int a[N];
int b[N];
int c[N];
int *dev_a;
int *dev_b;
int *dev_c;
// gpu timer
hipEvent_t start;
hipEventCreate(&start);
hipEventRecord(start, 0);
// allocate memory on GPU
hipMalloc((void**)&dev_a, N * sizeof(int));
hipMalloc((void**)&dev_b, N * sizeof(int));
hipMalloc((void**)&dev_c, N * sizeof(int));
for (int i = 0; i < N; i++) {
a[i] = -1;
b[i] = i * i;
}
// copy 2 arrays to device memory
  hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice);
  hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice);
// copy from device to host
  hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost);
// for (int i = 0; i < N; i++) {
// std::cout << a[i] << " + " << b[i] << " = " << c[i] << "\n";
// }
// add_cpu(a, b, c);
hipEvent_t stop;
hipEventCreate(&stop);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float dev_time;
hipEventElapsedTime(&dev_time, start, stop);
std::cout << "Time: " << dev_time << "\n";
std::cout << "DONE" << "\n";
duration = ( std::clock() - start_t ) / (double) CLOCKS_PER_SEC;
std::cout<<"printf: "<< duration <<'\n';
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
| 7506f5a1035f360ed3c8026dd31de5a2533b2afa.cu |
#include <iostream>
#include <ctime>
#define N 50000
const int threads_per_block = 256;
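// each thread accumulates a partial dot product over a grid-stride loop and stores it in shared memory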
__global__
void dot_gpu(float *a, float *b, float *c) {
__shared__
float cache[threads_per_block];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
float temp = 0;
while (tid < N) {
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
  cache[cacheIndex] = temp;
  __syncthreads();
  // tree-reduce the per-thread partial sums within the block (blockDim.x must be a power of two)
  for (int stride = blockDim.x / 2; stride > 0; stride /= 2) {
    if (cacheIndex < stride)
      cache[cacheIndex] += cache[cacheIndex + stride];
    __syncthreads();
  }
  // thread 0 writes this block's partial dot product
  if (cacheIndex == 0)
    c[blockIdx.x] = cache[0];
}
int main(int argc, char *argv[]) {
std::clock_t start_t;
double duration;
start_t = std::clock();
int a[N];
int b[N];
int c[N];
int *dev_a;
int *dev_b;
int *dev_c;
// gpu timer
cudaEvent_t start;
cudaEventCreate(&start);
cudaEventRecord(start, 0);
// allocate memory on GPU
cudaMalloc((void**)&dev_a, N * sizeof(int));
cudaMalloc((void**)&dev_b, N * sizeof(int));
cudaMalloc((void**)&dev_c, N * sizeof(int));
for (int i = 0; i < N; i++) {
a[i] = -1;
b[i] = i * i;
}
// copy 2 arrays to device memory
  cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
// copy from device to host
  cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);
// for (int i = 0; i < N; i++) {
// std::cout << a[i] << " + " << b[i] << " = " << c[i] << "\n";
// }
// add_cpu(a, b, c);
cudaEvent_t stop;
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float dev_time;
cudaEventElapsedTime(&dev_time, start, stop);
std::cout << "Time: " << dev_time << "\n";
std::cout << "DONE" << "\n";
duration = ( std::clock() - start_t ) / (double) CLOCKS_PER_SEC;
std::cout<<"printf: "<< duration <<'\n';
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
90e6589657e6827f0c59d165e91cac05698b6e29.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
#include<stdio.h>
#include<conio.h>
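//modular exponentiation: returns (base^exponent) mod den by multiplying by (base%den)^2
//for every two in the exponent, plus one extra factor of base%den when the exponent is odd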
__device__ long long int mod(int base,int exponent,int den)
{
unsigned int a=(base%den)*(base%den);
unsigned long long int ret=1;
float size=(float)exponent/2;
if(exponent==0)
{
return base%den;
}
else
{
while(1)
{
if(size>0.5)
{
ret=(ret*a)%den;
size=size-1.0;
}
else if(size==0.5)
{
ret=(ret*(base%den))%den;
break;
}
else
{
break;
}
}
return ret;
}
}
__global__ void rsa(int * num,int *key,int *den,unsigned int * result)
{
int i=threadIdx.x;
int temp;
if(i<3)
{
temp=mod(num[i],*key,*den);
atomicExch(&result[i],temp);
}
}
#if 0	//sdkFindFilePath and initializeData are not declared anywhere in this file, so this unused helper is compiled out
void loadDefaultImage(char *loc_exec)
{
printf("Reading image: lena.pgm\n");
const char *image_filename = "lena.pgm";
char *image_path = sdkFindFilePath(image_filename, loc_exec);
if (image_path == NULL)
{
printf("Failed to read image file: <%s>\n", image_filename);
exit(EXIT_FAILURE);
}
initializeData(image_path);
free(image_path);
}
#endif
int main()
{
int num[3]={16,5,4};
int key=5;
int den=35;
int devcount;
hipGetDeviceCount(&devcount);
printf("%d CUDA devices found",devcount);
if(devcount>0)
{
hipSetDevice(1);
printf("\nEnter the 8 digit word:");
for(int i=0;i<3;i++)
{
printf("\n.");
scanf("%d",&num[i]);
}
printf("\nEnter key parameter 1:");
scanf("%d",&key);
printf("\nEnter key parameter 2:");
scanf("%d",&den);
int *dev_num,*dev_key,*dev_den;
unsigned int *dev_res;
unsigned int res[3]={1,1,1};
dim3 grid(1,1,1);
dim3 block(3,3,1);
hipMalloc( (void **)&dev_num, 3*sizeof(int));
hipMalloc( (void **)&dev_key,sizeof(int));
hipMalloc( (void **)&dev_den, sizeof(int));
hipMalloc( (void **)&dev_res, 3*sizeof(unsigned int));
#if 0	//this block references imageScale, g_SobelDisplayMode, temp and filterMode, none of which are declared in this file, so it is compiled out
	switch (key)
{
case 27:
case 'q':
case 'Q':
printf("Shutting down...\n");
exit(EXIT_SUCCESS);
break;
case '-':
imageScale -= 0.1f;
printf("brightness = %4.2f\n", imageScale);
break;
case '=':
imageScale += 0.1f;
printf("brightness = %4.2f\n", imageScale);
break;
case 'i':
case 'I':
g_SobelDisplayMode = SOBELDISPLAY_IMAGE;
sprintf(temp, "CUDA Edge Detection (%s)", filterMode[g_SobelDisplayMode]);
glutSetWindowTitle(temp);
break;
case 's':
case 'S':
g_SobelDisplayMode = SOBELDISPLAY_SOBELSHARED;
sprintf(temp, "CUDA Edge Detection (%s)", filterMode[g_SobelDisplayMode]);
glutSetWindowTitle(temp);
break;
case 't':
case 'T':
g_SobelDisplayMode = SOBELDISPLAY_SOBELTEX;
sprintf(temp, "CUDA Edge Detection (%s)", filterMode[g_SobelDisplayMode]);
glutSetWindowTitle(temp);
break;
default:
break;
	}
#endif
hipMemcpy(dev_num,num,3*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(dev_key,&key,sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(dev_den,&den,sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(dev_res,res,3*sizeof(unsigned int),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( rsa), dim3(grid),dim3(block), 0, 0, dev_num,dev_key,dev_den,dev_res);
hipMemcpy(res,dev_res,3*sizeof(unsigned int),hipMemcpyDeviceToHost);
hipFree(dev_num);
hipFree(dev_key);
hipFree(dev_den);
hipFree(dev_res);
for(int i=0;i<3;i++)
{
printf("\n%d",res[i]);
}
}
getch();
return 0;
}
| 90e6589657e6827f0c59d165e91cac05698b6e29.cu | #include<cuda.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
#include<stdio.h>
#include<conio.h>
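//modular exponentiation: returns (base^exponent) mod den by multiplying by (base%den)^2
//for every two in the exponent, plus one extra factor of base%den when the exponent is odd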
__device__ long long int mod(int base,int exponent,int den)
{
unsigned int a=(base%den)*(base%den);
unsigned long long int ret=1;
float size=(float)exponent/2;
if(exponent==0)
{
return base%den;
}
else
{
while(1)
{
if(size>0.5)
{
ret=(ret*a)%den;
size=size-1.0;
}
else if(size==0.5)
{
ret=(ret*(base%den))%den;
break;
}
else
{
break;
}
}
return ret;
}
}
__global__ void rsa(int * num,int *key,int *den,unsigned int * result)
{
int i=threadIdx.x;
int temp;
if(i<3)
{
temp=mod(num[i],*key,*den);
atomicExch(&result[i],temp);
}
}
#if 0	//sdkFindFilePath and initializeData are not declared anywhere in this file, so this unused helper is compiled out
void loadDefaultImage(char *loc_exec)
{
printf("Reading image: lena.pgm\n");
const char *image_filename = "lena.pgm";
char *image_path = sdkFindFilePath(image_filename, loc_exec);
if (image_path == NULL)
{
printf("Failed to read image file: <%s>\n", image_filename);
exit(EXIT_FAILURE);
}
initializeData(image_path);
free(image_path);
}
#endif
int main()
{
int num[3]={16,5,4};
int key=5;
int den=35;
int devcount;
cudaGetDeviceCount(&devcount);
printf("%d CUDA devices found",devcount);
if(devcount>0)
{
cudaSetDevice(1);
printf("\nEnter the 8 digit word:");
for(int i=0;i<3;i++)
{
printf("\n.");
scanf("%d",&num[i]);
}
printf("\nEnter key parameter 1:");
scanf("%d",&key);
printf("\nEnter key parameter 2:");
scanf("%d",&den);
int *dev_num,*dev_key,*dev_den;
unsigned int *dev_res;
unsigned int res[3]={1,1,1};
dim3 grid(1,1,1);
dim3 block(3,3,1);
cudaMalloc( (void **)&dev_num, 3*sizeof(int));
cudaMalloc( (void **)&dev_key,sizeof(int));
cudaMalloc( (void **)&dev_den, sizeof(int));
cudaMalloc( (void **)&dev_res, 3*sizeof(unsigned int));
#if 0	//this block references imageScale, g_SobelDisplayMode, temp and filterMode, none of which are declared in this file, so it is compiled out
	switch (key)
{
case 27:
case 'q':
case 'Q':
printf("Shutting down...\n");
exit(EXIT_SUCCESS);
break;
case '-':
imageScale -= 0.1f;
printf("brightness = %4.2f\n", imageScale);
break;
case '=':
imageScale += 0.1f;
printf("brightness = %4.2f\n", imageScale);
break;
case 'i':
case 'I':
g_SobelDisplayMode = SOBELDISPLAY_IMAGE;
sprintf(temp, "CUDA Edge Detection (%s)", filterMode[g_SobelDisplayMode]);
glutSetWindowTitle(temp);
break;
case 's':
case 'S':
g_SobelDisplayMode = SOBELDISPLAY_SOBELSHARED;
sprintf(temp, "CUDA Edge Detection (%s)", filterMode[g_SobelDisplayMode]);
glutSetWindowTitle(temp);
break;
case 't':
case 'T':
g_SobelDisplayMode = SOBELDISPLAY_SOBELTEX;
sprintf(temp, "CUDA Edge Detection (%s)", filterMode[g_SobelDisplayMode]);
glutSetWindowTitle(temp);
break;
default:
break;
	}
#endif
cudaMemcpy(dev_num,num,3*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_key,&key,sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_den,&den,sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_res,res,3*sizeof(unsigned int),cudaMemcpyHostToDevice);
rsa<<<grid,block>>>(dev_num,dev_key,dev_den,dev_res);
cudaMemcpy(res,dev_res,3*sizeof(unsigned int),cudaMemcpyDeviceToHost);
cudaFree(dev_num);
cudaFree(dev_key);
cudaFree(dev_den);
cudaFree(dev_res);
for(int i=0;i<3;i++)
{
printf("\n%d",res[i]);
}
}
getch();
return 0;
}
|
0eb555ea8b554f69497c0d2ad15f64a0e3a89da7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Lab 5, image filters with CUDA.
// Compile with a command-line similar to Lab 4:
// nvcc filter.cu -c -arch=sm_30 -o filter.o
// g++ filter.o milli.c readppm.c -lGL -lm -lcuda -lcudart -L/usr/local/cuda/lib -lglut -o filter
// or (multicore lab)
// nvcc filter.cu -c -arch=sm_20 -o filter.o
// g++ filter.o milli.c readppm.c -lGL -lm -lcuda -L/usr/local/cuda/lib64 -lcudart -lglut -o filter
// 2017-11-27: Early pre-release, dubbed "beta".
// 2017-12-03: First official version! Brand new lab 5 based on the old lab 6.
// Better variable names, better prepared for some lab tasks. More changes may come
// but I call this version 1.0b2.
// 2017-12-04: Two fixes: Added command-lines (above), fixed a bug in computeImages
// that allocated too much memory. b3
// 2017-12-04: More fixes: Tightened up the kernel with edge clamping.
// Less code, nicer result (no borders). Cleaned up some messed up X and Y. b4
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#ifdef __APPLE__
#include <GLUT/glut.h>
#include <OpenGL/gl.h>
#else
#include <GL/glut.h>
#endif
#include "readppm.h"
#include "milli.h"
// Use these for setting shared memory size.
#define maxKernelSizeX 10
#define maxKernelSizeY 10
#define BLOCKSIZE 32
__global__ void filter(unsigned char *image, unsigned char *out, const unsigned int imagesizex, const unsigned int imagesizey, const int kernelsizex, const int kernelsizey)
{
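	// each block stages a (BLOCKSIZE + 2*kernelsize) RGB tile, including a clamped halo, in
	// shared memory, then every thread takes the per-channel median of its filter window
	// using 256-bin histograms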
// map from blockIdx to pixel position
int globalx = blockIdx.x * blockDim.x + threadIdx.x;
int globaly = blockIdx.y * blockDim.y + threadIdx.y;
int localx = threadIdx.x;
int localy = threadIdx.y;
int dy, dx;
__shared__ unsigned char local_mem[(BLOCKSIZE+2*maxKernelSizeX)*3][BLOCKSIZE+2*maxKernelSizeY];
int yy = min(max(globaly, 0), imagesizey-1);
int xx = min(max(globalx, 0), imagesizex-1);
// copy core section
local_mem[(kernelsizex+localx)*3+0][kernelsizey+localy] = image[((yy)*imagesizex+(xx))*3+0];
local_mem[(kernelsizex+localx)*3+1][kernelsizey+localy] = image[((yy)*imagesizex+(xx))*3+1];
local_mem[(kernelsizex+localx)*3+2][kernelsizey+localy] = image[((yy)*imagesizex+(xx))*3+2];
// handle up and down edge
yy = min(max(globaly, 0), imagesizey-1);
if (localx < kernelsizex)
{
xx = max(globalx-kernelsizex, 0); //if(globalx-kernalsizex < 0)
local_mem[(localx)*3 + 0][kernelsizey+localy] = image[(yy*imagesizex + xx)*3+0];
local_mem[(localx)*3 + 1][kernelsizey+localy] = image[(yy*imagesizex + xx)*3+1];
local_mem[(localx)*3 + 2][kernelsizey+localy] = image[(yy*imagesizex + xx)*3+2];
}
else if (localx > BLOCKSIZE-1 -kernelsizex)
{
xx = min(globalx+kernelsizex, imagesizex-1); //if(globalx+kernalsizex > imagesizex)
local_mem[(localx+2*kernelsizex)*3 + 0][kernelsizey+localy] = image[(yy*imagesizex + xx)*3+0];
local_mem[(localx+2*kernelsizex)*3 + 1][kernelsizey+localy] = image[(yy*imagesizex + xx)*3+1];
local_mem[(localx+2*kernelsizex)*3 + 2][kernelsizey+localy] = image[(yy*imagesizex + xx)*3+2];
}
//handle left and right edge
xx = min(max(globalx, 0), imagesizex-1);
if (localy < kernelsizey)
{
yy = max(globaly-kernelsizey, 0); //if(globaly-kernalsizey < 0)
local_mem[(kernelsizex+localx)*3+0][localy] = image[(yy*imagesizex + xx)*3+0];
local_mem[(kernelsizex+localx)*3+1][localy] = image[(yy*imagesizex + xx)*3+1];
local_mem[(kernelsizex+localx)*3+2][localy] = image[(yy*imagesizex + xx)*3+2];
}
else if (localy > BLOCKSIZE-1 -kernelsizey)
{
yy = min(globaly+kernelsizey, imagesizey-1); //if(globaly+kernalsizey > 0)
local_mem[(kernelsizex+localx)*3+0][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+0];
local_mem[(kernelsizex+localx)*3+1][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+1];
local_mem[(kernelsizex+localx)*3+2][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+2];
}
//handle corner
if (localx < kernelsizex && localy < kernelsizey)
{
xx = max(globalx-kernelsizex, 0); //if(globalx-kernalsizex < 0)
yy = max(globaly-kernelsizey, 0); //if(globaly-kernalsizey < 0)
local_mem[localx*3 + 0][localy] = image[(yy*imagesizex + xx)*3+0];
local_mem[localx*3 + 1][localy] = image[(yy*imagesizex + xx)*3+1];
local_mem[localx*3 + 2][localy] = image[(yy*imagesizex + xx)*3+2];
}
else if (localx > BLOCKSIZE-1 - kernelsizex && localy > BLOCKSIZE-1 - kernelsizey)
{
xx = min(globalx+kernelsizex, imagesizex-1); //if(globalx+kernalsizex > imagesizex)
yy = min(globaly+kernelsizey, imagesizey-1); //if(globaly+kernalsizey > imagesizey)
local_mem[(localx+2*kernelsizex)*3 + 0][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+0];
local_mem[(localx+2*kernelsizex)*3 + 1][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+1];
local_mem[(localx+2*kernelsizex)*3 + 2][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+2];
}
else if (localx > BLOCKSIZE-1 - kernelsizex && localy < kernelsizey)
{
xx = min(globalx+kernelsizex, imagesizex-1); //if(globalx+kernalsizex > imagesizex)
yy = max(globaly-kernelsizey, 0); //if(globaly-kernalsizey < 0)
local_mem[(localx+2*kernelsizex)*3 + 0][localy] = image[(yy*imagesizex + xx)*3+0];
local_mem[(localx+2*kernelsizex)*3 + 1][localy] = image[(yy*imagesizex + xx)*3+1];
local_mem[(localx+2*kernelsizex)*3 + 2][localy] = image[(yy*imagesizex + xx)*3+2];
}
else if (localx < kernelsizex && localy > BLOCKSIZE-1 - kernelsizey)
{
xx = max(globalx-kernelsizex, 0); //if(globalx-kernalsizex < 0)
yy = min(globaly+kernelsizey, imagesizey-1); //if(globaly+kernalsizey > imagesizey)
local_mem[(localx)*3 + 0][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+0];
local_mem[(localx)*3 + 1][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+1];
local_mem[(localx)*3 + 2][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+2];
}
__syncthreads();
	int accumulatex[256] = {0}, countx=0, resultx, finishx=0;
	int accumulatey[256] = {0}, county=0, resulty, finishy=0;
	int accumulatez[256] = {0}, countz=0, resultz, finishz=0;
int i;
int total = (2*kernelsizex + 1) * (2*kernelsizey + 1);
int median = ceil(float(total)/2);
if (globalx < imagesizex && globaly < imagesizey) // If inside image
{
// Filter kernel (simple box filter)
for(dy=-kernelsizey;dy<=kernelsizey;dy++)
{
for(dx=-kernelsizex;dx<=kernelsizex;dx++)
{
// Use max and min to avoid branching!
yy = min(max(localy+kernelsizey +dy, 0), BLOCKSIZE+2*kernelsizey-1);
xx = min(max(localx+kernelsizex +dx, 0), BLOCKSIZE+2*kernelsizex-1);
accumulatex[local_mem[(xx)*3+0][yy]]++;
accumulatey[local_mem[(xx)*3+1][yy]]++;
accumulatez[local_mem[(xx)*3+2][yy]]++;
}
}
for(i = 0; i < 256; i++){
countx += accumulatex[i];
county += accumulatey[i];
countz += accumulatez[i];
if (countx > median && !finishx){
resultx = i;
finishx = 1;
}
if (county > median && !finishy){
resulty = i;
finishy = 1;
}
if (countz > median && !finishz){
resultz = i;
finishz = 1;
}
}
out[((globaly)*imagesizex+(globalx))*3+0] = resultx;
out[((globaly)*imagesizex+(globalx))*3+1] = resulty;
out[((globaly)*imagesizex+(globalx))*3+2] = resultz;
}
}
// Global variables for image data
unsigned char *image, *pixels, *dev_bitmap, *dev_input;
unsigned int imagesizey, imagesizex; // Image size
////////////////////////////////////////////////////////////////////////////////
// main computation function
////////////////////////////////////////////////////////////////////////////////
void computeImages(int kernelsizex, int kernelsizey)
{
double t;
if (kernelsizex > maxKernelSizeX || kernelsizey > maxKernelSizeY)
{
printf("Kernel size out of bounds!\n");
return;
}
pixels = (unsigned char *) malloc(imagesizex*imagesizey*3);
hipMalloc( (void**)&dev_input, imagesizex*imagesizey*3);
hipMemcpy( dev_input, image, imagesizey*imagesizex*3, hipMemcpyHostToDevice );
hipMalloc( (void**)&dev_bitmap, imagesizex*imagesizey*3);
dim3 grid(ceil(float(imagesizex)/(BLOCKSIZE)),ceil(float(imagesizey)/(BLOCKSIZE)));
dim3 block(BLOCKSIZE,BLOCKSIZE);
ResetMilli();
hipLaunchKernelGGL(( filter), dim3(grid),dim3(block), 0, 0, dev_input, dev_bitmap, imagesizex, imagesizey, kernelsizex, kernelsizey); // change to blocksize = 32*32
hipDeviceSynchronize();
t = GetSeconds();
printf("COST %lf seconds\n", t);
// Check for errors!
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
hipMemcpy( pixels, dev_bitmap, imagesizey*imagesizex*3, hipMemcpyDeviceToHost );
hipFree( dev_bitmap );
hipFree( dev_input );
}
// Display images
void Draw()
{
// Dump the whole picture onto the screen.
glClearColor( 0.0, 0.0, 0.0, 1.0 );
glClear( GL_COLOR_BUFFER_BIT );
if (imagesizey >= imagesizex)
{ // Not wide - probably square. Original left, result right.
glRasterPos2f(-1, -1);
glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image );
glRasterPos2i(0, -1);
glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels);
}
else
{ // Wide image! Original on top, result below.
glRasterPos2f(-1, -1);
glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels );
glRasterPos2i(-1, 0);
glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image );
}
glFlush();
}
// Main program, inits
int main( int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode( GLUT_SINGLE | GLUT_RGBA );
if (argc > 1)
image = readppm(argv[1], (int *)&imagesizex, (int *)&imagesizey);
else
image = readppm((char *)"maskros512.ppm", (int *)&imagesizex, (int *)&imagesizey);
if (imagesizey >= imagesizex)
glutInitWindowSize( imagesizex*2, imagesizey );
else
glutInitWindowSize( imagesizex, imagesizey*2 );
glutCreateWindow("Lab 5");
glutDisplayFunc(Draw);
ResetMilli();
computeImages(4, 4);
// You can save the result to a file like this:
writeppm("out.ppm", imagesizey, imagesizex, pixels);
glutMainLoop();
return 0;
}
| 0eb555ea8b554f69497c0d2ad15f64a0e3a89da7.cu | // Lab 5, image filters with CUDA.
// Compile with a command-line similar to Lab 4:
// nvcc filter.cu -c -arch=sm_30 -o filter.o
// g++ filter.o milli.c readppm.c -lGL -lm -lcuda -lcudart -L/usr/local/cuda/lib -lglut -o filter
// or (multicore lab)
// nvcc filter.cu -c -arch=sm_20 -o filter.o
// g++ filter.o milli.c readppm.c -lGL -lm -lcuda -L/usr/local/cuda/lib64 -lcudart -lglut -o filter
// 2017-11-27: Early pre-release, dubbed "beta".
// 2017-12-03: First official version! Brand new lab 5 based on the old lab 6.
// Better variable names, better prepared for some lab tasks. More changes may come
// but I call this version 1.0b2.
// 2017-12-04: Two fixes: Added command-lines (above), fixed a bug in computeImages
// that allocated too much memory. b3
// 2017-12-04: More fixes: Tightened up the kernel with edge clamping.
// Less code, nicer result (no borders). Cleaned up some messed up X and Y. b4
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#ifdef __APPLE__
#include <GLUT/glut.h>
#include <OpenGL/gl.h>
#else
#include <GL/glut.h>
#endif
#include "readppm.h"
#include "milli.h"
// Use these for setting shared memory size.
#define maxKernelSizeX 10
#define maxKernelSizeY 10
#define BLOCKSIZE 32
__global__ void filter(unsigned char *image, unsigned char *out, const unsigned int imagesizex, const unsigned int imagesizey, const int kernelsizex, const int kernelsizey)
{
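	// each block stages a (BLOCKSIZE + 2*kernelsize) RGB tile, including a clamped halo, in
	// shared memory, then every thread takes the per-channel median of its filter window
	// using 256-bin histograms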
// map from blockIdx to pixel position
int globalx = blockIdx.x * blockDim.x + threadIdx.x;
int globaly = blockIdx.y * blockDim.y + threadIdx.y;
int localx = threadIdx.x;
int localy = threadIdx.y;
int dy, dx;
__shared__ unsigned char local_mem[(BLOCKSIZE+2*maxKernelSizeX)*3][BLOCKSIZE+2*maxKernelSizeY];
int yy = min(max(globaly, 0), imagesizey-1);
int xx = min(max(globalx, 0), imagesizex-1);
// copy core section
local_mem[(kernelsizex+localx)*3+0][kernelsizey+localy] = image[((yy)*imagesizex+(xx))*3+0];
local_mem[(kernelsizex+localx)*3+1][kernelsizey+localy] = image[((yy)*imagesizex+(xx))*3+1];
local_mem[(kernelsizex+localx)*3+2][kernelsizey+localy] = image[((yy)*imagesizex+(xx))*3+2];
// handle up and down edge
yy = min(max(globaly, 0), imagesizey-1);
if (localx < kernelsizex)
{
xx = max(globalx-kernelsizex, 0); //if(globalx-kernalsizex < 0)
local_mem[(localx)*3 + 0][kernelsizey+localy] = image[(yy*imagesizex + xx)*3+0];
local_mem[(localx)*3 + 1][kernelsizey+localy] = image[(yy*imagesizex + xx)*3+1];
local_mem[(localx)*3 + 2][kernelsizey+localy] = image[(yy*imagesizex + xx)*3+2];
}
else if (localx > BLOCKSIZE-1 -kernelsizex)
{
xx = min(globalx+kernelsizex, imagesizex-1); //if(globalx+kernalsizex > imagesizex)
local_mem[(localx+2*kernelsizex)*3 + 0][kernelsizey+localy] = image[(yy*imagesizex + xx)*3+0];
local_mem[(localx+2*kernelsizex)*3 + 1][kernelsizey+localy] = image[(yy*imagesizex + xx)*3+1];
local_mem[(localx+2*kernelsizex)*3 + 2][kernelsizey+localy] = image[(yy*imagesizex + xx)*3+2];
}
//handle left and right edge
xx = min(max(globalx, 0), imagesizex-1);
if (localy < kernelsizey)
{
yy = max(globaly-kernelsizey, 0); //if(globaly-kernalsizey < 0)
local_mem[(kernelsizex+localx)*3+0][localy] = image[(yy*imagesizex + xx)*3+0];
local_mem[(kernelsizex+localx)*3+1][localy] = image[(yy*imagesizex + xx)*3+1];
local_mem[(kernelsizex+localx)*3+2][localy] = image[(yy*imagesizex + xx)*3+2];
}
else if (localy > BLOCKSIZE-1 -kernelsizey)
{
yy = min(globaly+kernelsizey, imagesizey-1); //if(globaly+kernalsizey > 0)
local_mem[(kernelsizex+localx)*3+0][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+0];
local_mem[(kernelsizex+localx)*3+1][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+1];
local_mem[(kernelsizex+localx)*3+2][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+2];
}
//handle corner
if (localx < kernelsizex && localy < kernelsizey)
{
xx = max(globalx-kernelsizex, 0); //if(globalx-kernalsizex < 0)
yy = max(globaly-kernelsizey, 0); //if(globaly-kernalsizey < 0)
local_mem[localx*3 + 0][localy] = image[(yy*imagesizex + xx)*3+0];
local_mem[localx*3 + 1][localy] = image[(yy*imagesizex + xx)*3+1];
local_mem[localx*3 + 2][localy] = image[(yy*imagesizex + xx)*3+2];
}
else if (localx > BLOCKSIZE-1 - kernelsizex && localy > BLOCKSIZE-1 - kernelsizey)
{
xx = min(globalx+kernelsizex, imagesizex-1); //if(globalx+kernalsizex > imagesizex)
yy = min(globaly+kernelsizey, imagesizey-1); //if(globaly+kernalsizey > imagesizey)
local_mem[(localx+2*kernelsizex)*3 + 0][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+0];
local_mem[(localx+2*kernelsizex)*3 + 1][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+1];
local_mem[(localx+2*kernelsizex)*3 + 2][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+2];
}
else if (localx > BLOCKSIZE-1 - kernelsizex && localy < kernelsizey)
{
xx = min(globalx+kernelsizex, imagesizex-1); //if(globalx+kernalsizex > imagesizex)
yy = max(globaly-kernelsizey, 0); //if(globaly-kernalsizey < 0)
local_mem[(localx+2*kernelsizex)*3 + 0][localy] = image[(yy*imagesizex + xx)*3+0];
local_mem[(localx+2*kernelsizex)*3 + 1][localy] = image[(yy*imagesizex + xx)*3+1];
local_mem[(localx+2*kernelsizex)*3 + 2][localy] = image[(yy*imagesizex + xx)*3+2];
}
else if (localx < kernelsizex && localy > BLOCKSIZE-1 - kernelsizey)
{
xx = max(globalx-kernelsizex, 0); //if(globalx-kernalsizex < 0)
yy = min(globaly+kernelsizey, imagesizey-1); //if(globaly+kernalsizey > imagesizey)
local_mem[(localx)*3 + 0][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+0];
local_mem[(localx)*3 + 1][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+1];
local_mem[(localx)*3 + 2][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+2];
}
__syncthreads();
	int accumulatex[256] = {0}, countx=0, resultx, finishx=0;
	int accumulatey[256] = {0}, county=0, resulty, finishy=0;
	int accumulatez[256] = {0}, countz=0, resultz, finishz=0;
int i;
int total = (2*kernelsizex + 1) * (2*kernelsizey + 1);
int median = ceil(float(total)/2);
if (globalx < imagesizex && globaly < imagesizey) // If inside image
{
// Filter kernel (simple box filter)
for(dy=-kernelsizey;dy<=kernelsizey;dy++)
{
for(dx=-kernelsizex;dx<=kernelsizex;dx++)
{
// Use max and min to avoid branching!
yy = min(max(localy+kernelsizey +dy, 0), BLOCKSIZE+2*kernelsizey-1);
xx = min(max(localx+kernelsizex +dx, 0), BLOCKSIZE+2*kernelsizex-1);
accumulatex[local_mem[(xx)*3+0][yy]]++;
accumulatey[local_mem[(xx)*3+1][yy]]++;
accumulatez[local_mem[(xx)*3+2][yy]]++;
}
}
for(i = 0; i < 256; i++){
countx += accumulatex[i];
county += accumulatey[i];
countz += accumulatez[i];
if (countx > median && !finishx){
resultx = i;
finishx = 1;
}
if (county > median && !finishy){
resulty = i;
finishy = 1;
}
if (countz > median && !finishz){
resultz = i;
finishz = 1;
}
}
out[((globaly)*imagesizex+(globalx))*3+0] = resultx;
out[((globaly)*imagesizex+(globalx))*3+1] = resulty;
out[((globaly)*imagesizex+(globalx))*3+2] = resultz;
}
}
// Global variables for image data
unsigned char *image, *pixels, *dev_bitmap, *dev_input;
unsigned int imagesizey, imagesizex; // Image size
////////////////////////////////////////////////////////////////////////////////
// main computation function
////////////////////////////////////////////////////////////////////////////////
void computeImages(int kernelsizex, int kernelsizey)
{
double t;
if (kernelsizex > maxKernelSizeX || kernelsizey > maxKernelSizeY)
{
printf("Kernel size out of bounds!\n");
return;
}
pixels = (unsigned char *) malloc(imagesizex*imagesizey*3);
cudaMalloc( (void**)&dev_input, imagesizex*imagesizey*3);
cudaMemcpy( dev_input, image, imagesizey*imagesizex*3, cudaMemcpyHostToDevice );
cudaMalloc( (void**)&dev_bitmap, imagesizex*imagesizey*3);
dim3 grid(ceil(float(imagesizex)/(BLOCKSIZE)),ceil(float(imagesizey)/(BLOCKSIZE)));
dim3 block(BLOCKSIZE,BLOCKSIZE);
ResetMilli();
filter<<<grid,block>>>(dev_input, dev_bitmap, imagesizex, imagesizey, kernelsizex, kernelsizey); // change to blocksize = 32*32
cudaThreadSynchronize();
t = GetSeconds();
printf("COST %lf seconds\n", t);
// Check for errors!
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
cudaMemcpy( pixels, dev_bitmap, imagesizey*imagesizex*3, cudaMemcpyDeviceToHost );
cudaFree( dev_bitmap );
cudaFree( dev_input );
}
// Display images
void Draw()
{
// Dump the whole picture onto the screen.
glClearColor( 0.0, 0.0, 0.0, 1.0 );
glClear( GL_COLOR_BUFFER_BIT );
if (imagesizey >= imagesizex)
{ // Not wide - probably square. Original left, result right.
glRasterPos2f(-1, -1);
glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image );
glRasterPos2i(0, -1);
glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels);
}
else
{ // Wide image! Original on top, result below.
glRasterPos2f(-1, -1);
glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels );
glRasterPos2i(-1, 0);
glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image );
}
glFlush();
}
// Main program, inits
int main( int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode( GLUT_SINGLE | GLUT_RGBA );
if (argc > 1)
image = readppm(argv[1], (int *)&imagesizex, (int *)&imagesizey);
else
image = readppm((char *)"maskros512.ppm", (int *)&imagesizex, (int *)&imagesizey);
if (imagesizey >= imagesizex)
glutInitWindowSize( imagesizex*2, imagesizey );
else
glutInitWindowSize( imagesizex, imagesizey*2 );
glutCreateWindow("Lab 5");
glutDisplayFunc(Draw);
ResetMilli();
computeImages(4, 4);
// You can save the result to a file like this:
writeppm("out.ppm", imagesizey, imagesizex, pixels);
glutMainLoop();
return 0;
}
|
04bee4b67635eca8acc78878a83a34911517f086.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***********************************************************************
Copyright (C) 2020 Hironori Fujimoto
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
***********************************************************************/
#include "sgm_gpu/sgm_gpu.h"
#include "sgm_gpu/cost_aggregation.h"
#include "sgm_gpu/costs.h"
#include "sgm_gpu/hamming_cost.h"
#include "sgm_gpu/left_right_consistency.h"
#include "sgm_gpu/median_filter.h"
#include <cv_bridge/cv_bridge.h>
//#include <image_geometry/stereo_camera_model.h>
namespace sgm_gpu {
// Variables which have CUDA-related type are put here
// in order to include sgm_gpu.h from non-CUDA package
hipStream_t stream1_;
hipStream_t stream2_;
hipStream_t stream3_;
dim3 BLOCK_SIZE_;
dim3 grid_size_;
SgmGpu::SgmGpu(const ros::NodeHandle &parent_node_handle, const int cols,
const int rows)
: memory_allocated_(false), cols_(cols), rows_(rows) {
private_node_handle_.reset(
new ros::NodeHandle(parent_node_handle, "libsgm_gpu"));
// Get parameters used in SGM algorithm
p1_ = 6; // static_cast<uint8_t>(private_node_handle_->param("p1", 6));
p2_ = 96; // static_cast<uint8_t>(private_node_handle_->param("p2", 96));
check_consistency_ =
true; // private_node_handle_->param("check_consistency", true);
// Create streams
hipStreamCreate(&stream1_);
hipStreamCreate(&stream2_);
hipStreamCreate(&stream3_);
}
SgmGpu::~SgmGpu() {
freeMemory();
hipStreamDestroy(stream1_);
hipStreamDestroy(stream2_);
hipStreamDestroy(stream3_);
}
void SgmGpu::allocateMemory(uint32_t cols, uint32_t rows) {
freeMemory();
cols_ = cols;
rows_ = rows;
int total_pixel = cols_ * rows_;
hipMalloc((void **)&d_im0_, sizeof(uint8_t) * total_pixel);
hipMalloc((void **)&d_im1_, sizeof(uint8_t) * total_pixel);
hipMalloc((void **)&d_transform0_, sizeof(cost_t) * total_pixel);
hipMalloc((void **)&d_transform1_, sizeof(cost_t) * total_pixel);
int cost_volume_size = total_pixel * MAX_DISPARITY;
hipMalloc((void **)&d_cost_, sizeof(uint8_t) * cost_volume_size);
hipMalloc((void **)&d_L0_, sizeof(uint8_t) * cost_volume_size);
hipMalloc((void **)&d_L1_, sizeof(uint8_t) * cost_volume_size);
hipMalloc((void **)&d_L2_, sizeof(uint8_t) * cost_volume_size);
hipMalloc((void **)&d_L3_, sizeof(uint8_t) * cost_volume_size);
hipMalloc((void **)&d_L4_, sizeof(uint8_t) * cost_volume_size);
hipMalloc((void **)&d_L5_, sizeof(uint8_t) * cost_volume_size);
hipMalloc((void **)&d_L6_, sizeof(uint8_t) * cost_volume_size);
hipMalloc((void **)&d_L7_, sizeof(uint8_t) * cost_volume_size);
hipMalloc((void **)&d_s_, sizeof(uint16_t) * cost_volume_size);
hipMalloc((void **)&d_disparity_, sizeof(uint8_t) * total_pixel);
hipMalloc((void **)&d_disparity_filtered_uchar_,
sizeof(uint8_t) * total_pixel);
hipMalloc((void **)&d_disparity_right_, sizeof(uint8_t) * total_pixel);
hipMalloc((void **)&d_disparity_right_filtered_uchar_,
sizeof(uint8_t) * total_pixel);
memory_allocated_ = true;
}
void SgmGpu::freeMemory() {
if (!memory_allocated_) return;
hipFree(d_im0_);
hipFree(d_im1_);
hipFree(d_transform0_);
hipFree(d_transform1_);
hipFree(d_L0_);
hipFree(d_L1_);
hipFree(d_L2_);
hipFree(d_L3_);
hipFree(d_L4_);
hipFree(d_L5_);
hipFree(d_L6_);
hipFree(d_L7_);
hipFree(d_disparity_);
hipFree(d_disparity_filtered_uchar_);
hipFree(d_disparity_right_);
hipFree(d_disparity_right_filtered_uchar_);
hipFree(d_cost_);
hipFree(d_s_);
memory_allocated_ = false;
}
bool SgmGpu::computeDisparity(const sensor_msgs::Image &left_image,
const sensor_msgs::Image &right_image,
const sensor_msgs::CameraInfo &left_camera_info,
const sensor_msgs::CameraInfo &right_camera_info,
stereo_msgs::DisparityImage &disparity_msg) {
if (left_image.width != right_image.width ||
left_image.height != right_image.height) {
ROS_ERROR_STREAM_NAMED(
"libsgm_gpu",
"Image dimension of left and right are not same: \n"
<< "Left: " << left_image.width << "x" << left_image.height << "\n"
<< "Right: " << right_image.width << "x" << right_image.height);
return false;
}
if (left_image.encoding != right_image.encoding) {
ROS_ERROR_STREAM_NAMED("libsgm_gpu",
"Image encoding of left and right are not same: \n"
<< "Left: " << left_image.encoding << "\n"
<< "Right: " << right_image.encoding);
return false;
}
// Convert to 8 bit grayscale image
cv_bridge::CvImagePtr left_mono8 =
cv_bridge::toCvCopy(left_image, sensor_msgs::image_encodings::MONO8);
cv_bridge::CvImagePtr right_mono8 =
cv_bridge::toCvCopy(right_image, sensor_msgs::image_encodings::MONO8);
// Resize images to their width and height divisible by 4 for limit of CUDA
// code
resizeToDivisibleBy4(left_mono8->image, right_mono8->image);
// Reallocate memory if needed
bool size_changed =
(cols_ != left_mono8->image.cols || rows_ != left_mono8->image.rows);
if (!memory_allocated_ || size_changed)
allocateMemory(left_mono8->image.cols, left_mono8->image.rows);
// Copy image to GPU device
size_t mono8_image_size = left_mono8->image.total() * sizeof(uint8_t);
hipMemcpyAsync(d_im0_, left_mono8->image.ptr<uint8_t>(), mono8_image_size,
hipMemcpyHostToDevice, stream1_);
hipMemcpyAsync(d_im1_, right_mono8->image.ptr<uint8_t>(), mono8_image_size,
hipMemcpyHostToDevice, stream1_);
BLOCK_SIZE_.x = 32;
BLOCK_SIZE_.y = 32;
grid_size_.x = (cols_ + BLOCK_SIZE_.x - 1) / BLOCK_SIZE_.x;
grid_size_.y = (rows_ + BLOCK_SIZE_.y - 1) / BLOCK_SIZE_.y;
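  // census transform of both images (one thread per pixel), then Hamming-distance matching cost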
hipLaunchKernelGGL(( CenterSymmetricCensusKernelSM2), dim3(grid_size_), dim3(BLOCK_SIZE_), 0, stream1_,
d_im0_, d_im1_, d_transform0_, d_transform1_, rows_, cols_);
hipStreamSynchronize(stream1_);
hipLaunchKernelGGL(( HammingDistanceCostKernel), dim3(rows_), dim3(MAX_DISPARITY), 0, stream1_,
d_transform0_, d_transform1_, d_cost_, rows_, cols_);
const int PIXELS_PER_BLOCK = COSTAGG_BLOCKSIZE / WARP_SIZE;
const int PIXELS_PER_BLOCK_HORIZ = COSTAGG_BLOCKSIZE_HORIZ / WARP_SIZE;
// Cost Aggregation
hipLaunchKernelGGL(( CostAggregationKernelLeftToRight), (rows_ + PIXELS_PER_BLOCK_HORIZ - 1) /
PIXELS_PER_BLOCK_HORIZ,
dim3(COSTAGG_BLOCKSIZE_HORIZ), 0, stream2_,
d_cost_, d_L0_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_, d_L4_, d_L5_,
d_L6_);
hipLaunchKernelGGL(( CostAggregationKernelRightToLeft), (rows_ + PIXELS_PER_BLOCK_HORIZ - 1) /
PIXELS_PER_BLOCK_HORIZ,
dim3(COSTAGG_BLOCKSIZE_HORIZ), 0, stream3_,
d_cost_, d_L1_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_, d_L4_, d_L5_,
d_L6_);
hipLaunchKernelGGL(( CostAggregationKernelUpToDown), (cols_ + PIXELS_PER_BLOCK - 1) /
PIXELS_PER_BLOCK,
dim3(COSTAGG_BLOCKSIZE), 0, stream1_,
d_cost_, d_L2_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_, d_L4_, d_L5_,
d_L6_);
hipLaunchKernelGGL(( CostAggregationKernelDownToUp), (cols_ + PIXELS_PER_BLOCK - 1) /
PIXELS_PER_BLOCK,
dim3(COSTAGG_BLOCKSIZE), 0, stream1_,
d_cost_, d_L3_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_, d_L4_, d_L5_,
d_L6_);
hipLaunchKernelGGL(( CostAggregationKernelDiagonalDownUpLeftRight),
dim3((cols_ + PIXELS_PER_BLOCK - 1) / PIXELS_PER_BLOCK), dim3(COSTAGG_BLOCKSIZE), 0,
stream1_, d_cost_, d_L4_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_,
d_L4_, d_L5_, d_L6_);
hipLaunchKernelGGL(( CostAggregationKernelDiagonalUpDownLeftRight),
dim3((cols_ + PIXELS_PER_BLOCK - 1) / PIXELS_PER_BLOCK), dim3(COSTAGG_BLOCKSIZE), 0,
stream1_, d_cost_, d_L5_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_,
d_L4_, d_L5_, d_L6_);
hipLaunchKernelGGL(( CostAggregationKernelDiagonalDownUpRightLeft),
dim3((cols_ + PIXELS_PER_BLOCK - 1) / PIXELS_PER_BLOCK), dim3(COSTAGG_BLOCKSIZE), 0,
stream1_, d_cost_, d_L6_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_,
d_L4_, d_L5_, d_L6_);
hipLaunchKernelGGL(( CostAggregationKernelDiagonalUpDownRightLeft),
dim3((cols_ + PIXELS_PER_BLOCK - 1) / PIXELS_PER_BLOCK), dim3(COSTAGG_BLOCKSIZE), 0,
stream1_, d_cost_, d_L7_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_,
d_L4_, d_L5_, d_L6_);
int total_pixel = rows_ * cols_;
hipLaunchKernelGGL(( MedianFilter3x3), dim3((total_pixel + MAX_DISPARITY - 1) / MAX_DISPARITY),
dim3(MAX_DISPARITY), 0, stream1_,
d_disparity_, d_disparity_filtered_uchar_, rows_, cols_);
if (check_consistency_) {
hipLaunchKernelGGL(( ChooseRightDisparity), dim3(grid_size_), dim3(BLOCK_SIZE_), 0, stream1_,
d_disparity_right_, d_s_, rows_, cols_);
hipLaunchKernelGGL(( MedianFilter3x3), dim3((total_pixel + MAX_DISPARITY - 1) / MAX_DISPARITY),
dim3(MAX_DISPARITY), 0, stream1_,
d_disparity_right_, d_disparity_right_filtered_uchar_, rows_, cols_);
hipLaunchKernelGGL(( LeftRightConsistencyCheck), dim3(grid_size_), dim3(BLOCK_SIZE_), 0, stream1_,
d_disparity_filtered_uchar_, d_disparity_right_filtered_uchar_, rows_,
cols_);
}
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
ROS_ERROR_NAMED("libsgm_gpu", "%s %d\n", hipGetErrorString(err), err);
return false;
}
hipDeviceSynchronize();
cv::Mat disparity(rows_, cols_, CV_8UC1);
hipMemcpy(disparity.data, d_disparity_filtered_uchar_,
sizeof(uint8_t) * total_pixel, hipMemcpyDeviceToHost);
// Restore image size if resized to be divisible by 4
if (cols_ != left_image.width || rows_ != left_image.height) {
cv::Size input_size(left_image.width, left_image.height);
cv::resize(disparity, disparity, input_size, 0, 0, cv::INTER_AREA);
}
// convertToMsg(disparity, left_camera_info, right_camera_info,
// disparity_msg);
return true;
}
bool SgmGpu::computeDisparity(const cv::Mat &left_image,
const cv::Mat &right_image,
cv::Mat *disparity_out) {
// Convert images to grayscale
cv::Mat left_mono8, right_mono8;
  if (left_image.channels() > 1) {
    cv::cvtColor(left_image, left_mono8, CV_RGB2GRAY);
  } else {
    left_mono8 = left_image;
  }
  if (right_image.channels() > 1) {
    cv::cvtColor(right_image, right_mono8, CV_RGB2GRAY);
  } else {
    right_mono8 = right_image;
  }
// Resize images to their width and height divisible by 4 for limit of CUDA
// code
resizeToDivisibleBy4(left_mono8, right_mono8);
// Reallocate memory if needed
bool size_changed = (cols_ != left_mono8.cols || rows_ != left_mono8.rows);
if (!memory_allocated_ || size_changed)
allocateMemory(left_mono8.cols, left_mono8.rows);
// Copy image to GPU device
size_t mono8_image_size = left_mono8.total() * sizeof(uint8_t);
hipMemcpyAsync(d_im0_, left_mono8.ptr<uint8_t>(), mono8_image_size,
hipMemcpyHostToDevice, stream1_);
hipMemcpyAsync(d_im1_, right_mono8.ptr<uint8_t>(), mono8_image_size,
hipMemcpyHostToDevice, stream1_);
BLOCK_SIZE_.x = 32;
BLOCK_SIZE_.y = 32;
grid_size_.x = (cols_ + BLOCK_SIZE_.x - 1) / BLOCK_SIZE_.x;
grid_size_.y = (rows_ + BLOCK_SIZE_.y - 1) / BLOCK_SIZE_.y;
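  // census transform of both images (one thread per pixel), then Hamming-distance matching cost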
hipLaunchKernelGGL(( CenterSymmetricCensusKernelSM2), dim3(grid_size_), dim3(BLOCK_SIZE_), 0, stream1_,
d_im0_, d_im1_, d_transform0_, d_transform1_, rows_, cols_);
hipStreamSynchronize(stream1_);
hipLaunchKernelGGL(( HammingDistanceCostKernel), dim3(rows_), dim3(MAX_DISPARITY), 0, stream1_,
d_transform0_, d_transform1_, d_cost_, rows_, cols_);
const int PIXELS_PER_BLOCK = COSTAGG_BLOCKSIZE / WARP_SIZE;
const int PIXELS_PER_BLOCK_HORIZ = COSTAGG_BLOCKSIZE_HORIZ / WARP_SIZE;
// Cost Aggregation
hipLaunchKernelGGL(( CostAggregationKernelLeftToRight), (rows_ + PIXELS_PER_BLOCK_HORIZ - 1) /
PIXELS_PER_BLOCK_HORIZ,
dim3(COSTAGG_BLOCKSIZE_HORIZ), 0, stream2_,
d_cost_, d_L0_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_, d_L4_, d_L5_,
d_L6_);
hipLaunchKernelGGL(( CostAggregationKernelRightToLeft), (rows_ + PIXELS_PER_BLOCK_HORIZ - 1) /
PIXELS_PER_BLOCK_HORIZ,
dim3(COSTAGG_BLOCKSIZE_HORIZ), 0, stream3_,
d_cost_, d_L1_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_, d_L4_, d_L5_,
d_L6_);
hipLaunchKernelGGL(( CostAggregationKernelUpToDown), (cols_ + PIXELS_PER_BLOCK - 1) /
PIXELS_PER_BLOCK,
dim3(COSTAGG_BLOCKSIZE), 0, stream1_,
d_cost_, d_L2_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_, d_L4_, d_L5_,
d_L6_);
hipLaunchKernelGGL(( CostAggregationKernelDownToUp), (cols_ + PIXELS_PER_BLOCK - 1) /
PIXELS_PER_BLOCK,
dim3(COSTAGG_BLOCKSIZE), 0, stream1_,
d_cost_, d_L3_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_, d_L4_, d_L5_,
d_L6_);
hipLaunchKernelGGL(( CostAggregationKernelDiagonalDownUpLeftRight),
dim3((cols_ + PIXELS_PER_BLOCK - 1) / PIXELS_PER_BLOCK), dim3(COSTAGG_BLOCKSIZE), 0,
stream1_, d_cost_, d_L4_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_,
d_L4_, d_L5_, d_L6_);
hipLaunchKernelGGL(( CostAggregationKernelDiagonalUpDownLeftRight),
dim3((cols_ + PIXELS_PER_BLOCK - 1) / PIXELS_PER_BLOCK), dim3(COSTAGG_BLOCKSIZE), 0,
stream1_, d_cost_, d_L5_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_,
d_L4_, d_L5_, d_L6_);
hipLaunchKernelGGL(( CostAggregationKernelDiagonalDownUpRightLeft),
dim3((cols_ + PIXELS_PER_BLOCK - 1) / PIXELS_PER_BLOCK), dim3(COSTAGG_BLOCKSIZE), 0,
stream1_, d_cost_, d_L6_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_,
d_L4_, d_L5_, d_L6_);
hipLaunchKernelGGL(( CostAggregationKernelDiagonalUpDownRightLeft),
dim3((cols_ + PIXELS_PER_BLOCK - 1) / PIXELS_PER_BLOCK), dim3(COSTAGG_BLOCKSIZE), 0,
stream1_, d_cost_, d_L7_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_,
d_L4_, d_L5_, d_L6_);
int total_pixel = rows_ * cols_;
hipLaunchKernelGGL(( MedianFilter3x3), dim3((total_pixel + MAX_DISPARITY - 1) / MAX_DISPARITY),
dim3(MAX_DISPARITY), 0, stream1_,
d_disparity_, d_disparity_filtered_uchar_, rows_, cols_);
if (check_consistency_) {
hipLaunchKernelGGL(( ChooseRightDisparity), dim3(grid_size_), dim3(BLOCK_SIZE_), 0, stream1_,
d_disparity_right_, d_s_, rows_, cols_);
hipLaunchKernelGGL(( MedianFilter3x3), dim3((total_pixel + MAX_DISPARITY - 1) / MAX_DISPARITY),
dim3(MAX_DISPARITY), 0, stream1_,
d_disparity_right_, d_disparity_right_filtered_uchar_, rows_, cols_);
hipLaunchKernelGGL(( LeftRightConsistencyCheck), dim3(grid_size_), dim3(BLOCK_SIZE_), 0, stream1_,
d_disparity_filtered_uchar_, d_disparity_right_filtered_uchar_, rows_,
cols_);
}
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
ROS_ERROR_NAMED("libsgm_gpu", "%s %d\n", hipGetErrorString(err), err);
return false;
}
hipDeviceSynchronize();
cv::Mat disparity(rows_, cols_, CV_8UC1);
hipMemcpy(disparity.data, d_disparity_filtered_uchar_,
sizeof(uint8_t) * total_pixel, hipMemcpyDeviceToHost);
hipMemcpy(disparity_out->data, d_disparity_filtered_uchar_,
sizeof(uint8_t) * total_pixel, hipMemcpyDeviceToHost);
// Restore image size if resized to be divisible by 4
if (cols_ != left_image.cols || rows_ != left_image.rows) {
cv::Size input_size(left_image.cols, left_image.rows);
cv::resize(disparity, disparity, input_size, 0, 0, cv::INTER_AREA);
}
// convertToMsg(disparity, left_camera_info, right_camera_info,
// disparity_msg);
return true;
}
void SgmGpu::resizeToDivisibleBy4(cv::Mat &left_image, cv::Mat &right_image) {
bool need_resize = false;
cv::Size original_size, resized_size;
original_size = cv::Size(left_image.cols, left_image.rows);
resized_size = original_size;
if (original_size.width % 4 != 0) {
need_resize = true;
resized_size.width = (original_size.width / 4 + 1) * 4;
}
if (original_size.height % 4 != 0) {
need_resize = true;
resized_size.height = (original_size.height / 4 + 1) * 4;
}
if (need_resize) {
cv::resize(left_image, left_image, resized_size, 0, 0, cv::INTER_LINEAR);
cv::resize(right_image, right_image, resized_size, 0, 0, cv::INTER_LINEAR);
}
}
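// Example of the rounding above (hypothetical sizes): a 1242x375 input is
// padded to 1244x376, while dimensions already divisible by 4 are left
// untouched. The computation is equivalent to rounding up to the next
// multiple of 4, i.e. ((x + 3) / 4) * 4.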
// void SgmGpu::convertToMsg(const cv::Mat_<unsigned char> &disparity,
// const sensor_msgs::CameraInfo &left_camera_info,
// const sensor_msgs::CameraInfo &right_camera_info,
// stereo_msgs::DisparityImage &disparity_msg) {
// cv::Mat disparity_32f;
// disparity.convertTo(disparity_32f, CV_32F);
// cv_bridge::CvImage disparity_converter(
// left_camera_info.header, sensor_msgs::image_encodings::TYPE_32FC1,
// disparity_32f);
// disparity_converter.toImageMsg(disparity_msg.image);
//
// disparity_msg.header = left_camera_info.header;
//
// image_geometry::StereoCameraModel stereo_model;
// stereo_model.fromCameraInfo(left_camera_info, right_camera_info);
// disparity_msg.f = stereo_model.left().fx();
// disparity_msg.T = stereo_model.baseline();
//
// disparity_msg.min_disparity = 0.0;
// disparity_msg.max_disparity = MAX_DISPARITY;
// disparity_msg.delta_d = 1.0;
//}
} // namespace sgm_gpu
| 04bee4b67635eca8acc78878a83a34911517f086.cu | /***********************************************************************
Copyright (C) 2020 Hironori Fujimoto
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
***********************************************************************/
#include "sgm_gpu/sgm_gpu.h"
#include "sgm_gpu/cost_aggregation.h"
#include "sgm_gpu/costs.h"
#include "sgm_gpu/hamming_cost.h"
#include "sgm_gpu/left_right_consistency.h"
#include "sgm_gpu/median_filter.h"
#include <cv_bridge/cv_bridge.h>
//#include <image_geometry/stereo_camera_model.h>
namespace sgm_gpu {
// Variables which have CUDA-related type are put here
// in order to include sgm_gpu.h from non-CUDA package
cudaStream_t stream1_;
cudaStream_t stream2_;
cudaStream_t stream3_;
dim3 BLOCK_SIZE_;
dim3 grid_size_;
SgmGpu::SgmGpu(const ros::NodeHandle &parent_node_handle, const int cols,
const int rows)
: memory_allocated_(false), cols_(cols), rows_(rows) {
private_node_handle_.reset(
new ros::NodeHandle(parent_node_handle, "libsgm_gpu"));
// Get parameters used in SGM algorithm
p1_ = 6; // static_cast<uint8_t>(private_node_handle_->param("p1", 6));
p2_ = 96; // static_cast<uint8_t>(private_node_handle_->param("p2", 96));
check_consistency_ =
true; // private_node_handle_->param("check_consistency", true);
// Create streams
cudaStreamCreate(&stream1_);
cudaStreamCreate(&stream2_);
cudaStreamCreate(&stream3_);
}
SgmGpu::~SgmGpu() {
freeMemory();
cudaStreamDestroy(stream1_);
cudaStreamDestroy(stream2_);
cudaStreamDestroy(stream3_);
}
void SgmGpu::allocateMemory(uint32_t cols, uint32_t rows) {
freeMemory();
cols_ = cols;
rows_ = rows;
int total_pixel = cols_ * rows_;
cudaMalloc((void **)&d_im0_, sizeof(uint8_t) * total_pixel);
cudaMalloc((void **)&d_im1_, sizeof(uint8_t) * total_pixel);
cudaMalloc((void **)&d_transform0_, sizeof(cost_t) * total_pixel);
cudaMalloc((void **)&d_transform1_, sizeof(cost_t) * total_pixel);
int cost_volume_size = total_pixel * MAX_DISPARITY;
cudaMalloc((void **)&d_cost_, sizeof(uint8_t) * cost_volume_size);
cudaMalloc((void **)&d_L0_, sizeof(uint8_t) * cost_volume_size);
cudaMalloc((void **)&d_L1_, sizeof(uint8_t) * cost_volume_size);
cudaMalloc((void **)&d_L2_, sizeof(uint8_t) * cost_volume_size);
cudaMalloc((void **)&d_L3_, sizeof(uint8_t) * cost_volume_size);
cudaMalloc((void **)&d_L4_, sizeof(uint8_t) * cost_volume_size);
cudaMalloc((void **)&d_L5_, sizeof(uint8_t) * cost_volume_size);
cudaMalloc((void **)&d_L6_, sizeof(uint8_t) * cost_volume_size);
cudaMalloc((void **)&d_L7_, sizeof(uint8_t) * cost_volume_size);
cudaMalloc((void **)&d_s_, sizeof(uint16_t) * cost_volume_size);
cudaMalloc((void **)&d_disparity_, sizeof(uint8_t) * total_pixel);
cudaMalloc((void **)&d_disparity_filtered_uchar_,
sizeof(uint8_t) * total_pixel);
cudaMalloc((void **)&d_disparity_right_, sizeof(uint8_t) * total_pixel);
cudaMalloc((void **)&d_disparity_right_filtered_uchar_,
sizeof(uint8_t) * total_pixel);
memory_allocated_ = true;
}
void SgmGpu::freeMemory() {
if (!memory_allocated_) return;
cudaFree(d_im0_);
cudaFree(d_im1_);
cudaFree(d_transform0_);
cudaFree(d_transform1_);
cudaFree(d_L0_);
cudaFree(d_L1_);
cudaFree(d_L2_);
cudaFree(d_L3_);
cudaFree(d_L4_);
cudaFree(d_L5_);
cudaFree(d_L6_);
cudaFree(d_L7_);
cudaFree(d_disparity_);
cudaFree(d_disparity_filtered_uchar_);
cudaFree(d_disparity_right_);
cudaFree(d_disparity_right_filtered_uchar_);
cudaFree(d_cost_);
cudaFree(d_s_);
memory_allocated_ = false;
}
bool SgmGpu::computeDisparity(const sensor_msgs::Image &left_image,
const sensor_msgs::Image &right_image,
const sensor_msgs::CameraInfo &left_camera_info,
const sensor_msgs::CameraInfo &right_camera_info,
stereo_msgs::DisparityImage &disparity_msg) {
if (left_image.width != right_image.width ||
left_image.height != right_image.height) {
ROS_ERROR_STREAM_NAMED(
"libsgm_gpu",
"Image dimension of left and right are not same: \n"
<< "Left: " << left_image.width << "x" << left_image.height << "\n"
<< "Right: " << right_image.width << "x" << right_image.height);
return false;
}
if (left_image.encoding != right_image.encoding) {
ROS_ERROR_STREAM_NAMED("libsgm_gpu",
"Image encoding of left and right are not same: \n"
<< "Left: " << left_image.encoding << "\n"
<< "Right: " << right_image.encoding);
return false;
}
// Convert to 8 bit grayscale image
cv_bridge::CvImagePtr left_mono8 =
cv_bridge::toCvCopy(left_image, sensor_msgs::image_encodings::MONO8);
cv_bridge::CvImagePtr right_mono8 =
cv_bridge::toCvCopy(right_image, sensor_msgs::image_encodings::MONO8);
  // Resize images so that their width and height are divisible by 4
  // (a limitation of the CUDA code)
resizeToDivisibleBy4(left_mono8->image, right_mono8->image);
// Reallocate memory if needed
bool size_changed =
(cols_ != left_mono8->image.cols || rows_ != left_mono8->image.rows);
if (!memory_allocated_ || size_changed)
allocateMemory(left_mono8->image.cols, left_mono8->image.rows);
// Copy image to GPU device
size_t mono8_image_size = left_mono8->image.total() * sizeof(uint8_t);
cudaMemcpyAsync(d_im0_, left_mono8->image.ptr<uint8_t>(), mono8_image_size,
cudaMemcpyHostToDevice, stream1_);
cudaMemcpyAsync(d_im1_, right_mono8->image.ptr<uint8_t>(), mono8_image_size,
cudaMemcpyHostToDevice, stream1_);
BLOCK_SIZE_.x = 32;
BLOCK_SIZE_.y = 32;
grid_size_.x = (cols_ + BLOCK_SIZE_.x - 1) / BLOCK_SIZE_.x;
grid_size_.y = (rows_ + BLOCK_SIZE_.y - 1) / BLOCK_SIZE_.y;
CenterSymmetricCensusKernelSM2<<<grid_size_, BLOCK_SIZE_, 0, stream1_>>>(
d_im0_, d_im1_, d_transform0_, d_transform1_, rows_, cols_);
cudaStreamSynchronize(stream1_);
HammingDistanceCostKernel<<<rows_, MAX_DISPARITY, 0, stream1_>>>(
d_transform0_, d_transform1_, d_cost_, rows_, cols_);
const int PIXELS_PER_BLOCK = COSTAGG_BLOCKSIZE / WARP_SIZE;
const int PIXELS_PER_BLOCK_HORIZ = COSTAGG_BLOCKSIZE_HORIZ / WARP_SIZE;
// Cost Aggregation
CostAggregationKernelLeftToRight<<<(rows_ + PIXELS_PER_BLOCK_HORIZ - 1) /
PIXELS_PER_BLOCK_HORIZ,
COSTAGG_BLOCKSIZE_HORIZ, 0, stream2_>>>(
d_cost_, d_L0_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_, d_L4_, d_L5_,
d_L6_);
CostAggregationKernelRightToLeft<<<(rows_ + PIXELS_PER_BLOCK_HORIZ - 1) /
PIXELS_PER_BLOCK_HORIZ,
COSTAGG_BLOCKSIZE_HORIZ, 0, stream3_>>>(
d_cost_, d_L1_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_, d_L4_, d_L5_,
d_L6_);
CostAggregationKernelUpToDown<<<(cols_ + PIXELS_PER_BLOCK - 1) /
PIXELS_PER_BLOCK,
COSTAGG_BLOCKSIZE, 0, stream1_>>>(
d_cost_, d_L2_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_, d_L4_, d_L5_,
d_L6_);
CostAggregationKernelDownToUp<<<(cols_ + PIXELS_PER_BLOCK - 1) /
PIXELS_PER_BLOCK,
COSTAGG_BLOCKSIZE, 0, stream1_>>>(
d_cost_, d_L3_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_, d_L4_, d_L5_,
d_L6_);
CostAggregationKernelDiagonalDownUpLeftRight<<<
(cols_ + PIXELS_PER_BLOCK - 1) / PIXELS_PER_BLOCK, COSTAGG_BLOCKSIZE, 0,
stream1_>>>(d_cost_, d_L4_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_,
d_L4_, d_L5_, d_L6_);
CostAggregationKernelDiagonalUpDownLeftRight<<<
(cols_ + PIXELS_PER_BLOCK - 1) / PIXELS_PER_BLOCK, COSTAGG_BLOCKSIZE, 0,
stream1_>>>(d_cost_, d_L5_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_,
d_L4_, d_L5_, d_L6_);
CostAggregationKernelDiagonalDownUpRightLeft<<<
(cols_ + PIXELS_PER_BLOCK - 1) / PIXELS_PER_BLOCK, COSTAGG_BLOCKSIZE, 0,
stream1_>>>(d_cost_, d_L6_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_,
d_L4_, d_L5_, d_L6_);
CostAggregationKernelDiagonalUpDownRightLeft<<<
(cols_ + PIXELS_PER_BLOCK - 1) / PIXELS_PER_BLOCK, COSTAGG_BLOCKSIZE, 0,
stream1_>>>(d_cost_, d_L7_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_,
d_L4_, d_L5_, d_L6_);
int total_pixel = rows_ * cols_;
MedianFilter3x3<<<(total_pixel + MAX_DISPARITY - 1) / MAX_DISPARITY,
MAX_DISPARITY, 0, stream1_>>>(
d_disparity_, d_disparity_filtered_uchar_, rows_, cols_);
if (check_consistency_) {
ChooseRightDisparity<<<grid_size_, BLOCK_SIZE_, 0, stream1_>>>(
d_disparity_right_, d_s_, rows_, cols_);
MedianFilter3x3<<<(total_pixel + MAX_DISPARITY - 1) / MAX_DISPARITY,
MAX_DISPARITY, 0, stream1_>>>(
d_disparity_right_, d_disparity_right_filtered_uchar_, rows_, cols_);
LeftRightConsistencyCheck<<<grid_size_, BLOCK_SIZE_, 0, stream1_>>>(
d_disparity_filtered_uchar_, d_disparity_right_filtered_uchar_, rows_,
cols_);
}
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
ROS_ERROR_NAMED("libsgm_gpu", "%s %d\n", cudaGetErrorString(err), err);
return false;
}
cudaDeviceSynchronize();
cv::Mat disparity(rows_, cols_, CV_8UC1);
cudaMemcpy(disparity.data, d_disparity_filtered_uchar_,
sizeof(uint8_t) * total_pixel, cudaMemcpyDeviceToHost);
// Restore image size if resized to be divisible by 4
if (cols_ != left_image.width || rows_ != left_image.height) {
cv::Size input_size(left_image.width, left_image.height);
cv::resize(disparity, disparity, input_size, 0, 0, cv::INTER_AREA);
}
// convertToMsg(disparity, left_camera_info, right_camera_info,
// disparity_msg);
return true;
}
bool SgmGpu::computeDisparity(const cv::Mat &left_image,
const cv::Mat &right_image,
cv::Mat *disparity_out) {
// Convert images to grayscale
cv::Mat left_mono8, right_mono8;
  if (left_image.channels() > 1) {
    cv::cvtColor(left_image, left_mono8, CV_RGB2GRAY);
  } else {
    left_mono8 = left_image;
  }
  if (right_image.channels() > 1) {
    cv::cvtColor(right_image, right_mono8, CV_RGB2GRAY);
  } else {
    right_mono8 = right_image;
  }
  // Resize images so that their width and height are divisible by 4
  // (a limitation of the CUDA code)
resizeToDivisibleBy4(left_mono8, right_mono8);
// Reallocate memory if needed
bool size_changed = (cols_ != left_mono8.cols || rows_ != left_mono8.rows);
if (!memory_allocated_ || size_changed)
allocateMemory(left_mono8.cols, left_mono8.rows);
// Copy image to GPU device
size_t mono8_image_size = left_mono8.total() * sizeof(uint8_t);
cudaMemcpyAsync(d_im0_, left_mono8.ptr<uint8_t>(), mono8_image_size,
cudaMemcpyHostToDevice, stream1_);
cudaMemcpyAsync(d_im1_, right_mono8.ptr<uint8_t>(), mono8_image_size,
cudaMemcpyHostToDevice, stream1_);
BLOCK_SIZE_.x = 32;
BLOCK_SIZE_.y = 32;
grid_size_.x = (cols_ + BLOCK_SIZE_.x - 1) / BLOCK_SIZE_.x;
grid_size_.y = (rows_ + BLOCK_SIZE_.y - 1) / BLOCK_SIZE_.y;
CenterSymmetricCensusKernelSM2<<<grid_size_, BLOCK_SIZE_, 0, stream1_>>>(
d_im0_, d_im1_, d_transform0_, d_transform1_, rows_, cols_);
cudaStreamSynchronize(stream1_);
HammingDistanceCostKernel<<<rows_, MAX_DISPARITY, 0, stream1_>>>(
d_transform0_, d_transform1_, d_cost_, rows_, cols_);
const int PIXELS_PER_BLOCK = COSTAGG_BLOCKSIZE / WARP_SIZE;
const int PIXELS_PER_BLOCK_HORIZ = COSTAGG_BLOCKSIZE_HORIZ / WARP_SIZE;
// Cost Aggregation
CostAggregationKernelLeftToRight<<<(rows_ + PIXELS_PER_BLOCK_HORIZ - 1) /
PIXELS_PER_BLOCK_HORIZ,
COSTAGG_BLOCKSIZE_HORIZ, 0, stream2_>>>(
d_cost_, d_L0_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_, d_L4_, d_L5_,
d_L6_);
CostAggregationKernelRightToLeft<<<(rows_ + PIXELS_PER_BLOCK_HORIZ - 1) /
PIXELS_PER_BLOCK_HORIZ,
COSTAGG_BLOCKSIZE_HORIZ, 0, stream3_>>>(
d_cost_, d_L1_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_, d_L4_, d_L5_,
d_L6_);
CostAggregationKernelUpToDown<<<(cols_ + PIXELS_PER_BLOCK - 1) /
PIXELS_PER_BLOCK,
COSTAGG_BLOCKSIZE, 0, stream1_>>>(
d_cost_, d_L2_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_, d_L4_, d_L5_,
d_L6_);
CostAggregationKernelDownToUp<<<(cols_ + PIXELS_PER_BLOCK - 1) /
PIXELS_PER_BLOCK,
COSTAGG_BLOCKSIZE, 0, stream1_>>>(
d_cost_, d_L3_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_, d_L4_, d_L5_,
d_L6_);
CostAggregationKernelDiagonalDownUpLeftRight<<<
(cols_ + PIXELS_PER_BLOCK - 1) / PIXELS_PER_BLOCK, COSTAGG_BLOCKSIZE, 0,
stream1_>>>(d_cost_, d_L4_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_,
d_L4_, d_L5_, d_L6_);
CostAggregationKernelDiagonalUpDownLeftRight<<<
(cols_ + PIXELS_PER_BLOCK - 1) / PIXELS_PER_BLOCK, COSTAGG_BLOCKSIZE, 0,
stream1_>>>(d_cost_, d_L5_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_,
d_L4_, d_L5_, d_L6_);
CostAggregationKernelDiagonalDownUpRightLeft<<<
(cols_ + PIXELS_PER_BLOCK - 1) / PIXELS_PER_BLOCK, COSTAGG_BLOCKSIZE, 0,
stream1_>>>(d_cost_, d_L6_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_,
d_L4_, d_L5_, d_L6_);
CostAggregationKernelDiagonalUpDownRightLeft<<<
(cols_ + PIXELS_PER_BLOCK - 1) / PIXELS_PER_BLOCK, COSTAGG_BLOCKSIZE, 0,
stream1_>>>(d_cost_, d_L7_, d_s_, p1_, p2_, rows_, cols_, d_transform0_,
d_transform1_, d_disparity_, d_L0_, d_L1_, d_L2_, d_L3_,
d_L4_, d_L5_, d_L6_);
int total_pixel = rows_ * cols_;
MedianFilter3x3<<<(total_pixel + MAX_DISPARITY - 1) / MAX_DISPARITY,
MAX_DISPARITY, 0, stream1_>>>(
d_disparity_, d_disparity_filtered_uchar_, rows_, cols_);
if (check_consistency_) {
ChooseRightDisparity<<<grid_size_, BLOCK_SIZE_, 0, stream1_>>>(
d_disparity_right_, d_s_, rows_, cols_);
MedianFilter3x3<<<(total_pixel + MAX_DISPARITY - 1) / MAX_DISPARITY,
MAX_DISPARITY, 0, stream1_>>>(
d_disparity_right_, d_disparity_right_filtered_uchar_, rows_, cols_);
LeftRightConsistencyCheck<<<grid_size_, BLOCK_SIZE_, 0, stream1_>>>(
d_disparity_filtered_uchar_, d_disparity_right_filtered_uchar_, rows_,
cols_);
}
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
ROS_ERROR_NAMED("libsgm_gpu", "%s %d\n", cudaGetErrorString(err), err);
return false;
}
cudaDeviceSynchronize();
cv::Mat disparity(rows_, cols_, CV_8UC1);
cudaMemcpy(disparity.data, d_disparity_filtered_uchar_,
sizeof(uint8_t) * total_pixel, cudaMemcpyDeviceToHost);
cudaMemcpy(disparity_out->data, d_disparity_filtered_uchar_,
sizeof(uint8_t) * total_pixel, cudaMemcpyDeviceToHost);
// Restore image size if resized to be divisible by 4
if (cols_ != left_image.cols || rows_ != left_image.rows) {
cv::Size input_size(left_image.cols, left_image.rows);
cv::resize(disparity, disparity, input_size, 0, 0, cv::INTER_AREA);
}
// convertToMsg(disparity, left_camera_info, right_camera_info,
// disparity_msg);
return true;
}
void SgmGpu::resizeToDivisibleBy4(cv::Mat &left_image, cv::Mat &right_image) {
bool need_resize = false;
cv::Size original_size, resized_size;
original_size = cv::Size(left_image.cols, left_image.rows);
resized_size = original_size;
if (original_size.width % 4 != 0) {
need_resize = true;
resized_size.width = (original_size.width / 4 + 1) * 4;
}
if (original_size.height % 4 != 0) {
need_resize = true;
resized_size.height = (original_size.height / 4 + 1) * 4;
}
if (need_resize) {
cv::resize(left_image, left_image, resized_size, 0, 0, cv::INTER_LINEAR);
cv::resize(right_image, right_image, resized_size, 0, 0, cv::INTER_LINEAR);
}
}
// void SgmGpu::convertToMsg(const cv::Mat_<unsigned char> &disparity,
// const sensor_msgs::CameraInfo &left_camera_info,
// const sensor_msgs::CameraInfo &right_camera_info,
// stereo_msgs::DisparityImage &disparity_msg) {
// cv::Mat disparity_32f;
// disparity.convertTo(disparity_32f, CV_32F);
// cv_bridge::CvImage disparity_converter(
// left_camera_info.header, sensor_msgs::image_encodings::TYPE_32FC1,
// disparity_32f);
// disparity_converter.toImageMsg(disparity_msg.image);
//
// disparity_msg.header = left_camera_info.header;
//
// image_geometry::StereoCameraModel stereo_model;
// stereo_model.fromCameraInfo(left_camera_info, right_camera_info);
// disparity_msg.f = stereo_model.left().fx();
// disparity_msg.T = stereo_model.baseline();
//
// disparity_msg.min_disparity = 0.0;
// disparity_msg.max_disparity = MAX_DISPARITY;
// disparity_msg.delta_d = 1.0;
//}
} // namespace sgm_gpu
|
68ac339a879572e1627ea66ff6e5d3ca2a6eaca0.hip | // !!! This is a file automatically generated by hipify!!!
/* CREDITS
* Author : Martin Garaj <[email protected]>
* Date : 12/2017
* Project : HPC MPI Testing application
*
* REDISTRIBUTION
* The software is meant for internal use at City University of Hong Kong only.
* Further redistribution/sharing of the software or any parts of the source code without author's permission is disapproved.
*
* DISCLAIMER
 * This software comes as is, without any warranty. Use at your own risk.
*
* CHANGE LOG
* Please log any changes below, log the Author, Date and Change done.
* Author | Date | Change
* | YYYY/MM/DD |
*/
// header
#include "Cuda_GPU.cuh"
/** Constructor
*/
Cuda_GPU::Cuda_GPU(void){
// count visible GPUs
int devCount;
hipGetDeviceCount(&devCount);
// set private variable
this->device_count = devCount;
this->assigned_gpu_id = _ERROR_;
// initialize file descriptor structure of gpu
this->fd_gpu.clockRate = -1;
this->fd_gpu.integrated = -1;
this->fd_gpu.isMultiGpuBoard = -1;
this->fd_gpu.major = -1;
this->fd_gpu.managedMemory = -1;
this->fd_gpu.minor = -1;
this->fd_gpu.multiGpuBoardGroupID = -1;
this->fd_gpu.multiProcessorCount = -1;
strncpy(this->fd_gpu.name , "NOT_ASSIGNED", 250);
this->fd_gpu.pciBusID = -1;
this->fd_gpu.pciDeviceID = -1;
this->fd_gpu.pciDomainID = -1;
this->fd_gpu.totalConstMem = 0;
this->fd_gpu.totalGlobalMem = 0;
};
/** Destructor
*/
Cuda_GPU::~Cuda_GPU(void){};
/** Returns the number of visible cuda-capable GPUs
* @return number of visible GPUs
*/
int Cuda_GPU::dev_count(void){
return this->device_count;
};
/** Assigns a GPU according to provided GPU ID
* @param GPU ID ranging from 0 to (hipGetDeviceCount() - 1)
* @return _SUCCESS_ / _ERROR_
*/
int Cuda_GPU::assign(int gpu_id){
if( (gpu_id > this->device_count-1) or (gpu_id < 0) ){
return _ERROR_;
}else if( hipSuccess == hipSetDevice(gpu_id) ){
// remember the assigned gpu id
this->assigned_gpu_id = gpu_id;
// store the data about the assigned gpu
query(gpu_id);
return _SUCCESS_;
}
return _ERROR_;
};
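/* Hypothetical usage sketch (illustrative only; the getters below are the ones
 * defined in this class, but the calling code itself is not part of the project):
 *
 *   Cuda_GPU gpu;
 *   if (gpu.dev_count() > 0 && gpu.assign(0) == _SUCCESS_) {
 *       char name[256];
 *       gpu.get_name(name, 256);
 *       // compute capability and PCI IDs are now available via
 *       // get_major(), get_minor(), get_pciBusID(), get_pciDeviceID(), ...
 *   }
 */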
/** Fills in the private GPU File Descriptor structure
* @param GPU ID ranging from 0 to (hipGetDeviceCount() - 1)
* @return _SUCCESS_ / _ERROR_
*/
int Cuda_GPU::query(int gpu_id){
if( (gpu_id > this->device_count-1) or (gpu_id < 0)){
return _ERROR_;
}else{
// use Cuda to get GPU information
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, gpu_id);
this->fd_gpu.clockRate = devProp.clockRate;
this->fd_gpu.integrated = devProp.integrated;
this->fd_gpu.isMultiGpuBoard = devProp.isMultiGpuBoard;
this->fd_gpu.major = devProp.major;
this->fd_gpu.managedMemory = devProp.managedMemory;
this->fd_gpu.minor = devProp.minor;
this->fd_gpu.multiGpuBoardGroupID = devProp.multiGpuBoardGroupID;
this->fd_gpu.multiProcessorCount = devProp.multiProcessorCount;
strncpy(this->fd_gpu.name, devProp.name, 256);
this->fd_gpu.pciBusID = devProp.pciBusID;
this->fd_gpu.pciDeviceID = devProp.pciDeviceID;
this->fd_gpu.pciDomainID = devProp.pciDomainID;
this->fd_gpu.totalConstMem = devProp.totalConstMem;
this->fd_gpu.totalGlobalMem = devProp.totalGlobalMem;
}
return _SUCCESS_;
};
/** Copies the content of private GPU File Descriptor into provided structure
* @param struct_fd_gpu structure defined in Cuda_GPU::
* @return _SUCCESS_ / _ERROR_
*/
int Cuda_GPU::get_fd_gpu(struct_fd_gpu * pt_fd_gpu){
if(this->assigned_gpu_id != -1){
*pt_fd_gpu = this->fd_gpu;
return _SUCCESS_;
}
return _ERROR_;
};
/** Getter of the GPU name
* @param pointer to char array
 * @param length to be copied
* @return _SUCCESS_ / _ERROR_
*/
int Cuda_GPU::get_name(char * pt_name, int len){
if(this->assigned_gpu_id != _ERROR_){
strncpy(pt_name, this->fd_gpu.name, len);
return _SUCCESS_;
}
return _ERROR_;
};
/** Getter of the PCI Bus ID
* @return PCI Bus ID / _ERROR_
*/
int Cuda_GPU::get_pciBusID(void){
if(this->assigned_gpu_id != _ERROR_){
return this->fd_gpu.pciBusID;
}
return _ERROR_;
};
/** Getter of the PCI Device ID
* @return PCI Device ID / _ERROR_
*/
int Cuda_GPU::get_pciDeviceID(void){
if(this->assigned_gpu_id != _ERROR_){
return this->fd_gpu.pciDeviceID;
}
return _ERROR_;
};
/** Getter of the PCI Domain ID
* @return PCI Domain ID / _ERROR_
*/
int Cuda_GPU::get_pciDomainID(void){
if(this->assigned_gpu_id != _ERROR_){
return this->fd_gpu.pciDomainID;
}
return _ERROR_;
};
/** Getter of major version release
* @return major / _ERROR_
*/
int Cuda_GPU::get_major(void){
if(this->assigned_gpu_id != _ERROR_){
return this->fd_gpu.major;
}
return _ERROR_;
};
/** Getter of minor version release
* @return minor / _ERROR_
*/
int Cuda_GPU::get_minor(void){
if(this->assigned_gpu_id != _ERROR_){
return this->fd_gpu.minor;
}
return _ERROR_;
};
/** Getter of Assigned GPU ID
* @return Assigned GPU ID / _ERROR_
*/
int Cuda_GPU::get_gpu_id(void){
if(this->assigned_gpu_id != _ERROR_){
return this->assigned_gpu_id;
}
return _ERROR_;
};
| 68ac339a879572e1627ea66ff6e5d3ca2a6eaca0.cu | /* CREDITS
* Author : Martin Garaj <[email protected]>
* Date : 12/2017
* Project : HPC MPI Testing application
*
* REDISTRIBUTION
* The software is meant for internal use at City University of Hong Kong only.
* Further redistribution/sharing of the software or any parts of the source code without author's permission is disapproved.
*
* DISCLAIMER
 * This software comes as is, without any warranty. Use at your own risk.
*
* CHANGE LOG
* Please log any changes below, log the Author, Date and Change done.
* Author | Date | Change
* | YYYY/MM/DD |
*/
// header
#include "Cuda_GPU.cuh"
/** Constructor
*/
Cuda_GPU::Cuda_GPU(void){
// count visible GPUs
int devCount;
cudaGetDeviceCount(&devCount);
// set private variable
this->device_count = devCount;
this->assigned_gpu_id = _ERROR_;
// initialize file descriptor structure of gpu
this->fd_gpu.clockRate = -1;
this->fd_gpu.integrated = -1;
this->fd_gpu.isMultiGpuBoard = -1;
this->fd_gpu.major = -1;
this->fd_gpu.managedMemory = -1;
this->fd_gpu.minor = -1;
this->fd_gpu.multiGpuBoardGroupID = -1;
this->fd_gpu.multiProcessorCount = -1;
strncpy(this->fd_gpu.name , "NOT_ASSIGNED", 250);
this->fd_gpu.pciBusID = -1;
this->fd_gpu.pciDeviceID = -1;
this->fd_gpu.pciDomainID = -1;
this->fd_gpu.totalConstMem = 0;
this->fd_gpu.totalGlobalMem = 0;
};
/** Destructor
*/
Cuda_GPU::~Cuda_GPU(void){};
/** Returns the number of visible cuda-capable GPUs
* @return number of visible GPUs
*/
int Cuda_GPU::dev_count(void){
return this->device_count;
};
/** Assigns a GPU according to provided GPU ID
* @param GPU ID ranging from 0 to (cudaGetDeviceCount() - 1)
* @return _SUCCESS_ / _ERROR_
*/
int Cuda_GPU::assign(int gpu_id){
if( (gpu_id > this->device_count-1) or (gpu_id < 0) ){
return _ERROR_;
}else if( cudaSuccess == cudaSetDevice(gpu_id) ){
// remember the assigned gpu id
this->assigned_gpu_id = gpu_id;
// store the data about the assigned gpu
query(gpu_id);
return _SUCCESS_;
}
return _ERROR_;
};
/** Fills in the private GPU File Descriptor structure
* @param GPU ID ranging from 0 to (cudaGetDeviceCount() - 1)
* @return _SUCCESS_ / _ERROR_
*/
int Cuda_GPU::query(int gpu_id){
if( (gpu_id > this->device_count-1) or (gpu_id < 0)){
return _ERROR_;
}else{
// use Cuda to get GPU information
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, gpu_id);
this->fd_gpu.clockRate = devProp.clockRate;
this->fd_gpu.integrated = devProp.integrated;
this->fd_gpu.isMultiGpuBoard = devProp.isMultiGpuBoard;
this->fd_gpu.major = devProp.major;
this->fd_gpu.managedMemory = devProp.managedMemory;
this->fd_gpu.minor = devProp.minor;
this->fd_gpu.multiGpuBoardGroupID = devProp.multiGpuBoardGroupID;
this->fd_gpu.multiProcessorCount = devProp.multiProcessorCount;
strncpy(this->fd_gpu.name, devProp.name, 256);
this->fd_gpu.pciBusID = devProp.pciBusID;
this->fd_gpu.pciDeviceID = devProp.pciDeviceID;
this->fd_gpu.pciDomainID = devProp.pciDomainID;
this->fd_gpu.totalConstMem = devProp.totalConstMem;
this->fd_gpu.totalGlobalMem = devProp.totalGlobalMem;
}
return _SUCCESS_;
};
/** Copies the content of private GPU File Descriptor into provided structure
* @param struct_fd_gpu structure defined in Cuda_GPU::
* @return _SUCCESS_ / _ERROR_
*/
int Cuda_GPU::get_fd_gpu(struct_fd_gpu * pt_fd_gpu){
if(this->assigned_gpu_id != -1){
*pt_fd_gpu = this->fd_gpu;
return _SUCCESS_;
}
return _ERROR_;
};
/** Getter of the GPU name
* @param pointer to char array
 * @param length to be copied
* @return _SUCCESS_ / _ERROR_
*/
int Cuda_GPU::get_name(char * pt_name, int len){
if(this->assigned_gpu_id != _ERROR_){
strncpy(pt_name, this->fd_gpu.name, len);
return _SUCCESS_;
}
return _ERROR_;
};
/** Getter of the PCI Bus ID
* @return PCI Bus ID / _ERROR_
*/
int Cuda_GPU::get_pciBusID(void){
if(this->assigned_gpu_id != _ERROR_){
return this->fd_gpu.pciBusID;
}
return _ERROR_;
};
/** Getter of the PCI Device ID
* @return PCI Device ID / _ERROR_
*/
int Cuda_GPU::get_pciDeviceID(void){
if(this->assigned_gpu_id != _ERROR_){
return this->fd_gpu.pciDeviceID;
}
return _ERROR_;
};
/** Getter of the PCI Domain ID
* @return PCI Domain ID / _ERROR_
*/
int Cuda_GPU::get_pciDomainID(void){
if(this->assigned_gpu_id != _ERROR_){
return this->fd_gpu.pciDomainID;
}
return _ERROR_;
};
/** Getter of major version release
* @return major / _ERROR_
*/
int Cuda_GPU::get_major(void){
if(this->assigned_gpu_id != _ERROR_){
return this->fd_gpu.major;
}
return _ERROR_;
};
/** Getter of minor version release
* @return minor / _ERROR_
*/
int Cuda_GPU::get_minor(void){
if(this->assigned_gpu_id != _ERROR_){
return this->fd_gpu.minor;
}
return _ERROR_;
};
/** Getter of Assigned GPU ID
* @return Assigned GPU ID / _ERROR_
*/
int Cuda_GPU::get_gpu_id(void){
if(this->assigned_gpu_id != _ERROR_){
return this->assigned_gpu_id;
}
return _ERROR_;
};
|
36ac9ec480058a164ab2c9e6ae6b3cbdbc23f717.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* -----------------------------------------------------------------
* Programmer(s): David J. Gardner @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2023, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* This is the testing routine to check the performance of the
* NVECTOR CUDA module implementation.
* -----------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <sundials/sundials_types.h>
#include <nvector/nvector_cuda.h>
#include <sundials/sundials_math.h>
#include "test_nvector_performance.h"
/* private functions */
static int InitializeClearCache(int cachesize);
static int FinalizeClearCache();
/* private data for clearing cache */
static sunindextype N; /* data length */
static realtype* h_data; /* host data */
static realtype* h_sum; /* host sum */
static realtype* d_data; /* device data */
static realtype* d_sum; /* device sum */
static int blocksPerGrid;
/* CUDA reduction kernel for clearing the cache between tests */
__global__
void ClearCacheKernel(sunindextype N, realtype* data, realtype* out)
{
__shared__ realtype shared[256];
int sharedidx = blockIdx.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
realtype tmp = 0;
while (tid < N) {
tmp += data[tid];
tid += blockDim.x * gridDim.x;
}
shared[sharedidx] = tmp;
__syncthreads();
  /* assumes blockDim is a power of 2 */
int i = blockDim.x/2;
while (i != 0) {
if (sharedidx < i)
shared[sharedidx] += shared[sharedidx + i];
__syncthreads();
i /= 2;
}
if (sharedidx == 0)
out[sharedidx] = shared[0];
}
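/* Illustrative trace of the halving loop above (assuming blockDim.x = 256, the
   block size used by ClearCache() below): the stride i takes the values 128,
   64, 32, 16, 8, 4, 2, 1 before reaching 0, which is why a power-of-two block
   size is assumed. */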
/* ----------------------------------------------------------------------
* Main NVector Testing Routine
* --------------------------------------------------------------------*/
int main(int argc, char *argv[])
{
SUNContext ctx = NULL; /* SUNDIALS context */
N_Vector X = NULL; /* test vector */
sunindextype veclen; /* vector length */
int print_timing; /* output timings */
int ntests; /* number of tests */
int nvecs; /* number of tests */
int nsums; /* number of sums */
int cachesize; /* size of cache (MB) */
int flag; /* return flag */
printf("\nStart Tests\n");
printf("Vector Name: Cuda\n");
/* check input and set vector length */
if (argc < 7){
printf("ERROR: SIX (6) arguments required: ");
printf("<vector length> <number of vectors> <number of sums> <number of tests> ");
printf("<cache size (MB)> <print timing>\n");
return(-1);
}
veclen = atol(argv[1]);
if (veclen <= 0) {
printf("ERROR: length of vector must be a positive integer \n");
return(-1);
}
nvecs = atol(argv[2]);
if (nvecs < 1) {
printf("WARNING: Fused operation tests disabled\n");
}
nsums = atol(argv[3]);
if (nsums < 1) {
printf("WARNING: Some fused operation tests disabled\n");
}
ntests = atol(argv[4]);
if (ntests <= 0) {
printf("ERROR: number of tests must be a positive integer \n");
return(-1);
}
cachesize = atol(argv[5]);
if (cachesize < 0) {
printf("ERROR: cache size (MB) must be a non-negative integer \n");
return(-1);
}
InitializeClearCache(cachesize);
print_timing = atoi(argv[6]);
SetTiming(print_timing, 0);
printf("\nRunning with: \n");
printf(" vector length %ld \n", (long int) veclen);
printf(" max number of vectors %d \n", nvecs);
printf(" max number of sums %d \n", nsums);
printf(" number of tests %d \n", ntests);
printf(" timing on/off %d \n", print_timing);
flag = SUNContext_Create(NULL, &ctx);
if (flag) return flag;
/* Create vectors */
X = N_VNew_Cuda(veclen, ctx);
/* run tests */
if (print_timing) printf("\n\n standard operations:\n");
if (print_timing) PrintTableHeader(1);
flag = Test_N_VLinearSum(X, veclen, ntests);
flag = Test_N_VConst(X, veclen, ntests);
flag = Test_N_VProd(X, veclen, ntests);
flag = Test_N_VDiv(X, veclen, ntests);
flag = Test_N_VScale(X, veclen, ntests);
flag = Test_N_VAbs(X, veclen, ntests);
flag = Test_N_VInv(X, veclen, ntests);
flag = Test_N_VAddConst(X, veclen, ntests);
flag = Test_N_VDotProd(X, veclen, ntests);
flag = Test_N_VMaxNorm(X, veclen, ntests);
flag = Test_N_VWrmsNorm(X, veclen, ntests);
flag = Test_N_VWrmsNormMask(X, veclen, ntests);
flag = Test_N_VMin(X, veclen, ntests);
flag = Test_N_VWL2Norm(X, veclen, ntests);
flag = Test_N_VL1Norm(X, veclen, ntests);
flag = Test_N_VCompare(X, veclen, ntests);
flag = Test_N_VInvTest(X, veclen, ntests);
flag = Test_N_VConstrMask(X, veclen, ntests);
flag = Test_N_VMinQuotient(X, veclen, ntests);
if (nvecs > 0)
{
if (print_timing) printf("\n\n fused operations 1: nvecs= %d\n", nvecs);
if (print_timing) PrintTableHeader(2);
flag = Test_N_VLinearCombination(X, veclen, nvecs, ntests);
flag = Test_N_VScaleAddMulti(X, veclen, nvecs, ntests);
flag = Test_N_VDotProdMulti(X, veclen, nvecs, ntests);
flag = Test_N_VLinearSumVectorArray(X, veclen, nvecs, ntests);
flag = Test_N_VScaleVectorArray(X, veclen, nvecs, ntests);
flag = Test_N_VConstVectorArray(X, veclen, nvecs, ntests);
flag = Test_N_VWrmsNormVectorArray(X, veclen, nvecs, ntests);
flag = Test_N_VWrmsNormMaskVectorArray(X, veclen, nvecs, ntests);
if (nsums > 0)
{
if (print_timing) printf("\n\n fused operations 2: nvecs= %d nsums= %d\n", nvecs, nsums);
if (print_timing) PrintTableHeader(2);
flag = Test_N_VScaleAddMultiVectorArray(X, veclen, nvecs, nsums, ntests);
flag = Test_N_VLinearCombinationVectorArray(X, veclen, nvecs, nsums, ntests);
}
}
/* Free vectors */
N_VDestroy(X);
FinalizeClearCache();
flag = SUNContext_Free(&ctx);
if (flag) return flag;
printf("\nFinished Tests\n");
return(flag);
}
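/* Example invocation (hypothetical binary name and values, matching the six
   arguments parsed above):
     ./test_nvector_performance_cuda 1000000 5 3 10 2 1
   i.e. vector length 1000000, up to 5 fused vectors, up to 3 sums, 10 test
   repetitions, a 2 MB cache-clearing buffer, and timing output enabled. */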
/* ----------------------------------------------------------------------
* Functions required by testing routines to fill vector data
* --------------------------------------------------------------------*/
/* random data between lower and upper */
void N_VRand(N_Vector Xvec, sunindextype Xlen, realtype lower, realtype upper)
{
rand_realtype(N_VGetHostArrayPointer_Cuda(Xvec), Xlen, lower, upper);
N_VCopyToDevice_Cuda(Xvec);
}
/* series of 0 and 1 */
void N_VRandZeroOne(N_Vector Xvec, sunindextype Xlen)
{
rand_realtype_zero_one(N_VGetHostArrayPointer_Cuda(Xvec), Xlen);
N_VCopyToDevice_Cuda(Xvec);
}
/* random values for constraint array */
void N_VRandConstraints(N_Vector Xvec, sunindextype Xlen)
{
rand_realtype_constraints(N_VGetHostArrayPointer_Cuda(Xvec), Xlen);
N_VCopyToDevice_Cuda(Xvec);
}
/* ----------------------------------------------------------------------
* Functions required for MPI or GPU testing
* --------------------------------------------------------------------*/
void collect_times(N_Vector X, double *times, int ntimes)
{
/* not running with MPI, just return */
return;
}
void sync_device(N_Vector x)
{
hipDeviceSynchronize();
return;
}
/* ----------------------------------------------------------------------
* Functions required for clearing cache
* --------------------------------------------------------------------*/
static int InitializeClearCache(int cachesize)
{
hipError_t err; /* cuda error flag */
size_t nbytes; /* cache size in bytes */
/* determine size of vector to clear cache, N = ceil(2 * nbytes/realtype) */
nbytes = (size_t) (2 * cachesize * 1024 * 1024);
N = (sunindextype) ((nbytes + sizeof(realtype) - 1)/sizeof(realtype));
/* allocate host data */
blocksPerGrid = SUNMIN(32,(N+255)/256);
h_data = (realtype*) malloc(N*sizeof(realtype));
h_sum = (realtype*) malloc(blocksPerGrid*sizeof(realtype));
/* allocate device data */
err = hipMalloc((void**) &d_data, N*sizeof(realtype));
if (err != hipSuccess) {
fprintf(stderr,"Failed to allocate device vector (error code %d )!\n",err);
return(-1);
}
err = hipMalloc((void**) &d_sum, blocksPerGrid*sizeof(realtype));
if (err != hipSuccess) {
fprintf(stderr,"Failed to allocate device vector (error code %d )!\n",err);
return(-1);
}
/* fill host vector with random data and copy to device */
rand_realtype(h_data, N, RCONST(-1.0), RCONST(1.0));
err = hipMemcpy(d_data, h_data, N*sizeof(realtype), hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr,"Failed to copy data from host to device (error code %d )!\n",err);
return(-1);
}
return(0);
}
static int FinalizeClearCache()
{
hipError_t err; /* cuda error flag */
free(h_data);
free(h_sum);
err = hipFree(d_data);
if (err != hipSuccess) {
fprintf(stderr,"Failed to free device data (error code %d )!\n",err);
return(-1);
}
err = hipFree(d_sum);
if (err != hipSuccess) {
fprintf(stderr,"Failed to free device data (error code %d )!\n",err);
return(-1);
}
return(0);
}
void ClearCache()
{
/* call cuda kernel to clear the cache */
hipLaunchKernelGGL(( ClearCacheKernel), dim3(SUNMIN(32,(N+255)/256)), dim3(256), 0, 0, N, d_data, d_sum);
hipMemcpy(h_sum, d_sum, blocksPerGrid*sizeof(realtype), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
return;
}
| 36ac9ec480058a164ab2c9e6ae6b3cbdbc23f717.cu | /* -----------------------------------------------------------------
* Programmer(s): David J. Gardner @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2023, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* This is the testing routine to check the performance of the
* NVECTOR CUDA module implementation.
* -----------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <sundials/sundials_types.h>
#include <nvector/nvector_cuda.h>
#include <sundials/sundials_math.h>
#include "test_nvector_performance.h"
/* private functions */
static int InitializeClearCache(int cachesize);
static int FinalizeClearCache();
/* private data for clearing cache */
static sunindextype N; /* data length */
static realtype* h_data; /* host data */
static realtype* h_sum; /* host sum */
static realtype* d_data; /* device data */
static realtype* d_sum; /* device sum */
static int blocksPerGrid;
/* CUDA reduction kernel for clearing the cache between tests */
__global__
void ClearCacheKernel(sunindextype N, realtype* data, realtype* out)
{
__shared__ realtype shared[256];
int sharedidx = blockIdx.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
realtype tmp = 0;
while (tid < N) {
tmp += data[tid];
tid += blockDim.x * gridDim.x;
}
shared[sharedidx] = tmp;
__syncthreads();
  /* assumes blockDim is a power of 2 */
int i = blockDim.x/2;
while (i != 0) {
if (sharedidx < i)
shared[sharedidx] += shared[sharedidx + i];
__syncthreads();
i /= 2;
}
if (sharedidx == 0)
out[sharedidx] = shared[0];
}
/* ----------------------------------------------------------------------
* Main NVector Testing Routine
* --------------------------------------------------------------------*/
int main(int argc, char *argv[])
{
SUNContext ctx = NULL; /* SUNDIALS context */
N_Vector X = NULL; /* test vector */
sunindextype veclen; /* vector length */
int print_timing; /* output timings */
int ntests; /* number of tests */
int nvecs; /* number of tests */
int nsums; /* number of sums */
int cachesize; /* size of cache (MB) */
int flag; /* return flag */
printf("\nStart Tests\n");
printf("Vector Name: Cuda\n");
/* check input and set vector length */
if (argc < 7){
printf("ERROR: SIX (6) arguments required: ");
printf("<vector length> <number of vectors> <number of sums> <number of tests> ");
printf("<cache size (MB)> <print timing>\n");
return(-1);
}
veclen = atol(argv[1]);
if (veclen <= 0) {
printf("ERROR: length of vector must be a positive integer \n");
return(-1);
}
nvecs = atol(argv[2]);
if (nvecs < 1) {
printf("WARNING: Fused operation tests disabled\n");
}
nsums = atol(argv[3]);
if (nsums < 1) {
printf("WARNING: Some fused operation tests disabled\n");
}
ntests = atol(argv[4]);
if (ntests <= 0) {
printf("ERROR: number of tests must be a positive integer \n");
return(-1);
}
cachesize = atol(argv[5]);
if (cachesize < 0) {
printf("ERROR: cache size (MB) must be a non-negative integer \n");
return(-1);
}
InitializeClearCache(cachesize);
print_timing = atoi(argv[6]);
SetTiming(print_timing, 0);
printf("\nRunning with: \n");
printf(" vector length %ld \n", (long int) veclen);
printf(" max number of vectors %d \n", nvecs);
printf(" max number of sums %d \n", nsums);
printf(" number of tests %d \n", ntests);
printf(" timing on/off %d \n", print_timing);
flag = SUNContext_Create(NULL, &ctx);
if (flag) return flag;
/* Create vectors */
X = N_VNew_Cuda(veclen, ctx);
/* run tests */
if (print_timing) printf("\n\n standard operations:\n");
if (print_timing) PrintTableHeader(1);
flag = Test_N_VLinearSum(X, veclen, ntests);
flag = Test_N_VConst(X, veclen, ntests);
flag = Test_N_VProd(X, veclen, ntests);
flag = Test_N_VDiv(X, veclen, ntests);
flag = Test_N_VScale(X, veclen, ntests);
flag = Test_N_VAbs(X, veclen, ntests);
flag = Test_N_VInv(X, veclen, ntests);
flag = Test_N_VAddConst(X, veclen, ntests);
flag = Test_N_VDotProd(X, veclen, ntests);
flag = Test_N_VMaxNorm(X, veclen, ntests);
flag = Test_N_VWrmsNorm(X, veclen, ntests);
flag = Test_N_VWrmsNormMask(X, veclen, ntests);
flag = Test_N_VMin(X, veclen, ntests);
flag = Test_N_VWL2Norm(X, veclen, ntests);
flag = Test_N_VL1Norm(X, veclen, ntests);
flag = Test_N_VCompare(X, veclen, ntests);
flag = Test_N_VInvTest(X, veclen, ntests);
flag = Test_N_VConstrMask(X, veclen, ntests);
flag = Test_N_VMinQuotient(X, veclen, ntests);
if (nvecs > 0)
{
if (print_timing) printf("\n\n fused operations 1: nvecs= %d\n", nvecs);
if (print_timing) PrintTableHeader(2);
flag = Test_N_VLinearCombination(X, veclen, nvecs, ntests);
flag = Test_N_VScaleAddMulti(X, veclen, nvecs, ntests);
flag = Test_N_VDotProdMulti(X, veclen, nvecs, ntests);
flag = Test_N_VLinearSumVectorArray(X, veclen, nvecs, ntests);
flag = Test_N_VScaleVectorArray(X, veclen, nvecs, ntests);
flag = Test_N_VConstVectorArray(X, veclen, nvecs, ntests);
flag = Test_N_VWrmsNormVectorArray(X, veclen, nvecs, ntests);
flag = Test_N_VWrmsNormMaskVectorArray(X, veclen, nvecs, ntests);
if (nsums > 0)
{
if (print_timing) printf("\n\n fused operations 2: nvecs= %d nsums= %d\n", nvecs, nsums);
if (print_timing) PrintTableHeader(2);
flag = Test_N_VScaleAddMultiVectorArray(X, veclen, nvecs, nsums, ntests);
flag = Test_N_VLinearCombinationVectorArray(X, veclen, nvecs, nsums, ntests);
}
}
/* Free vectors */
N_VDestroy(X);
FinalizeClearCache();
flag = SUNContext_Free(&ctx);
if (flag) return flag;
printf("\nFinished Tests\n");
return(flag);
}
/* ----------------------------------------------------------------------
* Functions required by testing routines to fill vector data
* --------------------------------------------------------------------*/
/* random data between lower and upper */
void N_VRand(N_Vector Xvec, sunindextype Xlen, realtype lower, realtype upper)
{
rand_realtype(N_VGetHostArrayPointer_Cuda(Xvec), Xlen, lower, upper);
N_VCopyToDevice_Cuda(Xvec);
}
/* series of 0 and 1 */
void N_VRandZeroOne(N_Vector Xvec, sunindextype Xlen)
{
rand_realtype_zero_one(N_VGetHostArrayPointer_Cuda(Xvec), Xlen);
N_VCopyToDevice_Cuda(Xvec);
}
/* random values for constraint array */
void N_VRandConstraints(N_Vector Xvec, sunindextype Xlen)
{
rand_realtype_constraints(N_VGetHostArrayPointer_Cuda(Xvec), Xlen);
N_VCopyToDevice_Cuda(Xvec);
}
/* ----------------------------------------------------------------------
* Functions required for MPI or GPU testing
* --------------------------------------------------------------------*/
void collect_times(N_Vector X, double *times, int ntimes)
{
/* not running with MPI, just return */
return;
}
void sync_device(N_Vector x)
{
cudaDeviceSynchronize();
return;
}
/* ----------------------------------------------------------------------
* Functions required for clearing cache
* --------------------------------------------------------------------*/
static int InitializeClearCache(int cachesize)
{
cudaError_t err; /* cuda error flag */
size_t nbytes; /* cache size in bytes */
/* determine size of vector to clear cache, N = ceil(2 * nbytes/realtype) */
nbytes = (size_t) (2 * cachesize * 1024 * 1024);
N = (sunindextype) ((nbytes + sizeof(realtype) - 1)/sizeof(realtype));
/* allocate host data */
blocksPerGrid = SUNMIN(32,(N+255)/256);
h_data = (realtype*) malloc(N*sizeof(realtype));
h_sum = (realtype*) malloc(blocksPerGrid*sizeof(realtype));
/* allocate device data */
err = cudaMalloc((void**) &d_data, N*sizeof(realtype));
if (err != cudaSuccess) {
fprintf(stderr,"Failed to allocate device vector (error code %d )!\n",err);
return(-1);
}
err = cudaMalloc((void**) &d_sum, blocksPerGrid*sizeof(realtype));
if (err != cudaSuccess) {
fprintf(stderr,"Failed to allocate device vector (error code %d )!\n",err);
return(-1);
}
/* fill host vector with random data and copy to device */
rand_realtype(h_data, N, RCONST(-1.0), RCONST(1.0));
err = cudaMemcpy(d_data, h_data, N*sizeof(realtype), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr,"Failed to copy data from host to device (error code %d )!\n",err);
return(-1);
}
return(0);
}
static int FinalizeClearCache()
{
cudaError_t err; /* cuda error flag */
free(h_data);
free(h_sum);
err = cudaFree(d_data);
if (err != cudaSuccess) {
fprintf(stderr,"Failed to free device data (error code %d )!\n",err);
return(-1);
}
err = cudaFree(d_sum);
if (err != cudaSuccess) {
fprintf(stderr,"Failed to free device data (error code %d )!\n",err);
return(-1);
}
return(0);
}
void ClearCache()
{
/* call cuda kernel to clear the cache */
ClearCacheKernel<<<SUNMIN(32,(N+255)/256), 256>>>(N, d_data, d_sum);
cudaMemcpy(h_sum, d_sum, blocksPerGrid*sizeof(realtype), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
return;
}
|
21413c0193a45b441a7d3ba7afdeafa6ffd0ddeb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kMultiSoftmaxCost.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *probs = NULL;
hipMalloc(&probs, XSIZE*YSIZE);
float *labels = NULL;
hipMalloc(&labels, XSIZE*YSIZE);
float *maxProbs = NULL;
hipMalloc(&maxProbs, XSIZE*YSIZE);
float *labelLogProbs = NULL;
hipMalloc(&labelLogProbs, XSIZE*YSIZE);
float *correctProbs = NULL;
hipMalloc(&correctProbs, XSIZE*YSIZE);
float *top5Probs = NULL;
hipMalloc(&top5Probs, XSIZE*YSIZE);
const int numCases = 1;
const int numOut = 1;
const int setSize = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
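// The two loops above pad the matrix dimensions up to the next multiple of the
// block dimensions so the launch grid divides evenly; e.g. XSIZE = 240 with
// BLOCKX = 32 (values from the tables above) is padded to iXSIZE = 256.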
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(kMultiSoftmaxCost, dim3(gridBlock), dim3(threadBlock), 0, 0, probs, labels, maxProbs, labelLogProbs, correctProbs, top5Probs, numCases, numOut, setSize);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(kMultiSoftmaxCost, dim3(gridBlock), dim3(threadBlock), 0, 0, probs, labels, maxProbs, labelLogProbs, correctProbs, top5Probs, numCases, numOut, setSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(kMultiSoftmaxCost, dim3(gridBlock), dim3(threadBlock), 0, 0, probs, labels, maxProbs, labelLogProbs, correctProbs, top5Probs, numCases, numOut, setSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 21413c0193a45b441a7d3ba7afdeafa6ffd0ddeb.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kMultiSoftmaxCost.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *probs = NULL;
cudaMalloc(&probs, XSIZE*YSIZE);
float *labels = NULL;
cudaMalloc(&labels, XSIZE*YSIZE);
float *maxProbs = NULL;
cudaMalloc(&maxProbs, XSIZE*YSIZE);
float *labelLogProbs = NULL;
cudaMalloc(&labelLogProbs, XSIZE*YSIZE);
float *correctProbs = NULL;
cudaMalloc(&correctProbs, XSIZE*YSIZE);
float *top5Probs = NULL;
cudaMalloc(&top5Probs, XSIZE*YSIZE);
const int numCases = 1;
const int numOut = 1;
const int setSize = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kMultiSoftmaxCost<<<gridBlock,threadBlock>>>(probs,labels,maxProbs,labelLogProbs,correctProbs,top5Probs,numCases,numOut,setSize);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kMultiSoftmaxCost<<<gridBlock,threadBlock>>>(probs,labels,maxProbs,labelLogProbs,correctProbs,top5Probs,numCases,numOut,setSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kMultiSoftmaxCost<<<gridBlock,threadBlock>>>(probs,labels,maxProbs,labelLogProbs,correctProbs,top5Probs,numCases,numOut,setSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
e34ee7eb9767a90a72949d5ada1ab43e998bc2bd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include "GoLgeneric.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cuda_device_runtime_api.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#define getl(X,Y) local[((X)+1) + (blockDim.x+2) * ((Y)+1)]
__global__ void cuda_kernel(int * src, int * dst, size_t width, size_t height) {
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y;
extern __shared__ int local[];
    if (idx < width && idy < height) { // only threads mapped to a cell inside the grid do work
dim3 li(threadIdx.x, threadIdx.y);
//load own
getl(li.x, li.y) = get_rm(src, idx, idy);
size_t idxm1 = (size_t) (idx == 0) * (width - 1) + (size_t) (idx > 0) * (idx - 1);
size_t idxp1 = (size_t) (idx + 1 < width) * (idx + 1);
size_t idym1 = (size_t) (idy == 0) * (height - 1) + (size_t) (idy > 0) * (idy - 1);
size_t idyp1 = (size_t) (idy + 1 < height) * (idy + 1);
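    // The four index expressions above compute wrap-around (toroidal) neighbour
    // indices without branching: when idx == 0 the (idx == 0) term selects
    // width - 1 and the (idx > 0) term contributes 0; when idx > 0 only idx - 1
    // survives. Likewise (idx + 1 < width) evaluates to 0 at the right border,
    // so idxp1 wraps back to 0 there.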
//Load extra
if (li.x == 0) //Left edge
getl(-1, li.y) = get_rm(src, idxm1, idy);
if (li.y == 0) //Upper edge
getl(li.x, -1) = get_rm(src, idx, idym1);
if (li.x == 0 && li.y == 0) //Upper left corner
getl(-1, -1) = get_rm(src, idxm1, idym1);
if (li.x == blockDim.x - 1 || idx == width - 1) // right edge
getl(li.x + 1, li.y) = get_rm(src, idxp1, idy);
if (li.y == blockDim.y - 1 || idy == height - 1) //bottom edge
getl(li.x, li.y + 1) = get_rm(src, idx, idyp1);
if ((li.y == blockDim.y - 1 || idy == height - 1) && li.x == 0) // lower left corner
            getl(li.x - 1, li.y + 1) = get_rm(src, idxm1, idyp1);
        if ((li.x == blockDim.x - 1 || idx == width - 1) && li.y == 0) //upper right corner
            getl(li.x + 1, li.y - 1) = get_rm(src, idxp1, idym1);
if ((li.y == blockDim.y - 1 || idy == height - 1) && (li.x == blockDim.x - 1 || idx == width - 1)) //lower right corner
getl(li.x + 1, li.y + 1) = get_rm(src, idxp1, idyp1);
        //Make the shared-memory halo loads visible to every thread of the block before neighbours are read
        //(assumes the launch grid exactly covers the field so all threads of the block reach this barrier)
        __syncthreads();
        //Calculate
int acc = 0;
acc += getl(li.x - 1, li.y + 1);
acc += getl(li.x - 1, li.y + 0);
acc += getl(li.x - 1, li.y - 1);
acc += getl(li.x - 0, li.y + 1);
// acc += getl(li.x - 0, li.y + 0);
acc += getl(li.x - 0, li.y - 1);
acc += getl(li.x + 1, li.y + 1);
acc += getl(li.x + 1, li.y + 0);
acc += getl(li.x + 1, li.y - 1);
//acc = 2 : x * 1 + 0
//acc = 3 : x * 0 + 1
//acc = ? : x * 0 + 0
get_rm(dst, idx, idy) = getl(li.x, li.y) * (int) (acc == 2) + (int) (acc == 3);
}
} | e34ee7eb9767a90a72949d5ada1ab43e998bc2bd.cu | #include <stdlib.h>
#include "GoLgeneric.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_device_runtime_api.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#define getl(X,Y) local[((X)+1) + (blockDim.x+2) * ((Y)+1)]
__global__ void cuda_kernel(int * src, int * dst, size_t width, size_t height) {
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y;
extern __shared__ int local[];
if (idx < width && idy < height) { //If we are not a edge
dim3 li(threadIdx.x, threadIdx.y);
//load own
getl(li.x, li.y) = get_rm(src, idx, idy);
size_t idxm1 = (size_t) (idx == 0) * (width - 1) + (size_t) (idx > 0) * (idx - 1);
size_t idxp1 = (size_t) (idx + 1 < width) * (idx + 1);
size_t idym1 = (size_t) (idy == 0) * (height - 1) + (size_t) (idy > 0) * (idy - 1);
size_t idyp1 = (size_t) (idy + 1 < height) * (idy + 1);
//Load extra
if (li.x == 0) //Left edge
getl(-1, li.y) = get_rm(src, idxm1, idy);
if (li.y == 0) //Upper edge
getl(li.x, -1) = get_rm(src, idx, idym1);
if (li.x == 0 && li.y == 0) //Upper left corner
getl(-1, -1) = get_rm(src, idxm1, idym1);
if (li.x == blockDim.x - 1 || idx == width - 1) // right edge
getl(li.x + 1, li.y) = get_rm(src, idxp1, idy);
if (li.y == blockDim.y - 1 || idy == height - 1) //bottom edge
getl(li.x, li.y + 1) = get_rm(src, idx, idyp1);
if ((li.y == blockDim.y - 1 || idy == height - 1) && li.x == 0) // lower left corner
            getl(li.x - 1, li.y + 1) = get_rm(src, idxm1, idyp1);
        if ((li.x == blockDim.x - 1 || idx == width - 1) && li.y == 0) //upper right corner
            getl(li.x + 1, li.y - 1) = get_rm(src, idxp1, idym1);
if ((li.y == blockDim.y - 1 || idy == height - 1) && (li.x == blockDim.x - 1 || idx == width - 1)) //lower right corner
getl(li.x + 1, li.y + 1) = get_rm(src, idxp1, idyp1);
        //Make the shared-memory halo loads visible to every thread of the block before neighbours are read
        //(assumes the launch grid exactly covers the field so all threads of the block reach this barrier)
        __syncthreads();
        //Calculate
int acc = 0;
acc += getl(li.x - 1, li.y + 1);
acc += getl(li.x - 1, li.y + 0);
acc += getl(li.x - 1, li.y - 1);
acc += getl(li.x - 0, li.y + 1);
// acc += getl(li.x - 0, li.y + 0);
acc += getl(li.x - 0, li.y - 1);
acc += getl(li.x + 1, li.y + 1);
acc += getl(li.x + 1, li.y + 0);
acc += getl(li.x + 1, li.y - 1);
//acc = 2 : x * 1 + 0
//acc = 3 : x * 0 + 1
//acc = ? : x * 0 + 0
get_rm(dst, idx, idy) = getl(li.x, li.y) * (int) (acc == 2) + (int) (acc == 3);
}
} |
644216e8d6cfd3360b62f678780fb09c6f8d4ef8.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Name: Hiu Yin Tang
* Student ID: 1831535
* the Assignment goals achieved: block scan / full scan for large vectors / Bank conflict avoidance optimization (BCAO)
* time to execute the different scans on a vector of 10,000,000 entries:
* Block scan without BCAO: ms
* Block scan with BCAO: ms
* Full scan without BCAO: ms
* Full scan with BCAO: ms
* The model of CPU:
* The model of GPU:
* A short description of any implementation details or performance improvement strategies that you successfully
* implemented and which improve upon a base level implementation of the target goals.
*
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
// A helper macro to simplify handling CUDA error checking
#define CUDA_ERROR( err, msg ) { \
if (err != hipSuccess) {\
printf( "%s: %s in %s at line %d\n", msg, hipGetErrorString( err ), __FILE__, __LINE__);\
exit( EXIT_FAILURE );\
}\
}
#define NUM_BANKS 32
#define LOG_NUM_BANKS 4
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(n) ((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_NUM_BANKS)
#endif
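/* Example of the padding macro above: with LOG_NUM_BANKS = 4 the default branch adds one padding
 * slot per 16 elements, e.g. CONFLICT_FREE_OFFSET(31) = 31 >> 4 = 1 and CONFLICT_FREE_OFFSET(48) = 48 >> 4 = 3,
 * so two indices that are 32 apart no longer map to the same shared-memory bank once the padded
 * index (i + CONFLICT_FREE_OFFSET(i)) is used. */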
__global__ void blockScan(int *g_odata, int *g_idata, int n){
extern __shared__ int temp[];
int tid = threadIdx.x;
int offset = 1;
//load global input into shared memory
temp[2*tid] = g_idata[2*tid];
temp[2*tid+1] = g_idata[2*tid+1];
// build sum in place up the tree
for(int d = n>>1; d>0; d>>=1){
__syncthreads();
if(tid<d){
int ai = offset *(2*tid+1)-1;
int bi = offset *(2*tid+2)-1;
temp[bi] += temp[ai];
}
offset *=2;
}
//clear the last element
if(tid==0)
temp[n-1]=0;
//traverse down tree & build scan
for(int d = 1; d<n; d*=2){
offset >>=1;
__syncthreads();
if(tid<d){
int ai = offset*(2*tid+1)-1;
int bi = offset*(2*tid+2)-1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
//write results to device memory
g_odata[2*tid] = temp[2*tid];
g_odata[2*tid+1] = temp[2*tid+1];
}
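/* Example trace for n = 8 with an all-one input: the up-sweep leaves partial sums at the odd
 * indices of temp, temp[7] (the total, 8) is cleared to 0, and the down-sweep turns temp into
 * the exclusive prefix sums 0,1,2,3,4,5,6,7. */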
__global__ void blockScanBCAO(int *g_odata, int *g_idata, int n){
extern __shared__ int temp[];
int tid = threadIdx.x;
int offset = 1;
//load global input into shared memory
int ai = tid;
int bi = tid + (n/2);
	int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
	int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
temp[ai + bankOffsetA] = g_idata[ai];
temp[bi + bankOffsetB] = g_idata[bi];
// build sum in place up the tree
for(int d = n>>1; d>0; d>>=1){
__syncthreads();
if(tid<d){
int ai = offset *(2*tid+1)-1;
int bi = offset *(2*tid+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
temp[bi] += temp[ai];
}
offset *=2;
}
//clear the last element
if(tid==0)
temp[n-1 + CONFLICT_FREE_OFFSET(n-1)] = 0;
//traverse down tree & build scan
for(int d = 1; d<n; d*=2){
offset >>=1;
__syncthreads();
if(tid<d){
int ai = offset*(2*tid+1)-1;
int bi = offset*(2*tid+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
//write results to device memory
g_odata[ai] = temp[ai + bankOffsetA];
g_odata[bi] = temp[bi + bankOffsetB];
}
__global__ void fullScan(){
}
__global__ void fullScanBCAO(){
}
int main(void){
return 0;
}
| 644216e8d6cfd3360b62f678780fb09c6f8d4ef8.cu | /*
* Name: Hiu Yin Tang
* Student ID: 1831535
* the Assignment goals achieved: block scan / full scan for large vectors / Bank conflict avoidance optimization (BCAO)
* time to execute the different scans on a vector of 10,000,000 entries:
* Block scan without BCAO: ms
* Block scan with BCAO: ms
* Full scan without BCAO: ms
* Full scan with BCAO: ms
* The model of CPU:
* The model of GPU:
* A short description of any implementation details or performance improvement strategies that you successfully
* implemented and which improve upon a base level implementation of the target goals.
*
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
// A helper macro to simplify handling CUDA error checking
#define CUDA_ERROR( err, msg ) { \
if (err != cudaSuccess) {\
printf( "%s: %s in %s at line %d\n", msg, cudaGetErrorString( err ), __FILE__, __LINE__);\
exit( EXIT_FAILURE );\
}\
}
#define NUM_BANKS 32
#define LOG_NUM_BANKS 4
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(n) ((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_NUM_BANKS)
#endif
__global__ void blockScan(int *g_odata, int *g_idata, int n){
extern __shared__ int temp[];
int tid = threadIdx.x;
int offset = 1;
//load global input into shared memory
temp[2*tid] = g_idata[2*tid];
temp[2*tid+1] = g_idata[2*tid+1];
// build sum in place up the tree
for(int d = n>>1; d>0; d>>=1){
__syncthreads();
if(tid<d){
int ai = offset *(2*tid+1)-1;
int bi = offset *(2*tid+2)-1;
temp[bi] += temp[ai];
}
offset *=2;
}
//clear the last element
if(tid==0)
temp[n-1]=0;
//traverse down tree & build scan
for(int d = 1; d<n; d*=2){
offset >>=1;
__syncthreads();
if(tid<d){
int ai = offset*(2*tid+1)-1;
int bi = offset*(2*tid+2)-1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
//write results to device memory
g_odata[2*tid] = temp[2*tid];
g_odata[2*tid+1] = temp[2*tid+1];
}
__global__ void blockScanBCAO(int *g_odata, int *g_idata, int n){
extern __shared__ int temp[];
int tid = threadIdx.x;
int offset = 1;
//load global input into shared memory
int ai = tid;
int bi = tid + (n/2);
	int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
	int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
temp[ai + bankOffsetA] = g_idata[ai];
temp[bi + bankOffsetB] = g_idata[bi];
// build sum in place up the tree
for(int d = n>>1; d>0; d>>=1){
__syncthreads();
if(tid<d){
int ai = offset *(2*tid+1)-1;
int bi = offset *(2*tid+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
temp[bi] += temp[ai];
}
offset *=2;
}
//clear the last element
if(tid==0)
temp[n-1 + CONFLICT_FREE_OFFSET(n-1)] = 0;
//traverse down tree & build scan
for(int d = 1; d<n; d*=2){
offset >>=1;
__syncthreads();
if(tid<d){
int ai = offset*(2*tid+1)-1;
int bi = offset*(2*tid+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
//write results to device memory
g_odata[ai] = temp[ai + bankOffsetA];
g_odata[bi] = temp[bi + bankOffsetB];
}
__global__ void fullScan(){
}
__global__ void fullScanBCAO(){
}
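/* Minimal host-side sketch (illustrative only; the function name, the all-one test input and the
 * single-block restriction are assumptions, not part of the assignment): shows how blockScan above
 * can be launched for one block of n elements, where n is a power of two equal to twice the thread
 * count and small enough for one block's shared memory. */
void exampleBlockScanLaunch(int n)
{
	int *h_in = new int[n];
	int *h_out = new int[n];
	for (int i = 0; i < n; ++i) h_in[i] = 1;        // exclusive scan of all ones -> 0,1,2,...,n-1
	int *d_in = NULL, *d_out = NULL;
	CUDA_ERROR(cudaMalloc((void**) &d_in, n * sizeof(int)), "alloc d_in");
	CUDA_ERROR(cudaMalloc((void**) &d_out, n * sizeof(int)), "alloc d_out");
	CUDA_ERROR(cudaMemcpy(d_in, h_in, n * sizeof(int), cudaMemcpyHostToDevice), "copy h_in");
	// one block of n/2 threads and n ints of dynamic shared memory, as blockScan expects
	blockScan<<<1, n / 2, n * sizeof(int)>>>(d_out, d_in, n);
	CUDA_ERROR(cudaGetLastError(), "blockScan launch");
	CUDA_ERROR(cudaMemcpy(h_out, d_out, n * sizeof(int), cudaMemcpyDeviceToHost), "copy h_out");
	cudaFree(d_in); cudaFree(d_out);
	delete[] h_in; delete[] h_out;
}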
int main(void){
return 0;
}
|
00903d64d68331e81c49dc0a69dfdbfc13cbcc69.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ComputationalScheme/include/LBM/LatticeBoltzmannScheme.hpp>
typedef LatticeBoltzmannScheme::Cell Cell;
#include <string>
#include <exception>
#include <cmath>
/**
 * @brief Initialises the D2Q9 lattice constants: squared sound speed Cs2, relaxation time tau, the nine discrete velocities c[] and their weights w[].
*/
__host__ __device__ void initLBParams(LBParams* p)
{
STRUCT_DATA_TYPE W0 = 4.0 / 9.0;
STRUCT_DATA_TYPE Wx = 1.0 / 9.0;
STRUCT_DATA_TYPE Wxx = 1.0 / 36.0;
p->Cs2 = 1.0 / 3.0;
p->tau = 0.9;
p->c[0] = make_float2(0.0f, 0.0f); p->w[0] = W0;
p->c[1] = make_float2(1.0f, 0.0f); p->w[1] = Wx;
p->c[2] = make_float2(-1.0f, 0.0f); p->w[2] = Wx;
p->c[3] = make_float2(0.0f, 1.0f); p->w[3] = Wx;
p->c[4] = make_float2(0.0f, -1.0f); p->w[4] = Wx;
p->c[5] = make_float2(1.0f, 1.0f); p->w[5] = Wxx;
p->c[6] = make_float2(1.0f, -1.0f); p->w[6] = Wxx;
p->c[7] = make_float2(-1.0f, 1.0f); p->w[7] = Wxx;
p->c[8] = make_float2(-1.0f, -1.0f); p->w[8] = Wxx;
}
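/* Layout of the D2Q9 velocity set initialised above (x to the right, y upwards):
 *     7  3  5        c[0]   : rest population, weight 4/9
 *     2  0  1        c[1..4]: axis-aligned links, weight 1/9
 *     8  4  6        c[5..8]: diagonal links, weight 1/36
 */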
/**
 * @brief Loads the row of cells just above the block tile into shared memory, taking it from the top halo on the global boundary and from the field otherwise.
*/
__device__ void uploadTopBoundary(Cell* cu_field, Cell* cu_tb_halo, size_t N_X,
size_t N_Y, size_t SHARED_X, size_t SHARED_Y)
{
extern __shared__ Cell blockCells[];
size_t tid_x = blockIdx.x * blockDim.x + threadIdx.x;
size_t tid_y = blockIdx.y * blockDim.y + threadIdx.y;
size_t bid_x = threadIdx.x;
if(tid_y == 0) {
/// Global top border
blockCells[bid_x + 1] = cu_tb_halo[bid_x];
} else {
blockCells[bid_x + 1] = cu_field[(tid_y - 1) * N_X + tid_x];
}
}
/**
 * @brief Loads the row of cells just below the block tile into shared memory, taking it from the bottom halo on the global boundary and from the field otherwise.
*/
__device__ void uploadBottomBoundary(Cell* cu_field, Cell* cu_tb_halo, size_t N_X,
size_t N_Y, size_t SHARED_X, size_t SHARED_Y)
{
extern __shared__ Cell blockCells[];
size_t tid_x = blockIdx.x * blockDim.x + threadIdx.x;
size_t tid_y = blockIdx.y * blockDim.y + threadIdx.y;
size_t bid_x = threadIdx.x;
if(tid_y == N_Y - 1) {
/// Global bottom border
blockCells[(SHARED_Y - 1) * SHARED_X + bid_x + 1] = cu_tb_halo[N_X + bid_x];
} else {
blockCells[(SHARED_Y - 1) * SHARED_X + bid_x + 1] = cu_field[(tid_y + 1) * N_X + tid_x];
}
}
/**
 * @brief Loads the column of cells just left of the block tile into shared memory, taking it from the left halo on the global boundary and from the field otherwise.
*/
__device__ void uploadLeftBoundary(Cell* cu_field, Cell* cu_lr_halo, size_t N_X,
size_t N_Y, size_t SHARED_X, size_t SHARED_Y)
{
extern __shared__ Cell blockCells[];
size_t tid_x = blockIdx.x * blockDim.x + threadIdx.x;
size_t tid_y = blockIdx.y * blockDim.y + threadIdx.y;
size_t bid_y = threadIdx.y;
if(tid_x == 0) {
/// Global left border
blockCells[(bid_y + 1) * SHARED_X] = cu_lr_halo[bid_y];
} else {
blockCells[(bid_y + 1) * SHARED_X] = cu_field[tid_y * N_X + tid_x - 1];
}
}
/**
 * @brief Loads the column of cells just right of the block tile into shared memory, taking it from the right halo on the global boundary and from the field otherwise.
*/
__device__ void uploadRightBoundary(Cell* cu_field, Cell* cu_lr_halo, size_t N_X,
size_t N_Y, size_t SHARED_X, size_t SHARED_Y)
{
extern __shared__ Cell blockCells[];
size_t tid_x = blockIdx.x * blockDim.x + threadIdx.x;
size_t tid_y = blockIdx.y * blockDim.y + threadIdx.y;
size_t bid_y = threadIdx.y;
if(tid_x == N_X - 1) {
/// Global right border
blockCells[(bid_y + 2) * SHARED_X - 1] = cu_lr_halo[N_Y + bid_y];
} else {
blockCells[(bid_y + 2) * SHARED_X - 1] = cu_field[tid_y * N_X + tid_x + 1];
}
}
/**
 * @brief Loads the four diagonal corner cells of the block tile into shared memory, using the corner and edge halos for blocks that touch the global boundary.
*/
__device__ void uploadDiagonalCells(Cell* cu_field, Cell* cu_tb_halo, Cell* cu_lr_halo,
Cell* cu_lrtb_halo, size_t N_X, size_t N_Y, size_t SHARED_X, size_t SHARED_Y)
{
extern __shared__ Cell blockCells[];
size_t tid_x = blockIdx.x * blockDim.x + threadIdx.x;
size_t tid_y = blockIdx.y * blockDim.y + threadIdx.y;
if(blockIdx.y == 0) {
/// First row
if(blockIdx.x == 0) {
/// First cell of the first row
blockCells[0] = cu_lrtb_halo[CU_LEFT_TOP_BORDER];
blockCells[SHARED_X - 1] = cu_tb_halo[tid_x + SHARED_X];
blockCells[(SHARED_Y - 1) * SHARED_X] = cu_lr_halo[tid_y + SHARED_Y];
blockCells[SHARED_Y * SHARED_X - 1] = cu_field[(tid_y + SHARED_Y) * N_X + tid_x + SHARED_X];
} else if(blockIdx.x == blockDim.x - 1) {
/// Last cell of the first row
blockCells[0] = cu_tb_halo[tid_x - 1];
blockCells[SHARED_X - 1] = cu_lrtb_halo[CU_RIGHT_TOP_BORDER];;
blockCells[(SHARED_Y - 1) * SHARED_X] = cu_field[(tid_y + SHARED_Y) * N_X + tid_x - 1];
blockCells[SHARED_Y * SHARED_X - 1] = cu_lr_halo[N_Y + tid_y + SHARED_Y];
} else {
/// Internal cell of the first row
blockCells[0] = cu_tb_halo[tid_x - 1];
blockCells[SHARED_X - 1] = cu_tb_halo[tid_x + SHARED_X];
blockCells[(SHARED_Y - 1) * SHARED_X] = cu_field[(tid_y + SHARED_Y) * N_X + tid_x - 1];
blockCells[SHARED_Y * SHARED_X - 1] = cu_field[(tid_y + SHARED_Y) * N_X + tid_x + SHARED_X];
}
} else if(blockIdx.y == blockDim.y - 1) {
/// Last row
if(blockIdx.x == 0) {
/// First cell of the last row
blockCells[0] = cu_lr_halo[tid_y - 1];
blockCells[SHARED_X - 1] = cu_field[(tid_y - 1) * N_X + tid_x + SHARED_X];
blockCells[(SHARED_Y - 1) * SHARED_X] = cu_lrtb_halo[CU_LEFT_BOTTOM_BORDER];
blockCells[SHARED_Y * SHARED_X - 1] = cu_tb_halo[N_X + tid_x + SHARED_X];
} else if(blockIdx.x == blockDim.x - 1) {
/// Last cell of the last row
blockCells[0] = cu_field[(tid_y - 1) * N_X + tid_x-1];
blockCells[SHARED_X - 1] = cu_lr_halo[N_Y + tid_y - 1];
blockCells[(SHARED_Y - 1) * SHARED_X] = cu_tb_halo[N_X + tid_x - 1];
blockCells[SHARED_Y * SHARED_X - 1] = cu_lrtb_halo[CU_RIGHT_BOTTOM_BORDER];
} else {
/// Internal cell of the last row
blockCells[0] = cu_field[(tid_y - 1) * N_X + tid_x-1];
blockCells[SHARED_X - 1] = cu_field[(tid_y - 1) * N_X + tid_x + SHARED_X];
blockCells[(SHARED_Y - 1) * SHARED_X] = cu_tb_halo[N_X + tid_x - 1];
blockCells[SHARED_Y * SHARED_X - 1] = cu_tb_halo[N_X + tid_x + SHARED_X];
}
} else {
/// Internal cell of the grid
blockCells[0] = cu_field[(tid_y - 1) * N_X + tid_x-1];
blockCells[SHARED_X - 1] = cu_field[(tid_y - 1) * N_X + tid_x + SHARED_X];
blockCells[(SHARED_Y - 1) * SHARED_X] = cu_field[(tid_y + SHARED_Y) * N_X + tid_x - 1];
blockCells[SHARED_Y * SHARED_X - 1] = cu_field[(tid_y + SHARED_Y) * N_X + tid_x + SHARED_X];
}
}
/**
 * @brief Fills the SHARED_X x SHARED_Y shared-memory tile: every thread loads its own cell and the edge threads additionally load the surrounding halo.
*/
__device__ void initBlockCells(Cell* cu_field, Cell* cu_lr_halo, Cell* cu_tb_halo,
Cell* cu_lrtb_halo, size_t N_X, size_t N_Y, size_t SHARED_X, size_t SHARED_Y)
{
/// Size of the dynamic shared memory is ((CUDA_X_THREADS+2) * (CUDA_Y_THREADS+2)) to take into
/// consideration the boundary values
extern __shared__ Cell blockCells[];
size_t tid_x = blockIdx.x * blockDim.x + threadIdx.x;
size_t bid_x = threadIdx.x;
size_t tid_y = blockIdx.y * blockDim.y + threadIdx.y;
size_t bid_y = threadIdx.y;
/// Upload the internal part of the cell grid
blockCells[(bid_y + 1) * SHARED_X + bid_x + 1] = cu_field[tid_y * N_X + tid_x];
if(bid_y == 0)
uploadTopBoundary(cu_field, cu_tb_halo, N_X, N_Y, SHARED_X, SHARED_Y);
if(bid_y == blockDim.y - 1)
uploadBottomBoundary(cu_field, cu_tb_halo, N_X, N_Y, SHARED_X, SHARED_Y);
if(bid_x == 0)
uploadLeftBoundary(cu_field, cu_lr_halo, N_X, N_Y, SHARED_X, SHARED_Y);
if(bid_x == blockDim.x - 1)
uploadRightBoundary(cu_field, cu_lr_halo, N_X, N_Y, SHARED_X, SHARED_Y);
if(bid_x == 0 && bid_y == 0)
uploadDiagonalCells(cu_field, cu_tb_halo, cu_lr_halo, cu_lrtb_halo, N_X, N_Y, SHARED_X, SHARED_Y);
}
/**
 * @brief Returns a pointer to the shared-memory copy of the cell at local tile coordinates (x, y), where -1 addresses the halo.
*/
__device__ Cell* sCell(size_t x, size_t y)
{
extern __shared__ Cell blockCells[];
    return &blockCells[(y + 1) * (blockDim.x + 2) + x + 1];
}
/**
 * @brief Dot product of two float2 vectors.
*/
__host__ __device__ float dot_float2(const float2& a1, const float2& a2)
{
return a1.x * a2.x + a1.y * a2.y;
}
/**
 * @brief Streaming step: accumulates the surrounding populations into the local macroscopic quantities (only the density sum is active; velocity and pressure are still commented out).
*/
__device__ void streamingStep(Cell* C, LBParams* P)
{
extern __shared__ Cell blockCells[];
//size_t x = threadIdx.x;
//size_t y = threadIdx.y;
STRUCT_DATA_TYPE r = 0.0;
//float2 u;
//STRUCT_DATA_TYPE p = 0.0;
/// Obtain values of mactoscopic parameters from populations of currounding cells
/// Compute density of the cell
for(size_t i = 0; i < DIRECTIONS_OF_INTERACTION; ++i) {
r += C->F[i];
}
/*C->r = C->F0 + sCell(x+1,y)->Fmx + sCell(x-1,y)->Fx + sCell(x,y+1)->Fmy + sCell(x,y-1)->Fy +
sCell(x-1,y-1)->Fxy + sCell(x+1,y-1)->Fmxy + sCell(x-1,y+1)->Fxmy + sCell(x+1,y+1)->Fmxmy;
/// Compute velocity of the cell
C->u = (P->c0.x * C->F0 + P->cmx.x * sCell(x+1,y)->Fmx + P->cx.x * sCell(x-1,y)->Fx +
P->cmy.x * sCell(x,y+1)->Fmy + P->cy.x * sCell(x,y-1)->Fy + P->cxy.x * sCell(x-1,y-1)->Fxy +
P->cmxy.x * sCell(x+1,y-1)->Fmxy + P->cxmy.x * sCell(x-1,y+1)->Fxmy +
P->cmxmy.x * sCell(x+1,y+1)->Fmxmy) / C->r;
C->v = (P->c0.y * C->F0 + P->cmx.y * sCell(x+1,y)->Fmx + P->cx.y * sCell(x-1,y)->Fx +
P->cmy.y * sCell(x,y+1)->Fmy + P->cy.y * sCell(x,y-1)->Fy + P->cxy.y * sCell(x-1,y-1)->Fxy +
P->cmxy.y * sCell(x+1,y-1)->Fmxy + P->cxmy.y * sCell(x-1,y+1)->Fxmy +
P->cmxmy.y * sCell(x+1,y+1)->Fmxmy) / C->r;
// Compute pressure of the cell
C->p = dot_float2(P->c0, P->c0) * C->F0 + dot_float2(P->cmx, P->cmx) * sCell(x+1,y)->Fmx +
dot_float2(P->cx, P->cx) * sCell(x-1,y)->Fx + dot_float2(P->cmy, P->cmy) * sCell(x,y+1)->Fmy +
dot_float2(P->cy, P->cy) * sCell(x,y-1)->Fy + dot_float2(P->cxy, P->cxy) * sCell(x-1,y-1)->Fxy +
dot_float2(P->cmxy, P->cmxy) * sCell(x+1,y-1)->Fmxy + dot_float2(P->cxmy, P->cxmy) * sCell(x-1,y+1)->Fxmy +
dot_float2(P->cmxmy, P->cmxmy) * sCell(x+1,y+1)->Fmxmy;*/
}
/**
 * @brief D2Q9 equilibrium population fi_eq = w * rho * (1 + (c.u)/Cs2 + (c.u)^2/(2*Cs2^2) - (u.u)/(2*Cs2)).
*/
__host__ __device__ STRUCT_DATA_TYPE computeFiEq(const STRUCT_DATA_TYPE& w, const STRUCT_DATA_TYPE& r, const float2 u,
const float2 c, const STRUCT_DATA_TYPE& Cs2)
{
STRUCT_DATA_TYPE dotCU = dot_float2(c, u);
return w * r * (1 + dotCU / Cs2 + dotCU * dotCU / 2 / Cs2 / Cs2 - dot_float2(u, u) / 2 / Cs2);
}
/**
 * @brief BGK collision step: relaxes each population towards its equilibrium with relaxation time tau (the relaxation updates are currently commented out).
*/
__device__ void collisionStep(Cell* C, LBParams* P)
{
float2 u = make_float2(C->u,C->v);
/*C->F0 = C->F0 - (C->F0 - computeFiEq(P->W0, C->r, u, P->c0, P->Cs2)) / P->tau;
C->Fx = C->Fx - (C->Fx - computeFiEq(P->Wx, C->r, u, P->cx, P->Cs2)) / P->tau;
C->Fmx = C->Fmx - (C->Fmx - computeFiEq(P->Wx, C->r, u, P->cmx, P->Cs2)) / P->tau;
C->Fy = C->Fy - (C->Fy - computeFiEq(P->Wx, C->r, u, P->cy, P->Cs2)) / P->tau;
C->Fmy = C->Fmy - (C->Fmy - computeFiEq(P->Wx, C->r, u, P->cmy, P->Cs2)) / P->tau;
C->Fxy = C->Fxy - (C->Fxy - computeFiEq(P->Wxx, C->r, u, P->cxy, P->Cs2)) / P->tau;
C->Fmxy = C->Fmxy - (C->Fmxy - computeFiEq(P->Wxx, C->r, u, P->cmxy, P->Cs2)) / P->tau;
C->Fxmy = C->Fxmy - (C->Fxmy - computeFiEq(P->Wxx, C->r, u, P->cxmy, P->Cs2)) / P->tau;
C->Fmxmy = C->Fmxmy - (C->Fmxmy - computeFiEq(P->Wxx, C->r, u, P->cmxmy, P->Cs2)) / P->tau;*/
}
/**
 * @brief One LBM time step per cell: load the shared-memory tile, perform the streaming step, then the collision step.
*/
__global__ void performGPUSimulationStep_kernel(Cell* cu_field, Cell* cu_lr_halo,
Cell* cu_tb_halo, Cell* cu_lrtb_halo, size_t N_X, size_t N_Y,
size_t SHARED_X, size_t SHARED_Y)
{
__shared__ LBParams P;
size_t tid_x = blockIdx.x * blockDim.x + threadIdx.x;
size_t tid_y = blockIdx.y * blockDim.y + threadIdx.y;
Cell* C = &cu_field[tid_y * N_X + tid_x];
if(tid_x == 0 && tid_y == 0)
initLBParams(&P);
__syncthreads();
/// Initialize the shared block Cells for faster memory accessible
initBlockCells(cu_field, cu_lr_halo, cu_tb_halo, cu_lrtb_halo,
N_X, N_Y, SHARED_X, SHARED_Y);
__syncthreads();
/// Streaming step (computation of local macroscopic parameters)
streamingStep(C, &P);
/// Synchronization of threads is not necessary since no interaction with
/// other threads is performed
/// Collision step (redistribution of the population)
collisionStep(C, &P);
}
__device__ void initBlockCellsBorders(Cell* cu_field, size_t N_X, size_t N_Y, size_t type)
{
extern __shared__ Cell blockCells[];
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
size_t bid = threadIdx.x;
if(type == CU_LEFT_BORDER) {
blockCells[bid + 1] = cu_field[tid * N_X];
/// Upload two border elements
if(bid == 0) {
/** The first left border element has a diagonal neighbor which
* is located in the top border. Since this interaction is not
* important for computations, we will use insted of the
* neighbor in the top border, the first element in the field
* (to fill in the space for easier logic)
*/
if(blockIdx.x != 0) {
blockCells[0] = cu_field[(tid - 1) * N_X];
} else {
blockCells[0] = cu_field[tid * N_X];
}
/** The last left border element has a diagonal neighbor which
* is located in the bottom border. Since this interaction is not
* important for computations, we will use insted of the
* neighbor in the bottom border, the last element in the field
* (to fill in the space for easier logic)
*/
if(blockIdx.x != gridDim.x - 1) {
blockCells[gridDim.x] = cu_field[(tid + gridDim.x) * N_X];
} else {
blockCells[gridDim.x] = cu_field[(tid + gridDim.x - 1) * N_X];
}
}
} else if(type == CU_RIGHT_BORDER) {
blockCells[bid] = cu_field[(tid + 1) * N_X - 1];
/// Upload two border elements
if(bid == 0) {
/** The first left border element has a diagonal neighbor which
* is located in the top border. Since this interaction is not
* important for computations, we will use insted of the
* neighbor in the top border, the first element in the field
* (to fill in the space for easier logic)
*/
if(blockIdx.x != 0) {
blockCells[0] = cu_field[tid * N_X - 1];
} else {
blockCells[0] = cu_field[(tid + 1) * N_X - 1];
}
/** The last left border element has a diagonal neighbor which
* is located in the bottom border. Since this interaction is not
* important for computations, we will use insted of the
* neighbor in the bottom border, the last element in the field
* (to fill in the space for easier logic)
*/
if(blockIdx.x != gridDim.x - 1) {
blockCells[gridDim.x] = cu_field[(tid + gridDim.x + 1) * N_X - 1];
} else {
blockCells[gridDim.x] = cu_field[(tid + gridDim.x) * N_X - 1];
}
}
} else if(type == CU_TOP_BORDER) {
blockCells[bid] = cu_field[tid];
/// Upload two border elements
if(bid == 0) {
if(blockIdx.x != 0) {
blockCells[0] = cu_field[tid - 1];
} else {
blockCells[0] = cu_field[tid];
}
if(blockIdx.x != gridDim.x - 1) {
blockCells[gridDim.x] = cu_field[tid + gridDim.x];
} else {
blockCells[gridDim.x] = cu_field[tid + gridDim.x - 1];
}
}
} else if(type == CU_BOTTOM_BORDER) {
blockCells[bid] = cu_field[(N_Y - 1) * N_X + tid];
/// Upload two border elements
if(bid == 0) {
if(blockIdx.x != 0) {
blockCells[0] = cu_field[(N_Y - 1) * N_X + tid - 1];
} else {
blockCells[0] = cu_field[(N_Y - 1) * N_X + tid];
}
if(blockIdx.x != gridDim.x - 1) {
blockCells[gridDim.x] = cu_field[(N_Y - 1) * N_X + tid + gridDim.x];
} else {
blockCells[gridDim.x] = cu_field[(N_Y - 1) * N_X + tid + gridDim.x - 1];
}
}
}
}
/**
 * @brief Copies the outgoing populations of the global border cells into the halo buffers exchanged with neighbouring subdomains (the copies are currently commented out).
*/
__global__ void updateGPUGlobalBorders_kernel(Cell* cu_field, Cell* cu_lr_halo,
Cell* cu_tb_halo, Cell* cu_lrtb_halo, size_t N_X, size_t N_Y, size_t type)
{
extern __shared__ Cell blockCells[];
//size_t tid = threadIdx.x;
initBlockCellsBorders(cu_field, N_X, N_Y, type);
__syncthreads();
/*if(type == CU_LEFT_BORDER) {
cu_lr_halo[tid].Fx = blockCells[tid + 1].Fmx;
cu_lr_halo[tid].Fxy = blockCells[tid].Fmxmy;
cu_lr_halo[tid].Fxmy = blockCells[tid + 2].Fmxy;
} else if(type == CU_RIGHT_BORDER) {
cu_lr_halo[N_Y + tid].Fmx = blockCells[tid + 1].Fx;
cu_lr_halo[N_Y + tid].Fmxmy = blockCells[tid].Fxy;
cu_lr_halo[N_Y + tid].Fmxy = blockCells[tid + 2].Fxmy;
} else if(type == CU_TOP_BORDER) {
cu_tb_halo[tid].Fmy = blockCells[tid + 1].Fy;
cu_tb_halo[tid].Fxmy = blockCells[tid].Fmxy;
cu_tb_halo[tid].Fmxmy = blockCells[tid + 2].Fxy;
} else if(type == CU_BOTTOM_BORDER) {
cu_tb_halo[N_X + tid].Fy = blockCells[tid + 1].Fmy;
cu_tb_halo[N_X + tid].Fmxy = blockCells[tid].Fxmy;
cu_tb_halo[N_X + tid].Fxy = blockCells[tid + 2].Fmxmy;
} else if(type == CU_LEFT_TOP_BORDER) {
cu_lrtb_halo[LEFT_TOP_BORDER].Fxmy = cu_field[0].Fmxy;
} else if(type == CU_RIGHT_TOP_BORDER) {
cu_lrtb_halo[RIGHT_TOP_BORDER].Fmxmy = cu_field[N_X - 1].Fxy;
} else if(type == CU_LEFT_BOTTOM_BORDER) {
cu_lrtb_halo[LEFT_BOTTOM_BORDER].Fxy = cu_field[(N_Y - 1) * N_X].Fmxmy;
} else if(type == CU_RIGHT_BOTTOM_BORDER) {
cu_lrtb_halo[RIGHT_BOTTOM_BORDER].Fmxy = cu_field[N_Y * N_X - 1].Fxmy;
}*/
}
void* LatticeBoltzmannScheme::createField(size_t N_X, size_t N_Y)
{
return (void*)(new Cell[N_X * N_Y]);
}
void* LatticeBoltzmannScheme::createPageLockedField(size_t N_X, size_t N_Y)
{
Cell* ptr;
HANDLE_CUERROR_PTR( hipHostMalloc((void**)&ptr, N_X * N_Y * sizeof(Cell), hipHostMallocDefault) );
return (void*)ptr;
}
void* LatticeBoltzmannScheme::createGPUField(size_t N_X, size_t N_Y)
{
Cell* ptr;
HANDLE_CUERROR_PTR( hipMalloc((void**)&ptr, N_X * N_Y * sizeof(Cell)) );
return (void*)ptr;
}
void* LatticeBoltzmannScheme::initHalos(size_t N)
{
return (void*)(new Cell[N]);
}
void* LatticeBoltzmannScheme::initPageLockedHalos(size_t N)
{
Cell* ptr;
HANDLE_CUERROR_PTR( hipHostMalloc((void**)&ptr, N * sizeof(Cell), hipHostMallocDefault) );
return (void*)ptr;
}
void* LatticeBoltzmannScheme::initGPUHalos(size_t N)
{
Cell* ptr;
HANDLE_CUERROR_PTR( hipMalloc((void**)&ptr, N * sizeof(Cell)) );
return (void*)ptr;
}
ErrorStatus LatticeBoltzmannScheme::performGPUSimulationStep(void* cu_field, void* cu_lr_halo,
void* cu_tb_halo, void* cu_lrtb_halo, size_t N_X, size_t N_Y,
size_t CUDA_X_THREADS, size_t CUDA_Y_THREADS, void* stream)
{
return GPU_SUCCESS;
/*size_t SHARED_X = CUDA_X_THREADS + 2;
size_t SHARED_Y = CUDA_Y_THREADS + 2;
size_t SharedMemoryPerBlock = SHARED_X * SHARED_Y * sizeof(Cell);
float blocksPerSM = ceil((float)CUDA_X_BLOCKS * (float)CUDA_Y_BLOCKS / (float)amountSMs);
size_t totalSharedMemoryPerBlock = ceil((float)totalSharedMemoryPerSM / blocksPerSM);*/
/// Check if there is enough shared memory
/*if(totalSharedMemoryPerBlock < SharedMemoryPerBlock) {
errorString = std::string("Trying to allocate too much CUDA shared memory: ") +
std::to_string(totalSharedMemoryPerBlock) + std::string(" bytes is available per block, ") +
std::to_string(SharedMemoryPerBlock) + std::string(" bytes per block is requested!");
return GPU_ERROR;
}*/
/*hipStream_t* cuStream = (hipStream_t*)stream;
/// Launch the CUDA kernel
performGPUSimulationStep_kernel <<< dim3(CUDA_X_BLOCKS, CUDA_Y_BLOCKS, 1),
dim3(CUDA_X_THREADS, CUDA_Y_THREADS, 1), SharedMemoryPerBlock,
*cuStream >>> ((Cell*)cu_field, (Cell*)cu_lr_halo, (Cell*)cu_tb_halo,
(Cell*)cu_lrtb_halo, N_X, N_Y, SHARED_X, SHARED_Y);
/// Check if the kernel executed without errors
lastCudaError = hipGetLastError();
if(lastCudaError != hipSuccess) {
errorString = std::string("performGPUSimulationStep: ") +
std::string(hipGetErrorString(lastCudaError));
return GPU_ERROR;
}
return GPU_SUCCESS;*/
}
ErrorStatus LatticeBoltzmannScheme::updateGPUGlobalBorders(void* cu_field, void* cu_lr_halo,
void* cu_tb_halo, void* cu_lrtb_halo, size_t N_X, size_t N_Y,
size_t type, size_t CUDA_X_THREADS, size_t CUDA_Y_THREADS, void* stream)
{
return GPU_SUCCESS;
/*/// Calculate the amount of shared memory that is required for the kernel
size_t sharedMemory = 0;
if(type == CU_LEFT_BORDER) {
sharedMemory = (N_Y + 2) * sizeof(Cell);
} else if(type == CU_RIGHT_BORDER) {
sharedMemory = (N_Y + 2) * sizeof(Cell);
} else if(type == CU_TOP_BORDER) {
sharedMemory = (N_X + 2) * sizeof(Cell);
} else if(type == CU_BOTTOM_BORDER) {
sharedMemory = (N_X + 2) * sizeof(Cell);
}
hipStream_t* cuStream = (hipStream_t*)stream;
/// Launch the CUDA kernel
hipLaunchKernelGGL(( updateGPUGlobalBorders_kernel) , dim3(dim3(CUDA_X_BLOCKS, CUDA_Y_BLOCKS, 1)),
dim3( dim3(CUDA_X_THREADS, CUDA_Y_THREADS, 1)), sharedMemory,
*cuStream , (Cell*)cu_field, (Cell*)cu_lr_halo, (Cell*)cu_tb_halo,
(Cell*)cu_lrtb_halo, N_X, N_Y, type);
/// Check if the kernel executed without errors
lastCudaError = hipGetLastError();
if(lastCudaError != hipSuccess) {
errorString = std::string("updateGPUGlobalBorders: ") +
std::string(hipGetErrorString(lastCudaError));
return GPU_ERROR;
}
return GPU_SUCCESS;*/
}
void* LatticeBoltzmannScheme::getMarkerValue()
{
return (void*)(&marker);
}
#ifdef __DEBUG__
ErrorStatus LatticeBoltzmannScheme::dbg_performSimulationStep(void* cu_field, void* cu_lr_halo,
void* cu_tb_halo, void* cu_lrtb_halo, size_t N_X, size_t N_Y,
size_t CUDA_X_THREADS, size_t CUDA_Y_THREADS, void* stream)
{
Cell* field = (Cell*)cu_field;
Cell* lr_halo = (Cell*)cu_lr_halo;
Cell* tb_halo = (Cell*)cu_tb_halo;
Cell* lrtb_halo = (Cell*)cu_lrtb_halo;
LBParams P;
initLBParams(&P);
dbg_streamingStep(field, lr_halo, tb_halo, lrtb_halo, N_X, N_Y, &P);
dbg_collisionStep(field, lr_halo, tb_halo, lrtb_halo, N_X, N_Y, &P);
return GPU_SUCCESS;
}
ErrorStatus LatticeBoltzmannScheme::dbg_updateGlobalBorders(void* cu_field, void* cu_lr_halo,
void* cu_tb_halo, void* cu_lrtb_halo, size_t N_X, size_t N_Y,
size_t type, size_t CUDA_X_THREADS, size_t CUDA_Y_THREADS, void* stream)
{
Cell* field = (Cell*)cu_field;
Cell* lr_halo = (Cell*)cu_lr_halo;
Cell* tb_halo = (Cell*)cu_tb_halo;
Cell* lrtb_halo = (Cell*)cu_lrtb_halo;
size_t F_0 = 0, F_X = 1, F_mX = 2, F_Y = 3, F_mY = 4,
F_XY = 5, F_XmY = 6, F_mXY = 7, F_mXmY = 8;
if(type == CU_LEFT_BORDER) {
for(int y = 0; y < N_Y; ++y) {
lr_halo[y].F[F_X] = getCurCell(field, 0, y, N_X, N_Y)->F[F_mX];
lr_halo[y].F[F_XY] = y != 0 ? getCurCell(field, 0, y-1, N_X, N_Y)->F[F_mXmY] : 0;
lr_halo[y].F[F_XmY] = y != N_Y-1 ? getCurCell(field, 0, y+1, N_X, N_Y)->F[F_mXY] : 0;
}
} else if(type == CU_RIGHT_BORDER) {
for(int y = 0; y < N_Y; ++y) {
lr_halo[N_Y + y].F[F_mX] = getCurCell(field, N_X-1, y, N_X, N_Y)->F[F_X];
lr_halo[N_Y + y].F[F_mXmY] = y != 0 ? getCurCell(field, N_X-1, y-1, N_X, N_Y)->F[F_XY] : 0;
lr_halo[N_Y + y].F[F_mXY] = y != N_Y-1 ? getCurCell(field, N_X-1, y+1, N_X, N_Y)->F[F_XmY] : 0;
}
} else if(type == CU_TOP_BORDER) {
for(int x = 0; x < N_X; ++x) {
tb_halo[x].F[F_mY] = getCurCell(field, x, 0, N_X, N_Y)->F[F_Y];
tb_halo[x].F[F_XmY] = x != N_X-1 ? getCurCell(field, x+1, 0, N_X, N_Y)->F[F_mXY] : 0;
tb_halo[x].F[F_mXmY] = x != 0 ? getCurCell(field, x-1, 0, N_X, N_Y)->F[F_XY] : 0;
}
} else if(type == CU_BOTTOM_BORDER) {
for(int x = 0; x < N_X; ++x) {
tb_halo[N_X + x].F[F_Y] = getCurCell(field, x, N_Y-1, N_X, N_Y)->F[F_mY];
tb_halo[N_X + x].F[F_XY] = x != N_X-1 ? getCurCell(field, x+1, N_Y-1, N_X, N_Y)->F[F_mXmY] : 0;
tb_halo[N_X + x].F[F_mXY] = x != 0 ? getCurCell(field, x-1, N_Y-1, N_X, N_Y)->F[F_XmY] : 0;
}
} else if(type == CU_LEFT_TOP_BORDER) {
type -= CU_LEFT_TOP_BORDER;
lrtb_halo[type].F[F_XmY] = getCurCell(field, 0, 0, N_X, N_Y)->F[F_mXY];
} else if(type == CU_RIGHT_TOP_BORDER) {
type -= CU_LEFT_TOP_BORDER;
lrtb_halo[type].F[F_mXmY] = getCurCell(field, N_X - 1, 0, N_X, N_Y)->F[F_XY];
} else if(type == CU_LEFT_BOTTOM_BORDER) {
type -= CU_LEFT_TOP_BORDER;
lrtb_halo[type].F[F_XY] = getCurCell(field, 0, N_Y - 1, N_X, N_Y)->F[F_mXmY];
} else if(type == CU_RIGHT_BOTTOM_BORDER) {
type -= CU_LEFT_TOP_BORDER;
lrtb_halo[type].F[F_mXY] = getCurCell(field, N_X - 1, N_Y - 1, N_X, N_Y)->F[F_XmY];
}
return GPU_SUCCESS;
}
#endif
| 00903d64d68331e81c49dc0a69dfdbfc13cbcc69.cu | #include <ComputationalScheme/include/LBM/LatticeBoltzmannScheme.hpp>
typedef LatticeBoltzmannScheme::Cell Cell;
#include <string>
#include <exception>
#include <cmath>
/**
 * @brief Initialises the D2Q9 lattice constants: squared sound speed Cs2, relaxation time tau, the nine discrete velocities c[] and their weights w[].
*/
__host__ __device__ void initLBParams(LBParams* p)
{
STRUCT_DATA_TYPE W0 = 4.0 / 9.0;
STRUCT_DATA_TYPE Wx = 1.0 / 9.0;
STRUCT_DATA_TYPE Wxx = 1.0 / 36.0;
p->Cs2 = 1.0 / 3.0;
p->tau = 0.9;
p->c[0] = make_float2(0.0f, 0.0f); p->w[0] = W0;
p->c[1] = make_float2(1.0f, 0.0f); p->w[1] = Wx;
p->c[2] = make_float2(-1.0f, 0.0f); p->w[2] = Wx;
p->c[3] = make_float2(0.0f, 1.0f); p->w[3] = Wx;
p->c[4] = make_float2(0.0f, -1.0f); p->w[4] = Wx;
p->c[5] = make_float2(1.0f, 1.0f); p->w[5] = Wxx;
p->c[6] = make_float2(1.0f, -1.0f); p->w[6] = Wxx;
p->c[7] = make_float2(-1.0f, 1.0f); p->w[7] = Wxx;
p->c[8] = make_float2(-1.0f, -1.0f); p->w[8] = Wxx;
}
/**
 * @brief Loads the row of cells just above the block tile into shared memory, taking it from the top halo on the global boundary and from the field otherwise.
*/
__device__ void uploadTopBoundary(Cell* cu_field, Cell* cu_tb_halo, size_t N_X,
size_t N_Y, size_t SHARED_X, size_t SHARED_Y)
{
extern __shared__ Cell blockCells[];
size_t tid_x = blockIdx.x * blockDim.x + threadIdx.x;
size_t tid_y = blockIdx.y * blockDim.y + threadIdx.y;
size_t bid_x = threadIdx.x;
if(tid_y == 0) {
/// Global top border
blockCells[bid_x + 1] = cu_tb_halo[bid_x];
} else {
blockCells[bid_x + 1] = cu_field[(tid_y - 1) * N_X + tid_x];
}
}
/**
 * @brief Loads the row of cells just below the block tile into shared memory, taking it from the bottom halo on the global boundary and from the field otherwise.
*/
__device__ void uploadBottomBoundary(Cell* cu_field, Cell* cu_tb_halo, size_t N_X,
size_t N_Y, size_t SHARED_X, size_t SHARED_Y)
{
extern __shared__ Cell blockCells[];
size_t tid_x = blockIdx.x * blockDim.x + threadIdx.x;
size_t tid_y = blockIdx.y * blockDim.y + threadIdx.y;
size_t bid_x = threadIdx.x;
if(tid_y == N_Y - 1) {
/// Global bottom border
blockCells[(SHARED_Y - 1) * SHARED_X + bid_x + 1] = cu_tb_halo[N_X + bid_x];
} else {
blockCells[(SHARED_Y - 1) * SHARED_X + bid_x + 1] = cu_field[(tid_y + 1) * N_X + tid_x];
}
}
/**
 * @brief Loads the column of cells just left of the block tile into shared memory, taking it from the left halo on the global boundary and from the field otherwise.
*/
__device__ void uploadLeftBoundary(Cell* cu_field, Cell* cu_lr_halo, size_t N_X,
size_t N_Y, size_t SHARED_X, size_t SHARED_Y)
{
extern __shared__ Cell blockCells[];
size_t tid_x = blockIdx.x * blockDim.x + threadIdx.x;
size_t tid_y = blockIdx.y * blockDim.y + threadIdx.y;
size_t bid_y = threadIdx.y;
if(tid_x == 0) {
/// Global left border
blockCells[(bid_y + 1) * SHARED_X] = cu_lr_halo[bid_y];
} else {
blockCells[(bid_y + 1) * SHARED_X] = cu_field[tid_y * N_X + tid_x - 1];
}
}
/**
 * @brief Loads the column of cells just right of the block tile into shared memory, taking it from the right halo on the global boundary and from the field otherwise.
*/
__device__ void uploadRightBoundary(Cell* cu_field, Cell* cu_lr_halo, size_t N_X,
size_t N_Y, size_t SHARED_X, size_t SHARED_Y)
{
extern __shared__ Cell blockCells[];
size_t tid_x = blockIdx.x * blockDim.x + threadIdx.x;
size_t tid_y = blockIdx.y * blockDim.y + threadIdx.y;
size_t bid_y = threadIdx.y;
if(tid_x == N_X - 1) {
/// Global right border
blockCells[(bid_y + 2) * SHARED_X - 1] = cu_lr_halo[N_Y + bid_y];
} else {
blockCells[(bid_y + 2) * SHARED_X - 1] = cu_field[tid_y * N_X + tid_x + 1];
}
}
/**
 * @brief Loads the four diagonal corner cells of the block tile into shared memory, using the corner and edge halos for blocks that touch the global boundary.
*/
__device__ void uploadDiagonalCells(Cell* cu_field, Cell* cu_tb_halo, Cell* cu_lr_halo,
Cell* cu_lrtb_halo, size_t N_X, size_t N_Y, size_t SHARED_X, size_t SHARED_Y)
{
extern __shared__ Cell blockCells[];
size_t tid_x = blockIdx.x * blockDim.x + threadIdx.x;
size_t tid_y = blockIdx.y * blockDim.y + threadIdx.y;
if(blockIdx.y == 0) {
/// First row
if(blockIdx.x == 0) {
/// First cell of the first row
blockCells[0] = cu_lrtb_halo[CU_LEFT_TOP_BORDER];
blockCells[SHARED_X - 1] = cu_tb_halo[tid_x + SHARED_X];
blockCells[(SHARED_Y - 1) * SHARED_X] = cu_lr_halo[tid_y + SHARED_Y];
blockCells[SHARED_Y * SHARED_X - 1] = cu_field[(tid_y + SHARED_Y) * N_X + tid_x + SHARED_X];
} else if(blockIdx.x == blockDim.x - 1) {
/// Last cell of the first row
blockCells[0] = cu_tb_halo[tid_x - 1];
blockCells[SHARED_X - 1] = cu_lrtb_halo[CU_RIGHT_TOP_BORDER];;
blockCells[(SHARED_Y - 1) * SHARED_X] = cu_field[(tid_y + SHARED_Y) * N_X + tid_x - 1];
blockCells[SHARED_Y * SHARED_X - 1] = cu_lr_halo[N_Y + tid_y + SHARED_Y];
} else {
/// Internal cell of the first row
blockCells[0] = cu_tb_halo[tid_x - 1];
blockCells[SHARED_X - 1] = cu_tb_halo[tid_x + SHARED_X];
blockCells[(SHARED_Y - 1) * SHARED_X] = cu_field[(tid_y + SHARED_Y) * N_X + tid_x - 1];
blockCells[SHARED_Y * SHARED_X - 1] = cu_field[(tid_y + SHARED_Y) * N_X + tid_x + SHARED_X];
}
} else if(blockIdx.y == blockDim.y - 1) {
/// Last row
if(blockIdx.x == 0) {
/// First cell of the last row
blockCells[0] = cu_lr_halo[tid_y - 1];
blockCells[SHARED_X - 1] = cu_field[(tid_y - 1) * N_X + tid_x + SHARED_X];
blockCells[(SHARED_Y - 1) * SHARED_X] = cu_lrtb_halo[CU_LEFT_BOTTOM_BORDER];
blockCells[SHARED_Y * SHARED_X - 1] = cu_tb_halo[N_X + tid_x + SHARED_X];
} else if(blockIdx.x == blockDim.x - 1) {
/// Last cell of the last row
blockCells[0] = cu_field[(tid_y - 1) * N_X + tid_x-1];
blockCells[SHARED_X - 1] = cu_lr_halo[N_Y + tid_y - 1];
blockCells[(SHARED_Y - 1) * SHARED_X] = cu_tb_halo[N_X + tid_x - 1];
blockCells[SHARED_Y * SHARED_X - 1] = cu_lrtb_halo[CU_RIGHT_BOTTOM_BORDER];
} else {
/// Internal cell of the last row
blockCells[0] = cu_field[(tid_y - 1) * N_X + tid_x-1];
blockCells[SHARED_X - 1] = cu_field[(tid_y - 1) * N_X + tid_x + SHARED_X];
blockCells[(SHARED_Y - 1) * SHARED_X] = cu_tb_halo[N_X + tid_x - 1];
blockCells[SHARED_Y * SHARED_X - 1] = cu_tb_halo[N_X + tid_x + SHARED_X];
}
} else {
/// Internal cell of the grid
blockCells[0] = cu_field[(tid_y - 1) * N_X + tid_x-1];
blockCells[SHARED_X - 1] = cu_field[(tid_y - 1) * N_X + tid_x + SHARED_X];
blockCells[(SHARED_Y - 1) * SHARED_X] = cu_field[(tid_y + SHARED_Y) * N_X + tid_x - 1];
blockCells[SHARED_Y * SHARED_X - 1] = cu_field[(tid_y + SHARED_Y) * N_X + tid_x + SHARED_X];
}
}
/**
 * @brief Fills the SHARED_X x SHARED_Y shared-memory tile: every thread loads its own cell and the edge threads additionally load the surrounding halo.
*/
__device__ void initBlockCells(Cell* cu_field, Cell* cu_lr_halo, Cell* cu_tb_halo,
Cell* cu_lrtb_halo, size_t N_X, size_t N_Y, size_t SHARED_X, size_t SHARED_Y)
{
/// Size of the dynamic shared memory is ((CUDA_X_THREADS+2) * (CUDA_Y_THREADS+2)) to take into
/// consideration the boundary values
extern __shared__ Cell blockCells[];
size_t tid_x = blockIdx.x * blockDim.x + threadIdx.x;
size_t bid_x = threadIdx.x;
size_t tid_y = blockIdx.y * blockDim.y + threadIdx.y;
size_t bid_y = threadIdx.y;
/// Upload the internal part of the cell grid
blockCells[(bid_y + 1) * SHARED_X + bid_x + 1] = cu_field[tid_y * N_X + tid_x];
if(bid_y == 0)
uploadTopBoundary(cu_field, cu_tb_halo, N_X, N_Y, SHARED_X, SHARED_Y);
if(bid_y == blockDim.y - 1)
uploadBottomBoundary(cu_field, cu_tb_halo, N_X, N_Y, SHARED_X, SHARED_Y);
if(bid_x == 0)
uploadLeftBoundary(cu_field, cu_lr_halo, N_X, N_Y, SHARED_X, SHARED_Y);
if(bid_x == blockDim.x - 1)
uploadRightBoundary(cu_field, cu_lr_halo, N_X, N_Y, SHARED_X, SHARED_Y);
if(bid_x == 0 && bid_y == 0)
uploadDiagonalCells(cu_field, cu_tb_halo, cu_lr_halo, cu_lrtb_halo, N_X, N_Y, SHARED_X, SHARED_Y);
}
/**
 * @brief Returns a pointer to the shared-memory copy of the cell at local tile coordinates (x, y), where -1 addresses the halo.
*/
__device__ Cell* sCell(size_t x, size_t y)
{
extern __shared__ Cell blockCells[];
    return &blockCells[(y + 1) * (blockDim.x + 2) + x + 1];
}
/**
 * @brief Dot product of two float2 vectors.
*/
__host__ __device__ float dot_float2(const float2& a1, const float2& a2)
{
return a1.x * a2.x + a1.y * a2.y;
}
/**
 * @brief Streaming step: accumulates the surrounding populations into the local macroscopic quantities (only the density sum is active; velocity and pressure are still commented out).
*/
__device__ void streamingStep(Cell* C, LBParams* P)
{
extern __shared__ Cell blockCells[];
//size_t x = threadIdx.x;
//size_t y = threadIdx.y;
STRUCT_DATA_TYPE r = 0.0;
//float2 u;
//STRUCT_DATA_TYPE p = 0.0;
/// Obtain values of mactoscopic parameters from populations of currounding cells
/// Compute density of the cell
for(size_t i = 0; i < DIRECTIONS_OF_INTERACTION; ++i) {
r += C->F[i];
}
/*C->r = C->F0 + sCell(x+1,y)->Fmx + sCell(x-1,y)->Fx + sCell(x,y+1)->Fmy + sCell(x,y-1)->Fy +
sCell(x-1,y-1)->Fxy + sCell(x+1,y-1)->Fmxy + sCell(x-1,y+1)->Fxmy + sCell(x+1,y+1)->Fmxmy;
/// Compute velocity of the cell
C->u = (P->c0.x * C->F0 + P->cmx.x * sCell(x+1,y)->Fmx + P->cx.x * sCell(x-1,y)->Fx +
P->cmy.x * sCell(x,y+1)->Fmy + P->cy.x * sCell(x,y-1)->Fy + P->cxy.x * sCell(x-1,y-1)->Fxy +
P->cmxy.x * sCell(x+1,y-1)->Fmxy + P->cxmy.x * sCell(x-1,y+1)->Fxmy +
P->cmxmy.x * sCell(x+1,y+1)->Fmxmy) / C->r;
C->v = (P->c0.y * C->F0 + P->cmx.y * sCell(x+1,y)->Fmx + P->cx.y * sCell(x-1,y)->Fx +
P->cmy.y * sCell(x,y+1)->Fmy + P->cy.y * sCell(x,y-1)->Fy + P->cxy.y * sCell(x-1,y-1)->Fxy +
P->cmxy.y * sCell(x+1,y-1)->Fmxy + P->cxmy.y * sCell(x-1,y+1)->Fxmy +
P->cmxmy.y * sCell(x+1,y+1)->Fmxmy) / C->r;
// Compute pressure of the cell
C->p = dot_float2(P->c0, P->c0) * C->F0 + dot_float2(P->cmx, P->cmx) * sCell(x+1,y)->Fmx +
dot_float2(P->cx, P->cx) * sCell(x-1,y)->Fx + dot_float2(P->cmy, P->cmy) * sCell(x,y+1)->Fmy +
dot_float2(P->cy, P->cy) * sCell(x,y-1)->Fy + dot_float2(P->cxy, P->cxy) * sCell(x-1,y-1)->Fxy +
dot_float2(P->cmxy, P->cmxy) * sCell(x+1,y-1)->Fmxy + dot_float2(P->cxmy, P->cxmy) * sCell(x-1,y+1)->Fxmy +
dot_float2(P->cmxmy, P->cmxmy) * sCell(x+1,y+1)->Fmxmy;*/
}
/**
* @brief
*/
__host__ __device__ STRUCT_DATA_TYPE computeFiEq(const STRUCT_DATA_TYPE& w, const STRUCT_DATA_TYPE& r, const float2 u,
const float2 c, const STRUCT_DATA_TYPE& Cs2)
{
STRUCT_DATA_TYPE dotCU = dot_float2(c, u);
return w * r * (1 + dotCU / Cs2 + dotCU * dotCU / 2 / Cs2 / Cs2 - dot_float2(u, u) / 2 / Cs2);
}
/**
 * @brief BGK collision step: relaxes each population towards its equilibrium with relaxation time tau (the relaxation updates are currently commented out).
*/
__device__ void collisionStep(Cell* C, LBParams* P)
{
float2 u = make_float2(C->u,C->v);
/*C->F0 = C->F0 - (C->F0 - computeFiEq(P->W0, C->r, u, P->c0, P->Cs2)) / P->tau;
C->Fx = C->Fx - (C->Fx - computeFiEq(P->Wx, C->r, u, P->cx, P->Cs2)) / P->tau;
C->Fmx = C->Fmx - (C->Fmx - computeFiEq(P->Wx, C->r, u, P->cmx, P->Cs2)) / P->tau;
C->Fy = C->Fy - (C->Fy - computeFiEq(P->Wx, C->r, u, P->cy, P->Cs2)) / P->tau;
C->Fmy = C->Fmy - (C->Fmy - computeFiEq(P->Wx, C->r, u, P->cmy, P->Cs2)) / P->tau;
C->Fxy = C->Fxy - (C->Fxy - computeFiEq(P->Wxx, C->r, u, P->cxy, P->Cs2)) / P->tau;
C->Fmxy = C->Fmxy - (C->Fmxy - computeFiEq(P->Wxx, C->r, u, P->cmxy, P->Cs2)) / P->tau;
C->Fxmy = C->Fxmy - (C->Fxmy - computeFiEq(P->Wxx, C->r, u, P->cxmy, P->Cs2)) / P->tau;
C->Fmxmy = C->Fmxmy - (C->Fmxmy - computeFiEq(P->Wxx, C->r, u, P->cmxmy, P->Cs2)) / P->tau;*/
}
/**
 * @brief One LBM time step per cell: load the shared-memory tile, perform the streaming step, then the collision step.
*/
__global__ void performGPUSimulationStep_kernel(Cell* cu_field, Cell* cu_lr_halo,
Cell* cu_tb_halo, Cell* cu_lrtb_halo, size_t N_X, size_t N_Y,
size_t SHARED_X, size_t SHARED_Y)
{
__shared__ LBParams P;
size_t tid_x = blockIdx.x * blockDim.x + threadIdx.x;
size_t tid_y = blockIdx.y * blockDim.y + threadIdx.y;
Cell* C = &cu_field[tid_y * N_X + tid_x];
if(tid_x == 0 && tid_y == 0)
initLBParams(&P);
__syncthreads();
/// Initialize the shared block Cells for faster memory accessible
initBlockCells(cu_field, cu_lr_halo, cu_tb_halo, cu_lrtb_halo,
N_X, N_Y, SHARED_X, SHARED_Y);
__syncthreads();
/// Streaming step (computation of local macroscopic parameters)
streamingStep(C, &P);
/// Synchronization of threads is not necessary since no interaction with
/// other threads is performed
/// Collision step (redistribution of the population)
collisionStep(C, &P);
}
__device__ void initBlockCellsBorders(Cell* cu_field, size_t N_X, size_t N_Y, size_t type)
{
extern __shared__ Cell blockCells[];
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
size_t bid = threadIdx.x;
if(type == CU_LEFT_BORDER) {
blockCells[bid + 1] = cu_field[tid * N_X];
/// Upload two border elements
if(bid == 0) {
/** The first left border element has a diagonal neighbor which
* is located in the top border. Since this interaction is not
* important for computations, we will use insted of the
* neighbor in the top border, the first element in the field
* (to fill in the space for easier logic)
*/
if(blockIdx.x != 0) {
blockCells[0] = cu_field[(tid - 1) * N_X];
} else {
blockCells[0] = cu_field[tid * N_X];
}
/** The last left border element has a diagonal neighbor which
* is located in the bottom border. Since this interaction is not
* important for computations, we will use insted of the
* neighbor in the bottom border, the last element in the field
* (to fill in the space for easier logic)
*/
if(blockIdx.x != gridDim.x - 1) {
blockCells[gridDim.x] = cu_field[(tid + gridDim.x) * N_X];
} else {
blockCells[gridDim.x] = cu_field[(tid + gridDim.x - 1) * N_X];
}
}
} else if(type == CU_RIGHT_BORDER) {
blockCells[bid] = cu_field[(tid + 1) * N_X - 1];
/// Upload two border elements
if(bid == 0) {
/** The first left border element has a diagonal neighbor which
* is located in the top border. Since this interaction is not
* important for computations, we will use insted of the
* neighbor in the top border, the first element in the field
* (to fill in the space for easier logic)
*/
if(blockIdx.x != 0) {
blockCells[0] = cu_field[tid * N_X - 1];
} else {
blockCells[0] = cu_field[(tid + 1) * N_X - 1];
}
/** The last left border element has a diagonal neighbor which
* is located in the bottom border. Since this interaction is not
* important for computations, we will use insted of the
* neighbor in the bottom border, the last element in the field
* (to fill in the space for easier logic)
*/
if(blockIdx.x != gridDim.x - 1) {
blockCells[gridDim.x] = cu_field[(tid + gridDim.x + 1) * N_X - 1];
} else {
blockCells[gridDim.x] = cu_field[(tid + gridDim.x) * N_X - 1];
}
}
} else if(type == CU_TOP_BORDER) {
blockCells[bid] = cu_field[tid];
/// Upload two border elements
if(bid == 0) {
if(blockIdx.x != 0) {
blockCells[0] = cu_field[tid - 1];
} else {
blockCells[0] = cu_field[tid];
}
if(blockIdx.x != gridDim.x - 1) {
blockCells[gridDim.x] = cu_field[tid + gridDim.x];
} else {
blockCells[gridDim.x] = cu_field[tid + gridDim.x - 1];
}
}
} else if(type == CU_BOTTOM_BORDER) {
blockCells[bid] = cu_field[(N_Y - 1) * N_X + tid];
/// Upload two border elements
if(bid == 0) {
if(blockIdx.x != 0) {
blockCells[0] = cu_field[(N_Y - 1) * N_X + tid - 1];
} else {
blockCells[0] = cu_field[(N_Y - 1) * N_X + tid];
}
if(blockIdx.x != gridDim.x - 1) {
blockCells[gridDim.x] = cu_field[(N_Y - 1) * N_X + tid + gridDim.x];
} else {
blockCells[gridDim.x] = cu_field[(N_Y - 1) * N_X + tid + gridDim.x - 1];
}
}
}
}
/**
 * @brief Copies the outgoing populations of the global border cells into the halo buffers exchanged with neighbouring subdomains (the copies are currently commented out).
*/
__global__ void updateGPUGlobalBorders_kernel(Cell* cu_field, Cell* cu_lr_halo,
Cell* cu_tb_halo, Cell* cu_lrtb_halo, size_t N_X, size_t N_Y, size_t type)
{
extern __shared__ Cell blockCells[];
//size_t tid = threadIdx.x;
initBlockCellsBorders(cu_field, N_X, N_Y, type);
__syncthreads();
/*if(type == CU_LEFT_BORDER) {
cu_lr_halo[tid].Fx = blockCells[tid + 1].Fmx;
cu_lr_halo[tid].Fxy = blockCells[tid].Fmxmy;
cu_lr_halo[tid].Fxmy = blockCells[tid + 2].Fmxy;
} else if(type == CU_RIGHT_BORDER) {
cu_lr_halo[N_Y + tid].Fmx = blockCells[tid + 1].Fx;
cu_lr_halo[N_Y + tid].Fmxmy = blockCells[tid].Fxy;
cu_lr_halo[N_Y + tid].Fmxy = blockCells[tid + 2].Fxmy;
} else if(type == CU_TOP_BORDER) {
cu_tb_halo[tid].Fmy = blockCells[tid + 1].Fy;
cu_tb_halo[tid].Fxmy = blockCells[tid].Fmxy;
cu_tb_halo[tid].Fmxmy = blockCells[tid + 2].Fxy;
} else if(type == CU_BOTTOM_BORDER) {
cu_tb_halo[N_X + tid].Fy = blockCells[tid + 1].Fmy;
cu_tb_halo[N_X + tid].Fmxy = blockCells[tid].Fxmy;
cu_tb_halo[N_X + tid].Fxy = blockCells[tid + 2].Fmxmy;
} else if(type == CU_LEFT_TOP_BORDER) {
cu_lrtb_halo[LEFT_TOP_BORDER].Fxmy = cu_field[0].Fmxy;
} else if(type == CU_RIGHT_TOP_BORDER) {
cu_lrtb_halo[RIGHT_TOP_BORDER].Fmxmy = cu_field[N_X - 1].Fxy;
} else if(type == CU_LEFT_BOTTOM_BORDER) {
cu_lrtb_halo[LEFT_BOTTOM_BORDER].Fxy = cu_field[(N_Y - 1) * N_X].Fmxmy;
} else if(type == CU_RIGHT_BOTTOM_BORDER) {
cu_lrtb_halo[RIGHT_BOTTOM_BORDER].Fmxy = cu_field[N_Y * N_X - 1].Fxmy;
}*/
}
void* LatticeBoltzmannScheme::createField(size_t N_X, size_t N_Y)
{
return (void*)(new Cell[N_X * N_Y]);
}
void* LatticeBoltzmannScheme::createPageLockedField(size_t N_X, size_t N_Y)
{
Cell* ptr;
HANDLE_CUERROR_PTR( cudaHostAlloc((void**)&ptr, N_X * N_Y * sizeof(Cell), cudaHostAllocDefault) );
return (void*)ptr;
}
void* LatticeBoltzmannScheme::createGPUField(size_t N_X, size_t N_Y)
{
Cell* ptr;
HANDLE_CUERROR_PTR( cudaMalloc((void**)&ptr, N_X * N_Y * sizeof(Cell)) );
return (void*)ptr;
}
void* LatticeBoltzmannScheme::initHalos(size_t N)
{
return (void*)(new Cell[N]);
}
void* LatticeBoltzmannScheme::initPageLockedHalos(size_t N)
{
Cell* ptr;
HANDLE_CUERROR_PTR( cudaHostAlloc((void**)&ptr, N * sizeof(Cell), cudaHostAllocDefault) );
return (void*)ptr;
}
void* LatticeBoltzmannScheme::initGPUHalos(size_t N)
{
Cell* ptr;
HANDLE_CUERROR_PTR( cudaMalloc((void**)&ptr, N * sizeof(Cell)) );
return (void*)ptr;
}
ErrorStatus LatticeBoltzmannScheme::performGPUSimulationStep(void* cu_field, void* cu_lr_halo,
void* cu_tb_halo, void* cu_lrtb_halo, size_t N_X, size_t N_Y,
size_t CUDA_X_THREADS, size_t CUDA_Y_THREADS, void* stream)
{
return GPU_SUCCESS;
/*size_t SHARED_X = CUDA_X_THREADS + 2;
size_t SHARED_Y = CUDA_Y_THREADS + 2;
size_t SharedMemoryPerBlock = SHARED_X * SHARED_Y * sizeof(Cell);
float blocksPerSM = ceil((float)CUDA_X_BLOCKS * (float)CUDA_Y_BLOCKS / (float)amountSMs);
size_t totalSharedMemoryPerBlock = ceil((float)totalSharedMemoryPerSM / blocksPerSM);*/
/// Check if there is enough shared memory
/*if(totalSharedMemoryPerBlock < SharedMemoryPerBlock) {
errorString = std::string("Trying to allocate too much CUDA shared memory: ") +
std::to_string(totalSharedMemoryPerBlock) + std::string(" bytes is available per block, ") +
std::to_string(SharedMemoryPerBlock) + std::string(" bytes per block is requested!");
return GPU_ERROR;
}*/
/*cudaStream_t* cuStream = (cudaStream_t*)stream;
/// Launch the CUDA kernel
performGPUSimulationStep_kernel <<< dim3(CUDA_X_BLOCKS, CUDA_Y_BLOCKS, 1),
dim3(CUDA_X_THREADS, CUDA_Y_THREADS, 1), SharedMemoryPerBlock,
*cuStream >>> ((Cell*)cu_field, (Cell*)cu_lr_halo, (Cell*)cu_tb_halo,
(Cell*)cu_lrtb_halo, N_X, N_Y, SHARED_X, SHARED_Y);
/// Check if the kernel executed without errors
lastCudaError = cudaGetLastError();
if(lastCudaError != cudaSuccess) {
errorString = std::string("performGPUSimulationStep: ") +
std::string(cudaGetErrorString(lastCudaError));
return GPU_ERROR;
}
return GPU_SUCCESS;*/
}
ErrorStatus LatticeBoltzmannScheme::updateGPUGlobalBorders(void* cu_field, void* cu_lr_halo,
void* cu_tb_halo, void* cu_lrtb_halo, size_t N_X, size_t N_Y,
size_t type, size_t CUDA_X_THREADS, size_t CUDA_Y_THREADS, void* stream)
{
return GPU_SUCCESS;
/*/// Calculate the amount of shared memory that is required for the kernel
size_t sharedMemory = 0;
if(type == CU_LEFT_BORDER) {
sharedMemory = (N_Y + 2) * sizeof(Cell);
} else if(type == CU_RIGHT_BORDER) {
sharedMemory = (N_Y + 2) * sizeof(Cell);
} else if(type == CU_TOP_BORDER) {
sharedMemory = (N_X + 2) * sizeof(Cell);
} else if(type == CU_BOTTOM_BORDER) {
sharedMemory = (N_X + 2) * sizeof(Cell);
}
cudaStream_t* cuStream = (cudaStream_t*)stream;
/// Launch the CUDA kernel
updateGPUGlobalBorders_kernel <<< dim3(CUDA_X_BLOCKS, CUDA_Y_BLOCKS, 1),
dim3(CUDA_X_THREADS, CUDA_Y_THREADS, 1), sharedMemory,
*cuStream >>> ((Cell*)cu_field, (Cell*)cu_lr_halo, (Cell*)cu_tb_halo,
(Cell*)cu_lrtb_halo, N_X, N_Y, type);
/// Check if the kernel executed without errors
lastCudaError = cudaGetLastError();
if(lastCudaError != cudaSuccess) {
errorString = std::string("updateGPUGlobalBorders: ") +
std::string(cudaGetErrorString(lastCudaError));
return GPU_ERROR;
}
return GPU_SUCCESS;*/
}
void* LatticeBoltzmannScheme::getMarkerValue()
{
return (void*)(&marker);
}
#ifdef __DEBUG__
ErrorStatus LatticeBoltzmannScheme::dbg_performSimulationStep(void* cu_field, void* cu_lr_halo,
void* cu_tb_halo, void* cu_lrtb_halo, size_t N_X, size_t N_Y,
size_t CUDA_X_THREADS, size_t CUDA_Y_THREADS, void* stream)
{
Cell* field = (Cell*)cu_field;
Cell* lr_halo = (Cell*)cu_lr_halo;
Cell* tb_halo = (Cell*)cu_tb_halo;
Cell* lrtb_halo = (Cell*)cu_lrtb_halo;
LBParams P;
initLBParams(&P);
dbg_streamingStep(field, lr_halo, tb_halo, lrtb_halo, N_X, N_Y, &P);
dbg_collisionStep(field, lr_halo, tb_halo, lrtb_halo, N_X, N_Y, &P);
return GPU_SUCCESS;
}
ErrorStatus LatticeBoltzmannScheme::dbg_updateGlobalBorders(void* cu_field, void* cu_lr_halo,
void* cu_tb_halo, void* cu_lrtb_halo, size_t N_X, size_t N_Y,
size_t type, size_t CUDA_X_THREADS, size_t CUDA_Y_THREADS, void* stream)
{
Cell* field = (Cell*)cu_field;
Cell* lr_halo = (Cell*)cu_lr_halo;
Cell* tb_halo = (Cell*)cu_tb_halo;
Cell* lrtb_halo = (Cell*)cu_lrtb_halo;
size_t F_0 = 0, F_X = 1, F_mX = 2, F_Y = 3, F_mY = 4,
F_XY = 5, F_XmY = 6, F_mXY = 7, F_mXmY = 8;
if(type == CU_LEFT_BORDER) {
for(int y = 0; y < N_Y; ++y) {
lr_halo[y].F[F_X] = getCurCell(field, 0, y, N_X, N_Y)->F[F_mX];
lr_halo[y].F[F_XY] = y != 0 ? getCurCell(field, 0, y-1, N_X, N_Y)->F[F_mXmY] : 0;
lr_halo[y].F[F_XmY] = y != N_Y-1 ? getCurCell(field, 0, y+1, N_X, N_Y)->F[F_mXY] : 0;
}
} else if(type == CU_RIGHT_BORDER) {
for(int y = 0; y < N_Y; ++y) {
lr_halo[N_Y + y].F[F_mX] = getCurCell(field, N_X-1, y, N_X, N_Y)->F[F_X];
lr_halo[N_Y + y].F[F_mXmY] = y != 0 ? getCurCell(field, N_X-1, y-1, N_X, N_Y)->F[F_XY] : 0;
lr_halo[N_Y + y].F[F_mXY] = y != N_Y-1 ? getCurCell(field, N_X-1, y+1, N_X, N_Y)->F[F_XmY] : 0;
}
} else if(type == CU_TOP_BORDER) {
for(int x = 0; x < N_X; ++x) {
tb_halo[x].F[F_mY] = getCurCell(field, x, 0, N_X, N_Y)->F[F_Y];
tb_halo[x].F[F_XmY] = x != N_X-1 ? getCurCell(field, x+1, 0, N_X, N_Y)->F[F_mXY] : 0;
tb_halo[x].F[F_mXmY] = x != 0 ? getCurCell(field, x-1, 0, N_X, N_Y)->F[F_XY] : 0;
}
} else if(type == CU_BOTTOM_BORDER) {
for(int x = 0; x < N_X; ++x) {
tb_halo[N_X + x].F[F_Y] = getCurCell(field, x, N_Y-1, N_X, N_Y)->F[F_mY];
tb_halo[N_X + x].F[F_XY] = x != N_X-1 ? getCurCell(field, x+1, N_Y-1, N_X, N_Y)->F[F_mXmY] : 0;
tb_halo[N_X + x].F[F_mXY] = x != 0 ? getCurCell(field, x-1, N_Y-1, N_X, N_Y)->F[F_XmY] : 0;
}
} else if(type == CU_LEFT_TOP_BORDER) {
type -= CU_LEFT_TOP_BORDER;
lrtb_halo[type].F[F_XmY] = getCurCell(field, 0, 0, N_X, N_Y)->F[F_mXY];
} else if(type == CU_RIGHT_TOP_BORDER) {
type -= CU_LEFT_TOP_BORDER;
lrtb_halo[type].F[F_mXmY] = getCurCell(field, N_X - 1, 0, N_X, N_Y)->F[F_XY];
} else if(type == CU_LEFT_BOTTOM_BORDER) {
type -= CU_LEFT_TOP_BORDER;
lrtb_halo[type].F[F_XY] = getCurCell(field, 0, N_Y - 1, N_X, N_Y)->F[F_mXmY];
} else if(type == CU_RIGHT_BOTTOM_BORDER) {
type -= CU_LEFT_TOP_BORDER;
lrtb_halo[type].F[F_mXY] = getCurCell(field, N_X - 1, N_Y - 1, N_X, N_Y)->F[F_XmY];
}
return GPU_SUCCESS;
}
#endif
|
8fe6fb2ee306bc571b50902f84273e57b04d3ef2.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <cusp/complex.h>
#include <cusp/blas/blas.h>
#include<cusp/csr_matrix.h>
#include<cusp/multiply.h>
#include <cusp/array1d.h>
#include <cusp/copy.h>
#include <thrust/device_ptr.h>
#include "mex.h"
#include "gpu/mxGPUArray.h"
/* Input Arguments */
#define VAL prhs[0]
#define COL prhs[1]
#define ROWPTR prhs[2]
// #define NCOL prhs[3]
// #define NROW prhs[4]
// #define NNZ prhs[5]
#define XV prhs[3]
/* Output Arguments */
#define Y plhs[0]
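/* Illustrative MATLAB-side call (assumes this MEX file is built under the name gspmv, matching the
 * usage string below, and that the CSR column indices and row pointers are already 0-based int32
 * gpuArrays):
 *   y = gspmv(gpuArray(val), gpuArray(int32(col)), gpuArray(int32(rowptr)), gpuArray(x));
 * val and x must share precision (single or double) and complexity, as checked in mexFunction. */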
void mexFunction(int nlhs, mxArray * plhs[], int nrhs,const mxArray * prhs[]){
mxGPUArray const *Aval;
mxGPUArray const *Acol;
mxGPUArray const *Aptr;
mxGPUArray const *x;
mxGPUArray *y;
// int nnzs = lrint(mxGetScalar(NCOL));
// int nrows = lrint(mxGetScalar(NROW));
// int nptr=nrows+1;
// int nnz = lrint(mxGetScalar(NNZ));
//
/* Initialize the MathWorks GPU API. */
mxInitGPU();
/*get matlab variables*/
Aval = mxGPUCreateFromMxArray(VAL);
Acol = mxGPUCreateFromMxArray(COL);
Aptr = mxGPUCreateFromMxArray(ROWPTR);
x = mxGPUCreateFromMxArray(XV);
int nnz=mxGPUGetNumberOfElements(Acol);
int nrowp1=mxGPUGetNumberOfElements(Aptr);
int ncol =mxGPUGetNumberOfElements(x);
mxComplexity isXVreal = mxGPUGetComplexity(x);
mxComplexity isAreal = mxGPUGetComplexity(Aval);
const mwSize ndim= 1;
const mwSize dims[]={(mwSize) (nrowp1-1)};
if (isAreal!=isXVreal)
{
mexErrMsgTxt("Aval and X must have the same complexity");
return;
}
// single or double
if(((mxGPUGetClassID(Aval) != mxSINGLE_CLASS || mxGPUGetClassID(x)!= mxSINGLE_CLASS) &&
((mxGPUGetClassID(Aval) != mxDOUBLE_CLASS) || mxGPUGetClassID(x)!= mxDOUBLE_CLASS))||
mxGPUGetClassID(Aptr)!= mxINT32_CLASS|| mxGPUGetClassID(Acol)!= mxINT32_CLASS){
mexErrMsgTxt("usage: gspmv(single/double, int32, int32, single/double )");
return;
};
// if(mxGPUGetClassID(Aval) != mxSINGLE_CLASS||
// mxGPUGetClassID(x)!= mxSINGLE_CLASS||
// mxGPUGetClassID(Aptr)!= mxINT32_CLASS||
// mxGPUGetClassID(Acol)!= mxINT32_CLASS){
// mexErrMsgTxt("usage: gspmv(single, int32, int32, single )");
// return;
// }
//create output vector
y = mxGPUCreateGPUArray(ndim,dims,mxGPUGetClassID(x),isAreal, MX_GPU_DO_NOT_INITIALIZE);
/* wrap indices from matlab */
typedef const int TI; /* the type for index */
TI *d_col =(TI *)(mxGPUGetDataReadOnly(Acol));
TI *d_ptr =(TI *)(mxGPUGetDataReadOnly(Aptr));
// wrap with thrust::device_ptr
thrust::device_ptr<TI> wrap_d_col (d_col);
thrust::device_ptr<TI> wrap_d_ptr (d_ptr);
// wrap with array1d_view
typedef typename cusp::array1d_view< thrust::device_ptr<TI> > idx2Av;
// wrap index arrays
idx2Av colIndex (wrap_d_col , wrap_d_col + nnz);
idx2Av ptrIndex (wrap_d_ptr , wrap_d_ptr + nrowp1);
if (isAreal!=mxREAL){
if (mxGPUGetClassID(Aval) != mxSINGLE_CLASS)
{
typedef const cusp::complex<double> TA; /* the type for A */
typedef const cusp::complex<double> TXV; /* the type for X */
typedef cusp::complex<double> TYV; /* the type for Y */
// wrap with array1d_view
typedef typename cusp::array1d_view< thrust::device_ptr<TA > > val2Av;
typedef typename cusp::array1d_view< thrust::device_ptr<TXV > > x2Av;
typedef typename cusp::array1d_view< thrust::device_ptr<TYV > > y2Av;
/* pointers from matlab */
TA *d_val =(TA *)(mxGPUGetDataReadOnly(Aval));
TXV *d_x =(TXV *)(mxGPUGetDataReadOnly(x));
TYV *d_y =(TYV *)(mxGPUGetData(y));
// wrap with thrust::device_ptr
thrust::device_ptr<TA > wrap_d_val (d_val);
thrust::device_ptr<TXV > wrap_d_x (d_x);
thrust::device_ptr<TYV > wrap_d_y (d_y);
// wrap arrays
val2Av valIndex (wrap_d_val , wrap_d_val + nnz);
x2Av xIndex (wrap_d_x , wrap_d_x + ncol);
y2Av yIndex(wrap_d_y, wrap_d_y+ nrowp1-1);
// y2Av yIndex(wrap_d_y, wrap_d_y+ ncol);
// combine info in CSR matrix
typedef cusp::csr_matrix_view<idx2Av,idx2Av,val2Av> DeviceView;
DeviceView As(nrowp1-1, ncol, nnz, ptrIndex, colIndex, valIndex);
// multiply matrix
cusp::multiply(As, xIndex, yIndex);
}else{
typedef const cusp::complex<float> TA; /* the type for A */
typedef const cusp::complex<float> TXV; /* the type for X */
typedef cusp::complex<float> TYV; /* the type for Y */
// wrap with array1d_view
typedef typename cusp::array1d_view< thrust::device_ptr<TA > > val2Av;
typedef typename cusp::array1d_view< thrust::device_ptr<TXV > > x2Av;
typedef typename cusp::array1d_view< thrust::device_ptr<TYV > > y2Av;
/* pointers from matlab */
TA *d_val =(TA *)(mxGPUGetDataReadOnly(Aval));
TXV *d_x =(TXV *)(mxGPUGetDataReadOnly(x));
TYV *d_y =(TYV *)(mxGPUGetData(y));
// wrap with thrust::device_ptr
thrust::device_ptr<TA > wrap_d_val (d_val);
thrust::device_ptr<TXV > wrap_d_x (d_x);
thrust::device_ptr<TYV > wrap_d_y (d_y);
// wrap arrays
val2Av valIndex (wrap_d_val , wrap_d_val + nnz);
x2Av xIndex (wrap_d_x , wrap_d_x + ncol);
y2Av yIndex(wrap_d_y, wrap_d_y+ nrowp1-1);
// y2Av yIndex(wrap_d_y, wrap_d_y+ ncol);
// combine info in CSR matrix
typedef cusp::csr_matrix_view<idx2Av,idx2Av,val2Av> DeviceView;
DeviceView As(nrowp1-1, ncol, nnz, ptrIndex, colIndex, valIndex);
// multiply matrix
cusp::multiply(As, xIndex, yIndex);
}
} else{
if (mxGPUGetClassID(Aval) != mxSINGLE_CLASS)
{
typedef const double TA; /* the type for A */
typedef const double TXV; /* the type for X */
typedef double TYV; /* the type for Y */
/* pointers from matlab */
TA *d_val =(TA *)(mxGPUGetDataReadOnly(Aval));
TXV *d_x =(TXV *)(mxGPUGetDataReadOnly(x));
TYV *d_y =(TYV *)(mxGPUGetData(y));
// wrap with thrust::device_ptr!
thrust::device_ptr<TA > wrap_d_val (d_val);
thrust::device_ptr<TXV > wrap_d_x (d_x);
thrust::device_ptr<TYV > wrap_d_y (d_y);
// wrap with array1d_view
typedef typename cusp::array1d_view< thrust::device_ptr<TA > > val2Av;
typedef typename cusp::array1d_view< thrust::device_ptr<TXV > > x2Av;
typedef typename cusp::array1d_view< thrust::device_ptr<TYV > > y2Av;
// wrap arrays
val2Av valIndex (wrap_d_val , wrap_d_val + nnz);
x2Av xIndex (wrap_d_x , wrap_d_x + ncol);
//y2Av yIndex(wrap_d_y, wrap_d_y+ ncol);
y2Av yIndex(wrap_d_y, wrap_d_y+ nrowp1-1);
// combine info in CSR matrix
typedef cusp::csr_matrix_view<idx2Av,idx2Av,val2Av> DeviceView;
DeviceView As(nrowp1-1, ncol, nnz, ptrIndex, colIndex, valIndex);
// multiply matrix
cusp::multiply(As, xIndex, yIndex);
}else{
typedef const float TA; /* the type for A */
typedef const float TXV; /* the type for X */
typedef float TYV; /* the type for Y */
/* pointers from matlab */
TA *d_val =(TA *)(mxGPUGetDataReadOnly(Aval));
TXV *d_x =(TXV *)(mxGPUGetDataReadOnly(x));
TYV *d_y =(TYV *)(mxGPUGetData(y));
// wrap with thrust::device_ptr!
thrust::device_ptr<TA > wrap_d_val (d_val);
thrust::device_ptr<TXV > wrap_d_x (d_x);
thrust::device_ptr<TYV > wrap_d_y (d_y);
// wrap with array1d_view
typedef typename cusp::array1d_view< thrust::device_ptr<TA > > val2Av;
typedef typename cusp::array1d_view< thrust::device_ptr<TXV > > x2Av;
typedef typename cusp::array1d_view< thrust::device_ptr<TYV > > y2Av;
// wrap arrays
val2Av valIndex (wrap_d_val , wrap_d_val + nnz);
x2Av xIndex (wrap_d_x , wrap_d_x + ncol);
//y2Av yIndex(wrap_d_y, wrap_d_y+ ncol);
y2Av yIndex(wrap_d_y, wrap_d_y+ nrowp1-1);
// combine info in CSR matrix
typedef cusp::csr_matrix_view<idx2Av,idx2Av,val2Av> DeviceView;
DeviceView As(nrowp1-1, ncol, nnz, ptrIndex, colIndex, valIndex);
// multiply matrix
cusp::multiply(As, xIndex, yIndex);
}
}
Y = mxGPUCreateMxArrayOnGPU(y);
mxGPUDestroyGPUArray(Aval);
mxGPUDestroyGPUArray(Aptr);
mxGPUDestroyGPUArray(Acol);
mxGPUDestroyGPUArray(x);
mxGPUDestroyGPUArray(y);
return;
}
| 8fe6fb2ee306bc571b50902f84273e57b04d3ef2.cu | #include <cuda.h>
#include <cusp/complex.h>
#include <cusp/blas/blas.h>
#include<cusp/csr_matrix.h>
#include<cusp/multiply.h>
#include <cusp/array1d.h>
#include <cusp/copy.h>
#include <thrust/device_ptr.h>
#include "mex.h"
#include "gpu/mxGPUArray.h"
/* Input Arguments */
#define VAL prhs[0]
#define COL prhs[1]
#define ROWPTR prhs[2]
// #define NCOL prhs[3]
// #define NROW prhs[4]
// #define NNZ prhs[5]
#define XV prhs[3]
/* Output Arguments */
#define Y plhs[0]
void mexFunction(int nlhs, mxArray * plhs[], int nrhs,const mxArray * prhs[]){
mxGPUArray const *Aval;
mxGPUArray const *Acol;
mxGPUArray const *Aptr;
mxGPUArray const *x;
mxGPUArray *y;
// int nnzs = lrint(mxGetScalar(NCOL));
// int nrows = lrint(mxGetScalar(NROW));
// int nptr=nrows+1;
// int nnz = lrint(mxGetScalar(NNZ));
//
/* Initialize the MathWorks GPU API. */
mxInitGPU();
/*get matlab variables*/
Aval = mxGPUCreateFromMxArray(VAL);
Acol = mxGPUCreateFromMxArray(COL);
Aptr = mxGPUCreateFromMxArray(ROWPTR);
x = mxGPUCreateFromMxArray(XV);
int nnz=mxGPUGetNumberOfElements(Acol);
int nrowp1=mxGPUGetNumberOfElements(Aptr);
int ncol =mxGPUGetNumberOfElements(x);
mxComplexity isXVreal = mxGPUGetComplexity(x);
mxComplexity isAreal = mxGPUGetComplexity(Aval);
const mwSize ndim= 1;
const mwSize dims[]={(mwSize) (nrowp1-1)};
if (isAreal!=isXVreal)
{
mexErrMsgTxt("Aval and X must have the same complexity");
return;
}
// single or double
if(((mxGPUGetClassID(Aval) != mxSINGLE_CLASS || mxGPUGetClassID(x)!= mxSINGLE_CLASS) &&
((mxGPUGetClassID(Aval) != mxDOUBLE_CLASS) || mxGPUGetClassID(x)!= mxDOUBLE_CLASS))||
mxGPUGetClassID(Aptr)!= mxINT32_CLASS|| mxGPUGetClassID(Acol)!= mxINT32_CLASS){
mexErrMsgTxt("usage: gspmv(single/double, int32, int32, single/double )");
return;
};
// if(mxGPUGetClassID(Aval) != mxSINGLE_CLASS||
// mxGPUGetClassID(x)!= mxSINGLE_CLASS||
// mxGPUGetClassID(Aptr)!= mxINT32_CLASS||
// mxGPUGetClassID(Acol)!= mxINT32_CLASS){
// mexErrMsgTxt("usage: gspmv(single, int32, int32, single )");
// return;
// }
//create output vector
y = mxGPUCreateGPUArray(ndim,dims,mxGPUGetClassID(x),isAreal, MX_GPU_DO_NOT_INITIALIZE);
/* wrap indices from matlab */
typedef const int TI; /* the type for index */
TI *d_col =(TI *)(mxGPUGetDataReadOnly(Acol));
TI *d_ptr =(TI *)(mxGPUGetDataReadOnly(Aptr));
// wrap with thrust::device_ptr
thrust::device_ptr<TI> wrap_d_col (d_col);
thrust::device_ptr<TI> wrap_d_ptr (d_ptr);
// wrap with array1d_view
typedef typename cusp::array1d_view< thrust::device_ptr<TI> > idx2Av;
// wrap index arrays
idx2Av colIndex (wrap_d_col , wrap_d_col + nnz);
idx2Av ptrIndex (wrap_d_ptr , wrap_d_ptr + nrowp1);
if (isAreal!=mxREAL){
if (mxGPUGetClassID(Aval) != mxSINGLE_CLASS)
{
typedef const cusp::complex<double> TA; /* the type for A */
typedef const cusp::complex<double> TXV; /* the type for X */
typedef cusp::complex<double> TYV; /* the type for Y */
// wrap with array1d_view
typedef typename cusp::array1d_view< thrust::device_ptr<TA > > val2Av;
typedef typename cusp::array1d_view< thrust::device_ptr<TXV > > x2Av;
typedef typename cusp::array1d_view< thrust::device_ptr<TYV > > y2Av;
/* pointers from matlab */
TA *d_val =(TA *)(mxGPUGetDataReadOnly(Aval));
TXV *d_x =(TXV *)(mxGPUGetDataReadOnly(x));
TYV *d_y =(TYV *)(mxGPUGetData(y));
// wrap with thrust::device_ptr
thrust::device_ptr<TA > wrap_d_val (d_val);
thrust::device_ptr<TXV > wrap_d_x (d_x);
thrust::device_ptr<TYV > wrap_d_y (d_y);
// wrap arrays
val2Av valIndex (wrap_d_val , wrap_d_val + nnz);
x2Av xIndex (wrap_d_x , wrap_d_x + ncol);
y2Av yIndex(wrap_d_y, wrap_d_y+ nrowp1-1);
// y2Av yIndex(wrap_d_y, wrap_d_y+ ncol);
// combine info in CSR matrix
typedef cusp::csr_matrix_view<idx2Av,idx2Av,val2Av> DeviceView;
DeviceView As(nrowp1-1, ncol, nnz, ptrIndex, colIndex, valIndex);
// multiply matrix
cusp::multiply(As, xIndex, yIndex);
}else{
typedef const cusp::complex<float> TA; /* the type for A */
typedef const cusp::complex<float> TXV; /* the type for X */
typedef cusp::complex<float> TYV; /* the type for Y */
// wrap with array1d_view
typedef typename cusp::array1d_view< thrust::device_ptr<TA > > val2Av;
typedef typename cusp::array1d_view< thrust::device_ptr<TXV > > x2Av;
typedef typename cusp::array1d_view< thrust::device_ptr<TYV > > y2Av;
/* pointers from matlab */
TA *d_val =(TA *)(mxGPUGetDataReadOnly(Aval));
TXV *d_x =(TXV *)(mxGPUGetDataReadOnly(x));
TYV *d_y =(TYV *)(mxGPUGetData(y));
// wrap with thrust::device_ptr
thrust::device_ptr<TA > wrap_d_val (d_val);
thrust::device_ptr<TXV > wrap_d_x (d_x);
thrust::device_ptr<TYV > wrap_d_y (d_y);
// wrap arrays
val2Av valIndex (wrap_d_val , wrap_d_val + nnz);
x2Av xIndex (wrap_d_x , wrap_d_x + ncol);
y2Av yIndex(wrap_d_y, wrap_d_y+ nrowp1-1);
// y2Av yIndex(wrap_d_y, wrap_d_y+ ncol);
// combine info in CSR matrix
typedef cusp::csr_matrix_view<idx2Av,idx2Av,val2Av> DeviceView;
DeviceView As(nrowp1-1, ncol, nnz, ptrIndex, colIndex, valIndex);
// multiply matrix
cusp::multiply(As, xIndex, yIndex);
}
} else{
if (mxGPUGetClassID(Aval) != mxSINGLE_CLASS)
{
typedef const double TA; /* the type for A */
typedef const double TXV; /* the type for X */
typedef double TYV; /* the type for Y */
/* pointers from matlab */
TA *d_val =(TA *)(mxGPUGetDataReadOnly(Aval));
TXV *d_x =(TXV *)(mxGPUGetDataReadOnly(x));
TYV *d_y =(TYV *)(mxGPUGetData(y));
// wrap with thrust::device_ptr!
thrust::device_ptr<TA > wrap_d_val (d_val);
thrust::device_ptr<TXV > wrap_d_x (d_x);
thrust::device_ptr<TYV > wrap_d_y (d_y);
// wrap with array1d_view
typedef typename cusp::array1d_view< thrust::device_ptr<TA > > val2Av;
typedef typename cusp::array1d_view< thrust::device_ptr<TXV > > x2Av;
typedef typename cusp::array1d_view< thrust::device_ptr<TYV > > y2Av;
// wrap arrays
val2Av valIndex (wrap_d_val , wrap_d_val + nnz);
x2Av xIndex (wrap_d_x , wrap_d_x + ncol);
//y2Av yIndex(wrap_d_y, wrap_d_y+ ncol);
y2Av yIndex(wrap_d_y, wrap_d_y+ nrowp1-1);
// combine info in CSR matrix
typedef cusp::csr_matrix_view<idx2Av,idx2Av,val2Av> DeviceView;
DeviceView As(nrowp1-1, ncol, nnz, ptrIndex, colIndex, valIndex);
// multiply matrix
cusp::multiply(As, xIndex, yIndex);
}else{
typedef const float TA; /* the type for A */
typedef const float TXV; /* the type for X */
typedef float TYV; /* the type for Y */
/* pointers from matlab */
TA *d_val =(TA *)(mxGPUGetDataReadOnly(Aval));
TXV *d_x =(TXV *)(mxGPUGetDataReadOnly(x));
TYV *d_y =(TYV *)(mxGPUGetData(y));
// wrap with thrust::device_ptr!
thrust::device_ptr<TA > wrap_d_val (d_val);
thrust::device_ptr<TXV > wrap_d_x (d_x);
thrust::device_ptr<TYV > wrap_d_y (d_y);
// wrap with array1d_view
typedef typename cusp::array1d_view< thrust::device_ptr<TA > > val2Av;
typedef typename cusp::array1d_view< thrust::device_ptr<TXV > > x2Av;
typedef typename cusp::array1d_view< thrust::device_ptr<TYV > > y2Av;
// wrap arrays
val2Av valIndex (wrap_d_val , wrap_d_val + nnz);
x2Av xIndex (wrap_d_x , wrap_d_x + ncol);
//y2Av yIndex(wrap_d_y, wrap_d_y+ ncol);
y2Av yIndex(wrap_d_y, wrap_d_y+ nrowp1-1);
// combine info in CSR matrix
typedef cusp::csr_matrix_view<idx2Av,idx2Av,val2Av> DeviceView;
DeviceView As(nrowp1-1, ncol, nnz, ptrIndex, colIndex, valIndex);
// multiply matrix
cusp::multiply(As, xIndex, yIndex);
}
}
Y = mxGPUCreateMxArrayOnGPU(y);
mxGPUDestroyGPUArray(Aval);
mxGPUDestroyGPUArray(Aptr);
mxGPUDestroyGPUArray(Acol);
mxGPUDestroyGPUArray(x);
mxGPUDestroyGPUArray(y);
return;
}
|
d65e5c4a9fb6c175716769fe5c4c3c0cf3c129d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __cplusplus
//extern "C" {
#endif
#include <float.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include "../external/cub-1.3.2/hipcub/hipcub.hpp"
#define CUDA
#include "common_funcs.h"
#include "constants.h"
#include "gpu_kernels.h"
#define MAX_BLOCKS 65535
/* UTIL FUNCTIONS */
int PrintOnCudaError(const char* fn_name)
{
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in %s: %s\n", fn_name, hipGetErrorString(err));
return 0;
}
return 1;
}
size_t GetSharedMemPerBlock(int device)
{
hipDeviceProp_t p;
hipGetDeviceProperties(&p, device);
if(!PrintOnCudaError("GetSharedMemPerBlock"))
return 0;
else
return p.sharedMemPerBlock;
}
/* Layer Funcs */
__global__
void kernel_convsp(
const float* qlocs,
const float* locs,
const float* data,
const float* neighbors,
const float* weight,
const float* bias,
const int batch_size,
const int M,
const int N,
const int nchannels,
const int ndims,
const int max_neighbors,
const int nkernels,
const int ncells,
const float radius,
const float* kernel_size,
const float* dilation,
const int dis_norm,
const int kernel_fn,
float* out,
float* dqlocs,
float* dlocs,
float* ddata,
float* dweight)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < M*batch_size; i += stride)
{
int b = i/M;
int n = i%M;
compute_kernel_cells(qlocs, locs, data, neighbors, weight, bias, batch_size, M, N,
nchannels, ndims, max_neighbors, nkernels, ncells, radius, kernel_size, dilation,
dis_norm, kernel_fn, out, b, n, dqlocs, dlocs, ddata, dweight);
}
}
int cuda_convsp(
const float* qlocs,
const float* locs,
const float* data,
const float* neighbors,
const float* weight,
const float* bias,
const int batch_size,
const int M,
const int N,
const int nchannels,
const int ndims,
const int max_neighbors,
const int nkernels,
const int ncells,
const float radius,
const float* kernel_size,
const float* dilation,
const int dis_norm,
const int kernel_fn,
float* out,
float* dqlocs,
float* dlocs,
float* ddata,
float* dweight,
hipStream_t stream,
const size_t nshared_device_mem)
{
int nops = batch_size*M;
int numBlocks = ceil(nops * (1.0/256));
dim3 blocks(numBlocks);
dim3 threads(256);
hipLaunchKernelGGL(( kernel_convsp), dim3(blocks), dim3(threads), 0, stream, qlocs, locs, data, neighbors, weight, bias,
batch_size, M, N, nchannels, ndims, max_neighbors, nkernels, ncells, radius,
kernel_size, dilation, dis_norm, kernel_fn, out, dqlocs, dlocs, ddata, dweight);
hipDeviceSynchronize();
return PrintOnCudaError("cuda_convsp");
}
__global__
void kernel_convsdf(
const float* locs,
const int batch_size,
const int N,
const int ndims,
const float* idxs,
const float* poses,
const float* scales,
const int M,
const int pose_len,
const float* sdfs,
const float* sdf_offsets,
const float* sdf_shapes,
const float* weight,
const float* bias,
const int nkernels,
const int ncells,
const float* kernel_size,
const float* dilation,
const float max_distance,
float* out,
float* dlocs,
float* dweight,
float* dposes)
{
int _isdf_cache[64];
float _fsdf_cache[64];
int* isdf_cache;
float* fsdf_cache;
if(M < 64)
{
isdf_cache = _isdf_cache;
fsdf_cache = _fsdf_cache;
}
else
{
isdf_cache = (int*)malloc(sizeof(int)*M);
fsdf_cache = (float*)malloc(sizeof(float)*M);
}
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < N*batch_size*nkernels; i += stride)
{
int b = i/(N*nkernels);
int n = (i % (N*nkernels))/nkernels;
int outk = i % nkernels;
compute_sdf_kernel_cells(locs, batch_size, N, ndims, idxs, poses,
scales, M, pose_len, sdfs, sdf_offsets, sdf_shapes, weight, bias,
nkernels, ncells, kernel_size, dilation, max_distance, out, b, n,
outk, dlocs, dweight, dposes, isdf_cache, fsdf_cache);
}
if(M >= 64)
{
free(isdf_cache);
free(fsdf_cache);
}
}
int cuda_convsdf(
const float* locs,
const int batch_size,
const int N,
const int ndims,
const float* idxs,
const float* poses,
const float* scales,
const int M,
const int pose_len,
const float* sdfs,
const float* sdf_offsets,
const float* sdf_shapes,
const float* weight,
const float* bias,
const int nkernels,
const int ncells,
const float* kernel_size,
const float* dilation,
const float max_distance,
float* out,
float* dlocs,
float* dweight,
float* dposes,
hipStream_t stream)
{
int nops = batch_size*N*nkernels;
int numBlocks = ceil(nops * (1.0/256));
dim3 blocks(min(MAX_BLOCKS, numBlocks));
dim3 threads(256);
// Stack overflow happens with the default stack size (1024).
hipError_t err = hipDeviceSetLimit(hipLimitStackSize, 4096);
if (err != hipSuccess) {
printf("error trying to set the stack size limit to 4096: %s\n",
hipGetErrorString(err));
return 0;
}
hipLaunchKernelGGL(( kernel_convsdf), dim3(blocks), dim3(threads), 0, stream, locs, batch_size, N, ndims, idxs, poses,
scales, M, pose_len, sdfs, sdf_offsets, sdf_shapes, weight, bias, nkernels, ncells,
kernel_size, dilation, max_distance, out, dlocs, dweight, dposes);
hipDeviceSynchronize();
return PrintOnCudaError("cuda_convsdf");
}
// Functions for the ParticleCollision layer.
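// Pipeline: (1) kernel_compute_cellIDs hashes every particle into a grid cell,
// (2) a cub radix sort orders the particles by cell ID per batch (cuda_hashgrid_order),
// (3) kernel_fill_cells records the start/end index of each occupied cell, and
// (4) kernel_compute_collisions gathers neighbors within `radius` for each query location.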
__global__
void kernel_compute_cellIDs(
const float* locs,
const float* low,
const float* grid_dims,
uint32_t* cellIDs,
float* idxs,
const int batch_size,
const int N,
const int ndims,
const float cellEdge,
uint32_t* maxhash)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int i, d;
for(i = index; i < N*batch_size; i += stride)
{
int b = i/N;
int n = i%N;
int hash = 0;
for(d = 0; d < ndims; ++d)
hash += partial_grid_hash(
loc2grid(locs[b*N*ndims + n*ndims + d], low[b*ndims + d], cellEdge),
grid_dims + b*ndims, d, ndims);
cellIDs[i] = hash;
idxs[i] = n;
if(n == 0)
{
uint32_t mh = 0;
for(d = 0; d < ndims; ++d)
mh += partial_grid_hash(grid_dims[b*ndims + d] - 1,
grid_dims + b*ndims, d, ndims);
atomicMax(maxhash, mh);
}
}
}
int cuda_hashgrid_order(
float* locs,
const float* low,
const float* grid_dims,
float* cellIDs,
float* idxs,
float* buffer,
const int batch_size,
const int N,
const int ndims,
const float cellEdge,
hipStream_t stream)
{
uint32_t* cellIDsi = (uint32_t*) cellIDs;
int b;
int nops = batch_size*N;
int numBlocks = ceil(nops * (1.0/256));
dim3 blocks(min(MAX_BLOCKS, numBlocks));
dim3 threads(256);
hipLaunchKernelGGL(( kernel_compute_cellIDs), dim3(blocks), dim3(threads), 0, stream, locs, low, grid_dims, cellIDsi,
idxs, batch_size, N, ndims, cellEdge, (uint32_t*)buffer);
hipStreamSynchronize(stream);
if(!PrintOnCudaError("cuda_hashgrid_order: kernel_compute_cellIDs")) return 0;
uint32_t maxhash;
hipMemcpy(&maxhash, buffer, sizeof(uint32_t), hipMemcpyDeviceToHost);
uint32_t numBits = (uint32_t)ceil(log2((float)maxhash)) + 1;
// TODO: deriving numBits from maxhash does not seem to work reliably, so use all 32 bits for now.
numBits = sizeof(uint32_t)*8;
// Sort the particles by cell ID.
for(b = 0; b < batch_size; ++b)
{
cub::DoubleBuffer<uint32_t> d_keys(cellIDsi + b*N, cellIDsi + batch_size*N);
cub::DoubleBuffer<float> d_values(idxs + b*N, cellIDs + (1 + batch_size)*N);
size_t sortTempSize;
hipcub::DeviceRadixSort::SortPairs(buffer, sortTempSize, d_keys, d_values, N, 0,
numBits, stream);
hipStreamSynchronize(stream);
if (d_keys.Current() != cellIDsi + b*N)
hipMemcpyAsync(cellIDsi + b*N, d_keys.Current(),
sizeof(uint32_t)*N, hipMemcpyDeviceToDevice, stream);
if (d_values.Current() != idxs + b*N)
hipMemcpyAsync(idxs + b*N, d_values.Current(), sizeof(float)*N,
hipMemcpyDeviceToDevice, stream);
hipStreamSynchronize(stream);
}
// BUG: For some reason, CUDA won't finish the above hipMemcpy's (async or
// otherwise) unless it copies some data to the heap (not the stack).
float* didxs = new float;
hipMemcpy(didxs, idxs + b*N, sizeof(float), hipMemcpyDeviceToHost);
delete didxs;
hipDeviceSynchronize();
return PrintOnCudaError("cuda_hashgrid_order");
}
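// Given cell IDs already sorted within each batch, record for every occupied cell the index of
// its first particle (cellStarts) and one past its last particle (cellEnds).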
__global__
void kernel_fill_cells(
const uint32_t* cellIDs,
float* cellStarts,
float* cellEnds,
const int batch_size,
const int N,
const int ncells)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int i;
for(i = index; i < N*batch_size; i += stride)
{
int b = i/N;
int n = i%N;
int c = cellIDs[i];
if(n == 0)
{
cellStarts[b*ncells + c] = n;
}
else
{
int p = cellIDs[b*N + n-1];
if (c != p)
{
cellStarts[b*ncells + c] = n;
cellEnds[b*ncells + p] = n;
}
}
if(n == N-1)
{
cellEnds[b*ncells + c] = n+1;
}
}
}
__global__
void kernel_compute_collisions(
const float* qlocs,
const float* locs,
const float* cellStarts,
const float* cellEnds,
const int batch_size,
const int M,
const int N,
const int ndims,
const int ncells,
const float* low,
const float* grid_dims,
const float cellEdge,
const float radius2,
float* collisions,
const int max_collisions,
const int include_self)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int i;
for(i = index; i < M*batch_size; i += stride)
{
int b = i/M;
int n = i%M;
compute_collisions(
qlocs,
locs,
cellStarts,
cellEnds,
batch_size,
M,
N,
ndims,
ncells,
low,
grid_dims,
cellEdge,
radius2,
collisions,
max_collisions,
include_self,
b,
n);
}
}
int cuda_compute_collisions(
const float* qlocs,
const float* locs,
const float* low,
const float* grid_dims,
const float* cellIDs,
float* cellStarts,
float* cellEnds,
float* collisions,
const int batch_size,
const int M,
const int N,
const int ndims,
const int max_collisions,
const int ncells,
const float cellEdge,
const float radius,
const int include_self,
hipStream_t stream)
{
int nops = batch_size*N;
int numBlocks = ceil(nops * (1.0/256));
dim3 blocks(min(MAX_BLOCKS, numBlocks));
dim3 threads(256);
const uint32_t* cellIDsi = (const uint32_t*) cellIDs;
// Create the cell start and end lists.
hipLaunchKernelGGL(( kernel_fill_cells), dim3(blocks), dim3(threads), 0, stream, cellIDsi, cellStarts, cellEnds,
batch_size, N, ncells);
hipStreamSynchronize(stream);
if(!PrintOnCudaError("compute_collisions")) return 0;
nops = batch_size*N;
numBlocks = ceil(nops * (1.0/256));
blocks = dim3(min(MAX_BLOCKS, numBlocks));
threads = dim3(256);
// Make collision lists.
hipLaunchKernelGGL(( kernel_compute_collisions), dim3(blocks), dim3(threads), 0, stream,
qlocs,
locs,
cellStarts,
cellEnds,
batch_size,
M,
N,
ndims,
ncells,
low,
grid_dims,
cellEdge,
radius*radius,
collisions,
max_collisions,
include_self);
hipStreamSynchronize(stream);
return PrintOnCudaError("compute_collisions");
}
__global__
void kernel_reorder_data(
const float* locs,
const float* data,
const float* idxs,
float* nlocs,
float* ndata,
const int batch_size,
const int N,
const int ndims,
const int nchannels,
const int reverse)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int i, d;
for(i = index; i < N*batch_size; i += stride)
{
int b = i/N;
int nn = i%N;
int on = idxs[i];
if(reverse)
{
nn = idxs[i];
on = i%N;
}
for(d = 0; d < ndims; ++d)
nlocs[b*N*ndims + nn*ndims + d] = locs[b*N*ndims + on*ndims + d];
if(data != NULL)
{
for(d = 0; d < nchannels; ++d)
ndata[b*N*nchannels + nn*nchannels + d] = data[b*N*nchannels + on*nchannels + d];
}
}
}
int cuda_reorder_data(
float* locs,
float* data,
float* idxs,
float* nlocs,
float* ndata,
const int batch_size,
const int N,
const int ndims,
const int nchannels,
const int reverse,
hipStream_t stream)
{
int nops = batch_size*N;
int numBlocks = ceil(nops * (1.0/256));
dim3 blocks(min(MAX_BLOCKS, numBlocks));
dim3 threads(256);
// Re-order locs and data.
hipLaunchKernelGGL(( kernel_reorder_data), dim3(blocks), dim3(threads), 0, stream, locs, data, idxs, nlocs,
ndata, batch_size, N, ndims, nchannels, reverse);
hipDeviceSynchronize();
return PrintOnCudaError("cuda_reorder_data");
}
size_t get_radixsort_buffer_size(hipStream_t stream)
{
cub::DoubleBuffer<int> d_keys(NULL, NULL);
cub::DoubleBuffer<float> d_values(NULL, NULL);
size_t sortTempSize;
hipcub::DeviceRadixSort::SortPairs(NULL, sortTempSize, d_keys, d_values, 1, 0,
1, stream);
return sortTempSize;
}
__global__
void kernel_particleprojection(
const float* locs,
const float camera_fl,
const float filter_std,
const float filter_scale,
const float* depth_mask,
const int batch_size,
const int N,
const int width,
const int height,
float* out,
float* dlocs)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int i;
for(i = index; i < N*batch_size; i += stride)
{
int b = i/N;
int n = i%N;
compute_particle_projection(
locs,
batch_size,
N,
camera_fl,
width,
height,
filter_std,
filter_scale,
depth_mask,
n,
b,
out,
dlocs);
}
}
int cuda_particleprojection(
const float* locs,
const float camera_fl,
const float filter_std,
const float filter_scale,
const float* depth_mask,
const int batch_size,
const int N,
const int width,
const int height,
float* out,
float* dlocs,
hipStream_t stream)
{
int nops = batch_size*N;
int numBlocks = ceil(nops * (1.0/256));
dim3 blocks(min(MAX_BLOCKS, numBlocks));
dim3 threads(256);
// Project the particles into the output image.
hipLaunchKernelGGL(( kernel_particleprojection), dim3(blocks), dim3(threads), 0, stream, locs,
camera_fl,
filter_std,
filter_scale,
depth_mask,
batch_size,
N,
width,
height,
out,
dlocs);
hipDeviceSynchronize();
return PrintOnCudaError("cuda_particleprojection");
}
__global__
void kernel_imageprojection(
const float* locs,
const float* image,
const float camera_fl,
const float* depth_mask,
const int batch_size,
const int N,
const int width,
const int height,
const int channels,
float* out,
float* dlocs,
float* dimage)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int i;
for(i = index; i < N*batch_size; i += stride)
{
int b = i/N;
int n = i%N;
compute_image_projection(
locs,
image,
batch_size,
N,
camera_fl,
width,
height,
channels,
depth_mask,
n,
b,
out,
dlocs,
dimage);
}
}
int cuda_imageprojection(
const float* locs,
const float* image,
const float camera_fl,
const float* depth_mask,
const int batch_size,
const int N,
const int width,
const int height,
const int channels,
float* out,
float* dlocs,
float* dimage,
hipStream_t stream)
{
int nops = batch_size*N;
int numBlocks = ceil(nops * (1.0/256));
dim3 blocks(min(MAX_BLOCKS, numBlocks));
dim3 threads(256);
// Project the image values onto the particles.
hipLaunchKernelGGL(( kernel_imageprojection), dim3(blocks), dim3(threads), 0, stream, locs,
image,
camera_fl,
depth_mask,
batch_size,
N,
width,
height,
channels,
out,
dlocs,
dimage);
hipDeviceSynchronize();
return PrintOnCudaError("cuda_imageprojection");
}
#ifdef __cplusplus
//}
#endif
| d65e5c4a9fb6c175716769fe5c4c3c0cf3c129d7.cu |
#ifdef __cplusplus
//extern "C" {
#endif
#include <float.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include "../external/cub-1.3.2/cub/cub.cuh"
#define CUDA
#include "common_funcs.h"
#include "constants.h"
#include "gpu_kernels.h"
#define MAX_BLOCKS 65535
/* UTIL FUNCTIONS */
int PrintOnCudaError(const char* fn_name)
{
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in %s: %s\n", fn_name, cudaGetErrorString(err));
return 0;
}
return 1;
}
size_t GetSharedMemPerBlock(int device)
{
cudaDeviceProp p;
cudaGetDeviceProperties(&p, device);
if(!PrintOnCudaError("GetSharedMemPerBlock"))
return 0;
else
return p.sharedMemPerBlock;
}
/* Layer Funcs */
__global__
void kernel_convsp(
const float* qlocs,
const float* locs,
const float* data,
const float* neighbors,
const float* weight,
const float* bias,
const int batch_size,
const int M,
const int N,
const int nchannels,
const int ndims,
const int max_neighbors,
const int nkernels,
const int ncells,
const float radius,
const float* kernel_size,
const float* dilation,
const int dis_norm,
const int kernel_fn,
float* out,
float* dqlocs,
float* dlocs,
float* ddata,
float* dweight)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < M*batch_size; i += stride)
{
int b = i/M;
int n = i%M;
compute_kernel_cells(qlocs, locs, data, neighbors, weight, bias, batch_size, M, N,
nchannels, ndims, max_neighbors, nkernels, ncells, radius, kernel_size, dilation,
dis_norm, kernel_fn, out, b, n, dqlocs, dlocs, ddata, dweight);
}
}
int cuda_convsp(
const float* qlocs,
const float* locs,
const float* data,
const float* neighbors,
const float* weight,
const float* bias,
const int batch_size,
const int M,
const int N,
const int nchannels,
const int ndims,
const int max_neighbors,
const int nkernels,
const int ncells,
const float radius,
const float* kernel_size,
const float* dilation,
const int dis_norm,
const int kernel_fn,
float* out,
float* dqlocs,
float* dlocs,
float* ddata,
float* dweight,
cudaStream_t stream,
const size_t nshared_device_mem)
{
int nops = batch_size*M;
int numBlocks = ceil(nops * (1.0/256));
dim3 blocks(numBlocks);
dim3 threads(256);
kernel_convsp<<<blocks, threads, 0, stream>>>(qlocs, locs, data, neighbors, weight, bias,
batch_size, M, N, nchannels, ndims, max_neighbors, nkernels, ncells, radius,
kernel_size, dilation, dis_norm, kernel_fn, out, dqlocs, dlocs, ddata, dweight);
cudaDeviceSynchronize();
return PrintOnCudaError("cuda_convsp");
}
__global__
void kernel_convsdf(
const float* locs,
const int batch_size,
const int N,
const int ndims,
const float* idxs,
const float* poses,
const float* scales,
const int M,
const int pose_len,
const float* sdfs,
const float* sdf_offsets,
const float* sdf_shapes,
const float* weight,
const float* bias,
const int nkernels,
const int ncells,
const float* kernel_size,
const float* dilation,
const float max_distance,
float* out,
float* dlocs,
float* dweight,
float* dposes)
{
int _isdf_cache[64];
float _fsdf_cache[64];
int* isdf_cache;
float* fsdf_cache;
if(M < 64)
{
isdf_cache = _isdf_cache;
fsdf_cache = _fsdf_cache;
}
else
{
isdf_cache = (int*)malloc(sizeof(int)*M);
fsdf_cache = (float*)malloc(sizeof(float)*M);
}
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < N*batch_size*nkernels; i += stride)
{
int b = i/(N*nkernels);
int n = (i % (N*nkernels))/nkernels;
int outk = i % nkernels;
compute_sdf_kernel_cells(locs, batch_size, N, ndims, idxs, poses,
scales, M, pose_len, sdfs, sdf_offsets, sdf_shapes, weight, bias,
nkernels, ncells, kernel_size, dilation, max_distance, out, b, n,
outk, dlocs, dweight, dposes, isdf_cache, fsdf_cache);
}
if(M >= 64)
{
free(isdf_cache);
free(fsdf_cache);
}
}
int cuda_convsdf(
const float* locs,
const int batch_size,
const int N,
const int ndims,
const float* idxs,
const float* poses,
const float* scales,
const int M,
const int pose_len,
const float* sdfs,
const float* sdf_offsets,
const float* sdf_shapes,
const float* weight,
const float* bias,
const int nkernels,
const int ncells,
const float* kernel_size,
const float* dilation,
const float max_distance,
float* out,
float* dlocs,
float* dweight,
float* dposes,
cudaStream_t stream)
{
int nops = batch_size*N*nkernels;
int numBlocks = ceil(nops * (1.0/256));
dim3 blocks(min(MAX_BLOCKS, numBlocks));
dim3 threads(256);
// Stack overflow happens with the default stack size (1024).
cudaError_t err = cudaDeviceSetLimit(cudaLimitStackSize, 4096);
if (err != cudaSuccess) {
printf("error trying to set the stack size limit to 4096: %s\n",
cudaGetErrorString(err));
return 0;
}
kernel_convsdf<<<blocks, threads, 0, stream>>>(locs, batch_size, N, ndims, idxs, poses,
scales, M, pose_len, sdfs, sdf_offsets, sdf_shapes, weight, bias, nkernels, ncells,
kernel_size, dilation, max_distance, out, dlocs, dweight, dposes);
cudaDeviceSynchronize();
return PrintOnCudaError("cuda_convsdf");
}
// Functions for the ParticleCollision layer.
__global__
void kernel_compute_cellIDs(
const float* locs,
const float* low,
const float* grid_dims,
uint32_t* cellIDs,
float* idxs,
const int batch_size,
const int N,
const int ndims,
const float cellEdge,
uint32_t* maxhash)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int i, d;
for(i = index; i < N*batch_size; i += stride)
{
int b = i/N;
int n = i%N;
int hash = 0;
for(d = 0; d < ndims; ++d)
hash += partial_grid_hash(
loc2grid(locs[b*N*ndims + n*ndims + d], low[b*ndims + d], cellEdge),
grid_dims + b*ndims, d, ndims);
cellIDs[i] = hash;
idxs[i] = n;
if(n == 0)
{
uint32_t mh = 0;
for(d = 0; d < ndims; ++d)
mh += partial_grid_hash(grid_dims[b*ndims + d] - 1,
grid_dims + b*ndims, d, ndims);
atomicMax(maxhash, mh);
}
}
}
int cuda_hashgrid_order(
float* locs,
const float* low,
const float* grid_dims,
float* cellIDs,
float* idxs,
float* buffer,
const int batch_size,
const int N,
const int ndims,
const float cellEdge,
cudaStream_t stream)
{
uint32_t* cellIDsi = (uint32_t*) cellIDs;
int b;
int nops = batch_size*N;
int numBlocks = ceil(nops * (1.0/256));
dim3 blocks(min(MAX_BLOCKS, numBlocks));
dim3 threads(256);
kernel_compute_cellIDs<<<blocks, threads, 0, stream>>>(locs, low, grid_dims, cellIDsi,
idxs, batch_size, N, ndims, cellEdge, (uint32_t*)buffer);
cudaStreamSynchronize(stream);
if(!PrintOnCudaError("cuda_hashgrid_order: kernel_compute_cellIDs")) return 0;
uint32_t maxhash;
cudaMemcpy(&maxhash, buffer, sizeof(uint32_t), cudaMemcpyDeviceToHost);
uint32_t numBits = (uint32_t)ceil(log2((float)maxhash)) + 1;
// TODO: deriving numBits from maxhash does not seem to work reliably, so use all 32 bits for now.
numBits = sizeof(uint32_t)*8;
// Sort the particles by cell ID.
for(b = 0; b < batch_size; ++b)
{
cub::DoubleBuffer<uint32_t> d_keys(cellIDsi + b*N, cellIDsi + batch_size*N);
cub::DoubleBuffer<float> d_values(idxs + b*N, cellIDs + (1 + batch_size)*N);
size_t sortTempSize;
cub::DeviceRadixSort::SortPairs(buffer, sortTempSize, d_keys, d_values, N, 0,
numBits, stream);
cudaStreamSynchronize(stream);
if (d_keys.Current() != cellIDsi + b*N)
cudaMemcpyAsync(cellIDsi + b*N, d_keys.Current(),
sizeof(uint32_t)*N, cudaMemcpyDeviceToDevice, stream);
if (d_values.Current() != idxs + b*N)
cudaMemcpyAsync(idxs + b*N, d_values.Current(), sizeof(float)*N,
cudaMemcpyDeviceToDevice, stream);
cudaStreamSynchronize(stream);
}
// BUG: For some reason, CUDA won't finish the above cudaMemcpy's (async or
// otherwise) unless it copies some data to the heap (not the stack).
float* didxs = new float;
cudaMemcpy(didxs, idxs + b*N, sizeof(float), cudaMemcpyDeviceToHost);
delete didxs;
cudaDeviceSynchronize();
return PrintOnCudaError("cuda_hashgrid_order");
}
__global__
void kernel_fill_cells(
const uint32_t* cellIDs,
float* cellStarts,
float* cellEnds,
const int batch_size,
const int N,
const int ncells)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int i;
for(i = index; i < N*batch_size; i += stride)
{
int b = i/N;
int n = i%N;
int c = cellIDs[i];
if(n == 0)
{
cellStarts[b*ncells + c] = n;
}
else
{
int p = cellIDs[b*N + n-1];
if (c != p)
{
cellStarts[b*ncells + c] = n;
cellEnds[b*ncells + p] = n;
}
}
if(n == N-1)
{
cellEnds[b*ncells + c] = n+1;
}
}
}
__global__
void kernel_compute_collisions(
const float* qlocs,
const float* locs,
const float* cellStarts,
const float* cellEnds,
const int batch_size,
const int M,
const int N,
const int ndims,
const int ncells,
const float* low,
const float* grid_dims,
const float cellEdge,
const float radius2,
float* collisions,
const int max_collisions,
const int include_self)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int i;
for(i = index; i < M*batch_size; i += stride)
{
int b = i/M;
int n = i%M;
compute_collisions(
qlocs,
locs,
cellStarts,
cellEnds,
batch_size,
M,
N,
ndims,
ncells,
low,
grid_dims,
cellEdge,
radius2,
collisions,
max_collisions,
include_self,
b,
n);
}
}
int cuda_compute_collisions(
const float* qlocs,
const float* locs,
const float* low,
const float* grid_dims,
const float* cellIDs,
float* cellStarts,
float* cellEnds,
float* collisions,
const int batch_size,
const int M,
const int N,
const int ndims,
const int max_collisions,
const int ncells,
const float cellEdge,
const float radius,
const int include_self,
cudaStream_t stream)
{
int nops = batch_size*N;
int numBlocks = ceil(nops * (1.0/256));
dim3 blocks(min(MAX_BLOCKS, numBlocks));
dim3 threads(256);
const uint32_t* cellIDsi = (const uint32_t*) cellIDs;
// Create the cell start and end lists.
kernel_fill_cells<<<blocks, threads, 0, stream>>>(cellIDsi, cellStarts, cellEnds,
batch_size, N, ncells);
cudaStreamSynchronize(stream);
if(!PrintOnCudaError("compute_collisions")) return 0;
nops = batch_size*N;
numBlocks = ceil(nops * (1.0/256));
blocks = dim3(min(MAX_BLOCKS, numBlocks));
threads = dim3(256);
// Make collision lists.
kernel_compute_collisions<<<blocks, threads, 0, stream>>>(
qlocs,
locs,
cellStarts,
cellEnds,
batch_size,
M,
N,
ndims,
ncells,
low,
grid_dims,
cellEdge,
radius*radius,
collisions,
max_collisions,
include_self);
cudaStreamSynchronize(stream);
return PrintOnCudaError("compute_collisions");
}
__global__
void kernel_reorder_data(
const float* locs,
const float* data,
const float* idxs,
float* nlocs,
float* ndata,
const int batch_size,
const int N,
const int ndims,
const int nchannels,
const int reverse)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int i, d;
for(i = index; i < N*batch_size; i += stride)
{
int b = i/N;
int nn = i%N;
int on = idxs[i];
if(reverse)
{
nn = idxs[i];
on = i%N;
}
for(d = 0; d < ndims; ++d)
nlocs[b*N*ndims + nn*ndims + d] = locs[b*N*ndims + on*ndims + d];
if(data != NULL)
{
for(d = 0; d < nchannels; ++d)
ndata[b*N*nchannels + nn*nchannels + d] = data[b*N*nchannels + on*nchannels + d];
}
}
}
int cuda_reorder_data(
float* locs,
float* data,
float* idxs,
float* nlocs,
float* ndata,
const int batch_size,
const int N,
const int ndims,
const int nchannels,
const int reverse,
cudaStream_t stream)
{
int nops = batch_size*N;
int numBlocks = ceil(nops * (1.0/256));
dim3 blocks(min(MAX_BLOCKS, numBlocks));
dim3 threads(256);
// Re-order locs and data.
kernel_reorder_data<<<blocks, threads, 0, stream>>>(locs, data, idxs, nlocs,
ndata, batch_size, N, ndims, nchannels, reverse);
cudaDeviceSynchronize();
return PrintOnCudaError("cuda_reorder_data");
}
size_t get_radixsort_buffer_size(cudaStream_t stream)
{
cub::DoubleBuffer<int> d_keys(NULL, NULL);
cub::DoubleBuffer<float> d_values(NULL, NULL);
size_t sortTempSize;
cub::DeviceRadixSort::SortPairs(NULL, sortTempSize, d_keys, d_values, 1, 0,
1, stream);
return sortTempSize;
}
__global__
void kernel_particleprojection(
const float* locs,
const float camera_fl,
const float filter_std,
const float filter_scale,
const float* depth_mask,
const int batch_size,
const int N,
const int width,
const int height,
float* out,
float* dlocs)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int i;
for(i = index; i < N*batch_size; i += stride)
{
int b = i/N;
int n = i%N;
compute_particle_projection(
locs,
batch_size,
N,
camera_fl,
width,
height,
filter_std,
filter_scale,
depth_mask,
n,
b,
out,
dlocs);
}
}
int cuda_particleprojection(
const float* locs,
const float camera_fl,
const float filter_std,
const float filter_scale,
const float* depth_mask,
const int batch_size,
const int N,
const int width,
const int height,
float* out,
float* dlocs,
cudaStream_t stream)
{
int nops = batch_size*N;
int numBlocks = ceil(nops * (1.0/256));
dim3 blocks(min(MAX_BLOCKS, numBlocks));
dim3 threads(256);
// Project the particles into the output image.
kernel_particleprojection<<<blocks, threads, 0, stream>>>(locs,
camera_fl,
filter_std,
filter_scale,
depth_mask,
batch_size,
N,
width,
height,
out,
dlocs);
cudaDeviceSynchronize();
return PrintOnCudaError("cuda_particleprojection");
}
__global__
void kernel_imageprojection(
const float* locs,
const float* image,
const float camera_fl,
const float* depth_mask,
const int batch_size,
const int N,
const int width,
const int height,
const int channels,
float* out,
float* dlocs,
float* dimage)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int i;
for(i = index; i < N*batch_size; i += stride)
{
int b = i/N;
int n = i%N;
compute_image_projection(
locs,
image,
batch_size,
N,
camera_fl,
width,
height,
channels,
depth_mask,
n,
b,
out,
dlocs,
dimage);
}
}
int cuda_imageprojection(
const float* locs,
const float* image,
const float camera_fl,
const float* depth_mask,
const int batch_size,
const int N,
const int width,
const int height,
const int channels,
float* out,
float* dlocs,
float* dimage,
cudaStream_t stream)
{
int nops = batch_size*N;
int numBlocks = ceil(nops * (1.0/256));
dim3 blocks(min(MAX_BLOCKS, numBlocks));
dim3 threads(256);
// Project the image values onto the particles.
kernel_imageprojection<<<blocks, threads, 0, stream>>>(locs,
image,
camera_fl,
depth_mask,
batch_size,
N,
width,
height,
channels,
out,
dlocs,
dimage);
cudaDeviceSynchronize();
return PrintOnCudaError("cuda_imageprojection");
}
#ifdef __cplusplus
//}
#endif
|
04ea25e7d2860d8110f5edd64ed9e3991139b8a8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void _mat_sum_row_fast(float *m, float *target,int nrow, int ncol, int agg_col){
int tx = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float accum[NUM_THREAD_PER_ROW];
if(tx < ncol){
accum[threadIdx.x] = m[blockIdx.y*ncol+tx];
}else{
accum[threadIdx.x] = 0.0f;
}
__syncthreads();
if(NUM_THREAD_PER_ROW >= 512){
if(threadIdx.x < 256)
accum[threadIdx.x] += accum[threadIdx.x+256];
__syncthreads();
}
if(NUM_THREAD_PER_ROW >= 256){
if(threadIdx.x < 128)
accum[threadIdx.x] += accum[threadIdx.x+128];
__syncthreads();
}
//NUM_THREAD_PER_ROW at least 128
if(threadIdx.x < 64)
accum[threadIdx.x] += accum[threadIdx.x+64];
__syncthreads();
if(threadIdx.x < 32){
accum[threadIdx.x] += accum[threadIdx.x+32];
accum[threadIdx.x] += accum[threadIdx.x+16];
accum[threadIdx.x] += accum[threadIdx.x+8];
accum[threadIdx.x] += accum[threadIdx.x+4];
accum[threadIdx.x] += accum[threadIdx.x+2];
accum[threadIdx.x] += accum[threadIdx.x+1];
}
target[blockIdx.y*agg_col+blockIdx.x] = accum[0];
} | 04ea25e7d2860d8110f5edd64ed9e3991139b8a8.cu | #include "includes.h"
__global__ void _mat_sum_row_fast(float *m, float *target,int nrow, int ncol, int agg_col){
int tx = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float accum[NUM_THREAD_PER_ROW];
if(tx < ncol){
accum[threadIdx.x] = m[blockIdx.y*ncol+tx];
}else{
accum[threadIdx.x] = 0.0f;
}
__syncthreads();
if(NUM_THREAD_PER_ROW >= 512){
if(threadIdx.x < 256)
accum[threadIdx.x] += accum[threadIdx.x+256];
__syncthreads();
}
if(NUM_THREAD_PER_ROW >= 256){
if(threadIdx.x < 128)
accum[threadIdx.x] += accum[threadIdx.x+128];
__syncthreads();
}
//NUM_THREAD_PER_ROW at least 128
if(threadIdx.x < 64)
accum[threadIdx.x] += accum[threadIdx.x+64];
__syncthreads();
if(threadIdx.x < 32){
accum[threadIdx.x] += accum[threadIdx.x+32];
accum[threadIdx.x] += accum[threadIdx.x+16];
accum[threadIdx.x] += accum[threadIdx.x+8];
accum[threadIdx.x] += accum[threadIdx.x+4];
accum[threadIdx.x] += accum[threadIdx.x+2];
accum[threadIdx.x] += accum[threadIdx.x+1];
}
target[blockIdx.y*agg_col+blockIdx.x] = accum[0];
} |
87ab0e51b2411125b31bfe762f2bd5db443449ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_ROCM
#include "dragon/core/context_cuda.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernel {
namespace {
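// Numerically stable elementwise sigmoid cross-entropy:
// loss = log(1 + exp(-|x|)) + x*((x >= 0) - t), which equals log(1 + exp(x)) - x*t without
// overflowing for large |x|. Targets < 0 mark ignored entries (loss and mask set to 0).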
template <typename T>
__global__ void _SigmoidCrossEntropy(
const int nthreads,
const T* logit,
const T* target,
T* loss,
T* mask) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
if (target[i] < 0) {
loss[i] = mask[i] = T(0);
} else {
const T lgt = logit[i];
loss[i] = log(T(1) + exp(lgt - T(2) * lgt * T(lgt >= 0))) +
lgt * (T(lgt >= 0) - target[i]);
mask[i] = T(1);
}
}
}
template <typename T>
__global__ void _SigmoidCrossEntropyGrad(
const int nthreads,
const T* logit,
const T* target,
T* dx,
T* mask) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
if (target[i] < 0) {
dx[i] = mask[i] = T(0);
} else {
dx[i] = T(1) / (T(1) + exp(-logit[i])) - target[i];
mask[i] = T(1);
}
}
}
} // namespace
/* ------------------- Launcher Separator ------------------- */
#define DEFINE_KERNEL_LAUNCHER(name, T) \
template <> \
void name<T, CUDAContext>( \
const int count, \
const T* logit, \
const T* target, \
T* loss, \
T* mask, \
CUDAContext* ctx) { \
hipLaunchKernelGGL(( _##name), dim3(CUDA_BLOCKS(count)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \
count, logit, target, loss, mask); \
}
DEFINE_KERNEL_LAUNCHER(SigmoidCrossEntropy, float);
DEFINE_KERNEL_LAUNCHER(SigmoidCrossEntropy, double);
DEFINE_KERNEL_LAUNCHER(SigmoidCrossEntropyGrad, float);
DEFINE_KERNEL_LAUNCHER(SigmoidCrossEntropyGrad, double);
#undef DEFINE_KERNEL_LAUNCHER
} // namespace kernel
} // namespace dragon
#endif // USE_ROCM
| 87ab0e51b2411125b31bfe762f2bd5db443449ef.cu | #ifdef USE_CUDA
#include "dragon/core/context_cuda.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernel {
namespace {
template <typename T>
__global__ void _SigmoidCrossEntropy(
const int nthreads,
const T* logit,
const T* target,
T* loss,
T* mask) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
if (target[i] < 0) {
loss[i] = mask[i] = T(0);
} else {
const T lgt = logit[i];
loss[i] = log(T(1) + exp(lgt - T(2) * lgt * T(lgt >= 0))) +
lgt * (T(lgt >= 0) - target[i]);
mask[i] = T(1);
}
}
}
template <typename T>
__global__ void _SigmoidCrossEntropyGrad(
const int nthreads,
const T* logit,
const T* target,
T* dx,
T* mask) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
if (target[i] < 0) {
dx[i] = mask[i] = T(0);
} else {
dx[i] = T(1) / (T(1) + exp(-logit[i])) - target[i];
mask[i] = T(1);
}
}
}
} // namespace
/* ------------------- Launcher Separator ------------------- */
#define DEFINE_KERNEL_LAUNCHER(name, T) \
template <> \
void name<T, CUDAContext>( \
const int count, \
const T* logit, \
const T* target, \
T* loss, \
T* mask, \
CUDAContext* ctx) { \
_##name<<<CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \
count, logit, target, loss, mask); \
}
DEFINE_KERNEL_LAUNCHER(SigmoidCrossEntropy, float);
DEFINE_KERNEL_LAUNCHER(SigmoidCrossEntropy, double);
DEFINE_KERNEL_LAUNCHER(SigmoidCrossEntropyGrad, float);
DEFINE_KERNEL_LAUNCHER(SigmoidCrossEntropyGrad, double);
#undef DEFINE_KERNEL_LAUNCHER
} // namespace kernel
} // namespace dragon
#endif // USE_CUDA
|
504ab31964c9452e864c787e38ad4547f75d9d5e.hip | // !!! This is a file automatically generated by hipify!!!
#include <memory.h>
#include <hip/hip_runtime.h>
#include <rmm/rmm.h>
#include "regex.cuh"
#include "regcomp.h"
#include "../custring_view.cuh"
// from is_flags.h -- need to put these somewhere else
#define IS_SPACE(x) ((x & 16)>0)
#define IS_ALPHA(x) ((x & 8)>0)
#define IS_DIGIT(x) ((x & 4)>0)
#define IS_NUMERIC(x) ((x & 2)>0)
#define IS_DECIMAL(x) ((x & 1)>0)
#define IS_ALPHANUM(x) ((x & 15)>0)
#define IS_UPPER(x) ((x & 32)>0)
#define IS_LOWER(x) ((x & 64)>0)
// defined in util.cu
__host__ __device__ unsigned int u82u( unsigned int utf8 );
//
#define LISTBYTES 12
#define LISTSIZE (LISTBYTES<<3)
//
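// Relist is the per-string scratch list used while simulating the compiled regex: it holds the ids
// of the currently active instructions together with their match ranges, plus a bitmask so each
// instruction is activated at most once per step. ranges/inst_ids/mask all point into the trailing
// data[] buffer (see size_for for the layout).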
struct Relist
{
short size, listsize;
int pad; // keep data on an 8-byte boundary
int2* ranges;//[LISTSIZE];
u_char* inst_ids;//[LISTSIZE];
u_char* mask;//[LISTBYTES];
u_char data[(9*LISTSIZE)+LISTBYTES]; // always last
__host__ __device__ static int size_for(int insts)
{
int size = 0;
size += sizeof(short); // size
size += sizeof(short); // listsize
size += sizeof(int); // pad
size += sizeof(u_char*)*3; // 3 pointers
size += sizeof(int2)*insts; // ranges bytes
size += sizeof(u_char)*insts; // inst_ids bytes
size += sizeof(u_char)*((insts+7)/8); // mask bytes
size = ((size+7)/8)*8; // align it too
return size;
}
__host__ __device__ Relist()
{
//listsize = LISTSIZE;
//reset();
set_listsize(LISTSIZE);
}
__host__ __device__ inline void set_listsize(short ls)
{
listsize = ls;
u_char* ptr = (u_char*)data;
ranges = (int2*)ptr;
ptr += listsize * sizeof(int2);
inst_ids = ptr;
ptr += listsize;
mask = ptr;
reset();
}
__host__ __device__ inline void reset()
{
//memset(mask, 0, LISTBYTES);
memset(mask, 0, (listsize+7)/8);
size = 0;
}
__device__ inline bool activate(int i, int begin, int end)
{
//if ( i >= listsize )
// printf("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n");
{
if (!readMask(i))
{
writeMask(true, i);
inst_ids[size] = (u_char)i;
int2 range;
range.x = begin;
range.y = end;
ranges[size] = range;
size++;
return true;
}
}
return false;
}
__device__ inline void writeMask(bool v, int pos)
{
u_char uc = 1 << (pos & 7);
if (v)
mask[pos >> 3] |= uc;
else
mask[pos >> 3] &= ~uc;
}
//if( tid > jnk.list1->minId && tid < jnk.list1->maxId && !readMask(jnk.list1->mask, tid) )
__device__ inline bool readMask(int pos)
{
u_char uc = mask[pos >> 3];
return (bool)((uc >> (pos & 7)) & 1);
}
};
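// Reljunk bundles the two Relists that regexec ping-pongs between, plus an optional fast-start
// condition (a literal first character or a line anchor) used to skip ahead in the input.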
struct Reljunk
{
Relist *list1, *list2;
int starttype;
char32_t startchar;
};
__device__ inline bool isAlphaNumeric(char32_t c)
{
return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9');
}
__device__ inline void swaplist(Relist*& l1, Relist*& l2)
{
Relist* t = l1;
l1 = l2;
l2 = t;
}
__device__ dreclass::dreclass(unsigned char* flags)
: builtins(0), count(0), chrs(0), uflags(flags) {}
__device__ bool dreclass::is_match(char32_t ch)
{
int i=0, len = count;
for( ; i < len; i += 2 )
{
if( (ch >= chrs[i]) && (ch <= chrs[i+1]) )
return true;
}
if( !builtins )
return false;
unsigned int uni = u82u(ch);
if( uni > 0x00FFFF )
return false;
unsigned char fl = uflags[uni];
if( (builtins & 1) && ((ch=='_') || IS_ALPHANUM(fl)) ) // \w
return true;
if( (builtins & 2) && IS_SPACE(fl) ) // \s
return true;
if( (builtins & 4) && IS_DIGIT(fl) ) // \d
return true;
if( (builtins & 8) && ((ch != '\n') && (ch != '_') && !IS_ALPHANUM(fl)) ) // \W
return true;
if( (builtins & 16) && !IS_SPACE(fl) ) // \S
return true;
if( (builtins & 32) && ((ch != '\n') && !IS_DIGIT(fl)) ) // \D
return true;
//
return false;
}
dreprog::dreprog() {}
dreprog::~dreprog() {}
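// Compiles the pattern on the host, then flattens the instruction array and the character classes
// (with an offsets table) into one contiguous buffer that is copied to device memory. When the
// program has more instructions than LISTSIZE, per-string Relist scratch space is also
// pre-allocated (relists_mem).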
dreprog* dreprog::create_from(const char32_t* pattern, unsigned char* uflags, unsigned int strscount )
{
// compile pattern
Reprog* prog = Reprog::create_from(pattern);
// compute size to hold prog
int insts_count = (int)prog->inst_count();
int classes_count = (int)prog->classes_count();
int insts_size = insts_count * sizeof(Reinst);
int classes_size = classes_count * sizeof(int); // offsets
for( int idx=0; idx < classes_count; ++idx )
classes_size += (int)((prog->class_at(idx).chrs.size())*sizeof(char32_t)) + (int)sizeof(int);
// allocate memory to store prog
size_t memsize = sizeof(dreprog) + insts_size + classes_size;
u_char* buffer = (u_char*)malloc(memsize);
dreprog* rtn = (dreprog*)buffer;
buffer += sizeof(dreprog);
Reinst* insts = (Reinst*)buffer;
memcpy( insts, prog->insts_data(), insts_size);
buffer += insts_size;
// classes are variable size so create offsets array
int* offsets = (int*)buffer;
buffer += classes_count * sizeof(int);
char32_t* classes = (char32_t*)buffer;
int offset = 0;
for( int idx=0; idx < classes_count; ++idx )
{
Reclass& cls = prog->class_at(idx);
memcpy( classes++, &(cls.builtins), sizeof(int) );
int len = (int)cls.chrs.size();
memcpy( classes, cls.chrs.c_str(), len*sizeof(char32_t) );
offset += 1 + len;
offsets[idx] = offset;
classes += len;
}
// initialize the rest of the elements
rtn->startinst_id = prog->get_start_inst();
rtn->num_capturing_groups = prog->groups_count();
rtn->insts_count = insts_count;
rtn->classes_count = classes_count;
rtn->unicode_flags = uflags;
rtn->relists_mem = 0;
// allocate memory for relist if necessary
if( (insts_count > LISTSIZE) && strscount )
{
int rsz = Relist::size_for(insts_count);
size_t rlmsz = rsz*2*strscount; // Reljunk has 2 Relist ptrs
void* rmem = 0;
RMM_ALLOC(&rmem,rlmsz,0);//hipMalloc(&rmem,rlmsz);
rtn->relists_mem = rmem;
}
// compiled prog copied into flat memory
delete prog;
// copy flat prog to device memory
dreprog* d_rtn = 0;
RMM_ALLOC(&d_rtn,memsize,0);//hipMalloc(&d_rtn,memsize);
hipMemcpy(d_rtn,rtn,memsize,hipMemcpyHostToDevice);
free(rtn);
return d_rtn;
}
void dreprog::destroy(dreprog* prog)
{
prog->free_relists();
RMM_FREE(prog,0);//hipFree(prog);
}
void dreprog::free_relists()
{
void* cptr = 0; // this magic works but only as member function
hipMemcpy(&cptr,&relists_mem,sizeof(void*),hipMemcpyDeviceToHost);
if( cptr )
RMM_FREE(cptr,0);//hipFree(cptr);
}
int dreprog::inst_counts()
{
int count = 0;
hipMemcpy(&count,&insts_count,sizeof(int),hipMemcpyDeviceToHost);
return count;
}
int dreprog::group_counts()
{
int count = 0;
hipMemcpy(&count,&num_capturing_groups,sizeof(int),hipMemcpyDeviceToHost);
return count;
}
__host__ __device__ Reinst* dreprog::get_inst(int idx)
{
if( idx < 0 || idx >= insts_count )
return 0;
u_char* buffer = (u_char*)this;
Reinst* insts = (Reinst*)(buffer + sizeof(dreprog));
return insts + idx;
}
//__device__ char32_t* dreprog::get_class(int idx, int& len)
//{
// if( idx < 0 || idx >= classes_count )
// return 0;
// u_char* buffer = (u_char*)this;
// buffer += sizeof(dreprog) + (insts_count * sizeof(Reinst));
// int* offsets = (int*)buffer;
// buffer += classes_count * sizeof(int);
// char32_t* classes = (char32_t*)buffer;
// int offset = offsets[idx];
// len = offset;
// if( idx==0 )
// return classes;
// offset = offsets[idx-1];
// len -= offset;
// classes += offset;
// return classes;
//}
__device__ int dreprog::get_class(int idx, dreclass& cls)
{
if( idx < 0 || idx >= classes_count )
return 0;
u_char* buffer = (u_char*)this;
buffer += sizeof(dreprog) + (insts_count * sizeof(Reinst));
int* offsets = (int*)buffer;
buffer += classes_count * sizeof(int);
char32_t* classes = (char32_t*)buffer;
int offset = offsets[idx];
int builtins, len = offset -1;
if( idx > 0 )
{
offset = offsets[idx-1];
len -= offset;
classes += offset;
}
memcpy( &builtins, classes++, sizeof(int) );
cls.builtins = builtins;
cls.count = len;
cls.chrs = classes;
return len;
}
// execute compiled expression for each character in the provided string
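// Thompson-style NFA simulation: list1 holds the states active at the current position and list2
// collects states for the next character; the two lists are swapped after every step.
// begin/end are updated from the LBRA/RBRA ranges of the requested capture group
// (groupId 0 means the whole match).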
__device__ int dreprog::regexec(custring_view* dstr, Reljunk &jnk, int& begin, int& end, int groupId)
{
int match = 0;
int checkstart = jnk.starttype;
int txtlen = dstr->chars_count();
int pos = begin;
int eos = end;
char32_t c = 0; // lc = 0;
custring_view::iterator itr = custring_view::iterator(*dstr,pos);
jnk.list1->reset();
do
{
/* fast check for first char */
if (checkstart)
{
switch (jnk.starttype)
{
case CHAR:
{
int fidx = dstr->find((Char)jnk.startchar,pos);
if( fidx < 0 )
return match;
pos = fidx;
break;
}
case BOL:
{
if( pos==0 )
break;
if( jnk.startchar != '^' )
return match;
--pos;
int fidx = dstr->find((Char)'\n',pos);
if( fidx < 0 )
return match; // update begin/end values?
pos = fidx + 1;
break;
}
}
//if( pos > 0 )
//{
// itr = custring_view::iterator(*dstr,pos-1);
// lc = *itr;
// ++itr;
//}
//else
//{
// itr = dstr->begin();
// lc = 0;
//}
itr = custring_view::iterator(*dstr,pos);
}
if (pos < eos && match == 0)
jnk.list1->activate(startinst_id, pos, 0);
//c = (char32_t)(pos >= txtlen ? 0 : dstr->at(pos) );
c = (char32_t)(pos >= txtlen ? 0 : *itr); // iterator is many times faster than at()
// expand LBRA, RBRA, BOL, EOL, BOW, NBOW, and OR
bool expanded;
do
{
jnk.list2->reset();
expanded = false;
for (short i = 0; i < jnk.list1->size; i++)
{
int inst_id = (int)jnk.list1->inst_ids[i];
int2 &range = jnk.list1->ranges[i];
const Reinst* inst = get_inst(inst_id);
int id_activate = -1;
switch (inst->type)
{
case CHAR:
case ANY:
case ANYNL:
case CCLASS:
case NCCLASS:
case END:
id_activate = inst_id;
break;
case LBRA:
if (inst->u1.subid == groupId)
range.x = pos;
id_activate = inst->u2.next_id;
expanded = true;
break;
case RBRA:
if (inst->u1.subid == groupId)
range.y = pos;
id_activate = inst->u2.next_id;
expanded = true;
break;
case BOL:
if( (pos==0) || ((inst->u1.c=='^') && (dstr->at(pos-1)==(Char)'\n')) )
{
id_activate = inst->u2.next_id;
expanded = true;
}
break;
case EOL:
if( (c==0) || (inst->u1.c == '$' && c == '\n'))
{
id_activate = inst->u2.next_id;
expanded = true;
}
break;
case BOW:
{
unsigned int uni = u82u(c);
char32_t lc = (char32_t)(pos ? dstr->at(pos-1) : 0);
unsigned int luni = u82u(lc);
//bool cur_alphaNumeric = isAlphaNumeric(c);
//bool last_alphaNumeric = ( (pos==0) ? false : isAlphaNumeric((char32_t)dstr->at(pos-1)) );
bool cur_alphaNumeric = (uni < 0x010000) && IS_ALPHANUM(unicode_flags[uni]);
bool last_alphaNumeric = (luni < 0x010000) && IS_ALPHANUM(unicode_flags[luni]);
if( cur_alphaNumeric != last_alphaNumeric )
{
id_activate = inst->u2.next_id;
expanded = true;
}
break;
}
case NBOW:
{
unsigned int uni = u82u(c);
char32_t lc = (char32_t)(pos ? dstr->at(pos-1) : 0);
unsigned int luni = u82u(lc);
//bool cur_alphaNumeric = isAlphaNumeric(c);
//bool last_alphaNumeric = ( (pos==0) ? false : isAlphaNumeric((char32_t)dstr->at(pos-1)) );
bool cur_alphaNumeric = (uni < 0x010000) && IS_ALPHANUM(unicode_flags[uni]);
bool last_alphaNumeric = (luni < 0x010000) && IS_ALPHANUM(unicode_flags[luni]);
if( cur_alphaNumeric == last_alphaNumeric )
{
id_activate = inst->u2.next_id;
expanded = true;
}
break;
}
case OR:
jnk.list2->activate(inst->u1.right_id, range.x, range.y);
id_activate = inst->u2.left_id;
expanded = true;
break;
}
if (id_activate >= 0)
jnk.list2->activate(id_activate, range.x, range.y);
}
swaplist(jnk.list1, jnk.list2);
} while (expanded);
// execute, only CHAR, ANY, ANYNL, CCLASS, NCCLASS, END left now
jnk.list2->reset();
for (short i = 0; i < jnk.list1->size; i++)
{
int inst_id = (int)jnk.list1->inst_ids[i];
int2 &range = jnk.list1->ranges[i];
const Reinst* inst = get_inst(inst_id);
int id_activate = -1;
switch (inst->type)
{
case CHAR:
if (inst->u1.c == c)
id_activate = inst->u2.next_id;
break;
case ANY:
if (c != '\n')
id_activate = inst->u2.next_id;
break;
case ANYNL:
id_activate = inst->u2.next_id;
break;
case CCLASS:
{
dreclass cls(unicode_flags);
get_class(inst->u1.cls_id,cls);
if( cls.is_match(c) )
id_activate = inst->u2.next_id;
//int numCls = 0;
//char32_t* cls = get_class(inst->u1.cls_id,numCls);
//for( int i=0; i < numCls; i += 2 )
//{
// if( (c >= cls[i]) && (c <= cls[i+1]) )
// {
// id_activate = inst->u2.next_id;
// break;
// }
//}
break;
}
case NCCLASS:
{
dreclass cls(unicode_flags);
get_class(inst->u1.cls_id,cls);
if( !cls.is_match(c) )
id_activate = inst->u2.next_id;
//int numCls = 0;
//char32_t* cls = get_class(inst->u1.cls_id,numCls);
//int i=0;
//for( ; i < numCls; i += 2 )
// if( c >= cls[i] && c <= cls[i+1] )
// break;
//if( i == numCls )
// id_activate = inst->u2.next_id;
break;
}
case END:
match = 1;
begin = range.x;
end = groupId==0? pos : range.y;
goto BreakFor;
}
if (id_activate >= 0)
jnk.list2->activate(id_activate, range.x, range.y);
}
BreakFor:
++pos;
++itr;
swaplist(jnk.list1, jnk.list2);
checkstart = jnk.list1->size > 0 ? 0 : 1;
}
while (c && (jnk.list1->size>0 || match == 0));
return match;
}
//
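// Notes on the entry points below: contains() scans the whole string
// (end = chars_count); match() passes end = 1 so the start state is only
// activated at position 0, i.e. the match is anchored at the beginning;
// find() reports the matched range through begin/end (both set to -1 on failure);
// extract() passes col+1 as the group id so regexec returns the range of that
// capture group, starting the search at the supplied begin. The overloads that
// take an idx use the per-string slots carved out of relists_mem (when it was
// allocated) instead of the stack-local Relist pair, calling set_listsize() to
// initialize them in place.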
__device__ int dreprog::contains( custring_view* dstr )
{
Reljunk jnk;
jnk.starttype = 0;
jnk.startchar = 0;
int type = get_inst(startinst_id)->type;
if( type == CHAR || type == BOL )
{
jnk.starttype = type;
jnk.startchar = get_inst(startinst_id)->u1.c;
}
Relist relist[2];
jnk.list1 = relist;
jnk.list2 = relist + 1;
int begin=0, end=dstr->chars_count();
int rtn = regexec(dstr,jnk,begin,end);
return rtn;
}
__device__ int dreprog::contains( unsigned int idx, custring_view* dstr )
{
Reljunk jnk;
jnk.starttype = 0;
jnk.startchar = 0;
int type = get_inst(startinst_id)->type;
if( type == CHAR || type == BOL )
{
jnk.starttype = type;
jnk.startchar = get_inst(startinst_id)->u1.c;
}
int begin=0, end=dstr->chars_count();
if( relists_mem==0 )
{
Relist relist[2];
jnk.list1 = relist;
jnk.list2 = relist + 1;
return regexec(dstr,jnk,begin,end);
}
int relsz = Relist::size_for(insts_count);
char* drel = (char*)relists_mem; // beginning of Relist buffer
drel += (idx * relsz * 2); // two Relist ptrs in Reljunk
jnk.list1 = (Relist*)drel; // first one
jnk.list2 = (Relist*)(drel + relsz); // second one
jnk.list1->set_listsize((short)insts_count); // essentially this is a
jnk.list2->set_listsize((short)insts_count); // substitute for the ctor call
return regexec(dstr,jnk,begin,end);
}
__device__ int dreprog::match( custring_view* dstr )
{
Reljunk jnk;
jnk.starttype = 0;
jnk.startchar = 0;
int type = get_inst(startinst_id)->type;
if( type == CHAR || type == BOL )
{
jnk.starttype = type;
jnk.startchar = get_inst(startinst_id)->u1.c;
}
Relist relist[2];
jnk.list1 = relist;
jnk.list2 = relist + 1;
int begin=0, end=1;
int rtn = regexec(dstr,jnk,begin,end);
return rtn;
}
__device__ int dreprog::match( unsigned int idx, custring_view* dstr )
{
Reljunk jnk;
jnk.starttype = 0;
jnk.startchar = 0;
int type = get_inst(startinst_id)->type;
if( type == CHAR || type == BOL )
{
jnk.starttype = type;
jnk.startchar = get_inst(startinst_id)->u1.c;
}
int begin=0, end=1;
if( relists_mem==0 )
{
Relist relist[2];
jnk.list1 = relist;
jnk.list2 = relist + 1;
return regexec(dstr,jnk,begin,end);
}
int relsz = Relist::size_for(insts_count);
char* drel = (char*)relists_mem; // beginning of Relist buffer
drel += (idx * relsz * 2); // two Relist ptrs in Reljunk
jnk.list1 = (Relist*)drel; // first one
jnk.list2 = (Relist*)(drel + relsz); // second one
jnk.list1->set_listsize((short)insts_count); // essentially this is a
jnk.list2->set_listsize((short)insts_count); // substitute for the ctor call
return regexec(dstr,jnk,begin,end);
}
__device__ int dreprog::find( custring_view* dstr, int& begin, int& end )
{
Reljunk jnk;
jnk.starttype = 0;
jnk.startchar = 0;
int type = get_inst(startinst_id)->type;
if( (type == CHAR) || (type == BOL) )
{
jnk.starttype = type;
jnk.startchar = get_inst(startinst_id)->u1.c;
}
Relist relist[2];
jnk.list1 = relist;
jnk.list2 = relist + 1;
int rtn = regexec(dstr,jnk,begin,end);
if( rtn <=0 )
begin = end = -1;
return rtn;
}
__device__ int dreprog::find( unsigned int idx, custring_view* dstr, int& begin, int& end )
{
Reljunk jnk;
jnk.starttype = 0;
jnk.startchar = 0;
int type = get_inst(startinst_id)->type;
if( (type == CHAR) || (type == BOL) )
{
jnk.starttype = type;
jnk.startchar = get_inst(startinst_id)->u1.c;
}
int rtn = 0;
if( relists_mem==0 )
{
Relist relist[2];
jnk.list1 = relist;
jnk.list2 = relist + 1;
rtn = regexec(dstr,jnk,begin,end);
}
else
{
int relsz = Relist::size_for(insts_count);
char* drel = (char*)relists_mem; // beginning of Relist buffer
drel += (idx * relsz * 2); // two Relist ptrs in Reljunk
jnk.list1 = (Relist*)drel; // first one
jnk.list2 = (Relist*)(drel + relsz); // second one
jnk.list1->set_listsize((short)insts_count); // essentially this is a
jnk.list2->set_listsize((short)insts_count); // substitute for the ctor call
rtn = regexec(dstr,jnk,begin,end);
}
if( rtn <=0 )
begin = end = -1;
return rtn;
}
//
__device__ int dreprog::extract( custring_view* str, int& begin, int& end, int col )
{
Reljunk jnk;
jnk.starttype = 0;
jnk.startchar = 0;
int type = get_inst(startinst_id)->type;
if( (type == CHAR) || (type == BOL) )
{
jnk.starttype = type;
jnk.startchar = get_inst(startinst_id)->u1.c;
}
Relist relist[2];
jnk.list1 = relist;
jnk.list2 = relist + 1;
end = begin + 1;
int rtn = regexec(str,jnk,begin,end, col +1);
return rtn;
}
__device__ int dreprog::extract( unsigned int idx, custring_view* dstr, int& begin, int& end, int col )
{
Reljunk jnk;
jnk.starttype = 0;
jnk.startchar = 0;
int type = get_inst(startinst_id)->type;
if( (type == CHAR) || (type == BOL) )
{
jnk.starttype = type;
jnk.startchar = get_inst(startinst_id)->u1.c;
}
end = begin + 1;
if( relists_mem==0 )
{
Relist relist[2];
jnk.list1 = relist;
jnk.list2 = relist + 1;
return regexec(dstr,jnk,begin,end,col+1);
}
int relsz = Relist::size_for(insts_count);
char* drel = (char*)relists_mem; // beginning of Relist buffer
drel += (idx * relsz * 2); // two Relist ptrs in Reljunk
jnk.list1 = (Relist*)drel; // first one
jnk.list2 = (Relist*)(drel + relsz); // second one
jnk.list1->set_listsize((short)insts_count); // essentially this is a
jnk.list2->set_listsize((short)insts_count); // substitute for the ctor call
return regexec(dstr,jnk,begin,end,col+1);
}
| 504ab31964c9452e864c787e38ad4547f75d9d5e.cu |
#include <memory.h>
#include <cuda_runtime.h>
#include <rmm/rmm.h>
#include "regex.cuh"
#include "regcomp.h"
#include "../custring_view.cuh"
// from is_flags.h -- need to put these somewhere else
#define IS_SPACE(x) ((x & 16)>0)
#define IS_ALPHA(x) ((x & 8)>0)
#define IS_DIGIT(x) ((x & 4)>0)
#define IS_NUMERIC(x) ((x & 2)>0)
#define IS_DECIMAL(x) ((x & 1)>0)
#define IS_ALPHANUM(x) ((x & 15)>0)
#define IS_UPPER(x) ((x & 32)>0)
#define IS_LOWER(x) ((x & 64)>0)
// defined in util.cu
__host__ __device__ unsigned int u82u( unsigned int utf8 );
//
#define LISTBYTES 12
#define LISTSIZE (LISTBYTES<<3)
//
struct Relist
{
short size, listsize;
int pad; // keep data on 8-byte boundary
int2* ranges;//[LISTSIZE];
u_char* inst_ids;//[LISTSIZE];
u_char* mask;//[LISTBYTES];
u_char data[(9*LISTSIZE)+LISTBYTES]; // always last
__host__ __device__ static int size_for(int insts)
{
int size = 0;
size += sizeof(short); // size
size += sizeof(short); // listsize
size += sizeof(int); // pad
size += sizeof(u_char*)*3; // 3 pointers
size += sizeof(int2)*insts; // ranges bytes
size += sizeof(u_char)*insts; // inst_ids bytes
size += sizeof(u_char)*((insts+7)/8); // mask bytes
size = ((size+7)/8)*8; // align it too
return size;
}
__host__ __device__ Relist()
{
//listsize = LISTSIZE;
//reset();
set_listsize(LISTSIZE);
}
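// set_listsize() carves the trailing data[] buffer into the three
// per-instruction arrays: listsize int2 ranges, then listsize inst_ids bytes,
// then (listsize+7)/8 mask bytes -- the same layout size_for() accounts for.
// The fixed data[] declaration only covers LISTSIZE instructions; larger
// programs rely on the bigger relists_mem allocation made in create_from().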
__host__ __device__ inline void set_listsize(short ls)
{
listsize = ls;
u_char* ptr = (u_char*)data;
ranges = (int2*)ptr;
ptr += listsize * sizeof(int2);
inst_ids = ptr;
ptr += listsize;
mask = ptr;
reset();
}
__host__ __device__ inline void reset()
{
//memset(mask, 0, LISTBYTES);
memset(mask, 0, (listsize+7)/8);
size = 0;
}
__device__ inline bool activate(int i, int begin, int end)
{
//if ( i >= listsize )
// printf("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n");
{
if (!readMask(i))
{
writeMask(true, i);
inst_ids[size] = (u_char)i;
int2 range;
range.x = begin;
range.y = end;
ranges[size] = range;
size++;
return true;
}
}
return false;
}
__device__ inline void writeMask(bool v, int pos)
{
u_char uc = 1 << (pos & 7);
if (v)
mask[pos >> 3] |= uc;
else
mask[pos >> 3] &= ~uc;
}
//if( tid > jnk.list1->minId && tid < jnk.list1->maxId && !readMask(jnk.list1->mask, tid) )
__device__ inline bool readMask(int pos)
{
u_char uc = mask[pos >> 3];
return (bool)((uc >> (pos & 7)) & 1);
}
};
struct Reljunk
{
Relist *list1, *list2;
int starttype;
char32_t startchar;
};
__device__ inline bool isAlphaNumeric(char32_t c)
{
return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9');
}
__device__ inline void swaplist(Relist*& l1, Relist*& l2)
{
Relist* t = l1;
l1 = l2;
l2 = t;
}
__device__ dreclass::dreclass(unsigned char* flags)
: builtins(0), count(0), chrs(0), uflags(flags) {}
__device__ bool dreclass::is_match(char32_t ch)
{
int i=0, len = count;
for( ; i < len; i += 2 )
{
if( (ch >= chrs[i]) && (ch <= chrs[i+1]) )
return true;
}
if( !builtins )
return false;
unsigned int uni = u82u(ch);
if( uni > 0x00FFFF )
return false;
unsigned char fl = uflags[uni];
if( (builtins & 1) && ((ch=='_') || IS_ALPHANUM(fl)) ) // \w
return true;
if( (builtins & 2) && IS_SPACE(fl) ) // \s
return true;
if( (builtins & 4) && IS_DIGIT(fl) ) // \d
return true;
if( (builtins & 8) && ((ch != '\n') && (ch != '_') && !IS_ALPHANUM(fl)) ) // \W
return true;
if( (builtins & 16) && !IS_SPACE(fl) ) // \S
return true;
if( (builtins & 32) && ((ch != '\n') && !IS_DIGIT(fl)) ) // \D
return true;
//
return false;
}
dreprog::dreprog() {}
dreprog::~dreprog() {}
dreprog* dreprog::create_from(const char32_t* pattern, unsigned char* uflags, unsigned int strscount )
{
// compile pattern
Reprog* prog = Reprog::create_from(pattern);
// compute size to hold prog
int insts_count = (int)prog->inst_count();
int classes_count = (int)prog->classes_count();
int insts_size = insts_count * sizeof(Reinst);
int classes_size = classes_count * sizeof(int); // offsets
for( int idx=0; idx < classes_count; ++idx )
classes_size += (int)((prog->class_at(idx).chrs.size())*sizeof(char32_t)) + (int)sizeof(int);
// allocate memory to store prog
size_t memsize = sizeof(dreprog) + insts_size + classes_size;
u_char* buffer = (u_char*)malloc(memsize);
dreprog* rtn = (dreprog*)buffer;
buffer += sizeof(dreprog);
Reinst* insts = (Reinst*)buffer;
memcpy( insts, prog->insts_data(), insts_size);
buffer += insts_size;
// classes are variable size so create offsets array
int* offsets = (int*)buffer;
buffer += classes_count * sizeof(int);
char32_t* classes = (char32_t*)buffer;
int offset = 0;
for( int idx=0; idx < classes_count; ++idx )
{
Reclass& cls = prog->class_at(idx);
memcpy( classes++, &(cls.builtins), sizeof(int) );
int len = (int)cls.chrs.size();
memcpy( classes, cls.chrs.c_str(), len*sizeof(char32_t) );
offset += 1 + len;
offsets[idx] = offset;
classes += len;
}
// initialize the rest of the elements
rtn->startinst_id = prog->get_start_inst();
rtn->num_capturing_groups = prog->groups_count();
rtn->insts_count = insts_count;
rtn->classes_count = classes_count;
rtn->unicode_flags = uflags;
rtn->relists_mem = 0;
// allocate memory for relist if necessary
if( (insts_count > LISTSIZE) && strscount )
{
int rsz = Relist::size_for(insts_count);
size_t rlmsz = rsz*2*strscount; // Reljunk has 2 Relist ptrs
void* rmem = 0;
RMM_ALLOC(&rmem,rlmsz,0);//cudaMalloc(&rmem,rlmsz);
rtn->relists_mem = rmem;
}
// compiled prog copied into flat memory
delete prog;
// copy flat prog to device memory
dreprog* d_rtn = 0;
RMM_ALLOC(&d_rtn,memsize,0);//cudaMalloc(&d_rtn,memsize);
cudaMemcpy(d_rtn,rtn,memsize,cudaMemcpyHostToDevice);
free(rtn);
return d_rtn;
}
void dreprog::destroy(dreprog* prog)
{
prog->free_relists();
RMM_FREE(prog,0);//cudaFree(prog);
}
void dreprog::free_relists()
{
void* cptr = 0; // this magic works but only as member function
cudaMemcpy(&cptr,&relists_mem,sizeof(void*),cudaMemcpyDeviceToHost);
if( cptr )
RMM_FREE(cptr,0);//cudaFree(cptr);
}
int dreprog::inst_counts()
{
int count = 0;
cudaMemcpy(&count,&insts_count,sizeof(int),cudaMemcpyDeviceToHost);
return count;
}
int dreprog::group_counts()
{
int count = 0;
cudaMemcpy(&count,&num_capturing_groups,sizeof(int),cudaMemcpyDeviceToHost);
return count;
}
__host__ __device__ Reinst* dreprog::get_inst(int idx)
{
if( idx < 0 || idx >= insts_count )
return 0;
u_char* buffer = (u_char*)this;
Reinst* insts = (Reinst*)(buffer + sizeof(dreprog));
return insts + idx;
}
//__device__ char32_t* dreprog::get_class(int idx, int& len)
//{
// if( idx < 0 || idx >= classes_count )
// return 0;
// u_char* buffer = (u_char*)this;
// buffer += sizeof(dreprog) + (insts_count * sizeof(Reinst));
// int* offsets = (int*)buffer;
// buffer += classes_count * sizeof(int);
// char32_t* classes = (char32_t*)buffer;
// int offset = offsets[idx];
// len = offset;
// if( idx==0 )
// return classes;
// offset = offsets[idx-1];
// len -= offset;
// classes += offset;
// return classes;
//}
__device__ int dreprog::get_class(int idx, dreclass& cls)
{
if( idx < 0 || idx >= classes_count )
return 0;
u_char* buffer = (u_char*)this;
buffer += sizeof(dreprog) + (insts_count * sizeof(Reinst));
int* offsets = (int*)buffer;
buffer += classes_count * sizeof(int);
char32_t* classes = (char32_t*)buffer;
int offset = offsets[idx];
int builtins, len = offset -1;
if( idx > 0 )
{
offset = offsets[idx-1];
len -= offset;
classes += offset;
}
memcpy( &builtins, classes++, sizeof(int) );
cls.builtins = builtins;
cls.count = len;
cls.chrs = classes;
return len;
}
// execute compiled expression for each character in the provided string
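// Overview: this appears to be a Pike-style NFA simulation. jnk.list1 holds the
// instruction states active at the current position and jnk.list2 collects the
// states for the next step; the inner do/while first expands the non-consuming
// instructions (LBRA, RBRA, BOL, EOL, BOW, NBOW, OR) until no new states appear,
// then the consuming instructions (CHAR, ANY, ANYNL, CCLASS, NCCLASS, END) are
// tested against the current character c, and the lists are swapped before the
// position advances.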
__device__ int dreprog::regexec(custring_view* dstr, Reljunk &jnk, int& begin, int& end, int groupId)
{
int match = 0;
int checkstart = jnk.starttype;
int txtlen = dstr->chars_count();
int pos = begin;
int eos = end;
char32_t c = 0; // lc = 0;
custring_view::iterator itr = custring_view::iterator(*dstr,pos);
jnk.list1->reset();
do
{
/* fast check for first char */
if (checkstart)
{
switch (jnk.starttype)
{
case CHAR:
{
int fidx = dstr->find((Char)jnk.startchar,pos);
if( fidx < 0 )
return match;
pos = fidx;
break;
}
case BOL:
{
if( pos==0 )
break;
if( jnk.startchar != '^' )
return match;
--pos;
int fidx = dstr->find((Char)'\n',pos);
if( fidx < 0 )
return match; // update begin/end values?
pos = fidx + 1;
break;
}
}
//if( pos > 0 )
//{
// itr = custring_view::iterator(*dstr,pos-1);
// lc = *itr;
// ++itr;
//}
//else
//{
// itr = dstr->begin();
// lc = 0;
//}
itr = custring_view::iterator(*dstr,pos);
}
if (pos < eos && match == 0)
jnk.list1->activate(startinst_id, pos, 0);
//c = (char32_t)(pos >= txtlen ? 0 : dstr->at(pos) );
c = (char32_t)(pos >= txtlen ? 0 : *itr); // iterator is many times faster than at()
// expand LBRA, RBRA, BOL, EOL, BOW, NBOW, and OR
bool expanded;
do
{
jnk.list2->reset();
expanded = false;
for (short i = 0; i < jnk.list1->size; i++)
{
int inst_id = (int)jnk.list1->inst_ids[i];
int2 &range = jnk.list1->ranges[i];
const Reinst* inst = get_inst(inst_id);
int id_activate = -1;
switch (inst->type)
{
case CHAR:
case ANY:
case ANYNL:
case CCLASS:
case NCCLASS:
case END:
id_activate = inst_id;
break;
case LBRA:
if (inst->u1.subid == groupId)
range.x = pos;
id_activate = inst->u2.next_id;
expanded = true;
break;
case RBRA:
if (inst->u1.subid == groupId)
range.y = pos;
id_activate = inst->u2.next_id;
expanded = true;
break;
case BOL:
if( (pos==0) || ((inst->u1.c=='^') && (dstr->at(pos-1)==(Char)'\n')) )
{
id_activate = inst->u2.next_id;
expanded = true;
}
break;
case EOL:
if( (c==0) || (inst->u1.c == '$' && c == '\n'))
{
id_activate = inst->u2.next_id;
expanded = true;
}
break;
case BOW:
{
unsigned int uni = u82u(c);
char32_t lc = (char32_t)(pos ? dstr->at(pos-1) : 0);
unsigned int luni = u82u(lc);
//bool cur_alphaNumeric = isAlphaNumeric(c);
//bool last_alphaNumeric = ( (pos==0) ? false : isAlphaNumeric((char32_t)dstr->at(pos-1)) );
bool cur_alphaNumeric = (uni < 0x010000) && IS_ALPHANUM(unicode_flags[uni]);
bool last_alphaNumeric = (luni < 0x010000) && IS_ALPHANUM(unicode_flags[luni]);
if( cur_alphaNumeric != last_alphaNumeric )
{
id_activate = inst->u2.next_id;
expanded = true;
}
break;
}
case NBOW:
{
unsigned int uni = u82u(c);
char32_t lc = (char32_t)(pos ? dstr->at(pos-1) : 0);
unsigned int luni = u82u(lc);
//bool cur_alphaNumeric = isAlphaNumeric(c);
//bool last_alphaNumeric = ( (pos==0) ? false : isAlphaNumeric((char32_t)dstr->at(pos-1)) );
bool cur_alphaNumeric = (uni < 0x010000) && IS_ALPHANUM(unicode_flags[uni]);
bool last_alphaNumeric = (luni < 0x010000) && IS_ALPHANUM(unicode_flags[luni]);
if( cur_alphaNumeric == last_alphaNumeric )
{
id_activate = inst->u2.next_id;
expanded = true;
}
break;
}
case OR:
jnk.list2->activate(inst->u1.right_id, range.x, range.y);
id_activate = inst->u2.left_id;
expanded = true;
break;
}
if (id_activate >= 0)
jnk.list2->activate(id_activate, range.x, range.y);
}
swaplist(jnk.list1, jnk.list2);
} while (expanded);
// execute, only CHAR, ANY, ANYNL, CCLASS, NCCLASS, END left now
jnk.list2->reset();
for (short i = 0; i < jnk.list1->size; i++)
{
int inst_id = (int)jnk.list1->inst_ids[i];
int2 &range = jnk.list1->ranges[i];
const Reinst* inst = get_inst(inst_id);
int id_activate = -1;
switch (inst->type)
{
case CHAR:
if (inst->u1.c == c)
id_activate = inst->u2.next_id;
break;
case ANY:
if (c != '\n')
id_activate = inst->u2.next_id;
break;
case ANYNL:
id_activate = inst->u2.next_id;
break;
case CCLASS:
{
dreclass cls(unicode_flags);
get_class(inst->u1.cls_id,cls);
if( cls.is_match(c) )
id_activate = inst->u2.next_id;
//int numCls = 0;
//char32_t* cls = get_class(inst->u1.cls_id,numCls);
//for( int i=0; i < numCls; i += 2 )
//{
// if( (c >= cls[i]) && (c <= cls[i+1]) )
// {
// id_activate = inst->u2.next_id;
// break;
// }
//}
break;
}
case NCCLASS:
{
dreclass cls(unicode_flags);
get_class(inst->u1.cls_id,cls);
if( !cls.is_match(c) )
id_activate = inst->u2.next_id;
//int numCls = 0;
//char32_t* cls = get_class(inst->u1.cls_id,numCls);
//int i=0;
//for( ; i < numCls; i += 2 )
// if( c >= cls[i] && c <= cls[i+1] )
// break;
//if( i == numCls )
// id_activate = inst->u2.next_id;
break;
}
case END:
match = 1;
begin = range.x;
end = groupId==0? pos : range.y;
goto BreakFor;
}
if (id_activate >= 0)
jnk.list2->activate(id_activate, range.x, range.y);
}
BreakFor:
++pos;
++itr;
swaplist(jnk.list1, jnk.list2);
checkstart = jnk.list1->size > 0 ? 0 : 1;
}
while (c && (jnk.list1->size>0 || match == 0));
return match;
}
//
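// Notes on the entry points below: contains() scans the whole string
// (end = chars_count); match() passes end = 1 so the start state is only
// activated at position 0, i.e. the match is anchored at the beginning;
// find() reports the matched range through begin/end (both set to -1 on failure);
// extract() passes col+1 as the group id so regexec returns the range of that
// capture group, starting the search at the supplied begin. The overloads that
// take an idx use the per-string slots carved out of relists_mem (when it was
// allocated) instead of the stack-local Relist pair, calling set_listsize() to
// initialize them in place.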
__device__ int dreprog::contains( custring_view* dstr )
{
Reljunk jnk;
jnk.starttype = 0;
jnk.startchar = 0;
int type = get_inst(startinst_id)->type;
if( type == CHAR || type == BOL )
{
jnk.starttype = type;
jnk.startchar = get_inst(startinst_id)->u1.c;
}
Relist relist[2];
jnk.list1 = relist;
jnk.list2 = relist + 1;
int begin=0, end=dstr->chars_count();
int rtn = regexec(dstr,jnk,begin,end);
return rtn;
}
__device__ int dreprog::contains( unsigned int idx, custring_view* dstr )
{
Reljunk jnk;
jnk.starttype = 0;
jnk.startchar = 0;
int type = get_inst(startinst_id)->type;
if( type == CHAR || type == BOL )
{
jnk.starttype = type;
jnk.startchar = get_inst(startinst_id)->u1.c;
}
int begin=0, end=dstr->chars_count();
if( relists_mem==0 )
{
Relist relist[2];
jnk.list1 = relist;
jnk.list2 = relist + 1;
return regexec(dstr,jnk,begin,end);
}
int relsz = Relist::size_for(insts_count);
char* drel = (char*)relists_mem; // beginning of Relist buffer
drel += (idx * relsz * 2); // two Relist ptrs in Reljunk
jnk.list1 = (Relist*)drel; // first one
jnk.list2 = (Relist*)(drel + relsz); // second one
jnk.list1->set_listsize((short)insts_count); // essentially this is a
jnk.list2->set_listsize((short)insts_count); // substitute for the ctor call
return regexec(dstr,jnk,begin,end);
}
__device__ int dreprog::match( custring_view* dstr )
{
Reljunk jnk;
jnk.starttype = 0;
jnk.startchar = 0;
int type = get_inst(startinst_id)->type;
if( type == CHAR || type == BOL )
{
jnk.starttype = type;
jnk.startchar = get_inst(startinst_id)->u1.c;
}
Relist relist[2];
jnk.list1 = relist;
jnk.list2 = relist + 1;
int begin=0, end=1;
int rtn = regexec(dstr,jnk,begin,end);
return rtn;
}
__device__ int dreprog::match( unsigned int idx, custring_view* dstr )
{
Reljunk jnk;
jnk.starttype = 0;
jnk.startchar = 0;
int type = get_inst(startinst_id)->type;
if( type == CHAR || type == BOL )
{
jnk.starttype = type;
jnk.startchar = get_inst(startinst_id)->u1.c;
}
int begin=0, end=1;
if( relists_mem==0 )
{
Relist relist[2];
jnk.list1 = relist;
jnk.list2 = relist + 1;
return regexec(dstr,jnk,begin,end);
}
int relsz = Relist::size_for(insts_count);
char* drel = (char*)relists_mem; // beginning of Relist buffer
drel += (idx * relsz * 2); // two Relist ptrs in Reljunk
jnk.list1 = (Relist*)drel; // first one
jnk.list2 = (Relist*)(drel + relsz); // second one
jnk.list1->set_listsize((short)insts_count); // essentially this is a
jnk.list2->set_listsize((short)insts_count); // substitute for the ctor call
return regexec(dstr,jnk,begin,end);
}
__device__ int dreprog::find( custring_view* dstr, int& begin, int& end )
{
Reljunk jnk;
jnk.starttype = 0;
jnk.startchar = 0;
int type = get_inst(startinst_id)->type;
if( (type == CHAR) || (type == BOL) )
{
jnk.starttype = type;
jnk.startchar = get_inst(startinst_id)->u1.c;
}
Relist relist[2];
jnk.list1 = relist;
jnk.list2 = relist + 1;
int rtn = regexec(dstr,jnk,begin,end);
if( rtn <=0 )
begin = end = -1;
return rtn;
}
__device__ int dreprog::find( unsigned int idx, custring_view* dstr, int& begin, int& end )
{
Reljunk jnk;
jnk.starttype = 0;
jnk.startchar = 0;
int type = get_inst(startinst_id)->type;
if( (type == CHAR) || (type == BOL) )
{
jnk.starttype = type;
jnk.startchar = get_inst(startinst_id)->u1.c;
}
int rtn = 0;
if( relists_mem==0 )
{
Relist relist[2];
jnk.list1 = relist;
jnk.list2 = relist + 1;
rtn = regexec(dstr,jnk,begin,end);
}
else
{
int relsz = Relist::size_for(insts_count);
char* drel = (char*)relists_mem; // beginning of Relist buffer
drel += (idx * relsz * 2); // two Relist ptrs in Reljunk
jnk.list1 = (Relist*)drel; // first one
jnk.list2 = (Relist*)(drel + relsz); // second one
jnk.list1->set_listsize((short)insts_count); // essentially this is a
jnk.list2->set_listsize((short)insts_count); // substitute for the ctor call
rtn = regexec(dstr,jnk,begin,end);
}
if( rtn <=0 )
begin = end = -1;
return rtn;
}
//
__device__ int dreprog::extract( custring_view* str, int& begin, int& end, int col )
{
Reljunk jnk;
jnk.starttype = 0;
jnk.startchar = 0;
int type = get_inst(startinst_id)->type;
if( (type == CHAR) || (type == BOL) )
{
jnk.starttype = type;
jnk.startchar = get_inst(startinst_id)->u1.c;
}
Relist relist[2];
jnk.list1 = relist;
jnk.list2 = relist + 1;
end = begin + 1;
int rtn = regexec(str,jnk,begin,end, col +1);
return rtn;
}
__device__ int dreprog::extract( unsigned int idx, custring_view* dstr, int& begin, int& end, int col )
{
Reljunk jnk;
jnk.starttype = 0;
jnk.startchar = 0;
int type = get_inst(startinst_id)->type;
if( (type == CHAR) || (type == BOL) )
{
jnk.starttype = type;
jnk.startchar = get_inst(startinst_id)->u1.c;
}
end = begin + 1;
if( relists_mem==0 )
{
Relist relist[2];
jnk.list1 = relist;
jnk.list2 = relist + 1;
return regexec(dstr,jnk,begin,end,col+1);
}
int relsz = Relist::size_for(insts_count);
char* drel = (char*)relists_mem; // beginning of Relist buffer
drel += (idx * relsz * 2); // two Relist ptrs in Reljunk
jnk.list1 = (Relist*)drel; // first one
jnk.list2 = (Relist*)(drel + relsz); // second one
jnk.list1->set_listsize((short)insts_count); // essentially this is a
jnk.list2->set_listsize((short)insts_count); // substitute for the ctor call
return regexec(dstr,jnk,begin,end,col+1);
}
|
bcbe8b66ee9fe68c070df76c5235bc6c4bc20563.hip | // !!! This is a file automatically generated by hipify!!!
//This code is a modification of L1 cache benchmark from
//"Dissecting the NVIDIA Volta GPU Architecture via Microbenchmarking": https://arxiv.org/pdf/1804.06826.pdf
//This benchmark measures the maximum read bandwidth of L1 cache for 32-bit (float) reads
//This code has been tested on Volta V100 architecture
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 1024
#define THREADS_PER_SM 1024
#define BLOCKS_NUM 1
#define TOTAL_THREADS (THREADS_PER_BLOCK*BLOCKS_NUM)
#define WARP_SIZE 32
#define REPEAT_TIMES 256
#define ARRAY_SIZE 16384 //ARRAY_SIZE has to be less than L1_SIZE
#define L1_SIZE 32768 //L1 size in 32-bit words. Volta L1 size is 128KB, i.e. 32K of 32-bit
// GPU error check
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true){
if (code != hipSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
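// Measurement sketch: the first loop touches ARRAY_SIZE floats with ld.global.ca
// so the working set stays resident in L1, then REPEAT_TIMES iterations of four
// L1-resident float loads per thread are timed with the per-SM %clock counter.
// main() turns this into bytes/clk/SM as
// (REPEAT_TIMES * THREADS_PER_SM * 4 loads * 4 bytes) / (stop - start).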
__global__ void l1_bw(uint32_t *startClk, uint32_t *stopClk, float *dsink, float *posArray){
// thread index
uint32_t tid = threadIdx.x;
uint32_t uid = blockIdx.x * blockDim.x + tid;
// a register to avoid compiler optimization
float sink0 = 0;
float sink1 = 0;
float sink2 = 0;
float sink3 = 0;
// populate l1 cache to warm up
for (uint32_t i = tid; i<ARRAY_SIZE; i+=THREADS_PER_BLOCK) {
float* ptr = posArray + i;
// use ca modifier to cache the load in L1
asm volatile ("{\t\n"
".reg .f32 data;\n\t"
"ld.global.ca.f32 data, [%1];\n\t"
"add.f32 %0, data, %0;\n\t"
"}" : "+f"(sink0) : "l"(ptr) : "memory"
);
}
// synchronize all threads
asm volatile ("bar.sync 0;");
// start timing
uint32_t start = 0;
asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
// load data from l1 cache and accumulate
for(uint32_t j=0; j<REPEAT_TIMES; j++){
float* ptr = posArray + ((tid + (j*WARP_SIZE*4))%ARRAY_SIZE);
asm volatile ("{\t\n"
".reg .f32 data<4>;\n\t"
"ld.global.ca.f32 data0, [%4+0];\n\t"
"ld.global.ca.f32 data1, [%4+128];\n\t"
"ld.global.ca.f32 data2, [%4+256];\n\t"
"ld.global.ca.f32 data3, [%4+384];\n\t"
"add.f32 %0, data0, %0;\n\t"
"add.f32 %1, data1, %1;\n\t"
"add.f32 %2, data2, %2;\n\t"
"add.f32 %3, data3, %3;\n\t"
"}" : "+f"(sink0),"+f"(sink1),"+f"(sink2),"+f"(sink3) : "l"(ptr) : "memory"
);
}
// synchronize all threads
asm volatile("bar.sync 0;");
// stop timing
uint32_t stop = 0;
asm volatile("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
// write time and data back to memory
startClk[uid] = start;
stopClk[uid] = stop;
dsink[uid] = sink0+sink1+sink2+sink3;
}
int main(){
uint32_t *startClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
uint32_t *stopClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
float *posArray = (float*) malloc(ARRAY_SIZE*sizeof(float));
float *dsink = (float*) malloc(TOTAL_THREADS*sizeof(float));
uint32_t *startClk_g;
uint32_t *stopClk_g;
float *posArray_g;
float *dsink_g;
for (uint32_t i=0; i<ARRAY_SIZE; i++)
posArray[i] = (float)i;
gpuErrchk( hipMalloc(&startClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
gpuErrchk( hipMalloc(&stopClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
gpuErrchk( hipMalloc(&posArray_g, ARRAY_SIZE*sizeof(float)) );
gpuErrchk( hipMalloc(&dsink_g, TOTAL_THREADS*sizeof(float)) );
gpuErrchk( hipMemcpy(posArray_g, posArray, ARRAY_SIZE*sizeof(float), hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( l1_bw), dim3(BLOCKS_NUM),dim3(THREADS_PER_BLOCK), 0, 0, startClk_g, stopClk_g, dsink_g, posArray_g);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipMemcpy(startClk, startClk_g, TOTAL_THREADS*sizeof(uint32_t), hipMemcpyDeviceToHost) );
gpuErrchk( hipMemcpy(stopClk, stopClk_g, TOTAL_THREADS*sizeof(uint32_t), hipMemcpyDeviceToHost) );
gpuErrchk( hipMemcpy(dsink, dsink_g, TOTAL_THREADS*sizeof(float), hipMemcpyDeviceToHost) );
float bw;
bw = (float)(REPEAT_TIMES*THREADS_PER_SM*4*4)/((float)(stopClk[0]-startClk[0]));
printf("L1 bandwidth = %f (byte/clk/SM)\n", bw);
printf("Total Clk number = %u \n", stopClk[0]-startClk[0]);
return 0;
}
| bcbe8b66ee9fe68c070df76c5235bc6c4bc20563.cu | //This code is a modification of L1 cache benchmark from
//"Dissecting the NVIDIA Volta GPU Architecture via Microbenchmarking": https://arxiv.org/pdf/1804.06826.pdf
//This benchmark measures the maximum read bandwidth of L1 cache for 32-bit (float) reads
//This code has been tested on Volta V100 architecture
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define THREADS_PER_BLOCK 1024
#define THREADS_PER_SM 1024
#define BLOCKS_NUM 1
#define TOTAL_THREADS (THREADS_PER_BLOCK*BLOCKS_NUM)
#define WARP_SIZE 32
#define REPEAT_TIMES 256
#define ARRAY_SIZE 16384 //ARRAY_SIZE has to be less than L1_SIZE
#define L1_SIZE 32768 //L1 size in 32-bit words. Volta L1 size is 128KB, i.e. 32K of 32-bit
// GPU error check
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
if (code != cudaSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
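// Measurement sketch: the first loop touches ARRAY_SIZE floats with ld.global.ca
// so the working set stays resident in L1, then REPEAT_TIMES iterations of four
// L1-resident float loads per thread are timed with the per-SM %clock counter.
// main() turns this into bytes/clk/SM as
// (REPEAT_TIMES * THREADS_PER_SM * 4 loads * 4 bytes) / (stop - start).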
__global__ void l1_bw(uint32_t *startClk, uint32_t *stopClk, float *dsink, float *posArray){
// thread index
uint32_t tid = threadIdx.x;
uint32_t uid = blockIdx.x * blockDim.x + tid;
// a register to avoid compiler optimization
float sink0 = 0;
float sink1 = 0;
float sink2 = 0;
float sink3 = 0;
// populate l1 cache to warm up
for (uint32_t i = tid; i<ARRAY_SIZE; i+=THREADS_PER_BLOCK) {
float* ptr = posArray + i;
// use ca modifier to cache the load in L1
asm volatile ("{\t\n"
".reg .f32 data;\n\t"
"ld.global.ca.f32 data, [%1];\n\t"
"add.f32 %0, data, %0;\n\t"
"}" : "+f"(sink0) : "l"(ptr) : "memory"
);
}
// synchronize all threads
asm volatile ("bar.sync 0;");
// start timing
uint32_t start = 0;
asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
// load data from l1 cache and accumulate
for(uint32_t j=0; j<REPEAT_TIMES; j++){
float* ptr = posArray + ((tid + (j*WARP_SIZE*4))%ARRAY_SIZE);
asm volatile ("{\t\n"
".reg .f32 data<4>;\n\t"
"ld.global.ca.f32 data0, [%4+0];\n\t"
"ld.global.ca.f32 data1, [%4+128];\n\t"
"ld.global.ca.f32 data2, [%4+256];\n\t"
"ld.global.ca.f32 data3, [%4+384];\n\t"
"add.f32 %0, data0, %0;\n\t"
"add.f32 %1, data1, %1;\n\t"
"add.f32 %2, data2, %2;\n\t"
"add.f32 %3, data3, %3;\n\t"
"}" : "+f"(sink0),"+f"(sink1),"+f"(sink2),"+f"(sink3) : "l"(ptr) : "memory"
);
}
// synchronize all threads
asm volatile("bar.sync 0;");
// stop timing
uint32_t stop = 0;
asm volatile("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
// write time and data back to memory
startClk[uid] = start;
stopClk[uid] = stop;
dsink[uid] = sink0+sink1+sink2+sink3;
}
int main(){
uint32_t *startClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
uint32_t *stopClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
float *posArray = (float*) malloc(ARRAY_SIZE*sizeof(float));
float *dsink = (float*) malloc(TOTAL_THREADS*sizeof(float));
uint32_t *startClk_g;
uint32_t *stopClk_g;
float *posArray_g;
float *dsink_g;
for (uint32_t i=0; i<ARRAY_SIZE; i++)
posArray[i] = (float)i;
gpuErrchk( cudaMalloc(&startClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
gpuErrchk( cudaMalloc(&stopClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
gpuErrchk( cudaMalloc(&posArray_g, ARRAY_SIZE*sizeof(float)) );
gpuErrchk( cudaMalloc(&dsink_g, TOTAL_THREADS*sizeof(float)) );
gpuErrchk( cudaMemcpy(posArray_g, posArray, ARRAY_SIZE*sizeof(float), cudaMemcpyHostToDevice) );
l1_bw<<<BLOCKS_NUM,THREADS_PER_BLOCK>>>(startClk_g, stopClk_g, dsink_g, posArray_g);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaMemcpy(startClk, startClk_g, TOTAL_THREADS*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
gpuErrchk( cudaMemcpy(stopClk, stopClk_g, TOTAL_THREADS*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
gpuErrchk( cudaMemcpy(dsink, dsink_g, TOTAL_THREADS*sizeof(float), cudaMemcpyDeviceToHost) );
float bw;
bw = (float)(REPEAT_TIMES*THREADS_PER_SM*4*4)/((float)(stopClk[0]-startClk[0]));
printf("L1 bandwidth = %f (byte/clk/SM)\n", bw);
printf("Total Clk number = %u \n", stopClk[0]-startClk[0]);
return 0;
}
|
05ccd50d19b4e1a5f662b3e23db9f7f4b5d53ba7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes)
{
int h = (in_h + pad - size) / stride + 1;
int w = (in_w + pad - size) / stride + 1;
int c = in_c;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n) return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -pad / 2;
int h_offset = -pad / 2;
int out_index = j + w*(i + h*(k + c*b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for (l = 0; l < size; ++l) {
for (m = 0; m < size; ++m) {
int cur_h = h_offset + i*stride + l;
int cur_w = w_offset + j*stride + m;
int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
int valid = (cur_h >= 0 && cur_h < in_h &&
cur_w >= 0 && cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
indexes[out_index] = max_i;
} | 05ccd50d19b4e1a5f662b3e23db9f7f4b5d53ba7.cu | #include "includes.h"
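// One thread per output element: the flat thread id is decoded into output
// column j, row i, channel k and batch b; the size x size window (shifted by
// -pad/2) is scanned over the input, and both the maximum value and the flat
// input index that produced it are written out -- the stored index is
// presumably what the corresponding backward pass uses to route gradients.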
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes)
{
int h = (in_h + pad - size) / stride + 1;
int w = (in_w + pad - size) / stride + 1;
int c = in_c;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n) return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -pad / 2;
int h_offset = -pad / 2;
int out_index = j + w*(i + h*(k + c*b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for (l = 0; l < size; ++l) {
for (m = 0; m < size; ++m) {
int cur_h = h_offset + i*stride + l;
int cur_w = w_offset + j*stride + m;
int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
int valid = (cur_h >= 0 && cur_h < in_h &&
cur_w >= 0 && cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
indexes[out_index] = max_i;
} |
2fe92ef78cb30bac648abf09f791874d33e68d32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <math.h>
#include <hiprand/hiprand_kernel.h>
#define GET_BLOB_ADDRESS(ptr, offset) (&((ptr)[(offset)/sizeof((ptr)[0])]))
#define GET_ARRAY_CAPACITY(ptr) (((long *)(ptr))[0])
#define GET_ARRAY_LENGTH(ptr) (((long *)(ptr))[1])
#define GET_ARRAY_BODY(ptr) (&((ptr)[128/sizeof((ptr)[0])]))
#define SET_ARRAY_CAPACITY(ptr, val) { (((long *)(ptr))[0]) = (val); }
#define SET_ARRAY_LENGTH(ptr, val) { (((long *)(ptr))[1]) = (val); }
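// Monte Carlo estimate of pi: each thread draws one point (x, y) uniformly in
// (-1, 1] x (-1, 1] and writes 1 if it lies inside the unit circle, 0 otherwise;
// summing the flags (see SparkGPUPi_reduce) gives hits/total ~= pi/4, so the
// host can recover pi as 4 * hits / total.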
__global__ void SparkGPUPi_map(const int *input, int *output, long size) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
hiprandState_t s;
int seed = 1;
hiprand_init(seed, idx, 0, &s);
float x = hiprand_uniform(&s) * 2 - 1;
float y = hiprand_uniform(&s) * 2 - 1;
if (x * x + y * y < 1) {
output[idx] = 1;
} else {
output[idx] = 0;
}
}
__global__ void SparkGPUPi_reduce(int *input, int *output, long size, int stage, int totalStages) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
const int jump = 64 * 256;
if (stage == 0) {
assert(jump == blockDim.x * gridDim.x);
int result = 0;
for (long i = ix; i < size; i += jump) {
result += input[i];
}
input[ix] = result;
} else if (ix == 0) {
int result = 0;
for (long i = 0; i < jump; ++i) {
result += input[i];
}
output[0] = result;
}
}
__device__ double sdotvv(const double * __restrict__ x, const double * __restrict__ y, int n) {
double ans = 0.0;
for(int i = 0; i < n; i++) {
ans += x[i] * y[i];
}
return ans;
}
__device__ void dmulvs(double *result, const double * __restrict__ x, double c, int n) {
for(int i = 0; i < n; i++) {
result[i] = x[i] * c;
}
}
__device__ void map(double *result, const double * __restrict__ x, double y, const double * __restrict__ w, int n) {
dmulvs(result, x, (1 / (1 + exp(-y * (sdotvv(w, x, n)))) - 1) * y, n);
}
__global__ void SparkGPULR_map(const long * __restrict__ inputX, const double * __restrict__ inputY, const double * __restrict__ inputBlob, long *output, double *outputBlob, long size, const double * __restrict__ inputW) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < size) {
// copy int array
long offset = inputX[ix];
const double *inArray = GET_BLOB_ADDRESS(inputBlob, offset);
const long capacity = GET_ARRAY_CAPACITY(inArray);
const long length = GET_ARRAY_LENGTH(inArray);
const double * inArrayBody = GET_ARRAY_BODY(inArray);
double *outArray = GET_BLOB_ADDRESS(outputBlob, offset);
double *outArrayBody = GET_ARRAY_BODY(outArray);
map(outArrayBody, inArrayBody, inputY[ix], inputW, length);
output[ix] = offset;
SET_ARRAY_CAPACITY(outArray, capacity);
SET_ARRAY_LENGTH(outArray, length);
}
}
#define WARPSIZE 32
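// atomicAdd for doubles emulated with a 64-bit compare-and-swap loop: the
// current value is reinterpreted as unsigned long long, the new sum is swapped
// in only if the word is unchanged, and the loop retries otherwise. (A native
// double atomicAdd only exists on newer GPU architectures, which is presumably
// why this fallback is used here.)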
__device__ inline double atomicAddDouble(double *address, double val) {
unsigned long long int *address_as_ull = (unsigned long long int *)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ inline double __shfl_double(double d, int lane) {
// Split the double number into 2 32b registers.
int lo, hi;
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(d));
// Shuffle the two 32b registers.
lo = __shfl(lo, lane);
hi = __shfl(hi, lane);
// Recreate the 64b number.
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d) : "r"(lo), "r"(hi));
return d;
}
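// Warp-level sum using register shuffles: each step adds the value held by the
// lane `offset` positions away (modulo the warp size), so after log2(WARPSIZE)
// steps every lane should hold the sum over all 32 lanes without touching
// shared memory. Callers then let only lane 0 of each warp publish the result
// via atomicAddDouble.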
__device__ inline double warpReduceSum(double val) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
for (int offset = WARPSIZE / 2; offset > 0; offset /= 2) {
val += __shfl_double(val, (i + offset) % WARPSIZE);
}
return val;
}
__device__ inline double4 __shfl_double4(double4 d, int lane) {
// Split the double number into 2 32b registers.
int lox, loy, loz, low, hix, hiy, hiz, hiw;
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lox), "=r"(hix) : "d"(d.x));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(loy), "=r"(hiy) : "d"(d.y));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(loz), "=r"(hiz) : "d"(d.z));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(low), "=r"(hiw) : "d"(d.w));
// Shuffle the two 32b registers.
lox = __shfl(lox, lane);
hix = __shfl(hix, lane);
loy = __shfl(loy, lane);
hiy = __shfl(hiy, lane);
loz = __shfl(loz, lane);
hiz = __shfl(hiz, lane);
low = __shfl(low, lane);
hiw = __shfl(hiw, lane);
// Recreate the 64b number.
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.x) : "r"(lox), "r"(hix));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.y) : "r"(loy), "r"(hiy));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.z) : "r"(loz), "r"(hiz));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.w) : "r"(low), "r"(hiw));
return d;
}
__device__ inline double4 warpReduceVSum(double4 val4) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
for (int offset = WARPSIZE / 2; offset > 0; offset /= 2) {
double4 shiftedVal4 = __shfl_double4(val4, (i + offset) % WARPSIZE);
val4.x += shiftedVal4.x;
val4.y += shiftedVal4.y;
val4.z += shiftedVal4.z;
val4.w += shiftedVal4.w;
}
return val4;
}
__device__ double* deviceReduceKernel(const long * __restrict__ input, const double * __restrict__ inputBlob, double *out, long i, long n) {
double sum = 0;
for (long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) {
const long offset = input[idx];
const double * __restrict__ inArray = GET_BLOB_ADDRESS(inputBlob, offset);
const double * __restrict__ inArrayBody = GET_ARRAY_BODY(inArray);
sum += inArrayBody[i];
}
sum = warpReduceSum(sum);
if ((threadIdx.x & (WARPSIZE - 1)) == 0) {
atomicAddDouble(out, sum);
}
return out;
}
__device__ void deviceReduceArrayKernal(const long * __restrict__ input, const double * __restrict__ inputBlob, double *outputArrayBody, long length, long n) {
long i = 0;
// unrolled version
while ((length - i) >= 4) {
double4 sum4;
sum4.x = 0; sum4.y = 0; sum4.z = 0; sum4.w = 0;
for (long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) {
const long offset = input[idx];
const double * __restrict__ inArray = GET_BLOB_ADDRESS(inputBlob, offset);
const double * __restrict__ inArrayBody = GET_ARRAY_BODY(inArray);
sum4.x += inArrayBody[i];
sum4.y += inArrayBody[i+1];
sum4.z += inArrayBody[i+2];
sum4.w += inArrayBody[i+3];
}
sum4 = warpReduceVSum(sum4);
double *outx = &outputArrayBody[i];
double *outy = &outputArrayBody[i+1];
double *outz = &outputArrayBody[i+2];
double *outw = &outputArrayBody[i+3];
if ((threadIdx.x & (WARPSIZE - 1)) == 0) {
atomicAddDouble(outx, sum4.x);
atomicAddDouble(outy, sum4.y);
atomicAddDouble(outz, sum4.z);
atomicAddDouble(outw, sum4.w);
}
i += 4;
}
for (; i < length; i++) {
deviceReduceKernel(input, inputBlob, &outputArrayBody[i], i, n);
}
}
__global__ void SparkGPULR_reduce(const long * __restrict__ input, const double * __restrict__ inputBlob, long *output, double *outputBlob, long size, int stage, int totalStages) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
#if (__CUDA_ARCH__ >= 300)
if ((stage == 0) && (idx < size)) {
const double * __restrict__ inArray = GET_BLOB_ADDRESS(inputBlob, input[idx]);
const long inArrayCapacity = GET_ARRAY_CAPACITY(inArray);
const long inArrayLength = GET_ARRAY_LENGTH(inArray);
output[0] = 0;
double *outArray = GET_BLOB_ADDRESS(outputBlob, output[0]);
double *outArrayBody = GET_ARRAY_BODY(outArray);
if (idx < inArrayLength) {
outArrayBody[idx] = 0;
}
deviceReduceArrayKernal(input, inputBlob, outArrayBody, inArrayLength, size);
SET_ARRAY_CAPACITY(outArray, inArrayCapacity);
SET_ARRAY_LENGTH(outArray, inArrayLength);
}
#else
if ((stage == 0) && (idx == 0)) {
output[idx] = 0;
double *outArray = GET_BLOB_ADDRESS(outputBlob, output[idx]);
double *outArrayBody = GET_ARRAY_BODY(outArray);
long capacity = 0, length = 0;
for (long i = 0; i < size; i++) {
long offset = input[i];
const double *inArray = GET_BLOB_ADDRESS(inputBlob, offset);
capacity = GET_ARRAY_CAPACITY(inArray);
length = GET_ARRAY_LENGTH(inArray);
const double * __restrict__ inArrayBody = GET_ARRAY_BODY(inArray);
if (i == 0) {
for (long j = 0; j < length; j++) {
outArrayBody[j] = 0;
}
}
for (long j = 0; j < length; j++) {
outArrayBody[j] += inArrayBody[j];
}
}
SET_ARRAY_CAPACITY(outArray, capacity);
SET_ARRAY_LENGTH(outArray, length);
}
#endif
}
__device__ double* deviceReduceKernelj(double * inArray, double *out, long i, long n, long length) {
double sum = 0;
double *inArrayBody;
for (long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) {
inArrayBody = &inArray[idx*length];
sum += inArrayBody[i];
}
sum = warpReduceSum(sum);
if ((threadIdx.x & (WARPSIZE - 1)) == 0) {
atomicAddDouble(out, sum);
}
return out;
}
__device__ void deviceReduceArrayKernelj(double * inArray, double *outputArrayBody, long length, long n) {
long i = 0;
double *inArrayBody;
// unrolled version
while ((length - i) >= 4) {
double4 sum4;
sum4.x = 0; sum4.y = 0; sum4.z = 0; sum4.w = 0;
for (long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) {
inArrayBody = &inArray[idx*length];
sum4.x += inArrayBody[i];
sum4.y += inArrayBody[i+1];
sum4.z += inArrayBody[i+2];
sum4.w += inArrayBody[i+3];
}
sum4 = warpReduceVSum(sum4);
if ((threadIdx.x & (WARPSIZE - 1)) == 0) {
double *outx = &outputArrayBody[i];
double *outy = &outputArrayBody[i+1];
double *outz = &outputArrayBody[i+2];
double *outw = &outputArrayBody[i+3];
atomicAddDouble(outx, sum4.x);
atomicAddDouble(outy, sum4.y);
atomicAddDouble(outz, sum4.z);
atomicAddDouble(outw, sum4.w);
}
i += 4;
}
for (; i < length; i++) {
deviceReduceKernelj(inArray, &outputArrayBody[i], i, n, length);
}
}
extern "C"
__global__
void blockReduce(int *count, double *data, double * result, int *user_D) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
#if (__CUDA_ARCH__ >= 300)
if (idx < *count)
deviceReduceArrayKernelj(data, result, *user_D, *count);
#else
printf("not supported");
#endif
}
extern "C"
__global__ void
mapAll(int *count, double *x, double *y, double *result, double *w, int *user_D) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < *count)
map(&result[idx * *user_D], &x[idx * *user_D ], y[idx],w, *user_D);
}
extern "C"
__global__
void dsblockReduce(int count, double *data, double * result, int user_D) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
#if (__CUDA_ARCH__ >= 300)
if (idx < count)
deviceReduceArrayKernelj(data, result, user_D, count);
#else
printf("not supported");
#endif
}
extern "C"
__global__ void
dsmapAll(int count, double *x, double *y, double *result, double *w, int user_D) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < count)
map(&result[idx * user_D], &x[idx * user_D ], y[idx],w, user_D);
}
| 2fe92ef78cb30bac648abf09f791874d33e68d32.cu |
#include <assert.h>
#include <math.h>
#include <curand_kernel.h>
#define GET_BLOB_ADDRESS(ptr, offset) (&((ptr)[(offset)/sizeof((ptr)[0])]))
#define GET_ARRAY_CAPACITY(ptr) (((long *)(ptr))[0])
#define GET_ARRAY_LENGTH(ptr) (((long *)(ptr))[1])
#define GET_ARRAY_BODY(ptr) (&((ptr)[128/sizeof((ptr)[0])]))
#define SET_ARRAY_CAPACITY(ptr, val) { (((long *)(ptr))[0]) = (val); }
#define SET_ARRAY_LENGTH(ptr, val) { (((long *)(ptr))[1]) = (val); }
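// Monte Carlo estimate of pi: each thread draws one point (x, y) uniformly in
// (-1, 1] x (-1, 1] and writes 1 if it lies inside the unit circle, 0 otherwise;
// summing the flags (see SparkGPUPi_reduce) gives hits/total ~= pi/4, so the
// host can recover pi as 4 * hits / total.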
__global__ void SparkGPUPi_map(const int *input, int *output, long size) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
curandState s;
int seed = 1;
curand_init(seed, idx, 0, &s);
float x = curand_uniform(&s) * 2 - 1;
float y = curand_uniform(&s) * 2 - 1;
if (x * x + y * y < 1) {
output[idx] = 1;
} else {
output[idx] = 0;
}
}
__global__ void SparkGPUPi_reduce(int *input, int *output, long size, int stage, int totalStages) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
const int jump = 64 * 256;
if (stage == 0) {
assert(jump == blockDim.x * gridDim.x);
int result = 0;
for (long i = ix; i < size; i += jump) {
result += input[i];
}
input[ix] = result;
} else if (ix == 0) {
int result = 0;
for (long i = 0; i < jump; ++i) {
result += input[i];
}
output[0] = result;
}
}
__device__ double sdotvv(const double * __restrict__ x, const double * __restrict__ y, int n) {
double ans = 0.0;
for(int i = 0; i < n; i++) {
ans += x[i] * y[i];
}
return ans;
}
__device__ void dmulvs(double *result, const double * __restrict__ x, double c, int n) {
for(int i = 0; i < n; i++) {
result[i] = x[i] * c;
}
}
__device__ void map(double *result, const double * __restrict__ x, double y, const double * __restrict__ w, int n) {
dmulvs(result, x, (1 / (1 + exp(-y * (sdotvv(w, x, n)))) - 1) * y, n);
}
__global__ void SparkGPULR_map(const long * __restrict__ inputX, const double * __restrict__ inputY, const double * __restrict__ inputBlob, long *output, double *outputBlob, long size, const double * __restrict__ inputW) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < size) {
// copy int array
long offset = inputX[ix];
const double *inArray = GET_BLOB_ADDRESS(inputBlob, offset);
const long capacity = GET_ARRAY_CAPACITY(inArray);
const long length = GET_ARRAY_LENGTH(inArray);
const double * inArrayBody = GET_ARRAY_BODY(inArray);
double *outArray = GET_BLOB_ADDRESS(outputBlob, offset);
double *outArrayBody = GET_ARRAY_BODY(outArray);
map(outArrayBody, inArrayBody, inputY[ix], inputW, length);
output[ix] = offset;
SET_ARRAY_CAPACITY(outArray, capacity);
SET_ARRAY_LENGTH(outArray, length);
}
}
#define WARPSIZE 32
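// atomicAdd for doubles emulated with a 64-bit compare-and-swap loop: the
// current value is reinterpreted as unsigned long long, the new sum is swapped
// in only if the word is unchanged, and the loop retries otherwise. (A native
// double atomicAdd only exists on newer GPU architectures, which is presumably
// why this fallback is used here.)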
__device__ inline double atomicAddDouble(double *address, double val) {
unsigned long long int *address_as_ull = (unsigned long long int *)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ inline double __shfl_double(double d, int lane) {
// Split the double number into 2 32b registers.
int lo, hi;
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(d));
// Shuffle the two 32b registers.
lo = __shfl(lo, lane);
hi = __shfl(hi, lane);
// Recreate the 64b number.
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d) : "r"(lo), "r"(hi));
return d;
}
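// Warp-level sum using register shuffles: each step adds the value held by the
// lane `offset` positions away (modulo the warp size), so after log2(WARPSIZE)
// steps every lane should hold the sum over all 32 lanes without touching
// shared memory. Callers then let only lane 0 of each warp publish the result
// via atomicAddDouble.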
__device__ inline double warpReduceSum(double val) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
for (int offset = WARPSIZE / 2; offset > 0; offset /= 2) {
val += __shfl_double(val, (i + offset) % WARPSIZE);
}
return val;
}
__device__ inline double4 __shfl_double4(double4 d, int lane) {
// Split the double number into 2 32b registers.
int lox, loy, loz, low, hix, hiy, hiz, hiw;
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lox), "=r"(hix) : "d"(d.x));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(loy), "=r"(hiy) : "d"(d.y));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(loz), "=r"(hiz) : "d"(d.z));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(low), "=r"(hiw) : "d"(d.w));
// Shuffle the two 32b registers.
lox = __shfl(lox, lane);
hix = __shfl(hix, lane);
loy = __shfl(loy, lane);
hiy = __shfl(hiy, lane);
loz = __shfl(loz, lane);
hiz = __shfl(hiz, lane);
low = __shfl(low, lane);
hiw = __shfl(hiw, lane);
// Recreate the 64b number.
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.x) : "r"(lox), "r"(hix));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.y) : "r"(loy), "r"(hiy));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.z) : "r"(loz), "r"(hiz));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.w) : "r"(low), "r"(hiw));
return d;
}
__device__ inline double4 warpReduceVSum(double4 val4) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
for (int offset = WARPSIZE / 2; offset > 0; offset /= 2) {
double4 shiftedVal4 = __shfl_double4(val4, (i + offset) % WARPSIZE);
val4.x += shiftedVal4.x;
val4.y += shiftedVal4.y;
val4.z += shiftedVal4.z;
val4.w += shiftedVal4.w;
}
return val4;
}
__device__ double* deviceReduceKernel(const long * __restrict__ input, const double * __restrict__ inputBlob, double *out, long i, long n) {
double sum = 0;
for (long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) {
const long offset = input[idx];
const double * __restrict__ inArray = GET_BLOB_ADDRESS(inputBlob, offset);
const double * __restrict__ inArrayBody = GET_ARRAY_BODY(inArray);
sum += inArrayBody[i];
}
sum = warpReduceSum(sum);
if ((threadIdx.x & (WARPSIZE - 1)) == 0) {
atomicAddDouble(out, sum);
}
return out;
}
__device__ void deviceReduceArrayKernal(const long * __restrict__ input, const double * __restrict__ inputBlob, double *outputArrayBody, long length, long n) {
long i = 0;
// unrolled version
while ((length - i) >= 4) {
double4 sum4;
sum4.x = 0; sum4.y = 0; sum4.z = 0; sum4.w = 0;
for (long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) {
const long offset = input[idx];
const double * __restrict__ inArray = GET_BLOB_ADDRESS(inputBlob, offset);
const double * __restrict__ inArrayBody = GET_ARRAY_BODY(inArray);
sum4.x += inArrayBody[i];
sum4.y += inArrayBody[i+1];
sum4.z += inArrayBody[i+2];
sum4.w += inArrayBody[i+3];
}
sum4 = warpReduceVSum(sum4);
double *outx = &outputArrayBody[i];
double *outy = &outputArrayBody[i+1];
double *outz = &outputArrayBody[i+2];
double *outw = &outputArrayBody[i+3];
if ((threadIdx.x & (WARPSIZE - 1)) == 0) {
atomicAddDouble(outx, sum4.x);
atomicAddDouble(outy, sum4.y);
atomicAddDouble(outz, sum4.z);
atomicAddDouble(outw, sum4.w);
}
i += 4;
}
for (; i < length; i++) {
deviceReduceKernel(input, inputBlob, &outputArrayBody[i], i, n);
}
}
__global__ void SparkGPULR_reduce(const long * __restrict__ input, const double * __restrict__ inputBlob, long *output, double *outputBlob, long size, int stage, int totalStages) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
#if (__CUDA_ARCH__ >= 300)
if ((stage == 0) && (idx < size)) {
const double * __restrict__ inArray = GET_BLOB_ADDRESS(inputBlob, input[idx]);
const long inArrayCapacity = GET_ARRAY_CAPACITY(inArray);
const long inArrayLength = GET_ARRAY_LENGTH(inArray);
output[0] = 0;
double *outArray = GET_BLOB_ADDRESS(outputBlob, output[0]);
double *outArrayBody = GET_ARRAY_BODY(outArray);
if (idx < inArrayLength) {
outArrayBody[idx] = 0;
}
deviceReduceArrayKernal(input, inputBlob, outArrayBody, inArrayLength, size);
SET_ARRAY_CAPACITY(outArray, inArrayCapacity);
SET_ARRAY_LENGTH(outArray, inArrayLength);
}
#else
if ((stage == 0) && (idx == 0)) {
output[idx] = 0;
double *outArray = GET_BLOB_ADDRESS(outputBlob, output[idx]);
double *outArrayBody = GET_ARRAY_BODY(outArray);
long capacity = 0, length = 0;
for (long i = 0; i < size; i++) {
long offset = input[i];
const double *inArray = GET_BLOB_ADDRESS(inputBlob, offset);
capacity = GET_ARRAY_CAPACITY(inArray);
length = GET_ARRAY_LENGTH(inArray);
const double * __restrict__ inArrayBody = GET_ARRAY_BODY(inArray);
if (i == 0) {
for (long j = 0; j < length; j++) {
outArrayBody[j] = 0;
}
}
for (long j = 0; j < length; j++) {
outArrayBody[j] += inArrayBody[j];
}
}
SET_ARRAY_CAPACITY(outArray, capacity);
SET_ARRAY_LENGTH(outArray, length);
}
#endif
}
__device__ double* deviceReduceKernelj(double * inArray, double *out, long i, long n, long length) {
double sum = 0;
double *inArrayBody;
for (long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) {
inArrayBody = &inArray[idx*length];
sum += inArrayBody[i];
}
sum = warpReduceSum(sum);
if ((threadIdx.x & (WARPSIZE - 1)) == 0) {
atomicAddDouble(out, sum);
}
return out;
}
__device__ void deviceReduceArrayKernelj(double * inArray, double *outputArrayBody, long length, long n) {
long i = 0;
double *inArrayBody;
// unrolled version
while ((length - i) >= 4) {
double4 sum4;
sum4.x = 0; sum4.y = 0; sum4.z = 0; sum4.w = 0;
for (long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) {
inArrayBody = &inArray[idx*length];
sum4.x += inArrayBody[i];
sum4.y += inArrayBody[i+1];
sum4.z += inArrayBody[i+2];
sum4.w += inArrayBody[i+3];
}
sum4 = warpReduceVSum(sum4);
if ((threadIdx.x & (WARPSIZE - 1)) == 0) {
double *outx = &outputArrayBody[i];
double *outy = &outputArrayBody[i+1];
double *outz = &outputArrayBody[i+2];
double *outw = &outputArrayBody[i+3];
atomicAddDouble(outx, sum4.x);
atomicAddDouble(outy, sum4.y);
atomicAddDouble(outz, sum4.z);
atomicAddDouble(outw, sum4.w);
}
i += 4;
}
for (; i < length; i++) {
deviceReduceKernelj(inArray, &outputArrayBody[i], i, n, length);
}
}
extern "C"
__global__
void blockReduce(int *count, double *data, double * result, int *user_D) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
#if (__CUDA_ARCH__ >= 300)
if (idx < *count)
deviceReduceArrayKernelj(data, result, *user_D, *count);
#else
printf("not supported");
#endif
}
extern "C"
__global__ void
mapAll(int *count, double *x, double *y, double *result, double *w, int *user_D) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < *count)
map(&result[idx * *user_D], &x[idx * *user_D ], y[idx],w, *user_D);
}
extern "C"
__global__
void dsblockReduce(int count, double *data, double * result, int user_D) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
#if (__CUDA_ARCH__ >= 300)
if (idx < count)
deviceReduceArrayKernelj(data, result, user_D, count);
#else
printf("not supported");
#endif
}
extern "C"
__global__ void
dsmapAll(int count, double *x, double *y, double *result, double *w, int user_D) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < count)
map(&result[idx * user_D], &x[idx * user_D ], y[idx],w, user_D);
}
|
0f7822e9cd2f611c0034e0cfdc040d9db01b4d09.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <iomanip>
#include <math.h>
#include <helper_cuda.h>
#include <complex>
#include <hipfft.h>
#include <cufinufft.h>
#include "../cuspreadinterp.h"
#include "../cudeconvolve.h"
#include "../memtransfer.h"
using namespace std;
int cufinufft3d1_exec(CUCPX* d_c, CUCPX* d_fk, cufinufft_plan *d_plan)
/*
3D Type-1 NUFFT
This function is called in "exec" stage (See ../cufinufft.cu).
It includes (copied from doc in finufft library)
Step 1: spread data to oversampled regular mesh using kernel
Step 2: compute FFT on uniform mesh
Step 3: deconvolve by division of each Fourier mode independently by the
Fourier series coefficient of the kernel.
Melody Shih 07/25/19
*/
{
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
int blksize;
int ier;
CUCPX* d_fkstart;
CUCPX* d_cstart;
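	// Loop over the ntransf transforms in batches of at most maxbatchsize:
	// each pass points d_plan->c and d_plan->fk at the slice for the current
	// batch, then runs spread -> FFT -> deconvolve on that batch.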
for(int i=0; i*d_plan->maxbatchsize < d_plan->ntransf; i++){
blksize = min(d_plan->ntransf - i*d_plan->maxbatchsize,
d_plan->maxbatchsize);
d_cstart = d_c + i*d_plan->maxbatchsize*d_plan->M;
d_fkstart = d_fk + i*d_plan->maxbatchsize*d_plan->ms*d_plan->mt*
d_plan->mu;
d_plan->c = d_cstart;
d_plan->fk = d_fkstart;
checkCudaErrors(hipMemset(d_plan->fw,0,d_plan->maxbatchsize*
d_plan->nf1*d_plan->nf2*d_plan->nf3*sizeof(CUCPX)));
#ifdef TIME
float milliseconds = 0;
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tInitialize fw\t\t %.3g s\n", milliseconds/1000);
#endif
// Step 1: Spread
hipEventRecord(start);
ier = cuspread3d(d_plan, blksize);
if(ier != 0 ){
printf("error: cuspread3d, method(%d)\n", d_plan->opts.gpu_method);
return 0;
}
#ifdef TIME
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tSpread (%d)\t\t %.3g s\n", milliseconds/1000,
d_plan->opts.gpu_method);
#endif
// Step 2: FFT
hipEventRecord(start);
CUFFT_EX(d_plan->fftplan, d_plan->fw, d_plan->fw, d_plan->iflag);
#ifdef TIME
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tCUFFT Exec\t\t %.3g s\n", milliseconds/1000);
#endif
// Step 3: deconvolve and shuffle
hipEventRecord(start);
cudeconvolve3d(d_plan, blksize);
#ifdef TIME
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tDeconvolve\t\t %.3g s\n", milliseconds/1000);
#endif
}
return ier;
}
int cufinufft3d2_exec(CUCPX* d_c, CUCPX* d_fk, cufinufft_plan *d_plan)
/*
3D Type-2 NUFFT
This function is called in "exec" stage (See ../cufinufft.cu).
It includes (copied from doc in finufft library)
Step 1: deconvolve (amplify) each Fourier mode, dividing by kernel
Fourier coeff
Step 2: compute FFT on uniform mesh
Step 3: interpolate data to regular mesh
Melody Shih 07/25/19
*/
{
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
int blksize;
int ier;
CUCPX* d_fkstart;
CUCPX* d_cstart;
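	// Same batching scheme as type 1: point the plan at the current slice of
	// d_c / d_fk, then run deconvolve -> FFT -> interpolate for each batch of
	// at most maxbatchsize transforms.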
for(int i=0; i*d_plan->maxbatchsize < d_plan->ntransf; i++){
blksize = min(d_plan->ntransf - i*d_plan->maxbatchsize,
d_plan->maxbatchsize);
d_cstart = d_c + i*d_plan->maxbatchsize*d_plan->M;
d_fkstart = d_fk + i*d_plan->maxbatchsize*d_plan->ms*d_plan->mt*
d_plan->mu;
d_plan->c = d_cstart;
d_plan->fk = d_fkstart;
// Step 1: amplify Fourier coeffs fk and copy into upsampled array fw
hipEventRecord(start);
cudeconvolve3d(d_plan, blksize);
#ifdef TIME
float milliseconds = 0;
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tAmplify & Copy fktofw\t %.3g s\n", milliseconds/1000);
#endif
// Step 2: FFT
hipEventRecord(start);
hipDeviceSynchronize();
CUFFT_EX(d_plan->fftplan, d_plan->fw, d_plan->fw, d_plan->iflag);
#ifdef TIME
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tCUFFT Exec\t\t %.3g s\n", milliseconds/1000);
#endif
		// Step 3: interpolate (unspread) from the fine grid to the nonuniform points
hipEventRecord(start);
ier = cuinterp3d(d_plan, blksize);
if(ier != 0 ){
printf("error: cuinterp3d, method(%d)\n", d_plan->opts.gpu_method);
return 0;
}
#ifdef TIME
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tUnspread (%d)\t\t %.3g s\n", milliseconds/1000,
d_plan->opts.gpu_method);
#endif
hipEventRecord(start);
#if 0
if(d_plan->nstreams != 1)
hipDeviceSynchronize();
#endif
}
return ier;
}
#if 0
int cufinufft3d_plan(int M, int ms, int mt, int mu, int ntransf,
int maxbatchsize, int iflag, const cufinufft_opts opts,
cufinufft_plan *d_plan)
{
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
int ier;
//ier=cufinufft_default_opts(opts,eps,upsampfac);
int nf1 = (int) d_plan->opts.gpu_upsampfac*ms;
int nf2 = (int) d_plan->opts.gpu_upsampfac*mt;
int nf3 = (int) d_plan->opts.gpu_upsampfac*mu;
int fftsign = (iflag>=0) ? 1 : -1;
d_plan->ms = ms;
d_plan->mt = mt;
d_plan->mu = mu;
d_plan->nf1 = nf1;
d_plan->nf2 = nf2;
d_plan->nf3 = nf3;
d_plan->M = M;
d_plan->iflag = fftsign;
d_plan->ntransf = ntransf;
d_plan->maxbatchsize = maxbatchsize;
#ifdef INFO
printf("[info ] 3d: (ms,mt,mu)=(%d,%d) (nf1, nf2, nf3)=(%d,%d,%d) nj=%d, ntransform = %d\n",
ms, mt, mu, d_plan->nf1, d_plan->nf2, d_plan->nf3, d_plan->M,
d_plan->ntransf);
#endif
// this may move to gpu
CNTime timer; timer.start();
FLT *fwkerhalf1 = (FLT*)malloc(sizeof(FLT)*(nf1/2+1));
FLT *fwkerhalf2 = (FLT*)malloc(sizeof(FLT)*(nf2/2+1));
FLT *fwkerhalf3 = (FLT*)malloc(sizeof(FLT)*(nf3/2+1));
onedim_fseries_kernel(nf1, fwkerhalf1, opts);
onedim_fseries_kernel(nf2, fwkerhalf2, opts);
onedim_fseries_kernel(nf3, fwkerhalf3, opts);
#ifdef DEBUG
printf("[time ] \tkernel fser (ns=%d):\t %.3g s\n", d_plan->opts.gpu_nspread,timer.elapsedsec());
#endif
hipEventRecord(start);
ier = allocgpumemory3d(opts, d_plan);
#ifdef DEBUG
float milliseconds = 0;
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tAllocate GPU memory\t %.3g s\n", milliseconds/1000);
#endif
hipEventRecord(start);
checkCudaErrors(hipMemcpy(d_plan->fwkerhalf1,fwkerhalf1,(nf1/2+1)*
sizeof(FLT),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_plan->fwkerhalf2,fwkerhalf2,(nf2/2+1)*
sizeof(FLT),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_plan->fwkerhalf3,fwkerhalf3,(nf3/2+1)*
sizeof(FLT),hipMemcpyHostToDevice));
#ifdef DEBUG
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tCopy fwkerhalf1,2,3 HtoD %.3g s\n", milliseconds/1000);
#endif
hipEventRecord(start);
hipfftHandle fftplan;
int dim = 3;
int n[] = {nf3, nf2, nf1};
int inembed[] = {nf3, nf2, nf1};
int istride = 1;
hipfftPlanMany(&fftplan,dim,n,inembed,istride,inembed[0]*inembed[1]*inembed[2],
inembed,istride,inembed[0]*inembed[1]*inembed[2],CUFFT_TYPE,
maxbatchsize);
d_plan->fftplan = fftplan;
#ifdef DEBUG
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tCUFFT Plan\t\t %.3g s\n", milliseconds/1000);
#endif
return ier;
}
int cufinufft3d_setNUpts(FLT* h_kx, FLT* h_ky, FLT *h_kz, cufinufft_opts &opts, cufinufft_plan *d_plan)
{
int M = d_plan->M;
int nf1 = d_plan->nf1;
int nf2 = d_plan->nf2;
int nf3 = d_plan->nf3;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Copy memory to device
hipEventRecord(start);
checkCudaErrors(hipMemcpy(d_plan->kx,h_kx,d_plan->M*sizeof(FLT),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_plan->ky,h_ky,d_plan->M*sizeof(FLT),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_plan->kz,h_kz,d_plan->M*sizeof(FLT),hipMemcpyHostToDevice));
#ifdef TIME
float milliseconds = 0;
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tCopy kx,ky,kz HtoD\t %.3g s\n", milliseconds/1000);
#endif
if(d_plan->opts.gpu_pirange==1){
hipEventRecord(start);
hipLaunchKernelGGL(( RescaleXY_3d), dim3((M+1024-1)/1024), dim3(1024), 0, 0, M,nf1,nf2,nf3,d_plan->kx,
d_plan->ky,d_plan->kz);
d_plan->opts.gpu_pirange=0;
#ifdef SPREADTIME
float milliseconds;
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
printf("[time ]\tRescaleXY_3d\t\t %.3g ms\n", milliseconds);
#endif
}
hipEventRecord(start);
if(d_plan->opts.gpu_method == 5){
int ier = cuspread3d_subprob_prop(nf1,nf2,nf3,M,opts,d_plan);
if(ier != 0 ){
printf("error: cuspread3d_subprob_prop, method(%d)\n", d_plan->opts.gpu_method);
return 0;
}
}
if(d_plan->opts.gpu_method == 1 || d_plan->opts.gpu_method == 2 || d_plan->opts.gpu_method == 3){
int ier = cuspread3d_blockgather_prop(nf1,nf2,nf3,M,opts,d_plan);
if(ier != 0 ){
printf("error: cuspread3d_blockgather_prop, method(%d)\n", d_plan->opts.gpu_method);
return 0;
}
}
#ifdef TIME
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tSetup Subprob properties %.3g s\n",
milliseconds/1000);
#endif
return 0;
}
#endif
| 0f7822e9cd2f611c0034e0cfdc040d9db01b4d09.cu | #include <iostream>
#include <iomanip>
#include <math.h>
#include <helper_cuda.h>
#include <complex>
#include <cufft.h>
#include <cufinufft.h>
#include "../cuspreadinterp.h"
#include "../cudeconvolve.h"
#include "../memtransfer.h"
using namespace std;
int cufinufft3d1_exec(CUCPX* d_c, CUCPX* d_fk, cufinufft_plan *d_plan)
/*
3D Type-1 NUFFT
This function is called in "exec" stage (See ../cufinufft.cu).
It includes (copied from doc in finufft library)
Step 1: spread data to oversampled regular mesh using kernel
Step 2: compute FFT on uniform mesh
Step 3: deconvolve by division of each Fourier mode independently by the
Fourier series coefficient of the kernel.
Melody Shih 07/25/19
*/
{
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
int blksize;
int ier;
CUCPX* d_fkstart;
CUCPX* d_cstart;
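	// Loop over the ntransf transforms in batches of at most maxbatchsize:
	// each pass points d_plan->c and d_plan->fk at the slice for the current
	// batch, then runs spread -> FFT -> deconvolve on that batch.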
for(int i=0; i*d_plan->maxbatchsize < d_plan->ntransf; i++){
blksize = min(d_plan->ntransf - i*d_plan->maxbatchsize,
d_plan->maxbatchsize);
d_cstart = d_c + i*d_plan->maxbatchsize*d_plan->M;
d_fkstart = d_fk + i*d_plan->maxbatchsize*d_plan->ms*d_plan->mt*
d_plan->mu;
d_plan->c = d_cstart;
d_plan->fk = d_fkstart;
checkCudaErrors(cudaMemset(d_plan->fw,0,d_plan->maxbatchsize*
d_plan->nf1*d_plan->nf2*d_plan->nf3*sizeof(CUCPX)));
#ifdef TIME
float milliseconds = 0;
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tInitialize fw\t\t %.3g s\n", milliseconds/1000);
#endif
// Step 1: Spread
cudaEventRecord(start);
ier = cuspread3d(d_plan, blksize);
if(ier != 0 ){
printf("error: cuspread3d, method(%d)\n", d_plan->opts.gpu_method);
return 0;
}
#ifdef TIME
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tSpread (%d)\t\t %.3g s\n", milliseconds/1000,
d_plan->opts.gpu_method);
#endif
// Step 2: FFT
cudaEventRecord(start);
CUFFT_EX(d_plan->fftplan, d_plan->fw, d_plan->fw, d_plan->iflag);
#ifdef TIME
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tCUFFT Exec\t\t %.3g s\n", milliseconds/1000);
#endif
// Step 3: deconvolve and shuffle
cudaEventRecord(start);
cudeconvolve3d(d_plan, blksize);
#ifdef TIME
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tDeconvolve\t\t %.3g s\n", milliseconds/1000);
#endif
}
return ier;
}
int cufinufft3d2_exec(CUCPX* d_c, CUCPX* d_fk, cufinufft_plan *d_plan)
/*
3D Type-2 NUFFT
This function is called in "exec" stage (See ../cufinufft.cu).
It includes (copied from doc in finufft library)
Step 1: deconvolve (amplify) each Fourier mode, dividing by kernel
Fourier coeff
Step 2: compute FFT on uniform mesh
Step 3: interpolate data to regular mesh
Melody Shih 07/25/19
*/
{
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int blksize;
int ier;
CUCPX* d_fkstart;
CUCPX* d_cstart;
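	// Same batching scheme as type 1: point the plan at the current slice of
	// d_c / d_fk, then run deconvolve -> FFT -> interpolate for each batch of
	// at most maxbatchsize transforms.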
for(int i=0; i*d_plan->maxbatchsize < d_plan->ntransf; i++){
blksize = min(d_plan->ntransf - i*d_plan->maxbatchsize,
d_plan->maxbatchsize);
d_cstart = d_c + i*d_plan->maxbatchsize*d_plan->M;
d_fkstart = d_fk + i*d_plan->maxbatchsize*d_plan->ms*d_plan->mt*
d_plan->mu;
d_plan->c = d_cstart;
d_plan->fk = d_fkstart;
// Step 1: amplify Fourier coeffs fk and copy into upsampled array fw
cudaEventRecord(start);
cudeconvolve3d(d_plan, blksize);
#ifdef TIME
float milliseconds = 0;
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tAmplify & Copy fktofw\t %.3g s\n", milliseconds/1000);
#endif
// Step 2: FFT
cudaEventRecord(start);
cudaDeviceSynchronize();
CUFFT_EX(d_plan->fftplan, d_plan->fw, d_plan->fw, d_plan->iflag);
#ifdef TIME
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tCUFFT Exec\t\t %.3g s\n", milliseconds/1000);
#endif
		// Step 3: interpolate (unspread) from the fine grid to the nonuniform points
cudaEventRecord(start);
ier = cuinterp3d(d_plan, blksize);
if(ier != 0 ){
printf("error: cuinterp3d, method(%d)\n", d_plan->opts.gpu_method);
return 0;
}
#ifdef TIME
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tUnspread (%d)\t\t %.3g s\n", milliseconds/1000,
d_plan->opts.gpu_method);
#endif
cudaEventRecord(start);
#if 0
if(d_plan->nstreams != 1)
cudaDeviceSynchronize();
#endif
}
return ier;
}
#if 0
int cufinufft3d_plan(int M, int ms, int mt, int mu, int ntransf,
int maxbatchsize, int iflag, const cufinufft_opts opts,
cufinufft_plan *d_plan)
{
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int ier;
//ier=cufinufft_default_opts(opts,eps,upsampfac);
int nf1 = (int) d_plan->opts.gpu_upsampfac*ms;
int nf2 = (int) d_plan->opts.gpu_upsampfac*mt;
int nf3 = (int) d_plan->opts.gpu_upsampfac*mu;
int fftsign = (iflag>=0) ? 1 : -1;
d_plan->ms = ms;
d_plan->mt = mt;
d_plan->mu = mu;
d_plan->nf1 = nf1;
d_plan->nf2 = nf2;
d_plan->nf3 = nf3;
d_plan->M = M;
d_plan->iflag = fftsign;
d_plan->ntransf = ntransf;
d_plan->maxbatchsize = maxbatchsize;
#ifdef INFO
printf("[info ] 3d: (ms,mt,mu)=(%d,%d) (nf1, nf2, nf3)=(%d,%d,%d) nj=%d, ntransform = %d\n",
ms, mt, mu, d_plan->nf1, d_plan->nf2, d_plan->nf3, d_plan->M,
d_plan->ntransf);
#endif
// this may move to gpu
CNTime timer; timer.start();
FLT *fwkerhalf1 = (FLT*)malloc(sizeof(FLT)*(nf1/2+1));
FLT *fwkerhalf2 = (FLT*)malloc(sizeof(FLT)*(nf2/2+1));
FLT *fwkerhalf3 = (FLT*)malloc(sizeof(FLT)*(nf3/2+1));
onedim_fseries_kernel(nf1, fwkerhalf1, opts);
onedim_fseries_kernel(nf2, fwkerhalf2, opts);
onedim_fseries_kernel(nf3, fwkerhalf3, opts);
#ifdef DEBUG
printf("[time ] \tkernel fser (ns=%d):\t %.3g s\n", d_plan->opts.gpu_nspread,timer.elapsedsec());
#endif
cudaEventRecord(start);
ier = allocgpumemory3d(opts, d_plan);
#ifdef DEBUG
float milliseconds = 0;
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tAllocate GPU memory\t %.3g s\n", milliseconds/1000);
#endif
cudaEventRecord(start);
checkCudaErrors(cudaMemcpy(d_plan->fwkerhalf1,fwkerhalf1,(nf1/2+1)*
sizeof(FLT),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_plan->fwkerhalf2,fwkerhalf2,(nf2/2+1)*
sizeof(FLT),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_plan->fwkerhalf3,fwkerhalf3,(nf3/2+1)*
sizeof(FLT),cudaMemcpyHostToDevice));
#ifdef DEBUG
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tCopy fwkerhalf1,2,3 HtoD %.3g s\n", milliseconds/1000);
#endif
cudaEventRecord(start);
cufftHandle fftplan;
int dim = 3;
int n[] = {nf3, nf2, nf1};
int inembed[] = {nf3, nf2, nf1};
int istride = 1;
cufftPlanMany(&fftplan,dim,n,inembed,istride,inembed[0]*inembed[1]*inembed[2],
inembed,istride,inembed[0]*inembed[1]*inembed[2],CUFFT_TYPE,
maxbatchsize);
d_plan->fftplan = fftplan;
#ifdef DEBUG
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tCUFFT Plan\t\t %.3g s\n", milliseconds/1000);
#endif
return ier;
}
int cufinufft3d_setNUpts(FLT* h_kx, FLT* h_ky, FLT *h_kz, cufinufft_opts &opts, cufinufft_plan *d_plan)
{
int M = d_plan->M;
int nf1 = d_plan->nf1;
int nf2 = d_plan->nf2;
int nf3 = d_plan->nf3;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Copy memory to device
cudaEventRecord(start);
checkCudaErrors(cudaMemcpy(d_plan->kx,h_kx,d_plan->M*sizeof(FLT),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_plan->ky,h_ky,d_plan->M*sizeof(FLT),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_plan->kz,h_kz,d_plan->M*sizeof(FLT),cudaMemcpyHostToDevice));
#ifdef TIME
float milliseconds = 0;
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tCopy kx,ky,kz HtoD\t %.3g s\n", milliseconds/1000);
#endif
if(d_plan->opts.gpu_pirange==1){
cudaEventRecord(start);
RescaleXY_3d<<<(M+1024-1)/1024, 1024>>>(M,nf1,nf2,nf3,d_plan->kx,
d_plan->ky,d_plan->kz);
d_plan->opts.gpu_pirange=0;
#ifdef SPREADTIME
float milliseconds;
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("[time ]\tRescaleXY_3d\t\t %.3g ms\n", milliseconds);
#endif
}
cudaEventRecord(start);
if(d_plan->opts.gpu_method == 5){
int ier = cuspread3d_subprob_prop(nf1,nf2,nf3,M,opts,d_plan);
if(ier != 0 ){
printf("error: cuspread3d_subprob_prop, method(%d)\n", d_plan->opts.gpu_method);
return 0;
}
}
if(d_plan->opts.gpu_method == 1 || d_plan->opts.gpu_method == 2 || d_plan->opts.gpu_method == 3){
int ier = cuspread3d_blockgather_prop(nf1,nf2,nf3,M,opts,d_plan);
if(ier != 0 ){
printf("error: cuspread3d_blockgather_prop, method(%d)\n", d_plan->opts.gpu_method);
return 0;
}
}
#ifdef TIME
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tSetup Subprob properties %.3g s\n",
milliseconds/1000);
#endif
return 0;
}
#endif
|
adae9e44ecc1f38bdbd031880f8877c58fe6cb62.hip | // !!! This is a file automatically generated by hipify!!!
#include <cupy/type_dispatcher.cuh>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/tuple.h>
#include <thrust/execution_policy.h>
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
// This is used to avoid a problem with constexpr in function declarations introduced in
// CUDA 11.2; MSVC 15 does not fully support it, so we need a dummy constexpr declaration
// that is provided by this header. However, optional.h is only available
// starting with CUDA 10.1.
#include <thrust/optional.h>
#ifdef _MSC_VER
#define THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS constexpr
#else
#define THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif
#endif
#include "cupy_thrust.h"
using namespace thrust;
#if CUPY_USE_HIP
typedef hipStream_t hipStream_t;
namespace cuda {
using thrust::hip::par;
}
#endif // #if CUPY_USE_HIP
extern "C" char *cupy_malloc(void *, size_t);
extern "C" void cupy_free(void *, char *);
class cupy_allocator {
private:
void* memory;
public:
typedef char value_type;
cupy_allocator(void* memory) : memory(memory) {}
char *allocate(size_t num_bytes) {
return cupy_malloc(memory, num_bytes);
}
void deallocate(char *ptr, size_t n) {
cupy_free(memory, ptr);
}
};
/*
* ------------------------------------- Minimum boilerplate for NumPy compatibility --------------------------------------
* We need a specialized operator< here in order to match the NumPy behavior:
* "The sort order for complex numbers is lexicographic. If both the real and imaginary parts are non-nan then the order is
* determined by the real parts except when they are equal, in which case the order is determined by the imaginary parts.
*
* In numpy versions >= 1.4.0 nan values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
* where R is a non-nan real value. Complex values with the same nan placements are sorted according to the non-nan part if
* it exists. Non-nan values are sorted as before."
* Ref: https://numpy.org/doc/stable/reference/generated/numpy.sort.html
*/
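/*
 * Example of the ascending order these rules produce (an illustration derived
 * from the rule above, not an exhaustive specification):
 *     1+2j < 2+0j < 2+3j < 2+nanj < nan+1j < nan+nanj
 */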
template <typename T>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2))
THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif
bool _tuple_less(const tuple<size_t, T>& lhs,
const tuple<size_t, T>& rhs) {
const size_t& lhs_k = lhs.template get<0>();
const size_t& rhs_k = rhs.template get<0>();
const T& lhs_v = lhs.template get<1>();
const T& rhs_v = rhs.template get<1>();
const less<T> _less;
// tuple's comparison rule: compare the 1st member, then 2nd, then 3rd, ...,
// which should be respected
if (lhs_k < rhs_k) {
return true;
} else if (lhs_k == rhs_k) {
// same key, compare values
// note that we can't rely on native operator< due to NaN, so we rely on
// thrust::less() to be specialized shortly
return _less(lhs_v, rhs_v);
} else {
return false;
}
}
/*
* ********** complex numbers **********
* We need to specialize thrust::less because obviously we can't overload operator< for complex numbers...
*/
template <typename T>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2))
THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif
bool _cmp_less(const T& lhs, const T& rhs) {
bool lhsRe = isnan(lhs.real());
bool lhsIm = isnan(lhs.imag());
bool rhsRe = isnan(rhs.real());
bool rhsIm = isnan(rhs.imag());
// neither side has nan
if (!lhsRe && !lhsIm && !rhsRe && !rhsIm) {
return lhs < rhs;
}
// one side has nan, and the other does not
if (!lhsRe && !lhsIm && (rhsRe || rhsIm)) {
return true;
}
if ((lhsRe || lhsIm) && !rhsRe && !rhsIm) {
return false;
}
// pick 2 from 3 possibilities (R + nanj, nan + Rj, nan + nanj)
if (lhsRe && !rhsRe) {
return false;
}
if (!lhsRe && rhsRe) {
return true;
}
if (lhsIm && !rhsIm) {
return false;
}
if (!lhsIm && rhsIm) {
return true;
}
// pick 1 from 3 and compare the numerical values (nan+nan*I compares to itself as false)
return (((lhsIm && rhsIm) && (lhs.real() < rhs.real())) || ((lhsRe && rhsRe) && (lhs.imag() < rhs.imag())));
}
// specialize thrust::less for single complex
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool less<complex<float>>::operator() (
const complex<float>& lhs, const complex<float>& rhs) const {
return _cmp_less<complex<float>>(lhs, rhs);
}
// specialize thrust::less for double complex
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool less<complex<double>>::operator() (
const complex<double>& lhs, const complex<double>& rhs) const {
return _cmp_less<complex<double>>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, complex<float>>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool less< tuple<size_t, complex<float>> >::operator() (
const tuple<size_t, complex<float>>& lhs, const tuple<size_t, complex<float>>& rhs) const {
return _tuple_less<complex<float>>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, complex<double>>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool less< tuple<size_t, complex<double>> >::operator() (
const tuple<size_t, complex<double>>& lhs, const tuple<size_t, complex<double>>& rhs) const {
return _tuple_less<complex<double>>(lhs, rhs);
}
/*
* ********** real numbers (templates) **********
* We need to specialize thrust::less because obviously we can't overload operator< for floating point numbers...
*/
template <typename T>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2))
THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif
bool _real_less(const T& lhs, const T& rhs) {
#if (defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__))
if (isnan(lhs)) {
return false;
} else if (isnan(rhs)) {
return true;
} else {
return lhs < rhs;
}
#else
    return false; // This will never be executed on the host
#endif
}
/*
* ********** real numbers (specializations for single & double precisions) **********
*/
// specialize thrust::less for float
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool less<float>::operator() (
const float& lhs, const float& rhs) const {
return _real_less<float>(lhs, rhs);
}
// specialize thrust::less for double
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool less<double>::operator() (
const double& lhs, const double& rhs) const {
return _real_less<double>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, float>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool less< tuple<size_t, float> >::operator() (
const tuple<size_t, float>& lhs, const tuple<size_t, float>& rhs) const {
return _tuple_less<float>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, double>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool less< tuple<size_t, double> >::operator() (
const tuple<size_t, double>& lhs, const tuple<size_t, double>& rhs) const {
return _tuple_less<double>(lhs, rhs);
}
/*
* ********** real numbers (specializations for half precision) **********
*/
#if ((__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
&& (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))) || (defined(__HIPCC__) || defined(CUPY_USE_HIP))
// it seems Thrust doesn't care about the code path on the host, so we just need a wrapper for the device
__host__ __device__ __forceinline__ bool isnan(const __half& x) {
#if (defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__))
return __hisnan(x);
#else
return false; // This will never be called on the host
#endif
}
// specialize thrust::less for __half
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool less<__half>::operator() (const __half& lhs, const __half& rhs) const {
return _real_less<__half>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, __half>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool less< tuple<size_t, __half> >::operator() (
const tuple<size_t, __half>& lhs, const tuple<size_t, __half>& rhs) const {
return _tuple_less<__half>(lhs, rhs);
}
#endif // include cupy_fp16.h
/*
* -------------------------------------------------- end of boilerplate --------------------------------------------------
*/
/*
* sort
*/
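// _sort sorts a C-contiguous array along its last axis. For ndim == 1 a plain
// stable_sort suffices; otherwise each element is paired with its row index
// (idx / shape[ndim-1]) and the (row, value) tuples are stable-sorted, so rows
// never mix and every row ends up sorted independently in a single pass.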
struct _sort {
template <typename T>
__forceinline__ void operator()(void *data_start, size_t *keys_start,
const std::vector<ptrdiff_t>& shape, intptr_t stream,
void* memory) {
size_t ndim = shape.size();
ptrdiff_t size;
device_ptr<T> dp_data_first, dp_data_last;
device_ptr<size_t> dp_keys_first, dp_keys_last;
hipStream_t stream_ = (hipStream_t)stream;
cupy_allocator alloc(memory);
// Compute the total size of the array.
size = shape[0];
for (size_t i = 1; i < ndim; ++i) {
size *= shape[i];
}
dp_data_first = device_pointer_cast(static_cast<T*>(data_start));
dp_data_last = device_pointer_cast(static_cast<T*>(data_start) + size);
if (ndim == 1) {
stable_sort(cuda::par(alloc).on(stream_), dp_data_first, dp_data_last, less<T>());
} else {
// Generate key indices.
dp_keys_first = device_pointer_cast(keys_start);
dp_keys_last = device_pointer_cast(keys_start + size);
transform(cuda::par(alloc).on(stream_),
make_counting_iterator<size_t>(0),
make_counting_iterator<size_t>(size),
make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
dp_keys_first,
divides<size_t>());
stable_sort(
cuda::par(alloc).on(stream_),
make_zip_iterator(make_tuple(dp_keys_first, dp_data_first)),
make_zip_iterator(make_tuple(dp_keys_last, dp_data_last)),
less< tuple<size_t, T> >());
}
}
};
/*
* lexsort
*/
template <typename T>
class elem_less {
public:
elem_less(const T *data):_data(data) {}
__device__ __forceinline__ bool operator()(size_t i, size_t j) const {
return less<T>()(_data[i], _data[j]);
}
private:
const T *_data;
};
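// _lexsort argsorts by k keys using repeated stable sorts: because each pass is
// stable, ties under the key sorted last (i = k-1) keep the order established
// by the earlier passes, so the key sorted last acts as the primary key.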
struct _lexsort {
template <typename T>
__forceinline__ void operator()(size_t *idx_start, void *keys_start, size_t k,
size_t n, intptr_t stream, void *memory) {
/* idx_start is the beginning of the output array where the indexes that
would sort the data will be placed. The original contents of idx_start
will be destroyed. */
device_ptr<size_t> dp_first = device_pointer_cast(idx_start);
device_ptr<size_t> dp_last = device_pointer_cast(idx_start + n);
hipStream_t stream_ = (hipStream_t)stream;
cupy_allocator alloc(memory);
sequence(cuda::par(alloc).on(stream_), dp_first, dp_last);
for (size_t i = 0; i < k; ++i) {
T *key_start = static_cast<T*>(keys_start) + i * n;
stable_sort(
cuda::par(alloc).on(stream_),
dp_first,
dp_last,
elem_less<T>(key_start)
);
}
}
};
/*
* argsort
*/
struct _argsort {
template <typename T>
__forceinline__ void operator()(size_t *idx_start, void *data_start,
void *keys_start,
const std::vector<ptrdiff_t>& shape,
intptr_t stream, void *memory) {
/* idx_start is the beginning of the output array where the indexes that
would sort the data will be placed. The original contents of idx_start
will be destroyed. */
size_t ndim = shape.size();
ptrdiff_t size;
hipStream_t stream_ = (hipStream_t)stream;
cupy_allocator alloc(memory);
device_ptr<size_t> dp_idx_first, dp_idx_last;
device_ptr<T> dp_data_first, dp_data_last;
device_ptr<size_t> dp_keys_first, dp_keys_last;
// Compute the total size of the data array.
size = shape[0];
for (size_t i = 1; i < ndim; ++i) {
size *= shape[i];
}
// Cast device pointers of data.
dp_data_first = device_pointer_cast(static_cast<T*>(data_start));
dp_data_last = device_pointer_cast(static_cast<T*>(data_start) + size);
// Generate an index sequence.
dp_idx_first = device_pointer_cast(static_cast<size_t*>(idx_start));
dp_idx_last = device_pointer_cast(static_cast<size_t*>(idx_start) + size);
transform(cuda::par(alloc).on(stream_),
make_counting_iterator<size_t>(0),
make_counting_iterator<size_t>(size),
make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
dp_idx_first,
modulus<size_t>());
if (ndim == 1) {
// Sort the index sequence by data.
stable_sort_by_key(cuda::par(alloc).on(stream_),
dp_data_first,
dp_data_last,
dp_idx_first);
} else {
// Generate key indices.
dp_keys_first = device_pointer_cast(static_cast<size_t*>(keys_start));
dp_keys_last = device_pointer_cast(static_cast<size_t*>(keys_start) + size);
transform(cuda::par(alloc).on(stream_),
make_counting_iterator<size_t>(0),
make_counting_iterator<size_t>(size),
make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
dp_keys_first,
divides<size_t>());
stable_sort_by_key(
cuda::par(alloc).on(stream_),
make_zip_iterator(make_tuple(dp_keys_first, dp_data_first)),
make_zip_iterator(make_tuple(dp_keys_last, dp_data_last)),
dp_idx_first);
}
}
};
//
// APIs exposed to CuPy
//
/* -------- sort -------- */
void thrust_sort(int dtype_id, void *data_start, size_t *keys_start,
const std::vector<ptrdiff_t>& shape, intptr_t stream, void* memory) {
_sort op;
return dtype_dispatcher(dtype_id, op, data_start, keys_start, shape, stream, memory);
}
/* -------- lexsort -------- */
void thrust_lexsort(int dtype_id, size_t *idx_start, void *keys_start, size_t k,
size_t n, intptr_t stream, void *memory) {
_lexsort op;
return dtype_dispatcher(dtype_id, op, idx_start, keys_start, k, n, stream, memory);
}
/* -------- argsort -------- */
void thrust_argsort(int dtype_id, size_t *idx_start, void *data_start,
void *keys_start, const std::vector<ptrdiff_t>& shape, intptr_t stream, void *memory) {
_argsort op;
return dtype_dispatcher(dtype_id, op, idx_start, data_start, keys_start, shape,
stream, memory);
}
| adae9e44ecc1f38bdbd031880f8877c58fe6cb62.cu | #include <cupy/type_dispatcher.cuh>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/tuple.h>
#include <thrust/execution_policy.h>
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
// This is used to avoid a problem with constexpr in function declarations introduced in
// CUDA 11.2; MSVC 15 does not fully support it, so we need a dummy constexpr declaration
// that is provided by this header. However, optional.h is only available
// starting with CUDA 10.1.
#include <thrust/optional.h>
#ifdef _MSC_VER
#define THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS constexpr
#else
#define THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif
#endif
#include "cupy_thrust.h"
using namespace thrust;
#if CUPY_USE_HIP
typedef hipStream_t cudaStream_t;
namespace cuda {
using thrust::hip::par;
}
#endif // #if CUPY_USE_HIP
extern "C" char *cupy_malloc(void *, size_t);
extern "C" void cupy_free(void *, char *);
class cupy_allocator {
private:
void* memory;
public:
typedef char value_type;
cupy_allocator(void* memory) : memory(memory) {}
char *allocate(size_t num_bytes) {
return cupy_malloc(memory, num_bytes);
}
void deallocate(char *ptr, size_t n) {
cupy_free(memory, ptr);
}
};
/*
* ------------------------------------- Minimum boilerplate for NumPy compatibility --------------------------------------
* We need a specialized operator< here in order to match the NumPy behavior:
* "The sort order for complex numbers is lexicographic. If both the real and imaginary parts are non-nan then the order is
* determined by the real parts except when they are equal, in which case the order is determined by the imaginary parts.
*
* In numpy versions >= 1.4.0 nan values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
* where R is a non-nan real value. Complex values with the same nan placements are sorted according to the non-nan part if
* it exists. Non-nan values are sorted as before."
* Ref: https://numpy.org/doc/stable/reference/generated/numpy.sort.html
*/
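/*
 * Example of the ascending order these rules produce (an illustration derived
 * from the rule above, not an exhaustive specification):
 *     1+2j < 2+0j < 2+3j < 2+nanj < nan+1j < nan+nanj
 */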
template <typename T>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2))
THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif
bool _tuple_less(const tuple<size_t, T>& lhs,
const tuple<size_t, T>& rhs) {
const size_t& lhs_k = lhs.template get<0>();
const size_t& rhs_k = rhs.template get<0>();
const T& lhs_v = lhs.template get<1>();
const T& rhs_v = rhs.template get<1>();
const less<T> _less;
// tuple's comparison rule: compare the 1st member, then 2nd, then 3rd, ...,
// which should be respected
if (lhs_k < rhs_k) {
return true;
} else if (lhs_k == rhs_k) {
// same key, compare values
// note that we can't rely on native operator< due to NaN, so we rely on
// thrust::less() to be specialized shortly
return _less(lhs_v, rhs_v);
} else {
return false;
}
}
/*
* ********** complex numbers **********
* We need to specialize thrust::less because obviously we can't overload operator< for complex numbers...
*/
template <typename T>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2))
THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif
bool _cmp_less(const T& lhs, const T& rhs) {
bool lhsRe = isnan(lhs.real());
bool lhsIm = isnan(lhs.imag());
bool rhsRe = isnan(rhs.real());
bool rhsIm = isnan(rhs.imag());
// neither side has nan
if (!lhsRe && !lhsIm && !rhsRe && !rhsIm) {
return lhs < rhs;
}
// one side has nan, and the other does not
if (!lhsRe && !lhsIm && (rhsRe || rhsIm)) {
return true;
}
if ((lhsRe || lhsIm) && !rhsRe && !rhsIm) {
return false;
}
// pick 2 from 3 possibilities (R + nanj, nan + Rj, nan + nanj)
if (lhsRe && !rhsRe) {
return false;
}
if (!lhsRe && rhsRe) {
return true;
}
if (lhsIm && !rhsIm) {
return false;
}
if (!lhsIm && rhsIm) {
return true;
}
// pick 1 from 3 and compare the numerical values (nan+nan*I compares to itself as false)
return (((lhsIm && rhsIm) && (lhs.real() < rhs.real())) || ((lhsRe && rhsRe) && (lhs.imag() < rhs.imag())));
}
// specialize thrust::less for single complex
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool less<complex<float>>::operator() (
const complex<float>& lhs, const complex<float>& rhs) const {
return _cmp_less<complex<float>>(lhs, rhs);
}
// specialize thrust::less for double complex
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool less<complex<double>>::operator() (
const complex<double>& lhs, const complex<double>& rhs) const {
return _cmp_less<complex<double>>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, complex<float>>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool less< tuple<size_t, complex<float>> >::operator() (
const tuple<size_t, complex<float>>& lhs, const tuple<size_t, complex<float>>& rhs) const {
return _tuple_less<complex<float>>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, complex<double>>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool less< tuple<size_t, complex<double>> >::operator() (
const tuple<size_t, complex<double>>& lhs, const tuple<size_t, complex<double>>& rhs) const {
return _tuple_less<complex<double>>(lhs, rhs);
}
/*
* ********** real numbers (templates) **********
* We need to specialize thrust::less because obviously we can't overload operator< for floating point numbers...
*/
template <typename T>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2))
THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif
bool _real_less(const T& lhs, const T& rhs) {
#if (defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__))
if (isnan(lhs)) {
return false;
} else if (isnan(rhs)) {
return true;
} else {
return lhs < rhs;
}
#else
    return false; // This will never be executed on the host
#endif
}
/*
* ********** real numbers (specializations for single & double precisions) **********
*/
// specialize thrust::less for float
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool less<float>::operator() (
const float& lhs, const float& rhs) const {
return _real_less<float>(lhs, rhs);
}
// specialize thrust::less for double
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool less<double>::operator() (
const double& lhs, const double& rhs) const {
return _real_less<double>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, float>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool less< tuple<size_t, float> >::operator() (
const tuple<size_t, float>& lhs, const tuple<size_t, float>& rhs) const {
return _tuple_less<float>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, double>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool less< tuple<size_t, double> >::operator() (
const tuple<size_t, double>& lhs, const tuple<size_t, double>& rhs) const {
return _tuple_less<double>(lhs, rhs);
}
/*
* ********** real numbers (specializations for half precision) **********
*/
#if ((__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
&& (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))) || (defined(__HIPCC__) || defined(CUPY_USE_HIP))
// it seems Thrust doesn't care about the code path on the host, so we just need a wrapper for the device
__host__ __device__ __forceinline__ bool isnan(const __half& x) {
#if (defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__))
return __hisnan(x);
#else
return false; // This will never be called on the host
#endif
}
// specialize thrust::less for __half
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool less<__half>::operator() (const __half& lhs, const __half& rhs) const {
return _real_less<__half>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, __half>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool less< tuple<size_t, __half> >::operator() (
const tuple<size_t, __half>& lhs, const tuple<size_t, __half>& rhs) const {
return _tuple_less<__half>(lhs, rhs);
}
#endif // include cupy_fp16.h
/*
* -------------------------------------------------- end of boilerplate --------------------------------------------------
*/
/*
* sort
*/
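// _sort sorts a C-contiguous array along its last axis. For ndim == 1 a plain
// stable_sort suffices; otherwise each element is paired with its row index
// (idx / shape[ndim-1]) and the (row, value) tuples are stable-sorted, so rows
// never mix and every row ends up sorted independently in a single pass.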
struct _sort {
template <typename T>
__forceinline__ void operator()(void *data_start, size_t *keys_start,
const std::vector<ptrdiff_t>& shape, intptr_t stream,
void* memory) {
size_t ndim = shape.size();
ptrdiff_t size;
device_ptr<T> dp_data_first, dp_data_last;
device_ptr<size_t> dp_keys_first, dp_keys_last;
cudaStream_t stream_ = (cudaStream_t)stream;
cupy_allocator alloc(memory);
// Compute the total size of the array.
size = shape[0];
for (size_t i = 1; i < ndim; ++i) {
size *= shape[i];
}
dp_data_first = device_pointer_cast(static_cast<T*>(data_start));
dp_data_last = device_pointer_cast(static_cast<T*>(data_start) + size);
if (ndim == 1) {
stable_sort(cuda::par(alloc).on(stream_), dp_data_first, dp_data_last, less<T>());
} else {
// Generate key indices.
dp_keys_first = device_pointer_cast(keys_start);
dp_keys_last = device_pointer_cast(keys_start + size);
transform(cuda::par(alloc).on(stream_),
make_counting_iterator<size_t>(0),
make_counting_iterator<size_t>(size),
make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
dp_keys_first,
divides<size_t>());
stable_sort(
cuda::par(alloc).on(stream_),
make_zip_iterator(make_tuple(dp_keys_first, dp_data_first)),
make_zip_iterator(make_tuple(dp_keys_last, dp_data_last)),
less< tuple<size_t, T> >());
}
}
};
/*
* lexsort
*/
template <typename T>
class elem_less {
public:
elem_less(const T *data):_data(data) {}
__device__ __forceinline__ bool operator()(size_t i, size_t j) const {
return less<T>()(_data[i], _data[j]);
}
private:
const T *_data;
};
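// _lexsort argsorts by k keys using repeated stable sorts: because each pass is
// stable, ties under the key sorted last (i = k-1) keep the order established
// by the earlier passes, so the key sorted last acts as the primary key.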
struct _lexsort {
template <typename T>
__forceinline__ void operator()(size_t *idx_start, void *keys_start, size_t k,
size_t n, intptr_t stream, void *memory) {
/* idx_start is the beginning of the output array where the indexes that
would sort the data will be placed. The original contents of idx_start
will be destroyed. */
device_ptr<size_t> dp_first = device_pointer_cast(idx_start);
device_ptr<size_t> dp_last = device_pointer_cast(idx_start + n);
cudaStream_t stream_ = (cudaStream_t)stream;
cupy_allocator alloc(memory);
sequence(cuda::par(alloc).on(stream_), dp_first, dp_last);
for (size_t i = 0; i < k; ++i) {
T *key_start = static_cast<T*>(keys_start) + i * n;
stable_sort(
cuda::par(alloc).on(stream_),
dp_first,
dp_last,
elem_less<T>(key_start)
);
}
}
};
/*
* argsort
*/
struct _argsort {
template <typename T>
__forceinline__ void operator()(size_t *idx_start, void *data_start,
void *keys_start,
const std::vector<ptrdiff_t>& shape,
intptr_t stream, void *memory) {
/* idx_start is the beginning of the output array where the indexes that
would sort the data will be placed. The original contents of idx_start
will be destroyed. */
size_t ndim = shape.size();
ptrdiff_t size;
cudaStream_t stream_ = (cudaStream_t)stream;
cupy_allocator alloc(memory);
device_ptr<size_t> dp_idx_first, dp_idx_last;
device_ptr<T> dp_data_first, dp_data_last;
device_ptr<size_t> dp_keys_first, dp_keys_last;
// Compute the total size of the data array.
size = shape[0];
for (size_t i = 1; i < ndim; ++i) {
size *= shape[i];
}
// Cast device pointers of data.
dp_data_first = device_pointer_cast(static_cast<T*>(data_start));
dp_data_last = device_pointer_cast(static_cast<T*>(data_start) + size);
// Generate an index sequence.
dp_idx_first = device_pointer_cast(static_cast<size_t*>(idx_start));
dp_idx_last = device_pointer_cast(static_cast<size_t*>(idx_start) + size);
transform(cuda::par(alloc).on(stream_),
make_counting_iterator<size_t>(0),
make_counting_iterator<size_t>(size),
make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
dp_idx_first,
modulus<size_t>());
if (ndim == 1) {
// Sort the index sequence by data.
stable_sort_by_key(cuda::par(alloc).on(stream_),
dp_data_first,
dp_data_last,
dp_idx_first);
} else {
// Generate key indices.
dp_keys_first = device_pointer_cast(static_cast<size_t*>(keys_start));
dp_keys_last = device_pointer_cast(static_cast<size_t*>(keys_start) + size);
transform(cuda::par(alloc).on(stream_),
make_counting_iterator<size_t>(0),
make_counting_iterator<size_t>(size),
make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
dp_keys_first,
divides<size_t>());
stable_sort_by_key(
cuda::par(alloc).on(stream_),
make_zip_iterator(make_tuple(dp_keys_first, dp_data_first)),
make_zip_iterator(make_tuple(dp_keys_last, dp_data_last)),
dp_idx_first);
}
}
};
//
// APIs exposed to CuPy
//
/* -------- sort -------- */
void thrust_sort(int dtype_id, void *data_start, size_t *keys_start,
const std::vector<ptrdiff_t>& shape, intptr_t stream, void* memory) {
_sort op;
return dtype_dispatcher(dtype_id, op, data_start, keys_start, shape, stream, memory);
}
/* -------- lexsort -------- */
void thrust_lexsort(int dtype_id, size_t *idx_start, void *keys_start, size_t k,
size_t n, intptr_t stream, void *memory) {
_lexsort op;
return dtype_dispatcher(dtype_id, op, idx_start, keys_start, k, n, stream, memory);
}
/* -------- argsort -------- */
void thrust_argsort(int dtype_id, size_t *idx_start, void *data_start,
void *keys_start, const std::vector<ptrdiff_t>& shape, intptr_t stream, void *memory) {
_argsort op;
return dtype_dispatcher(dtype_id, op, idx_start, data_start, keys_start, shape,
stream, memory);
}
|
5dd0916c32e4c0fadf0bbde411098b6a89e49e6f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "grad.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *val = NULL;
hipMalloc(&val, XSIZE*YSIZE);
int *row_ind = NULL;
hipMalloc(&row_ind, XSIZE*YSIZE);
int *col_ind = NULL;
hipMalloc(&col_ind, XSIZE*YSIZE);
float *mat_err = NULL;
hipMalloc(&mat_err, XSIZE*YSIZE);
int nnz = 1;
float *act = NULL;
hipMalloc(&act, XSIZE*YSIZE);
float *label = NULL;
hipMalloc(&label, XSIZE*YSIZE);
float *w = NULL;
hipMalloc(&w, XSIZE*YSIZE);
float learning_rate = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(grad, dim3(gridBlock), dim3(threadBlock), 0, 0, val,row_ind,col_ind,mat_err,nnz,act,label,w,learning_rate);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
	hipLaunchKernelGGL(grad, dim3(gridBlock), dim3(threadBlock), 0, 0, val,row_ind,col_ind,mat_err,nnz,act,label,w,learning_rate);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
	hipLaunchKernelGGL(grad, dim3(gridBlock), dim3(threadBlock), 0, 0, val,row_ind,col_ind,mat_err,nnz,act,label,w,learning_rate);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 5dd0916c32e4c0fadf0bbde411098b6a89e49e6f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "grad.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *val = NULL;
cudaMalloc(&val, XSIZE*YSIZE);
int *row_ind = NULL;
cudaMalloc(&row_ind, XSIZE*YSIZE);
int *col_ind = NULL;
cudaMalloc(&col_ind, XSIZE*YSIZE);
float *mat_err = NULL;
cudaMalloc(&mat_err, XSIZE*YSIZE);
int nnz = 1;
float *act = NULL;
cudaMalloc(&act, XSIZE*YSIZE);
float *label = NULL;
cudaMalloc(&label, XSIZE*YSIZE);
float *w = NULL;
cudaMalloc(&w, XSIZE*YSIZE);
float learning_rate = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
grad<<<gridBlock,threadBlock>>>(val,row_ind,col_ind,mat_err,nnz,act,label,w,learning_rate);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
grad<<<gridBlock,threadBlock>>>(val,row_ind,col_ind,mat_err,nnz,act,label,w,learning_rate);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
grad<<<gridBlock,threadBlock>>>(val,row_ind,col_ind,mat_err,nnz,act,label,w,learning_rate);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
0d639bf6a007d09544e7877e0801235c45ecfb75.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <thrust/transform.h>
#include "paddle/fluid/operators/math/inclusive_scan.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cumprod_grad_kernel.h"
#include "paddle/phi/kernels/funcs/complex_functors.h"
#include "paddle/phi/kernels/funcs/cumprod.h"
#include "paddle/phi/kernels/funcs/elementwise_functor.h"
#include "paddle/phi/kernels/funcs/for_range.h"
// NOTE(@xiongkun): use of IsComplex<>
#include "paddle/fluid/framework/data_type.h"
namespace phi {
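// Backward of cumprod along `dim`, split into per-element cases by the two
// functors below: positions with no zero of x at or before them along dim use
// dx = reversed_cumsum(dy * y) / x; the first zero position of a slice
// temporarily stores the cumprod of the preceding elements and is patched up
// later by FillFirstZeroPositionGradFunctor; positions after the first zero
// get dx = 0.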
template <typename T>
struct CumprodGradFunctorExceptFirstZero {
HOSTDEVICE CumprodGradFunctorExceptFirstZero(
const T *x,
const T *y,
const T *dy_mul_y_reversed_cumsum,
const uint8_t *zero_mask,
size_t mid_dim,
size_t inner_dim,
T *dx,
int64_t *first_zero_idx,
T *x_filled_one)
: x_(x),
y_(y),
dy_mul_y_reversed_cumsum_(dy_mul_y_reversed_cumsum),
zero_mask_(zero_mask),
mid_dim_(mid_dim),
inner_dim_(inner_dim),
dx_(dx),
first_zero_idx_(first_zero_idx),
x_filled_one_(x_filled_one) {}
HOSTDEVICE void operator()(size_t idx) const {
auto inner_idx = idx % inner_dim_;
auto outer_idx = idx / (mid_dim_ * inner_dim_);
auto mid_idx = (idx - inner_idx) / inner_dim_ % mid_dim_;
auto mask = zero_mask_[idx];
bool should_fill_one = true;
if (mask == 0) {
dx_[idx] = dy_mul_y_reversed_cumsum_[idx] / x_[idx];
if (mid_idx == mid_dim_ - 1) {
// record first zero position as -1, i.e., no zero
first_zero_idx_[outer_idx * inner_dim_ + inner_idx] = -1;
}
} else if (mid_idx > 0) { // mask > 0
if (zero_mask_[idx - inner_dim_] > 0) { // not first zero
dx_[idx] = 0;
should_fill_one = false;
} else {
// idx is the first zero position, it should be recorded
dx_[idx] = y_[idx - inner_dim_];
first_zero_idx_[outer_idx * inner_dim_ + inner_idx] = mid_idx;
}
} else { // the first zero position is index 0
dx_[idx] = 1;
first_zero_idx_[outer_idx * inner_dim_ + inner_idx] = 0;
}
x_filled_one_[idx] = should_fill_one ? 1 : x_[idx];
}
private:
const T *x_;
const T *y_;
const T *dy_mul_y_reversed_cumsum_;
const uint8_t *zero_mask_;
size_t mid_dim_;
size_t inner_dim_;
T *dx_;
int64_t *first_zero_idx_;
T *x_filled_one_;
};
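// Second pass over each (outer, inner) slice: if the slice contains a zero
// (first_zero_idx >= 0), multiply the provisional gradient stored at that first
// zero position by the matching entry of grad_value; slices without zeros are
// left untouched.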
template <typename T>
struct FillFirstZeroPositionGradFunctor {
HOSTDEVICE FillFirstZeroPositionGradFunctor(const int64_t *first_zero_idx,
const T *grad_value,
size_t mid_dim,
size_t inner_dim,
T *dx)
: first_zero_idx_(first_zero_idx),
grad_value_(grad_value),
mid_dim_(mid_dim),
inner_dim_(inner_dim),
dx_(dx) {}
HOSTDEVICE void operator()(size_t idx) const {
auto outer_idx = idx / inner_dim_;
auto inner_idx = idx % inner_dim_;
auto mid_idx = first_zero_idx_[idx];
if (mid_idx >= 0) {
auto full_idx =
outer_idx * mid_dim_ * inner_dim_ + mid_idx * inner_dim_ + inner_idx;
dx_[full_idx] *= grad_value_[full_idx];
}
}
private:
const int64_t *first_zero_idx_;
const T *grad_value_;
size_t mid_dim_;
size_t inner_dim_;
T *dx_;
};
template <typename T, typename Context>
void CumprodGradKernel(const Context &dev_ctx,
const DenseTensor &x,
const DenseTensor &out,
const DenseTensor &dout,
int dim,
DenseTensor *dx) {
const auto *y = &out;
const auto *dy = &dout;
size_t outer_dim, mid_dim, inner_dim;
GetCumprodDimInfo(x.dims(), dim, &outer_dim, &mid_dim, &inner_dim);
if (outer_dim == 0 || mid_dim == 0 || inner_dim == 0) return;
size_t numel = outer_dim * mid_dim * inner_dim;
const auto *x_data = x.data<T>();
const auto *y_data = y->data<T>();
const auto *dy_data = dy->data<T>();
auto place = dev_ctx.GetPlace();
auto *dx_data = dev_ctx.template Alloc<T>(dx);
// deal with complex
const T *x_data_deal;
const T *y_data_deal;
Allocator::AllocationPtr x_conj;
Allocator::AllocationPtr y_conj;
if (paddle::framework::IsComplex<T>::value) {
x_conj = const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(T));
auto *x_data_conj = reinterpret_cast<T *>(x_conj->ptr());
y_conj = const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(T));
auto *y_data_conj = reinterpret_cast<T *>(y_conj->ptr());
phi::funcs::ForRange<Context> for_range_x(dev_ctx, numel);
phi::funcs::ConjFunctor<T> functor_x(x_data, numel, x_data_conj);
for_range_x(functor_x);
phi::funcs::ForRange<Context> for_range_y(dev_ctx, numel);
phi::funcs::ConjFunctor<T> functor_y(y_data, numel, y_data_conj);
for_range_y(functor_y);
x_data_deal = x_data_conj;
y_data_deal = y_data_conj;
} else {
x_data_deal = x_data;
y_data_deal = y_data;
}
// Step 1: find cummax-ed zero mask of x
#ifdef PADDLE_WITH_CUDA
const auto &exec_policy = thrust::hip::par.on(dev_ctx.stream());
#else
const auto &exec_policy = thrust::hip::par.on(dev_ctx.stream());
#endif
auto zero_mask_without_cummax =
const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(uint8_t));
auto *zero_mask_without_cummax_data =
reinterpret_cast<uint8_t *>(zero_mask_without_cummax->ptr());
thrust::transform(exec_policy,
thrust::device_pointer_cast(x_data_deal),
thrust::device_pointer_cast(x_data_deal) + numel,
thrust::device_pointer_cast(zero_mask_without_cummax_data),
funcs::IsZeroFunctor<T>());
auto zero_mask = const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(uint8_t));
auto *zero_mask_data = reinterpret_cast<uint8_t *>(zero_mask->ptr());
paddle::operators::math::InclusiveScan<uint8_t, hipcub::Max>(
zero_mask_without_cummax_data,
zero_mask_data,
outer_dim,
mid_dim,
inner_dim,
static_cast<uint8_t>(0),
hipcub::Max(),
/*reverse=*/false,
dev_ctx);
zero_mask_without_cummax = nullptr;
// Step 2: calculate reversed cumsum(dy * y)
auto dy_mul_y = const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(T));
auto *dy_mul_y_data = reinterpret_cast<T *>(dy_mul_y->ptr());
thrust::transform(exec_policy,
thrust::device_pointer_cast(dy_data),
thrust::device_pointer_cast(dy_data) + numel,
thrust::device_pointer_cast(y_data_deal),
thrust::device_pointer_cast(dy_mul_y_data),
funcs::MultiplyFunctor<T>());
auto dy_mul_y_reversed_cumsum =
const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(T));
auto *dy_mul_y_reversed_cumsum_data =
reinterpret_cast<T *>(dy_mul_y_reversed_cumsum->ptr());
paddle::operators::math::InclusiveScan<T, hipcub::Sum>(
dy_mul_y_data,
dy_mul_y_reversed_cumsum_data,
outer_dim,
mid_dim,
inner_dim,
static_cast<T>(0),
hipcub::Sum(),
/*reverse=*/true,
dev_ctx);
  // Step 3: calculate the gradient value except at the first zero position.
  // The gradient value of the first zero position is filled with out[idx-1],
  // while the gradient values of the other positions are calculated
  // completely. This functor also:
  // (1) finds the first zero index, i.e., first_zero_idx_data.
  // (2) fills x_filled_one, which satisfies
  // x_filled_one[i] = x[i], i > pos
  // x_filled_one[i] = 1, i <= pos
auto first_zero_idx = const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(int64_t));
auto *first_zero_idx_data =
reinterpret_cast<int64_t *>(first_zero_idx->ptr());
auto *x_filled_one_data = dy_mul_y_data; // reuse former allocated memory
phi::funcs::ForRange<Context> for_range(dev_ctx, numel);
CumprodGradFunctorExceptFirstZero<T> functor_except_first_zero(
x_data_deal,
y_data_deal,
dy_mul_y_reversed_cumsum_data,
zero_mask_data,
mid_dim,
inner_dim,
dx_data,
first_zero_idx_data,
x_filled_one_data);
for_range(functor_except_first_zero);
// Step 4: calculate cumprod of x_filled_one
auto *x_filled_one_cumprod_data =
dy_mul_y_reversed_cumsum_data; // reuse former allocated memory
paddle::operators::math::InclusiveScan<T, funcs::MultiplyFunctor<T>>(
x_filled_one_data,
x_filled_one_cumprod_data,
outer_dim,
mid_dim,
inner_dim,
static_cast<T>(1),
funcs::MultiplyFunctor<T>(),
/*reverse=*/false,
dev_ctx);
// Step 5: calculate reversed cumsum(dy * x_filled_one_cumprod)
auto *dy_mul_x_filled_one_cumprod =
dy_mul_y_data; // reuse former allocated memory
thrust::transform(exec_policy,
thrust::device_pointer_cast(dy_data),
thrust::device_pointer_cast(dy_data) + numel,
thrust::device_pointer_cast(x_filled_one_cumprod_data),
thrust::device_pointer_cast(dy_mul_x_filled_one_cumprod),
funcs::MultiplyFunctor<T>());
auto *dy_mul_x_filled_one_cumprod_reversed_cumsum =
dy_mul_y_reversed_cumsum_data; // reuse former allocated memory
paddle::operators::math::InclusiveScan<T, hipcub::Sum>(
dy_mul_x_filled_one_cumprod,
dy_mul_x_filled_one_cumprod_reversed_cumsum,
outer_dim,
mid_dim,
inner_dim,
static_cast<T>(0),
hipcub::Sum(),
/*reverse=*/true,
dev_ctx);
// Step 6: fill zero pos gradient value
phi::funcs::ForRange<Context> for_range_fill_zero_pos_grad(
dev_ctx, outer_dim * inner_dim);
FillFirstZeroPositionGradFunctor<T> fill_first_zero_pos_grad_functor(
first_zero_idx_data,
dy_mul_x_filled_one_cumprod_reversed_cumsum,
mid_dim,
inner_dim,
dx_data);
for_range_fill_zero_pos_grad(fill_first_zero_pos_grad_functor);
}
} // namespace phi
PD_REGISTER_KERNEL(cumprod_grad,
GPU,
ALL_LAYOUT,
phi::CumprodGradKernel,
float,
double,
int,
int64_t,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
| 0d639bf6a007d09544e7877e0801235c45ecfb75.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <thrust/transform.h>
#include "paddle/fluid/operators/math/inclusive_scan.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cumprod_grad_kernel.h"
#include "paddle/phi/kernels/funcs/complex_functors.h"
#include "paddle/phi/kernels/funcs/cumprod.h"
#include "paddle/phi/kernels/funcs/elementwise_functor.h"
#include "paddle/phi/kernels/funcs/for_range.h"
// NOTE(@xiongkun): use of IsComplex<>
#include "paddle/fluid/framework/data_type.h"
namespace phi {
template <typename T>
struct CumprodGradFunctorExceptFirstZero {
HOSTDEVICE CumprodGradFunctorExceptFirstZero(
const T *x,
const T *y,
const T *dy_mul_y_reversed_cumsum,
const uint8_t *zero_mask,
size_t mid_dim,
size_t inner_dim,
T *dx,
int64_t *first_zero_idx,
T *x_filled_one)
: x_(x),
y_(y),
dy_mul_y_reversed_cumsum_(dy_mul_y_reversed_cumsum),
zero_mask_(zero_mask),
mid_dim_(mid_dim),
inner_dim_(inner_dim),
dx_(dx),
first_zero_idx_(first_zero_idx),
x_filled_one_(x_filled_one) {}
HOSTDEVICE void operator()(size_t idx) const {
auto inner_idx = idx % inner_dim_;
auto outer_idx = idx / (mid_dim_ * inner_dim_);
auto mid_idx = (idx - inner_idx) / inner_dim_ % mid_dim_;
auto mask = zero_mask_[idx];
bool should_fill_one = true;
if (mask == 0) {
dx_[idx] = dy_mul_y_reversed_cumsum_[idx] / x_[idx];
if (mid_idx == mid_dim_ - 1) {
// record first zero position as -1, i.e., no zero
first_zero_idx_[outer_idx * inner_dim_ + inner_idx] = -1;
}
} else if (mid_idx > 0) { // mask > 0
if (zero_mask_[idx - inner_dim_] > 0) { // not first zero
dx_[idx] = 0;
should_fill_one = false;
} else {
// idx is the first zero position, it should be recorded
dx_[idx] = y_[idx - inner_dim_];
first_zero_idx_[outer_idx * inner_dim_ + inner_idx] = mid_idx;
}
} else { // the first zero position is index 0
dx_[idx] = 1;
first_zero_idx_[outer_idx * inner_dim_ + inner_idx] = 0;
}
x_filled_one_[idx] = should_fill_one ? 1 : x_[idx];
}
private:
const T *x_;
const T *y_;
const T *dy_mul_y_reversed_cumsum_;
const uint8_t *zero_mask_;
size_t mid_dim_;
size_t inner_dim_;
T *dx_;
int64_t *first_zero_idx_;
T *x_filled_one_;
};
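// Illustrative host-side walkthrough (not part of the original kernel): the
// functor above is HOSTDEVICE, so it can be exercised on the CPU with tiny,
// made-up arrays to see how the first zero column is handled. Here
// x = [2, 0, 3], hence y = cumprod(x) = [2, 0, 0], and we take dy = [1, 1, 1].
inline void CumprodGradFunctorExceptFirstZeroExample() {
  const float x[3] = {2, 0, 3};
  const float y[3] = {2, 0, 0};
  const float dy_mul_y_rcumsum[3] = {2, 0, 0};  // reversed cumsum of dy * y
  const uint8_t zero_mask[3] = {0, 1, 1};       // cummax-ed zero mask of x
  float dx[3];
  int64_t first_zero_idx[1];
  float x_filled_one[3];
  CumprodGradFunctorExceptFirstZero<float> functor(x,
                                                   y,
                                                   dy_mul_y_rcumsum,
                                                   zero_mask,
                                                   /*mid_dim=*/3,
                                                   /*inner_dim=*/1,
                                                   dx,
                                                   first_zero_idx,
                                                   x_filled_one);
  for (size_t i = 0; i < 3; ++i) functor(i);
  // Result: dx = [1, 2, 0], first_zero_idx = [1], x_filled_one = [1, 1, 3].
  // dx[1] is later rescaled (to 8 in this example) by Steps 4-6 through
  // FillFirstZeroPositionGradFunctor, completing the cumprod gradient.
}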
template <typename T>
struct FillFirstZeroPositionGradFunctor {
HOSTDEVICE FillFirstZeroPositionGradFunctor(const int64_t *first_zero_idx,
const T *grad_value,
size_t mid_dim,
size_t inner_dim,
T *dx)
: first_zero_idx_(first_zero_idx),
grad_value_(grad_value),
mid_dim_(mid_dim),
inner_dim_(inner_dim),
dx_(dx) {}
HOSTDEVICE void operator()(size_t idx) const {
auto outer_idx = idx / inner_dim_;
auto inner_idx = idx % inner_dim_;
auto mid_idx = first_zero_idx_[idx];
if (mid_idx >= 0) {
auto full_idx =
outer_idx * mid_dim_ * inner_dim_ + mid_idx * inner_dim_ + inner_idx;
dx_[full_idx] *= grad_value_[full_idx];
}
}
private:
const int64_t *first_zero_idx_;
const T *grad_value_;
size_t mid_dim_;
size_t inner_dim_;
T *dx_;
};
template <typename T, typename Context>
void CumprodGradKernel(const Context &dev_ctx,
const DenseTensor &x,
const DenseTensor &out,
const DenseTensor &dout,
int dim,
DenseTensor *dx) {
const auto *y = &out;
const auto *dy = &dout;
size_t outer_dim, mid_dim, inner_dim;
GetCumprodDimInfo(x.dims(), dim, &outer_dim, &mid_dim, &inner_dim);
if (outer_dim == 0 || mid_dim == 0 || inner_dim == 0) return;
size_t numel = outer_dim * mid_dim * inner_dim;
const auto *x_data = x.data<T>();
const auto *y_data = y->data<T>();
const auto *dy_data = dy->data<T>();
auto place = dev_ctx.GetPlace();
auto *dx_data = dev_ctx.template Alloc<T>(dx);
// deal with complex
const T *x_data_deal;
const T *y_data_deal;
Allocator::AllocationPtr x_conj;
Allocator::AllocationPtr y_conj;
if (paddle::framework::IsComplex<T>::value) {
x_conj = const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(T));
auto *x_data_conj = reinterpret_cast<T *>(x_conj->ptr());
y_conj = const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(T));
auto *y_data_conj = reinterpret_cast<T *>(y_conj->ptr());
phi::funcs::ForRange<Context> for_range_x(dev_ctx, numel);
phi::funcs::ConjFunctor<T> functor_x(x_data, numel, x_data_conj);
for_range_x(functor_x);
phi::funcs::ForRange<Context> for_range_y(dev_ctx, numel);
phi::funcs::ConjFunctor<T> functor_y(y_data, numel, y_data_conj);
for_range_y(functor_y);
x_data_deal = x_data_conj;
y_data_deal = y_data_conj;
} else {
x_data_deal = x_data;
y_data_deal = y_data;
}
// Step 1: find cummax-ed zero mask of x
#ifdef PADDLE_WITH_CUDA
const auto &exec_policy = thrust::cuda::par.on(dev_ctx.stream());
#else
const auto &exec_policy = thrust::hip::par.on(dev_ctx.stream());
#endif
auto zero_mask_without_cummax =
const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(uint8_t));
auto *zero_mask_without_cummax_data =
reinterpret_cast<uint8_t *>(zero_mask_without_cummax->ptr());
thrust::transform(exec_policy,
thrust::device_pointer_cast(x_data_deal),
thrust::device_pointer_cast(x_data_deal) + numel,
thrust::device_pointer_cast(zero_mask_without_cummax_data),
funcs::IsZeroFunctor<T>());
auto zero_mask = const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(uint8_t));
auto *zero_mask_data = reinterpret_cast<uint8_t *>(zero_mask->ptr());
paddle::operators::math::InclusiveScan<uint8_t, cub::Max>(
zero_mask_without_cummax_data,
zero_mask_data,
outer_dim,
mid_dim,
inner_dim,
static_cast<uint8_t>(0),
cub::Max(),
/*reverse=*/false,
dev_ctx);
zero_mask_without_cummax = nullptr;
// Step 2: calculate reversed cumsum(dy * y)
auto dy_mul_y = const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(T));
auto *dy_mul_y_data = reinterpret_cast<T *>(dy_mul_y->ptr());
thrust::transform(exec_policy,
thrust::device_pointer_cast(dy_data),
thrust::device_pointer_cast(dy_data) + numel,
thrust::device_pointer_cast(y_data_deal),
thrust::device_pointer_cast(dy_mul_y_data),
funcs::MultiplyFunctor<T>());
auto dy_mul_y_reversed_cumsum =
const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(T));
auto *dy_mul_y_reversed_cumsum_data =
reinterpret_cast<T *>(dy_mul_y_reversed_cumsum->ptr());
paddle::operators::math::InclusiveScan<T, cub::Sum>(
dy_mul_y_data,
dy_mul_y_reversed_cumsum_data,
outer_dim,
mid_dim,
inner_dim,
static_cast<T>(0),
cub::Sum(),
/*reverse=*/true,
dev_ctx);
  // Step 3: calculate the gradient value except at the first zero position.
  // The gradient value of the first zero position is filled with out[idx-1],
  // while the gradient values of the other positions are calculated
  // completely. This functor also:
  // (1) finds the first zero index, i.e., first_zero_idx_data.
  // (2) fills x_filled_one, which satisfies
  // x_filled_one[i] = x[i], i > pos
  // x_filled_one[i] = 1, i <= pos
auto first_zero_idx = const_cast<Allocator &>(dev_ctx.GetAllocator())
.Allocate(numel * sizeof(int64_t));
auto *first_zero_idx_data =
reinterpret_cast<int64_t *>(first_zero_idx->ptr());
auto *x_filled_one_data = dy_mul_y_data; // reuse former allocated memory
phi::funcs::ForRange<Context> for_range(dev_ctx, numel);
CumprodGradFunctorExceptFirstZero<T> functor_except_first_zero(
x_data_deal,
y_data_deal,
dy_mul_y_reversed_cumsum_data,
zero_mask_data,
mid_dim,
inner_dim,
dx_data,
first_zero_idx_data,
x_filled_one_data);
for_range(functor_except_first_zero);
// Step 4: calculate cumprod of x_filled_one
auto *x_filled_one_cumprod_data =
dy_mul_y_reversed_cumsum_data; // reuse former allocated memory
paddle::operators::math::InclusiveScan<T, funcs::MultiplyFunctor<T>>(
x_filled_one_data,
x_filled_one_cumprod_data,
outer_dim,
mid_dim,
inner_dim,
static_cast<T>(1),
funcs::MultiplyFunctor<T>(),
/*reverse=*/false,
dev_ctx);
// Step 5: calculate reversed cumsum(dy * x_filled_one_cumprod)
auto *dy_mul_x_filled_one_cumprod =
dy_mul_y_data; // reuse former allocated memory
thrust::transform(exec_policy,
thrust::device_pointer_cast(dy_data),
thrust::device_pointer_cast(dy_data) + numel,
thrust::device_pointer_cast(x_filled_one_cumprod_data),
thrust::device_pointer_cast(dy_mul_x_filled_one_cumprod),
funcs::MultiplyFunctor<T>());
auto *dy_mul_x_filled_one_cumprod_reversed_cumsum =
dy_mul_y_reversed_cumsum_data; // reuse former allocated memory
paddle::operators::math::InclusiveScan<T, cub::Sum>(
dy_mul_x_filled_one_cumprod,
dy_mul_x_filled_one_cumprod_reversed_cumsum,
outer_dim,
mid_dim,
inner_dim,
static_cast<T>(0),
cub::Sum(),
/*reverse=*/true,
dev_ctx);
// Step 6: fill zero pos gradient value
phi::funcs::ForRange<Context> for_range_fill_zero_pos_grad(
dev_ctx, outer_dim * inner_dim);
FillFirstZeroPositionGradFunctor<T> fill_first_zero_pos_grad_functor(
first_zero_idx_data,
dy_mul_x_filled_one_cumprod_reversed_cumsum,
mid_dim,
inner_dim,
dx_data);
for_range_fill_zero_pos_grad(fill_first_zero_pos_grad_functor);
}
} // namespace phi
PD_REGISTER_KERNEL(cumprod_grad,
GPU,
ALL_LAYOUT,
phi::CumprodGradKernel,
float,
double,
int,
int64_t,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
|
5a3cdd8c6ebe3724875fc342818352c192b782b7.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <hip/hip_runtime.h>
#include "transpose_device.cuh"
/*
* TODO for all kernels (including naive):
* Leave a comment above all non-coalesced memory accesses and bank conflicts.
* Make it clear if the suboptimal access is a read or write. If an access is
* non-coalesced, specify how many cache lines it touches, and if an access
* causes bank conflicts, say if its a 2-way bank conflict, 4-way bank
* conflict, etc.
*
* Comment all of your kernels.
*/
/*
* Each block of the naive transpose handles a 64x64 block of the input matrix,
* with each thread of the block handling a 1x4 section and each warp handling
* a 32x4 section.
*
* If we split the 64x64 matrix into 32 blocks of shape (32, 4), then we have
* a block matrix of shape (2 blocks, 16 blocks).
* Warp 0 handles block (0, 0), warp 1 handles (1, 0), warp 2 handles (0, 1),
* warp n handles (n % 2, n / 2).
*
* This kernel is launched with block shape (64, 16) and grid shape
* (n / 64, n / 64) where n is the size of the square matrix.
*
* You may notice that we suggested in lecture that threads should be able to
* handle an arbitrary number of elements and that this kernel handles exactly
* 4 elements per thread. This is OK here because to overwhelm this kernel
* it would take a 4194304 x 4194304 matrix, which would take ~17.6TB of
* memory (well beyond what I expect GPUs to have in the next few years).
*/
__global__
void naiveTransposeKernel(const float *input, float *output, int n) {
const int i = threadIdx.x + 64 * blockIdx.x;
int j = 4 * threadIdx.y + 64 * blockIdx.y;
const int end_j = j + 4;
for (; j < end_j; j++)
/*
There are memory coalescing issues with respect to writing the
output. n is a multiple of 64. We are using one cache line per
float on the output. This is very inefficient.
*/
output[j + n * i] = input[i + n * j];
}
__global__
void shmemTransposeKernel(const float *input, float *output, int n) {
// TODO: Modify transpose kernel to use shared memory. All global memory
// reads and writes should be coalesced. Minimize the number of shared
// memory bank conflicts (0 bank conflicts should be possible using
// padding). Again, comment on all sub-optimal accesses.
const int i = threadIdx.x + 64 * blockIdx.x;
int j = 4 * threadIdx.y + 64 * blockIdx.y;
const int end_j = j + 4;
int xOffset = blockIdx.x * 64;
int yOffset = blockIdx.y * 64;
    __shared__ float data[64*65]; // We waste an extra bank for padding
for (; j < end_j; j++)
data[(j-yOffset) + 64 * (i - xOffset)+threadIdx.x] = input[i + n * j];
__syncthreads();
int k = 4*threadIdx.y;
const int end_k = k + 4;
int xIdx, yIdx;
for (; k < end_k; k ++)
{
xIdx = threadIdx.x + (64*blockIdx.y);
yIdx = (k+blockIdx.x * 64);
output[n*(yIdx)+(xIdx)] = data[threadIdx.x+65*k];
}
}
__global__
void optimalTransposeKernel(const float *input, float *output, int n) {
// TODO: This should be based off of your shmemTransposeKernel.
// Use any optimization tricks discussed so far to improve performance.
// Consider ILP and loop unrolling.
// Put the declarations at the top to avoid dependencies.
// Create new variables to precompute the array indices.
const int i = threadIdx.x + 64 * blockIdx.x;
int j = 4 * threadIdx.y + 64 * blockIdx.y;
int data_field = (j- (blockIdx.y * 64)) + 64 * (i - (blockIdx.x * 64))+threadIdx.x;
int input_field = i + n * j;
int k = 4*threadIdx.y;
    __shared__ float data[64*65]; // We waste an extra bank for padding
int nyIdxPlusxIdx = threadIdx.x + (64*blockIdx.y) + n*(k + blockIdx.x*64);
int data_out = threadIdx.x+65*k;
// Unroll the loop.
data[data_field] = input[input_field];
data[data_field + 1] = input[input_field + n];
data[data_field + 2] = input[input_field + 2 * n];
data[data_field + 3] = input[input_field + 3 * n];
__syncthreads();
// Unroll the other loop
output[nyIdxPlusxIdx] = data[data_out];
output[nyIdxPlusxIdx + n] = data[data_out+65];
output[nyIdxPlusxIdx + 2*n] = data[data_out+130];
output[nyIdxPlusxIdx+ 3*n] = data[data_out+195];
}
void cudaTranspose(
const float *d_input,
float *d_output,
int n,
TransposeImplementation type)
{
if (type == NAIVE) {
dim3 blockSize(64, 16);
dim3 gridSize(n / 64, n / 64);
hipLaunchKernelGGL(( naiveTransposeKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_input, d_output, n);
}
else if (type == SHMEM) {
dim3 blockSize(64, 16);
dim3 gridSize(n / 64, n / 64);
hipLaunchKernelGGL(( shmemTransposeKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_input, d_output, n);
}
else if (type == OPTIMAL) {
dim3 blockSize(64, 16);
dim3 gridSize(n / 64, n / 64);
hipLaunchKernelGGL(( optimalTransposeKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_input, d_output, n);
}
// Unknown type
else
assert(false);
}
| 5a3cdd8c6ebe3724875fc342818352c192b782b7.cu | #include <cassert>
#include <cuda_runtime.h>
#include "transpose_device.cuh"
/*
* TODO for all kernels (including naive):
* Leave a comment above all non-coalesced memory accesses and bank conflicts.
* Make it clear if the suboptimal access is a read or write. If an access is
* non-coalesced, specify how many cache lines it touches, and if an access
* causes bank conflicts, say if its a 2-way bank conflict, 4-way bank
* conflict, etc.
*
* Comment all of your kernels.
*/
/*
* Each block of the naive transpose handles a 64x64 block of the input matrix,
* with each thread of the block handling a 1x4 section and each warp handling
* a 32x4 section.
*
* If we split the 64x64 matrix into 32 blocks of shape (32, 4), then we have
* a block matrix of shape (2 blocks, 16 blocks).
* Warp 0 handles block (0, 0), warp 1 handles (1, 0), warp 2 handles (0, 1),
* warp n handles (n % 2, n / 2).
*
* This kernel is launched with block shape (64, 16) and grid shape
* (n / 64, n / 64) where n is the size of the square matrix.
*
* You may notice that we suggested in lecture that threads should be able to
* handle an arbitrary number of elements and that this kernel handles exactly
* 4 elements per thread. This is OK here because to overwhelm this kernel
* it would take a 4194304 x 4194304 matrix, which would take ~17.6TB of
* memory (well beyond what I expect GPUs to have in the next few years).
*/
__global__
void naiveTransposeKernel(const float *input, float *output, int n) {
const int i = threadIdx.x + 64 * blockIdx.x;
int j = 4 * threadIdx.y + 64 * blockIdx.y;
const int end_j = j + 4;
for (; j < end_j; j++)
/*
There are memory coalescing issues with respect to writing the
output. n is a multiple of 64. We are using one cache line per
float on the output. This is very inefficient.
*/
output[j + n * i] = input[i + n * j];
}
__global__
void shmemTransposeKernel(const float *input, float *output, int n) {
// TODO: Modify transpose kernel to use shared memory. All global memory
// reads and writes should be coalesced. Minimize the number of shared
// memory bank conflicts (0 bank conflicts should be possible using
// padding). Again, comment on all sub-optimal accesses.
const int i = threadIdx.x + 64 * blockIdx.x;
int j = 4 * threadIdx.y + 64 * blockIdx.y;
const int end_j = j + 4;
int xOffset = blockIdx.x * 64;
int yOffset = blockIdx.y * 64;
    __shared__ float data[64*65]; // We waste an extra bank for padding
for (; j < end_j; j++)
data[(j-yOffset) + 64 * (i - xOffset)+threadIdx.x] = input[i + n * j];
__syncthreads();
int k = 4*threadIdx.y;
const int end_k = k + 4;
int xIdx, yIdx;
for (; k < end_k; k ++)
{
xIdx = threadIdx.x + (64*blockIdx.y);
yIdx = (k+blockIdx.x * 64);
output[n*(yIdx)+(xIdx)] = data[threadIdx.x+65*k];
}
}
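// Illustrative compile-time note (not part of the original assignment code):
// in the shared-memory write above, 64 * (i - xOffset) + threadIdx.x collapses
// to 65 * threadIdx.x, so a warp's strided writes land in 32 distinct banks
// because (65 * t) % 32 == t % 32. A 64-float pitch would send every lane to
// bank 0, i.e. a 32-way bank conflict.
static_assert((65 * 7) % 32 == 7 && (64 * 7) % 32 == 0,
              "padding the pitch to 65 floats avoids shared-memory bank conflicts");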
__global__
void optimalTransposeKernel(const float *input, float *output, int n) {
// TODO: This should be based off of your shmemTransposeKernel.
// Use any optimization tricks discussed so far to improve performance.
// Consider ILP and loop unrolling.
// Put the declarations at the top to avoid dependencies.
// Create new variables to precompute the array indices.
const int i = threadIdx.x + 64 * blockIdx.x;
int j = 4 * threadIdx.y + 64 * blockIdx.y;
int data_field = (j- (blockIdx.y * 64)) + 64 * (i - (blockIdx.x * 64))+threadIdx.x;
int input_field = i + n * j;
int k = 4*threadIdx.y;
    __shared__ float data[64*65]; // We waste an extra bank for padding
int nyIdxPlusxIdx = threadIdx.x + (64*blockIdx.y) + n*(k + blockIdx.x*64);
int data_out = threadIdx.x+65*k;
// Unroll the loop.
data[data_field] = input[input_field];
data[data_field + 1] = input[input_field + n];
data[data_field + 2] = input[input_field + 2 * n];
data[data_field + 3] = input[input_field + 3 * n];
__syncthreads();
// Unroll the other loop
output[nyIdxPlusxIdx] = data[data_out];
output[nyIdxPlusxIdx + n] = data[data_out+65];
output[nyIdxPlusxIdx + 2*n] = data[data_out+130];
output[nyIdxPlusxIdx+ 3*n] = data[data_out+195];
}
void cudaTranspose(
const float *d_input,
float *d_output,
int n,
TransposeImplementation type)
{
if (type == NAIVE) {
dim3 blockSize(64, 16);
dim3 gridSize(n / 64, n / 64);
naiveTransposeKernel<<<gridSize, blockSize>>>(d_input, d_output, n);
}
else if (type == SHMEM) {
dim3 blockSize(64, 16);
dim3 gridSize(n / 64, n / 64);
shmemTransposeKernel<<<gridSize, blockSize>>>(d_input, d_output, n);
}
else if (type == OPTIMAL) {
dim3 blockSize(64, 16);
dim3 gridSize(n / 64, n / 64);
optimalTransposeKernel<<<gridSize, blockSize>>>(d_input, d_output, n);
}
// Unknown type
else
assert(false);
}
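// Minimal host-side usage sketch (illustrative only: the buffer names and the
// matrix size are made up, and error checking is omitted). n must be a
// multiple of 64 for the kernels above; NAIVE/SHMEM/OPTIMAL come from
// transpose_device.cuh.
void exampleTransposeUsage() {
    const int n = 1024;
    const size_t bytes = static_cast<size_t>(n) * n * sizeof(float);
    float *d_input = nullptr, *d_output = nullptr;
    cudaMalloc(&d_input, bytes);
    cudaMalloc(&d_output, bytes);
    // ... fill d_input, e.g. via cudaMemcpy from a host buffer ...
    cudaTranspose(d_input, d_output, n, OPTIMAL);
    cudaDeviceSynchronize();
    cudaFree(d_input);
    cudaFree(d_output);
}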
|
d5951078c03678f593dfe9a9f3b98ca5a83d6b64.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
int main() {
int n;
int i, j, k;
printf("Please enter the size of matrix: \n");
scanf("%d", &n);
int *a, *b, *c;
hipHostMalloc((void**)&a, sizeof(int) * n * n);
hipHostMalloc((void**)&b, sizeof(int) * n * n);
hipHostMalloc((void**)&c, sizeof(int) * n * n);
for (i = 0; i < n; i++){
for (j = 0; j < n; j++){
a[i * n + j] = round(rand() % 2);
b[i * n + j] = round(rand() % 2);
}
}
printf("Start...\n");
clock_t start_time = clock();
for (i = 0; i < n; i++){
for (j = 0; j < n; j++){
int tmp = 0;
for (k = 0; k < n; k++)
tmp += a[i * n + k] * b[k * n + j];
c[i * n + j] = tmp;
}
}
clock_t end_time = clock();
printf("Time of calculating %dx%d matrix using CPU is %f ms.\n", n, n, static_cast<double>(end_time - start_time)/CLOCKS_PER_SEC*1000);
hipHostFree(a);
hipHostFree(b);
hipHostFree(c);
return 0;
}
| d5951078c03678f593dfe9a9f3b98ca5a83d6b64.cu |
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
int main() {
int n;
int i, j, k;
printf("Please enter the size of matrix: \n");
scanf("%d", &n);
int *a, *b, *c;
cudaMallocHost((void**)&a, sizeof(int) * n * n);
cudaMallocHost((void**)&b, sizeof(int) * n * n);
cudaMallocHost((void**)&c, sizeof(int) * n * n);
for (i = 0; i < n; i++){
for (j = 0; j < n; j++){
a[i * n + j] = round(rand() % 2);
b[i * n + j] = round(rand() % 2);
}
}
printf("Start...\n");
clock_t start_time = clock();
for (i = 0; i < n; i++){
for (j = 0; j < n; j++){
int tmp = 0;
for (k = 0; k < n; k++)
tmp += a[i * n + k] * b[k * n + j];
c[i * n + j] = tmp;
}
}
clock_t end_time = clock();
printf("Time of calculating %dx%d matrix using CPU is %f ms.\n", n, n, static_cast<double>(end_time - start_time)/CLOCKS_PER_SEC*1000);
cudaFreeHost(a);
cudaFreeHost(b);
cudaFreeHost(c);
return 0;
}
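// Hypothetical GPU counterpart (not part of the original benchmark): the
// pinned buffers from cudaMallocHost above are typically paired with a device
// kernel such as this sketch, so the same matrices could also be timed on the
// GPU for comparison.
__global__ void gpuMatrixMul(const int *a, const int *b, int *c, int n) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < n && col < n) {
        int tmp = 0;
        for (int k = 0; k < n; ++k)
            tmp += a[row * n + k] * b[k * n + col];
        c[row * n + col] = tmp;
    }
}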
|
54b351c64ecd0b7ecf62ff2b4739f788aefba59f.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void add(int *a, int *b, int *c) {
*c = *a + *b;
}
int main(void)
{
int a, b, c;
int *d_a, *d_b, *d_c;
int size = sizeof(int);
hipMalloc((void**)&d_a, size);
hipMalloc((void**)&d_b, size);
hipMalloc((void**)&d_c, size);
a = 3;
b = 5;
hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, d_a, d_b, d_c);
hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost);
printf("Result: %d\n", c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
| 54b351c64ecd0b7ecf62ff2b4739f788aefba59f.cu | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void add(int *a, int *b, int *c) {
*c = *a + *b;
}
int main(void)
{
int a, b, c;
int *d_a, *d_b, *d_c;
int size = sizeof(int);
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMalloc((void**)&d_c, size);
a = 3;
b = 5;
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
add<<<1,1>>>(d_a, d_b, d_c);
cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
printf("Result: %d\n", c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
798cc6cc632583ad31bd996da960e7021193ef6c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "UpsampleLayer.h"
namespace nvinfer1 {
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor) {
int x, y, z, w;
w = ii % d3;
ii = ii / d3;
z = ii % d2;
ii = ii / d2;
y = ii % d1;
ii = ii / d1;
x = ii;
w = w / scale_factor;
z = z / scale_factor;
d2 /= scale_factor;
d3 /= scale_factor;
return (((x * d1 + y) * d2) + z) * d3 + w;
}
template <typename Dtype>
__global__ void upscale(const Dtype *input, Dtype *output, int no_elements,
int scale_factor, int d1, int d2, int d3) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
if (ii >= no_elements) return;
int ipidx = translate_idx(ii, d1, d2, d3, scale_factor);
output[ii] = input[ipidx];
}
template <typename Dtype>
void UpsampleLayerPlugin::forwardGpu(const Dtype *input, Dtype *output, int N,
int C, int H, int W) {
int numElem = N * C * H * W;
hipLaunchKernelGGL(( upscale), dim3((numElem + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, 0,
input, output, numElem, mScale, C, H, W);
}
size_t type2size(DataType dataType) {
size_t _size = 0;
switch (dataType) {
case DataType::kFLOAT:
_size = sizeof(float);
break;
case DataType::kHALF:
_size = sizeof(__half);
break;
case DataType::kINT8:
_size = sizeof(u_int8_t);
break;
default:
std::cerr << "error data type" << std::endl;
}
return _size;
}
int UpsampleLayerPlugin::enqueue(int batchSize, const void *const *inputs,
void **outputs, void *workspace,
hipStream_t stream) {
assert(batchSize == 1);
const int channels = mCHW.d[0];
const int64_t in_height = mCHW.d[1];
const int64_t in_width = mCHW.d[2];
const int64_t out_height = mOutputHeight;
const int64_t out_width = mOutputWidth;
int totalElems = batchSize * in_height * in_width * channels;
// Handle no-op resizes efficiently.
if (out_height == in_height && out_width == in_width) {
CUDA_CHECK(hipMemcpyAsync(outputs[0], inputs[0],
totalElems * type2size(mDataType),
hipMemcpyDeviceToDevice, stream));
CUDA_CHECK(hipStreamSynchronize(stream));
return 0;
}
// CUDA_CHECK(hipStreamSynchronize(stream));
switch (mDataType) {
case DataType::kFLOAT:
forwardGpu<float>((const float *)inputs[0], (float *)outputs[0],
batchSize, mCHW.d[0], mOutputHeight, mOutputWidth);
break;
case DataType::kHALF:
forwardGpu<__half>((const __half *)inputs[0], (__half *)outputs[0],
batchSize, mCHW.d[0], mOutputHeight, mOutputWidth);
break;
case DataType::kINT8:
forwardGpu<u_int8_t>((const u_int8_t *)inputs[0], (u_int8_t *)outputs[0],
batchSize, mCHW.d[0], mOutputHeight, mOutputWidth);
break;
default:
std::cerr << "error data type" << std::endl;
}
return 0;
};
} | 798cc6cc632583ad31bd996da960e7021193ef6c.cu | #include "UpsampleLayer.h"
namespace nvinfer1 {
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor) {
int x, y, z, w;
w = ii % d3;
ii = ii / d3;
z = ii % d2;
ii = ii / d2;
y = ii % d1;
ii = ii / d1;
x = ii;
w = w / scale_factor;
z = z / scale_factor;
d2 /= scale_factor;
d3 /= scale_factor;
return (((x * d1 + y) * d2) + z) * d3 + w;
}
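// Illustrative sanity check (not part of the original plugin): for a single
// 2x2 input plane upscaled by scale_factor = 2, output element ii = 5
// (row 1, col 1 of the 4x4 output plane) maps back to input element 0;
// nearest-neighbour upsampling simply repeats each source pixel.
__global__ void translateIdxExampleKernel(int *result) {
  // d1 = channels = 1, d2 = out_height = 4, d3 = out_width = 4
  *result = translate_idx(5, /*d1=*/1, /*d2=*/4, /*d3=*/4, /*scale_factor=*/2);
  // expected: 0
}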
template <typename Dtype>
__global__ void upscale(const Dtype *input, Dtype *output, int no_elements,
int scale_factor, int d1, int d2, int d3) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
if (ii >= no_elements) return;
int ipidx = translate_idx(ii, d1, d2, d3, scale_factor);
output[ii] = input[ipidx];
}
template <typename Dtype>
void UpsampleLayerPlugin::forwardGpu(const Dtype *input, Dtype *output, int N,
int C, int H, int W) {
int numElem = N * C * H * W;
upscale<<<(numElem + mThreadCount - 1) / mThreadCount, mThreadCount>>>(
input, output, numElem, mScale, C, H, W);
}
size_t type2size(DataType dataType) {
size_t _size = 0;
switch (dataType) {
case DataType::kFLOAT:
_size = sizeof(float);
break;
case DataType::kHALF:
_size = sizeof(__half);
break;
case DataType::kINT8:
_size = sizeof(u_int8_t);
break;
default:
std::cerr << "error data type" << std::endl;
}
return _size;
}
int UpsampleLayerPlugin::enqueue(int batchSize, const void *const *inputs,
void **outputs, void *workspace,
cudaStream_t stream) {
assert(batchSize == 1);
const int channels = mCHW.d[0];
const int64_t in_height = mCHW.d[1];
const int64_t in_width = mCHW.d[2];
const int64_t out_height = mOutputHeight;
const int64_t out_width = mOutputWidth;
int totalElems = batchSize * in_height * in_width * channels;
// Handle no-op resizes efficiently.
if (out_height == in_height && out_width == in_width) {
CUDA_CHECK(cudaMemcpyAsync(outputs[0], inputs[0],
totalElems * type2size(mDataType),
cudaMemcpyDeviceToDevice, stream));
CUDA_CHECK(cudaStreamSynchronize(stream));
return 0;
}
// CUDA_CHECK(cudaStreamSynchronize(stream));
switch (mDataType) {
case DataType::kFLOAT:
forwardGpu<float>((const float *)inputs[0], (float *)outputs[0],
batchSize, mCHW.d[0], mOutputHeight, mOutputWidth);
break;
case DataType::kHALF:
forwardGpu<__half>((const __half *)inputs[0], (__half *)outputs[0],
batchSize, mCHW.d[0], mOutputHeight, mOutputWidth);
break;
case DataType::kINT8:
forwardGpu<u_int8_t>((const u_int8_t *)inputs[0], (u_int8_t *)outputs[0],
batchSize, mCHW.d[0], mOutputHeight, mOutputWidth);
break;
default:
std::cerr << "error data type" << std::endl;
}
return 0;
};
} |
7f186af1e200a1f80f293463d7e02ec50eb13287.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*------------------------------*/
/* KSVD-OPERATION */
/* IMPLEMENTATION */
/*------------------------------*/
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include "GlobalDeclarations.cuh"
#include "Globals.cuh"
#include "FileManager.cuh"
#include "HostMemory.cuh"
#include "DeviceMemory.cuh"
#include "CudaAlgorithm.cuh"
#include "OMP.cuh"
#include "DictionaryUpdate.cuh"
#include "KSVD.cuh"
#include "ErrorHandler.cuh"
#include "ThrustOperators.cuh"
#include "Utilities.cuh"
#include "Algorithms.cuh"
/*****************
* NAMESPACES *
* __________ *
*****************/
using DeviceSpace = DeviceMemory<datatype>;
using HostSpace = HostMemory<datatype>;
/*******************
* DECLARATIONS *
* ____________ *
*******************/
inline void diagX_Mul(datatype*,datatype*,datatype*,int,int);
inline bool use_count(datatype*, datatype*, unsigned int, unsigned int);
inline void replace(datatype*, datatype*, datatype*, unsigned int, unsigned int);
inline void initialize(datatype*, datatype*, datatype*, datatype*, unsigned int, unsigned int);
/*---------------*/
/* Constructor */
/*---------------*/
KSVD::KSVD(DeviceSpace* devptr, HostSpace* hostptr)
: CudaAlgorithm(devptr, hostptr) {
}
/*--------------*/
/* Destructor */
/*--------------*/
KSVD::~KSVD() {
}
/*-------------------------------*/
/* Perform the K-SVD algorithm */
/*-------------------------------*/
bool KSVD::ExecuteIterations(unsigned int iternum) {
// D_temp = normcols(D);
if (normcols() == false) {
return false;
}
// Main Loop
unsigned int* all_perms;
for (unsigned int iter = 0; iter < iternum; iter++) {
// G = D'*D ==> G = D_temp'*D_temp
if (Algorithms::Multiplication::AT_times_B(this->CBhandle,
this->deviceMemory.get(DeviceSpace::TEMP_ROWSD_BY_COLSD),
Globals::rowsD, Globals::colsD,
this->deviceMemory.get(DeviceSpace::TEMP_ROWSD_BY_COLSD),
Globals::rowsD, Globals::colsD,
this->deviceMemory.get(DeviceSpace::G)
) == false) {
return false;
}
//////////////////////////
// Sparse coding stage! //
//////////////////////////
// Execute Batch-OMP stage now!
// Equivalent: Gamma = sparsecode(data,D,XtX,G,thresh)
if (this->BatchOMP() == false) {
return false;
}
/*
Note:
Now array Gamma is not present but its elements are accessed
using the following formula:
For a *non-zero* element at (i,j) we get its value at:
(c + j*Tdata) [ (int) ( (selected_atoms + j*colsD) [ i ] - 1 ) ]
*/
/////////////////////////////////
// Reset variables for the new //
// iteration before Dictionary //
// Update stage. //
/////////////////////////////////
initialize(
this->deviceMemory.get(DeviceSpace::UNUSED_SIGS),
this->deviceMemory.get(DeviceSpace::REPLACED_ATOMS),
this->deviceMemory.get(DeviceSpace::UNUSED_SIGS_COUNTER),
this->deviceMemory.get(DeviceSpace::UNUSED_SIGS_BITMAP),
Globals::colsX, Globals::colsD
);
//////////////////////////////
// Dictionary update stage! //
//////////////////////////////
// Generate a random permutation of indices in the
// range 1...colsD. Equivalent: p = randperm(dictsize);
all_perms = Algorithms::RandomPermutation::generateRandPerm(Globals::colsD);
// Execute Dictionary Update stage now!
if (this->updateDictionary(all_perms) == false) {
return false;
}
////////////////////////
// Compute error now! //
////////////////////////
if (this->errorComputation(iter + 1) == false) {
return false;
}
////////////////////////
// Clear Dictionary //
////////////////////////
if (this->clearDict() == false) {
return false;
}
}
return true;
}
/*-----------------------------------------*/
/* Clear the dictionary of unused atoms */
/* or atoms having error above a threshold */
/* or atoms that too few samples use! */
/*-----------------------------------------*/
bool KSVD::clearDict() {
// Count how many elements in every row of Gamma
// have absolute value above 1e-7
if (use_count(
this->deviceMemory.get(DeviceSpace::SELECTED_ATOMS),
this->deviceMemory.get(DeviceSpace::TEMP_1_BY_COLSD),
Globals::colsD, Globals::colsX
)
== false) {
return false;
}
// Iteration to clear every atom that satisfies
// a certain condition implemented in maximum
// function below. Matlab equivalent:
//
// for j = 1:dictsize
// | % compute G(:, j)
// | % replace atom
// end
//
for (unsigned int j = 0; j < Globals::colsD; j++) {
// Now we compute:
//
// Gj = D'*D(:,j);
//
if (Algorithms::Multiplication::AT_times_x(this->CBhandle,
this->deviceMemory.get(DeviceSpace::TEMP_ROWSD_BY_COLSD),
Globals::rowsD,
this->deviceMemory.get(DeviceSpace::TEMP_ROWSD_BY_COLSD) + j*Globals::rowsD,
Globals::colsD,
this->deviceMemory.get(DeviceSpace::G)
)
== false) {
return false;
}
// Now we compute the maximum (square) value
// of Gj. Operation performed:
//
// (max(Gj.^2))
//
// We also apply the condition.
if (Algorithms::Reduction::Maximum_squared(
this->deviceMemory.get(DeviceSpace::G),
Globals::colsD,
this->deviceMemory.get(DeviceSpace::TEMP_1_BY_COLSD) + j,
this->deviceMemory.get(DeviceSpace::REPLACED_ATOMS) + j,
j
)
== false) {
return false;
}
// We now find the signal with the maximum error.
// Matlab equivalent:
//
// [~,i] = max(err);
//
if (Algorithms::Reduction::Maximum(
this->deviceMemory.get(DeviceSpace::ERR),
Globals::colsX,
this->deviceMemory.get(DeviceSpace::G)
)
== false) {
return false;
}
// We should now calculate the norm of the
// selected signal in our data.
if (Algorithms::Reduction::euclidean_norm(
this->deviceMemory.get(DeviceSpace::X_ARRAY),
this->deviceMemory.get(DeviceSpace::G),
Globals::rowsX, Globals::TPB_rowsX
)
== false) {
return false;
}
// Finally replace atom.
replace(
this->deviceMemory.get(DeviceSpace::TEMP_ROWSD_BY_COLSD) + j * Globals::rowsD,
this->deviceMemory.get(DeviceSpace::X_ARRAY),
this->deviceMemory.get(DeviceSpace::G),
Globals::rowsX, Globals::TPB_rowsX
);
}
return true;
}
/*------------------------------------*/
/* Compute the residual error */
/* using the formula: */
/* */
/* sqrt(sum[(X-D*Gamma)^2]/numel(X)) */
/* */
/*------------------------------------*/
bool KSVD::errorComputation(unsigned int iter) {
// Compute (D*Gamma) using CUBLAS
// for performance
if (Algorithms::Multiplication::A_times_B(this->CBhandle,
this->deviceMemory.get(DeviceSpace::TEMP_ROWSD_BY_COLSD), Globals::rowsD, Globals::colsD,
this->deviceMemory.get(DeviceSpace::SELECTED_ATOMS), Globals::colsD, Globals::colsX,
this->deviceMemory.get(DeviceSpace::TEMP_ROWSX_BY_COLSX))
== false) {
return false;
}
// Now reduce over the result and calculate
// the error:
// sum{ (X - X~)^2 }
//
if (Algorithms::Reduction::reduce_RMSE(
this->deviceMemory.get(DeviceSpace::X_ARRAY),
this->deviceMemory.get(DeviceSpace::TEMP_ROWSX_BY_COLSX),
this->deviceMemory.get(DeviceSpace::ERR),
Globals::rowsX , Globals::colsX, iter,
Globals::TPB_rowsX,
this->deviceMemory.get(DeviceSpace::UNUSED_SIGS_BITMAP))
== false) {
return false;
}
return true;
}
/*-------------------------*/
/* Normalize columns of D */
/*-------------------------*/
bool KSVD::normcols() {
// Calculating ==> 1./sqrt(sum(D.*D))
if (Algorithms::Reduction::reduce2D_Sum_SQUAREDelements(
this->deviceMemory.get(DeviceSpace::D_ARRAY),
Globals::rowsD, Globals::colsD,
this->deviceMemory.get(DeviceSpace::TEMP_1_BY_COLSD),
Globals::TPB_rowsX
) == false) {
return false;
}
// D = D*spdiag( 1./sqrt(sum(D.*D)) ) ==> D = D*spdiag(TEMP_1_BY_COLSD)
diagX_Mul(
this->deviceMemory.get(DeviceSpace::D_ARRAY),
this->deviceMemory.get(DeviceSpace::TEMP_ROWSD_BY_COLSD),
this->deviceMemory.get(DeviceSpace::TEMP_1_BY_COLSD),
Globals::rowsD * Globals::colsD, Globals::rowsD);
return true;
}
/*-------------------------*/
/* Check if error occurred */
/*-------------------------*/
bool KSVD::isValid() {
return(CudaAlgorithm::isValid());
}
///////////////////////////////////////////////////
//////////// HELPER FUNCTIONS ////////////////
///////////////////////////////////////////////////
/************************************
* DECLARATIONS *
* ____________ *
************************************/
__global__
void diagMul(datatype*,datatype*, datatype*, int, int);
__global__
void use_count_kernel(datatype*, datatype*, unsigned int, unsigned int);
__global__
void replaceDj(datatype*, datatype*, datatype*, unsigned int);
__global__
void init(datatype*, datatype*, datatype*, datatype*, unsigned int, unsigned int);
/************************************
* WRAPPERS *
* ________ *
************************************/
/*---------------------------------*/
/* OPERATION: */
/* out = in*diag(coeff) */
/*---------------------------------*/
inline void diagX_Mul(datatype* in, datatype* out, datatype* coeff, int size, int rows) {
dim3 block(1024);
dim3 grid((size + block.x - 1) / block.x);
hipLaunchKernelGGL(( diagMul) , dim3(grid), dim3(block) , 0, 0, in, out, coeff, size, rows);
}
/*------------------------------*/
/* OPERATION: */
/* */
/* usecount = */
/* sum(abs(Gamma)>1e-7, 2); */
/* */
/*------------------------------*/
inline bool use_count(
datatype* gamma, datatype* counters,
unsigned int colsD, unsigned int colsX) {
// Initialize counters to zero
if (hipMemset(counters, 0, colsD * sizeof(datatype)) != hipSuccess) {
return false;
}
// Once again we assume colsD <= 32
dim3 block(16, colsD); // => max. 512 threads per block
dim3 grid((colsX + block.x - 1) / block.x);
use_count_kernel << < grid, block >> > (gamma, counters, colsD, colsX);
return true;
}
/*---------------------------------*/
/* OPERATION: */
/* D(:,j) = X(:,unused_sigs(i)) */
/* / norm(X(:,unused_sigs(i))) */
/*---------------------------------*/
inline void replace(
datatype* Dj, datatype* X, datatype* G,
unsigned int size, unsigned int recommended) {
replaceDj << < 1, recommended >> > (Dj, X, G, size);
}
/*-----------------------------------*/
/* OPERATIONS: */
/* replaced_atoms = zeros(dictsize) */
/* unused_sigs = 1:size(data,2); */
/*-----------------------------------*/
inline void initialize(
datatype* un, datatype* rep, datatype* counter, datatype* bitmap,
unsigned int N, unsigned int colsD) {
dim3 block(1024);
dim3 grid((N + block.x - 1) / block.x);
init << <grid, block>> > (un, rep, counter, bitmap, N, colsD);
}
/************************************
* KERNELS *
* _______ *
************************************/
/*---------------------------*/
/* This kernel multiplies a */
/* vector transformed into a */
/* diagonal matrix with some */
/* other matrix. */
/*---------------------------*/
__global__
void diagMul(datatype* in, datatype* out, datatype* coeff, int size, int rows) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
int col = index / rows;
out[index] = in[index] * coeff[col];
}
}
/*-----------------------------------*/
/* This kernel uses an array of */
/* counters to simultaneously count */
/* elements of every row of input */
/* that satisfy a certain condition. */
/*-----------------------------------*/
__global__ void use_count_kernel(
datatype* gamma, datatype* counters,
unsigned int colsD, unsigned int colsX) {
// threadIdx.x = my column
// threadIdx.y = my row
unsigned int column = blockIdx.x * blockDim.x + threadIdx.x;
if (column < colsX) {
if (SQR((gamma + column*colsD)[threadIdx.y]) > 1e-7) {
atomicAdd((unsigned int*)(counters + threadIdx.y), 1);
}
}
}
/*---------------------------------*/
/* This kernel performs the simple */
/* task of: */
/* D(:,j) = X(:,i) / norm(X(:,i)) */
/*---------------------------------*/
__global__ void replaceDj(
datatype* Dj, datatype* X, datatype* G,
unsigned int size) {
if (!(*G)) {
// Our atom does not need replacement
return;
}
if (threadIdx.x < size) {
X += (unsigned int)(*(G + 1))*size;
Dj[threadIdx.x] = X[threadIdx.x] / (*(G + 2));
}
}
/*-----------------------------------*/
/* This kernel is the equivalent of: */
/* */
/* replaced_atoms = zeros(dictsize) */
/* unused_sigs = 1:size(data,2) */
/*-----------------------------------*/
__global__ void init(
datatype* un, datatype* rep, datatype* counter, datatype* bitmap,
unsigned int N, unsigned int N2) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N) {
un[index] = index;
bitmap[index] = 1.0;
if (index < N2) {
rep[index] = 0.0;
}
if (index == 0) {
*counter = 0.0;
}
}
} | 7f186af1e200a1f80f293463d7e02ec50eb13287.cu | /*------------------------------*/
/* KSVD-OPERATION */
/* IMPLEMENTATION */
/*------------------------------*/
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include "GlobalDeclarations.cuh"
#include "Globals.cuh"
#include "FileManager.cuh"
#include "HostMemory.cuh"
#include "DeviceMemory.cuh"
#include "CudaAlgorithm.cuh"
#include "OMP.cuh"
#include "DictionaryUpdate.cuh"
#include "KSVD.cuh"
#include "ErrorHandler.cuh"
#include "ThrustOperators.cuh"
#include "Utilities.cuh"
#include "Algorithms.cuh"
/*****************
* NAMESPACES *
* __________ *
*****************/
using DeviceSpace = DeviceMemory<datatype>;
using HostSpace = HostMemory<datatype>;
/*******************
* DECLARATIONS *
* ____________ *
*******************/
inline void diagX_Mul(datatype*,datatype*,datatype*,int,int);
inline bool use_count(datatype*, datatype*, unsigned int, unsigned int);
inline void replace(datatype*, datatype*, datatype*, unsigned int, unsigned int);
inline void initialize(datatype*, datatype*, datatype*, datatype*, unsigned int, unsigned int);
/*---------------*/
/* Constructor */
/*---------------*/
KSVD::KSVD(DeviceSpace* devptr, HostSpace* hostptr)
: CudaAlgorithm(devptr, hostptr) {
}
/*--------------*/
/* Destructor */
/*--------------*/
KSVD::~KSVD() {
}
/*-------------------------------*/
/* Perform the K-SVD algorithm */
/*-------------------------------*/
bool KSVD::ExecuteIterations(unsigned int iternum) {
// D_temp = normcols(D);
if (normcols() == false) {
return false;
}
// Main Loop
unsigned int* all_perms;
for (unsigned int iter = 0; iter < iternum; iter++) {
// G = D'*D ==> G = D_temp'*D_temp
if (Algorithms::Multiplication::AT_times_B(this->CBhandle,
this->deviceMemory.get(DeviceSpace::TEMP_ROWSD_BY_COLSD),
Globals::rowsD, Globals::colsD,
this->deviceMemory.get(DeviceSpace::TEMP_ROWSD_BY_COLSD),
Globals::rowsD, Globals::colsD,
this->deviceMemory.get(DeviceSpace::G)
) == false) {
return false;
}
//////////////////////////
// Sparse coding stage! //
//////////////////////////
// Execute Batch-OMP stage now!
// Equivalent: Gamma = sparsecode(data,D,XtX,G,thresh)
if (this->BatchOMP() == false) {
return false;
}
/*
Note:
Now array Gamma is not present but its elements are accessed
using the following formula:
For a *non-zero* element at (i,j) we get its value at:
(c + j*Tdata) [ (int) ( (selected_atoms + j*colsD) [ i ] - 1 ) ]
*/
/////////////////////////////////
// Reset variables for the new //
// iteration before Dictionary //
// Update stage. //
/////////////////////////////////
initialize(
this->deviceMemory.get(DeviceSpace::UNUSED_SIGS),
this->deviceMemory.get(DeviceSpace::REPLACED_ATOMS),
this->deviceMemory.get(DeviceSpace::UNUSED_SIGS_COUNTER),
this->deviceMemory.get(DeviceSpace::UNUSED_SIGS_BITMAP),
Globals::colsX, Globals::colsD
);
//////////////////////////////
// Dictionary update stage! //
//////////////////////////////
// Generate a random permutation of indices in the
// range 1...colsD. Equivalent: p = randperm(dictsize);
all_perms = Algorithms::RandomPermutation::generateRandPerm(Globals::colsD);
// Execute Dictionary Update stage now!
if (this->updateDictionary(all_perms) == false) {
return false;
}
////////////////////////
// Compute error now! //
////////////////////////
if (this->errorComputation(iter + 1) == false) {
return false;
}
////////////////////////
// Clear Dictionary //
////////////////////////
if (this->clearDict() == false) {
return false;
}
}
return true;
}
/*-----------------------------------------*/
/* Clear the dictionary of unused atoms */
/* or atoms having error above a threshold */
/* or atoms that too few samples use! */
/*-----------------------------------------*/
bool KSVD::clearDict() {
// Count how many elements in every row of Gamma
// have absolute value above 1e-7
if (use_count(
this->deviceMemory.get(DeviceSpace::SELECTED_ATOMS),
this->deviceMemory.get(DeviceSpace::TEMP_1_BY_COLSD),
Globals::colsD, Globals::colsX
)
== false) {
return false;
}
// Iteration to clear every atom that satisfies
// a certain condition implemented in maximum
// function below. Matlab equivalent:
//
// for j = 1:dictsize
// | % compute G(:, j)
// | % replace atom
// end
//
for (unsigned int j = 0; j < Globals::colsD; j++) {
// Now we compute:
//
// Gj = D'*D(:,j);
//
if (Algorithms::Multiplication::AT_times_x(this->CBhandle,
this->deviceMemory.get(DeviceSpace::TEMP_ROWSD_BY_COLSD),
Globals::rowsD,
this->deviceMemory.get(DeviceSpace::TEMP_ROWSD_BY_COLSD) + j*Globals::rowsD,
Globals::colsD,
this->deviceMemory.get(DeviceSpace::G)
)
== false) {
return false;
}
// Now we compute the maximum (square) value
// of Gj. Operation performed:
//
// (max(Gj.^2))
//
// We also apply the condition.
if (Algorithms::Reduction::Maximum_squared(
this->deviceMemory.get(DeviceSpace::G),
Globals::colsD,
this->deviceMemory.get(DeviceSpace::TEMP_1_BY_COLSD) + j,
this->deviceMemory.get(DeviceSpace::REPLACED_ATOMS) + j,
j
)
== false) {
return false;
}
// We now find the signal with the maximum error.
// Matlab equivalent:
//
// [~,i] = max(err);
//
if (Algorithms::Reduction::Maximum(
this->deviceMemory.get(DeviceSpace::ERR),
Globals::colsX,
this->deviceMemory.get(DeviceSpace::G)
)
== false) {
return false;
}
// We should now calculate the norm of the
// selected signal in our data.
if (Algorithms::Reduction::euclidean_norm(
this->deviceMemory.get(DeviceSpace::X_ARRAY),
this->deviceMemory.get(DeviceSpace::G),
Globals::rowsX, Globals::TPB_rowsX
)
== false) {
return false;
}
// Finally replace atom.
replace(
this->deviceMemory.get(DeviceSpace::TEMP_ROWSD_BY_COLSD) + j * Globals::rowsD,
this->deviceMemory.get(DeviceSpace::X_ARRAY),
this->deviceMemory.get(DeviceSpace::G),
Globals::rowsX, Globals::TPB_rowsX
);
}
return true;
}
/*------------------------------------*/
/* Compute the residual error */
/* using the formula: */
/* */
/* sqrt(sum[(X-D*Gamma)^2]/numel(X)) */
/* */
/*------------------------------------*/
bool KSVD::errorComputation(unsigned int iter) {
// Compute (D*Gamma) using CUBLAS
// for performance
if (Algorithms::Multiplication::A_times_B(this->CBhandle,
this->deviceMemory.get(DeviceSpace::TEMP_ROWSD_BY_COLSD), Globals::rowsD, Globals::colsD,
this->deviceMemory.get(DeviceSpace::SELECTED_ATOMS), Globals::colsD, Globals::colsX,
this->deviceMemory.get(DeviceSpace::TEMP_ROWSX_BY_COLSX))
== false) {
return false;
}
// Now reduce over the result and calculate
// the error:
// sum{ (X - X~)^2 }
//
if (Algorithms::Reduction::reduce_RMSE(
this->deviceMemory.get(DeviceSpace::X_ARRAY),
this->deviceMemory.get(DeviceSpace::TEMP_ROWSX_BY_COLSX),
this->deviceMemory.get(DeviceSpace::ERR),
Globals::rowsX , Globals::colsX, iter,
Globals::TPB_rowsX,
this->deviceMemory.get(DeviceSpace::UNUSED_SIGS_BITMAP))
== false) {
return false;
}
return true;
}
/*-------------------------*/
/* Normalize columns of D */
/*-------------------------*/
bool KSVD::normcols() {
// Calculating ==> 1./sqrt(sum(D.*D))
if (Algorithms::Reduction::reduce2D_Sum_SQUAREDelements(
this->deviceMemory.get(DeviceSpace::D_ARRAY),
Globals::rowsD, Globals::colsD,
this->deviceMemory.get(DeviceSpace::TEMP_1_BY_COLSD),
Globals::TPB_rowsX
) == false) {
return false;
}
// D = D*spdiag( 1./sqrt(sum(D.*D)) ) ==> D = D*spdiag(TEMP_1_BY_COLSD)
diagX_Mul(
this->deviceMemory.get(DeviceSpace::D_ARRAY),
this->deviceMemory.get(DeviceSpace::TEMP_ROWSD_BY_COLSD),
this->deviceMemory.get(DeviceSpace::TEMP_1_BY_COLSD),
Globals::rowsD * Globals::colsD, Globals::rowsD);
return true;
}
/*-------------------------*/
/* Check if error occurred */
/*-------------------------*/
bool KSVD::isValid() {
return(CudaAlgorithm::isValid());
}
///////////////////////////////////////////////////
//////////// HELPER FUNCTIONS ////////////////
///////////////////////////////////////////////////
/************************************
* DECLARATIONS *
* ____________ *
************************************/
__global__
void diagMul(datatype*,datatype*, datatype*, int, int);
__global__
void use_count_kernel(datatype*, datatype*, unsigned int, unsigned int);
__global__
void replaceDj(datatype*, datatype*, datatype*, unsigned int);
__global__
void init(datatype*, datatype*, datatype*, datatype*, unsigned int, unsigned int);
/************************************
* WRAPPERS *
* ________ *
************************************/
/*---------------------------------*/
/* OPERATION: */
/* out = in*diag(coeff) */
/*---------------------------------*/
inline void diagX_Mul(datatype* in, datatype* out, datatype* coeff, int size, int rows) {
dim3 block(1024);
dim3 grid((size + block.x - 1) / block.x);
diagMul <<< grid, block >>> (in, out, coeff, size, rows);
}
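/*-----------------------------------*/
/* Illustrative usage of diagX_Mul */
/* (not part of the original KSVD */
/* code; assumes `datatype` is a */
/* floating-point type, as used */
/* elsewhere in this file). */
/*-----------------------------------*/
inline void diagX_Mul_example() {
	const int rows = 2, cols = 2, size = rows * cols;
	const datatype h_in[4] = { 3, 4, 0, 2 };	// columns (3,4) and (0,2)
	const datatype h_coeff[2] = { datatype(0.2), datatype(0.5) };	// 1/||col||
	datatype *d_in, *d_out, *d_coeff;
	cudaMalloc(&d_in, size * sizeof(datatype));
	cudaMalloc(&d_out, size * sizeof(datatype));
	cudaMalloc(&d_coeff, cols * sizeof(datatype));
	cudaMemcpy(d_in, h_in, size * sizeof(datatype), cudaMemcpyHostToDevice);
	cudaMemcpy(d_coeff, h_coeff, cols * sizeof(datatype), cudaMemcpyHostToDevice);
	diagX_Mul(d_in, d_out, d_coeff, size, rows);	// out = in*diag(coeff)
	// Expected result (column-major): (0.6, 0.8, 0, 1).
	cudaFree(d_in); cudaFree(d_out); cudaFree(d_coeff);
}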
/*------------------------------*/
/* OPERATION: */
/* */
/* usecount = */
/* sum(abs(Gamma)>1e-7, 2); */
/* */
/*------------------------------*/
inline bool use_count(
datatype* gamma, datatype* counters,
unsigned int colsD, unsigned int colsX) {
// Initialize counters to zero
if (cudaMemset(counters, 0, colsD * sizeof(datatype)) != cudaSuccess) {
return false;
}
// Once again we assume colsD <= 32
dim3 block(16, colsD); // => max. 512 threads per block
dim3 grid((colsX + block.x - 1) / block.x);
use_count_kernel << < grid, block >> > (gamma, counters, colsD, colsX);
return true;
}
/*---------------------------------*/
/* OPERATION: */
/* D(:,j) = X(:,unused_sigs(i)) */
/* / norm(X(:,unused_sigs(i))) */
/*---------------------------------*/
inline void replace(
datatype* Dj, datatype* X, datatype* G,
unsigned int size, unsigned int recommended) {
replaceDj << < 1, recommended >> > (Dj, X, G, size);
}
/*-----------------------------------*/
/* OPERATIONS: */
/* replaced_atoms = zeros(dictsize) */
/* unused_sigs = 1:size(data,2); */
/*-----------------------------------*/
inline void initialize(
datatype* un, datatype* rep, datatype* counter, datatype* bitmap,
unsigned int N, unsigned int colsD) {
dim3 block(1024);
dim3 grid((N + block.x - 1) / block.x);
init << <grid, block>> > (un, rep, counter, bitmap, N, colsD);
}
/************************************
* KERNELS *
* _______ *
************************************/
/*---------------------------*/
/* This kernel multiplies a */
/* vector transformed into a */
/* diagonal matrix with some */
/* other matrix. */
/*---------------------------*/
__global__
void diagMul(datatype* in, datatype* out, datatype* coeff, int size, int rows) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
int col = index / rows;
out[index] = in[index] * coeff[col];
}
}
/*-----------------------------------*/
/* This kernel uses an array of */
/* counters to simultaneously count */
/* elements of every row of input */
/* that satisfy a certain condition. */
/*-----------------------------------*/
__global__ void use_count_kernel(
datatype* gamma, datatype* counters,
unsigned int colsD, unsigned int colsX) {
// threadIdx.x = my column
// threadIdx.y = my row
unsigned int column = blockIdx.x * blockDim.x + threadIdx.x;
if (column < colsX) {
if (SQR((gamma + column*colsD)[threadIdx.y]) > 1e-7) {
atomicAdd((unsigned int*)(counters + threadIdx.y), 1);
}
}
}
/*---------------------------------*/
/* This kernel performs the simple */
/* task of: */
/* D(:,j) = X(:,i) / norm(X(:,i)) */
/*---------------------------------*/
__global__ void replaceDj(
datatype* Dj, datatype* X, datatype* G,
unsigned int size) {
if (!(*G)) {
// Our atom does not need replacement
return;
}
if (threadIdx.x < size) {
X += (unsigned int)(*(G + 1))*size;
Dj[threadIdx.x] = X[threadIdx.x] / (*(G + 2));
}
}
/*-----------------------------------*/
/* This kernel is the equivalent of: */
/* */
/* replaced_atoms = zeros(dictsize) */
/* unused_sigs = 1:size(data,2) */
/*-----------------------------------*/
__global__ void init(
datatype* un, datatype* rep, datatype* counter, datatype* bitmap,
unsigned int N, unsigned int N2) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N) {
un[index] = index;
bitmap[index] = 1.0;
if (index < N2) {
rep[index] = 0.0;
}
if (index == 0) {
*counter = 0.0;
}
}
} |
af382f2f326a641f0cbadfb26152bafd1bef885e.hip | // !!! This is a file automatically generated by hipify!!!
#include <cfloat>
#include <stdio.h>
#include "assert.h"
#include "hip/hip_runtime.h"
#include "utility/src/print.h"
#include "utility/src/Msg.h"
#include <hipcub/hipcub.hpp>
DREAMPLACE_BEGIN_NAMESPACE
template <typename T>
__global__ void permuteGrad(
const T* grad_out_x,
const T* grad_out_y,
const int* flat_node2pin_map,
const int num_pins,
T* grad_out_x_perm,
T* grad_out_y_perm
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_pins)
{
int pin_id = flat_node2pin_map[i];
grad_out_x_perm[i] = grad_out_x[pin_id];
grad_out_y_perm[i] = grad_out_y[pin_id];
}
}
/// @brief Compute pin position from node position
template <typename T, typename K>
__global__ void computePinPos(
const T* x, const T* y,
const T* pin_offset_x,
const T* pin_offset_y,
const K* pin2node_map,
const int num_pins,
T* pin_x, T* pin_y
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_pins)
{
int node_id = pin2node_map[i];
pin_x[i] = pin_offset_x[i] + x[node_id];
pin_y[i] = pin_offset_y[i] + y[node_id];
}
}
template <typename T>
int computePinPosCudaSegmentLauncher(
const T* x, const T* y,
const T* pin_offset_x,
const T* pin_offset_y,
const long* pin2node_map,
const int* flat_node2pin_map,
const int* flat_node2pin_start_map,
int num_pins,
T* pin_x, T* pin_y
)
{
int thread_count = 512;
hipLaunchKernelGGL(( computePinPos), dim3((num_pins+thread_count-1) / thread_count), dim3(thread_count), 0, 0, x, y, pin_offset_x, pin_offset_y, pin2node_map, num_pins, pin_x, pin_y);
return 0;
}
template <typename T>
int computePinPosGradCudaSegmentLauncher(
const T* grad_out_x, const T* grad_out_y,
const T* x, const T* y,
const T* pin_offset_x,
const T* pin_offset_y,
const long* pin2node_map,
const int* flat_node2pin_map,
const int* flat_node2pin_start_map,
int num_nodes,
int num_pins,
T* grad_x, T* grad_y,
    T* grad_perm_buf ///< 2*num_pins, buffer to store the permuted gradients
)
{
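    // Pin gradients arrive ordered by pin id; permute them into node-major
    // order via flat_node2pin_map, then sum each node's contiguous segment
    // (delimited by flat_node2pin_start_map) with a segmented reduction to
    // obtain the per-node gradients.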
int thread_count = 512;
T* grad_out_x_perm = grad_perm_buf;
T* grad_out_y_perm = grad_perm_buf + num_pins;
hipLaunchKernelGGL(( permuteGrad), dim3((num_pins+thread_count-1) / thread_count), dim3(thread_count), 0, 0, grad_out_x, grad_out_y, flat_node2pin_map, num_pins, grad_out_x_perm, grad_out_y_perm);
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
    // determine the required temp storage size (d_temp_storage == NULL), then allocate it
hipcub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, grad_out_x_perm, grad_x,
num_nodes, flat_node2pin_start_map, flat_node2pin_start_map + 1);
hipMalloc(&d_temp_storage, temp_storage_bytes);
// for x
hipcub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, grad_out_x_perm, grad_x,
num_nodes, flat_node2pin_start_map, flat_node2pin_start_map + 1);
// for y
hipcub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, grad_out_y_perm, grad_y,
num_nodes, flat_node2pin_start_map, flat_node2pin_start_map + 1);
hipFree(d_temp_storage);
return 0;
}
#define REGISTER_KERNEL_LAUNCHER(T) \
int instantiateComputePinPosCudaSegmentLauncher(\
const T* x, const T* y, \
const T* pin_offset_x, \
const T* pin_offset_y, \
const long* pin2node_map, \
const int* flat_node2pin_map, \
const int* flat_node2pin_start_map, \
int num_pins, \
T* pin_x, T* pin_y \
)\
{\
return computePinPosCudaSegmentLauncher(\
x, y, \
pin_offset_x, \
pin_offset_y, \
pin2node_map, \
flat_node2pin_map, \
flat_node2pin_start_map, \
num_pins, \
pin_x, pin_y \
);\
} \
\
int instantiateComputePinPosGradCudaSegmentLauncher(\
const T* grad_out_x, const T* grad_out_y, \
const T* x, const T* y, \
const T* pin_offset_x, \
const T* pin_offset_y, \
const long* pin2node_map, \
const int* flat_node2pin_map, \
const int* flat_node2pin_start_map, \
int num_nodes, \
int num_pins, \
T* grad_x, T* grad_y, \
T* grad_perm_buf \
)\
{\
return computePinPosGradCudaSegmentLauncher(\
grad_out_x, grad_out_y, \
x, y, \
pin_offset_x, \
pin_offset_y, \
pin2node_map, \
flat_node2pin_map, \
flat_node2pin_start_map, \
num_nodes, \
num_pins, \
grad_x, grad_y, \
grad_perm_buf \
);\
}
REGISTER_KERNEL_LAUNCHER(float);
REGISTER_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
| af382f2f326a641f0cbadfb26152bafd1bef885e.cu | #include <cfloat>
#include <stdio.h>
#include "assert.h"
#include "cuda_runtime.h"
#include "utility/src/print.h"
#include "utility/src/Msg.h"
#include <cub/cub.cuh>
DREAMPLACE_BEGIN_NAMESPACE
template <typename T>
__global__ void permuteGrad(
const T* grad_out_x,
const T* grad_out_y,
const int* flat_node2pin_map,
const int num_pins,
T* grad_out_x_perm,
T* grad_out_y_perm
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_pins)
{
int pin_id = flat_node2pin_map[i];
grad_out_x_perm[i] = grad_out_x[pin_id];
grad_out_y_perm[i] = grad_out_y[pin_id];
}
}
/// @brief Compute pin position from node position
template <typename T, typename K>
__global__ void computePinPos(
const T* x, const T* y,
const T* pin_offset_x,
const T* pin_offset_y,
const K* pin2node_map,
const int num_pins,
T* pin_x, T* pin_y
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_pins)
{
int node_id = pin2node_map[i];
pin_x[i] = pin_offset_x[i] + x[node_id];
pin_y[i] = pin_offset_y[i] + y[node_id];
}
}
template <typename T>
int computePinPosCudaSegmentLauncher(
const T* x, const T* y,
const T* pin_offset_x,
const T* pin_offset_y,
const long* pin2node_map,
const int* flat_node2pin_map,
const int* flat_node2pin_start_map,
int num_pins,
T* pin_x, T* pin_y
)
{
int thread_count = 512;
computePinPos<<<(num_pins+thread_count-1) / thread_count, thread_count>>>(x, y, pin_offset_x, pin_offset_y, pin2node_map, num_pins, pin_x, pin_y);
return 0;
}
template <typename T>
int computePinPosGradCudaSegmentLauncher(
const T* grad_out_x, const T* grad_out_y,
const T* x, const T* y,
const T* pin_offset_x,
const T* pin_offset_y,
const long* pin2node_map,
const int* flat_node2pin_map,
const int* flat_node2pin_start_map,
int num_nodes,
int num_pins,
T* grad_x, T* grad_y,
    T* grad_perm_buf ///< 2*num_pins, buffer to store the permuted gradients
)
{
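    // Pin gradients arrive ordered by pin id; permute them into node-major
    // order via flat_node2pin_map, then sum each node's contiguous segment
    // (delimited by flat_node2pin_start_map) with a segmented reduction to
    // obtain the per-node gradients.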
int thread_count = 512;
T* grad_out_x_perm = grad_perm_buf;
T* grad_out_y_perm = grad_perm_buf + num_pins;
permuteGrad<<<(num_pins+thread_count-1) / thread_count, thread_count>>>(grad_out_x, grad_out_y, flat_node2pin_map, num_pins, grad_out_x_perm, grad_out_y_perm);
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
    // determine the required temp storage size (d_temp_storage == NULL), then allocate it
cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, grad_out_x_perm, grad_x,
num_nodes, flat_node2pin_start_map, flat_node2pin_start_map + 1);
cudaMalloc(&d_temp_storage, temp_storage_bytes);
// for x
cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, grad_out_x_perm, grad_x,
num_nodes, flat_node2pin_start_map, flat_node2pin_start_map + 1);
// for y
cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, grad_out_y_perm, grad_y,
num_nodes, flat_node2pin_start_map, flat_node2pin_start_map + 1);
cudaFree(d_temp_storage);
return 0;
}
#define REGISTER_KERNEL_LAUNCHER(T) \
int instantiateComputePinPosCudaSegmentLauncher(\
const T* x, const T* y, \
const T* pin_offset_x, \
const T* pin_offset_y, \
const long* pin2node_map, \
const int* flat_node2pin_map, \
const int* flat_node2pin_start_map, \
int num_pins, \
T* pin_x, T* pin_y \
)\
{\
return computePinPosCudaSegmentLauncher(\
x, y, \
pin_offset_x, \
pin_offset_y, \
pin2node_map, \
flat_node2pin_map, \
flat_node2pin_start_map, \
num_pins, \
pin_x, pin_y \
);\
} \
\
int instantiateComputePinPosGradCudaSegmentLauncher(\
const T* grad_out_x, const T* grad_out_y, \
const T* x, const T* y, \
const T* pin_offset_x, \
const T* pin_offset_y, \
const long* pin2node_map, \
const int* flat_node2pin_map, \
const int* flat_node2pin_start_map, \
int num_nodes, \
int num_pins, \
T* grad_x, T* grad_y, \
T* grad_perm_buf \
)\
{\
return computePinPosGradCudaSegmentLauncher(\
grad_out_x, grad_out_y, \
x, y, \
pin_offset_x, \
pin_offset_y, \
pin2node_map, \
flat_node2pin_map, \
flat_node2pin_start_map, \
num_nodes, \
num_pins, \
grad_x, grad_y, \
grad_perm_buf \
);\
}
REGISTER_KERNEL_LAUNCHER(float);
REGISTER_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
|
8a101936e3c6db8f25e2b02131d415999998a821.hip | // !!! This is a file automatically generated by hipify!!!
// @file datamex.cu
// @brief Basic data structures (MEX support)
// @author Andrea Vedaldi
/*
Copyright (C) 2015-16 Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "datamex.hpp"
#if ENABLE_GPU
#include "datacu.hpp"
#endif
#include <assert.h>
#ifndef NDEBUG
#include<iostream>
#endif
#include "impl/copy.hpp"
using namespace vl ;
using namespace vl::impl ;
/* ---------------------------------------------------------------- */
/* MexContext */
/* ---------------------------------------------------------------- */
vl::MexContext::MexContext()
: Context()
#if ENABLE_GPU
, gpuIsInitialized(false)
, canary(NULL)
#endif
{ }
vl::MexContext::~MexContext()
{
#if ENABLE_GPU
  // so that ~Context does not crash if MATLAB resets the GPU in the meantime
validateGpu() ;
#endif
}
/* ---------------------------------------------------------------- */
/* GPU management */
/* ---------------------------------------------------------------- */
#if ENABLE_GPU
// Do nothing if the GPU is not initialized, otherwise invalidate it
// if needed
vl::ErrorCode
MexContext::validateGpu()
{
if (!gpuIsInitialized) { return vl::VLE_Success ; }
gpuIsInitialized = mxGPUIsValidGPUData(canary) ;
if (!gpuIsInitialized) {
#ifndef NDEBUG
std::cout<<"MexContext:: GPU reset detected; invalidating the GPU state"<<std::endl ;
#endif
mxDestroyArray(canary) ;
canary = NULL ;
Context::invalidateGpu() ;
}
return vl::VLE_Success ;
}
// Initialize GPU; also make sure that it was not reset by MATLAB
vl::ErrorCode
vl::MexContext::initGpu()
{
validateGpu() ;
if (!gpuIsInitialized) {
mwSize dims = 1 ;
mxInitGPU() ;
// todo: can mxGPUCreateGPUArray return NULL ?
mxGPUArray * gpuArray =
mxGPUCreateGPUArray(1,&dims,mxINT8_CLASS,mxREAL,MX_GPU_DO_NOT_INITIALIZE) ;
canary = mxGPUCreateMxArrayOnGPU(gpuArray) ;
mexMakeArrayPersistent(canary) ;
mxGPUDestroyGPUArray(gpuArray) ;
gpuIsInitialized = true ;
}
return vl::VLE_Success ;
}
#endif
/* ---------------------------------------------------------------- */
/* MexTensor */
/* ---------------------------------------------------------------- */
/*
The MexTensor class helps handle MATLAB CPU and GPU arrays.
The design is somewhat awkward in order to match MATLAB assumptions.
The class can either:
- wrap an existing mxArray (or mxArray + mxGPUArray)
- or create a new mxArray (or mxArray + mxGPUArray)
In the last case, the array is released when the destructor is
called. However, this would normally interfere with MATLAB
automatic garbage collection upon raising an exception (which
can happen using mexErrMsgTxt() or, implicitly, when an array
creation function cannot complete, for example due to a memory error).
Therefore the constructors make the allocated memory persistent. C++
guarantees that the arrays are freed upon error in the destructors.
Note that, upon creating an array, errors such as running out of
CPU/GPU memory can occur. In this case, MATLAB throws an error
and quits the MEX file (either implicitly or because we call
mexErrMsgTxt()). Hence constructors always complete with a well
defined object.
*/
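/*
 A minimal usage sketch (not part of the original file; the MEX output cell
 `out` and the TensorShape variable `shape` are hypothetical):

   vl::MexTensor result(context) ;
   result.initWithZeros(vl::VLDT_GPU, vl::VLDT_Float, shape) ;
   // ... fill the tensor's memory ...
   out[0] = result.relinquish() ; // MATLAB now owns the underlying mxArray

 Without relinquish(), ~MexTensor() calls clear(), which destroys any array
 the object still owns.
*/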
/* ---------------------------------------------------------------- */
/* Constructing, clearing, destroying */
/* ---------------------------------------------------------------- */
vl::MexTensor::MexTensor(MexContext & context)
: context(context),
Tensor(),
array(NULL),
isArrayOwner(false)
#if ENABLE_GPU
, gpuArray(NULL)
#endif
{ }
mxArray *
vl::MexTensor::relinquish()
{
if (isArrayOwner) {
isArrayOwner = false ;
return (mxArray*) array ;
} else {
// this is because we may be encapsulating an input argument
// and we may be trying to return it
// we should probably use the undocumented
// extern mxArray *mxCreateSharedDataCopy(const mxArray *pr);
return mxDuplicateArray(array) ;
}
}
void
vl::MexTensor::makePersistent()
{
assert(isArrayOwner) ;
mexMakeArrayPersistent((mxArray*)array) ;
}
void
vl::MexTensor::clear()
{
#if ENABLE_GPU
if (gpuArray) {
mxGPUDestroyGPUArray(gpuArray) ;
gpuArray = NULL ;
}
#endif
if (isArrayOwner) {
if (array) {
mxDestroyArray((mxArray*)array) ;
array = NULL ;
}
isArrayOwner = false ;
}
memory = NULL ;
memorySize = 0 ;
deviceType = vl::VLDT_CPU ;
vl::TensorShape::clear() ;
}
vl::MexTensor::~MexTensor()
{
clear() ;
}
/* ---------------------------------------------------------------- */
/* init with optional zero filling */
/* ---------------------------------------------------------------- */
vl::ErrorCode
vl::MexTensor::initHelper(DeviceType newDeviceType, DataType newDataType,
TensorShape const & newShape, bool fillWithZeros)
{
clear() ;
// assign dimensions
mwSize dimensions [VL_TENSOR_SHAPE_MAX_NUM_DIMENSIONS] ;
for (int k = 0 ; k < newShape.getNumDimensions() ; ++k) {
dimensions[k] = (mwSize)newShape.getDimension(k) ;
}
// compute the size in bytes
mwSize newMemorySize = newShape.getNumElements() ;
mxClassID classID ;
switch (newDataType) {
case VLDT_Float:
newMemorySize *= sizeof(DataTypeTraits<VLDT_Float>::type) ;
classID = mxSINGLE_CLASS ;
break ;
#ifdef ENABLE_DOUBLE
case VLDT_Double:
newMemorySize *= sizeof(DataTypeTraits<VLDT_Double>::type) ;
classID = mxDOUBLE_CLASS ;
break ;
#endif
default:
abort() ;
}
// allocate the memory on CPU or GPU
void * newMemory = NULL ;
mxArray * newArray = NULL ;
#if ENABLE_GPU
mxGPUArray* newGpuArray = NULL ;
#endif
if (newDeviceType == vl::VLDT_CPU) {
if (fillWithZeros) {
newArray = mxCreateNumericArray(4, dimensions, classID, mxREAL) ;
newMemory = mxGetData(newArray) ;
} else {
mwSize dimensions_ [1] = {0} ;
newMemory = mxMalloc(newMemorySize) ;
newArray = mxCreateNumericArray(1, dimensions_,
classID,
mxREAL) ;
mxSetData(newArray, newMemory) ;
mxSetDimensions(newArray, dimensions, newShape.getNumDimensions()) ;
}
}
#ifdef ENABLE_GPU
else {
context.initGpu() ;
newGpuArray = mxGPUCreateGPUArray(newShape.getNumDimensions(), dimensions,
classID,
mxREAL,
fillWithZeros ? MX_GPU_INITIALIZE_VALUES : MX_GPU_DO_NOT_INITIALIZE) ;
newArray = mxGPUCreateMxArrayOnGPU(newGpuArray) ;
newMemory = mxGPUGetData(newGpuArray) ;
}
#else
else {
abort() ;
}
#endif
  //mexMakeArrayPersistent(newArray) ; // avoid double free with MATLAB garbage collector upon error
TensorShape::operator=(newShape) ;
deviceType = newDeviceType ;
dataType = newDataType ;
memory = newMemory ;
memorySize = newMemorySize ;
array = newArray ;
isArrayOwner = true ;
#if ENABLE_GPU
gpuArray = newGpuArray ;
#endif
return vl::VLE_Success ;
}
/* ---------------------------------------------------------------- */
/* init*() */
/* ---------------------------------------------------------------- */
vl::ErrorCode
vl::MexTensor::init(DeviceType newDeviceType,
DataType newDataType,
TensorShape const & newShape)
{
return initHelper(newDeviceType, newDataType, newShape, false) ;
}
vl::ErrorCode
vl::MexTensor::initWithZeros(DeviceType newDeviceType,
DataType newDataType,
TensorShape const & newShape)
{
return initHelper(newDeviceType, newDataType, newShape, true) ;
}
vl::ErrorCode
vl::MexTensor::initWithValue(DeviceType newDeviceType,
DataType newDataType,
TensorShape const & newShape,
double value)
{
if (value == 0) {
return initHelper(newDeviceType, newDataType, newShape, true) ;
} else {
vl::ErrorCode error = initHelper(newDeviceType, newDataType, newShape, false) ;
if (error != VLE_Success) { return error ; }
size_t const n = getNumElements() ;
if (newDeviceType == vl::VLDT_CPU) {
switch (newDataType) {
case VLDT_Float: error = operations<vl::VLDT_CPU,float>::fill((float*)memory, n, (float)value) ; break ;
#ifdef ENABLE_DOUBLE
case VLDT_Double: error = operations<vl::VLDT_CPU,double>::fill((double*)memory, n, (double)value) ; break ;
#endif
default: abort() ;
}
}
#ifdef ENABLE_GPU
else {
switch (newDataType) {
case VLDT_Float: error = operations<vl::VLDT_GPU,float>::fill((float*)memory, n, (float)value) ; break ;
#ifdef ENABLE_DOUBLE
case VLDT_Double: error = operations<vl::VLDT_GPU,double>::fill((double*)memory, n, (double)value) ; break ;
#endif
default: abort() ;
}
if (error == VLE_Cuda) {
hipError_t error = hipGetLastError() ;
clear() ;
mexErrMsgTxt((std::string("MexTensor: fill [CUDA error: ")
+ hipGetErrorString(error)
+ "]"
).c_str()) ;
}
}
#endif
}
return vl::VLE_Success ;
}
/* ---------------------------------------------------------------- */
/* init by wrapping a given array */
/* ---------------------------------------------------------------- */
vl::ErrorCode
vl::MexTensor::init(mxArray const * array_)
{
clear() ;
if (array_ == NULL) { return vl::VLE_Success ; } // empty
vl::DeviceType newDeviceType ;
vl::DataType newDataType ;
void const * newMemory = NULL ;
mxArray * newArray = (mxArray*)array_ ;
#if ENABLE_GPU
mxGPUArray* newGpuArray = NULL ;
#endif
mwSize const * newDimensions ;
mwSize newNumDimensions ;
mxClassID newClassID ;
#ifdef ENABLE_GPU
context.initGpu() ;
if (mxIsGPUArray(array_)) {
newDeviceType = vl::VLDT_GPU ;
newGpuArray = (mxGPUArray*) mxGPUCreateFromMxArray(newArray) ;
newMemory = mxGPUGetDataReadOnly(newGpuArray) ;
newClassID = mxGPUGetClassID(newGpuArray) ;
newDimensions = mxGPUGetDimensions(newGpuArray) ;
newNumDimensions = mxGPUGetNumberOfDimensions(newGpuArray) ;
} else
#endif
{
if (!mxIsNumeric(newArray)) {
mexErrMsgTxt("An input is not a numeric array (or GPU support not compiled).") ;
}
newDeviceType = VLDT_CPU ;
newMemory = mxGetData(newArray) ;
newClassID = mxGetClassID(newArray) ;
newDimensions = mxGetDimensions(newArray) ;
newNumDimensions = mxGetNumberOfDimensions(newArray) ;
}
if (newNumDimensions >= VL_TENSOR_SHAPE_MAX_NUM_DIMENSIONS) {
#if ENABLE_GPU
if (newGpuArray) {
mxGPUDestroyGPUArray(newGpuArray) ;
newGpuArray = NULL ;
}
#endif
mexErrMsgTxt("An input has more than the maximum number of allowed dimensions.") ;
}
numDimensions = newNumDimensions ;
for (int k = 0 ; k < numDimensions ; ++k) {
setDimension(k, newDimensions[k]) ;
}
size_t newMemorySize = getNumElements() ;
switch (newClassID) {
case mxSINGLE_CLASS:
newDataType = VLDT_Float ;
newMemorySize *= sizeof(DataTypeTraits<VLDT_Float>::type) ;
break ;
#ifdef ENABLE_DOUBLE
case mxDOUBLE_CLASS:
newDataType = VLDT_Double ;
newMemorySize *= sizeof(DataTypeTraits<VLDT_Double>::type) ;
break ;
#endif
default:
if (isEmpty()) {
newDataType = VLDT_Float ;
newMemorySize = 0 ;
break ;
}
#ifdef ENABLE_DOUBLE
mexErrMsgTxt("An input is neither SINGLE or DOUBLE nor it is empty.") ;
#else
mexErrMsgTxt("An input is neither SINGLE nor empty.") ;
#endif
break ;
}
deviceType = newDeviceType ;
dataType = newDataType ;
memory = (void*)newMemory ;
memorySize = newMemorySize ;
array = newArray ;
isArrayOwner = false ;
#if ENABLE_GPU
gpuArray = newGpuArray ;
#endif
return vl::VLE_Success ;
}
size_t
vl::MexTensor::getMemorySize() const
{
return memorySize ;
}
void vl::print(char const * str, vl::MexTensor const & tensor)
{
size_t size = tensor.getMemorySize() ;
double scaled ;
size_t const * dimensions = tensor.getDimensions() ;
const char * units ;
const char * type ;
if (size < 1024) {
scaled = size ;
units = "B" ;
} else if (size < 1024*1024) {
scaled = size / 1024.0 ;
units = "KB" ;
} else if (size < 1024*1024*1024) {
scaled = size / (1024.0 * 1024.0) ;
units = "MB" ;
} else {
scaled = size / (1024.0 * 1024.0 * 1024.0) ;
units = "GB" ;
}
const char * dev = "" ;
switch (tensor.getDeviceType()) {
case vl::VLDT_CPU : dev = "CPU" ; break ;
case vl::VLDT_GPU : dev = "GPU" ; break ;
}
switch (tensor.getDataType()) {
case vl::VLDT_Float: type = "float" ; break ;
case vl::VLDT_Double: type = "double" ; break ;
case vl::VLDT_Char: type = "char" ; break ;
default: type = "uknown type" ;
}
mexPrintf("%s[", str) ;
for (int k = 0 ; k < tensor.getNumDimensions() ; ++k) {
mexPrintf("%d ", dimensions[k]) ;
}
mexPrintf("| %s %.1f%s %s]\n",
type,
scaled,
units,
dev);
}
| 8a101936e3c6db8f25e2b02131d415999998a821.cu | // @file datamex.cu
// @brief Basic data structures (MEX support)
// @author Andrea Vedaldi
/*
Copyright (C) 2015-16 Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "datamex.hpp"
#if ENABLE_GPU
#include "datacu.hpp"
#endif
#include <assert.h>
#ifndef NDEBUG
#include<iostream>
#endif
#include "impl/copy.hpp"
using namespace vl ;
using namespace vl::impl ;
/* ---------------------------------------------------------------- */
/* MexContext */
/* ---------------------------------------------------------------- */
vl::MexContext::MexContext()
: Context()
#if ENABLE_GPU
, gpuIsInitialized(false)
, canary(NULL)
#endif
{ }
vl::MexContext::~MexContext()
{
#if ENABLE_GPU
  // so that ~Context does not crash if MATLAB resets the GPU in the meantime
validateGpu() ;
#endif
}
/* ---------------------------------------------------------------- */
/* GPU management */
/* ---------------------------------------------------------------- */
#if ENABLE_GPU
// Do nothing if the GPU is not initialized, otherwise invalidate it
// if needed
vl::ErrorCode
MexContext::validateGpu()
{
if (!gpuIsInitialized) { return vl::VLE_Success ; }
gpuIsInitialized = mxGPUIsValidGPUData(canary) ;
if (!gpuIsInitialized) {
#ifndef NDEBUG
std::cout<<"MexContext:: GPU reset detected; invalidating the GPU state"<<std::endl ;
#endif
mxDestroyArray(canary) ;
canary = NULL ;
Context::invalidateGpu() ;
}
return vl::VLE_Success ;
}
// Initialize GPU; also make sure that it was not reset by MATLAB
vl::ErrorCode
vl::MexContext::initGpu()
{
validateGpu() ;
if (!gpuIsInitialized) {
mwSize dims = 1 ;
mxInitGPU() ;
// todo: can mxGPUCreateGPUArray return NULL ?
mxGPUArray * gpuArray =
mxGPUCreateGPUArray(1,&dims,mxINT8_CLASS,mxREAL,MX_GPU_DO_NOT_INITIALIZE) ;
canary = mxGPUCreateMxArrayOnGPU(gpuArray) ;
mexMakeArrayPersistent(canary) ;
mxGPUDestroyGPUArray(gpuArray) ;
gpuIsInitialized = true ;
}
return vl::VLE_Success ;
}
#endif
/* ---------------------------------------------------------------- */
/* MexTensor */
/* ---------------------------------------------------------------- */
/*
The MexTensor class helps handle MATLAB CPU and GPU arrays.
The design is somewhat awkward in order to match MATLAB assumptions.
The class can either:
- wrap an existing mxArray (or mxArray + mxGPUArray)
- or create a new mxArray (or mxArray + mxGPUArray)
In the last case, the array is released when the destructor is
called. However, this would normally interfere with MATLAB
automatic garbage collection upon raising an exception (which
can happen using mexErrMsgTxt() or, implicitly, when an array
creation function cannot complete, for example due to a memory error).
Therefore the constructors make the allocated memory persistent. C++
guarantees that the arrays are freed upon error in the destructors.
Note that, upon creating an array, errors such as running out of
CPU/GPU memory can occur. In this case, MATLAB throws an error
and quits the MEX file (either implicitly or because we call
mexErrMsgTxt()). Hence constructors always complete with a well
defined object.
*/
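/*
 A minimal usage sketch (not part of the original file; the MEX output cell
 `out` and the TensorShape variable `shape` are hypothetical):

   vl::MexTensor result(context) ;
   result.initWithZeros(vl::VLDT_GPU, vl::VLDT_Float, shape) ;
   // ... fill the tensor's memory ...
   out[0] = result.relinquish() ; // MATLAB now owns the underlying mxArray

 Without relinquish(), ~MexTensor() calls clear(), which destroys any array
 the object still owns.
*/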
/* ---------------------------------------------------------------- */
/* Constructing, clearing, destroying */
/* ---------------------------------------------------------------- */
vl::MexTensor::MexTensor(MexContext & context)
: context(context),
Tensor(),
array(NULL),
isArrayOwner(false)
#if ENABLE_GPU
, gpuArray(NULL)
#endif
{ }
mxArray *
vl::MexTensor::relinquish()
{
if (isArrayOwner) {
isArrayOwner = false ;
return (mxArray*) array ;
} else {
// this is because we may be encapsulating an input argument
// and we may be trying to return it
// we should probably use the undocumented
// extern mxArray *mxCreateSharedDataCopy(const mxArray *pr);
return mxDuplicateArray(array) ;
}
}
void
vl::MexTensor::makePersistent()
{
assert(isArrayOwner) ;
mexMakeArrayPersistent((mxArray*)array) ;
}
void
vl::MexTensor::clear()
{
#if ENABLE_GPU
if (gpuArray) {
mxGPUDestroyGPUArray(gpuArray) ;
gpuArray = NULL ;
}
#endif
if (isArrayOwner) {
if (array) {
mxDestroyArray((mxArray*)array) ;
array = NULL ;
}
isArrayOwner = false ;
}
memory = NULL ;
memorySize = 0 ;
deviceType = vl::VLDT_CPU ;
vl::TensorShape::clear() ;
}
vl::MexTensor::~MexTensor()
{
clear() ;
}
/* ---------------------------------------------------------------- */
/* init with optional zero filling */
/* ---------------------------------------------------------------- */
vl::ErrorCode
vl::MexTensor::initHelper(DeviceType newDeviceType, DataType newDataType,
TensorShape const & newShape, bool fillWithZeros)
{
clear() ;
// assign dimensions
mwSize dimensions [VL_TENSOR_SHAPE_MAX_NUM_DIMENSIONS] ;
for (int k = 0 ; k < newShape.getNumDimensions() ; ++k) {
dimensions[k] = (mwSize)newShape.getDimension(k) ;
}
// compute the size in bytes
mwSize newMemorySize = newShape.getNumElements() ;
mxClassID classID ;
switch (newDataType) {
case VLDT_Float:
newMemorySize *= sizeof(DataTypeTraits<VLDT_Float>::type) ;
classID = mxSINGLE_CLASS ;
break ;
#ifdef ENABLE_DOUBLE
case VLDT_Double:
newMemorySize *= sizeof(DataTypeTraits<VLDT_Double>::type) ;
classID = mxDOUBLE_CLASS ;
break ;
#endif
default:
abort() ;
}
// allocate the memory on CPU or GPU
void * newMemory = NULL ;
mxArray * newArray = NULL ;
#if ENABLE_GPU
mxGPUArray* newGpuArray = NULL ;
#endif
if (newDeviceType == vl::VLDT_CPU) {
if (fillWithZeros) {
newArray = mxCreateNumericArray(4, dimensions, classID, mxREAL) ;
newMemory = mxGetData(newArray) ;
} else {
mwSize dimensions_ [1] = {0} ;
newMemory = mxMalloc(newMemorySize) ;
newArray = mxCreateNumericArray(1, dimensions_,
classID,
mxREAL) ;
mxSetData(newArray, newMemory) ;
mxSetDimensions(newArray, dimensions, newShape.getNumDimensions()) ;
}
}
#ifdef ENABLE_GPU
else {
context.initGpu() ;
newGpuArray = mxGPUCreateGPUArray(newShape.getNumDimensions(), dimensions,
classID,
mxREAL,
fillWithZeros ? MX_GPU_INITIALIZE_VALUES : MX_GPU_DO_NOT_INITIALIZE) ;
newArray = mxGPUCreateMxArrayOnGPU(newGpuArray) ;
newMemory = mxGPUGetData(newGpuArray) ;
}
#else
else {
abort() ;
}
#endif
  //mexMakeArrayPersistent(newArray) ; // avoid double free with MATLAB garbage collector upon error
TensorShape::operator=(newShape) ;
deviceType = newDeviceType ;
dataType = newDataType ;
memory = newMemory ;
memorySize = newMemorySize ;
array = newArray ;
isArrayOwner = true ;
#if ENABLE_GPU
gpuArray = newGpuArray ;
#endif
return vl::VLE_Success ;
}
/* ---------------------------------------------------------------- */
/* init*() */
/* ---------------------------------------------------------------- */
vl::ErrorCode
vl::MexTensor::init(DeviceType newDeviceType,
DataType newDataType,
TensorShape const & newShape)
{
return initHelper(newDeviceType, newDataType, newShape, false) ;
}
vl::ErrorCode
vl::MexTensor::initWithZeros(DeviceType newDeviceType,
DataType newDataType,
TensorShape const & newShape)
{
return initHelper(newDeviceType, newDataType, newShape, true) ;
}
vl::ErrorCode
vl::MexTensor::initWithValue(DeviceType newDeviceType,
DataType newDataType,
TensorShape const & newShape,
double value)
{
if (value == 0) {
return initHelper(newDeviceType, newDataType, newShape, true) ;
} else {
vl::ErrorCode error = initHelper(newDeviceType, newDataType, newShape, false) ;
if (error != VLE_Success) { return error ; }
size_t const n = getNumElements() ;
if (newDeviceType == vl::VLDT_CPU) {
switch (newDataType) {
case VLDT_Float: error = operations<vl::VLDT_CPU,float>::fill((float*)memory, n, (float)value) ; break ;
#ifdef ENABLE_DOUBLE
case VLDT_Double: error = operations<vl::VLDT_CPU,double>::fill((double*)memory, n, (double)value) ; break ;
#endif
default: abort() ;
}
}
#ifdef ENABLE_GPU
else {
switch (newDataType) {
case VLDT_Float: error = operations<vl::VLDT_GPU,float>::fill((float*)memory, n, (float)value) ; break ;
#ifdef ENABLE_DOUBLE
case VLDT_Double: error = operations<vl::VLDT_GPU,double>::fill((double*)memory, n, (double)value) ; break ;
#endif
default: abort() ;
}
if (error == VLE_Cuda) {
cudaError_t error = cudaGetLastError() ;
clear() ;
mexErrMsgTxt((std::string("MexTensor: fill [CUDA error: ")
+ cudaGetErrorString(error)
+ "]"
).c_str()) ;
}
}
#endif
}
return vl::VLE_Success ;
}
/* ---------------------------------------------------------------- */
/* init by wrapping a given array */
/* ---------------------------------------------------------------- */
vl::ErrorCode
vl::MexTensor::init(mxArray const * array_)
{
clear() ;
if (array_ == NULL) { return vl::VLE_Success ; } // empty
vl::DeviceType newDeviceType ;
vl::DataType newDataType ;
void const * newMemory = NULL ;
mxArray * newArray = (mxArray*)array_ ;
#if ENABLE_GPU
mxGPUArray* newGpuArray = NULL ;
#endif
mwSize const * newDimensions ;
mwSize newNumDimensions ;
mxClassID newClassID ;
#ifdef ENABLE_GPU
context.initGpu() ;
if (mxIsGPUArray(array_)) {
newDeviceType = vl::VLDT_GPU ;
newGpuArray = (mxGPUArray*) mxGPUCreateFromMxArray(newArray) ;
newMemory = mxGPUGetDataReadOnly(newGpuArray) ;
newClassID = mxGPUGetClassID(newGpuArray) ;
newDimensions = mxGPUGetDimensions(newGpuArray) ;
newNumDimensions = mxGPUGetNumberOfDimensions(newGpuArray) ;
} else
#endif
{
if (!mxIsNumeric(newArray)) {
mexErrMsgTxt("An input is not a numeric array (or GPU support not compiled).") ;
}
newDeviceType = VLDT_CPU ;
newMemory = mxGetData(newArray) ;
newClassID = mxGetClassID(newArray) ;
newDimensions = mxGetDimensions(newArray) ;
newNumDimensions = mxGetNumberOfDimensions(newArray) ;
}
if (newNumDimensions >= VL_TENSOR_SHAPE_MAX_NUM_DIMENSIONS) {
#if ENABLE_GPU
if (newGpuArray) {
mxGPUDestroyGPUArray(newGpuArray) ;
newGpuArray = NULL ;
}
#endif
mexErrMsgTxt("An input has more than the maximum number of allowed dimensions.") ;
}
numDimensions = newNumDimensions ;
for (int k = 0 ; k < numDimensions ; ++k) {
setDimension(k, newDimensions[k]) ;
}
size_t newMemorySize = getNumElements() ;
switch (newClassID) {
case mxSINGLE_CLASS:
newDataType = VLDT_Float ;
newMemorySize *= sizeof(DataTypeTraits<VLDT_Float>::type) ;
break ;
#ifdef ENABLE_DOUBLE
case mxDOUBLE_CLASS:
newDataType = VLDT_Double ;
newMemorySize *= sizeof(DataTypeTraits<VLDT_Double>::type) ;
break ;
#endif
default:
if (isEmpty()) {
newDataType = VLDT_Float ;
newMemorySize = 0 ;
break ;
}
#ifdef ENABLE_DOUBLE
mexErrMsgTxt("An input is neither SINGLE or DOUBLE nor it is empty.") ;
#else
mexErrMsgTxt("An input is neither SINGLE nor empty.") ;
#endif
break ;
}
deviceType = newDeviceType ;
dataType = newDataType ;
memory = (void*)newMemory ;
memorySize = newMemorySize ;
array = newArray ;
isArrayOwner = false ;
#if ENABLE_GPU
gpuArray = newGpuArray ;
#endif
return vl::VLE_Success ;
}
size_t
vl::MexTensor::getMemorySize() const
{
return memorySize ;
}
void vl::print(char const * str, vl::MexTensor const & tensor)
{
size_t size = tensor.getMemorySize() ;
double scaled ;
size_t const * dimensions = tensor.getDimensions() ;
const char * units ;
const char * type ;
if (size < 1024) {
scaled = size ;
units = "B" ;
} else if (size < 1024*1024) {
scaled = size / 1024.0 ;
units = "KB" ;
} else if (size < 1024*1024*1024) {
scaled = size / (1024.0 * 1024.0) ;
units = "MB" ;
} else {
scaled = size / (1024.0 * 1024.0 * 1024.0) ;
units = "GB" ;
}
const char * dev = "" ;
switch (tensor.getDeviceType()) {
case vl::VLDT_CPU : dev = "CPU" ; break ;
case vl::VLDT_GPU : dev = "GPU" ; break ;
}
switch (tensor.getDataType()) {
case vl::VLDT_Float: type = "float" ; break ;
case vl::VLDT_Double: type = "double" ; break ;
case vl::VLDT_Char: type = "char" ; break ;
default: type = "uknown type" ;
}
mexPrintf("%s[", str) ;
for (int k = 0 ; k < tensor.getNumDimensions() ; ++k) {
mexPrintf("%d ", dimensions[k]) ;
}
mexPrintf("| %s %.1f%s %s]\n",
type,
scaled,
units,
dev);
}
|
d04f3b81cbc86dffe8c58b28b0d035a142df54f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2017 Microsoft
* Licensed under The Apache-2.0 License [see LICENSE for details]
* \file deformable_psroi_pooling.cu
* \brief
* \author Yi Li, Guodong Zhang, Jifeng Dai
*/
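// Deformable position-sensitive ROI pooling: each output bin (ph, pw) of an
// ROI averages sample_per_part x sample_per_part bilinearly interpolated
// samples taken from the score-map channel assigned to that bin, after the
// bin has been shifted by a learned per-part offset (bottom_trans) scaled by
// trans_std.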
#include "./deformable_psroi_pooling-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
#include "../../common/cuda/utils.h"
#include "../mxnet_op.h"
#define DeformablePSROIPOOLING_CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
template <typename DType>
__device__ DType bilinear_interp(const DType* data,
const DType x,
const DType y,
const index_t width,
const index_t height) {
index_t x1 = floor(x);
index_t x2 = ceil(x);
index_t y1 = floor(y);
index_t y2 = ceil(y);
DType dist_x = static_cast<DType>(x - x1);
DType dist_y = static_cast<DType>(y - y1);
DType value11 = data[y1 * width + x1];
DType value12 = data[y2 * width + x1];
DType value21 = data[y1 * width + x2];
DType value22 = data[y2 * width + x2];
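  // weight each corner by the area of the opposite sub-rectangle
  // (standard bilinear interpolation)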
DType value = (1 - dist_x) * (1 - dist_y) * value11 + (1 - dist_x) * dist_y * value12 +
dist_x * (1 - dist_y) * value21 + dist_x * dist_y * value22;
return value;
}
template <typename DType>
__global__ void DeformablePSROIPoolForwardKernel(const index_t count,
const DType* bottom_data,
const DType spatial_scale,
const index_t channels,
const index_t height,
const index_t width,
const index_t pooled_height,
const index_t pooled_width,
const DType* bottom_rois,
const DType* bottom_trans,
const bool no_trans,
const DType trans_std,
const index_t sample_per_part,
const index_t output_dim,
const index_t group_size,
const index_t part_size,
const index_t num_classes,
const index_t channels_each_class,
DType* top_data,
DType* top_count) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
index_t pw = index % pooled_width;
index_t ph = (index / pooled_width) % pooled_height;
index_t ctop = (index / pooled_width / pooled_height) % output_dim;
index_t n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const DType* offset_bottom_rois = bottom_rois + n * 5;
index_t roi_batch_ind = offset_bottom_rois[0];
DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
// Force too small ROIs to be 1x1
DType roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
DType roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
DType bin_size_h = roi_height / static_cast<DType>(pooled_height);
DType bin_size_w = roi_width / static_cast<DType>(pooled_width);
DType sub_bin_size_h = bin_size_h / static_cast<DType>(sample_per_part);
DType sub_bin_size_w = bin_size_w / static_cast<DType>(sample_per_part);
index_t part_h = floor(static_cast<DType>(ph) / pooled_height * part_size);
index_t part_w = floor(static_cast<DType>(pw) / pooled_width * part_size);
index_t class_id = ctop / channels_each_class;
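    // learned per-part (x, y) offsets for this ROI and class (zero when
    // no_trans); scaled by trans_std here and by the ROI width/height below
    // before shifting the sampling window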
DType trans_x =
no_trans ?
static_cast<DType>(0) :
bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size +
part_w] *
trans_std;
DType trans_y =
no_trans ?
static_cast<DType>(0) :
bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size +
part_w] *
trans_std;
DType wstart = static_cast<DType>(pw) * bin_size_w + roi_start_w;
wstart += trans_x * roi_width;
DType hstart = static_cast<DType>(ph) * bin_size_h + roi_start_h;
hstart += trans_y * roi_height;
DType sum = 0;
index_t count = 0;
index_t gw = floor(static_cast<DType>(pw) * group_size / pooled_width);
index_t gh = floor(static_cast<DType>(ph) * group_size / pooled_height);
gw = min(max(gw, static_cast<index_t>(0)), group_size - 1);
gh = min(max(gh, static_cast<index_t>(0)), group_size - 1);
const DType* offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width;
for (index_t ih = 0; ih < sample_per_part; ih++) {
for (index_t iw = 0; iw < sample_per_part; iw++) {
DType w = wstart + iw * sub_bin_size_w;
DType h = hstart + ih * sub_bin_size_h;
// bilinear interpolation
if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) {
continue;
}
w = min(max(w, 0.), width - 1.);
h = min(max(h, 0.), height - 1.);
index_t c = (ctop * group_size + gh) * group_size + gw;
DType val = bilinear_interp(offset_bottom_data + c * height * width, w, h, width, height);
sum += val;
count++;
}
}
top_data[index] = count == 0 ? static_cast<DType>(0) : sum / count;
top_count[index] = count;
}
}
template <typename DType>
inline void DeformablePSROIPoolForward(const Tensor<gpu, 4, DType>& out,
const Tensor<gpu, 4, DType>& data,
const Tensor<gpu, 2, DType>& bbox,
const Tensor<gpu, 4, DType>& trans,
const Tensor<gpu, 4, DType>& top_count,
const bool no_trans,
const float spatial_scale,
const index_t output_dim,
const index_t group_size,
const index_t pooled_size,
const index_t part_size,
const index_t sample_per_part,
const float trans_std) {
const DType* bottom_data = data.dptr_;
const DType* bottom_rois = bbox.dptr_;
const DType* bottom_trans = no_trans ? nullptr : trans.dptr_;
DType* top_data = out.dptr_;
DType* top_count_data = top_count.dptr_;
const index_t count = out.shape_.Size();
const index_t channels = data.size(1);
const index_t height = data.size(2);
const index_t width = data.size(3);
const index_t pooled_height = pooled_size;
const index_t pooled_width = pooled_size;
const index_t num_classes = no_trans ? 1 : trans.size(1) / 2;
const index_t channels_each_class = no_trans ? output_dim : output_dim / num_classes;
hipStream_t stream = Stream<gpu>::GetStream(out.stream_);
hipLaunchKernelGGL(( DeformablePSROIPoolForwardKernel<DType>)
, dim3(mxnet::op::mxnet_op::cuda_get_num_blocks(count)), dim3(kBaseThreadNum), 0, stream,
count,
bottom_data,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
bottom_rois,
bottom_trans,
no_trans,
trans_std,
sample_per_part,
output_dim,
group_size,
part_size,
num_classes,
channels_each_class,
top_data,
top_count_data);
DeformablePSROIPOOLING_CUDA_CHECK(hipGetLastError());
}
template <typename DType>
__global__ void DeformablePSROIPoolBackwardAccKernel(const index_t count,
const DType* top_diff,
const DType* top_count,
const index_t num_rois,
const DType spatial_scale,
const index_t channels,
const index_t height,
const index_t width,
const index_t pooled_height,
const index_t pooled_width,
const index_t output_dim,
DType* bottom_data_diff,
DType* bottom_trans_diff,
const DType* bottom_data,
const DType* bottom_rois,
const DType* bottom_trans,
const bool no_trans,
const DType trans_std,
const index_t sample_per_part,
const index_t group_size,
const index_t part_size,
const index_t num_classes,
const index_t channels_each_class) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
index_t pw = index % pooled_width;
index_t ph = (index / pooled_width) % pooled_height;
index_t ctop = (index / pooled_width / pooled_height) % output_dim;
index_t n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const DType* offset_bottom_rois = bottom_rois + n * 5;
index_t roi_batch_ind = offset_bottom_rois[0];
DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
// Force too small ROIs to be 1x1
DType roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
DType roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
DType bin_size_h = roi_height / static_cast<DType>(pooled_height);
DType bin_size_w = roi_width / static_cast<DType>(pooled_width);
DType sub_bin_size_h = bin_size_h / static_cast<DType>(sample_per_part);
DType sub_bin_size_w = bin_size_w / static_cast<DType>(sample_per_part);
index_t part_h = floor(static_cast<DType>(ph) / pooled_height * part_size);
index_t part_w = floor(static_cast<DType>(pw) / pooled_width * part_size);
index_t class_id = ctop / channels_each_class;
DType trans_x =
no_trans ?
static_cast<DType>(0) :
bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size +
part_w] *
trans_std;
DType trans_y =
no_trans ?
static_cast<DType>(0) :
bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size +
part_w] *
trans_std;
DType wstart = static_cast<DType>(pw) * bin_size_w + roi_start_w;
wstart += trans_x * roi_width;
DType hstart = static_cast<DType>(ph) * bin_size_h + roi_start_h;
hstart += trans_y * roi_height;
if (top_count[index] <= 0) {
continue;
}
DType diff_val = top_diff[index] / top_count[index];
const DType* offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width;
DType* offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width;
index_t gw = floor(static_cast<DType>(pw) * group_size / pooled_width);
index_t gh = floor(static_cast<DType>(ph) * group_size / pooled_height);
gw = min(max(gw, static_cast<index_t>(0)), group_size - 1);
gh = min(max(gh, static_cast<index_t>(0)), group_size - 1);
for (index_t ih = 0; ih < sample_per_part; ih++) {
for (index_t iw = 0; iw < sample_per_part; iw++) {
DType w = wstart + iw * sub_bin_size_w;
DType h = hstart + ih * sub_bin_size_h;
// bilinear interpolation
if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) {
continue;
}
w = min(max(w, 0.), width - 1.);
h = min(max(h, 0.), height - 1.);
index_t c = (ctop * group_size + gh) * group_size + gw;
// backward on feature
index_t x0 = floor(w);
index_t x1 = ceil(w);
index_t y0 = floor(h);
index_t y1 = ceil(h);
DType dist_x = w - x0, dist_y = h - y0;
DType q00 = (1 - dist_x) * (1 - dist_y);
DType q01 = (1 - dist_x) * dist_y;
DType q10 = dist_x * (1 - dist_y);
DType q11 = dist_x * dist_y;
index_t bottom_index_base = c * height * width;
atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x0, q00 * diff_val);
atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x0, q01 * diff_val);
atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x1, q10 * diff_val);
atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x1, q11 * diff_val);
if (no_trans) {
continue;
}
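        // backward on the learned offsets: differentiate the bilinear sample
        // w.r.t. the sampling position (x, y), then scale by trans_std and
        // the ROI width/height (chain rule through wstart/hstart)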
DType U00 = offset_bottom_data[bottom_index_base + y0 * width + x0];
DType U01 = offset_bottom_data[bottom_index_base + y1 * width + x0];
DType U10 = offset_bottom_data[bottom_index_base + y0 * width + x1];
DType U11 = offset_bottom_data[bottom_index_base + y1 * width + x1];
DType diff_x = U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y);
diff_x *= trans_std * diff_val * roi_width;
DType diff_y = U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x);
diff_y *= trans_std * diff_val * roi_height;
atomicAdd(bottom_trans_diff +
(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size +
part_w,
diff_x);
atomicAdd(bottom_trans_diff +
(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size +
part_w,
diff_y);
}
}
}
}
template <typename DType>
inline void DeformablePSROIPoolBackwardAcc(const Tensor<gpu, 4, DType>& in_grad,
const Tensor<gpu, 4, DType>& trans_grad,
const Tensor<gpu, 4, DType>& out_grad,
const Tensor<gpu, 4, DType>& data,
const Tensor<gpu, 2, DType>& bbox,
const Tensor<gpu, 4, DType>& trans,
const Tensor<gpu, 4, DType>& top_count,
const bool no_trans,
const float spatial_scale,
const index_t output_dim,
const index_t group_size,
const index_t pooled_size,
const index_t part_size,
const index_t sample_per_part,
const float trans_std) {
const DType* top_diff = out_grad.dptr_;
const DType* bottom_data = data.dptr_;
const DType* bottom_rois = bbox.dptr_;
const DType* bottom_trans = no_trans ? nullptr : trans.dptr_;
DType* bottom_data_diff = in_grad.dptr_;
DType* bottom_trans_diff = no_trans ? nullptr : trans_grad.dptr_;
const DType* top_count_data = top_count.dptr_;
const index_t count = out_grad.shape_.Size();
const index_t num_rois = bbox.size(0);
const index_t channels = in_grad.size(1);
const index_t height = in_grad.size(2);
const index_t width = in_grad.size(3);
const index_t pooled_height = pooled_size;
const index_t pooled_width = pooled_size;
const index_t num_classes = no_trans ? 1 : trans_grad.size(1) / 2;
const index_t channels_each_class = no_trans ? output_dim : output_dim / num_classes;
hipStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
hipLaunchKernelGGL(( DeformablePSROIPoolBackwardAccKernel<DType>)
, dim3(mxnet::op::mxnet_op::cuda_get_num_blocks(count)), dim3(kBaseThreadNum), 0, stream,
count,
top_diff,
top_count_data,
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
output_dim,
bottom_data_diff,
bottom_trans_diff,
bottom_data,
bottom_rois,
bottom_trans,
no_trans,
trans_std,
sample_per_part,
group_size,
part_size,
num_classes,
channels_each_class);
DeformablePSROIPOOLING_CUDA_CHECK(hipGetLastError());
}
} // namespace cuda
template <typename DType>
inline void DeformablePSROIPoolForward(const Tensor<gpu, 4, DType>& out,
const Tensor<gpu, 4, DType>& data,
const Tensor<gpu, 2, DType>& bbox,
const Tensor<gpu, 4, DType>& trans,
const Tensor<gpu, 4, DType>& top_count,
const bool no_trans,
const float spatial_scale,
const index_t output_dim,
const index_t group_size,
const index_t pooled_size,
const index_t part_size,
const index_t sample_per_part,
const float trans_std) {
cuda::DeformablePSROIPoolForward(out,
data,
bbox,
trans,
top_count,
no_trans,
spatial_scale,
output_dim,
group_size,
pooled_size,
part_size,
sample_per_part,
trans_std);
}
template <typename DType>
inline void DeformablePSROIPoolBackwardAcc(const Tensor<gpu, 4, DType>& in_grad,
const Tensor<gpu, 4, DType>& trans_grad,
const Tensor<gpu, 4, DType>& out_grad,
const Tensor<gpu, 4, DType>& data,
const Tensor<gpu, 2, DType>& bbox,
const Tensor<gpu, 4, DType>& trans,
const Tensor<gpu, 4, DType>& top_count,
const bool no_trans,
const float spatial_scale,
const index_t output_dim,
const index_t group_size,
const index_t pooled_size,
const index_t part_size,
const index_t sample_per_part,
const float trans_std) {
cuda::DeformablePSROIPoolBackwardAcc(in_grad,
trans_grad,
out_grad,
data,
bbox,
trans,
top_count,
no_trans,
spatial_scale,
output_dim,
group_size,
pooled_size,
part_size,
sample_per_part,
trans_std);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template <>
Operator* CreateOp<gpu>(DeformablePSROIPoolingParam param, int dtype) {
Operator* op = nullptr;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new DeformablePSROIPoolingOp<gpu, DType>(param); });
return op;
}
} // namespace op
} // namespace mxnet
| d04f3b81cbc86dffe8c58b28b0d035a142df54f9.cu | /*!
* Copyright (c) 2017 Microsoft
* Licensed under The Apache-2.0 License [see LICENSE for details]
* \file deformable_psroi_pooling.cu
* \brief
* \author Yi Li, Guodong Zhang, Jifeng Dai
*/
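// Deformable position-sensitive ROI pooling: each output bin (ph, pw) of an
// ROI averages sample_per_part x sample_per_part bilinearly interpolated
// samples taken from the score-map channel assigned to that bin, after the
// bin has been shifted by a learned per-part offset (bottom_trans) scaled by
// trans_std.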
#include "./deformable_psroi_pooling-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
#include "../../common/cuda/utils.h"
#include "../mxnet_op.h"
#define DeformablePSROIPOOLING_CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
template <typename DType>
__device__ DType bilinear_interp(const DType* data,
const DType x,
const DType y,
const index_t width,
const index_t height) {
index_t x1 = floor(x);
index_t x2 = ceil(x);
index_t y1 = floor(y);
index_t y2 = ceil(y);
DType dist_x = static_cast<DType>(x - x1);
DType dist_y = static_cast<DType>(y - y1);
DType value11 = data[y1 * width + x1];
DType value12 = data[y2 * width + x1];
DType value21 = data[y1 * width + x2];
DType value22 = data[y2 * width + x2];
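  // weight each corner by the area of the opposite sub-rectangle
  // (standard bilinear interpolation)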
DType value = (1 - dist_x) * (1 - dist_y) * value11 + (1 - dist_x) * dist_y * value12 +
dist_x * (1 - dist_y) * value21 + dist_x * dist_y * value22;
return value;
}
template <typename DType>
__global__ void DeformablePSROIPoolForwardKernel(const index_t count,
const DType* bottom_data,
const DType spatial_scale,
const index_t channels,
const index_t height,
const index_t width,
const index_t pooled_height,
const index_t pooled_width,
const DType* bottom_rois,
const DType* bottom_trans,
const bool no_trans,
const DType trans_std,
const index_t sample_per_part,
const index_t output_dim,
const index_t group_size,
const index_t part_size,
const index_t num_classes,
const index_t channels_each_class,
DType* top_data,
DType* top_count) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
index_t pw = index % pooled_width;
index_t ph = (index / pooled_width) % pooled_height;
index_t ctop = (index / pooled_width / pooled_height) % output_dim;
index_t n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const DType* offset_bottom_rois = bottom_rois + n * 5;
index_t roi_batch_ind = offset_bottom_rois[0];
DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
// Force too small ROIs to be 1x1
DType roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
DType roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
DType bin_size_h = roi_height / static_cast<DType>(pooled_height);
DType bin_size_w = roi_width / static_cast<DType>(pooled_width);
DType sub_bin_size_h = bin_size_h / static_cast<DType>(sample_per_part);
DType sub_bin_size_w = bin_size_w / static_cast<DType>(sample_per_part);
index_t part_h = floor(static_cast<DType>(ph) / pooled_height * part_size);
index_t part_w = floor(static_cast<DType>(pw) / pooled_width * part_size);
index_t class_id = ctop / channels_each_class;
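    // learned per-part (x, y) offsets for this ROI and class (zero when
    // no_trans); scaled by trans_std here and by the ROI width/height below
    // before shifting the sampling window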
DType trans_x =
no_trans ?
static_cast<DType>(0) :
bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size +
part_w] *
trans_std;
DType trans_y =
no_trans ?
static_cast<DType>(0) :
bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size +
part_w] *
trans_std;
DType wstart = static_cast<DType>(pw) * bin_size_w + roi_start_w;
wstart += trans_x * roi_width;
DType hstart = static_cast<DType>(ph) * bin_size_h + roi_start_h;
hstart += trans_y * roi_height;
DType sum = 0;
index_t count = 0;
index_t gw = floor(static_cast<DType>(pw) * group_size / pooled_width);
index_t gh = floor(static_cast<DType>(ph) * group_size / pooled_height);
gw = min(max(gw, static_cast<index_t>(0)), group_size - 1);
gh = min(max(gh, static_cast<index_t>(0)), group_size - 1);
const DType* offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width;
for (index_t ih = 0; ih < sample_per_part; ih++) {
for (index_t iw = 0; iw < sample_per_part; iw++) {
DType w = wstart + iw * sub_bin_size_w;
DType h = hstart + ih * sub_bin_size_h;
// bilinear interpolation
if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) {
continue;
}
w = min(max(w, 0.), width - 1.);
h = min(max(h, 0.), height - 1.);
index_t c = (ctop * group_size + gh) * group_size + gw;
DType val = bilinear_interp(offset_bottom_data + c * height * width, w, h, width, height);
sum += val;
count++;
}
}
top_data[index] = count == 0 ? static_cast<DType>(0) : sum / count;
top_count[index] = count;
}
}
template <typename DType>
inline void DeformablePSROIPoolForward(const Tensor<gpu, 4, DType>& out,
const Tensor<gpu, 4, DType>& data,
const Tensor<gpu, 2, DType>& bbox,
const Tensor<gpu, 4, DType>& trans,
const Tensor<gpu, 4, DType>& top_count,
const bool no_trans,
const float spatial_scale,
const index_t output_dim,
const index_t group_size,
const index_t pooled_size,
const index_t part_size,
const index_t sample_per_part,
const float trans_std) {
const DType* bottom_data = data.dptr_;
const DType* bottom_rois = bbox.dptr_;
const DType* bottom_trans = no_trans ? nullptr : trans.dptr_;
DType* top_data = out.dptr_;
DType* top_count_data = top_count.dptr_;
const index_t count = out.shape_.Size();
const index_t channels = data.size(1);
const index_t height = data.size(2);
const index_t width = data.size(3);
const index_t pooled_height = pooled_size;
const index_t pooled_width = pooled_size;
const index_t num_classes = no_trans ? 1 : trans.size(1) / 2;
const index_t channels_each_class = no_trans ? output_dim : output_dim / num_classes;
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_);
DeformablePSROIPoolForwardKernel<DType>
<<<mxnet::op::mxnet_op::cuda_get_num_blocks(count), kBaseThreadNum, 0, stream>>>(
count,
bottom_data,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
bottom_rois,
bottom_trans,
no_trans,
trans_std,
sample_per_part,
output_dim,
group_size,
part_size,
num_classes,
channels_each_class,
top_data,
top_count_data);
DeformablePSROIPOOLING_CUDA_CHECK(cudaGetLastError());
}
template <typename DType>
__global__ void DeformablePSROIPoolBackwardAccKernel(const index_t count,
const DType* top_diff,
const DType* top_count,
const index_t num_rois,
const DType spatial_scale,
const index_t channels,
const index_t height,
const index_t width,
const index_t pooled_height,
const index_t pooled_width,
const index_t output_dim,
DType* bottom_data_diff,
DType* bottom_trans_diff,
const DType* bottom_data,
const DType* bottom_rois,
const DType* bottom_trans,
const bool no_trans,
const DType trans_std,
const index_t sample_per_part,
const index_t group_size,
const index_t part_size,
const index_t num_classes,
const index_t channels_each_class) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
index_t pw = index % pooled_width;
index_t ph = (index / pooled_width) % pooled_height;
index_t ctop = (index / pooled_width / pooled_height) % output_dim;
index_t n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const DType* offset_bottom_rois = bottom_rois + n * 5;
index_t roi_batch_ind = offset_bottom_rois[0];
DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
// Force too small ROIs to be 1x1
DType roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
DType roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
DType bin_size_h = roi_height / static_cast<DType>(pooled_height);
DType bin_size_w = roi_width / static_cast<DType>(pooled_width);
DType sub_bin_size_h = bin_size_h / static_cast<DType>(sample_per_part);
DType sub_bin_size_w = bin_size_w / static_cast<DType>(sample_per_part);
index_t part_h = floor(static_cast<DType>(ph) / pooled_height * part_size);
index_t part_w = floor(static_cast<DType>(pw) / pooled_width * part_size);
index_t class_id = ctop / channels_each_class;
DType trans_x =
no_trans ?
static_cast<DType>(0) :
bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size +
part_w] *
trans_std;
DType trans_y =
no_trans ?
static_cast<DType>(0) :
bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size +
part_w] *
trans_std;
DType wstart = static_cast<DType>(pw) * bin_size_w + roi_start_w;
wstart += trans_x * roi_width;
DType hstart = static_cast<DType>(ph) * bin_size_h + roi_start_h;
hstart += trans_y * roi_height;
if (top_count[index] <= 0) {
continue;
}
DType diff_val = top_diff[index] / top_count[index];
const DType* offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width;
DType* offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width;
index_t gw = floor(static_cast<DType>(pw) * group_size / pooled_width);
index_t gh = floor(static_cast<DType>(ph) * group_size / pooled_height);
gw = min(max(gw, static_cast<index_t>(0)), group_size - 1);
gh = min(max(gh, static_cast<index_t>(0)), group_size - 1);
for (index_t ih = 0; ih < sample_per_part; ih++) {
for (index_t iw = 0; iw < sample_per_part; iw++) {
DType w = wstart + iw * sub_bin_size_w;
DType h = hstart + ih * sub_bin_size_h;
// bilinear interpolation
if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) {
continue;
}
w = min(max(w, 0.), width - 1.);
h = min(max(h, 0.), height - 1.);
index_t c = (ctop * group_size + gh) * group_size + gw;
// backward on feature
index_t x0 = floor(w);
index_t x1 = ceil(w);
index_t y0 = floor(h);
index_t y1 = ceil(h);
DType dist_x = w - x0, dist_y = h - y0;
DType q00 = (1 - dist_x) * (1 - dist_y);
DType q01 = (1 - dist_x) * dist_y;
DType q10 = dist_x * (1 - dist_y);
DType q11 = dist_x * dist_y;
index_t bottom_index_base = c * height * width;
atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x0, q00 * diff_val);
atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x0, q01 * diff_val);
atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x1, q10 * diff_val);
atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x1, q11 * diff_val);
if (no_trans) {
continue;
}
DType U00 = offset_bottom_data[bottom_index_base + y0 * width + x0];
DType U01 = offset_bottom_data[bottom_index_base + y1 * width + x0];
DType U10 = offset_bottom_data[bottom_index_base + y0 * width + x1];
DType U11 = offset_bottom_data[bottom_index_base + y1 * width + x1];
DType diff_x = U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y);
diff_x *= trans_std * diff_val * roi_width;
DType diff_y = U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x);
diff_y *= trans_std * diff_val * roi_height;
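        // diff_x and diff_y are the analytic derivatives of the bilinear sample
        //   U = U00*(1-dx)*(1-dy) + U01*(1-dx)*dy + U10*dx*(1-dy) + U11*dx*dy
        // (dx = dist_x, dy = dist_y) with respect to dist_x and dist_y, scaled by
        // trans_std, the incoming bin gradient and the ROI extent, i.e. the chain
        // rule for the learned offsets accumulated into bottom_trans_diff below.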
atomicAdd(bottom_trans_diff +
(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size +
part_w,
diff_x);
atomicAdd(bottom_trans_diff +
(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size +
part_w,
diff_y);
}
}
}
}
template <typename DType>
inline void DeformablePSROIPoolBackwardAcc(const Tensor<gpu, 4, DType>& in_grad,
const Tensor<gpu, 4, DType>& trans_grad,
const Tensor<gpu, 4, DType>& out_grad,
const Tensor<gpu, 4, DType>& data,
const Tensor<gpu, 2, DType>& bbox,
const Tensor<gpu, 4, DType>& trans,
const Tensor<gpu, 4, DType>& top_count,
const bool no_trans,
const float spatial_scale,
const index_t output_dim,
const index_t group_size,
const index_t pooled_size,
const index_t part_size,
const index_t sample_per_part,
const float trans_std) {
const DType* top_diff = out_grad.dptr_;
const DType* bottom_data = data.dptr_;
const DType* bottom_rois = bbox.dptr_;
const DType* bottom_trans = no_trans ? nullptr : trans.dptr_;
DType* bottom_data_diff = in_grad.dptr_;
DType* bottom_trans_diff = no_trans ? nullptr : trans_grad.dptr_;
const DType* top_count_data = top_count.dptr_;
const index_t count = out_grad.shape_.Size();
const index_t num_rois = bbox.size(0);
const index_t channels = in_grad.size(1);
const index_t height = in_grad.size(2);
const index_t width = in_grad.size(3);
const index_t pooled_height = pooled_size;
const index_t pooled_width = pooled_size;
const index_t num_classes = no_trans ? 1 : trans_grad.size(1) / 2;
const index_t channels_each_class = no_trans ? output_dim : output_dim / num_classes;
cudaStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
DeformablePSROIPoolBackwardAccKernel<DType>
<<<mxnet::op::mxnet_op::cuda_get_num_blocks(count), kBaseThreadNum, 0, stream>>>(
count,
top_diff,
top_count_data,
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
output_dim,
bottom_data_diff,
bottom_trans_diff,
bottom_data,
bottom_rois,
bottom_trans,
no_trans,
trans_std,
sample_per_part,
group_size,
part_size,
num_classes,
channels_each_class);
DeformablePSROIPOOLING_CUDA_CHECK(cudaGetLastError());
}
} // namespace cuda
template <typename DType>
inline void DeformablePSROIPoolForward(const Tensor<gpu, 4, DType>& out,
const Tensor<gpu, 4, DType>& data,
const Tensor<gpu, 2, DType>& bbox,
const Tensor<gpu, 4, DType>& trans,
const Tensor<gpu, 4, DType>& top_count,
const bool no_trans,
const float spatial_scale,
const index_t output_dim,
const index_t group_size,
const index_t pooled_size,
const index_t part_size,
const index_t sample_per_part,
const float trans_std) {
cuda::DeformablePSROIPoolForward(out,
data,
bbox,
trans,
top_count,
no_trans,
spatial_scale,
output_dim,
group_size,
pooled_size,
part_size,
sample_per_part,
trans_std);
}
template <typename DType>
inline void DeformablePSROIPoolBackwardAcc(const Tensor<gpu, 4, DType>& in_grad,
const Tensor<gpu, 4, DType>& trans_grad,
const Tensor<gpu, 4, DType>& out_grad,
const Tensor<gpu, 4, DType>& data,
const Tensor<gpu, 2, DType>& bbox,
const Tensor<gpu, 4, DType>& trans,
const Tensor<gpu, 4, DType>& top_count,
const bool no_trans,
const float spatial_scale,
const index_t output_dim,
const index_t group_size,
const index_t pooled_size,
const index_t part_size,
const index_t sample_per_part,
const float trans_std) {
cuda::DeformablePSROIPoolBackwardAcc(in_grad,
trans_grad,
out_grad,
data,
bbox,
trans,
top_count,
no_trans,
spatial_scale,
output_dim,
group_size,
pooled_size,
part_size,
sample_per_part,
trans_std);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template <>
Operator* CreateOp<gpu>(DeformablePSROIPoolingParam param, int dtype) {
Operator* op = nullptr;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new DeformablePSROIPoolingOp<gpu, DType>(param); });
return op;
}
} // namespace op
} // namespace mxnet
|
9466973bc091956b0d5d48c30ce7fa738c39db94.hip | // !!! This is a file automatically generated by hipify!!!
//==================================================================
// Title: x-drop seed-and-extend alignment algorithm
// Author: A. Zeni, G. Guidi
//==================================================================
#include "logan_functions.cuh"
#include "seed.cuh"
#include <hip/hip_runtime.h>
#include <chrono>
using namespace std;
using namespace chrono;
#define cudaErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char* file, int line, bool abort = true)
{
if(code != hipSuccess){
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if(abort) exit(code);
}
}
__inline__ __device__ void warpReduce(volatile short *input, int myTId)
{
input[myTId] = (input[myTId] > input[myTId + 32]) ? input[myTId] : input[myTId + 32];
input[myTId] = (input[myTId] > input[myTId + 16]) ? input[myTId] : input[myTId + 16];
input[myTId] = (input[myTId] > input[myTId + 8]) ? input[myTId] : input[myTId + 8];
input[myTId] = (input[myTId] > input[myTId + 4]) ? input[myTId] : input[myTId + 4];
input[myTId] = (input[myTId] > input[myTId + 2]) ? input[myTId] : input[myTId + 2];
input[myTId] = (input[myTId] > input[myTId + 1]) ? input[myTId] : input[myTId + 1];
}
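// Block-wide max reduction over `dim` shorts held in shared memory: the strided
// loop below halves the active range (when dim > 32) until at most 64 candidates
// remain, and warpReduce finishes the last strides within a single warp, relying
// on the volatile qualifier and the classic warp-synchronous reduction idiom.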
__inline__ __device__ short reduce_max(short *input, int dim, int n_threads)
{
unsigned int myTId = threadIdx.x;
if(dim>32){
for(int i = n_threads/2; i >32; i>>=1){
if(myTId < i){
input[myTId] = (input[myTId] > input[myTId + i]) ? input[myTId] : input[myTId + i];
}
__syncthreads();
}
}
if(myTId<32)
warpReduce(input, myTId);
__syncthreads();
return input[0];
}
__inline__ __device__ void updateExtendedSeedL(
SeedL &seed,
ExtensionDirectionL direction, //as there are only 4 directions we may consider even smaller data types
int cols,
int rows,
int lowerDiag,
int upperDiag)
{
if (direction == EXTEND_LEFTL)
{
int beginDiag = seed.beginDiagonal;
// Set lower and upper diagonals.
if (getLowerDiagonal(seed) > beginDiag + lowerDiag)
setLowerDiagonal(seed, beginDiag + lowerDiag);
if (getUpperDiagonal(seed) < beginDiag + upperDiag)
setUpperDiagonal(seed, beginDiag + upperDiag);
// Set new start position of seed.
seed.beginPositionH -= rows;
seed.beginPositionV -= cols;
} else { // direction == EXTEND_RIGHTL
// Set new lower and upper diagonals.
int endDiag = seed.endDiagonal;
if (getUpperDiagonal(seed) < endDiag - lowerDiag)
setUpperDiagonal(seed, (endDiag - lowerDiag));
if (getLowerDiagonal(seed) > (endDiag - upperDiag))
setLowerDiagonal(seed, endDiag - upperDiag);
// Set new end position of seed.
seed.endPositionH += rows;
seed.endPositionV += cols;
}
}
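// computeAntidiag fills one anti-diagonal of the banded X-drop DP matrix. For a
// column `col` on anti-diagonal antiDiagNo the score is
//   tmp = max(antiDiag2[col], antiDiag2[col-1]) + GAP_EXT
//   tmp = max(tmp, antiDiag1[col-1] + (match ? MATCH : MISMATCH))
// (each array is addressed through its offset because anti-diagonals are stored
// compactly), and the cell is discarded as UNDEF when tmp < best - scoreDropOff,
// which is the X-drop pruning rule.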
__inline__ __device__ void computeAntidiag(
const short *antiDiag1,
const short *antiDiag2,
short *antiDiag3,
const char* querySeg,
const char* databaseSeg,
const int best,
const int scoreDropOff,
const int cols,
const int rows,
const int minCol,
const int maxCol,
const int antiDiagNo,
const int offset1,
const int offset2,
const ExtensionDirectionL direction,
int n_threads)
{
int tid = threadIdx.x;
for(int i = 0; i < maxCol; i+=n_threads){
int col = tid + minCol + i;
int queryPos, dbPos;
queryPos = col - 1;
dbPos = col + rows - antiDiagNo - 1;
if(col < maxCol){
int tmp = max_logan(antiDiag2[col-offset2],antiDiag2[col-offset2-1]) + GAP_EXT;
int score = (querySeg[queryPos] == databaseSeg[dbPos]) ? MATCH : MISMATCH;
tmp = max_logan(antiDiag1[col-offset1-1]+score,tmp);
antiDiag3[tid+1+i] = (tmp < best - scoreDropOff) ? UNDEF : tmp;
}
}
}
__inline__ __device__ void calcExtendedLowerDiag(int &lowerDiag,
int const &minCol,
int const &antiDiagNo)
{
int minRow = antiDiagNo - minCol;
if (minCol - minRow < lowerDiag)
lowerDiag = minCol - minRow;
}
__inline__ __device__ void calcExtendedUpperDiag(
int &upperDiag,
int const &maxCol,
int const &antiDiagNo)
{
int maxRow = antiDiagNo + 1 - maxCol;
if (maxCol - 1 - maxRow > upperDiag)
upperDiag = maxCol - 1 - maxRow;
}
__inline__ __device__ void initAntiDiag3(
short *antiDiag3,
int &a3size,
int const &offset,
int const &maxCol,
int const &antiDiagNo,
int const &minScore,
int const &gapCost,
int const &undefined)
{
a3size = maxCol + 1 - offset;
antiDiag3[0] = undefined;
antiDiag3[maxCol - offset] = undefined;
if (antiDiagNo * gapCost > minScore)
{
if (offset == 0) // init first column
antiDiag3[0] = antiDiagNo * gapCost;
if (antiDiagNo - maxCol == 0) // init first row
antiDiag3[maxCol - offset] = antiDiagNo * gapCost;
}
}
__inline__ __device__ void initAntiDiags(
short *antiDiag1,
short *antiDiag2,
short *antiDiag3,
int &a2size,
int &a3size,
int const &dropOff,
int const &gapCost,
int const &undefined)
{
a2size = 1;
antiDiag2[0] = 0;
a3size = 2;
antiDiag3[0] = gapCost;
antiDiag3[1] = gapCost;
}
__global__ void extendSeedLGappedXDropOneDirectionGlobal(
SeedL *__restrict__ seed,
const char *__restrict__ querySegArray,
const char *__restrict__ databaseSegArray,
const ExtensionDirectionL direction,
const int scoreDropOff,
int *__restrict__ res,
const int *__restrict__ offsetQuery,
const int *__restrict__ offsetTarget,
const int offAntidiag,
short *__restrict__ antidiag,
const int n_threads)
{
extern __shared__ short temp_alloc[];
short *temp= &temp_alloc[0];
int myId = blockIdx.x;
int myTId = threadIdx.x;
const char *querySeg;
const char *databaseSeg;
if(myId==0){
querySeg = querySegArray;
databaseSeg = databaseSegArray;
}
else{
querySeg = querySegArray + offsetQuery[myId-1];
databaseSeg = databaseSegArray + offsetTarget[myId-1];
}
short *antiDiag1 = &antidiag[myId*offAntidiag*3];
short* antiDiag2 = &antiDiag1[offAntidiag];
short* antiDiag3 = &antiDiag2[offAntidiag];
SeedL mySeed(seed[myId]);
//dimension of the antidiagonals
int a1size = 0, a2size = 0, a3size = 0;
int cols, rows;
if(myId == 0){
cols = offsetQuery[myId]+1;
rows = offsetTarget[myId]+1;
}
else{
cols = offsetQuery[myId]-offsetQuery[myId-1]+1;
rows = offsetTarget[myId]-offsetTarget[myId-1]+1;
}
if (rows == 1 || cols == 1) return;
int minCol = 1;
int maxCol = 2;
int offset1 = 0; // number of leading columns that need not be calculated in antiDiag1
int offset2 = 0; // in antiDiag2
int offset3 = 0; // in antiDiag3
initAntiDiags(antiDiag1,antiDiag2, antiDiag3, a2size, a3size, scoreDropOff, GAP_EXT, UNDEF);
int antiDiagNo = 1; // the currently calculated anti-diagonal
int best = 0; // maximal score value in the DP matrix (for drop-off calculation)
int lowerDiag = 0;
int upperDiag = 0;
while (minCol < maxCol)
{
++antiDiagNo;
//antidiagswap
//antiDiag2 -> antiDiag1
//antiDiag3 -> antiDiag2
//antiDiag1 -> antiDiag3
short *t = antiDiag1;
antiDiag1 = antiDiag2;
antiDiag2 = antiDiag3;
antiDiag3 = t;
int t_l = a1size;
a1size = a2size;
a2size = a3size;
a3size = t_l;
offset1 = offset2;
offset2 = offset3;
offset3 = minCol-1;
initAntiDiag3(antiDiag3, a3size, offset3, maxCol, antiDiagNo, best - scoreDropOff, GAP_EXT, UNDEF);
computeAntidiag(antiDiag1, antiDiag2, antiDiag3, querySeg, databaseSeg,
best, scoreDropOff, cols, rows, minCol, maxCol, antiDiagNo,
offset1, offset2, direction, n_threads);
//roofline analysis
__syncthreads();
int tmp, antiDiagBest = UNDEF;
for(int i=0; i<a3size; i+=n_threads){
int size = a3size-i;
if(myTId<n_threads){
temp[myTId] = (myTId<size) ? antiDiag3[myTId+i]:UNDEF;
}
__syncthreads();
tmp = reduce_max(temp,size, n_threads);
antiDiagBest = (tmp>antiDiagBest) ? tmp:antiDiagBest;
}
best = (best > antiDiagBest) ? best : antiDiagBest;
while (minCol - offset3 < a3size && antiDiag3[minCol - offset3] == UNDEF &&
minCol - offset2 - 1 < a2size && antiDiag2[minCol - offset2 - 1] == UNDEF)
{
++minCol;
}
// Calculate new maxCol
while (maxCol - offset3 > 0 && (antiDiag3[maxCol - offset3 - 1] == UNDEF) &&
(antiDiag2[maxCol - offset2 - 1] == UNDEF))
{
--maxCol;
}
++maxCol;
// Calculate new lowerDiag and upperDiag of extended seed
calcExtendedLowerDiag(lowerDiag, minCol, antiDiagNo);
calcExtendedUpperDiag(upperDiag, maxCol - 1, antiDiagNo);
// end of databaseSeg reached?
minCol = (minCol > (antiDiagNo + 2 - rows)) ? minCol : (antiDiagNo + 2 - rows);
// end of querySeg reached?
maxCol = (maxCol < cols) ? maxCol : cols;
}
int longestExtensionCol = a3size + offset3 - 2;
int longestExtensionRow = antiDiagNo - longestExtensionCol;
int longestExtensionScore = antiDiag3[longestExtensionCol - offset3];
if (longestExtensionScore == UNDEF)
{
if (antiDiag2[a2size -2] != UNDEF)
{
// reached end of query segment
longestExtensionCol = a2size + offset2 - 2;
longestExtensionRow = antiDiagNo - 1 - longestExtensionCol;
longestExtensionScore = antiDiag2[longestExtensionCol - offset2];
}
else if (a2size > 2 && antiDiag2[a2size-3] != UNDEF)
{
// reached end of database segment
longestExtensionCol = a2size + offset2 - 3;
longestExtensionRow = antiDiagNo - 1 - longestExtensionCol;
longestExtensionScore = antiDiag2[longestExtensionCol - offset2];
}
}
if (longestExtensionScore == UNDEF){
// general case
for (int i = 0; i < a1size; ++i){
if (antiDiag1[i] > longestExtensionScore){
longestExtensionScore = antiDiag1[i];
longestExtensionCol = i + offset1;
longestExtensionRow = antiDiagNo - 2 - longestExtensionCol;
}
}
}
if (longestExtensionScore != UNDEF)
updateExtendedSeedL(mySeed, direction, longestExtensionCol, longestExtensionRow, lowerDiag, upperDiag);
seed[myId] = mySeed;
res[myId] = longestExtensionScore;
}
void extendSeedL(std::vector<SeedL> &seeds,
ExtensionDirectionL direction,
std::vector<std::string> &target,
std::vector<std::string> &query,
std::vector<ScoringSchemeL> &penalties,
int const& XDrop,
int const& kmer_length,
int *res,
int numAlignments,
int ngpus,
int n_threads
)
{
if(scoreGapExtend(penalties[0]) >= 0){
cout<<"Error: Logan does not support gap extension penalty >= 0\n";
exit(-1);
}
if(scoreGapOpen(penalties[0]) >= 0){
cout<<"Error: Logan does not support gap opening penalty >= 0\n";
exit(-1);
}
int deviceCount = 0;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
std::cout << "Error: no device found\n";
return;
}
if (ngpus > deviceCount || ngpus > MAX_GPUS) {
std::cout << "Error: the maximum number of devices allowed is "
<< ::min(deviceCount, MAX_GPUS) << std::endl;
return;
}
//start measuring time
#ifdef ADAPTABLE
n_threads = (XDrop/WARP_DIM + 1)* WARP_DIM;
if(n_threads>1024)
n_threads=1024;
#endif
//declare streams
hipStream_t stream_r[MAX_GPUS], stream_l[MAX_GPUS];
// NB nSequences is correlated to the number of GPUs that we have
int nSequences = numAlignments/ngpus;
int nSequencesLast = nSequences+numAlignments%ngpus;
//final result of the alignment
int *scoreLeft = (int *)malloc(numAlignments * sizeof(int));
int *scoreRight = (int *)malloc(numAlignments * sizeof(int));
//create two sets of seeds
//copy seeds
vector<SeedL> seeds_r;
vector<SeedL> seeds_l;
seeds_r.reserve(numAlignments);
for (size_t i=0; i<seeds.size(); i++){
seeds_r.push_back(seeds[i]);
}
//sequences offsets
vector<int> offsetLeftQ[MAX_GPUS];
vector<int> offsetLeftT[MAX_GPUS];
vector<int> offsetRightQ[MAX_GPUS];
vector<int> offsetRightT[MAX_GPUS];
//shared_mem_size per block per GPU
int ant_len_left[MAX_GPUS];
int ant_len_right[MAX_GPUS];
//antidiag in case shared memory isn't enough
short *ant_l[MAX_GPUS], *ant_r[MAX_GPUS];
	//total length of the sequences
int totalLengthQPref[MAX_GPUS];
int totalLengthTPref[MAX_GPUS];
int totalLengthQSuff[MAX_GPUS];
int totalLengthTSuff[MAX_GPUS];
//declare and allocate sequences prefixes and suffixes
char *prefQ[MAX_GPUS], *prefT[MAX_GPUS];
char *suffQ[MAX_GPUS], *suffT[MAX_GPUS];
//declare GPU offsets
int *offsetLeftQ_d[MAX_GPUS], *offsetLeftT_d[MAX_GPUS];
int *offsetRightQ_d[MAX_GPUS], *offsetRightT_d[MAX_GPUS];
//declare GPU results
int *scoreLeft_d[MAX_GPUS], *scoreRight_d[MAX_GPUS];
//declare GPU seeds
SeedL *seed_d_l[MAX_GPUS], *seed_d_r[MAX_GPUS];
//declare prefixes and suffixes on the GPU
char *prefQ_d[MAX_GPUS], *prefT_d[MAX_GPUS];
char *suffQ_d[MAX_GPUS], *suffT_d[MAX_GPUS];
std::vector<double> pergpustime(ngpus);
#pragma omp parallel for
for(int i = 0; i < ngpus; i++){
int dim = nSequences;
if(i==ngpus-1)
dim = nSequencesLast;
//compute offsets and shared memory per block
int MYTHREAD = omp_get_thread_num();
auto start_setup_ithread = NOW;
ant_len_left[i]=0;
ant_len_right[i]=0;
for(int j = 0; j < dim; j++){
offsetLeftQ[i].push_back(getBeginPositionV(seeds[j+i*nSequences]));
offsetLeftT[i].push_back(getBeginPositionH(seeds[j+i*nSequences]));
ant_len_left[i] = ::max(::min(offsetLeftQ[i][j],offsetLeftT[i][j]), ant_len_left[i]);
offsetRightQ[i].push_back(query[j+i*nSequences].size()-getEndPositionV(seeds[j+i*nSequences]));
offsetRightT[i].push_back(target[j+i*nSequences].size()-getEndPositionH(seeds[j+i*nSequences]));
ant_len_right[i] = ::max(::min(offsetRightQ[i][j], offsetRightT[i][j]), ant_len_right[i]);
}
//compute antidiagonal offsets
partial_sum(offsetLeftQ[i].begin(),offsetLeftQ[i].end(),offsetLeftQ[i].begin());
partial_sum(offsetLeftT[i].begin(),offsetLeftT[i].end(),offsetLeftT[i].begin());
partial_sum(offsetRightQ[i].begin(),offsetRightQ[i].end(),offsetRightQ[i].begin());
partial_sum(offsetRightT[i].begin(),offsetRightT[i].end(),offsetRightT[i].begin());
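		//after the prefix sums, offset*[i][j] is cumulative: sequence j of this GPU
		//occupies [offset[j-1], offset[j]) in the packed prefix/suffix buffers
		//(offset[-1] read as 0), and offset[dim-1] is the total buffer length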
//set total length of the sequences
totalLengthQPref[i] = offsetLeftQ[i][dim-1];
totalLengthTPref[i] = offsetLeftT[i][dim-1];
totalLengthQSuff[i] = offsetRightQ[i][dim-1];
totalLengthTSuff[i] = offsetRightT[i][dim-1];
//allocate sequences prefix and suffix on the CPU
prefQ[i] = (char*)malloc(sizeof(char)*totalLengthQPref[i]);
prefT[i] = (char*)malloc(sizeof(char)*totalLengthTPref[i]);
suffQ[i] = (char*)malloc(sizeof(char)*totalLengthQSuff[i]);
suffT[i] = (char*)malloc(sizeof(char)*totalLengthTSuff[i]);
//generate prefix and suffix on the CPU
reverse_copy(query[0+i*nSequences].c_str(),query[0+i*nSequences].c_str()+offsetLeftQ[i][0],prefQ[i]);
memcpy(prefT[i], target[0+i*nSequences].c_str(), offsetLeftT[i][0]);
memcpy(suffQ[i], query[0+i*nSequences].c_str()+getEndPositionV(seeds[0+i*nSequences]), offsetRightQ[i][0]);
reverse_copy(target[0+i*nSequences].c_str()+getEndPositionH(seeds[0+i*nSequences]),target[0+i*nSequences].c_str()+getEndPositionH(seeds[0+i*nSequences])+offsetRightT[i][0],suffT[i]);
for(int j = 1; j<dim; j++){
char *seqptr = prefQ[i] + offsetLeftQ[i][j-1];
reverse_copy(query[j+i*nSequences].c_str(),query[j+i*nSequences].c_str()+(offsetLeftQ[i][j]-offsetLeftQ[i][j-1]),seqptr);
seqptr = prefT[i] + offsetLeftT[i][j-1];
memcpy(seqptr, target[j+i*nSequences].c_str(), offsetLeftT[i][j]-offsetLeftT[i][j-1]);
seqptr = suffQ[i] + offsetRightQ[i][j-1];
memcpy(seqptr, query[j+i*nSequences].c_str()+getEndPositionV(seeds[j+i*nSequences]), offsetRightQ[i][j]-offsetRightQ[i][j-1]);
seqptr = suffT[i] + offsetRightT[i][j-1];
reverse_copy(target[j+i*nSequences].c_str()+getEndPositionH(seeds[j+i*nSequences]),target[j+i*nSequences].c_str()+getEndPositionH(seeds[j+i*nSequences])+(offsetRightT[i][j]-offsetRightT[i][j-1]),seqptr);
}
auto end_setup_ithread = NOW;
duration<double> setup_ithread = end_setup_ithread - start_setup_ithread;
pergpustime[MYTHREAD] = setup_ithread.count();
}
#pragma omp parallel for
for(int i = 0; i < ngpus; i++)
{
int dim = nSequences;
if(i==ngpus-1)
dim = nSequencesLast;
//set gpu device
hipSetDevice(i);
//create streams
hipStreamCreateWithFlags(&stream_r[i],hipStreamNonBlocking);
hipStreamCreateWithFlags(&stream_l[i],hipStreamNonBlocking);
//allocate antidiagonals on the GPU
cudaErrchk(hipMalloc(&ant_l[i], sizeof(short)*ant_len_left[i]*3*dim));
cudaErrchk(hipMalloc(&ant_r[i], sizeof(short)*ant_len_right[i]*3*dim));
//allocate offsets on the GPU
cudaErrchk(hipMalloc(&offsetLeftQ_d[i], dim*sizeof(int)));
cudaErrchk(hipMalloc(&offsetLeftT_d[i], dim*sizeof(int)));
cudaErrchk(hipMalloc(&offsetRightQ_d[i], dim*sizeof(int)));
cudaErrchk(hipMalloc(&offsetRightT_d[i], dim*sizeof(int)));
//allocate results on the GPU
cudaErrchk(hipMalloc(&scoreLeft_d[i], dim*sizeof(int)));
cudaErrchk(hipMalloc(&scoreRight_d[i], dim*sizeof(int)));
//allocate seeds on the GPU
cudaErrchk(hipMalloc(&seed_d_l[i], dim*sizeof(SeedL)));
cudaErrchk(hipMalloc(&seed_d_r[i], dim*sizeof(SeedL)));
//allocate sequences on the GPU
cudaErrchk(hipMalloc(&prefQ_d[i], totalLengthQPref[i]*sizeof(char)));
cudaErrchk(hipMalloc(&prefT_d[i], totalLengthTPref[i]*sizeof(char)));
cudaErrchk(hipMalloc(&suffQ_d[i], totalLengthQSuff[i]*sizeof(char)));
cudaErrchk(hipMalloc(&suffT_d[i], totalLengthTSuff[i]*sizeof(char)));
//copy seeds to the GPU
cudaErrchk(hipMemcpyAsync(seed_d_l[i], &seeds[0]+i*nSequences, dim*sizeof(SeedL), hipMemcpyHostToDevice, stream_l[i]));
cudaErrchk(hipMemcpyAsync(seed_d_r[i], &seeds_r[0]+i*nSequences, dim*sizeof(SeedL), hipMemcpyHostToDevice, stream_r[i]));
//copy offsets to the GPU
cudaErrchk(hipMemcpyAsync(offsetLeftQ_d[i], &offsetLeftQ[i][0], dim*sizeof(int), hipMemcpyHostToDevice, stream_l[i]));
cudaErrchk(hipMemcpyAsync(offsetLeftT_d[i], &offsetLeftT[i][0], dim*sizeof(int), hipMemcpyHostToDevice, stream_l[i]));
cudaErrchk(hipMemcpyAsync(offsetRightQ_d[i], &offsetRightQ[i][0], dim*sizeof(int), hipMemcpyHostToDevice, stream_r[i]));
cudaErrchk(hipMemcpyAsync(offsetRightT_d[i], &offsetRightT[i][0], dim*sizeof(int), hipMemcpyHostToDevice, stream_r[i]));
//copy sequences to the GPU
cudaErrchk(hipMemcpyAsync(prefQ_d[i], prefQ[i], totalLengthQPref[i]*sizeof(char), hipMemcpyHostToDevice, stream_l[i]));
cudaErrchk(hipMemcpyAsync(prefT_d[i], prefT[i], totalLengthTPref[i]*sizeof(char), hipMemcpyHostToDevice, stream_l[i]));
cudaErrchk(hipMemcpyAsync(suffQ_d[i], suffQ[i], totalLengthQSuff[i]*sizeof(char), hipMemcpyHostToDevice, stream_r[i]));
cudaErrchk(hipMemcpyAsync(suffT_d[i], suffT[i], totalLengthTSuff[i]*sizeof(char), hipMemcpyHostToDevice, stream_r[i]));
}
auto start_c = NOW;
//execute kernels
#pragma omp parallel for
for(int i = 0; i<ngpus;i++)
{
hipSetDevice(i);
int dim = nSequences;
if(i==ngpus-1)
dim = nSequencesLast;
hipLaunchKernelGGL(( extendSeedLGappedXDropOneDirectionGlobal)
, dim3(dim), dim3(n_threads), n_threads*sizeof(short), stream_l[i],
seed_d_l[i], prefQ_d[i], prefT_d[i], EXTEND_LEFTL, XDrop,
scoreLeft_d[i], offsetLeftQ_d[i], offsetLeftT_d[i],
ant_len_left[i], ant_l[i], n_threads);
hipLaunchKernelGGL(( extendSeedLGappedXDropOneDirectionGlobal)
, dim3(dim), dim3(n_threads), n_threads*sizeof(short), stream_r[i],
seed_d_r[i], suffQ_d[i], suffT_d[i], EXTEND_RIGHTL, XDrop,
scoreRight_d[i], offsetRightQ_d[i], offsetRightT_d[i],
ant_len_right[i], ant_r[i], n_threads);
}
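	//the loop above launches one block per alignment: left and right extensions
	//run concurrently on separate non-blocking streams, each block using
	//n_threads*sizeof(short) bytes of dynamic shared memory for its
	//max-reduction buffer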
#pragma omp parallel for
for(int i = 0; i < ngpus; i++)
{
hipSetDevice(i);
int dim = nSequences;
if(i==ngpus-1)
dim = nSequencesLast;
cudaErrchk(hipMemcpyAsync(scoreLeft+i*nSequences, scoreLeft_d[i], dim*sizeof(int), hipMemcpyDeviceToHost, stream_l[i]));
cudaErrchk(hipMemcpyAsync(&seeds[0]+i*nSequences, seed_d_l[i], dim*sizeof(SeedL), hipMemcpyDeviceToHost,stream_l[i]));
cudaErrchk(hipMemcpyAsync(scoreRight+i*nSequences, scoreRight_d[i], dim*sizeof(int), hipMemcpyDeviceToHost, stream_r[i]));
cudaErrchk(hipMemcpyAsync(&seeds_r[0]+i*nSequences, seed_d_r[i], dim*sizeof(SeedL), hipMemcpyDeviceToHost,stream_r[i]));
}
#pragma omp parallel for
for(int i = 0; i < ngpus; i++)
{
hipSetDevice(i);
hipDeviceSynchronize();
}
auto end_c = NOW;
duration<double> compute = end_c-start_c;
std::cout << "Device only time [seconds]:\t" << compute.count() << std::endl;
cudaErrchk(hipPeekAtLastError());
#pragma omp parallel for
for(int i = 0; i < ngpus; i++){
hipSetDevice(i);
hipStreamDestroy(stream_l[i]);
hipStreamDestroy(stream_r[i]);
free(prefQ[i]);
free(prefT[i]);
free(suffQ[i]);
free(suffT[i]);
cudaErrchk(hipFree(prefQ_d[i]));
cudaErrchk(hipFree(prefT_d[i]));
cudaErrchk(hipFree(suffQ_d[i]));
cudaErrchk(hipFree(suffT_d[i]));
cudaErrchk(hipFree(offsetLeftQ_d[i]));
cudaErrchk(hipFree(offsetLeftT_d[i]));
cudaErrchk(hipFree(offsetRightQ_d[i]));
cudaErrchk(hipFree(offsetRightT_d[i]));
cudaErrchk(hipFree(seed_d_l[i]));
cudaErrchk(hipFree(seed_d_r[i]));
cudaErrchk(hipFree(scoreLeft_d[i]));
cudaErrchk(hipFree(scoreRight_d[i]));
cudaErrchk(hipFree(ant_l[i]));
cudaErrchk(hipFree(ant_r[i]));
}
for(int i = 0; i < numAlignments; i++){
res[i] = scoreLeft[i]+scoreRight[i]+kmer_length;
setEndPositionH(seeds[i], getEndPositionH(seeds_r[i]));
setEndPositionV(seeds[i], getEndPositionV(seeds_r[i]));
std::cout << res[i] << std::endl;
}
free(scoreLeft);
free(scoreRight);
}
| 9466973bc091956b0d5d48c30ce7fa738c39db94.cu | //==================================================================
// Title: x-drop seed-and-extend alignment algorithm
// Author: A. Zeni, G. Guidi
//==================================================================
#include "logan_functions.cuh"
#include "seed.cuh"
#include <cuda.h>
#include <chrono>
using namespace std;
using namespace chrono;
#define cudaErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true)
{
if(code != cudaSuccess){
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if(abort) exit(code);
}
}
__inline__ __device__ void warpReduce(volatile short *input, int myTId)
{
input[myTId] = (input[myTId] > input[myTId + 32]) ? input[myTId] : input[myTId + 32];
input[myTId] = (input[myTId] > input[myTId + 16]) ? input[myTId] : input[myTId + 16];
input[myTId] = (input[myTId] > input[myTId + 8]) ? input[myTId] : input[myTId + 8];
input[myTId] = (input[myTId] > input[myTId + 4]) ? input[myTId] : input[myTId + 4];
input[myTId] = (input[myTId] > input[myTId + 2]) ? input[myTId] : input[myTId + 2];
input[myTId] = (input[myTId] > input[myTId + 1]) ? input[myTId] : input[myTId + 1];
}
__inline__ __device__ short reduce_max(short *input, int dim, int n_threads)
{
unsigned int myTId = threadIdx.x;
if(dim>32){
for(int i = n_threads/2; i >32; i>>=1){
if(myTId < i){
input[myTId] = (input[myTId] > input[myTId + i]) ? input[myTId] : input[myTId + i];
}
__syncthreads();
}
}
if(myTId<32)
warpReduce(input, myTId);
__syncthreads();
return input[0];
}
__inline__ __device__ void updateExtendedSeedL(
SeedL &seed,
ExtensionDirectionL direction, //as there are only 4 directions we may consider even smaller data types
int cols,
int rows,
int lowerDiag,
int upperDiag)
{
if (direction == EXTEND_LEFTL)
{
int beginDiag = seed.beginDiagonal;
// Set lower and upper diagonals.
if (getLowerDiagonal(seed) > beginDiag + lowerDiag)
setLowerDiagonal(seed, beginDiag + lowerDiag);
if (getUpperDiagonal(seed) < beginDiag + upperDiag)
setUpperDiagonal(seed, beginDiag + upperDiag);
// Set new start position of seed.
seed.beginPositionH -= rows;
seed.beginPositionV -= cols;
} else { // direction == EXTEND_RIGHTL
// Set new lower and upper diagonals.
int endDiag = seed.endDiagonal;
if (getUpperDiagonal(seed) < endDiag - lowerDiag)
setUpperDiagonal(seed, (endDiag - lowerDiag));
if (getLowerDiagonal(seed) > (endDiag - upperDiag))
setLowerDiagonal(seed, endDiag - upperDiag);
// Set new end position of seed.
seed.endPositionH += rows;
seed.endPositionV += cols;
}
}
__inline__ __device__ void computeAntidiag(
const short *antiDiag1,
const short *antiDiag2,
short *antiDiag3,
const char* querySeg,
const char* databaseSeg,
const int best,
const int scoreDropOff,
const int cols,
const int rows,
const int minCol,
const int maxCol,
const int antiDiagNo,
const int offset1,
const int offset2,
const ExtensionDirectionL direction,
int n_threads)
{
int tid = threadIdx.x;
for(int i = 0; i < maxCol; i+=n_threads){
int col = tid + minCol + i;
int queryPos, dbPos;
queryPos = col - 1;
dbPos = col + rows - antiDiagNo - 1;
if(col < maxCol){
int tmp = max_logan(antiDiag2[col-offset2],antiDiag2[col-offset2-1]) + GAP_EXT;
int score = (querySeg[queryPos] == databaseSeg[dbPos]) ? MATCH : MISMATCH;
tmp = max_logan(antiDiag1[col-offset1-1]+score,tmp);
antiDiag3[tid+1+i] = (tmp < best - scoreDropOff) ? UNDEF : tmp;
}
}
}
__inline__ __device__ void calcExtendedLowerDiag(int &lowerDiag,
int const &minCol,
int const &antiDiagNo)
{
int minRow = antiDiagNo - minCol;
if (minCol - minRow < lowerDiag)
lowerDiag = minCol - minRow;
}
__inline__ __device__ void calcExtendedUpperDiag(
int &upperDiag,
int const &maxCol,
int const &antiDiagNo)
{
int maxRow = antiDiagNo + 1 - maxCol;
if (maxCol - 1 - maxRow > upperDiag)
upperDiag = maxCol - 1 - maxRow;
}
__inline__ __device__ void initAntiDiag3(
short *antiDiag3,
int &a3size,
int const &offset,
int const &maxCol,
int const &antiDiagNo,
int const &minScore,
int const &gapCost,
int const &undefined)
{
a3size = maxCol + 1 - offset;
antiDiag3[0] = undefined;
antiDiag3[maxCol - offset] = undefined;
if (antiDiagNo * gapCost > minScore)
{
if (offset == 0) // init first column
antiDiag3[0] = antiDiagNo * gapCost;
if (antiDiagNo - maxCol == 0) // init first row
antiDiag3[maxCol - offset] = antiDiagNo * gapCost;
}
}
__inline__ __device__ void initAntiDiags(
short *antiDiag1,
short *antiDiag2,
short *antiDiag3,
int &a2size,
int &a3size,
int const &dropOff,
int const &gapCost,
int const &undefined)
{
a2size = 1;
antiDiag2[0] = 0;
a3size = 2;
antiDiag3[0] = gapCost;
antiDiag3[1] = gapCost;
}
__global__ void extendSeedLGappedXDropOneDirectionGlobal(
SeedL *__restrict__ seed,
const char *__restrict__ querySegArray,
const char *__restrict__ databaseSegArray,
const ExtensionDirectionL direction,
const int scoreDropOff,
int *__restrict__ res,
const int *__restrict__ offsetQuery,
const int *__restrict__ offsetTarget,
const int offAntidiag,
short *__restrict__ antidiag,
const int n_threads)
{
extern __shared__ short temp_alloc[];
short *temp= &temp_alloc[0];
int myId = blockIdx.x;
int myTId = threadIdx.x;
const char *querySeg;
const char *databaseSeg;
if(myId==0){
querySeg = querySegArray;
databaseSeg = databaseSegArray;
}
else{
querySeg = querySegArray + offsetQuery[myId-1];
databaseSeg = databaseSegArray + offsetTarget[myId-1];
}
short *antiDiag1 = &antidiag[myId*offAntidiag*3];
short* antiDiag2 = &antiDiag1[offAntidiag];
short* antiDiag3 = &antiDiag2[offAntidiag];
SeedL mySeed(seed[myId]);
//dimension of the antidiagonals
int a1size = 0, a2size = 0, a3size = 0;
int cols, rows;
if(myId == 0){
cols = offsetQuery[myId]+1;
rows = offsetTarget[myId]+1;
}
else{
cols = offsetQuery[myId]-offsetQuery[myId-1]+1;
rows = offsetTarget[myId]-offsetTarget[myId-1]+1;
}
if (rows == 1 || cols == 1) return;
int minCol = 1;
int maxCol = 2;
int offset1 = 0; // number of leading columns that need not be calculated in antiDiag1
int offset2 = 0; // in antiDiag2
int offset3 = 0; // in antiDiag3
initAntiDiags(antiDiag1,antiDiag2, antiDiag3, a2size, a3size, scoreDropOff, GAP_EXT, UNDEF);
int antiDiagNo = 1; // the currently calculated anti-diagonal
int best = 0; // maximal score value in the DP matrix (for drop-off calculation)
int lowerDiag = 0;
int upperDiag = 0;
while (minCol < maxCol)
{
++antiDiagNo;
//antidiagswap
//antiDiag2 -> antiDiag1
//antiDiag3 -> antiDiag2
//antiDiag1 -> antiDiag3
short *t = antiDiag1;
antiDiag1 = antiDiag2;
antiDiag2 = antiDiag3;
antiDiag3 = t;
int t_l = a1size;
a1size = a2size;
a2size = a3size;
a3size = t_l;
offset1 = offset2;
offset2 = offset3;
offset3 = minCol-1;
initAntiDiag3(antiDiag3, a3size, offset3, maxCol, antiDiagNo, best - scoreDropOff, GAP_EXT, UNDEF);
computeAntidiag(antiDiag1, antiDiag2, antiDiag3, querySeg, databaseSeg,
best, scoreDropOff, cols, rows, minCol, maxCol, antiDiagNo,
offset1, offset2, direction, n_threads);
//roofline analysis
__syncthreads();
int tmp, antiDiagBest = UNDEF;
for(int i=0; i<a3size; i+=n_threads){
int size = a3size-i;
if(myTId<n_threads){
temp[myTId] = (myTId<size) ? antiDiag3[myTId+i]:UNDEF;
}
__syncthreads();
tmp = reduce_max(temp,size, n_threads);
antiDiagBest = (tmp>antiDiagBest) ? tmp:antiDiagBest;
}
best = (best > antiDiagBest) ? best : antiDiagBest;
while (minCol - offset3 < a3size && antiDiag3[minCol - offset3] == UNDEF &&
minCol - offset2 - 1 < a2size && antiDiag2[minCol - offset2 - 1] == UNDEF)
{
++minCol;
}
// Calculate new maxCol
while (maxCol - offset3 > 0 && (antiDiag3[maxCol - offset3 - 1] == UNDEF) &&
(antiDiag2[maxCol - offset2 - 1] == UNDEF))
{
--maxCol;
}
++maxCol;
// Calculate new lowerDiag and upperDiag of extended seed
calcExtendedLowerDiag(lowerDiag, minCol, antiDiagNo);
calcExtendedUpperDiag(upperDiag, maxCol - 1, antiDiagNo);
// end of databaseSeg reached?
minCol = (minCol > (antiDiagNo + 2 - rows)) ? minCol : (antiDiagNo + 2 - rows);
// end of querySeg reached?
maxCol = (maxCol < cols) ? maxCol : cols;
}
int longestExtensionCol = a3size + offset3 - 2;
int longestExtensionRow = antiDiagNo - longestExtensionCol;
int longestExtensionScore = antiDiag3[longestExtensionCol - offset3];
if (longestExtensionScore == UNDEF)
{
if (antiDiag2[a2size -2] != UNDEF)
{
// reached end of query segment
longestExtensionCol = a2size + offset2 - 2;
longestExtensionRow = antiDiagNo - 1 - longestExtensionCol;
longestExtensionScore = antiDiag2[longestExtensionCol - offset2];
}
else if (a2size > 2 && antiDiag2[a2size-3] != UNDEF)
{
// reached end of database segment
longestExtensionCol = a2size + offset2 - 3;
longestExtensionRow = antiDiagNo - 1 - longestExtensionCol;
longestExtensionScore = antiDiag2[longestExtensionCol - offset2];
}
}
if (longestExtensionScore == UNDEF){
// general case
for (int i = 0; i < a1size; ++i){
if (antiDiag1[i] > longestExtensionScore){
longestExtensionScore = antiDiag1[i];
longestExtensionCol = i + offset1;
longestExtensionRow = antiDiagNo - 2 - longestExtensionCol;
}
}
}
if (longestExtensionScore != UNDEF)
updateExtendedSeedL(mySeed, direction, longestExtensionCol, longestExtensionRow, lowerDiag, upperDiag);
seed[myId] = mySeed;
res[myId] = longestExtensionScore;
}
void extendSeedL(std::vector<SeedL> &seeds,
ExtensionDirectionL direction,
std::vector<std::string> &target,
std::vector<std::string> &query,
std::vector<ScoringSchemeL> &penalties,
int const& XDrop,
int const& kmer_length,
int *res,
int numAlignments,
int ngpus,
int n_threads
)
{
if(scoreGapExtend(penalties[0]) >= 0){
cout<<"Error: Logan does not support gap extension penalty >= 0\n";
exit(-1);
}
if(scoreGapOpen(penalties[0]) >= 0){
cout<<"Error: Logan does not support gap opening penalty >= 0\n";
exit(-1);
}
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
std::cout << "Error: no device found\n";
return;
}
if (ngpus > deviceCount || ngpus > MAX_GPUS) {
std::cout << "Error: the maximum number of devices allowed is "
<< std::min(deviceCount, MAX_GPUS) << std::endl;
return;
}
//start measuring time
#ifdef ADAPTABLE
n_threads = (XDrop/WARP_DIM + 1)* WARP_DIM;
if(n_threads>1024)
n_threads=1024;
#endif
//declare streams
cudaStream_t stream_r[MAX_GPUS], stream_l[MAX_GPUS];
// NB nSequences is correlated to the number of GPUs that we have
int nSequences = numAlignments/ngpus;
int nSequencesLast = nSequences+numAlignments%ngpus;
//final result of the alignment
int *scoreLeft = (int *)malloc(numAlignments * sizeof(int));
int *scoreRight = (int *)malloc(numAlignments * sizeof(int));
//create two sets of seeds
//copy seeds
vector<SeedL> seeds_r;
vector<SeedL> seeds_l;
seeds_r.reserve(numAlignments);
for (size_t i=0; i<seeds.size(); i++){
seeds_r.push_back(seeds[i]);
}
//sequences offsets
vector<int> offsetLeftQ[MAX_GPUS];
vector<int> offsetLeftT[MAX_GPUS];
vector<int> offsetRightQ[MAX_GPUS];
vector<int> offsetRightT[MAX_GPUS];
//shared_mem_size per block per GPU
int ant_len_left[MAX_GPUS];
int ant_len_right[MAX_GPUS];
//antidiag in case shared memory isn't enough
short *ant_l[MAX_GPUS], *ant_r[MAX_GPUS];
	//total length of the sequences
int totalLengthQPref[MAX_GPUS];
int totalLengthTPref[MAX_GPUS];
int totalLengthQSuff[MAX_GPUS];
int totalLengthTSuff[MAX_GPUS];
//declare and allocate sequences prefixes and suffixes
char *prefQ[MAX_GPUS], *prefT[MAX_GPUS];
char *suffQ[MAX_GPUS], *suffT[MAX_GPUS];
//declare GPU offsets
int *offsetLeftQ_d[MAX_GPUS], *offsetLeftT_d[MAX_GPUS];
int *offsetRightQ_d[MAX_GPUS], *offsetRightT_d[MAX_GPUS];
//declare GPU results
int *scoreLeft_d[MAX_GPUS], *scoreRight_d[MAX_GPUS];
//declare GPU seeds
SeedL *seed_d_l[MAX_GPUS], *seed_d_r[MAX_GPUS];
//declare prefixes and suffixes on the GPU
char *prefQ_d[MAX_GPUS], *prefT_d[MAX_GPUS];
char *suffQ_d[MAX_GPUS], *suffT_d[MAX_GPUS];
std::vector<double> pergpustime(ngpus);
#pragma omp parallel for
for(int i = 0; i < ngpus; i++){
int dim = nSequences;
if(i==ngpus-1)
dim = nSequencesLast;
//compute offsets and shared memory per block
int MYTHREAD = omp_get_thread_num();
auto start_setup_ithread = NOW;
ant_len_left[i]=0;
ant_len_right[i]=0;
for(int j = 0; j < dim; j++){
offsetLeftQ[i].push_back(getBeginPositionV(seeds[j+i*nSequences]));
offsetLeftT[i].push_back(getBeginPositionH(seeds[j+i*nSequences]));
ant_len_left[i] = std::max(std::min(offsetLeftQ[i][j],offsetLeftT[i][j]), ant_len_left[i]);
offsetRightQ[i].push_back(query[j+i*nSequences].size()-getEndPositionV(seeds[j+i*nSequences]));
offsetRightT[i].push_back(target[j+i*nSequences].size()-getEndPositionH(seeds[j+i*nSequences]));
ant_len_right[i] = std::max(std::min(offsetRightQ[i][j], offsetRightT[i][j]), ant_len_right[i]);
}
//compute antidiagonal offsets
partial_sum(offsetLeftQ[i].begin(),offsetLeftQ[i].end(),offsetLeftQ[i].begin());
partial_sum(offsetLeftT[i].begin(),offsetLeftT[i].end(),offsetLeftT[i].begin());
partial_sum(offsetRightQ[i].begin(),offsetRightQ[i].end(),offsetRightQ[i].begin());
partial_sum(offsetRightT[i].begin(),offsetRightT[i].end(),offsetRightT[i].begin());
//set total length of the sequences
totalLengthQPref[i] = offsetLeftQ[i][dim-1];
totalLengthTPref[i] = offsetLeftT[i][dim-1];
totalLengthQSuff[i] = offsetRightQ[i][dim-1];
totalLengthTSuff[i] = offsetRightT[i][dim-1];
//allocate sequences prefix and suffix on the CPU
prefQ[i] = (char*)malloc(sizeof(char)*totalLengthQPref[i]);
prefT[i] = (char*)malloc(sizeof(char)*totalLengthTPref[i]);
suffQ[i] = (char*)malloc(sizeof(char)*totalLengthQSuff[i]);
suffT[i] = (char*)malloc(sizeof(char)*totalLengthTSuff[i]);
//generate prefix and suffix on the CPU
reverse_copy(query[0+i*nSequences].c_str(),query[0+i*nSequences].c_str()+offsetLeftQ[i][0],prefQ[i]);
memcpy(prefT[i], target[0+i*nSequences].c_str(), offsetLeftT[i][0]);
memcpy(suffQ[i], query[0+i*nSequences].c_str()+getEndPositionV(seeds[0+i*nSequences]), offsetRightQ[i][0]);
reverse_copy(target[0+i*nSequences].c_str()+getEndPositionH(seeds[0+i*nSequences]),target[0+i*nSequences].c_str()+getEndPositionH(seeds[0+i*nSequences])+offsetRightT[i][0],suffT[i]);
for(int j = 1; j<dim; j++){
char *seqptr = prefQ[i] + offsetLeftQ[i][j-1];
reverse_copy(query[j+i*nSequences].c_str(),query[j+i*nSequences].c_str()+(offsetLeftQ[i][j]-offsetLeftQ[i][j-1]),seqptr);
seqptr = prefT[i] + offsetLeftT[i][j-1];
memcpy(seqptr, target[j+i*nSequences].c_str(), offsetLeftT[i][j]-offsetLeftT[i][j-1]);
seqptr = suffQ[i] + offsetRightQ[i][j-1];
memcpy(seqptr, query[j+i*nSequences].c_str()+getEndPositionV(seeds[j+i*nSequences]), offsetRightQ[i][j]-offsetRightQ[i][j-1]);
seqptr = suffT[i] + offsetRightT[i][j-1];
reverse_copy(target[j+i*nSequences].c_str()+getEndPositionH(seeds[j+i*nSequences]),target[j+i*nSequences].c_str()+getEndPositionH(seeds[j+i*nSequences])+(offsetRightT[i][j]-offsetRightT[i][j-1]),seqptr);
}
auto end_setup_ithread = NOW;
duration<double> setup_ithread = end_setup_ithread - start_setup_ithread;
pergpustime[MYTHREAD] = setup_ithread.count();
}
#pragma omp parallel for
for(int i = 0; i < ngpus; i++)
{
int dim = nSequences;
if(i==ngpus-1)
dim = nSequencesLast;
//set gpu device
cudaSetDevice(i);
//create streams
cudaStreamCreateWithFlags(&stream_r[i],cudaStreamNonBlocking);
cudaStreamCreateWithFlags(&stream_l[i],cudaStreamNonBlocking);
//allocate antidiagonals on the GPU
cudaErrchk(cudaMalloc(&ant_l[i], sizeof(short)*ant_len_left[i]*3*dim));
cudaErrchk(cudaMalloc(&ant_r[i], sizeof(short)*ant_len_right[i]*3*dim));
//allocate offsets on the GPU
cudaErrchk(cudaMalloc(&offsetLeftQ_d[i], dim*sizeof(int)));
cudaErrchk(cudaMalloc(&offsetLeftT_d[i], dim*sizeof(int)));
cudaErrchk(cudaMalloc(&offsetRightQ_d[i], dim*sizeof(int)));
cudaErrchk(cudaMalloc(&offsetRightT_d[i], dim*sizeof(int)));
//allocate results on the GPU
cudaErrchk(cudaMalloc(&scoreLeft_d[i], dim*sizeof(int)));
cudaErrchk(cudaMalloc(&scoreRight_d[i], dim*sizeof(int)));
//allocate seeds on the GPU
cudaErrchk(cudaMalloc(&seed_d_l[i], dim*sizeof(SeedL)));
cudaErrchk(cudaMalloc(&seed_d_r[i], dim*sizeof(SeedL)));
//allocate sequences on the GPU
cudaErrchk(cudaMalloc(&prefQ_d[i], totalLengthQPref[i]*sizeof(char)));
cudaErrchk(cudaMalloc(&prefT_d[i], totalLengthTPref[i]*sizeof(char)));
cudaErrchk(cudaMalloc(&suffQ_d[i], totalLengthQSuff[i]*sizeof(char)));
cudaErrchk(cudaMalloc(&suffT_d[i], totalLengthTSuff[i]*sizeof(char)));
//copy seeds to the GPU
cudaErrchk(cudaMemcpyAsync(seed_d_l[i], &seeds[0]+i*nSequences, dim*sizeof(SeedL), cudaMemcpyHostToDevice, stream_l[i]));
cudaErrchk(cudaMemcpyAsync(seed_d_r[i], &seeds_r[0]+i*nSequences, dim*sizeof(SeedL), cudaMemcpyHostToDevice, stream_r[i]));
//copy offsets to the GPU
cudaErrchk(cudaMemcpyAsync(offsetLeftQ_d[i], &offsetLeftQ[i][0], dim*sizeof(int), cudaMemcpyHostToDevice, stream_l[i]));
cudaErrchk(cudaMemcpyAsync(offsetLeftT_d[i], &offsetLeftT[i][0], dim*sizeof(int), cudaMemcpyHostToDevice, stream_l[i]));
cudaErrchk(cudaMemcpyAsync(offsetRightQ_d[i], &offsetRightQ[i][0], dim*sizeof(int), cudaMemcpyHostToDevice, stream_r[i]));
cudaErrchk(cudaMemcpyAsync(offsetRightT_d[i], &offsetRightT[i][0], dim*sizeof(int), cudaMemcpyHostToDevice, stream_r[i]));
//copy sequences to the GPU
cudaErrchk(cudaMemcpyAsync(prefQ_d[i], prefQ[i], totalLengthQPref[i]*sizeof(char), cudaMemcpyHostToDevice, stream_l[i]));
cudaErrchk(cudaMemcpyAsync(prefT_d[i], prefT[i], totalLengthTPref[i]*sizeof(char), cudaMemcpyHostToDevice, stream_l[i]));
cudaErrchk(cudaMemcpyAsync(suffQ_d[i], suffQ[i], totalLengthQSuff[i]*sizeof(char), cudaMemcpyHostToDevice, stream_r[i]));
cudaErrchk(cudaMemcpyAsync(suffT_d[i], suffT[i], totalLengthTSuff[i]*sizeof(char), cudaMemcpyHostToDevice, stream_r[i]));
}
auto start_c = NOW;
//execute kernels
#pragma omp parallel for
for(int i = 0; i<ngpus;i++)
{
cudaSetDevice(i);
int dim = nSequences;
if(i==ngpus-1)
dim = nSequencesLast;
extendSeedLGappedXDropOneDirectionGlobal
<<<dim, n_threads, n_threads*sizeof(short), stream_l[i]>>> (
seed_d_l[i], prefQ_d[i], prefT_d[i], EXTEND_LEFTL, XDrop,
scoreLeft_d[i], offsetLeftQ_d[i], offsetLeftT_d[i],
ant_len_left[i], ant_l[i], n_threads);
extendSeedLGappedXDropOneDirectionGlobal
<<<dim, n_threads, n_threads*sizeof(short), stream_r[i]>>> (
seed_d_r[i], suffQ_d[i], suffT_d[i], EXTEND_RIGHTL, XDrop,
scoreRight_d[i], offsetRightQ_d[i], offsetRightT_d[i],
ant_len_right[i], ant_r[i], n_threads);
}
#pragma omp parallel for
for(int i = 0; i < ngpus; i++)
{
cudaSetDevice(i);
int dim = nSequences;
if(i==ngpus-1)
dim = nSequencesLast;
cudaErrchk(cudaMemcpyAsync(scoreLeft+i*nSequences, scoreLeft_d[i], dim*sizeof(int), cudaMemcpyDeviceToHost, stream_l[i]));
cudaErrchk(cudaMemcpyAsync(&seeds[0]+i*nSequences, seed_d_l[i], dim*sizeof(SeedL), cudaMemcpyDeviceToHost,stream_l[i]));
cudaErrchk(cudaMemcpyAsync(scoreRight+i*nSequences, scoreRight_d[i], dim*sizeof(int), cudaMemcpyDeviceToHost, stream_r[i]));
cudaErrchk(cudaMemcpyAsync(&seeds_r[0]+i*nSequences, seed_d_r[i], dim*sizeof(SeedL), cudaMemcpyDeviceToHost,stream_r[i]));
}
#pragma omp parallel for
for(int i = 0; i < ngpus; i++)
{
cudaSetDevice(i);
cudaDeviceSynchronize();
}
auto end_c = NOW;
duration<double> compute = end_c-start_c;
std::cout << "Device only time [seconds]:\t" << compute.count() << std::endl;
cudaErrchk(cudaPeekAtLastError());
#pragma omp parallel for
for(int i = 0; i < ngpus; i++){
cudaSetDevice(i);
cudaStreamDestroy(stream_l[i]);
cudaStreamDestroy(stream_r[i]);
free(prefQ[i]);
free(prefT[i]);
free(suffQ[i]);
free(suffT[i]);
cudaErrchk(cudaFree(prefQ_d[i]));
cudaErrchk(cudaFree(prefT_d[i]));
cudaErrchk(cudaFree(suffQ_d[i]));
cudaErrchk(cudaFree(suffT_d[i]));
cudaErrchk(cudaFree(offsetLeftQ_d[i]));
cudaErrchk(cudaFree(offsetLeftT_d[i]));
cudaErrchk(cudaFree(offsetRightQ_d[i]));
cudaErrchk(cudaFree(offsetRightT_d[i]));
cudaErrchk(cudaFree(seed_d_l[i]));
cudaErrchk(cudaFree(seed_d_r[i]));
cudaErrchk(cudaFree(scoreLeft_d[i]));
cudaErrchk(cudaFree(scoreRight_d[i]));
cudaErrchk(cudaFree(ant_l[i]));
cudaErrchk(cudaFree(ant_r[i]));
}
for(int i = 0; i < numAlignments; i++){
res[i] = scoreLeft[i]+scoreRight[i]+kmer_length;
setEndPositionH(seeds[i], getEndPositionH(seeds_r[i]));
setEndPositionV(seeds[i], getEndPositionV(seeds_r[i]));
std::cout << res[i] << std::endl;
}
free(scoreLeft);
free(scoreRight);
}
|
bd4fac552e47822bb49f2522b6eb6ef4f81858d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void addOneColumnPerThread(double* a, double* b, double* c, int n)
{
// Get the column for current thread
int column = (blockIdx.x * blockDim.x + threadIdx.x);
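  // One thread walks an entire column; since consecutive threads hold
  // consecutive column indices, the a/b/c[i * n + column] accesses in a given
  // iteration touch contiguous addresses and coalesce well.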
// Make sure we do not go out of bounds
if (column < n)
{
for (int i = 0; i < n; i++)
{
c[i * n + column] = a[i * n + column] + b[i * n + column];
}
}
} | bd4fac552e47822bb49f2522b6eb6ef4f81858d9.cu | #include "includes.h"
__global__ void addOneColumnPerThread(double* a, double* b, double* c, int n)
{
// Get the column for current thread
int column = (blockIdx.x * blockDim.x + threadIdx.x);
// Make sure we do not go out of bounds
if (column < n)
{
for (int i = 0; i < n; i++)
{
c[i * n + column] = a[i * n + column] + b[i * n + column];
}
}
} |
1d877f988ff167df5f95b2c6009ef20bbdc35701.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2019-2020 XGBoost contributors
*/
#include <xgboost/data.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_output_iterator.h>
#include "../common/categorical.h"
#include "../common/hist_util.cuh"
#include "../common/random.h"
#include "./ellpack_page.cuh"
#include "device_adapter_hip.cuh"
namespace xgboost {
EllpackPage::EllpackPage() : impl_{new EllpackPageImpl()} {}
EllpackPage::EllpackPage(DMatrix* dmat, const BatchParam& param)
: impl_{new EllpackPageImpl(dmat, param)} {}
EllpackPage::~EllpackPage() = default;
EllpackPage::EllpackPage(EllpackPage&& that) { std::swap(impl_, that.impl_); }
size_t EllpackPage::Size() const { return impl_->Size(); }
void EllpackPage::SetBaseRowId(size_t row_id) { impl_->SetBaseRowId(row_id); }
// Bin each input data entry, store the bin indices in compressed form.
__global__ void CompressBinEllpackKernel(
common::CompressedBufferWriter wr,
common::CompressedByteT* __restrict__ buffer, // gidx_buffer
const size_t* __restrict__ row_ptrs, // row offset of input data
const Entry* __restrict__ entries, // One batch of input data
const float* __restrict__ cuts, // HistogramCuts::cut_values_
const uint32_t* __restrict__ cut_rows, // HistogramCuts::cut_ptrs_
common::Span<FeatureType const> feature_types,
size_t base_row, // batch_row_begin
size_t n_rows,
size_t row_stride,
unsigned int null_gidx_value) {
size_t irow = threadIdx.x + blockIdx.x * blockDim.x;
int ifeature = threadIdx.y + blockIdx.y * blockDim.y;
if (irow >= n_rows || ifeature >= row_stride) {
return;
}
int row_length = static_cast<int>(row_ptrs[irow + 1] - row_ptrs[irow]);
unsigned int bin = null_gidx_value;
if (ifeature < row_length) {
Entry entry = entries[row_ptrs[irow] - row_ptrs[0] + ifeature];
int feature = entry.index;
float fvalue = entry.fvalue;
// {feature_cuts, ncuts} forms the array of cuts of `feature'.
const float* feature_cuts = &cuts[cut_rows[feature]];
int ncuts = cut_rows[feature + 1] - cut_rows[feature];
bool is_cat = common::IsCat(feature_types, ifeature);
// Assigning the bin in current entry.
// S.t.: fvalue < feature_cuts[bin]
if (is_cat) {
auto it = dh::MakeTransformIterator<int>(
feature_cuts, [](float v) { return common::AsCat(v); });
bin = thrust::lower_bound(thrust::seq, it, it + ncuts, common::AsCat(fvalue)) - it;
} else {
bin = thrust::upper_bound(thrust::seq, feature_cuts, feature_cuts + ncuts,
fvalue) -
feature_cuts;
}
if (bin >= ncuts) {
bin = ncuts - 1;
}
// Add the number of bins in previous features.
bin += cut_rows[feature];
}
// Write to gidx buffer.
wr.AtomicWriteSymbol(buffer, bin, (irow + base_row) * row_stride + ifeature);
}
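// In the kernel above, numerical features take the index of the first cut value
// strictly greater than fvalue (upper_bound), while categorical features take
// the position of the category itself (lower_bound over the cuts reinterpreted
// as category codes). The bin is clamped to the feature's last cut and shifted
// by cut_rows[feature] so indices are global across features; entries past the
// row's length keep null_gidx_value as padding.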
// Construct an ELLPACK matrix with the given number of empty rows.
EllpackPageImpl::EllpackPageImpl(int device, common::HistogramCuts cuts,
bool is_dense, size_t row_stride,
size_t n_rows)
: is_dense(is_dense),
cuts_(std::move(cuts)),
row_stride(row_stride),
n_rows(n_rows) {
monitor_.Init("ellpack_page");
dh::safe_cuda(hipSetDevice(device));
monitor_.Start("InitCompressedData");
InitCompressedData(device);
monitor_.Stop("InitCompressedData");
}
EllpackPageImpl::EllpackPageImpl(int device, common::HistogramCuts cuts,
const SparsePage &page, bool is_dense,
size_t row_stride,
common::Span<FeatureType const> feature_types)
: cuts_(std::move(cuts)), is_dense(is_dense), n_rows(page.Size()),
row_stride(row_stride) {
this->InitCompressedData(device);
this->CreateHistIndices(device, page, feature_types);
}
// Construct an ELLPACK matrix in memory.
EllpackPageImpl::EllpackPageImpl(DMatrix* dmat, const BatchParam& param)
: is_dense(dmat->IsDense()) {
monitor_.Init("ellpack_page");
dh::safe_cuda(hipSetDevice(param.gpu_id));
n_rows = dmat->Info().num_row_;
monitor_.Start("Quantiles");
// Create the quantile sketches for the dmatrix and initialize HistogramCuts.
row_stride = GetRowStride(dmat);
cuts_ = common::DeviceSketch(param.gpu_id, dmat, param.max_bin);
monitor_.Stop("Quantiles");
monitor_.Start("InitCompressedData");
this->InitCompressedData(param.gpu_id);
monitor_.Stop("InitCompressedData");
dmat->Info().feature_types.SetDevice(param.gpu_id);
auto ft = dmat->Info().feature_types.ConstDeviceSpan();
monitor_.Start("BinningCompression");
for (const auto& batch : dmat->GetBatches<SparsePage>()) {
CreateHistIndices(param.gpu_id, batch, ft);
}
monitor_.Stop("BinningCompression");
}
template <typename AdapterBatchT>
struct WriteCompressedEllpackFunctor {
WriteCompressedEllpackFunctor(common::CompressedByteT* buffer,
const common::CompressedBufferWriter& writer,
AdapterBatchT batch,
EllpackDeviceAccessor accessor,
const data::IsValidFunctor& is_valid)
: d_buffer(buffer),
writer(writer),
batch(std::move(batch)),
accessor(std::move(accessor)),
is_valid(is_valid) {}
common::CompressedByteT* d_buffer;
common::CompressedBufferWriter writer;
AdapterBatchT batch;
EllpackDeviceAccessor accessor;
data::IsValidFunctor is_valid;
using Tuple = thrust::tuple<size_t, size_t, size_t>;
__device__ size_t operator()(Tuple out) {
auto e = batch.GetElement(out.get<2>());
if (is_valid(e)) {
// -1 because the scan is inclusive
size_t output_position =
accessor.row_stride * e.row_idx + out.get<1>() - 1;
auto bin_idx = accessor.SearchBin(e.value, e.column_idx);
writer.AtomicWriteSymbol(d_buffer, bin_idx, output_position);
}
return 0;
}
};
template <typename Tuple>
struct TupleScanOp {
__device__ Tuple operator()(Tuple a, Tuple b) {
// Key equal
if (a.template get<0>() == b.template get<0>()) {
b.template get<1>() += a.template get<1>();
return b;
}
// Not equal
return b;
}
};
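// TupleScanOp makes the inclusive scan segmented by row: element 0 of the tuple
// is the row index (the key) and element 1 is the valid-element flag, so within
// a row the flags accumulate and the count restarts when the key changes. For
// example, row keys {0, 0, 0, 1, 1} with validity flags {1, 0, 1, 1, 1} scan to
// {1, 1, 2, 1, 2}; WriteCompressedEllpackFunctor then uses count - 1 as the
// output column within the row.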
// Change the value type of thrust discard iterator so we can use it with cub
template <typename T>
class TypedDiscard : public thrust::discard_iterator<T> {
public:
using value_type = T; // NOLINT
};
// Here the data is already correctly ordered and simply needs to be compacted
// to remove missing data
template <typename AdapterBatchT>
void CopyDataToEllpack(const AdapterBatchT& batch, EllpackPageImpl* dst,
int device_idx, float missing) {
// Some witchcraft happens here
// The goal is to copy valid elements out of the input to an ellpack matrix
// with a given row stride, using no extra working memory Standard stream
// compaction needs to be modified to do this, so we manually define a
// segmented stream compaction via operators on an inclusive scan. The output
// of this inclusive scan is fed to a custom function which works out the
// correct output position
auto counting = thrust::make_counting_iterator(0llu);
data::IsValidFunctor is_valid(missing);
auto key_iter = dh::MakeTransformIterator<size_t>(
counting,
[=] __device__(size_t idx) {
return batch.GetElement(idx).row_idx;
});
auto value_iter = dh::MakeTransformIterator<size_t>(
counting,
[=] __device__(size_t idx) -> size_t {
return is_valid(batch.GetElement(idx));
});
auto key_value_index_iter = thrust::make_zip_iterator(
thrust::make_tuple(key_iter, value_iter, counting));
// Tuple[0] = The row index of the input, used as a key to define segments
// Tuple[1] = Scanned flags of valid elements for each row
// Tuple[2] = The index in the input data
using Tuple = thrust::tuple<size_t, size_t, size_t>;
auto device_accessor = dst->GetDeviceAccessor(device_idx);
common::CompressedBufferWriter writer(device_accessor.NumSymbols());
auto d_compressed_buffer = dst->gidx_buffer.DevicePointer();
// We redirect the scan output into this functor to do the actual writing
WriteCompressedEllpackFunctor<AdapterBatchT> functor(
d_compressed_buffer, writer, batch, device_accessor, is_valid);
TypedDiscard<Tuple> discard;
thrust::transform_output_iterator<
WriteCompressedEllpackFunctor<AdapterBatchT>, decltype(discard)>
out(discard, functor);
// Go one level down into hipcub::DeviceScan API to set OffsetT as 64 bit
// So we don't crash on n > 2^31
size_t temp_storage_bytes = 0;
using DispatchScan =
cub::DispatchScan<decltype(key_value_index_iter), decltype(out),
TupleScanOp<Tuple>, cub::NullType, int64_t>;
DispatchScan::Dispatch(nullptr, temp_storage_bytes, key_value_index_iter, out,
TupleScanOp<Tuple>(), cub::NullType(), batch.Size(),
nullptr, false);
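  // The call above, given a null temporary-storage pointer, only computes
  // temp_storage_bytes; the call below runs the actual scan with the allocated
  // scratch buffer (the standard two-phase CUB dispatch pattern).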
dh::TemporaryArray<char> temp_storage(temp_storage_bytes);
DispatchScan::Dispatch(temp_storage.data().get(), temp_storage_bytes,
key_value_index_iter, out, TupleScanOp<Tuple>(),
cub::NullType(), batch.Size(), nullptr, false);
}
void WriteNullValues(EllpackPageImpl* dst, int device_idx,
common::Span<size_t> row_counts) {
// Write the null values
auto device_accessor = dst->GetDeviceAccessor(device_idx);
common::CompressedBufferWriter writer(device_accessor.NumSymbols());
auto d_compressed_buffer = dst->gidx_buffer.DevicePointer();
auto row_stride = dst->row_stride;
dh::LaunchN(device_idx, row_stride * dst->n_rows, [=] __device__(size_t idx) {
auto writer_non_const =
writer; // For some reason this variable gets captured as const
size_t row_idx = idx / row_stride;
size_t row_offset = idx % row_stride;
if (row_offset >= row_counts[row_idx]) {
writer_non_const.AtomicWriteSymbol(d_compressed_buffer,
device_accessor.NullValue(), idx);
}
});
}
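// The adapter-based constructor below builds the page in two passes:
// CopyDataToEllpack writes the bin index of every valid element at its
// compacted position, and WriteNullValues pads every remaining slot
// (row_offset >= row_counts[row_idx]) with the null bin so that each row spans
// exactly row_stride symbols.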
template <typename AdapterBatch>
EllpackPageImpl::EllpackPageImpl(AdapterBatch batch, float missing, int device,
bool is_dense, int nthread,
common::Span<size_t> row_counts_span,
size_t row_stride, size_t n_rows, size_t n_cols,
common::HistogramCuts const& cuts) {
dh::safe_cuda(hipSetDevice(device));
*this = EllpackPageImpl(device, cuts, is_dense, row_stride, n_rows);
CopyDataToEllpack(batch, this, device, missing);
WriteNullValues(this, device, row_counts_span);
}
#define ELLPACK_BATCH_SPECIALIZE(__BATCH_T) \
template EllpackPageImpl::EllpackPageImpl( \
__BATCH_T batch, float missing, int device, \
bool is_dense, int nthread, \
common::Span<size_t> row_counts_span, \
size_t row_stride, size_t n_rows, size_t n_cols, \
common::HistogramCuts const& cuts);
ELLPACK_BATCH_SPECIALIZE(data::CudfAdapterBatch)
ELLPACK_BATCH_SPECIALIZE(data::CupyAdapterBatch)
// A functor that copies the data from one EllpackPage to another.
struct CopyPage {
common::CompressedBufferWriter cbw;
common::CompressedByteT* dst_data_d;
common::CompressedIterator<uint32_t> src_iterator_d;
// The number of elements to skip.
size_t offset;
CopyPage(EllpackPageImpl* dst, EllpackPageImpl* src, size_t offset)
: cbw{dst->NumSymbols()},
dst_data_d{dst->gidx_buffer.DevicePointer()},
src_iterator_d{src->gidx_buffer.DevicePointer(), src->NumSymbols()},
offset(offset) {}
__device__ void operator()(size_t element_id) {
cbw.AtomicWriteSymbol(dst_data_d, src_iterator_d[element_id],
element_id + offset);
}
};
// Copy the data from the given EllpackPage to the current page.
size_t EllpackPageImpl::Copy(int device, EllpackPageImpl* page, size_t offset) {
monitor_.Start("Copy");
size_t num_elements = page->n_rows * page->row_stride;
CHECK_EQ(row_stride, page->row_stride);
CHECK_EQ(NumSymbols(), page->NumSymbols());
CHECK_GE(n_rows * row_stride, offset + num_elements);
if (page == this) {
LOG(FATAL) << "Concatenating the same Ellpack.";
return this->n_rows * this->row_stride;
}
gidx_buffer.SetDevice(device);
page->gidx_buffer.SetDevice(device);
dh::LaunchN(device, num_elements, CopyPage(this, page, offset));
monitor_.Stop("Copy");
return num_elements;
}
// A functor that compacts the rows from one EllpackPage into another.
struct CompactPage {
common::CompressedBufferWriter cbw;
common::CompressedByteT* dst_data_d;
common::CompressedIterator<uint32_t> src_iterator_d;
/*! \brief An array that maps the rows from the full DMatrix to the compacted
* page.
*
* The total size is the number of rows in the original, uncompacted DMatrix.
* Elements are the row ids in the compacted page. Rows not needed are set to
* SIZE_MAX.
*
* An example compacting 16 rows to 8 rows:
* [SIZE_MAX, 0, 1, SIZE_MAX, SIZE_MAX, 2, SIZE_MAX, 3, 4, 5, SIZE_MAX, 6,
* SIZE_MAX, 7, SIZE_MAX, SIZE_MAX]
*/
common::Span<size_t> row_indexes;
size_t base_rowid;
size_t row_stride;
CompactPage(EllpackPageImpl* dst, EllpackPageImpl* src,
common::Span<size_t> row_indexes)
: cbw{dst->NumSymbols()},
dst_data_d{dst->gidx_buffer.DevicePointer()},
src_iterator_d{src->gidx_buffer.DevicePointer(), src->NumSymbols()},
row_indexes(row_indexes),
base_rowid{src->base_rowid},
row_stride{src->row_stride} {}
__device__ void operator()(size_t row_id) {
size_t src_row = base_rowid + row_id;
size_t dst_row = row_indexes[src_row];
if (dst_row == SIZE_MAX) return;
size_t dst_offset = dst_row * row_stride;
size_t src_offset = row_id * row_stride;
for (size_t j = 0; j < row_stride; j++) {
cbw.AtomicWriteSymbol(dst_data_d, src_iterator_d[src_offset + j],
dst_offset + j);
}
}
};
// Compacts the data from the given EllpackPage into the current page.
void EllpackPageImpl::Compact(int device, EllpackPageImpl* page,
common::Span<size_t> row_indexes) {
monitor_.Start("Compact");
CHECK_EQ(row_stride, page->row_stride);
CHECK_EQ(NumSymbols(), page->NumSymbols());
CHECK_LE(page->base_rowid + page->n_rows, row_indexes.size());
gidx_buffer.SetDevice(device);
page->gidx_buffer.SetDevice(device);
dh::LaunchN(device, page->n_rows, CompactPage(this, page, row_indexes));
monitor_.Stop("Compact");
}
// Initialize the buffer to store compressed features.
void EllpackPageImpl::InitCompressedData(int device) {
size_t num_symbols = NumSymbols();
// Required buffer size for storing data matrix in ELLPack format.
size_t compressed_size_bytes =
common::CompressedBufferWriter::CalculateBufferSize(row_stride * n_rows,
num_symbols);
gidx_buffer.SetDevice(device);
// Don't call fill unnecessarily
if (gidx_buffer.Size() == 0) {
gidx_buffer.Resize(compressed_size_bytes, 0);
} else {
gidx_buffer.Resize(compressed_size_bytes, 0);
thrust::fill(dh::tbegin(gidx_buffer), dh::tend(gidx_buffer), 0);
}
}
// Compress a CSR page into ELLPACK.
void EllpackPageImpl::CreateHistIndices(int device,
const SparsePage& row_batch,
common::Span<FeatureType const> feature_types) {
if (row_batch.Size() == 0) return;
unsigned int null_gidx_value = NumSymbols() - 1;
const auto& offset_vec = row_batch.offset.ConstHostVector();
// bin and compress entries in batches of rows
size_t gpu_batch_nrows =
std::min(dh::TotalMemory(device) / (16 * row_stride * sizeof(Entry)),
static_cast<size_t>(row_batch.Size()));
size_t gpu_nbatches = common::DivRoundUp(row_batch.Size(), gpu_batch_nrows);
for (size_t gpu_batch = 0; gpu_batch < gpu_nbatches; ++gpu_batch) {
size_t batch_row_begin = gpu_batch * gpu_batch_nrows;
size_t batch_row_end =
std::min((gpu_batch + 1) * gpu_batch_nrows, row_batch.Size());
size_t batch_nrows = batch_row_end - batch_row_begin;
const auto ent_cnt_begin = offset_vec[batch_row_begin];
const auto ent_cnt_end = offset_vec[batch_row_end];
/*! \brief row offset in SparsePage (the input data). */
dh::device_vector<size_t> row_ptrs(batch_nrows + 1);
thrust::copy(offset_vec.data() + batch_row_begin,
offset_vec.data() + batch_row_end + 1, row_ptrs.begin());
// number of entries in this batch.
size_t n_entries = ent_cnt_end - ent_cnt_begin;
dh::device_vector<Entry> entries_d(n_entries);
// copy data entries to device.
if (row_batch.data.DeviceCanRead()) {
auto const& d_data = row_batch.data.ConstDeviceSpan();
dh::safe_cuda(hipMemcpyAsync(
entries_d.data().get(), d_data.data() + ent_cnt_begin,
n_entries * sizeof(Entry), hipMemcpyDefault));
} else {
const std::vector<Entry>& data_vec = row_batch.data.ConstHostVector();
dh::safe_cuda(hipMemcpyAsync(
entries_d.data().get(), data_vec.data() + ent_cnt_begin,
n_entries * sizeof(Entry), hipMemcpyDefault));
}
const dim3 block3(32, 8, 1); // 256 threads
const dim3 grid3(common::DivRoundUp(batch_nrows, block3.x),
common::DivRoundUp(row_stride, block3.y), 1);
auto device_accessor = GetDeviceAccessor(device);
dh::LaunchKernel {grid3, block3}(
CompressBinEllpackKernel, common::CompressedBufferWriter(NumSymbols()),
gidx_buffer.DevicePointer(), row_ptrs.data().get(),
entries_d.data().get(), device_accessor.gidx_fvalue_map.data(),
device_accessor.feature_segments.data(), feature_types,
row_batch.base_rowid + batch_row_begin, batch_nrows, row_stride,
null_gidx_value);
}
}
// Return the number of rows contained in this page.
size_t EllpackPageImpl::Size() const { return n_rows; }
// Return the memory cost for storing the compressed features.
size_t EllpackPageImpl::MemCostBytes(size_t num_rows, size_t row_stride,
const common::HistogramCuts& cuts) {
// Required buffer size for storing data matrix in ELLPACK format.
size_t compressed_size_bytes =
common::CompressedBufferWriter::CalculateBufferSize(row_stride * num_rows,
cuts.TotalBins() + 1);
return compressed_size_bytes;
}
EllpackDeviceAccessor EllpackPageImpl::GetDeviceAccessor(int device) const {
gidx_buffer.SetDevice(device);
return EllpackDeviceAccessor(
device, cuts_, is_dense, row_stride, base_rowid, n_rows,
common::CompressedIterator<uint32_t>(gidx_buffer.ConstDevicePointer(),
NumSymbols()));
}
} // namespace xgboost
| 1d877f988ff167df5f95b2c6009ef20bbdc35701.cu | /*!
* Copyright 2019-2020 XGBoost contributors
*/
#include <xgboost/data.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_output_iterator.h>
#include "../common/categorical.h"
#include "../common/hist_util.cuh"
#include "../common/random.h"
#include "./ellpack_page.cuh"
#include "device_adapter.cuh"
namespace xgboost {
EllpackPage::EllpackPage() : impl_{new EllpackPageImpl()} {}
EllpackPage::EllpackPage(DMatrix* dmat, const BatchParam& param)
: impl_{new EllpackPageImpl(dmat, param)} {}
EllpackPage::~EllpackPage() = default;
EllpackPage::EllpackPage(EllpackPage&& that) { std::swap(impl_, that.impl_); }
size_t EllpackPage::Size() const { return impl_->Size(); }
void EllpackPage::SetBaseRowId(size_t row_id) { impl_->SetBaseRowId(row_id); }
// Bin each input data entry, store the bin indices in compressed form.
__global__ void CompressBinEllpackKernel(
common::CompressedBufferWriter wr,
common::CompressedByteT* __restrict__ buffer, // gidx_buffer
const size_t* __restrict__ row_ptrs, // row offset of input data
const Entry* __restrict__ entries, // One batch of input data
const float* __restrict__ cuts, // HistogramCuts::cut_values_
const uint32_t* __restrict__ cut_rows, // HistogramCuts::cut_ptrs_
common::Span<FeatureType const> feature_types,
size_t base_row, // batch_row_begin
size_t n_rows,
size_t row_stride,
unsigned int null_gidx_value) {
size_t irow = threadIdx.x + blockIdx.x * blockDim.x;
int ifeature = threadIdx.y + blockIdx.y * blockDim.y;
if (irow >= n_rows || ifeature >= row_stride) {
return;
}
int row_length = static_cast<int>(row_ptrs[irow + 1] - row_ptrs[irow]);
unsigned int bin = null_gidx_value;
if (ifeature < row_length) {
Entry entry = entries[row_ptrs[irow] - row_ptrs[0] + ifeature];
int feature = entry.index;
float fvalue = entry.fvalue;
// {feature_cuts, ncuts} forms the array of cuts of `feature'.
const float* feature_cuts = &cuts[cut_rows[feature]];
int ncuts = cut_rows[feature + 1] - cut_rows[feature];
bool is_cat = common::IsCat(feature_types, ifeature);
// Assigning the bin in current entry.
// S.t.: fvalue < feature_cuts[bin]
if (is_cat) {
auto it = dh::MakeTransformIterator<int>(
feature_cuts, [](float v) { return common::AsCat(v); });
bin = thrust::lower_bound(thrust::seq, it, it + ncuts, common::AsCat(fvalue)) - it;
} else {
bin = thrust::upper_bound(thrust::seq, feature_cuts, feature_cuts + ncuts,
fvalue) -
feature_cuts;
}
if (bin >= ncuts) {
bin = ncuts - 1;
}
// Add the number of bins in previous features.
bin += cut_rows[feature];
}
// Write to gidx buffer.
wr.AtomicWriteSymbol(buffer, bin, (irow + base_row) * row_stride + ifeature);
}
// Construct an ELLPACK matrix with the given number of empty rows.
EllpackPageImpl::EllpackPageImpl(int device, common::HistogramCuts cuts,
bool is_dense, size_t row_stride,
size_t n_rows)
: is_dense(is_dense),
cuts_(std::move(cuts)),
row_stride(row_stride),
n_rows(n_rows) {
monitor_.Init("ellpack_page");
dh::safe_cuda(cudaSetDevice(device));
monitor_.Start("InitCompressedData");
InitCompressedData(device);
monitor_.Stop("InitCompressedData");
}
EllpackPageImpl::EllpackPageImpl(int device, common::HistogramCuts cuts,
const SparsePage &page, bool is_dense,
size_t row_stride,
common::Span<FeatureType const> feature_types)
: cuts_(std::move(cuts)), is_dense(is_dense), n_rows(page.Size()),
row_stride(row_stride) {
this->InitCompressedData(device);
this->CreateHistIndices(device, page, feature_types);
}
// Construct an ELLPACK matrix in memory.
EllpackPageImpl::EllpackPageImpl(DMatrix* dmat, const BatchParam& param)
: is_dense(dmat->IsDense()) {
monitor_.Init("ellpack_page");
dh::safe_cuda(cudaSetDevice(param.gpu_id));
n_rows = dmat->Info().num_row_;
monitor_.Start("Quantiles");
// Create the quantile sketches for the dmatrix and initialize HistogramCuts.
row_stride = GetRowStride(dmat);
cuts_ = common::DeviceSketch(param.gpu_id, dmat, param.max_bin);
monitor_.Stop("Quantiles");
monitor_.Start("InitCompressedData");
this->InitCompressedData(param.gpu_id);
monitor_.Stop("InitCompressedData");
dmat->Info().feature_types.SetDevice(param.gpu_id);
auto ft = dmat->Info().feature_types.ConstDeviceSpan();
monitor_.Start("BinningCompression");
for (const auto& batch : dmat->GetBatches<SparsePage>()) {
CreateHistIndices(param.gpu_id, batch, ft);
}
monitor_.Stop("BinningCompression");
}
template <typename AdapterBatchT>
struct WriteCompressedEllpackFunctor {
WriteCompressedEllpackFunctor(common::CompressedByteT* buffer,
const common::CompressedBufferWriter& writer,
AdapterBatchT batch,
EllpackDeviceAccessor accessor,
const data::IsValidFunctor& is_valid)
: d_buffer(buffer),
writer(writer),
batch(std::move(batch)),
accessor(std::move(accessor)),
is_valid(is_valid) {}
common::CompressedByteT* d_buffer;
common::CompressedBufferWriter writer;
AdapterBatchT batch;
EllpackDeviceAccessor accessor;
data::IsValidFunctor is_valid;
using Tuple = thrust::tuple<size_t, size_t, size_t>;
__device__ size_t operator()(Tuple out) {
auto e = batch.GetElement(out.get<2>());
if (is_valid(e)) {
// -1 because the scan is inclusive
size_t output_position =
accessor.row_stride * e.row_idx + out.get<1>() - 1;
auto bin_idx = accessor.SearchBin(e.value, e.column_idx);
writer.AtomicWriteSymbol(d_buffer, bin_idx, output_position);
}
return 0;
}
};
template <typename Tuple>
struct TupleScanOp {
__device__ Tuple operator()(Tuple a, Tuple b) {
// Key equal
if (a.template get<0>() == b.template get<0>()) {
b.template get<1>() += a.template get<1>();
return b;
}
// Not equal
return b;
}
};
// Change the value type of thrust discard iterator so we can use it with cub
template <typename T>
class TypedDiscard : public thrust::discard_iterator<T> {
public:
using value_type = T; // NOLINT
};
// Here the data is already correctly ordered and simply needs to be compacted
// to remove missing data
template <typename AdapterBatchT>
void CopyDataToEllpack(const AdapterBatchT& batch, EllpackPageImpl* dst,
int device_idx, float missing) {
// Some witchcraft happens here
// The goal is to copy valid elements out of the input to an ellpack matrix
// with a given row stride, using no extra working memory. Standard stream
// compaction needs to be modified to do this, so we manually define a
// segmented stream compaction via operators on an inclusive scan. The output
// of this inclusive scan is fed to a custom function which works out the
// correct output position
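// A small hypothetical illustration: for two rows [v, miss, v] and [v, v],
// the row keys are [0,0,0,1,1] and the valid flags [1,0,1,1,1]; the
// segmented inclusive scan gives [1,1,2,1,2], so each valid element lands
// at output position row_stride * row_idx + scan - 1, matching the -1 in
// WriteCompressedEllpackFunctor.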
auto counting = thrust::make_counting_iterator(0llu);
data::IsValidFunctor is_valid(missing);
auto key_iter = dh::MakeTransformIterator<size_t>(
counting,
[=] __device__(size_t idx) {
return batch.GetElement(idx).row_idx;
});
auto value_iter = dh::MakeTransformIterator<size_t>(
counting,
[=] __device__(size_t idx) -> size_t {
return is_valid(batch.GetElement(idx));
});
auto key_value_index_iter = thrust::make_zip_iterator(
thrust::make_tuple(key_iter, value_iter, counting));
// Tuple[0] = The row index of the input, used as a key to define segments
// Tuple[1] = Scanned flags of valid elements for each row
// Tuple[2] = The index in the input data
using Tuple = thrust::tuple<size_t, size_t, size_t>;
auto device_accessor = dst->GetDeviceAccessor(device_idx);
common::CompressedBufferWriter writer(device_accessor.NumSymbols());
auto d_compressed_buffer = dst->gidx_buffer.DevicePointer();
// We redirect the scan output into this functor to do the actual writing
WriteCompressedEllpackFunctor<AdapterBatchT> functor(
d_compressed_buffer, writer, batch, device_accessor, is_valid);
TypedDiscard<Tuple> discard;
thrust::transform_output_iterator<
WriteCompressedEllpackFunctor<AdapterBatchT>, decltype(discard)>
out(discard, functor);
// Go one level down into cub::DeviceScan API to set OffsetT as 64 bit
// So we don't crash on n > 2^31
size_t temp_storage_bytes = 0;
using DispatchScan =
cub::DispatchScan<decltype(key_value_index_iter), decltype(out),
TupleScanOp<Tuple>, cub::NullType, int64_t>;
DispatchScan::Dispatch(nullptr, temp_storage_bytes, key_value_index_iter, out,
TupleScanOp<Tuple>(), cub::NullType(), batch.Size(),
nullptr, false);
dh::TemporaryArray<char> temp_storage(temp_storage_bytes);
DispatchScan::Dispatch(temp_storage.data().get(), temp_storage_bytes,
key_value_index_iter, out, TupleScanOp<Tuple>(),
cub::NullType(), batch.Size(), nullptr, false);
}
void WriteNullValues(EllpackPageImpl* dst, int device_idx,
common::Span<size_t> row_counts) {
// Write the null values
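// Every row is padded out to the fixed row_stride; slots past the row's
// actual element count are filled with the accessor's null bin value.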
auto device_accessor = dst->GetDeviceAccessor(device_idx);
common::CompressedBufferWriter writer(device_accessor.NumSymbols());
auto d_compressed_buffer = dst->gidx_buffer.DevicePointer();
auto row_stride = dst->row_stride;
dh::LaunchN(device_idx, row_stride * dst->n_rows, [=] __device__(size_t idx) {
auto writer_non_const =
writer; // For some reason this variable gets captured as const
size_t row_idx = idx / row_stride;
size_t row_offset = idx % row_stride;
if (row_offset >= row_counts[row_idx]) {
writer_non_const.AtomicWriteSymbol(d_compressed_buffer,
device_accessor.NullValue(), idx);
}
});
}
template <typename AdapterBatch>
EllpackPageImpl::EllpackPageImpl(AdapterBatch batch, float missing, int device,
bool is_dense, int nthread,
common::Span<size_t> row_counts_span,
size_t row_stride, size_t n_rows, size_t n_cols,
common::HistogramCuts const& cuts) {
dh::safe_cuda(cudaSetDevice(device));
*this = EllpackPageImpl(device, cuts, is_dense, row_stride, n_rows);
CopyDataToEllpack(batch, this, device, missing);
WriteNullValues(this, device, row_counts_span);
}
#define ELLPACK_BATCH_SPECIALIZE(__BATCH_T) \
template EllpackPageImpl::EllpackPageImpl( \
__BATCH_T batch, float missing, int device, \
bool is_dense, int nthread, \
common::Span<size_t> row_counts_span, \
size_t row_stride, size_t n_rows, size_t n_cols, \
common::HistogramCuts const& cuts);
ELLPACK_BATCH_SPECIALIZE(data::CudfAdapterBatch)
ELLPACK_BATCH_SPECIALIZE(data::CupyAdapterBatch)
// A functor that copies the data from one EllpackPage to another.
struct CopyPage {
common::CompressedBufferWriter cbw;
common::CompressedByteT* dst_data_d;
common::CompressedIterator<uint32_t> src_iterator_d;
// The number of elements to skip.
size_t offset;
CopyPage(EllpackPageImpl* dst, EllpackPageImpl* src, size_t offset)
: cbw{dst->NumSymbols()},
dst_data_d{dst->gidx_buffer.DevicePointer()},
src_iterator_d{src->gidx_buffer.DevicePointer(), src->NumSymbols()},
offset(offset) {}
__device__ void operator()(size_t element_id) {
cbw.AtomicWriteSymbol(dst_data_d, src_iterator_d[element_id],
element_id + offset);
}
};
// Copy the data from the given EllpackPage to the current page.
size_t EllpackPageImpl::Copy(int device, EllpackPageImpl* page, size_t offset) {
monitor_.Start("Copy");
size_t num_elements = page->n_rows * page->row_stride;
CHECK_EQ(row_stride, page->row_stride);
CHECK_EQ(NumSymbols(), page->NumSymbols());
CHECK_GE(n_rows * row_stride, offset + num_elements);
if (page == this) {
LOG(FATAL) << "Concatenating the same Ellpack.";
return this->n_rows * this->row_stride;
}
gidx_buffer.SetDevice(device);
page->gidx_buffer.SetDevice(device);
dh::LaunchN(device, num_elements, CopyPage(this, page, offset));
monitor_.Stop("Copy");
return num_elements;
}
// A functor that compacts the rows from one EllpackPage into another.
struct CompactPage {
common::CompressedBufferWriter cbw;
common::CompressedByteT* dst_data_d;
common::CompressedIterator<uint32_t> src_iterator_d;
/*! \brief An array that maps the rows from the full DMatrix to the compacted
* page.
*
* The total size is the number of rows in the original, uncompacted DMatrix.
* Elements are the row ids in the compacted page. Rows not needed are set to
* SIZE_MAX.
*
* An example compacting 16 rows to 8 rows:
* [SIZE_MAX, 0, 1, SIZE_MAX, SIZE_MAX, 2, SIZE_MAX, 3, 4, 5, SIZE_MAX, 6,
* SIZE_MAX, 7, SIZE_MAX, SIZE_MAX]
*/
common::Span<size_t> row_indexes;
size_t base_rowid;
size_t row_stride;
CompactPage(EllpackPageImpl* dst, EllpackPageImpl* src,
common::Span<size_t> row_indexes)
: cbw{dst->NumSymbols()},
dst_data_d{dst->gidx_buffer.DevicePointer()},
src_iterator_d{src->gidx_buffer.DevicePointer(), src->NumSymbols()},
row_indexes(row_indexes),
base_rowid{src->base_rowid},
row_stride{src->row_stride} {}
__device__ void operator()(size_t row_id) {
size_t src_row = base_rowid + row_id;
size_t dst_row = row_indexes[src_row];
if (dst_row == SIZE_MAX) return;
size_t dst_offset = dst_row * row_stride;
size_t src_offset = row_id * row_stride;
for (size_t j = 0; j < row_stride; j++) {
cbw.AtomicWriteSymbol(dst_data_d, src_iterator_d[src_offset + j],
dst_offset + j);
}
}
};
// Compacts the data from the given EllpackPage into the current page.
void EllpackPageImpl::Compact(int device, EllpackPageImpl* page,
common::Span<size_t> row_indexes) {
monitor_.Start("Compact");
CHECK_EQ(row_stride, page->row_stride);
CHECK_EQ(NumSymbols(), page->NumSymbols());
CHECK_LE(page->base_rowid + page->n_rows, row_indexes.size());
gidx_buffer.SetDevice(device);
page->gidx_buffer.SetDevice(device);
dh::LaunchN(device, page->n_rows, CompactPage(this, page, row_indexes));
monitor_.Stop("Compact");
}
// Initialize the buffer to store compressed features.
void EllpackPageImpl::InitCompressedData(int device) {
size_t num_symbols = NumSymbols();
// Required buffer size for storing data matrix in ELLPack format.
size_t compressed_size_bytes =
common::CompressedBufferWriter::CalculateBufferSize(row_stride * n_rows,
num_symbols);
gidx_buffer.SetDevice(device);
// Don't call fill unnecessarily
if (gidx_buffer.Size() == 0) {
gidx_buffer.Resize(compressed_size_bytes, 0);
} else {
gidx_buffer.Resize(compressed_size_bytes, 0);
thrust::fill(dh::tbegin(gidx_buffer), dh::tend(gidx_buffer), 0);
}
}
// Compress a CSR page into ELLPACK.
void EllpackPageImpl::CreateHistIndices(int device,
const SparsePage& row_batch,
common::Span<FeatureType const> feature_types) {
if (row_batch.Size() == 0) return;
unsigned int null_gidx_value = NumSymbols() - 1;
const auto& offset_vec = row_batch.offset.ConstHostVector();
// bin and compress entries in batches of rows
size_t gpu_batch_nrows =
std::min(dh::TotalMemory(device) / (16 * row_stride * sizeof(Entry)),
static_cast<size_t>(row_batch.Size()));
size_t gpu_nbatches = common::DivRoundUp(row_batch.Size(), gpu_batch_nrows);
for (size_t gpu_batch = 0; gpu_batch < gpu_nbatches; ++gpu_batch) {
size_t batch_row_begin = gpu_batch * gpu_batch_nrows;
size_t batch_row_end =
std::min((gpu_batch + 1) * gpu_batch_nrows, row_batch.Size());
size_t batch_nrows = batch_row_end - batch_row_begin;
const auto ent_cnt_begin = offset_vec[batch_row_begin];
const auto ent_cnt_end = offset_vec[batch_row_end];
/*! \brief row offset in SparsePage (the input data). */
dh::device_vector<size_t> row_ptrs(batch_nrows + 1);
thrust::copy(offset_vec.data() + batch_row_begin,
offset_vec.data() + batch_row_end + 1, row_ptrs.begin());
// number of entries in this batch.
size_t n_entries = ent_cnt_end - ent_cnt_begin;
dh::device_vector<Entry> entries_d(n_entries);
// copy data entries to device.
if (row_batch.data.DeviceCanRead()) {
auto const& d_data = row_batch.data.ConstDeviceSpan();
dh::safe_cuda(cudaMemcpyAsync(
entries_d.data().get(), d_data.data() + ent_cnt_begin,
n_entries * sizeof(Entry), cudaMemcpyDefault));
} else {
const std::vector<Entry>& data_vec = row_batch.data.ConstHostVector();
dh::safe_cuda(cudaMemcpyAsync(
entries_d.data().get(), data_vec.data() + ent_cnt_begin,
n_entries * sizeof(Entry), cudaMemcpyDefault));
}
const dim3 block3(32, 8, 1); // 256 threads
const dim3 grid3(common::DivRoundUp(batch_nrows, block3.x),
common::DivRoundUp(row_stride, block3.y), 1);
auto device_accessor = GetDeviceAccessor(device);
dh::LaunchKernel {grid3, block3}(
CompressBinEllpackKernel, common::CompressedBufferWriter(NumSymbols()),
gidx_buffer.DevicePointer(), row_ptrs.data().get(),
entries_d.data().get(), device_accessor.gidx_fvalue_map.data(),
device_accessor.feature_segments.data(), feature_types,
row_batch.base_rowid + batch_row_begin, batch_nrows, row_stride,
null_gidx_value);
}
}
// Return the number of rows contained in this page.
size_t EllpackPageImpl::Size() const { return n_rows; }
// Return the memory cost for storing the compressed features.
size_t EllpackPageImpl::MemCostBytes(size_t num_rows, size_t row_stride,
const common::HistogramCuts& cuts) {
// Required buffer size for storing data matrix in ELLPACK format.
size_t compressed_size_bytes =
common::CompressedBufferWriter::CalculateBufferSize(row_stride * num_rows,
cuts.TotalBins() + 1);
return compressed_size_bytes;
}
EllpackDeviceAccessor EllpackPageImpl::GetDeviceAccessor(int device) const {
gidx_buffer.SetDevice(device);
return EllpackDeviceAccessor(
device, cuts_, is_dense, row_stride, base_rowid, n_rows,
common::CompressedIterator<uint32_t>(gidx_buffer.ConstDevicePointer(),
NumSymbols()));
}
} // namespace xgboost
|
ea5cb608d6d2b91bdcaf78b0532f08edf89aa6c9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <opencv2/cudafilters.hpp>
#include <opencv2/cudaarithm.hpp>
#include <opencv2/core/cvstd.hpp>
#include "support.h"
#define NUM_STREAMS 100
int main(int argc, char* argv[])
{
time_t t;
srand((unsigned) time(&t));
string baseDir = "/home/headleyjz/captcha_data/captchas";
vector<string> files = getFiles(baseDir);
Mat::setDefaultAllocator(cuda::HostMem::getAllocator());
vector<Mat> h_images = getImages(baseDir, files);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float milliseconds = 0;
hipEventRecord(start);
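// Round-robin the images over a pool of streams so the uploads below can
// overlap; the pinned HostMem allocator selected above is what allows
// GpuMat::upload() to run asynchronously on those streams.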
cuda::Stream* streams = new cuda::Stream[NUM_STREAMS];
for (int i = 0; i < NUM_STREAMS; i++)
{
streams[i] = cuda::Stream();
}
cuda::GpuMat* d_images = new cuda::GpuMat[h_images.size()];
for (int i = 0; i < 99; i++)
{
int streamAssignment = i % NUM_STREAMS;
printf("image %i assigned to stream %i\n", i, streamAssignment);
d_images[i].upload(h_images[i], streams[streamAssignment]);
}
/* try
{
cuda::GpuMat d_thresh, d_blur, d_erosion_1, d_erosion_2, d_dilation, d_output_thresh;
cuda::GpuMat d_src(h_images[0]);
Mat h_dst;
// d_src.upload(images[0]);
cuda::threshold(d_src, d_thresh, 125, 255, THRESH_BINARY_INV);
Ptr<cuda::Filter> median = cuda::createMedianFilter(d_src.type(), 3);
median->apply(d_thresh, d_blur);
Mat kernel = getStructuringElement(MORPH_RECT, Size(2, 3));
Ptr<cuda::Filter> erosion_1 = cuda::createMorphologyFilter(MORPH_ERODE, d_src.type(), kernel, Point(-1, -1), 1);
erosion_1->apply(d_blur, d_erosion_1);
kernel = getStructuringElement(MORPH_RECT, Size(3, 1));
Ptr<cuda::Filter> erosion_2 = cuda::createMorphologyFilter(MORPH_ERODE, d_src.type(), kernel, Point(-1, -1), 1);
erosion_2->apply(d_erosion_1, d_erosion_2);
kernel = getStructuringElement(MORPH_RECT, Size(2, 2));
Ptr<cuda::Filter> dilation = cuda::createMorphologyFilter(MORPH_DILATE, d_src.type(), kernel, Point(-1, -1), 1);
dilation->apply(d_erosion_2, d_dilation);
// cuda::threshold(d_dilation, d_output_thresh, 125, 255, THRESH_BINARY_INV);
d_dilation.download(h_dst);
vector<int> compression_params;
compression_params.push_back(CV_IMWRITE_JPEG_QUALITY);
compression_params.push_back(95);
imwrite("result.jpg", h_dst, compression_params);
} catch (const cv::Exception& ex)
{
std::cout << "Error: " << ex.what() << std::endl;
}*/
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "hipGetLastError() returned %d: %s\n", err, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("GPU time to process the images %f ms\n", milliseconds);
return 0;
}
| ea5cb608d6d2b91bdcaf78b0532f08edf89aa6c9.cu | #include <stdio.h>
#include <opencv2/cudafilters.hpp>
#include <opencv2/cudaarithm.hpp>
#include <opencv2/core/cvstd.hpp>
#include "support.h"
#define NUM_STREAMS 100
int main(int argc, char* argv[])
{
time_t t;
srand((unsigned) time(&t));
string baseDir = "/home/headleyjz/captcha_data/captchas";
vector<string> files = getFiles(baseDir);
Mat::setDefaultAllocator(cuda::HostMem::getAllocator());
vector<Mat> h_images = getImages(baseDir, files);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float milliseconds = 0;
cudaEventRecord(start);
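// Round-robin the images over a pool of streams so the uploads below can
// overlap; the pinned HostMem allocator selected above is what allows
// GpuMat::upload() to run asynchronously on those streams.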
cuda::Stream* streams = new cuda::Stream[NUM_STREAMS];
for (int i = 0; i < NUM_STREAMS; i++)
{
streams[i] = cuda::Stream();
}
cuda::GpuMat* d_images = new cuda::GpuMat[h_images.size()];
for (int i = 0; i < 99; i++)
{
int streamAssignment = i % NUM_STREAMS;
printf("image %i assigned to stream %i\n", i, streamAssignment);
d_images[i].upload(h_images[i], streams[streamAssignment]);
}
/* try
{
cuda::GpuMat d_thresh, d_blur, d_erosion_1, d_erosion_2, d_dilation, d_output_thresh;
cuda::GpuMat d_src(h_images[0]);
Mat h_dst;
// d_src.upload(images[0]);
cuda::threshold(d_src, d_thresh, 125, 255, THRESH_BINARY_INV);
Ptr<cuda::Filter> median = cuda::createMedianFilter(d_src.type(), 3);
median->apply(d_thresh, d_blur);
Mat kernel = getStructuringElement(MORPH_RECT, Size(2, 3));
Ptr<cuda::Filter> erosion_1 = cuda::createMorphologyFilter(MORPH_ERODE, d_src.type(), kernel, Point(-1, -1), 1);
erosion_1->apply(d_blur, d_erosion_1);
kernel = getStructuringElement(MORPH_RECT, Size(3, 1));
Ptr<cuda::Filter> erosion_2 = cuda::createMorphologyFilter(MORPH_ERODE, d_src.type(), kernel, Point(-1, -1), 1);
erosion_2->apply(d_erosion_1, d_erosion_2);
kernel = getStructuringElement(MORPH_RECT, Size(2, 2));
Ptr<cuda::Filter> dilation = cuda::createMorphologyFilter(MORPH_DILATE, d_src.type(), kernel, Point(-1, -1), 1);
dilation->apply(d_erosion_2, d_dilation);
// cuda::threshold(d_dilation, d_output_thresh, 125, 255, THRESH_BINARY_INV);
d_dilation.download(h_dst);
vector<int> compression_params;
compression_params.push_back(CV_IMWRITE_JPEG_QUALITY);
compression_params.push_back(95);
imwrite("result.jpg", h_dst, compression_params);
} catch (const cv::Exception& ex)
{
std::cout << "Error: " << ex.what() << std::endl;
}*/
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
cudaError_t cudaError = cudaGetLastError();
if (cudaError != cudaSuccess)
{
fprintf(stderr, "cudaGetLastError() returned %d: %s\n", cudaError, cudaGetErrorString(cudaError));
exit(EXIT_FAILURE);
}
printf("GPU time to process the images %f ms\n", milliseconds);
return 0;
}
|
d90295df1d4028ecd78b802c05acb18475b0c834.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/roi_align_layer.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
const Dtype * offset_bottom_data = bottom_data + (roi_batch_ind*channels+c)*height*width;
Dtype roi_start_w = max(0.0, bottom_rois[1]); roi_start_w *= spatial_scale;
Dtype roi_start_h = max(0.0, bottom_rois[2] );roi_start_h*= spatial_scale;
Dtype roi_end_w = min(bottom_rois[3], static_cast<Dtype>(width)); roi_end_w*= spatial_scale;
Dtype roi_end_h = min(bottom_rois[4],static_cast<Dtype>(height)); roi_end_h*= spatial_scale;
// Force malformed ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w , 1.0);
Dtype roi_height = max(roi_end_h - roi_start_h , 1.0);
Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
Dtype h_cur = roi_start_h + static_cast<Dtype>(ph) * bin_size_h;
Dtype w_cur = roi_start_w + static_cast<Dtype>(pw) * bin_size_w;
int hend = ceil(h_cur - 0.5);
//int pwstart = floor(pw_cur);
int wend = ceil(w_cur - 0.5);
//phstart = min(max(static_cast<Dtype>(phstart), 0.0), static_cast<Dtype>(pooled_height - 1.0));
hend = min(max(static_cast<Dtype>(hend), 1.0), static_cast<Dtype>(height - 1.0));
//pwstart = min(max(static_cast<Dtype>(pwstart), 0.0), static_cast<Dtype>(pooled_width - 1.0));
wend = min(max(static_cast<Dtype>(wend), 1.0), static_cast<Dtype>(width - 1.0));
int hstart = hend - 1;
int wstart = wend - 1;
int bottom_index1 = hstart* width + wstart;
int bottom_index2 = hstart* width + wend;
int bottom_index3 = hend* width + wstart;
int bottom_index4 = hend* width + wend;
Dtype u = h_cur - hstart - 0.5 ;
Dtype v = w_cur - wstart - 0.5 ;
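// Bilinear interpolation: the four neighbouring bins are blended with
// weights (1-u)(1-v), (1-u)v, u(1-v) and uv, where u and v measure the
// vertical/horizontal offset of the sampling point from the top-left
// neighbour.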
top_data[index] = (1 - u)*(1 - v)*offset_bottom_data[bottom_index1] + (1 - u)*(v)*offset_bottom_data[bottom_index2]+(u)*(1 - v)*offset_bottom_data[bottom_index3] + u*v*offset_bottom_data[bottom_index4];
}
}
template <typename Dtype>
void ROIAlignLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype*bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ROIPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
Dtype roi_start_w = max(0.0, offset_bottom_rois[1]); roi_start_w *= spatial_scale;
Dtype roi_start_h = max(0.0, offset_bottom_rois[2]); roi_start_h *= spatial_scale;
Dtype roi_end_w = min(offset_bottom_rois[3], static_cast<Dtype>(width)); roi_end_w *= spatial_scale;
Dtype roi_end_h = min(offset_bottom_rois[4], static_cast<Dtype>(height)); roi_end_h *= spatial_scale;
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
// Force malformed ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w , 1.0);
Dtype roi_height = max(roi_end_h - roi_start_h , 1.0);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
Dtype ph_cur = static_cast<Dtype>(h - roi_start_h) / bin_size_h;
Dtype pw_cur = static_cast<Dtype>(w - roi_start_w) / bin_size_w;
//int phstart = floor(ph_cur);
int phend = ceil(ph_cur - 0.5);
//int pwstart = floor(pw_cur);
int pwend = ceil(pw_cur - 0.5);
//phstart = min(max(static_cast<Dtype>(phstart), 0.0), static_cast<Dtype>(pooled_height - 1.0));
phend = min(max(static_cast<Dtype>(phend), 1.0), static_cast<Dtype>(pooled_height - 1.0));
//pwstart = min(max(static_cast<Dtype>(pwstart), 0.0), static_cast<Dtype>(pooled_width - 1.0));
pwend = min(max(static_cast<Dtype>(pwend), 0.0), static_cast<Dtype>(pooled_width - 1.0));
int phstart = phend - 1;
int pwstart = pwend - 1;
int top_index1 = phstart* pooled_width + pwstart;
int top_index2 = phstart* pooled_width + pwend;
int top_index3 = phend* pooled_width + pwstart;
int top_index4 = phend* pooled_width + pwend;
Dtype u = ph_cur - phstart - 0.5 ;
Dtype v = pw_cur - pwstart - 0.5 ;
bottom_diff[index] += (1 - u)*(1 - v)*offset_top_diff[top_index1] + (1 - u)*(v)*offset_top_diff[top_index2]+(u)*(1 - v)*offset_top_diff[top_index3] + u*v*offset_top_diff[top_index4];
}
}
}
template <typename Dtype>
void ROIAlignLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ROIPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIAlignLayer);
} // namespace caffe
| d90295df1d4028ecd78b802c05acb18475b0c834.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/roi_align_layer.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
const Dtype * offset_bottom_data = bottom_data + (roi_batch_ind*channels+c)*height*width;
Dtype roi_start_w = max(0.0, bottom_rois[1]); roi_start_w *= spatial_scale;
Dtype roi_start_h = max(0.0, bottom_rois[2] );roi_start_h*= spatial_scale;
Dtype roi_end_w = min(bottom_rois[3], static_cast<Dtype>(width)); roi_end_w*= spatial_scale;
Dtype roi_end_h = min(bottom_rois[4],static_cast<Dtype>(height)); roi_end_h*= spatial_scale;
// Force malformed ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w , 1.0);
Dtype roi_height = max(roi_end_h - roi_start_h , 1.0);
Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
Dtype h_cur = roi_start_h + static_cast<Dtype>(ph) * bin_size_h;
Dtype w_cur = roi_start_w + static_cast<Dtype>(pw) * bin_size_w;
int hend = ceil(h_cur - 0.5);
//int pwstart = floor(pw_cur);
int wend = ceil(w_cur - 0.5);
//phstart = min(max(static_cast<Dtype>(phstart), 0.0), static_cast<Dtype>(pooled_height - 1.0));
hend = min(max(static_cast<Dtype>(hend), 1.0), static_cast<Dtype>(height - 1.0));
//pwstart = min(max(static_cast<Dtype>(pwstart), 0.0), static_cast<Dtype>(pooled_width - 1.0));
wend = min(max(static_cast<Dtype>(wend), 1.0), static_cast<Dtype>(width - 1.0));
int hstart = hend - 1;
int wstart = wend - 1;
int bottom_index1 = hstart* width + wstart;
int bottom_index2 = hstart* width + wend;
int bottom_index3 = hend* width + wstart;
int bottom_index4 = hend* width + wend;
Dtype u = h_cur - hstart - 0.5 ;
Dtype v = w_cur - wstart - 0.5 ;
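// Bilinear interpolation: the four neighbouring bins are blended with
// weights (1-u)(1-v), (1-u)v, u(1-v) and uv, where u and v measure the
// vertical/horizontal offset of the sampling point from the top-left
// neighbour.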
top_data[index] = (1 - u)*(1 - v)*offset_bottom_data[bottom_index1] + (1 - u)*(v)*offset_bottom_data[bottom_index2]+(u)*(1 - v)*offset_bottom_data[bottom_index3] + u*v*offset_bottom_data[bottom_index4];
}
}
template <typename Dtype>
void ROIAlignLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype*bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
ROIPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
Dtype roi_start_w = max(0.0, offset_bottom_rois[1]); roi_start_w *= spatial_scale;
Dtype roi_start_h = max(0.0, offset_bottom_rois[2]); roi_start_h *= spatial_scale;
Dtype roi_end_w = min(offset_bottom_rois[3], static_cast<Dtype>(width)); roi_end_w *= spatial_scale;
Dtype roi_end_h = min(offset_bottom_rois[4], static_cast<Dtype>(height)); roi_end_h *= spatial_scale;
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
// Force malformed ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w , 1.0);
Dtype roi_height = max(roi_end_h - roi_start_h , 1.0);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
Dtype ph_cur = static_cast<Dtype>(h - roi_start_h) / bin_size_h;
Dtype pw_cur = static_cast<Dtype>(w - roi_start_w) / bin_size_w;
//int phstart = floor(ph_cur);
int phend = ceil(ph_cur - 0.5);
//int pwstart = floor(pw_cur);
int pwend = ceil(pw_cur - 0.5);
//phstart = min(max(static_cast<Dtype>(phstart), 0.0), static_cast<Dtype>(pooled_height - 1.0));
phend = min(max(static_cast<Dtype>(phend), 1.0), static_cast<Dtype>(pooled_height - 1.0));
//pwstart = min(max(static_cast<Dtype>(pwstart), 0.0), static_cast<Dtype>(pooled_width - 1.0));
pwend = min(max(static_cast<Dtype>(pwend), 0.0), static_cast<Dtype>(pooled_width - 1.0));
int phstart = phend - 1;
int pwstart = pwend - 1;
int top_index1 = phstart* pooled_width + pwstart;
int top_index2 = phstart* pooled_width + pwend;
int top_index3 = phend* pooled_width + pwstart;
int top_index4 = phend* pooled_width + pwend;
Dtype u = ph_cur - phstart - 0.5 ;
Dtype v = pw_cur - pwstart - 0.5 ;
bottom_diff[index] += (1 - u)*(1 - v)*offset_top_diff[top_index1] + (1 - u)*(v)*offset_top_diff[top_index2]+(u)*(1 - v)*offset_top_diff[top_index3] + u*v*offset_top_diff[top_index4];
}
}
}
template <typename Dtype>
void ROIAlignLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// NOLINT_NEXT_LINE(whitespace/operators)
ROIPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIAlignLayer);
} // namespace caffe
|
84a16a45f05af6e784216ab793c9643c8af7ea6a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
__global__ void init5(int* arr, int n){
int idx=blockDim.x*blockIdx.x+threadIdx.x;
if(idx<n*n)
arr[idx]=5;
}
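// init7 below uses a 2D launch: each (x, y) thread writes one element of the
// n x n matrix, in contrast to the flat 1D indexing used by init5 above.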
__global__ void init7(int* arr,int n){
int idx=blockDim.x*blockIdx.x+threadIdx.x;
int idy=blockDim.y*blockIdx.y+threadIdx.y;
if(idx<n && idy<n)
arr[idy*n+idx]=7;
}
__host__ void printMtrx(int* const mtrx,const int n){
for(int i=0;i<n;i++){
for(int j=0;j<n;j++)
printf("%d ",mtrx[i*n+j]);
printf("\n");
}
printf("\n");
}
__host__ int main(int argc,char* argv[]){
const int n = 5;
size_t bytes=sizeof(int)*n*n;
int* cuMtrx=NULL;
hipMalloc((void**)&cuMtrx,bytes);
int* mt1=(int*)malloc(bytes);
memset(mt1,0,bytes);
hipMemcpy(cuMtrx,mt1,bytes,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( init5), dim3(n*n),dim3(1), 0, 0, cuMtrx,n);
hipMemcpy(mt1,cuMtrx,bytes,hipMemcpyDeviceToHost);
printMtrx(mt1,n);
dim3 grid(n,n,1);
hipLaunchKernelGGL(( init7), dim3(grid),dim3(1), 0, 0, cuMtrx,n);
hipMemcpy(mt1,cuMtrx,bytes,hipMemcpyDeviceToHost);
printMtrx(mt1,n);
hipFree(cuMtrx);
return 0;
}
| 84a16a45f05af6e784216ab793c9643c8af7ea6a.cu | #include <cstdio>
__global__ void init5(int* arr, int n){
int idx=blockDim.x*blockIdx.x+threadIdx.x;
if(idx<n*n)
arr[idx]=5;
}
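// init7 below uses a 2D launch: each (x, y) thread writes one element of the
// n x n matrix, in contrast to the flat 1D indexing used by init5 above.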
__global__ void init7(int* arr,int n){
int idx=blockDim.x*blockIdx.x+threadIdx.x;
int idy=blockDim.y*blockIdx.y+threadIdx.y;
if(idx<n && idy<n)
arr[idy*n+idx]=7;
}
__host__ void printMtrx(int* const mtrx,const int n){
for(int i=0;i<n;i++){
for(int j=0;j<n;j++)
printf("%d ",mtrx[i*n+j]);
printf("\n");
}
printf("\n");
}
__host__ int main(int argc,char* argv[]){
const int n = 5;
size_t bytes=sizeof(int)*n*n;
int* cuMtrx=NULL;
cudaMalloc((void**)&cuMtrx,bytes);
int* mt1=(int*)malloc(bytes);
memset(mt1,0,bytes);
cudaMemcpy(cuMtrx,mt1,bytes,cudaMemcpyHostToDevice);
init5<<<n*n,1>>>(cuMtrx,n);
cudaMemcpy(mt1,cuMtrx,bytes,cudaMemcpyDeviceToHost);
printMtrx(mt1,n);
dim3 grid(n,n,1);
init7<<<grid,1>>>(cuMtrx,n);
cudaMemcpy(mt1,cuMtrx,bytes,cudaMemcpyDeviceToHost);
printMtrx(mt1,n);
cudaFree(cuMtrx);
return 0;
}
|
2483ea730b73a42811b219b207d3d51bc2b96414.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include "device_launch_parameters.h"
const int DSIZE =8192;
const int block_size =32;
const float A_val = 3.0f;
const float B_val = 2.0f;
//matrix addition in CUDA
__global__ void madd(const float *A, const float *B, float *C, int ds ){
int idx = threadIdx.x + blockDim.x*blockIdx.x;
int idy = threadIdx.y + blockDim.y*blockIdx.y;
int idnx = ds* idy + idx;
if (idx < ds && idy < ds){
C[idnx] = A[idnx]+ B[idnx];
}
}
int main(){
float *h_A , *h_B, *h_C, *d_A, *d_B, *d_C ;
h_A = new float[DSIZE*DSIZE];
h_B = new float[DSIZE*DSIZE];
h_C = new float[DSIZE*DSIZE];
for(int i = 0; i < DSIZE*DSIZE; i++){
h_A[i] = A_val;
h_B[i] = B_val;
h_C[i] = 0;
}
hipMalloc(&d_A, DSIZE*DSIZE*sizeof(float));
hipMalloc(&d_B, DSIZE*DSIZE*sizeof(float));
hipMalloc(&d_C, DSIZE*DSIZE*sizeof(float));
//cudaCheckErrors("CudaMalloc failure");
hipMemcpy(d_A, h_A,DSIZE*DSIZE*sizeof(float), hipMemcpyHostToDevice );
hipMemcpy(d_B, h_B,DSIZE*DSIZE*sizeof(float), hipMemcpyHostToDevice );
//cudaCheckErrors("CudaMemcpy H2D failure");
dim3 block(block_size, block_size); // dim3 variable holds 3 dimensions
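// Round the grid up so the full DSIZE x DSIZE matrix is covered even when
// DSIZE is not a multiple of block_size; the kernel's bounds check drops the
// excess threads.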
dim3 grid((DSIZE+block.x-1)/block.x, (DSIZE+block.y-1)/block.y);
hipLaunchKernelGGL(( madd), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, DSIZE);
//cudaCheckErrors("kernel launch failure");
// Cuda processing sequence step 2 is complete
// Copy results back to host
hipMemcpy(h_C, d_C, DSIZE*DSIZE*sizeof(float), hipMemcpyDeviceToHost);
}
| 2483ea730b73a42811b219b207d3d51bc2b96414.cu | #include<stdio.h>
#include "device_launch_parameters.h"
const int DSIZE =8192;
const int block_size =32;
const float A_val = 3.0f;
const float B_val = 2.0f;
//matrix addition in CUDA
__global__ void madd(const float *A, const float *B, float *C, int ds ){
int idx = threadIdx.x + blockDim.x*blockIdx.x;
int idy = threadIdx.y + blockDim.y*blockIdx.y;
int idnx = ds* idy + idx;
if (idx < ds && idy < ds){
C[idnx] = A[idnx]+ B[idnx];
}
}
int main(){
float *h_A , *h_B, *h_C, *d_A, *d_B, *d_C ;
h_A = new float[DSIZE*DSIZE];
h_B = new float[DSIZE*DSIZE];
h_C = new float[DSIZE*DSIZE];
for(int i = 0; i < DSIZE*DSIZE; i++){
h_A[i] = A_val;
h_B[i] = B_val;
h_C[i] = 0;
}
cudaMalloc(&d_A, DSIZE*DSIZE*sizeof(float));
cudaMalloc(&d_B, DSIZE*DSIZE*sizeof(float));
cudaMalloc(&d_C, DSIZE*DSIZE*sizeof(float));
//cudaCheckErrors("CudaMalloc failure");
cudaMemcpy(d_A, h_A,DSIZE*DSIZE*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy(d_B, h_B,DSIZE*DSIZE*sizeof(float), cudaMemcpyHostToDevice );
//cudaCheckErrors("CudaMemcpy H2D failure");
dim3 block(block_size, block_size); // dim3 variable holds 3 dimensions
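// Round the grid up so the full DSIZE x DSIZE matrix is covered even when
// DSIZE is not a multiple of block_size; the kernel's bounds check drops the
// excess threads.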
dim3 grid((DSIZE+block.x-1)/block.x, (DSIZE+block.y-1)/block.y);
madd<<<grid, block>>>(d_A, d_B, d_C, DSIZE);
//cudaCheckErrors("kernel launch failure");
// Cuda processing sequence step 2 is complete
// Copy results back to host
cudaMemcpy(h_C, d_C, DSIZE*DSIZE*sizeof(float), cudaMemcpyDeviceToHost);
}
|
92a02d0066ed3662279ff9b4c89ece44be78d39d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zgetf2.cu, normal z -> c, Mon Jun 25 18:24:14 2018
*/
#include "magma_internal.h"
#define cgeru_bs 512 // 512 is max threads for 1.x cards
void magma_cgetf2_swap(
magma_int_t n, magmaFloatComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx,
magma_queue_t queue );
void magma_cscal_cgeru(
magma_int_t m, magma_int_t n, magmaFloatComplex *dA, magma_int_t ldda,
magma_queue_t );
// TODO: this function could be in .cpp file -- it has no CUDA code in it.
/***************************************************************************//**
CGETF2 computes an LU factorization of a general m-by-n matrix A
using partial pivoting with row interchanges.
The factorization has the form
A = P * L * U
where P is a permutation matrix, L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking Level 2 BLAS version of the algorithm.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0 and N <= 1024.
On CUDA architecture 1.x cards, N <= 512.
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
On entry, the m by n matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
ipiv INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[in]
queue magma_queue_t
Queue to execute in.
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, U(k,k) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@ingroup magma_getf2
*******************************************************************************/
extern "C" magma_int_t
magma_cgetf2_gpu(
magma_int_t m, magma_int_t n,
magmaFloatComplex_ptr dA, magma_int_t ldda,
magma_int_t *ipiv,
magma_queue_t queue,
magma_int_t *info )
{
#define dA(i, j) (dA + (i) + (j)*ldda)
*info = 0;
if (m < 0) {
*info = -1;
} else if (n < 0 || n > cgeru_bs) {
*info = -2;
} else if (ldda < max(1,m)) {
*info = -4;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
// Quick return if possible
if (m == 0 || n == 0) {
return *info;
}
magma_int_t min_mn = min(m, n);
magma_int_t j, jp;
for (j=0; j < min_mn; j++) {
hipDeviceSetCacheConfig( hipFuncCachePreferShared );
// Find pivot and test for singularity.
jp = j - 1 + magma_icamax( m-j, dA(j,j), 1, queue );
ipiv[j] = jp + 1; // ipiv uses Fortran one-based index
// Can't check value of dA since it is on GPU
//if ( dA(jp, j) != 0.0) {
hipDeviceSetCacheConfig( hipFuncCachePreferL1 );
// Apply the interchange to columns 1:N.
if (jp != j) {
magma_cgetf2_swap( n, dA, j, jp, ldda, queue );
}
// Compute elements J+1:M of J-th column.
if (j < m) {
magma_cscal_cgeru( m-j, n-j, dA(j, j), ldda, queue );
}
//}
//else if (*info == 0) {
// *info = j;
//}
}
return *info;
}
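// Typical call sketch (variable names hypothetical): dA is an m x n device
// matrix with leading dimension ldda, ipiv a host array of length min(m,n),
// and queue an existing magma_queue_t:
// magma_int_t info;
// magma_cgetf2_gpu( m, n, dA, ldda, ipiv, queue, &info );
// On return the L and U factors overwrite dA, the pivot rows are in ipiv,
// and a negative info flags an illegal argument.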
// ===========================================================================
// TODO: use standard BLAS magma_cswap?
#define cswap_bs 64
/******************************************************************************/
__global__
void kernel_cswap(int n, magmaFloatComplex *x, int i, int j, int incx)
{
int id = blockIdx.x * cswap_bs + threadIdx.x;
if (id < n) {
magmaFloatComplex tmp = x[i + incx*id];
x[i + incx*id] = x[j + incx*id];
x[j + incx*id] = tmp;
}
}
/******************************************************************************/
void magma_cgetf2_swap(
magma_int_t n, magmaFloatComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx,
magma_queue_t queue )
{
/* cswap two row vectors: ith and jth */
dim3 threads( cswap_bs );
dim3 grid( magma_ceildiv( n, cswap_bs ) );
hipLaunchKernelGGL(( kernel_cswap)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, x, i, j, incx);
}
/******************************************************************************/
// dynamically allocated shared memory, set to size n when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ magmaFloatComplex shared_data[];
/******************************************************************************/
__global__
void kernel_cscal_cgeru(int m, int n, magmaFloatComplex *A, int lda)
{
magmaFloatComplex *shared_y = shared_data;
int tid = blockIdx.x * cgeru_bs + threadIdx.x;
magmaFloatComplex reg = MAGMA_C_ZERO;
if (threadIdx.x < n) {
shared_y[threadIdx.x] = A[lda * threadIdx.x];
}
__syncthreads();
if (tid < m && tid > 0) {
reg = A[tid];
reg *= MAGMA_C_DIV(MAGMA_C_ONE, shared_y[0]);
A[tid] = reg;
#pragma unroll
for (int i=1; i < n; i++) {
A[tid + i*lda] += (MAGMA_C_NEG_ONE) * shared_y[i] * reg;
}
}
}
/******************************************************************************/
void magma_cscal_cgeru(
magma_int_t m, magma_int_t n,
magmaFloatComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
/*
Specialized kernel that merges cscal and cgeru
1) cscal: scale the first column vector A(1:M-1,0) by 1/A(0,0);
2) perform a cgeru operation on the trailing matrix A(1:M-1,1:N-1) += alpha*x*y**T, where
alpha := -1.0; x := A(1:M-1,0) and y:= A(0,1:N-1);
*/
dim3 threads( cgeru_bs );
dim3 grid( magma_ceildiv( m, cgeru_bs ) );
size_t shared_size = sizeof(magmaFloatComplex)*(n);
hipLaunchKernelGGL(( kernel_cscal_cgeru)
, dim3(grid), dim3(threads), shared_size, queue->cuda_stream() ,
m, n, dA, ldda);
}
| 92a02d0066ed3662279ff9b4c89ece44be78d39d.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zgetf2.cu, normal z -> c, Mon Jun 25 18:24:14 2018
*/
#include "magma_internal.h"
#define cgeru_bs 512 // 512 is max threads for 1.x cards
void magma_cgetf2_swap(
magma_int_t n, magmaFloatComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx,
magma_queue_t queue );
void magma_cscal_cgeru(
magma_int_t m, magma_int_t n, magmaFloatComplex *dA, magma_int_t ldda,
magma_queue_t );
// TODO: this function could be in .cpp file -- it has no CUDA code in it.
/***************************************************************************//**
CGETF2 computes an LU factorization of a general m-by-n matrix A
using partial pivoting with row interchanges.
The factorization has the form
A = P * L * U
where P is a permutation matrix, L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking Level 2 BLAS version of the algorithm.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0 and N <= 1024.
On CUDA architecture 1.x cards, N <= 512.
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
On entry, the m by n matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
ipiv INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[in]
queue magma_queue_t
Queue to execute in.
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, U(k,k) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@ingroup magma_getf2
*******************************************************************************/
extern "C" magma_int_t
magma_cgetf2_gpu(
magma_int_t m, magma_int_t n,
magmaFloatComplex_ptr dA, magma_int_t ldda,
magma_int_t *ipiv,
magma_queue_t queue,
magma_int_t *info )
{
#define dA(i, j) (dA + (i) + (j)*ldda)
*info = 0;
if (m < 0) {
*info = -1;
} else if (n < 0 || n > cgeru_bs) {
*info = -2;
} else if (ldda < max(1,m)) {
*info = -4;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
// Quick return if possible
if (m == 0 || n == 0) {
return *info;
}
magma_int_t min_mn = min(m, n);
magma_int_t j, jp;
for (j=0; j < min_mn; j++) {
cudaDeviceSetCacheConfig( cudaFuncCachePreferShared );
// Find pivot and test for singularity.
jp = j - 1 + magma_icamax( m-j, dA(j,j), 1, queue );
ipiv[j] = jp + 1; // ipiv uses Fortran one-based index
// Can't check value of dA since it is on GPU
//if ( dA(jp, j) != 0.0) {
cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 );
// Apply the interchange to columns 1:N.
if (jp != j) {
magma_cgetf2_swap( n, dA, j, jp, ldda, queue );
}
// Compute elements J+1:M of J-th column.
if (j < m) {
magma_cscal_cgeru( m-j, n-j, dA(j, j), ldda, queue );
}
//}
//else if (*info == 0) {
// *info = j;
//}
}
return *info;
}
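/*
    Minimal host-side usage sketch for magma_cgetf2_gpu, added for illustration
    only (it is not part of the original MAGMA source). It assumes the usual
    MAGMA v2 helpers -- magma_init, magma_queue_create, magma_cmalloc,
    magma_free, magma_queue_destroy, magma_finalize -- behave as documented,
    and it skips the data transfer and full error handling.
*/
static magma_int_t example_cgetf2_usage( magma_int_t m, magma_int_t n )
{
    magma_int_t info = 0;
    magma_int_t ldda = m; // leading dimension on the GPU, >= max(1,m)
    magma_int_t *ipiv = (magma_int_t*) malloc( sizeof(magma_int_t) * min(m, n) );
    magma_init();
    magma_queue_t queue;
    magma_queue_create( 0, &queue ); // queue on device 0
    magmaFloatComplex_ptr dA_dev = NULL;
    magma_cmalloc( &dA_dev, ldda*n ); // column-major m-by-n matrix on the GPU
    // ... fill dA_dev here, e.g. with magma_csetmatrix( m, n, hA, lda, dA_dev, ldda, queue ) ...
    magma_cgetf2_gpu( m, n, dA_dev, ldda, ipiv, queue, &info );
    // info > 0 flags an exactly zero pivot U(info,info); info < 0 an illegal argument
    magma_free( dA_dev );
    magma_queue_destroy( queue );
    magma_finalize();
    free( ipiv );
    return info;
}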
// ===========================================================================
// TODO: use standard BLAS magma_cswap?
#define cswap_bs 64
/******************************************************************************/
__global__
void kernel_cswap(int n, magmaFloatComplex *x, int i, int j, int incx)
{
int id = blockIdx.x * cswap_bs + threadIdx.x;
if (id < n) {
magmaFloatComplex tmp = x[i + incx*id];
x[i + incx*id] = x[j + incx*id];
x[j + incx*id] = tmp;
}
}
/******************************************************************************/
void magma_cgetf2_swap(
magma_int_t n, magmaFloatComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx,
magma_queue_t queue )
{
/* cswap two row vectors: ith and jth */
dim3 threads( cswap_bs );
dim3 grid( magma_ceildiv( n, cswap_bs ) );
kernel_cswap
<<< grid, threads, 0, queue->cuda_stream() >>>
(n, x, i, j, incx);
}
/******************************************************************************/
// dynamically allocated shared memory, set to size n when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ magmaFloatComplex shared_data[];
/******************************************************************************/
__global__
void kernel_cscal_cgeru(int m, int n, magmaFloatComplex *A, int lda)
{
magmaFloatComplex *shared_y = shared_data;
int tid = blockIdx.x * cgeru_bs + threadIdx.x;
magmaFloatComplex reg = MAGMA_C_ZERO;
if (threadIdx.x < n) {
shared_y[threadIdx.x] = A[lda * threadIdx.x];
}
__syncthreads();
if (tid < m && tid > 0) {
reg = A[tid];
reg *= MAGMA_C_DIV(MAGMA_C_ONE, shared_y[0]);
A[tid] = reg;
#pragma unroll
for (int i=1; i < n; i++) {
A[tid + i*lda] += (MAGMA_C_NEG_ONE) * shared_y[i] * reg;
}
}
}
/******************************************************************************/
void magma_cscal_cgeru(
magma_int_t m, magma_int_t n,
magmaFloatComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
/*
Specialized kernel that merges cscal and cgeru
    1) scale (cscal) the first column vector A(1:M-1,0) with 1/A(0,0);
    2) perform a cgeru operation on the trailing matrix: A(1:M-1,1:N-1) += alpha*x*y**T, where
       alpha := -1.0; x := A(1:M-1,0) and y := A(0,1:N-1);
       (a small host-side reference sketch follows this function)
*/
dim3 threads( cgeru_bs );
dim3 grid( magma_ceildiv( m, cgeru_bs ) );
size_t shared_size = sizeof(magmaFloatComplex)*(n);
kernel_cscal_cgeru
<<< grid, threads, shared_size, queue->cuda_stream() >>>
(m, n, dA, ldda);
}
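/*
    Illustrative host reference (not part of the original file) for what the
    fused kernel above computes on a column-major m-by-n block A with leading
    dimension lda, assuming A(0,0) != 0 and that the magmaFloatComplex operator
    overloads used in the kernel are also available on the host.
*/
static void cscal_cgeru_host_reference( int m, int n, magmaFloatComplex *A, int lda )
{
    magmaFloatComplex inv_pivot = MAGMA_C_DIV( MAGMA_C_ONE, A[0] );
    for (int i = 1; i < m; i++) {
        A[i] = A[i] * inv_pivot;                                // cscal: scale the pivot column
        for (int j = 1; j < n; j++) {
            A[i + j*lda] += MAGMA_C_NEG_ONE * A[j*lda] * A[i];  // cgeru: rank-1 update with y = A(0,1:n-1)
        }
    }
}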
|
c69445f71262ac839300b9d944c8c1c90ac3848c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ImageProcessing.cuh"
__global__ void convertToGrayscale(unsigned char *gray, unsigned char *r, unsigned char *g, unsigned char *b, int dimension) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < dimension)
gray[index] = 1 / 3.0 * (r[index] + g[index] + b[index]);
}
__global__ void getHistrogram(unsigned int *histogram, unsigned char *image, int dimension) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < dimension) {
int color = image[index];
atomicAdd(&histogram[color], 1);
}
}
__global__ void getNormalizedHistogram(double *norm_histogram, unsigned int* histogram, int dimension) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < LIMIT + 1) {
norm_histogram[index] = (double)histogram[index] / dimension;
}
}
__global__ void histogramEqualization(unsigned char *eq_image, unsigned char* image, double *cumulative_sum, int dimension) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < dimension) {
		if (floor(LIMIT * cumulative_sum[image[index]]) < LIMIT && floor(LIMIT * cumulative_sum[image[index]]) > 0) // keep the original pixel when the mapped value would clip to 0 or LIMIT
eq_image[index] = floor(LIMIT * cumulative_sum[image[index]]);
else
eq_image[index] = image[index];
}
}
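/*
    Worked example for the mapping above (illustrative comment, assuming LIMIT
    is 255 as the 256-bin kernels in this file suggest): a gray level g with
    cumulative_sum[g] = 0.42 is remapped to floor(255 * 0.42) = 107, so levels
    with a small CDF are pushed toward black and levels with a CDF near 1
    toward white, which flattens the histogram.
*/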
// Exclusive scan on CUDA.
__global__ void exclusiveScanGPU(double *d_array, double *d_result, int N, double *d_aux) {
extern __shared__ double temp[];
int realIndex = 2 * threadIdx.x + blockDim.x * 2 * blockIdx.x;
int threadIndex = threadIdx.x;
int index = 2 * threadIndex;
int offset = 1;
// Copy from the array to shared memory.
temp[index] = d_array[realIndex];
temp[index + 1] = d_array[realIndex + 1];
	// Up-sweep (reduce) phase: combine pairs in place; after this loop the
	// last element of the block holds the total sum of the block's elements.
for (int d = blockDim.x; d > 0; d = d / 2) {
__syncthreads();
// Regulates the amount of threads operating.
if (threadIndex < d)
{
// Swap the numbers
int current = offset * (index + 1) - 1;
int next = offset * (index + 2) - 1;
temp[next] += temp[current];
}
// Increase the offset by multiple of 2.
offset *= 2;
}
// Only one thread performs this.
if (threadIndex == 0) {
// Store the sum to the auxiliary array.
if (d_aux) {
d_aux[blockIdx.x] = temp[N - 1];
}
// Reset the last element with identity. Only the first thread will do
// the job.
temp[N - 1] = 0;
}
// Down sweep to build scan.
for (int d = 1; d < blockDim.x * 2; d *= 2) {
// Reduce the offset by division of 2.
offset = offset / 2;
__syncthreads();
if (threadIndex < d)
{
int current = offset * (index + 1) - 1;
int next = offset * (index + 2) - 1;
// Swap
double tempCurrent = temp[current];
temp[current] = temp[next];
temp[next] += tempCurrent;
}
}
__syncthreads();
d_result[realIndex] = temp[index]; // write results to device memory
d_result[realIndex + 1] = temp[index + 1];
}
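/*
    Illustrative CPU reference (not part of the original file) for what
    exclusiveScanGPU computes within one block: out[0] = 0 and
    out[i] = in[0] + ... + in[i-1]. Handy for validating the kernel on the
    256-bin histogram case, where a single block covers the whole array.
*/
static void exclusiveScanCPU(const double *in, double *out, int n) {
	double running = 0.0;
	for (int i = 0; i < n; i++) {
		out[i] = running;      // prefix sum of all elements strictly before i
		running += in[i];
	}
}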
__global__ void sobelFilter(unsigned char * image, unsigned char * filtered_image, int height, int width) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
double dx, dy;
if (x > 0 && y > 0 && x < width - 1 && y < height - 1) {
dx = (-1 * image[(y - 1) * width + (x - 1)]) + (-2 * image[y * width + (x - 1)]) + (-1 * image[(y + 1) * width + (x - 1)]) +
(image[(y - 1) * width + (x + 1)]) + (2 * image[y * width + (x + 1)]) + (image[(y + 1) * width + (x + 1)]);
dy = (image[(y - 1) * width + (x - 1)]) + (2 * image[(y - 1) * width + x]) + (image[(y - 1) * width + (x + 1)]) +
(-1 * image[(y + 1) * width + (x - 1)]) + (-2 * image[(y + 1) * width + x]) + (-1 * image[(y + 1) * width + (x + 1)]);
filtered_image[y * width + x] = sqrt(dx * dx + dy * dy);
}
}
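/*
    For reference (illustrative comment): the dx/dy sums above are the usual
    3x3 Sobel convolutions
        Gx = | -1  0 +1 |        Gy = | +1 +2 +1 |
             | -2  0 +2 |             |  0  0  0 |
             | -1  0 +1 |             | -1 -2 -1 |
    and the gradient magnitude sqrt(Gx^2 + Gy^2) is written to the output.
*/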
__global__ void gaussianBlur(unsigned char *image, unsigned char *output_image, int width, int height, const int* const kernel, const int dim_kernel, int sum_of_elements){
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
float partial_sum = 0.0;
if (x > 0 && y > 0 && x < width - 1 && y < height - 1) {
for (int row = 0; row < dim_kernel; row++) {
for (int col = 0; col < dim_kernel; col++) {
int index_image_x = x + col - dim_kernel / 2;
int index_image_y = y + row - dim_kernel / 2;
index_image_x = min(max(index_image_x, 0), width - 1);
index_image_y = min(max(index_image_y, 0), height - 1);
partial_sum += kernel[row * dim_kernel + col] * image[index_image_y * width + index_image_x];
}
}
output_image[y * width + x] = int((float)partial_sum / sum_of_elements);
}
}
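/*
    Normalization note with a worked check (illustrative comment): the output
    pixel is (sum over the window of kernel*image) / sum_of_elements. For the
    5x5 binomial kernel used by applyGaussianFilter below, the weights are the
    outer product of {1,4,6,4,1} with itself, so they sum to 16 * 16 = 256,
    which is exactly the sum_of_elements value passed at the launch site.
*/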
__global__ void binaryThreshold(unsigned char * image, unsigned char * output_image, int width, int height, int threshold) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x > 0 && y > 0 && x < width - 1 && y < height - 1) {
if (image[y * width + x] < threshold)
output_image[y * width + x] = 0;
else
output_image[y * width + x] = 255;
}
}
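/*
    Worked example (illustrative comment): with threshold = 128, a pixel of
    gray value 97 is written as 0 and a pixel of value 200 as 255; pixels on
    the outermost image border are skipped by the bounds check above and keep
    whatever value the output buffer already holds.
*/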
__device__ int min_int(int a, int b) {
return a <= b ? a : b;
}
__device__ int max_int(int a, int b) {
return a >= b ? a : b;
}
hipError_t bgrToGrayscale(unsigned char *gray, Mat image_rgb, unsigned int size)
{
// Host input vectors.
unsigned char *red = new unsigned char[size];
unsigned char *green = new unsigned char[size];
unsigned char *blue = new unsigned char[size];
// Init vectors with rgb values.
for (int y = 0; y < image_rgb.rows; ++y) {
for (int x = 0; x < image_rgb.cols; ++x) {
blue[y * image_rgb.cols + x] = image_rgb.data[image_rgb.channels() * (y * image_rgb.cols + x) + 0];
green[y * image_rgb.cols + x] = image_rgb.data[image_rgb.channels() * (y * image_rgb.cols + x) + 1];
red[y * image_rgb.cols + x] = image_rgb.data[image_rgb.channels() * (y * image_rgb.cols + x) + 2];
}
}
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
// Device input vectors.
unsigned char *d_red;
unsigned char *d_green;
unsigned char *d_blue;
unsigned char *d_gray;
// Allocate GPU buffers.
cudaStatus = hipMalloc(&d_red, size * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc(&d_green, size * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc(&d_blue, size * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc(&d_gray, size * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers
cudaStatus = hipMemcpy(d_red, red, size * sizeof(unsigned char), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(d_green, green, size * sizeof(unsigned char), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(d_blue, blue, size * sizeof(unsigned char), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
int no_threads = 1024;
int no_blocks = (int)ceil((float)size / no_threads);
convertToGrayscale << <no_blocks, no_threads >> > (d_gray, d_red, d_green, d_blue, size);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "convert_to_grayscale launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching convertToGrayscale!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(gray, d_gray, size * sizeof(unsigned char), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(d_red);
hipFree(d_green);
hipFree(d_blue);
hipFree(d_gray);
delete[] red;
delete[] green;
delete[] blue;
return cudaStatus;
}
hipError_t getHistogramN(double *cumulativeSumHistogram, double *norm_histogram, unsigned int *histogram, unsigned char *grayScaleImage, int size) {
hipError_t cudaStatus;
unsigned int *d_histogram;
unsigned char *d_gray_scale_image;
double *d_norm_histogram;
double *d_cumulative_sum;
double *d_aux_for_cumulative_sum;
// Threads size
int threads = 256;
int N = 256; // Size of the array.
int blocks = N / threads + ((N%threads == 0) ? 0 : 1);
// Perform on CUDA.
const dim3 blockSize(threads / 2, 1, 1);
const dim3 gridSize(blocks, 1, 1);
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = hipMalloc(&d_aux_for_cumulative_sum, (LIMIT + 1) * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc(&d_cumulative_sum, (LIMIT + 1) * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc(&d_norm_histogram, (LIMIT + 1) * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc(&d_histogram, (LIMIT + 1) * sizeof(unsigned int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc(&d_gray_scale_image, size * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(d_gray_scale_image, grayScaleImage, size * sizeof(unsigned char), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
	cudaStatus = hipMemset(d_histogram, 0, (LIMIT + 1) * sizeof(unsigned int)); // memset counts bytes, so cover the whole array
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemset failed!");
goto Error;
}
	cudaStatus = hipMemset(d_norm_histogram, 0, (LIMIT + 1) * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemset failed!");
goto Error;
}
	cudaStatus = hipMemset(d_cumulative_sum, 0, (LIMIT + 1) * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemset failed!");
goto Error;
}
	cudaStatus = hipMemset(d_aux_for_cumulative_sum, 0, (LIMIT + 1) * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemset failed!");
goto Error;
}
int no_threads = 1024;
int no_blocks = (int)ceil((float)size / no_threads);
getHistrogram << <no_blocks, no_threads >> > (d_histogram, d_gray_scale_image, size);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching getHistrogram!\n", cudaStatus);
goto Error;
}
getNormalizedHistogram << <1, 256 >> > (d_norm_histogram, d_histogram, size);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching getNormalizedHistogram!\n", cudaStatus);
goto Error;
}
exclusiveScanGPU << < gridSize, blockSize, blocks * threads * sizeof(double) >> > (d_norm_histogram, d_cumulative_sum, N, d_aux_for_cumulative_sum);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching exclusiveScanGPU!\n", cudaStatus);
goto Error;
}
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "exclusiveScanGPU launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching exclusiveScanGPU!\n", cudaStatus);
goto Error;
}
cudaStatus = hipMemcpy(histogram, d_histogram, (LIMIT + 1) * sizeof(unsigned int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(norm_histogram, d_norm_histogram, (LIMIT + 1) * sizeof(double), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(cumulativeSumHistogram, d_cumulative_sum, N * sizeof(double), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(d_gray_scale_image);
hipFree(d_histogram);
hipFree(d_norm_histogram);
hipFree(d_cumulative_sum);
hipFree(d_aux_for_cumulative_sum);
return cudaStatus;
}
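/*
    Pipeline summary for the routine above (illustrative comment):
    getHistrogram counts pixels per gray level, getNormalizedHistogram divides
    the counts by the pixel count `size`, and exclusiveScanGPU turns the result
    into a cumulative distribution,
        cumulative_sum[g] = sum over k < g of histogram[k] / size,
    which doHistogramEqualization later maps through floor(LIMIT * CDF).
*/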
hipError_t doHistogramEqualization(unsigned char *eq_image, unsigned char *image, double *cumulative_sum, int dimension) {
hipError_t cudaStatus;
unsigned char *d_eq_image;
unsigned char *d_image;
double *d_cumulative_sum;
int no_thread = 1024;
int no_block = (int)ceil((float)dimension / no_thread);
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = hipMalloc(&d_eq_image, dimension * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc(&d_image, dimension * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc(&d_cumulative_sum, (LIMIT + 1) * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(d_image, image, dimension * sizeof(unsigned char), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(d_cumulative_sum, cumulative_sum, (LIMIT + 1) * sizeof(double), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
histogramEqualization << <no_block, no_thread >> > (d_eq_image, d_image, d_cumulative_sum, dimension);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "histogramEqualization launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching histogramEqualization!\n", cudaStatus);
goto Error;
}
cudaStatus = hipMemcpy(eq_image, d_eq_image, dimension * sizeof(unsigned char), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(d_cumulative_sum);
hipFree(d_eq_image);
hipFree(d_image);
return cudaStatus;
}
hipError_t applySobelFilter(unsigned char *image, unsigned char *filtered_image, int width, int height) {
hipError_t cudaStatus;
unsigned char *d_image;
unsigned char *d_filtered_image;
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = hipMalloc(&d_image, width * height * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc(&d_filtered_image, width * height * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(d_image, image, width * height * sizeof(unsigned char), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
double number_of_threads = 32;
dim3 threadsPerBlock(number_of_threads, number_of_threads, 1);
dim3 numBlocks(ceil(width / number_of_threads), ceil(height / number_of_threads), 1);
sobelFilter << <numBlocks, threadsPerBlock >> > (d_image, d_filtered_image, height, width);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "sobelFilter launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching sobelFilter!\n", cudaStatus);
goto Error;
}
cudaStatus = hipMemcpy(filtered_image, d_filtered_image, height * width * sizeof(unsigned char), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(d_filtered_image);
hipFree(d_image);
return cudaStatus;
}
hipError_t applyGaussianFilter(unsigned char *image, unsigned char *filtered_image, int width, int height, const int dim_kernel) {
int kernel[25] = {
1, 4, 6, 4, 1,
4, 16, 24, 16, 4,
6, 24, 36, 24, 6,
4, 16, 24, 16, 4,
1, 4, 6, 4, 1
};
hipError_t cudaStatus;
unsigned char *d_image;
unsigned char *d_filtered_image;
int *d_kernel;
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = hipMalloc(&d_image, width * height * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc(&d_filtered_image, width * height * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc(&d_kernel, dim_kernel * dim_kernel * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(d_kernel, kernel, dim_kernel * dim_kernel * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(d_image, image, width * height * sizeof(unsigned char), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
double number_of_threads = 32;
dim3 threadsPerBlock(number_of_threads, number_of_threads, 1);
dim3 numBlocks(ceil(width / number_of_threads), ceil(height / number_of_threads), 1);
	gaussianBlur << <numBlocks, threadsPerBlock >> > (d_image, d_filtered_image, width, height, d_kernel, dim_kernel, 256); // 256 = sum of the 5x5 binomial kernel weights (16 * 16)
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "gaussianBlur launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching gaussianBlur!\n", cudaStatus);
goto Error;
}
cudaStatus = hipMemcpy(filtered_image, d_filtered_image, height * width * sizeof(unsigned char), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(d_filtered_image);
hipFree(d_image);
hipFree(d_kernel);
return cudaStatus;
}
hipError_t applyBinaryThreshold(unsigned char * image, unsigned char * filtered_image, int width, int height, const int threshold)
{
hipError_t cudaStatus;
unsigned char *d_image;
unsigned char *d_filtered_image;
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = hipMalloc(&d_image, width * height * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc(&d_filtered_image, width * height * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(d_image, image, width * height * sizeof(unsigned char), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
double number_of_threads = 32;
dim3 threadsPerBlock(number_of_threads, number_of_threads, 1);
dim3 numBlocks(ceil(width / number_of_threads), ceil(height / number_of_threads), 1);
binaryThreshold << <numBlocks, threadsPerBlock >> > (d_image, d_filtered_image, width, height, threshold);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "binaryThreshold launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching binaryThreshold!\n", cudaStatus);
goto Error;
}
cudaStatus = hipMemcpy(filtered_image, d_filtered_image, height * width * sizeof(unsigned char), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(d_filtered_image);
hipFree(d_image);
return cudaStatus;
}
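/*
    Minimal end-to-end usage sketch of the host wrappers above (illustrative
    only; it assumes the OpenCV image I/O available through ImageProcessing.cuh
    and is kept as a comment because it is not part of the original file):

        Mat bgr = imread("input.png");                    // 3-channel BGR image
        int size = bgr.rows * bgr.cols;
        unsigned char *gray = new unsigned char[size];
        unsigned char *edges = new unsigned char[size];
        bgrToGrayscale(gray, bgr, size);                  // BGR -> averaged grayscale
        applySobelFilter(gray, edges, bgr.cols, bgr.rows);
        // ... copy `edges` back into a cv::Mat or write it to disk ...
        delete[] gray;
        delete[] edges;
*/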
| c69445f71262ac839300b9d944c8c1c90ac3848c.cu | #include "ImageProcessing.cuh"
__global__ void convertToGrayscale(unsigned char *gray, unsigned char *r, unsigned char *g, unsigned char *b, int dimension) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < dimension)
gray[index] = 1 / 3.0 * (r[index] + g[index] + b[index]);
}
__global__ void getHistrogram(unsigned int *histogram, unsigned char *image, int dimension) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < dimension) {
int color = image[index];
atomicAdd(&histogram[color], 1);
}
}
__global__ void getNormalizedHistogram(double *norm_histogram, unsigned int* histogram, int dimension) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < LIMIT + 1) {
norm_histogram[index] = (double)histogram[index] / dimension;
}
}
__global__ void histogramEqualization(unsigned char *eq_image, unsigned char* image, double *cumulative_sum, int dimension) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < dimension) {
		if (floor(LIMIT * cumulative_sum[image[index]]) < LIMIT && floor(LIMIT * cumulative_sum[image[index]]) > 0) // keep the original pixel when the mapped value would clip to 0 or LIMIT
eq_image[index] = floor(LIMIT * cumulative_sum[image[index]]);
else
eq_image[index] = image[index];
}
}
// Exclusive scan on CUDA.
__global__ void exclusiveScanGPU(double *d_array, double *d_result, int N, double *d_aux) {
extern __shared__ double temp[];
int realIndex = 2 * threadIdx.x + blockDim.x * 2 * blockIdx.x;
int threadIndex = threadIdx.x;
int index = 2 * threadIndex;
int offset = 1;
// Copy from the array to shared memory.
temp[index] = d_array[realIndex];
temp[index + 1] = d_array[realIndex + 1];
	// Up-sweep (reduce) phase: combine pairs in place; after this loop the
	// last element of the block holds the total sum of the block's elements.
for (int d = blockDim.x; d > 0; d = d / 2) {
__syncthreads();
// Regulates the amount of threads operating.
if (threadIndex < d)
{
// Swap the numbers
int current = offset * (index + 1) - 1;
int next = offset * (index + 2) - 1;
temp[next] += temp[current];
}
// Increase the offset by multiple of 2.
offset *= 2;
}
// Only one thread performs this.
if (threadIndex == 0) {
// Store the sum to the auxiliary array.
if (d_aux) {
d_aux[blockIdx.x] = temp[N - 1];
}
// Reset the last element with identity. Only the first thread will do
// the job.
temp[N - 1] = 0;
}
// Down sweep to build scan.
for (int d = 1; d < blockDim.x * 2; d *= 2) {
// Reduce the offset by division of 2.
offset = offset / 2;
__syncthreads();
if (threadIndex < d)
{
int current = offset * (index + 1) - 1;
int next = offset * (index + 2) - 1;
// Swap
double tempCurrent = temp[current];
temp[current] = temp[next];
temp[next] += tempCurrent;
}
}
__syncthreads();
d_result[realIndex] = temp[index]; // write results to device memory
d_result[realIndex + 1] = temp[index + 1];
}
__global__ void sobelFilter(unsigned char * image, unsigned char * filtered_image, int height, int width) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
double dx, dy;
if (x > 0 && y > 0 && x < width - 1 && y < height - 1) {
dx = (-1 * image[(y - 1) * width + (x - 1)]) + (-2 * image[y * width + (x - 1)]) + (-1 * image[(y + 1) * width + (x - 1)]) +
(image[(y - 1) * width + (x + 1)]) + (2 * image[y * width + (x + 1)]) + (image[(y + 1) * width + (x + 1)]);
dy = (image[(y - 1) * width + (x - 1)]) + (2 * image[(y - 1) * width + x]) + (image[(y - 1) * width + (x + 1)]) +
(-1 * image[(y + 1) * width + (x - 1)]) + (-2 * image[(y + 1) * width + x]) + (-1 * image[(y + 1) * width + (x + 1)]);
filtered_image[y * width + x] = sqrt(dx * dx + dy * dy);
}
}
__global__ void gaussianBlur(unsigned char *image, unsigned char *output_image, int width, int height, const int* const kernel, const int dim_kernel, int sum_of_elements){
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
float partial_sum = 0.0;
if (x > 0 && y > 0 && x < width - 1 && y < height - 1) {
for (int row = 0; row < dim_kernel; row++) {
for (int col = 0; col < dim_kernel; col++) {
int index_image_x = x + col - dim_kernel / 2;
int index_image_y = y + row - dim_kernel / 2;
index_image_x = min(max(index_image_x, 0), width - 1);
index_image_y = min(max(index_image_y, 0), height - 1);
partial_sum += kernel[row * dim_kernel + col] * image[index_image_y * width + index_image_x];
}
}
output_image[y * width + x] = int((float)partial_sum / sum_of_elements);
}
}
__global__ void binaryThreshold(unsigned char * image, unsigned char * output_image, int width, int height, int threshold) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x > 0 && y > 0 && x < width - 1 && y < height - 1) {
if (image[y * width + x] < threshold)
output_image[y * width + x] = 0;
else
output_image[y * width + x] = 255;
}
}
__device__ int min_int(int a, int b) {
return a <= b ? a : b;
}
__device__ int max_int(int a, int b) {
return a >= b ? a : b;
}
cudaError_t bgrToGrayscale(unsigned char *gray, Mat image_rgb, unsigned int size)
{
// Host input vectors.
unsigned char *red = new unsigned char[size];
unsigned char *green = new unsigned char[size];
unsigned char *blue = new unsigned char[size];
// Init vectors with rgb values.
for (int y = 0; y < image_rgb.rows; ++y) {
for (int x = 0; x < image_rgb.cols; ++x) {
blue[y * image_rgb.cols + x] = image_rgb.data[image_rgb.channels() * (y * image_rgb.cols + x) + 0];
green[y * image_rgb.cols + x] = image_rgb.data[image_rgb.channels() * (y * image_rgb.cols + x) + 1];
red[y * image_rgb.cols + x] = image_rgb.data[image_rgb.channels() * (y * image_rgb.cols + x) + 2];
}
}
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
// Device input vectors.
unsigned char *d_red;
unsigned char *d_green;
unsigned char *d_blue;
unsigned char *d_gray;
// Allocate GPU buffers.
cudaStatus = cudaMalloc(&d_red, size * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc(&d_green, size * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc(&d_blue, size * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc(&d_gray, size * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers
cudaStatus = cudaMemcpy(d_red, red, size * sizeof(unsigned char), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(d_green, green, size * sizeof(unsigned char), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(d_blue, blue, size * sizeof(unsigned char), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
int no_threads = 1024;
int no_blocks = (int)ceil((float)size / no_threads);
convertToGrayscale << <no_blocks, no_threads >> > (d_gray, d_red, d_green, d_blue, size);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "convert_to_grayscale launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching convertToGrayscale!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(gray, d_gray, size * sizeof(unsigned char), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(d_red);
cudaFree(d_green);
cudaFree(d_blue);
cudaFree(d_gray);
delete[] red;
delete[] green;
delete[] blue;
return cudaStatus;
}
cudaError_t getHistogramN(double *cumulativeSumHistogram, double *norm_histogram, unsigned int *histogram, unsigned char *grayScaleImage, int size) {
cudaError_t cudaStatus;
unsigned int *d_histogram;
unsigned char *d_gray_scale_image;
double *d_norm_histogram;
double *d_cumulative_sum;
double *d_aux_for_cumulative_sum;
// Threads size
int threads = 256;
int N = 256; // Size of the array.
int blocks = N / threads + ((N%threads == 0) ? 0 : 1);
// Perform on CUDA.
const dim3 blockSize(threads / 2, 1, 1);
const dim3 gridSize(blocks, 1, 1);
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = cudaMalloc(&d_aux_for_cumulative_sum, (LIMIT + 1) * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc(&d_cumulative_sum, (LIMIT + 1) * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc(&d_norm_histogram, (LIMIT + 1) * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc(&d_histogram, (LIMIT + 1) * sizeof(unsigned int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc(&d_gray_scale_image, size * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(d_gray_scale_image, grayScaleImage, size * sizeof(unsigned char), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
	cudaStatus = cudaMemset(d_histogram, 0, (LIMIT + 1) * sizeof(unsigned int)); // memset counts bytes, so cover the whole array
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset failed!");
goto Error;
}
	cudaStatus = cudaMemset(d_norm_histogram, 0, (LIMIT + 1) * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset failed!");
goto Error;
}
	cudaStatus = cudaMemset(d_cumulative_sum, 0, (LIMIT + 1) * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset failed!");
goto Error;
}
	cudaStatus = cudaMemset(d_aux_for_cumulative_sum, 0, (LIMIT + 1) * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset failed!");
goto Error;
}
int no_threads = 1024;
int no_blocks = (int)ceil((float)size / no_threads);
getHistrogram << <no_blocks, no_threads >> > (d_histogram, d_gray_scale_image, size);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching getHistrogram!\n", cudaStatus);
goto Error;
}
getNormalizedHistogram << <1, 256 >> > (d_norm_histogram, d_histogram, size);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching getNormalizedHistogram!\n", cudaStatus);
goto Error;
}
exclusiveScanGPU << < gridSize, blockSize, blocks * threads * sizeof(double) >> > (d_norm_histogram, d_cumulative_sum, N, d_aux_for_cumulative_sum);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching exclusiveScanGPU!\n", cudaStatus);
goto Error;
}
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "exclusiveScanGPU launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching exclusiveScanGPU!\n", cudaStatus);
goto Error;
}
cudaStatus = cudaMemcpy(histogram, d_histogram, (LIMIT + 1) * sizeof(unsigned int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(norm_histogram, d_norm_histogram, (LIMIT + 1) * sizeof(double), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(cumulativeSumHistogram, d_cumulative_sum, N * sizeof(double), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(d_gray_scale_image);
cudaFree(d_histogram);
cudaFree(d_norm_histogram);
cudaFree(d_cumulative_sum);
cudaFree(d_aux_for_cumulative_sum);
return cudaStatus;
}
cudaError_t doHistogramEqualization(unsigned char *eq_image, unsigned char *image, double *cumulative_sum, int dimension) {
cudaError_t cudaStatus;
unsigned char *d_eq_image;
unsigned char *d_image;
double *d_cumulative_sum;
int no_thread = 1024;
int no_block = (int)ceil((float)dimension / no_thread);
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = cudaMalloc(&d_eq_image, dimension * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc(&d_image, dimension * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc(&d_cumulative_sum, (LIMIT + 1) * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(d_image, image, dimension * sizeof(unsigned char), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(d_cumulative_sum, cumulative_sum, (LIMIT + 1) * sizeof(double), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
histogramEqualization << <no_block, no_thread >> > (d_eq_image, d_image, d_cumulative_sum, dimension);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "histogramEqualization launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching histogramEqualization!\n", cudaStatus);
goto Error;
}
cudaStatus = cudaMemcpy(eq_image, d_eq_image, dimension * sizeof(unsigned char), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(d_cumulative_sum);
cudaFree(d_eq_image);
cudaFree(d_image);
return cudaStatus;
}
cudaError_t applySobelFilter(unsigned char *image, unsigned char *filtered_image, int width, int height) {
cudaError_t cudaStatus;
unsigned char *d_image;
unsigned char *d_filtered_image;
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = cudaMalloc(&d_image, width * height * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc(&d_filtered_image, width * height * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(d_image, image, width * height * sizeof(unsigned char), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
double number_of_threads = 32;
dim3 threadsPerBlock(number_of_threads, number_of_threads, 1);
dim3 numBlocks(ceil(width / number_of_threads), ceil(height / number_of_threads), 1);
sobelFilter << <numBlocks, threadsPerBlock >> > (d_image, d_filtered_image, height, width);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "sobelFilter launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching sobelFilter!\n", cudaStatus);
goto Error;
}
cudaStatus = cudaMemcpy(filtered_image, d_filtered_image, height * width * sizeof(unsigned char), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(d_filtered_image);
cudaFree(d_image);
return cudaStatus;
}
cudaError_t applyGaussianFilter(unsigned char *image, unsigned char *filtered_image, int width, int height, const int dim_kernel) {
int kernel[25] = {
1, 4, 6, 4, 1,
4, 16, 24, 16, 4,
6, 24, 36, 24, 6,
4, 16, 24, 16, 4,
1, 4, 6, 4, 1
};
cudaError_t cudaStatus;
unsigned char *d_image;
unsigned char *d_filtered_image;
int *d_kernel;
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = cudaMalloc(&d_image, width * height * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc(&d_filtered_image, width * height * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc(&d_kernel, dim_kernel * dim_kernel * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(d_kernel, kernel, dim_kernel * dim_kernel * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(d_image, image, width * height * sizeof(unsigned char), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
double number_of_threads = 32;
dim3 threadsPerBlock(number_of_threads, number_of_threads, 1);
dim3 numBlocks(ceil(width / number_of_threads), ceil(height / number_of_threads), 1);
	gaussianBlur << <numBlocks, threadsPerBlock >> > (d_image, d_filtered_image, width, height, d_kernel, dim_kernel, 256); // 256 = sum of the 5x5 binomial kernel weights (16 * 16)
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "gaussianBlur launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching gaussianBlur!\n", cudaStatus);
goto Error;
}
cudaStatus = cudaMemcpy(filtered_image, d_filtered_image, height * width * sizeof(unsigned char), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(d_filtered_image);
cudaFree(d_image);
cudaFree(d_kernel);
return cudaStatus;
}
cudaError_t applyBinaryThreshold(unsigned char * image, unsigned char * filtered_image, int width, int height, const int threshold)
{
cudaError_t cudaStatus;
unsigned char *d_image;
unsigned char *d_filtered_image;
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = cudaMalloc(&d_image, width * height * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc(&d_filtered_image, width * height * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(d_image, image, width * height * sizeof(unsigned char), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
double number_of_threads = 32;
dim3 threadsPerBlock(number_of_threads, number_of_threads, 1);
dim3 numBlocks(ceil(width / number_of_threads), ceil(height / number_of_threads), 1);
binaryThreshold << <numBlocks, threadsPerBlock >> > (d_image, d_filtered_image, width, height, threshold);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "binaryThreshold launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching binaryThreshold!\n", cudaStatus);
goto Error;
}
cudaStatus = cudaMemcpy(filtered_image, d_filtered_image, height * width * sizeof(unsigned char), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(d_filtered_image);
cudaFree(d_image);
return cudaStatus;
}
|
c4999622d3162fd2fa339072ace74b3119e627e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zbajac_csr_overlap.cu, normal z -> d, Tue Aug 30 09:38:41 2016
*/
#include "magmasparse_internal.h"
#define PRECISION_d
#define BLOCKSIZE 256
// Simple spin-lock smoke test: each thread repeatedly tries to acquire the lock
// locks[threadIdx.x % n] with atomicExch (0 = free, 1 = taken); once acquired it
// runs its (empty) critical section and releases the lock by writing 0 back.
__global__ void magma_dk_testLocking(unsigned int* locks, int n) {
    int id = threadIdx.x % n;
    bool leaveLoop = false;
    while (!leaveLoop) {
        if (atomicExch(&(locks[id]), 1u) == 0u) {
            //critical section
            leaveLoop = true;
            atomicExch(&(locks[id]), 0u);
        }
    }
}
/*
__global__ void
magma_dbajac_csr_o_ls_kernel(int localiters, int n,
int matrices, int overlap,
magma_d_matrix *D, magma_d_matrix *R,
const double * __restrict__ b,
double * x )
{
// int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap;
// int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x;
int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2;
int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2;
int i, j, start, end;
__shared__ double local_x[ BLOCKSIZE ];
double zero = MAGMA_D_MAKE(0.0, 0.0);
double bl, tmp = zero, v = zero;
double *valR, *valD;
magma_index_t *colR, *rowR, *colD, *rowD;
//valR = R[ (1+blockIdx.x-1)%matrices ].dval;
//colR = R[ (1+blockIdx.x-1)%matrices ].dcol;
//rowR = R[ (1+blockIdx.x-1)%matrices ].drow;
//valD = D[ (1+blockIdx.x-1)%matrices ].dval;
//colD = D[ (1+blockIdx.x-1)%matrices ].dcol;
//rowD = D[ (1+blockIdx.x-1)%matrices ].drow;
if( blockIdx.x%2==1 ){
valR = R[0].dval;
valD = D[0].dval;
colR = R[0].dcol;
rowR = R[0].drow;
colD = D[0].dcol;
rowD = D[0].drow;
}else{
valR = R[1].dval;
valD = D[1].dval;
colR = R[1].dcol;
rowR = R[1].drow;
colD = D[1].dcol;
rowD = D[1].drow;
}
if ( index>-1 && index < n ) {
start = rowR[index];
end = rowR[index+1];
printf("bdx:%d idx:%d start:%d end:%d\n", blockIdx.x, threadIdx.x, start, end);
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
bl = __ldg( b+index );
#else
bl = b[index];
#endif
#pragma unroll
for( i=start; i<end; i++ )
v += valR[i] * x[ colR[i] ];
start = rowD[index];
end = rowD[index+1];
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * x[ colD[i] ];
v = bl - v;
// add more local iterations
local_x[threadIdx.x] = x[index]; //+ ( v - tmp); // / (valD[start]);
__syncthreads();
#pragma unroll
for( j=0; j<localiters-1; j++ )
{
tmp = zero;
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * local_x[ colD[i] - inddiag];
local_x[threadIdx.x] += ( v - tmp) / (valD[start]);
}
if( threadIdx.x > overlap ) { // RAS
x[index] = local_x[threadIdx.x];
}
}
}
*/
__global__ void
magma_dbajac_csr_o_ls_kernel1(int localiters, int n,
int matrices, int overlap,
double * valD,
magma_index_t * rowD,
magma_index_t * colD,
double * valR,
magma_index_t * rowR,
magma_index_t * colR,
const double * __restrict__ b,
double * x )
{
int inddiag = blockIdx.x*blockDim.x;
int index = blockIdx.x*blockDim.x+threadIdx.x;
int i, j, start, end;
//bool leaveLoop = false;
if ( index>-1 && index < n ) {
start = rowR[index];
end = rowR[index+1];
double zero = MAGMA_D_MAKE(0.0, 0.0);
double bl, tmp = zero, v = zero;
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
bl = __ldg( b+index );
#else
bl = b[index];
#endif
if( start != end ){
#pragma unroll
for( i=start; i<end; i++ )
v += valR[i] * x[ colR[i] ];
}
start = rowD[index];
end = rowD[index+1];
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * x[ colD[i] ];
v = bl - v;
// add more local iterations
__shared__ double local_x[ BLOCKSIZE ];
local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]);
__syncthreads();
#pragma unroll
for( j=0; j<localiters-1; j++ )
{
tmp = zero;
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * local_x[ colD[i] - inddiag];
local_x[threadIdx.x] += ( v - tmp) / (valD[start]);
}
if( threadIdx.x >= overlap ) { // only write back the lower subdomain
x[index] = local_x[threadIdx.x];
}
}
}
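/*
    Illustrative summary of the relaxation each thread block performs above
    (added comment, not part of the original file), writing D for the local
    diagonal block and R for the remaining off-block entries of the row, and
    assuming the diagonal entry D(i,i) is the first stored element of row i
    (which is what the division by valD[start] relies on):
        v           = b(i) - sum_j R(i,j) * x(j)                 (computed once)
        local_x(i) += ( v - sum_j D(i,j) * local_x(j) ) / D(i,i)
    repeated localiters times on the shared-memory copy local_x before the
    non-overlapping part of the subdomain is written back to x.
*/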
__global__ void
magma_dbajac_csr_o_ls_kernel2(int localiters, int n,
int matrices, int overlap,
double * valD0,
magma_index_t * rowD0,
magma_index_t * colD0,
double * valR0,
magma_index_t * rowR0,
magma_index_t * colR0,
double * valD1,
magma_index_t * rowD1,
magma_index_t * colD1,
double * valR1,
magma_index_t * rowR1,
magma_index_t * colR1,
const double * __restrict__ b,
double * x )
{
int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2;
int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2;
int i, j, start, end;
//bool leaveLoop = false;
double zero = MAGMA_D_MAKE(0.0, 0.0);
double bl, tmp = zero, v = zero;
double *valR, *valD;
magma_index_t *colR, *rowR, *colD, *rowD;
if ( blockIdx.x%matrices==0 ) {
valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1;
}else if ( blockIdx.x%matrices==1 ) {
valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0;
}
if ( index>-1 && index < n ) {
start = rowR[index];
end = rowR[index+1];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
bl = __ldg( b+index );
#else
bl = b[index];
#endif
if( start != end ){
#pragma unroll
for( i=start; i<end; i++ )
v += valR[i] * x[ colR[i] ];
}
start = rowD[index];
end = rowD[index+1];
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * x[ colD[i] ];
v = bl - v;
// add more local iterations
__shared__ double local_x[ BLOCKSIZE ];
local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]);
__syncthreads();
#pragma unroll
for( j=0; j<localiters-1; j++ )
{
tmp = zero;
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * local_x[ colD[i] - inddiag];
local_x[threadIdx.x] += ( v - tmp) / (valD[start]);
}
if( threadIdx.x >= overlap ) { // only write back the lower subdomain
x[index] = local_x[threadIdx.x];
}
}
}
__global__ void
magma_dbajac_csr_o_ls_kernel4(int localiters, int n,
int matrices, int overlap,
double * valD0, magma_index_t * rowD0, magma_index_t * colD0, double * valR0, magma_index_t * rowR0, magma_index_t * colR0,
double * valD1, magma_index_t * rowD1, magma_index_t * colD1, double * valR1, magma_index_t * rowR1, magma_index_t * colR1,
double * valD2, magma_index_t * rowD2, magma_index_t * colD2, double * valR2, magma_index_t * rowR2, magma_index_t * colR2,
double * valD3, magma_index_t * rowD3, magma_index_t * colD3, double * valR3, magma_index_t * rowR3, magma_index_t * colR3,
const double * __restrict__ b,
double * x )
{
int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap;
int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x;
int i, j, start, end;
//bool leaveLoop = false;
double zero = MAGMA_D_MAKE(0.0, 0.0);
double bl, tmp = zero, v = zero;
double *valR, *valD;
magma_index_t *colR, *rowR, *colD, *rowD;
if ( blockIdx.x%matrices==0 ) {
valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3;
}else if ( blockIdx.x%matrices==1 ) {
valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2;
}else if ( blockIdx.x%matrices==2 ) {
valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1;
}else if ( blockIdx.x%matrices==3 ) {
valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0;
}
if ( index>-1 && index < n ) {
start = rowR[index];
end = rowR[index+1];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
bl = __ldg( b+index );
#else
bl = b[index];
#endif
if( start != end ){
#pragma unroll
for( i=start; i<end; i++ )
v += valR[i] * x[ colR[i] ];
}
start = rowD[index];
end = rowD[index+1];
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * x[ colD[i] ];
v = bl - v;
// add more local iterations
__shared__ double local_x[ BLOCKSIZE ];
local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]);
__syncthreads();
#pragma unroll
for( j=0; j<localiters-1; j++ )
{
tmp = zero;
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * local_x[ colD[i] - inddiag];
local_x[threadIdx.x] += ( v - tmp) / (valD[start]);
}
if( threadIdx.x >= overlap ) { // only write back the lower subdomain
x[index] = local_x[threadIdx.x];
}
}
}
__global__ void
magma_dbajac_csr_o_ls_kernel8(int localiters, int n,
int matrices, int overlap,
double * valD0, magma_index_t * rowD0, magma_index_t * colD0, double * valR0, magma_index_t * rowR0, magma_index_t * colR0,
double * valD1, magma_index_t * rowD1, magma_index_t * colD1, double * valR1, magma_index_t * rowR1, magma_index_t * colR1,
double * valD2, magma_index_t * rowD2, magma_index_t * colD2, double * valR2, magma_index_t * rowR2, magma_index_t * colR2,
double * valD3, magma_index_t * rowD3, magma_index_t * colD3, double * valR3, magma_index_t * rowR3, magma_index_t * colR3,
double * valD4, magma_index_t * rowD4, magma_index_t * colD4, double * valR4, magma_index_t * rowR4, magma_index_t * colR4,
double * valD5, magma_index_t * rowD5, magma_index_t * colD5, double * valR5, magma_index_t * rowR5, magma_index_t * colR5,
double * valD6, magma_index_t * rowD6, magma_index_t * colD6, double * valR6, magma_index_t * rowR6, magma_index_t * colR6,
double * valD7, magma_index_t * rowD7, magma_index_t * colD7, double * valR7, magma_index_t * rowR7, magma_index_t * colR7,
const double * __restrict__ b,
double * x )
{
int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap;
int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x;
int i, j, start, end;
double zero = MAGMA_D_MAKE(0.0, 0.0);
double bl, tmp = zero, v = zero;
double *valR, *valD;
magma_index_t *colR, *rowR, *colD, *rowD;
if( blockIdx.x%matrices==0 ){
valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7;
}else if ( blockIdx.x%matrices==1 ) {
valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6;
}else if ( blockIdx.x%matrices==2 ) {
valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5;
}else if ( blockIdx.x%matrices==3 ) {
valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4;
}else if ( blockIdx.x%matrices==4 ) {
valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3;
}else if ( blockIdx.x%matrices==5 ) {
valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2;
}else if ( blockIdx.x%matrices==6 ) {
valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1;
}else if ( blockIdx.x%matrices==7 ) {
valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0;
}
if ( index>-1 && index < n ) {
start = rowR[index];
end = rowR[index+1];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
bl = __ldg( b+index );
#else
bl = b[index];
#endif
if( start != end ){
#pragma unroll
for( i=start; i<end; i++ )
v += valR[i] * x[ colR[i] ];
}
start = rowD[index];
end = rowD[index+1];
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * x[ colD[i] ];
v = bl - v;
// add more local iterations
__shared__ double local_x[ BLOCKSIZE ];
local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]);
__syncthreads();
#pragma unroll
for( j=0; j<localiters-1; j++ )
{
tmp = zero;
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * local_x[ colD[i] - inddiag];
local_x[threadIdx.x] += ( v - tmp) / (valD[start]);
}
if( threadIdx.x >= overlap ) { // only write back the lower subdomain
x[index] = local_x[threadIdx.x];
}
}
}
__global__ void
magma_dbajac_csr_o_ls_kernel16(int localiters, int n,
int matrices, int overlap,
double *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , double *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 ,
double *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , double *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 ,
double *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , double *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 ,
double *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , double *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 ,
double *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , double *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 ,
double *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , double *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 ,
double *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , double *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 ,
double *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , double *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 ,
double *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , double *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 ,
double *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , double *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 ,
double *valD10, magma_index_t *rowD10, magma_index_t *colD10, double *valR10, magma_index_t *rowR10, magma_index_t *colR10,
double *valD11, magma_index_t *rowD11, magma_index_t *colD11, double *valR11, magma_index_t *rowR11, magma_index_t *colR11,
double *valD12, magma_index_t *rowD12, magma_index_t *colD12, double *valR12, magma_index_t *rowR12, magma_index_t *colR12,
double *valD13, magma_index_t *rowD13, magma_index_t *colD13, double *valR13, magma_index_t *rowR13, magma_index_t *colR13,
double *valD14, magma_index_t *rowD14, magma_index_t *colD14, double *valR14, magma_index_t *rowR14, magma_index_t *colR14,
double *valD15, magma_index_t *rowD15, magma_index_t *colD15, double *valR15, magma_index_t *rowR15, magma_index_t *colR15,
const double * __restrict__ b,
double * x )
{
int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap;
int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x;
int i, j, start, end;
double zero = MAGMA_D_MAKE(0.0, 0.0);
double bl, tmp = zero, v = zero;
double *valR, *valD;
magma_index_t *colR, *rowR, *colD, *rowD;
if ( blockIdx.x%matrices==0 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; }
else if ( blockIdx.x%matrices==1 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; }
else if ( blockIdx.x%matrices==2 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; }
else if ( blockIdx.x%matrices==3 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; }
else if ( blockIdx.x%matrices==4 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; }
else if ( blockIdx.x%matrices==5 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; }
else if ( blockIdx.x%matrices==6 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; }
else if ( blockIdx.x%matrices==7 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; }
else if ( blockIdx.x%matrices==8 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; }
else if ( blockIdx.x%matrices==9 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; }
else if ( blockIdx.x%matrices==10 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; }
else if ( blockIdx.x%matrices==11 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; }
else if ( blockIdx.x%matrices==12 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }
else if ( blockIdx.x%matrices==13 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }
else if ( blockIdx.x%matrices==14 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }
else if ( blockIdx.x%matrices==15 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; }
if ( index>-1 && index < n ) {
start = rowR[index];
end = rowR[index+1];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
bl = __ldg( b+index );
#else
bl = b[index];
#endif
if( start != end ){
#pragma unroll
for( i=start; i<end; i++ )
v += valR[i] * x[ colR[i] ];
}
start = rowD[index];
end = rowD[index+1];
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * x[ colD[i] ];
v = bl - v;
// add more local iterations
__shared__ double local_x[ BLOCKSIZE ];
local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]);
__syncthreads();
#pragma unroll
for( j=0; j<localiters-1; j++ )
{
tmp = zero;
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * local_x[ colD[i] - inddiag];
local_x[threadIdx.x] += ( v - tmp) / (valD[start]);
}
if( threadIdx.x >= overlap ) { // only write back the lower subdomain
x[index] = local_x[threadIdx.x];
}
}
}
__global__ void
magma_dbajac_csr_o_ls_kernel32(int localiters, int n,
int matrices, int overlap,
double *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , double *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 ,
double *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , double *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 ,
double *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , double *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 ,
double *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , double *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 ,
double *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , double *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 ,
double *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , double *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 ,
double *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , double *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 ,
double *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , double *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 ,
double *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , double *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 ,
double *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , double *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 ,
double *valD10, magma_index_t *rowD10, magma_index_t *colD10, double *valR10, magma_index_t *rowR10, magma_index_t *colR10,
double *valD11, magma_index_t *rowD11, magma_index_t *colD11, double *valR11, magma_index_t *rowR11, magma_index_t *colR11,
double *valD12, magma_index_t *rowD12, magma_index_t *colD12, double *valR12, magma_index_t *rowR12, magma_index_t *colR12,
double *valD13, magma_index_t *rowD13, magma_index_t *colD13, double *valR13, magma_index_t *rowR13, magma_index_t *colR13,
double *valD14, magma_index_t *rowD14, magma_index_t *colD14, double *valR14, magma_index_t *rowR14, magma_index_t *colR14,
double *valD15, magma_index_t *rowD15, magma_index_t *colD15, double *valR15, magma_index_t *rowR15, magma_index_t *colR15,
double *valD16, magma_index_t *rowD16, magma_index_t *colD16, double *valR16, magma_index_t *rowR16, magma_index_t *colR16,
double *valD17, magma_index_t *rowD17, magma_index_t *colD17, double *valR17, magma_index_t *rowR17, magma_index_t *colR17,
double *valD18, magma_index_t *rowD18, magma_index_t *colD18, double *valR18, magma_index_t *rowR18, magma_index_t *colR18,
double *valD19, magma_index_t *rowD19, magma_index_t *colD19, double *valR19, magma_index_t *rowR19, magma_index_t *colR19,
double *valD20, magma_index_t *rowD20, magma_index_t *colD20, double *valR20, magma_index_t *rowR20, magma_index_t *colR20,
double *valD21, magma_index_t *rowD21, magma_index_t *colD21, double *valR21, magma_index_t *rowR21, magma_index_t *colR21,
double *valD22, magma_index_t *rowD22, magma_index_t *colD22, double *valR22, magma_index_t *rowR22, magma_index_t *colR22,
double *valD23, magma_index_t *rowD23, magma_index_t *colD23, double *valR23, magma_index_t *rowR23, magma_index_t *colR23,
double *valD24, magma_index_t *rowD24, magma_index_t *colD24, double *valR24, magma_index_t *rowR24, magma_index_t *colR24,
double *valD25, magma_index_t *rowD25, magma_index_t *colD25, double *valR25, magma_index_t *rowR25, magma_index_t *colR25,
double *valD26, magma_index_t *rowD26, magma_index_t *colD26, double *valR26, magma_index_t *rowR26, magma_index_t *colR26,
double *valD27, magma_index_t *rowD27, magma_index_t *colD27, double *valR27, magma_index_t *rowR27, magma_index_t *colR27,
double *valD28, magma_index_t *rowD28, magma_index_t *colD28, double *valR28, magma_index_t *rowR28, magma_index_t *colR28,
double *valD29, magma_index_t *rowD29, magma_index_t *colD29, double *valR29, magma_index_t *rowR29, magma_index_t *colR29,
double *valD30, magma_index_t *rowD30, magma_index_t *colD30, double *valR30, magma_index_t *rowR30, magma_index_t *colR30,
double *valD31, magma_index_t *rowD31, magma_index_t *colD31, double *valR31, magma_index_t *rowR31, magma_index_t *colR31,
const double * __restrict__ b,
double * x )
{
int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap;
int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x;
int i, j, start, end;
double zero = MAGMA_D_MAKE(0.0, 0.0);
double bl, tmp = zero, v = zero;
double *valR, *valD;
magma_index_t *colR, *rowR, *colD, *rowD;
if ( blockIdx.x%matrices==0 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; }
else if ( blockIdx.x%matrices==1 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; }
else if ( blockIdx.x%matrices==2 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; }
else if ( blockIdx.x%matrices==3 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; }
else if ( blockIdx.x%matrices==4 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; }
else if ( blockIdx.x%matrices==5 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; }
else if ( blockIdx.x%matrices==6 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; }
else if ( blockIdx.x%matrices==7 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; }
else if ( blockIdx.x%matrices==8 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; }
else if ( blockIdx.x%matrices==9 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; }
else if ( blockIdx.x%matrices==10 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; }
else if ( blockIdx.x%matrices==11 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; }
else if ( blockIdx.x%matrices==12 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; }
else if ( blockIdx.x%matrices==13 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; }
else if ( blockIdx.x%matrices==14 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; }
else if ( blockIdx.x%matrices==15 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; }
else if ( blockIdx.x%matrices==16 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; }
else if ( blockIdx.x%matrices==17 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; }
else if ( blockIdx.x%matrices==18 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; }
else if ( blockIdx.x%matrices==19 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; }
else if ( blockIdx.x%matrices==20 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; }
else if ( blockIdx.x%matrices==21 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; }
else if ( blockIdx.x%matrices==22 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; }
else if ( blockIdx.x%matrices==23 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; }
else if ( blockIdx.x%matrices==24 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; }
else if ( blockIdx.x%matrices==25 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; }
else if ( blockIdx.x%matrices==26 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; }
else if ( blockIdx.x%matrices==27 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; }
else if ( blockIdx.x%matrices==28 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }
else if ( blockIdx.x%matrices==29 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }
else if ( blockIdx.x%matrices==30 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }
else if ( blockIdx.x%matrices==31 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; }
if ( index>-1 && index < n ) {
start = rowR[index];
end = rowR[index+1];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
bl = __ldg( b+index );
#else
bl = b[index];
#endif
if( start != end ){
#pragma unroll
for( i=start; i<end; i++ )
v += valR[i] * x[ colR[i] ];
}
start = rowD[index];
end = rowD[index+1];
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * x[ colD[i] ];
v = bl - v;
// add more local iterations
__shared__ double local_x[ BLOCKSIZE ];
local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]);
__syncthreads();
#pragma unroll
for( j=0; j<localiters-1; j++ )
{
tmp = zero;
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * local_x[ colD[i] - inddiag];
local_x[threadIdx.x] += ( v - tmp) / (valD[start]);
}
if( threadIdx.x >= overlap ) { // only write back the lower subdomain
x[index] = local_x[threadIdx.x];
}
}
}
__global__ void
magma_dbajac_csr_o_ls_kernel64(int localiters, int n,
int matrices, int overlap,
double *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , double *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 ,
double *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , double *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 ,
double *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , double *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 ,
double *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , double *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 ,
double *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , double *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 ,
double *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , double *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 ,
double *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , double *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 ,
double *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , double *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 ,
double *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , double *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 ,
double *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , double *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 ,
double *valD10, magma_index_t *rowD10, magma_index_t *colD10, double *valR10, magma_index_t *rowR10, magma_index_t *colR10,
double *valD11, magma_index_t *rowD11, magma_index_t *colD11, double *valR11, magma_index_t *rowR11, magma_index_t *colR11,
double *valD12, magma_index_t *rowD12, magma_index_t *colD12, double *valR12, magma_index_t *rowR12, magma_index_t *colR12,
double *valD13, magma_index_t *rowD13, magma_index_t *colD13, double *valR13, magma_index_t *rowR13, magma_index_t *colR13,
double *valD14, magma_index_t *rowD14, magma_index_t *colD14, double *valR14, magma_index_t *rowR14, magma_index_t *colR14,
double *valD15, magma_index_t *rowD15, magma_index_t *colD15, double *valR15, magma_index_t *rowR15, magma_index_t *colR15,
double *valD16, magma_index_t *rowD16, magma_index_t *colD16, double *valR16, magma_index_t *rowR16, magma_index_t *colR16,
double *valD17, magma_index_t *rowD17, magma_index_t *colD17, double *valR17, magma_index_t *rowR17, magma_index_t *colR17,
double *valD18, magma_index_t *rowD18, magma_index_t *colD18, double *valR18, magma_index_t *rowR18, magma_index_t *colR18,
double *valD19, magma_index_t *rowD19, magma_index_t *colD19, double *valR19, magma_index_t *rowR19, magma_index_t *colR19,
double *valD20, magma_index_t *rowD20, magma_index_t *colD20, double *valR20, magma_index_t *rowR20, magma_index_t *colR20,
double *valD21, magma_index_t *rowD21, magma_index_t *colD21, double *valR21, magma_index_t *rowR21, magma_index_t *colR21,
double *valD22, magma_index_t *rowD22, magma_index_t *colD22, double *valR22, magma_index_t *rowR22, magma_index_t *colR22,
double *valD23, magma_index_t *rowD23, magma_index_t *colD23, double *valR23, magma_index_t *rowR23, magma_index_t *colR23,
double *valD24, magma_index_t *rowD24, magma_index_t *colD24, double *valR24, magma_index_t *rowR24, magma_index_t *colR24,
double *valD25, magma_index_t *rowD25, magma_index_t *colD25, double *valR25, magma_index_t *rowR25, magma_index_t *colR25,
double *valD26, magma_index_t *rowD26, magma_index_t *colD26, double *valR26, magma_index_t *rowR26, magma_index_t *colR26,
double *valD27, magma_index_t *rowD27, magma_index_t *colD27, double *valR27, magma_index_t *rowR27, magma_index_t *colR27,
double *valD28, magma_index_t *rowD28, magma_index_t *colD28, double *valR28, magma_index_t *rowR28, magma_index_t *colR28,
double *valD29, magma_index_t *rowD29, magma_index_t *colD29, double *valR29, magma_index_t *rowR29, magma_index_t *colR29,
double *valD30, magma_index_t *rowD30, magma_index_t *colD30, double *valR30, magma_index_t *rowR30, magma_index_t *colR30,
double *valD31, magma_index_t *rowD31, magma_index_t *colD31, double *valR31, magma_index_t *rowR31, magma_index_t *colR31,
double *valD32, magma_index_t *rowD32, magma_index_t *colD32, double *valR32, magma_index_t *rowR32, magma_index_t *colR32,
double *valD33, magma_index_t *rowD33, magma_index_t *colD33, double *valR33, magma_index_t *rowR33, magma_index_t *colR33,
double *valD34, magma_index_t *rowD34, magma_index_t *colD34, double *valR34, magma_index_t *rowR34, magma_index_t *colR34,
double *valD35, magma_index_t *rowD35, magma_index_t *colD35, double *valR35, magma_index_t *rowR35, magma_index_t *colR35,
double *valD36, magma_index_t *rowD36, magma_index_t *colD36, double *valR36, magma_index_t *rowR36, magma_index_t *colR36,
double *valD37, magma_index_t *rowD37, magma_index_t *colD37, double *valR37, magma_index_t *rowR37, magma_index_t *colR37,
double *valD38, magma_index_t *rowD38, magma_index_t *colD38, double *valR38, magma_index_t *rowR38, magma_index_t *colR38,
double *valD39, magma_index_t *rowD39, magma_index_t *colD39, double *valR39, magma_index_t *rowR39, magma_index_t *colR39,
double *valD40, magma_index_t *rowD40, magma_index_t *colD40, double *valR40, magma_index_t *rowR40, magma_index_t *colR40,
double *valD41, magma_index_t *rowD41, magma_index_t *colD41, double *valR41, magma_index_t *rowR41, magma_index_t *colR41,
double *valD42, magma_index_t *rowD42, magma_index_t *colD42, double *valR42, magma_index_t *rowR42, magma_index_t *colR42,
double *valD43, magma_index_t *rowD43, magma_index_t *colD43, double *valR43, magma_index_t *rowR43, magma_index_t *colR43,
double *valD44, magma_index_t *rowD44, magma_index_t *colD44, double *valR44, magma_index_t *rowR44, magma_index_t *colR44,
double *valD45, magma_index_t *rowD45, magma_index_t *colD45, double *valR45, magma_index_t *rowR45, magma_index_t *colR45,
double *valD46, magma_index_t *rowD46, magma_index_t *colD46, double *valR46, magma_index_t *rowR46, magma_index_t *colR46,
double *valD47, magma_index_t *rowD47, magma_index_t *colD47, double *valR47, magma_index_t *rowR47, magma_index_t *colR47,
double *valD48, magma_index_t *rowD48, magma_index_t *colD48, double *valR48, magma_index_t *rowR48, magma_index_t *colR48,
double *valD49, magma_index_t *rowD49, magma_index_t *colD49, double *valR49, magma_index_t *rowR49, magma_index_t *colR49,
double *valD50, magma_index_t *rowD50, magma_index_t *colD50, double *valR50, magma_index_t *rowR50, magma_index_t *colR50,
double *valD51, magma_index_t *rowD51, magma_index_t *colD51, double *valR51, magma_index_t *rowR51, magma_index_t *colR51,
double *valD52, magma_index_t *rowD52, magma_index_t *colD52, double *valR52, magma_index_t *rowR52, magma_index_t *colR52,
double *valD53, magma_index_t *rowD53, magma_index_t *colD53, double *valR53, magma_index_t *rowR53, magma_index_t *colR53,
double *valD54, magma_index_t *rowD54, magma_index_t *colD54, double *valR54, magma_index_t *rowR54, magma_index_t *colR54,
double *valD55, magma_index_t *rowD55, magma_index_t *colD55, double *valR55, magma_index_t *rowR55, magma_index_t *colR55,
double *valD56, magma_index_t *rowD56, magma_index_t *colD56, double *valR56, magma_index_t *rowR56, magma_index_t *colR56,
double *valD57, magma_index_t *rowD57, magma_index_t *colD57, double *valR57, magma_index_t *rowR57, magma_index_t *colR57,
double *valD58, magma_index_t *rowD58, magma_index_t *colD58, double *valR58, magma_index_t *rowR58, magma_index_t *colR58,
double *valD59, magma_index_t *rowD59, magma_index_t *colD59, double *valR59, magma_index_t *rowR59, magma_index_t *colR59,
double *valD60, magma_index_t *rowD60, magma_index_t *colD60, double *valR60, magma_index_t *rowR60, magma_index_t *colR60,
double *valD61, magma_index_t *rowD61, magma_index_t *colD61, double *valR61, magma_index_t *rowR61, magma_index_t *colR61,
double *valD62, magma_index_t *rowD62, magma_index_t *colD62, double *valR62, magma_index_t *rowR62, magma_index_t *colR62,
double *valD63, magma_index_t *rowD63, magma_index_t *colD63, double *valR63, magma_index_t *rowR63, magma_index_t *colR63,
const double * __restrict__ b,
double * x )
{
int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap;
int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x;
int i, j, start, end;
double zero = MAGMA_D_MAKE(0.0, 0.0);
double bl, tmp = zero, v = zero;
double *valR, *valD;
magma_index_t *colR, *rowR, *colD, *rowD;
if ( blockIdx.x%matrices==0 ) { valR = valR63; valD = valD63; colR = colR63; rowR = rowR63; colD = colD63; rowD = rowD63; }
else if ( blockIdx.x%matrices==1 ) { valR = valR62; valD = valD62; colR = colR62; rowR = rowR62; colD = colD62; rowD = rowD62; }
else if ( blockIdx.x%matrices==2 ) { valR = valR61; valD = valD61; colR = colR61; rowR = rowR61; colD = colD61; rowD = rowD61; }
else if ( blockIdx.x%matrices==3 ) { valR = valR60; valD = valD60; colR = colR60; rowR = rowR60; colD = colD60; rowD = rowD60; }
else if ( blockIdx.x%matrices==4 ) { valR = valR59; valD = valD59; colR = colR59; rowR = rowR59; colD = colD59; rowD = rowD59; }
else if ( blockIdx.x%matrices==5 ) { valR = valR58; valD = valD58; colR = colR58; rowR = rowR58; colD = colD58; rowD = rowD58; }
else if ( blockIdx.x%matrices==6 ) { valR = valR57; valD = valD57; colR = colR57; rowR = rowR57; colD = colD57; rowD = rowD57; }
else if ( blockIdx.x%matrices==7 ) { valR = valR56; valD = valD56; colR = colR56; rowR = rowR56; colD = colD56; rowD = rowD56; }
else if ( blockIdx.x%matrices==8 ) { valR = valR55; valD = valD55; colR = colR55; rowR = rowR55; colD = colD55; rowD = rowD55; }
else if ( blockIdx.x%matrices==9 ) { valR = valR54; valD = valD54; colR = colR54; rowR = rowR54; colD = colD54; rowD = rowD54; }
else if ( blockIdx.x%matrices==10 ) { valR = valR53; valD = valD53; colR = colR53; rowR = rowR53; colD = colD53; rowD = rowD53; }
else if ( blockIdx.x%matrices==11 ) { valR = valR52; valD = valD52; colR = colR52; rowR = rowR52; colD = colD52; rowD = rowD52; }
else if ( blockIdx.x%matrices==12 ) { valR = valR51; valD = valD51; colR = colR51; rowR = rowR51; colD = colD51; rowD = rowD51; }
else if ( blockIdx.x%matrices==13 ) { valR = valR50; valD = valD50; colR = colR50; rowR = rowR50; colD = colD50; rowD = rowD50; }
else if ( blockIdx.x%matrices==14 ) { valR = valR49; valD = valD49; colR = colR49; rowR = rowR49; colD = colD49; rowD = rowD49; }
else if ( blockIdx.x%matrices==15 ) { valR = valR48; valD = valD48; colR = colR48; rowR = rowR48; colD = colD48; rowD = rowD48; }
else if ( blockIdx.x%matrices==16 ) { valR = valR47; valD = valD47; colR = colR47; rowR = rowR47; colD = colD47; rowD = rowD47; }
else if ( blockIdx.x%matrices==17 ) { valR = valR46; valD = valD46; colR = colR46; rowR = rowR46; colD = colD46; rowD = rowD46; }
else if ( blockIdx.x%matrices==18 ) { valR = valR45; valD = valD45; colR = colR45; rowR = rowR45; colD = colD45; rowD = rowD45; }
else if ( blockIdx.x%matrices==19 ) { valR = valR44; valD = valD44; colR = colR44; rowR = rowR44; colD = colD44; rowD = rowD44; }
else if ( blockIdx.x%matrices==20 ) { valR = valR43; valD = valD43; colR = colR43; rowR = rowR43; colD = colD43; rowD = rowD43; }
else if ( blockIdx.x%matrices==21 ) { valR = valR42; valD = valD42; colR = colR42; rowR = rowR42; colD = colD42; rowD = rowD42; }
else if ( blockIdx.x%matrices==22 ) { valR = valR41; valD = valD41; colR = colR41; rowR = rowR41; colD = colD41; rowD = rowD41; }
else if ( blockIdx.x%matrices==23 ) { valR = valR40; valD = valD40; colR = colR40; rowR = rowR40; colD = colD40; rowD = rowD40; }
else if ( blockIdx.x%matrices==24 ) { valR = valR39; valD = valD39; colR = colR39; rowR = rowR39; colD = colD39; rowD = rowD39; }
else if ( blockIdx.x%matrices==25 ) { valR = valR38; valD = valD38; colR = colR38; rowR = rowR38; colD = colD38; rowD = rowD38; }
else if ( blockIdx.x%matrices==26 ) { valR = valR37; valD = valD37; colR = colR37; rowR = rowR37; colD = colD37; rowD = rowD37; }
else if ( blockIdx.x%matrices==27 ) { valR = valR36; valD = valD36; colR = colR36; rowR = rowR36; colD = colD36; rowD = rowD36; }
else if ( blockIdx.x%matrices==28 ) { valR = valR35; valD = valD35; colR = colR35; rowR = rowR35; colD = colD35; rowD = rowD35; }
else if ( blockIdx.x%matrices==29 ) { valR = valR34; valD = valD34; colR = colR34; rowR = rowR34; colD = colD34; rowD = rowD34; }
else if ( blockIdx.x%matrices==30 ) { valR = valR33; valD = valD33; colR = colR33; rowR = rowR33; colD = colD33; rowD = rowD33; }
else if ( blockIdx.x%matrices==31 ) { valR = valR32; valD = valD32; colR = colR32; rowR = rowR32; colD = colD32; rowD = rowD32; }
else if ( blockIdx.x%matrices==32 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; }
else if ( blockIdx.x%matrices==33 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; }
else if ( blockIdx.x%matrices==34 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; }
else if ( blockIdx.x%matrices==35 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; }
else if ( blockIdx.x%matrices==36 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; }
else if ( blockIdx.x%matrices==37 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; }
else if ( blockIdx.x%matrices==38 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; }
else if ( blockIdx.x%matrices==39 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; }
else if ( blockIdx.x%matrices==40 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; }
else if ( blockIdx.x%matrices==41 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; }
else if ( blockIdx.x%matrices==42 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; }
else if ( blockIdx.x%matrices==43 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; }
else if ( blockIdx.x%matrices==44 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; }
else if ( blockIdx.x%matrices==45 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; }
else if ( blockIdx.x%matrices==46 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; }
else if ( blockIdx.x%matrices==47 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; }
else if ( blockIdx.x%matrices==48 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; }
else if ( blockIdx.x%matrices==49 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; }
else if ( blockIdx.x%matrices==50 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; }
else if ( blockIdx.x%matrices==51 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; }
else if ( blockIdx.x%matrices==52 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; }
else if ( blockIdx.x%matrices==53 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; }
else if ( blockIdx.x%matrices==54 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; }
else if ( blockIdx.x%matrices==55 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; }
else if ( blockIdx.x%matrices==56 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; }
else if ( blockIdx.x%matrices==57 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; }
else if ( blockIdx.x%matrices==58 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; }
else if ( blockIdx.x%matrices==59 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; }
else if ( blockIdx.x%matrices==60 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }
else if ( blockIdx.x%matrices==61 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }
else if ( blockIdx.x%matrices==62 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }
else if ( blockIdx.x%matrices==63 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; }
if ( index>-1 && index < n ) {
start = rowR[index];
end = rowR[index+1];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
bl = __ldg( b+index );
#else
bl = b[index];
#endif
if( start != end ){
#pragma unroll
for( i=start; i<end; i++ )
v += valR[i] * x[ colR[i] ];
}
start = rowD[index];
end = rowD[index+1];
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * x[ colD[i] ];
v = bl - v;
// add more local iterations
__shared__ double local_x[ BLOCKSIZE ];
local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]);
__syncthreads();
#pragma unroll
for( j=0; j<localiters-1; j++ )
{
tmp = zero;
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * local_x[ colD[i] - inddiag];
local_x[threadIdx.x] += ( v - tmp) / (valD[start]);
}
if( threadIdx.x >= overlap ) { // only write back the lower subdomain
x[index] = local_x[threadIdx.x];
}
}
}
/**
Purpose
-------
    This routine is a block-asynchronous Jacobi iteration
    with directed restricted additive Schwarz overlap (top-down), performing
    localiters local Jacobi-updates within each block. The input are two sets
    of CSR matrices: one set containing the diagonal blocks, one set
    containing the remaining off-diagonal parts.
    Arguments
    ---------
    @param[in]
    localiters  magma_int_t
                number of local Jacobi-like updates
    @param[in]
    matrices    magma_int_t
                number of diagonal/off-diagonal matrix pairs (sub-domains)
    @param[in]
    overlap     magma_int_t
                number of rows shared between neighboring blocks
    @param[in]
    D           magma_d_matrix*
                set of input matrices with diagonal blocks
    @param[in]
    R           magma_d_matrix*
                set of input matrices with non-diagonal parts
    @param[in]
    b           magma_d_matrix
                RHS
    @param[in,out]
    x           magma_d_matrix*
                iterate/solution
    @param[in]
    queue       magma_queue_t
                Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
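/*
    Usage sketch for magma_dbajac_csr_overlap (illustrative only): the values
    chosen for localiters, matrices and overlap below are arbitrary examples,
    and the D[], R[], b, x and queue objects are assumed to have been set up
    elsewhere (e.g. by a domain-decomposition preprocessing step).

        magma_int_t localiters = 4;             // local Jacobi sweeps per block
        magma_int_t matrices   = 8;             // number of D/R splittings
        magma_int_t overlap    = BLOCKSIZE / 2; // rows shared between neighboring blocks
        magma_d_matrix D[8], R[8], b, x;        // device-resident CSR matrices / vectors
        // ... fill D[i] (diagonal blocks), R[i] (remainders), b and x ...
        magma_dbajac_csr_overlap( localiters, matrices, overlap,
                                  D, R, b, &x, queue );
*/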
extern "C" magma_int_t
magma_dbajac_csr_overlap(
magma_int_t localiters,
magma_int_t matrices,
magma_int_t overlap,
magma_d_matrix *D,
magma_d_matrix *R,
magma_d_matrix b,
magma_d_matrix *x,
magma_queue_t queue )
{
int blocksize1 = BLOCKSIZE;
int blocksize2 = 1;
int size = D[0].num_rows;
int min_nnz=100;
for(int i=0; i<matrices; i++){
min_nnz = min(min_nnz, R[i].nnz);
}
if ( min_nnz > -1 ){
if ( matrices == 1 ){
int dimgrid1 = magma_ceildiv( size , blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
hipLaunchKernelGGL(( magma_dbajac_csr_o_ls_kernel1), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
localiters, size, matrices, overlap,
D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol,
b.dval, x->dval );
}
else if (matrices == 2) {
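        // each block advances by (blocksize1 - overlap) rows, so the grid is
        // stretched by blocksize1/(blocksize1 - overlap) to still cover all rows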
int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
hipLaunchKernelGGL(( magma_dbajac_csr_o_ls_kernel2), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
localiters, size, matrices, overlap,
D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol,
D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol,
b.dval, x->dval );
//magma_dbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>>
// ( localiters, size, matrices, overlap, D, R, b.dval, x->dval );
}
else if (matrices == 4){
int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
hipLaunchKernelGGL(( magma_dbajac_csr_o_ls_kernel4), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
localiters, size, matrices, overlap,
D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol,
D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol,
D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol,
D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol,
b.dval, x->dval );
//magma_dbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>>
// ( localiters, size, matrices, overlap, D, R, b.dval, x->dval );
}
else if (matrices == 8) {
int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
hipLaunchKernelGGL(( magma_dbajac_csr_o_ls_kernel8), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
localiters, size, matrices, overlap,
D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol,
D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol,
D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol,
D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol,
D[4].dval, D[4].drow, D[4].dcol, R[4].dval, R[4].drow, R[4].dcol,
D[5].dval, D[5].drow, D[5].dcol, R[5].dval, R[5].drow, R[5].dcol,
D[6].dval, D[6].drow, D[6].dcol, R[6].dval, R[6].drow, R[6].dcol,
D[7].dval, D[7].drow, D[7].dcol, R[7].dval, R[7].drow, R[7].dcol,
b.dval, x->dval );
//magma_dbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>>
// ( localiters, size, matrices, overlap, D, R, b.dval, x->dval );
}
else if (matrices == 16) {
int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
hipLaunchKernelGGL(( magma_dbajac_csr_o_ls_kernel16), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
localiters, size, matrices, overlap,
D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol,
D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol,
D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol,
D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol,
D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol,
D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol,
D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol,
D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol,
D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol,
D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol,
D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol,
D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol,
D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol,
D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol,
D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol,
D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol,
b.dval, x->dval );
}
else if (matrices == 32) {
int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
hipLaunchKernelGGL(( magma_dbajac_csr_o_ls_kernel32), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
localiters, size, matrices, overlap,
D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol,
D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol,
D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol,
D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol,
D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol,
D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol,
D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol,
D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol,
D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol,
D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol,
D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol,
D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol,
D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol,
D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol,
D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol,
D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol,
D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol,
D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol,
D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol,
D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol,
D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol,
D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol,
D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol,
D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol,
D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol,
D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol,
D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol,
D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol,
D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol,
D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol,
D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol,
D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol,
b.dval, x->dval );
}
else if (matrices == 64) {
int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
hipLaunchKernelGGL(( magma_dbajac_csr_o_ls_kernel64), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
localiters, size, matrices, overlap,
D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol,
D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol,
D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol,
D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol,
D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol,
D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol,
D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol,
D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol,
D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol,
D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol,
D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol,
D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol,
D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol,
D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol,
D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol,
D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol,
D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol,
D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol,
D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol,
D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol,
D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol,
D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol,
D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol,
D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol,
D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol,
D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol,
D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol,
D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol,
D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol,
D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol,
D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol,
D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol,
D[32].dval, D[32].drow, D[32].dcol, R[32].dval, R[32].drow, R[32].dcol,
D[33].dval, D[33].drow, D[33].dcol, R[33].dval, R[33].drow, R[33].dcol,
D[34].dval, D[34].drow, D[34].dcol, R[34].dval, R[34].drow, R[34].dcol,
D[35].dval, D[35].drow, D[35].dcol, R[35].dval, R[35].drow, R[35].dcol,
D[36].dval, D[36].drow, D[36].dcol, R[36].dval, R[36].drow, R[36].dcol,
D[37].dval, D[37].drow, D[37].dcol, R[37].dval, R[37].drow, R[37].dcol,
D[38].dval, D[38].drow, D[38].dcol, R[38].dval, R[38].drow, R[38].dcol,
D[39].dval, D[39].drow, D[39].dcol, R[39].dval, R[39].drow, R[39].dcol,
D[40].dval, D[40].drow, D[40].dcol, R[40].dval, R[40].drow, R[40].dcol,
D[41].dval, D[41].drow, D[41].dcol, R[41].dval, R[41].drow, R[41].dcol,
D[42].dval, D[42].drow, D[42].dcol, R[42].dval, R[42].drow, R[42].dcol,
D[43].dval, D[43].drow, D[43].dcol, R[43].dval, R[43].drow, R[43].dcol,
D[44].dval, D[44].drow, D[44].dcol, R[44].dval, R[44].drow, R[44].dcol,
D[45].dval, D[45].drow, D[45].dcol, R[45].dval, R[45].drow, R[45].dcol,
D[46].dval, D[46].drow, D[46].dcol, R[46].dval, R[46].drow, R[46].dcol,
D[47].dval, D[47].drow, D[47].dcol, R[47].dval, R[47].drow, R[47].dcol,
D[48].dval, D[48].drow, D[48].dcol, R[48].dval, R[48].drow, R[48].dcol,
D[49].dval, D[49].drow, D[49].dcol, R[49].dval, R[49].drow, R[49].dcol,
D[50].dval, D[50].drow, D[50].dcol, R[50].dval, R[50].drow, R[50].dcol,
D[51].dval, D[51].drow, D[51].dcol, R[51].dval, R[51].drow, R[51].dcol,
D[52].dval, D[52].drow, D[52].dcol, R[52].dval, R[52].drow, R[52].dcol,
D[53].dval, D[53].drow, D[53].dcol, R[53].dval, R[53].drow, R[53].dcol,
D[54].dval, D[54].drow, D[54].dcol, R[54].dval, R[54].drow, R[54].dcol,
D[55].dval, D[55].drow, D[55].dcol, R[55].dval, R[55].drow, R[55].dcol,
D[56].dval, D[56].drow, D[56].dcol, R[56].dval, R[56].drow, R[56].dcol,
D[57].dval, D[57].drow, D[57].dcol, R[57].dval, R[57].drow, R[57].dcol,
D[58].dval, D[58].drow, D[58].dcol, R[58].dval, R[58].drow, R[58].dcol,
D[59].dval, D[59].drow, D[59].dcol, R[59].dval, R[59].drow, R[59].dcol,
D[60].dval, D[60].drow, D[60].dcol, R[60].dval, R[60].drow, R[60].dcol,
D[61].dval, D[61].drow, D[61].dcol, R[61].dval, R[61].drow, R[61].dcol,
D[62].dval, D[62].drow, D[62].dcol, R[62].dval, R[62].drow, R[62].dcol,
D[63].dval, D[63].drow, D[63].dcol, R[63].dval, R[63].drow, R[63].dcol,
b.dval, x->dval );
//magma_dbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>>
// ( localiters, size, matrices, overlap, D, R, b.dval, x->dval );
}
else {
printf("error: invalid matrix count.\n");
}
}
else {
printf("error: all elements in diagonal block.\n");
}
return MAGMA_SUCCESS;
}
| c4999622d3162fd2fa339072ace74b3119e627e5.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zbajac_csr_overlap.cu, normal z -> d, Tue Aug 30 09:38:41 2016
*/
#include "magmasparse_internal.h"
#define PRECISION_d
#define BLOCKSIZE 256
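// simple spin-lock test kernel: each thread acquires lock id = threadIdx.x % n
// via atomicExch, passes through an (empty) critical section, and releases it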
__global__ void magma_dk_testLocking(unsigned int* locks, int n) {
int id = threadIdx.x % n;
bool leaveLoop = false;
while (!leaveLoop) {
if (atomicExch(&(locks[id]), 1u) == 0u) {
//critical section
leaveLoop = true;
atomicExch(&(locks[id]),0u);
}
}
}
/*
__global__ void
magma_dbajac_csr_o_ls_kernel(int localiters, int n,
int matrices, int overlap,
magma_d_matrix *D, magma_d_matrix *R,
const double * __restrict__ b,
double * x )
{
// int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap;
// int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x;
int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2;
int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2;
int i, j, start, end;
__shared__ double local_x[ BLOCKSIZE ];
double zero = MAGMA_D_MAKE(0.0, 0.0);
double bl, tmp = zero, v = zero;
double *valR, *valD;
magma_index_t *colR, *rowR, *colD, *rowD;
//valR = R[ (1+blockIdx.x-1)%matrices ].dval;
//colR = R[ (1+blockIdx.x-1)%matrices ].dcol;
//rowR = R[ (1+blockIdx.x-1)%matrices ].drow;
//valD = D[ (1+blockIdx.x-1)%matrices ].dval;
//colD = D[ (1+blockIdx.x-1)%matrices ].dcol;
//rowD = D[ (1+blockIdx.x-1)%matrices ].drow;
if( blockIdx.x%2==1 ){
valR = R[0].dval;
valD = D[0].dval;
colR = R[0].dcol;
rowR = R[0].drow;
colD = D[0].dcol;
rowD = D[0].drow;
}else{
valR = R[1].dval;
valD = D[1].dval;
colR = R[1].dcol;
rowR = R[1].drow;
colD = D[1].dcol;
rowD = D[1].drow;
}
if ( index>-1 && index < n ) {
start = rowR[index];
end = rowR[index+1];
printf("bdx:%d idx:%d start:%d end:%d\n", blockIdx.x, threadIdx.x, start, end);
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
bl = __ldg( b+index );
#else
bl = b[index];
#endif
#pragma unroll
for( i=start; i<end; i++ )
v += valR[i] * x[ colR[i] ];
start = rowD[index];
end = rowD[index+1];
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * x[ colD[i] ];
v = bl - v;
// add more local iterations
local_x[threadIdx.x] = x[index]; //+ ( v - tmp); // / (valD[start]);
__syncthreads();
#pragma unroll
for( j=0; j<localiters-1; j++ )
{
tmp = zero;
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * local_x[ colD[i] - inddiag];
local_x[threadIdx.x] += ( v - tmp) / (valD[start]);
}
if( threadIdx.x > overlap ) { // RAS
x[index] = local_x[threadIdx.x];
}
}
}
*/
__global__ void
magma_dbajac_csr_o_ls_kernel1(int localiters, int n,
int matrices, int overlap,
double * valD,
magma_index_t * rowD,
magma_index_t * colD,
double * valR,
magma_index_t * rowR,
magma_index_t * colR,
const double * __restrict__ b,
double * x )
{
int inddiag = blockIdx.x*blockDim.x;
int index = blockIdx.x*blockDim.x+threadIdx.x;
int i, j, start, end;
//bool leaveLoop = false;
if ( index>-1 && index < n ) {
start = rowR[index];
end = rowR[index+1];
double zero = MAGMA_D_MAKE(0.0, 0.0);
double bl, tmp = zero, v = zero;
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
bl = __ldg( b+index );
#else
bl = b[index];
#endif
if( start != end ){
#pragma unroll
for( i=start; i<end; i++ )
v += valR[i] * x[ colR[i] ];
}
start = rowD[index];
end = rowD[index+1];
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * x[ colD[i] ];
v = bl - v;
// add more local iterations
__shared__ double local_x[ BLOCKSIZE ];
local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]);
__syncthreads();
#pragma unroll
for( j=0; j<localiters-1; j++ )
{
tmp = zero;
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * local_x[ colD[i] - inddiag];
local_x[threadIdx.x] += ( v - tmp) / (valD[start]);
}
if( threadIdx.x >= overlap ) { // only write back the lower subdomain
x[index] = local_x[threadIdx.x];
}
}
}
__global__ void
magma_dbajac_csr_o_ls_kernel2(int localiters, int n,
int matrices, int overlap,
double * valD0,
magma_index_t * rowD0,
magma_index_t * colD0,
double * valR0,
magma_index_t * rowR0,
magma_index_t * colR0,
double * valD1,
magma_index_t * rowD1,
magma_index_t * colD1,
double * valR1,
magma_index_t * rowR1,
magma_index_t * colR1,
const double * __restrict__ b,
double * x )
{
int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2;
int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2;
int i, j, start, end;
//bool leaveLoop = false;
double zero = MAGMA_D_MAKE(0.0, 0.0);
double bl, tmp = zero, v = zero;
double *valR, *valD;
magma_index_t *colR, *rowR, *colD, *rowD;
if ( blockIdx.x%matrices==0 ) {
valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1;
}else if ( blockIdx.x%matrices==1 ) {
valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0;
}
if ( index>-1 && index < n ) {
start = rowR[index];
end = rowR[index+1];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
bl = __ldg( b+index );
#else
bl = b[index];
#endif
if( start != end ){
#pragma unroll
for( i=start; i<end; i++ )
v += valR[i] * x[ colR[i] ];
}
start = rowD[index];
end = rowD[index+1];
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * x[ colD[i] ];
v = bl - v;
// add more local iterations
__shared__ double local_x[ BLOCKSIZE ];
local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]);
__syncthreads();
#pragma unroll
for( j=0; j<localiters-1; j++ )
{
tmp = zero;
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * local_x[ colD[i] - inddiag];
local_x[threadIdx.x] += ( v - tmp) / (valD[start]);
}
if( threadIdx.x >= overlap ) { // only write back the lower subdomain
x[index] = local_x[threadIdx.x];
}
}
}
__global__ void
magma_dbajac_csr_o_ls_kernel4(int localiters, int n,
int matrices, int overlap,
double * valD0, magma_index_t * rowD0, magma_index_t * colD0, double * valR0, magma_index_t * rowR0, magma_index_t * colR0,
double * valD1, magma_index_t * rowD1, magma_index_t * colD1, double * valR1, magma_index_t * rowR1, magma_index_t * colR1,
double * valD2, magma_index_t * rowD2, magma_index_t * colD2, double * valR2, magma_index_t * rowR2, magma_index_t * colR2,
double * valD3, magma_index_t * rowD3, magma_index_t * colD3, double * valR3, magma_index_t * rowR3, magma_index_t * colR3,
const double * __restrict__ b,
double * x )
{
int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap;
int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x;
int i, j, start, end;
//bool leaveLoop = false;
double zero = MAGMA_D_MAKE(0.0, 0.0);
double bl, tmp = zero, v = zero;
double *valR, *valD;
magma_index_t *colR, *rowR, *colD, *rowD;
if ( blockIdx.x%matrices==0 ) {
valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3;
}else if ( blockIdx.x%matrices==1 ) {
valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2;
}else if ( blockIdx.x%matrices==2 ) {
valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1;
}else if ( blockIdx.x%matrices==3 ) {
valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0;
}
if ( index>-1 && index < n ) {
start = rowR[index];
end = rowR[index+1];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
bl = __ldg( b+index );
#else
bl = b[index];
#endif
if( start != end ){
#pragma unroll
for( i=start; i<end; i++ )
v += valR[i] * x[ colR[i] ];
}
start = rowD[index];
end = rowD[index+1];
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * x[ colD[i] ];
v = bl - v;
// add more local iterations
__shared__ double local_x[ BLOCKSIZE ];
local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]);
__syncthreads();
#pragma unroll
for( j=0; j<localiters-1; j++ )
{
tmp = zero;
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * local_x[ colD[i] - inddiag];
local_x[threadIdx.x] += ( v - tmp) / (valD[start]);
}
if( threadIdx.x >= overlap ) { // only write back the lower subdomain
x[index] = local_x[threadIdx.x];
}
}
}
__global__ void
magma_dbajac_csr_o_ls_kernel8(int localiters, int n,
int matrices, int overlap,
double * valD0, magma_index_t * rowD0, magma_index_t * colD0, double * valR0, magma_index_t * rowR0, magma_index_t * colR0,
double * valD1, magma_index_t * rowD1, magma_index_t * colD1, double * valR1, magma_index_t * rowR1, magma_index_t * colR1,
double * valD2, magma_index_t * rowD2, magma_index_t * colD2, double * valR2, magma_index_t * rowR2, magma_index_t * colR2,
double * valD3, magma_index_t * rowD3, magma_index_t * colD3, double * valR3, magma_index_t * rowR3, magma_index_t * colR3,
double * valD4, magma_index_t * rowD4, magma_index_t * colD4, double * valR4, magma_index_t * rowR4, magma_index_t * colR4,
double * valD5, magma_index_t * rowD5, magma_index_t * colD5, double * valR5, magma_index_t * rowR5, magma_index_t * colR5,
double * valD6, magma_index_t * rowD6, magma_index_t * colD6, double * valR6, magma_index_t * rowR6, magma_index_t * colR6,
double * valD7, magma_index_t * rowD7, magma_index_t * colD7, double * valR7, magma_index_t * rowR7, magma_index_t * colR7,
const double * __restrict__ b,
double * x )
{
int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap;
int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x;
int i, j, start, end;
double zero = MAGMA_D_MAKE(0.0, 0.0);
double bl, tmp = zero, v = zero;
double *valR, *valD;
magma_index_t *colR, *rowR, *colD, *rowD;
if( blockIdx.x%matrices==0 ){
valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7;
}else if ( blockIdx.x%matrices==1 ) {
valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6;
}else if ( blockIdx.x%matrices==2 ) {
valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5;
}else if ( blockIdx.x%matrices==3 ) {
valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4;
}else if ( blockIdx.x%matrices==4 ) {
valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3;
}else if ( blockIdx.x%matrices==5 ) {
valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2;
}else if ( blockIdx.x%matrices==6 ) {
valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1;
}else if ( blockIdx.x%matrices==7 ) {
valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0;
}
if ( index>-1 && index < n ) {
start = rowR[index];
end = rowR[index+1];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
bl = __ldg( b+index );
#else
bl = b[index];
#endif
if( start != end ){
#pragma unroll
for( i=start; i<end; i++ )
v += valR[i] * x[ colR[i] ];
}
start = rowD[index];
end = rowD[index+1];
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * x[ colD[i] ];
v = bl - v;
// add more local iterations
__shared__ double local_x[ BLOCKSIZE ];
local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]);
__syncthreads();
#pragma unroll
for( j=0; j<localiters-1; j++ )
{
tmp = zero;
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * local_x[ colD[i] - inddiag];
local_x[threadIdx.x] += ( v - tmp) / (valD[start]);
}
if( threadIdx.x >= overlap ) { // only write back the lower subdomain
x[index] = local_x[threadIdx.x];
}
}
}
__global__ void
magma_dbajac_csr_o_ls_kernel16(int localiters, int n,
int matrices, int overlap,
double *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , double *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 ,
double *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , double *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 ,
double *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , double *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 ,
double *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , double *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 ,
double *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , double *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 ,
double *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , double *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 ,
double *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , double *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 ,
double *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , double *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 ,
double *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , double *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 ,
double *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , double *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 ,
double *valD10, magma_index_t *rowD10, magma_index_t *colD10, double *valR10, magma_index_t *rowR10, magma_index_t *colR10,
double *valD11, magma_index_t *rowD11, magma_index_t *colD11, double *valR11, magma_index_t *rowR11, magma_index_t *colR11,
double *valD12, magma_index_t *rowD12, magma_index_t *colD12, double *valR12, magma_index_t *rowR12, magma_index_t *colR12,
double *valD13, magma_index_t *rowD13, magma_index_t *colD13, double *valR13, magma_index_t *rowR13, magma_index_t *colR13,
double *valD14, magma_index_t *rowD14, magma_index_t *colD14, double *valR14, magma_index_t *rowR14, magma_index_t *colR14,
double *valD15, magma_index_t *rowD15, magma_index_t *colD15, double *valR15, magma_index_t *rowR15, magma_index_t *colR15,
const double * __restrict__ b,
double * x )
{
int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap;
int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x;
int i, j, start, end;
double zero = MAGMA_D_MAKE(0.0, 0.0);
double bl, tmp = zero, v = zero;
double *valR, *valD;
magma_index_t *colR, *rowR, *colD, *rowD;
if ( blockIdx.x%matrices==0 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; }
else if ( blockIdx.x%matrices==1 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; }
else if ( blockIdx.x%matrices==2 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; }
else if ( blockIdx.x%matrices==3 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; }
else if ( blockIdx.x%matrices==4 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; }
else if ( blockIdx.x%matrices==5 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; }
else if ( blockIdx.x%matrices==6 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; }
else if ( blockIdx.x%matrices==7 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; }
else if ( blockIdx.x%matrices==8 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; }
else if ( blockIdx.x%matrices==9 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; }
else if ( blockIdx.x%matrices==10 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; }
else if ( blockIdx.x%matrices==11 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; }
else if ( blockIdx.x%matrices==12 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }
else if ( blockIdx.x%matrices==13 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }
else if ( blockIdx.x%matrices==14 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }
else if ( blockIdx.x%matrices==15 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; }
if ( index>-1 && index < n ) {
start = rowR[index];
end = rowR[index+1];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
bl = __ldg( b+index );
#else
bl = b[index];
#endif
if( start != end ){
#pragma unroll
for( i=start; i<end; i++ )
v += valR[i] * x[ colR[i] ];
}
start = rowD[index];
end = rowD[index+1];
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * x[ colD[i] ];
v = bl - v;
// add more local iterations
__shared__ double local_x[ BLOCKSIZE ];
local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]);
__syncthreads();
#pragma unroll
for( j=0; j<localiters-1; j++ )
{
tmp = zero;
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * local_x[ colD[i] - inddiag];
local_x[threadIdx.x] += ( v - tmp) / (valD[start]);
}
if( threadIdx.x >= overlap ) { // only write back the lower subdomain
x[index] = local_x[threadIdx.x];
}
}
}
__global__ void
magma_dbajac_csr_o_ls_kernel32(int localiters, int n,
int matrices, int overlap,
double *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , double *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 ,
double *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , double *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 ,
double *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , double *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 ,
double *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , double *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 ,
double *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , double *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 ,
double *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , double *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 ,
double *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , double *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 ,
double *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , double *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 ,
double *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , double *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 ,
double *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , double *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 ,
double *valD10, magma_index_t *rowD10, magma_index_t *colD10, double *valR10, magma_index_t *rowR10, magma_index_t *colR10,
double *valD11, magma_index_t *rowD11, magma_index_t *colD11, double *valR11, magma_index_t *rowR11, magma_index_t *colR11,
double *valD12, magma_index_t *rowD12, magma_index_t *colD12, double *valR12, magma_index_t *rowR12, magma_index_t *colR12,
double *valD13, magma_index_t *rowD13, magma_index_t *colD13, double *valR13, magma_index_t *rowR13, magma_index_t *colR13,
double *valD14, magma_index_t *rowD14, magma_index_t *colD14, double *valR14, magma_index_t *rowR14, magma_index_t *colR14,
double *valD15, magma_index_t *rowD15, magma_index_t *colD15, double *valR15, magma_index_t *rowR15, magma_index_t *colR15,
double *valD16, magma_index_t *rowD16, magma_index_t *colD16, double *valR16, magma_index_t *rowR16, magma_index_t *colR16,
double *valD17, magma_index_t *rowD17, magma_index_t *colD17, double *valR17, magma_index_t *rowR17, magma_index_t *colR17,
double *valD18, magma_index_t *rowD18, magma_index_t *colD18, double *valR18, magma_index_t *rowR18, magma_index_t *colR18,
double *valD19, magma_index_t *rowD19, magma_index_t *colD19, double *valR19, magma_index_t *rowR19, magma_index_t *colR19,
double *valD20, magma_index_t *rowD20, magma_index_t *colD20, double *valR20, magma_index_t *rowR20, magma_index_t *colR20,
double *valD21, magma_index_t *rowD21, magma_index_t *colD21, double *valR21, magma_index_t *rowR21, magma_index_t *colR21,
double *valD22, magma_index_t *rowD22, magma_index_t *colD22, double *valR22, magma_index_t *rowR22, magma_index_t *colR22,
double *valD23, magma_index_t *rowD23, magma_index_t *colD23, double *valR23, magma_index_t *rowR23, magma_index_t *colR23,
double *valD24, magma_index_t *rowD24, magma_index_t *colD24, double *valR24, magma_index_t *rowR24, magma_index_t *colR24,
double *valD25, magma_index_t *rowD25, magma_index_t *colD25, double *valR25, magma_index_t *rowR25, magma_index_t *colR25,
double *valD26, magma_index_t *rowD26, magma_index_t *colD26, double *valR26, magma_index_t *rowR26, magma_index_t *colR26,
double *valD27, magma_index_t *rowD27, magma_index_t *colD27, double *valR27, magma_index_t *rowR27, magma_index_t *colR27,
double *valD28, magma_index_t *rowD28, magma_index_t *colD28, double *valR28, magma_index_t *rowR28, magma_index_t *colR28,
double *valD29, magma_index_t *rowD29, magma_index_t *colD29, double *valR29, magma_index_t *rowR29, magma_index_t *colR29,
double *valD30, magma_index_t *rowD30, magma_index_t *colD30, double *valR30, magma_index_t *rowR30, magma_index_t *colR30,
double *valD31, magma_index_t *rowD31, magma_index_t *colD31, double *valR31, magma_index_t *rowR31, magma_index_t *colR31,
const double * __restrict__ b,
double * x )
{
int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap;
int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x;
int i, j, start, end;
double zero = MAGMA_D_MAKE(0.0, 0.0);
double bl, tmp = zero, v = zero;
double *valR, *valD;
magma_index_t *colR, *rowR, *colD, *rowD;
if ( blockIdx.x%matrices==0 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; }
else if ( blockIdx.x%matrices==1 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; }
else if ( blockIdx.x%matrices==2 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; }
else if ( blockIdx.x%matrices==3 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; }
else if ( blockIdx.x%matrices==4 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; }
else if ( blockIdx.x%matrices==5 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; }
else if ( blockIdx.x%matrices==6 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; }
else if ( blockIdx.x%matrices==7 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; }
else if ( blockIdx.x%matrices==8 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; }
else if ( blockIdx.x%matrices==9 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; }
else if ( blockIdx.x%matrices==10 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; }
else if ( blockIdx.x%matrices==11 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; }
else if ( blockIdx.x%matrices==12 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; }
else if ( blockIdx.x%matrices==13 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; }
else if ( blockIdx.x%matrices==14 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; }
else if ( blockIdx.x%matrices==15 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; }
else if ( blockIdx.x%matrices==16 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; }
else if ( blockIdx.x%matrices==17 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; }
else if ( blockIdx.x%matrices==18 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; }
else if ( blockIdx.x%matrices==19 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; }
else if ( blockIdx.x%matrices==20 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; }
else if ( blockIdx.x%matrices==21 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; }
else if ( blockIdx.x%matrices==22 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; }
else if ( blockIdx.x%matrices==23 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; }
else if ( blockIdx.x%matrices==24 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; }
else if ( blockIdx.x%matrices==25 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; }
else if ( blockIdx.x%matrices==26 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; }
else if ( blockIdx.x%matrices==27 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; }
else if ( blockIdx.x%matrices==28 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }
else if ( blockIdx.x%matrices==29 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }
else if ( blockIdx.x%matrices==30 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }
else if ( blockIdx.x%matrices==31 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; }
if ( index>-1 && index < n ) {
start = rowR[index];
end = rowR[index+1];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
bl = __ldg( b+index );
#else
bl = b[index];
#endif
if( start != end ){
#pragma unroll
for( i=start; i<end; i++ )
v += valR[i] * x[ colR[i] ];
}
start = rowD[index];
end = rowD[index+1];
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * x[ colD[i] ];
v = bl - v;
// add more local iterations
__shared__ double local_x[ BLOCKSIZE ];
local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]);
__syncthreads();
#pragma unroll
for( j=0; j<localiters-1; j++ )
{
tmp = zero;
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * local_x[ colD[i] - inddiag];
local_x[threadIdx.x] += ( v - tmp) / (valD[start]);
}
if( threadIdx.x >= overlap ) { // only write back the lower subdomain
x[index] = local_x[threadIdx.x];
}
}
}
__global__ void
magma_dbajac_csr_o_ls_kernel64(int localiters, int n,
int matrices, int overlap,
double *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , double *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 ,
double *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , double *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 ,
double *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , double *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 ,
double *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , double *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 ,
double *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , double *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 ,
double *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , double *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 ,
double *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , double *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 ,
double *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , double *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 ,
double *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , double *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 ,
double *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , double *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 ,
double *valD10, magma_index_t *rowD10, magma_index_t *colD10, double *valR10, magma_index_t *rowR10, magma_index_t *colR10,
double *valD11, magma_index_t *rowD11, magma_index_t *colD11, double *valR11, magma_index_t *rowR11, magma_index_t *colR11,
double *valD12, magma_index_t *rowD12, magma_index_t *colD12, double *valR12, magma_index_t *rowR12, magma_index_t *colR12,
double *valD13, magma_index_t *rowD13, magma_index_t *colD13, double *valR13, magma_index_t *rowR13, magma_index_t *colR13,
double *valD14, magma_index_t *rowD14, magma_index_t *colD14, double *valR14, magma_index_t *rowR14, magma_index_t *colR14,
double *valD15, magma_index_t *rowD15, magma_index_t *colD15, double *valR15, magma_index_t *rowR15, magma_index_t *colR15,
double *valD16, magma_index_t *rowD16, magma_index_t *colD16, double *valR16, magma_index_t *rowR16, magma_index_t *colR16,
double *valD17, magma_index_t *rowD17, magma_index_t *colD17, double *valR17, magma_index_t *rowR17, magma_index_t *colR17,
double *valD18, magma_index_t *rowD18, magma_index_t *colD18, double *valR18, magma_index_t *rowR18, magma_index_t *colR18,
double *valD19, magma_index_t *rowD19, magma_index_t *colD19, double *valR19, magma_index_t *rowR19, magma_index_t *colR19,
double *valD20, magma_index_t *rowD20, magma_index_t *colD20, double *valR20, magma_index_t *rowR20, magma_index_t *colR20,
double *valD21, magma_index_t *rowD21, magma_index_t *colD21, double *valR21, magma_index_t *rowR21, magma_index_t *colR21,
double *valD22, magma_index_t *rowD22, magma_index_t *colD22, double *valR22, magma_index_t *rowR22, magma_index_t *colR22,
double *valD23, magma_index_t *rowD23, magma_index_t *colD23, double *valR23, magma_index_t *rowR23, magma_index_t *colR23,
double *valD24, magma_index_t *rowD24, magma_index_t *colD24, double *valR24, magma_index_t *rowR24, magma_index_t *colR24,
double *valD25, magma_index_t *rowD25, magma_index_t *colD25, double *valR25, magma_index_t *rowR25, magma_index_t *colR25,
double *valD26, magma_index_t *rowD26, magma_index_t *colD26, double *valR26, magma_index_t *rowR26, magma_index_t *colR26,
double *valD27, magma_index_t *rowD27, magma_index_t *colD27, double *valR27, magma_index_t *rowR27, magma_index_t *colR27,
double *valD28, magma_index_t *rowD28, magma_index_t *colD28, double *valR28, magma_index_t *rowR28, magma_index_t *colR28,
double *valD29, magma_index_t *rowD29, magma_index_t *colD29, double *valR29, magma_index_t *rowR29, magma_index_t *colR29,
double *valD30, magma_index_t *rowD30, magma_index_t *colD30, double *valR30, magma_index_t *rowR30, magma_index_t *colR30,
double *valD31, magma_index_t *rowD31, magma_index_t *colD31, double *valR31, magma_index_t *rowR31, magma_index_t *colR31,
double *valD32, magma_index_t *rowD32, magma_index_t *colD32, double *valR32, magma_index_t *rowR32, magma_index_t *colR32,
double *valD33, magma_index_t *rowD33, magma_index_t *colD33, double *valR33, magma_index_t *rowR33, magma_index_t *colR33,
double *valD34, magma_index_t *rowD34, magma_index_t *colD34, double *valR34, magma_index_t *rowR34, magma_index_t *colR34,
double *valD35, magma_index_t *rowD35, magma_index_t *colD35, double *valR35, magma_index_t *rowR35, magma_index_t *colR35,
double *valD36, magma_index_t *rowD36, magma_index_t *colD36, double *valR36, magma_index_t *rowR36, magma_index_t *colR36,
double *valD37, magma_index_t *rowD37, magma_index_t *colD37, double *valR37, magma_index_t *rowR37, magma_index_t *colR37,
double *valD38, magma_index_t *rowD38, magma_index_t *colD38, double *valR38, magma_index_t *rowR38, magma_index_t *colR38,
double *valD39, magma_index_t *rowD39, magma_index_t *colD39, double *valR39, magma_index_t *rowR39, magma_index_t *colR39,
double *valD40, magma_index_t *rowD40, magma_index_t *colD40, double *valR40, magma_index_t *rowR40, magma_index_t *colR40,
double *valD41, magma_index_t *rowD41, magma_index_t *colD41, double *valR41, magma_index_t *rowR41, magma_index_t *colR41,
double *valD42, magma_index_t *rowD42, magma_index_t *colD42, double *valR42, magma_index_t *rowR42, magma_index_t *colR42,
double *valD43, magma_index_t *rowD43, magma_index_t *colD43, double *valR43, magma_index_t *rowR43, magma_index_t *colR43,
double *valD44, magma_index_t *rowD44, magma_index_t *colD44, double *valR44, magma_index_t *rowR44, magma_index_t *colR44,
double *valD45, magma_index_t *rowD45, magma_index_t *colD45, double *valR45, magma_index_t *rowR45, magma_index_t *colR45,
double *valD46, magma_index_t *rowD46, magma_index_t *colD46, double *valR46, magma_index_t *rowR46, magma_index_t *colR46,
double *valD47, magma_index_t *rowD47, magma_index_t *colD47, double *valR47, magma_index_t *rowR47, magma_index_t *colR47,
double *valD48, magma_index_t *rowD48, magma_index_t *colD48, double *valR48, magma_index_t *rowR48, magma_index_t *colR48,
double *valD49, magma_index_t *rowD49, magma_index_t *colD49, double *valR49, magma_index_t *rowR49, magma_index_t *colR49,
double *valD50, magma_index_t *rowD50, magma_index_t *colD50, double *valR50, magma_index_t *rowR50, magma_index_t *colR50,
double *valD51, magma_index_t *rowD51, magma_index_t *colD51, double *valR51, magma_index_t *rowR51, magma_index_t *colR51,
double *valD52, magma_index_t *rowD52, magma_index_t *colD52, double *valR52, magma_index_t *rowR52, magma_index_t *colR52,
double *valD53, magma_index_t *rowD53, magma_index_t *colD53, double *valR53, magma_index_t *rowR53, magma_index_t *colR53,
double *valD54, magma_index_t *rowD54, magma_index_t *colD54, double *valR54, magma_index_t *rowR54, magma_index_t *colR54,
double *valD55, magma_index_t *rowD55, magma_index_t *colD55, double *valR55, magma_index_t *rowR55, magma_index_t *colR55,
double *valD56, magma_index_t *rowD56, magma_index_t *colD56, double *valR56, magma_index_t *rowR56, magma_index_t *colR56,
double *valD57, magma_index_t *rowD57, magma_index_t *colD57, double *valR57, magma_index_t *rowR57, magma_index_t *colR57,
double *valD58, magma_index_t *rowD58, magma_index_t *colD58, double *valR58, magma_index_t *rowR58, magma_index_t *colR58,
double *valD59, magma_index_t *rowD59, magma_index_t *colD59, double *valR59, magma_index_t *rowR59, magma_index_t *colR59,
double *valD60, magma_index_t *rowD60, magma_index_t *colD60, double *valR60, magma_index_t *rowR60, magma_index_t *colR60,
double *valD61, magma_index_t *rowD61, magma_index_t *colD61, double *valR61, magma_index_t *rowR61, magma_index_t *colR61,
double *valD62, magma_index_t *rowD62, magma_index_t *colD62, double *valR62, magma_index_t *rowR62, magma_index_t *colR62,
double *valD63, magma_index_t *rowD63, magma_index_t *colD63, double *valR63, magma_index_t *rowR63, magma_index_t *colR63,
const double * __restrict__ b,
double * x )
{
int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap;
int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x;
int i, j, start, end;
double zero = MAGMA_D_MAKE(0.0, 0.0);
double bl, tmp = zero, v = zero;
double *valR, *valD;
magma_index_t *colR, *rowR, *colD, *rowD;
if ( blockIdx.x%matrices==0 ) { valR = valR63; valD = valD63; colR = colR63; rowR = rowR63; colD = colD63; rowD = rowD63; }
else if ( blockIdx.x%matrices==1 ) { valR = valR62; valD = valD62; colR = colR62; rowR = rowR62; colD = colD62; rowD = rowD62; }
else if ( blockIdx.x%matrices==2 ) { valR = valR61; valD = valD61; colR = colR61; rowR = rowR61; colD = colD61; rowD = rowD61; }
else if ( blockIdx.x%matrices==3 ) { valR = valR60; valD = valD60; colR = colR60; rowR = rowR60; colD = colD60; rowD = rowD60; }
else if ( blockIdx.x%matrices==4 ) { valR = valR59; valD = valD59; colR = colR59; rowR = rowR59; colD = colD59; rowD = rowD59; }
else if ( blockIdx.x%matrices==5 ) { valR = valR58; valD = valD58; colR = colR58; rowR = rowR58; colD = colD58; rowD = rowD58; }
else if ( blockIdx.x%matrices==6 ) { valR = valR57; valD = valD57; colR = colR57; rowR = rowR57; colD = colD57; rowD = rowD57; }
else if ( blockIdx.x%matrices==7 ) { valR = valR56; valD = valD56; colR = colR56; rowR = rowR56; colD = colD56; rowD = rowD56; }
else if ( blockIdx.x%matrices==8 ) { valR = valR55; valD = valD55; colR = colR55; rowR = rowR55; colD = colD55; rowD = rowD55; }
else if ( blockIdx.x%matrices==9 ) { valR = valR54; valD = valD54; colR = colR54; rowR = rowR54; colD = colD54; rowD = rowD54; }
else if ( blockIdx.x%matrices==10 ) { valR = valR53; valD = valD53; colR = colR53; rowR = rowR53; colD = colD53; rowD = rowD53; }
else if ( blockIdx.x%matrices==11 ) { valR = valR52; valD = valD52; colR = colR52; rowR = rowR52; colD = colD52; rowD = rowD52; }
else if ( blockIdx.x%matrices==12 ) { valR = valR51; valD = valD51; colR = colR51; rowR = rowR51; colD = colD51; rowD = rowD51; }
else if ( blockIdx.x%matrices==13 ) { valR = valR50; valD = valD50; colR = colR50; rowR = rowR50; colD = colD50; rowD = rowD50; }
else if ( blockIdx.x%matrices==14 ) { valR = valR49; valD = valD49; colR = colR49; rowR = rowR49; colD = colD49; rowD = rowD49; }
else if ( blockIdx.x%matrices==15 ) { valR = valR48; valD = valD48; colR = colR48; rowR = rowR48; colD = colD48; rowD = rowD48; }
else if ( blockIdx.x%matrices==16 ) { valR = valR47; valD = valD47; colR = colR47; rowR = rowR47; colD = colD47; rowD = rowD47; }
else if ( blockIdx.x%matrices==17 ) { valR = valR46; valD = valD46; colR = colR46; rowR = rowR46; colD = colD46; rowD = rowD46; }
else if ( blockIdx.x%matrices==18 ) { valR = valR45; valD = valD45; colR = colR45; rowR = rowR45; colD = colD45; rowD = rowD45; }
else if ( blockIdx.x%matrices==19 ) { valR = valR44; valD = valD44; colR = colR44; rowR = rowR44; colD = colD44; rowD = rowD44; }
else if ( blockIdx.x%matrices==20 ) { valR = valR43; valD = valD43; colR = colR43; rowR = rowR43; colD = colD43; rowD = rowD43; }
else if ( blockIdx.x%matrices==21 ) { valR = valR42; valD = valD42; colR = colR42; rowR = rowR42; colD = colD42; rowD = rowD42; }
else if ( blockIdx.x%matrices==22 ) { valR = valR41; valD = valD41; colR = colR41; rowR = rowR41; colD = colD41; rowD = rowD41; }
else if ( blockIdx.x%matrices==23 ) { valR = valR40; valD = valD40; colR = colR40; rowR = rowR40; colD = colD40; rowD = rowD40; }
else if ( blockIdx.x%matrices==24 ) { valR = valR39; valD = valD39; colR = colR39; rowR = rowR39; colD = colD39; rowD = rowD39; }
else if ( blockIdx.x%matrices==25 ) { valR = valR38; valD = valD38; colR = colR38; rowR = rowR38; colD = colD38; rowD = rowD38; }
else if ( blockIdx.x%matrices==26 ) { valR = valR37; valD = valD37; colR = colR37; rowR = rowR37; colD = colD37; rowD = rowD37; }
else if ( blockIdx.x%matrices==27 ) { valR = valR36; valD = valD36; colR = colR36; rowR = rowR36; colD = colD36; rowD = rowD36; }
else if ( blockIdx.x%matrices==28 ) { valR = valR35; valD = valD35; colR = colR35; rowR = rowR35; colD = colD35; rowD = rowD35; }
else if ( blockIdx.x%matrices==29 ) { valR = valR34; valD = valD34; colR = colR34; rowR = rowR34; colD = colD34; rowD = rowD34; }
else if ( blockIdx.x%matrices==30 ) { valR = valR33; valD = valD33; colR = colR33; rowR = rowR33; colD = colD33; rowD = rowD33; }
else if ( blockIdx.x%matrices==31 ) { valR = valR32; valD = valD32; colR = colR32; rowR = rowR32; colD = colD32; rowD = rowD32; }
else if ( blockIdx.x%matrices==32 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; }
else if ( blockIdx.x%matrices==33 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; }
else if ( blockIdx.x%matrices==34 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; }
else if ( blockIdx.x%matrices==35 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; }
else if ( blockIdx.x%matrices==36 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; }
else if ( blockIdx.x%matrices==37 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; }
else if ( blockIdx.x%matrices==38 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; }
else if ( blockIdx.x%matrices==39 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; }
else if ( blockIdx.x%matrices==40 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; }
else if ( blockIdx.x%matrices==41 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; }
else if ( blockIdx.x%matrices==42 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; }
else if ( blockIdx.x%matrices==43 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; }
else if ( blockIdx.x%matrices==44 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; }
else if ( blockIdx.x%matrices==45 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; }
else if ( blockIdx.x%matrices==46 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; }
else if ( blockIdx.x%matrices==47 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; }
else if ( blockIdx.x%matrices==48 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; }
else if ( blockIdx.x%matrices==49 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; }
else if ( blockIdx.x%matrices==50 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; }
else if ( blockIdx.x%matrices==51 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; }
else if ( blockIdx.x%matrices==52 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; }
else if ( blockIdx.x%matrices==53 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; }
else if ( blockIdx.x%matrices==54 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; }
else if ( blockIdx.x%matrices==55 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; }
else if ( blockIdx.x%matrices==56 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; }
else if ( blockIdx.x%matrices==57 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; }
else if ( blockIdx.x%matrices==58 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; }
else if ( blockIdx.x%matrices==59 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; }
else if ( blockIdx.x%matrices==60 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }
else if ( blockIdx.x%matrices==61 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }
else if ( blockIdx.x%matrices==62 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }
else if ( blockIdx.x%matrices==63 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; }
if ( index>-1 && index < n ) {
start = rowR[index];
end = rowR[index+1];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
bl = __ldg( b+index );
#else
bl = b[index];
#endif
if( start != end ){
#pragma unroll
for( i=start; i<end; i++ )
v += valR[i] * x[ colR[i] ];
}
start = rowD[index];
end = rowD[index+1];
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * x[ colD[i] ];
v = bl - v;
// add more local iterations
__shared__ double local_x[ BLOCKSIZE ];
local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]);
__syncthreads();
#pragma unroll
for( j=0; j<localiters-1; j++ )
{
tmp = zero;
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * local_x[ colD[i] - inddiag];
local_x[threadIdx.x] += ( v - tmp) / (valD[start]);
}
if( threadIdx.x >= overlap ) { // only write back the lower subdomain
x[index] = local_x[threadIdx.x];
}
}
}
/**
Purpose
-------
This routine is a block-asynchronous Jacobi iteration
with directed restricted additive Schwarz overlap (top-down), performing
localiters local Jacobi-updates within each block. The input is, per subdomain,
one pair of CSR matrices: one containing the diagonal block, one containing the rest.
Arguments
---------
@param[in]
localiters magma_int_t
number of local Jacobi-like updates
@param[in]
matrices magma_int_t
number of subdomain matrix pairs (supported: 1, 2, 4, 8, 16, 32, 64)
@param[in]
overlap magma_int_t
number of rows by which neighboring blocks overlap
@param[in]
D magma_d_matrix*
array of 'matrices' input matrices containing the diagonal blocks
@param[in]
R magma_d_matrix*
array of 'matrices' input matrices containing the non-diagonal parts
@param[in]
b magma_d_matrix
RHS
@param[in,out]
x magma_d_matrix*
iterate/solution
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dbajac_csr_overlap(
magma_int_t localiters,
magma_int_t matrices,
magma_int_t overlap,
magma_d_matrix *D,
magma_d_matrix *R,
magma_d_matrix b,
magma_d_matrix *x,
magma_queue_t queue )
{
int blocksize1 = BLOCKSIZE;
int blocksize2 = 1;
int size = D[0].num_rows;
int min_nnz=100;
for(int i=0; i<matrices; i++){
min_nnz = min(min_nnz, R[i].nnz);
}
if ( min_nnz > -1 ){
if ( matrices == 1 ){
int dimgrid1 = magma_ceildiv( size , blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
magma_dbajac_csr_o_ls_kernel1<<< grid, block, 0, queue->cuda_stream() >>>
( localiters, size, matrices, overlap,
D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol,
b.dval, x->dval );
}
else if (matrices == 2) {
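// With overlap, each thread block contributes only (blocksize1 - overlap) fresh
// rows, so the row count is scaled by blocksize1 / (blocksize1 - overlap) before
// dividing it into blocks (same grid sizing in all multi-matrix branches below).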
int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
magma_dbajac_csr_o_ls_kernel2<<< grid, block, 0, queue->cuda_stream() >>>
( localiters, size, matrices, overlap,
D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol,
D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol,
b.dval, x->dval );
//magma_dbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>>
// ( localiters, size, matrices, overlap, D, R, b.dval, x->dval );
}
else if (matrices == 4){
int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
magma_dbajac_csr_o_ls_kernel4<<< grid, block, 0, queue->cuda_stream() >>>
( localiters, size, matrices, overlap,
D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol,
D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol,
D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol,
D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol,
b.dval, x->dval );
//magma_dbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>>
// ( localiters, size, matrices, overlap, D, R, b.dval, x->dval );
}
else if (matrices == 8) {
int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
magma_dbajac_csr_o_ls_kernel8<<< grid, block, 0, queue->cuda_stream() >>>
( localiters, size, matrices, overlap,
D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol,
D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol,
D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol,
D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol,
D[4].dval, D[4].drow, D[4].dcol, R[4].dval, R[4].drow, R[4].dcol,
D[5].dval, D[5].drow, D[5].dcol, R[5].dval, R[5].drow, R[5].dcol,
D[6].dval, D[6].drow, D[6].dcol, R[6].dval, R[6].drow, R[6].dcol,
D[7].dval, D[7].drow, D[7].dcol, R[7].dval, R[7].drow, R[7].dcol,
b.dval, x->dval );
//magma_dbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>>
// ( localiters, size, matrices, overlap, D, R, b.dval, x->dval );
}
else if (matrices == 16) {
int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
magma_dbajac_csr_o_ls_kernel16<<< grid, block, 0, queue->cuda_stream() >>>
( localiters, size, matrices, overlap,
D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol,
D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol,
D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol,
D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol,
D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol,
D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol,
D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol,
D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol,
D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol,
D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol,
D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol,
D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol,
D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol,
D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol,
D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol,
D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol,
b.dval, x->dval );
}
else if (matrices == 32) {
int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
magma_dbajac_csr_o_ls_kernel32<<< grid, block, 0, queue->cuda_stream() >>>
( localiters, size, matrices, overlap,
D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol,
D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol,
D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol,
D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol,
D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol,
D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol,
D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol,
D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol,
D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol,
D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol,
D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol,
D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol,
D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol,
D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol,
D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol,
D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol,
D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol,
D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol,
D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol,
D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol,
D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol,
D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol,
D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol,
D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol,
D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol,
D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol,
D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol,
D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol,
D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol,
D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol,
D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol,
D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol,
b.dval, x->dval );
}
else if (matrices == 64) {
int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
magma_dbajac_csr_o_ls_kernel64<<< grid, block, 0, queue->cuda_stream() >>>
( localiters, size, matrices, overlap,
D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol,
D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol,
D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol,
D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol,
D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol,
D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol,
D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol,
D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol,
D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol,
D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol,
D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol,
D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol,
D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol,
D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol,
D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol,
D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol,
D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol,
D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol,
D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol,
D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol,
D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol,
D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol,
D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol,
D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol,
D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol,
D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol,
D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol,
D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol,
D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol,
D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol,
D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol,
D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol,
D[32].dval, D[32].drow, D[32].dcol, R[32].dval, R[32].drow, R[32].dcol,
D[33].dval, D[33].drow, D[33].dcol, R[33].dval, R[33].drow, R[33].dcol,
D[34].dval, D[34].drow, D[34].dcol, R[34].dval, R[34].drow, R[34].dcol,
D[35].dval, D[35].drow, D[35].dcol, R[35].dval, R[35].drow, R[35].dcol,
D[36].dval, D[36].drow, D[36].dcol, R[36].dval, R[36].drow, R[36].dcol,
D[37].dval, D[37].drow, D[37].dcol, R[37].dval, R[37].drow, R[37].dcol,
D[38].dval, D[38].drow, D[38].dcol, R[38].dval, R[38].drow, R[38].dcol,
D[39].dval, D[39].drow, D[39].dcol, R[39].dval, R[39].drow, R[39].dcol,
D[40].dval, D[40].drow, D[40].dcol, R[40].dval, R[40].drow, R[40].dcol,
D[41].dval, D[41].drow, D[41].dcol, R[41].dval, R[41].drow, R[41].dcol,
D[42].dval, D[42].drow, D[42].dcol, R[42].dval, R[42].drow, R[42].dcol,
D[43].dval, D[43].drow, D[43].dcol, R[43].dval, R[43].drow, R[43].dcol,
D[44].dval, D[44].drow, D[44].dcol, R[44].dval, R[44].drow, R[44].dcol,
D[45].dval, D[45].drow, D[45].dcol, R[45].dval, R[45].drow, R[45].dcol,
D[46].dval, D[46].drow, D[46].dcol, R[46].dval, R[46].drow, R[46].dcol,
D[47].dval, D[47].drow, D[47].dcol, R[47].dval, R[47].drow, R[47].dcol,
D[48].dval, D[48].drow, D[48].dcol, R[48].dval, R[48].drow, R[48].dcol,
D[49].dval, D[49].drow, D[49].dcol, R[49].dval, R[49].drow, R[49].dcol,
D[50].dval, D[50].drow, D[50].dcol, R[50].dval, R[50].drow, R[50].dcol,
D[51].dval, D[51].drow, D[51].dcol, R[51].dval, R[51].drow, R[51].dcol,
D[52].dval, D[52].drow, D[52].dcol, R[52].dval, R[52].drow, R[52].dcol,
D[53].dval, D[53].drow, D[53].dcol, R[53].dval, R[53].drow, R[53].dcol,
D[54].dval, D[54].drow, D[54].dcol, R[54].dval, R[54].drow, R[54].dcol,
D[55].dval, D[55].drow, D[55].dcol, R[55].dval, R[55].drow, R[55].dcol,
D[56].dval, D[56].drow, D[56].dcol, R[56].dval, R[56].drow, R[56].dcol,
D[57].dval, D[57].drow, D[57].dcol, R[57].dval, R[57].drow, R[57].dcol,
D[58].dval, D[58].drow, D[58].dcol, R[58].dval, R[58].drow, R[58].dcol,
D[59].dval, D[59].drow, D[59].dcol, R[59].dval, R[59].drow, R[59].dcol,
D[60].dval, D[60].drow, D[60].dcol, R[60].dval, R[60].drow, R[60].dcol,
D[61].dval, D[61].drow, D[61].dcol, R[61].dval, R[61].drow, R[61].dcol,
D[62].dval, D[62].drow, D[62].dcol, R[62].dval, R[62].drow, R[62].dcol,
D[63].dval, D[63].drow, D[63].dcol, R[63].dval, R[63].drow, R[63].dcol,
b.dval, x->dval );
//magma_dbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>>
// ( localiters, size, matrices, overlap, D, R, b.dval, x->dval );
}
else {
printf("error: invalid matrix count.\n");
}
}
else {
printf("error: all elements in diagonal block.\n");
}
return MAGMA_SUCCESS;
}
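/*
   Usage sketch (added for illustration; not part of the original MAGMA source).
   It assumes the caller has already split the system into per-subdomain
   diagonal blocks D[i] and off-diagonal remainders R[i], both stored as CSR
   matrices on the device; the array length and the localiters/overlap values
   below are placeholders.

   // magma_d_matrix D[8], R[8];        // prepared elsewhere: 8 subdomain splittings
   // magma_d_matrix b, x;              // device-resident RHS and iterate
   // magma_queue_t queue;              // created via magma_queue_create()
   // magma_int_t localiters = 4;       // local Jacobi-like sweeps per block
   // magma_int_t overlap = BLOCKSIZE / 2;  // rows shared between neighboring blocks
   //
   // magma_dbajac_csr_overlap( localiters, 8, overlap, D, R, b, &x, queue );
*/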
|
7a8408ddd24a2e722c14e9b694a8ac2780e0a25c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
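// Caffe stores matrices row-major, while BLAS expects column-major. A row-major
// M x N matrix is bit-identical to its N x M column-major transpose, so
// C = A * B (row-major) is computed here as C^T = B^T * A^T (column-major):
// the operands are swapped, and the result written as an N x M column-major
// matrix with leading dimension N is exactly the desired row-major C.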
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
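// The row-major M x N matrix A is what the BLAS sees as an N x M column-major
// matrix, so the transpose flag is flipped: CblasNoTrans maps to OP_T (and vice
// versa) and the leading dimension is N.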
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<size_t>(const int N, const size_t alpha, const size_t* X,
size_t* Y) {
NOT_IMPLEMENTED;
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
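// hipMemcpyDefault lets the runtime infer the copy direction from the pointer
// attributes, so the same call handles host-to-device, device-to-host and
// device-to-device copies.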
if (X != Y) {
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<size_t>(const int N, const size_t alpha, size_t *X) {
NOT_IMPLEMENTED;
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<size_t>(const int N, const size_t alpha, const size_t* X,
const size_t beta, size_t* Y) {
NOT_IMPLEMENTED;
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<size_t>(const int n, const size_t* x, const size_t* y,
size_t* out) {
NOT_IMPLEMENTED;
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<size_t>(const int n, const size_t* x, size_t* y) {
NOT_IMPLEMENTED;
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
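// For alpha == 0 a plain memset suffices, because an all-zero bit pattern
// represents the value 0 for the integer and IEEE floating-point types this
// template is instantiated with; any other value needs the kernel below.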
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
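// The typed overloads below remap the raw curand samples, which lie in (0, 1],
// onto the requested interval: r is scaled by (b - a) and then shifted by a,
// so nothing beyond scal and add_scalar is needed.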
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
| 7a8408ddd24a2e722c14e9b694a8ac2780e0a25c.cu | #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
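// Caffe stores matrices row-major, so C = A * B is computed as the
// column-major product C^T = B^T * A^T: the operands are swapped (B first)
// and M/N are exchanged in the call, with lda/ldb taken from the row-major
// leading dimensions computed below.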
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<size_t>(const int N, const size_t alpha, const size_t* X,
size_t* Y) {
NOT_IMPLEMENTED;
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<size_t>(const int N, const size_t alpha, size_t *X) {
NOT_IMPLEMENTED;
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<size_t>(const int N, const size_t alpha, const size_t* X,
const size_t beta, size_t* Y) {
NOT_IMPLEMENTED;
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<size_t>(const int n, const size_t* x, const size_t* y,
size_t* out) {
NOT_IMPLEMENTED;
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<size_t>(const int n, const size_t* x, size_t* y) {
NOT_IMPLEMENTED;
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
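// Fast path: when alpha == 0 the buffer can be cleared with a plain memset,
// since an all-zero byte pattern represents 0 for the int, float and double
// instantiations below.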
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
|
0baf0b005d5cbc865c182136ae52acc80ea299b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <map>
#include <random>
#include <iomanip>
#include <cmath>
#include <cassert>
#include <cstdint>
#include <png++/png.hpp>
#include <hiprand/hiprand.h>
template<typename charT>
std::map<std::string, std::string>
read_file(std::basic_istream<charT>& is)
{
if(!is.good())
throw std::invalid_argument("file open error");
std::map<std::string, std::string> contents;
while(!is.eof())
{
std::string line;
std::getline(is, line);
if(line.empty()) continue;
std::istringstream iss(line);
iss >> std::ws;
if(iss.peek() == '#') continue;
std::string key, value;
char eq;
iss >> key >> eq >> value;
if(eq != '=') throw std::runtime_error("file format error");
contents[key] = value;
}
return contents;
}
std::size_t digit(std::size_t n)
{
std::size_t dig = 0;
while(n > 0)
{
++dig;
n /= 10;
}
return dig;
}
// pre-calculated exp(dE / (kB * T))
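// Lookup layout used by update_field below: index = dH + 2 * dJ, where
// dJ in [0, 4] counts the neighbours aligned with the centre spin and
// dH is 1 when the centre spin is currently up. Each pair of entries
// therefore covers one alignment count (centre down, then centre up).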
__constant__ float exp_dE_beta[10];
// use texture memory as spins
// texture<bool, 2, hipReadModeElementType> field1;
// texture<bool, 2, hipReadModeElementType> field2;
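// Checkerboard (red/black) update: the `turn` flag selects the sub-lattice
// with (x + y) even or odd, so a site is never updated in the same launch
// as any of its four neighbours. Boundaries wrap around periodically.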
__global__
void update_field(bool* spins, const float* random,
const std::size_t x_size, const std::size_t y_size, bool turn)
{
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
if(turn)
{
if((x+y)%2 == 1) return;
}
else
{
if((x+y)%2 == 0) return;
}
const std::size_t xdim = blockDim.x * gridDim.x;
const std::size_t offset = x + y * xdim;
if(offset >= x_size * y_size) return;
const std::size_t n_offset = (y+1 < y_size) ? x + (y+1) * xdim : x;
const std::size_t e_offset = (x+1 < x_size) ? (x+1) + y * xdim : y * xdim;
const std::size_t s_offset = (y-1 >= 0) ? x + (y-1) * xdim : x + (y_size-1) * xdim;
const std::size_t w_offset = (x-1 >= 0) ? (x-1) + y * xdim : x_size - 1 + y * xdim;
const bool c = spins[offset]; // center
const bool n = spins[n_offset]; // north
const bool e = spins[e_offset]; // east
const bool s = spins[s_offset]; // south
const bool w = spins[w_offset]; // west
std::size_t dJ = 0;
if(c == n) ++dJ;
if(c == e) ++dJ;
if(c == s) ++dJ;
if(c == w) ++dJ;
const std::size_t dH = c ? 1 : 0;
if(exp_dE_beta[dH + dJ * 2] > random[offset])
spins[offset] = (!c);
return;
}
int main(int argc, char **argv)
{
if (argc != 2)
{
std::cerr << "Usage: ./ising <input.dat>" << std::endl;
std::cerr << "input: width = <int>" << std::endl;
std::cerr << " : height = <int>" << std::endl;
std::cerr << " : steps = <int>" << std::endl;
std::cerr << " : seed = <int>" << std::endl;
std::cerr << " : J = <float>" << std::endl;
std::cerr << " : H = <float>" << std::endl;
std::cerr << " : T = <float>" << std::endl;
std::cerr << " : kB = <float>" << std::endl;
return 1;
}
std::ifstream ifs(argv[1]);
if(!ifs.good())
{
std::cerr << "file output error: " << argv[1] << std::endl;
return 1;
}
const std::map<std::string, std::string> contents = read_file(ifs);
const std::size_t w = std::stoul(contents.at("width"));
const std::size_t h = std::stoul(contents.at("height"));
const std::size_t step = std::stoul(contents.at("steps"));
const std::uint64_t seed = std::stoul(contents.at("seed"));
const float J = std::stof(contents.at("J"));
const float H = std::stof(contents.at("H"));
const float T = std::stof(contents.at("T"));
const float kB = std::stof(contents.at("kB"));
const float beta = 1. / (kB * T);
std::cerr << "input file read" << std::endl;
// up == true, down == false;
// cache exp(dE) to constant memory
const float exp_dE[10] = { // case {neighbors}, center
::exp(beta * ( 4*J + 2*H)), // {up, up, up, up}, down
::exp(beta * ( 4*J - 2*H)), // {dn, dn, dn, dn}, up
::exp(beta * ( 2*J + 2*H)), // {up, up, up, dn}, down
::exp(beta * ( 2*J - 2*H)), // {dn, dn, dn, up}, up
::exp(beta * ( 0*J + 2*H)), // {up, up, dn, dn}, down
::exp(beta * ( 0*J - 2*H)), // {dn, dn, up, up}, up
::exp(beta * (-2*J + 2*H)), // {up, dn, dn, dn}, down
::exp(beta * (-2*J - 2*H)), // {dn, up, up, up}, up
::exp(beta * (-4*J + 2*H)), // {dn, dn, dn, dn}, down
::exp(beta * (-4*J - 2*H)) // {up, up, up, up}, up
};
const hipError_t err_dE =
hipMemcpyToSymbol(exp_dE_beta, exp_dE, sizeof(float) * 10);
assert(err_dE == 0);
std::cerr << "precalculated exp(dE) are copied to constant memory" << std::endl;
// allocate global memory to store spins and random numbers
bool *spins;
hipError_t err_spins = hipMalloc((void**)&spins, sizeof(bool) * w * h);
assert(err_spins == 0);
float *random;
hipError_t err_random = hipMalloc((void**)&random, sizeof(float) * w * h);
assert(err_random == 0);
std::cerr << "device memory for spins and randoms are allocated" << std::endl;
// prepare cuRAND generators
hiprandGenerator_t rng;
hiprandStatus_t st_gen = hiprandCreateGenerator(&rng, HIPRAND_RNG_PSEUDO_DEFAULT);
assert(st_gen == HIPRAND_STATUS_SUCCESS);
hiprandStatus_t st_seed = hiprandSetPseudoRandomGeneratorSeed(rng, seed);
assert(st_seed == HIPRAND_STATUS_SUCCESS);
std::cerr << "cuRAND generator created" << std::endl;
// set initial configuration as random boolean
bool *snapshot = new bool[w * h];
std::cerr << "host memory for snapshot allocated" << std::endl;
std::mt19937 mt(seed);
std::bernoulli_distribution distro(0.5);
for(std::size_t i=0; i < w * h; ++i)
snapshot[i] = distro(mt);
std::cerr << "initial snapshot created" << std::endl;
hipError_t cpy_init =
hipMemcpy(spins, snapshot, sizeof(bool)*w*h, hipMemcpyHostToDevice);
assert(cpy_init == 0);
std::cerr << "initial state copied" << std::endl;
#if defined(OUTPUT_TEXT)
std::ofstream ofs("ising_traj.dat");
char *traj = new char[w * h + 1]; // +1 so the stream insertion below sees a terminated C string
traj[w * h] = '\0';
#endif
for(std::size_t i=0; i<step; ++i)
{
#ifdef OUTPUT_PNG
// copy snapshot
hipError_t ercpy = hipMemcpy(
snapshot, spins, sizeof(bool) * w * h, hipMemcpyDeviceToHost);
assert(ercpy == 0);
// write out
std::ostringstream filename;
filename << "ising" << std::setfill('0') << std::setw(digit(step))
<< i << ".png";
png::image<png::rgb_pixel> image(w, h);
for(std::size_t i=0; i<w; ++i)
{
for(std::size_t j=0; j<h; ++j)
{
std::size_t offset = i + w * j;
if(snapshot[offset])
image[i][j] = png::rgb_pixel(255, 255, 255);
else
image[i][j] = png::rgb_pixel(0,0,0);
}
}
image.write(filename.str().c_str());
#elif defined(OUTPUT_TEXT)
hipError_t ercpy = hipMemcpy(
snapshot, spins, sizeof(bool) * w * h, hipMemcpyDeviceToHost);
assert(ercpy == 0);
for(std::size_t i=0; i<w*h; ++i)
traj[i] = static_cast<char>(snapshot[i]) + 48;
ofs << traj << std::endl;
#endif //OUTPUT
// generate random numbers
hiprandStatus_t st_genrnd = hiprandGenerateUniform(rng, random, w * h);
assert(st_genrnd == HIPRAND_STATUS_SUCCESS);
// update spins
dim3 blocks(w/32, h/32);
dim3 threads(32, 32);
hipLaunchKernelGGL(( update_field), dim3(blocks), dim3(threads), 0, 0, spins, random, w, h, true);
hipLaunchKernelGGL(( update_field), dim3(blocks), dim3(threads), 0, 0, spins, random, w, h, false);
}
hiprandStatus_t destroy = hiprandDestroyGenerator(rng);
assert(destroy == HIPRAND_STATUS_SUCCESS);
hipError_t free_spin = hipFree(spins);
assert(free_spin == 0);
hipError_t free_random = hipFree(random);
assert(free_random == 0);
return 0;
}
| 0baf0b005d5cbc865c182136ae52acc80ea299b8.cu | #include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <map>
#include <random>
#include <iomanip>
#include <cmath>
#include <cassert>
#include <cstdint>
#include <png++/png.hpp>
#include <curand.h>
template<typename charT>
std::map<std::string, std::string>
read_file(std::basic_istream<charT>& is)
{
if(!is.good())
throw std::invalid_argument("file open error");
std::map<std::string, std::string> contents;
while(!is.eof())
{
std::string line;
std::getline(is, line);
if(line.empty()) continue;
std::istringstream iss(line);
iss >> std::ws;
if(iss.peek() == '#') continue;
std::string key, value;
char eq;
iss >> key >> eq >> value;
if(eq != '=') throw std::runtime_error("file format error");
contents[key] = value;
}
return contents;
}
std::size_t digit(std::size_t n)
{
std::size_t dig = 0;
while(n > 0)
{
++dig;
n /= 10;
}
return dig;
}
// pre-calculated exp(dE / (kB * T))
__constant__ float exp_dE_beta[10];
// use texture memory as spins
// texture<bool, 2, cudaReadModeElementType> field1;
// texture<bool, 2, cudaReadModeElementType> field2;
__global__
void update_field(bool* spins, const float* random,
const std::size_t x_size, const std::size_t y_size, bool turn)
{
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
if(turn)
{
if((x+y)%2 == 1) return;
}
else
{
if((x+y)%2 == 0) return;
}
const std::size_t xdim = blockDim.x * gridDim.x;
const std::size_t offset = x + y * xdim;
if(offset >= x_size * y_size) return;
const std::size_t n_offset = (y+1 < y_size) ? x + (y+1) * xdim : x;
const std::size_t e_offset = (x+1 < x_size) ? (x+1) + y * xdim : y * xdim;
const std::size_t s_offset = (y-1 >= 0) ? x + (y-1) * xdim : x + (y_size-1) * xdim;
const std::size_t w_offset = (x-1 >= 0) ? (x-1) + y * xdim : x_size - 1 + y * xdim;
const bool c = spins[offset]; // center
const bool n = spins[n_offset]; // north
const bool e = spins[e_offset]; // east
const bool s = spins[s_offset]; // south
const bool w = spins[w_offset]; // west
std::size_t dJ = 0;
if(c == n) ++dJ;
if(c == e) ++dJ;
if(c == s) ++dJ;
if(c == w) ++dJ;
const std::size_t dH = c ? 1 : 0;
if(exp_dE_beta[dH + dJ * 2] > random[offset])
spins[offset] = (!c);
return;
}
int main(int argc, char **argv)
{
if (argc != 2)
{
std::cerr << "Usage: ./ising <input.dat>" << std::endl;
std::cerr << "input: width = <int>" << std::endl;
std::cerr << " : height = <int>" << std::endl;
std::cerr << " : steps = <int>" << std::endl;
std::cerr << " : seed = <int>" << std::endl;
std::cerr << " : J = <float>" << std::endl;
std::cerr << " : H = <float>" << std::endl;
std::cerr << " : T = <float>" << std::endl;
std::cerr << " : kB = <float>" << std::endl;
return 1;
}
std::ifstream ifs(argv[1]);
if(!ifs.good())
{
std::cerr << "file output error: " << argv[1] << std::endl;
return 1;
}
const std::map<std::string, std::string> contents = read_file(ifs);
const std::size_t w = std::stoul(contents.at("width"));
const std::size_t h = std::stoul(contents.at("height"));
const std::size_t step = std::stoul(contents.at("steps"));
const std::uint64_t seed = std::stoul(contents.at("seed"));
const float J = std::stof(contents.at("J"));
const float H = std::stof(contents.at("H"));
const float T = std::stof(contents.at("T"));
const float kB = std::stof(contents.at("kB"));
const float beta = 1. / (kB * T);
std::cerr << "input file read" << std::endl;
// up == true, down == false;
// cache exp(dE) to constant memory
const float exp_dE[10] = { // case {neighbors}, center
std::exp(beta * ( 4*J + 2*H)), // {up, up, up, up}, down
std::exp(beta * ( 4*J - 2*H)), // {dn, dn, dn, dn}, up
std::exp(beta * ( 2*J + 2*H)), // {up, up, up, dn}, down
std::exp(beta * ( 2*J - 2*H)), // {dn, dn, dn, up}, up
std::exp(beta * ( 0*J + 2*H)), // {up, up, dn, dn}, down
std::exp(beta * ( 0*J - 2*H)), // {dn, dn, up, up}, up
std::exp(beta * (-2*J + 2*H)), // {up, dn, dn, dn}, down
std::exp(beta * (-2*J - 2*H)), // {dn, up, up, up}, up
std::exp(beta * (-4*J + 2*H)), // {dn, dn, dn, dn}, down
std::exp(beta * (-4*J - 2*H)) // {up, up, up, up}, up
};
const cudaError_t err_dE =
cudaMemcpyToSymbol(exp_dE_beta, exp_dE, sizeof(float) * 10);
assert(err_dE == 0);
std::cerr << "precalculated exp(dE) are copied to constant memory" << std::endl;
// allocate global memory to store spins and random numbers
bool *spins;
cudaError_t err_spins = cudaMalloc((void**)&spins, sizeof(bool) * w * h);
assert(err_spins == 0);
float *random;
cudaError_t err_random = cudaMalloc((void**)&random, sizeof(float) * w * h);
assert(err_random == 0);
std::cerr << "device memory for spins and randoms are allocated" << std::endl;
// prepare cuRAND generators
curandGenerator_t rng;
curandStatus_t st_gen = curandCreateGenerator(&rng, CURAND_RNG_PSEUDO_DEFAULT);
assert(st_gen == CURAND_STATUS_SUCCESS);
curandStatus_t st_seed = curandSetPseudoRandomGeneratorSeed(rng, seed);
assert(st_seed == CURAND_STATUS_SUCCESS);
std::cerr << "cuRAND generator created" << std::endl;
// set initial configuration as random boolean
bool *snapshot = new bool[w * h];
std::cerr << "host memory for snapshot allocated" << std::endl;
std::mt19937 mt(seed);
std::bernoulli_distribution distro(0.5);
for(std::size_t i=0; i < w * h; ++i)
snapshot[i] = distro(mt);
std::cerr << "initial snapshot created" << std::endl;
cudaError_t cpy_init =
cudaMemcpy(spins, snapshot, sizeof(bool)*w*h, cudaMemcpyHostToDevice);
assert(cpy_init == 0);
std::cerr << "initial state copied" << std::endl;
#if defined(OUTPUT_TEXT)
std::ofstream ofs("ising_traj.dat");
char *traj = new char[w * h + 1]; // +1 so the stream insertion below sees a terminated C string
traj[w * h] = '\0';
#endif
for(std::size_t i=0; i<step; ++i)
{
#ifdef OUTPUT_PNG
// copy snapshot
cudaError_t ercpy = cudaMemcpy(
snapshot, spins, sizeof(bool) * w * h, cudaMemcpyDeviceToHost);
assert(ercpy == 0);
// write out
std::ostringstream filename;
filename << "ising" << std::setfill('0') << std::setw(digit(step))
<< i << ".png";
png::image<png::rgb_pixel> image(w, h);
for(std::size_t i=0; i<w; ++i)
{
for(std::size_t j=0; j<h; ++j)
{
std::size_t offset = i + w * j;
if(snapshot[offset])
image[i][j] = png::rgb_pixel(255, 255, 255);
else
image[i][j] = png::rgb_pixel(0,0,0);
}
}
image.write(filename.str().c_str());
#elif defined(OUTPUT_TEXT)
cudaError_t ercpy = cudaMemcpy(
snapshot, spins, sizeof(bool) * w * h, cudaMemcpyDeviceToHost);
assert(ercpy == 0);
for(std::size_t i=0; i<w*h; ++i)
traj[i] = static_cast<char>(snapshot[i]) + 48;
ofs << traj << std::endl;
#endif //OUTPUT
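// One Monte Carlo sweep: a fresh uniform number is drawn for every site,
// then the two checkerboard sub-lattices are updated in separate launches
// so that a spin is never written while a neighbour is being read.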
// generate random numbers
curandStatus_t st_genrnd = curandGenerateUniform(rng, random, w * h);
assert(st_genrnd == CURAND_STATUS_SUCCESS);
// update spins
dim3 blocks(w/32, h/32);
dim3 threads(32, 32);
update_field<<<blocks, threads>>>(spins, random, w, h, true);
update_field<<<blocks, threads>>>(spins, random, w, h, false);
}
curandStatus_t destroy = curandDestroyGenerator(rng);
assert(destroy == CURAND_STATUS_SUCCESS);
cudaError_t free_spin = cudaFree(spins);
assert(free_spin == 0);
cudaError_t free_random = cudaFree(random);
assert(free_random == 0);
return 0;
}
|
44e77281105b37e9889bfc25791c5efe6255d65c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
All modification made by Cambricon Corporation: © 2018 Cambricon Corporation
All rights reserved.
All other contributions:
Copyright (c) 2014--2018, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void AdaDeltaUpdate(int N, Dtype* g, Dtype* h, Dtype* h2,
Dtype momentum, Dtype delta, Dtype local_rate) {
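// AdaDelta step: h accumulates the running average of squared gradients,
// h2 the running average of squared updates; the gradient is rescaled by
// sqrt((h2 + delta) / (h + delta)) and finally multiplied by local_rate.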
CUDA_KERNEL_LOOP(i, N) {
float gi = g[i];
float hi = h[i] = momentum * h[i] + (1-momentum) * gi * gi;
gi = gi * sqrt((h2[i] + delta) / (hi + delta));
h2[i] = momentum * h2[i] + (1-momentum) * gi * gi;
g[i] = local_rate * gi;
}
}
template <typename Dtype>
void adadelta_update_gpu(int N, Dtype* g, Dtype* h, Dtype* h2, Dtype momentum,
Dtype delta, Dtype local_rate) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AdaDeltaUpdate<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, g, h, h2, momentum, delta, local_rate);
CUDA_POST_KERNEL_CHECK;
}
template void adadelta_update_gpu<float>(int , float*, float*, float*,
float, float, float);
template void adadelta_update_gpu<double>(int, double*, double*, double*,
double, double, double);
} // namespace caffe
| 44e77281105b37e9889bfc25791c5efe6255d65c.cu | /*
All modification made by Cambricon Corporation: © 2018 Cambricon Corporation
All rights reserved.
All other contributions:
Copyright (c) 2014--2018, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void AdaDeltaUpdate(int N, Dtype* g, Dtype* h, Dtype* h2,
Dtype momentum, Dtype delta, Dtype local_rate) {
CUDA_KERNEL_LOOP(i, N) {
float gi = g[i];
float hi = h[i] = momentum * h[i] + (1-momentum) * gi * gi;
gi = gi * sqrt((h2[i] + delta) / (hi + delta));
h2[i] = momentum * h2[i] + (1-momentum) * gi * gi;
g[i] = local_rate * gi;
}
}
template <typename Dtype>
void adadelta_update_gpu(int N, Dtype* g, Dtype* h, Dtype* h2, Dtype momentum,
Dtype delta, Dtype local_rate) {
AdaDeltaUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, g, h, h2, momentum, delta, local_rate);
CUDA_POST_KERNEL_CHECK;
}
template void adadelta_update_gpu<float>(int , float*, float*, float*,
float, float, float);
template void adadelta_update_gpu<double>(int, double*, double*, double*,
double, double, double);
} // namespace caffe
|
05896e7f477d01de75c6db9e6c0f4f5822cb854b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Just run sh compileRun.sh
* Use config.h in order to adjust problem size
*/
#include <hip/hip_runtime.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "config.h"
void printSparseMatrix(double* A) {
for (int i = 0; i < N; i++) {
int x = i % D;
int y = i / D;
for (int j = 0; j < N; j++) {
double value = 0;
if (j == i) {
value = A[5 * i];
} else if (j == i - 1 && x > 0) {
value = A[5 * i + 1];
} else if (j == i + 1 && x < D - 1) {
value = A[5 * i + 2];
} else if (j == i - D && y > 0) {
value = A[5 * i + 3];
} else if (j == i + D && y < D - 1) {
value = A[5 * i + 4];
}
printf("%10.6f", value);
}
printf("\n");
}
}
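// Sparse storage used throughout this file: the 5-point stencil row i is
// packed as A[5*i] = diagonal, A[5*i+1]/A[5*i+2] = couplings to i-1 / i+1,
// and A[5*i+3]/A[5*i+4] = couplings to i-D / i+D (see printSparseMatrix).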
__global__
void iterateILU(double* srcU, double* dstU, char* smallError) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = index % 3;
int i = index / 3;
int x = i % D;
int y = i / D;
if (i < N && (offset == 0 || x < D - 1 && offset == 1 || y < D - 1 && offset == 2)) {
double value = 0;
if (offset == 0) {
if (x > 0) value += srcU[5 * (i - 1) + 2] * srcU[5 * (i - 1) + 2];
if (y > 0) value += srcU[5 * (i - D) + 4] * srcU[5 * (i - D) + 4];
value = sqrt(4 - value);
} else {
value = -1 / srcU[5 * i];
}
dstU[5 * i + 2 * offset] = value;
if (fabs(value - srcU[5 * i + 2 * offset]) >= EPSILON_ILU) {
*smallError = 0;
}
}
}
__global__
void initMatrix(double* A) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < 5 * N) {
A[i] = (i % 5 == 0);
}
}
__global__
void transpose(double* A, double* B) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < 5 * N) {
int offset = i % 5;
if (offset == 0) {
A[i] = B[i];
} else {
int j = i / 5 - D * (offset == 3) + D * (offset == 4)
- (offset == 1) + (offset == 2);
A[i] = B[5 * j + 2 - (offset - 1) % 2 + 2 * ((offset - 1) / 2)];
}
}
}
double func(double x, double y) {
return 8 * M_PI * M_PI * sin(2 * M_PI * x) * sin(2 * M_PI * y);
}
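// Red/black Gauss-Seidel with a diagonal wavefront: a cell on anti-diagonal
// diagIdx is only updated once the iteration count k has passed it, and
// uHistory keeps the last D iterates of every cell in a ring buffer. The
// smallError flags, indexed modulo D, record whether any cell still moved
// by at least EPSILON_GSV.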
__global__
void calculateGSV(double* A, double *uHistory, double *base, char *smallError, int sourceTime, int time, int lastTime, int offset, int k) {
int i = 2 * (blockIdx.x * blockDim.x + threadIdx.x) + offset;
if (i < N) {
int x = i % D;
int y = i / D;
int diagIdx = (x + y) / 2;
if (diagIdx < k) {
double sum = base[i];
if (y > 0) sum -= A[5 * i + 3] * uHistory[i - D + sourceTime];
if (y < D - 1) sum -= A[5 * i + 4] * uHistory[i + D + sourceTime];
if (x > 0) sum -= A[5 * i + 1] * uHistory[i - 1 + sourceTime];
if (x < D - 1) sum -= A[5 * i + 2] * uHistory[i + 1 + sourceTime];
sum /= A[5 * i];
if (fabsf(sum - uHistory[i + lastTime]) >= EPSILON_GSV) {
smallError[(k - diagIdx + D) % D] = 0;
}
uHistory[i + time] = sum;
}
}
}
__global__
void fetchU(double *uHistory, double *u, int k) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
int x = i % D;
int y = i / D;
int diagIdx = (x + y) / 2;
u[i] = uHistory[i + ((k + 1 + diagIdx) % D) * N];
}
}
void decompose(double* d_U_, double* d_L, char* d_smallError, int* iterations, int blockSize) {
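// Fixed-point iteration for the incomplete factorisation: the two buffers
// d_U[0]/d_U[1] are ping-ponged until no entry changes by more than
// EPSILON_ILU, after which the converged upper factor is transposed into
// d_L so that solve() can use the pair (L, U) as a preconditioner.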
*iterations = 0;
int gridSize3N = (3 * N + blockSize - 1) / blockSize;
int gridSize5N = (5 * N + blockSize - 1) / blockSize;
double* d_U[2];
d_U[0] = d_U_;
hipMalloc((void**) &d_U[1], 5 * N * sizeof(double));
hipMemset(d_smallError, 0, 1);
// Initialize matrices with identity
hipLaunchKernelGGL(( initMatrix), dim3(gridSize5N), dim3(blockSize), 0, 0, d_U[0]);
hipLaunchKernelGGL(( initMatrix), dim3(gridSize5N), dim3(blockSize), 0, 0, d_U[1]);
for (int m = 0;; m++) {
hipMemset(d_smallError, 1, 1);
hipLaunchKernelGGL(( iterateILU), dim3(gridSize3N),dim3(blockSize), 0, 0, d_U[m % 2 == 0], d_U[m % 2], d_smallError);
(*iterations)++;
char smallError;
hipMemcpy(&smallError, d_smallError, 1, hipMemcpyDeviceToHost);
if (smallError && *iterations % 2 == 0) break;
}
hipLaunchKernelGGL(( transpose), dim3(gridSize5N), dim3(blockSize), 0, 0, d_L, d_U[(*iterations) % 2 == 0]);
hipFree(d_U[1]);
}
void solveGSV(double* d_A, double* d_u, double* d_b, double* d_uHistory, char* d_smallError, int *iterations, int blockSize) {
*iterations = 0;
int halfN = (N + 1) / 2;
int gridSizeN = (N + blockSize - 1) / blockSize;
int gridSizeHalfN = (halfN + blockSize - 1) / blockSize;
hipMemset(d_smallError, 0, D);
// Calculate u
for (int k = 1; ; k++) {
int time = (k % D) * N;
int lastTime = ((k - 1 + D) % D) * N;
hipMemset(d_smallError + (k % D), 1, 1);
// Black fields
hipLaunchKernelGGL(( calculateGSV), dim3(gridSizeHalfN), dim3(blockSize), 0, 0, d_A, d_uHistory, d_b, d_smallError, lastTime, time, lastTime, 0, k);
// White fields
hipLaunchKernelGGL(( calculateGSV), dim3(gridSizeHalfN), dim3(blockSize), 0, 0, d_A, d_uHistory, d_b, d_smallError, time, time, lastTime, 1, k);
(*iterations)++;
if (k >= D) {
char smallError;
hipMemcpy(&smallError, d_smallError + ((k + 1) % D), 1, hipMemcpyDeviceToHost);
if (smallError) break;
}
}
// Fetch result
hipLaunchKernelGGL(( fetchU), dim3(gridSizeN), dim3(blockSize), 0, 0, d_uHistory, d_u, *iterations);
}
void solveBr(double* d_L, double* d_U, double* d_r, double* d_p, double* d_tmp, double* d_uHistory, char* d_smallError, int blockSize) {
int it;
solveGSV(d_L, d_tmp, d_r, d_uHistory, d_smallError, &it, blockSize);
solveGSV(d_U, d_p, d_tmp, d_uHistory, d_smallError, &it, blockSize);
}
void solve(double *x, int *iterations, int blockSize) {
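// Preconditioned conjugate gradient for the 2-D Poisson problem. The GPU
// only applies the (L, U) preconditioner through solveBr(); the matrix-free
// products with the 5-point operator, the dot products and the vector
// updates run on the host with OpenMP, as the commented-out kernel calls
// suggest they once ran on the device.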
*iterations = 0;
// Allocate memory
double *d_uHistory;
hipMalloc((void**) &d_uHistory, D * N * sizeof(double));
hipMemset(d_uHistory, 0, D * N * sizeof(double));
char *d_smallError;
hipMalloc((void**) &d_smallError, D);
hipMemset(d_smallError, 0, D);
double* d_U;
hipMalloc((void**) &d_U, 5 * N * sizeof(double));
double* d_L;
hipMalloc((void**) &d_L, 5 * N * sizeof(double));
int it;
decompose(d_U, d_L, d_smallError, &it, blockSize);
printf("%d iterations for ILU decomposition\n", it);
double* d_r;
hipMalloc((void**) &d_r, N * sizeof(double));
double* d_p;
hipMalloc((void**) &d_p, N * sizeof(double));
double* d_tmp0;
hipMalloc((void**) &d_tmp0, N * sizeof(double));
double* d_tmp1;
hipMalloc((void**) &d_tmp1, N * sizeof(double));
double delta = 0, deltaHat;
double* Br = (double*) malloc(N * sizeof(double));
double* p = (double*) malloc(N * sizeof(double));
double* r = (double*) malloc(N * sizeof(double));
double* base = (double*) malloc(N * sizeof(double));
int i, ix;
#pragma omp parallel for private(i)
for (i = 0; i < N; i++) {
x[i] = 1;
int x = i % D;
int y = i / D;
double f = func(H * x + H, H * y + H);
base[i] = H * H * f;
}
//initR0<<<gridSizeN,blockSize>>>(d_r, d_base, d_x);
#pragma omp parallel for private(i,ix)
for (i = 0; i < N; i++) {
int ix = i % D;
double residuum = base[i];
if (ix - 1 >= 0) residuum += x[i - 1];
if (ix + 1 < D) residuum += x[i + 1];
if (i - D >= 0) residuum += x[i - D];
if (i + D < N) residuum += x[i + D];
residuum -= 4 * x[i];
r[i] = residuum;
}
hipMemcpy(d_r, r, N * sizeof(double), hipMemcpyHostToDevice);
solveBr(d_L, d_U, d_r, d_p, d_tmp0, d_uHistory, d_smallError, blockSize);
hipMemcpy(p, d_p, N * sizeof(double), hipMemcpyDeviceToHost);
//hipMemset(d_delta, 0, 2 * sizeof(double));
//scalarProduct<<<gridSizeN,blockSize>>>(d_r, d_p, d_delta);
delta = 0;
#pragma omp parallel for private(i) reduction(+:delta)
for (i = 0; i < N; i++) {
delta += r[i] * p[i];
}
while (delta >= EPSILON * EPSILON) {
/*calculateAx<<<gridSizeN,blockSize>>>(d_p, d_tmp0);
scalarProduct<<<gridSizeN,blockSize>>>(d_p, d_tmp0, d_delta + 1);
hipMemcpy(&deltaHat, d_delta + 1, sizeof(double), hipMemcpyDeviceToHost);
deltaHat = delta / deltaHat;*/
deltaHat = 0;
#pragma omp parallel for private(i, ix) reduction(+:deltaHat)
for (i = 0; i < N; i++) {
ix = i % D;
double v = 0;
if (ix - 1 >= 0) v -= p[i - 1];
if (ix + 1 < D) v -= p[i + 1];
if (i - D >= 0) v -= p[i - D];
if (i + D < N) v -= p[i + D];
v += 4 * p[i];
deltaHat += p[i] * v;
}
deltaHat = delta / deltaHat;
/*addfv<<<gridSizeN,blockSize>>>(d_x, d_x, deltaHat, d_p);
addfv<<<gridSizeN,blockSize>>>(d_r, d_r, -deltaHat, d_tmp0);*/
#pragma omp parallel for private(i, ix)
for (i = 0; i < N; i++) {
ix = i % D;
double v = 0;
if (ix - 1 >= 0) v -= p[i - 1];
if (ix + 1 < D) v -= p[i + 1];
if (i - D >= 0) v -= p[i - D];
if (i + D < N) v -= p[i + D];
v += 4 * p[i];
x[i] += deltaHat * p[i];
r[i] -= deltaHat * v;
}
hipMemcpy(d_r, r, N * sizeof(double), hipMemcpyHostToDevice);
//hipMemset(d_delta, 0, 2 * sizeof(double));
solveBr(d_L, d_U, d_r, d_tmp1, d_tmp0, d_uHistory, d_smallError, blockSize);
//scalarProduct<<<gridSizeN,blockSize>>>(d_r, d_tmp1, d_delta);
//hipMemcpy(&newDelta, d_delta, sizeof(double), hipMemcpyDeviceToHost);*/
hipMemcpy(Br, d_tmp1, N * sizeof(double), hipMemcpyDeviceToHost);
double newDelta = 0;
#pragma omp parallel for private(i) reduction(+:newDelta)
for (i = 0; i < N; i++) {
newDelta += r[i] * Br[i];
}
//addfv<<<gridSizeN,blockSize>>>(d_p, d_tmp1, newDelta / delta, d_p);
delta = newDelta / delta;
#pragma omp parallel for private(i)
for (i = 0; i < N; i++) {
p[i] = Br[i] + delta * p[i];
}
delta = newDelta;
(*iterations)++;
}
// Release memory
hipFree(d_uHistory);
hipFree(d_smallError);
hipFree(d_U);
hipFree(d_L);
hipFree(d_r);
hipFree(d_p);
hipFree(d_tmp0);
hipFree(d_tmp1);
free(Br);
free(p);
free(r);
free(base);
}
double analyticU(double x, double y) {
return sin(2 * M_PI * x) * sin(2 * M_PI * y);
}
int main(void) {
int i, j;
double u[N];
hipSetDevice(CUDA_DEVICE);
int device;
hipGetDevice(&device);
struct hipDeviceProp_t prop;
hipGetDeviceProperties(& prop, device);
int blockSize = prop.warpSize;
printf("Run on %s (device %d) with blocksize %d\n",
prop.name, device, blockSize);
printf("l = %d\nd = %d\nn = %d\n\n", L, D, N);
int it;
solve(u, &it, blockSize);
if (SHOW_RESULTS) {
printf("\nResult:\n");
for (i = 0; i < D; i++) {
for (j = 0; j < D; j++) {
printf("%8.4f", u[j + D * i]);
}
printf("\n");
}
printf("\nAnalytic:\n");
for (i = 0; i < D; i++) {
for (j = 0; j < D; j++) {
printf("%8.4f", analyticU(j * H + H, i * H + H));
}
printf("\n");
}
printf("\n");
}
double maxError = 0.0;
for (i = 0; i < D; i++) {
for (j = 0; j < D; j++) {
double error = fabs(analyticU(j * H + H, i * H + H) - u[j + D * i]);
maxError = fmax(error, maxError);
}
}
printf("Max error: %4.8f\n", maxError);
printf("Iterations: %d\n", it);
return 0;
}
| 05896e7f477d01de75c6db9e6c0f4f5822cb854b.cu | /*
* Just run sh compileRun.sh
* Use config.h in order to adjust problem size
*/
#include <cuda.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "config.h"
void printSparseMatrix(double* A) {
for (int i = 0; i < N; i++) {
int x = i % D;
int y = i / D;
for (int j = 0; j < N; j++) {
double value = 0;
if (j == i) {
value = A[5 * i];
} else if (j == i - 1 && x > 0) {
value = A[5 * i + 1];
} else if (j == i + 1 && x < D - 1) {
value = A[5 * i + 2];
} else if (j == i - D && y > 0) {
value = A[5 * i + 3];
} else if (j == i + D && y < D - 1) {
value = A[5 * i + 4];
}
printf("%10.6f", value);
}
printf("\n");
}
}
__global__
void iterateILU(double* srcU, double* dstU, char* smallError) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = index % 3;
int i = index / 3;
int x = i % D;
int y = i / D;
if (i < N && (offset == 0 || x < D - 1 && offset == 1 || y < D - 1 && offset == 2)) {
double value = 0;
if (offset == 0) {
if (x > 0) value += srcU[5 * (i - 1) + 2] * srcU[5 * (i - 1) + 2];
if (y > 0) value += srcU[5 * (i - D) + 4] * srcU[5 * (i - D) + 4];
value = sqrt(4 - value);
} else {
value = -1 / srcU[5 * i];
}
dstU[5 * i + 2 * offset] = value;
if (fabs(value - srcU[5 * i + 2 * offset]) >= EPSILON_ILU) {
*smallError = 0;
}
}
}
__global__
void initMatrix(double* A) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < 5 * N) {
A[i] = (i % 5 == 0);
}
}
__global__
void transpose(double* A, double* B) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < 5 * N) {
int offset = i % 5;
if (offset == 0) {
A[i] = B[i];
} else {
int j = i / 5 - D * (offset == 3) + D * (offset == 4)
- (offset == 1) + (offset == 2);
A[i] = B[5 * j + 2 - (offset - 1) % 2 + 2 * ((offset - 1) / 2)];
}
}
}
double func(double x, double y) {
return 8 * M_PI * M_PI * sin(2 * M_PI * x) * sin(2 * M_PI * y);
}
__global__
void calculateGSV(double* A, double *uHistory, double *base, char *smallError, int sourceTime, int time, int lastTime, int offset, int k) {
int i = 2 * (blockIdx.x * blockDim.x + threadIdx.x) + offset;
if (i < N) {
int x = i % D;
int y = i / D;
int diagIdx = (x + y) / 2;
if (diagIdx < k) {
double sum = base[i];
if (y > 0) sum -= A[5 * i + 3] * uHistory[i - D + sourceTime];
if (y < D - 1) sum -= A[5 * i + 4] * uHistory[i + D + sourceTime];
if (x > 0) sum -= A[5 * i + 1] * uHistory[i - 1 + sourceTime];
if (x < D - 1) sum -= A[5 * i + 2] * uHistory[i + 1 + sourceTime];
sum /= A[5 * i];
if (fabsf(sum - uHistory[i + lastTime]) >= EPSILON_GSV) {
smallError[(k - diagIdx + D) % D] = 0;
}
uHistory[i + time] = sum;
}
}
}
__global__
void fetchU(double *uHistory, double *u, int k) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
int x = i % D;
int y = i / D;
int diagIdx = (x + y) / 2;
u[i] = uHistory[i + ((k + 1 + diagIdx) % D) * N];
}
}
void decompose(double* d_U_, double* d_L, char* d_smallError, int* iterations, int blockSize) {
*iterations = 0;
int gridSize3N = (3 * N + blockSize - 1) / blockSize;
int gridSize5N = (5 * N + blockSize - 1) / blockSize;
double* d_U[2];
d_U[0] = d_U_;
cudaMalloc((void**) &d_U[1], 5 * N * sizeof(double));
cudaMemset(d_smallError, 0, 1);
// Initialize matrices with identity
initMatrix<<<gridSize5N, blockSize>>>(d_U[0]);
initMatrix<<<gridSize5N, blockSize>>>(d_U[1]);
for (int m = 0;; m++) {
cudaMemset(d_smallError, 1, 1);
iterateILU<<<gridSize3N,blockSize>>>(d_U[m % 2 == 0], d_U[m % 2], d_smallError);
(*iterations)++;
char smallError;
cudaMemcpy(&smallError, d_smallError, 1, cudaMemcpyDeviceToHost);
if (smallError && *iterations % 2 == 0) break;
}
transpose<<<gridSize5N, blockSize>>>(d_L, d_U[(*iterations) % 2 == 0]);
cudaFree(d_U[1]);
}
void solveGSV(double* d_A, double* d_u, double* d_b, double* d_uHistory, char* d_smallError, int *iterations, int blockSize) {
*iterations = 0;
int halfN = (N + 1) / 2;
int gridSizeN = (N + blockSize - 1) / blockSize;
int gridSizeHalfN = (halfN + blockSize - 1) / blockSize;
cudaMemset(d_smallError, 0, D);
// Calculate u
for (int k = 1; ; k++) {
int time = (k % D) * N;
int lastTime = ((k - 1 + D) % D) * N;
cudaMemset(d_smallError + (k % D), 1, 1);
// Black fields
calculateGSV<<<gridSizeHalfN, blockSize>>>(d_A, d_uHistory, d_b, d_smallError, lastTime, time, lastTime, 0, k);
// White fields
calculateGSV<<<gridSizeHalfN, blockSize>>>(d_A, d_uHistory, d_b, d_smallError, time, time, lastTime, 1, k);
(*iterations)++;
if (k >= D) {
char smallError;
cudaMemcpy(&smallError, d_smallError + ((k + 1) % D), 1, cudaMemcpyDeviceToHost);
if (smallError) break;
}
}
// Fetch result
fetchU<<<gridSizeN, blockSize>>>(d_uHistory, d_u, *iterations);
}
void solveBr(double* d_L, double* d_U, double* d_r, double* d_p, double* d_tmp, double* d_uHistory, char* d_smallError, int blockSize) {
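// Preconditioner application: B*r is obtained by solving L*tmp = r followed
// by U*p = tmp, each with the iterative red/black solver above.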
int it;
solveGSV(d_L, d_tmp, d_r, d_uHistory, d_smallError, &it, blockSize);
solveGSV(d_U, d_p, d_tmp, d_uHistory, d_smallError, &it, blockSize);
}
void solve(double *x, int *iterations, int blockSize) {
*iterations = 0;
// Allocate memory
double *d_uHistory;
cudaMalloc((void**) &d_uHistory, D * N * sizeof(double));
cudaMemset(d_uHistory, 0, D * N * sizeof(double));
char *d_smallError;
cudaMalloc((void**) &d_smallError, D);
cudaMemset(d_smallError, 0, D);
double* d_U;
cudaMalloc((void**) &d_U, 5 * N * sizeof(double));
double* d_L;
cudaMalloc((void**) &d_L, 5 * N * sizeof(double));
int it;
decompose(d_U, d_L, d_smallError, &it, blockSize);
printf("%d iterations for ILU decomposition\n", it);
double* d_r;
cudaMalloc((void**) &d_r, N * sizeof(double));
double* d_p;
cudaMalloc((void**) &d_p, N * sizeof(double));
double* d_tmp0;
cudaMalloc((void**) &d_tmp0, N * sizeof(double));
double* d_tmp1;
cudaMalloc((void**) &d_tmp1, N * sizeof(double));
double delta = 0, deltaHat;
double* Br = (double*) malloc(N * sizeof(double));
double* p = (double*) malloc(N * sizeof(double));
double* r = (double*) malloc(N * sizeof(double));
double* base = (double*) malloc(N * sizeof(double));
int i, ix;
#pragma omp parallel for private(i)
for (i = 0; i < N; i++) {
x[i] = 1;
int x = i % D;
int y = i / D;
double f = func(H * x + H, H * y + H);
base[i] = H * H * f;
}
//initR0<<<gridSizeN,blockSize>>>(d_r, d_base, d_x);
#pragma omp parallel for private(i,ix)
for (i = 0; i < N; i++) {
int ix = i % D;
double residuum = base[i];
if (ix - 1 >= 0) residuum += x[i - 1];
if (ix + 1 < D) residuum += x[i + 1];
if (i - D >= 0) residuum += x[i - D];
if (i + D < N) residuum += x[i + D];
residuum -= 4 * x[i];
r[i] = residuum;
}
cudaMemcpy(d_r, r, N * sizeof(double), cudaMemcpyHostToDevice);
solveBr(d_L, d_U, d_r, d_p, d_tmp0, d_uHistory, d_smallError, blockSize);
cudaMemcpy(p, d_p, N * sizeof(double), cudaMemcpyDeviceToHost);
//cudaMemset(d_delta, 0, 2 * sizeof(double));
//scalarProduct<<<gridSizeN,blockSize>>>(d_r, d_p, d_delta);
delta = 0;
#pragma omp parallel for private(i) reduction(+:delta)
for (i = 0; i < N; i++) {
delta += r[i] * p[i];
}
while (delta >= EPSILON * EPSILON) {
/*calculateAx<<<gridSizeN,blockSize>>>(d_p, d_tmp0);
scalarProduct<<<gridSizeN,blockSize>>>(d_p, d_tmp0, d_delta + 1);
cudaMemcpy(&deltaHat, d_delta + 1, sizeof(double), cudaMemcpyDeviceToHost);
deltaHat = delta / deltaHat;*/
deltaHat = 0;
#pragma omp parallel for private(i, ix) reduction(+:deltaHat)
for (i = 0; i < N; i++) {
ix = i % D;
double v = 0;
if (ix - 1 >= 0) v -= p[i - 1];
if (ix + 1 < D) v -= p[i + 1];
if (i - D >= 0) v -= p[i - D];
if (i + D < N) v -= p[i + D];
v += 4 * p[i];
deltaHat += p[i] * v;
}
deltaHat = delta / deltaHat;
/*addfv<<<gridSizeN,blockSize>>>(d_x, d_x, deltaHat, d_p);
addfv<<<gridSizeN,blockSize>>>(d_r, d_r, -deltaHat, d_tmp0);*/
#pragma omp parallel for private(i, ix)
for (i = 0; i < N; i++) {
ix = i % D;
double v = 0;
if (ix - 1 >= 0) v -= p[i - 1];
if (ix + 1 < D) v -= p[i + 1];
if (i - D >= 0) v -= p[i - D];
if (i + D < N) v -= p[i + D];
v += 4 * p[i];
x[i] += deltaHat * p[i];
r[i] -= deltaHat * v;
}
cudaMemcpy(d_r, r, N * sizeof(double), cudaMemcpyHostToDevice);
//cudaMemset(d_delta, 0, 2 * sizeof(double));
solveBr(d_L, d_U, d_r, d_tmp1, d_tmp0, d_uHistory, d_smallError, blockSize);
//scalarProduct<<<gridSizeN,blockSize>>>(d_r, d_tmp1, d_delta);
//cudaMemcpy(&newDelta, d_delta, sizeof(double), cudaMemcpyDeviceToHost);*/
cudaMemcpy(Br, d_tmp1, N * sizeof(double), cudaMemcpyDeviceToHost);
double newDelta = 0;
#pragma omp parallel for private(i) reduction(+:newDelta)
for (i = 0; i < N; i++) {
newDelta += r[i] * Br[i];
}
//addfv<<<gridSizeN,blockSize>>>(d_p, d_tmp1, newDelta / delta, d_p);
delta = newDelta / delta;
#pragma omp parallel for private(i)
for (i = 0; i < N; i++) {
p[i] = Br[i] + delta * p[i];
}
delta = newDelta;
(*iterations)++;
}
// Release memory
cudaFree(d_uHistory);
cudaFree(d_smallError);
cudaFree(d_U);
cudaFree(d_L);
cudaFree(d_r);
cudaFree(d_p);
cudaFree(d_tmp0);
cudaFree(d_tmp1);
free(Br);
free(p);
free(r);
free(base);
}
double analyticU(double x, double y) {
return sin(2 * M_PI * x) * sin(2 * M_PI * y);
}
int main(void) {
int i, j;
double u[N];
cudaSetDevice(CUDA_DEVICE);
int device;
cudaGetDevice(&device);
struct cudaDeviceProp prop;
cudaGetDeviceProperties(& prop, device);
int blockSize = prop.warpSize;
printf("Run on %s (device %d) with blocksize %d\n",
prop.name, device, blockSize);
printf("l = %d\nd = %d\nn = %d\n\n", L, D, N);
int it;
solve(u, &it, blockSize);
if (SHOW_RESULTS) {
printf("\nResult:\n");
for (i = 0; i < D; i++) {
for (j = 0; j < D; j++) {
printf("%8.4f", u[j + D * i]);
}
printf("\n");
}
printf("\nAnalytic:\n");
for (i = 0; i < D; i++) {
for (j = 0; j < D; j++) {
printf("%8.4f", analyticU(j * H + H, i * H + H));
}
printf("\n");
}
printf("\n");
}
double maxError = 0.0;
for (i = 0; i < D; i++) {
for (j = 0; j < D; j++) {
double error = fabs(analyticU(j * H + H, i * H + H) - u[j + D * i]);
maxError = fmax(error, maxError);
}
}
printf("Max error: %4.8f\n", maxError);
printf("Iterations: %d\n", it);
return 0;
}
|
a69ba9648b0e77c46332d81feca5962c0a543f5c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void filter_kernel(const float* box_preds, const float* cls_preds, const float* dir_preds, const int* anchor_mask, const float* dev_anchors_px, const float* dev_anchors_py, const float* dev_anchors_pz, const float* dev_anchors_dx, const float* dev_anchors_dy, const float* dev_anchors_dz, const float* dev_anchors_ro, float* filtered_box, float* filtered_score, int* filtered_dir, float* box_for_nms, int* filter_count, const float FLOAT_MIN, const float FLOAT_MAX, const float score_threshold, const int NUM_BOX_CORNERS, const int NUM_OUTPUT_BOX_FEATURE)
{
// boxes ([N, 7] Tensor): normal boxes: x, y, z, w, l, h, r
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// sigmoid function
float score = 1/(1+expf(-cls_preds[tid]));
if(anchor_mask[tid] == 1 && score > score_threshold)
{
int counter = atomicAdd(filter_count, 1);
float za = dev_anchors_pz[tid] + dev_anchors_dz[tid]/2;
//decode network output
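// (residual encoding, as used by PointPillars/SECOND-style detectors:
// centre offsets are scaled by the anchor's xy diagonal, sizes are
// exp-encoded relative to the anchor, and the yaw is a plain additive offset)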
float diagonal = sqrtf(dev_anchors_dx[tid]*dev_anchors_dx[tid] + dev_anchors_dy[tid]*dev_anchors_dy[tid]);
float box_px = box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 0] * diagonal + dev_anchors_px[tid];
float box_py = box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 1] * diagonal + dev_anchors_py[tid];
float box_pz = box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 2] * dev_anchors_dz[tid] + za;
float box_dx = expf(box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 3]) * dev_anchors_dx[tid];
float box_dy = expf(box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 4]) * dev_anchors_dy[tid];
float box_dz = expf(box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 5]) * dev_anchors_dz[tid];
float box_ro = box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 6] + dev_anchors_ro[tid];
box_pz = box_pz - box_dz/2;
filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 0] = box_px;
filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 1] = box_py;
filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 2] = box_pz;
filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 3] = box_dx;
filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 4] = box_dy;
filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 5] = box_dz;
filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 6] = box_ro;
filtered_score[counter] = score;
int direction_label;
if(dir_preds[tid*2 + 0] < dir_preds[tid*2 + 1])
{
direction_label = 1;
}
else
{
direction_label = 0;
}
filtered_dir[counter] = direction_label;
//convert normal box(normal boxes: x, y, z, w, l, h, r) to box(xmin, ymin, xmax, ymax) for nms calculation
//First: dx, dy -> box(x0y0, x0y1, x1y0, x1y1)
float corners[NUM_3D_BOX_CORNERS_MACRO] = {float(-0.5*box_dx), float(-0.5*box_dy),
float(-0.5*box_dx), float( 0.5*box_dy),
float( 0.5*box_dx), float( 0.5*box_dy),
float( 0.5*box_dx), float(-0.5*box_dy)};
//Second: Rotate, Offset and convert to point(xmin, ymin, xmax, ymax)
float rotated_corners[NUM_3D_BOX_CORNERS_MACRO];
float offset_corners[NUM_3D_BOX_CORNERS_MACRO];
float sin_yaw = sinf(box_ro);
float cos_yaw = cosf(box_ro);
float xmin = FLOAT_MAX;
float ymin = FLOAT_MAX;
float xmax = FLOAT_MIN;
float ymax = FLOAT_MIN;
for(size_t i = 0; i < NUM_BOX_CORNERS; i++)
{
rotated_corners[i*2 + 0] = cos_yaw*corners[i*2 + 0] - sin_yaw*corners[i*2 + 1];
rotated_corners[i*2 + 1] = sin_yaw*corners[i*2 + 0] + cos_yaw*corners[i*2 + 1];
offset_corners[i*2 + 0] = rotated_corners[i*2 + 0] + box_px;
offset_corners[i*2 + 1] = rotated_corners[i*2 + 1] + box_py;
xmin = fminf(xmin, offset_corners[i*2 + 0]);
ymin = fminf(ymin, offset_corners[i*2 + 1]);
xmax = fmaxf(xmax, offset_corners[i*2 + 0]);
ymax = fmaxf(ymax, offset_corners[i*2 + 1]);
}
// box_for_nms(num_box, 4)
box_for_nms[counter*NUM_BOX_CORNERS + 0] = xmin;
box_for_nms[counter*NUM_BOX_CORNERS + 1] = ymin;
box_for_nms[counter*NUM_BOX_CORNERS + 2] = xmax;
box_for_nms[counter*NUM_BOX_CORNERS + 3] = ymax;
}
} | a69ba9648b0e77c46332d81feca5962c0a543f5c.cu | #include "includes.h"
__global__ void filter_kernel(const float* box_preds, const float* cls_preds, const float* dir_preds, const int* anchor_mask, const float* dev_anchors_px, const float* dev_anchors_py, const float* dev_anchors_pz, const float* dev_anchors_dx, const float* dev_anchors_dy, const float* dev_anchors_dz, const float* dev_anchors_ro, float* filtered_box, float* filtered_score, int* filtered_dir, float* box_for_nms, int* filter_count, const float FLOAT_MIN, const float FLOAT_MAX, const float score_threshold, const int NUM_BOX_CORNERS, const int NUM_OUTPUT_BOX_FEATURE)
{
// boxes ([N, 7] Tensor): normal boxes: x, y, z, w, l, h, r
int tid = threadIdx.x + blockIdx.x * blockDim.x;
//sigmoid function
float score = 1/(1+expf(-cls_preds[tid]));
if(anchor_mask[tid] == 1 && score > score_threshold)
{
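// Reserve the next free slot in the compacted output arrays with an atomic counter.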
int counter = atomicAdd(filter_count, 1);
float za = dev_anchors_pz[tid] + dev_anchors_dz[tid]/2;
//decode network output
float diagonal = sqrtf(dev_anchors_dx[tid]*dev_anchors_dx[tid] + dev_anchors_dy[tid]*dev_anchors_dy[tid]);
float box_px = box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 0] * diagonal + dev_anchors_px[tid];
float box_py = box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 1] * diagonal + dev_anchors_py[tid];
float box_pz = box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 2] * dev_anchors_dz[tid] + za;
float box_dx = expf(box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 3]) * dev_anchors_dx[tid];
float box_dy = expf(box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 4]) * dev_anchors_dy[tid];
float box_dz = expf(box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 5]) * dev_anchors_dz[tid];
float box_ro = box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 6] + dev_anchors_ro[tid];
box_pz = box_pz - box_dz/2;
filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 0] = box_px;
filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 1] = box_py;
filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 2] = box_pz;
filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 3] = box_dx;
filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 4] = box_dy;
filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 5] = box_dz;
filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 6] = box_ro;
filtered_score[counter] = score;
int direction_label;
if(dir_preds[tid*2 + 0] < dir_preds[tid*2 + 1])
{
direction_label = 1;
}
else
{
direction_label = 0;
}
filtered_dir[counter] = direction_label;
//convert normal box(normal boxes: x, y, z, w, l, h, r) to box(xmin, ymin, xmax, ymax) for nms calculation
//First: dx, dy -> box(x0y0, x0y1, x1y0, x1y1)
float corners[NUM_3D_BOX_CORNERS_MACRO] = {float(-0.5*box_dx), float(-0.5*box_dy),
float(-0.5*box_dx), float( 0.5*box_dy),
float( 0.5*box_dx), float( 0.5*box_dy),
float( 0.5*box_dx), float(-0.5*box_dy)};
//Second: Rotate, Offset and convert to point(xmin, ymin, xmax, ymax)
float rotated_corners[NUM_3D_BOX_CORNERS_MACRO];
float offset_corners[NUM_3D_BOX_CORNERS_MACRO];
float sin_yaw = sinf(box_ro);
float cos_yaw = cosf(box_ro);
float xmin = FLOAT_MAX;
float ymin = FLOAT_MAX;
float xmax = FLOAT_MIN;
float ymax = FLOAT_MIN;
for(size_t i = 0; i < NUM_BOX_CORNERS; i++)
{
rotated_corners[i*2 + 0] = cos_yaw*corners[i*2 + 0] - sin_yaw*corners[i*2 + 1];
rotated_corners[i*2 + 1] = sin_yaw*corners[i*2 + 0] + cos_yaw*corners[i*2 + 1];
offset_corners[i*2 + 0] = rotated_corners[i*2 + 0] + box_px;
offset_corners[i*2 + 1] = rotated_corners[i*2 + 1] + box_py;
xmin = fminf(xmin, offset_corners[i*2 + 0]);
ymin = fminf(ymin, offset_corners[i*2 + 1]);
xmax = fmaxf(xmax, offset_corners[i*2 + 0]);
ymax = fmaxf(ymax, offset_corners[i*2 + 1]);
}
// box_for_nms(num_box, 4)
box_for_nms[counter*NUM_BOX_CORNERS + 0] = xmin;
box_for_nms[counter*NUM_BOX_CORNERS + 1] = ymin;
box_for_nms[counter*NUM_BOX_CORNERS + 2] = xmax;
box_for_nms[counter*NUM_BOX_CORNERS + 3] = ymax;
}
} |
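// Illustrative launch only (names below are placeholders, not defined in this file):
// assuming num_anchors anchors and a block size that divides it exactly (the kernel
// does not bound-check tid), a host would do something like
//   filter_kernel<<<num_anchors / 64, 64>>>(box_preds, cls_preds, dir_preds, anchor_mask,
//       ax, ay, az, adx, ady, adz, aro, out_box, out_score, out_dir, out_box_for_nms,
//       dev_filter_count, -FLT_MAX, FLT_MAX, score_threshold, 4, 7);
// with dev_filter_count zeroed on the device beforehand.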
f5daecb270296298dff9bb31b100d205e53b2eb8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<math.h>
#include <sys/time.h>
int rowSize;
__global__ void printGpu(float *d_a, int size)
{
int i, j;
for (i = 0; i < size; i++)
{
for (j = 0; j < size; j++)
printf("%0.1f\t", d_a[i * size + j]);
printf("\n");
}
}
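// Parallel Floyd-Warshall relaxation: each thread owns one row of the distance matrix
// (indexed by its global thread id) and relaxes it against every intermediate vertex k;
// thread 0 of each block stages d_a[k * rowSize + j] in shared memory for the block.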
__global__ void Cloop_FW(float *d_a, int rowSize)
{
__shared__ float intermed;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= rowSize)
return;
for (int k = 0; k < rowSize; k++)
{
for (int j = 0; j < rowSize; j++)
{
if (threadIdx.x == 0) {
intermed = d_a[k * rowSize + j];
}
__syncthreads();
d_a[col * rowSize + j ] = fmin( d_a[col * rowSize + j ], d_a[col * rowSize + k] + intermed);
}
}
}
void print_matrix(float *d, int size)
{
int i, j;
for (i = 0; i < size; i++)
{
for (j = 0; j < size; j++)
printf("%0.1f\t", d[i * size + j]);
puts("");
}
}
int main(int argc, char** argv)
{
float *d_a;
float *a;
size_t pitch;
rowSize = atoi(argv[1]);
int colSize = rowSize;
int i, j;
hipError_t err = hipSuccess;
size_t totalSize = rowSize * colSize * sizeof(float);
a = (float *) malloc(totalSize);
if (!a)
{
printf("Unable to allocate memory for host array\n");
return 1;
}
err = hipMallocPitch(&d_a, &pitch, rowSize * sizeof(float), colSize);
if (!d_a)
{
printf("memory failed for hipMalloc");
return 1;
}
if (err != 0) {
printf("%s-%d", hipGetErrorString(err), 3);
return 1;
}
for (i = 0; i < rowSize; i++)
for (j = 0; j < colSize; j++)
{
if (i == j) {
a[i * rowSize + j] = 0;
}
else {
a[i * rowSize + j] = (i + j) % 5 ? (i + j) : (i + j) % 7;
}
}
//puts("input matrix :");
//print_matrix(a,rowSize);
err = hipMemcpy(d_a, a, totalSize, hipMemcpyHostToDevice);
if (err != 0) {
printf("after h2d %s-%d", hipGetErrorString(err), 3);
return 1;
}
int threadsPerBlock;
int noOfBlocks;
if (rowSize < 1024)
{
threadsPerBlock = rowSize;
}
else
{
threadsPerBlock = 1024;
}
noOfBlocks = rowSize / threadsPerBlock;
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
/*for(k=0;k<rowSize;k++)
{
for(j = 0; j < colSize; j++)
{
Cloop_FW<<<noOfBlocks,threadsPerBlock>>>(d_a,j, k, rowSize);
hipDeviceSynchronize();
}
}*/
hipLaunchKernelGGL(( Cloop_FW) , dim3(noOfBlocks), dim3(threadsPerBlock), 0, 0, d_a, rowSize);
hipDeviceSynchronize();
gettimeofday(&tv2, NULL);
printf ("Total Execution time = %f seconds\n", (double)(tv2.tv_usec - tv1.tv_usec) / 1000000 + (double)(tv2.tv_sec - tv1.tv_sec));
printf("error = %s\n", hipGetErrorString(hipGetLastError()));
err = hipMemcpy(a, d_a, totalSize, hipMemcpyDeviceToHost);
if (err != 0) {
printf("final %s-%d", hipGetErrorString(err), 3);
return 1;
}
//puts("output matrix :");
print_matrix(a, rowSize);
free(a);
hipFree(d_a);
return 0;
}
| f5daecb270296298dff9bb31b100d205e53b2eb8.cu | #include<stdio.h>
#include<math.h>
#include <sys/time.h>
int rowSize;
__global__ void printGpu(float *d_a, int size)
{
int i, j;
for (i = 0; i < size; i++)
{
for (j = 0; j < size; j++)
printf("%0.1f\t", d_a[i * size + j]);
printf("\n");
}
}
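// Parallel Floyd-Warshall relaxation: each thread owns one row of the distance matrix
// (indexed by its global thread id) and relaxes it against every intermediate vertex k;
// thread 0 of each block stages d_a[k * rowSize + j] in shared memory for the block.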
__global__ void Cloop_FW(float *d_a, int rowSize)
{
__shared__ float intermed;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= rowSize)
return;
for (int k = 0; k < rowSize; k++)
{
for (int j = 0; j < rowSize; j++)
{
if (threadIdx.x == 0) {
intermed = d_a[k * rowSize + j];
}
__syncthreads();
d_a[col * rowSize + j ] = fmin( d_a[col * rowSize + j ], d_a[col * rowSize + k] + intermed);
}
}
}
void print_matrix(float *d, int size)
{
int i, j;
for (i = 0; i < size; i++)
{
for (j = 0; j < size; j++)
printf("%0.1f\t", d[i * size + j]);
puts("");
}
}
int main(int argc, char** argv)
{
float *d_a;
float *a;
size_t pitch;
rowSize = atoi(argv[1]);
int colSize = rowSize;
int i, j;
cudaError_t err = cudaSuccess;
size_t totalSize = rowSize * colSize * sizeof(float);
a = (float *) malloc(totalSize);
if (!a)
{
printf("Unable to allocate memory for host array\n");
return 1;
}
err = cudaMallocPitch(&d_a, &pitch, rowSize * sizeof(float), colSize);
if (!d_a)
{
printf("memory failed for cudaMalloc");
return 1;
}
if (err != 0) {
printf("%s-%d", cudaGetErrorString(err), 3);
return 1;
}
for (i = 0; i < rowSize; i++)
for (j = 0; j < colSize; j++)
{
if (i == j) {
a[i * rowSize + j] = 0;
}
else {
a[i * rowSize + j] = (i + j) % 5 ? (i + j) : (i + j) % 7;
}
}
//puts("input matrix :");
//print_matrix(a,rowSize);
err = cudaMemcpy(d_a, a, totalSize, cudaMemcpyHostToDevice);
if (err != 0) {
printf("after h2d %s-%d", cudaGetErrorString(err), 3);
return 1;
}
int threadsPerBlock;
int noOfBlocks;
if (rowSize < 1024)
{
threadsPerBlock = rowSize;
}
else
{
threadsPerBlock = 1024;
}
noOfBlocks = rowSize / threadsPerBlock;
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
/*for(k=0;k<rowSize;k++)
{
for(j = 0; j < colSize; j++)
{
Cloop_FW<<<noOfBlocks,threadsPerBlock>>>(d_a,j, k, rowSize);
cudaThreadSynchronize();
}
}*/
Cloop_FW <<< noOfBlocks, threadsPerBlock>>>(d_a, rowSize);
cudaThreadSynchronize();
gettimeofday(&tv2, NULL);
printf ("Total Execution time = %f seconds\n", (double)(tv2.tv_usec - tv1.tv_usec) / 1000000 + (double)(tv2.tv_sec - tv1.tv_sec));
printf("error = %s\n", cudaGetErrorString(cudaGetLastError()));
err = cudaMemcpy(a, d_a, totalSize, cudaMemcpyDeviceToHost);
if (err != 0) {
printf("final %s-%d", cudaGetErrorString(err), 3);
return 1;
}
//puts("output matrix :");
print_matrix(a, rowSize);
free(a);
cudaFree(d_a);
return 0;
}
|
deb0d72b09bb46597b5184d8352dbac494aa9a21.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <unistd.h>
#include <string>
#include <sys/time.h>
#include <float.h>
// helper functions
#include "helper_string.h"
#include "helper_cuda.h"
#define DEFAULT_INPUT_SIZE 8192
#define BLOCK_SIZE 32
int k=0;
int size;
float *A, *B, *GOLD;
bool host_check = false;
bool generator_debug = false;
char *gold_matrix_path, *a_matrix_path, *b_matrix_path;
void usage() {
printf("Usage: generateMatricesSingle -size=N [-generator_debug] [-host_check] [-input_a=<path>] [-input_b=<path>] [-gold=<path>]\n");
}
void generateInputMatrices()
{
float *h_A, *h_B;
FILE *f_A, *f_B;
h_A = (float*)malloc(sizeof(float) * DEFAULT_INPUT_SIZE*DEFAULT_INPUT_SIZE);
h_B = (float*)malloc(sizeof(float) * DEFAULT_INPUT_SIZE*DEFAULT_INPUT_SIZE);
printf("Max value: %f Min: %f\n", (-4.06e16-4.0004e16)+4.1e16, 4.1e16);
srand(time(NULL));
if (!generator_debug) {
for (int i=0; i<DEFAULT_INPUT_SIZE; i++) {
for (int j=0; j<DEFAULT_INPUT_SIZE; j++) {
h_A[i * DEFAULT_INPUT_SIZE + j] = (rand()/((float)(RAND_MAX)+1)*(-4.06e16-4.4e16))+4.1e16;
h_B[i * DEFAULT_INPUT_SIZE + j] = (rand()/((float)(RAND_MAX)+1)*(-4.06e16-4.4e16))+4.1e16;
}
}
} else {
for (int i=0; i<DEFAULT_INPUT_SIZE; i++) {
for (int j=0; j<DEFAULT_INPUT_SIZE; j++) {
h_A[i * DEFAULT_INPUT_SIZE + j] = float(2.0);
h_B[i * DEFAULT_INPUT_SIZE + j] = float(2.0);
}
}
}
int numZeros;
int numNans;
int numInfs;
// printf("Write\n");
f_A = fopen(a_matrix_path, "wb");
f_B = fopen(b_matrix_path, "wb");
float val;
numZeros = 0;
numNans = 0;
numInfs = 0;
for (int i = 0; i<DEFAULT_INPUT_SIZE*DEFAULT_INPUT_SIZE; i++) {
val=h_A[i];
if (val == 0) numZeros++;
if (isnan(val)) numNans++;
if (isinf(val)) numInfs++;
}
printf("Number of zeros/NaNs/INFs on matrix A: %d/%d/%d\n", numZeros, numNans, numInfs);
numZeros = 0;
numNans = 0;
numInfs = 0;
for (int i = 0; i<DEFAULT_INPUT_SIZE*DEFAULT_INPUT_SIZE; i++) {
val=h_B[i];
if (val == 0) numZeros++;
if (isnan(val)) numNans++;
if (isinf(val)) numInfs++;
}
printf("Number of zeros/NaNs/INFs on matrix B: %d/%d/%d\n", numZeros, numNans, numInfs);
for(int i=0; i<DEFAULT_INPUT_SIZE; i++)
{
fwrite(&(h_A[i * DEFAULT_INPUT_SIZE]), sizeof(float) * DEFAULT_INPUT_SIZE, 1, f_A);
}
printf("Element 32 of matrix A: %f\n", (float)h_A[32]);
printf("Element 50 of matrix B: %f\n", (float)h_B[50]);
for(int i=0; i<DEFAULT_INPUT_SIZE; i++)
{
fwrite(&(h_B[i * DEFAULT_INPUT_SIZE]), sizeof(float) * DEFAULT_INPUT_SIZE, 1, f_B);
}
printf("Done\n");
fclose(f_A);
fclose(f_B);
free(h_A);
free(h_B);
return;
}
void ReadMatrixFromFile(){
int i;
FILE *f_A, *f_B;
f_A = fopen(a_matrix_path,"rb");
f_B = fopen(b_matrix_path,"rb");
if (!(f_A&&f_B))
{
printf("Error opening matrices A, B.\n");
printf("exit on line: %d", __LINE__); exit(-1);
}
size_t ret_value[2];
for(i=0; i<k; i++)
{
ret_value[0] = fread (&A[ k * i ], sizeof(float)*k, 1, f_A);
ret_value[1] = fread (&B[ k * i ], sizeof(float)*k, 1, f_B);
if (ret_value[0] != 1 || ret_value[1] != 1) {
printf("Bad input/gold formatting: %lu ; %lu .\n", ret_value[0], ret_value[1]);
}
}
printf("Done reading matrices\n");
fclose(f_A);
fclose(f_B);
}
void GetDevice(){
hipDeviceProp_t prop;
hipError_t teste;
int count=0;
teste = hipGetDeviceCount(&count);
printf("Get Device Test: %s\n", hipGetErrorString(teste));
for (int i=0; i< count; i++) {
hipGetDeviceProperties( &prop, i );
printf( "Name: %s\n", prop.name );
}
int *ndevice; int dev = 0;
ndevice = &dev;
hipGetDevice(ndevice);
hipSetDevice(0);
hipGetDeviceProperties( &prop, 0 );
printf("\ndevice: %d %s\n", *ndevice, prop.name);
}
double mysecond()
{
struct timeval tp;
struct timezone tzp;
int i = gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
float* openmpMul(float* a, float* b, size_t size) {
double time = mysecond();
float* bT = (float*) malloc(sizeof(float)*size*size);
float* c = (float*) calloc(size*size, sizeof(float));
if (c == NULL || bT == NULL) {
printf("could not alloc hostGold matrix.");
return NULL;
}
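// Transpose b first so that the dot products below read both operands with unit stride.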
#pragma omp parallel for
for (int i=0;i<size;i++)
for (int j=0;j<size;j++)
bT[j*size+i] = b[i*size+j];
#pragma omp parallel for
for (int i=0;i<size;i++) {
for (int j=0;j<size;j++) {
for (int k=0;k<size;k++) {
c[j*size+i] += a[j*size+k] * bT[i*size+k];
}
}
}
printf("host mmul time: %.2f seconds\n", mysecond()-time);
return c;
}
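// Naive GPU matrix multiply: one thread per output element, accumulating directly in global memory.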
__global__ void MatrixMulKernel (float *d_A, float *d_B, float *d_C, int n)
{
int tx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
int ty = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int k;
for (k = 0; k < n; k++)
d_C[ty*n + tx] += d_A[ty*n + k]*d_B[k*n + tx];
}
void generateGoldMatrixHalf()
{
//================== Set block and grid size for MxM kernel
int gridsize = k/BLOCK_SIZE < 1 ? 1 : k/BLOCK_SIZE;
int blocksize = k/BLOCK_SIZE < 1 ? k : BLOCK_SIZE;
dim3 dimBlock(blocksize,blocksize);
dim3 dimGrid(gridsize,gridsize);
//====================================
////////////////////////////////////////////////////
//////////DEVICE VARS///////////////////////////////
float *d_A;
float *d_B;
float *d_C;
////////////////////////////////////////////////////
A = ( float* ) malloc( size * sizeof( float ) );
B = ( float* ) malloc( size * sizeof( float ) );
GOLD = ( float* ) malloc( size * sizeof( float ) );
ReadMatrixFromFile();
if (k <= 16) {
printf("\nMatrix A: \n");
for (int i = 0; i<k*k; i++) {
printf(" %.2e", (float)A[i]);
if ((i+1)%k == 0) printf("\n");
}
printf("\nMatrix B: \n");
for (int i = 0; i<k*k; i++) {
printf(" %.2e", (float)B[i]);
if ((i+1)%k == 0) printf("\n");
}
}
checkCudaErrors( hipMalloc( ( void** ) &d_A, size * sizeof( float ) ));
checkCudaErrors( hipMalloc( ( void** ) &d_B, size * sizeof( float ) ));
checkCudaErrors( hipMalloc( ( void** ) &d_C, size * sizeof( float ) ));
checkCudaErrors( hipMemset( d_C, 0, size * sizeof( float )) ); // zero out C
checkCudaErrors( hipMemcpy( d_A, A, size * sizeof( float ), hipMemcpyHostToDevice ) ); // PUSH A
checkCudaErrors( hipMemcpy( d_B, B, size * sizeof( float ), hipMemcpyHostToDevice ) ); // PUSH B
printf("cudaMxM... k=%d\n", k);
double time = mysecond();
hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, k);
checkCudaErrors( hipPeekAtLastError() );
checkCudaErrors( hipDeviceSynchronize() );
time=mysecond()-time;
/////////// PERF
double flops = 2.0*(double)k*k*k;
double gflops = flops / time;
double outputpersec = (double)k*k/time;
printf("kernel time: %lf\n",time);
printf("SIZE:%d OUTPUT/S:%f FLOPS:%f (GFLOPS:%.2f)\n",k, outputpersec, gflops, gflops/1000000000);
///////////
checkCudaErrors( hipMemcpy(GOLD, d_C, size * sizeof( float ), hipMemcpyDeviceToHost) );
hipFree( d_A );
hipFree( d_B );
hipFree( d_C );
printf("Analysing output on host...\n");
int i, j;
FILE *f_GOLD;
f_GOLD = fopen(gold_matrix_path, "wb");
float val;
int numZeros = 0;
int numNans = 0;
int numInfs = 0;
float maxAbsVal = 0.0;
#pragma omp parallel for
for (int i = 0; i<k*k; i++) {
val=GOLD[i];
if (fabs(val) > maxAbsVal) {
#pragma omp critical
maxAbsVal = max(fabs(val), maxAbsVal);
}
if (val == 0) {
#pragma omp atomic
numZeros++;
if (numZeros<5) printf("Zero in position (%d,%d)\n", (int)floor(i / k), (int)(i - floor(i / k) * k));
}
if (isnan(val)) {
#pragma omp atomic
numNans++;
if (numNans<5) printf("NaN in position (%d,%d)\n", (int)floor(i / k), (int)(i - floor(i / k) * k));
}
if (isinf(val)) {
#pragma omp atomic
numInfs++;
if (numInfs<5) printf("INF in position (%d,%d)\n", (int)floor(i / k), (int)(i - floor(i / k) * k));
}
}
printf("Number of zeros/NaNs/INFs on gold: %d/%d/%d\n", numZeros, numNans, numInfs);
printf("Maximum absolute value on gold: %f\n", maxAbsVal);
if (k <= 16) {
for (int i = 0; i<k*k; i++) {
printf(" %.2e", (float)GOLD[i]);
if ((i+1)%k == 0) printf("\n");
}
}
if (host_check) {
printf("Calculating mMul using OpenMP on Host...\n");
float *hostGold = openmpMul(A, B, k);
if (k <= 16) {
printf("Host CPU Gold:\n");
for (int i = 0; i<k*k; i++) {
printf(" %.2e", (float)hostGold[i]);
if ((i+1)%k == 0) printf("\n");
}
}
printf("Comparing GPU result with Host result...\n");
float maxDiff = 0.0;
float maxAbsDiff = 0.0;
for (i=0; i<k; i++) {
for (j=0; j<k; j++) {
register float diff = fabs((hostGold[i*k+j]-GOLD[i*k+j])/hostGold[i*k+j]);
register float absDiff = hostGold[i*k+j]-GOLD[i*k+j];
if (diff > maxDiff) {
maxDiff = max(diff, maxDiff);
printf("New diff! (%d,%d) hostGold!=gpuGold %e != %e (diff: %e)\n", i, j, hostGold[i*k+j], GOLD[i*k+j], diff);
}
if (absDiff > maxAbsDiff) {
maxAbsDiff = max(absDiff, maxAbsDiff);
}
// if (diff > 0.1) {
// printf("Fail! (%d,%d) hostGold!=gpuGold %f != %f (diff: %e)\n", i, j, (float)hostGold[i*k+j], (float)GOLD[i*k+j], diff);
// fflush(stdout);
// exit(-1);
// }
}
}
printf("CPU and GPU match by a relative error of up to %e element difference.\nMaximum element absolute difference: %e (relatively to float representation: %e)\nWriting to file...\n",
maxDiff, maxAbsDiff, maxAbsDiff / FLT_MAX);
}
//printf("-------------------------\n%.10f\n%.10f\n%.10f\n", GOLD[0], GOLD[1], GOLD[2]);
for(i=0; i<k; i++)
{
fwrite( &(GOLD[i * k]), sizeof(float)*k, 1, f_GOLD );
}
fclose(f_GOLD);
return;
}
int main (int argc, char** argv)
{
//====================================
//================== Read parameters
if (argc<2) {
usage();
exit (-1);
}
if (checkCmdLineFlag(argc, (const char **)argv, "size"))
{
k = getCmdLineArgumentInt(argc, (const char **)argv, "size");
if ((k <= 0)||(k % 16 != 0))
{
printf("Invalid input size given on the command-line: %d\n", k);
printf("exit on line: %d", __LINE__); exit(EXIT_FAILURE);
}
}
else
{
usage();
printf("exit on line: %d", __LINE__); exit(EXIT_FAILURE);
}
if (checkCmdLineFlag(argc, (const char **)argv, "input_a"))
{
getCmdLineArgumentString(argc, (const char **)argv, "input_a", &a_matrix_path);
}
else
{
a_matrix_path = new char[100];
snprintf(a_matrix_path, 100, "smxm_a_%i.matrix", (signed int)DEFAULT_INPUT_SIZE);
printf("Using default input_a path: %s\n", a_matrix_path);
}
if (checkCmdLineFlag(argc, (const char **)argv, "input_b"))
{
getCmdLineArgumentString(argc, (const char **)argv, "input_b", &b_matrix_path);
}
else
{
b_matrix_path = new char[100];
snprintf(b_matrix_path, 100, "smxm_b_%i.matrix", (signed int)DEFAULT_INPUT_SIZE);
printf("Using default input_a path: %s\n", b_matrix_path);
}
if (checkCmdLineFlag(argc, (const char **)argv, "gold"))
{
getCmdLineArgumentString(argc, (const char **)argv, "gold", &gold_matrix_path);
}
else
{
gold_matrix_path = new char[100];
snprintf(gold_matrix_path, 100, "smxm_gold_%i.matrix", (signed int)k);
printf("Using default gold path: %s\n", gold_matrix_path);
}
if (checkCmdLineFlag(argc, (const char **)argv, "host_check"))
{
host_check = true;
}
if (checkCmdLineFlag(argc, (const char **)argv, "generator_debug"))
{
generator_debug = true;
}
//====================================
GetDevice();
size = k * k;
printf("Each input matrix size: %.4fGB\n", (float)sizeof(float) * DEFAULT_INPUT_SIZE*DEFAULT_INPUT_SIZE / (1024*1024*1024));
FILE *test_file;
test_file=fopen(a_matrix_path, "rb");
if (!test_file)
{
printf("Generating input matrices...\n");
generateInputMatrices();
}
else
{ printf("Input matrices already exist...\n"); }
generateGoldMatrixHalf();
return 0;
}
| deb0d72b09bb46597b5184d8352dbac494aa9a21.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
#include <unistd.h>
#include <string>
#include <sys/time.h>
#include <float.h>
// helper functions
#include "helper_string.h"
#include "helper_cuda.h"
#define DEFAULT_INPUT_SIZE 8192
#define BLOCK_SIZE 32
int k=0;
int size;
float *A, *B, *GOLD;
bool host_check = false;
bool generator_debug = false;
char *gold_matrix_path, *a_matrix_path, *b_matrix_path;
void usage() {
printf("Usage: generateMatricesSingle -size=N [-generator_debug] [-host_check] [-input_a=<path>] [-input_b=<path>] [-gold=<path>]\n");
}
void generateInputMatrices()
{
float *h_A, *h_B;
FILE *f_A, *f_B;
h_A = (float*)malloc(sizeof(float) * DEFAULT_INPUT_SIZE*DEFAULT_INPUT_SIZE);
h_B = (float*)malloc(sizeof(float) * DEFAULT_INPUT_SIZE*DEFAULT_INPUT_SIZE);
printf("Max value: %f Min: %f\n", (-4.06e16-4.0004e16)+4.1e16, 4.1e16);
srand(time(NULL));
if (!generator_debug) {
for (int i=0; i<DEFAULT_INPUT_SIZE; i++) {
for (int j=0; j<DEFAULT_INPUT_SIZE; j++) {
h_A[i * DEFAULT_INPUT_SIZE + j] = (rand()/((float)(RAND_MAX)+1)*(-4.06e16-4.4e16))+4.1e16;
h_B[i * DEFAULT_INPUT_SIZE + j] = (rand()/((float)(RAND_MAX)+1)*(-4.06e16-4.4e16))+4.1e16;
}
}
} else {
for (int i=0; i<DEFAULT_INPUT_SIZE; i++) {
for (int j=0; j<DEFAULT_INPUT_SIZE; j++) {
h_A[i * DEFAULT_INPUT_SIZE + j] = float(2.0);
h_B[i * DEFAULT_INPUT_SIZE + j] = float(2.0);
}
}
}
int numZeros;
int numNans;
int numInfs;
// printf("Write\n");
f_A = fopen(a_matrix_path, "wb");
f_B = fopen(b_matrix_path, "wb");
float val;
numZeros = 0;
numNans = 0;
numInfs = 0;
for (int i = 0; i<DEFAULT_INPUT_SIZE*DEFAULT_INPUT_SIZE; i++) {
val=h_A[i];
if (val == 0) numZeros++;
if (isnan(val)) numNans++;
if (isinf(val)) numInfs++;
}
printf("Number of zeros/NaNs/INFs on matrix A: %d/%d/%d\n", numZeros, numNans, numInfs);
numZeros = 0;
numNans = 0;
numInfs = 0;
for (int i = 0; i<DEFAULT_INPUT_SIZE*DEFAULT_INPUT_SIZE; i++) {
val=h_B[i];
if (val == 0) numZeros++;
if (isnan(val)) numNans++;
if (isinf(val)) numInfs++;
}
printf("Number of zeros/NaNs/INFs on matrix B: %d/%d/%d\n", numZeros, numNans, numInfs);
for(int i=0; i<DEFAULT_INPUT_SIZE; i++)
{
fwrite(&(h_A[i * DEFAULT_INPUT_SIZE]), sizeof(float) * DEFAULT_INPUT_SIZE, 1, f_A);
}
printf("Element 32 of matrix A: %f\n", (float)h_A[32]);
printf("Element 50 of matrix B: %f\n", (float)h_B[50]);
for(int i=0; i<DEFAULT_INPUT_SIZE; i++)
{
fwrite(&(h_B[i * DEFAULT_INPUT_SIZE]), sizeof(float) * DEFAULT_INPUT_SIZE, 1, f_B);
}
printf("Done\n");
fclose(f_A);
fclose(f_B);
free(h_A);
free(h_B);
return;
}
void ReadMatrixFromFile(){
int i;
FILE *f_A, *f_B;
f_A = fopen(a_matrix_path,"rb");
f_B = fopen(b_matrix_path,"rb");
if (!(f_A&&f_B))
{
printf("Error opening matrices A, B.\n");
printf("exit on line: %d", __LINE__); exit(-1);
}
size_t ret_value[2];
for(i=0; i<k; i++)
{
ret_value[0] = fread (&A[ k * i ], sizeof(float)*k, 1, f_A);
ret_value[1] = fread (&B[ k * i ], sizeof(float)*k, 1, f_B);
if (ret_value[0] != 1 || ret_value[1] != 1) {
printf("Bad input/gold formatting: %lu ; %lu .\n", ret_value[0], ret_value[1]);
}
}
printf("Done reading matrices\n");
fclose(f_A);
fclose(f_B);
}
void GetDevice(){
cudaDeviceProp prop;
cudaError_t teste;
int count=0;
teste = cudaGetDeviceCount(&count);
printf("Get Device Test: %s\n", cudaGetErrorString(teste));
for (int i=0; i< count; i++) {
cudaGetDeviceProperties( &prop, i );
printf( "Name: %s\n", prop.name );
}
int *ndevice; int dev = 0;
ndevice = &dev;
cudaGetDevice(ndevice);
cudaSetDevice(0);
cudaGetDeviceProperties( &prop, 0 );
printf("\ndevice: %d %s\n", *ndevice, prop.name);
}
double mysecond()
{
struct timeval tp;
struct timezone tzp;
int i = gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
float* openmpMul(float* a, float* b, size_t size) {
double time = mysecond();
float* bT = (float*) malloc(sizeof(float)*size*size);
float* c = (float*) calloc(size*size, sizeof(float));
if (c == NULL || bT == NULL) {
printf("could not alloc hostGold matrix.");
return NULL;
}
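// Transpose b first so that the dot products below read both operands with unit stride.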
#pragma omp parallel for
for (int i=0;i<size;i++)
for (int j=0;j<size;j++)
bT[j*size+i] = b[i*size+j];
#pragma omp parallel for
for (int i=0;i<size;i++) {
for (int j=0;j<size;j++) {
for (int k=0;k<size;k++) {
c[j*size+i] += a[j*size+k] * bT[i*size+k];
}
}
}
printf("host mmul time: %.2f seconds\n", mysecond()-time);
return c;
}
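// Naive GPU matrix multiply: one thread per output element, accumulating directly in global memory.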
__global__ void MatrixMulKernel (float *d_A, float *d_B, float *d_C, int n)
{
int tx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
int ty = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int k;
for (k = 0; k < n; k++)
d_C[ty*n + tx] += d_A[ty*n + k]*d_B[k*n + tx];
}
void generateGoldMatrixHalf()
{
//================== Set block and grid size for MxM kernel
int gridsize = k/BLOCK_SIZE < 1 ? 1 : k/BLOCK_SIZE;
int blocksize = k/BLOCK_SIZE < 1 ? k : BLOCK_SIZE;
dim3 dimBlock(blocksize,blocksize);
dim3 dimGrid(gridsize,gridsize);
//====================================
////////////////////////////////////////////////////
//////////DEVICE VARS///////////////////////////////
float *d_A;
float *d_B;
float *d_C;
////////////////////////////////////////////////////
A = ( float* ) malloc( size * sizeof( float ) );
B = ( float* ) malloc( size * sizeof( float ) );
GOLD = ( float* ) malloc( size * sizeof( float ) );
ReadMatrixFromFile();
if (k <= 16) {
printf("\nMatrix A: \n");
for (int i = 0; i<k*k; i++) {
printf(" %.2e", (float)A[i]);
if ((i+1)%k == 0) printf("\n");
}
printf("\nMatrix B: \n");
for (int i = 0; i<k*k; i++) {
printf(" %.2e", (float)B[i]);
if ((i+1)%k == 0) printf("\n");
}
}
checkCudaErrors( cudaMalloc( ( void** ) &d_A, size * sizeof( float ) ));
checkCudaErrors( cudaMalloc( ( void** ) &d_B, size * sizeof( float ) ));
checkCudaErrors( cudaMalloc( ( void** ) &d_C, size * sizeof( float ) ));
checkCudaErrors( cudaMemset( d_C, 0, size * sizeof( float )) ); // zero out C
checkCudaErrors( cudaMemcpy( d_A, A, size * sizeof( float ), cudaMemcpyHostToDevice ) ); // PUSH A
checkCudaErrors( cudaMemcpy( d_B, B, size * sizeof( float ), cudaMemcpyHostToDevice ) ); // PUSH B
printf("cudaMxM... k=%d\n", k);
double time = mysecond();
MatrixMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, k);
checkCudaErrors( cudaPeekAtLastError() );
checkCudaErrors( cudaDeviceSynchronize() );
time=mysecond()-time;
/////////// PERF
double flops = 2.0*(double)k*k*k;
double gflops = flops / time;
double outputpersec = (double)k*k/time;
printf("kernel time: %lf\n",time);
printf("SIZE:%d OUTPUT/S:%f FLOPS:%f (GFLOPS:%.2f)\n",k, outputpersec, gflops, gflops/1000000000);
///////////
checkCudaErrors( cudaMemcpy(GOLD, d_C, size * sizeof( float ), cudaMemcpyDeviceToHost) );
cudaFree( d_A );
cudaFree( d_B );
cudaFree( d_C );
printf("Analysing output on host...\n");
int i, j;
FILE *f_GOLD;
f_GOLD = fopen(gold_matrix_path, "wb");
float val;
int numZeros = 0;
int numNans = 0;
int numInfs = 0;
float maxAbsVal = 0.0;
#pragma omp parallel for
for (int i = 0; i<k*k; i++) {
val=GOLD[i];
if (fabs(val) > maxAbsVal) {
#pragma omp critical
maxAbsVal = max(fabs(val), maxAbsVal);
}
if (val == 0) {
#pragma omp atomic
numZeros++;
if (numZeros<5) printf("Zero in position (%d,%d)\n", (int)floor(i / k), (int)(i - floor(i / k) * k));
}
if (isnan(val)) {
#pragma omp atomic
numNans++;
if (numNans<5) printf("NaN in position (%d,%d)\n", (int)floor(i / k), (int)(i - floor(i / k) * k));
}
if (isinf(val)) {
#pragma omp atomic
numInfs++;
if (numInfs<5) printf("INF in position (%d,%d)\n", (int)floor(i / k), (int)(i - floor(i / k) * k));
}
}
printf("Number of zeros/NaNs/INFs on gold: %d/%d/%d\n", numZeros, numNans, numInfs);
printf("Maximum absolute value on gold: %f\n", maxAbsVal);
if (k <= 16) {
for (int i = 0; i<k*k; i++) {
printf(" %.2e", (float)GOLD[i]);
if ((i+1)%k == 0) printf("\n");
}
}
if (host_check) {
printf("Calculating mMul using OpenMP on Host...\n");
float *hostGold = openmpMul(A, B, k);
if (k <= 16) {
printf("Host CPU Gold:\n");
for (int i = 0; i<k*k; i++) {
printf(" %.2e", (float)hostGold[i]);
if ((i+1)%k == 0) printf("\n");
}
}
printf("Comparing GPU result with Host result...\n");
float maxDiff = 0.0;
float maxAbsDiff = 0.0;
for (i=0; i<k; i++) {
for (j=0; j<k; j++) {
register float diff = fabs((hostGold[i*k+j]-GOLD[i*k+j])/hostGold[i*k+j]);
register float absDiff = hostGold[i*k+j]-GOLD[i*k+j];
if (diff > maxDiff) {
maxDiff = max(diff, maxDiff);
printf("New diff! (%d,%d) hostGold!=gpuGold %e != %e (diff: %e)\n", i, j, hostGold[i*k+j], GOLD[i*k+j], diff);
}
if (absDiff > maxAbsDiff) {
maxAbsDiff = max(absDiff, maxAbsDiff);
}
// if (diff > 0.1) {
// printf("Fail! (%d,%d) hostGold!=gpuGold %f != %f (diff: %e)\n", i, j, (float)hostGold[i*k+j], (float)GOLD[i*k+j], diff);
// fflush(stdout);
// exit(-1);
// }
}
}
printf("CPU and GPU match by a relative error of up to %e element difference.\nMaximum element absolute difference: %e (relatively to float representation: %e)\nWriting to file...\n",
maxDiff, maxAbsDiff, maxAbsDiff / FLT_MAX);
}
//printf("-------------------------\n%.10f\n%.10f\n%.10f\n", GOLD[0], GOLD[1], GOLD[2]);
for(i=0; i<k; i++)
{
fwrite( &(GOLD[i * k]), sizeof(float)*k, 1, f_GOLD );
}
fclose(f_GOLD);
return;
}
int main (int argc, char** argv)
{
//====================================
//================== Read parameters
if (argc<2) {
usage();
exit (-1);
}
if (checkCmdLineFlag(argc, (const char **)argv, "size"))
{
k = getCmdLineArgumentInt(argc, (const char **)argv, "size");
if ((k <= 0)||(k % 16 != 0))
{
printf("Invalid input size given on the command-line: %d\n", k);
printf("exit on line: %d", __LINE__); exit(EXIT_FAILURE);
}
}
else
{
usage();
printf("exit on line: %d", __LINE__); exit(EXIT_FAILURE);
}
if (checkCmdLineFlag(argc, (const char **)argv, "input_a"))
{
getCmdLineArgumentString(argc, (const char **)argv, "input_a", &a_matrix_path);
}
else
{
a_matrix_path = new char[100];
snprintf(a_matrix_path, 100, "smxm_a_%i.matrix", (signed int)DEFAULT_INPUT_SIZE);
printf("Using default input_a path: %s\n", a_matrix_path);
}
if (checkCmdLineFlag(argc, (const char **)argv, "input_b"))
{
getCmdLineArgumentString(argc, (const char **)argv, "input_b", &b_matrix_path);
}
else
{
b_matrix_path = new char[100];
snprintf(b_matrix_path, 100, "smxm_b_%i.matrix", (signed int)DEFAULT_INPUT_SIZE);
printf("Using default input_a path: %s\n", b_matrix_path);
}
if (checkCmdLineFlag(argc, (const char **)argv, "gold"))
{
getCmdLineArgumentString(argc, (const char **)argv, "gold", &gold_matrix_path);
}
else
{
gold_matrix_path = new char[100];
snprintf(gold_matrix_path, 100, "smxm_gold_%i.matrix", (signed int)k);
printf("Using default gold path: %s\n", gold_matrix_path);
}
if (checkCmdLineFlag(argc, (const char **)argv, "host_check"))
{
host_check = true;
}
if (checkCmdLineFlag(argc, (const char **)argv, "generator_debug"))
{
generator_debug = true;
}
//====================================
GetDevice();
size = k * k;
printf("Each input matrix size: %.4fGB\n", (float)sizeof(float) * DEFAULT_INPUT_SIZE*DEFAULT_INPUT_SIZE / (1024*1024*1024));
FILE *test_file;
test_file=fopen(a_matrix_path, "rb");
if (!test_file)
{
printf("Generating input matrices...\n");
generateInputMatrices();
}
else
{ printf("Input matrices already exist...\n"); }
generateGoldMatrixHalf();
return 0;
}
|
8627fa0dcd85e498fb5bf4053df4410c660a9e6c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "optimization.cuh"
#define BIAS 1.0f
using namespace CUDA;
// l = layer
// n = neuron number in layers
// w = weight number and result in neurons
extern "C"
{
//=====================================================================
// /!\ WARNING /!\
//=====================================================================
// can still be optimized by decomposing the sum
// can still be optimized by using cuDNN to compute tanh(x)
// can still be optimized by suppressing all for loops
// Can we remove "int n = threadIdx.x;"?
// The best is no loop and no new variable
// Know whether const takes time or not
// use references to primitives
// check that debugError produces no code in release mode
// do not reset the error of the output
// compiler options for MSVC
// compute the backpropagation at the same time as the error calculation?
// check whether using shared memory is quicker than passing a pointer or a value
// see pragma unroll
//=====================================================================
// The compute of output
//=====================================================================
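// Forward pass through the input layer: one thread per neuron; each thread accumulates
// the weighted sum of the raw inputs, adds the bias and applies tanh.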
__global__ void calculOutputPerceptronForInputLayer(float* weightsInInputLayer,
float* outputsInInputLayer,
const int numberOfInput,
float* inputs)
{
int n = threadIdx.x;
setOutputInInputLayer(outputsInInputLayer, n, 0.0f);
#pragma unroll
for(int w = 0; w < numberOfInput; w++)
{
addOutputInInputLayer(outputsInInputLayer, n, getWeightInInputLayer(weightsInInputLayer, numberOfInput, n, w) * inputs[w]);
}
setOutputInInputLayer(outputsInInputLayer, n, tanh(getOutputInInputLayer(outputsInInputLayer, n) + BIAS));
}
__global__ void calculOutputPerceptronForFistHiddenLayer(float* weightsInHiddenLayers,
float* outputsInInputLayer,
float* outputsInHiddenLayers,
const int numberOfNeuronInHiddenLayers)
{
int n = threadIdx.x;
setOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, 1, n, 0.0f);
#pragma unroll
for(int w = 0; w < numberOfNeuronInHiddenLayers; w++)
{
addOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, 1, n, getWeightInHiddenLayers(weightsInHiddenLayers, numberOfNeuronInHiddenLayers, 1, n, w) *
getOutputInInputLayer(outputsInInputLayer, w));
}
setOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, 1, n, tanh(getOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, 1, n) + BIAS));
}
__global__ void calculOutputPerceptronForOtherHiddenLayers(float* weightsInHiddenLayers,
float* outputsInHiddenLayers,
const int numberOfNeuronInHiddenLayers,
const int l)
{
int n = threadIdx.x;
setOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, l, n, 0.0f);
#pragma unroll
for(int w = 0; w < numberOfNeuronInHiddenLayers; w++)
{
addOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, l, n, getWeightInHiddenLayers(weightsInHiddenLayers, numberOfNeuronInHiddenLayers, l, n, w)
* getOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, l-1, w));
}
setOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, l, n, tanh(getOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, l, n) + BIAS));
}
__global__ void calculOutputPerceptronForOutputLayer(float* weightsInOutputLayer,
float* outputsInHiddenLayers,
float* outputsInOutputLayer,
const int numberOfHiddenLayers,
const int numberOfNeuronInHiddenLayers)
{
int n = threadIdx.x;
setOutputInOutputLayer(outputsInOutputLayer, n, 0.0f);
#pragma unroll
for(int w = 0; w < numberOfNeuronInHiddenLayers; w++)
{
addOutputInOutputLayer(outputsInOutputLayer, n, getWeightInOutputLayer(weightsInOutputLayer, numberOfNeuronInHiddenLayers, n, w) *
getOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, numberOfHiddenLayers-1, w));
//printf("w : %f\n", getOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, numberOfHiddenLayers-1, w));
}
//printf("output %i : %f\n", n, getOutputInOutputLayer(outputsInOutputLayer, n));
setOutputInOutputLayer(outputsInOutputLayer, n, (tanh(getOutputInOutputLayer(outputsInOutputLayer, n) + BIAS))/2.0f + 0.5f);
}
__forceinline__ __host__ void calculOutputWithoutConvertInputs(float* weightsInInputLayer,
float* weightsInHiddenLayers,
float* weightsInOutputLayer,
float* outputsInInputLayer,
float* outputsInHiddenLayers,
float* outputsInOutputLayer,
const int numberOfHiddenLayers,
const int numberOfNeuronInHiddenLayers,
const int numberOfInput,
const int numberOfOutput,
float* inputs)
{
hipLaunchKernelGGL(( calculOutputPerceptronForInputLayer), dim3(1), dim3(numberOfNeuronInHiddenLayers), 0, 0, weightsInInputLayer,
outputsInInputLayer,
numberOfInput,
inputs);
debugError();
hipLaunchKernelGGL(( calculOutputPerceptronForFistHiddenLayer), dim3(1), dim3(numberOfNeuronInHiddenLayers), 0, 0, weightsInHiddenLayers,
outputsInInputLayer,
outputsInHiddenLayers,
numberOfNeuronInHiddenLayers);
debugError();
#pragma unroll
for(int l = 2; l < numberOfHiddenLayers; l++)
{
hipLaunchKernelGGL(( calculOutputPerceptronForOtherHiddenLayers), dim3(1), dim3(numberOfNeuronInHiddenLayers), 0, 0, weightsInHiddenLayers,
outputsInHiddenLayers,
numberOfNeuronInHiddenLayers,
l);
debugError();
}
hipLaunchKernelGGL(( calculOutputPerceptronForOutputLayer), dim3(1), dim3(numberOfOutput), 0, 0, weightsInOutputLayer,
outputsInHiddenLayers,
outputsInOutputLayer,
numberOfHiddenLayers,
numberOfNeuronInHiddenLayers);
debugError();
}
__host__ void calculOutput(float* weightsInInputLayer,
float* weightsInHiddenLayers,
float* weightsInOutputLayer,
float* outputsInInputLayer,
float* outputsInHiddenLayers,
float* outputsInOutputLayer,
const int numberOfHiddenLayers,
const int numberOfNeuronInHiddenLayers,
const int numberOfInput,
const int numberOfOutput,
float* inputsCPU)
{
float* inputs_device;
hipMalloc((void**)&inputs_device, sizeof(float) * numberOfInput);
hipMemcpy(inputs_device, inputsCPU, sizeof(float) * numberOfInput, hipMemcpyHostToDevice);
calculOutputWithoutConvertInputs(weightsInInputLayer,
weightsInHiddenLayers,
weightsInOutputLayer,
outputsInInputLayer,
outputsInHiddenLayers,
outputsInOutputLayer,
numberOfHiddenLayers,
numberOfNeuronInHiddenLayers,
numberOfInput,
numberOfOutput,
inputs_device);
}
//=====================================================================
// The compute of backpropagation
//=====================================================================
__global__ void resetErrorPerceptronForInputLayer(float* errorsInInputLayer)
{
setErrorInInputLayer(errorsInInputLayer, threadIdx.x, 0.0f);
}
__global__ void resetErrorPerceptronForHiddenLayers(float* errorsInHiddenLayers,
const int numberOfNeuronInHiddenLayers,
const int l)
{
setErrorInHiddenLayers(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, l, threadIdx.x, 0.0f);
}
__global__ void resetErrorPerceptronForOutputLayer(float* errorsInOutputLayer)
{
setErrorInOutputLayer(errorsInOutputLayer, threadIdx.x, 0.0f);
}
__forceinline__ __host__ void resetError(float* errorsInInputLayer,
float* errorsInHiddenLayers,
float* errorsInOutputLayer,
const int numberOfHiddenLayers,
const int numberOfNeuronInHiddenLayers,
const int numberOfOutput)
{
hipLaunchKernelGGL(( resetErrorPerceptronForOutputLayer), dim3(1), dim3(numberOfOutput), 0, 0, errorsInOutputLayer);
debugError();
#pragma unroll
for(int l = 0; l < numberOfHiddenLayers; l++)
{
hipLaunchKernelGGL(( resetErrorPerceptronForHiddenLayers), dim3(1), dim3(numberOfNeuronInHiddenLayers), 0, 0, errorsInHiddenLayers, numberOfNeuronInHiddenLayers, l);
debugError();
}
hipLaunchKernelGGL(( resetErrorPerceptronForInputLayer), dim3(1), dim3(numberOfNeuronInHiddenLayers), 0, 0, errorsInInputLayer);
debugError();
}
__global__ void calculErrorPerceptronForOutputLayer(float* outputsInOutputLayer,
float* errorsInOutputLayer,
float* desires)
{
int n = threadIdx.x;
setErrorInOutputLayer(errorsInOutputLayer, n, (desires[n] - getOutputInOutputLayer(outputsInOutputLayer, n)) * abs(desires[n] - getOutputInOutputLayer(outputsInOutputLayer, n)));
//printf("error GPU (%i,%i) : %f\n", 10, n, getErrorInOutputLayer(errorsInOutputLayer, n));
}
__global__ void calculErrorPerceptronForLastHiddenLayers(float* weightsInOutputLayer,
float* errorsInHiddenLayers,
float* errorsInOutputLayer,
const int numberOfHiddenLayers,
const int numberOfOutput,
const int numberOfNeuronInHiddenLayers)
{
int n = threadIdx.x;
#pragma unroll
for(int w = 0; w < numberOfOutput; w++)
{
addErrorInHiddenLayers(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, numberOfHiddenLayers-1, n,
getErrorInOutputLayer(errorsInOutputLayer, w) *
getWeightInOutputLayer(weightsInOutputLayer, numberOfNeuronInHiddenLayers, w, n));
}
divideErrorInHiddenLayers(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, numberOfHiddenLayers-1, n, numberOfOutput);
//printf("error GPU (%i,%i) : %f\n", numberOfHiddenLayers-1, n, getErrorInHiddenLayers(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, numberOfHiddenLayers-1, n));
}
__global__ void calculErrorPerceptronForOtherHiddenLayers(float* weightsInHiddenLayers,
float* errorsInHiddenLayers,
const int numberOfNeuronInHiddenLayers,
const int l)
{
int n = threadIdx.x;
#pragma unroll
for(int w = 0; w < numberOfNeuronInHiddenLayers; w++)
{
addErrorInHiddenLayers(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, l, n,
getErrorInHiddenLayers(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, l+1, w) *
getWeightInHiddenLayers(weightsInHiddenLayers, numberOfNeuronInHiddenLayers, l+1, w, n));
//if(n == 0 && w == 3)
// printf("error GPU (%i,%i) : %f\n", l+1, w, getErrorInHiddenLayers(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, l+1, w));
}
divideErrorInHiddenLayers(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, l, n, numberOfNeuronInHiddenLayers);
//printf("error GPU : %f\n", getErrorInHiddenLayers(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, 1, n));
}
__global__ void calculErrorPerceptronForInputLayer(float* weightsInHiddenLayers,
float* errorsInInputLayer,
float* errorsInHiddenLayers,
const int numberOfNeuronInHiddenLayers)
{
int n = threadIdx.x;
#pragma unroll
for(int w = 0; w < numberOfNeuronInHiddenLayers; w++)
{
addErrorInInputLayer(errorsInInputLayer, n,
getErrorInHiddenLayers(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, 1, w) *
getWeightInHiddenLayers(weightsInHiddenLayers, numberOfNeuronInHiddenLayers, 1, w, n));
}
divideErrorInInputLayer(errorsInInputLayer, n, numberOfNeuronInHiddenLayers);
//printf("error GPU il : %f\n", getErrorInInputLayer(errorsInInputLayer, n));
}
__global__ void trainPerceptronForOutputLayer(float* weightsInOutputLayer,
float* outputsInHiddenLayers,
float* errorsInOutputLayer,
const int numberOfHiddenLayers,
const int numberOfNeuronInHiddenLayers,
float learningRate)
{
int n = threadIdx.x;
#pragma unroll
for(int w = 0;w < numberOfNeuronInHiddenLayers; w++) // weights[i] += learningRate * error * inputs[i];
{
addWeightInOutputLayer(weightsInOutputLayer, numberOfNeuronInHiddenLayers, n, w,
learningRate *
getErrorInOutputLayer(errorsInOutputLayer, n) *
getOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, numberOfHiddenLayers-1, w));
}
}
__global__ void trainPerceptronForOtherHiddenLayer(float* weightsInHiddenLayers,
float* outputsInHiddenLayers,
float* errorsInHiddenLayers,
const int numberOfNeuronInHiddenLayers,
const int l,
float learningRate)
{
int n = threadIdx.x;
#pragma unroll
for(int w = 0; w < numberOfNeuronInHiddenLayers; w++) // weights[i] += learningRate * error * inputs[i];
{
addWeightInHiddenLayers(weightsInHiddenLayers, numberOfNeuronInHiddenLayers, l, n, w,
learningRate *
getErrorInHiddenLayers(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, l, n) *
getOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, l-1, w));
}
}
__global__ void trainPerceptronForFirtHiddenLayer(float* weightsInHiddenLayers,
float* outputsInInputLayer,
float* errorsInHiddenLayers,
const int numberOfNeuronInHiddenLayers,
float learningRate)
{
int n = threadIdx.x;
#pragma unroll
for(int w = 0; w < numberOfNeuronInHiddenLayers; w++) // weights[i] += learningRate * error * inputs[i];
{
addWeightInHiddenLayers(weightsInHiddenLayers, numberOfNeuronInHiddenLayers, 1, n, w,
learningRate *
getErrorInHiddenLayers(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, 1, n) *
getOutputInInputLayer(outputsInInputLayer, w));
}
}
__global__ void trainPerceptronForInputLayer(float* weightsInInputLayer,
float* errorsInInputLayer,
const int numberOfInput,
float learningRate,
float* inputs)
{
int n = threadIdx.x;
#pragma unroll
for(int w = 0; w < numberOfInput; w++) // weights[i] += learningRate * error * inputs[i];
{
addWeightInInputLayer(weightsInInputLayer, numberOfInput, n, w,
learningRate *
getErrorInInputLayer(errorsInInputLayer, n) *
inputs[w]);
}
}
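// One full training step: forward pass, error reset, error backpropagation from the
// output layer down to the input layer, then per-layer weight updates.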
void backpropagation(float* weightsInInputLayer,
float* weightsInHiddenLayers,
float* weightsInOutputLayer,
float* outputsInInputLayer,
float* outputsInHiddenLayers,
float* outputsInOutputLayer,
float* errorsInInputLayer,
float* errorsInHiddenLayers,
float* errorsInOutputLayer,
const int numberOfHiddenLayers,
const int numberOfNeuronInHiddenLayers,
const int numberOfInput,
const int numberOfOutput,
float learningRate,
float* inputsCPU,
float* desiresCPU)
{
float* inputsDevice;
hipMalloc((void**)&inputsDevice, sizeof(float) * numberOfInput);
hipMemcpy(inputsDevice, inputsCPU, sizeof(float) * numberOfInput, hipMemcpyHostToDevice);
float* desiresDevice;
hipMalloc((void**)&desiresDevice, sizeof(float) * numberOfInput);
hipMemcpy(desiresDevice, desiresCPU, sizeof(float) * numberOfInput, hipMemcpyHostToDevice);
calculOutputWithoutConvertInputs(weightsInInputLayer,
weightsInHiddenLayers,
weightsInOutputLayer,
outputsInInputLayer,
outputsInHiddenLayers,
outputsInOutputLayer,
numberOfHiddenLayers,
numberOfNeuronInHiddenLayers,
numberOfInput,
numberOfOutput,
inputsDevice);
resetError(errorsInInputLayer,
errorsInHiddenLayers,
errorsInOutputLayer,
numberOfHiddenLayers,
numberOfNeuronInHiddenLayers,
numberOfOutput);
hipLaunchKernelGGL(( calculErrorPerceptronForOutputLayer), dim3(1), dim3(numberOfOutput), 0, 0, outputsInOutputLayer,
errorsInOutputLayer,
desiresDevice);
debugError();
hipLaunchKernelGGL(( calculErrorPerceptronForLastHiddenLayers), dim3(1), dim3(numberOfNeuronInHiddenLayers), 0, 0, weightsInOutputLayer,
errorsInHiddenLayers,
errorsInOutputLayer,
numberOfHiddenLayers,
numberOfOutput,
numberOfNeuronInHiddenLayers);
debugError();
#pragma unroll
for(int l = numberOfHiddenLayers-2; l >= 1; l--) // show - 1
{
hipLaunchKernelGGL(( calculErrorPerceptronForOtherHiddenLayers), dim3(1), dim3(numberOfNeuronInHiddenLayers), 0, 0, weightsInHiddenLayers,
errorsInHiddenLayers,
numberOfNeuronInHiddenLayers,
l);
debugError();
}
hipLaunchKernelGGL(( calculErrorPerceptronForInputLayer), dim3(1), dim3(numberOfNeuronInHiddenLayers), 0, 0, weightsInHiddenLayers,
errorsInInputLayer,
errorsInHiddenLayers,
numberOfNeuronInHiddenLayers);
debugError();
hipLaunchKernelGGL(( trainPerceptronForOutputLayer), dim3(1), dim3(numberOfOutput), 0, 0, weightsInOutputLayer,
outputsInHiddenLayers,
errorsInOutputLayer,
numberOfHiddenLayers,
numberOfNeuronInHiddenLayers,
learningRate);
debugError();
#pragma unroll
for(int l = numberOfHiddenLayers-1; l >= 2; l--)
{
hipLaunchKernelGGL(( trainPerceptronForOtherHiddenLayer), dim3(1), dim3(numberOfNeuronInHiddenLayers), 0, 0, weightsInHiddenLayers,
outputsInHiddenLayers,
errorsInHiddenLayers,
numberOfNeuronInHiddenLayers,
l,
learningRate);
debugError();
}
hipLaunchKernelGGL(( trainPerceptronForFirtHiddenLayer), dim3(1), dim3(numberOfNeuronInHiddenLayers), 0, 0, weightsInHiddenLayers,
outputsInInputLayer,
errorsInHiddenLayers,
numberOfNeuronInHiddenLayers,
learningRate);
debugError();
hipLaunchKernelGGL(( trainPerceptronForInputLayer), dim3(1), dim3(numberOfNeuronInHiddenLayers), 0, 0, weightsInInputLayer,
errorsInInputLayer,
numberOfInput,
learningRate,
inputsDevice);
debugError();
}
//=====================================================================
// The compute for all data
//=====================================================================
__forceinline__ float calculateClusteringRate(float* weightsInInputLayer,
float* weightsInHiddenLayers,
float* weightsInOutputLayer,
float* outputsInInputLayer,
float* outputsInHiddenLayers,
float* outputsInOutputLayer,
const int numberOfHiddenLayers,
const int numberOfNeuronInHiddenLayers,
const int numberOfInput,
const int numberOfOutput,
float* inputs,
float* desires,
const int sizeOfTestingSet)
{
int numberOfResultsClassifiedWell = 0;
int numberOfResultsMisclassefied = 0;
for(int i = 0; i < sizeOfTestingSet; i++)
{
calculOutputWithoutConvertInputs(weightsInInputLayer,
weightsInHiddenLayers,
weightsInOutputLayer,
outputsInInputLayer,
outputsInHiddenLayers,
outputsInOutputLayer,
numberOfHiddenLayers,
numberOfNeuronInHiddenLayers,
numberOfInput,
numberOfOutput,
&inputs[numberOfInput*i]);
for(int j = 0; j < numberOfOutput; j++)// EXECUTE INSIDE GLOBAL DESIRES IN IN GPU
{
if(desires[j] == 0 && outputsInOutputLayer[j] >= 0.5f
|| desires[j] == 1 && outputsInOutputLayer[j] < 0.5f)
{
numberOfResultsMisclassefied ++;
break;
}
else if(j == numberOfOutput-1)
{
numberOfResultsClassifiedWell ++;
break;
}
}
}
return -2.0;//(float)numberOfResultsClassifiedWell/(numberOfResultsClassifiedWell+numberOfResultsMisclassefied);
}
__forceinline__ void shuffleLearningSet(float* array,
const int sizeOfLearningSet,
int random,
float temp)
{
for(int i = 0; i < sizeOfLearningSet; i++)
{
random = rand()%sizeOfLearningSet;
temp = array[random];
array[random] = array[i];
array[i] = temp;
}
}
__forceinline__ void saveOnCPU()
{
}
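// Training driver: copies the learning and testing sets to the device, reshuffles an
// index array once per epoch, runs one backpropagation step per sample, and evaluates
// the clustering rate every frequencyOfClusteringRateVerification iterations.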
void TrainWithAllDatabase(float* weightsInInputLayer,
float* weightsInHiddenLayers,
float* weightsInOutputLayer,
float* outputsInInputLayer,
float* outputsInHiddenLayers,
float* outputsInOutputLayer,
float* errorsInInputLayer,
float* errorsInHiddenLayers,
float* errorsInOutputLayer,
const int numberOfHiddenLayers,
const int numberOfNeuronInHiddenLayers,
const int numberOfInput,
const int numberOfOutput,
float learningRate,
float* inputsLearningCPU,
float* inputsTestingCPU,
float* desiresLearningCPU,
float* desiresTestingCPU,
const int sizeOfLearningSet,
const int sizeOfTestingSet,
float clusteringRate,
const int numberOfTrain,
const int frequencyOfClusteringRateVerification)
{
//float newClusteringRate = 0;
float* inputsLearningDevice;
float* desiresLearningDevice;
float* inputsTestingDevice;
float* desiresTestingDevice;
srand(time(NULL));
hipMalloc((void**)&inputsLearningDevice, sizeof(float) * numberOfInput * sizeOfLearningSet);
hipMemcpy(inputsLearningDevice, inputsLearningCPU, sizeof(float) * numberOfInput * sizeOfLearningSet, hipMemcpyHostToDevice);
hipMalloc((void**)&desiresLearningDevice, sizeof(float) * numberOfInput* sizeOfLearningSet);
hipMemcpy(desiresLearningDevice, desiresLearningCPU, sizeof(float) * numberOfInput * sizeOfLearningSet, hipMemcpyHostToDevice);
hipMalloc((void**)&inputsTestingDevice, sizeof(float) * numberOfInput * sizeOfTestingSet);
hipMemcpy(inputsTestingDevice, inputsTestingCPU, sizeof(float) * numberOfInput * sizeOfTestingSet, hipMemcpyHostToDevice);
hipMalloc((void**)&desiresTestingDevice, sizeof(float) * numberOfInput * sizeOfTestingSet);
hipMemcpy(desiresTestingDevice, desiresTestingCPU, sizeof(float) * numberOfInput * sizeOfTestingSet, hipMemcpyHostToDevice);
int i, j, index;
int randomForShuffle;
float tempForShuffle;
float* arrayForShuffle = (float*)malloc(sizeof(float) * sizeOfLearningSet);
for(int i = 0; i < sizeOfLearningSet; i++)
{
arrayForShuffle[i] = i;
}
for(i = 0; i < numberOfTrain;)
{
for(j = 0; j < frequencyOfClusteringRateVerification; i++, j++)
{
if(i%sizeOfLearningSet == 0)
{
shuffleLearningSet(arrayForShuffle,
sizeOfLearningSet,
randomForShuffle,
tempForShuffle);
}
index = arrayForShuffle[i%sizeOfLearningSet];
backpropagation(weightsInInputLayer,
weightsInHiddenLayers,
weightsInOutputLayer,
outputsInInputLayer,
outputsInHiddenLayers,
outputsInOutputLayer,
errorsInInputLayer,
errorsInHiddenLayers,
errorsInOutputLayer,
numberOfHiddenLayers,
numberOfNeuronInHiddenLayers,
numberOfInput,
numberOfOutput,
learningRate,
&inputsLearningDevice[index*numberOfInput],
&desiresLearningDevice[index*numberOfOutput]);
}
calculateClusteringRate(weightsInInputLayer,
weightsInHiddenLayers,
weightsInOutputLayer,
outputsInInputLayer,
outputsInHiddenLayers,
outputsInOutputLayer,
numberOfHiddenLayers,
numberOfNeuronInHiddenLayers,
numberOfInput,
numberOfOutput,
&inputsTestingDevice[0],
&desiresTestingDevice[0],
sizeOfTestingSet);
/*if(newClusteringRate > clusteringRate)
{
clusteringRate = newClusteringRate;
saveOnCPU();
printf("Clustering rate = %f", clusteringRate);
}*/
}
}
//=====================================================================
// Tool functions
//=====================================================================
__host__ void returnNetworkOnCPU(float* weightsInInputLayerCPU,
float* weightsInHiddenLayersCPU,
float* weightsInOutputLayerCPU,
float* weightsInInputLayerGPU,
float* weightsInHiddenLayersGPU,
float* weightsInOutputLayerGPU,
int numberOfHiddenLayers,
int numberOfNeuronsInHiddenLayers,
int numberOfInput,
int numberOfOutput)
{
hipMemcpy(weightsInInputLayerCPU, weightsInInputLayerGPU, sizeof(float) * numberOfHiddenLayers * numberOfInput ,hipMemcpyDeviceToHost);
hipMemcpy(weightsInHiddenLayersCPU, weightsInHiddenLayersGPU, sizeof(float) * numberOfNeuronsInHiddenLayers * numberOfNeuronsInHiddenLayers * (numberOfHiddenLayers-1), hipMemcpyDeviceToHost);
hipMemcpy(weightsInOutputLayerCPU, weightsInOutputLayerGPU, sizeof(float) * numberOfOutput * numberOfNeuronsInHiddenLayers, hipMemcpyDeviceToHost);
}
__host__ void returnOutputOnCPU(float* outputsInOutputLayerCPU,
float* outputsInOutputLayerGPU,
int numberOfOutput)
{
hipMemcpy(outputsInOutputLayerCPU, outputsInOutputLayerGPU, sizeof(float) * numberOfOutput, hipMemcpyDeviceToHost);
debugError();
}
}
| 8627fa0dcd85e498fb5bf4053df4410c660a9e6c.cu | #include "optimization.cuh"
#define BIAS 1.0f
using namespace CUDA;
// l = layer
// n = neuron number in layers
// w = weight number and result in neurons
extern "C"
{
//=====================================================================
// /!\ WARNING /!\
//=====================================================================
// can still be optimized by decomposing the sum
// can still be optimized by using cuDNN to compute tanh(x)
// can still be optimized by suppressing all for loops
// Can we remove "int n = threadIdx.x;"?
// The best is no loop and no new variable
// Know whether const takes time or not
// use references to primitives
// check that debugError produces no code in release mode
// do not reset the error of the output
// compiler options for MSVC
// compute the backpropagation at the same time as the error calculation?
// check whether using shared memory is quicker than passing a pointer or a value
// see pragma unroll
//=====================================================================
// The compute of output
//=====================================================================
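// Forward pass through the input layer: one thread per neuron; each thread accumulates
// the weighted sum of the raw inputs, adds the bias and applies tanh.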
__global__ void calculOutputPerceptronForInputLayer(float* weightsInInputLayer,
float* outputsInInputLayer,
const int numberOfInput,
float* inputs)
{
int n = threadIdx.x;
setOutputInInputLayer(outputsInInputLayer, n, 0.0f);
#pragma unroll
for(int w = 0; w < numberOfInput; w++)
{
addOutputInInputLayer(outputsInInputLayer, n, getWeightInInputLayer(weightsInInputLayer, numberOfInput, n, w) * inputs[w]);
}
setOutputInInputLayer(outputsInInputLayer, n, tanh(getOutputInInputLayer(outputsInInputLayer, n) + BIAS));
}
__global__ void calculOutputPerceptronForFistHiddenLayer(float* weightsInHiddenLayers,
float* outputsInInputLayer,
float* outputsInHiddenLayers,
const int numberOfNeuronInHiddenLayers)
{
int n = threadIdx.x;
setOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, 1, n, 0.0f);
#pragma unroll
for(int w = 0; w < numberOfNeuronInHiddenLayers; w++)
{
addOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, 1, n, getWeightInHiddenLayers(weightsInHiddenLayers, numberOfNeuronInHiddenLayers, 1, n, w) *
getOutputInInputLayer(outputsInInputLayer, w));
}
setOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, 1, n, tanh(getOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, 1, n) + BIAS));
}
__global__ void calculOutputPerceptronForOtherHiddenLayers(float* weightsInHiddenLayers,
float* outputsInHiddenLayers,
const int numberOfNeuronInHiddenLayers,
const int l)
{
int n = threadIdx.x;
setOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, l, n, 0.0f);
#pragma unroll
for(int w = 0; w < numberOfNeuronInHiddenLayers; w++)
{
addOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, l, n, getWeightInHiddenLayers(weightsInHiddenLayers, numberOfNeuronInHiddenLayers, l, n, w)
* getOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, l-1, w));
}
setOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, l, n, tanh(getOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, l, n) + BIAS));
}
__global__ void calculOutputPerceptronForOutputLayer(float* weightsInOutputLayer,
float* outputsInHiddenLayers,
float* outputsInOutputLayer,
const int numberOfHiddenLayers,
const int numberOfNeuronInHiddenLayers)
{
int n = threadIdx.x;
setOutputInOutputLayer(outputsInOutputLayer, n, 0.0f);
#pragma unroll
for(int w = 0; w < numberOfNeuronInHiddenLayers; w++)
{
addOutputInOutputLayer(outputsInOutputLayer, n, getWeightInOutputLayer(weightsInOutputLayer, numberOfNeuronInHiddenLayers, n, w) *
getOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, numberOfHiddenLayers-1, w));
//printf("w : %f\n", getOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, numberOfHiddenLayers-1, w));
}
//printf("output %i : %f\n", n, getOutputInOutputLayer(outputsInOutputLayer, n));
setOutputInOutputLayer(outputsInOutputLayer, n, (tanh(getOutputInOutputLayer(outputsInOutputLayer, n) + BIAS))/2.0f + 0.5f);
}
__forceinline__ __host__ void calculOutputWithoutConvertInputs(float* weightsInInputLayer,
float* weightsInHiddenLayers,
float* weightsInOutputLayer,
float* outputsInInputLayer,
float* outputsInHiddenLayers,
float* outputsInOutputLayer,
const int numberOfHiddenLayers,
const int numberOfNeuronInHiddenLayers,
const int numberOfInput,
const int numberOfOutput,
float* inputs)
{
calculOutputPerceptronForInputLayer<<<1, numberOfNeuronInHiddenLayers>>>(weightsInInputLayer,
outputsInInputLayer,
numberOfInput,
inputs);
debugError();
calculOutputPerceptronForFistHiddenLayer<<<1, numberOfNeuronInHiddenLayers>>>(weightsInHiddenLayers,
outputsInInputLayer,
outputsInHiddenLayers,
numberOfNeuronInHiddenLayers);
debugError();
#pragma unroll
for(int l = 2; l < numberOfHiddenLayers; l++)
{
calculOutputPerceptronForOtherHiddenLayers<<<1, numberOfNeuronInHiddenLayers>>>(weightsInHiddenLayers,
outputsInHiddenLayers,
numberOfNeuronInHiddenLayers,
l);
debugError();
}
calculOutputPerceptronForOutputLayer<<<1, numberOfOutput>>>(weightsInOutputLayer,
outputsInHiddenLayers,
outputsInOutputLayer,
numberOfHiddenLayers,
numberOfNeuronInHiddenLayers);
debugError();
}
__host__ void calculOutput(float* weightsInInputLayer,
float* weightsInHiddenLayers,
float* weightsInOutputLayer,
float* outputsInInputLayer,
float* outputsInHiddenLayers,
float* outputsInOutputLayer,
const int numberOfHiddenLayers,
const int numberOfNeuronInHiddenLayers,
const int numberOfInput,
const int numberOfOutput,
float* inputsCPU)
{
float* inputs_device;
cudaMalloc((void**)&inputs_device, sizeof(float) * numberOfInput);
cudaMemcpy(inputs_device, inputsCPU, sizeof(float) * numberOfInput, cudaMemcpyHostToDevice);
calculOutputWithoutConvertInputs(weightsInInputLayer,
weightsInHiddenLayers,
weightsInOutputLayer,
outputsInInputLayer,
outputsInHiddenLayers,
outputsInOutputLayer,
numberOfHiddenLayers,
numberOfNeuronInHiddenLayers,
numberOfInput,
numberOfOutput,
inputs_device);
cudaFree(inputs_device); // release the per-call staging buffer so repeated calls do not leak device memory
}
//=====================================================================
// The compute of backpropagation
//=====================================================================
__global__ void resetErrorPerceptronForInputLayer(float* errorsInInputLayer)
{
setErrorInInputLayer(errorsInInputLayer, threadIdx.x, 0.0f);
}
__global__ void resetErrorPerceptronForHiddenLayers(float* errorsInHiddenLayers,
const int numberOfNeuronInHiddenLayers,
const int l)
{
setErrorInHiddenLayers(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, l, threadIdx.x, 0.0f);
}
__global__ void resetErrorPerceptronForOutputLayer(float* errorsInOutputLayer)
{
setErrorInOutputLayer(errorsInOutputLayer, threadIdx.x, 0.0f);
}
__forceinline__ __host__ void resetError(float* errorsInInputLayer,
float* errorsInHiddenLayers,
float* errorsInOutputLayer,
const int numberOfHiddenLayers,
const int numberOfNeuronInHiddenLayers,
const int numberOfOutput)
{
resetErrorPerceptronForOutputLayer<<<1, numberOfOutput>>>(errorsInOutputLayer);
debugError();
#pragma unroll
for(int l = 0; l < numberOfHiddenLayers; l++)
{
resetErrorPerceptronForHiddenLayers<<<1, numberOfNeuronInHiddenLayers>>>(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, l);
debugError();
}
resetErrorPerceptronForInputLayer<<<1, numberOfNeuronInHiddenLayers>>>(errorsInInputLayer);
debugError();
}
__global__ void calculErrorPerceptronForOutputLayer(float* outputsInOutputLayer,
float* errorsInOutputLayer,
float* desires)
{
int n = threadIdx.x;
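// Signed squared error: (desire - output) * |desire - output| keeps the sign of the difference while penalising larger errors quadratically.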
setErrorInOutputLayer(errorsInOutputLayer, n, (desires[n] - getOutputInOutputLayer(outputsInOutputLayer, n)) * abs(desires[n] - getOutputInOutputLayer(outputsInOutputLayer, n)));
//printf("error GPU (%i,%i) : %f\n", 10, n, getErrorInOutputLayer(errorsInOutputLayer, n));
}
__global__ void calculErrorPerceptronForLastHiddenLayers(float* weightsInOutputLayer,
float* errorsInHiddenLayers,
float* errorsInOutputLayer,
const int numberOfHiddenLayers,
const int numberOfOutput,
const int numberOfNeuronInHiddenLayers)
{
int n = threadIdx.x;
#pragma unroll
for(int w = 0; w < numberOfOutput; w++)
{
addErrorInHiddenLayers(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, numberOfHiddenLayers-1, n,
getErrorInOutputLayer(errorsInOutputLayer, w) *
getWeightInOutputLayer(weightsInOutputLayer, numberOfNeuronInHiddenLayers, w, n));
}
divideErrorInHiddenLayers(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, numberOfHiddenLayers-1, n, numberOfOutput);
//printf("error GPU (%i,%i) : %f\n", numberOfHiddenLayers-1, n, getErrorInHiddenLayers(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, numberOfHiddenLayers-1, n));
}
__global__ void calculErrorPerceptronForOtherHiddenLayers(float* weightsInHiddenLayers,
float* errorsInHiddenLayers,
const int numberOfNeuronInHiddenLayers,
const int l)
{
int n = threadIdx.x;
#pragma unroll
for(int w = 0; w < numberOfNeuronInHiddenLayers; w++)
{
addErrorInHiddenLayers(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, l, n,
getErrorInHiddenLayers(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, l+1, w) *
getWeightInHiddenLayers(weightsInHiddenLayers, numberOfNeuronInHiddenLayers, l+1, w, n));
//if(n == 0 && w == 3)
// printf("error GPU (%i,%i) : %f\n", l+1, w, getErrorInHiddenLayers(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, l+1, w));
}
divideErrorInHiddenLayers(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, l, n, numberOfNeuronInHiddenLayers);
//printf("error GPU : %f\n", getErrorInHiddenLayers(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, 1, n));
}
__global__ void calculErrorPerceptronForInputLayer(float* weightsInHiddenLayers,
float* errorsInInputLayer,
float* errorsInHiddenLayers,
const int numberOfNeuronInHiddenLayers)
{
int n = threadIdx.x;
#pragma unroll
for(int w = 0; w < numberOfNeuronInHiddenLayers; w++)
{
addErrorInInputLayer(errorsInInputLayer, n,
getErrorInHiddenLayers(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, 1, w) *
getWeightInHiddenLayers(weightsInHiddenLayers, numberOfNeuronInHiddenLayers, 1, w, n));
}
divideErrorInInputLayer(errorsInInputLayer, n, numberOfNeuronInHiddenLayers);
//printf("error GPU il : %f\n", getErrorInInputLayer(errorsInInputLayer, n));
}
__global__ void trainPerceptronForOutputLayer(float* weightsInOutputLayer,
float* outputsInHiddenLayers,
float* errorsInOutputLayer,
const int numberOfHiddenLayers,
const int numberOfNeuronInHiddenLayers,
float learningRate)
{
int n = threadIdx.x;
#pragma unroll
for(int w = 0;w < numberOfNeuronInHiddenLayers; w++) // weights[i] += learningRate * error * inputs[i];
{
addWeightInOutputLayer(weightsInOutputLayer, numberOfNeuronInHiddenLayers, n, w,
learningRate *
getErrorInOutputLayer(errorsInOutputLayer, n) *
getOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, numberOfHiddenLayers-1, w));
}
}
__global__ void trainPerceptronForOtherHiddenLayer(float* weightsInHiddenLayers,
float* outputsInHiddenLayers,
float* errorsInHiddenLayers,
const int numberOfNeuronInHiddenLayers,
const int l,
float learningRate)
{
int n = threadIdx.x;
#pragma unroll
for(int w = 0; w < numberOfNeuronInHiddenLayers; w++) // weights[i] += learningRate * error * inputs[i];
{
addWeightInHiddenLayers(weightsInHiddenLayers, numberOfNeuronInHiddenLayers, l, n, w,
learningRate *
getErrorInHiddenLayers(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, l, n) *
getOutputInHiddenLayers(outputsInHiddenLayers, numberOfNeuronInHiddenLayers, l-1, w));
}
}
__global__ void trainPerceptronForFirtHiddenLayer(float* weightsInHiddenLayers,
float* outputsInInputLayer,
float* errorsInHiddenLayers,
const int numberOfNeuronInHiddenLayers,
float learningRate)
{
int n = threadIdx.x;
#pragma unroll
for(int w = 0; w < numberOfNeuronInHiddenLayers; w++) // weights[i] += learningRate * error * inputs[i];
{
addWeightInHiddenLayers(weightsInHiddenLayers, numberOfNeuronInHiddenLayers, 1, n, w,
learningRate *
getErrorInHiddenLayers(errorsInHiddenLayers, numberOfNeuronInHiddenLayers, 1, n) *
getOutputInInputLayer(outputsInInputLayer, w));
}
}
__global__ void trainPerceptronForInputLayer(float* weightsInInputLayer,
float* errorsInInputLayer,
const int numberOfInput,
float learningRate,
float* inputs)
{
int n = threadIdx.x;
#pragma unroll
for(int w = 0; w < numberOfInput; w++) // weights[i] += learningRate * error * inputs[i];
{
addWeightInInputLayer(weightsInInputLayer, numberOfInput, n, w,
learningRate *
getErrorInInputLayer(errorsInInputLayer, n) *
inputs[w]);
}
}
void backpropagation(float* weightsInInputLayer,
float* weightsInHiddenLayers,
float* weightsInOutputLayer,
float* outputsInInputLayer,
float* outputsInHiddenLayers,
float* outputsInOutputLayer,
float* errorsInInputLayer,
float* errorsInHiddenLayers,
float* errorsInOutputLayer,
const int numberOfHiddenLayers,
const int numberOfNeuronInHiddenLayers,
const int numberOfInput,
const int numberOfOutput,
float learningRate,
float* inputsCPU,
float* desiresCPU)
{
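// One training step on a single example:
//  1) copy the example's inputs and desires to the device,
//  2) forward pass (calculOutputWithoutConvertInputs),
//  3) reset the stored errors,
//  4) propagate the error from the output layer back to the input layer,
//  5) update the weights of every layer using the learning rate.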
float* inputsDevice;
cudaMalloc((void**)&inputsDevice, sizeof(float) * numberOfInput);
cudaMemcpy(inputsDevice, inputsCPU, sizeof(float) * numberOfInput, cudaMemcpyHostToDevice);
float* desiresDevice;
cudaMalloc((void**)&desiresDevice, sizeof(float) * numberOfOutput); // a desire vector holds numberOfOutput values, not numberOfInput
cudaMemcpy(desiresDevice, desiresCPU, sizeof(float) * numberOfOutput, cudaMemcpyHostToDevice);
calculOutputWithoutConvertInputs(weightsInInputLayer,
weightsInHiddenLayers,
weightsInOutputLayer,
outputsInInputLayer,
outputsInHiddenLayers,
outputsInOutputLayer,
numberOfHiddenLayers,
numberOfNeuronInHiddenLayers,
numberOfInput,
numberOfOutput,
inputsDevice);
resetError(errorsInInputLayer,
errorsInHiddenLayers,
errorsInOutputLayer,
numberOfHiddenLayers,
numberOfNeuronInHiddenLayers,
numberOfOutput);
calculErrorPerceptronForOutputLayer<<<1, numberOfOutput>>>(outputsInOutputLayer,
errorsInOutputLayer,
desiresDevice);
debugError();
calculErrorPerceptronForLastHiddenLayers<<<1, numberOfNeuronInHiddenLayers>>>(weightsInOutputLayer,
errorsInHiddenLayers,
errorsInOutputLayer,
numberOfHiddenLayers,
numberOfOutput,
numberOfNeuronInHiddenLayers);
debugError();
#pragma unroll
for(int l = numberOfHiddenLayers-2; l >= 1; l--) // show - 1
{
calculErrorPerceptronForOtherHiddenLayers<<<1, numberOfNeuronInHiddenLayers>>>(weightsInHiddenLayers,
errorsInHiddenLayers,
numberOfNeuronInHiddenLayers,
l);
debugError();
}
calculErrorPerceptronForInputLayer<<<1, numberOfNeuronInHiddenLayers>>>(weightsInHiddenLayers,
errorsInInputLayer,
errorsInHiddenLayers,
numberOfNeuronInHiddenLayers);
debugError();
trainPerceptronForOutputLayer<<<1, numberOfOutput>>>(weightsInOutputLayer,
outputsInHiddenLayers,
errorsInOutputLayer,
numberOfHiddenLayers,
numberOfNeuronInHiddenLayers,
learningRate);
debugError();
#pragma unroll
for(int l = numberOfHiddenLayers-1; l >= 2; l--)
{
trainPerceptronForOtherHiddenLayer<<<1, numberOfNeuronInHiddenLayers>>>(weightsInHiddenLayers,
outputsInHiddenLayers,
errorsInHiddenLayers,
numberOfNeuronInHiddenLayers,
l,
learningRate);
debugError();
}
trainPerceptronForFirtHiddenLayer<<<1, numberOfNeuronInHiddenLayers>>>(weightsInHiddenLayers,
outputsInInputLayer,
errorsInHiddenLayers,
numberOfNeuronInHiddenLayers,
learningRate);
debugError();
trainPerceptronForInputLayer<<<1, numberOfNeuronInHiddenLayers>>>(weightsInInputLayer,
errorsInInputLayer,
numberOfInput,
learningRate,
inputsDevice);
debugError();
// Release the per-call staging buffers so repeated training steps do not leak device memory.
cudaFree(inputsDevice);
cudaFree(desiresDevice);
}
//=====================================================================
// The compute for all data
//=====================================================================
__forceinline__ float calculateClusteringRate(float* weightsInInputLayer,
float* weightsInHiddenLayers,
float* weightsInOutputLayer,
float* outputsInInputLayer,
float* outputsInHiddenLayers,
float* outputsInOutputLayer,
const int numberOfHiddenLayers,
const int numberOfNeuronInHiddenLayers,
const int numberOfInput,
const int numberOfOutput,
float* inputs,
float* desires,
const int sizeOfTestingSet)
{
int numberOfResultsClassifiedWell = 0;
int numberOfResultsMisclassefied = 0;
for(int i = 0; i < sizeOfTestingSet; i++)
{
calculOutputWithoutConvertInputs(weightsInInputLayer,
weightsInHiddenLayers,
weightsInOutputLayer,
outputsInInputLayer,
outputsInHiddenLayers,
outputsInOutputLayer,
numberOfHiddenLayers,
numberOfNeuronInHiddenLayers,
numberOfInput,
numberOfOutput,
&inputs[numberOfInput*i]);
for(int j = 0; j < numberOfOutput; j++)// TODO: run this check inside a __global__ kernel; desires and outputsInOutputLayer are device pointers
{
if(desires[j] == 0 && outputsInOutputLayer[j] >= 0.5f
|| desires[j] == 1 && outputsInOutputLayer[j] < 0.5f)
{
numberOfResultsMisclassefied ++;
break;
}
else if(j == numberOfOutput-1)
{
numberOfResultsClassifiedWell ++;
break;
}
}
}
return -2.0;//(float)numberOfResultsClassifiedWell/(numberOfResultsClassifiedWell+numberOfResultsMisclassefied);
}
__forceinline__ void shuffleLearningSet(float* array,
const int sizeOfLearningSet,
int random,
float temp)
{
for(int i = 0; i < sizeOfLearningSet; i++)
{
random = rand()%sizeOfLearningSet;
temp = array[random];
array[random] = array[i];
array[i] = temp;
}
}
__forceinline__ void saveOnCPU()
{
}
void TrainWithAllDatabase(float* weightsInInputLayer,
float* weightsInHiddenLayers,
float* weightsInOutputLayer,
float* outputsInInputLayer,
float* outputsInHiddenLayers,
float* outputsInOutputLayer,
float* errorsInInputLayer,
float* errorsInHiddenLayers,
float* errorsInOutputLayer,
const int numberOfHiddenLayers,
const int numberOfNeuronInHiddenLayers,
const int numberOfInput,
const int numberOfOutput,
float learningRate,
float* inputsLearningCPU,
float* inputsTestingCPU,
float* desiresLearningCPU,
float* desiresTestingCPU,
const int sizeOfLearningSet,
const int sizeOfTestingSet,
float clusteringRate,
const int numberOfTrain,
const int frequencyOfClusteringRateVerification)
{
//float newClusteringRate = 0;
float* inputsLearningDevice;
float* desiresLearningDevice;
float* inputsTestingDevice;
float* desiresTestingDevice;
srand(time(NULL));
cudaMalloc((void**)&inputsLearningDevice, sizeof(float) * numberOfInput * sizeOfLearningSet);
cudaMemcpy(inputsLearningDevice, inputsLearningCPU, sizeof(float) * numberOfInput * sizeOfLearningSet, cudaMemcpyHostToDevice);
cudaMalloc((void**)&desiresLearningDevice, sizeof(float) * numberOfOutput * sizeOfLearningSet); // desires hold numberOfOutput values per example
cudaMemcpy(desiresLearningDevice, desiresLearningCPU, sizeof(float) * numberOfOutput * sizeOfLearningSet, cudaMemcpyHostToDevice);
cudaMalloc((void**)&inputsTestingDevice, sizeof(float) * numberOfInput * sizeOfTestingSet);
cudaMemcpy(inputsTestingDevice, inputsTestingCPU, sizeof(float) * numberOfInput * sizeOfTestingSet, cudaMemcpyHostToDevice);
cudaMalloc((void**)&desiresTestingDevice, sizeof(float) * numberOfOutput * sizeOfTestingSet);
cudaMemcpy(desiresTestingDevice, desiresTestingCPU, sizeof(float) * numberOfOutput * sizeOfTestingSet, cudaMemcpyHostToDevice);
int i, j, index;
int randomForShuffle;
float tempForShuffle;
float* arrayForShuffle = (float*)malloc(sizeof(float) * sizeOfLearningSet);
for(int i = 0; i < sizeOfLearningSet; i++)
{
arrayForShuffle[i] = i;
}
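// Training loop: run backpropagation steps over the learning set, re-evaluating the
// clustering rate on the testing set every frequencyOfClusteringRateVerification steps.
// arrayForShuffle is reshuffled each time a full pass over the learning set completes.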
for(i = 0; i < numberOfTrain;)
{
for(j = 0; j < frequencyOfClusteringRateVerification; i++, j++)
{
if(i%sizeOfLearningSet == 0)
{
shuffleLearningSet(arrayForShuffle,
sizeOfLearningSet,
randomForShuffle,
tempForShuffle);
}
index = arrayForShuffle[i%sizeOfLearningSet];
backpropagation(weightsInInputLayer,
weightsInHiddenLayers,
weightsInOutputLayer,
outputsInInputLayer,
outputsInHiddenLayers,
outputsInOutputLayer,
errorsInInputLayer,
errorsInHiddenLayers,
errorsInOutputLayer,
numberOfHiddenLayers,
numberOfNeuronInHiddenLayers,
numberOfInput,
numberOfOutput,
learningRate,
&inputsLearningDevice[index*numberOfInput],
&desiresLearningDevice[index*numberOfOutput]);
}
calculateClusteringRate(weightsInInputLayer,
weightsInHiddenLayers,
weightsInOutputLayer,
outputsInInputLayer,
outputsInHiddenLayers,
outputsInOutputLayer,
numberOfHiddenLayers,
numberOfNeuronInHiddenLayers,
numberOfInput,
numberOfOutput,
&inputsTestingDevice[0],
&desiresTestingDevice[0],
sizeOfTestingSet);
/*if(newClusteringRate > clusteringRate)
{
clusteringRate = newClusteringRate;
saveOnCPU();
printf("Clustering rate = %f", clusteringRate);
}*/
}
cudaFree(inputsLearningDevice);
cudaFree(desiresLearningDevice);
cudaFree(inputsTestingDevice);
cudaFree(desiresTestingDevice);
free(arrayForShuffle);
}
//=====================================================================
// Tool functions
//=====================================================================
__host__ void returnNetworkOnCPU(float* weightsInInputLayerCPU,
float* weightsInHiddenLayersCPU,
float* weightsInOutputLayerCPU,
float* weightsInInputLayerGPU,
float* weightsInHiddenLayersGPU,
float* weightsInOutputLayerGPU,
int numberOfHiddenLayers,
int numberOfNeuronsInHiddenLayers,
int numberOfInput,
int numberOfOutput)
{
cudaMemcpy(weightsInInputLayerCPU, weightsInInputLayerGPU, sizeof(float) * numberOfHiddenLayers * numberOfInput ,cudaMemcpyDeviceToHost);
cudaMemcpy(weightsInHiddenLayersCPU, weightsInHiddenLayersGPU, sizeof(float) * numberOfNeuronsInHiddenLayers * numberOfNeuronsInHiddenLayers * (numberOfHiddenLayers-1), cudaMemcpyDeviceToHost);
cudaMemcpy(weightsInOutputLayerCPU, weightsInOutputLayerGPU, sizeof(float) * numberOfOutput * numberOfNeuronsInHiddenLayers, cudaMemcpyDeviceToHost);
}
__host__ void returnOutputOnCPU(float* outputsInOutputLayerCPU,
float* outputsInOutputLayerGPU,
int numberOfOutput)
{
cudaMemcpy(outputsInOutputLayerCPU, outputsInOutputLayerGPU, sizeof(float) * numberOfOutput, cudaMemcpyDeviceToHost);
debugError();
}
}
|
f4916b1a52c873cede0c2a217df5937fba327d8f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "wb.h"
#include<stdio.h>
#include<iostream>
#warning "Proper usage of the object file is : ./MatrixMultiplication input_file0 input_file1 output_file"
using namespace std;
#define TILE_WIDTH 32
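// Tiled matrix multiplication C = A * B, with A of size P x Q, B of size Q x R and
// C of size P x R. d_B is expected to hold B transposed (R x Q; see the host-side
// transpose in main). Each block computes one TILE_WIDTH x TILE_WIDTH tile of C,
// staging the matching tiles of A and B in shared memory and accumulating the
// partial dot products tile by tile, zero-padding threads that fall outside the matrices.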
__global__
void tiled_mm(float *d_A, float *d_B, float *d_C, int P, int Q,int R)
{
__shared__ float A[TILE_WIDTH][TILE_WIDTH];
__shared__ float B[TILE_WIDTH][TILE_WIDTH];
float sum = 0.f;
int tileIdx;
int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
for(tileIdx = 0;tileIdx < ((Q-1)/TILE_WIDTH + 1); tileIdx++)
{
if(row<P && tileIdx*TILE_WIDTH +threadIdx.x<Q)
A[threadIdx.y][threadIdx.x] = d_A[row * Q + tileIdx*TILE_WIDTH + threadIdx.x];
else
A[threadIdx.y][threadIdx.x] = 0;
if(col<R && tileIdx*TILE_WIDTH + threadIdx.y<Q)
B[threadIdx.y][threadIdx.x] = d_B[(tileIdx*TILE_WIDTH + threadIdx.y) + col * Q ];
else
B[threadIdx.y][threadIdx.x] = 0;
__syncthreads();
// #pragma unroll
for(int k=0;k<TILE_WIDTH;k++)
sum += A[threadIdx.y][k] * B[k][threadIdx.x];
__syncthreads();
}
if (row < P && col < R)
{
d_C[row*R + col] = sum;
}
}
int main(int argc, char *argv[]) {
float *hostInput1;
float *hostInput2;
float *hostInput2_trans;
float *hostOutput;
float *expectedOutput;
int size1, size2, size3;
/* parse the input arguments */
//@@ Insert code here
char *inputFileName1;
char *inputFileName2;
char *outputFileName;
//cout<<"BLAH!!";
if(argc!=4)
{
cerr<<"The proper usage is: ./a.out \"input0_x.raw\" \"input1_x.raw\" \"output_x.raw\"\n";
exit(0);
}
wbArg_t args = wbArg_read(argc, argv);
inputFileName1 = wbArg_getInputFile(args, 0);
inputFileName2 = wbArg_getInputFile(args, 1);
outputFileName = wbArg_getInputFile(args, 2);
hostInput1 = (float *)wbImport(inputFileName1, &size1);
hostInput2 = (float *)wbImport(inputFileName2, &size2);
expectedOutput = (float *)wbImport(outputFileName, &size3);
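// Recover the matrix dimensions from the flat array sizes, assuming A is P x Q
// (size1 = P*Q), B is Q x R (size2 = Q*R) and C is P x R (size3 = P*R), e.g.
// P = sqrt((size1 / size2) * size3) = sqrt((P*Q)/(Q*R) * P*R).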
int P = sqrt((float)((float)((float)size1 / (float)size2) * (float)size3));
int R = sqrt((float)((float)((float)size2 / (float)size1) * (float)size3));
int Q = sqrt((float)((float)((float)size1 * (float)size2) / (float)size3));
hostInput2_trans = (float *)malloc(sizeof(float) * size2);
hostOutput = (float *)malloc(sizeof(float) * size3);
for(int i=0;i<Q;i++)
for(int j=0;j<R;j++)
hostInput2_trans[j * Q + i] = hostInput2[i * R + j];
float *deviceInput1;
float *deviceInput2;
float *deviceOutput;
//cout<<"BLAH!!";
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
hipMalloc((void **)&deviceInput1, sizeof(float) * size1);
hipMalloc((void **)&deviceInput2, sizeof(float) * size2);
hipMalloc((void **)&deviceOutput, sizeof(float) * size3);
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
hipMemcpy(deviceInput1, hostInput1, sizeof(float) * size1, hipMemcpyHostToDevice);
hipMemcpy(deviceInput2, hostInput2_trans, sizeof(float) * size2, hipMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
///////////////////////////////////////////////////////
wbTime_start(Compute, "Doing the computation on the GPU");
dim3 thread_block(TILE_WIDTH, TILE_WIDTH );
dim3 grid((R-1)/TILE_WIDTH + 1, (P-1)/TILE_WIDTH + 1);
hipLaunchKernelGGL(( tiled_mm), dim3(grid), dim3(thread_block), 0, 0, deviceInput1, deviceInput2, deviceOutput, P, Q, R);
wbTime_stop(Compute, "Doing the computation on the GPU");
///////////////////////////////////////////////////////
wbTime_start(Copy, "Copying data from the GPU");
hipMemcpy(hostOutput, deviceOutput, sizeof(float) * size3, hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
int flag = 1;
for(int i=0;i<size3;i++)
if(hostOutput[i] != expectedOutput[i])
{
flag = 0;
cout<<hostOutput[i]<<expectedOutput[i]<<endl;
cout<<i;
break;
}
if(flag)
printf("\nThe results have been verified.\n");
else
printf("\nThe result is wrong.\n");
// Import host input data
//@@ Read data from the raw files here
//@@ Insert code here
// Declare and allocate host output
//@@ Insert code here
// Declare and allocate thrust device input and output vectors
//@@ Insert code here
//thrust::device_ptr<float> dp1 = &hostInput1[0] ;
//thrust::device_ptr<float> dp2 = &hostInput2[0] ;
// Copy to device
//@@ Insert code here
// Execute vector addition
//@@ Insert Code here
/////////////////////////////////////////////////////////
// Copy data back to host
//@@ Insert code here
hipFree(deviceInput1);
hipFree(deviceInput2);
hipFree(deviceOutput);
return 0;
}
| f4916b1a52c873cede0c2a217df5937fba327d8f.cu |
#include "wb.h"
#include<stdio.h>
#include<iostream>
#warning "Proper usage of the object file is : ./MatrixMultiplication input_file0 input_file1 output_file"
using namespace std;
#define TILE_WIDTH 32
__global__
void tiled_mm(float *d_A, float *d_B, float *d_C, int P, int Q,int R)
{
__shared__ float A[TILE_WIDTH][TILE_WIDTH];
__shared__ float B[TILE_WIDTH][TILE_WIDTH];
float sum = 0.f;
int tileIdx;
int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
for(tileIdx = 0;tileIdx < ((Q-1)/TILE_WIDTH + 1); tileIdx++)
{
if(row<P && tileIdx*TILE_WIDTH +threadIdx.x<Q)
A[threadIdx.y][threadIdx.x] = d_A[row * Q + tileIdx*TILE_WIDTH + threadIdx.x];
else
A[threadIdx.y][threadIdx.x] = 0;
if(col<R && tileIdx*TILE_WIDTH + threadIdx.y<Q)
B[threadIdx.y][threadIdx.x] = d_B[(tileIdx*TILE_WIDTH + threadIdx.y) + col * Q ];
else
B[threadIdx.y][threadIdx.x] = 0;
__syncthreads();
// #pragma unroll
for(int k=0;k<TILE_WIDTH;k++)
sum += A[threadIdx.y][k] * B[k][threadIdx.x];
__syncthreads();
}
if (row < P && col < R)
{
d_C[row*R + col] = sum;
}
}
int main(int argc, char *argv[]) {
float *hostInput1;
float *hostInput2;
float *hostInput2_trans;
float *hostOutput;
float *expectedOutput;
int size1, size2, size3;
/* parse the input arguments */
//@@ Insert code here
char *inputFileName1;
char *inputFileName2;
char *outputFileName;
//cout<<"BLAH!!";
if(argc!=4)
{
cerr<<"The proper usage is: ./a.out \"input0_x.raw\" \"input1_x.raw\" \"output_x.raw\"\n";
exit(0);
}
wbArg_t args = wbArg_read(argc, argv);
inputFileName1 = wbArg_getInputFile(args, 0);
inputFileName2 = wbArg_getInputFile(args, 1);
outputFileName = wbArg_getInputFile(args, 2);
hostInput1 = (float *)wbImport(inputFileName1, &size1);
hostInput2 = (float *)wbImport(inputFileName2, &size2);
expectedOutput = (float *)wbImport(outputFileName, &size3);
int P = sqrt((float)((float)((float)size1 / (float)size2) * (float)size3));
int R = sqrt((float)((float)((float)size2 / (float)size1) * (float)size3));
int Q = sqrt((float)((float)((float)size1 * (float)size2) / (float)size3));
hostInput2_trans = (float *)malloc(sizeof(float) * size2);
hostOutput = (float *)malloc(sizeof(float) * size3);
for(int i=0;i<Q;i++)
for(int j=0;j<R;j++)
hostInput2_trans[j * Q + i] = hostInput2[i * R + j];
float *deviceInput1;
float *deviceInput2;
float *deviceOutput;
//cout<<"BLAH!!";
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
cudaMalloc((void **)&deviceInput1, sizeof(float) * size1);
cudaMalloc((void **)&deviceInput2, sizeof(float) * size2);
cudaMalloc((void **)&deviceOutput, sizeof(float) * size3);
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
cudaMemcpy(deviceInput1, hostInput1, sizeof(float) * size1, cudaMemcpyHostToDevice);
cudaMemcpy(deviceInput2, hostInput2_trans, sizeof(float) * size2, cudaMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
///////////////////////////////////////////////////////
wbTime_start(Compute, "Doing the computation on the GPU");
dim3 thread_block(TILE_WIDTH, TILE_WIDTH );
dim3 grid((R-1)/TILE_WIDTH + 1, (P-1)/TILE_WIDTH + 1);
tiled_mm<<<grid, thread_block>>>(deviceInput1, deviceInput2, deviceOutput, P, Q, R);
wbTime_stop(Compute, "Doing the computation on the GPU");
///////////////////////////////////////////////////////
wbTime_start(Copy, "Copying data from the GPU");
cudaMemcpy(hostOutput, deviceOutput, sizeof(float) * size3, cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
int flag = 1;
for(int i=0;i<size3;i++)
if(hostOutput[i] != expectedOutput[i])
{
flag = 0;
cout<<hostOutput[i]<<expectedOutput[i]<<endl;
cout<<i;
break;
}
if(flag)
printf("\nThe results have been verified.\n");
else
printf("\nThe result is wrong.\n");
// Import host input data
//@@ Read data from the raw files here
//@@ Insert code here
// Declare and allocate host output
//@@ Insert code here
// Declare and allocate thrust device input and output vectors
//@@ Insert code here
//thrust::device_ptr<float> dp1 = &hostInput1[0] ;
//thrust::device_ptr<float> dp2 = &hostInput2[0] ;
// Copy to device
//@@ Insert code here
// Execute vector addition
//@@ Insert Code here
/////////////////////////////////////////////////////////
// Copy data back to host
//@@ Insert code here
cudaFree(deviceInput1);
cudaFree(deviceInput2);
cudaFree(deviceOutput);
return 0;
}
|
ea968ca866302b454f838f75e477fbccff40c148.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "helpers.h"
__constant__ float ckernel[81];
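// 2D convolution with shared-memory tiling: each block loads its
// (blockDim.x + 2*k_width) x (blockDim.y + 2*k_width) input neighbourhood (block plus
// halo) for every channel into shared memory, then each thread computes one output
// pixel for the output kernel selected by blockIdx.z. The filter weights live in the
// 3*3*3*3 = 81-float constant buffer declared above.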
__global__ void conv_cuda(float *input, float *output, int width, int height,
float *kernel, int channels, int k_width,
int kernels) {
int k = blockIdx.z;
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
int output_idx = i * width * kernels + j * kernels + k;
extern __shared__ float sdata[];
int smem_2d_size = (blockDim.x + 2 * k_width) * (blockDim.y + 2 * k_width);
if (threadIdx.y < k_width) {
// Top Overhang
int smem_x = threadIdx.x + k_width;
int smem_y = threadIdx.y;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y - k_width;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] = (gmem_y < 0) ? 0 : input[gmem_index];
}
// Top Left
if (threadIdx.x < k_width) {
int smem_x = threadIdx.x;
int smem_y = threadIdx.y;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x - k_width;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y - k_width;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] = (gmem_x < 0 || gmem_y < 0) ? 0 : input[gmem_index];
}
}
// Top Right
if (threadIdx.y < k_width && threadIdx.x >= blockDim.x - k_width) {
int smem_x = threadIdx.x + 2 * k_width;
int smem_y = threadIdx.y;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x + k_width;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y - k_width;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] =
(gmem_x >= width || gmem_y < 0) ? 0 : input[gmem_index];
}
}
}
// Copy GMEM to SMEM here
// Left Overhang
if (threadIdx.x < k_width) {
int smem_x = threadIdx.x;
int smem_y = threadIdx.y + k_width;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x - k_width;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] = (gmem_x < 0) ? 0 : input[gmem_index];
}
}
// Copy the block data
int smem_x = threadIdx.x + k_width;
int smem_y = threadIdx.y + k_width;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] =
(gmem_x >= width || gmem_y >= height) ? 0 : input[gmem_index];
}
// Bottom
if (threadIdx.y >= blockDim.y - k_width) {
int smem_x = threadIdx.x + k_width;
int smem_y = threadIdx.y + 2 * k_width;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y + k_width;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] = (gmem_y >= height) ? 0 : input[gmem_index];
}
// Bottom Left
if (threadIdx.x < k_width && threadIdx.y >= blockDim.y - k_width) {
int smem_x = threadIdx.x;
int smem_y = threadIdx.y + 2 * k_width;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x - k_width;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y + k_width;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] =
(gmem_x < 0 || gmem_y >= height) ? 0 : input[gmem_index];
}
}
}
// Right
if (threadIdx.x >= blockDim.x - k_width) {
int smem_x = threadIdx.x + 2 * k_width;
int smem_y = threadIdx.y + k_width;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x + k_width;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] = (gmem_x >= width) ? 0 : input[gmem_index];
}
}
// Bottom Right
if (threadIdx.x >= blockDim.x - k_width &&
threadIdx.y >= blockDim.y - k_width) {
int smem_x = threadIdx.x + 2 * k_width;
int smem_y = threadIdx.y + 2 * k_width;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x + k_width;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y + k_width;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] =
(gmem_x >= width || gmem_y >= height) ? 0 : input[gmem_index];
}
}
__syncthreads();
if (i >= height || j >= width) {
return;
}
float tmp_output = 0;
for (int c = 0; c < channels; c++) {
for (int k_i = 0; k_i <= 2 * k_width; k_i++) {
for (int k_j = 0; k_j <= 2 * k_width; k_j++) {
smem_x = threadIdx.x + k_j;
smem_y = threadIdx.y + k_i;
int smem_index =
c * smem_2d_size + smem_x + smem_y * (blockDim.x + 2 * k_width);
int kernel_index =
k * channels * (2 * k_width + 1) * (2 * k_width + 1) +
c * (2 * k_width + 1) * (2 * k_width + 1) +
k_i * (2 * k_width + 1) + k_j;
tmp_output += sdata[smem_index] * ckernel[kernel_index];
}
}
}
output[output_idx] = tmp_output;
return;
}
int main(int argc, char *argv[]) {
char *outputfile = (char *)"cuda_out_reorder.png";
// Check input image name
if (argc < 2) {
std::cout << "No file input" << std::endl;
return 0;
}
//
// Check if the filename is valid
char *filename = argv[1];
std::cout << argv[1] << std::endl;
// Load Image
cv::Mat image;
image = load_image(filename);
if (image.empty()) {
std::cout << "File not exist" << std::endl;
return 0;
}
//==================================
// Define I/O sizes
//==================================
int padding = 1;
int channels = 3;
int height = image.rows;
int width = image.cols;
int kernels = 3;
std::cout << "Image dims (HxW)is " << height << "x" << width << std::endl;
int height_padded = height + 2 * padding;
int width_padded = width + 2 * padding;
int input_bytes = channels * height * width * sizeof(float);
int output_bytes = channels * height * width * sizeof(float);
std::cout << "Padded dims is " << height_padded << "x" << width_padded
<< std::endl;
float *h_input = (float *)image.data;
// float *h_output = new float[output_bytes];
float *h_output;
h_output = (float *)malloc(output_bytes);
float *d_input;
float *d_output;
hipMalloc((void **)&d_input, input_bytes);
hipMalloc((void **)&d_output, output_bytes);
hipMemcpy(d_input, h_input, input_bytes, hipMemcpyHostToDevice);
// invoke Kernel
int bx = 64;
int by = 16;
dim3 block(bx, by); // you will want to configure this
dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y,
3);
printf("Grid : {%d, %d, %d} blocks. Blocks : {%d, %d} threads.\n", grid.x,
grid.y, grid.z, block.x, block.y);
//==================================
// Define Kernel data
//==================================
// Mystery kernel
const float kernel_template[3][3] = {{1, 1, 1}, {1, -8, 1}, {1, 1, 1}};
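// The 3x3 template above is a discrete Laplacian (edge-detection) filter; it is
// replicated below for every output-kernel / input-channel combination.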
float *d_kernel;
float h_kernel[3][3][3][3];
int kernel_bytes = 3 * 3 * 3 * 3 * sizeof(float);
for (int kernel = 0; kernel < 3; ++kernel) {
for (int channel = 0; channel < 3; ++channel) {
for (int row = 0; row < 3; ++row) {
for (int column = 0; column < 3; ++column) {
h_kernel[kernel][channel][row][column] = kernel_template[row][column];
}
}
}
}
hipMalloc((void **)&d_kernel, kernel_bytes);
hipMemcpy(d_kernel, h_kernel, kernel_bytes, hipMemcpyHostToDevice);
hipMemcpyToSymbol(ckernel, &h_kernel, kernel_bytes);
int k_size = 3;
int k_width = (k_size - 1) / 2;
int smem_size =
(bx + 2 * k_width) * (by + 2 * k_width) * channels * sizeof(float);
printf("SMEM size is %d \n", (bx + 2 * k_width) * (by + 2 * k_width));
//==================================
// CPU Convolution
//==================================
printf("Start conv\n");
double timeStampA = getTimeStamp();
hipLaunchKernelGGL(( conv_cuda), dim3(grid), dim3(block), smem_size, 0, d_input, d_output, width, height,
d_kernel, 3, k_width, kernels);
hipDeviceSynchronize();
double timeStampB = getTimeStamp();
hipMemcpy(h_output, d_output, input_bytes, hipMemcpyDeviceToHost);
//==================================
// Collect data
//==================================
// Print result
std::cout << "Total convolution time: " << timeStampB - timeStampA
<< std::endl;
std::cout << "Save Output to " << outputfile << std::endl;
save_image(outputfile, h_output, height, width);
hipFree(d_input);
hipFree(d_output);
hipFree(d_kernel);
hipDeviceReset();
free(h_output); // h_output was allocated with malloc, so free it rather than delete[]
return 0;
} | ea968ca866302b454f838f75e477fbccff40c148.cu | #include "helpers.h"
__constant__ float ckernel[81];
__global__ void conv_cuda(float *input, float *output, int width, int height,
float *kernel, int channels, int k_width,
int kernels) {
int k = blockIdx.z;
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
int output_idx = i * width * kernels + j * kernels + k;
extern __shared__ float sdata[];
int smem_2d_size = (blockDim.x + 2 * k_width) * (blockDim.y + 2 * k_width);
if (threadIdx.y < k_width) {
// Top Overhang
int smem_x = threadIdx.x + k_width;
int smem_y = threadIdx.y;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y - k_width;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] = (gmem_y < 0) ? 0 : input[gmem_index];
}
// Top Left
if (threadIdx.x < k_width) {
int smem_x = threadIdx.x;
int smem_y = threadIdx.y;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x - k_width;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y - k_width;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] = (gmem_x < 0 || gmem_y < 0) ? 0 : input[gmem_index];
}
}
// Top Right
if (threadIdx.y < k_width && threadIdx.x >= blockDim.x - k_width) {
int smem_x = threadIdx.x + 2 * k_width;
int smem_y = threadIdx.y;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x + k_width;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y - k_width;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] =
(gmem_x >= width || gmem_y < 0) ? 0 : input[gmem_index];
}
}
}
// Copy GMEM to SMEM here
// Left Overhang
if (threadIdx.x < k_width) {
int smem_x = threadIdx.x;
int smem_y = threadIdx.y + k_width;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x - k_width;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] = (gmem_x < 0) ? 0 : input[gmem_index];
}
}
// Copy the block data
int smem_x = threadIdx.x + k_width;
int smem_y = threadIdx.y + k_width;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] =
(gmem_x >= width || gmem_y >= height) ? 0 : input[gmem_index];
}
// Bottom
if (threadIdx.y >= blockDim.y - k_width) {
int smem_x = threadIdx.x + k_width;
int smem_y = threadIdx.y + 2 * k_width;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y + k_width;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] = (gmem_y >= height) ? 0 : input[gmem_index];
}
// Bottom Left
if (threadIdx.x < k_width && threadIdx.y >= blockDim.y - k_width) {
int smem_x = threadIdx.x;
int smem_y = threadIdx.y + 2 * k_width;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x - k_width;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y + k_width;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] =
(gmem_x < 0 || gmem_y >= height) ? 0 : input[gmem_index];
}
}
}
// Right
if (threadIdx.x >= blockDim.x - k_width) {
int smem_x = threadIdx.x + 2 * k_width;
int smem_y = threadIdx.y + k_width;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x + k_width;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] = (gmem_x >= width) ? 0 : input[gmem_index];
}
}
// Bottom Right
if (threadIdx.x >= blockDim.x - k_width &&
threadIdx.y >= blockDim.y - k_width) {
int smem_x = threadIdx.x + 2 * k_width;
int smem_y = threadIdx.y + 2 * k_width;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x + k_width;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y + k_width;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] =
(gmem_x >= width || gmem_y >= height) ? 0 : input[gmem_index];
}
}
__syncthreads();
if (i >= height || j >= width) {
return;
}
float tmp_output = 0;
for (int c = 0; c < channels; c++) {
for (int k_i = 0; k_i <= 2 * k_width; k_i++) {
for (int k_j = 0; k_j <= 2 * k_width; k_j++) {
smem_x = threadIdx.x + k_j;
smem_y = threadIdx.y + k_i;
int smem_index =
c * smem_2d_size + smem_x + smem_y * (blockDim.x + 2 * k_width);
int kernel_index =
k * channels * (2 * k_width + 1) * (2 * k_width + 1) +
c * (2 * k_width + 1) * (2 * k_width + 1) +
k_i * (2 * k_width + 1) + k_j;
tmp_output += sdata[smem_index] * ckernel[kernel_index];
}
}
}
output[output_idx] = tmp_output;
return;
}
int main(int argc, char *argv[]) {
char *outputfile = (char *)"cuda_out_reorder.png";
// Check input image name
if (argc < 2) {
std::cout << "No file input" << std::endl;
return 0;
}
//
// Check if the filename is valid
char *filename = argv[1];
std::cout << argv[1] << std::endl;
// Load Image
cv::Mat image;
image = load_image(filename);
if (image.empty()) {
std::cout << "File not exist" << std::endl;
return 0;
}
//==================================
// Define I/O sizes
//==================================
int padding = 1;
int channels = 3;
int height = image.rows;
int width = image.cols;
int kernels = 3;
std::cout << "Image dims (HxW)is " << height << "x" << width << std::endl;
int height_padded = height + 2 * padding;
int width_padded = width + 2 * padding;
int input_bytes = channels * height * width * sizeof(float);
int output_bytes = channels * height * width * sizeof(float);
std::cout << "Padded dims is " << height_padded << "x" << width_padded
<< std::endl;
float *h_input = (float *)image.data;
// float *h_output = new float[output_bytes];
float *h_output;
h_output = (float *)malloc(output_bytes);
float *d_input;
float *d_output;
cudaMalloc((void **)&d_input, input_bytes);
cudaMalloc((void **)&d_output, output_bytes);
cudaMemcpy(d_input, h_input, input_bytes, cudaMemcpyHostToDevice);
// invoke Kernel
int bx = 64;
int by = 16;
dim3 block(bx, by); // you will want to configure this
dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y,
3);
printf("Grid : {%d, %d, %d} blocks. Blocks : {%d, %d} threads.\n", grid.x,
grid.y, grid.z, block.x, block.y);
//==================================
// Define Kernel data
//==================================
// Mystery kernel
const float kernel_template[3][3] = {{1, 1, 1}, {1, -8, 1}, {1, 1, 1}};
float *d_kernel;
float h_kernel[3][3][3][3];
int kernel_bytes = 3 * 3 * 3 * 3 * sizeof(float);
for (int kernel = 0; kernel < 3; ++kernel) {
for (int channel = 0; channel < 3; ++channel) {
for (int row = 0; row < 3; ++row) {
for (int column = 0; column < 3; ++column) {
h_kernel[kernel][channel][row][column] = kernel_template[row][column];
}
}
}
}
cudaMalloc((void **)&d_kernel, kernel_bytes);
cudaMemcpy(d_kernel, h_kernel, kernel_bytes, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(ckernel, &h_kernel, kernel_bytes);
int k_size = 3;
int k_width = (k_size - 1) / 2;
int smem_size =
(bx + 2 * k_width) * (by + 2 * k_width) * channels * sizeof(float);
printf("SMEM size is %d \n", (bx + 2 * k_width) * (by + 2 * k_width));
//==================================
// CPU Convolution
//==================================
printf("Start conv\n");
double timeStampA = getTimeStamp();
conv_cuda<<<grid, block, smem_size>>>(d_input, d_output, width, height,
d_kernel, 3, k_width, kernels);
cudaDeviceSynchronize();
double timeStampB = getTimeStamp();
cudaMemcpy(h_output, d_output, input_bytes, cudaMemcpyDeviceToHost);
//==================================
// Collect data
//==================================
// Print result
std::cout << "Total convolution time: " << timeStampB - timeStampA
<< std::endl;
std::cout << "Save Output to " << outputfile << std::endl;
save_image(outputfile, h_output, height, width);
cudaFree(d_input);
cudaFree(d_output);
cudaFree(d_kernel);
cudaDeviceReset();
free(h_output); // h_output was allocated with malloc, so free it rather than delete[]
return 0;
} |
f13665e6710eec732ca35f27880af0c7b4f29326.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1999-2017 Alibaba Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "xdl/core/ops/take_grad_op.h"
#include "xdl/core/framework/op_registry.h"
#include "xdl/core/lib/common_defines.h"
#include "xdl/core/lib/atomic.h"
#include "xdl/core/framework/gpu/gpu_device.h"
namespace xdl {
namespace {
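// Scatter-add gradient kernel: row i of the incoming gradient is accumulated into
// row indicator[i] of the output. Several input rows may map to the same output row,
// so the accumulation uses atomic adds.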
template <typename T, typename I>
__global__ void TakeGradOpKernel(const T* in,
const I* indicator,
size_t row,
size_t col,
T* out) {
size_t id_num = row * col;
CUDA_KERNEL_LOOP(k, id_num) {
size_t i = k / col;
size_t j = k % col;
I rrow = indicator[i];
common::gpu_atomic_add<T>(in[k], out + rrow * col + j);
}
}
} // namespace
template <typename T, typename I>
class TakeGradGpuOp : public GpuOpKernel {
public:
Status LaunchKernel(OpKernelContext* ctx, CudaStream* stream) override;
};
template <typename T, typename I>
Status TakeGradGpuOp<T, I>::LaunchKernel(OpKernelContext* ctx,
CudaStream* stream) {
Tensor grad, indicator, feature, output;
XDL_CHECK_STATUS(ctx->GetInput(0, &grad));
XDL_CHECK_STATUS(ctx->GetInput(1, &indicator));
XDL_CHECK_STATUS(ctx->GetInput(2, &feature));
XDL_CHECK_COND(1 == indicator.Shape().Size(),
Status::ArgumentError("indicator must be rank 1 tensor"));
XDL_CHECK_COND(grad.Shape()[0] == indicator.Shape().NumElements(),
Status::ArgumentError("input and indicator size not match"));
auto grad_dims = grad.Shape().Dims();
size_t row = grad_dims[0];
size_t col = grad.Shape().NumElements() / row;
T* pin = grad.Raw<T>();
I* pind = indicator.Raw<I>();
std::vector<size_t> dims(grad_dims.begin(), grad_dims.end());
dims[0] = feature.Shape()[0];
TensorShape out_shape(dims);
XDL_CHECK_STATUS(ctx->AllocateOutput(0, out_shape, &output));
T* pout = output.Raw<T>();
hipStream_t st = stream->GetInternal();
CUDA_CHECK(hipMemsetAsync(pout, 0, sizeof(T) * out_shape.NumElements(), st));
hipLaunchKernelGGL(( TakeGradOpKernel<T, I>),
dim3(CUDA_GET_BLOCKS(row * col)),
dim3(CUDA_NUM_THREADS),
0,
st, pin, pind, row, col, pout);
return Status::Ok();
}
#define REGISTER_GPU_KERNEL(T, I) \
XDL_REGISTER_KERNEL(TakeGrad, TakeGradGpuOp<T, I>) \
.Device("GPU") \
.AttrDataType<T>("dtype") \
.AttrDataType<I>("itype")
REGISTER_GPU_KERNEL(float, int32_t);
REGISTER_GPU_KERNEL(float, int64_t);
REGISTER_GPU_KERNEL(double, int32_t);
REGISTER_GPU_KERNEL(double, int64_t);
#undef REGISTER_GPU_KERNEL
} // namespace xdl
| f13665e6710eec732ca35f27880af0c7b4f29326.cu | /*
* Copyright 1999-2017 Alibaba Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "xdl/core/ops/take_grad_op.h"
#include "xdl/core/framework/op_registry.h"
#include "xdl/core/lib/common_defines.h"
#include "xdl/core/lib/atomic.h"
#include "xdl/core/framework/gpu/gpu_device.h"
namespace xdl {
namespace {
template <typename T, typename I>
__global__ void TakeGradOpKernel(const T* in,
const I* indicator,
size_t row,
size_t col,
T* out) {
size_t id_num = row * col;
CUDA_KERNEL_LOOP(k, id_num) {
size_t i = k / col;
size_t j = k % col;
I rrow = indicator[i];
common::gpu_atomic_add<T>(in[k], out + rrow * col + j);
}
}
} // namespace
template <typename T, typename I>
class TakeGradGpuOp : public GpuOpKernel {
public:
Status LaunchKernel(OpKernelContext* ctx, CudaStream* stream) override;
};
template <typename T, typename I>
Status TakeGradGpuOp<T, I>::LaunchKernel(OpKernelContext* ctx,
CudaStream* stream) {
Tensor grad, indicator, feature, output;
XDL_CHECK_STATUS(ctx->GetInput(0, &grad));
XDL_CHECK_STATUS(ctx->GetInput(1, &indicator));
XDL_CHECK_STATUS(ctx->GetInput(2, &feature));
XDL_CHECK_COND(1 == indicator.Shape().Size(),
Status::ArgumentError("indicator must be rank 1 tensor"));
XDL_CHECK_COND(grad.Shape()[0] == indicator.Shape().NumElements(),
Status::ArgumentError("input and indicator size not match"));
auto grad_dims = grad.Shape().Dims();
size_t row = grad_dims[0];
size_t col = grad.Shape().NumElements() / row;
T* pin = grad.Raw<T>();
I* pind = indicator.Raw<I>();
std::vector<size_t> dims(grad_dims.begin(), grad_dims.end());
dims[0] = feature.Shape()[0];
TensorShape out_shape(dims);
XDL_CHECK_STATUS(ctx->AllocateOutput(0, out_shape, &output));
T* pout = output.Raw<T>();
cudaStream_t st = stream->GetInternal();
CUDA_CHECK(cudaMemsetAsync(pout, 0, sizeof(T) * out_shape.NumElements(), st));
TakeGradOpKernel<T, I><<<
CUDA_GET_BLOCKS(row * col),
CUDA_NUM_THREADS,
0,
st>>>(pin, pind, row, col, pout);
return Status::Ok();
}
#define REGISTER_GPU_KERNEL(T, I) \
XDL_REGISTER_KERNEL(TakeGrad, TakeGradGpuOp<T, I>) \
.Device("GPU") \
.AttrDataType<T>("dtype") \
.AttrDataType<I>("itype")
REGISTER_GPU_KERNEL(float, int32_t);
REGISTER_GPU_KERNEL(float, int64_t);
REGISTER_GPU_KERNEL(double, int32_t);
REGISTER_GPU_KERNEL(double, int64_t);
#undef REGISTER_GPU_KERNEL
} // namespace xdl
|
4b856d5d74a3061ac6cdc897d41185f3cb0d4b4e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaCollidingParticles.cuh"
//When using the thrust library, anytime you want to use an anonymous function
// to process the array, you need to wrap it in a struct and pass that in instead.
//For example, this method is triggered by thrust for each element in our Particle
// array, and the output is stored automatically in our OpenGL particle array.
struct CopyToOpenGL
{
__host__ __device__
float3 operator()(const Particle& p)
{
//Particles go from 0 - grid width, and we want it to be centred on 0,0,0!
const float world_dim = PARTICLE_GRID_SIZE * PARTICLE_GRID_CELL_SIZE;
const float3 world_offset = make_float3(world_dim * 0.5f, 0.0f, world_dim * 0.5f);
float3 centred_pos = p._pos - world_offset;
return make_float3(centred_pos.x, centred_pos.y, centred_pos.z);
}
};
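// A minimal sketch (assumed; the actual call site lives outside this file) of how
// such a functor is typically applied with thrust:
// thrust::transform(particles.begin(), particles.end(), opengl_positions.begin(), CopyToOpenGL());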
/****************************
*** ALGORITHM EXPLANATION ***
*****************************/
//Parallel collision resolution:
// - Making any serial algorithm parallel is very hard, and what
// will almost certainly take up 99% of any GPU project. For this
// example, collision resolution, we just take an n*2 approach.
// Simply: For each collision, we process it twice, once for object A
// and once for object B. The reason we do this is to avoid reading and
// writing to the same data at the same time (e.g. our physics constraints in parallel).
// Instead, we allocate a thread to each particle, let it sum up all of the 'resolution'
// forces acting on it from nearby collisions.
//
// On paper, this is just a much slower version of our CPU solver, though when split
// onto hundreds of cores is still much faster than our CPU approach.
//How do we know which particles are neighbours?
// - To do the collision resolution above, we need to know for each particle
// which other particles are nearby and possibly colliding. To accomplish this
// we use a bucket sort. We generate a large 3D grid of cells and put each particle
// into its corresponding cell, so finding all nearby particles becomes a quick search
// around the current and neighbouring grid cells and all their contained particles.
//
//If we have a fixed grid (like a texture) how do we place more than one particle in a single cell?
// - Instead of having a static grid array, each grid cell just contains a start and end index which
// points into the particle array. To generate this, we have to do a couple of steps:-
// 1: For each particle, compute its grid cell index
// 2: Sort the particles by their grid cell indices
// 3. Run through the grid cell indices and save the 'start' of any grid cell change into our grid array
// 4. Run through the grid cell indices and save the 'end' of any grid cell change into our grid array
//
//-Footnote-
// The result of this final codebase is actually very similar to the CUDA "particles" example that comes
// packaged with the samples. Their implementation is a bit faster, sorting lookups over entire particles
// and using spring forces to resolve collisions in a more stable manner. If you're interested, it's definitely
// worth a look.
//
// Another thing, for those that are interested, is a more descriptive explanation of how this works. It isn't
// done exactly as mentioned in the article, as we don't create 8 separate update kernels and instead just process
// each collision pair twice. Though it explains the process much better, and is a more elegant solution to collision
// resolution.
// https://developer.nvidia.com/gpugems/GPUGems3/gpugems3_ch32.html
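// A rough sketch (not necessarily the exact code this project uses) of the four
// bucket-sort steps above, expressed with thrust on device_vectors 'particles' and 'cell_index':
// thrust::transform(particles.begin(), particles.end(), cell_index.begin(), GetCellGridIndex()); // step 1
// thrust::sort_by_key(cell_index.begin(), cell_index.end(), particles.begin()); // step 2
// // steps 3 and 4: one thread per particle writes grid_cell_start / grid_cell_end
// // wherever its cell index differs from that of its left / right neighbour.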
__host__ __device__
int3 GetGridCell(const float3& pos)
{
int3 cell;
//Get a x,y,z cell index for the particle
// Assumes positions go from 0 - (PARTICLE_GRID_SIZE * PARTICLE_GRID_CELL_SIZE)
cell.x = static_cast<int>(pos.x / PARTICLE_GRID_CELL_SIZE);
cell.y = static_cast<int>(pos.y / PARTICLE_GRID_CELL_SIZE);
cell.z = static_cast<int>(pos.z / PARTICLE_GRID_CELL_SIZE);
return cell;
}
__host__ __device__
uint GetGridCellHash(const int3& cell)
{
//Generate a unique 'cell index' for the given cell.
// - To handle 'edge' cases, we do a quick bitwise
// modulus to make sure all particles are correctly handled.
int x = cell.x & (PARTICLE_GRID_SIZE - 1);
int y = cell.y & (PARTICLE_GRID_SIZE - 1);
int z = cell.z & (PARTICLE_GRID_SIZE - 1);
return ((z * PARTICLE_GRID_SIZE) + x) * PARTICLE_GRID_SIZE + y;
}
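//Example of the wrap-around above (assuming PARTICLE_GRID_SIZE == 64, purely for illustration):
// 130 & 63 == 2, so an index past the far edge wraps back into the grid, and
// -1 & 63 == 63, so the -1 neighbour offsets used in CollideParticles wrap to the
// opposite side instead of indexing outside the lookup grid.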
//Bucket Sort: 1: For each particle, compute its grid cell index
// Note: The other parts of the bucket sort list are all handled inside thrust library functions =]
struct GetCellGridIndex
{
GetCellGridIndex() {}
__host__ __device__
uint operator()(const Particle& p) const
{
int3 cell = GetGridCell(p._pos);
return GetGridCellHash(cell);
}
};
//Given a particle p, check for and collide it with all particles in the given cell index
__device__
void CollideParticleWithCell(float baumgarte_factor, uint particle_idx, Particle& particle, Particle& out_particle,
int3 cell,
Particle* all_particles, uint* grid_cell_start, uint* grid_cell_end)
{
uint cellHash = GetGridCellHash(cell);
//Get the start and end indices in the particle array which correspond
// to the given grid cell
uint arr_idx = grid_cell_start[cellHash];
uint arr_end = grid_cell_end[cellHash];
for (; arr_idx < arr_end; arr_idx++)
{
//Make sure we don't collide with ourselves!
if (arr_idx == particle_idx)
continue;
Particle other_particle = all_particles[arr_idx];
//Do a quick sphere-sphere test
float3 ab = other_particle._pos - particle._pos;
float lengthSq = dot(ab, ab);
const float diameterSq = PARTICLE_RADIUS * PARTICLE_RADIUS * 4.f;
if (lengthSq < diameterSq)
{
//We have a collision!
float len = sqrtf(lengthSq);
float3 abn = ab / len;
//Direct normal collision (no friction/shear)
float abnVel = dot(other_particle._vel - particle._vel, abn);
float jn = -(abnVel * (1.f + COLLISION_ELASTICITY));
//Extra energy to overcome overlap error
float overlap = PARTICLE_RADIUS * 2.f - len;
float b = overlap * baumgarte_factor;
//Normally we just add correctional energy (b) to our velocity,
// but with such small particles and so many collisions this quickly gets
// out of control! The other way to solve positional errors is to move
// the positions of the spheres, though this has numerous other problems and
			//		 ruins our collision neighbour checks. Though in general, velocity correction
// adds energy and positional correction removes energy (and is unstable with the
// way we check collisions) so for now, we'll just use a half of each. Try messing
// around with these values though! :)
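			//As a rough illustration with the values set in UpdateParticles further down
			// (fixed_timestep = 1/60, so baumgarte_factor = 0.05 * 60 = 3), an overlap of
			// 0.01 world units adds b = 0.03 to the impulse jn below.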
jn += b;
			//out_particle._pos -= abn * overlap * 0.5f;	//Half positional correction, half because we're only applying to A and not A + B
jn = max(jn, 0.0f);
//We just assume each particle is the same mass, so half the velocity change is applied to each.
out_particle._vel -= abn * (jn * 0.5f);
}
}
}
__global__
void CollideParticles(float baumgarte_factor, uint num_particles, Particle* particles, Particle* out_particles, uint* grid_cell_start, uint* grid_cell_end)
{
uint index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= num_particles)
return;
//For each particle, check for and collide it with all neighbouring particles.
// - As we know the particle radius is never larger than the grid cell size we only
// ever have to check in a one cell radius around (and including) our grid cell.
Particle p = particles[index];
Particle out_p = p;
int3 cell = GetGridCell(p._pos);
for (int z = -1; z <= 1; ++z)
{
for (int x = -1; x <= 1; ++x)
{
for (int y = -1; y <= 1; ++y)
{
int3 check_cell_idx = cell + make_int3(x, y, z);
CollideParticleWithCell(baumgarte_factor, index, p, out_p, check_cell_idx, particles, grid_cell_start, grid_cell_end);
}
}
}
out_particles[index] = out_p;
}
// Update particle positions
// - Also handles boundary resolution. We don't want our particles
// leaving our lookup grid.
struct UpdatePositions
{
UpdatePositions(float dt, float3 gravity)
: _dt(dt)
, _gravity(gravity)
, _gridMaxBounds(PARTICLE_GRID_SIZE * PARTICLE_GRID_CELL_SIZE - PARTICLE_RADIUS)
{
}
float _dt;
float3 _gravity;
float _gridMaxBounds;
__host__ __device__
void operator()(Particle& p)
{
//Time integration
p._vel += _gravity;
p._vel *= 0.999f;
p._pos += p._vel * _dt;
//Out of Bounds Check
		// - Horrible branching mess... Hopefully you're a better programmer than me. :(
//X
if (p._pos.x < PARTICLE_RADIUS)
{
p._pos.x = PARTICLE_RADIUS;
p._vel.x = fabs(p._vel.x) * COLLISION_ELASTICITY;
}
if (p._pos.x > _gridMaxBounds)
{
p._pos.x = _gridMaxBounds;
p._vel.x = -fabs(p._vel.x) * COLLISION_ELASTICITY;
}
//Y
if (p._pos.y < PARTICLE_RADIUS)
{
p._pos.y = PARTICLE_RADIUS;
			p._vel.y = fabs(p._vel.y) * COLLISION_ELASTICITY;
}
if (p._pos.y > _gridMaxBounds)
{
p._pos.y = _gridMaxBounds;
			p._vel.y = -fabs(p._vel.y) * COLLISION_ELASTICITY;
}
//Z
if (p._pos.z < PARTICLE_RADIUS)
{
p._pos.z = PARTICLE_RADIUS;
			p._vel.z = fabs(p._vel.z) * COLLISION_ELASTICITY;
}
if (p._pos.z > _gridMaxBounds)
{
p._pos.z = _gridMaxBounds;
			p._vel.z = -fabs(p._vel.z) * COLLISION_ELASTICITY;
}
}
};
//All the code below this point is ONLY executed on the CPU
CudaCollidingParticles::CudaCollidingParticles()
: num_particles(0)
, particles_ping(NULL)
, cGLOutPositions(NULL)
{
}
CudaCollidingParticles::~CudaCollidingParticles()
{
if (particles_ping)
{
gpuErrchk(hipFree(particles_ping));
gpuErrchk(hipFree(particles_pong));
gpuErrchk(hipFree(particles_grid_cell_index));
gpuErrchk(hipFree(grid_cell_start));
gpuErrchk(hipFree(grid_cell_end));
particles_ping = NULL;
}
if (cGLOutPositions)
{
gpuErrchk(hipGraphicsUnregisterResource(cGLOutPositions));
cGLOutPositions = NULL;
}
}
void CudaCollidingParticles::InitializeParticleDam(int dam_width, int dam_height, int dam_depth)
{
	///This function could have been a lot simpler, but I wanted a nicely compacted dam... >.>
uint num_even_rowed_particles = dam_width * dam_depth * dam_height / 2;
num_particles = num_even_rowed_particles + (dam_width - 1) * (dam_depth - 1) * dam_height / 2;
//Allocate Particle Arrays
gpuErrchk(hipMalloc(&particles_pong, num_particles * sizeof(Particle)));
gpuErrchk(hipMalloc(&particles_grid_cell_index, num_particles * sizeof(uint)));
//Allocate our lookup grid
const uint num_grid_cells = PARTICLE_GRID_SIZE*PARTICLE_GRID_SIZE*PARTICLE_GRID_SIZE;
gpuErrchk(hipMalloc(&grid_cell_start, num_grid_cells * sizeof(uint)));
gpuErrchk(hipMalloc(&grid_cell_end, num_grid_cells * sizeof(uint)));
//Generate initial Particle data for our dam
const float sqrt2 = sqrt(2.f);
const float3 dam_size = make_float3(
dam_width * PARTICLE_RADIUS * 2.f,
dam_height * PARTICLE_RADIUS * (2.f + sqrt2) * 0.5f,
dam_depth * PARTICLE_RADIUS * 2.f);
const float world_dim = PARTICLE_GRID_SIZE * PARTICLE_GRID_CELL_SIZE - PARTICLE_RADIUS * 2.f;
const float3 world_size = make_float3(world_dim, world_dim, world_dim);
float3 start_offset = world_size * 0.5f - dam_size * 0.5f;
start_offset.y = 0.0f;
Particle* tmp_particles = new Particle[num_particles];
//Initialize all the even rows of the dam
for (int y = 0; y < dam_height / 2; y++)
{
for (int z = 0; z < dam_depth; ++z)
{
for (int x = 0; x < dam_width; ++x)
{
Particle p;
p._vel = make_float3(0.f, 0.f, 0.f);
p._pos = PARTICLE_RADIUS * make_float3(
1.0f + x * 2.f,
1.0f + y * (2.f + sqrt2),
1.0f + z * 2.f
);
p._pos += start_offset;
int idx = ((y * dam_depth) + z) * dam_width + x;
tmp_particles[idx] = p;
}
}
}
//Initialize all the odd rows of the dam
for (int y = 0; y < dam_height / 2; y++)
{
for (int z = 0; z < dam_depth - 1; ++z)
{
for (int x = 0; x < dam_width - 1; ++x)
{
Particle p;
p._vel = make_float3(0.f, 0.f, 0.f);
p._pos = PARTICLE_RADIUS * make_float3(
2.f + x * 2.f,
(1.f + sqrt2) + y * (2.f + sqrt2),
2.f + z * 2.f
);
p._pos += start_offset;
int idx = ((y * (dam_depth-1)) + z) * (dam_width-1) + x;
tmp_particles[num_even_rowed_particles + idx] = p;
}
}
}
gpuErrchk(hipMalloc(&particles_ping, num_particles * sizeof(Particle)));
gpuErrchk(hipMemcpy(particles_ping, tmp_particles, num_particles * sizeof(Particle), hipMemcpyHostToDevice));
delete[] tmp_particles;
}
void CudaCollidingParticles::InitializeOpenGLVertexBuffer(GLuint buffer_idx)
{
	//As the number of particles in this example is generated by the above function, the
	// OpenGL array has to be allocated afterwards, and is only registered here to be filled in later.
gpuErrchk(hipGraphicsGLRegisterBuffer(&cGLOutPositions, buffer_idx, hipGraphicsMapFlagsNone));
}
void CudaCollidingParticles::UpdateParticles(float dt)
{
//See "ALGORITHM EXPLANATION" (top of this file) for info on what is meant to be happening here.
	//Note: Gravity here is tiny! The reason is stability: the particles themselves are
	//      small and the timestep is comparatively massive, so we need to make sure the maximum movement
	//      of each particle per timestep is small. Try messing around with it, it's also important
	//      for our CPU physics engine as well (but hopefully it has never been noticed ^^ ).
	//      For stability, particle systems normally use spring based collision resolution instead, which
	//      handles correctional energy (our baumgarte scalar) more leniently.
const float3 gravity = make_float3(0, -0.02f, 0);
const uint num_grid_cells = PARTICLE_GRID_SIZE*PARTICLE_GRID_SIZE*PARTICLE_GRID_SIZE;
const float fixed_timestep = 1.0f / 60.0f;
//Integrate our particles through time
// - thrust::for_each applies a given function to each element in the array
thrust::for_each(
thrust::device_ptr<Particle>(particles_ping),
thrust::device_ptr<Particle>(particles_ping + num_particles),
UpdatePositions(fixed_timestep*10, gravity));
//Generate our grid cell indices
// - thrust::transform calls a given function on each element in the first array
// and outputs the result into the second array.
thrust::transform(
thrust::device_ptr<Particle>(particles_ping),
thrust::device_ptr<Particle>(particles_ping + num_particles),
thrust::device_ptr<uint>(particles_grid_cell_index),
GetCellGridIndex());
//Sort our Particles based on their grid cell indices
// - thrust::sort_by_key sorts both keys and values based on the key array passed in.
	//   Note: Sorting is still very slow (comparatively) on the GPU and is one case where the
	//         CPU is still often faster. However, copying all our data back to the host, sorting
	//         and copying back to the device is not a feasible option. It's still something
	//         to keep in mind when doing your own algorithms.
thrust::sort_by_key(
thrust::device_ptr<uint>(particles_grid_cell_index),
thrust::device_ptr<uint>(particles_grid_cell_index + num_particles),
thrust::device_ptr<Particle>(particles_ping));
//Compute grid cell start indices
// - Runs through the list of particle grid cell indices, and saves for each
// grid cell the point in the array where it first appears.
thrust::counting_iterator<uint> search_begin(0u);
thrust::lower_bound(
thrust::device_ptr<uint>(particles_grid_cell_index),
thrust::device_ptr<uint>(particles_grid_cell_index + num_particles),
search_begin,
search_begin + num_grid_cells,
thrust::device_ptr<uint>(grid_cell_start));
//Compute grid cell end indices
// - Runs through the list of particle grid cell indices, and saves for each
// grid cell the point in the array where it last appears (+1).
thrust::upper_bound(
thrust::device_ptr<uint>(particles_grid_cell_index),
thrust::device_ptr<uint>(particles_grid_cell_index + num_particles),
search_begin,
search_begin + num_grid_cells,
thrust::device_ptr<uint>(grid_cell_end));
//Handle our collision resolution
// - For each particle, check and handle collisions with all neighbouring particles.
	//  Thrust?? - To my knowledge, thrust doesn't give you raw array access. Everything must be
	//             done with iterators - which could be used for this function, but for me it was
	//             easier just to write our own kernel and access the particle array directly.
dim3 block(64, 1, 1);
dim3 grid((num_particles + block.x - 1) / block.x, 1, 1);
float baumgarte_factor = 0.05f / fixed_timestep;
for (int i = 0; i < 10; ++i)
{
hipLaunchKernelGGL(( CollideParticles), dim3(grid), dim3(block) , 0, 0, baumgarte_factor, num_particles, particles_ping, particles_pong, grid_cell_start, grid_cell_end);
std::swap(particles_ping, particles_pong);
		//Should really do boundary checks here...
}
	//Finally, copy our particle positions to OpenGL to be rendered as particles.
size_t tmpVertexPtrSize;
float3 *tmpVertexPtr;
gpuErrchk(hipGraphicsMapResources(1, &cGLOutPositions, 0));
gpuErrchk(hipGraphicsResourceGetMappedPointer((void **)&tmpVertexPtr, &tmpVertexPtrSize, cGLOutPositions));
if (tmpVertexPtrSize < num_particles * sizeof(float3))
{
NCLERROR("OpenGL vertex buffer not large enough to encompass all our particles!");
return;
}
thrust::transform(
thrust::device_ptr<Particle>(particles_ping),
thrust::device_ptr<Particle>(particles_ping + num_particles),
thrust::device_ptr<float3>(tmpVertexPtr),
CopyToOpenGL());
gpuErrchk(hipGraphicsUnmapResources(1, &cGLOutPositions, 0));
} | 4b856d5d74a3061ac6cdc897d41185f3cb0d4b4e.cu | #include "CudaCollidingParticles.cuh"
//When using the thrust library, anytime you want to use an anonymous function
// to process the array, you need to wrap it in a struct and pass that in instead.
//For example, this method is triggered by thrust for each element in our Particle
// array, and the output is stored automatically in our OpenGL particle array.
struct CopyToOpenGL
{
__host__ __device__
float3 operator()(const Particle& p)
{
		//Particles go from 0 to the grid width, and we want them to be centred on 0,0,0!
const float world_dim = PARTICLE_GRID_SIZE * PARTICLE_GRID_CELL_SIZE;
const float3 world_offset = make_float3(world_dim * 0.5f, 0.0f, world_dim * 0.5f);
float3 centred_pos = p._pos - world_offset;
return make_float3(centred_pos.x, centred_pos.y, centred_pos.z);
}
};
/****************************
*** ALGORITHM EXPLANATION ***
*****************************/
//Parallel collision resolution:
// - Making any serial algorithm parallel is very hard, and doing so
//   will almost certainly take up 99% of the work in any GPU project. For this
//   example, collision resolution, we just take an n*2 approach.
// Simply: For each collision, we process it twice, once for object A
// and once for object B. The reason we do this is to avoid reading and
// writing to the same data at the same time (e.g. our physics constraints in parallel).
// Instead, we allocate a thread to each particle, let it sum up all of the 'resolution'
// forces acting on it from nearby collisions.
//
// On paper, this is just a much slower version of our CPU solver, though when split
//  across hundreds of cores it is still much faster than our CPU approach.
//How do we know which particles are neighbours?
// - To do the collision resolution above, we need to know for each particle
// which other particles are nearby and possibly colliding. To accomplish this
//   we use a bucket sort. We generate a large 3D grid of cells and put each particle
//   into its corresponding cell, so that finding all nearby particles becomes a quick search
//   through the current and neighbouring grid cells and all their contained particles.
//
//If we have a fixed grid (like a texture) how do we place more than one particle in a single cell?
// - Instead of having a static grid array, each grid cell just contains a start and end index which
// points into the particle array. To generate this, we have to do a couple of steps:-
//    1: For each particle, compute its grid cell index
//    2: Sort the particles by their grid cell indices
//    3: Run through the grid cell indices and save the 'start' of any grid cell change into our grid array
//    4: Run through the grid cell indices and save the 'end' of any grid cell change into our grid array
//
//-Footnote-
// The result of this final codebase is actually very similar to the CUDA "particles" example that comes
//  packaged with the samples. Their implementation is a bit faster, sorting lookup indices rather than entire
//  particles, and using spring forces to resolve collisions in a more stable manner. If you're interested, it's
//  definitely worth a look.
//
// Another thing, for those that are interested, is a more descriptive explanation of how this works. Ours isn't
//  done exactly as described in the article, as we don't create 8 separate update kernels and instead just process
//  each collision pair twice. Still, it explains the process much better, and is a more elegant solution to collision
//  resolution.
// https://developer.nvidia.com/gpugems/GPUGems3/gpugems3_ch32.html
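//
//-Worked Example-
// (Illustrative numbers only, not taken from the real simulation.)
// Suppose that after step 2 the sorted grid cell indices are [0, 0, 1, 1, 1, 2].
// Steps 3 and 4 (the thrust::lower_bound / thrust::upper_bound calls in UpdateParticles below)
// then give grid_cell_start = [0, 2, 5, ...] and grid_cell_end = [2, 5, 6, ...], so cell 1
// owns the particles in array slots [2, 5) and an empty cell yields an empty [start, end) range.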
__host__ __device__
int3 GetGridCell(const float3& pos)
{
int3 cell;
	//Get an x,y,z cell index for the particle
// Assumes positions go from 0 - (PARTICLE_GRID_SIZE * PARTICLE_GRID_CELL_SIZE)
cell.x = static_cast<int>(pos.x / PARTICLE_GRID_CELL_SIZE);
cell.y = static_cast<int>(pos.y / PARTICLE_GRID_CELL_SIZE);
cell.z = static_cast<int>(pos.z / PARTICLE_GRID_CELL_SIZE);
return cell;
}
__host__ __device__
uint GetGridCellHash(const int3& cell)
{
	//Generate a unique 'cell index' for the given cell.
	// - To handle 'edge' cases, we do a quick bitwise
	//   modulus (which relies on PARTICLE_GRID_SIZE being a power of two)
	//   so that out-of-range cells wrap around and all particles are correctly handled.
int x = cell.x & (PARTICLE_GRID_SIZE - 1);
int y = cell.y & (PARTICLE_GRID_SIZE - 1);
int z = cell.z & (PARTICLE_GRID_SIZE - 1);
return ((z * PARTICLE_GRID_SIZE) + x) * PARTICLE_GRID_SIZE + y;
}
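//Example of the wrap-around above (assuming PARTICLE_GRID_SIZE == 64, purely for illustration):
// 130 & 63 == 2, so an index past the far edge wraps back into the grid, and
// -1 & 63 == 63, so the -1 neighbour offsets used in CollideParticles wrap to the
// opposite side instead of indexing outside the lookup grid.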
//Bucket Sort: 1: For each particle, compute its grid cell index
// Note: The other parts of the bucket sort list are all handled inside thrust library functions =]
struct GetCellGridIndex
{
GetCellGridIndex() {}
__host__ __device__
uint operator()(const Particle& p) const
{
int3 cell = GetGridCell(p._pos);
return GetGridCellHash(cell);
}
};
//Given a particle p, check for and collide it with all particles in the given cell index
__device__
void CollideParticleWithCell(float baumgarte_factor, uint particle_idx, Particle& particle, Particle& out_particle,
int3 cell,
Particle* all_particles, uint* grid_cell_start, uint* grid_cell_end)
{
uint cellHash = GetGridCellHash(cell);
//Get the start and end indices in the particle array which correspond
// to the given grid cell
uint arr_idx = grid_cell_start[cellHash];
uint arr_end = grid_cell_end[cellHash];
for (; arr_idx < arr_end; arr_idx++)
{
//Make sure we don't collide with ourselves!
if (arr_idx == particle_idx)
continue;
Particle other_particle = all_particles[arr_idx];
//Do a quick sphere-sphere test
float3 ab = other_particle._pos - particle._pos;
float lengthSq = dot(ab, ab);
const float diameterSq = PARTICLE_RADIUS * PARTICLE_RADIUS * 4.f;
if (lengthSq < diameterSq)
{
//We have a collision!
float len = sqrtf(lengthSq);
float3 abn = ab / len;
//Direct normal collision (no friction/shear)
float abnVel = dot(other_particle._vel - particle._vel, abn);
float jn = -(abnVel * (1.f + COLLISION_ELASTICITY));
//Extra energy to overcome overlap error
float overlap = PARTICLE_RADIUS * 2.f - len;
float b = overlap * baumgarte_factor;
//Normally we just add correctional energy (b) to our velocity,
// but with such small particles and so many collisions this quickly gets
// out of control! The other way to solve positional errors is to move
// the positions of the spheres, though this has numerous other problems and
			//		 ruins our collision neighbour checks. Though in general, velocity correction
// adds energy and positional correction removes energy (and is unstable with the
// way we check collisions) so for now, we'll just use a half of each. Try messing
// around with these values though! :)
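			//As a rough illustration with the values set in UpdateParticles further down
			// (fixed_timestep = 1/60, so baumgarte_factor = 0.05 * 60 = 3), an overlap of
			// 0.01 world units adds b = 0.03 to the impulse jn below.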
jn += b;
			//out_particle._pos -= abn * overlap * 0.5f;	//Half positional correction, half because we're only applying to A and not A + B
jn = max(jn, 0.0f);
//We just assume each particle is the same mass, so half the velocity change is applied to each.
out_particle._vel -= abn * (jn * 0.5f);
}
}
}
__global__
void CollideParticles(float baumgarte_factor, uint num_particles, Particle* particles, Particle* out_particles, uint* grid_cell_start, uint* grid_cell_end)
{
uint index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= num_particles)
return;
//For each particle, check for and collide it with all neighbouring particles.
// - As we know the particle radius is never larger than the grid cell size we only
// ever have to check in a one cell radius around (and including) our grid cell.
Particle p = particles[index];
Particle out_p = p;
int3 cell = GetGridCell(p._pos);
for (int z = -1; z <= 1; ++z)
{
for (int x = -1; x <= 1; ++x)
{
for (int y = -1; y <= 1; ++y)
{
int3 check_cell_idx = cell + make_int3(x, y, z);
CollideParticleWithCell(baumgarte_factor, index, p, out_p, check_cell_idx, particles, grid_cell_start, grid_cell_end);
}
}
}
out_particles[index] = out_p;
}
// Update particle positions
// - Also handles boundary resolution. We don't want our particles
// leaving our lookup grid.
struct UpdatePositions
{
UpdatePositions(float dt, float3 gravity)
: _dt(dt)
, _gravity(gravity)
, _gridMaxBounds(PARTICLE_GRID_SIZE * PARTICLE_GRID_CELL_SIZE - PARTICLE_RADIUS)
{
}
float _dt;
float3 _gravity;
float _gridMaxBounds;
__host__ __device__
void operator()(Particle& p)
{
//Time integration
p._vel += _gravity;
p._vel *= 0.999f;
p._pos += p._vel * _dt;
//Out of Bounds Check
		// - Horrible branching mess... Hopefully you're a better programmer than me. :(
//X
if (p._pos.x < PARTICLE_RADIUS)
{
p._pos.x = PARTICLE_RADIUS;
p._vel.x = fabs(p._vel.x) * COLLISION_ELASTICITY;
}
if (p._pos.x > _gridMaxBounds)
{
p._pos.x = _gridMaxBounds;
p._vel.x = -fabs(p._vel.x) * COLLISION_ELASTICITY;
}
//Y
if (p._pos.y < PARTICLE_RADIUS)
{
p._pos.y = PARTICLE_RADIUS;
			p._vel.y = fabs(p._vel.y) * COLLISION_ELASTICITY;
}
if (p._pos.y > _gridMaxBounds)
{
p._pos.y = _gridMaxBounds;
			p._vel.y = -fabs(p._vel.y) * COLLISION_ELASTICITY;
}
//Z
if (p._pos.z < PARTICLE_RADIUS)
{
p._pos.z = PARTICLE_RADIUS;
			p._vel.z = fabs(p._vel.z) * COLLISION_ELASTICITY;
}
if (p._pos.z > _gridMaxBounds)
{
p._pos.z = _gridMaxBounds;
			p._vel.z = -fabs(p._vel.z) * COLLISION_ELASTICITY;
}
}
};
//All the code below this point is ONLY executed on the CPU
CudaCollidingParticles::CudaCollidingParticles()
: num_particles(0)
, particles_ping(NULL)
, cGLOutPositions(NULL)
{
}
CudaCollidingParticles::~CudaCollidingParticles()
{
if (particles_ping)
{
gpuErrchk(cudaFree(particles_ping));
gpuErrchk(cudaFree(particles_pong));
gpuErrchk(cudaFree(particles_grid_cell_index));
gpuErrchk(cudaFree(grid_cell_start));
gpuErrchk(cudaFree(grid_cell_end));
particles_ping = NULL;
}
if (cGLOutPositions)
{
gpuErrchk(cudaGraphicsUnregisterResource(cGLOutPositions));
cGLOutPositions = NULL;
}
}
void CudaCollidingParticles::InitializeParticleDam(int dam_width, int dam_height, int dam_depth)
{
	///This function could have been a lot simpler, but I wanted a nicely compacted dam... >.>
uint num_even_rowed_particles = dam_width * dam_depth * dam_height / 2;
num_particles = num_even_rowed_particles + (dam_width - 1) * (dam_depth - 1) * dam_height / 2;
//Allocate Particle Arrays
gpuErrchk(cudaMalloc(&particles_pong, num_particles * sizeof(Particle)));
gpuErrchk(cudaMalloc(&particles_grid_cell_index, num_particles * sizeof(uint)));
//Allocate our lookup grid
const uint num_grid_cells = PARTICLE_GRID_SIZE*PARTICLE_GRID_SIZE*PARTICLE_GRID_SIZE;
gpuErrchk(cudaMalloc(&grid_cell_start, num_grid_cells * sizeof(uint)));
gpuErrchk(cudaMalloc(&grid_cell_end, num_grid_cells * sizeof(uint)));
//Generate initial Particle data for our dam
const float sqrt2 = sqrt(2.f);
const float3 dam_size = make_float3(
dam_width * PARTICLE_RADIUS * 2.f,
dam_height * PARTICLE_RADIUS * (2.f + sqrt2) * 0.5f,
dam_depth * PARTICLE_RADIUS * 2.f);
const float world_dim = PARTICLE_GRID_SIZE * PARTICLE_GRID_CELL_SIZE - PARTICLE_RADIUS * 2.f;
const float3 world_size = make_float3(world_dim, world_dim, world_dim);
float3 start_offset = world_size * 0.5f - dam_size * 0.5f;
start_offset.y = 0.0f;
Particle* tmp_particles = new Particle[num_particles];
//Initialize all the even rows of the dam
for (int y = 0; y < dam_height / 2; y++)
{
for (int z = 0; z < dam_depth; ++z)
{
for (int x = 0; x < dam_width; ++x)
{
Particle p;
p._vel = make_float3(0.f, 0.f, 0.f);
p._pos = PARTICLE_RADIUS * make_float3(
1.0f + x * 2.f,
1.0f + y * (2.f + sqrt2),
1.0f + z * 2.f
);
p._pos += start_offset;
int idx = ((y * dam_depth) + z) * dam_width + x;
tmp_particles[idx] = p;
}
}
}
//Initialize all the odd rows of the dam
for (int y = 0; y < dam_height / 2; y++)
{
for (int z = 0; z < dam_depth - 1; ++z)
{
for (int x = 0; x < dam_width - 1; ++x)
{
Particle p;
p._vel = make_float3(0.f, 0.f, 0.f);
p._pos = PARTICLE_RADIUS * make_float3(
2.f + x * 2.f,
(1.f + sqrt2) + y * (2.f + sqrt2),
2.f + z * 2.f
);
p._pos += start_offset;
int idx = ((y * (dam_depth-1)) + z) * (dam_width-1) + x;
tmp_particles[num_even_rowed_particles + idx] = p;
}
}
}
gpuErrchk(cudaMalloc(&particles_ping, num_particles * sizeof(Particle)));
gpuErrchk(cudaMemcpy(particles_ping, tmp_particles, num_particles * sizeof(Particle), cudaMemcpyHostToDevice));
delete[] tmp_particles;
}
void CudaCollidingParticles::InitializeOpenGLVertexBuffer(GLuint buffer_idx)
{
	//As the number of particles in this example is generated by the above function, the
	// OpenGL array has to be allocated afterwards, and is only registered here to be filled in later.
gpuErrchk(cudaGraphicsGLRegisterBuffer(&cGLOutPositions, buffer_idx, cudaGraphicsMapFlagsNone));
}
void CudaCollidingParticles::UpdateParticles(float dt)
{
//See "ALGORITHM EXPLANATION" (top of this file) for info on what is meant to be happening here.
	//Note: Gravity here is tiny! The reason is stability: the particles themselves are
	//      small and the timestep is comparatively massive, so we need to make sure the maximum movement
	//      of each particle per timestep is small. Try messing around with it, it's also important
	//      for our CPU physics engine as well (but hopefully it has never been noticed ^^ ).
	//      For stability, particle systems normally use spring based collision resolution instead, which
	//      handles correctional energy (our baumgarte scalar) more leniently.
const float3 gravity = make_float3(0, -0.02f, 0);
const uint num_grid_cells = PARTICLE_GRID_SIZE*PARTICLE_GRID_SIZE*PARTICLE_GRID_SIZE;
const float fixed_timestep = 1.0f / 60.0f;
//Integrate our particles through time
// - thrust::for_each applies a given function to each element in the array
thrust::for_each(
thrust::device_ptr<Particle>(particles_ping),
thrust::device_ptr<Particle>(particles_ping + num_particles),
UpdatePositions(fixed_timestep*10, gravity));
//Generate our grid cell indices
// - thrust::transform calls a given function on each element in the first array
// and outputs the result into the second array.
thrust::transform(
thrust::device_ptr<Particle>(particles_ping),
thrust::device_ptr<Particle>(particles_ping + num_particles),
thrust::device_ptr<uint>(particles_grid_cell_index),
GetCellGridIndex());
//Sort our Particles based on their grid cell indices
// - thrust::sort_by_key sorts both keys and values based on the key array passed in.
	//   Note: Sorting is still very slow (comparatively) on the GPU and is one case where the
	//         CPU is still often faster. However, copying all our data back to the host, sorting
	//         and copying back to the device is not a feasible option. It's still something
	//         to keep in mind when doing your own algorithms.
thrust::sort_by_key(
thrust::device_ptr<uint>(particles_grid_cell_index),
thrust::device_ptr<uint>(particles_grid_cell_index + num_particles),
thrust::device_ptr<Particle>(particles_ping));
//Compute grid cell start indices
// - Runs through the list of particle grid cell indices, and saves for each
// grid cell the point in the array where it first appears.
thrust::counting_iterator<uint> search_begin(0u);
thrust::lower_bound(
thrust::device_ptr<uint>(particles_grid_cell_index),
thrust::device_ptr<uint>(particles_grid_cell_index + num_particles),
search_begin,
search_begin + num_grid_cells,
thrust::device_ptr<uint>(grid_cell_start));
//Compute grid cell end indices
// - Runs through the list of particle grid cell indices, and saves for each
// grid cell the point in the array where it last appears (+1).
thrust::upper_bound(
thrust::device_ptr<uint>(particles_grid_cell_index),
thrust::device_ptr<uint>(particles_grid_cell_index + num_particles),
search_begin,
search_begin + num_grid_cells,
thrust::device_ptr<uint>(grid_cell_end));
//Handle our collision resolution
// - For each particle, check and handle collisions with all neighbouring particles.
	//  Thrust?? - To my knowledge, thrust doesn't give you raw array access. Everything must be
	//             done with iterators - which could be used for this function, but for me it was
	//             easier just to write our own kernel and access the particle array directly.
dim3 block(64, 1, 1);
dim3 grid((num_particles + block.x - 1) / block.x, 1, 1);
float baumgarte_factor = 0.05f / fixed_timestep;
for (int i = 0; i < 10; ++i)
{
CollideParticles<<< grid, block >>>(baumgarte_factor, num_particles, particles_ping, particles_pong, grid_cell_start, grid_cell_end);
std::swap(particles_ping, particles_pong);
		//Should really do boundary checks here...
}
	//Finally, copy our particle positions to OpenGL to be rendered as particles.
size_t tmpVertexPtrSize;
float3 *tmpVertexPtr;
gpuErrchk(cudaGraphicsMapResources(1, &cGLOutPositions, 0));
gpuErrchk(cudaGraphicsResourceGetMappedPointer((void **)&tmpVertexPtr, &tmpVertexPtrSize, cGLOutPositions));
if (tmpVertexPtrSize < num_particles * sizeof(float3))
{
NCLERROR("OpenGL vertex buffer not large enough to encompass all our particles!");
return;
}
thrust::transform(
thrust::device_ptr<Particle>(particles_ping),
thrust::device_ptr<Particle>(particles_ping + num_particles),
thrust::device_ptr<float3>(tmpVertexPtr),
CopyToOpenGL());
gpuErrchk(cudaGraphicsUnmapResources(1, &cGLOutPositions, 0));
} |
414f793c7a749a28e020432d5bec3e819929e3a8.hip | // !!! This is a file automatically generated by hipify!!!
#if GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include <hip/hip_runtime.h>
#include "backproject.hcu"
#include <iostream>
using namespace std;
// from
// https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__constant__ float geom_[MAX_PROJ_STACK_SIZE * 12];
__constant__ int3 proj_shape_;
__constant__ int3 vol_shape_;
__constant__ float3 vol_orig_;
__constant__ float3 voxel_size_;
static int3 proj_shape_host_;
static int3 vol_shape_host_;
texture<float, hipTextureType2DLayered> projTex_;
inline __device__
float3 map( float3&& vp, int n )
{
const float DSR = 700.0;
const float DDR = 0;
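  // Note (assumption, not documented in the original source): DSR and DDR appear to be the
  // source-to-rotation-centre and detector-to-rotation-centre distances of a tomosynthesis-style
  // geometry, in the same units as the voxel coordinates. The z component returned below is the
  // perspective denominator, and both kernels weight each sample's contribution by 1 / (oi.z * oi.z).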
  float theta = 0;
  // Projection angle (in degrees) for each of the 15 views.
  const float thetas_deg[15] = { -7.5f, -6.43f, -5.36f, -4.29f, -3.22f, -2.15f, -1.08f,
                                 0.f, 1.08f, 2.15f, 3.22f, 4.29f, 5.36f, 6.43f, 7.5f };
  if(n >= 0 && n < 15){
    theta = thetas_deg[n];
  }
  // Convert to radians
  theta = (theta * M_PI)/180;
  // BRUNO'S IMPLEMENTATION!!!!
const float* matrix = &(geom_[n*12]);
float3 oi = make_float3(
( vp.x + (( vp.z * ((DSR * std::sin(theta)) + vp.x)) / ((DSR * std::cos(theta)) + DDR - vp.z)) ),
(vp.y * ((DSR * std::cos(theta))+DDR)) / ((DSR * std::cos(theta))+DDR- vp.z),
((DSR * std::cos(theta))+DDR- vp.z)
);
  ////////// THE GERMANS' IMPLEMENTATION!!!! ////////
/* const float* matrix = &(geom_[n*12]);
float3 oi = make_float3(
(matrix[0] * vp.x + matrix[1] * vp.y + matrix[2] * vp.z + matrix[3]),// + tx,
(matrix[4] * vp.x + matrix[5] * vp.y + matrix[6] * vp.z + matrix[7]),
(matrix[8] * vp.x + matrix[9] * vp.y + matrix[10] * vp.z + matrix[11])
);
*/
return oi;
}
__global__
void kernel_backproject( float* vol )
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const int j = blockIdx.y*blockDim.y + threadIdx.y;
const int k = blockIdx.z*blockDim.z + threadIdx.z;
if( i >= vol_shape_.x || j >= vol_shape_.y || k >= vol_shape_.z )
return;
const float x = i*voxel_size_.x + vol_orig_.x;
const float y = j*voxel_size_.y + vol_orig_.y;
const float z = k*voxel_size_.z + vol_orig_.z;
float val = 0.0f;
for( int n = 0; n < proj_shape_.z; ++n )
{
auto ip = map( make_float3( x, y, z ), n );
ip.z = 1.0f / ip.z;
//ip.x *= ip.z;
//ip.y *= ip.z;
// val += tex2DLayered( projTex_, (ip.x + 0.5 + 143.36)/0.14, (ip.y + 0.5)/0.14, n ) * ip.z * ip.z; // bACKUP lINE
val += tex2DLayered( projTex_, (ip.x + 0.5 + 35)/0.14, (ip.y + 0.5)/0.14, n ) * ip.z * ip.z;
}
// linear volume address
const unsigned int l = vol_shape_.x * ( k*vol_shape_.y + j ) + i;
vol[l] = val;
}
__global__
void kernel_project( const float* vol, float* proj )
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const int j = blockIdx.y*blockDim.y + threadIdx.y;
const int k = blockIdx.z*blockDim.z + threadIdx.z;
if( i >= vol_shape_.x || j >= vol_shape_.y || k >= vol_shape_.z )
return;
const float x = i*voxel_size_.x + vol_orig_.x;
const float y = j*voxel_size_.y + vol_orig_.y;
const float z = k*voxel_size_.z + vol_orig_.z;
const float v = vol[vol_shape_.x * ( k*vol_shape_.y + j ) + i];
for( int n = 0; n < proj_shape_.z; ++n )
{
float3 ip = map( make_float3( x, y, z ), n );
//ip.x *= 1.0f / ip.z;
//ip.y *= 1.0f / ip.z;
const float vz = v / (ip.z*ip.z);
// four neighbours on projection
const int u1 = ((int)ip.x),
v1 = ((int)ip.y);
const int u2 = u1+1,
v2 = v1+1;
// simulate hipAddressModeBorder
if( u1 >= -1 && v1 >= -1 && u2 <= proj_shape_.x && v2 <= proj_shape_.y )
{
const float wu2 = ip.x - ((float)u1);
const float wu1 = 1.0f - wu2;
const float wv2 = ip.y - ((float)v1);
const float wv1 = 1.0f - wv2;
const unsigned int l1 = proj_shape_.x * ( n*proj_shape_.y + v1 ) + u1;
const unsigned int l2 = l1 + proj_shape_.x;
if( u1 >= 0 )
{
const float vzwu1 = vz*wu1;
if( v1 >= 0 )
atomicAdd( &proj[l1], vzwu1*wv1 );
if( v2 < proj_shape_.y )
atomicAdd( &proj[l2], vzwu1*wv2 );
}
if( u2 < proj_shape_.x )
{
const float vzwu2 = vz*wu2;
if( v1 >= 0 )
atomicAdd( &proj[l1 + 1], vzwu2*wv1 );
if( v2 < proj_shape_.y )
atomicAdd( &proj[l2 + 1], vzwu2*wv2 );
}
}
}
}
__host__
void cuda_init_backproject( float* geom,
int U, int V, int N,
int X, int Y, int Z,
float ox, float oy, float oz,
float sx, float sy, float sz )
{
proj_shape_host_ = make_int3( U, V, N );
vol_shape_host_ = make_int3( X, Y, Z );
auto vol_orig = make_float3( ox, oy, oz );
auto voxel_size = make_float3( sx, sy, sz );
gpuErrchk( hipMemcpyToSymbol( geom_, geom, 12 * sizeof(float) * N ) );
gpuErrchk( hipMemcpyToSymbol( proj_shape_, &proj_shape_host_, sizeof(int3) ) );
gpuErrchk( hipMemcpyToSymbol( vol_shape_, &vol_shape_host_, sizeof(int3) ) );
gpuErrchk( hipMemcpyToSymbol( vol_orig_, &vol_orig, sizeof(float3) ) );
gpuErrchk( hipMemcpyToSymbol( voxel_size_, &voxel_size, sizeof(float3) ) );
}
__host__
void cuda_backproject( const float* proj, float* vol )
{
// set texture properties
projTex_.addressMode[0] = hipAddressModeBorder;
projTex_.addressMode[1] = hipAddressModeBorder;
projTex_.addressMode[2] = hipAddressModeBorder;
projTex_.filterMode = hipFilterModeLinear;
projTex_.normalized = false;
// malloc cuda array for texture
hipExtent projExtent = make_hipExtent( proj_shape_host_.x,
proj_shape_host_.y,
proj_shape_host_.z );
hipArray *projArray;
static hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
gpuErrchk( hipMalloc3DArray( &projArray, &channelDesc, projExtent, hipArrayLayered ) );
// copy data to 3D array
hipMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_hipPitchedPtr( const_cast<float*>( proj ),
proj_shape_host_.x*sizeof(float),
proj_shape_host_.x,
proj_shape_host_.y
);
copyParams.dstArray = projArray;
copyParams.extent = projExtent;
copyParams.kind = hipMemcpyDeviceToDevice;
gpuErrchk( hipMemcpy3D( ©Params ) );
// bind texture reference
gpuErrchk( hipBindTextureToArray( projTex_, (hipArray*)projArray,
channelDesc ) );
// launch kernel
const unsigned int gridsize_x = (vol_shape_host_.x-1) / BLOCKSIZE_X + 1;
const unsigned int gridsize_y = (vol_shape_host_.y-1) / BLOCKSIZE_Y + 1;
const unsigned int gridsize_z = (vol_shape_host_.z-1) / BLOCKSIZE_Z + 1;
const dim3 grid = dim3( gridsize_x, gridsize_y, gridsize_z );
const dim3 block = dim3( BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z );
fprintf(stderr, "\n\n\n\n");
fprintf(stderr, "Iniciano o backproject");
fprintf(stderr, "Saiu do backproject");
fprintf(stderr, "\n\n\n\n");
hipLaunchKernelGGL(( kernel_backproject), dim3(grid), dim3(block) , 0, 0, vol );
// check for errors
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
// cleanup
gpuErrchk( hipUnbindTexture( projTex_ ) );
gpuErrchk( hipFreeArray( projArray ) );
}
__host__
void cuda_project( const float* vol, float* proj )
{
// set proj to zero
hipMemset( proj, 0, proj_shape_host_.x*proj_shape_host_.y*proj_shape_host_.z
* sizeof( float ) );
// launch kernel
const unsigned int gridsize_x = (vol_shape_host_.x-1) / BLOCKSIZE_X + 1;
const unsigned int gridsize_y = (vol_shape_host_.y-1) / BLOCKSIZE_Y + 1;
const unsigned int gridsize_z = (vol_shape_host_.z-1) / BLOCKSIZE_Z + 1;
const dim3 grid = dim3( gridsize_x, gridsize_y, gridsize_z );
const dim3 block = dim3( BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z );
hipLaunchKernelGGL(( kernel_project), dim3(grid), dim3(block) , 0, 0, vol, proj );
}
#endif
| 414f793c7a749a28e020432d5bec3e819929e3a8.cu | #if GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include <cuda.h>
#include "backproject.hcu"
#include <iostream>
using namespace std;
// from
// https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__constant__ float geom_[MAX_PROJ_STACK_SIZE * 12];
__constant__ int3 proj_shape_;
__constant__ int3 vol_shape_;
__constant__ float3 vol_orig_;
__constant__ float3 voxel_size_;
static int3 proj_shape_host_;
static int3 vol_shape_host_;
texture<float, cudaTextureType2DLayered> projTex_;
inline __device__
float3 map( float3&& vp, int n )
{
const float DSR = 700.0;
const float DDR = 0;
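  // Note (assumption, not documented in the original source): DSR and DDR appear to be the
  // source-to-rotation-centre and detector-to-rotation-centre distances of a tomosynthesis-style
  // geometry, in the same units as the voxel coordinates. The z component returned below is the
  // perspective denominator, and both kernels weight each sample's contribution by 1 / (oi.z * oi.z).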
  float theta = 0;
  // Projection angle (in degrees) for each of the 15 views.
  const float thetas_deg[15] = { -7.5f, -6.43f, -5.36f, -4.29f, -3.22f, -2.15f, -1.08f,
                                 0.f, 1.08f, 2.15f, 3.22f, 4.29f, 5.36f, 6.43f, 7.5f };
  if(n >= 0 && n < 15){
    theta = thetas_deg[n];
  }
  // Convert to radians
  theta = (theta * M_PI)/180;
  // BRUNO'S IMPLEMENTATION!!!!
const float* matrix = &(geom_[n*12]);
float3 oi = make_float3(
( vp.x + (( vp.z * ((DSR * std::sin(theta)) + vp.x)) / ((DSR * std::cos(theta)) + DDR - vp.z)) ),
(vp.y * ((DSR * std::cos(theta))+DDR)) / ((DSR * std::cos(theta))+DDR- vp.z),
((DSR * std::cos(theta))+DDR- vp.z)
);
////////// IMPLEMENTAÇÃO DOS ALEMÃES!!!! ////////
/* const float* matrix = &(geom_[n*12]);
float3 oi = make_float3(
(matrix[0] * vp.x + matrix[1] * vp.y + matrix[2] * vp.z + matrix[3]),// + tx,
(matrix[4] * vp.x + matrix[5] * vp.y + matrix[6] * vp.z + matrix[7]),
(matrix[8] * vp.x + matrix[9] * vp.y + matrix[10] * vp.z + matrix[11])
);
*/
return oi;
}
__global__
void kernel_backproject( float* vol )
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const int j = blockIdx.y*blockDim.y + threadIdx.y;
const int k = blockIdx.z*blockDim.z + threadIdx.z;
if( i >= vol_shape_.x || j >= vol_shape_.y || k >= vol_shape_.z )
return;
const float x = i*voxel_size_.x + vol_orig_.x;
const float y = j*voxel_size_.y + vol_orig_.y;
const float z = k*voxel_size_.z + vol_orig_.z;
float val = 0.0f;
for( int n = 0; n < proj_shape_.z; ++n )
{
auto ip = map( make_float3( x, y, z ), n );
ip.z = 1.0f / ip.z;
//ip.x *= ip.z;
//ip.y *= ip.z;
// val += tex2DLayered( projTex_, (ip.x + 0.5 + 143.36)/0.14, (ip.y + 0.5)/0.14, n ) * ip.z * ip.z; // bACKUP lINE
val += tex2DLayered( projTex_, (ip.x + 0.5 + 35)/0.14, (ip.y + 0.5)/0.14, n ) * ip.z * ip.z;
}
// linear volume address
const unsigned int l = vol_shape_.x * ( k*vol_shape_.y + j ) + i;
vol[l] = val;
}
__global__
void kernel_project( const float* vol, float* proj )
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const int j = blockIdx.y*blockDim.y + threadIdx.y;
const int k = blockIdx.z*blockDim.z + threadIdx.z;
if( i >= vol_shape_.x || j >= vol_shape_.y || k >= vol_shape_.z )
return;
const float x = i*voxel_size_.x + vol_orig_.x;
const float y = j*voxel_size_.y + vol_orig_.y;
const float z = k*voxel_size_.z + vol_orig_.z;
const float v = vol[vol_shape_.x * ( k*vol_shape_.y + j ) + i];
for( int n = 0; n < proj_shape_.z; ++n )
{
float3 ip = map( make_float3( x, y, z ), n );
//ip.x *= 1.0f / ip.z;
//ip.y *= 1.0f / ip.z;
const float vz = v / (ip.z*ip.z);
// four neighbours on projection
const int u1 = ((int)ip.x),
v1 = ((int)ip.y);
const int u2 = u1+1,
v2 = v1+1;
// simulate cudaAddressModeBorder
if( u1 >= -1 && v1 >= -1 && u2 <= proj_shape_.x && v2 <= proj_shape_.y )
{
const float wu2 = ip.x - ((float)u1);
const float wu1 = 1.0f - wu2;
const float wv2 = ip.y - ((float)v1);
const float wv1 = 1.0f - wv2;
const unsigned int l1 = proj_shape_.x * ( n*proj_shape_.y + v1 ) + u1;
const unsigned int l2 = l1 + proj_shape_.x;
if( u1 >= 0 )
{
const float vzwu1 = vz*wu1;
if( v1 >= 0 )
atomicAdd( &proj[l1], vzwu1*wv1 );
if( v2 < proj_shape_.y )
atomicAdd( &proj[l2], vzwu1*wv2 );
}
if( u2 < proj_shape_.x )
{
const float vzwu2 = vz*wu2;
if( v1 >= 0 )
atomicAdd( &proj[l1 + 1], vzwu2*wv1 );
if( v2 < proj_shape_.y )
atomicAdd( &proj[l2 + 1], vzwu2*wv2 );
}
}
}
}
__host__
void cuda_init_backproject( float* geom,
int U, int V, int N,
int X, int Y, int Z,
float ox, float oy, float oz,
float sx, float sy, float sz )
{
proj_shape_host_ = make_int3( U, V, N );
vol_shape_host_ = make_int3( X, Y, Z );
auto vol_orig = make_float3( ox, oy, oz );
auto voxel_size = make_float3( sx, sy, sz );
gpuErrchk( cudaMemcpyToSymbol( geom_, geom, 12 * sizeof(float) * N ) );
gpuErrchk( cudaMemcpyToSymbol( proj_shape_, &proj_shape_host_, sizeof(int3) ) );
gpuErrchk( cudaMemcpyToSymbol( vol_shape_, &vol_shape_host_, sizeof(int3) ) );
gpuErrchk( cudaMemcpyToSymbol( vol_orig_, &vol_orig, sizeof(float3) ) );
gpuErrchk( cudaMemcpyToSymbol( voxel_size_, &voxel_size, sizeof(float3) ) );
}
__host__
void cuda_backproject( const float* proj, float* vol )
{
// set texture properties
projTex_.addressMode[0] = cudaAddressModeBorder;
projTex_.addressMode[1] = cudaAddressModeBorder;
projTex_.addressMode[2] = cudaAddressModeBorder;
projTex_.filterMode = cudaFilterModeLinear;
projTex_.normalized = false;
// malloc cuda array for texture
cudaExtent projExtent = make_cudaExtent( proj_shape_host_.x,
proj_shape_host_.y,
proj_shape_host_.z );
cudaArray *projArray;
static cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
gpuErrchk( cudaMalloc3DArray( &projArray, &channelDesc, projExtent, cudaArrayLayered ) );
// copy data to 3D array
cudaMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_cudaPitchedPtr( const_cast<float*>( proj ),
proj_shape_host_.x*sizeof(float),
proj_shape_host_.x,
proj_shape_host_.y
);
copyParams.dstArray = projArray;
copyParams.extent = projExtent;
copyParams.kind = cudaMemcpyDeviceToDevice;
gpuErrchk( cudaMemcpy3D( ©Params ) );
// bind texture reference
gpuErrchk( cudaBindTextureToArray( projTex_, (cudaArray*)projArray,
channelDesc ) );
// launch kernel
const unsigned int gridsize_x = (vol_shape_host_.x-1) / BLOCKSIZE_X + 1;
const unsigned int gridsize_y = (vol_shape_host_.y-1) / BLOCKSIZE_Y + 1;
const unsigned int gridsize_z = (vol_shape_host_.z-1) / BLOCKSIZE_Z + 1;
const dim3 grid = dim3( gridsize_x, gridsize_y, gridsize_z );
const dim3 block = dim3( BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z );
fprintf(stderr, "\n\n\n\n");
fprintf(stderr, "Iniciano o backproject");
fprintf(stderr, "Saiu do backproject");
fprintf(stderr, "\n\n\n\n");
kernel_backproject<<< grid, block >>>( vol );
// check for errors
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
// cleanup
gpuErrchk( cudaUnbindTexture( projTex_ ) );
gpuErrchk( cudaFreeArray( projArray ) );
}
__host__
void cuda_project( const float* vol, float* proj )
{
// set proj to zero
cudaMemset( proj, 0, proj_shape_host_.x*proj_shape_host_.y*proj_shape_host_.z
* sizeof( float ) );
// launch kernel
const unsigned int gridsize_x = (vol_shape_host_.x-1) / BLOCKSIZE_X + 1;
const unsigned int gridsize_y = (vol_shape_host_.y-1) / BLOCKSIZE_Y + 1;
const unsigned int gridsize_z = (vol_shape_host_.z-1) / BLOCKSIZE_Z + 1;
const dim3 grid = dim3( gridsize_x, gridsize_y, gridsize_z );
const dim3 block = dim3( BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z );
kernel_project<<< grid, block >>>( vol, proj );
}
#endif
|
ce207af2eb33e7512dbf4bbdf1a16368dec5c9ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2011-2013 Blender Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* CUDA kernel entry points */
#ifdef __CUDA_ARCH__
#include "kernel/kernel_compat_cuda.h"
#include "kernel_config.h"
#include "util/util_atomic.h"
#include "kernel/kernel_math.h"
#include "kernel/kernel_types.h"
#include "kernel/kernel_globals.h"
#include "kernel/kernel_color.h"
#include "kernel/kernels/cuda/kernel_cuda_image.h"
#include "kernel/kernel_film.h"
#include "kernel/kernel_path.h"
#include "kernel/kernel_path_branched.h"
#include "kernel/kernel_bake.h"
#include "kernel/kernel_work_stealing.h"
#include "kernel/kernel_adaptive_sampling.h"
/* kernels */
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_path_trace(WorkTile *tile, uint total_work_size)
{
int work_index = ccl_global_id(0);
bool thread_is_active = work_index < total_work_size;
uint x, y, sample;
KernelGlobals kg;
if(thread_is_active) {
get_work_pixel(tile, work_index, &x, &y, &sample);
kernel_path_trace(&kg, tile->buffer, sample, x, y, tile->offset, tile->stride);
}
if(kernel_data.film.cryptomatte_passes) {
__syncthreads();
if(thread_is_active) {
kernel_cryptomatte_post(&kg, tile->buffer, sample, x, y, tile->offset, tile->stride);
}
}
}
#ifdef __BRANCHED_PATH__
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_BRANCHED_MAX_REGISTERS)
kernel_cuda_branched_path_trace(WorkTile *tile, uint total_work_size)
{
int work_index = ccl_global_id(0);
bool thread_is_active = work_index < total_work_size;
uint x, y, sample;
KernelGlobals kg;
if(thread_is_active) {
get_work_pixel(tile, work_index, &x, &y, &sample);
kernel_branched_path_trace(&kg, tile->buffer, sample, x, y, tile->offset, tile->stride);
}
if(kernel_data.film.cryptomatte_passes) {
__syncthreads();
if(thread_is_active) {
kernel_cryptomatte_post(&kg, tile->buffer, sample, x, y, tile->offset, tile->stride);
}
}
}
#endif
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_adaptive_stopping(WorkTile *tile, int sample, uint total_work_size)
{
int work_index = ccl_global_id(0);
bool thread_is_active = work_index < total_work_size;
KernelGlobals kg;
if(thread_is_active && kernel_data.film.pass_adaptive_aux_buffer) {
uint x = tile->x + work_index % tile->w;
uint y = tile->y + work_index / tile->w;
int index = tile->offset + x + y * tile->stride;
ccl_global float *buffer = tile->buffer + index * kernel_data.film.pass_stride;
kernel_do_adaptive_stopping(&kg, buffer, sample);
}
}
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_adaptive_filter_x(WorkTile *tile, int sample, uint)
{
KernelGlobals kg;
if(kernel_data.film.pass_adaptive_aux_buffer && sample > kernel_data.integrator.adaptive_min_samples) {
if(ccl_global_id(0) < tile->h) {
int y = tile->y + ccl_global_id(0);
kernel_do_adaptive_filter_x(&kg, y, tile);
}
}
}
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_adaptive_filter_y(WorkTile *tile, int sample, uint)
{
KernelGlobals kg;
if(kernel_data.film.pass_adaptive_aux_buffer && sample > kernel_data.integrator.adaptive_min_samples) {
if(ccl_global_id(0) < tile->w) {
int x = tile->x + ccl_global_id(0);
kernel_do_adaptive_filter_y(&kg, x, tile);
}
}
}
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_adaptive_scale_samples(WorkTile *tile, int start_sample, int sample, uint total_work_size)
{
if(kernel_data.film.pass_adaptive_aux_buffer) {
int work_index = ccl_global_id(0);
bool thread_is_active = work_index < total_work_size;
KernelGlobals kg;
if(thread_is_active) {
uint x = tile->x + work_index % tile->w;
uint y = tile->y + work_index / tile->w;
int index = tile->offset + x + y * tile->stride;
ccl_global float *buffer = tile->buffer + index * kernel_data.film.pass_stride;
if(buffer[kernel_data.film.pass_sample_count] < 0.0f) {
buffer[kernel_data.film.pass_sample_count] = -buffer[kernel_data.film.pass_sample_count];
float sample_multiplier = sample / max((float)start_sample + 1.0f, buffer[kernel_data.film.pass_sample_count]);
if(sample_multiplier != 1.0f) {
kernel_adaptive_post_adjust(&kg, buffer, sample_multiplier);
}
}
else {
kernel_adaptive_post_adjust(&kg, buffer, sample / (sample - 1.0f));
}
}
}
}
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_convert_to_byte(uchar4 *rgba, float *buffer, float sample_scale, int sx, int sy, int sw, int sh, int offset, int stride)
{
int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
int y = sy + blockDim.y*blockIdx.y + threadIdx.y;
if(x < sx + sw && y < sy + sh) {
kernel_film_convert_to_byte(NULL, rgba, buffer, sample_scale, x, y, offset, stride);
}
}
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_convert_to_half_float(uchar4 *rgba, float *buffer, float sample_scale, int sx, int sy, int sw, int sh, int offset, int stride)
{
int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
int y = sy + blockDim.y*blockIdx.y + threadIdx.y;
if(x < sx + sw && y < sy + sh) {
kernel_film_convert_to_half_float(NULL, rgba, buffer, sample_scale, x, y, offset, stride);
}
}
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_displace(uint4 *input,
float4 *output,
int type,
int sx,
int sw,
int offset,
int sample)
{
int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
if(x < sx + sw) {
KernelGlobals kg;
kernel_displace_evaluate(&kg, input, output, x);
}
}
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_background(uint4 *input,
float4 *output,
int type,
int sx,
int sw,
int offset,
int sample)
{
int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
if(x < sx + sw) {
KernelGlobals kg;
kernel_background_evaluate(&kg, input, output, x);
}
}
#ifdef __BAKING__
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_bake(WorkTile *tile, uint total_work_size)
{
int work_index = ccl_global_id(0);
if(work_index < total_work_size) {
uint x, y, sample;
get_work_pixel(tile, work_index, &x, &y, &sample);
KernelGlobals kg;
kernel_bake_evaluate(&kg, tile->buffer, sample, x, y, tile->offset, tile->stride);
}
}
#endif
#endif
| ce207af2eb33e7512dbf4bbdf1a16368dec5c9ed.cu | /*
* Copyright 2011-2013 Blender Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* CUDA kernel entry points */
#ifdef __CUDA_ARCH__
#include "kernel/kernel_compat_cuda.h"
#include "kernel_config.h"
#include "util/util_atomic.h"
#include "kernel/kernel_math.h"
#include "kernel/kernel_types.h"
#include "kernel/kernel_globals.h"
#include "kernel/kernel_color.h"
#include "kernel/kernels/cuda/kernel_cuda_image.h"
#include "kernel/kernel_film.h"
#include "kernel/kernel_path.h"
#include "kernel/kernel_path_branched.h"
#include "kernel/kernel_bake.h"
#include "kernel/kernel_work_stealing.h"
#include "kernel/kernel_adaptive_sampling.h"
/* kernels */
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_path_trace(WorkTile *tile, uint total_work_size)
{
int work_index = ccl_global_id(0);
bool thread_is_active = work_index < total_work_size;
uint x, y, sample;
KernelGlobals kg;
if(thread_is_active) {
get_work_pixel(tile, work_index, &x, &y, &sample);
kernel_path_trace(&kg, tile->buffer, sample, x, y, tile->offset, tile->stride);
}
if(kernel_data.film.cryptomatte_passes) {
__syncthreads();
if(thread_is_active) {
kernel_cryptomatte_post(&kg, tile->buffer, sample, x, y, tile->offset, tile->stride);
}
}
}
#ifdef __BRANCHED_PATH__
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_BRANCHED_MAX_REGISTERS)
kernel_cuda_branched_path_trace(WorkTile *tile, uint total_work_size)
{
int work_index = ccl_global_id(0);
bool thread_is_active = work_index < total_work_size;
uint x, y, sample;
KernelGlobals kg;
if(thread_is_active) {
get_work_pixel(tile, work_index, &x, &y, &sample);
kernel_branched_path_trace(&kg, tile->buffer, sample, x, y, tile->offset, tile->stride);
}
if(kernel_data.film.cryptomatte_passes) {
__syncthreads();
if(thread_is_active) {
kernel_cryptomatte_post(&kg, tile->buffer, sample, x, y, tile->offset, tile->stride);
}
}
}
#endif
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_adaptive_stopping(WorkTile *tile, int sample, uint total_work_size)
{
int work_index = ccl_global_id(0);
bool thread_is_active = work_index < total_work_size;
KernelGlobals kg;
if(thread_is_active && kernel_data.film.pass_adaptive_aux_buffer) {
uint x = tile->x + work_index % tile->w;
uint y = tile->y + work_index / tile->w;
int index = tile->offset + x + y * tile->stride;
ccl_global float *buffer = tile->buffer + index * kernel_data.film.pass_stride;
kernel_do_adaptive_stopping(&kg, buffer, sample);
}
}
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_adaptive_filter_x(WorkTile *tile, int sample, uint)
{
KernelGlobals kg;
if(kernel_data.film.pass_adaptive_aux_buffer && sample > kernel_data.integrator.adaptive_min_samples) {
if(ccl_global_id(0) < tile->h) {
int y = tile->y + ccl_global_id(0);
kernel_do_adaptive_filter_x(&kg, y, tile);
}
}
}
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_adaptive_filter_y(WorkTile *tile, int sample, uint)
{
KernelGlobals kg;
if(kernel_data.film.pass_adaptive_aux_buffer && sample > kernel_data.integrator.adaptive_min_samples) {
if(ccl_global_id(0) < tile->w) {
int x = tile->x + ccl_global_id(0);
kernel_do_adaptive_filter_y(&kg, x, tile);
}
}
}
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_adaptive_scale_samples(WorkTile *tile, int start_sample, int sample, uint total_work_size)
{
if(kernel_data.film.pass_adaptive_aux_buffer) {
int work_index = ccl_global_id(0);
bool thread_is_active = work_index < total_work_size;
KernelGlobals kg;
if(thread_is_active) {
uint x = tile->x + work_index % tile->w;
uint y = tile->y + work_index / tile->w;
int index = tile->offset + x + y * tile->stride;
ccl_global float *buffer = tile->buffer + index * kernel_data.film.pass_stride;
if(buffer[kernel_data.film.pass_sample_count] < 0.0f) {
buffer[kernel_data.film.pass_sample_count] = -buffer[kernel_data.film.pass_sample_count];
float sample_multiplier = sample / max((float)start_sample + 1.0f, buffer[kernel_data.film.pass_sample_count]);
if(sample_multiplier != 1.0f) {
kernel_adaptive_post_adjust(&kg, buffer, sample_multiplier);
}
}
else {
kernel_adaptive_post_adjust(&kg, buffer, sample / (sample - 1.0f));
}
}
}
}
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_convert_to_byte(uchar4 *rgba, float *buffer, float sample_scale, int sx, int sy, int sw, int sh, int offset, int stride)
{
int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
int y = sy + blockDim.y*blockIdx.y + threadIdx.y;
if(x < sx + sw && y < sy + sh) {
kernel_film_convert_to_byte(NULL, rgba, buffer, sample_scale, x, y, offset, stride);
}
}
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_convert_to_half_float(uchar4 *rgba, float *buffer, float sample_scale, int sx, int sy, int sw, int sh, int offset, int stride)
{
int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
int y = sy + blockDim.y*blockIdx.y + threadIdx.y;
if(x < sx + sw && y < sy + sh) {
kernel_film_convert_to_half_float(NULL, rgba, buffer, sample_scale, x, y, offset, stride);
}
}
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_displace(uint4 *input,
float4 *output,
int type,
int sx,
int sw,
int offset,
int sample)
{
int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
if(x < sx + sw) {
KernelGlobals kg;
kernel_displace_evaluate(&kg, input, output, x);
}
}
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_background(uint4 *input,
float4 *output,
int type,
int sx,
int sw,
int offset,
int sample)
{
int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
if(x < sx + sw) {
KernelGlobals kg;
kernel_background_evaluate(&kg, input, output, x);
}
}
#ifdef __BAKING__
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_bake(WorkTile *tile, uint total_work_size)
{
int work_index = ccl_global_id(0);
if(work_index < total_work_size) {
uint x, y, sample;
get_work_pixel(tile, work_index, &x, &y, &sample);
KernelGlobals kg;
kernel_bake_evaluate(&kg, tile->buffer, sample, x, y, tile->offset, tile->stride);
}
}
#endif
#endif
|
22d3bac6bd9e02d8f4a51374dee4100247a2d8fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "file_system.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "stdio.h"
__device__ void user_program(FileSystem *fs, uchar *input, uchar *output) {
/*
/////////////// Test Case 1 ///////////////
u32 fp = fs_open(fs, "t.txt\0", G_WRITE);
fs_write(fs, input, 64, fp);
fp = fs_open(fs, "b.txt\0", G_WRITE);
fs_write(fs, input + 32, 32, fp);
fp = fs_open(fs, "t.txt\0", G_WRITE);
fs_write(fs, input + 32, 32, fp);
fp = fs_open(fs, "t.txt\0", G_READ);
fs_read(fs, output, 32, fp);
fs_gsys(fs, LS_D);
fs_gsys(fs, LS_S);
fp = fs_open(fs, "b.txt\0", G_WRITE);
fs_write(fs, input + 64, 12, fp);
fs_gsys(fs, LS_S);
fs_gsys(fs, LS_D);
fs_gsys(fs, RM, "t.txt\0");
fs_gsys(fs, LS_S);
/////////////// Test Case 2 ///////////////
u32 fp = fs_open(fs, "t.txt\0", G_WRITE);
fs_write(fs,input, 64, fp);
fp = fs_open(fs,"b.txt\0", G_WRITE);
fs_write(fs,input + 32, 32, fp);
fp = fs_open(fs,"t.txt\0", G_WRITE);
fs_write(fs,input + 32, 32, fp);
fp = fs_open(fs,"t.txt\0", G_READ);
fs_read(fs,output, 32, fp);
fs_gsys(fs,LS_D);
fs_gsys(fs,LS_S);
fp = fs_open(fs,"b.txt\0", G_WRITE);
fs_write(fs,input + 64, 12, fp);
fs_gsys(fs,LS_S);
fs_gsys(fs,LS_D);
fs_gsys(fs,RM, "t.txt\0");
fs_gsys(fs,LS_S);
char fname[10][20];
for (int i = 0; i < 10; i++)
{
fname[i][0] = i + 33;
for (int j = 1; j < 19; j++)
fname[i][j] = 64 + j;
fname[i][19] = '\0';
}
for (int i = 0; i < 10; i++)
{
fp = fs_open(fs,fname[i], G_WRITE);
fs_write(fs,input + i, 24 + i, fp);
}
fs_gsys(fs,LS_S);
for (int i = 0; i < 5; i++)
fs_gsys(fs,RM, fname[i]);
fs_gsys(fs,LS_D);
*/
/////////////// Test Case 3 ///////////////
u32 fp = fs_open(fs, "t.txt\0", G_WRITE);
fs_write(fs, input, 64, fp);
fp = fs_open(fs, "b.txt\0", G_WRITE);
fs_write(fs, input + 32, 32, fp);
fp = fs_open(fs, "t.txt\0", G_WRITE);
fs_write(fs, input + 32, 32, fp);
fp = fs_open(fs, "t.txt\0", G_READ);
fs_read(fs, output, 32, fp);
fs_gsys(fs, LS_D);
fs_gsys(fs, LS_S);
fp = fs_open(fs, "b.txt\0", G_WRITE);
fs_write(fs, input + 64, 12, fp);
fs_gsys(fs, LS_S);
fs_gsys(fs, LS_D);
fs_gsys(fs, RM, "t.txt\0");
fs_gsys(fs, LS_S);
char fname[10][20];
for (int i = 0; i < 10; i++)
{
fname[i][0] = i + 33;
for (int j = 1; j < 19; j++)
fname[i][j] = 64 + j;
fname[i][19] = '\0';
}
for (int i = 0; i < 10; i++)
{
fp = fs_open(fs, fname[i], G_WRITE);
fs_write(fs, input + i, 24 + i, fp);
}
fs_gsys(fs, LS_S);
for (int i = 0; i < 5; i++)
fs_gsys(fs, RM, fname[i]);
fs_gsys(fs, LS_D);
char fname2[1018][20];
int p = 0;
for (int k = 2; k < 15; k++)
for (int i = 50; i <= 126; i++, p++)
{
fname2[p][0] = i;
for (int j = 1; j < k; j++)
fname2[p][j] = 64 + j;
fname2[p][k] = '\0';
}
for (int i = 0; i < 1001; i++)
{
fp = fs_open(fs, fname2[i], G_WRITE);
fs_write(fs, input + i, 24 + i, fp);
}
fs_gsys(fs, LS_S);
fp = fs_open(fs, fname2[1000], G_READ);
fs_read(fs, output + 1000, 1024, fp);
char fname3[17][3];
for (int i = 0; i < 17; i++)
{
fname3[i][0] = 97 + i;
fname3[i][1] = 97 + i;
fname3[i][2] = '\0';
fp = fs_open(fs, fname3[i], G_WRITE);
fs_write(fs, input + 1024 * i, 1024, fp);
}
fp = fs_open(fs, "EA\0", G_WRITE);
fs_write(fs, input + 1024 * 100, 1024, fp);
fs_gsys(fs, LS_S);
/*
/////////////////////// Bonus Test Case ///////////////
u32 fp = fs_open(fs, "t.txt\0", G_WRITE);
fs_write(fs, input, 64, fp);
fp = fs_open(fs, "b.txt\0", G_WRITE);
fs_write(fs, input + 32, 32, fp);
fp = fs_open(fs, "t.txt\0", G_WRITE);
fs_write(fs, input + 32, 32, fp);
fp = fs_open(fs, "t.txt\0", G_READ);
fs_read(fs, output, 32, fp);
fs_gsys(fs, LS_D);
fs_gsys(fs, LS_S);
fs_gsys(fs, MKDIR, "app\0");
fs_gsys(fs, LS_D);
fs_gsys(fs, LS_S);
fs_gsys(fs, CD, "app\0");
fs_gsys(fs, LS_S);
fp = fs_open(fs, "a.txt\0", G_WRITE);
fs_write(fs, input + 128, 64, fp);
fp = fs_open(fs, "b.txt\0", G_WRITE);
fs_write(fs, input + 256, 32, fp);
fs_gsys(fs, MKDIR, "soft\0");
fs_gsys(fs, LS_S);
fs_gsys(fs, LS_D);
fs_gsys(fs, CD, "soft\0");
fs_gsys(fs, PWD);
fp = fs_open(fs, "A.txt\0", G_WRITE);
fs_write(fs, input + 256, 64, fp);
fp = fs_open(fs, "B.txt\0", G_WRITE);
fs_write(fs, input + 256, 1024, fp);
fp = fs_open(fs, "C.txt\0", G_WRITE);
fs_write(fs, input + 256, 1024, fp);
fp = fs_open(fs, "D.txt\0", G_WRITE);
fs_write(fs, input + 256, 1024, fp);
fs_gsys(fs, LS_S);
fs_gsys(fs, CD_P);
fs_gsys(fs, LS_S);
fs_gsys(fs, PWD);
fs_gsys(fs, CD_P);
fs_gsys(fs, LS_S);
fs_gsys(fs, CD, "app\0");
fs_gsys(fs, RM_RF, "soft\0");
fs_gsys(fs, LS_S);
fs_gsys(fs, CD_P);
fs_gsys(fs, LS_S);
printf("Note: I don't count '\\0' as a character in file name\n");
*/
}
| 22d3bac6bd9e02d8f4a51374dee4100247a2d8fa.cu | #include "file_system.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include "stdio.h"
__device__ void user_program(FileSystem *fs, uchar *input, uchar *output) {
/*
/////////////// Test Case 1 ///////////////
u32 fp = fs_open(fs, "t.txt\0", G_WRITE);
fs_write(fs, input, 64, fp);
fp = fs_open(fs, "b.txt\0", G_WRITE);
fs_write(fs, input + 32, 32, fp);
fp = fs_open(fs, "t.txt\0", G_WRITE);
fs_write(fs, input + 32, 32, fp);
fp = fs_open(fs, "t.txt\0", G_READ);
fs_read(fs, output, 32, fp);
fs_gsys(fs, LS_D);
fs_gsys(fs, LS_S);
fp = fs_open(fs, "b.txt\0", G_WRITE);
fs_write(fs, input + 64, 12, fp);
fs_gsys(fs, LS_S);
fs_gsys(fs, LS_D);
fs_gsys(fs, RM, "t.txt\0");
fs_gsys(fs, LS_S);
/////////////// Test Case 2 ///////////////
u32 fp = fs_open(fs, "t.txt\0", G_WRITE);
fs_write(fs,input, 64, fp);
fp = fs_open(fs,"b.txt\0", G_WRITE);
fs_write(fs,input + 32, 32, fp);
fp = fs_open(fs,"t.txt\0", G_WRITE);
fs_write(fs,input + 32, 32, fp);
fp = fs_open(fs,"t.txt\0", G_READ);
fs_read(fs,output, 32, fp);
fs_gsys(fs,LS_D);
fs_gsys(fs,LS_S);
fp = fs_open(fs,"b.txt\0", G_WRITE);
fs_write(fs,input + 64, 12, fp);
fs_gsys(fs,LS_S);
fs_gsys(fs,LS_D);
fs_gsys(fs,RM, "t.txt\0");
fs_gsys(fs,LS_S);
char fname[10][20];
for (int i = 0; i < 10; i++)
{
fname[i][0] = i + 33;
for (int j = 1; j < 19; j++)
fname[i][j] = 64 + j;
fname[i][19] = '\0';
}
for (int i = 0; i < 10; i++)
{
fp = fs_open(fs,fname[i], G_WRITE);
fs_write(fs,input + i, 24 + i, fp);
}
fs_gsys(fs,LS_S);
for (int i = 0; i < 5; i++)
fs_gsys(fs,RM, fname[i]);
fs_gsys(fs,LS_D);
*/
/////////////// Test Case 3 ///////////////
u32 fp = fs_open(fs, "t.txt\0", G_WRITE);
fs_write(fs, input, 64, fp);
fp = fs_open(fs, "b.txt\0", G_WRITE);
fs_write(fs, input + 32, 32, fp);
fp = fs_open(fs, "t.txt\0", G_WRITE);
fs_write(fs, input + 32, 32, fp);
fp = fs_open(fs, "t.txt\0", G_READ);
fs_read(fs, output, 32, fp);
fs_gsys(fs, LS_D);
fs_gsys(fs, LS_S);
fp = fs_open(fs, "b.txt\0", G_WRITE);
fs_write(fs, input + 64, 12, fp);
fs_gsys(fs, LS_S);
fs_gsys(fs, LS_D);
fs_gsys(fs, RM, "t.txt\0");
fs_gsys(fs, LS_S);
char fname[10][20];
for (int i = 0; i < 10; i++)
{
fname[i][0] = i + 33;
for (int j = 1; j < 19; j++)
fname[i][j] = 64 + j;
fname[i][19] = '\0';
}
for (int i = 0; i < 10; i++)
{
fp = fs_open(fs, fname[i], G_WRITE);
fs_write(fs, input + i, 24 + i, fp);
}
fs_gsys(fs, LS_S);
for (int i = 0; i < 5; i++)
fs_gsys(fs, RM, fname[i]);
fs_gsys(fs, LS_D);
char fname2[1018][20];
int p = 0;
for (int k = 2; k < 15; k++)
for (int i = 50; i <= 126; i++, p++)
{
fname2[p][0] = i;
for (int j = 1; j < k; j++)
fname2[p][j] = 64 + j;
fname2[p][k] = '\0';
}
for (int i = 0; i < 1001; i++)
{
fp = fs_open(fs, fname2[i], G_WRITE);
fs_write(fs, input + i, 24 + i, fp);
}
fs_gsys(fs, LS_S);
fp = fs_open(fs, fname2[1000], G_READ);
fs_read(fs, output + 1000, 1024, fp);
char fname3[17][3];
for (int i = 0; i < 17; i++)
{
fname3[i][0] = 97 + i;
fname3[i][1] = 97 + i;
fname3[i][2] = '\0';
fp = fs_open(fs, fname3[i], G_WRITE);
fs_write(fs, input + 1024 * i, 1024, fp);
}
fp = fs_open(fs, "EA\0", G_WRITE);
fs_write(fs, input + 1024 * 100, 1024, fp);
fs_gsys(fs, LS_S);
/*
/////////////////////// Bonus Test Case ///////////////
u32 fp = fs_open(fs, "t.txt\0", G_WRITE);
fs_write(fs, input, 64, fp);
fp = fs_open(fs, "b.txt\0", G_WRITE);
fs_write(fs, input + 32, 32, fp);
fp = fs_open(fs, "t.txt\0", G_WRITE);
fs_write(fs, input + 32, 32, fp);
fp = fs_open(fs, "t.txt\0", G_READ);
fs_read(fs, output, 32, fp);
fs_gsys(fs, LS_D);
fs_gsys(fs, LS_S);
fs_gsys(fs, MKDIR, "app\0");
fs_gsys(fs, LS_D);
fs_gsys(fs, LS_S);
fs_gsys(fs, CD, "app\0");
fs_gsys(fs, LS_S);
fp = fs_open(fs, "a.txt\0", G_WRITE);
fs_write(fs, input + 128, 64, fp);
fp = fs_open(fs, "b.txt\0", G_WRITE);
fs_write(fs, input + 256, 32, fp);
fs_gsys(fs, MKDIR, "soft\0");
fs_gsys(fs, LS_S);
fs_gsys(fs, LS_D);
fs_gsys(fs, CD, "soft\0");
fs_gsys(fs, PWD);
fp = fs_open(fs, "A.txt\0", G_WRITE);
fs_write(fs, input + 256, 64, fp);
fp = fs_open(fs, "B.txt\0", G_WRITE);
fs_write(fs, input + 256, 1024, fp);
fp = fs_open(fs, "C.txt\0", G_WRITE);
fs_write(fs, input + 256, 1024, fp);
fp = fs_open(fs, "D.txt\0", G_WRITE);
fs_write(fs, input + 256, 1024, fp);
fs_gsys(fs, LS_S);
fs_gsys(fs, CD_P);
fs_gsys(fs, LS_S);
fs_gsys(fs, PWD);
fs_gsys(fs, CD_P);
fs_gsys(fs, LS_S);
fs_gsys(fs, CD, "app\0");
fs_gsys(fs, RM_RF, "soft\0");
fs_gsys(fs, LS_S);
fs_gsys(fs, CD_P);
fs_gsys(fs, LS_S);
printf("Note: I don't count '\\0' as a character in file name\n");
*/
}
|
522e88a5d81024c31c46e5b200db1da5c08ceb2e.hip | // !!! This is a file automatically generated by hipify!!!
// Rishabh Agarwal - 18JE0676
#include <bits/stdc++.h>
#include <hip/hip_runtime.h>
using namespace std;
// kernel function
__global__ void kernelFunction(int *a, int *b, int *c, int n) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n) {
int idx1 = (idx + 1) % 256;
int idx2 = (idx + 2) % 256;
float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f;
float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f;
c[idx] = (as + bs)/2;
}
}
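// Illustrative check of the kernel's output using the initialization in main()
// (ha[i] = i + 10, hb[i] = i + 200): for idx = 0,
// c[0] = ((10 + 11 + 12) / 3 + (200 + 201 + 202) / 3) / 2 = (11 + 201) / 2 = 106.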
int main( void ) {
hipDeviceProp_t prop;
int whichDevice;
hipGetDevice(&whichDevice);
hipGetDeviceProperties(&prop, whichDevice);
if (!prop.deviceOverlap) {
cout << "Device will not handle overlaps, so no speed up from streams\n";
return 0;
}
if(prop.concurrentKernels == 0) {
cout << "> GPU does not support concurrent kernel execution\n";
cout << " CUDA kernel runs will be serialized\n";
}
if(prop.asyncEngineCount == 0) {
cout << "GPU does not support concurrent Data transer and overlaping of kernel execution & data transfer\n";
cout << "Mem copy call will be blocking calls\n";
}
hipEvent_t start, stop;
float elapsedTime;
int n = 1024*1024;
int maxsize = n*20;
int *ha, *hb, *hc;
int *da0, *db0, *dc0, *da1, *db1, *dc1;
hipStream_t stream0, stream1;
// start the timers
hipEventCreate(&start);
hipEventCreate(&stop);
// initialize the streams
hipStreamCreate(&stream0);
hipStreamCreate(&stream1);
// allocate the memory on the GPU
hipMalloc(&da0, n * sizeof(int));
hipMalloc(&da1, n * sizeof(int));
hipMalloc(&db0, n * sizeof(int));
hipMalloc(&db1, n * sizeof(int));
hipMalloc(&dc0, n * sizeof(int));
hipMalloc(&dc1, n * sizeof(int));
// allocate host locked memory, used to stream
hipHostMalloc((void**)&ha, maxsize * sizeof(int), hipHostMallocDefault);
hipHostMalloc((void**)&hb, maxsize * sizeof(int), hipHostMallocDefault);
hipHostMalloc((void**)&hc, maxsize * sizeof(int), hipHostMallocDefault);
for(int i=0; i < maxsize; i++) {
ha[i] = i + 10;
hb[i] = i + 200;
}
hipEventRecord(start, 0);
for(int i=0; i < maxsize; i += n*2) {
// enqueue copies of a in stream0 and stream1
hipMemcpyAsync(da0, ha + i, n * sizeof(int), hipMemcpyHostToDevice, stream0);
hipMemcpyAsync(da1, ha + i + n, n * sizeof(int), hipMemcpyHostToDevice, stream1);
// enqueue copies of b in stream0 and stream1
hipMemcpyAsync(db0, hb + i, n * sizeof(int), hipMemcpyHostToDevice, stream0);
hipMemcpyAsync(db1, hb + i + n, n * sizeof(int), hipMemcpyHostToDevice, stream1);
// enqueue kernels in stream0 and stream1
hipLaunchKernelGGL(( kernelFunction) , dim3(n/256), dim3(256), 0, stream0 , da0, db0, dc0, n);
hipLaunchKernelGGL(( kernelFunction) , dim3(n/256), dim3(256), 0, stream1 , da1, db1, dc1, n);
// enqueue copies of c from device to locked memory
hipMemcpyAsync(hc + i, dc0, n * sizeof(int), hipMemcpyDeviceToHost, stream0);
hipMemcpyAsync(hc + i + n, dc1, n * sizeof(int), hipMemcpyDeviceToHost, stream1);
}
hipStreamSynchronize(stream0);
hipStreamSynchronize(stream1);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
cout << "Time taken in ms: " << elapsedTime << "\n\n";
// we are printing only up to 20 elements
cout << "Vector A: \n";
for(int i=0; i < 20; i++) {
cout << ha[i] << " ";
}
cout << "\n\n";
cout << "Vector B: \n";
for(int i=0; i < 20; i++) {
cout << hb[i] << " ";
}
cout << "\n\n";
cout <<"After performing operation: C[i] = ((A[i] + A[i+1] + A[i+2]) / 3 + (B[i] + B[i+1] + B[i+2]) / 3) / 2\n";
cout << "Vector C: \n";
for(int i=0; i < 20; i++) {
cout << hc[i] << " ";
}
cout << "\n\n";
hipHostFree(ha);
hipHostFree(hb);
hipHostFree(hc);
hipFree(da0);
hipFree(da1);
hipFree(db0);
hipFree(db1);
hipFree(dc0);
hipFree(dc1);
hipStreamDestroy(stream0);
hipStreamDestroy(stream1);
return 0;
}
| 522e88a5d81024c31c46e5b200db1da5c08ceb2e.cu | // Rishabh Agarwal - 18JE0676
#include <bits/stdc++.h>
#include <cuda.h>
using namespace std;
// kernel function
__global__ void kernelFunction(int *a, int *b, int *c, int n) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n) {
int idx1 = (idx + 1) % 256;
int idx2 = (idx + 2) % 256;
float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f;
float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f;
c[idx] = (as + bs)/2;
}
}
int main( void ) {
cudaDeviceProp prop;
int whichDevice;
cudaGetDevice(&whichDevice);
cudaGetDeviceProperties(&prop, whichDevice);
if (!prop.deviceOverlap) {
cout << "Device will not handle overlaps, so no speed up from streams\n";
return 0;
}
if(prop.concurrentKernels == 0) {
cout << "> GPU does not support concurrent kernel execution\n";
cout << " CUDA kernel runs will be serialized\n";
}
if(prop.asyncEngineCount == 0) {
cout << "GPU does not support concurrent Data transer and overlaping of kernel execution & data transfer\n";
cout << "Mem copy call will be blocking calls\n";
}
cudaEvent_t start, stop;
float elapsedTime;
int n = 1024*1024;
int maxsize = n*20;
int *ha, *hb, *hc;
int *da0, *db0, *dc0, *da1, *db1, *dc1;
cudaStream_t stream0, stream1;
// start the timers
cudaEventCreate(&start);
cudaEventCreate(&stop);
// initialize the streams
cudaStreamCreate(&stream0);
cudaStreamCreate(&stream1);
// allocate the memory on the GPU
cudaMalloc(&da0, n * sizeof(int));
cudaMalloc(&da1, n * sizeof(int));
cudaMalloc(&db0, n * sizeof(int));
cudaMalloc(&db1, n * sizeof(int));
cudaMalloc(&dc0, n * sizeof(int));
cudaMalloc(&dc1, n * sizeof(int));
// allocate host locked memory, used to stream
cudaHostAlloc((void**)&ha, maxsize * sizeof(int), cudaHostAllocDefault);
cudaHostAlloc((void**)&hb, maxsize * sizeof(int), cudaHostAllocDefault);
cudaHostAlloc((void**)&hc, maxsize * sizeof(int), cudaHostAllocDefault);
for(int i=0; i < maxsize; i++) {
ha[i] = i + 10;
hb[i] = i + 200;
}
cudaEventRecord(start, 0);
for(int i=0; i < maxsize; i += n*2) {
// enqueue copies of a in stream0 and stream1
cudaMemcpyAsync(da0, ha + i, n * sizeof(int), cudaMemcpyHostToDevice, stream0);
cudaMemcpyAsync(da1, ha + i + n, n * sizeof(int), cudaMemcpyHostToDevice, stream1);
// enqueue copies of b in stream0 and stream1
cudaMemcpyAsync(db0, hb + i, n * sizeof(int), cudaMemcpyHostToDevice, stream0);
cudaMemcpyAsync(db1, hb + i + n, n * sizeof(int), cudaMemcpyHostToDevice, stream1);
// enqueue kernels in stream0 and stream1
kernelFunction <<< n/256, 256, 0, stream0 >>> (da0, db0, dc0, n);
kernelFunction <<< n/256, 256, 0, stream1 >>> (da1, db1, dc1, n);
// enqueue copies of c from device to locked memory
cudaMemcpyAsync(hc + i, dc0, n * sizeof(int), cudaMemcpyDeviceToHost, stream0);
cudaMemcpyAsync(hc + i + n, dc1, n * sizeof(int), cudaMemcpyDeviceToHost, stream1);
}
cudaStreamSynchronize(stream0);
cudaStreamSynchronize(stream1);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cout << "Time taken in ms: " << elapsedTime << "\n\n";
// we are printing only up to 20 elements
cout << "Vector A: \n";
for(int i=0; i < 20; i++) {
cout << ha[i] << " ";
}
cout << "\n\n";
cout << "Vector B: \n";
for(int i=0; i < 20; i++) {
cout << hb[i] << " ";
}
cout << "\n\n";
cout <<"After performing operation: C[i] = ((A[i] + A[i+1] + A[i+2]) / 3 + (B[i] + B[i+1] + B[i+2]) / 3) / 2\n";
cout << "Vector C: \n";
for(int i=0; i < 20; i++) {
cout << hc[i] << " ";
}
cout << "\n\n";
cudaFreeHost(ha);
cudaFreeHost(hb);
cudaFreeHost(hc);
cudaFree(da0);
cudaFree(da1);
cudaFree(db0);
cudaFree(db1);
cudaFree(dc0);
cudaFree(dc1);
cudaStreamDestroy(stream0);
cudaStreamDestroy(stream1);
return 0;
}
|
3c76eb7d7a5947af4f69f6edfc6d5a4ba5aa8388.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/hip/ForeachFunctors.cuh>
#include <ATen/NumericUtils.h>
namespace at { namespace native {
template<template<class> class Op>
std::vector<Tensor> foreach_pointwise_op(TensorList input, TensorList tensors1, TensorList tensors2, Scalar scalar) {
std::vector<std::vector<at::Tensor>> tensor_lists;
std::vector<at::Tensor> vec_res;
vec_res.reserve(input.size());
for (const auto& t: input) {
vec_res.emplace_back(at::native::empty_like(t));
}
tensor_lists.emplace_back(input.vec());
tensor_lists.emplace_back(tensors1.vec());
tensor_lists.emplace_back(tensors2.vec());
tensor_lists.emplace_back(std::move(vec_res));
AT_DISPATCH_ALL_TYPES_AND(kHalf, input[0].scalar_type(), "foreach_pointwise_op_cuda", [&]() {
using opmath_t = get_opmath_t<scalar_t>::opmath_t;
multi_tensor_apply<4>(tensor_lists,
PointwiseOpScalarFunctor<scalar_t,
/* depth */ 4,
/* r_args_depth */ 3,
/* res_arg_index */ 3>(),
Op<opmath_t>(),
scalar.to<opmath_t>());
});
return tensor_lists[3];
}
template<template<class> class Op>
void foreach_pointwise_op_(TensorList input, TensorList tensors1, TensorList tensors2, Scalar scalar) {
std::vector<std::vector<at::Tensor>> tensor_lists;
tensor_lists.emplace_back(input.vec());
tensor_lists.emplace_back(tensors1.vec());
tensor_lists.emplace_back(tensors2.vec());
AT_DISPATCH_ALL_TYPES_AND(kHalf, input[0].scalar_type(), "foreach_pointwise_op__cuda", [&]() {
using opmath_t = get_opmath_t<scalar_t>::opmath_t;
multi_tensor_apply<3>(tensor_lists,
PointwiseOpScalarFunctor<scalar_t,
/* depth */ 3,
/* r_args_depth */ 3,
/* res_arg_index */ 0>(),
Op<opmath_t>(),
scalar.to<opmath_t>());
});
}
template<template<class> class Op>
void foreach_pointwise_op_(TensorList input, TensorList tensors1, TensorList tensors2, at::ArrayRef<Scalar> scalars) {
std::vector<std::vector<at::Tensor>> tensor_lists;
tensor_lists.reserve(3);
tensor_lists.emplace_back(input.vec());
tensor_lists.emplace_back(tensors1.vec());
tensor_lists.emplace_back(tensors2.vec());
AT_DISPATCH_ALL_TYPES_AND(kHalf, input[0].scalar_type(), "foreach_pointwise_op__cuda", [&]() {
using opmath_t = get_opmath_t<scalar_t>::opmath_t;
multi_tensor_apply<3, opmath_t>(tensor_lists,
scalars,
PointwiseOpScalarListFunctor<scalar_t,
/* depth */ 3,
/* r_args_depth */ 3,
/* res_arg_index */ 0>(),
Op<opmath_t>());
});
}
template<template<class> class Op>
std::vector<Tensor> foreach_pointwise_op(TensorList input, TensorList tensors1, TensorList tensors2, at::ArrayRef<Scalar> scalars) {
std::vector<std::vector<at::Tensor>> tensor_lists;
tensor_lists.reserve(4);
std::vector<at::Tensor> vec_res;
vec_res.reserve(input.size());
for (const auto& t: input) {
vec_res.emplace_back(at::native::empty_like(t));
}
tensor_lists.emplace_back(input.vec());
tensor_lists.emplace_back(tensors1.vec());
tensor_lists.emplace_back(tensors2.vec());
tensor_lists.emplace_back(std::move(vec_res));
AT_DISPATCH_ALL_TYPES_AND(kHalf, input[0].scalar_type(), "foreach_pointwise_op_cuda", [&]() {
using opmath_t = get_opmath_t<scalar_t>::opmath_t;
multi_tensor_apply<4, opmath_t>(tensor_lists,
scalars,
PointwiseOpScalarListFunctor<scalar_t,
/* depth */ 4,
/* r_args_depth */ 3,
/* res_arg_index */ 3>(),
Op<opmath_t>());
});
return tensor_lists[3];
}
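// Illustrative note: the FOREACH_POINTWISE_OP_* macros below generate wrappers such as
// foreach_tensor_addcmul_scalar_cuda, which forward to these templates when the fast
// multi_tensor_apply route is usable and fall back to the slow per-tensor implementations otherwise.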
#define FOREACH_POINTWISE_OP_SCALAR(NAME, OP) \
std::vector<Tensor> foreach_tensor_##NAME##_scalar_cuda(TensorList input, TensorList tensors1, TensorList tensors2, Scalar scalar) { \
check_foreach_api_restrictions(input, tensors1, tensors2); \
\
if (!can_use_fast_route(input, tensors1, tensors2, scalar)) { \
return at::native::foreach_tensor_##NAME##_scalar_slow(input, tensors1, tensors2, scalar); \
} \
\
return foreach_pointwise_op<OP>(input, tensors1, tensors2, scalar); \
} \
\
void foreach_tensor_##NAME##_scalar_cuda_(TensorList input, TensorList tensors1, TensorList tensors2, Scalar scalar) { \
check_foreach_api_restrictions(input, tensors1, tensors2); \
\
if (!can_use_fast_route(input, tensors1, tensors2, scalar)) { \
return at::native::foreach_tensor_##NAME##_scalar_slow_(input, tensors1, tensors2, scalar); \
} \
\
foreach_pointwise_op_<OP>(input, tensors1, tensors2, scalar); \
}
#define FOREACH_POINTWISE_OP_SCALARLIST(NAME, OP) \
std::vector<Tensor> foreach_tensor_##NAME##_scalarlist_cuda(TensorList input, TensorList tensors1, TensorList tensors2, at::ArrayRef<Scalar> scalars) { \
check_foreach_api_restrictions(input, tensors1, tensors2, scalars); \
\
if (!can_use_fast_route(input, tensors1, tensors2, scalars)) { \
return at::native::foreach_tensor_##NAME##_scalarlist_slow(input, tensors1, tensors2, scalars); \
} \
\
return foreach_pointwise_op<OP>(input, tensors1, tensors2, scalars); \
} \
\
void foreach_tensor_##NAME##_scalarlist_cuda_(TensorList input, TensorList tensors1, TensorList tensors2, at::ArrayRef<Scalar> scalars) { \
check_foreach_api_restrictions(input, tensors1, tensors2, scalars); \
\
if (!can_use_fast_route(input, tensors1, tensors2, scalars)) { \
return at::native::foreach_tensor_##NAME##_scalarlist_slow_(input, tensors1, tensors2, scalars); \
} \
\
foreach_pointwise_op_<OP>(input, tensors1, tensors2, scalars); \
}
FOREACH_POINTWISE_OP_SCALAR(addcmul, std::multiplies);
FOREACH_POINTWISE_OP_SCALAR(addcdiv, std::divides);
FOREACH_POINTWISE_OP_SCALARLIST(addcmul, std::multiplies);
FOREACH_POINTWISE_OP_SCALARLIST(addcdiv, std::divides);
#define FOREACH_MAXIMUM_MINIMUM_OP(NAME, OP) \
std::vector<Tensor> foreach_tensor_##NAME##_cuda(TensorList tensors1, TensorList tensors2) { \
check_foreach_api_restrictions(tensors1, tensors2); \
if (!can_use_fast_route(tensors1, tensors2)) { \
return at::native::foreach_tensor_##NAME##_slow(tensors1, tensors2); \
} \
\
std::vector<std::vector<at::Tensor>> tensor_lists; \
std::vector<at::Tensor> vec_res; \
vec_res.reserve(tensors1.size()); \
for (const auto& t: tensors1) { \
vec_res.emplace_back(at::native::empty_like(t)); \
} \
\
tensor_lists.emplace_back(tensors1.vec()); \
tensor_lists.emplace_back(tensors2.vec()); \
tensor_lists.emplace_back(std::move(vec_res)); \
\
AT_DISPATCH_ALL_TYPES_AND(kHalf, tensors1[0].scalar_type(), "foreach_maximum_minimum_op_cuda", [&]() { \
using opmath_t = get_opmath_t<scalar_t>::opmath_t; \
auto op = [] GPU_LAMBDA (opmath_t a, opmath_t b) -> opmath_t { \
opmath_t c = a OP b ? a : b; \
if (_isnan(a)) { \
c = a; \
} \
return c;}; \
multi_tensor_apply<3>(tensor_lists, \
PointwiseOpListFunctor<scalar_t, 3>(), \
op); \
}); \
\
return tensor_lists[2]; \
} \
FOREACH_MAXIMUM_MINIMUM_OP(maximum, >)
FOREACH_MAXIMUM_MINIMUM_OP(minimum, <)
}} // namespace at::native
| 3c76eb7d7a5947af4f69f6edfc6d5a4ba5aa8388.cu | #include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/cuda/ForeachFunctors.cuh>
#include <ATen/NumericUtils.h>
namespace at { namespace native {
template<template<class> class Op>
std::vector<Tensor> foreach_pointwise_op(TensorList input, TensorList tensors1, TensorList tensors2, Scalar scalar) {
std::vector<std::vector<at::Tensor>> tensor_lists;
std::vector<at::Tensor> vec_res;
vec_res.reserve(input.size());
for (const auto& t: input) {
vec_res.emplace_back(at::native::empty_like(t));
}
tensor_lists.emplace_back(input.vec());
tensor_lists.emplace_back(tensors1.vec());
tensor_lists.emplace_back(tensors2.vec());
tensor_lists.emplace_back(std::move(vec_res));
AT_DISPATCH_ALL_TYPES_AND(kHalf, input[0].scalar_type(), "foreach_pointwise_op_cuda", [&]() {
using opmath_t = get_opmath_t<scalar_t>::opmath_t;
multi_tensor_apply<4>(tensor_lists,
PointwiseOpScalarFunctor<scalar_t,
/* depth */ 4,
/* r_args_depth */ 3,
/* res_arg_index */ 3>(),
Op<opmath_t>(),
scalar.to<opmath_t>());
});
return tensor_lists[3];
}
template<template<class> class Op>
void foreach_pointwise_op_(TensorList input, TensorList tensors1, TensorList tensors2, Scalar scalar) {
std::vector<std::vector<at::Tensor>> tensor_lists;
tensor_lists.emplace_back(input.vec());
tensor_lists.emplace_back(tensors1.vec());
tensor_lists.emplace_back(tensors2.vec());
AT_DISPATCH_ALL_TYPES_AND(kHalf, input[0].scalar_type(), "foreach_pointwise_op__cuda", [&]() {
using opmath_t = get_opmath_t<scalar_t>::opmath_t;
multi_tensor_apply<3>(tensor_lists,
PointwiseOpScalarFunctor<scalar_t,
/* depth */ 3,
/* r_args_depth */ 3,
/* res_arg_index */ 0>(),
Op<opmath_t>(),
scalar.to<opmath_t>());
});
}
template<template<class> class Op>
void foreach_pointwise_op_(TensorList input, TensorList tensors1, TensorList tensors2, at::ArrayRef<Scalar> scalars) {
std::vector<std::vector<at::Tensor>> tensor_lists;
tensor_lists.reserve(3);
tensor_lists.emplace_back(input.vec());
tensor_lists.emplace_back(tensors1.vec());
tensor_lists.emplace_back(tensors2.vec());
AT_DISPATCH_ALL_TYPES_AND(kHalf, input[0].scalar_type(), "foreach_pointwise_op__cuda", [&]() {
using opmath_t = get_opmath_t<scalar_t>::opmath_t;
multi_tensor_apply<3, opmath_t>(tensor_lists,
scalars,
PointwiseOpScalarListFunctor<scalar_t,
/* depth */ 3,
/* r_args_depth */ 3,
/* res_arg_index */ 0>(),
Op<opmath_t>());
});
}
template<template<class> class Op>
std::vector<Tensor> foreach_pointwise_op(TensorList input, TensorList tensors1, TensorList tensors2, at::ArrayRef<Scalar> scalars) {
std::vector<std::vector<at::Tensor>> tensor_lists;
tensor_lists.reserve(4);
std::vector<at::Tensor> vec_res;
vec_res.reserve(input.size());
for (const auto& t: input) {
vec_res.emplace_back(at::native::empty_like(t));
}
tensor_lists.emplace_back(input.vec());
tensor_lists.emplace_back(tensors1.vec());
tensor_lists.emplace_back(tensors2.vec());
tensor_lists.emplace_back(std::move(vec_res));
AT_DISPATCH_ALL_TYPES_AND(kHalf, input[0].scalar_type(), "foreach_pointwise_op_cuda", [&]() {
using opmath_t = get_opmath_t<scalar_t>::opmath_t;
multi_tensor_apply<4, opmath_t>(tensor_lists,
scalars,
PointwiseOpScalarListFunctor<scalar_t,
/* depth */ 4,
/* r_args_depth */ 3,
/* res_arg_index */ 3>(),
Op<opmath_t>());
});
return tensor_lists[3];
}
#define FOREACH_POINTWISE_OP_SCALAR(NAME, OP) \
std::vector<Tensor> foreach_tensor_##NAME##_scalar_cuda(TensorList input, TensorList tensors1, TensorList tensors2, Scalar scalar) { \
check_foreach_api_restrictions(input, tensors1, tensors2); \
\
if (!can_use_fast_route(input, tensors1, tensors2, scalar)) { \
return at::native::foreach_tensor_##NAME##_scalar_slow(input, tensors1, tensors2, scalar); \
} \
\
return foreach_pointwise_op<OP>(input, tensors1, tensors2, scalar); \
} \
\
void foreach_tensor_##NAME##_scalar_cuda_(TensorList input, TensorList tensors1, TensorList tensors2, Scalar scalar) { \
check_foreach_api_restrictions(input, tensors1, tensors2); \
\
if (!can_use_fast_route(input, tensors1, tensors2, scalar)) { \
return at::native::foreach_tensor_##NAME##_scalar_slow_(input, tensors1, tensors2, scalar); \
} \
\
foreach_pointwise_op_<OP>(input, tensors1, tensors2, scalar); \
}
#define FOREACH_POINTWISE_OP_SCALARLIST(NAME, OP) \
std::vector<Tensor> foreach_tensor_##NAME##_scalarlist_cuda(TensorList input, TensorList tensors1, TensorList tensors2, at::ArrayRef<Scalar> scalars) { \
check_foreach_api_restrictions(input, tensors1, tensors2, scalars); \
\
if (!can_use_fast_route(input, tensors1, tensors2, scalars)) { \
return at::native::foreach_tensor_##NAME##_scalarlist_slow(input, tensors1, tensors2, scalars); \
} \
\
return foreach_pointwise_op<OP>(input, tensors1, tensors2, scalars); \
} \
\
void foreach_tensor_##NAME##_scalarlist_cuda_(TensorList input, TensorList tensors1, TensorList tensors2, at::ArrayRef<Scalar> scalars) { \
check_foreach_api_restrictions(input, tensors1, tensors2, scalars); \
\
if (!can_use_fast_route(input, tensors1, tensors2, scalars)) { \
return at::native::foreach_tensor_##NAME##_scalarlist_slow_(input, tensors1, tensors2, scalars); \
} \
\
foreach_pointwise_op_<OP>(input, tensors1, tensors2, scalars); \
}
FOREACH_POINTWISE_OP_SCALAR(addcmul, std::multiplies);
FOREACH_POINTWISE_OP_SCALAR(addcdiv, std::divides);
FOREACH_POINTWISE_OP_SCALARLIST(addcmul, std::multiplies);
FOREACH_POINTWISE_OP_SCALARLIST(addcdiv, std::divides);
#define FOREACH_MAXIMUM_MINIMUM_OP(NAME, OP) \
std::vector<Tensor> foreach_tensor_##NAME##_cuda(TensorList tensors1, TensorList tensors2) { \
check_foreach_api_restrictions(tensors1, tensors2); \
if (!can_use_fast_route(tensors1, tensors2)) { \
return at::native::foreach_tensor_##NAME##_slow(tensors1, tensors2); \
} \
\
std::vector<std::vector<at::Tensor>> tensor_lists; \
std::vector<at::Tensor> vec_res; \
vec_res.reserve(tensors1.size()); \
for (const auto& t: tensors1) { \
vec_res.emplace_back(at::native::empty_like(t)); \
} \
\
tensor_lists.emplace_back(tensors1.vec()); \
tensor_lists.emplace_back(tensors2.vec()); \
tensor_lists.emplace_back(std::move(vec_res)); \
\
AT_DISPATCH_ALL_TYPES_AND(kHalf, tensors1[0].scalar_type(), "foreach_maximum_minimum_op_cuda", [&]() { \
using opmath_t = get_opmath_t<scalar_t>::opmath_t; \
auto op = [] GPU_LAMBDA (opmath_t a, opmath_t b) -> opmath_t { \
opmath_t c = a OP b ? a : b; \
if (_isnan(a)) { \
c = a; \
} \
return c;}; \
multi_tensor_apply<3>(tensor_lists, \
PointwiseOpListFunctor<scalar_t, 3>(), \
op); \
}); \
\
return tensor_lists[2]; \
} \
FOREACH_MAXIMUM_MINIMUM_OP(maximum, >)
FOREACH_MAXIMUM_MINIMUM_OP(minimum, <)
}} // namespace at::native
|
be2a9e83a3b73a80b128799bba62574d6056be38.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file square_sum.cu
* \brief GPU Implementation of square_sum op.
*/
#include "./square_sum-inl.h"
namespace mxnet {
namespace op {
template<>
void CheckSameIdx<gpu>(const OpContext& ctx,
const TBlob& ograd_row_idx,
const TBlob& in_row_idx) {
MSHADOW_IDX_TYPE_SWITCH(ograd_row_idx.type_flag_, IType, {
mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
const IType* ograd_idx = ograd_row_idx.dptr<IType>();
const IType* in_idx = in_row_idx.dptr<IType>();
const nnvm::dim_t idx_size = ograd_row_idx.Size();
int32_t is_diff = 0;
mshadow::Tensor<gpu, 1, char> workspace = ctx.requested[0]
.get_space_typed<gpu, 1, char>(mshadow::Shape1(sizeof(int32_t)), s);
int32_t* is_diff_ptr = reinterpret_cast<int32_t*>(workspace.dptr_);
mxnet_op::Kernel<mxnet_op::set_zero, gpu>::Launch(s, 1, is_diff_ptr);
mxnet_op::Kernel<CheckSameIdxKernel, gpu>::Launch(s, idx_size,
ograd_idx, in_idx, is_diff_ptr);
CUDA_CALL(hipMemcpyAsync(&is_diff, is_diff_ptr, sizeof(int32_t),
hipMemcpyDeviceToHost, mshadow::Stream<gpu>::GetStream(s)));
CUDA_CALL(hipStreamSynchronize(mshadow::Stream<gpu>::GetStream(s)));
CHECK_EQ(is_diff, 0) << "SquareSumRspGradImpl only supports"
" equal ograd_row_idx and input_row_idx"
" when ograd and input are both"
" row-sparse and input data is not a full"
" row-sparse matrix";
})
}
NNVM_REGISTER_OP(_square_sum)
.set_attr<FComputeEx>("FComputeEx<gpu>", SquareSumOpForwardEx<gpu>);
NNVM_REGISTER_OP(_backward_square_sum)
.set_attr<FComputeEx>("FComputeEx<gpu>", SquareSumOpBackwardEx<gpu>);
} // namespace op
} // namespace mxnet
| be2a9e83a3b73a80b128799bba62574d6056be38.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file square_sum.cu
* \brief GPU Implementation of square_sum op.
*/
#include "./square_sum-inl.h"
namespace mxnet {
namespace op {
template<>
void CheckSameIdx<gpu>(const OpContext& ctx,
const TBlob& ograd_row_idx,
const TBlob& in_row_idx) {
MSHADOW_IDX_TYPE_SWITCH(ograd_row_idx.type_flag_, IType, {
mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
const IType* ograd_idx = ograd_row_idx.dptr<IType>();
const IType* in_idx = in_row_idx.dptr<IType>();
const nnvm::dim_t idx_size = ograd_row_idx.Size();
int32_t is_diff = 0;
mshadow::Tensor<gpu, 1, char> workspace = ctx.requested[0]
.get_space_typed<gpu, 1, char>(mshadow::Shape1(sizeof(int32_t)), s);
int32_t* is_diff_ptr = reinterpret_cast<int32_t*>(workspace.dptr_);
mxnet_op::Kernel<mxnet_op::set_zero, gpu>::Launch(s, 1, is_diff_ptr);
mxnet_op::Kernel<CheckSameIdxKernel, gpu>::Launch(s, idx_size,
ograd_idx, in_idx, is_diff_ptr);
CUDA_CALL(cudaMemcpyAsync(&is_diff, is_diff_ptr, sizeof(int32_t),
cudaMemcpyDeviceToHost, mshadow::Stream<gpu>::GetStream(s)));
CUDA_CALL(cudaStreamSynchronize(mshadow::Stream<gpu>::GetStream(s)));
CHECK_EQ(is_diff, 0) << "SquareSumRspGradImpl only supports"
" equal ograd_row_idx and input_row_idx"
" when ograd and input are both"
" row-sparse and input data is not a full"
" row-sparse matrix";
})
}
NNVM_REGISTER_OP(_square_sum)
.set_attr<FComputeEx>("FComputeEx<gpu>", SquareSumOpForwardEx<gpu>);
NNVM_REGISTER_OP(_backward_square_sum)
.set_attr<FComputeEx>("FComputeEx<gpu>", SquareSumOpBackwardEx<gpu>);
} // namespace op
} // namespace mxnet
|
64fec70dc018978d4f63b7bb915815b6ff8fed03.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "assisted_activation_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float alpha = 2;
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
float *gt_gpu = NULL;
hipMalloc(&gt_gpu, XSIZE*YSIZE);
float *a_avg_gpu = NULL;
hipMalloc(&a_avg_gpu, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int channels = 1;
int batches = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
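// Example from the tables above: XSIZE = 240 with BLOCKX = 8 already satisfies
// 240 % 8 == 0, so gridBlock.x = 240 / 8 = 30 (and likewise for the Y dimension).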
hipFree(0);
hipLaunchKernelGGL(( assisted_activation_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, alpha, output, gt_gpu, a_avg_gpu, size, channels, batches);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( assisted_activation_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, alpha, output, gt_gpu, a_avg_gpu, size, channels, batches);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( assisted_activation_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, alpha, output, gt_gpu, a_avg_gpu, size, channels, batches);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 64fec70dc018978d4f63b7bb915815b6ff8fed03.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "assisted_activation_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float alpha = 2;
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
float *gt_gpu = NULL;
cudaMalloc(&gt_gpu, XSIZE*YSIZE);
float *a_avg_gpu = NULL;
cudaMalloc(&a_avg_gpu, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int channels = 1;
int batches = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
assisted_activation_kernel<<<gridBlock,threadBlock>>>(alpha,output,gt_gpu,a_avg_gpu,size,channels,batches);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
assisted_activation_kernel<<<gridBlock,threadBlock>>>(alpha,output,gt_gpu,a_avg_gpu,size,channels,batches);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
assisted_activation_kernel<<<gridBlock,threadBlock>>>(alpha,output,gt_gpu,a_avg_gpu,size,channels,batches);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
b185d2f83a4262408338116add0f89a946926e11.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cassert>
#include <functional>
#include "unified.h"
#include "lock.h"
#include "graph.h"
#include "pool.h"
#include "state.h"
#include "unified_vector.h"
void square_host(void* xv) {
State* state = static_cast<State*>(xv);
state->x = (state->x) * (state->x);
}
__global__ void pool_kernel(Pool* pool) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
size_t size = pool->checked_out_tasks.size();
if (tid < size) {
pool->checked_out_tasks[tid]->advance();
}
}
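// A minimal launch sketch for pool_kernel (hypothetical names: d_pool is a Pool* visible to
// the device, e.g. unified memory, and num_tasks is the size of its checked_out_tasks):
// int threads = 256;
// int blocks = (num_tasks + threads - 1) / threads;
// hipLaunchKernelGGL(pool_kernel, dim3(blocks), dim3(threads), 0, 0, d_pool);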
void Graph::advance(UnifiedVector<State*>& advance_states) {
std::cout << "in advance ..." << std::endl;
std::function<bool(State*)> test;
Pool* p;
test = [=](State* s) -> bool {return s->status == 0;};
p = device_task_pools[0];
std::cout << "calling checkin" << std::endl;
p->checkin(&advance_states, test);
test = [=](State* s) -> bool {return s->status == 1;};
p = device_task_pools[1];
p->checkin(&advance_states, test);
test = [=](State* s) -> bool {return s->status == 2;};
p = device_task_pools[2];
// p = host_task_pools[0];
p->checkin(&advance_states, test);
std::cout << "leaving advance ..." << std::endl;
}
int main(int argc, char* argv[]) {
State* state = NULL;
int size = 1000;
hipError_t cuda_status = hipSuccess;
cuda_status = hipDeviceSynchronize();
std::cout << "device status before Graph: " << hipGetErrorString(cuda_status) << std::endl;
cuda_status = hipMallocManaged(&state, sizeof(State)*size);
assert(cuda_status == hipSuccess);
// create a Graph with 1 host task pool and 2 device task pools
Graph* task_graph = new Graph(size, 0, 3);
//Graph* task_graph = new Graph(size, 1, 2);
cuda_status = hipDeviceSynchronize();
std::cout << "device status after Graph: " << hipGetErrorString(cuda_status) << std::endl;
for (int i = 0; i < size; i++) {
state[i].x = 2.0;
if (i > 200 && i < 700)
state[i].status = 1;
else if (i <= 200)
state[i].status = 0;
else
state[i].status = 2;
task_graph->queue(&(state[i]));
}
task_graph->execute_graph();
double xsum = 0.0;
for (int i = 0; i < size; i++) {
xsum += state[i].x;
}
std::cout << "sum of elementwise squared cubed squared x is: " << xsum << std::endl;
if (xsum == 4096*size) std::cout << "SUCCESS!" << std::endl;
else std::cout << "ERROR!" << std::endl;
return 0;
}
| b185d2f83a4262408338116add0f89a946926e11.cu | #include <iostream>
#include <cassert>
#include <functional>
#include "unified.h"
#include "lock.h"
#include "graph.h"
#include "pool.h"
#include "state.h"
#include "unified_vector.h"
void square_host(void* xv) {
State* state = static_cast<State*>(xv);
state->x = (state->x) * (state->x);
}
__global__ void pool_kernel(Pool* pool) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
size_t size = pool->checked_out_tasks.size();
if (tid < size) {
pool->checked_out_tasks[tid]->advance();
}
}
void Graph::advance(UnifiedVector<State*>& advance_states) {
std::cout << "in advance ..." << std::endl;
std::function<bool(State*)> test;
Pool* p;
test = [=](State* s) -> bool {return s->status == 0;};
p = device_task_pools[0];
std::cout << "calling checkin" << std::endl;
p->checkin(&advance_states, test);
test = [=](State* s) -> bool {return s->status == 1;};
p = device_task_pools[1];
p->checkin(&advance_states, test);
test = [=](State* s) -> bool {return s->status == 2;};
p = device_task_pools[2];
// p = host_task_pools[0];
p->checkin(&advance_states, test);
std::cout << "leaving advance ..." << std::endl;
}
int main(int argc, char* argv[]) {
State* state = NULL;
int size = 1000;
cudaError_t cuda_status = cudaSuccess;
cuda_status = cudaDeviceSynchronize();
std::cout << "device status before Graph: " << cudaGetErrorString(cuda_status) << std::endl;
cuda_status = cudaMallocManaged(&state, sizeof(State)*size);
assert(cuda_status == cudaSuccess);
// create a Graph with 1 host task pool and 2 device task pools
Graph* task_graph = new Graph(size, 0, 3);
//Graph* task_graph = new Graph(size, 1, 2);
cuda_status = cudaDeviceSynchronize();
std::cout << "device status after Graph: " << cudaGetErrorString(cuda_status) << std::endl;
for (int i = 0; i < size; i++) {
state[i].x = 2.0;
if (i > 200 && i < 700)
state[i].status = 1;
else if (i <= 200)
state[i].status = 0;
else
state[i].status = 2;
task_graph->queue(&(state[i]));
}
task_graph->execute_graph();
double xsum = 0.0;
for (int i = 0; i < size; i++) {
xsum += state[i].x;
}
std::cout << "sum of elementwise squared cubed squared x is: " << xsum << std::endl;
if (xsum == 4096*size) std::cout << "SUCCESS!" << std::endl;
else std::cout << "ERROR!" << std::endl;
return 0;
}
|
b2eb7e91deb06dc2eca970a2fe1282fb3df5121e.hip | // !!! This is a file automatically generated by hipify!!!
// Didymos Optimization Project using CUDA and a genetic algorithm
//TODO: Clarify complexities of the include paths
//TODO: What / why we are including
#include "../Earth_calculations/earthInfo.h" // For launchCon and EarthInfo()
#include "../Genetic_Algorithm/individuals.h" // For individual structs, paths to rkParameters for randomParameters()
#include "../Output_Funcs/output.h" // For terminalDisplay(), recordGenerationPerformance(), and finalRecord()
#include "../Runge_Kutta/runge_kuttaCUDA.cuh" // for testing rk4simple
#include "../Genetic_Algorithm/ga_crossover.h" // for selectSurvivors() and newGeneration()
#include <iostream> // cout
#include <iomanip> // used for setw(), sets spaces between values output
#include <random> // for std::mt19937_64 object
// Used to see if the best individual is changing when compared to a previous individual across generations
// Returns true if the currentBest is not equal to previousBest within a distinguishable difference
// Input: previousBestPos - Position difference at the end of RK simulation, in AU
// previousBestVel - Velocity difference, in AU/s (currently not implemented)
// currentBest - 'best' individual from current run, based on how individuals are sorted
// distinguishRate - magnitude of difference
// Called inside of optimize to see if anneal rate needs to change
bool changeInBest(double previousBestPos, double previousBestVel, const Individual & currentBest, double distinguishRate) {
// trunc is used here to compare doubles scaled by distinguishRate, to ensure that there has been effectively no change.
if (trunc(previousBestPos/distinguishRate) != trunc(currentBest.posDiff/distinguishRate)) {
return true;
}
else {
/* //Used if Velocity should be considered
if (trunc(previousBestVel/distinguishRate) != trunc(currentBest.velDiff/distinguishRate)) {
return true;
}
else return false;
*/
return false;
}
}
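// Worked example for changeInBest (illustrative numbers): with dRate = 1.0e-8, a previous
// posDiff of 1.23e-7 truncates to 12 and a current posDiff of 1.26e-7 also truncates to 12,
// so no change is reported; a current posDiff of 1.31e-7 truncates to 13 and counts as a change.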
// ** Assumes pool is sorted array of Individuals **
// Used in determining if main optimize loop continues
// Input: tolerance - posDiff threshold, determines max target distance
// pool - this generation of Individuals, defined/initilized in optimimize
// cConstants - struct holding config values, used for accessing best_count value
// Output: Returns true if top best_count individuals within the pool are within the tolerance
bool allWithinTolerance(double tolerance, Individual * pool, const cudaConstants* cConstants) {
// Iterate to check best_count number of 'top' individuals
for (int i = 0; i < cConstants->best_count; i++) {
if(pool[i].getCost(cConstants) >= tolerance) {
//One was not within tolerance
return false;
}
}
// If iterated through and all were within tolerance, success
return true;
}
// Main processing function for Genetic Algorithm
// - manages memory needs for genetic algorithm
// - deals with processing calls to CUDA callRK
// - exits when individuals converge on tolerance defined in Constants
double optimize(const cudaConstants* cConstants) {
// Not used, previously used for reporting computational performance
double calcPerS = 0;
time_t timeSeed = cConstants->time_seed;
std::mt19937_64 rng(timeSeed); // This rng object is used for generating all random numbers in the genetic algorithm, passed in to functions that need it
std::cout << "----------------------------------------------------------------------------------------------------" << std::endl;
// Initialize the recording files if in record mode
if (cConstants->record_mode == true) {
initializeRecord(cConstants);
}
// input parameters for Runge Kutta process
// Each parameter is the same for each thread on the GPU
double timeInitial = 0; // the starting time of the trip is always defined as zero
// Runge Kutta adaptive time step error tolerance
double absTol = cConstants->rk_tol;
// the starting step size for RK run
// - note that the current step size varies throughout each run
//TODO: Should this be based on max_numsteps?
double stepSize = (orbitalPeriod - timeInitial) / cConstants->GuessMaxPossibleSteps;
// Initial genetic anneal scalar
double currentAnneal = cConstants->anneal_initial;
// Main set of parameters for Genetic Algorithm
// contains all thread unique input parameters
Individual *inputParameters = new Individual[cConstants->num_individuals];
// set to zero to force difference in first generation
double previousBestPos = 0;
double previousBestVel = 0;
// Initialize individuals randomly or from a file
if (cConstants->random_start) {
// individuals set to randomly generated, but reasonable, parameters
for (int i = 0; i < cConstants->num_individuals; i++) {
inputParameters[i] = Individual(randomParameters(rng, cConstants), cConstants);
}
}
// Read from file using cConstants initial_start_file_address to get path
else {
// **Might be deprecated, not tested summer 2020**
// Sets inputParameters to hold initial individuals read from the file optimizedVector.bin
const int numStarts = 14; // the number of different sets of starting parameters in the input file
std::ifstream starts;
starts.open(cConstants->initial_start_file_address, std::ifstream::in|std::ios::binary); // a file containing the final parameters of converged results from CPU calculations
// sort the data into 2 dimensions
// one row is one set of starting parameters
// each column is a specific variable:
double startDoubles;
// arrayCPU needs to be updated to handle the fact that OPTIM_VARS may be flexible
double arrayCPU[numStarts][OPTIM_VARS];
for (int i = 0; i < OPTIM_VARS; i++) { // rows
for (int j = 0; j < numStarts; j++) { // columns
starts.read( reinterpret_cast<char*>( &startDoubles ), sizeof startDoubles );
arrayCPU[j][i] = startDoubles;
}
}
starts.close();
// set every thread's input parameters to a set of final values from CPU calculations for use as a good starting point
for (int i = 0; i < cConstants->num_individuals; i++) {
int row = rng() % numStarts; // Choose a random row to get the parameters from
double tripTime = arrayCPU[row][TRIPTIME_OFFSET];
double alpha = arrayCPU[row][ALPHA_OFFSET];
double beta = arrayCPU[row][BETA_OFFSET];
double zeta = arrayCPU[row][ZETA_OFFSET];
coefficients<double> testcoeff;
for (int j = 0; j < testcoeff.gammaSize; j++) {
testcoeff.gamma[j] = arrayCPU[row][j + GAMMA_OFFSET];
}
for (int j = 0; j < testcoeff.tauSize; j++) {
testcoeff.tau[j] = arrayCPU[row][j + TAU_OFFSET];
}
for (int j = 0; j < testcoeff.coastSize; j++) {
testcoeff.coast[j] = arrayCPU[row][j + COAST_OFFSET];
}
rkParameters<double> example(tripTime, alpha, beta, zeta, testcoeff);
inputParameters[i] = Individual(example, cConstants);
}
}
// Collection of individuals used in the genetic selection process
// - filled in selectSurvivors, based on callRK output
// - stores the winners of the head-to-head competition
Individual *survivors = new Individual[cConstants->survivor_count];
// Number of individuals that need to be evaluated
// - the whole population is evaluated in the first loop
// - subsequent generations only calculate *new* individuals
int newInd = cConstants->num_individuals;
// number of current generation
double generation = 0;
// how far away the best individual is from the tolerance value
double currentDistance;
// Genetic solution tolerance
// - (currently just the position threshold which is furthest distance from the target allowed)
// - could eventually take into account velocity too and become a more complex calculation
double tolerance = cConstants->pos_threshold;
// distinguishable rate used in changeInBest()
// - used to help check for a change in anneal
// - Gets smaller when no change is detected
double dRate = 1.0e-8;
// Flag for finishing the genetic process
// set by allWithinTolerance()
bool convergence = false;
// main genetic algorithm loop
// - continues until allWithinTolerance returns true (specific number of individuals are within threshold)
do {
// each inputParameter represents an individual set of starting parameters
// GPU based runge kutta process determines final position and velocity based on parameters
// newInd - how many individuals that are *new* that need to be evaluated
// - All individuals first generation
// - only new individuals, from crossover, in subsequent generations
// (inputParameters + (cConstants->num_individuals - newInd)) value accesses the start of the section of the inputParameters array that contains new individuals
callRK(newInd, cConstants->thread_block_size, inputParameters + (cConstants->num_individuals - newInd), timeInitial, stepSize, absTol, calcPerS, cConstants); // calculate trajectories for new individuals
// if we got bad results reset the Individual to random starting values (it may still be used for crossover) and set the final position to be way off so it gets replaced by a new Individual
for (int k = 0; k < cConstants->num_individuals; k++) {
//Checking each individuals final position for NaNs
if (isnan(inputParameters[k].finalPos.r) || isnan(inputParameters[k].finalPos.theta) || isnan(inputParameters[k].finalPos.z) || isnan(inputParameters[k].finalPos.vr) || isnan(inputParameters[k].finalPos.vtheta) || isnan(inputParameters[k].finalPos.vz)) {
std::cout << std::endl << std::endl << "NAN FOUND" << std::endl << std::endl;
inputParameters[k] = Individual(randomParameters(rng, cConstants), cConstants);
// Set to be a bad individual by giving it bad posDiff and velDiffs
// therefore also having a bad cost value
// won't be promoted in crossover
inputParameters[k].posDiff = 1.0;
inputParameters[k].velDiff = 0.0;
// calculate its new cost function based on 'bad' differences
inputParameters[k].getCost(cConstants);
}
}
// Preparing survivor pool with individuals for the newGeneration crossover
// Survivor pool contains:
// - individuals with best PosDiff
// - individuals with best velDiffs
// - depends on cConstants->survivorRatio (0.1 is 10% are best PosDiff for example)
// inputParameters is left sorted by individuals with best velDiffs
selectSurvivors(inputParameters, cConstants->num_individuals, cConstants->survivor_count, survivors, cConstants->survivorRatio); // Choose which individuals are in survivors, current method selects half to be best posDiff and other half to be best velDiff
// sort individuals based on overloaded relational operators
// gives reference of which to replace and which to carry to the next generation
std::sort(inputParameters, inputParameters + cConstants->num_individuals);
// Display a '.' to the terminal to show that a generation has been performed
// This also serves to visually separate the terminalDisplay() calls across generations
std::cout << '.';
// Calculate how far best individual is from the ideal cost value (currently is the positionalDifference of the best individual)
// TODO: Change this later to take into account more than just the best individual and its position difference
currentDistance = inputParameters[0].posDiff;
// Scaling anneal based on proximity to tolerance
// Far away: larger anneal scale, close: smaller anneal
double new_anneal = currentAnneal * (1 - tolerance / currentDistance);
//Process to see if anneal needs to be adjusted
// If generations are stale, anneal drops
Individual currentBest;
// Compare current best individual to that from CHANGE_CHECK many generations ago.
// If they are the same, change size of mutations
if (static_cast<int>(generation) % cConstants->change_check == 0) {
currentBest = inputParameters[0];
// checks for anneal to change
// previousBest starts at 0 to ensure changeInBest = true on generation 0
if ( !(changeInBest(previousBestPos, previousBestVel, currentBest, dRate)) ) {
//this ensures that changeInBest never compares two zeros, keeping dRate relevant as posDiff decreases
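          // e.g. (illustrative numbers): with posDiff = 3.7e-10 and dRate = 1e-8, trunc(posDiff/dRate) = 0,
          // so dRate is divided down to 1e-9 and then 1e-10, where trunc(3.7) = 3 and the comparison is meaningful again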
if (trunc(currentBest.posDiff/dRate) == 0) {
while (trunc(currentBest.posDiff/dRate) == 0) {
dRate = dRate/10;
}
std::cout << "\nnew dRate: " << dRate << std::endl;
}
// If no change, multiply currentAnneal with anneal factor
currentAnneal = currentAnneal * cConstants->anneal_factor;
std::cout << "\nnew anneal: " << currentAnneal << std::endl;
}
previousBestPos = currentBest.posDiff;
previousBestVel = currentBest.velDiff;
}
// If in recording mode and write_freq reached, call the record method
if (static_cast<int>(generation) % cConstants->write_freq == 0 && cConstants->record_mode == true) {
recordGenerationPerformance(cConstants, inputParameters, generation, new_anneal, cConstants->num_individuals);
}
// Only call terminalDisplay every DISP_FREQ, not every single generation
if ( static_cast<int>(generation) % cConstants->disp_freq == 0) {
// Prints the best individual's posDiff / velDiff and cost
terminalDisplay(inputParameters[0], generation);
}
// Before replacing new individuals, determine whether all are within tolerance
// Determines when loop is finished
convergence = allWithinTolerance(tolerance, inputParameters, cConstants);
// Create a new generation and increment the generation counter
// Genetic Crossover and mutation occur here
newInd = newGeneration(survivors, inputParameters, cConstants->survivor_count, cConstants->num_individuals, new_anneal, cConstants, rng, generation);
++generation;
//Loop exits based on result of allWithinTolerance and if max_generations has been hit
} while ( !convergence && generation < cConstants->max_generations);
// Call record for final generation regardless of frequency
// for the annealing argument, set to -1 (since the anneal is only relevant to the next generation and so means nothing for the last one)
if (cConstants->record_mode == true) {
recordGenerationPerformance(cConstants, inputParameters, generation, -1, cConstants->num_individuals);
}
// Only call finalRecord if the results actually converged on a solution
// also display last generation onto terminal
if (convergence) {
terminalDisplay(inputParameters[0], generation);
finalRecord(cConstants, inputParameters, static_cast<int>(generation));
}
delete [] inputParameters;
delete [] survivors;
return calcPerS;
}
int main () {
// display GPU properties and ensure we are using the right one
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
std::cout << "\n\nDevice Number: 0 \n";
std::cout << "- Device name: " << prop.name << std::endl << std::endl;
hipSetDevice(0);
// Declare the genetic constants used, with file path being used to receive initial values
cudaConstants * cConstants = new cudaConstants("../Config_Constants/genetic.config");
// Sets run0 seed, used to change seed between runs
// Seed is set in cudaConstants: current time or passed in via config
double zero_seed = cConstants->time_seed;
// Perform the optimization with optimize function
for (int run = 0; run < cConstants->run_count; run++) {
// Adjust the time_seed so it is unique based on each run
cConstants->time_seed = zero_seed + run*100;
// Display contents of cConstants being used for this run and how many runs
std::cout << *cConstants;
std::cout << "\tPerforming run #" << run+1 << "\n\n";
// pre-calculate a table of Earth's position within possible mission time range
// defined as global variable
// accessed on the CPU when individuals are initialized
launchCon = new EarthInfo(cConstants);
// File output of element values that were calculated in EarthInfo constructor for verification
/*if (cConstants->record_mode == true) {
recordEarthData(cConstants, run);
}*/
// Call optimize with the current parameters in cConstants
optimize(cConstants);
delete launchCon; // Deallocate launchCon info for this run as it may be using a different time range in the next run
}
// Now that the optimize function is done (assumed that optimize() also records it), deallocate memory of the cudaConstants
delete cConstants;
return 0;
}
| b2eb7e91deb06dc2eca970a2fe1282fb3df5121e.cu | // Didymos Optimization Project using CUDA and a genetic algorithm
//TODO: Clarify complexities of the include paths
//TODO: What / why we are including
#include "../Earth_calculations/earthInfo.h" // For launchCon and EarthInfo()
#include "../Genetic_Algorithm/individuals.h" // For individual structs, paths to rkParameters for randomParameters()
#include "../Output_Funcs/output.h" // For terminalDisplay(), recordGenerationPerformance(), and finalRecord()
#include "../Runge_Kutta/runge_kuttaCUDA.cuh" // for testing rk4simple
#include "../Genetic_Algorithm/ga_crossover.h" // for selectSurvivors() and newGeneration()
#include <iostream> // cout
#include <iomanip> // used for setw(), sets spaces between values output
#include <random> // for std::mt19937_64 object
// Used to see if the best individual is changing when compared to a previous individual across generations
// Returns true if the currentBest is not equal to previousBest within a distinguishable difference
// Input: previousBestPos - Position difference at the end of RK simulation, in AU
// previousBestVel - Velocity difference, in AU/s (currently not implemented)
// currentBest - 'best' individual from current run, based on how individuals are sorted
// distinguishRate - magnitude of difference
// Called inside of optimize to see if anneal rate needs to change
bool changeInBest(double previousBestPos, double previousBestVel, const Individual & currentBest, double distinguishRate) {
//trunc() is used here to compare the doubles at the granularity of distinguishRate, to ensure that there has been essentially no change.
if (trunc(previousBestPos/distinguishRate) != trunc(currentBest.posDiff/distinguishRate)) {
return true;
}
else {
/* //Used if Velocity should be considered
if (trunc(previousBestVel/distinguishRate) != trunc(currentBest.velDiff/distinguishRate)) {
return true;
}
else return false;
*/
return false;
}
}
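// Example with illustrative numbers: if distinguishRate = 1e-8, previousBestPos = 3.4567e-7 and
// currentBest.posDiff = 3.4561e-7 both truncate to 34, so no change is reported; a posDiff of
// 3.3e-7 truncates to 33 and would count as a change.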
// ** Assumes pool is sorted array of Individuals **
// Used in determining if main optimize loop continues
// Input: tolerance - posDiff threshold, determines max target distance
// pool - this generation of Individuals, defined/initilized in optimimize
// cConstants - struct holding config values, used for accessing best_count value
// Output: Returns true if top best_count individuals within the pool are within the tolerance
bool allWithinTolerance(double tolerance, Individual * pool, const cudaConstants* cConstants) {
// Iterate to check best_count number of 'top' individuals
for (int i = 0; i < cConstants->best_count; i++) {
if(pool[i].getCost(cConstants) >= tolerance) {
//One was not within tolerance
return false;
}
}
// If iterated through and all were within tolerance, success
return true;
}
// Main processing function for Genetic Algorithm
// - manages memory needs for genetic algorithm
// - deals with processing calls to CUDA callRK
// - exits when individuals converge on tolerance defined in Constants
double optimize(const cudaConstants* cConstants) {
// Not used, previously used for reporting computational performance
double calcPerS = 0;
time_t timeSeed = cConstants->time_seed;
std::mt19937_64 rng(timeSeed); // This rng object is used for generating all random numbers in the genetic algorithm, passed in to functions that need it
std::cout << "----------------------------------------------------------------------------------------------------" << std::endl;
// Initialize the recording files if in record mode
if (cConstants->record_mode == true) {
initializeRecord(cConstants);
}
// input parameters for Runge Kutta process
// Each parameter is the same for each thread on the GPU
double timeInitial = 0; // the starting time of the trip is always defined as zero
// Runge Kutta adaptive time step error tolerance
double absTol = cConstants->rk_tol;
// the starting step size for RK run
// - note that the current step size varies throughout each run
//TODO: Should this be based on max_numsteps?
double stepSize = (orbitalPeriod - timeInitial) / cConstants->GuessMaxPossibleSteps;
// Initial genetic anneal scalar
double currentAnneal = cConstants->anneal_initial;
// Main set of parameters for Genetic Algorithm
// contains all thread unique input parameters
Individual *inputParameters = new Individual[cConstants->num_individuals];
// set to zero to force difference in first generation
double previousBestPos = 0;
double previousBestVel = 0;
// Initialize individuals randomly or from a file
if (cConstants->random_start) {
// individuals set to randomly generated, but reasonable, parameters
for (int i = 0; i < cConstants->num_individuals; i++) {
inputParameters[i] = Individual(randomParameters(rng, cConstants), cConstants);
}
}
// Read from file using cConstants initial_start_file_address to get path
else {
// **Might be deprecated, not tested summer 2020**
// Sets inputParameters to hold initial individuals based from file optimizedVector.bin
const int numStarts = 14; // the number of different sets of starting parameters in the input file
std::ifstream starts;
starts.open(cConstants->initial_start_file_address, std::ifstream::in|std::ios::binary); // a file containing the final parameters of converged results from CPU calculations
// sort the data into 2 dimensions
// one row is one set of starting parameters
// each column is a specific variable:
double startDoubles;
// arrayCPU needs to be updated to handle the fact that OPTIM_VARS may be flexible
double arrayCPU[numStarts][OPTIM_VARS];
for (int i = 0; i < OPTIM_VARS; i++) { // rows
for (int j = 0; j < numStarts; j++) { // columns
starts.read( reinterpret_cast<char*>( &startDoubles ), sizeof startDoubles );
arrayCPU[j][i] = startDoubles;
}
}
starts.close();
// set every thread's input parameters to a set of final values from CPU calculations for use as a good starting point
for (int i = 0; i < cConstants->num_individuals; i++) {
int row = rng() % numStarts; // Choose a random row to get the parameters from
double tripTime = arrayCPU[row][TRIPTIME_OFFSET];
double alpha = arrayCPU[row][ALPHA_OFFSET];
double beta = arrayCPU[row][BETA_OFFSET];
double zeta = arrayCPU[row][ZETA_OFFSET];
coefficients<double> testcoeff;
for (int j = 0; j < testcoeff.gammaSize; j++) {
testcoeff.gamma[j] = arrayCPU[row][j + GAMMA_OFFSET];
}
for (int j = 0; j < testcoeff.tauSize; j++) {
testcoeff.tau[j] = arrayCPU[row][j + TAU_OFFSET];
}
for (int j = 0; j < testcoeff.coastSize; j++) {
testcoeff.coast[j] = arrayCPU[row][j + COAST_OFFSET];
}
rkParameters<double> example(tripTime, alpha, beta, zeta, testcoeff);
inputParameters[i] = Individual(example, cConstants);
}
}
// Collection of individuals used in the genetic selection process
// - filled in selectSurvivors, based on callRK output
// - stores the winners of the head-to-head competition
Individual *survivors = new Individual[cConstants->survivor_count];
// Number of individuals that need to be evaluated
// - the whole population is in first loop
// - subsequent generations only calculate *new* individuals
int newInd = cConstants->num_individuals;
// number of current generation
double generation = 0;
// how far away the best individual is from the tolerance value
double currentDistance;
// Genetic solution tolerance
// - (currently just the position threshold which is furthest distance from the target allowed)
// - could eventually take into account velocity too and become a more complex calculation
double tolerance = cConstants->pos_threshold;
// distinguishable rate used in changeInBest()
// - used to help check for a change in anneal
// - Gets smaller when no change is detected
double dRate = 1.0e-8;
// Flag for finishing the genetic process
// set by allWithinTolerance()
bool convergence = false;
// main genetic algorithm loop
// - continues until allWithinTolerance returns true (specific number of individuals are within threshold)
do {
// each inputParameter represents an individual set of starting parameters
// GPU based runge kutta process determines final position and velocity based on parameters
// newInd - the number of *new* individuals that need to be evaluated
// - All individuals first generation
// - only new individuals, from crossover, in subsequent generations
// (inputParameters + (cConstants->num_individuals - newInd)) value accesses the start of the section of the inputParameters array that contains new individuals
callRK(newInd, cConstants->thread_block_size, inputParameters + (cConstants->num_individuals - newInd), timeInitial, stepSize, absTol, calcPerS, cConstants); // calculate trajectories for new individuals
// if we got bad results reset the Individual to random starting values (it may still be used for crossover) and set the final position to be way off so it gets replaced by a new Individual
for (int k = 0; k < cConstants->num_individuals; k++) {
//Checking each individual's final position for NaNs
if (isnan(inputParameters[k].finalPos.r) || isnan(inputParameters[k].finalPos.theta) || isnan(inputParameters[k].finalPos.z) || isnan(inputParameters[k].finalPos.vr) || isnan(inputParameters[k].finalPos.vtheta) || isnan(inputParameters[k].finalPos.vz)) {
std::cout << std::endl << std::endl << "NAN FOUND" << std::endl << std::endl;
inputParameters[k] = Individual(randomParameters(rng, cConstants), cConstants);
// Set to be a bad individual by giving it bad posDiff and velDiffs
// therefore also having a bad cost value
// won't be promoted in crossover
inputParameters[k].posDiff = 1.0;
inputParameters[k].velDiff = 0.0;
// calculate its new cost function based on 'bad' differences
inputParameters[k].getCost(cConstants);
}
}
// Preparing survivor pool with individuals for the newGeneration crossover
// Survivor pool contains:
// - individuals with best PosDiff
// - individuals with best velDiffs
// - depends on cConstants->survivorRatio (e.g. 0.1 means 10% are chosen by best posDiff)
// inputParameters is left sorted by individuals with best velDiffs
selectSurvivors(inputParameters, cConstants->num_individuals, cConstants->survivor_count, survivors, cConstants->survivorRatio); // Choose which individuals are in survivors, current method selects half to be best posDiff and other half to be best velDiff
// sort individuals based on overloaded relational operators
// gives reference of which to replace and which to carry to the next generation
std::sort(inputParameters, inputParameters + cConstants->num_individuals);
// Display a '.' to the terminal to show that a generation has been performed
// This also serves to visually separate the terminalDisplay() calls across generations
std::cout << '.';
// Calculate how far best individual is from the ideal cost value (currently is the positionalDifference of the best individual)
// TODO: Change this later to take into account more than just the best individual and its position difference
currentDistance = inputParameters[0].posDiff;
// Scaling anneal based on proximity to tolerance
// Far away: larger anneal scale, close: smaller anneal
double new_anneal = currentAnneal * (1 - tolerance / currentDistance);
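    // e.g. if currentDistance is 10x the tolerance, the factor is (1 - 1/10) = 0.9, so new_anneal = 0.9 * currentAnneal;
    // as currentDistance approaches the tolerance the factor approaches 0 and mutations shrink accordingly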
//Process to see if anneal needs to be adjusted
// If generations are stale, anneal drops
Individual currentBest;
// Compare current best individual to that from CHANGE_CHECK many generations ago.
// If they are the same, change size of mutations
if (static_cast<int>(generation) % cConstants->change_check == 0) {
currentBest = inputParameters[0];
// checks for anneal to change
// previousBest starts at 0 to ensure changeInBest = true on generation 0
if ( !(changeInBest(previousBestPos, previousBestVel, currentBest, dRate)) ) {
//this ensures that changeInBest never compares two zeros, keeping dRate relevant as posDiff decreases
if (trunc(currentBest.posDiff/dRate) == 0) {
while (trunc(currentBest.posDiff/dRate) == 0) {
dRate = dRate/10;
}
std::cout << "\nnew dRate: " << dRate << std::endl;
}
// If no change, multiply currentAnneal with anneal factor
currentAnneal = currentAnneal * cConstants->anneal_factor;
std::cout << "\nnew anneal: " << currentAnneal << std::endl;
}
previousBestPos = currentBest.posDiff;
previousBestVel = currentBest.velDiff;
}
// If in recording mode and write_freq reached, call the record method
if (static_cast<int>(generation) % cConstants->write_freq == 0 && cConstants->record_mode == true) {
recordGenerationPerformance(cConstants, inputParameters, generation, new_anneal, cConstants->num_individuals);
}
// Only call terminalDisplay every DISP_FREQ, not every single generation
if ( static_cast<int>(generation) % cConstants->disp_freq == 0) {
// Prints the best individual's posDiff / velDiff and cost
terminalDisplay(inputParameters[0], generation);
}
// Before replacing new individuals, determine whether all are within tolerance
// Determines when loop is finished
convergence = allWithinTolerance(tolerance, inputParameters, cConstants);
// Create a new generation and increment the generation counter
// Genetic Crossover and mutation occur here
newInd = newGeneration(survivors, inputParameters, cConstants->survivor_count, cConstants->num_individuals, new_anneal, cConstants, rng, generation);
++generation;
//Loop exits based on result of allWithinTolerance and if max_generations has been hit
} while ( !convergence && generation < cConstants->max_generations);
// Call record for final generation regardless of frequency
// for the annealing argument, set to -1 (since the anneal is only relevant to the next generation and so means nothing for the last one)
if (cConstants->record_mode == true) {
recordGenerationPerformance(cConstants, inputParameters, generation, -1, cConstants->num_individuals);
}
// Only call finalRecord if the results actually converged on a solution
// also display last generation onto terminal
if (convergence) {
terminalDisplay(inputParameters[0], generation);
finalRecord(cConstants, inputParameters, static_cast<int>(generation));
}
delete [] inputParameters;
delete [] survivors;
return calcPerS;
}
int main () {
// display GPU properties and ensure we are using the right one
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
std::cout << "\n\nDevice Number: 0 \n";
std::cout << "- Device name: " << prop.name << std::endl << std::endl;
cudaSetDevice(0);
// Declare the genetic constants used, with file path being used to receive initial values
cudaConstants * cConstants = new cudaConstants("../Config_Constants/genetic.config");
// Sets run0 seed, used to change seed between runs
// Seed is set in cudaConstants: current time or passed in via config
double zero_seed = cConstants->time_seed;
// Perform the optimization with optimize function
for (int run = 0; run < cConstants->run_count; run++) {
// Adjust the time_seed so it is unique based on each run
cConstants->time_seed = zero_seed + run*100;
// Display contents of cConstants being used for this run and how many runs
std::cout << *cConstants;
std::cout << "\tPerforming run #" << run+1 << "\n\n";
// pre-calculate a table of Earth's position within possible mission time range
// defined as global variable
// accessed on the CPU when individuals are initialized
launchCon = new EarthInfo(cConstants);
// File output of element values that were calculated in EarthInfo constructor for verification
/*if (cConstants->record_mode == true) {
recordEarthData(cConstants, run);
}*/
// Call optimize with the current parameters in cConstants
optimize(cConstants);
delete launchCon; // Deallocate launchCon info for this run as it may be using a different time range in the next run
}
// Now that the optimize function is done (assumed that optimize() also records it), deallocate memory of the cudaConstants
delete cConstants;
return 0;
}
|
a549bcc547c986335856d14f39cca8f0f5ed92e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/** @file im2col.cu
** @brief Image to columns and back (GPU)
** @author Andrea Vedaldi
**/
#include "im2col.cpp"
#include "gpu.hpp"
/* ---------------------------------------------------------------- */
/* im2col (GPU) */
/* ---------------------------------------------------------------- */
template <typename Dtype>
__global__ void
im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int ksize,
const int stride, const int pad,
const int height_col, const int width_col, Dtype* data_col)
{
/* each kernel copies the pixels in an image patch for one channel */
int index = threadIdx.x + blockIdx.x * blockDim.x ;
if (index < n) {
int x_out = index % width_col;
index /= width_col;
int y_out = index % height_col;
int channel_in = index / height_col;
int channel_out = channel_in * ksize * ksize;
int y_in = y_out * stride - pad;
int x_in = x_out * stride - pad;
data_col += (channel_out * height_col + y_out) * width_col + x_out;
data_im += (channel_in * height + y_in) * width + x_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
if (y_in + i >= 0 && y_in + i < height && x_in + j >= 0 && x_in + j < width) {
*data_col = data_im[i * width + j];
} else {
*data_col = 0;
}
data_col += height_col * width_col;
}
}
}
}
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int ksize,
const int stride, const int pad,
Dtype* data_col)
{
// We are going to launch channels * height_col * width_col threads, one per
// output patch position, each responsible for copying one ksize x ksize patch of a single channel.
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height_col * width_col;
hipLaunchKernelGGL(( im2col_gpu_kernel<Dtype>) , dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, height, width, ksize, stride, pad, height_col, width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int ksize,
const int stride, const int pad,
float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int ksize,
const int stride, const int pad,
double* data_col);
/* ---------------------------------------------------------------- */
/* col2im (GPU) */
/* ---------------------------------------------------------------- */
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width,
const int channels, const int ksize,
const int stride, const int pad,
const int height_col, const int width_col,
Dtype* data_im)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n)
{
Dtype val = 0;
/*
Each kernel computes one pixel of the output image. This is obtained
by summing all the values in the columnised data that were generated as copies of
that particular pixel.
*/
/*
recover the (x,y,c) coordinate of the input pixel based on the kernel
index, using the fact that index = x + width * y + width*height * c.
*/
int x = (index % width) ;
int y = ((index / width) % height) ;
int c = index / (width * height) ;
/*
Let xc be the top left coordinate of the patch(xc,yc) packed at location
(xc,yc) in the columnised data. patch(xc,yc) includes all input image
pixels in the interval:
x1 <= x <= x2, x1(xc) = stride * xc - pad, x2(xc) = x1(xc) + ksize - 1,
y1 <= y <= y2, y1(yc) = stride * yc - pad, y2(yc) = y1(yc) + ksize - 1.
Hence pixel (x,y) is integrated in patch(xc,yc) if, and only if,
(x + pad - ksize + 1) / stride <= xc <= (x + pad) / stride.
Here to find the minimum and maximum value of xc we need to take the ceil
of the left-hand side and the floor of the right hand side. With C integer
math:
xc1 <= xc <= xc2, xc1 = (x + pad - ksize + 1 + stride - 1)/stride
= (x + pad - ksize)/stride + 1,
xc2 =(x + pad) / stride
Some care must be given to the first expression for xc1 as this works
only if the numerator is non-negative (division is otherwise
undefined C89 or truncated upwards C99).
Within a patch(xc,yc), pixel (x,y) has relative coordinates
(dx,dy) given by
dx = x - (xc * stride - pad), dy = y - (yc * stride - pad).
This results in an additional patch-relative offset of
doffset(dx,dy,c) = (x + pad - xc*stride)
+ (y + pad - yc*stride)*ksize
+ c*ksize*ksize
= (x + pad) + (y+pad)*ksize + c*(ksize*ksize)
- xc*stride - yc*stride*ksize.
Thus pixel (x,y) in patch(xc,yc) should be read in the columnised
output with a total offset of
offset(x,y,xc,yc,c)
= xc + yc * width_col + doffset(dx,dy,c) * width_col*height_col
= ((x + pad) + (y+pad)*ksize + c*(ksize*ksize)) * width_col*height_col
+ xc * (1 - stride * width_col*height_col)
+ yc * (1 - stride * ksize*height_col) * width_col.
*/
int xc1 = (x + pad - ksize >= 0) ? (x + pad - ksize) / stride + 1 : 0 ;
int yc1 = (y + pad - ksize >= 0) ? (y + pad - ksize) / stride + 1 : 0 ;
int xc2 = min((x + pad) / stride, width_col - 1) ;
int yc2 = min((y + pad) / stride, height_col - 1) ;
int offset = (c * ksize * ksize + (y+pad) * ksize + (x+pad)) * height_col * width_col;
int deltax = (1 - stride * height_col * width_col);
int deltay = (1 - stride * ksize * height_col) * width_col;
for (int yc = yc1 ; yc <= yc2 ; ++ yc) {
for (int xc = xc1 ; xc <= xc2 ; ++ xc) {
val += data_col[offset + yc * deltay + xc * deltax];
}
}
#if 0
int x_col_start = (x < ksize) ? 0 : (x - ksize) / stride + 1;
int x_col_end = min(x / stride + 1, width_col);
int y_col_start = (y < ksize) ? 0 : (y - ksize) / stride + 1;
int y_col_end = min(y / stride + 1, height_col);
// scan all the filter applications ?
int offset = (c * ksize * ksize + y * ksize + x) * height_col * width_col;
int coeff_y_col = (1 - stride * ksize * height_col) * width_col;
int coeff_x_col = (1 - stride * height_col * width_col);
for (int y_col = y_col_start; y_col < y_col_end; ++y_col) {
for (int x_col = x_col_start; x_col < x_col_end; ++x_col) {
val += data_col[offset + y_col * coeff_y_col + x_col * coeff_x_col];
}
}
#endif
data_im[index] = val;
}
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int ksize,
const int stride, const int pad,
Dtype* data_im)
{
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height * width;
hipLaunchKernelGGL(( col2im_gpu_kernel<Dtype>) , dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col, height, width, channels,
ksize, stride, pad,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int ksize,
const int stride, const int pad,
float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int ksize,
const int stride, const int pad,
double* data_im);
| a549bcc547c986335856d14f39cca8f0f5ed92e5.cu | /** @file im2col.cu
** @brief Image to columns and back (GPU)
** @author Andrea Vedaldi
**/
#include "im2col.cpp"
#include "gpu.hpp"
/* ---------------------------------------------------------------- */
/* im2col (GPU) */
/* ---------------------------------------------------------------- */
template <typename Dtype>
__global__ void
im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int ksize,
const int stride, const int pad,
const int height_col, const int width_col, Dtype* data_col)
{
/* each kernel copies the pixels in an image patch for one channel */
int index = threadIdx.x + blockIdx.x * blockDim.x ;
if (index < n) {
int x_out = index % width_col;
index /= width_col;
int y_out = index % height_col;
int channel_in = index / height_col;
int channel_out = channel_in * ksize * ksize;
int y_in = y_out * stride - pad;
int x_in = x_out * stride - pad;
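    /* worked example (illustrative numbers): with width_col = 4 and height_col = 3, index 13 gives
       x_out = 13 % 4 = 1, 13 / 4 = 3, y_out = 3 % 3 = 0, channel_in = 3 / 3 = 1,
       i.e. this thread fills the patch at output position (x_out,y_out) = (1,0) of channel 1 */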
data_col += (channel_out * height_col + y_out) * width_col + x_out;
data_im += (channel_in * height + y_in) * width + x_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
if (y_in + i >= 0 && y_in + i < height && x_in + j >= 0 && x_in + j < width) {
*data_col = data_im[i * width + j];
} else {
*data_col = 0;
}
data_col += height_col * width_col;
}
}
}
}
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int ksize,
const int stride, const int pad,
Dtype* data_col)
{
// We are going to launch channels * height_col * width_col threads, one per
// output patch position, each responsible for copying one ksize x ksize patch of a single channel.
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height_col * width_col;
im2col_gpu_kernel<Dtype> <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>
(num_kernels, data_im, height, width, ksize, stride, pad, height_col, width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int ksize,
const int stride, const int pad,
float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int ksize,
const int stride, const int pad,
double* data_col);
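/* Illustrative host-side usage sketch (disabled; the helper name, buffer names and float-only
   shapes below are only an example, not part of the library): data_col must hold
   channels*ksize*ksize rows of height_col*width_col values. */
#if 0
static void im2col_usage_sketch(const float* h_image, int channels,
                                int height, int width, int ksize, int stride, int pad)
{
  int height_col = (height + 2 * pad - ksize) / stride + 1;
  int width_col  = (width  + 2 * pad - ksize) / stride + 1;
  float* d_im = NULL;
  float* d_col = NULL;
  cudaMalloc(&d_im,  sizeof(float) * channels * height * width);
  cudaMalloc(&d_col, sizeof(float) * channels * ksize * ksize * height_col * width_col);
  cudaMemcpy(d_im, h_image, sizeof(float) * channels * height * width, cudaMemcpyHostToDevice);
  im2col_gpu<float>(d_im, channels, height, width, ksize, stride, pad, d_col);
  /* ... use d_col as a (channels*ksize*ksize) x (height_col*width_col) matrix ... */
  cudaFree(d_col);
  cudaFree(d_im);
}
#endif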
/* ---------------------------------------------------------------- */
/* col2im (GPU) */
/* ---------------------------------------------------------------- */
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width,
const int channels, const int ksize,
const int stride, const int pad,
const int height_col, const int width_col,
Dtype* data_im)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n)
{
Dtype val = 0;
/*
Each kernel computes one pixel of the output image. This is obtained
by summing all the values in the columnised data that were generated as copies of
that particular pixel.
*/
/*
recover the (x,y,c) coordinate of the input pixel based on the kernel
index, using the fact that index = x + width * y + width*height * c.
*/
int x = (index % width) ;
int y = ((index / width) % height) ;
int c = index / (width * height) ;
/*
Let xc be the top left coordinate of the patch(xc,yc) packed at location
(xc,yc) in the columnised data. patch(xc,yc) includes all input image
pixels in the interval:
x1 <= x <= x2, x1(xc) = stride * xc - pad, x2(xc) = x1(xc) + ksize - 1,
y1 <= y <= y2, y1(yc) = stride * yc - pad, y2(yc) = y1(yc) + ksize - 1.
Hence pixel (x,y) is integrated in patch(xc,yc) if, and only if,
(x + pad - ksize + 1) / stride <= xc <= (x + pad) / stride.
Here to find the minimum and maximum value of xc we need to take the ceil
of the left-hand side and the floor of the right hand side. With C integer
math:
xc1 <= xc <= xc2, xc1 = (x + pad - ksize + 1 + stride - 1)/stride
= (x + pad - ksize)/stride + 1,
xc2 =(x + pad) / stride
Some care must be given to the first expression for xc1 as this works
only if the numerator is non-negative (division is otherwise
undefined C89 or truncated upwards C99).
Within a patch(xc,yc), pixel (x,y) has relative coordinates
(dx,dy) given by
dx = x - (xc * stride - pad), dy = y - (yc * stride - pad).
This results in an additional patch-relative offset of
doffset(dx,dy,c) = (x + pad - xc*stride)
+ (y + pad - yc*stride)*ksize
+ c*ksize*ksize
= (x + pad) + (y+pad)*ksize + c*(ksize*ksize)
- xc*stride - yc*stride*ksize.
Thus pixel (x,y) in patch(xc,yc) should be read in the columnised
output with a total offset of
offset(x,y,xc,yc,c)
= xc + yc * width_col + doffset(dx,dy,c) * width_col*height_col
= ((x + pad) + (y+pad)*ksize + c*(ksize*ksize)) * width_col*height_col
+ xc * (1 - stride * width_col*height_col)
+ yc * (1 - stride * ksize*height_col) * width_col.
*/
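  /*
   Worked example (illustrative numbers): with ksize = 3, stride = 1, pad = 1 and pixel x = 2,
   the bounds above give xc1 = (2 + 1 - 3)/1 + 1 = 1 and xc2 = (2 + 1)/1 = 3 (clamped to
   width_col - 1), i.e. patches xc = 1, 2, 3 all contain the pixel, matching
   stride*xc - pad <= x <= stride*xc - pad + ksize - 1.
  */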
int xc1 = (x + pad - ksize >= 0) ? (x + pad - ksize) / stride + 1 : 0 ;
int yc1 = (y + pad - ksize >= 0) ? (y + pad - ksize) / stride + 1 : 0 ;
int xc2 = min((x + pad) / stride, width_col - 1) ;
int yc2 = min((y + pad) / stride, height_col - 1) ;
int offset = (c * ksize * ksize + (y+pad) * ksize + (x+pad)) * height_col * width_col;
int deltax = (1 - stride * height_col * width_col);
int deltay = (1 - stride * ksize * height_col) * width_col;
for (int yc = yc1 ; yc <= yc2 ; ++ yc) {
for (int xc = xc1 ; xc <= xc2 ; ++ xc) {
val += data_col[offset + yc * deltay + xc * deltax];
}
}
#if 0
int x_col_start = (x < ksize) ? 0 : (x - ksize) / stride + 1;
int x_col_end = min(x / stride + 1, width_col);
int y_col_start = (y < ksize) ? 0 : (y - ksize) / stride + 1;
int y_col_end = min(y / stride + 1, height_col);
// scan all the filter applications ?
int offset = (c * ksize * ksize + y * ksize + x) * height_col * width_col;
int coeff_y_col = (1 - stride * ksize * height_col) * width_col;
int coeff_x_col = (1 - stride * height_col * width_col);
for (int y_col = y_col_start; y_col < y_col_end; ++y_col) {
for (int x_col = x_col_start; x_col < x_col_end; ++x_col) {
val += data_col[offset + y_col * coeff_y_col + x_col * coeff_x_col];
}
}
#endif
data_im[index] = val;
}
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int ksize,
const int stride, const int pad,
Dtype* data_im)
{
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height * width;
col2im_gpu_kernel<Dtype> <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>
(num_kernels, data_col, height, width, channels,
ksize, stride, pad,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int ksize,
const int stride, const int pad,
float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int ksize,
const int stride, const int pad,
double* data_im);
|
ed48c9a80cda44b33c458fe051277c3c21eb4474.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <windows.h>
// OpenGL Graphics includes
#include <GL/glew.h>
#include <GL/freeglut.h>
// includes, system
#include<stdlib.h>
#include <stdio.h>
#include <math.h>
// includes, project
#include <cutil_inline.h>
//#include <shrQATest.h>
#include<iostream>
using namespace std;
#include "KDTree.h"
#include "Mesh.h"
#define WIDTH 200
#define HEIGH 200
#define INF 100000
#define EPSILON 0.001
#define PI 3.14159265
#define PHON 30
#define AMBIENT 0.8
#define SPECULAR 0.0
#define DIFFUSE 0.8
#define RGB_R 1.0
#define RGB_G 1.0
#define RGB_B 1.0
#define GLUT_WHEEL_UP 3
#define GLUT_WHEEL_DOWN 4
double anger_x = 0;
double anger_y = 0;
double anger_move_x = 0;
double anger_move_y = 0;
double shift_x = 0;
double shift_y = 0;
double shift_move_x = 0;
double shift_move_y = 0;
double scale_x = 0;
double scale_y = 0;
double scale = 50;
bool left_tag = false;
bool right_tag = false;
bool middle_tag = false;
int mouse_x = 0;
int mouse_y = 0;
void processMouseActiveMotion(int x, int y);
void processMouse(int button, int state, int x, int y);
void keyboard(unsigned char key,int x, int y);
void draw();
void drawGraph();
void drawBox(Point& p1,Point& p2);
KDTree tree;
Mesh mesh;
GLubyte* pixelData;
int index_node=0;
KDNode* kdnode;
struct Stack{
int index;
bool leafNode;
};
struct Ray
{
Point pos;
Point dir;
};
struct Light
{
Point pos;
Point col;
};
void myDisplay(void)
{
glClear(GL_COLOR_BUFFER_BIT);
glDrawPixels(WIDTH,HEIGH,GL_BGR_EXT,GL_UNSIGNED_BYTE,pixelData);
glutSwapBuffers();
}
__device__ float Dot(Point& a, Point& b)
{
return (a.x*b.x + a.y*b.y + a.z*b.z);
}
__device__ Point CrossProduct(Point& a, Point& b)
{
Point ret;
ret.x = a.y * b.z - a.z * b.y;
ret.y = a.z * b.x - a.x * b.z;
ret.z = a.x * b.y - a.y * b.x;
return ret;
}
__device__ void Normalize(Point& vector)
{
float v=sqrt(vector.x*vector.x+vector.y*vector.y+vector.z*vector.z);
vector.x /= v; vector.y /= v; vector.z /= v;
}
__device__ Point minus(Point& p1, Point& p2)
{
Point ret;
ret.x = p1.x - p2.x;
ret.y = p1.y - p2.y;
ret.z = p1.z - p2.z;
return ret;
}
__device__ Point multi(Point& p1, Point& p2)
{
Point ret;
ret.x = p1.x*p2.x;
ret.y = p1.y*p2.y;
ret.z = p1.z*p2.z;
return ret;
}
__device__ Point make(float a, float b, float c)
{
Point ret;
ret.x = a;
ret.y = b;
ret.z= c;
return ret;
}
__device__ Point add(Point& p1, Point& p2)
{
Point ret;
ret.x = p1.x+p2.x;
ret.y = p1.y+p2.y;
ret.z = p1.z+p2.z;
return ret;
}
__device__ Point getNormal(Point& rkPoint0, Point& rkPoint1, Point& rkPoint2)
{
Point kEdge1 = minus(rkPoint1 , rkPoint0);
Point kEdge2 = minus(rkPoint2 , rkPoint0);
Point normal = CrossProduct(kEdge1 , kEdge2);
Normalize(normal);
return normal;
}
__device__ bool pointInTriangle(Point& a, Point& b, Point& c, Point& p)
{
Point AB = minus(b , a);
Point AC = minus(c , a);
Point AP = minus(p , a);
Point BC = minus(c , b);
Point BP = minus(p , b);
Point temp;
temp = CrossProduct(AB, AC);
float left = sqrt(temp.x*temp.x + temp.y*temp.y + temp.z*temp.z);
temp = CrossProduct(AB, AP);
float right = 0;//distance(CrossProduct(AB, AP), temp)+ distance(CrossProduct(AP, AC),temp) + distance(CrossProduct(BC, BP),temp);
right += sqrt(temp.x*temp.x + temp.y*temp.y + temp.z*temp.z);
temp = CrossProduct(AP, AC);
right += sqrt(temp.x*temp.x + temp.y*temp.y + temp.z*temp.z);
temp = CrossProduct(BC, BP);
right += sqrt(temp.x*temp.x + temp.y*temp.y + temp.z*temp.z);
return (left-EPSILON<right)&&(right<left+EPSILON);
}
__device__ int intersectLeaf(Ray& ray, int leaf, float* d_vertice, unsigned int * d_face, KDLeafNode* d_leafNode, int* d_leafNodeIndex,Point& d_normal, Point& d_intersection)
{
if(leaf==-1)
return -1;
int start = d_leafNode[leaf].begin;
int end = d_leafNode[leaf].begin+d_leafNode[leaf].numObject;
float minDis=10000;
int minObjNum=-1;
Point planeNormal;//store the normal of the intersected plane
for(int m=start; m<end; m++)
{
Point PT1,PT2,PT3;
PT1.x = d_vertice[d_face[d_leafNodeIndex[m]*3]*3];
PT1.y = d_vertice[d_face[d_leafNodeIndex[m]*3]*3+1];
PT1.z = d_vertice[d_face[d_leafNodeIndex[m]*3]*3+2];
PT2.x = d_vertice[d_face[d_leafNodeIndex[m]*3+1]*3];
PT2.y = d_vertice[d_face[d_leafNodeIndex[m]*3+1]*3+1];
PT2.z = d_vertice[d_face[d_leafNodeIndex[m]*3+1]*3+2];
PT3.x = d_vertice[d_face[d_leafNodeIndex[m]*3+2]*3];
PT3.y = d_vertice[d_face[d_leafNodeIndex[m]*3+2]*3+1];
PT3.z = d_vertice[d_face[d_leafNodeIndex[m]*3+2]*3+2];
//get plane
Point normal=getNormal(PT1,PT3,PT2);
float d, denom, nom,t;
Point intersectionP, intersection;
d = 0-Dot(normal , PT1);
//find intersection points
denom=Dot(normal,ray.dir);
if(fabs(denom)<EPSILON)
{//parallel no intersection
continue;
}
else
{
nom=Dot(normal, ray.pos)+d;
t=-nom/denom;//distance
if(t<=0)
{//intersection is behind the ray's start point
continue;
}
else
{
//whether in the triangle's plane
//intersectionP=make_Point(ray.pos.x+ray.dir.x*t,ray.pos.y+ray.dir.y*t,ray.pos.z+ray.dir.z*t);
intersectionP.x = ray.pos.x+ray.dir.x*t;
intersectionP.y = ray.pos.y+ray.dir.y*t;
intersectionP.z = ray.pos.z+ray.dir.z*t;
if( t <minDis&&pointInTriangle(PT1,PT2,PT3,intersectionP))
{
minDis=t; //min distance;
minObjNum=m;
planeNormal=normal;
d_normal.x = normal.x ;
d_normal.y = normal.y ;
d_normal.z = normal.z ;
d_intersection.x = intersectionP.x;
d_intersection.y = intersectionP.y;
d_intersection.z = intersectionP.z;
}
}
}
}
return minObjNum;
}
////////////////////////////////////////////////////////////////////////////////
//intersectBox method
////////////////////////////////////////////////////////////////////////////////
__device__ bool intersectBox(Ray& r, Point& boxmin, Point& boxmax, float &tnear, float &tfar)
{
//if(fabs(r.dir.x)<EPSILON || fabs(r.dir.y)<EPSILON || fabs(r.dir.z)<EPSILON)
// return false;
// compute intersection of ray with all six bbox planes
Point invR ;//= make_Point(1.0f) / r.dir;
invR.x = 1.0/r.dir.x;
invR.y = 1.0/r.dir.y;
invR.z = 1.0/r.dir.z;
Point tbot = multi(invR , minus(boxmin , r.pos));
Point ttop = multi(invR , minus(boxmax , r.pos));
// re-order intersections to find smallest and largest on each axis
Point tmin = make(min(ttop.x, tbot.x),min(ttop.y, tbot.y),min(ttop.z, tbot.z));
Point tmax = make(max(ttop.x, tbot.x),max(ttop.y, tbot.y),max(ttop.z, tbot.z));
// find the largest tmin and the smallest tmax
float largest_tmin = max(max(tmin.x, tmin.y), max(tmin.x, tmin.z));
float smallest_tmax = min(min(tmax.x, tmax.y), min(tmax.x, tmax.z));
tnear = largest_tmin;
tfar = smallest_tmax;
return smallest_tmax > largest_tmin;
}
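////////////////////////////////////////////////////////////////////////////////
//traverse method
// Iterative, stack-based KD-tree traversal (no recursion on the device): inside
// nodes whose AABB is hit are pushed so the child nearer to the ray is popped
// first, leaf nodes are tested with intersectLeaf, and the hit triangle index
// reported by intersectLeaf is returned (-1 if nothing is hit).
////////////////////////////////////////////////////////////////////////////////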
__device__ int traverse(Ray& ray, KDInsideNode* d_insideNode, float* vertice, unsigned int* face, int* leafNodeIndex, KDLeafNode* leafNode, Point& normal, Point& intersection)
{
int cur_index =0;
float tnear,tfar;
Point intersectionPoint;
float intersectionvalue =0;
Stack stack[200];
int capacity = 0;
int result;
stack[capacity].index =0;
stack[capacity].leafNode = false;
capacity++;
while(capacity>0)
{
capacity--;
while(capacity>=0 && stack[capacity].leafNode) // check capacity before indexing the stack
{
result = intersectLeaf(ray, stack[capacity].index, vertice, face, leafNode, leafNodeIndex,normal, intersection);
if(result!=-1)
return result;
else
{
capacity--;
continue;
}
}
if(!stack[capacity].leafNode)
{
cur_index = stack[capacity].index;
if(intersectBox(ray, d_insideNode[cur_index].aabb.minPoint, d_insideNode[cur_index].aabb.maxPoint, tnear, tfar))
{
intersectionPoint.x = ray.pos.x + tnear*ray.dir.x;
intersectionPoint.y = ray.pos.y + tnear*ray.dir.y;
intersectionPoint.z = ray.pos.z + tnear*ray.dir.z;
switch(d_insideNode[cur_index].splitAxis)
{
case Axis_X:
intersectionvalue = intersectionPoint.x;
break;
case Axis_Y:
intersectionvalue = intersectionPoint.y;
break;
case Axis_Z:
intersectionvalue = intersectionPoint.z;
break;
}
if(intersectionvalue < d_insideNode[cur_index].splitValue)
{ // left part
if(d_insideNode[cur_index].right!=-1)
{
if(d_insideNode[cur_index].RightLeaf)
stack[capacity].leafNode = true;
else
stack[capacity].leafNode = false;
stack[capacity].index = d_insideNode[cur_index].right;
capacity++;
}
if(d_insideNode[cur_index].left!=-1)
{
if(d_insideNode[cur_index].LeftLeaf)
stack[capacity].leafNode = true;
else
stack[capacity].leafNode = false;
stack[capacity].index = d_insideNode[cur_index].left;
capacity++;
}
}
else
{ // right part
if(d_insideNode[cur_index].left!=-1)
{
if(d_insideNode[cur_index].LeftLeaf)
stack[capacity].leafNode = true;
else
stack[capacity].leafNode = false;
stack[capacity].index = d_insideNode[cur_index].left;
capacity++;
}
if(d_insideNode[cur_index].right!=-1)
{
if(d_insideNode[cur_index].RightLeaf)
stack[capacity].leafNode = true;
else
stack[capacity].leafNode = false;
stack[capacity].index = d_insideNode[cur_index].right;
capacity++;
}
}
}
}
}
	// stack exhausted without finding a hit
	return -1;
}
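////////////////////////////////////////////////////////////////////////////////
//Render kernel
// One thread per pixel: trace the primary ray through the KD-tree, then shade
// with an ambient + diffuse + specular (Phong-style) sum; a second ray from the
// hit point towards the light decides whether the diffuse/specular terms are
// added or the pixel keeps only the ambient term. The BGR result goes to gpu_color.
////////////////////////////////////////////////////////////////////////////////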
__global__ void Render(Ray* rray, float* gpu_vertice, unsigned int* gpu_face, int* gpu_index, KDInsideNode* gpu_insideNode, KDLeafNode* gpu_leafNode,unsigned char* gpu_color)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if(index<WIDTH*HEIGH)
{
Point diffuse;
Point ambient;
Point specular;
Point color;
Point intersectionPoint;
Point normal;
float dif;
float cos_data;
Point reflect;
Point rayFromEye;
Ray ray;
ray.pos.x = rray[index].pos.x;
ray.pos.y = rray[index].pos.y;
ray.pos.z = rray[index].pos.z;
ray.dir.x = rray[index].dir.x;
ray.dir.y = rray[index].dir.y;
ray.dir.z = rray[index].dir.z;
Light light;
light.pos.x = 5;
light.pos.y = 5;
light.pos.z = 5;
light.col.x =1;
light.col.y =1;
light.col.z =1;
int kind = traverse(ray,gpu_insideNode, gpu_vertice, gpu_face, gpu_index,gpu_leafNode,normal,intersectionPoint);
ambient.x = ambient.y = ambient.z =0.0;
diffuse.x = diffuse.y = diffuse.z =0.0;
specular.x = specular.y = specular.z =0.0;
if(kind!=-1)
{
ambient.x = AMBIENT*RGB_R;
ambient.y = AMBIENT*RGB_G;
ambient.z = AMBIENT*RGB_B;
Point p;
p.x = intersectionPoint.x;
p.y = intersectionPoint.y;
p.z = intersectionPoint.z;
Normalize(p);
dif =Dot(p,normal);
if(dif>0)
{
Ray temp;
Point temp_point1,temp_point2;
temp.pos = intersectionPoint;
temp.dir = minus(light.pos , intersectionPoint);
kind = traverse(temp,gpu_insideNode, gpu_vertice, gpu_face, gpu_index,gpu_leafNode,temp_point1,temp_point2);
if(kind ==-1)
{
color.x = ambient.x;
color.y = ambient.y;
color.z = ambient.z;
gpu_color[2+3*index] = (unsigned char)(color.x*255);
gpu_color[1+3*index] = (unsigned char)(color.y*255);
gpu_color[0+3*index] = (unsigned char)(color.z*255);
return;
}
else
{
diffuse.x = dif*DIFFUSE;
diffuse.y = dif*DIFFUSE;
diffuse.z = dif*DIFFUSE;
}
}
else
{
color.x = ambient.x;
color.y = ambient.y;
color.z = ambient.z;
gpu_color[2+3*index] = (unsigned char)(color.x*255);
gpu_color[1+3*index] = (unsigned char)(color.y*255);
gpu_color[0+3*index] = (unsigned char)(color.z*255);
return;
}
if(dif>0)
{
reflect.x = normal.x *dif*2-p.x;
reflect.y = normal.y *dif*2-p.y;
reflect.z = normal.z *dif*2-p.z;
rayFromEye.x = ray.pos.x - intersectionPoint.x;
rayFromEye.y = ray.pos.y - intersectionPoint.y;
rayFromEye.z = ray.pos.z - intersectionPoint.z;
Normalize(rayFromEye);
cos_data = reflect.x*rayFromEye.x + reflect.y*rayFromEye.y + reflect.z*rayFromEye.z;
if(cos_data>0)
{
cos_data = pow(cos_data,PHON);
specular.x = light.col.x*cos_data*SPECULAR;
specular.y = light.col.y*cos_data*SPECULAR;
specular.z = light.col.z*cos_data*SPECULAR;
}
}
}
color.x = diffuse.x + ambient.x + specular.x;
color.y = diffuse.y + ambient.y + specular.y;
color.z = diffuse.z + ambient.z + specular.z;
if(color.x >1.0)
color.x =1.0;
if(color.y >1.0)
color.y =1.0;
if(color.z >1.0)
color.z =1.0;
gpu_color[2+3*index] = (unsigned char)(color.x*255);
gpu_color[1+3*index] = (unsigned char)(color.y*255);
gpu_color[0+3*index] = (unsigned char)(color.z*255);
}
}
void processMouse(int button, int state, int x, int y) {
if( button == GLUT_WHEEL_UP )
scale -= 0.05;
if( button == GLUT_WHEEL_DOWN )
scale += 0.05;
if ( state == GLUT_DOWN )
{
mouse_x = x; mouse_y = y;
if( button == GLUT_LEFT_BUTTON )
left_tag = true;
if( button == GLUT_RIGHT_BUTTON )
right_tag = true;
// cout << "left down!" << endl;
}
if ( state == GLUT_UP )
{
left_tag = false;
right_tag = false;
if( button == GLUT_LEFT_BUTTON )
{
anger_x += anger_move_x;
anger_y += anger_move_y;
anger_move_x = 0;
anger_move_y = 0;
}
if( button == GLUT_RIGHT_BUTTON )
{
shift_x += shift_move_x;
shift_y += shift_move_y;
shift_move_x = 0;
shift_move_y = 0;
}
// cout << "left up!" << endl;
}
}
void processMouseActiveMotion(int x, int y) {
if ( left_tag )
{
anger_move_x = ( y - mouse_y ) * 1.0 / 800;
anger_move_y = ( x - mouse_x ) * 1.0 / 800;
// cout << anger_x << endl;
}
if ( right_tag )
{
shift_move_x = x - mouse_x;
shift_move_y = mouse_y - y;
}
}
int main(int argc, char **argv)
{
if(mesh.loadFile("export/dolphins.obj"))
{
cout<<"successful";
}
else
cout<<"failed";
int* index = new int[mesh.m_numFace];
for(int i=0;i<mesh.m_numFace;i++)
index[i] =i;
tree.ConstructKDTree(tree.root,mesh.m_numFace,index,mesh.face,mesh.vertice,mesh.aabb,0,true);
tree.PrepareMemory();
// kdnode = tree.root;
///////////////////////////////////////////////////////////////////////////////////////////////////
Ray* rays;
rays = new Ray[WIDTH *HEIGH];
//pixelData = new GLubyte[WIDTH *HEIGH*3];
pixelData = (GLubyte*)malloc(WIDTH*HEIGH*3);
if(pixelData == 0)
return 0;
//CUDA_Prepare(&mesh, &tree,pixelData);
//unsigned char gpu_color[3];
int pixel_y=0,pixel_x =0;//pixels
float i_inc = 1.0/WIDTH;
float j_inc = 1.0/HEIGH;
float j=-0.5,i;
int count_data =0;
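	// Build one primary ray per pixel: the eye sits at (0, 7, 0), every ray shares the fixed
	// y direction component -1.5, and i/j sweep the image plane over [-0.5*WIDTH/HEIGH, 0.5*WIDTH/HEIGH] x [-0.5, 0.5]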
for(pixel_y=0; pixel_y < HEIGH; j+=j_inc, pixel_y++ )
{
for(pixel_x =0,i=-0.5*WIDTH/HEIGH;pixel_x < WIDTH; i+=i_inc*WIDTH/HEIGH, pixel_x++)
{
// cout<<"pixel_y "<<pixel_y<<" pixel_x "<<pixel_x<<endl;
rays[count_data].dir.x= i; //+camera.look_at.x -camera.location.x;
rays[count_data].dir.y= -1.5 ;//+camera.look_at.y -camera.location.y;
rays[count_data].dir.z =-j;// camera->direction.z;//*(camera.look_at.z -camera.location.z);
rays[count_data].pos.x = 0;//camera->location.x;
rays[count_data].pos.y = 7;//camera->location.y;
rays[count_data].pos.z = 0;//camera->location.z;
/*
Render(rays[count_data],mesh.vertice, mesh.face, tree.leafNodeIndex, tree.insideNode, tree.leafNode,gpu_color);
pixelData[3*count_data] = gpu_color[0];
pixelData[3*count_data+1] = gpu_color[1];
pixelData[3*count_data+2] = gpu_color[2];
*/
count_data++;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/*
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DEPTH | GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowPosition(300, 30);
glutInitWindowSize(WIDTH, HEIGH);
glutCreateWindow( "kdtree");
glViewport(0, 0, 800, 800);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(-200, 200, -200, 200, 1, 10000);
// gluPerspective (0, 800 /600.0, 10.0, 100.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
gluLookAt(0,0,1000,0,0,0,0,1.0,0);
glutDisplayFunc(draw);
glutIdleFunc(draw);
glEnable(GL_DEPTH_TEST);
glutMouseFunc(processMouse);
glutMotionFunc(processMouseActiveMotion);
glutKeyboardFunc(keyboard);
glutMainLoop();
*/
Ray* gpu_rays;
unsigned int* gpu_face;
float* gpu_vertice;
int* gpu_leafNodeIndex;
KDLeafNode* gpu_leafNode;
KDInsideNode* gpu_insideNode;
GLubyte* gpu_color;
hipMalloc((void**)&gpu_rays, sizeof( Ray )*(WIDTH*HEIGH));
hipMalloc((void**)&gpu_face, sizeof( unsigned int )*(mesh.m_numFace)*3);
hipMalloc((void**)&gpu_vertice, sizeof( float )*(mesh.m_numVertice)*3);
hipMalloc((void**)&gpu_leafNodeIndex, sizeof( int )*(tree.SizeofLeafNode));
hipMalloc((void**)&gpu_leafNode, sizeof( KDLeafNode )*(tree.numLeafNode));
hipMalloc((void**)&gpu_insideNode, sizeof( KDInsideNode )*(tree.numInsideNode));
hipMalloc((void**)&gpu_color, sizeof( GLubyte )*(WIDTH*HEIGH*3));
hipMemcpy(gpu_rays, rays, sizeof(Ray)*WIDTH*HEIGH, hipMemcpyHostToDevice);
hipMemcpy(gpu_face, mesh.face, sizeof( unsigned int )*(mesh.m_numFace)*3, hipMemcpyHostToDevice);
hipMemcpy(gpu_vertice, mesh.vertice, sizeof( float )*(mesh.m_numVertice)*3, hipMemcpyHostToDevice);
hipMemcpy(gpu_leafNodeIndex, tree.leafNodeIndex, sizeof( int )*(tree.SizeofLeafNode), hipMemcpyHostToDevice);
hipMemcpy(gpu_leafNode, tree.leafNode, sizeof( KDLeafNode )*(tree.numLeafNode), hipMemcpyHostToDevice);
hipMemcpy(gpu_insideNode, tree.insideNode, sizeof( KDInsideNode )*(tree.numInsideNode), hipMemcpyHostToDevice);
int blocks = WIDTH*HEIGH/512;
if(WIDTH*HEIGH%512 !=0)
blocks++;
printf("\nblocks:%d\n",blocks);
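	// Launch one thread per pixel: 512 threads per block and enough blocks to cover WIDTH*HEIGH;
	// the kernel's index < WIDTH*HEIGH guard discards the overshoot in the last block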
hipLaunchKernelGGL(( Render), dim3(blocks),dim3(512), 0, 0, gpu_rays,gpu_vertice, gpu_face, gpu_leafNodeIndex, gpu_insideNode, gpu_leafNode,gpu_color);
cutilDeviceSynchronize();
hipMemcpy(pixelData, gpu_color, sizeof( GLubyte )*(WIDTH*HEIGH*3), hipMemcpyDeviceToHost);
//for(int i=0;i<WIDTH*HEIGH*3;i++)
// pixelData[i]=120;
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowPosition(100, 100);
glutInitWindowSize(WIDTH, HEIGH);
glutCreateWindow("RayTracing");
glutDisplayFunc(&myDisplay);
glutMainLoop();
delete [] rays;
delete [] index;
free(pixelData);
hipFree(gpu_rays);
hipFree(gpu_vertice);
hipFree(gpu_face);
hipFree(gpu_leafNodeIndex);
hipFree(gpu_insideNode);
hipFree(gpu_leafNode);
hipFree(gpu_color);
return 0;
}
void draw( void )
{
glPushMatrix();
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glColor4f(1.0,1.0,1.0,1.0);
glRotatef(10.0f, 1.0f, 0.0f, 0.0f);
glRotatef(10.0f, 0.0f, 1.0f, 0.0f);
glRotatef( ( anger_x + anger_move_x ) * 90, 1.0f, 0.0f, 0.0f);
glRotatef( ( anger_y + anger_move_y ) * 90, 0.0f, 1.0f, 0.0f);
glTranslatef( ( shift_x + shift_move_x ) * 0.5, 0.0f, 0.0f);
glTranslatef( 0.0f, ( shift_y + shift_move_y ) * 0.5, 0.0f);
glScalef( scale, scale, scale );
drawGraph();
glPopMatrix();
glutSwapBuffers();
// glFlush();
}
void drawGraph()
{
KDNode* cur;
//KDLeafNode* cur;
//cur = &(tree.leafNode[index_node]);
cur = kdnode;
drawBox(cur->aabb.maxPoint,cur->aabb.minPoint);
glBegin(GL_LINES);
//glBegin(GL_LINE_LOOP);
for( int i=0;i<cur->numObject;i++)
{
int face = cur->object[i];
//int face = tree.leafNodeIndex[(cur->begin)+i];
glVertex3f(mesh.vertice[mesh.face[face*3]*3],mesh.vertice[mesh.face[face*3]*3+1],mesh.vertice[mesh.face[face*3]*3+2]);
glVertex3f(mesh.vertice[mesh.face[face*3+1]*3],mesh.vertice[mesh.face[face*3+1]*3+1],mesh.vertice[mesh.face[face*3+1]*3+2]);
glVertex3f(mesh.vertice[mesh.face[face*3+2]*3],mesh.vertice[mesh.face[face*3+2]*3+1],mesh.vertice[mesh.face[face*3+2]*3+2]);
}
glEnd();
}
void drawBox(Point& p1,Point& p2)
{
// glColor3f(1.0f,0.0f,0.0f);
glBegin(GL_LINES);
glVertex3f(p1.x,p1.y,p1.z);
glVertex3f(p1.x,p1.y,p2.z);
glEnd();
glBegin(GL_LINES);
glVertex3f(p1.x,p1.y,p1.z);
glVertex3f(p1.x,p2.y,p1.z);
glEnd();
glBegin(GL_LINES);
glVertex3f(p1.x,p1.y,p1.z);
glVertex3f(p2.x,p1.y,p1.z);
glEnd();
glBegin(GL_LINES);
glVertex3f(p2.x,p2.y,p1.z);
glVertex3f(p2.x,p2.y,p2.z);
glEnd();
glBegin(GL_LINES);
glVertex3f(p2.x,p1.y,p2.z);
glVertex3f(p2.x,p2.y,p2.z);
glEnd();
glBegin(GL_LINES);
glVertex3f(p1.x,p2.y,p2.z);
glVertex3f(p2.x,p2.y,p2.z);
glEnd();
///
glBegin(GL_LINES);
glVertex3f(p1.x,p1.y,p2.z);
glVertex3f(p1.x,p2.y,p2.z);
glEnd();
glBegin(GL_LINES);
glVertex3f(p1.x,p1.y,p2.z);
glVertex3f(p2.x,p1.y,p2.z);
glEnd();
glBegin(GL_LINES);
glVertex3f(p1.x,p2.y,p1.z);
glVertex3f(p1.x,p2.y,p2.z);
glEnd();
glBegin(GL_LINES);
glVertex3f(p1.x,p2.y,p1.z);
glVertex3f(p2.x,p2.y,p1.z);
glEnd();
glBegin(GL_LINES);
glVertex3f(p2.x,p1.y,p1.z);
glVertex3f(p2.x,p2.y,p1.z);
glEnd();
glBegin(GL_LINES);
glVertex3f(p2.x,p1.y,p1.z);
glVertex3f(p2.x,p1.y,p2.z);
glEnd();
}
void keyboard(unsigned char key,int x, int y)
{
switch(key)
{
case 's':
scale +=0.5;
break;
case 'm':
scale -=0.5;
break;
case 'k':
index_node++;
kdnode = kdnode->left;
}
} | ed48c9a80cda44b33c458fe051277c3c21eb4474.cu |
#include <windows.h>
// OpenGL Graphics includes
#include <GL/glew.h>
#include <GL/freeglut.h>
// includes, system
#include<stdlib.h>
#include <stdio.h>
#include <math.h>
// includes, project
#include <cutil_inline.h>
//#include <shrQATest.h>
#include<iostream>
using namespace std;
#include "KDTree.h"
#include "Mesh.h"
#define WIDTH 200
#define HEIGH 200
#define INF 100000
#define EPSILON 0.001
#define PI 3.14159265
#define PHON 30
#define AMBIENT 0.8
#define SPECULAR 0.0
#define DIFFUSE 0.8
#define RGB_R 1.0
#define RGB_G 1.0
#define RGB_B 1.0
#define GLUT_WHEEL_UP 3
#define GLUT_WHEEL_DOWN 4
double anger_x = 0;
double anger_y = 0;
double anger_move_x = 0;
double anger_move_y = 0;
double shift_x = 0;
double shift_y = 0;
double shift_move_x = 0;
double shift_move_y = 0;
double scale_x = 0;
double scale_y = 0;
double scale = 50;
bool left_tag = false;
bool right_tag = false;
bool middle_tag = false;
int mouse_x = 0;
int mouse_y = 0;
void processMouseActiveMotion(int x, int y);
void processMouse(int button, int state, int x, int y);
void keyboard(unsigned char key,int x, int y);
void draw();
void drawGraph();
void drawBox(Point& p1,Point& p2);
KDTree tree;
Mesh mesh;
GLubyte* pixelData;
int index_node=0;
KDNode* kdnode;
struct Stack{
int index;
bool leafNode;
};
struct Ray
{
Point pos;
Point dir;
};
struct Light
{
Point pos;
Point col;
};
void myDisplay(void)
{
glClear(GL_COLOR_BUFFER_BIT);
glDrawPixels(WIDTH,HEIGH,GL_BGR_EXT,GL_UNSIGNED_BYTE,pixelData);
glutSwapBuffers();
}
__device__ float Dot(Point& a, Point& b)
{
return (a.x*b.x + a.y*b.y + a.z*b.z);
}
__device__ Point CrossProduct(Point& a, Point& b)
{
Point ret;
ret.x = a.y * b.z - a.z * b.y;
ret.y = a.z * b.x - a.x * b.z;
ret.z = a.x * b.y - a.y * b.x;
return ret;
}
__device__ void Normalize(Point& vector)
{
float v=sqrt(vector.x*vector.x+vector.y*vector.y+vector.z*vector.z);
vector.x /= v; vector.y /= v; vector.z /= v;
}
__device__ Point minus(Point& p1, Point& p2)
{
Point ret;
ret.x = p1.x - p2.x;
ret.y = p1.y - p2.y;
ret.z = p1.z - p2.z;
return ret;
}
__device__ Point multi(Point& p1, Point& p2)
{
Point ret;
ret.x = p1.x*p2.x;
ret.y = p1.y*p2.y;
ret.z = p1.z*p2.z;
return ret;
}
__device__ Point make(float a, float b, float c)
{
Point ret;
ret.x = a;
ret.y = b;
ret.z= c;
return ret;
}
__device__ Point add(Point& p1, Point& p2)
{
Point ret;
ret.x = p1.x+p2.x;
ret.y = p1.y+p2.y;
ret.z = p1.z+p2.z;
return ret;
}
__device__ Point getNormal(Point& rkPoint0, Point& rkPoint1, Point& rkPoint2)
{
Point kEdge1 = minus(rkPoint1 , rkPoint0);
Point kEdge2 = minus(rkPoint2 , rkPoint0);
Point normal = CrossProduct(kEdge1 , kEdge2);
Normalize(normal);
return normal;
}
__device__ bool pointInTriangle(Point& a, Point& b, Point& c, Point& p)
{
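	// area test: P lies inside triangle ABC when area(ABC) equals the sum of
	// the areas of ABP, APC and BCP (compared within an EPSILON tolerance)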
Point AB = minus(b , a);
Point AC = minus(c , a);
Point AP = minus(p , a);
Point BC = minus(c , b);
Point BP = minus(p , b);
Point temp;
temp = CrossProduct(AB, AC);
float left = sqrt(temp.x*temp.x + temp.y*temp.y + temp.z*temp.z);
temp = CrossProduct(AB, AP);
float right = 0;//distance(CrossProduct(AB, AP), temp)+ distance(CrossProduct(AP, AC),temp) + distance(CrossProduct(BC, BP),temp);
right += sqrt(temp.x*temp.x + temp.y*temp.y + temp.z*temp.z);
temp = CrossProduct(AP, AC);
right += sqrt(temp.x*temp.x + temp.y*temp.y + temp.z*temp.z);
temp = CrossProduct(BC, BP);
right += sqrt(temp.x*temp.x + temp.y*temp.y + temp.z*temp.z);
return (left-EPSILON<right)&&(right<left+EPSILON);
}
__device__ int intersectLeaf(Ray& ray, int leaf, float* d_vertice, unsigned int * d_face, KDLeafNode* d_leafNode, int* d_leafNodeIndex,Point& d_normal, Point& d_intersection)
{
if(leaf==-1)
return -1;
int start = d_leafNode[leaf].begin;
int end = d_leafNode[leaf].begin+d_leafNode[leaf].numObject;
float minDis=10000;
int minObjNum=-1;
Point planeNormal;//store the normal of the intersected plane
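	// walk every triangle referenced by this leaf and keep the closest hit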
for(int m=start; m<end; m++)
{
Point PT1,PT2,PT3;
PT1.x = d_vertice[d_face[d_leafNodeIndex[m]*3]*3];
PT1.y = d_vertice[d_face[d_leafNodeIndex[m]*3]*3+1];
PT1.z = d_vertice[d_face[d_leafNodeIndex[m]*3]*3+2];
PT2.x = d_vertice[d_face[d_leafNodeIndex[m]*3+1]*3];
PT2.y = d_vertice[d_face[d_leafNodeIndex[m]*3+1]*3+1];
PT2.z = d_vertice[d_face[d_leafNodeIndex[m]*3+1]*3+2];
PT3.x = d_vertice[d_face[d_leafNodeIndex[m]*3+2]*3];
PT3.y = d_vertice[d_face[d_leafNodeIndex[m]*3+2]*3+1];
PT3.z = d_vertice[d_face[d_leafNodeIndex[m]*3+2]*3+2];
//get plane
Point normal=getNormal(PT1,PT3,PT2);
float d, denom, nom,t;
Point intersectionP, intersection;
d = 0-Dot(normal , PT1);
//find intersection points
denom=Dot(normal,ray.dir);
if(fabs(denom)<EPSILON)
{//parallel no intersection
continue;
}
else
{
nom=Dot(normal, ray.pos)+d;
t=-nom/denom;//distance
if(t<=0)
{//interseciton is on the back of the ray's start point
continue;
}
else
{
//whether in the triangle's plane
//intersectionP=make_Point(ray.pos.x+ray.dir.x*t,ray.pos.y+ray.dir.y*t,ray.pos.z+ray.dir.z*t);
intersectionP.x = ray.pos.x+ray.dir.x*t;
intersectionP.y = ray.pos.y+ray.dir.y*t;
intersectionP.z = ray.pos.z+ray.dir.z*t;
if( t <minDis&&pointInTriangle(PT1,PT2,PT3,intersectionP))
{
minDis=t; //min distance;
minObjNum=m;
planeNormal=normal;
d_normal.x = normal.x ;
d_normal.y = normal.y ;
d_normal.z = normal.z ;
d_intersection.x = intersectionP.x;
d_intersection.y = intersectionP.y;
d_intersection.z = intersectionP.z;
}
}
}
}
return minObjNum;
}
////////////////////////////////////////////////////////////////////////////////
//intersectBox method
////////////////////////////////////////////////////////////////////////////////
__device__ bool intersectBox(Ray& r, Point& boxmin, Point& boxmax, float &tnear, float &tfar)
{
//if(fabs(r.dir.x)<EPSILON || fabs(r.dir.y)<EPSILON || fabs(r.dir.z)<EPSILON)
// return false;
// compute intersection of ray with all six bbox planes
Point invR ;//= make_Point(1.0f) / r.dir;
invR.x = 1.0/r.dir.x;
invR.y = 1.0/r.dir.y;
invR.z = 1.0/r.dir.z;
Point tbot = multi(invR , minus(boxmin , r.pos));
Point ttop = multi(invR , minus(boxmax , r.pos));
// re-order intersections to find smallest and largest on each axis
Point tmin = make(min(ttop.x, tbot.x),min(ttop.y, tbot.y),min(ttop.z, tbot.z));
Point tmax = make(max(ttop.x, tbot.x),max(ttop.y, tbot.y),max(ttop.z, tbot.z));
// find the largest tmin and the smallest tmax
float largest_tmin = max(max(tmin.x, tmin.y), max(tmin.x, tmin.z));
float smallest_tmax = min(min(tmax.x, tmax.y), min(tmax.x, tmax.z));
tnear = largest_tmin;
tfar = smallest_tmax;
return smallest_tmax > largest_tmin;
}
__device__ int traverse(Ray& ray, KDInsideNode* d_insideNode, float* vertice, unsigned int* face, int* leafNodeIndex, KDLeafNode* leafNode, Point& normal, Point& intersection)
{
int cur_index =0;
float tnear,tfar;
Point intersectionPoint;
float intersectionvalue =0;
Stack stack[200];
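	// fixed-size explicit stack for iterative (non-recursive) KD-tree traversal;
	// assumes no more than 200 pending nodes at any time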
int capacity = 0;
int result;
stack[capacity].index =0;
stack[capacity].leafNode = false;
capacity++;
while(capacity>0)
{
capacity--;
		while(capacity>=0 && stack[capacity].leafNode) // check the index before reading the stack entry
{
result = intersectLeaf(ray, stack[capacity].index, vertice, face, leafNode, leafNodeIndex,normal, intersection);
if(result!=-1)
return result;
else
{
capacity--;
continue;
}
}
if(!stack[capacity].leafNode)
{
cur_index = stack[capacity].index;
if(intersectBox(ray, d_insideNode[cur_index].aabb.minPoint, d_insideNode[cur_index].aabb.maxPoint, tnear, tfar))
{
intersectionPoint.x = ray.pos.x + tnear*ray.dir.x;
intersectionPoint.y = ray.pos.y + tnear*ray.dir.y;
intersectionPoint.z = ray.pos.z + tnear*ray.dir.z;
switch(d_insideNode[cur_index].splitAxis)
{
case Axis_X:
intersectionvalue = intersectionPoint.x;
break;
case Axis_Y:
intersectionvalue = intersectionPoint.y;
break;
case Axis_Z:
intersectionvalue = intersectionPoint.z;
break;
}
if(intersectionvalue < d_insideNode[cur_index].splitValue)
{ // left part
if(d_insideNode[cur_index].right!=-1)
{
if(d_insideNode[cur_index].RightLeaf)
stack[capacity].leafNode = true;
else
stack[capacity].leafNode = false;
stack[capacity].index = d_insideNode[cur_index].right;
capacity++;
}
if(d_insideNode[cur_index].left!=-1)
{
if(d_insideNode[cur_index].LeftLeaf)
stack[capacity].leafNode = true;
else
stack[capacity].leafNode = false;
stack[capacity].index = d_insideNode[cur_index].left;
capacity++;
}
}
else
{ // right part
if(d_insideNode[cur_index].left!=-1)
{
if(d_insideNode[cur_index].LeftLeaf)
stack[capacity].leafNode = true;
else
stack[capacity].leafNode = false;
stack[capacity].index = d_insideNode[cur_index].left;
capacity++;
}
if(d_insideNode[cur_index].right!=-1)
{
if(d_insideNode[cur_index].RightLeaf)
stack[capacity].leafNode = true;
else
stack[capacity].leafNode = false;
stack[capacity].index = d_insideNode[cur_index].right;
capacity++;
}
}
}
}
}
	// traversal exhausted without hitting any triangle
	return -1;
}
__global__ void Render(Ray* rray, float* gpu_vertice, unsigned int* gpu_face, int* gpu_index, KDInsideNode* gpu_insideNode, KDLeafNode* gpu_leafNode,unsigned char* gpu_color)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
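	// one thread per pixel; index runs linearly over the WIDTH x HEIGH image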
if(index<WIDTH*HEIGH)
{
Point diffuse;
Point ambient;
Point specular;
Point color;
Point intersectionPoint;
Point normal;
float dif;
float cos_data;
Point reflect;
Point rayFromEye;
Ray ray;
ray.pos.x = rray[index].pos.x;
ray.pos.y = rray[index].pos.y;
ray.pos.z = rray[index].pos.z;
ray.dir.x = rray[index].dir.x;
ray.dir.y = rray[index].dir.y;
ray.dir.z = rray[index].dir.z;
Light light;
light.pos.x = 5;
light.pos.y = 5;
light.pos.z = 5;
light.col.x =1;
light.col.y =1;
light.col.z =1;
int kind = traverse(ray,gpu_insideNode, gpu_vertice, gpu_face, gpu_index,gpu_leafNode,normal,intersectionPoint);
ambient.x = ambient.y = ambient.z =0.0;
diffuse.x = diffuse.y = diffuse.z =0.0;
specular.x = specular.y = specular.z =0.0;
if(kind!=-1)
{
ambient.x = AMBIENT*RGB_R;
ambient.y = AMBIENT*RGB_G;
ambient.z = AMBIENT*RGB_B;
Point p;
p.x = intersectionPoint.x;
p.y = intersectionPoint.y;
p.z = intersectionPoint.z;
Normalize(p);
dif =Dot(p,normal);
if(dif>0)
{
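			// shadow test: cast a secondary ray from the hit point toward the light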
Ray temp;
Point temp_point1,temp_point2;
temp.pos = intersectionPoint;
temp.dir = minus(light.pos , intersectionPoint);
kind = traverse(temp,gpu_insideNode, gpu_vertice, gpu_face, gpu_index,gpu_leafNode,temp_point1,temp_point2);
if(kind ==-1)
{
color.x = ambient.x;
color.y = ambient.y;
color.z = ambient.z;
gpu_color[2+3*index] = (unsigned char)(color.x*255);
gpu_color[1+3*index] = (unsigned char)(color.y*255);
gpu_color[0+3*index] = (unsigned char)(color.z*255);
return;
}
else
{
diffuse.x = dif*DIFFUSE;
diffuse.y = dif*DIFFUSE;
diffuse.z = dif*DIFFUSE;
}
}
else
{
color.x = ambient.x;
color.y = ambient.y;
color.z = ambient.z;
gpu_color[2+3*index] = (unsigned char)(color.x*255);
gpu_color[1+3*index] = (unsigned char)(color.y*255);
gpu_color[0+3*index] = (unsigned char)(color.z*255);
return;
}
if(dif>0)
{
reflect.x = normal.x *dif*2-p.x;
reflect.y = normal.y *dif*2-p.y;
reflect.z = normal.z *dif*2-p.z;
rayFromEye.x = ray.pos.x - intersectionPoint.x;
rayFromEye.y = ray.pos.y - intersectionPoint.y;
rayFromEye.z = ray.pos.z - intersectionPoint.z;
Normalize(rayFromEye);
cos_data = reflect.x*rayFromEye.x + reflect.y*rayFromEye.y + reflect.z*rayFromEye.z;
if(cos_data>0)
{
cos_data = pow(cos_data,PHON);
specular.x = light.col.x*cos_data*SPECULAR;
specular.y = light.col.y*cos_data*SPECULAR;
specular.z = light.col.z*cos_data*SPECULAR;
}
}
}
color.x = diffuse.x + ambient.x + specular.x;
color.y = diffuse.y + ambient.y + specular.y;
color.z = diffuse.z + ambient.z + specular.z;
if(color.x >1.0)
color.x =1.0;
if(color.y >1.0)
color.y =1.0;
if(color.z >1.0)
color.z =1.0;
gpu_color[2+3*index] = (unsigned char)(color.x*255);
gpu_color[1+3*index] = (unsigned char)(color.y*255);
gpu_color[0+3*index] = (unsigned char)(color.z*255);
}
}
void processMouse(int button, int state, int x, int y) {
if( button == GLUT_WHEEL_UP )
scale -= 0.05;
if( button == GLUT_WHEEL_DOWN )
scale += 0.05;
if ( state == GLUT_DOWN )
{
mouse_x = x; mouse_y = y;
if( button == GLUT_LEFT_BUTTON )
left_tag = true;
if( button == GLUT_RIGHT_BUTTON )
right_tag = true;
// cout << "left down!" << endl;
}
if ( state == GLUT_UP )
{
left_tag = false;
right_tag = false;
if( button == GLUT_LEFT_BUTTON )
{
anger_x += anger_move_x;
anger_y += anger_move_y;
anger_move_x = 0;
anger_move_y = 0;
}
if( button == GLUT_RIGHT_BUTTON )
{
shift_x += shift_move_x;
shift_y += shift_move_y;
shift_move_x = 0;
shift_move_y = 0;
}
// cout << "left up!" << endl;
}
}
void processMouseActiveMotion(int x, int y) {
if ( left_tag )
{
anger_move_x = ( y - mouse_y ) * 1.0 / 800;
anger_move_y = ( x - mouse_x ) * 1.0 / 800;
// cout << anger_x << endl;
}
if ( right_tag )
{
shift_move_x = x - mouse_x;
shift_move_y = mouse_y - y;
}
}
int main(int argc, char **argv)
{
if(mesh.loadFile("export/dolphins.obj"))
{
cout<<"successful";
}
else
cout<<"failer";
int* index = new int[mesh.m_numFace];
for(int i=0;i<mesh.m_numFace;i++)
index[i] =i;
tree.ConstructKDTree(tree.root,mesh.m_numFace,index,mesh.face,mesh.vertice,mesh.aabb,0,true);
tree.PrepareMemory();
// kdnode = tree.root;
///////////////////////////////////////////////////////////////////////////////////////////////////
Ray* rays;
rays = new Ray[WIDTH *HEIGH];
//pixelData = new GLubyte[WIDTH *HEIGH*3];
pixelData = (GLubyte*)malloc(WIDTH*HEIGH*3);
if(pixelData == 0)
return 0;
//CUDA_Prepare(&mesh, &tree,pixelData);
//unsigned char gpu_color[3];
int pixel_y=0,pixel_x =0;//pixels
float i_inc = 1.0/WIDTH;
float j_inc = 1.0/HEIGH;
float j=-0.5,i;
int count_data =0;
for(pixel_y=0; pixel_y < HEIGH; j+=j_inc, pixel_y++ )
{
for(pixel_x =0,i=-0.5*WIDTH/HEIGH;pixel_x < WIDTH; i+=i_inc*WIDTH/HEIGH, pixel_x++)
{
// cout<<"pixel_y "<<pixel_y<<" pixel_x "<<pixel_x<<endl;
rays[count_data].dir.x= i; //+camera.look_at.x -camera.location.x;
rays[count_data].dir.y= -1.5 ;//+camera.look_at.y -camera.location.y;
rays[count_data].dir.z =-j;// camera->direction.z;//*(camera.look_at.z -camera.location.z);
rays[count_data].pos.x = 0;//camera->location.x;
rays[count_data].pos.y = 7;//camera->location.y;
rays[count_data].pos.z = 0;//camera->location.z;
/*
Render(rays[count_data],mesh.vertice, mesh.face, tree.leafNodeIndex, tree.insideNode, tree.leafNode,gpu_color);
pixelData[3*count_data] = gpu_color[0];
pixelData[3*count_data+1] = gpu_color[1];
pixelData[3*count_data+2] = gpu_color[2];
*/
count_data++;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/*
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DEPTH | GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowPosition(300, 30);
glutInitWindowSize(WIDTH, HEIGH);
glutCreateWindow( "kdtree");
glViewport(0, 0, 800, 800);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(-200, 200, -200, 200, 1, 10000);
// gluPerspective (0, 800 /600.0, 10.0, 100.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
gluLookAt(0,0,1000,0,0,0,0,1.0,0);
glutDisplayFunc(draw);
glutIdleFunc(draw);
glEnable(GL_DEPTH_TEST);
glutMouseFunc(processMouse);
glutMotionFunc(processMouseActiveMotion);
glutKeyboardFunc(keyboard);
glutMainLoop();
*/
Ray* gpu_rays;
unsigned int* gpu_face;
float* gpu_vertice;
int* gpu_leafNodeIndex;
KDLeafNode* gpu_leafNode;
KDInsideNode* gpu_insideNode;
GLubyte* gpu_color;
cudaMalloc((void**)&gpu_rays, sizeof( Ray )*(WIDTH*HEIGH));
cudaMalloc((void**)&gpu_face, sizeof( unsigned int )*(mesh.m_numFace)*3);
cudaMalloc((void**)&gpu_vertice, sizeof( float )*(mesh.m_numVertice)*3);
cudaMalloc((void**)&gpu_leafNodeIndex, sizeof( int )*(tree.SizeofLeafNode));
cudaMalloc((void**)&gpu_leafNode, sizeof( KDLeafNode )*(tree.numLeafNode));
cudaMalloc((void**)&gpu_insideNode, sizeof( KDInsideNode )*(tree.numInsideNode));
cudaMalloc((void**)&gpu_color, sizeof( GLubyte )*(WIDTH*HEIGH*3));
cudaMemcpy(gpu_rays, rays, sizeof(Ray)*WIDTH*HEIGH, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_face, mesh.face, sizeof( unsigned int )*(mesh.m_numFace)*3, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_vertice, mesh.vertice, sizeof( float )*(mesh.m_numVertice)*3, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_leafNodeIndex, tree.leafNodeIndex, sizeof( int )*(tree.SizeofLeafNode), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_leafNode, tree.leafNode, sizeof( KDLeafNode )*(tree.numLeafNode), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_insideNode, tree.insideNode, sizeof( KDInsideNode )*(tree.numInsideNode), cudaMemcpyHostToDevice);
int blocks = WIDTH*HEIGH/512;
if(WIDTH*HEIGH%512 !=0)
blocks++;
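// launch one thread per pixel: 512 threads per block, block count rounded up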
printf("\nblocks:%d\n",blocks);
Render<<<blocks,512>>>(gpu_rays,gpu_vertice, gpu_face, gpu_leafNodeIndex, gpu_insideNode, gpu_leafNode,gpu_color);
cutilDeviceSynchronize();
cudaMemcpy(pixelData, gpu_color, sizeof( GLubyte )*(WIDTH*HEIGH*3), cudaMemcpyDeviceToHost);
//for(int i=0;i<WIDTH*HEIGH*3;i++)
// pixelData[i]=120;
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowPosition(100, 100);
glutInitWindowSize(WIDTH, HEIGH);
glutCreateWindow("RayTracing");
glutDisplayFunc(&myDisplay);
glutMainLoop();
delete [] rays;
delete [] index;
free(pixelData);
cudaFree(gpu_rays);
cudaFree(gpu_vertice);
cudaFree(gpu_face);
cudaFree(gpu_leafNodeIndex);
cudaFree(gpu_insideNode);
cudaFree(gpu_leafNode);
cudaFree(gpu_color);
return 0;
}
void draw( void )
{
glPushMatrix();
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glColor4f(1.0,1.0,1.0,1.0);
glRotatef(10.0f, 1.0f, 0.0f, 0.0f);
glRotatef(10.0f, 0.0f, 1.0f, 0.0f);
glRotatef( ( anger_x + anger_move_x ) * 90, 1.0f, 0.0f, 0.0f);
glRotatef( ( anger_y + anger_move_y ) * 90, 0.0f, 1.0f, 0.0f);
glTranslatef( ( shift_x + shift_move_x ) * 0.5, 0.0f, 0.0f);
glTranslatef( 0.0f, ( shift_y + shift_move_y ) * 0.5, 0.0f);
glScalef( scale, scale, scale );
drawGraph();
glPopMatrix();
glutSwapBuffers();
// glFlush();
}
void drawGraph()
{
KDNode* cur;
//KDLeafNode* cur;
//cur = &(tree.leafNode[index_node]);
cur = kdnode;
drawBox(cur->aabb.maxPoint,cur->aabb.minPoint);
glBegin(GL_LINES);
//glBegin(GL_LINE_LOOP);
for( int i=0;i<cur->numObject;i++)
{
int face = cur->object[i];
//int face = tree.leafNodeIndex[(cur->begin)+i];
glVertex3f(mesh.vertice[mesh.face[face*3]*3],mesh.vertice[mesh.face[face*3]*3+1],mesh.vertice[mesh.face[face*3]*3+2]);
glVertex3f(mesh.vertice[mesh.face[face*3+1]*3],mesh.vertice[mesh.face[face*3+1]*3+1],mesh.vertice[mesh.face[face*3+1]*3+2]);
glVertex3f(mesh.vertice[mesh.face[face*3+2]*3],mesh.vertice[mesh.face[face*3+2]*3+1],mesh.vertice[mesh.face[face*3+2]*3+2]);
}
glEnd();
}
void drawBox(Point& p1,Point& p2)
{
// glColor3f(1.0f,0.0f,0.0f);
glBegin(GL_LINES);
glVertex3f(p1.x,p1.y,p1.z);
glVertex3f(p1.x,p1.y,p2.z);
glEnd();
glBegin(GL_LINES);
glVertex3f(p1.x,p1.y,p1.z);
glVertex3f(p1.x,p2.y,p1.z);
glEnd();
glBegin(GL_LINES);
glVertex3f(p1.x,p1.y,p1.z);
glVertex3f(p2.x,p1.y,p1.z);
glEnd();
glBegin(GL_LINES);
glVertex3f(p2.x,p2.y,p1.z);
glVertex3f(p2.x,p2.y,p2.z);
glEnd();
glBegin(GL_LINES);
glVertex3f(p2.x,p1.y,p2.z);
glVertex3f(p2.x,p2.y,p2.z);
glEnd();
glBegin(GL_LINES);
glVertex3f(p1.x,p2.y,p2.z);
glVertex3f(p2.x,p2.y,p2.z);
glEnd();
///
glBegin(GL_LINES);
glVertex3f(p1.x,p1.y,p2.z);
glVertex3f(p1.x,p2.y,p2.z);
glEnd();
glBegin(GL_LINES);
glVertex3f(p1.x,p1.y,p2.z);
glVertex3f(p2.x,p1.y,p2.z);
glEnd();
glBegin(GL_LINES);
glVertex3f(p1.x,p2.y,p1.z);
glVertex3f(p1.x,p2.y,p2.z);
glEnd();
glBegin(GL_LINES);
glVertex3f(p1.x,p2.y,p1.z);
glVertex3f(p2.x,p2.y,p1.z);
glEnd();
glBegin(GL_LINES);
glVertex3f(p2.x,p1.y,p1.z);
glVertex3f(p2.x,p2.y,p1.z);
glEnd();
glBegin(GL_LINES);
glVertex3f(p2.x,p1.y,p1.z);
glVertex3f(p2.x,p1.y,p2.z);
glEnd();
}
void keyboard(unsigned char key,int x, int y)
{
switch(key)
{
case 's':
scale +=0.5;
break;
case 'm':
scale -=0.5;
break;
case 'k':
	index_node++;
	if(kdnode != NULL && kdnode->left != NULL) // guard against walking past a leaf
		kdnode = kdnode->left;
	break;
}
} |
a779db9b40f3795f4a0b878c26b13026090c8850.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
*
* Matrix Multiplication - CUDA for GPUs
*
* CS3210
*
**/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <assert.h>
int size;
typedef struct
{
float** element;
} matrix;
long long wall_clock_time()
{
#ifdef __linux__
struct timespec tp;
clock_gettime(CLOCK_REALTIME, &tp);
return (long long)(tp.tv_nsec + (long long)tp.tv_sec * 1000000000ll);
#else
struct timeval tv;
gettimeofday(&tv, NULL);
return (long long)(tv.tv_usec * 1000 + (long long)tv.tv_sec * 1000000000ll);
#endif
}
/**
* Allocates memory for a matrix of size SIZE
* The memory is allocated row-major order, i.e.
* elements from the same row are allocated at contiguous
* memory addresses.
**/
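// Note: managed (unified) memory is used here so both host and device code can
// dereference m->element directly without explicit copies.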
void allocate_matrix(matrix* m)
{
int i;
hipError_t rc;
// allocate array for all the rows
rc = hipMallocManaged((void**)&(m->element), sizeof(float*) * size);
if (rc != hipSuccess)
{
fprintf(stdout, "CUDA error: %s\n", hipGetErrorString(rc));
exit(1);
}
// allocate an array for each row of the matrix
for (i = 0; i < size; i++)
{
rc = hipMallocManaged((void**)&(m->element[i]), sizeof(float) * size);
if (rc != hipSuccess)
{
fprintf(stdout, "CUDA error: %s\n", hipGetErrorString(rc));
exit(1);
}
}
}
/**
* Free the memory allocated for a matrix.
**/
void free_matrix(matrix* m) {
int i;
for (i = 0; i < size; i++)
hipFree(m->element[i]);
hipFree(m->element);
}
/**
* Initializes the elements of the matrix with
* random values between 0 and 9
**/
void init_matrix(matrix m)
{
int i, j;
for (i = 0; i < size; i++)
for (j = 0; j < size; j++)
{
m.element[i][j] = rand() % 10;
}
}
/**
* Initializes the elements of the matrix with
* element 0.
**/
void init_matrix_zero(matrix m)
{
int i, j;
for (i = 0; i < size; i++)
for (j = 0; j < size; j++)
{
m.element[i][j] = 0.0;
}
}
/**
* Multiplies matrix @a with matrix @b storing
* the result in matrix @result
*
* The multiplication algorithm is the O(n^3)
* algorithm
*/
void mm(matrix a, matrix b, matrix result)
{
int i, j, k;
// Do the multiplication
for (i = 0; i < size; i++)
for (j = 0; j < size; j++)
for(k = 0; k < size; k++)
result.element[i][j] += a.element[i][k] * b.element[k][j];
}
/**
* Each thread computes one element (i,j) of the result matrix.
*/
__global__ void mm_kernel(matrix a, matrix b, matrix result, int size)
{
// thread x indexes the column (j) and thread y the row (i), so consecutive
// threads in a warp access consecutive elements of the same result row
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
int k;
if (i >= size || j >= size)
return;
for(k = 0; k < size; k++)
{
result.element[i][j] += a.element[i][k] * b.element[k][j];
}
}
void print_matrix(matrix m)
{
int i, j;
for (i = 0; i < size; i++)
{
printf("row %4d: ", i);
for (j = 0; j < size; j++)
printf("%6.2f ", m.element[i][j]);
printf("\n");
}
}
void work()
{
matrix a, b, result1, result2;
long long before, after;
int correct, i, j, dim;
hipError_t rc;
// Allocate memory for matrices
allocate_matrix(&a);
allocate_matrix(&b);
allocate_matrix(&result1);
allocate_matrix(&result2);
// Initialize matrix elements
init_matrix(a);
init_matrix(b);
// Perform sequential matrix multiplication
before = wall_clock_time();
mm(a, b, result1);
after = wall_clock_time();
fprintf(stdout, "Matrix multiplication on CPU took %1.2f seconds\n", ((float)(after - before))/1000000000);
int length = 32;
// Perform CUDA matrix multiplication
dim3 block(length, length); // a block of 32 x 32 CUDA threads
dim = (size % length == 0) ? size / length : size / length + 1;
dim3 grid(dim, dim); // a grid of CUDA thread blocks
before = wall_clock_time();
hipLaunchKernelGGL(( mm_kernel), dim3(grid), dim3(block), 0, 0, a, b, result2, size);
hipDeviceSynchronize();
after = wall_clock_time();
fprintf(stdout, "Matrix multiplication on GPU took %1.2f seconds\n", ((float)(after - before))/1000000000);
// was there any error?
rc = hipGetLastError();
if (rc != hipSuccess)
printf("Last CUDA error %s\n", hipGetErrorString(rc));
// Compare the results
correct = 1;
for (i = 0; correct && i < size; i++)
for (j = 0; j < size; j++)
if (result1.element[i][j] != result2.element[i][j]) {
correct = 0;
break;
}
if (correct)
printf("The result matrices are identical!\n");
else
printf("Difference in result matrices at element (%d, %d)!\n", i, j);
free_matrix(&a);
free_matrix(&b);
free_matrix(&result1);
free_matrix(&result2);
}
int main(int argc, char ** argv)
{
srand(0);
printf("Usage: %s <size>\n", argv[0]);
if (argc >= 2)
size = atoi(argv[1]);
else
size = 1024;
fprintf(stdout,"Sequential matrix multiplication of size %d\n", size);
// Multiply the matrices
work();
return 0;
}
| a779db9b40f3795f4a0b878c26b13026090c8850.cu | /**
*
* Matrix Multiplication - CUDA for GPUs
*
* CS3210
*
**/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <assert.h>
int size;
typedef struct
{
float** element;
} matrix;
long long wall_clock_time()
{
#ifdef __linux__
struct timespec tp;
clock_gettime(CLOCK_REALTIME, &tp);
return (long long)(tp.tv_nsec + (long long)tp.tv_sec * 1000000000ll);
#else
struct timeval tv;
gettimeofday(&tv, NULL);
return (long long)(tv.tv_usec * 1000 + (long long)tv.tv_sec * 1000000000ll);
#endif
}
/**
* Allocates memory for a matrix of size SIZE
* The memory is allocated row-major order, i.e.
* elements from the same row are allocated at contiguous
* memory addresses.
**/
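// Note: managed (unified) memory is used here so both host and device code can
// dereference m->element directly without explicit copies.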
void allocate_matrix(matrix* m)
{
int i;
cudaError_t rc;
// allocate array for all the rows
rc = cudaMallocManaged((void**)&(m->element), sizeof(float*) * size);
if (rc != cudaSuccess)
{
fprintf(stdout, "CUDA error: %s\n", cudaGetErrorString(rc));
exit(1);
}
// allocate an array for each row of the matrix
for (i = 0; i < size; i++)
{
rc = cudaMallocManaged((void**)&(m->element[i]), sizeof(float) * size);
if (rc != cudaSuccess)
{
fprintf(stdout, "CUDA error: %s\n", cudaGetErrorString(rc));
exit(1);
}
}
}
/**
* Free the memory allocated for a matrix.
**/
void free_matrix(matrix* m) {
int i;
for (i = 0; i < size; i++)
cudaFree(m->element[i]);
cudaFree(m->element);
}
/**
* Initializes the elements of the matrix with
* random values between 0 and 9
**/
void init_matrix(matrix m)
{
int i, j;
for (i = 0; i < size; i++)
for (j = 0; j < size; j++)
{
m.element[i][j] = rand() % 10;
}
}
/**
* Initializes the elements of the matrix with
* element 0.
**/
void init_matrix_zero(matrix m)
{
int i, j;
for (i = 0; i < size; i++)
for (j = 0; j < size; j++)
{
m.element[i][j] = 0.0;
}
}
/**
* Multiplies matrix @a with matrix @b storing
* the result in matrix @result
*
* The multiplication algorithm is the O(n^3)
* algorithm
*/
void mm(matrix a, matrix b, matrix result)
{
int i, j, k;
// Do the multiplication
for (i = 0; i < size; i++)
for (j = 0; j < size; j++)
for(k = 0; k < size; k++)
result.element[i][j] += a.element[i][k] * b.element[k][j];
}
/**
* Each thread computes one element (i,j) of the result matrix.
*/
__global__ void mm_kernel(matrix a, matrix b, matrix result, int size)
{
// thread x indexes the column (j) and thread y the row (i), so consecutive
// threads in a warp access consecutive elements of the same result row
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
int k;
if (i >= size || j >= size)
return;
for(k = 0; k < size; k++)
{
result.element[i][j] += a.element[i][k] * b.element[k][j];
}
}
void print_matrix(matrix m)
{
int i, j;
for (i = 0; i < size; i++)
{
printf("row %4d: ", i);
for (j = 0; j < size; j++)
printf("%6.2f ", m.element[i][j]);
printf("\n");
}
}
void work()
{
matrix a, b, result1, result2;
long long before, after;
int correct, i, j, dim;
cudaError_t rc;
// Allocate memory for matrices
allocate_matrix(&a);
allocate_matrix(&b);
allocate_matrix(&result1);
allocate_matrix(&result2);
// Initialize matrix elements
init_matrix(a);
init_matrix(b);
// Perform sequential matrix multiplication
before = wall_clock_time();
mm(a, b, result1);
after = wall_clock_time();
fprintf(stdout, "Matrix multiplication on CPU took %1.2f seconds\n", ((float)(after - before))/1000000000);
int length = 32;
// Perform CUDA matrix multiplication
dim3 block(length, length); // a block of 32 x 32 CUDA threads
dim = (size % length == 0) ? size / length : size / length + 1;
dim3 grid(dim, dim); // a grid of CUDA thread blocks
before = wall_clock_time();
mm_kernel<<<grid, block>>>(a, b, result2, size);
cudaDeviceSynchronize();
after = wall_clock_time();
fprintf(stdout, "Matrix multiplication on GPU took %1.2f seconds\n", ((float)(after - before))/1000000000);
// was there any error?
rc = cudaGetLastError();
if (rc != cudaSuccess)
printf("Last CUDA error %s\n", cudaGetErrorString(rc));
// Compare the results
correct = 1;
for (i = 0; correct && i < size; i++)
for (j = 0; j < size; j++)
if (result1.element[i][j] != result2.element[i][j]) {
correct = 0;
break;
}
if (correct)
printf("The result matrices are identical!\n");
else
printf("Difference in result matrices at element (%d, %d)!\n", i, j);
free_matrix(&a);
free_matrix(&b);
free_matrix(&result1);
free_matrix(&result2);
}
int main(int argc, char ** argv)
{
srand(0);
printf("Usage: %s <size>\n", argv[0]);
if (argc >= 2)
size = atoi(argv[1]);
else
size = 1024;
fprintf(stdout,"Sequential matrix multiplication of size %d\n", size);
// Multiply the matrices
work();
return 0;
}
|
965a5c7e2413ccb179cbbe6e9b3d71f184f6b531.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 965a5c7e2413ccb179cbbe6e9b3d71f184f6b531.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
11c5fb411397f9d719fec444d7eccaac7f999487.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
********************************* BLUEBOTTLE **********************************
*******************************************************************************
*
* Copyright 2012 - 2015 Adam Sierakowski, The Johns Hopkins University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Please contact the Johns Hopkins University to use Bluebottle for
* commercial and/or for-profit applications.
******************************************************************************/
#include "cuda_particle.h"
__global__ void reset_phase(int *phase, dom_struct *dom)
{
int tj = blockDim.x*blockIdx.x + threadIdx.x;
int tk = blockDim.y*blockIdx.y + threadIdx.y;
// flag everything as fluid
if((tj < dom->Gcc._jnb) && (tk < dom->Gcc._knb)) {
for(int i = dom->Gcc._isb; i < dom->Gcc._ieb; i++) {
phase[i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b] = -1;
}
}
}
__global__ void reset_phase_shell(int *phase_shell, dom_struct *dom)
{
int tj = blockDim.x*blockIdx.x + threadIdx.x;
int tk = blockDim.y*blockIdx.y + threadIdx.y;
// flag everything as fluid
if((tj < dom->Gcc._jnb) && (tk < dom->Gcc._knb)) {
for(int i = dom->Gcc._isb; i < dom->Gcc._ieb; i++) {
phase_shell[i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b] = 1;
}
}
}
__global__ void reset_flag_u(int *flag_u, dom_struct *dom)
{
int i; // iterator
int tj = blockDim.x*blockIdx.x + threadIdx.x;
int tk = blockDim.y*blockIdx.y + threadIdx.y;
// flag everything as fluid
if((tj < dom->Gfx._jnb) && (tk < dom->Gfx._knb)) {
for(i = dom->Gfx._isb; i < dom->Gfx._ieb; i++) {
flag_u[i + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b] = 1;
}
}
}
__global__ void reset_flag_v(int *flag_v, dom_struct *dom)
{
int j; // iterator
int tk = blockDim.x*blockIdx.x + threadIdx.x;
int ti = blockDim.y*blockIdx.y + threadIdx.y;
// flag everything as fluid
if((tk < dom->Gfy._knb) && (ti < dom->Gfy._inb)) {
for(j = dom->Gfy._jsb; j < dom->Gfy._jeb; j++) {
flag_v[ti + j*dom->Gfy._s1b + tk*dom->Gfy._s2b] = 1;
}
}
}
__global__ void reset_flag_w(int *flag_w, dom_struct *dom)
{
int k; // iterator
int ti = blockDim.x*blockIdx.x + threadIdx.x;
int tj = blockDim.y*blockIdx.y + threadIdx.y;
// flag everything as fluid
if((ti < dom->Gfz._inb) && (tj < dom->Gfz._jnb)) {
for(k = dom->Gfz._ksb; k < dom->Gfz._keb; k++) {
flag_w[ti + tj*dom->Gfz._s1b + k*dom->Gfz._s2b] = 1;
}
}
}
__global__ void build_phase(int p, part_struct *parts, int *phase,
dom_struct *dom, real X, real Y, real Z,
int is, int ie, int js, int je, int ks, int ke)
{
real xx, yy, zz; // distance from cell center to particle center along
// Cartesian basis
real d; // distance from cell center to particle center
int C; // current cell
int cutoff; // cage cutoff constant
// update phase (use center of cell containing particle center)
int ti = blockDim.x*blockIdx.x + threadIdx.x + is;
int tj = blockDim.y*blockIdx.y + threadIdx.y + js;
int tk = blockDim.z*blockIdx.z + threadIdx.z + ks;
if((ti < ie) && (tj < je) && (tk < ke)) {
xx = (ti-0.5)*dom->dx - (X-dom->xs);
yy = (tj-0.5)*dom->dy - (Y-dom->ys);
zz = (tk-0.5)*dom->dz - (Z-dom->zs);
d = sqrt(xx * xx + yy * yy + zz * zz);
C = ti + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b;
cutoff = (floor(d / (parts[p].r))) < 1;
phase[C] = cutoff*p + (1 - cutoff)*phase[C];
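    // phase now holds the particle index p for cells whose center lies inside
    // the particle, and stays at -1 (fluid) otherwise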
}
}
__global__ void build_cage(int p, part_struct *parts, int *phase,
int *phase_shell, dom_struct *dom, real Y, real Z,
int js, int je, int ks, int ke)
{
real xx, yy, zz; // distance from cell center to particle center along
// Cartesian basis
real d; // distance from cell center to particle center
int cutoff; // cage cutoff constant
real X; // particle center location
// update phase (use center of cell containing particle center)
int tj = blockDim.x*blockIdx.x + threadIdx.x + js;
int tk = blockDim.y*blockIdx.y + threadIdx.y + ks;
if((tj < je) && (tk < ke)) {
X = parts[p].x;
if(parts[p].x < dom->xs) X = parts[p].x + dom->xl;
for(int i = parts[p].cage.is; i < parts[p].cage.ibs; i++) {
xx = (i-0.5)*dom->dx - (X-dom->xs);
yy = (tj-0.5)*dom->dy - (Y-dom->ys);
zz = (tk-0.5)*dom->dz - (Z-dom->zs);
d = sqrt(xx * xx + yy * yy + zz * zz);
cutoff =
(1 - floor(d / (1.0*parts[p].r
- 0.50*(dom->dx + dom->dy + dom->dz)/3.)));
if((cutoff * (p+1) - 1) > -1)
phase[i + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b] = cutoff * (p + 1) - 1;
/*cutoff = (cutoff>0) &&
(ceil(d / (1.0*parts[p].r - 2.*(dom->dx + dom->dy + dom->dz)/3.))-1);
if((cutoff*(p+1)-1) > -1)
phase_shell[i + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b] = 1-cutoff;
*/
}
X = parts[p].x;
if(parts[p].x > dom->xe) X = parts[p].x - dom->xl;
for(int i = parts[p].cage.ibe; i < parts[p].cage.ie; i++) {
xx = (i-0.5)*dom->dx - (X-dom->xs);
yy = (tj-0.5)*dom->dy - (Y-dom->ys);
zz = (tk-0.5)*dom->dz - (Z-dom->zs);
d = sqrt(xx * xx + yy * yy + zz * zz);
cutoff =
(1 - floor(d / (1.0*parts[p].r
- 0.50*(dom->dx + dom->dy + dom->dz)/3.)));
if((cutoff * (p+1) - 1) > -1)
phase[i + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b] = cutoff * (p + 1) - 1;
/*cutoff = (cutoff>0) &&
(ceil(d / (1.0*parts[p].r - 2.*(dom->dx + dom->dy + dom->dz)/3.))-1);
if((cutoff*(p+1)-1) > -1)
phase_shell[i + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b] = 1-cutoff;
*/
}
}
}
__global__ void cage_phases_periodic_x(int *phase_type, dom_struct *dom)
{
int tj = blockDim.x*blockIdx.x + threadIdx.x;
int tk = blockDim.y*blockIdx.y + threadIdx.y;
if(tj < dom->Gcc.jnb && tk < dom->Gcc.knb) {
phase_type[dom->Gcc.isb + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b]
= phase_type[(dom->Gcc.ie-1) + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b];
phase_type[(dom->Gcc.ieb-1) + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b]
= phase_type[dom->Gcc.is + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b];
}
}
__global__ void cage_phases_periodic_y(int *phase_type, dom_struct *dom)
{
int tk = blockDim.x*blockIdx.x + threadIdx.x;
int ti = blockDim.y*blockIdx.y + threadIdx.y;
if(tk < dom->Gcc.knb && ti < dom->Gcc.inb) {
phase_type[ti + dom->Gcc.jsb*dom->Gcc.s1b + tk*dom->Gcc.s2b]
= phase_type[ti + (dom->Gcc.je-1)*dom->Gcc.s1b + tk*dom->Gcc.s2b];
phase_type[ti + (dom->Gcc.jeb-1)*dom->Gcc.s1b + tk*dom->Gcc.s2b]
= phase_type[ti + dom->Gcc.js*dom->Gcc.s1b + tk*dom->Gcc.s2b];
}
}
__global__ void cage_phases_periodic_z(int *phase_type, dom_struct *dom)
{
int ti = blockDim.x*blockIdx.x + threadIdx.x;
int tj = blockDim.y*blockIdx.y + threadIdx.y;
if(ti < dom->Gcc.inb && tj < dom->Gcc.jnb) {
phase_type[ti + tj*dom->Gcc.s1b + dom->Gcc.ksb*dom->Gcc.s2b]
= phase_type[ti + tj*dom->Gcc.s1b + (dom->Gcc.ke-1)*dom->Gcc.s2b];
phase_type[ti + tj*dom->Gcc.s1b + (dom->Gcc.keb-1)*dom->Gcc.s2b]
= phase_type[ti + tj*dom->Gcc.s1b + dom->Gcc.ks*dom->Gcc.s2b];
}
}
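// The phase_shell_* kernels clear phase_shell (initialized to 1 everywhere) on
// solid cells that sit next to a fluid cell in the given direction, leaving a
// zeroed shell that outlines each particle.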
__global__ void phase_shell_x(part_struct *parts,
dom_struct *dom, int *phase, int *phase_shell)
{
int i; // iterator
int W, E; // flag locations
int tj = blockDim.x*blockIdx.x + threadIdx.x + DOM_BUF;
int tk = blockDim.y*blockIdx.y + threadIdx.y + DOM_BUF;
if((tj < dom->Gcc.je) && (tk < dom->Gcc.ke)) {
for(i = dom->Gcc.is; i <= dom->Gcc.ie; i++) {
W = (i-1) + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b;
E = i + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b;
// if phase changes from fluid to solid
phase_shell[E] = phase_shell[E]*(1 - (phase[W] < 0 && phase[E] > -1));
// if phase changes from solid to fluid
phase_shell[W] = phase_shell[W]*(1 - (phase[W] > -1 && phase[E] < 0));
}
}
}
__global__ void phase_shell_y(part_struct *parts,
dom_struct *dom, int *phase, int *phase_shell)
{
int j; // iterator
int N, S; // flag locations
int tk = blockDim.x*blockIdx.x + threadIdx.x + DOM_BUF;
int ti = blockDim.y*blockIdx.y + threadIdx.y + DOM_BUF;
if((tk < dom->Gcc.ke) && (ti < dom->Gcc.ie)) {
for(j = dom->Gcc.js; j <= dom->Gcc.je; j++) {
S = ti + (j-1)*dom->Gcc.s1b + tk*dom->Gcc.s2b;
N = ti + j*dom->Gcc.s1b + tk*dom->Gcc.s2b;
// if phase changes from fluid to solid
phase_shell[N] = phase_shell[N]*(1 - (phase[S] < 0 && phase[N] > -1));
// if phase changes from solid to fluid
phase_shell[S] = phase_shell[S]*(1 - (phase[S] > -1 && phase[N] < 0));
}
}
}
__global__ void phase_shell_z(part_struct *parts,
dom_struct *dom, int *phase, int *phase_shell)
{
int k; // iterator
int B, T; // flag locations
int ti = blockDim.x*blockIdx.x + threadIdx.x + DOM_BUF;
int tj = blockDim.y*blockIdx.y + threadIdx.y + DOM_BUF;
if((ti < dom->Gcc.ie) && (tj < dom->Gcc.je)) {
for(k = dom->Gcc.ks; k <= dom->Gcc.ke; k++) {
B = ti + tj*dom->Gcc.s1b + (k-1)*dom->Gcc.s2b;
T = ti + tj*dom->Gcc.s1b + k*dom->Gcc.s2b;
// if phase changes from fluid to solid
phase_shell[T] = phase_shell[T]*(1 - (phase[B] < 0 && phase[T] > -1));
// if phase changes from solid to fluid
phase_shell[B] = phase_shell[B]*(1 - (phase[B] > -1 && phase[T] < 0));
}
}
}
__global__ void cage_flag_u_periodic_x(int *flag_u, dom_struct *dom)
{
int tj = blockDim.x*blockIdx.x + threadIdx.x;
int tk = blockDim.y*blockIdx.y + threadIdx.y;
if(tj < dom->Gfx.jnb && tk < dom->Gfx.knb) {
flag_u[dom->Gfx.isb + tj*dom->Gfx.s1b + tk*dom->Gfx.s2b]
= flag_u[(dom->Gfx.ie-2) + tj*dom->Gfx.s1b + tk*dom->Gfx.s2b];
flag_u[(dom->Gfx.ieb-1) + tj*dom->Gfx.s1b + tk*dom->Gfx.s2b]
= flag_u[(dom->Gfx.is+1) + tj*dom->Gfx.s1b + tk*dom->Gfx.s2b];
}
}
__global__ void cage_flag_u_periodic_y(int *flag_u, dom_struct *dom)
{
int tk = blockDim.x*blockIdx.x + threadIdx.x;
int ti = blockDim.y*blockIdx.y + threadIdx.y;
if(tk < dom->Gfx.knb && ti < dom->Gfx.inb) {
flag_u[ti + dom->Gfx.jsb*dom->Gfx.s1b + tk*dom->Gfx.s2b]
= flag_u[ti + (dom->Gfx.je-1)*dom->Gfx.s1b + tk*dom->Gfx.s2b];
flag_u[ti + (dom->Gfx.jeb-1)*dom->Gfx.s1b + tk*dom->Gfx.s2b]
= flag_u[ti + dom->Gfx.js*dom->Gfx.s1b + tk*dom->Gfx.s2b];
}
}
__global__ void cage_flag_u_periodic_z(int *flag_u, dom_struct *dom)
{
int ti = blockDim.x*blockIdx.x + threadIdx.x;
int tj = blockDim.y*blockIdx.y + threadIdx.y;
if(ti < dom->Gfx.inb && tj < dom->Gfx.jnb) {
flag_u[ti + tj*dom->Gfx.s1b + dom->Gfx.ksb*dom->Gfx.s2b]
= flag_u[ti + tj*dom->Gfx.s1b + (dom->Gfx.ke-1)*dom->Gfx.s2b];
flag_u[ti + tj*dom->Gfx.s1b + (dom->Gfx.keb-1)*dom->Gfx.s2b]
= flag_u[ti + tj*dom->Gfx.s1b + dom->Gfx.ks*dom->Gfx.s2b];
}
}
__global__ void cage_flag_v_periodic_x(int *flag_v, dom_struct *dom)
{
int tj = blockDim.x*blockIdx.x + threadIdx.x;
int tk = blockDim.y*blockIdx.y + threadIdx.y;
if(tj < dom->Gfy.jnb && tk < dom->Gfy.knb) {
flag_v[dom->Gfy.isb + tj*dom->Gfy.s1b + tk*dom->Gfy.s2b]
= flag_v[(dom->Gfy.ie-1) + tj*dom->Gfy.s1b + tk*dom->Gfy.s2b];
flag_v[(dom->Gfy.ieb-1) + tj*dom->Gfy.s1b + tk*dom->Gfy.s2b]
= flag_v[dom->Gfy.is + tj*dom->Gfy.s1b + tk*dom->Gfy.s2b];
}
}
__global__ void cage_flag_v_periodic_y(int *flag_v, dom_struct *dom)
{
int tk = blockDim.x*blockIdx.x + threadIdx.x;
int ti = blockDim.y*blockIdx.y + threadIdx.y;
if(tk < dom->Gfy.knb && ti < dom->Gfy.inb) {
flag_v[ti + dom->Gfy.jsb*dom->Gfy.s1b + tk*dom->Gfy.s2b]
= flag_v[ti + (dom->Gfy.je-2)*dom->Gfy.s1b + tk*dom->Gfy.s2b];
flag_v[ti + (dom->Gfy.jeb-1)*dom->Gfy.s1b + tk*dom->Gfy.s2b]
= flag_v[ti + (dom->Gfy.js+1)*dom->Gfy.s1b + tk*dom->Gfy.s2b];
}
}
__global__ void cage_flag_v_periodic_z(int *flag_v, dom_struct *dom)
{
int ti = blockDim.x*blockIdx.x + threadIdx.x;
int tj = blockDim.y*blockIdx.y + threadIdx.y;
if(ti < dom->Gfy.inb && tj < dom->Gfy.jnb) {
flag_v[ti + tj*dom->Gfy.s1b + dom->Gfy.ksb*dom->Gfy.s2b]
= flag_v[ti + tj*dom->Gfy.s1b + (dom->Gfy.ke-1)*dom->Gfy.s2b];
flag_v[ti + tj*dom->Gfy.s1b + (dom->Gfy.keb-1)*dom->Gfy.s2b]
= flag_v[ti + tj*dom->Gfy.s1b + dom->Gfy.ks*dom->Gfy.s2b];
}
}
__global__ void cage_flag_w_periodic_x(int *flag_w, dom_struct *dom)
{
int tj = blockDim.x*blockIdx.x + threadIdx.x;
int tk = blockDim.y*blockIdx.y + threadIdx.y;
if(tj < dom->Gfz.jnb && tk < dom->Gfz.knb) {
flag_w[dom->Gfz.isb + tj*dom->Gfz.s1b + tk*dom->Gfz.s2b]
= flag_w[(dom->Gfz.ie-1)+ tj*dom->Gfz.s1b + tk*dom->Gfz.s2b];
flag_w[(dom->Gfz.ieb-1) + tj*dom->Gfz.s1b + tk*dom->Gfz.s2b]
= flag_w[dom->Gfz.is + tj*dom->Gfz.s1b + tk*dom->Gfz.s2b];
}
}
__global__ void cage_flag_w_periodic_y(int *flag_w, dom_struct *dom)
{
int tk = blockDim.x*blockIdx.x + threadIdx.x;
int ti = blockDim.y*blockIdx.y + threadIdx.y;
if(tk < dom->Gfz.knb && ti < dom->Gfz.inb) {
flag_w[ti + dom->Gfz.jsb*dom->Gfz.s1b + tk*dom->Gfz.s2b]
= flag_w[ti + (dom->Gfz.je-1)*dom->Gfz.s1b + tk*dom->Gfz.s2b];
flag_w[ti + (dom->Gfz.jeb-1)*dom->Gfz.s1b + tk*dom->Gfz.s2b]
= flag_w[ti + dom->Gfz.js*dom->Gfz.s1b + tk*dom->Gfz.s2b];
}
}
__global__ void cage_flag_w_periodic_z(int *flag_w, dom_struct *dom)
{
int ti = blockDim.x*blockIdx.x + threadIdx.x;
int tj = blockDim.y*blockIdx.y + threadIdx.y;
if(ti < dom->Gfz.inb && tj < dom->Gfz.jnb) {
flag_w[ti + tj*dom->Gfz.s1b + dom->Gfz.ksb*dom->Gfz.s2b]
= flag_w[ti + tj*dom->Gfz.s1b + (dom->Gfz.ke-2)*dom->Gfz.s2b];
flag_w[ti + tj*dom->Gfz.s1b + (dom->Gfz.keb-1)*dom->Gfz.s2b]
= flag_w[ti + tj*dom->Gfz.s1b + (dom->Gfz.ks+1)*dom->Gfz.s2b];
}
}
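// The cage_flag_* kernels mark velocity faces cut by a particle cage: a face
// between a fluid cell and a solid cell, or between two shell cells, gets
// flag = -1 (0 when STEPS is defined); undisturbed faces keep flag = 1.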
__global__ void cage_flag_u(int *flag_u, part_struct *parts, dom_struct *dom,
int *phase, int *phase_shell)
{
int i; // iterator
int W, E; // flag locations
int tj = blockDim.x*blockIdx.x + threadIdx.x + DOM_BUF;
int tk = blockDim.y*blockIdx.y + threadIdx.y + DOM_BUF;
if((tj < dom->Gcc.je) && (tk < dom->Gcc.ke)) {
for(i = dom->Gcc.is; i <= dom->Gcc.ie; i++) {
W = (i-1) + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b;
E = i + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b;
#ifdef STEPS
flag_u[i + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b] =
1 - ((phase[W] < 0 && phase[E] > -1)
|| (phase[W] > -1 && phase[E] < 0)
|| ((phase_shell[W] < 1 && phase_shell[E] < 1)));
#else
flag_u[i + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b] =
1 - 2*((phase[W] < 0 && phase[E] > -1)
|| (phase[W] > -1 && phase[E] < 0)
|| ((phase_shell[W] < 1 && phase_shell[E] < 1)));
#endif
}
}
}
__global__ void cage_flag_v(int *flag_v, part_struct *parts, dom_struct *dom,
int *phase, int *phase_shell)
{
int j; // iterator
int S, N; // flag locations
int tk = blockDim.x*blockIdx.x + threadIdx.x + DOM_BUF;
int ti = blockDim.y*blockIdx.y + threadIdx.y + DOM_BUF;
if((tk < dom->Gcc.ke) && (ti < dom->Gcc.ie)) {
for(j = dom->Gcc.js; j <= dom->Gcc.je; j++) {
S = ti + (j-1)*dom->Gcc.s1b + tk*dom->Gcc.s2b;
N = ti + j*dom->Gcc.s1b + tk*dom->Gcc.s2b;
#ifdef STEPS
flag_v[ti + j*dom->Gfy._s1b + tk*dom->Gfy._s2b] =
1 - ((phase[S] < 0 && phase[N] > -1)
|| (phase[S] > -1 && phase[N] < 0)
|| ((phase_shell[S] < 1 && phase_shell[N] < 1)));
#else
flag_v[ti + j*dom->Gfy._s1b + tk*dom->Gfy._s2b] =
1 - 2*((phase[S] < 0 && phase[N] > -1)
|| (phase[S] > -1 && phase[N] < 0)
|| ((phase_shell[S] < 1 && phase_shell[N] < 1)));
#endif
}
}
}
__global__ void cage_flag_w(int *flag_w, part_struct *parts, dom_struct *dom,
int *phase, int *phase_shell)
{
int k; // iterator
int B, T; // flag locations
int ti = blockDim.x*blockIdx.x + threadIdx.x + DOM_BUF;
int tj = blockDim.y*blockIdx.y + threadIdx.y + DOM_BUF;
if((ti < dom->Gcc.ie) && (tj < dom->Gcc.je)) {
for(k = dom->Gcc.ks; k <= dom->Gcc.ke; k++) {
B = ti + tj*dom->Gcc.s1b + (k-1)*dom->Gcc.s2b;
T = ti + tj*dom->Gcc.s1b + k*dom->Gcc.s2b;
#ifdef STEPS
flag_w[ti + tj*dom->Gfz._s1b + k*dom->Gfz._s2b] =
1 - ((phase[B] < 0 && phase[T] > -1)
|| (phase[B] > -1 && phase[T] < 0)
|| ((phase_shell[B] < 1 && phase_shell[T] < 1)));
#else
flag_w[ti + tj*dom->Gfz._s1b + k*dom->Gfz._s2b] =
1 - 2*((phase[B] < 0 && phase[T] > -1)
|| (phase[B] > -1 && phase[T] < 0)
|| ((phase_shell[B] < 1 && phase_shell[T] < 1)));
#endif
}
}
}
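// flag_external_* zero the flags on the domain boundary faces in each
// direction so those velocities are handled by the domain boundary conditions
// rather than by the particle boundary kernels.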
__global__ void flag_external_u(int *flag_u, dom_struct *dom)
{
int tj = blockDim.x*blockIdx.x + threadIdx.x;
int tk = blockDim.y*blockIdx.y + threadIdx.y;
if((tj < dom->Gfx._jnb) && (tk < dom->Gfx._knb)) {
flag_u[dom->Gfx._is + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b] = 0;
flag_u[dom->Gfx._ie-1 + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b] = 0;
}
}
__global__ void flag_external_v(int *flag_v, dom_struct *dom)
{
int tk = blockDim.x*blockIdx.x + threadIdx.x;
int ti = blockDim.y*blockIdx.y + threadIdx.y;
if((tk < dom->Gfy._knb) && (ti < dom->Gfy._inb)) {
flag_v[ti + dom->Gfy._js*dom->Gfy._s1b + tk*dom->Gfy._s2b] = 0;
flag_v[ti + (dom->Gfy._je-1)*dom->Gfy._s1b + tk*dom->Gfy._s2b] = 0;
}
}
__global__ void flag_external_w(int *flag_w, dom_struct *dom)
{
int ti = blockDim.x*blockIdx.x + threadIdx.x;
int tj = blockDim.y*blockIdx.y + threadIdx.y;
if((ti < dom->Gfz._inb) && (tj < dom->Gfz._jnb)) {
flag_w[ti + tj*dom->Gfz._s1b + dom->Gfz._ks*dom->Gfz._s2b] = 0;
flag_w[ti + tj*dom->Gfz._s1b + (dom->Gfz._ke-1)*dom->Gfz._s2b] = 0;
}
}
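// The part_BC_* velocity kernels impose the analytic solution on flagged faces
// in and around each particle cage: lamb_vel evaluates the truncated Lamb
// series velocity at the face location, then the particle's rigid-body motion
// (translation + omega x r) and an angular-acceleration correction are added
// before the flagged node is overwritten.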
__global__ void part_BC_u(real *u, int *phase, int *flag_u,
part_struct *parts, dom_struct *dom,
real nu, int stride,
real *pnm_re, real *pnm_im, real *phinm_re, real *phinm_im,
real *chinm_re, real *chinm_im)
{
int tj = blockDim.x*blockIdx.x + threadIdx.x + dom->Gfx._jsb;
int tk = blockDim.y*blockIdx.y + threadIdx.y + dom->Gfx._ksb;
int C, CW, CE;
real x, y, z; // velocity node location Cartesian
real X, Y, Z; // particle position
real r, theta, phi; // velocity node location spherical
real Ux, Uy, Uz; // Cartesian velocity components of the analytic solution
int P, PP, PW, PE; // particle number
real a; // particle radius
int order; // particle order
real oy, oz; // particle angular velocity
real oydot, ozdot; // particle angular acceleration
real uu; // particle velocity
if(tj < dom->Gfx._jeb && tk < dom->Gfx._keb) {
for(int i = dom->Gfx._is; i < dom->Gfx._ie; i++) {
C = i + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b;
CW = (i-1) + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b;
CE = i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b;
// get particle number
PW = phase[CW];
PE = phase[CE];
if(PW > -1) {
P = PW;
PP = PW;
a = parts[P].r;
X = parts[P].x;
Y = parts[P].y;
Z = parts[P].z;
order = parts[P].order;
oy = parts[P].oy;
oz = parts[P].oz;
oydot = parts[P].oydot;
ozdot = parts[P].ozdot;
uu = parts[P].u;
} else if(PE > -1) {
P = PE;
PP = PE;
a = parts[P].r;
X = parts[P].x;
Y = parts[P].y;
Z = parts[P].z;
order = parts[P].order;
oy = parts[P].oy;
oz = parts[P].oz;
oydot = parts[P].oydot;
ozdot = parts[P].ozdot;
uu = parts[P].u;
} else {
P = 0;
PP = -1;
a = (dom->dx + dom->dy + dom->dz) / 3.;
X = (i-DOM_BUF) * dom->dx + dom->xs + a;
Y = (tj-0.5) * dom->dy + dom->ys + a;
Z = (tk-0.5) * dom->dz + dom->zs + a;
order = 0;
oy = 0;
oz = 0;
oydot = 0;
ozdot = 0;
uu = 0;
}
x = (i-DOM_BUF) * dom->dx + dom->xs - X;
if(x < dom->xs - 0.5*dom->dx) x += dom->xl;
if(x > dom->xe + 0.5*dom->dx) x -= dom->xl;
y = (tj-0.5) * dom->dy + dom->ys - Y;
if(y < dom->ys - 0.5*dom->dy) y += dom->yl;
if(y > dom->ye + 0.5*dom->dy) y -= dom->yl;
z = (tk-0.5) * dom->dz + dom->zs - Z;
if(z < dom->zs - 0.5*dom->dz) z += dom->zl;
if(z > dom->ze + 0.5*dom->dz) z -= dom->zl;
xyz2rtp(x, y, z, &r, &theta, &phi);
// calculate analytic solution
#ifdef STEPS
int check = (flag_u[C] < 1) && (PP > -1);
u[C] = - (check - 1) * u[C];
#else
lamb_vel(order, a, r, theta, phi,
nu, pnm_re, pnm_im, phinm_re, phinm_im,
chinm_re, chinm_im,
P, stride, &Ux, &Uy, &Uz);
real ocrossr_x = oy*z - oz*y;
real odotcrossr_x = oydot*z - ozdot*y;
Ux += uu + ocrossr_x;
Ux += 0.1/nu *(r*r*r*r*r-a*a*a*a*a)/(r*r*r) * odotcrossr_x;
// boolean check if this is an analytically-posed node
int check = (flag_u[C] < 1) && (PP > -1);
u[C] = check * Ux + (1 - check) * u[C];
#endif
}
}
}
__global__ void part_BC_v(real *v, int *phase, int *flag_v,
part_struct *parts, dom_struct *dom,
real nu, int stride,
real *pnm_re, real *pnm_im, real *phinm_re, real *phinm_im,
real *chinm_re, real *chinm_im)
{
int tk = blockDim.x*blockIdx.x + threadIdx.x + dom->Gfy._ksb;
int ti = blockDim.y*blockIdx.y + threadIdx.y + dom->Gfy._isb;
int C, CS, CN;
real x, y, z; // velocity node location Cartesian
real X, Y, Z; // particle position
real r, theta, phi; // velocity node location spherical
real Ux, Uy, Uz; // Cartesian velocity components of the analytic solution
int P, PP, PS, PN; // particle number
real a; // particle radius
int order; // particle order
real oz, ox; // particle angular velocity
real ozdot, oxdot; // particle angular acceleration
real vv; // particle velocity
if(tk < dom->Gfy._keb && ti < dom->Gfy._ieb) {
for(int j = dom->Gfy._js; j < dom->Gfy._je; j++) {
C = ti + j*dom->Gfy._s1b + tk*dom->Gfy._s2b;
CS = ti + (j-1)*dom->Gcc._s1b + tk*dom->Gcc._s2b;
CN = ti + j*dom->Gcc._s1b + tk*dom->Gcc._s2b;
// get particle number
PS = phase[CS];
PN = phase[CN];
if(PS > -1) {
P = PS;
PP = PS;
a = parts[P].r;
X = parts[P].x;
Y = parts[P].y;
Z = parts[P].z;
order = parts[P].order;
oz = parts[P].oz;
ox = parts[P].ox;
ozdot = parts[P].ozdot;
oxdot = parts[P].oxdot;
vv = parts[P].v;
} else if(PN > -1) {
P = PN;
PP = PN;
a = parts[P].r;
X = parts[P].x;
Y = parts[P].y;
Z = parts[P].z;
order = parts[P].order;
oz = parts[P].oz;
ox = parts[P].ox;
ozdot = parts[P].ozdot;
oxdot = parts[P].oxdot;
vv = parts[P].v;
} else {
P = 0;
PP = -1;
a = (dom->dx + dom->dy + dom->dz) / 3.;
X = (ti-0.5) * dom->dx + dom->xs + a;
Y = (j-DOM_BUF) * dom->dy + dom->ys + a;
Z = (tk-0.5) * dom->dz + dom->zs + a;
order = 0;
oz = 0;
ox = 0;
ozdot = 0;
oxdot = 0;
vv = 0;
}
x = (ti-0.5) * dom->dx + dom->xs - X;
if(x < dom->xs - 0.5*dom->dx) x += dom->xl;
if(x > dom->xe + 0.5*dom->dx) x -= dom->xl;
y = (j-DOM_BUF) * dom->dy + dom->ys - Y;
if(y < dom->ys - 0.5*dom->dy) y += dom->yl;
if(y > dom->ye + 0.5*dom->dy) y -= dom->yl;
z = (tk-0.5) * dom->dz + dom->zs - Z;
if(z < dom->zs - 0.5*dom->dz) z += dom->zl;
if(z > dom->ze + 0.5*dom->dz) z -= dom->zl;
xyz2rtp(x, y, z, &r, &theta, &phi);
// calculate analytic solution
#ifdef STEPS
int check = (flag_v[C] < 1) && (PP > -1);
v[C] = - (check - 1) * v[C];
#else
lamb_vel(order, a, r, theta, phi,
nu, pnm_re, pnm_im, phinm_re, phinm_im,
chinm_re, chinm_im,
P, stride, &Ux, &Uy, &Uz);
// switch reference frame and set boundary condition
real ocrossr_y = -(ox*z - oz*x);
real odotcrossr_y = -(oxdot*z - ozdot*x);
Uy += vv + ocrossr_y;
Uy += 0.1/nu *(r*r*r*r*r-a*a*a*a*a)/(r*r*r) * odotcrossr_y;
// boolean check if this is an analytically-posed node
int check = (flag_v[C] < 1) && (PP > -1);
v[C] = check * Uy + (1 - check) * v[C];
#endif
}
}
}
__global__ void part_BC_w(real *w, int *phase, int *flag_w,
part_struct *parts, dom_struct *dom,
real nu, int stride,
real *pnm_re, real *pnm_im, real *phinm_re, real *phinm_im,
real *chinm_re, real *chinm_im)
{
int ti = blockDim.x*blockIdx.x + threadIdx.x + dom->Gfz._isb;
int tj = blockDim.y*blockIdx.y + threadIdx.y + dom->Gfz._jsb;
int C, CB, CT;
real x, y, z; // velocity node location Cartesian
real X, Y, Z; // particle position
real r, theta, phi; // velocity node location spherical
real Ux, Uy, Uz; // Cartesian velocity components of the analytic solution
int P, PP, PB, PT; // particle number
real a; // particle radius
int order; // particle order
real ox, oy; // particle angular velocity
real oxdot, oydot; // particle angular acceleration
real ww; // particle velocity
if(ti < dom->Gfz._ieb && tj < dom->Gfz._jeb) {
for(int k = dom->Gfz._ks; k < dom->Gfz._ke; k++) {
C = ti + tj*dom->Gfz._s1b + k*dom->Gfz._s2b;
CB = ti + tj*dom->Gcc._s1b + (k-1)*dom->Gcc._s2b;
CT = ti + tj*dom->Gcc._s1b + k*dom->Gcc._s2b;
// get particle number
PB = phase[CB];
PT = phase[CT];
if(PB > -1) {
P = PB;
PP = PB;
a = parts[P].r;
X = parts[P].x;
Y = parts[P].y;
Z = parts[P].z;
order = parts[P].order;
ox = parts[P].ox;
oy = parts[P].oy;
oxdot = parts[P].oxdot;
oydot = parts[P].oydot;
ww = parts[P].w;
} else if(PT > -1) {
P = PT;
PP = PT;
a = parts[P].r;
X = parts[P].x;
Y = parts[P].y;
Z = parts[P].z;
order = parts[P].order;
ox = parts[P].ox;
oy = parts[P].oy;
oxdot = parts[P].oxdot;
oydot = parts[P].oydot;
ww = parts[P].w;
} else {
P = 0;
PP = -1;
a = (dom->dx + dom->dy + dom->dz) / 3.;
X = (ti-0.5) * dom->dx + dom->xs + a;
Y = (tj-0.5) * dom->dy + dom->ys + a;
Z = (k-DOM_BUF) * dom->dz + dom->zs + a;
order = 0;
ox = 0;
oy = 0;
oxdot = 0;
oydot = 0;
ww = 0;
}
x = (ti-0.5) * dom->dx + dom->xs - X;
if(x < dom->xs - 0.5*dom->dx) x += dom->xl;
if(x > dom->xe + 0.5*dom->dx) x -= dom->xl;
y = (tj-0.5) * dom->dy + dom->ys - Y;
if(y < dom->ys - 0.5*dom->dy) y += dom->yl;
if(y > dom->ye + 0.5*dom->dy) y -= dom->yl;
z = (k-DOM_BUF) * dom->dz + dom->zs - Z;
if(z < dom->zs - 0.5*dom->dz) z += dom->zl;
if(z > dom->ze + 0.5*dom->dz) z -= dom->zl;
xyz2rtp(x, y, z, &r, &theta, &phi);
// calculate analytic solution
#ifdef STEPS
int check = (flag_w[C] < 1) && (PP > -1);
w[C] = - (check - 1) * w[C];
#else
lamb_vel(order, a, r, theta, phi,
nu, pnm_re, pnm_im, phinm_re, phinm_im,
chinm_re, chinm_im,
P, stride, &Ux, &Uy, &Uz);
// switch reference frame and set boundary condition
real ocrossr_z = ox*y - oy*x;
real odotcrossr_z = oxdot*y - oydot*x;
Uz += ww + ocrossr_z;
Uz += 0.1/nu *(r*r*r*r*r-a*a*a*a*a)/(r*r*r) * odotcrossr_z;
// boolean check if this is an analytically-posed node
int check = (flag_w[C] < 1) && (PP > -1);
w[C] = check * Uz + (1 - check) * w[C];
#endif
}
}
}
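// part_BC_p adjusts the pressure Poisson right-hand side near each particle
// using the analytic Lamb pressure plus centrifugal and frame-acceleration
// contributions evaluated at the cell center.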
__global__ void part_BC_p(real *p, real *p_rhs, int *phase, int *phase_shell,
part_struct *parts, dom_struct *dom,
real mu, real nu, real dt, real dt0, gradP_struct gradP, real rho_f, int stride,
real *pnm_re00, real *pnm_im00, real *phinm_re00, real *phinm_im00,
real *chinm_re00, real *chinm_im00,
real *pnm_re, real *pnm_im, real *phinm_re, real *phinm_im,
real *chinm_re, real *chinm_im)
{
int tj = blockDim.x*blockIdx.x + threadIdx.x + dom->Gcc._js;
int tk = blockDim.y*blockIdx.y + threadIdx.y + dom->Gcc._ks;
int C, CC;
real x, y, z; // pressure node location Cartesian
real X, Y, Z; // particle position
real r, theta, phi; // velocity node location spherical
real pp_tmp;//, pp_tmp00;// temporary pressure
int P; // particle number
real a; // particle radius
int order; // particle order
real ox, oy, oz; // particle angular velocity
real udot, vdot, wdot;// particle acceleration
if(tj < dom->Gcc._je && tk < dom->Gcc._ke) {
for(int i = dom->Gcc._is; i < dom->Gcc._ie; i++) {
CC = i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b;
C = (i-DOM_BUF) + (tj-DOM_BUF)*dom->Gcc._s1 + (tk-DOM_BUF)*dom->Gcc._s2;
// get particle number
P = phase[CC];
if(P > -1) {
a = parts[P].r;
X = parts[P].x;
Y = parts[P].y;
Z = parts[P].z;
order = parts[P].order;
ox = parts[P].ox;
oy = parts[P].oy;
oz = parts[P].oz;
udot = parts[P].udot;
vdot = parts[P].vdot;
wdot = parts[P].wdot;
} else {
P = 0;
a = (dom->dx + dom->dy + dom->dz) / 3.;
X = (i-0.5) * dom->dx + dom->xs + a;
Y = (tj-0.5) * dom->dy + dom->ys + a;
Z = (tk-0.5) * dom->dz + dom->zs + a;
order = 0;
ox = 0;
oy = 0;
oz = 0;
udot = 0;
vdot = 0;
wdot = 0;
}
x = (i-0.5) * dom->dx + dom->xs - X;
if(x < dom->xs) x += dom->xl;
if(x > dom->xe) x -= dom->xl;
y = (tj-0.5) * dom->dy + dom->ys - Y;
if(y < dom->ys) y += dom->yl;
if(y > dom->ye) y -= dom->yl;
z = (tk-0.5) * dom->dz + dom->zs - Z;
if(z < dom->zs) z += dom->zl;
if(z > dom->ze) z -= dom->zl;
xyz2rtp(x, y, z, &r, &theta, &phi);
// calculate analytic solution
#ifdef STEPS
p_rhs[C] = phase_shell[CC] * p_rhs[C];
#else
real ar = a / r;
real ra = r / a;
pp_tmp = X_pn(0, theta, phi, pnm_re, pnm_im, P, stride);
//pp_tmp00 = X_pn(0, theta, phi, pnm_re00, pnm_im00, P, stride);
for(int n = 1; n <= order; n++) {
pp_tmp += (1.-0.5*n*(2.*n-1.)/(n+1.)*pow(ar,2.*n+1.))*pow(ra,n)
* X_pn(n, theta, phi, pnm_re, pnm_im, P, stride);
//pp_tmp00 += (1.-0.5*n*(2.*n-1.)/(n+1.)*pow(ar,2.*n+1.))*pow(ra,n)
//* X_pn(n, theta, phi, pnm_re00, pnm_im00, P, stride);
pp_tmp -= n*(2.*n-1.)*(2.*n+1.)/(n+1.)*pow(ar,n+1.)
* X_phin(n, theta, phi, phinm_re, phinm_im, P, stride);
//pp_tmp00 -= n*(2.*n-1.)*(2.*n+1.)/(n+1.)*pow(ar,n+1.)
//* X_phin(n, theta, phi, phinm_re00, phinm_im00, P, stride);
}
pp_tmp *= mu*nu/(a*a);
//pp_tmp00 *= mu*nu/(a*a);
real ocrossr2 = (oy*z - oz*y) * (oy*z - oz*y);
ocrossr2 += (ox*z - oz*x) * (ox*z - oz*x);
ocrossr2 += (ox*y - oy*x) * (ox*y - oy*x);
real rhoV = rho_f;
real accdotr = (-gradP.x/rhoV - udot)*x + (-gradP.y/rhoV - vdot)*y
+ (-gradP.z/rhoV - wdot)*z;
pp_tmp += 0.5 * rho_f * ocrossr2 + rho_f * accdotr;
//pp_tmp00 += 0.5 * rho_f * ocrossr2 + rho_f * accdotr;
// write BC if flagged, otherwise leave alone
p_rhs[C] = (real) phase_shell[CC] * p_rhs[C]
+ (real) (1 - phase_shell[CC])
* ((pp_tmp - p[CC]) + 0.5*mu*p_rhs[C]);
p_rhs[C] = (real) (phase[CC] < 0 && phase_shell[CC]) * p_rhs[C];
#endif
}
}
}
__global__ void part_BC_p_fill(real *p, int *phase,
part_struct *parts, dom_struct *dom,
real mu, real nu, real rho_f, gradP_struct gradP, int stride,
real *pnm_re, real *pnm_im)
{
int tj = blockDim.x*blockIdx.x + threadIdx.x + dom->Gcc._js;
int tk = blockDim.y*blockIdx.y + threadIdx.y + dom->Gcc._ks;
int CC;
real x, y, z; // pressure node location Cartesian
real X, Y, Z; // particle position
  real r, theta, phi; // pressure node location spherical
real pp_tmp;//, pp_tmp00;// temporary pressure
int P; // particle number
real a; // particle radius
real ox, oy, oz; // particle angular velocity
real udot, vdot, wdot;// particle acceleration
if(tj < dom->Gcc._je && tk < dom->Gcc._ke) {
for(int i = dom->Gcc._is; i < dom->Gcc._ie; i++) {
CC = i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b;
// get particle number
P = phase[CC];
if(P > -1) {
a = parts[P].r;
X = parts[P].x;
Y = parts[P].y;
Z = parts[P].z;
ox = parts[P].ox;
oy = parts[P].oy;
oz = parts[P].oz;
udot = parts[P].udot;
vdot = parts[P].vdot;
wdot = parts[P].wdot;
} else {
P = 0;
a = (dom->dx + dom->dy + dom->dz) / 3.;
X = (i-0.5) * dom->dx + dom->xs + a;
Y = (tj-0.5) * dom->dy + dom->ys + a;
Z = (tk-0.5) * dom->dz + dom->zs + a;
ox = 0;
oy = 0;
oz = 0;
udot = 0;
vdot = 0;
wdot = 0;
}
x = (i-0.5) * dom->dx + dom->xs - X;
if(x < dom->xs) x += dom->xl;
if(x > dom->xe) x -= dom->xl;
y = (tj-0.5) * dom->dy + dom->ys - Y;
if(y < dom->ys) y += dom->yl;
if(y > dom->ye) y -= dom->yl;
z = (tk-0.5) * dom->dz + dom->zs - Z;
if(z < dom->zs) z += dom->zl;
if(z > dom->ze) z -= dom->zl;
xyz2rtp(x, y, z, &r, &theta, &phi);
// calculate analytic solution
#ifdef STEPS
      // NOTE: phase_shell is not an argument of this kernel; assuming the
      // intent is to zero the pressure inside particles, the phase flag is
      // used as the mask instead.
      p[CC] = (real) (phase[CC] < 0) * p[CC];
#else
pp_tmp = X_pn(0, theta, phi, pnm_re, pnm_im, P, stride);
pp_tmp *= mu*nu/(a*a);
real ocrossr2 = (oy*z - oz*y) * (oy*z - oz*y);
ocrossr2 += (ox*z - oz*x) * (ox*z - oz*x);
ocrossr2 += (ox*y - oy*x) * (ox*y - oy*x);
real rhoV = rho_f;
real accdotr = (-gradP.x/rhoV - udot)*x + (-gradP.y/rhoV - vdot)*y
+ (-gradP.z/rhoV - wdot)*z;
pp_tmp += 0.5 * rho_f * ocrossr2 + rho_f * accdotr;
// write BC if inside particle, otherwise leave alone
p[CC] = (real) (phase[CC] > -1) * pp_tmp
+ (1 - (phase[CC] > -1)) * p[CC];
#endif
}
}
}
__device__ real Nnm(int n, int m)
{
real fact_top = 1;
real fact_bot = 1;
for(int i = 1; i <= (n-m); i++) fact_top *= (real)i;
for(int i = 1; i <= (n+m); i++) fact_bot *= (real)i;
return sqrt((2.*n+1.) / 4. / PI * fact_top / fact_bot);
}
__device__ real Pnm(int n, int m, real theta)
{
real x = cos(theta);
real y = sin(theta);
switch(n) {
case 0: return 1;
case 1:
switch(m) {
//case -1: return 0.5*y;
case 0: return x;
case 1: return -y;
}
case 2:
switch(m) {
//case -2: return 0.125*y*y;
//case -1: return 0.5*x*y;
case 0: return 0.5*(3.*x*x - 1.);
case 1: return -3.*x*y;
case 2: return 3.*y*y;
}
case 3:
switch(m) {
//case -3: return 0.02083333333333*y*y*y;
//case -2: return 0.125*x*y*y;
//case -1: return -0.125*(1. - 5.*x*x)*y;
case 0: return 0.5*x*(5.*x*x - 3.);
case 1: return -1.5*(5.*x*x - 1.)*y;
case 2: return 15.*x*y*y;
case 3: return -15.*y*y*y;
}
case 4:
switch(m) {
//case -4: return .002604166666667*y*y*y*y;
//case -3: return 0.02083333333333*x*y*y*y*y;
//case -2: return 0.02083333333333*(7.*x*x - 1.)*y*y;
//case -1: return -0.125*x*(3. - 7.*x*x)*y;
case 0: return 0.125*(35.*x*x*x*x - 30.*x*x + 3.);
case 1: return -2.5*(7.*x*x - 3.)*x*y;
case 2: return 7.5*(7.*x*x - 1.)*y*y;
case 3: return -105.*x*y*y*y;
case 4: return 105.*y*y*y*y;
}
case 5:
switch(m) {
//case -5: return 0.000260416666667*y*y*y*y*y;
//case -4: return 0.002604166666667*x*y*y*y*y;
//case -3: return -0.002604166666667*y*y*y*(9.*x*x - 1.);
//case -2: return 0.0625*x*y*y*(3.*x*x - 1.);
//case -1: return -0.0625*(21.*x*x*x*x - 14.*x*x + 1.);
case 0: return 0.125*x*(63.*x*x*x*x - 70.*x*x + 15.);
case 1: return -1.875*y*(21.*x*x*x*x - 14.*x*x + 1.);
case 2: return 52.5*x*y*y*(3.*x*x - 1.);
case 3: return -52.5*y*y*y*(9.*x*x - 1.);
case 4: return 945.*x*y*y*y*y;
case 5: return -945.*y*y*y*y*y;
}
}
return 0; // this should never be reached
}
__device__ void xyz2rtp(real x, real y, real z, real *r, real *theta, real *phi)
{
real XY = x*x + y*y;
real XYZ = XY + z*z;
// We calculate the coefficients everywhere in space. If a particle is
// centered at the center of a cell, XYZ will be zero. We'll set it equal
// to one since these values aren't ever used.
if(XYZ >= 0 && XYZ < DIV_ST) XYZ = 1;//DIV_ST;
else if(XYZ < 0 && XYZ > -DIV_ST) XYZ = 1;//-DIV_ST;
*r = sqrt(XYZ);
*theta = acos(z / *r);
// Note that XY cannot be set equal to one, because the values are used.
if(XY >= 0 && XY < DIV_ST) XY = DIV_ST;
else if(XY < 0 && XY > -DIV_ST) XY = -DIV_ST;
*phi = acos(x / sqrt(XY));
if(y < 0.) *phi = 2.*PI - *phi;
}
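// Worked example of the guards above (assuming a*a > DIV_ST): a node offset of
// (x, y, z) = (0, 0, a) from the particle center gives XY = 0, which is clamped
// to DIV_ST, so phi = acos(0) = PI/2 is returned even though phi is physically
// arbitrary on the polar axis; r = a and theta = 0 are unaffected.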
__device__ real X_pn(int n, real theta, real phi,
real *pnm_re, real *pnm_im, int pp, int stride)
{
int coeff = 0;
for(int j = 0; j < n; j++) coeff += j+1;
coeff = coeff + pp*stride;
int m = 0;
real sum = Nnm(n,m)*Pnm(n,m,theta)*pnm_re[coeff];
for(m = 1; m <= n; m++) {
coeff++;
sum += 2.*Nnm(n,m)*Pnm(n,m,theta)
*(pnm_re[coeff]*cos(m*phi)
- pnm_im[coeff]*sin(m*phi));
}
return sum;
}
__device__ real X_phin(int n, real theta, real phi,
real *phinm_re, real *phinm_im, int pp, int stride)
{
int coeff = 0;
for(int j = 0; j < n; j++) coeff += j+1;
coeff = coeff + pp*stride;
int m = 0;
real sum = Nnm(n,m)*Pnm(n,m,theta)*phinm_re[coeff];
for(m = 1; m <= n; m++) {
coeff++;
sum += 2.*Nnm(n,m)*Pnm(n,m,theta)
*(phinm_re[coeff]*cos(m*phi)
- phinm_im[coeff]*sin(m*phi));
}
return sum;
}
__device__ real Y_pn(int n, real theta, real phi,
real *pnm_re, real *pnm_im, int pp, int stride)
{
int coeff = 0;
real ct = cos(theta);
real st = sin(theta);
if(st >= 0 && st < DIV_ST) st = DIV_ST;
else if(st < 0 && st > -DIV_ST) st = -DIV_ST;
for(int j = 0; j < n; j++) coeff += j+1;
coeff = coeff + pp*stride;
int m = 0;
real sum = Nnm(n,m)
*(-(n+1)*ct/st*Pnm(n,m,theta)+(n-m+1)/st*Pnm(n+1,m,theta))
*pnm_re[coeff];
for(m = 1; m <= n; m++) {
coeff++;
sum += 2.*Nnm(n,m)
*(-(n+1)*ct/st*Pnm(n,m,theta)+(n-m+1)/st*Pnm(n+1,m,theta))
*(pnm_re[coeff]*cos(m*phi)
- pnm_im[coeff]*sin(m*phi));
}
return sum;
}
__device__ real Y_phin(int n, real theta, real phi,
real *phinm_re, real *phinm_im, int pp, int stride)
{
int coeff = 0;
real ct = cos(theta);
real st = sin(theta);
if(st >= 0 && st < DIV_ST) st = DIV_ST;
else if(st < 0 && st > -DIV_ST) st = -DIV_ST;
for(int j = 0; j < n; j++) coeff += j+1;
coeff = coeff + pp*stride;
int m = 0;
real sum = Nnm(n,m)
*(-(n+1)*ct/st*Pnm(n,m,theta)+(n-m+1)/st*Pnm(n+1,m,theta))
*phinm_re[coeff];
for(m = 1; m <= n; m++) {
coeff++;
sum += 2.*Nnm(n,m)
*(-(n+1)*ct/st*Pnm(n,m,theta)+(n-m+1)/st*Pnm(n+1,m,theta))
*(phinm_re[coeff]*cos(m*phi)
- phinm_im[coeff]*sin(m*phi));
}
return sum;
}
__device__ real Y_chin(int n, real theta, real phi,
real *chinm_re, real *chinm_im, int pp, int stride)
{
int coeff = 0;
real ct = cos(theta);
real st = sin(theta);
if(st >= 0 && st < DIV_ST) st = DIV_ST;
else if(st < 0 && st > -DIV_ST) st = -DIV_ST;
for(int j = 0; j < n; j++) coeff += j+1;
coeff = coeff + pp*stride;
int m = 0;
real sum = Nnm(n,m)
*(-(n+1)*ct/st*Pnm(n,m,theta)+(n-m+1)/st*Pnm(n+1,m,theta))
*chinm_re[coeff];
for(m = 1; m <= n; m++) {
coeff++;
sum += 2.*Nnm(n,m)
*(-(n+1)*ct/st*Pnm(n,m,theta)+(n-m+1)/st*Pnm(n+1,m,theta))
*(chinm_re[coeff]*cos(m*phi)
- chinm_im[coeff]*sin(m*phi));
}
return sum;
}
__device__ real Z_pn(int n, real theta, real phi,
real *pnm_re, real *pnm_im, int pp, int stride)
{
int coeff = 0;
real st = sin(theta);
if(st >= 0 && st < DIV_ST) st = DIV_ST;
else if(st < 0 && st > -DIV_ST) st = -DIV_ST;
for(int j = 0; j < n; j++) coeff += j+1;
coeff = coeff + pp*stride;
int m = 0;
real sum = 0.;
for(m = 1; m <= n; m++) {
coeff++;
sum += -2.*m/st*Nnm(n,m)*Pnm(n,m,theta)
*(pnm_re[coeff]*sin(m*phi)
+ pnm_im[coeff]*cos(m*phi));
}
return sum;
}
__device__ real Z_phin(int n, real theta, real phi,
real *phinm_re, real *phinm_im, int pp, int stride)
{
int coeff = 0;
real st = sin(theta);
if(st >= 0 && st < DIV_ST) st = DIV_ST;
else if(st < 0 && st > -DIV_ST) st = -DIV_ST;
for(int j = 0; j < n; j++) coeff += j+1;
coeff = coeff + pp*stride;
int m = 0;
real sum = 0.;
for(m = 1; m <= n; m++) {
coeff++;
sum += -2.*m/st*Nnm(n,m)*Pnm(n,m,theta)
*(phinm_re[coeff]*sin(m*phi)
+ phinm_im[coeff]*cos(m*phi));
}
return sum;
}
__device__ real Z_chin(int n, real theta, real phi,
real *chinm_re, real *chinm_im, int pp, int stride)
{
int coeff = 0;
real st = sin(theta);
if(st >= 0 && st < DIV_ST) st = DIV_ST;
else if(st < 0 && st > -DIV_ST) st = -DIV_ST;
for(int j = 0; j < n; j++) coeff += j+1;
coeff = coeff + pp*stride;
int m = 0;
real sum = 0.;
for(m = 1; m <= n; m++) {
coeff++;
sum += -2.*m/st*Nnm(n,m)*Pnm(n,m,theta)
*(chinm_re[coeff]*sin(m*phi)
+ chinm_im[coeff]*cos(m*phi));
}
return sum;
}
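// Evaluate the truncated Lamb-series velocity for particle p_ind at the
// spherical location (r, theta, phi) and return it as Cartesian components
// (Ux, Uy, Uz); the sums below appear to follow the standard Lamb general
// solution expressed through the p, phi and chi coefficient sets.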
__device__ void lamb_vel(int order, real a, real r, real theta, real phi,
real nu, real *pnm_re, real *pnm_im, real *phinm_re, real *phinm_im,
real *chinm_re, real *chinm_im,
int p_ind, int stride, real *Ux, real *Uy, real *Uz)
{
real ar = a / r;
real ra = r / a;
real ur = 0.;
real ut = 0.5*ra*Y_pn(0, theta, phi, pnm_re, pnm_im, p_ind, stride);
real up = 0.5*ra*Z_pn(0, theta, phi, pnm_re, pnm_im, p_ind, stride);
for(int n = 1; n <= order; n++) {
real powranp1 = pow(ra,n+1.);
real powranm1 = pow(ra,n-1.);
real powarnp2 = pow(ar,n+2.);
real powarnp1 = pow(ar,n+1.);
real powarn = pow(ar,n);
real od2np3 = 1./(2.*n+3.);
real odnp1 = 1./(n+1.);
ur += (0.5*n*od2np3*powranp1
+ 0.25*n*((2.*n+1.)*od2np3*ar*ar-1.)*powarn)
* X_pn(n, theta, phi, pnm_re, pnm_im, p_ind, stride);
ur += (n*powranm1
+ 0.5*n*(2.*n-1.-(2.*n+1.)*ra*ra)*pow(ar,n+2.))
* X_phin(n, theta, phi, phinm_re, phinm_im, p_ind, stride);
ut += (0.5*(n+3.)*odnp1*od2np3*powranp1
+ 0.25*odnp1*(n-2.-n*(2.*n+1.)*od2np3*ar*ar)*powarn)
* Y_pn(n, theta, phi, pnm_re, pnm_im, p_ind, stride);
ut += (powranm1
+ 0.5*odnp1*((n-2.)*(2.*n+1.)*ra*ra-n*(2.*n-1.))*powarnp2)
* Y_phin(n, theta, phi, phinm_re, phinm_im, p_ind, stride);
ut += (powranm1
- powarnp1)
* Z_chin(n, theta, phi, chinm_re, chinm_im, p_ind, stride);
up += (0.5*(n+3.)*odnp1*od2np3*powranp1
+ 0.25*odnp1*(n-2.-n*(2.*n+1.)*od2np3*ar*ar)*powarn)
* Z_pn(n, theta, phi, pnm_re, pnm_im, p_ind, stride);
up += (powranm1
+ 0.5*odnp1*((n-2.)*(2.*n+1.)*ra*ra-n*(2.*n-1.))*powarnp2)
* Z_phin(n, theta, phi, phinm_re, phinm_im, p_ind, stride);
up += (-powranm1
+ powarnp1)
* Y_chin(n, theta, phi, chinm_re, chinm_im, p_ind, stride);
}
ur *= nu / a;
ut *= nu / a;
up *= nu / a;
*Ux = ur*sin(theta)*cos(phi)+ut*cos(theta)*cos(phi)-up*sin(phi);
*Uy = ur*sin(theta)*sin(phi)+ut*cos(theta)*sin(phi)+up*cos(phi);
*Uz = ur*cos(theta)-ut*sin(theta);
}
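// Evaluate the corresponding Lamb-series pressure gradient at (r, theta, phi)
// and return it as Cartesian components (gPx, gPy, gPz).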
__device__ void lamb_gradP(int order, real a, real r, real theta, real phi,
real mu, real nu, real *pnm_re, real *pnm_im, real *phinm_re, real *phinm_im,
int p_ind, int stride, real *gPx, real *gPy, real *gPz)
{
real ar = a / r;
real ra = r / a;
real st = sin(theta);
if(st >= 0 && st < DIV_ST) st = DIV_ST;
else if(st < 0 && st > -DIV_ST) st = -DIV_ST;
real pr = 0;
real pt = 1./r * Y_pn(0, theta, phi, pnm_re, pnm_im, p_ind, stride);
real pp = 1./r * Z_pn(0, theta, phi, pnm_re, pnm_im, p_ind, stride);
for(int n = 1; n <= order; n++) {
pr += n * (ar + 0.5*(2.*n-1.)*pow(ar,2.*n+2.))*pow(ra,n)
* X_pn(n, theta, phi, pnm_re, pnm_im, p_ind, stride);
pr += n * (2.*n-1.)*(2.*n+1.)*pow(ar,n+2.)
* X_phin(n, theta, phi, phinm_re, phinm_im, p_ind, stride);
pt += (1. - 0.5*n*(2.*n-1.)/(n+1.)*pow(ar,2.*n+1.))*pow(ra,n)
* Y_pn(n, theta, phi, pnm_re, pnm_im, p_ind, stride);
pt += -n*(2.*n-1.)*(2.*n+1.)/(n+1.)*pow(ar,n+1.)
* Y_phin(n, theta, phi, phinm_re, phinm_im, p_ind, stride);
pp += (1. - 0.5*n*(2.*n-1.)/(n+1.)*pow(ar,2.*n+1.))*pow(ra,n)
* Z_pn(n, theta, phi, pnm_re, pnm_im, p_ind, stride);
pp += -n*(2.*n-1.)*(2.*n+1.)/(n+1.)*pow(ar,n+1.)
* Z_phin(n, theta, phi, phinm_re, phinm_im, p_ind, stride);
}
pr *= mu * nu / (a*a*a);
pt *= mu * nu / (a*a);
pp *= mu * nu / (a*a);
*gPx = pr*sin(theta)*cos(phi)+pt*cos(theta)*cos(phi)/r-pp*sin(phi)/r/st;
*gPy = pr*sin(theta)*sin(phi)+pt*cos(theta)*sin(phi)/r+pp*cos(phi)/r/st;
*gPz = pr*cos(theta)-pt*sin(theta)/r;
}
__global__ void predict_coeffs(real dt0, real dt,
real *pnm_re00, real *pnm_im00, real *phinm_re00, real *phinm_im00,
real *chinm_re00, real *chinm_im00,
real *pnm_re0, real *pnm_im0, real *phinm_re0, real *phinm_im0,
real *chinm_re0, real *chinm_im0,
real *pnm_re, real *pnm_im, real *phinm_re, real *phinm_im,
real *chinm_re, real *chinm_im, int stride)
{
int coeff = threadIdx.x;
int part = blockIdx.x;
// extrapolate coefficients
int ti = stride*part + coeff;
real a = dt/dt0;
// store last result
/*pnm_re0[ti] = pnm_re[ti];
pnm_im0[ti] = pnm_im[ti];
phinm_re0[ti] = phinm_re[ti];
phinm_im0[ti] = phinm_im[ti];
chinm_re0[ti] = chinm_re[ti];
chinm_im0[ti] = chinm_im[ti];
*/
// predict starting point for iterations at the next timestep
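  // (i.e. a linear extrapolation in time with ratio a = dt/dt0:
  // f_pred = (1 + a)*f_n - a*f_nm1, where the *00 arrays presumably hold the
  // coefficients from the previous step)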
pnm_re[ti] = pnm_re[ti]*(1. + a) - pnm_re00[ti]*a;
pnm_im[ti] = pnm_im[ti]*(1. + a) - pnm_im00[ti]*a;
phinm_re[ti] = phinm_re[ti]*(1. + a) - phinm_re00[ti]*a;
phinm_im[ti] = phinm_im[ti]*(1. + a) - phinm_im00[ti]*a;
chinm_re[ti] = chinm_re[ti]*(1. + a) - chinm_re00[ti]*a;
chinm_im[ti] = chinm_im[ti]*(1. + a) - chinm_im00[ti]*a;
}
| 11c5fb411397f9d719fec444d7eccaac7f999487.cu | /*******************************************************************************
********************************* BLUEBOTTLE **********************************
*******************************************************************************
*
* Copyright 2012 - 2015 Adam Sierakowski, The Johns Hopkins University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Please contact the Johns Hopkins University to use Bluebottle for
* commercial and/or for-profit applications.
******************************************************************************/
#include "cuda_particle.h"
__global__ void reset_phase(int *phase, dom_struct *dom)
{
int tj = blockDim.x*blockIdx.x + threadIdx.x;
int tk = blockDim.y*blockIdx.y + threadIdx.y;
// flag everything as fluid
if((tj < dom->Gcc._jnb) && (tk < dom->Gcc._knb)) {
for(int i = dom->Gcc._isb; i < dom->Gcc._ieb; i++) {
phase[i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b] = -1;
}
}
}
__global__ void reset_phase_shell(int *phase_shell, dom_struct *dom)
{
int tj = blockDim.x*blockIdx.x + threadIdx.x;
int tk = blockDim.y*blockIdx.y + threadIdx.y;
// flag everything as fluid
if((tj < dom->Gcc._jnb) && (tk < dom->Gcc._knb)) {
for(int i = dom->Gcc._isb; i < dom->Gcc._ieb; i++) {
phase_shell[i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b] = 1;
}
}
}
__global__ void reset_flag_u(int *flag_u, dom_struct *dom)
{
int i; // iterator
int tj = blockDim.x*blockIdx.x + threadIdx.x;
int tk = blockDim.y*blockIdx.y + threadIdx.y;
// flag everything as fluid
if((tj < dom->Gfx._jnb) && (tk < dom->Gfx._knb)) {
for(i = dom->Gfx._isb; i < dom->Gfx._ieb; i++) {
flag_u[i + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b] = 1;
}
}
}
__global__ void reset_flag_v(int *flag_v, dom_struct *dom)
{
int j; // iterator
int tk = blockDim.x*blockIdx.x + threadIdx.x;
int ti = blockDim.y*blockIdx.y + threadIdx.y;
// flag everything as fluid
if((tk < dom->Gfy._knb) && (ti < dom->Gfy._inb)) {
for(j = dom->Gfy._jsb; j < dom->Gfy._jeb; j++) {
flag_v[ti + j*dom->Gfy._s1b + tk*dom->Gfy._s2b] = 1;
}
}
}
__global__ void reset_flag_w(int *flag_w, dom_struct *dom)
{
int k; // iterator
int ti = blockDim.x*blockIdx.x + threadIdx.x;
int tj = blockDim.y*blockIdx.y + threadIdx.y;
// flag everything as fluid
if((ti < dom->Gfz._inb) && (tj < dom->Gfz._jnb)) {
for(k = dom->Gfz._ksb; k < dom->Gfz._keb; k++) {
flag_w[ti + tj*dom->Gfz._s1b + k*dom->Gfz._s2b] = 1;
}
}
}
__global__ void build_phase(int p, part_struct *parts, int *phase,
dom_struct *dom, real X, real Y, real Z,
int is, int ie, int js, int je, int ks, int ke)
{
real xx, yy, zz; // distance from cell center to particle center along
// Cartesian basis
real d; // distance form cell center to particle center
int C; // current cell
int cutoff; // cage cutoff constant
// update phase (use center of cell containing particle center)
int ti = blockDim.x*blockIdx.x + threadIdx.x + is;
int tj = blockDim.y*blockIdx.y + threadIdx.y + js;
int tk = blockDim.z*blockIdx.z + threadIdx.z + ks;
if((ti < ie) && (tj < je) && (tk < ke)) {
xx = (ti-0.5)*dom->dx - (X-dom->xs);
yy = (tj-0.5)*dom->dy - (Y-dom->ys);
zz = (tk-0.5)*dom->dz - (Z-dom->zs);
d = sqrt(xx * xx + yy * yy + zz * zz);
C = ti + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b;
cutoff = (floor(d / (parts[p].r))) < 1;
phase[C] = cutoff*p + (1 - cutoff)*phase[C];
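    // (cutoff evaluates to 1 when the cell center lies inside the particle,
    // i.e. d < parts[p].r, so the line above tags interior cells with the
    // particle index p and leaves all other cells unchanged)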
}
}
__global__ void build_cage(int p, part_struct *parts, int *phase,
int *phase_shell, dom_struct *dom, real Y, real Z,
int js, int je, int ks, int ke)
{
real xx, yy, zz; // distance from cell center to particle center along
// Cartesian basis
real d; // distance form cell center to particle center
int cutoff; // cage cutoff constant
real X; // particle center location
// update phase (use center of cell containing particle center)
int tj = blockDim.x*blockIdx.x + threadIdx.x + js;
int tk = blockDim.y*blockIdx.y + threadIdx.y + ks;
if((tj < je) && (tk < ke)) {
X = parts[p].x;
if(parts[p].x < dom->xs) X = parts[p].x + dom->xl;
for(int i = parts[p].cage.is; i < parts[p].cage.ibs; i++) {
xx = (i-0.5)*dom->dx - (X-dom->xs);
yy = (tj-0.5)*dom->dy - (Y-dom->ys);
zz = (tk-0.5)*dom->dz - (Z-dom->zs);
d = sqrt(xx * xx + yy * yy + zz * zz);
cutoff =
(1 - floor(d / (1.0*parts[p].r
- 0.50*(dom->dx + dom->dy + dom->dz)/3.)));
if((cutoff * (p+1) - 1) > -1)
phase[i + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b] = cutoff * (p + 1) - 1;
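      // (the cutoff above is 1 only when d is smaller than the particle radius
      // reduced by about half a mean grid spacing, so only cells comfortably
      // inside the particle surface are re-tagged; this is an interpretation
      // of the formula rather than documented behavior)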
/*cutoff = (cutoff>0) &&
(ceil(d / (1.0*parts[p].r - 2.*(dom->dx + dom->dy + dom->dz)/3.))-1);
if((cutoff*(p+1)-1) > -1)
phase_shell[i + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b] = 1-cutoff;
*/
}
X = parts[p].x;
if(parts[p].x > dom->xe) X = parts[p].x - dom->xl;
for(int i = parts[p].cage.ibe; i < parts[p].cage.ie; i++) {
xx = (i-0.5)*dom->dx - (X-dom->xs);
yy = (tj-0.5)*dom->dy - (Y-dom->ys);
zz = (tk-0.5)*dom->dz - (Z-dom->zs);
d = sqrt(xx * xx + yy * yy + zz * zz);
cutoff =
(1 - floor(d / (1.0*parts[p].r
- 0.50*(dom->dx + dom->dy + dom->dz)/3.)));
if((cutoff * (p+1) - 1) > -1)
phase[i + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b] = cutoff * (p + 1) - 1;
/*cutoff = (cutoff>0) &&
(ceil(d / (1.0*parts[p].r - 2.*(dom->dx + dom->dy + dom->dz)/3.))-1);
if((cutoff*(p+1)-1) > -1)
phase_shell[i + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b] = 1-cutoff;
*/
}
}
}
__global__ void cage_phases_periodic_x(int *phase_type, dom_struct *dom)
{
int tj = blockDim.x*blockIdx.x + threadIdx.x;
int tk = blockDim.y*blockIdx.y + threadIdx.y;
if(tj < dom->Gcc.jnb && tk < dom->Gcc.knb) {
phase_type[dom->Gcc.isb + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b]
= phase_type[(dom->Gcc.ie-1) + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b];
phase_type[(dom->Gcc.ieb-1) + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b]
= phase_type[dom->Gcc.is + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b];
}
}
__global__ void cage_phases_periodic_y(int *phase_type, dom_struct *dom)
{
int tk = blockDim.x*blockIdx.x + threadIdx.x;
int ti = blockDim.y*blockIdx.y + threadIdx.y;
if(tk < dom->Gcc.knb && ti < dom->Gcc.inb) {
phase_type[ti + dom->Gcc.jsb*dom->Gcc.s1b + tk*dom->Gcc.s2b]
= phase_type[ti + (dom->Gcc.je-1)*dom->Gcc.s1b + tk*dom->Gcc.s2b];
phase_type[ti + (dom->Gcc.jeb-1)*dom->Gcc.s1b + tk*dom->Gcc.s2b]
= phase_type[ti + dom->Gcc.js*dom->Gcc.s1b + tk*dom->Gcc.s2b];
}
}
__global__ void cage_phases_periodic_z(int *phase_type, dom_struct *dom)
{
int ti = blockDim.x*blockIdx.x + threadIdx.x;
int tj = blockDim.y*blockIdx.y + threadIdx.y;
if(ti < dom->Gcc.inb && tj < dom->Gcc.jnb) {
phase_type[ti + tj*dom->Gcc.s1b + dom->Gcc.ksb*dom->Gcc.s2b]
= phase_type[ti + tj*dom->Gcc.s1b + (dom->Gcc.ke-1)*dom->Gcc.s2b];
phase_type[ti + tj*dom->Gcc.s1b + (dom->Gcc.keb-1)*dom->Gcc.s2b]
= phase_type[ti + tj*dom->Gcc.s1b + dom->Gcc.ks*dom->Gcc.s2b];
}
}
__global__ void phase_shell_x(part_struct *parts,
dom_struct *dom, int *phase, int *phase_shell)
{
int i; // iterator
int W, E; // flag locations
int tj = blockDim.x*blockIdx.x + threadIdx.x + DOM_BUF;
int tk = blockDim.y*blockIdx.y + threadIdx.y + DOM_BUF;
if((tj < dom->Gcc.je) && (tk < dom->Gcc.ke)) {
for(i = dom->Gcc.is; i <= dom->Gcc.ie; i++) {
W = (i-1) + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b;
E = i + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b;
// if phase changes from fluid to solid
phase_shell[E] = phase_shell[E]*(1 - (phase[W] < 0 && phase[E] > -1));
// if phase changes from solid to fluid
phase_shell[W] = phase_shell[W]*(1 - (phase[W] > -1 && phase[E] < 0));
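      // (each multiplication zeros phase_shell for the solid cell adjacent to
      // a fluid cell across this x-face, marking it as part of the interface
      // shell; every other flag is left untouched)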
}
}
}
__global__ void phase_shell_y(part_struct *parts,
dom_struct *dom, int *phase, int *phase_shell)
{
int j; // iterator
int N, S; // flag locations
int tk = blockDim.x*blockIdx.x + threadIdx.x + DOM_BUF;
int ti = blockDim.y*blockIdx.y + threadIdx.y + DOM_BUF;
if((tk < dom->Gcc.ke) && (ti < dom->Gcc.ie)) {
for(j = dom->Gcc.js; j <= dom->Gcc.je; j++) {
S = ti + (j-1)*dom->Gcc.s1b + tk*dom->Gcc.s2b;
N = ti + j*dom->Gcc.s1b + tk*dom->Gcc.s2b;
// if phase changes from fluid to solid
phase_shell[N] = phase_shell[N]*(1 - (phase[S] < 0 && phase[N] > -1));
// if phase changes from solid to fluid
phase_shell[S] = phase_shell[S]*(1 - (phase[S] > -1 && phase[N] < 0));
}
}
}
__global__ void phase_shell_z(part_struct *parts,
dom_struct *dom, int *phase, int *phase_shell)
{
int k; // iterator
int B, T; // flag locations
int ti = blockDim.x*blockIdx.x + threadIdx.x + DOM_BUF;
int tj = blockDim.y*blockIdx.y + threadIdx.y + DOM_BUF;
if((ti < dom->Gcc.ie) && (tj < dom->Gcc.je)) {
for(k = dom->Gcc.ks; k <= dom->Gcc.ke; k++) {
B = ti + tj*dom->Gcc.s1b + (k-1)*dom->Gcc.s2b;
T = ti + tj*dom->Gcc.s1b + k*dom->Gcc.s2b;
// if phase changes from fluid to solid
phase_shell[T] = phase_shell[T]*(1 - (phase[B] < 0 && phase[T] > -1));
// if phase changes from solid to fluid
phase_shell[B] = phase_shell[B]*(1 - (phase[B] > -1 && phase[T] < 0));
}
}
}
__global__ void cage_flag_u_periodic_x(int *flag_u, dom_struct *dom)
{
int tj = blockDim.x*blockIdx.x + threadIdx.x;
int tk = blockDim.y*blockIdx.y + threadIdx.y;
if(tj < dom->Gfx.jnb && tk < dom->Gfx.knb) {
flag_u[dom->Gfx.isb + tj*dom->Gfx.s1b + tk*dom->Gfx.s2b]
= flag_u[(dom->Gfx.ie-2) + tj*dom->Gfx.s1b + tk*dom->Gfx.s2b];
flag_u[(dom->Gfx.ieb-1) + tj*dom->Gfx.s1b + tk*dom->Gfx.s2b]
= flag_u[(dom->Gfx.is+1) + tj*dom->Gfx.s1b + tk*dom->Gfx.s2b];
}
}
__global__ void cage_flag_u_periodic_y(int *flag_u, dom_struct *dom)
{
int tk = blockDim.x*blockIdx.x + threadIdx.x;
int ti = blockDim.y*blockIdx.y + threadIdx.y;
if(tk < dom->Gfx.knb && ti < dom->Gfx.inb) {
flag_u[ti + dom->Gfx.jsb*dom->Gfx.s1b + tk*dom->Gfx.s2b]
= flag_u[ti + (dom->Gfx.je-1)*dom->Gfx.s1b + tk*dom->Gfx.s2b];
flag_u[ti + (dom->Gfx.jeb-1)*dom->Gfx.s1b + tk*dom->Gfx.s2b]
= flag_u[ti + dom->Gfx.js*dom->Gfx.s1b + tk*dom->Gfx.s2b];
}
}
__global__ void cage_flag_u_periodic_z(int *flag_u, dom_struct *dom)
{
int ti = blockDim.x*blockIdx.x + threadIdx.x;
int tj = blockDim.y*blockIdx.y + threadIdx.y;
if(ti < dom->Gfx.inb && tj < dom->Gfx.jnb) {
flag_u[ti + tj*dom->Gfx.s1b + dom->Gfx.ksb*dom->Gfx.s2b]
= flag_u[ti + tj*dom->Gfx.s1b + (dom->Gfx.ke-1)*dom->Gfx.s2b];
flag_u[ti + tj*dom->Gfx.s1b + (dom->Gfx.keb-1)*dom->Gfx.s2b]
= flag_u[ti + tj*dom->Gfx.s1b + dom->Gfx.ks*dom->Gfx.s2b];
}
}
__global__ void cage_flag_v_periodic_x(int *flag_v, dom_struct *dom)
{
int tj = blockDim.x*blockIdx.x + threadIdx.x;
int tk = blockDim.y*blockIdx.y + threadIdx.y;
if(tj < dom->Gfy.jnb && tk < dom->Gfy.knb) {
flag_v[dom->Gfy.isb + tj*dom->Gfy.s1b + tk*dom->Gfy.s2b]
= flag_v[(dom->Gfy.ie-1) + tj*dom->Gfy.s1b + tk*dom->Gfy.s2b];
flag_v[(dom->Gfy.ieb-1) + tj*dom->Gfy.s1b + tk*dom->Gfy.s2b]
= flag_v[dom->Gfy.is + tj*dom->Gfy.s1b + tk*dom->Gfy.s2b];
}
}
__global__ void cage_flag_v_periodic_y(int *flag_v, dom_struct *dom)
{
int tk = blockDim.x*blockIdx.x + threadIdx.x;
int ti = blockDim.y*blockIdx.y + threadIdx.y;
if(tk < dom->Gfy.knb && ti < dom->Gfy.inb) {
flag_v[ti + dom->Gfy.jsb*dom->Gfy.s1b + tk*dom->Gfy.s2b]
= flag_v[ti + (dom->Gfy.je-2)*dom->Gfy.s1b + tk*dom->Gfy.s2b];
flag_v[ti + (dom->Gfy.jeb-1)*dom->Gfy.s1b + tk*dom->Gfy.s2b]
= flag_v[ti + (dom->Gfy.js+1)*dom->Gfy.s1b + tk*dom->Gfy.s2b];
}
}
__global__ void cage_flag_v_periodic_z(int *flag_v, dom_struct *dom)
{
int ti = blockDim.x*blockIdx.x + threadIdx.x;
int tj = blockDim.y*blockIdx.y + threadIdx.y;
if(ti < dom->Gfy.inb && tj < dom->Gfy.jnb) {
flag_v[ti + tj*dom->Gfy.s1b + dom->Gfy.ksb*dom->Gfy.s2b]
= flag_v[ti + tj*dom->Gfy.s1b + (dom->Gfy.ke-1)*dom->Gfy.s2b];
flag_v[ti + tj*dom->Gfy.s1b + (dom->Gfy.keb-1)*dom->Gfy.s2b]
= flag_v[ti + tj*dom->Gfy.s1b + dom->Gfy.ks*dom->Gfy.s2b];
}
}
__global__ void cage_flag_w_periodic_x(int *flag_w, dom_struct *dom)
{
int tj = blockDim.x*blockIdx.x + threadIdx.x;
int tk = blockDim.y*blockIdx.y + threadIdx.y;
if(tj < dom->Gfz.jnb && tk < dom->Gfz.knb) {
flag_w[dom->Gfz.isb + tj*dom->Gfz.s1b + tk*dom->Gfz.s2b]
= flag_w[(dom->Gfz.ie-1)+ tj*dom->Gfz.s1b + tk*dom->Gfz.s2b];
flag_w[(dom->Gfz.ieb-1) + tj*dom->Gfz.s1b + tk*dom->Gfz.s2b]
= flag_w[dom->Gfz.is + tj*dom->Gfz.s1b + tk*dom->Gfz.s2b];
}
}
__global__ void cage_flag_w_periodic_y(int *flag_w, dom_struct *dom)
{
int tk = blockDim.x*blockIdx.x + threadIdx.x;
int ti = blockDim.y*blockIdx.y + threadIdx.y;
if(tk < dom->Gfz.knb && ti < dom->Gfz.inb) {
flag_w[ti + dom->Gfz.jsb*dom->Gfz.s1b + tk*dom->Gfz.s2b]
= flag_w[ti + (dom->Gfz.je-1)*dom->Gfz.s1b + tk*dom->Gfz.s2b];
flag_w[ti + (dom->Gfz.jeb-1)*dom->Gfz.s1b + tk*dom->Gfz.s2b]
= flag_w[ti + dom->Gfz.js*dom->Gfz.s1b + tk*dom->Gfz.s2b];
}
}
__global__ void cage_flag_w_periodic_z(int *flag_w, dom_struct *dom)
{
int ti = blockDim.x*blockIdx.x + threadIdx.x;
int tj = blockDim.y*blockIdx.y + threadIdx.y;
if(ti < dom->Gfz.inb && tj < dom->Gfz.jnb) {
flag_w[ti + tj*dom->Gfz.s1b + dom->Gfz.ksb*dom->Gfz.s2b]
= flag_w[ti + tj*dom->Gfz.s1b + (dom->Gfz.ke-2)*dom->Gfz.s2b];
flag_w[ti + tj*dom->Gfz.s1b + (dom->Gfz.keb-1)*dom->Gfz.s2b]
= flag_w[ti + tj*dom->Gfz.s1b + (dom->Gfz.ks+1)*dom->Gfz.s2b];
}
}
__global__ void cage_flag_u(int *flag_u, part_struct *parts, dom_struct *dom,
int *phase, int *phase_shell)
{
int i; // iterator
int W, E; // flag locations
int tj = blockDim.x*blockIdx.x + threadIdx.x + DOM_BUF;
int tk = blockDim.y*blockIdx.y + threadIdx.y + DOM_BUF;
if((tj < dom->Gcc.je) && (tk < dom->Gcc.ke)) {
for(i = dom->Gcc.is; i <= dom->Gcc.ie; i++) {
W = (i-1) + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b;
E = i + tj*dom->Gcc.s1b + tk*dom->Gcc.s2b;
#ifdef STEPS
flag_u[i + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b] =
1 - ((phase[W] < 0 && phase[E] > -1)
|| (phase[W] > -1 && phase[E] < 0)
|| ((phase_shell[W] < 1 && phase_shell[E] < 1)));
#else
flag_u[i + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b] =
1 - 2*((phase[W] < 0 && phase[E] > -1)
|| (phase[W] > -1 && phase[E] < 0)
|| ((phase_shell[W] < 1 && phase_shell[E] < 1)));
#endif
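      // (the resulting flag is 1 for an ordinary fluid face; it is -1, or 0
      // when STEPS is defined, for a face that crosses the particle surface
      // or lies between two shell cells)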
}
}
}
__global__ void cage_flag_v(int *flag_v, part_struct *parts, dom_struct *dom,
int *phase, int *phase_shell)
{
int j; // iterator
int S, N; // flag locations
int tk = blockDim.x*blockIdx.x + threadIdx.x + DOM_BUF;
int ti = blockDim.y*blockIdx.y + threadIdx.y + DOM_BUF;
if((tk < dom->Gcc.ke) && (ti < dom->Gcc.ie)) {
for(j = dom->Gcc.js; j <= dom->Gcc.je; j++) {
S = ti + (j-1)*dom->Gcc.s1b + tk*dom->Gcc.s2b;
N = ti + j*dom->Gcc.s1b + tk*dom->Gcc.s2b;
#ifdef STEPS
flag_v[ti + j*dom->Gfy._s1b + tk*dom->Gfy._s2b] =
1 - ((phase[S] < 0 && phase[N] > -1)
|| (phase[S] > -1 && phase[N] < 0)
|| ((phase_shell[S] < 1 && phase_shell[N] < 1)));
#else
flag_v[ti + j*dom->Gfy._s1b + tk*dom->Gfy._s2b] =
1 - 2*((phase[S] < 0 && phase[N] > -1)
|| (phase[S] > -1 && phase[N] < 0)
|| ((phase_shell[S] < 1 && phase_shell[N] < 1)));
#endif
}
}
}
__global__ void cage_flag_w(int *flag_w, part_struct *parts, dom_struct *dom,
int *phase, int *phase_shell)
{
int k; // iterator
int B, T; // flag locations
int ti = blockDim.x*blockIdx.x + threadIdx.x + DOM_BUF;
int tj = blockDim.y*blockIdx.y + threadIdx.y + DOM_BUF;
if((ti < dom->Gcc.ie) && (tj < dom->Gcc.je)) {
for(k = dom->Gcc.ks; k <= dom->Gcc.ke; k++) {
B = ti + tj*dom->Gcc.s1b + (k-1)*dom->Gcc.s2b;
T = ti + tj*dom->Gcc.s1b + k*dom->Gcc.s2b;
#ifdef STEPS
flag_w[ti + tj*dom->Gfz._s1b + k*dom->Gfz._s2b] =
1 - ((phase[B] < 0 && phase[T] > -1)
|| (phase[B] > -1 && phase[T] < 0)
|| ((phase_shell[B] < 1 && phase_shell[T] < 1)));
#else
flag_w[ti + tj*dom->Gfz._s1b + k*dom->Gfz._s2b] =
1 - 2*((phase[B] < 0 && phase[T] > -1)
|| (phase[B] > -1 && phase[T] < 0)
|| ((phase_shell[B] < 1 && phase_shell[T] < 1)));
#endif
}
}
}
__global__ void flag_external_u(int *flag_u, dom_struct *dom)
{
int tj = blockDim.x*blockIdx.x + threadIdx.x;
int tk = blockDim.y*blockIdx.y + threadIdx.y;
if((tj < dom->Gfx._jnb) && (tk < dom->Gfx._knb)) {
flag_u[dom->Gfx._is + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b] = 0;
flag_u[dom->Gfx._ie-1 + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b] = 0;
}
}
__global__ void flag_external_v(int *flag_v, dom_struct *dom)
{
int tk = blockDim.x*blockIdx.x + threadIdx.x;
int ti = blockDim.y*blockIdx.y + threadIdx.y;
if((tk < dom->Gfy._knb) && (ti < dom->Gfy._inb)) {
flag_v[ti + dom->Gfy._js*dom->Gfy._s1b + tk*dom->Gfy._s2b] = 0;
flag_v[ti + (dom->Gfy._je-1)*dom->Gfy._s1b + tk*dom->Gfy._s2b] = 0;
}
}
__global__ void flag_external_w(int *flag_w, dom_struct *dom)
{
int ti = blockDim.x*blockIdx.x + threadIdx.x;
int tj = blockDim.y*blockIdx.y + threadIdx.y;
if((ti < dom->Gfz._inb) && (tj < dom->Gfz._jnb)) {
flag_w[ti + tj*dom->Gfz._s1b + dom->Gfz._ks*dom->Gfz._s2b] = 0;
flag_w[ti + tj*dom->Gfz._s1b + (dom->Gfz._ke-1)*dom->Gfz._s2b] = 0;
}
}
__global__ void part_BC_u(real *u, int *phase, int *flag_u,
part_struct *parts, dom_struct *dom,
real nu, int stride,
real *pnm_re, real *pnm_im, real *phinm_re, real *phinm_im,
real *chinm_re, real *chinm_im)
{
int tj = blockDim.x*blockIdx.x + threadIdx.x + dom->Gfx._jsb;
int tk = blockDim.y*blockIdx.y + threadIdx.y + dom->Gfx._ksb;
int C, CW, CE;
real x, y, z; // velocity node location Cartesian
real X, Y, Z; // particle position
real r, theta, phi; // velocity node location spherical
  real Ux, Uy, Uz; // Cartesian components of the Lamb velocity
int P, PP, PW, PE; // particle number
real a; // particle radius
int order; // particle order
real oy, oz; // particle angular velocity
real oydot, ozdot; // particle angular acceleration
real uu; // particle velocity
if(tj < dom->Gfx._jeb && tk < dom->Gfx._keb) {
for(int i = dom->Gfx._is; i < dom->Gfx._ie; i++) {
C = i + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b;
CW = (i-1) + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b;
CE = i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b;
// get particle number
PW = phase[CW];
PE = phase[CE];
if(PW > -1) {
P = PW;
PP = PW;
a = parts[P].r;
X = parts[P].x;
Y = parts[P].y;
Z = parts[P].z;
order = parts[P].order;
oy = parts[P].oy;
oz = parts[P].oz;
oydot = parts[P].oydot;
ozdot = parts[P].ozdot;
uu = parts[P].u;
} else if(PE > -1) {
P = PE;
PP = PE;
a = parts[P].r;
X = parts[P].x;
Y = parts[P].y;
Z = parts[P].z;
order = parts[P].order;
oy = parts[P].oy;
oz = parts[P].oz;
oydot = parts[P].oydot;
ozdot = parts[P].ozdot;
uu = parts[P].u;
} else {
P = 0;
PP = -1;
a = (dom->dx + dom->dy + dom->dz) / 3.;
X = (i-DOM_BUF) * dom->dx + dom->xs + a;
Y = (tj-0.5) * dom->dy + dom->ys + a;
Z = (tk-0.5) * dom->dz + dom->zs + a;
order = 0;
oy = 0;
oz = 0;
oydot = 0;
ozdot = 0;
uu = 0;
}
x = (i-DOM_BUF) * dom->dx + dom->xs - X;
if(x < dom->xs - 0.5*dom->dx) x += dom->xl;
if(x > dom->xe + 0.5*dom->dx) x -= dom->xl;
y = (tj-0.5) * dom->dy + dom->ys - Y;
if(y < dom->ys - 0.5*dom->dy) y += dom->yl;
if(y > dom->ye + 0.5*dom->dy) y -= dom->yl;
z = (tk-0.5) * dom->dz + dom->zs - Z;
if(z < dom->zs - 0.5*dom->dz) z += dom->zl;
if(z > dom->ze + 0.5*dom->dz) z -= dom->zl;
xyz2rtp(x, y, z, &r, &theta, &phi);
// calculate analytic solution
#ifdef STEPS
int check = (flag_u[C] < 1) && (PP > -1);
u[C] = - (check - 1) * u[C];
#else
lamb_vel(order, a, r, theta, phi,
nu, pnm_re, pnm_im, phinm_re, phinm_im,
chinm_re, chinm_im,
P, stride, &Ux, &Uy, &Uz);
real ocrossr_x = oy*z - oz*y;
real odotcrossr_x = oydot*z - ozdot*y;
Ux += uu + ocrossr_x;
Ux += 0.1/nu *(r*r*r*r*r-a*a*a*a*a)/(r*r*r) * odotcrossr_x;
// boolean check if this is an analytically-posed node
int check = (flag_u[C] < 1) && (PP > -1);
u[C] = check * Ux + (1 - check) * u[C];
#endif
}
}
}
__global__ void part_BC_v(real *v, int *phase, int *flag_v,
part_struct *parts, dom_struct *dom,
real nu, int stride,
real *pnm_re, real *pnm_im, real *phinm_re, real *phinm_im,
real *chinm_re, real *chinm_im)
{
int tk = blockDim.x*blockIdx.x + threadIdx.x + dom->Gfy._ksb;
int ti = blockDim.y*blockIdx.y + threadIdx.y + dom->Gfy._isb;
int C, CS, CN;
real x, y, z; // velocity node location Cartesian
real X, Y, Z; // particle position
real r, theta, phi; // velocity node location spherical
  real Ux, Uy, Uz; // Cartesian components of the Lamb velocity
int P, PP, PS, PN; // particle number
real a; // particle radius
int order; // particle order
real oz, ox; // particle angular velocity
real ozdot, oxdot; // particle angular acceleration
real vv; // particle velocity
if(tk < dom->Gfy._keb && ti < dom->Gfy._ieb) {
for(int j = dom->Gfy._js; j < dom->Gfy._je; j++) {
C = ti + j*dom->Gfy._s1b + tk*dom->Gfy._s2b;
CS = ti + (j-1)*dom->Gcc._s1b + tk*dom->Gcc._s2b;
CN = ti + j*dom->Gcc._s1b + tk*dom->Gcc._s2b;
// get particle number
PS = phase[CS];
PN = phase[CN];
if(PS > -1) {
P = PS;
PP = PS;
a = parts[P].r;
X = parts[P].x;
Y = parts[P].y;
Z = parts[P].z;
order = parts[P].order;
oz = parts[P].oz;
ox = parts[P].ox;
ozdot = parts[P].ozdot;
oxdot = parts[P].oxdot;
vv = parts[P].v;
} else if(PN > -1) {
P = PN;
PP = PN;
a = parts[P].r;
X = parts[P].x;
Y = parts[P].y;
Z = parts[P].z;
order = parts[P].order;
oz = parts[P].oz;
ox = parts[P].ox;
ozdot = parts[P].ozdot;
oxdot = parts[P].oxdot;
vv = parts[P].v;
} else {
P = 0;
PP = -1;
a = (dom->dx + dom->dy + dom->dz) / 3.;
X = (ti-0.5) * dom->dx + dom->xs + a;
Y = (j-DOM_BUF) * dom->dy + dom->ys + a;
Z = (tk-0.5) * dom->dz + dom->zs + a;
order = 0;
oz = 0;
ox = 0;
ozdot = 0;
oxdot = 0;
vv = 0;
}
x = (ti-0.5) * dom->dx + dom->xs - X;
if(x < dom->xs - 0.5*dom->dx) x += dom->xl;
if(x > dom->xe + 0.5*dom->dx) x -= dom->xl;
y = (j-DOM_BUF) * dom->dy + dom->ys - Y;
if(y < dom->ys - 0.5*dom->dy) y += dom->yl;
if(y > dom->ye + 0.5*dom->dy) y -= dom->yl;
z = (tk-0.5) * dom->dz + dom->zs - Z;
      if(z < dom->zs - 0.5*dom->dz) z += dom->zl;
if(z > dom->ze + 0.5*dom->dz) z -= dom->zl;
xyz2rtp(x, y, z, &r, &theta, &phi);
// calculate analytic solution
#ifdef STEPS
int check = (flag_v[C] < 1) && (PP > -1);
v[C] = - (check - 1) * v[C];
#else
lamb_vel(order, a, r, theta, phi,
nu, pnm_re, pnm_im, phinm_re, phinm_im,
chinm_re, chinm_im,
P, stride, &Ux, &Uy, &Uz);
// switch reference frame and set boundary condition
real ocrossr_y = -(ox*z - oz*x);
real odotcrossr_y = -(oxdot*z - ozdot*x);
Uy += vv + ocrossr_y;
Uy += 0.1/nu *(r*r*r*r*r-a*a*a*a*a)/(r*r*r) * odotcrossr_y;
// boolean check if this is an analytically-posed node
int check = (flag_v[C] < 1) && (PP > -1);
v[C] = check * Uy + (1 - check) * v[C];
#endif
}
}
}
__global__ void part_BC_w(real *w, int *phase, int *flag_w,
part_struct *parts, dom_struct *dom,
real nu, int stride,
real *pnm_re, real *pnm_im, real *phinm_re, real *phinm_im,
real *chinm_re, real *chinm_im)
{
int ti = blockDim.x*blockIdx.x + threadIdx.x + dom->Gfz._isb;
int tj = blockDim.y*blockIdx.y + threadIdx.y + dom->Gfz._jsb;
int C, CB, CT;
real x, y, z; // velocity node location Cartesian
real X, Y, Z; // particle position
real r, theta, phi; // velocity node location spherical
  real Ux, Uy, Uz; // Cartesian components of the Lamb velocity
int P, PP, PB, PT; // particle number
real a; // particle radius
int order; // particle order
real ox, oy; // particle angular velocity
real oxdot, oydot; // particle angular acceleration
real ww; // particle velocity
if(ti < dom->Gfz._ieb && tj < dom->Gfz._jeb) {
for(int k = dom->Gfz._ks; k < dom->Gfz._ke; k++) {
C = ti + tj*dom->Gfz._s1b + k*dom->Gfz._s2b;
CB = ti + tj*dom->Gcc._s1b + (k-1)*dom->Gcc._s2b;
CT = ti + tj*dom->Gcc._s1b + k*dom->Gcc._s2b;
// get particle number
PB = phase[CB];
PT = phase[CT];
if(PB > -1) {
P = PB;
PP = PB;
a = parts[P].r;
X = parts[P].x;
Y = parts[P].y;
Z = parts[P].z;
order = parts[P].order;
ox = parts[P].ox;
oy = parts[P].oy;
oxdot = parts[P].oxdot;
oydot = parts[P].oydot;
ww = parts[P].w;
} else if(PT > -1) {
P = PT;
PP = PT;
a = parts[P].r;
X = parts[P].x;
Y = parts[P].y;
Z = parts[P].z;
order = parts[P].order;
ox = parts[P].ox;
oy = parts[P].oy;
oxdot = parts[P].oxdot;
oydot = parts[P].oydot;
ww = parts[P].w;
} else {
P = 0;
PP = -1;
a = (dom->dx + dom->dy + dom->dz) / 3.;
X = (ti-0.5) * dom->dx + dom->xs + a;
Y = (tj-0.5) * dom->dy + dom->ys + a;
Z = (k-DOM_BUF) * dom->dz + dom->zs + a;
order = 0;
ox = 0;
oy = 0;
oxdot = 0;
oydot = 0;
ww = 0;
}
x = (ti-0.5) * dom->dx + dom->xs - X;
if(x < dom->xs - 0.5*dom->dx) x += dom->xl;
if(x > dom->xe + 0.5*dom->dx) x -= dom->xl;
y = (tj-0.5) * dom->dy + dom->ys - Y;
if(y < dom->ys - 0.5*dom->dy) y += dom->yl;
if(y > dom->ye + 0.5*dom->dy) y -= dom->yl;
z = (k-DOM_BUF) * dom->dz + dom->zs - Z;
if(z < dom->zs - 0.5*dom->dz) z += dom->zl;
if(z > dom->ze + 0.5*dom->dz) z -= dom->zl;
xyz2rtp(x, y, z, &r, &theta, &phi);
// calculate analytic solution
#ifdef STEPS
int check = (flag_w[C] < 1) && (PP > -1);
w[C] = - (check - 1) * w[C];
#else
lamb_vel(order, a, r, theta, phi,
nu, pnm_re, pnm_im, phinm_re, phinm_im,
chinm_re, chinm_im,
P, stride, &Ux, &Uy, &Uz);
// switch reference frame and set boundary condition
real ocrossr_z = ox*y - oy*x;
real odotcrossr_z = oxdot*y - oydot*x;
Uz += ww + ocrossr_z;
Uz += 0.1/nu *(r*r*r*r*r-a*a*a*a*a)/(r*r*r) * odotcrossr_z;
// boolean check if this is an analytically-posed node
int check = (flag_w[C] < 1) && (PP > -1);
w[C] = check * Uz + (1 - check) * w[C];
#endif
}
}
}
__global__ void part_BC_p(real *p, real *p_rhs, int *phase, int *phase_shell,
part_struct *parts, dom_struct *dom,
real mu, real nu, real dt, real dt0, gradP_struct gradP, real rho_f, int stride,
real *pnm_re00, real *pnm_im00, real *phinm_re00, real *phinm_im00,
real *chinm_re00, real *chinm_im00,
real *pnm_re, real *pnm_im, real *phinm_re, real *phinm_im,
real *chinm_re, real *chinm_im)
{
int tj = blockDim.x*blockIdx.x + threadIdx.x + dom->Gcc._js;
int tk = blockDim.y*blockIdx.y + threadIdx.y + dom->Gcc._ks;
int C, CC;
real x, y, z; // pressure node location Cartesian
real X, Y, Z; // particle position
  real r, theta, phi; // pressure node location spherical
real pp_tmp;//, pp_tmp00;// temporary pressure
int P; // particle number
real a; // particle radius
int order; // particle order
real ox, oy, oz; // particle angular velocity
real udot, vdot, wdot;// particle acceleration
if(tj < dom->Gcc._je && tk < dom->Gcc._ke) {
for(int i = dom->Gcc._is; i < dom->Gcc._ie; i++) {
CC = i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b;
C = (i-DOM_BUF) + (tj-DOM_BUF)*dom->Gcc._s1 + (tk-DOM_BUF)*dom->Gcc._s2;
// get particle number
P = phase[CC];
if(P > -1) {
a = parts[P].r;
X = parts[P].x;
Y = parts[P].y;
Z = parts[P].z;
order = parts[P].order;
ox = parts[P].ox;
oy = parts[P].oy;
oz = parts[P].oz;
udot = parts[P].udot;
vdot = parts[P].vdot;
wdot = parts[P].wdot;
} else {
P = 0;
a = (dom->dx + dom->dy + dom->dz) / 3.;
X = (i-0.5) * dom->dx + dom->xs + a;
Y = (tj-0.5) * dom->dy + dom->ys + a;
Z = (tk-0.5) * dom->dz + dom->zs + a;
order = 0;
ox = 0;
oy = 0;
oz = 0;
udot = 0;
vdot = 0;
wdot = 0;
}
x = (i-0.5) * dom->dx + dom->xs - X;
if(x < dom->xs) x += dom->xl;
if(x > dom->xe) x -= dom->xl;
y = (tj-0.5) * dom->dy + dom->ys - Y;
if(y < dom->ys) y += dom->yl;
if(y > dom->ye) y -= dom->yl;
z = (tk-0.5) * dom->dz + dom->zs - Z;
if(z < dom->zs) z += dom->zl;
if(z > dom->ze) z -= dom->zl;
xyz2rtp(x, y, z, &r, &theta, &phi);
// calculate analytic solution
#ifdef STEPS
p_rhs[C] = phase_shell[CC] * p_rhs[C];
#else
real ar = a / r;
real ra = r / a;
pp_tmp = X_pn(0, theta, phi, pnm_re, pnm_im, P, stride);
//pp_tmp00 = X_pn(0, theta, phi, pnm_re00, pnm_im00, P, stride);
for(int n = 1; n <= order; n++) {
pp_tmp += (1.-0.5*n*(2.*n-1.)/(n+1.)*pow(ar,2.*n+1.))*pow(ra,n)
* X_pn(n, theta, phi, pnm_re, pnm_im, P, stride);
//pp_tmp00 += (1.-0.5*n*(2.*n-1.)/(n+1.)*pow(ar,2.*n+1.))*pow(ra,n)
//* X_pn(n, theta, phi, pnm_re00, pnm_im00, P, stride);
pp_tmp -= n*(2.*n-1.)*(2.*n+1.)/(n+1.)*pow(ar,n+1.)
* X_phin(n, theta, phi, phinm_re, phinm_im, P, stride);
//pp_tmp00 -= n*(2.*n-1.)*(2.*n+1.)/(n+1.)*pow(ar,n+1.)
//* X_phin(n, theta, phi, phinm_re00, phinm_im00, P, stride);
}
pp_tmp *= mu*nu/(a*a);
//pp_tmp00 *= mu*nu/(a*a);
real ocrossr2 = (oy*z - oz*y) * (oy*z - oz*y);
ocrossr2 += (ox*z - oz*x) * (ox*z - oz*x);
ocrossr2 += (ox*y - oy*x) * (ox*y - oy*x);
real rhoV = rho_f;
real accdotr = (-gradP.x/rhoV - udot)*x + (-gradP.y/rhoV - vdot)*y
+ (-gradP.z/rhoV - wdot)*z;
pp_tmp += 0.5 * rho_f * ocrossr2 + rho_f * accdotr;
//pp_tmp00 += 0.5 * rho_f * ocrossr2 + rho_f * accdotr;
// write BC if flagged, otherwise leave alone
p_rhs[C] = (real) phase_shell[CC] * p_rhs[C]
+ (real) (1 - phase_shell[CC])
* ((pp_tmp - p[CC]) + 0.5*mu*p_rhs[C]);
p_rhs[C] = (real) (phase[CC] < 0 && phase_shell[CC]) * p_rhs[C];
#endif
}
}
}
__global__ void part_BC_p_fill(real *p, int *phase,
part_struct *parts, dom_struct *dom,
real mu, real nu, real rho_f, gradP_struct gradP, int stride,
real *pnm_re, real *pnm_im)
{
int tj = blockDim.x*blockIdx.x + threadIdx.x + dom->Gcc._js;
int tk = blockDim.y*blockIdx.y + threadIdx.y + dom->Gcc._ks;
int CC;
real x, y, z; // pressure node location Cartesian
real X, Y, Z; // particle position
  real r, theta, phi; // pressure node location spherical
real pp_tmp;//, pp_tmp00;// temporary pressure
int P; // particle number
real a; // particle radius
real ox, oy, oz; // particle angular velocity
real udot, vdot, wdot;// particle acceleration
if(tj < dom->Gcc._je && tk < dom->Gcc._ke) {
for(int i = dom->Gcc._is; i < dom->Gcc._ie; i++) {
CC = i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b;
// get particle number
P = phase[CC];
if(P > -1) {
a = parts[P].r;
X = parts[P].x;
Y = parts[P].y;
Z = parts[P].z;
ox = parts[P].ox;
oy = parts[P].oy;
oz = parts[P].oz;
udot = parts[P].udot;
vdot = parts[P].vdot;
wdot = parts[P].wdot;
} else {
P = 0;
a = (dom->dx + dom->dy + dom->dz) / 3.;
X = (i-0.5) * dom->dx + dom->xs + a;
Y = (tj-0.5) * dom->dy + dom->ys + a;
Z = (tk-0.5) * dom->dz + dom->zs + a;
ox = 0;
oy = 0;
oz = 0;
udot = 0;
vdot = 0;
wdot = 0;
}
x = (i-0.5) * dom->dx + dom->xs - X;
if(x < dom->xs) x += dom->xl;
if(x > dom->xe) x -= dom->xl;
y = (tj-0.5) * dom->dy + dom->ys - Y;
if(y < dom->ys) y += dom->yl;
if(y > dom->ye) y -= dom->yl;
z = (tk-0.5) * dom->dz + dom->zs - Z;
if(z < dom->zs) z += dom->zl;
if(z > dom->ze) z -= dom->zl;
xyz2rtp(x, y, z, &r, &theta, &phi);
// calculate analytic solution
#ifdef STEPS
      // NOTE: phase_shell is not an argument of this kernel; assuming the
      // intent is to zero the pressure inside particles, the phase flag is
      // used as the mask instead.
      p[CC] = (real) (phase[CC] < 0) * p[CC];
#else
pp_tmp = X_pn(0, theta, phi, pnm_re, pnm_im, P, stride);
pp_tmp *= mu*nu/(a*a);
real ocrossr2 = (oy*z - oz*y) * (oy*z - oz*y);
ocrossr2 += (ox*z - oz*x) * (ox*z - oz*x);
ocrossr2 += (ox*y - oy*x) * (ox*y - oy*x);
real rhoV = rho_f;
real accdotr = (-gradP.x/rhoV - udot)*x + (-gradP.y/rhoV - vdot)*y
+ (-gradP.z/rhoV - wdot)*z;
pp_tmp += 0.5 * rho_f * ocrossr2 + rho_f * accdotr;
// write BC if inside particle, otherwise leave alone
p[CC] = (real) (phase[CC] > -1) * pp_tmp
+ (1 - (phase[CC] > -1)) * p[CC];
#endif
}
}
}
__device__ real Nnm(int n, int m)
{
real fact_top = 1;
real fact_bot = 1;
for(int i = 1; i <= (n-m); i++) fact_top *= (real)i;
for(int i = 1; i <= (n+m); i++) fact_bot *= (real)i;
return sqrt((2.*n+1.) / 4. / PI * fact_top / fact_bot);
}
__device__ real Pnm(int n, int m, real theta)
{
real x = cos(theta);
real y = sin(theta);
switch(n) {
case 0: return 1;
case 1:
switch(m) {
//case -1: return 0.5*y;
case 0: return x;
case 1: return -y;
}
case 2:
switch(m) {
//case -2: return 0.125*y*y;
//case -1: return 0.5*x*y;
case 0: return 0.5*(3.*x*x - 1.);
case 1: return -3.*x*y;
case 2: return 3.*y*y;
}
case 3:
switch(m) {
//case -3: return 0.02083333333333*y*y*y;
//case -2: return 0.125*x*y*y;
//case -1: return -0.125*(1. - 5.*x*x)*y;
case 0: return 0.5*x*(5.*x*x - 3.);
case 1: return -1.5*(5.*x*x - 1.)*y;
case 2: return 15.*x*y*y;
case 3: return -15.*y*y*y;
}
case 4:
switch(m) {
//case -4: return .002604166666667*y*y*y*y;
//case -3: return 0.02083333333333*x*y*y*y*y;
//case -2: return 0.02083333333333*(7.*x*x - 1.)*y*y;
//case -1: return -0.125*x*(3. - 7.*x*x)*y;
case 0: return 0.125*(35.*x*x*x*x - 30.*x*x + 3.);
case 1: return -2.5*(7.*x*x - 3.)*x*y;
case 2: return 7.5*(7.*x*x - 1.)*y*y;
case 3: return -105.*x*y*y*y;
case 4: return 105.*y*y*y*y;
}
case 5:
switch(m) {
//case -5: return 0.000260416666667*y*y*y*y*y;
//case -4: return 0.002604166666667*x*y*y*y*y;
//case -3: return -0.002604166666667*y*y*y*(9.*x*x - 1.);
//case -2: return 0.0625*x*y*y*(3.*x*x - 1.);
//case -1: return -0.0625*(21.*x*x*x*x - 14.*x*x + 1.);
case 0: return 0.125*x*(63.*x*x*x*x - 70.*x*x + 15.);
case 1: return -1.875*y*(21.*x*x*x*x - 14.*x*x + 1.);
case 2: return 52.5*x*y*y*(3.*x*x - 1.);
case 3: return -52.5*y*y*y*(9.*x*x - 1.);
case 4: return 945.*x*y*y*y*y;
case 5: return -945.*y*y*y*y*y;
}
}
return 0; // this should never be reached
}
__device__ void xyz2rtp(real x, real y, real z, real *r, real *theta, real *phi)
{
real XY = x*x + y*y;
real XYZ = XY + z*z;
// We calculate the coefficients everywhere in space. If a particle is
// centered at the center of a cell, XYZ will be zero. We'll set it equal
// to one since these values aren't ever used.
if(XYZ >= 0 && XYZ < DIV_ST) XYZ = 1;//DIV_ST;
else if(XYZ < 0 && XYZ > -DIV_ST) XYZ = 1;//-DIV_ST;
*r = sqrt(XYZ);
*theta = acos(z / *r);
// Note that XY cannot be set equal to one, because the values are used.
if(XY >= 0 && XY < DIV_ST) XY = DIV_ST;
else if(XY < 0 && XY > -DIV_ST) XY = -DIV_ST;
*phi = acos(x / sqrt(XY));
if(y < 0.) *phi = 2.*PI - *phi;
}
__device__ real X_pn(int n, real theta, real phi,
real *pnm_re, real *pnm_im, int pp, int stride)
{
int coeff = 0;
for(int j = 0; j < n; j++) coeff += j+1;
coeff = coeff + pp*stride;
int m = 0;
real sum = Nnm(n,m)*Pnm(n,m,theta)*pnm_re[coeff];
for(m = 1; m <= n; m++) {
coeff++;
sum += 2.*Nnm(n,m)*Pnm(n,m,theta)
*(pnm_re[coeff]*cos(m*phi)
- pnm_im[coeff]*sin(m*phi));
}
return sum;
}
__device__ real X_phin(int n, real theta, real phi,
real *phinm_re, real *phinm_im, int pp, int stride)
{
int coeff = 0;
for(int j = 0; j < n; j++) coeff += j+1;
coeff = coeff + pp*stride;
int m = 0;
real sum = Nnm(n,m)*Pnm(n,m,theta)*phinm_re[coeff];
for(m = 1; m <= n; m++) {
coeff++;
sum += 2.*Nnm(n,m)*Pnm(n,m,theta)
*(phinm_re[coeff]*cos(m*phi)
- phinm_im[coeff]*sin(m*phi));
}
return sum;
}
__device__ real Y_pn(int n, real theta, real phi,
real *pnm_re, real *pnm_im, int pp, int stride)
{
int coeff = 0;
real ct = cos(theta);
real st = sin(theta);
if(st >= 0 && st < DIV_ST) st = DIV_ST;
else if(st < 0 && st > -DIV_ST) st = -DIV_ST;
for(int j = 0; j < n; j++) coeff += j+1;
coeff = coeff + pp*stride;
int m = 0;
real sum = Nnm(n,m)
*(-(n+1)*ct/st*Pnm(n,m,theta)+(n-m+1)/st*Pnm(n+1,m,theta))
*pnm_re[coeff];
for(m = 1; m <= n; m++) {
coeff++;
sum += 2.*Nnm(n,m)
*(-(n+1)*ct/st*Pnm(n,m,theta)+(n-m+1)/st*Pnm(n+1,m,theta))
*(pnm_re[coeff]*cos(m*phi)
- pnm_im[coeff]*sin(m*phi));
}
return sum;
}
__device__ real Y_phin(int n, real theta, real phi,
real *phinm_re, real *phinm_im, int pp, int stride)
{
int coeff = 0;
real ct = cos(theta);
real st = sin(theta);
if(st >= 0 && st < DIV_ST) st = DIV_ST;
else if(st < 0 && st > -DIV_ST) st = -DIV_ST;
for(int j = 0; j < n; j++) coeff += j+1;
coeff = coeff + pp*stride;
int m = 0;
real sum = Nnm(n,m)
*(-(n+1)*ct/st*Pnm(n,m,theta)+(n-m+1)/st*Pnm(n+1,m,theta))
*phinm_re[coeff];
for(m = 1; m <= n; m++) {
coeff++;
sum += 2.*Nnm(n,m)
*(-(n+1)*ct/st*Pnm(n,m,theta)+(n-m+1)/st*Pnm(n+1,m,theta))
*(phinm_re[coeff]*cos(m*phi)
- phinm_im[coeff]*sin(m*phi));
}
return sum;
}
__device__ real Y_chin(int n, real theta, real phi,
real *chinm_re, real *chinm_im, int pp, int stride)
{
int coeff = 0;
real ct = cos(theta);
real st = sin(theta);
if(st >= 0 && st < DIV_ST) st = DIV_ST;
else if(st < 0 && st > -DIV_ST) st = -DIV_ST;
for(int j = 0; j < n; j++) coeff += j+1;
coeff = coeff + pp*stride;
int m = 0;
real sum = Nnm(n,m)
*(-(n+1)*ct/st*Pnm(n,m,theta)+(n-m+1)/st*Pnm(n+1,m,theta))
*chinm_re[coeff];
for(m = 1; m <= n; m++) {
coeff++;
sum += 2.*Nnm(n,m)
*(-(n+1)*ct/st*Pnm(n,m,theta)+(n-m+1)/st*Pnm(n+1,m,theta))
*(chinm_re[coeff]*cos(m*phi)
- chinm_im[coeff]*sin(m*phi));
}
return sum;
}
__device__ real Z_pn(int n, real theta, real phi,
real *pnm_re, real *pnm_im, int pp, int stride)
{
int coeff = 0;
real st = sin(theta);
if(st >= 0 && st < DIV_ST) st = DIV_ST;
else if(st < 0 && st > -DIV_ST) st = -DIV_ST;
for(int j = 0; j < n; j++) coeff += j+1;
coeff = coeff + pp*stride;
int m = 0;
real sum = 0.;
for(m = 1; m <= n; m++) {
coeff++;
sum += -2.*m/st*Nnm(n,m)*Pnm(n,m,theta)
*(pnm_re[coeff]*sin(m*phi)
+ pnm_im[coeff]*cos(m*phi));
}
return sum;
}
__device__ real Z_phin(int n, real theta, real phi,
real *phinm_re, real *phinm_im, int pp, int stride)
{
int coeff = 0;
real st = sin(theta);
if(st >= 0 && st < DIV_ST) st = DIV_ST;
else if(st < 0 && st > -DIV_ST) st = -DIV_ST;
for(int j = 0; j < n; j++) coeff += j+1;
coeff = coeff + pp*stride;
int m = 0;
real sum = 0.;
for(m = 1; m <= n; m++) {
coeff++;
sum += -2.*m/st*Nnm(n,m)*Pnm(n,m,theta)
*(phinm_re[coeff]*sin(m*phi)
+ phinm_im[coeff]*cos(m*phi));
}
return sum;
}
__device__ real Z_chin(int n, real theta, real phi,
real *chinm_re, real *chinm_im, int pp, int stride)
{
int coeff = 0;
real st = sin(theta);
if(st >= 0 && st < DIV_ST) st = DIV_ST;
else if(st < 0 && st > -DIV_ST) st = -DIV_ST;
for(int j = 0; j < n; j++) coeff += j+1;
coeff = coeff + pp*stride;
int m = 0;
real sum = 0.;
for(m = 1; m <= n; m++) {
coeff++;
sum += -2.*m/st*Nnm(n,m)*Pnm(n,m,theta)
*(chinm_re[coeff]*sin(m*phi)
+ chinm_im[coeff]*cos(m*phi));
}
return sum;
}
__device__ void lamb_vel(int order, real a, real r, real theta, real phi,
real nu, real *pnm_re, real *pnm_im, real *phinm_re, real *phinm_im,
real *chinm_re, real *chinm_im,
int p_ind, int stride, real *Ux, real *Uy, real *Uz)
{
real ar = a / r;
real ra = r / a;
real ur = 0.;
real ut = 0.5*ra*Y_pn(0, theta, phi, pnm_re, pnm_im, p_ind, stride);
real up = 0.5*ra*Z_pn(0, theta, phi, pnm_re, pnm_im, p_ind, stride);
for(int n = 1; n <= order; n++) {
real powranp1 = pow(ra,n+1.);
real powranm1 = pow(ra,n-1.);
real powarnp2 = pow(ar,n+2.);
real powarnp1 = pow(ar,n+1.);
real powarn = pow(ar,n);
real od2np3 = 1./(2.*n+3.);
real odnp1 = 1./(n+1.);
ur += (0.5*n*od2np3*powranp1
+ 0.25*n*((2.*n+1.)*od2np3*ar*ar-1.)*powarn)
* X_pn(n, theta, phi, pnm_re, pnm_im, p_ind, stride);
ur += (n*powranm1
+ 0.5*n*(2.*n-1.-(2.*n+1.)*ra*ra)*pow(ar,n+2.))
* X_phin(n, theta, phi, phinm_re, phinm_im, p_ind, stride);
ut += (0.5*(n+3.)*odnp1*od2np3*powranp1
+ 0.25*odnp1*(n-2.-n*(2.*n+1.)*od2np3*ar*ar)*powarn)
* Y_pn(n, theta, phi, pnm_re, pnm_im, p_ind, stride);
ut += (powranm1
+ 0.5*odnp1*((n-2.)*(2.*n+1.)*ra*ra-n*(2.*n-1.))*powarnp2)
* Y_phin(n, theta, phi, phinm_re, phinm_im, p_ind, stride);
ut += (powranm1
- powarnp1)
* Z_chin(n, theta, phi, chinm_re, chinm_im, p_ind, stride);
up += (0.5*(n+3.)*odnp1*od2np3*powranp1
+ 0.25*odnp1*(n-2.-n*(2.*n+1.)*od2np3*ar*ar)*powarn)
* Z_pn(n, theta, phi, pnm_re, pnm_im, p_ind, stride);
up += (powranm1
+ 0.5*odnp1*((n-2.)*(2.*n+1.)*ra*ra-n*(2.*n-1.))*powarnp2)
* Z_phin(n, theta, phi, phinm_re, phinm_im, p_ind, stride);
up += (-powranm1
+ powarnp1)
* Y_chin(n, theta, phi, chinm_re, chinm_im, p_ind, stride);
}
ur *= nu / a;
ut *= nu / a;
up *= nu / a;
*Ux = ur*sin(theta)*cos(phi)+ut*cos(theta)*cos(phi)-up*sin(phi);
*Uy = ur*sin(theta)*sin(phi)+ut*cos(theta)*sin(phi)+up*cos(phi);
*Uz = ur*cos(theta)-ut*sin(theta);
}
__device__ void lamb_gradP(int order, real a, real r, real theta, real phi,
real mu, real nu, real *pnm_re, real *pnm_im, real *phinm_re, real *phinm_im,
int p_ind, int stride, real *gPx, real *gPy, real *gPz)
{
real ar = a / r;
real ra = r / a;
real st = sin(theta);
if(st >= 0 && st < DIV_ST) st = DIV_ST;
else if(st < 0 && st > -DIV_ST) st = -DIV_ST;
real pr = 0;
real pt = 1./r * Y_pn(0, theta, phi, pnm_re, pnm_im, p_ind, stride);
real pp = 1./r * Z_pn(0, theta, phi, pnm_re, pnm_im, p_ind, stride);
for(int n = 1; n <= order; n++) {
pr += n * (ar + 0.5*(2.*n-1.)*pow(ar,2.*n+2.))*pow(ra,n)
* X_pn(n, theta, phi, pnm_re, pnm_im, p_ind, stride);
pr += n * (2.*n-1.)*(2.*n+1.)*pow(ar,n+2.)
* X_phin(n, theta, phi, phinm_re, phinm_im, p_ind, stride);
pt += (1. - 0.5*n*(2.*n-1.)/(n+1.)*pow(ar,2.*n+1.))*pow(ra,n)
* Y_pn(n, theta, phi, pnm_re, pnm_im, p_ind, stride);
pt += -n*(2.*n-1.)*(2.*n+1.)/(n+1.)*pow(ar,n+1.)
* Y_phin(n, theta, phi, phinm_re, phinm_im, p_ind, stride);
pp += (1. - 0.5*n*(2.*n-1.)/(n+1.)*pow(ar,2.*n+1.))*pow(ra,n)
* Z_pn(n, theta, phi, pnm_re, pnm_im, p_ind, stride);
pp += -n*(2.*n-1.)*(2.*n+1.)/(n+1.)*pow(ar,n+1.)
* Z_phin(n, theta, phi, phinm_re, phinm_im, p_ind, stride);
}
pr *= mu * nu / (a*a*a);
pt *= mu * nu / (a*a);
pp *= mu * nu / (a*a);
*gPx = pr*sin(theta)*cos(phi)+pt*cos(theta)*cos(phi)/r-pp*sin(phi)/r/st;
*gPy = pr*sin(theta)*sin(phi)+pt*cos(theta)*sin(phi)/r+pp*cos(phi)/r/st;
*gPz = pr*cos(theta)-pt*sin(theta)/r;
}
__global__ void predict_coeffs(real dt0, real dt,
real *pnm_re00, real *pnm_im00, real *phinm_re00, real *phinm_im00,
real *chinm_re00, real *chinm_im00,
real *pnm_re0, real *pnm_im0, real *phinm_re0, real *phinm_im0,
real *chinm_re0, real *chinm_im0,
real *pnm_re, real *pnm_im, real *phinm_re, real *phinm_im,
real *chinm_re, real *chinm_im, int stride)
{
int coeff = threadIdx.x;
int part = blockIdx.x;
// extrapolate coefficients
int ti = stride*part + coeff;
real a = dt/dt0;
// store last result
/*pnm_re0[ti] = pnm_re[ti];
pnm_im0[ti] = pnm_im[ti];
phinm_re0[ti] = phinm_re[ti];
phinm_im0[ti] = phinm_im[ti];
chinm_re0[ti] = chinm_re[ti];
chinm_im0[ti] = chinm_im[ti];
*/
// predict starting point for iterations at the next timestep
pnm_re[ti] = pnm_re[ti]*(1. + a) - pnm_re00[ti]*a;
pnm_im[ti] = pnm_im[ti]*(1. + a) - pnm_im00[ti]*a;
phinm_re[ti] = phinm_re[ti]*(1. + a) - phinm_re00[ti]*a;
phinm_im[ti] = phinm_im[ti]*(1. + a) - phinm_im00[ti]*a;
chinm_re[ti] = chinm_re[ti]*(1. + a) - chinm_re00[ti]*a;
chinm_im[ti] = chinm_im[ti]*(1. + a) - chinm_im00[ti]*a;
}
|
9efdd722f9266832457d99ef35159f6b1fc64039.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/IVFFlatScan.cuh>
#include <faiss/gpu/impl/DistanceUtils.cuh>
#include <faiss/gpu/impl/IVFUtils.cuh>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/gpu/utils/LoadStoreOperators.cuh>
#include <faiss/gpu/utils/PtxUtils.cuh>
#include <faiss/gpu/utils/Reductions.cuh>
#include <faiss/gpu/utils/StaticUtils.h>
#include <thrust/host_vector.h>
namespace faiss { namespace gpu {
namespace {
/// Sort direction per each metric
inline bool metricToSortDirection(MetricType mt) {
switch (mt) {
case MetricType::METRIC_INNER_PRODUCT:
// highest
return true;
case MetricType::METRIC_L2:
// lowest
return false;
default:
// unhandled metric
FAISS_ASSERT(false);
return false;
}
}
}
// Number of warps we create per block of IVFFlatScan
constexpr int kIVFFlatScanWarps = 4;
// Works for any dimension size
template <typename Codec, typename Metric>
struct IVFFlatScan {
static __device__ void scan(float* query,
bool useResidual,
float* residualBaseSlice,
void* vecData,
const Codec& codec,
const Metric& metric,
int numVecs,
int dim,
float* distanceOut) {
// How many separate loading points are there for the decoder?
int limit = utils::divDown(dim, Codec::kDimPerIter);
// Each warp handles a separate chunk of vectors
int warpId = threadIdx.x / kWarpSize;
// FIXME: why does getLaneId() not work when we write out below!?!?!
int laneId = threadIdx.x % kWarpSize; // getLaneId();
// Divide the set of vectors among the warps
int vecsPerWarp = utils::divUp(numVecs, kIVFFlatScanWarps);
int vecStart = vecsPerWarp * warpId;
int vecEnd = min(vecsPerWarp * (warpId + 1), numVecs);
// Walk the list of vectors for this warp
for (int vec = vecStart; vec < vecEnd; ++vec) {
Metric dist = metric.zero();
// Scan the dimensions available that have whole units for the decoder,
// as the decoder may handle more than one dimension at once (leaving the
// remainder to be handled separately)
for (int d = laneId; d < limit; d += kWarpSize) {
int realDim = d * Codec::kDimPerIter;
float vecVal[Codec::kDimPerIter];
// Decode the kDimPerIter dimensions
codec.decode(vecData, vec, d, vecVal);
#pragma unroll
for (int j = 0; j < Codec::kDimPerIter; ++j) {
vecVal[j] += useResidual ? residualBaseSlice[realDim + j] : 0.0f;
}
#pragma unroll
for (int j = 0; j < Codec::kDimPerIter; ++j) {
dist.handle(query[realDim + j], vecVal[j]);
}
}
// Handle remainder by a single thread, if any
// Not needed if we decode 1 dim per time
if (Codec::kDimPerIter > 1) {
int realDim = limit * Codec::kDimPerIter;
// Was there any remainder?
if (realDim < dim) {
// Let the first threads in the block sequentially perform it
int remainderDim = realDim + laneId;
if (remainderDim < dim) {
float vecVal =
codec.decodePartial(vecData, vec, limit, laneId);
vecVal += useResidual ? residualBaseSlice[remainderDim] : 0.0f;
dist.handle(query[remainderDim], vecVal);
}
}
}
// Reduce distance within warp
auto warpDist = warpReduceAllSum(dist.reduce());
if (laneId == 0) {
distanceOut[vec] = warpDist;
}
}
}
};
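// A worked example of the partitioning above (comments only, numbers chosen
// for illustration): with kIVFFlatScanWarps = 4 and numVecs = 100,
// vecsPerWarp = divUp(100, 4) = 25, so warp 0 scans vectors [0, 25) and warp 3
// scans [75, 100). If dim = 19 and Codec::kDimPerIter = 4, then
// limit = divDown(19, 4) = 4 full decode steps cover dimensions 0..15, and the
// remaining dimensions 16..18 are handled by the decodePartial() branch, one
// lane per leftover dimension.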
template <typename Codec, typename Metric>
__global__ void
ivfFlatScan(Tensor<float, 2, true> queries,
bool useResidual,
Tensor<float, 3, true> residualBase,
Tensor<int, 2, true> listIds,
void** allListData,
int* listLengths,
Codec codec,
Metric metric,
Tensor<int, 2, true> prefixSumOffsets,
Tensor<float, 1, true> distance) {
extern __shared__ float smem[];
auto queryId = blockIdx.y;
auto probeId = blockIdx.x;
// This is where we start writing out data
// We ensure that before the array (at offset -1), there is a 0 value
int outBase = *(prefixSumOffsets[queryId][probeId].data() - 1);
auto listId = listIds[queryId][probeId];
// Safety guard in case NaNs in input cause no list ID to be generated
if (listId == -1) {
return;
}
auto query = queries[queryId].data();
auto vecs = allListData[listId];
auto numVecs = listLengths[listId];
auto dim = queries.getSize(1);
auto distanceOut = distance[outBase].data();
auto residualBaseSlice = residualBase[queryId][probeId].data();
codec.setSmem(smem, dim);
IVFFlatScan<Codec, Metric>::scan(query,
useResidual,
residualBaseSlice,
vecs,
codec,
metric,
numVecs,
dim,
distanceOut);
}
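// The outBase lookup above relies on prefixSumOffsets being laid out with one
// extra leading element that is always 0 (see the hipMemsetAsync on
// prefixSumOffsetSpace below), so the very first (query, probe) pair can read
// its start offset at index -1 without branching. For example, if the first
// query in a tile probes lists of lengths {3, 5, 2}, the stored running sums
// are {3, 8, 10}, and the kernel instance for probe 1 would start writing its
// distances at *(ptr - 1) = 3.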
void
runIVFFlatScanTile(GpuResources* res,
Tensor<float, 2, true>& queries,
Tensor<int, 2, true>& listIds,
thrust::device_vector<void*>& listData,
thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
thrust::device_vector<int>& listLengths,
Tensor<char, 1, true>& thrustMem,
Tensor<int, 2, true>& prefixSumOffsets,
Tensor<float, 1, true>& allDistances,
Tensor<float, 3, true>& heapDistances,
Tensor<int, 3, true>& heapIndices,
int k,
faiss::MetricType metricType,
bool useResidual,
Tensor<float, 3, true>& residualBase,
GpuScalarQuantizer* scalarQ,
Tensor<float, 2, true>& outDistances,
Tensor<long, 2, true>& outIndices,
hipStream_t stream) {
int dim = queries.getSize(1);
// Check the amount of shared memory per block available based on our type is
// sufficient
if (scalarQ &&
(scalarQ->qtype == ScalarQuantizer::QuantizerType::QT_8bit ||
scalarQ->qtype == ScalarQuantizer::QuantizerType::QT_4bit)) {
int maxDim = getMaxSharedMemPerBlockCurrentDevice() /
(sizeof(float) * 2);
FAISS_THROW_IF_NOT_FMT(dim < maxDim,
"Insufficient shared memory available on the GPU "
"for QT_8bit or QT_4bit with %d dimensions; "
"maximum dimensions possible is %d", dim, maxDim);
}
// Calculate offset lengths, so we know where to write out
// intermediate results
runCalcListOffsets(
res, listIds, listLengths, prefixSumOffsets, thrustMem, stream);
auto grid = dim3(listIds.getSize(1), listIds.getSize(0));
auto block = dim3(kWarpSize * kIVFFlatScanWarps);
#define RUN_IVF_FLAT \
do { \
hipLaunchKernelGGL(( ivfFlatScan) \
, dim3(grid), dim3(block), codec.getSmemSize(dim), stream, \
queries, \
useResidual, \
residualBase, \
listIds, \
listData.data().get(), \
listLengths.data().get(), \
codec, \
metric, \
prefixSumOffsets, \
allDistances); \
} while (0)
#define HANDLE_METRICS \
do { \
if (metricType == MetricType::METRIC_L2) { \
L2Distance metric; RUN_IVF_FLAT; \
} else { \
IPDistance metric; RUN_IVF_FLAT; \
} \
} while (0)
if (!scalarQ) {
CodecFloat codec(dim * sizeof(float));
HANDLE_METRICS;
} else {
switch (scalarQ->qtype) {
case ScalarQuantizer::QuantizerType::QT_8bit:
{
// FIXME: investigate 32 bit load perf issues
// if (dim % 4 == 0) {
if (false) {
Codec<ScalarQuantizer::QuantizerType::QT_8bit, 4>
codec(scalarQ->code_size,
scalarQ->gpuTrained.data(),
scalarQ->gpuTrained.data() + dim);
HANDLE_METRICS;
} else {
Codec<ScalarQuantizer::QuantizerType::QT_8bit, 1>
codec(scalarQ->code_size,
scalarQ->gpuTrained.data(),
scalarQ->gpuTrained.data() + dim);
HANDLE_METRICS;
}
}
break;
case ScalarQuantizer::QuantizerType::QT_8bit_uniform:
{
// FIXME: investigate 32 bit load perf issues
if (false) {
// if (dim % 4 == 0) {
Codec<ScalarQuantizer::QuantizerType::QT_8bit_uniform, 4>
codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
HANDLE_METRICS;
} else {
Codec<ScalarQuantizer::QuantizerType::QT_8bit_uniform, 1>
codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
HANDLE_METRICS;
}
}
break;
case ScalarQuantizer::QuantizerType::QT_fp16:
{
if (false) {
// FIXME: investigate 32 bit load perf issues
// if (dim % 2 == 0) {
Codec<ScalarQuantizer::QuantizerType::QT_fp16, 2>
codec(scalarQ->code_size);
HANDLE_METRICS;
} else {
Codec<ScalarQuantizer::QuantizerType::QT_fp16, 1>
codec(scalarQ->code_size);
HANDLE_METRICS;
}
}
break;
case ScalarQuantizer::QuantizerType::QT_8bit_direct:
{
Codec<ScalarQuantizer::QuantizerType::QT_8bit_direct, 1>
codec(scalarQ->code_size);
HANDLE_METRICS;
}
break;
case ScalarQuantizer::QuantizerType::QT_4bit:
{
Codec<ScalarQuantizer::QuantizerType::QT_4bit, 1>
codec(scalarQ->code_size,
scalarQ->gpuTrained.data(),
scalarQ->gpuTrained.data() + dim);
HANDLE_METRICS;
}
break;
case ScalarQuantizer::QuantizerType::QT_4bit_uniform:
{
Codec<ScalarQuantizer::QuantizerType::QT_4bit_uniform, 1>
codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
HANDLE_METRICS;
}
break;
default:
// unimplemented, should be handled at a higher level
FAISS_ASSERT(false);
}
}
CUDA_TEST_ERROR();
#undef HANDLE_METRICS
#undef RUN_IVF_FLAT
// k-select the output in chunks, to increase parallelism
runPass1SelectLists(prefixSumOffsets,
allDistances,
listIds.getSize(1),
k,
metricToSortDirection(metricType),
heapDistances,
heapIndices,
stream);
// k-select final output
auto flatHeapDistances = heapDistances.downcastInner<2>();
auto flatHeapIndices = heapIndices.downcastInner<2>();
runPass2SelectLists(flatHeapDistances,
flatHeapIndices,
listIndices,
indicesOptions,
prefixSumOffsets,
listIds,
k,
metricToSortDirection(metricType),
outDistances,
outIndices,
stream);
}
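// A small worked example of the two-pass k-selection above (comments only):
// with nprobe = 32 and kNProbeSplit = 8 (set in runIVFFlatScan below),
// pass2Chunks = 8, so runPass1SelectLists first keeps the k best candidates
// within each of the 8 chunks of probed lists per query, and
// runPass2SelectLists then merges those 8 * k candidates down to the final k
// per query. Splitting the first pass this way trades a little extra work for
// more blocks running in parallel.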
void
runIVFFlatScan(Tensor<float, 2, true>& queries,
Tensor<int, 2, true>& listIds,
thrust::device_vector<void*>& listData,
thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
thrust::device_vector<int>& listLengths,
int maxListLength,
int k,
faiss::MetricType metric,
bool useResidual,
Tensor<float, 3, true>& residualBase,
GpuScalarQuantizer* scalarQ,
// output
Tensor<float, 2, true>& outDistances,
// output
Tensor<long, 2, true>& outIndices,
GpuResources* res) {
constexpr int kMinQueryTileSize = 8;
constexpr int kMaxQueryTileSize = 128;
constexpr int kThrustMemSize = 16384;
int nprobe = listIds.getSize(1);
auto stream = res->getDefaultStreamCurrentDevice();
// Make a reservation for Thrust to do its dirty work (global memory
// cross-block reduction space); hopefully this is large enough.
DeviceTensor<char, 1, true> thrustMem1(
res, makeTempAlloc(AllocType::Other, stream), {kThrustMemSize});
DeviceTensor<char, 1, true> thrustMem2(
res, makeTempAlloc(AllocType::Other, stream), {kThrustMemSize});
DeviceTensor<char, 1, true>* thrustMem[2] =
{&thrustMem1, &thrustMem2};
// How much temporary storage is available?
// If possible, we'd like to fit within the space available.
size_t sizeAvailable = res->getTempMemoryAvailableCurrentDevice();
// We run two passes of heap selection
// This is the size of the first-level heap passes
constexpr int kNProbeSplit = 8;
int pass2Chunks = ::min(nprobe, kNProbeSplit);
size_t sizeForFirstSelectPass =
pass2Chunks * k * (sizeof(float) + sizeof(int));
// How much temporary storage we need per each query
size_t sizePerQuery =
2 * // # streams
((nprobe * sizeof(int) + sizeof(int)) + // prefixSumOffsets
nprobe * maxListLength * sizeof(float) + // allDistances
sizeForFirstSelectPass);
int queryTileSize = (int) (sizeAvailable / sizePerQuery);
if (queryTileSize < kMinQueryTileSize) {
queryTileSize = kMinQueryTileSize;
} else if (queryTileSize > kMaxQueryTileSize) {
queryTileSize = kMaxQueryTileSize;
}
// FIXME: we should adjust queryTileSize to deal with this, since
// indexing is in int32
FAISS_ASSERT(queryTileSize * nprobe * maxListLength <
std::numeric_limits<int>::max());
// Temporary memory buffers
// Make sure there is space prior to the start which will be 0, and
// will handle the boundary condition without branches
DeviceTensor<int, 1, true> prefixSumOffsetSpace1(
res, makeTempAlloc(AllocType::Other, stream), {queryTileSize * nprobe + 1});
DeviceTensor<int, 1, true> prefixSumOffsetSpace2(
res, makeTempAlloc(AllocType::Other, stream), {queryTileSize * nprobe + 1});
DeviceTensor<int, 2, true> prefixSumOffsets1(
prefixSumOffsetSpace1[1].data(),
{queryTileSize, nprobe});
DeviceTensor<int, 2, true> prefixSumOffsets2(
prefixSumOffsetSpace2[1].data(),
{queryTileSize, nprobe});
DeviceTensor<int, 2, true>* prefixSumOffsets[2] =
{&prefixSumOffsets1, &prefixSumOffsets2};
// Make sure the element before prefixSumOffsets is 0, since we
// depend upon simple, boundary-less indexing to get proper results
CUDA_VERIFY(hipMemsetAsync(prefixSumOffsetSpace1.data(),
0,
sizeof(int),
stream));
CUDA_VERIFY(hipMemsetAsync(prefixSumOffsetSpace2.data(),
0,
sizeof(int),
stream));
DeviceTensor<float, 1, true> allDistances1(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize * nprobe * maxListLength});
DeviceTensor<float, 1, true> allDistances2(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize * nprobe * maxListLength});
DeviceTensor<float, 1, true>* allDistances[2] =
{&allDistances1, &allDistances2};
DeviceTensor<float, 3, true> heapDistances1(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize, pass2Chunks, k});
DeviceTensor<float, 3, true> heapDistances2(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize, pass2Chunks, k});
DeviceTensor<float, 3, true>* heapDistances[2] =
{&heapDistances1, &heapDistances2};
DeviceTensor<int, 3, true> heapIndices1(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize, pass2Chunks, k});
DeviceTensor<int, 3, true> heapIndices2(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize, pass2Chunks, k});
DeviceTensor<int, 3, true>* heapIndices[2] =
{&heapIndices1, &heapIndices2};
auto streams = res->getAlternateStreamsCurrentDevice();
streamWait(streams, {stream});
int curStream = 0;
for (int query = 0; query < queries.getSize(0); query += queryTileSize) {
int numQueriesInTile =
::min(queryTileSize, queries.getSize(0) - query);
auto prefixSumOffsetsView =
prefixSumOffsets[curStream]->narrowOutermost(0, numQueriesInTile);
auto listIdsView =
listIds.narrowOutermost(query, numQueriesInTile);
auto queryView =
queries.narrowOutermost(query, numQueriesInTile);
auto residualBaseView =
residualBase.narrowOutermost(query, numQueriesInTile);
auto heapDistancesView =
heapDistances[curStream]->narrowOutermost(0, numQueriesInTile);
auto heapIndicesView =
heapIndices[curStream]->narrowOutermost(0, numQueriesInTile);
auto outDistanceView =
outDistances.narrowOutermost(query, numQueriesInTile);
auto outIndicesView =
outIndices.narrowOutermost(query, numQueriesInTile);
runIVFFlatScanTile(res,
queryView,
listIdsView,
listData,
listIndices,
indicesOptions,
listLengths,
*thrustMem[curStream],
prefixSumOffsetsView,
*allDistances[curStream],
heapDistancesView,
heapIndicesView,
k,
metric,
useResidual,
residualBaseView,
scalarQ,
outDistanceView,
outIndicesView,
streams[curStream]);
curStream = (curStream + 1) % 2;
}
streamWait({stream}, streams);
}
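// Rough sizing example for the tiling logic above (comments only, numbers
// purely illustrative): with nprobe = 32, maxListLength = 10000 and k = 10,
// sizePerQuery is dominated by the allDistances term
// (2 * 32 * 10000 * sizeof(float), about 2.4 MiB per query), so a 256 MiB
// scratch arena yields a queryTileSize of roughly 100, which is then clamped
// into the [kMinQueryTileSize, kMaxQueryTileSize] = [8, 128] range.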
} } // namespace
| 9efdd722f9266832457d99ef35159f6b1fc64039.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/IVFFlatScan.cuh>
#include <faiss/gpu/impl/DistanceUtils.cuh>
#include <faiss/gpu/impl/IVFUtils.cuh>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/gpu/utils/LoadStoreOperators.cuh>
#include <faiss/gpu/utils/PtxUtils.cuh>
#include <faiss/gpu/utils/Reductions.cuh>
#include <faiss/gpu/utils/StaticUtils.h>
#include <thrust/host_vector.h>
namespace faiss { namespace gpu {
namespace {
/// Sort direction per each metric
inline bool metricToSortDirection(MetricType mt) {
switch (mt) {
case MetricType::METRIC_INNER_PRODUCT:
// highest
return true;
case MetricType::METRIC_L2:
// lowest
return false;
default:
// unhandled metric
FAISS_ASSERT(false);
return false;
}
}
}
// Number of warps we create per block of IVFFlatScan
constexpr int kIVFFlatScanWarps = 4;
// Works for any dimension size
template <typename Codec, typename Metric>
struct IVFFlatScan {
static __device__ void scan(float* query,
bool useResidual,
float* residualBaseSlice,
void* vecData,
const Codec& codec,
const Metric& metric,
int numVecs,
int dim,
float* distanceOut) {
// How many separate loading points are there for the decoder?
int limit = utils::divDown(dim, Codec::kDimPerIter);
// Each warp handles a separate chunk of vectors
int warpId = threadIdx.x / kWarpSize;
// FIXME: why does getLaneId() not work when we write out below!?!?!
int laneId = threadIdx.x % kWarpSize; // getLaneId();
// Divide the set of vectors among the warps
int vecsPerWarp = utils::divUp(numVecs, kIVFFlatScanWarps);
int vecStart = vecsPerWarp * warpId;
int vecEnd = min(vecsPerWarp * (warpId + 1), numVecs);
// Walk the list of vectors for this warp
for (int vec = vecStart; vec < vecEnd; ++vec) {
Metric dist = metric.zero();
// Scan the dimensions available that have whole units for the decoder,
// as the decoder may handle more than one dimension at once (leaving the
// remainder to be handled separately)
for (int d = laneId; d < limit; d += kWarpSize) {
int realDim = d * Codec::kDimPerIter;
float vecVal[Codec::kDimPerIter];
// Decode the kDimPerIter dimensions
codec.decode(vecData, vec, d, vecVal);
#pragma unroll
for (int j = 0; j < Codec::kDimPerIter; ++j) {
vecVal[j] += useResidual ? residualBaseSlice[realDim + j] : 0.0f;
}
#pragma unroll
for (int j = 0; j < Codec::kDimPerIter; ++j) {
dist.handle(query[realDim + j], vecVal[j]);
}
}
// Handle remainder by a single thread, if any
// Not needed if we decode 1 dim per time
if (Codec::kDimPerIter > 1) {
int realDim = limit * Codec::kDimPerIter;
// Was there any remainder?
if (realDim < dim) {
// Let the first threads in the block sequentially perform it
int remainderDim = realDim + laneId;
if (remainderDim < dim) {
float vecVal =
codec.decodePartial(vecData, vec, limit, laneId);
vecVal += useResidual ? residualBaseSlice[remainderDim] : 0.0f;
dist.handle(query[remainderDim], vecVal);
}
}
}
// Reduce distance within warp
auto warpDist = warpReduceAllSum(dist.reduce());
if (laneId == 0) {
distanceOut[vec] = warpDist;
}
}
}
};
template <typename Codec, typename Metric>
__global__ void
ivfFlatScan(Tensor<float, 2, true> queries,
bool useResidual,
Tensor<float, 3, true> residualBase,
Tensor<int, 2, true> listIds,
void** allListData,
int* listLengths,
Codec codec,
Metric metric,
Tensor<int, 2, true> prefixSumOffsets,
Tensor<float, 1, true> distance) {
extern __shared__ float smem[];
auto queryId = blockIdx.y;
auto probeId = blockIdx.x;
// This is where we start writing out data
// We ensure that before the array (at offset -1), there is a 0 value
int outBase = *(prefixSumOffsets[queryId][probeId].data() - 1);
auto listId = listIds[queryId][probeId];
// Safety guard in case NaNs in input cause no list ID to be generated
if (listId == -1) {
return;
}
auto query = queries[queryId].data();
auto vecs = allListData[listId];
auto numVecs = listLengths[listId];
auto dim = queries.getSize(1);
auto distanceOut = distance[outBase].data();
auto residualBaseSlice = residualBase[queryId][probeId].data();
codec.setSmem(smem, dim);
IVFFlatScan<Codec, Metric>::scan(query,
useResidual,
residualBaseSlice,
vecs,
codec,
metric,
numVecs,
dim,
distanceOut);
}
void
runIVFFlatScanTile(GpuResources* res,
Tensor<float, 2, true>& queries,
Tensor<int, 2, true>& listIds,
thrust::device_vector<void*>& listData,
thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
thrust::device_vector<int>& listLengths,
Tensor<char, 1, true>& thrustMem,
Tensor<int, 2, true>& prefixSumOffsets,
Tensor<float, 1, true>& allDistances,
Tensor<float, 3, true>& heapDistances,
Tensor<int, 3, true>& heapIndices,
int k,
faiss::MetricType metricType,
bool useResidual,
Tensor<float, 3, true>& residualBase,
GpuScalarQuantizer* scalarQ,
Tensor<float, 2, true>& outDistances,
Tensor<long, 2, true>& outIndices,
cudaStream_t stream) {
int dim = queries.getSize(1);
// Check the amount of shared memory per block available based on our type is
// sufficient
if (scalarQ &&
(scalarQ->qtype == ScalarQuantizer::QuantizerType::QT_8bit ||
scalarQ->qtype == ScalarQuantizer::QuantizerType::QT_4bit)) {
int maxDim = getMaxSharedMemPerBlockCurrentDevice() /
(sizeof(float) * 2);
FAISS_THROW_IF_NOT_FMT(dim < maxDim,
"Insufficient shared memory available on the GPU "
"for QT_8bit or QT_4bit with %d dimensions; "
"maximum dimensions possible is %d", dim, maxDim);
}
// Calculate offset lengths, so we know where to write out
// intermediate results
runCalcListOffsets(
res, listIds, listLengths, prefixSumOffsets, thrustMem, stream);
auto grid = dim3(listIds.getSize(1), listIds.getSize(0));
auto block = dim3(kWarpSize * kIVFFlatScanWarps);
#define RUN_IVF_FLAT \
do { \
ivfFlatScan \
<<<grid, block, codec.getSmemSize(dim), stream>>>( \
queries, \
useResidual, \
residualBase, \
listIds, \
listData.data().get(), \
listLengths.data().get(), \
codec, \
metric, \
prefixSumOffsets, \
allDistances); \
} while (0)
#define HANDLE_METRICS \
do { \
if (metricType == MetricType::METRIC_L2) { \
L2Distance metric; RUN_IVF_FLAT; \
} else { \
IPDistance metric; RUN_IVF_FLAT; \
} \
} while (0)
if (!scalarQ) {
CodecFloat codec(dim * sizeof(float));
HANDLE_METRICS;
} else {
switch (scalarQ->qtype) {
case ScalarQuantizer::QuantizerType::QT_8bit:
{
// FIXME: investigate 32 bit load perf issues
// if (dim % 4 == 0) {
if (false) {
Codec<ScalarQuantizer::QuantizerType::QT_8bit, 4>
codec(scalarQ->code_size,
scalarQ->gpuTrained.data(),
scalarQ->gpuTrained.data() + dim);
HANDLE_METRICS;
} else {
Codec<ScalarQuantizer::QuantizerType::QT_8bit, 1>
codec(scalarQ->code_size,
scalarQ->gpuTrained.data(),
scalarQ->gpuTrained.data() + dim);
HANDLE_METRICS;
}
}
break;
case ScalarQuantizer::QuantizerType::QT_8bit_uniform:
{
// FIXME: investigate 32 bit load perf issues
if (false) {
// if (dim % 4 == 0) {
Codec<ScalarQuantizer::QuantizerType::QT_8bit_uniform, 4>
codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
HANDLE_METRICS;
} else {
Codec<ScalarQuantizer::QuantizerType::QT_8bit_uniform, 1>
codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
HANDLE_METRICS;
}
}
break;
case ScalarQuantizer::QuantizerType::QT_fp16:
{
if (false) {
// FIXME: investigate 32 bit load perf issues
// if (dim % 2 == 0) {
Codec<ScalarQuantizer::QuantizerType::QT_fp16, 2>
codec(scalarQ->code_size);
HANDLE_METRICS;
} else {
Codec<ScalarQuantizer::QuantizerType::QT_fp16, 1>
codec(scalarQ->code_size);
HANDLE_METRICS;
}
}
break;
case ScalarQuantizer::QuantizerType::QT_8bit_direct:
{
Codec<ScalarQuantizer::QuantizerType::QT_8bit_direct, 1>
codec(scalarQ->code_size);
HANDLE_METRICS;
}
break;
case ScalarQuantizer::QuantizerType::QT_4bit:
{
Codec<ScalarQuantizer::QuantizerType::QT_4bit, 1>
codec(scalarQ->code_size,
scalarQ->gpuTrained.data(),
scalarQ->gpuTrained.data() + dim);
HANDLE_METRICS;
}
break;
case ScalarQuantizer::QuantizerType::QT_4bit_uniform:
{
Codec<ScalarQuantizer::QuantizerType::QT_4bit_uniform, 1>
codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
HANDLE_METRICS;
}
break;
default:
// unimplemented, should be handled at a higher level
FAISS_ASSERT(false);
}
}
CUDA_TEST_ERROR();
#undef HANDLE_METRICS
#undef RUN_IVF_FLAT
// k-select the output in chunks, to increase parallelism
runPass1SelectLists(prefixSumOffsets,
allDistances,
listIds.getSize(1),
k,
metricToSortDirection(metricType),
heapDistances,
heapIndices,
stream);
// k-select final output
auto flatHeapDistances = heapDistances.downcastInner<2>();
auto flatHeapIndices = heapIndices.downcastInner<2>();
runPass2SelectLists(flatHeapDistances,
flatHeapIndices,
listIndices,
indicesOptions,
prefixSumOffsets,
listIds,
k,
metricToSortDirection(metricType),
outDistances,
outIndices,
stream);
}
void
runIVFFlatScan(Tensor<float, 2, true>& queries,
Tensor<int, 2, true>& listIds,
thrust::device_vector<void*>& listData,
thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
thrust::device_vector<int>& listLengths,
int maxListLength,
int k,
faiss::MetricType metric,
bool useResidual,
Tensor<float, 3, true>& residualBase,
GpuScalarQuantizer* scalarQ,
// output
Tensor<float, 2, true>& outDistances,
// output
Tensor<long, 2, true>& outIndices,
GpuResources* res) {
constexpr int kMinQueryTileSize = 8;
constexpr int kMaxQueryTileSize = 128;
constexpr int kThrustMemSize = 16384;
int nprobe = listIds.getSize(1);
auto stream = res->getDefaultStreamCurrentDevice();
// Make a reservation for Thrust to do its dirty work (global memory
// cross-block reduction space); hopefully this is large enough.
DeviceTensor<char, 1, true> thrustMem1(
res, makeTempAlloc(AllocType::Other, stream), {kThrustMemSize});
DeviceTensor<char, 1, true> thrustMem2(
res, makeTempAlloc(AllocType::Other, stream), {kThrustMemSize});
DeviceTensor<char, 1, true>* thrustMem[2] =
{&thrustMem1, &thrustMem2};
// How much temporary storage is available?
// If possible, we'd like to fit within the space available.
size_t sizeAvailable = res->getTempMemoryAvailableCurrentDevice();
// We run two passes of heap selection
// This is the size of the first-level heap passes
constexpr int kNProbeSplit = 8;
int pass2Chunks = std::min(nprobe, kNProbeSplit);
size_t sizeForFirstSelectPass =
pass2Chunks * k * (sizeof(float) + sizeof(int));
// How much temporary storage we need per each query
size_t sizePerQuery =
2 * // # streams
((nprobe * sizeof(int) + sizeof(int)) + // prefixSumOffsets
nprobe * maxListLength * sizeof(float) + // allDistances
sizeForFirstSelectPass);
int queryTileSize = (int) (sizeAvailable / sizePerQuery);
if (queryTileSize < kMinQueryTileSize) {
queryTileSize = kMinQueryTileSize;
} else if (queryTileSize > kMaxQueryTileSize) {
queryTileSize = kMaxQueryTileSize;
}
// FIXME: we should adjust queryTileSize to deal with this, since
// indexing is in int32
FAISS_ASSERT(queryTileSize * nprobe * maxListLength <
std::numeric_limits<int>::max());
// Temporary memory buffers
// Make sure there is space prior to the start which will be 0, and
// will handle the boundary condition without branches
DeviceTensor<int, 1, true> prefixSumOffsetSpace1(
res, makeTempAlloc(AllocType::Other, stream), {queryTileSize * nprobe + 1});
DeviceTensor<int, 1, true> prefixSumOffsetSpace2(
res, makeTempAlloc(AllocType::Other, stream), {queryTileSize * nprobe + 1});
DeviceTensor<int, 2, true> prefixSumOffsets1(
prefixSumOffsetSpace1[1].data(),
{queryTileSize, nprobe});
DeviceTensor<int, 2, true> prefixSumOffsets2(
prefixSumOffsetSpace2[1].data(),
{queryTileSize, nprobe});
DeviceTensor<int, 2, true>* prefixSumOffsets[2] =
{&prefixSumOffsets1, &prefixSumOffsets2};
// Make sure the element before prefixSumOffsets is 0, since we
// depend upon simple, boundary-less indexing to get proper results
CUDA_VERIFY(cudaMemsetAsync(prefixSumOffsetSpace1.data(),
0,
sizeof(int),
stream));
CUDA_VERIFY(cudaMemsetAsync(prefixSumOffsetSpace2.data(),
0,
sizeof(int),
stream));
DeviceTensor<float, 1, true> allDistances1(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize * nprobe * maxListLength});
DeviceTensor<float, 1, true> allDistances2(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize * nprobe * maxListLength});
DeviceTensor<float, 1, true>* allDistances[2] =
{&allDistances1, &allDistances2};
DeviceTensor<float, 3, true> heapDistances1(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize, pass2Chunks, k});
DeviceTensor<float, 3, true> heapDistances2(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize, pass2Chunks, k});
DeviceTensor<float, 3, true>* heapDistances[2] =
{&heapDistances1, &heapDistances2};
DeviceTensor<int, 3, true> heapIndices1(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize, pass2Chunks, k});
DeviceTensor<int, 3, true> heapIndices2(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize, pass2Chunks, k});
DeviceTensor<int, 3, true>* heapIndices[2] =
{&heapIndices1, &heapIndices2};
auto streams = res->getAlternateStreamsCurrentDevice();
streamWait(streams, {stream});
int curStream = 0;
for (int query = 0; query < queries.getSize(0); query += queryTileSize) {
int numQueriesInTile =
std::min(queryTileSize, queries.getSize(0) - query);
auto prefixSumOffsetsView =
prefixSumOffsets[curStream]->narrowOutermost(0, numQueriesInTile);
auto listIdsView =
listIds.narrowOutermost(query, numQueriesInTile);
auto queryView =
queries.narrowOutermost(query, numQueriesInTile);
auto residualBaseView =
residualBase.narrowOutermost(query, numQueriesInTile);
auto heapDistancesView =
heapDistances[curStream]->narrowOutermost(0, numQueriesInTile);
auto heapIndicesView =
heapIndices[curStream]->narrowOutermost(0, numQueriesInTile);
auto outDistanceView =
outDistances.narrowOutermost(query, numQueriesInTile);
auto outIndicesView =
outIndices.narrowOutermost(query, numQueriesInTile);
runIVFFlatScanTile(res,
queryView,
listIdsView,
listData,
listIndices,
indicesOptions,
listLengths,
*thrustMem[curStream],
prefixSumOffsetsView,
*allDistances[curStream],
heapDistancesView,
heapIndicesView,
k,
metric,
useResidual,
residualBaseView,
scalarQ,
outDistanceView,
outIndicesView,
streams[curStream]);
curStream = (curStream + 1) % 2;
}
streamWait({stream}, streams);
}
} } // namespace
|
872cde6b54dc5f63f17ce9ea74de2181aaf93fe4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* GPU Quicksort Kernels
* ---------------------
* Functions part1, part2, part3 and lqsort
* Copyright (c) 2007-2008, Daniel Cederman and Philippas Tsigas
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following
* conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ----
*
* Functions bitonicSort and cumcount are based upon the
* NVIDIA CUDA SDK Code Samples "Bitonic Sort" and "Scan"
*
* "This code is released free of charge for use in
* derivative works, whether academic, commercial, or personal"
* - CUDA Website
*
* http://developer.download.nvidia.com/licenses/general_license.txt
*
**/
#include "gpuqsort.h"
#undef THREADS
#define THREADS blockDim.x
extern __shared__ unsigned int sarray[];
#ifdef HASATOMICS
__device__ unsigned int ohtotal = 0;
#endif
/**
* Swaps the location of two unsigned ints
* @param a This unsigned int will swap place with unsigned int b
* @param b This unsigned int will swap place with unsigned int a
*/
//template <typename unsigned int>
__device__ inline void swap(unsigned int& a, unsigned int& b)
{
unsigned int tmp = a;
a = b;
b = tmp;
}
/**
* Perform a bitonic sort
 * @param fromvalues The unsigned ints to be sorted
 * @param tovalues Where to place the sorted unsigned ints when done
 * @param from Index of the first unsigned int of the sequence to sort
 * @param size The number of unsigned ints to sort
*/
//template <typename unsigned int>
__device__ inline
void bitonicSort(unsigned int* fromvalues, unsigned int* tovalues, unsigned int from, unsigned int size)
{
unsigned int* shared = (unsigned int*)sarray;
unsigned int coal = (from&0xf);
size = size + coal;
from = from - coal;
int sb = 2 << (int)(__log2f(size));
// Buffer data to be sorted in the shared memory
for(int i=threadIdx.x;i<size;i+=THREADS)
{
shared[i] = fromvalues[i+from];
}
for(int i=threadIdx.x;i<coal;i+=THREADS)
shared[i]=0;
// Pad the data
for(int i=threadIdx.x+size;i<sb;i+=THREADS)
shared[i] = 0xffffffff;
__syncthreads();
// Parallel bitonic sort.
for (int k = 2; k <= sb; k *= 2)
{
// Bitonic merge:
for (int j = k / 2; j>0; j /= 2)
{
for(int tid=threadIdx.x;tid<sb;tid+=THREADS)
{
unsigned int ixj = tid ^ j;
if (ixj > tid)
{
if ((tid & k) == 0)
{
if (shared[tid] > shared[ixj])
{
swap(shared[tid], shared[ixj]);
}
}
else
{
if (shared[tid] < shared[ixj])
{
swap(shared[tid], shared[ixj]);
}
}
}
}
__syncthreads();
}
}
__syncthreads();
// Write back the sorted data to its correct position
for(int i=threadIdx.x;i<size;i+=THREADS)
if(i>=coal)
tovalues[i+from] = shared[i];
__syncthreads();
}
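// Worked example of the alignment/padding above (comments only): for
// from = 18 and size = 50, coal = 18 & 0xf = 2, so the load window is shifted
// back to element 16 and grows to 52 unsigned ints; the 2 leading pad slots
// are filled with 0 and the slots from 52 up to the next power of two
// (sb = 64) are filled with 0xffffffff, so the padding sorts to the two ends
// of shared memory and the i >= coal test on write-back skips it.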
/**
* Perform a cumulative count on two arrays
 * @param lblock First array of per-thread counts; scanned in place into an exclusive prefix sum, with the block total stored at index THREADS
 * @param rblock Second array of per-thread counts; scanned the same way in the same pass
*/
__device__ inline void cumcount(unsigned int *lblock, unsigned int *rblock)
{
int tx = threadIdx.x;
int offset = 1;
__syncthreads();
for (int d = THREADS>>1; d > 0; d >>= 1) // build sum in place up the tree
{
__syncthreads();
if (tx < d)
{
int ai = offset*(2*tx+1)-1;
int bi = offset*(2*tx+2)-1;
lblock[bi] += lblock[ai];
rblock[bi] += rblock[ai];
}
offset *= 2;
}
__syncthreads();
if (tx == 0)
{
lblock[THREADS] = lblock[THREADS-1];
rblock[THREADS] = rblock[THREADS-1];
lblock[THREADS - 1] =0;
rblock[THREADS - 1] =0;
} // clear the last element
__syncthreads();
for (int d = 1; d < THREADS; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (tx < d)
{
int ai = offset*(2*tx+1)-1;
int bi = offset*(2*tx+2)-1;
int t = lblock[ai];
lblock[ai] = lblock[bi];
lblock[bi] += t;
t = rblock[ai];
rblock[ai] = rblock[bi];
rblock[bi] += t;
}
}
}
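// Worked example of the scan above (comments only): with THREADS = 4 and
// lblock = {3, 1, 7, 0}, the up-sweep builds partial sums, the total 11 is
// parked in lblock[4] (index THREADS), the last element is cleared, and the
// down-sweep leaves the exclusive prefix sum {0, 3, 4, 11}; rblock is scanned
// the same way in the same pass. part1/part2 below then read lblock[tx+1],
// i.e. the inclusive sum, to know where each thread's elements go.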
/**
* Part One - Counts the number of unsigned ints larger or smaller than the pivot. It then
* performs a cumulative sum so that each thread knows where to write
* @param data unsigned ints to be counted
* @param params Specifies which data each thread block is responsible for
* @param hist The cumulative sum for each thread is stored here
* @param lengths The total sum for each thread block is stored here
*/
//template <typename unsigned int>
__global__ void part1(unsigned int* data, struct Params<unsigned int>* params, struct Hist* hist, Length<unsigned int>* lengths)
{
const int tx = threadIdx.x;
unsigned int* lblock = (unsigned int*)sarray;
unsigned int* rblock = (unsigned int*)(&lblock[(blockDim.x+1)]);
unsigned int* minpiv = (unsigned int*)(&rblock[(blockDim.x+1)]);
unsigned int* maxpiv = (unsigned int*)(&minpiv[blockDim.x]);
// Where should we read?
unsigned int start = params[blockIdx.x].from;
unsigned int end = params[blockIdx.x].end;
unsigned int pivot = params[blockIdx.x].pivot;
// Stores the max and min value of the data. Used to decide a new pivot
minpiv[tx] = data[start+tx];
maxpiv[tx] = data[start+tx];
__syncthreads();
int ll=0;
int lr=0;
__syncthreads();
int coal = (start&0xf);
start = start-coal;
// Go through the data
if(tx+start<end)
{
unsigned int d = data[tx+start];
if(!(tx<coal))
{
// Counting unsigned ints smaller...
if(d<pivot)
ll++;
else
// or larger than the pivot
if(d>pivot)
lr++;
// Store the max and min unsigned int
minpiv[tx] = min(minpiv[tx],d);
maxpiv[tx] = max(maxpiv[tx],d);
}
}
// Go through the data
for(unsigned int i=tx+start+THREADS;i<end;i+=THREADS)
{
unsigned int d = data[i];
// Counting unsigned ints smaller...
if(d<pivot)
ll++;
else
// or larger than the pivot
if(d>pivot)
lr++;
// Store the max and min unsigned int
minpiv[tx] = min(minpiv[tx],d);
maxpiv[tx] = max(maxpiv[tx],d);
}
lblock[tx]=ll;
rblock[tx]=lr;
__syncthreads();
// Perform a cumulative sum
cumcount((unsigned int*)lblock,(unsigned int*)rblock);
if(tx==0)
{
// Decide on max and min unsigned int
for(int i=0;i<THREADS;i++)
{
minpiv[0]=min(minpiv[0],minpiv[i]);
maxpiv[0]=max(maxpiv[0],maxpiv[i]);
}
}
__syncthreads();
// Store each threads part of the cumulative count
hist->left[blockIdx.x*(THREADS)+threadIdx.x] = lblock[threadIdx.x+1];
hist->right[blockIdx.x*(THREADS)+threadIdx.x] = rblock[threadIdx.x+1];
// Store the total sum
lengths->left[blockIdx.x] = lblock[THREADS];
lengths->right[blockIdx.x] = rblock[THREADS];
// Store the max and min unsigned int
lengths->minpiv[blockIdx.x] = minpiv[0];
lengths->maxpiv[blockIdx.x] = maxpiv[0];
}
/**
 * Part Two - Move unsigned ints to their correct position in the auxiliary array
* @param data unsigned ints to be moved
* @param data2 Destination for unsigned ints
* @param params Specifies which data each thread block is responsible for
* @param hist The cumulative sum for each thread is stored here
* @param lengths The total sum for each thread block is stored here
*/
//template <typename unsigned int>
__global__ void part2(unsigned int* data, unsigned int* data2, struct Params<unsigned int>* params, struct Hist* hist, Length<unsigned int>* lengths)
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
// Each thread uses the cumulative sum to know where to write
unsigned int x = lengths->left[bx] + hist->left[bx*(THREADS)+tx]-1;// - 1;
unsigned int y = lengths->right[bx] - hist->right[bx*(THREADS)+tx];
// Where should we read?
unsigned int start = params[bx].from;
unsigned int end = params[bx].end;
unsigned int pivot = params[bx].pivot;
__syncthreads();
int coal = (start&0xf);
start = start-coal;
// Go through all the assigned data
if(tx+start<end)
{
// Reading unsigned ints...
unsigned int d = data[tx+start];
if(!(tx<coal))
{
// and writing them to the auxiliary array
if(d<pivot)
data2[x--]=d;
else
if(d>pivot)
data2[y++]=d;
}
}
__syncthreads();
// Go through all the assigned data
for(unsigned int i=start+tx+THREADS;i<end;i+=THREADS)
{
// Reading unsigned ints...
unsigned int d = data[i];
// and writing them to the auxiliary array
if(d<pivot)
{
data2[x--]=d;
}
else
if(d>pivot)
data2[y++]=d;
}
return;
}
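// Worked example of the write indices above (comments only): if the
// per-thread "smaller than pivot" counts in a block are {2, 0, 3, 1}, the
// inclusive sums stored in hist->left are {2, 2, 5, 6}, so thread 0 writes its
// two elements downward starting at lengths->left[bx] + 2 - 1 and thread 2
// writes its three downward starting at lengths->left[bx] + 5 - 1; the
// symmetric hist->right sums position each thread's larger-than-pivot
// elements, written upward from the right end of the auxiliary buffer.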
/**
* Part Three - Write the pivot value
* @param data Destination for pivot
* @param params Specifies which data each thread block is responsible for
* @param hist The cumulative sum for each thread is stored here
* @param lengths The total sum for each thread block is stored here
*/
//template <typename unsigned int>
__global__ void part3(unsigned int* data, struct Params<unsigned int>* params, struct Hist* hist, Length<unsigned int>* lengths)
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
// If we are the "last" thread block that is assigned to the same data sequence
// we write the pivot between the left and right block
if(params[bx].last)
{
// Get destination position
unsigned int x = lengths->left[bx] + hist->left[bx*THREADS+THREADS-1] + tx;
unsigned int y = lengths->right[bx] - hist->right[bx*THREADS+THREADS-1];
unsigned int pivot = params[bx].pivot;
// Write the pivot values
for(;x<y;x+=THREADS)
data[x]=pivot;
}
}
/**
* The local quicksort - sorts a block of data with no inter-block synchronization
* @param adata Contains some of the blocks to be sorted and also acts as the final
* destination for sorted data
* @param adata2 Contains some of the blocks to be sorted
* @param bs List of blocks to be sorted and a pointer telling if a specific block is
* in \a adata or \a adata2
*/
//template <typename unsigned int>
__global__ void lqsort(unsigned int* adata, unsigned int* adata2, struct LQSortParams* bs, unsigned int phase)
{
__shared__ unsigned int lphase;
lphase=phase;
// Shorthand for the threadid
int tx = threadIdx.x;
// Stack pointer
__shared__ int bi;
// Stack unsigned ints
__shared__ unsigned int beg[32];
__shared__ unsigned int end[32];
__shared__ bool flip[32];
unsigned int* lblock = (unsigned int*)sarray;
unsigned int* rblock = (unsigned int*)(&lblock[(blockDim.x+1)]);
// The current pivot
__shared__ unsigned int pivot;
// The sequence to be sorted
__shared__ unsigned int from;
__shared__ unsigned int to;
// Since we switch between the primary and the auxiliary buffer,
// these variables are required to keep track on which role
// a buffer currently has
__shared__ unsigned int* data;
__shared__ unsigned int* data2;
__shared__ unsigned int sbsize;
__shared__ unsigned int bx;
if(threadIdx.x==0)
#ifdef HASATOMICS
bx = atomicInc(&ohtotal,50000);
#else
bx = blockIdx.x;
#endif
__syncthreads();
while(bx<gridDim.x)
{
// Thread 0 is in charge of the stack operations
if(tx==0)
{
// We push our first block on the stack
// This is the block given by the bs parameter
beg[0] = bs[bx].beg;
end[0] = bs[bx].end;
flip[0] = bs[bx].flip;
sbsize = bs[bx].sbsize;
bi = 0;
}
__syncthreads();
// If we were given an empty block there is no need to continue
if(end[0]==beg[0])
return;
// While there are items left on the stack to sort
while(bi>=0)
{
__syncthreads();
// Thread 0 pops a fresh sequence from the stack
if(tx==0)
{
from = beg[bi];
to = end[bi];
// Check which buffer the sequence is in
if(!flip[bi])
{
data = adata2;
data2 = adata;
}
else
{
data = adata;
data2 = adata2;
}
}
__syncthreads();
// If the sequence is smaller than SBSIZE we sort it using
// an alternative sort. Otherwise each thread would sort just one
// or two unsigned ints and that wouldn't be efficient
if((to-from)<(sbsize-16))
{
// Sort it using bitonic sort. This could be changed to some other
// sorting method. Store the result in the final destination buffer
if((to-from>=1)&&(lphase!=2))
bitonicSort(data,adata,from,to-from);
__syncthreads();
// Decrement the stack pointer
if(tx==0)
bi--;
__syncthreads();
// and continue with the next sequence
continue;
}
if(tx==0)
{
// Create a new pivot for the sequence
// Try to optimize this for your input distribution
// if you have some information about it
unsigned int mip = min(min(data[from],data[to-1]),data[(from+to)/2]);
unsigned int map = max(max(data[from],data[to-1]),data[(from+to)/2]);
pivot = min(max(mip/2+map/2,mip),map);
}
unsigned int ll=0;
unsigned int lr=0;
__syncthreads();
unsigned int coal = (from)&0xf;
if(tx+from-coal<to)
{
unsigned int d = data[tx+from-coal];
if(!(tx<coal))
{
// Counting unsigned ints that have a higher value than the pivot
if(d<pivot)
ll++;
else
// or a lower
if(d>pivot)
lr++;
}
}
// Go through the current sequence
for(int i=from+tx+THREADS-coal;i<to;i+=THREADS)
{
unsigned int d = data[i];
// Counting unsigned ints that have a higher value than the pivot
if(d<pivot)
ll++;
else
// or a lower
if(d>pivot)
lr++;
}
// Store the result in a shared array so that we can calculate a
// cumulative sum
lblock[tx]=ll;
rblock[tx]=lr;
__syncthreads();
// Calculate the cumulative sum
cumcount((unsigned int*)lblock,(unsigned int*)rblock);
__syncthreads();
// Let thread 0 add the new resulting subsequences to the stack
if(tx==0)
{
// The sequences are in the other buffer now
flip[bi+1] = !flip[bi];
flip[bi] = !flip[bi];
// We need to place the smallest object on top of the stack
// to ensure that we don't run out of stack space
if(lblock[THREADS]<rblock[THREADS])
{
beg[bi+1]=beg[bi];
beg[bi]=to-rblock[THREADS];
end[bi+1]=from+lblock[THREADS];
}
else
{
end[bi+1]=end[bi];
end[bi]=from+lblock[THREADS];
beg[bi+1]=to-rblock[THREADS];
}
// Increment the stack pointer
bi++;
}
__syncthreads();
unsigned int x = from+lblock[tx+1]-1;
unsigned int y = to-rblock[tx+1];
coal = from&0xf;
if(tx+from-coal<to)
{
unsigned int d = data[tx+from-coal];
if(!(tx<coal))
{
if(d<pivot)
data2[x--] = d;
else
if(d>pivot)
data2[y++] = d;
}
}
// Go through the data once again
// writing it to its correct position
for(unsigned int i=from+tx+THREADS-coal;i<to;i+=THREADS)
{
unsigned int d = data[i];
if(d<pivot)
data2[x--] = d;
else
if(d>pivot)
data2[y++] = d;
}
__syncthreads();
// As a final step, write the pivot value between the right and left
// subsequence. Write it to the final destination since this pivot
// is always correctly sorted
for(unsigned int i=from+lblock[THREADS]+tx;i<to-rblock[THREADS];i+=THREADS)
{
adata[i]=pivot;
}
__syncthreads();
}
#ifdef HASATOMICS
if(threadIdx.x==0)
bx = atomicInc(&ohtotal,50000);
__syncthreads();
#else
break;
#endif
}
__syncthreads();
}
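// Note on the 32-entry stack above (comments only): because the smaller of the
// two partitions is always pushed on top and popped first, the stack depth is
// bounded by about log2(n), so 32 entries are enough for any input that fits
// in a 32-bit index space.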
| 872cde6b54dc5f63f17ce9ea74de2181aaf93fe4.cu | /**
* GPU Quicksort Kernels
* ---------------------
* Functions part1, part2, part3 and lqsort
* Copyright (c) 2007-2008, Daniel Cederman and Philippas Tsigas
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following
* conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ----
*
* Functions bitonicSort and cumcount are based upon the
* NVIDIA CUDA SDK Code Samples "Bitonic Sort" and "Scan"
*
* "This code is released free of charge for use in
* derivative works, whether academic, commercial, or personal"
* - CUDA Website
*
* http://developer.download.nvidia.com/licenses/general_license.txt
*
**/
#include "gpuqsort.h"
#undef THREADS
#define THREADS blockDim.x
extern __shared__ unsigned int sarray[];
#ifdef HASATOMICS
__device__ unsigned int ohtotal = 0;
#endif
/**
* Swaps the location of two unsigned ints
* @param a This unsigned int will swap place with unsigned int b
* @param b This unsigned int will swap place with unsigned int a
*/
//template <typename unsigned int>
__device__ inline void swap(unsigned int& a, unsigned int& b)
{
unsigned int tmp = a;
a = b;
b = tmp;
}
/**
* Perform a bitonic sort
 * @param fromvalues The unsigned ints to be sorted
 * @param tovalues Where to place the sorted unsigned ints when done
 * @param from Index of the first unsigned int of the sequence to sort
 * @param size The number of unsigned ints to sort
*/
//template <typename unsigned int>
__device__ inline
void bitonicSort(unsigned int* fromvalues, unsigned int* tovalues, unsigned int from, unsigned int size)
{
unsigned int* shared = (unsigned int*)sarray;
unsigned int coal = (from&0xf);
size = size + coal;
from = from - coal;
int sb = 2 << (int)(__log2f(size));
// Buffer data to be sorted in the shared memory
for(int i=threadIdx.x;i<size;i+=THREADS)
{
shared[i] = fromvalues[i+from];
}
for(int i=threadIdx.x;i<coal;i+=THREADS)
shared[i]=0;
// Pad the data
for(int i=threadIdx.x+size;i<sb;i+=THREADS)
shared[i] = 0xffffffff;
__syncthreads();
// Parallel bitonic sort.
for (int k = 2; k <= sb; k *= 2)
{
// Bitonic merge:
for (int j = k / 2; j>0; j /= 2)
{
for(int tid=threadIdx.x;tid<sb;tid+=THREADS)
{
unsigned int ixj = tid ^ j;
if (ixj > tid)
{
if ((tid & k) == 0)
{
if (shared[tid] > shared[ixj])
{
swap(shared[tid], shared[ixj]);
}
}
else
{
if (shared[tid] < shared[ixj])
{
swap(shared[tid], shared[ixj]);
}
}
}
}
__syncthreads();
}
}
__syncthreads();
// Write back the sorted data to its correct position
for(int i=threadIdx.x;i<size;i+=THREADS)
if(i>=coal)
tovalues[i+from] = shared[i];
__syncthreads();
}
/**
* Perform a cumulative count on two arrays
 * @param lblock First array of per-thread counts; scanned in place into an exclusive prefix sum, with the block total stored at index THREADS
 * @param rblock Second array of per-thread counts; scanned the same way in the same pass
*/
__device__ inline void cumcount(unsigned int *lblock, unsigned int *rblock)
{
int tx = threadIdx.x;
int offset = 1;
__syncthreads();
for (int d = THREADS>>1; d > 0; d >>= 1) // build sum in place up the tree
{
__syncthreads();
if (tx < d)
{
int ai = offset*(2*tx+1)-1;
int bi = offset*(2*tx+2)-1;
lblock[bi] += lblock[ai];
rblock[bi] += rblock[ai];
}
offset *= 2;
}
__syncthreads();
if (tx == 0)
{
lblock[THREADS] = lblock[THREADS-1];
rblock[THREADS] = rblock[THREADS-1];
lblock[THREADS - 1] =0;
rblock[THREADS - 1] =0;
} // clear the last element
__syncthreads();
for (int d = 1; d < THREADS; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (tx < d)
{
int ai = offset*(2*tx+1)-1;
int bi = offset*(2*tx+2)-1;
int t = lblock[ai];
lblock[ai] = lblock[bi];
lblock[bi] += t;
t = rblock[ai];
rblock[ai] = rblock[bi];
rblock[bi] += t;
}
}
}
/**
* Part One - Counts the number of unsigned ints larger or smaller than the pivot. It then
* performs a cumulative sum so that each thread knows where to write
* @param data unsigned ints to be counted
* @param params Specifies which data each thread block is responsible for
* @param hist The cumulative sum for each thread is stored here
* @param lengths The total sum for each thread block is stored here
*/
//template <typename unsigned int>
__global__ void part1(unsigned int* data, struct Params<unsigned int>* params, struct Hist* hist, Length<unsigned int>* lengths)
{
const int tx = threadIdx.x;
unsigned int* lblock = (unsigned int*)sarray;
unsigned int* rblock = (unsigned int*)(&lblock[(blockDim.x+1)]);
unsigned int* minpiv = (unsigned int*)(&rblock[(blockDim.x+1)]);
unsigned int* maxpiv = (unsigned int*)(&minpiv[blockDim.x]);
// Where should we read?
unsigned int start = params[blockIdx.x].from;
unsigned int end = params[blockIdx.x].end;
unsigned int pivot = params[blockIdx.x].pivot;
// Stores the max and min value of the data. Used to decide a new pivot
minpiv[tx] = data[start+tx];
maxpiv[tx] = data[start+tx];
__syncthreads();
int ll=0;
int lr=0;
__syncthreads();
int coal = (start&0xf);
start = start-coal;
// Go through the data
if(tx+start<end)
{
unsigned int d = data[tx+start];
if(!(tx<coal))
{
// Counting unsigned ints smaller...
if(d<pivot)
ll++;
else
// or larger than the pivot
if(d>pivot)
lr++;
// Store the max and min unsigned int
minpiv[tx] = min(minpiv[tx],d);
maxpiv[tx] = max(maxpiv[tx],d);
}
}
// Go through the data
for(unsigned int i=tx+start+THREADS;i<end;i+=THREADS)
{
unsigned int d = data[i];
// Counting unsigned ints smaller...
if(d<pivot)
ll++;
else
// or larger than the pivot
if(d>pivot)
lr++;
// Store the max and min unsigned int
minpiv[tx] = min(minpiv[tx],d);
maxpiv[tx] = max(maxpiv[tx],d);
}
lblock[tx]=ll;
rblock[tx]=lr;
__syncthreads();
// Perform a cumulative sum
cumcount((unsigned int*)lblock,(unsigned int*)rblock);
if(tx==0)
{
// Decide on max and min unsigned int
for(int i=0;i<THREADS;i++)
{
minpiv[0]=min(minpiv[0],minpiv[i]);
maxpiv[0]=max(maxpiv[0],maxpiv[i]);
}
}
__syncthreads();
// Store each threads part of the cumulative count
hist->left[blockIdx.x*(THREADS)+threadIdx.x] = lblock[threadIdx.x+1];
hist->right[blockIdx.x*(THREADS)+threadIdx.x] = rblock[threadIdx.x+1];
// Store the total sum
lengths->left[blockIdx.x] = lblock[THREADS];
lengths->right[blockIdx.x] = rblock[THREADS];
// Store the max and min unsigned int
lengths->minpiv[blockIdx.x] = minpiv[0];
lengths->maxpiv[blockIdx.x] = maxpiv[0];
}
/**
 * Part Two - Move unsigned ints to their correct position in the auxiliary array
* @param data unsigned ints to be moved
* @param data2 Destination for unsigned ints
* @param params Specifies which data each thread block is responsible for
* @param hist The cumulative sum for each thread is stored here
* @param lengths The total sum for each thread block is stored here
*/
//template <typename unsigned int>
__global__ void part2(unsigned int* data, unsigned int* data2, struct Params<unsigned int>* params, struct Hist* hist, Length<unsigned int>* lengths)
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
// Each thread uses the cumulative sum to know where to write
unsigned int x = lengths->left[bx] + hist->left[bx*(THREADS)+tx]-1;// - 1;
unsigned int y = lengths->right[bx] - hist->right[bx*(THREADS)+tx];
// Where should we read?
unsigned int start = params[bx].from;
unsigned int end = params[bx].end;
unsigned int pivot = params[bx].pivot;
__syncthreads();
int coal = (start&0xf);
start = start-coal;
// Go through all the assigned data
if(tx+start<end)
{
// Reading unsigned ints...
unsigned int d = data[tx+start];
if(!(tx<coal))
{
// and writing them to the auxiliary array
if(d<pivot)
data2[x--]=d;
else
if(d>pivot)
data2[y++]=d;
}
}
__syncthreads();
// Go through all the assigned data
for(unsigned int i=start+tx+THREADS;i<end;i+=THREADS)
{
// Reading unsigned ints...
unsigned int d = data[i];
// and writing them to the auxiliary array
if(d<pivot)
{
data2[x--]=d;
}
else
if(d>pivot)
data2[y++]=d;
}
return;
}
/**
* Part Three - Write the pivot value
* @param data Destination for pivot
* @param params Specifies which data each thread block is responsible for
* @param hist The cumulative sum for each thread is stored here
* @param lengths The total sum for each thread block is stored here
*/
//template <typename unsigned int>
__global__ void part3(unsigned int* data, struct Params<unsigned int>* params, struct Hist* hist, Length<unsigned int>* lengths)
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
// If we are the "last" thread block that is assigned to the same data sequence
// we write the pivot between the left and right block
if(params[bx].last)
{
// Get destination position
unsigned int x = lengths->left[bx] + hist->left[bx*THREADS+THREADS-1] + tx;
unsigned int y = lengths->right[bx] - hist->right[bx*THREADS+THREADS-1];
unsigned int pivot = params[bx].pivot;
// Write the pivot values
for(;x<y;x+=THREADS)
data[x]=pivot;
}
}
/**
* The local quicksort - sorts a block of data with no inter-block synchronization
* @param adata Contains some of the blocks to be sorted and also acts as the final
* destination for sorted data
* @param adata2 Contains some of the blocks to be sorted
* @param bs List of blocks to be sorted and a pointer telling if a specific block is
* in \a adata or \a adata2
*/
//template <typename unsigned int>
__global__ void lqsort(unsigned int* adata, unsigned int* adata2, struct LQSortParams* bs, unsigned int phase)
{
__shared__ unsigned int lphase;
lphase=phase;
// Shorthand for the threadid
int tx = threadIdx.x;
// Stack pointer
__shared__ int bi;
// Stack unsigned ints
__shared__ unsigned int beg[32];
__shared__ unsigned int end[32];
__shared__ bool flip[32];
unsigned int* lblock = (unsigned int*)sarray;
unsigned int* rblock = (unsigned int*)(&lblock[(blockDim.x+1)]);
// The current pivot
__shared__ unsigned int pivot;
// The sequence to be sorted
__shared__ unsigned int from;
__shared__ unsigned int to;
// Since we switch between the primary and the auxiliary buffer,
// these variables are required to keep track on which role
// a buffer currently has
__shared__ unsigned int* data;
__shared__ unsigned int* data2;
__shared__ unsigned int sbsize;
__shared__ unsigned int bx;
if(threadIdx.x==0)
#ifdef HASATOMICS
bx = atomicInc(&ohtotal,50000);
#else
bx = blockIdx.x;
#endif
__syncthreads();
while(bx<gridDim.x)
{
// Thread 0 is in charge of the stack operations
if(tx==0)
{
// We push our first block on the stack
// This is the block given by the bs parameter
beg[0] = bs[bx].beg;
end[0] = bs[bx].end;
flip[0] = bs[bx].flip;
sbsize = bs[bx].sbsize;
bi = 0;
}
__syncthreads();
// If we were given an empty block there is no need to continue
if(end[0]==beg[0])
return;
// While there are items left on the stack to sort
while(bi>=0)
{
__syncthreads();
// Thread 0 pops a fresh sequence from the stack
if(tx==0)
{
from = beg[bi];
to = end[bi];
// Check which buffer the sequence is in
if(!flip[bi])
{
data = adata2;
data2 = adata;
}
else
{
data = adata;
data2 = adata2;
}
}
__syncthreads();
// If the sequence is smaller than SBSIZE we sort it using
// an alternative sort. Otherwise each thread would sort just one
// or two unsigned ints and that wouldn't be efficient
if((to-from)<(sbsize-16))
{
// Sort it using bitonic sort. This could be changed to some other
// sorting method. Store the result in the final destination buffer
if((to-from>=1)&&(lphase!=2))
bitonicSort(data,adata,from,to-from);
__syncthreads();
// Decrement the stack pointer
if(tx==0)
bi--;
__syncthreads();
// and continue with the next sequence
continue;
}
if(tx==0)
{
// Create a new pivot for the sequence
// Try to optimize this for your input distribution
// if you have some information about it
unsigned int mip = min(min(data[from],data[to-1]),data[(from+to)/2]);
unsigned int map = max(max(data[from],data[to-1]),data[(from+to)/2]);
pivot = min(max(mip/2+map/2,mip),map);
}
unsigned int ll=0;
unsigned int lr=0;
__syncthreads();
unsigned int coal = (from)&0xf;
if(tx+from-coal<to)
{
unsigned int d = data[tx+from-coal];
if(!(tx<coal))
{
// Counting unsigned ints that have a higher value than the pivot
if(d<pivot)
ll++;
else
// or a lower
if(d>pivot)
lr++;
}
}
// Go through the current sequence
for(int i=from+tx+THREADS-coal;i<to;i+=THREADS)
{
unsigned int d = data[i];
// Counting unsigned ints that have a higher value than the pivot
if(d<pivot)
ll++;
else
// or a lower
if(d>pivot)
lr++;
}
// Store the result in a shared array so that we can calculate a
// cumulative sum
lblock[tx]=ll;
rblock[tx]=lr;
__syncthreads();
// Calculate the cumulative sum
cumcount((unsigned int*)lblock,(unsigned int*)rblock);
__syncthreads();
// Let thread 0 add the new resulting subsequences to the stack
if(tx==0)
{
// The sequences are in the other buffer now
flip[bi+1] = !flip[bi];
flip[bi] = !flip[bi];
// We need to place the smallest object on top of the stack
// to ensure that we don't run out of stack space
if(lblock[THREADS]<rblock[THREADS])
{
beg[bi+1]=beg[bi];
beg[bi]=to-rblock[THREADS];
end[bi+1]=from+lblock[THREADS];
}
else
{
end[bi+1]=end[bi];
end[bi]=from+lblock[THREADS];
beg[bi+1]=to-rblock[THREADS];
}
// Increment the stack pointer
bi++;
}
__syncthreads();
unsigned int x = from+lblock[tx+1]-1;
unsigned int y = to-rblock[tx+1];
coal = from&0xf;
if(tx+from-coal<to)
{
unsigned int d = data[tx+from-coal];
if(!(tx<coal))
{
if(d<pivot)
data2[x--] = d;
else
if(d>pivot)
data2[y++] = d;
}
}
// Go through the data once again
// writing it to its correct position
for(unsigned int i=from+tx+THREADS-coal;i<to;i+=THREADS)
{
unsigned int d = data[i];
if(d<pivot)
data2[x--] = d;
else
if(d>pivot)
data2[y++] = d;
}
__syncthreads();
// As a final step, write the pivot value between the right and left
// subsequence. Write it to the final destination since this pivot
// is always correctly sorted
for(unsigned int i=from+lblock[THREADS]+tx;i<to-rblock[THREADS];i+=THREADS)
{
adata[i]=pivot;
}
__syncthreads();
}
#ifdef HASATOMICS
if(threadIdx.x==0)
bx = atomicInc(&ohtotal,50000);
__syncthreads();
#else
break;
#endif
}
__syncthreads();
}
|
6feaadbef274169413966549ab30cc1535b053ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/PReLU.cu"
#else
void THNN_(PReLU_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *weight)
{
THCTensor_(resizeAs)(state, output, input);
int64_t nOutputPlane = THCTensor_(numel)(state, weight);
weight = THCTensor_(newContiguous)(state, weight);
scalar_t *w = THCTensor_(data)(state, weight);
if (nOutputPlane == 1)
{
THC_pointwiseApply2<scalar_t, scalar_t>(state, output, input, PReLUUpdateOutput<scalar_t>(w));
}
else
{
int ndim = THCTensor_(nDimensionLegacyAll)(state, input);
input = THCTensor_(newContiguous)(state, input);
int n = THCTensor_(nElement)(state, input);
if (THTensor_sizeLegacyNoScalars(input, ndim > 1) != nOutputPlane)
THError("Wrong number of input planes. Expected %d but got %d.", nOutputPlane, THTensor_sizeLegacyNoScalars(input, ndim > 1));
int mapSize = 1;
for (int d = 2; d < ndim; d++) {
mapSize *= input->size(d);
}
int nElemsPerSample = nOutputPlane * mapSize;
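    // Shape note: for input [N, C, d1, d2, ...] mapSize is the product of the
    // trailing spatial dims and nElemsPerSample = C * mapSize, so the kernel
    // can presumably map a flat index i to its channel as
    // (i % nElemsPerSample) / mapSize when selecting the per-channel weight from w.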
hipLaunchKernelGGL(( preluForward), dim3(GET_BLOCKS(n)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
w,
n, nElemsPerSample, mapSize
);
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, input);
}
THCTensor_(free)(state, weight);
}
void THNN_(PReLU_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *weight)
{
THCUNN_check_nElement(state, input, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
int64_t nOutputPlane = THCTensor_(numel)(state, weight);
weight = THCTensor_(newContiguous)(state, weight);
scalar_t *w = THCTensor_(data)(state, weight);
if (nOutputPlane == 1)
{
THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradInput, gradOutput, input, PReLUUpdateGradInput<scalar_t>(w));
}
else
{
int ndim = THCTensor_(nDimensionLegacyAll)(state, input);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int n = THCTensor_(nElement)(state, input);
if (THTensor_sizeLegacyNoScalars(input, ndim > 1) != nOutputPlane)
THError("Wrong number of input planes. Expected %d but got %d.", nOutputPlane, THTensor_sizeLegacyNoScalars(input, ndim > 1));
int mapSize = 1;
for (int d = 2; d < ndim; d++) {
mapSize *= input->size(d);
}
int nElemsPerSample = nOutputPlane * mapSize;
hipLaunchKernelGGL(( preluBackward), dim3(GET_BLOCKS(n)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, input),
w,
THCTensor_(data)(state, gradOutput),
n, nElemsPerSample, mapSize
);
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
}
THCTensor_(free)(state, weight);
}
void THNN_(PReLU_accGradParameters)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *weight,
THCTensor *gradWeight,
accreal scale_)
{
scalar_t scale = ScalarConvert<accreal, scalar_t>::to(scale_);
THCUNN_check_nElement(state, input, gradOutput);
int64_t nOutputPlane = THCTensor_(numel)(state, weight);
// use grad input for temporary storage, then call updateGradInput again
if (nOutputPlane == 1)
{
THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradInput, input, gradOutput, PReLUAccGradParametersShared<scalar_t>());
// introduces a sync point
scalar_t sum = ScalarConvert<accreal, scalar_t>::to(THCTensor_(sumall)(state, gradInput));
scalar_t w = THCTensor_(get1d)(state, gradWeight, 0);
THCTensor_(set1d)(state, gradWeight, 0, w + sum * scale);
// restore gradInput
THNN_(PReLU_updateGradInput)(state, input, gradOutput, gradInput, weight);
}
else
{
int ndim = THCTensor_(nDimensionLegacyAll)(state, input);
if (ndim == 1)
{
THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradWeight, input, gradOutput, PReLUAccGradParameters1to1<scalar_t>(scale));
}
else
{
THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradInput, input, gradOutput, PReLUAccGradParameters<scalar_t>(scale));
THCTensor *gradWeightBuf = THCTensor_(new)(state);
THCTensor_(resizeAs)(state, gradWeightBuf, gradWeight);
if (ndim == 2)
{
THCTensor_(sum)(state, gradWeightBuf, gradInput, 0, 0);
THCTensor_(cadd)(state, gradWeight, gradWeight, scale, gradWeightBuf);
}
else
{
THCTensor *sumbuf = THCTensor_(new)(state);
THCTensor *buffer = THCTensor_(newContiguous)(state, gradInput);
int64_t size3 = 1;
for (int d = 2; d < ndim; d++) {
size3 *= input->size(d);
}
THCTensor_(resize3d)(state, buffer, input->size(0), nOutputPlane, size3);
THCTensor_(resize2d)(state, sumbuf, input->size(0), nOutputPlane);
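        // gradInput is only scratch here (see the note above): viewed as
        // [N, C, mapSize] via 'buffer', the sum over dim 2 and then dim 0
        // reduces the per-element contributions to one value per channel,
        // which is scaled by 'scale' and accumulated into gradWeight below.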
THCTensor_(sum)(state, sumbuf, buffer, 2, 0);
THCTensor_(sum)(state, gradWeightBuf, sumbuf, 0, 0);
THCTensor_(cadd)(state, gradWeight, gradWeight, scale, gradWeightBuf);
THCTensor_(free)(state, buffer);
THCTensor_(free)(state, sumbuf);
}
THCTensor_(free)(state, gradWeightBuf);
// restore gradInput
THNN_(PReLU_updateGradInput)(state, input, gradOutput, gradInput, weight);
}
}
}
#endif
| 6feaadbef274169413966549ab30cc1535b053ab.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/PReLU.cu"
#else
void THNN_(PReLU_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *weight)
{
THCTensor_(resizeAs)(state, output, input);
int64_t nOutputPlane = THCTensor_(numel)(state, weight);
weight = THCTensor_(newContiguous)(state, weight);
scalar_t *w = THCTensor_(data)(state, weight);
if (nOutputPlane == 1)
{
THC_pointwiseApply2<scalar_t, scalar_t>(state, output, input, PReLUUpdateOutput<scalar_t>(w));
}
else
{
int ndim = THCTensor_(nDimensionLegacyAll)(state, input);
input = THCTensor_(newContiguous)(state, input);
int n = THCTensor_(nElement)(state, input);
if (THTensor_sizeLegacyNoScalars(input, ndim > 1) != nOutputPlane)
THError("Wrong number of input planes. Expected %d but got %d.", nOutputPlane, THTensor_sizeLegacyNoScalars(input, ndim > 1));
int mapSize = 1;
for (int d = 2; d < ndim; d++) {
mapSize *= input->size(d);
}
int nElemsPerSample = nOutputPlane * mapSize;
preluForward<<<GET_BLOCKS(n), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
w,
n, nElemsPerSample, mapSize
);
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, input);
}
THCTensor_(free)(state, weight);
}
void THNN_(PReLU_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *weight)
{
THCUNN_check_nElement(state, input, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
int64_t nOutputPlane = THCTensor_(numel)(state, weight);
weight = THCTensor_(newContiguous)(state, weight);
scalar_t *w = THCTensor_(data)(state, weight);
if (nOutputPlane == 1)
{
THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradInput, gradOutput, input, PReLUUpdateGradInput<scalar_t>(w));
}
else
{
int ndim = THCTensor_(nDimensionLegacyAll)(state, input);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int n = THCTensor_(nElement)(state, input);
if (THTensor_sizeLegacyNoScalars(input, ndim > 1) != nOutputPlane)
THError("Wrong number of input planes. Expected %d but got %d.", nOutputPlane, THTensor_sizeLegacyNoScalars(input, ndim > 1));
int mapSize = 1;
for (int d = 2; d < ndim; d++) {
mapSize *= input->size(d);
}
int nElemsPerSample = nOutputPlane * mapSize;
preluBackward<<<GET_BLOCKS(n), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, input),
w,
THCTensor_(data)(state, gradOutput),
n, nElemsPerSample, mapSize
);
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
}
THCTensor_(free)(state, weight);
}
void THNN_(PReLU_accGradParameters)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *weight,
THCTensor *gradWeight,
accreal scale_)
{
scalar_t scale = ScalarConvert<accreal, scalar_t>::to(scale_);
THCUNN_check_nElement(state, input, gradOutput);
int64_t nOutputPlane = THCTensor_(numel)(state, weight);
// use grad input for temporary storage, then call updateGradInput again
if (nOutputPlane == 1)
{
THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradInput, input, gradOutput, PReLUAccGradParametersShared<scalar_t>());
// introduces a sync point
scalar_t sum = ScalarConvert<accreal, scalar_t>::to(THCTensor_(sumall)(state, gradInput));
scalar_t w = THCTensor_(get1d)(state, gradWeight, 0);
THCTensor_(set1d)(state, gradWeight, 0, w + sum * scale);
// restore gradInput
THNN_(PReLU_updateGradInput)(state, input, gradOutput, gradInput, weight);
}
else
{
int ndim = THCTensor_(nDimensionLegacyAll)(state, input);
if (ndim == 1)
{
THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradWeight, input, gradOutput, PReLUAccGradParameters1to1<scalar_t>(scale));
}
else
{
THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradInput, input, gradOutput, PReLUAccGradParameters<scalar_t>(scale));
THCTensor *gradWeightBuf = THCTensor_(new)(state);
THCTensor_(resizeAs)(state, gradWeightBuf, gradWeight);
if (ndim == 2)
{
THCTensor_(sum)(state, gradWeightBuf, gradInput, 0, 0);
THCTensor_(cadd)(state, gradWeight, gradWeight, scale, gradWeightBuf);
}
else
{
THCTensor *sumbuf = THCTensor_(new)(state);
THCTensor *buffer = THCTensor_(newContiguous)(state, gradInput);
int64_t size3 = 1;
for (int d = 2; d < ndim; d++) {
size3 *= input->size(d);
}
THCTensor_(resize3d)(state, buffer, input->size(0), nOutputPlane, size3);
THCTensor_(resize2d)(state, sumbuf, input->size(0), nOutputPlane);
THCTensor_(sum)(state, sumbuf, buffer, 2, 0);
THCTensor_(sum)(state, gradWeightBuf, sumbuf, 0, 0);
THCTensor_(cadd)(state, gradWeight, gradWeight, scale, gradWeightBuf);
THCTensor_(free)(state, buffer);
THCTensor_(free)(state, sumbuf);
}
THCTensor_(free)(state, gradWeightBuf);
// restore gradInput
THNN_(PReLU_updateGradInput)(state, input, gradOutput, gradInput, weight);
}
}
}
#endif
|
3ffe5448310d5f92021807ef7e2a04723d130b3a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" {
#include "../shape/head.h"
}
__global__ void clrvect_krnl(struct dat_t *ddat, int s, int f, int nThreads) {
/* multi-threaded kernel */
int offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset < nThreads){
switch (ddat->set[s].type) {
case DELAY:
ddat->set[s].desc.deldop.frame[f].fit_s[offset] = 0.0;
break;
case DOPPLER:
ddat->set[s].desc.doppler.frame[f].fit_s[offset] = 0.0;
break;
}
}
}
__global__ void clrvect_af_krnl(struct dat_t *ddat, int s, int nframes,
int nThreads, int frame_size) {
/* multi-threaded kernel for all frames in a set */
int total_offset = blockIdx.x * blockDim.x + threadIdx.x;
int frm = total_offset / frame_size;
int offset = total_offset % frame_size;
if ((offset < nThreads) && (frm < nframes)) {
switch (ddat->set[s].type) {
case DELAY:
ddat->set[s].desc.deldop.frame[frm].fit_s[offset] = 0.0;
break;
case DOPPLER:
ddat->set[s].desc.doppler.frame[frm].fit_s[offset] = 0.0;
break;
}
}
}
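/* Host-side usage sketch (an assumption, not code from this project): to clear
 * every frame of set s in one launch, with frame_size the per-frame stride and
 * maxThreads the per-frame element count, something like
 *
 *   int total = nframes * frame_size;
 *   hipLaunchKernelGGL(clrvect_af_krnl, dim3((total + 255) / 256), dim3(256),
 *                      0, 0, ddat, s, nframes, maxThreads, frame_size);
 *
 * covers all (frame, offset) pairs tested inside the kernel. */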
void cotrans_cuda(double y[3], double a[3][3], double x[3], int dir)
{
double t[3];
int i, j;
if (dir==1)
for (i=0;i<=2;i++) {
t[i] = 0.0;
for (j=0;j<=2;j++)
t[i] += a[i][j]*x[j];
}
if (dir==(-1))
for (i=0;i<=2;i++) {
t[i] = 0.0;
for (j=0;j<=2;j++)
t[i] += a[j][i]*x[j];
}
for (i=0;i<=2;i++)
y[i] = t[i];
}
void mmmul_cuda( double *x, double y[3][3], double *z)
{
double t[3][3];
int i, j, k;
for (i=0;i<=2;i++)
for (j=0;j<=2;j++) {
t[i][j] = 0.0;
for (k=0;k<=2;k++)
t[i][j] += y[i][k]*z[k*3+j];
}
for (i=0;i<=2;i++)
for (j=0;j<=2;j++)
x[i*3+j] = t[i][j];
}
__device__ void dev_mmmul( double x[3][3], double y[3][3], double z[3][3])
{
double t[3][3];
int i, j, k;
for (i=0;i<=2;i++)
for (j=0;j<=2;j++) {
t[i][j] = 0.0;
for (k=0;k<=2;k++)
t[i][j] += y[i][k]*z[k][j];
}
for (i=0;i<=2;i++)
for (j=0;j<=2;j++)
x[i][j] = t[i][j];
}
__device__ void dev_mmmul2(double3 *x, double y[3][3], double3 *z, int frm)
{ /* This version turns the original double x[3][3] and double z[3][3] into
* double3 pointers with nframes entries. Selection is made via f */
double t[3][3];
int i, f;
f = 3*frm;
for (i=0; i<=2; i++) {
t[i][0] = 0.0;
t[i][0] += y[i][0] * z[f+0].x;
t[i][0] += y[i][1] * z[f+1].x;
t[i][0] += y[i][2] * z[f+2].x;
t[i][1] = 0.0;
t[i][1] += y[i][0] * z[f+0].y;
t[i][1] += y[i][1] * z[f+1].y;
t[i][1] += y[i][2] * z[f+2].y;
t[i][2] = 0.0;
t[i][2] += y[i][0] * z[f+0].z;
t[i][2] += y[i][1] * z[f+1].z;
t[i][2] += y[i][2] * z[f+2].z;
}
for (i=0; i<=2; i++) {
x[f+i].x = t[i][0];
x[f+i].y = t[i][1];
x[f+i].z = t[i][2];
}
}
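/* Layout used by dev_mmmul2 above: frame frm occupies the three double3 rows
 * z[3*frm+0..2], with .x/.y/.z holding columns 0, 1, 2, so element (row r,
 * col c) of frame frm lives in component c of z[3*frm+r]; for example frame 4,
 * row 1, col 2 is z[13].z. */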
__device__ int dev_vp_iround(double x)
{
if (x < 0.0)
return ((int)(x - 0.5));
else
return ((int)(x + 0.5));
}
void mtrnsps_cuda( double *a, double b[3][3])
{
double t[3][3];
int i, j;
for (i=0;i<=2;i++)
for (j=0;j<=2;j++)
t[i][j] = b[j][i];
for (i=0;i<=2;i++)
for (j=0;j<=2;j++)
a[i*3+j] = t[i][j];
//a[i][j] = t[i][j];
}
void checkErrorAfterKernelLaunch(char *location) {
hipError_t cudaStatus;
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Kernel launch failed in %s: %s\n", location, hipGetErrorString(cudaStatus));
}
}
void deviceSyncAfterKernelLaunch(char *location) {
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
hipError_t cudaStatus;
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess)
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching the kernel in %s.\n", cudaStatus, location);
}
__device__ double dev_dot( double x[3], double y[3])
{
return x[0]*y[0]+x[1]*y[1]+x[2]*y[2];
}
__device__ double dev_dot2( double x[3], double3 *y)
{
/* This version replaces double y[3] with a double3 *y */
return x[0]*y->x+x[1]*y->y+x[2]*y->z;
}
__device__ double dev_normalize(double *u)
{
int i;
double norm;
norm = 0.0;
for (i=0; i<=2; i++)
norm += u[i]*u[i];
norm = sqrt(norm);
if (norm != 0.0) {
for (i=0; i<=2; i++)
u[i] /= norm;
}
return norm;
}
__device__ void dev_cotrans1( double y[3], double *a, double x[3], int dir)
{
double t[3];
int i, j;
if (dir==1)
for (i=0;i<=2;i++) {
t[i] = 0.0;
for (j=0;j<=2;j++)
t[i] += a[3*j+i]*x[j];
}
if (dir==(-1))
for (i=0;i<=2;i++) {
t[i] = 0.0;
for (j=0;j<=2;j++)
t[i] += a[3*i+j]*x[j];
}
for (i=0;i<=2;i++)
y[i] = t[i];
}
__device__ void dev_cotrans2( double y[3], double a[3][3], double x[3], int dir)
{
double t[3];
int i, j;
if (dir==1)
for (i=0;i<=2;i++) {
t[i] = 0.0;
for (j=0;j<=2;j++)
t[i] += a[i][j]*x[j];
}
if (dir==(-1))
for (i=0;i<=2;i++) {
t[i] = 0.0;
for (j=0;j<=2;j++)
t[i] += a[j][i]*x[j];
}
for (i=0;i<=2;i++)
y[i] = t[i];
}
__device__ void dev_cotrans3(double y[3], double a[3][3], double x[3],
int dir) {
double t[3];
int i, j;
if (dir == 1)
for (i = 0; i <= 2; i++) {
t[i] = 0.0;
for (j = 0; j <= 2; j++)
t[i] += a[i][j] * x[j];
}
if (dir == (-1))
for (i = 0; i <= 2; i++) {
t[i] = 0.0;
for (j = 0; j <= 2; j++)
t[i] += a[j][i] * x[j];
}
for (i = 0; i <= 2; i++)
y[i] = t[i];
}
__device__ void dev_cotrans4(float3 *y, double a[3][3], double x[3], int dir, int f)
{
double t[3];
int i, j;
if (dir==1)
for (i=0;i<=2;i++) {
t[i] = 0.0;
for (j=0;j<=2;j++)
t[i] += a[i][j]*x[j];
}
if (dir==(-1))
for (i=0;i<=2;i++) {
t[i] = 0.0;
for (j=0;j<=2;j++)
t[i] += a[j][i]*x[j];
}
y[f].x = t[0];
y[f].y = t[1];
y[f].z = t[2];
}
__device__ void dev_cotrans5(double3 *y, double a[3][3], double3 *x,
int dir) {
/* This version replaces double y[3] and double x[3] with double3 y and double3 x */
double t[3];
int i;
if (dir == 1)
for (i = 0; i <= 2; i++) {
t[i] = 0.0;
t[i] += a[i][0] * x->x;
t[i] += a[i][1] * x->y;
t[i] += a[i][2] * x->z;
}
if (dir == (-1))
for (i = 0; i <= 2; i++) {
t[i] = 0.0;
t[i] += a[0][i] * x->x;
t[i] += a[1][i] * x->y;
t[i] += a[2][i] * x->z;
}
y->x = t[0];
y->y = t[1];
y->z = t[2];
}
__device__ void dev_cotrans6(double y[3], double3 *a, double x[3], int dir, int frm) {
/* This version replaces double a[3][3] with a double3 pointer of length
 * 3*nframes (three rows per frame), selected with 'frm' */
double t[3];
int i, j, f;
f = frm*3;
if (dir == 1)
for (i = 0; i <= 2; i++) {
t[i] = 0.0;
t[i] += a[f+i].x * x[0];
t[i] += a[f+i].y * x[1];
t[i] += a[f+i].z * x[2];
}
	if (dir == (-1)) {
		/* transpose multiply: t[i] = sum_j a[j][i]*x[j]; each component is
		 * accumulated once per j */
		t[0] = 0.0;
		t[1] = 0.0;
		t[2] = 0.0;
		for (j=0; j<=2; j++) {
			t[0] += a[f+j].x * x[j];
			t[1] += a[f+j].y * x[j];
			t[2] += a[f+j].z * x[j];
		}
	}
for (i = 0; i <= 2; i++)
y[i] = t[i];
}
__device__ void dev_mtrnsps( double a[3][3], double b[3][3])
{
double t[3][3];
int i, j;
for (i=0;i<=2;i++)
for (j=0;j<=2;j++)
t[i][j] = b[j][i];
for (i=0;i<=2;i++)
for (j=0;j<=2;j++)
a[i][j] = t[i][j];
}
__device__ void dev_mtrnsps2(double3 *a, double b[3][3], int frm)
{ /* This version splits the double a[3][3] of the original function into
* three separate double3 vector variables. b[3][3] remains unchanged. */
double t[3][3];
int i, j, f;
f = frm *3;
for (i=0;i<=2;i++)
for (j=0;j<=2;j++)
t[i][j] = b[j][i];
a[f+0].x = t[0][0];
a[f+0].y = t[0][1];
a[f+0].z = t[0][2];
a[f+1].x = t[1][0];
a[f+1].y = t[1][1];
a[f+1].z = t[1][2];
a[f+2].x = t[2][0];
a[f+2].y = t[2][1];
a[f+2].z = t[2][2];
}
__device__ double radlaw_cuda(union radscat_t *radar, unsigned char *radtype,
int ilaw, double cosinc, int c, int f)
{
int irdl;
double tan2inc, sin2inc, hagforsarg, incidence, rho1, rho2, angres, angratio;
double diffxsec_proj = -9.99; /* dummy value */
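	/* Each case below evaluates one radar scattering law at incidence cosine
	 * 'cosinc'; f < 0 is treated as blank sky (zero return), the quasispecular
	 * laws cut off below cos_cutoff, and TABULARLAW linearly interpolates a
	 * table sampled every (PIE/2)/(n-1) radians of incidence angle. */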
switch (radtype[ilaw]) {
case COSINELAW_DIFF:
diffxsec_proj = radar[ilaw].RC.R.val*(radar[ilaw].RC.C.val + 1)
* pow( cosinc, 2*radar[ilaw].RC.C.val - 1);
break;
case TABULARLAW:
if (f < 0) {
diffxsec_proj = 0.0; /* blank sky */
} else {
incidence = acos(cosinc);
angres = (PIE/2) / (radar[ilaw].tabular.n - 1);
angratio = incidence/angres;
irdl = (int) floor(angratio);
rho1 = radar[ilaw].tabular.rho[irdl].val;
rho2 = radar[ilaw].tabular.rho[irdl+1].val;
diffxsec_proj = (rho1 + (angratio - irdl)*(rho2 - rho1)) / cosinc;
}
break;
case GAUSSIANLAW:
if (cosinc >= radar[ilaw].quasispec.cos_cutoff) {
tan2inc = 1/(cosinc*cosinc) - 1;
diffxsec_proj = radar[ilaw].quasispec.R.val*radar[ilaw].quasispec.C.val
* exp( -radar[ilaw].quasispec.C.val * tan2inc)
/ (cosinc*cosinc*cosinc*cosinc*cosinc);
} else {
diffxsec_proj = 0.0;
}
break;
case HAGFORSLAW:
if (cosinc >= radar[ilaw].quasispec.cos_cutoff) {
sin2inc = 1 - cosinc*cosinc;
hagforsarg = cosinc*cosinc*cosinc*cosinc + radar[ilaw].quasispec.C.val*sin2inc;
diffxsec_proj = 0.5*radar[ilaw].quasispec.R.val*radar[ilaw].quasispec.C.val
* pow( hagforsarg, -1.5) / cosinc;
} else {
diffxsec_proj = 0.0;
}
break;
case COSINELAW_QS:
if (cosinc >= radar[ilaw].quasispec.cos_cutoff)
diffxsec_proj = radar[ilaw].quasispec.R.val*(radar[ilaw].quasispec.C.val + 1)
* pow( cosinc, 2*radar[ilaw].quasispec.C.val - 1);
else
diffxsec_proj = 0.0;
break;
case GAUSSIAN_COSINE:
diffxsec_proj = radar[ilaw].hybrid.diff.R.val*(radar[ilaw].hybrid.diff.C.val + 1)
* pow( cosinc, 2*radar[ilaw].hybrid.diff.C.val - 1);
if (cosinc >= radar[ilaw].hybrid.qs.cos_cutoff) {
tan2inc = 1/(cosinc*cosinc) - 1;
diffxsec_proj += radar[ilaw].hybrid.qs.R.val*radar[ilaw].hybrid.qs.C.val
* exp( -radar[ilaw].hybrid.qs.C.val * tan2inc)
/ (cosinc*cosinc*cosinc*cosinc*cosinc);
}
break;
case HAGFORS_COSINE:
diffxsec_proj = radar[ilaw].hybrid.diff.R.val*(radar[ilaw].hybrid.diff.C.val + 1)
* pow( cosinc, 2*radar[ilaw].hybrid.diff.C.val - 1);
if (cosinc >= radar[ilaw].hybrid.qs.cos_cutoff) {
sin2inc = 1 - cosinc*cosinc;
hagforsarg = cosinc*cosinc*cosinc*cosinc + radar[ilaw].hybrid.qs.C.val*sin2inc;
diffxsec_proj += 0.5*radar[ilaw].hybrid.qs.R.val*radar[ilaw].hybrid.qs.C.val
* pow( hagforsarg, -1.5) / cosinc;
}
break;
case COSINE_COSINE:
diffxsec_proj = radar[ilaw].hybrid.diff.R.val*(radar[ilaw].hybrid.diff.C.val + 1)
* pow( cosinc, 2*radar[ilaw].hybrid.diff.C.val - 1);
if (cosinc >= radar[ilaw].hybrid.qs.cos_cutoff)
diffxsec_proj += radar[ilaw].hybrid.qs.R.val*(radar[ilaw].hybrid.qs.C.val + 1)
* pow( cosinc, 2*radar[ilaw].hybrid.qs.C.val - 1);
break;
case HARMCOSINE_DIFF:
if (f < 0) {
diffxsec_proj = 0.0; /* blank sky */
} else {
diffxsec_proj = radar[ilaw].harmcosine.local[c][f].R.val
* (radar[ilaw].harmcosine.local[c][f].C.val + 1)
* pow( cosinc, 2*radar[ilaw].harmcosine.local[c][f].C.val - 1);
}
break;
case INHOCOSINE_DIFF:
if (f < 0) {
diffxsec_proj = 0.0; /* blank sky */
} else {
diffxsec_proj = radar[ilaw].inhocosine.local[c][f].R.val
* (radar[ilaw].inhocosine.local[c][f].C.val + 1)
* pow( cosinc, 2*radar[ilaw].inhocosine.local[c][f].C.val - 1);
}
break;
case NOLAW:
printf("\n\npos2doppler-cuda.cu: can't set radar scattering law.\n\n");
default:
printf("\n\npos2doppler-cuda.cu: Unspecified error.\n\n");
}
return diffxsec_proj;
}
__device__ double dev_radlaw( struct photo_t *photo, int ilaw, double cosinc, int c, int f)
{
int i;
double tan2inc, sin2inc, hagforsarg, incidence, rho1, rho2, angres, angratio;
double diffxsec_proj = -9.99; /* dummy value */
switch (photo->radtype[ilaw]) {
case COSINELAW_DIFF:
diffxsec_proj = photo->radar[ilaw].RC.R.val*(photo->radar[ilaw].RC.C.val + 1)
* pow( cosinc, 2*photo->radar[ilaw].RC.C.val - 1);
break;
case TABULARLAW:
if (f < 0) {
diffxsec_proj = 0.0; /* blank sky */
} else {
incidence = acos(cosinc);
angres = (PIE/2) / (photo->radar[ilaw].tabular.n - 1);
angratio = incidence/angres;
i = (int) floor(angratio);
rho1 = photo->radar[ilaw].tabular.rho[i].val;
rho2 = photo->radar[ilaw].tabular.rho[i+1].val;
diffxsec_proj = (rho1 + (angratio - i)*(rho2 - rho1)) / cosinc;
}
break;
case GAUSSIANLAW:
if (cosinc >= photo->radar[ilaw].quasispec.cos_cutoff) {
tan2inc = 1/(cosinc*cosinc) - 1;
diffxsec_proj = photo->radar[ilaw].quasispec.R.val*photo->radar[ilaw].quasispec.C.val
* exp( -photo->radar[ilaw].quasispec.C.val * tan2inc)
/ (cosinc*cosinc*cosinc*cosinc*cosinc);
} else {
diffxsec_proj = 0.0;
}
break;
case HAGFORSLAW:
if (cosinc >= photo->radar[ilaw].quasispec.cos_cutoff) {
sin2inc = 1 - cosinc*cosinc;
hagforsarg = cosinc*cosinc*cosinc*cosinc + photo->radar[ilaw].quasispec.C.val*sin2inc;
diffxsec_proj = 0.5*photo->radar[ilaw].quasispec.R.val*photo->radar[ilaw].quasispec.C.val
* pow( hagforsarg, -1.5) / cosinc;
} else {
diffxsec_proj = 0.0;
}
break;
case COSINELAW_QS:
if (cosinc >= photo->radar[ilaw].quasispec.cos_cutoff)
diffxsec_proj = photo->radar[ilaw].quasispec.R.val*(photo->radar[ilaw].quasispec.C.val + 1)
* pow( cosinc, 2*photo->radar[ilaw].quasispec.C.val - 1);
else
diffxsec_proj = 0.0;
break;
case GAUSSIAN_COSINE:
diffxsec_proj = photo->radar[ilaw].hybrid.diff.R.val*(photo->radar[ilaw].hybrid.diff.C.val + 1)
* pow( cosinc, 2*photo->radar[ilaw].hybrid.diff.C.val - 1);
if (cosinc >= photo->radar[ilaw].hybrid.qs.cos_cutoff) {
tan2inc = 1/(cosinc*cosinc) - 1;
diffxsec_proj += photo->radar[ilaw].hybrid.qs.R.val*photo->radar[ilaw].hybrid.qs.C.val
* exp( -photo->radar[ilaw].hybrid.qs.C.val * tan2inc)
/ (cosinc*cosinc*cosinc*cosinc*cosinc);
}
break;
case HAGFORS_COSINE:
diffxsec_proj = photo->radar[ilaw].hybrid.diff.R.val*(photo->radar[ilaw].hybrid.diff.C.val + 1)
* pow( cosinc, 2*photo->radar[ilaw].hybrid.diff.C.val - 1);
if (cosinc >= photo->radar[ilaw].hybrid.qs.cos_cutoff) {
sin2inc = 1 - cosinc*cosinc;
hagforsarg = cosinc*cosinc*cosinc*cosinc + photo->radar[ilaw].hybrid.qs.C.val*sin2inc;
diffxsec_proj += 0.5*photo->radar[ilaw].hybrid.qs.R.val*photo->radar[ilaw].hybrid.qs.C.val
* pow( hagforsarg, -1.5) / cosinc;
}
break;
case COSINE_COSINE:
diffxsec_proj = photo->radar[ilaw].hybrid.diff.R.val*(photo->radar[ilaw].hybrid.diff.C.val + 1)
* pow( cosinc, 2*photo->radar[ilaw].hybrid.diff.C.val - 1);
if (cosinc >= photo->radar[ilaw].hybrid.qs.cos_cutoff)
diffxsec_proj += photo->radar[ilaw].hybrid.qs.R.val*(photo->radar[ilaw].hybrid.qs.C.val + 1)
* pow( cosinc, 2*photo->radar[ilaw].hybrid.qs.C.val - 1);
break;
case HARMCOSINE_DIFF:
if (f < 0) {
diffxsec_proj = 0.0; /* blank sky */
} else {
diffxsec_proj = photo->radar[ilaw].harmcosine.local[c][f].R.val
* (photo->radar[ilaw].harmcosine.local[c][f].C.val + 1)
* pow( cosinc, 2*photo->radar[ilaw].harmcosine.local[c][f].C.val - 1);
}
break;
case INHOCOSINE_DIFF:
if (f < 0) {
diffxsec_proj = 0.0; /* blank sky */
} else {
diffxsec_proj = photo->radar[ilaw].inhocosine.local[c][f].R.val
* (photo->radar[ilaw].inhocosine.local[c][f].C.val + 1)
* pow( cosinc, 2*photo->radar[ilaw].inhocosine.local[c][f].C.val - 1);
}
break;
case NOLAW:
printf("radlaw.c: can't set radar scattering law = \"none\" when radar data are used\n");
break;
default:
printf("radlaw.c: can't handle that radar scattering law yet\n");
}
return diffxsec_proj;
}
| 3ffe5448310d5f92021807ef7e2a04723d130b3a.cu | extern "C" {
#include "../shape/head.h"
}
__global__ void clrvect_krnl(struct dat_t *ddat, int s, int f, int nThreads) {
/* multi-threaded kernel */
int offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset < nThreads){
switch (ddat->set[s].type) {
case DELAY:
ddat->set[s].desc.deldop.frame[f].fit_s[offset] = 0.0;
break;
case DOPPLER:
ddat->set[s].desc.doppler.frame[f].fit_s[offset] = 0.0;
break;
}
}
}
__global__ void clrvect_af_krnl(struct dat_t *ddat, int s, int nframes,
int nThreads, int frame_size) {
/* multi-threaded kernel for all frames in a set */
int total_offset = blockIdx.x * blockDim.x + threadIdx.x;
int frm = total_offset / frame_size;
int offset = total_offset % frame_size;
if ((offset < nThreads) && (frm < nframes)) {
switch (ddat->set[s].type) {
case DELAY:
ddat->set[s].desc.deldop.frame[frm].fit_s[offset] = 0.0;
break;
case DOPPLER:
ddat->set[s].desc.doppler.frame[frm].fit_s[offset] = 0.0;
break;
}
}
}
void cotrans_cuda(double y[3], double a[3][3], double x[3], int dir)
{
double t[3];
int i, j;
if (dir==1)
for (i=0;i<=2;i++) {
t[i] = 0.0;
for (j=0;j<=2;j++)
t[i] += a[i][j]*x[j];
}
if (dir==(-1))
for (i=0;i<=2;i++) {
t[i] = 0.0;
for (j=0;j<=2;j++)
t[i] += a[j][i]*x[j];
}
for (i=0;i<=2;i++)
y[i] = t[i];
}
void mmmul_cuda( double *x, double y[3][3], double *z)
{
double t[3][3];
int i, j, k;
for (i=0;i<=2;i++)
for (j=0;j<=2;j++) {
t[i][j] = 0.0;
for (k=0;k<=2;k++)
t[i][j] += y[i][k]*z[k*3+j];
}
for (i=0;i<=2;i++)
for (j=0;j<=2;j++)
x[i*3+j] = t[i][j];
}
__device__ void dev_mmmul( double x[3][3], double y[3][3], double z[3][3])
{
double t[3][3];
int i, j, k;
for (i=0;i<=2;i++)
for (j=0;j<=2;j++) {
t[i][j] = 0.0;
for (k=0;k<=2;k++)
t[i][j] += y[i][k]*z[k][j];
}
for (i=0;i<=2;i++)
for (j=0;j<=2;j++)
x[i][j] = t[i][j];
}
__device__ void dev_mmmul2(double3 *x, double y[3][3], double3 *z, int frm)
{ /* This version turns the original double x[3][3] and double z[3][3] into
* double3 pointers with nframes entries. Selection is made via f */
double t[3][3];
int i, f;
f = 3*frm;
for (i=0; i<=2; i++) {
t[i][0] = 0.0;
t[i][0] += y[i][0] * z[f+0].x;
t[i][0] += y[i][1] * z[f+1].x;
t[i][0] += y[i][2] * z[f+2].x;
t[i][1] = 0.0;
t[i][1] += y[i][0] * z[f+0].y;
t[i][1] += y[i][1] * z[f+1].y;
t[i][1] += y[i][2] * z[f+2].y;
t[i][2] = 0.0;
t[i][2] += y[i][0] * z[f+0].z;
t[i][2] += y[i][1] * z[f+1].z;
t[i][2] += y[i][2] * z[f+2].z;
}
for (i=0; i<=2; i++) {
x[f+i].x = t[i][0];
x[f+i].y = t[i][1];
x[f+i].z = t[i][2];
}
}
__device__ int dev_vp_iround(double x)
{
if (x < 0.0)
return ((int)(x - 0.5));
else
return ((int)(x + 0.5));
}
void mtrnsps_cuda( double *a, double b[3][3])
{
double t[3][3];
int i, j;
for (i=0;i<=2;i++)
for (j=0;j<=2;j++)
t[i][j] = b[j][i];
for (i=0;i<=2;i++)
for (j=0;j<=2;j++)
a[i*3+j] = t[i][j];
//a[i][j] = t[i][j];
}
void checkErrorAfterKernelLaunch(char *location) {
cudaError_t cudaStatus;
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Kernel launch failed in %s: %s\n", location, cudaGetErrorString(cudaStatus));
}
}
void deviceSyncAfterKernelLaunch(char *location) {
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaError_t cudaStatus;
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching the kernel in %s.\n", cudaStatus, location);
}
__device__ double dev_dot( double x[3], double y[3])
{
return x[0]*y[0]+x[1]*y[1]+x[2]*y[2];
}
__device__ double dev_dot2( double x[3], double3 *y)
{
/* This version replaces double y[3] with a double3 *y */
return x[0]*y->x+x[1]*y->y+x[2]*y->z;
}
__device__ double dev_normalize(double *u)
{
int i;
double norm;
norm = 0.0;
for (i=0; i<=2; i++)
norm += u[i]*u[i];
norm = sqrt(norm);
if (norm != 0.0) {
for (i=0; i<=2; i++)
u[i] /= norm;
}
return norm;
}
__device__ void dev_cotrans1( double y[3], double *a, double x[3], int dir)
{
double t[3];
int i, j;
if (dir==1)
for (i=0;i<=2;i++) {
t[i] = 0.0;
for (j=0;j<=2;j++)
t[i] += a[3*j+i]*x[j];
}
if (dir==(-1))
for (i=0;i<=2;i++) {
t[i] = 0.0;
for (j=0;j<=2;j++)
t[i] += a[3*i+j]*x[j];
}
for (i=0;i<=2;i++)
y[i] = t[i];
}
__device__ void dev_cotrans2( double y[3], double a[3][3], double x[3], int dir)
{
double t[3];
int i, j;
if (dir==1)
for (i=0;i<=2;i++) {
t[i] = 0.0;
for (j=0;j<=2;j++)
t[i] += a[i][j]*x[j];
}
if (dir==(-1))
for (i=0;i<=2;i++) {
t[i] = 0.0;
for (j=0;j<=2;j++)
t[i] += a[j][i]*x[j];
}
for (i=0;i<=2;i++)
y[i] = t[i];
}
__device__ void dev_cotrans3(double y[3], double a[3][3], double x[3],
int dir) {
double t[3];
int i, j;
if (dir == 1)
for (i = 0; i <= 2; i++) {
t[i] = 0.0;
for (j = 0; j <= 2; j++)
t[i] += a[i][j] * x[j];
}
if (dir == (-1))
for (i = 0; i <= 2; i++) {
t[i] = 0.0;
for (j = 0; j <= 2; j++)
t[i] += a[j][i] * x[j];
}
for (i = 0; i <= 2; i++)
y[i] = t[i];
}
__device__ void dev_cotrans4(float3 *y, double a[3][3], double x[3], int dir, int f)
{
double t[3];
int i, j;
if (dir==1)
for (i=0;i<=2;i++) {
t[i] = 0.0;
for (j=0;j<=2;j++)
t[i] += a[i][j]*x[j];
}
if (dir==(-1))
for (i=0;i<=2;i++) {
t[i] = 0.0;
for (j=0;j<=2;j++)
t[i] += a[j][i]*x[j];
}
y[f].x = t[0];
y[f].y = t[1];
y[f].z = t[2];
}
__device__ void dev_cotrans5(double3 *y, double a[3][3], double3 *x,
int dir) {
/* This version replaces double y[3] and double x[3] with double3 y and double3 x */
double t[3];
int i;
if (dir == 1)
for (i = 0; i <= 2; i++) {
t[i] = 0.0;
t[i] += a[i][0] * x->x;
t[i] += a[i][1] * x->y;
t[i] += a[i][2] * x->z;
}
if (dir == (-1))
for (i = 0; i <= 2; i++) {
t[i] = 0.0;
t[i] += a[0][i] * x->x;
t[i] += a[1][i] * x->y;
t[i] += a[2][i] * x->z;
}
y->x = t[0];
y->y = t[1];
y->z = t[2];
}
__device__ void dev_cotrans6(double y[3], double3 *a, double x[3], int dir, int frm) {
/* This version replaces double a[3][3] with a double3 pointer of length
 * 3*nframes (three rows per frame), selected with 'frm' */
double t[3];
int i, j, f;
f = frm*3;
if (dir == 1)
for (i = 0; i <= 2; i++) {
t[i] = 0.0;
t[i] += a[f+i].x * x[0];
t[i] += a[f+i].y * x[1];
t[i] += a[f+i].z * x[2];
}
	if (dir == (-1)) {
		/* transpose multiply: t[i] = sum_j a[j][i]*x[j]; each component is
		 * accumulated once per j */
		t[0] = 0.0;
		t[1] = 0.0;
		t[2] = 0.0;
		for (j=0; j<=2; j++) {
			t[0] += a[f+j].x * x[j];
			t[1] += a[f+j].y * x[j];
			t[2] += a[f+j].z * x[j];
		}
	}
for (i = 0; i <= 2; i++)
y[i] = t[i];
}
__device__ void dev_mtrnsps( double a[3][3], double b[3][3])
{
double t[3][3];
int i, j;
for (i=0;i<=2;i++)
for (j=0;j<=2;j++)
t[i][j] = b[j][i];
for (i=0;i<=2;i++)
for (j=0;j<=2;j++)
a[i][j] = t[i][j];
}
__device__ void dev_mtrnsps2(double3 *a, double b[3][3], int frm)
{ /* This version splits the double a[3][3] of the original function into
* three separate double3 vector variables. b[3][3] remains unchanged. */
double t[3][3];
int i, j, f;
f = frm *3;
for (i=0;i<=2;i++)
for (j=0;j<=2;j++)
t[i][j] = b[j][i];
a[f+0].x = t[0][0];
a[f+0].y = t[0][1];
a[f+0].z = t[0][2];
a[f+1].x = t[1][0];
a[f+1].y = t[1][1];
a[f+1].z = t[1][2];
a[f+2].x = t[2][0];
a[f+2].y = t[2][1];
a[f+2].z = t[2][2];
}
__device__ double radlaw_cuda(union radscat_t *radar, unsigned char *radtype,
int ilaw, double cosinc, int c, int f)
{
int irdl;
double tan2inc, sin2inc, hagforsarg, incidence, rho1, rho2, angres, angratio;
double diffxsec_proj = -9.99; /* dummy value */
switch (radtype[ilaw]) {
case COSINELAW_DIFF:
diffxsec_proj = radar[ilaw].RC.R.val*(radar[ilaw].RC.C.val + 1)
* pow( cosinc, 2*radar[ilaw].RC.C.val - 1);
break;
case TABULARLAW:
if (f < 0) {
diffxsec_proj = 0.0; /* blank sky */
} else {
incidence = acos(cosinc);
angres = (PIE/2) / (radar[ilaw].tabular.n - 1);
angratio = incidence/angres;
irdl = (int) floor(angratio);
rho1 = radar[ilaw].tabular.rho[irdl].val;
rho2 = radar[ilaw].tabular.rho[irdl+1].val;
diffxsec_proj = (rho1 + (angratio - irdl)*(rho2 - rho1)) / cosinc;
}
break;
case GAUSSIANLAW:
if (cosinc >= radar[ilaw].quasispec.cos_cutoff) {
tan2inc = 1/(cosinc*cosinc) - 1;
diffxsec_proj = radar[ilaw].quasispec.R.val*radar[ilaw].quasispec.C.val
* exp( -radar[ilaw].quasispec.C.val * tan2inc)
/ (cosinc*cosinc*cosinc*cosinc*cosinc);
} else {
diffxsec_proj = 0.0;
}
break;
case HAGFORSLAW:
if (cosinc >= radar[ilaw].quasispec.cos_cutoff) {
sin2inc = 1 - cosinc*cosinc;
hagforsarg = cosinc*cosinc*cosinc*cosinc + radar[ilaw].quasispec.C.val*sin2inc;
diffxsec_proj = 0.5*radar[ilaw].quasispec.R.val*radar[ilaw].quasispec.C.val
* pow( hagforsarg, -1.5) / cosinc;
} else {
diffxsec_proj = 0.0;
}
break;
case COSINELAW_QS:
if (cosinc >= radar[ilaw].quasispec.cos_cutoff)
diffxsec_proj = radar[ilaw].quasispec.R.val*(radar[ilaw].quasispec.C.val + 1)
* pow( cosinc, 2*radar[ilaw].quasispec.C.val - 1);
else
diffxsec_proj = 0.0;
break;
case GAUSSIAN_COSINE:
diffxsec_proj = radar[ilaw].hybrid.diff.R.val*(radar[ilaw].hybrid.diff.C.val + 1)
* pow( cosinc, 2*radar[ilaw].hybrid.diff.C.val - 1);
if (cosinc >= radar[ilaw].hybrid.qs.cos_cutoff) {
tan2inc = 1/(cosinc*cosinc) - 1;
diffxsec_proj += radar[ilaw].hybrid.qs.R.val*radar[ilaw].hybrid.qs.C.val
* exp( -radar[ilaw].hybrid.qs.C.val * tan2inc)
/ (cosinc*cosinc*cosinc*cosinc*cosinc);
}
break;
case HAGFORS_COSINE:
diffxsec_proj = radar[ilaw].hybrid.diff.R.val*(radar[ilaw].hybrid.diff.C.val + 1)
* pow( cosinc, 2*radar[ilaw].hybrid.diff.C.val - 1);
if (cosinc >= radar[ilaw].hybrid.qs.cos_cutoff) {
sin2inc = 1 - cosinc*cosinc;
hagforsarg = cosinc*cosinc*cosinc*cosinc + radar[ilaw].hybrid.qs.C.val*sin2inc;
diffxsec_proj += 0.5*radar[ilaw].hybrid.qs.R.val*radar[ilaw].hybrid.qs.C.val
* pow( hagforsarg, -1.5) / cosinc;
}
break;
case COSINE_COSINE:
diffxsec_proj = radar[ilaw].hybrid.diff.R.val*(radar[ilaw].hybrid.diff.C.val + 1)
* pow( cosinc, 2*radar[ilaw].hybrid.diff.C.val - 1);
if (cosinc >= radar[ilaw].hybrid.qs.cos_cutoff)
diffxsec_proj += radar[ilaw].hybrid.qs.R.val*(radar[ilaw].hybrid.qs.C.val + 1)
* pow( cosinc, 2*radar[ilaw].hybrid.qs.C.val - 1);
break;
case HARMCOSINE_DIFF:
if (f < 0) {
diffxsec_proj = 0.0; /* blank sky */
} else {
diffxsec_proj = radar[ilaw].harmcosine.local[c][f].R.val
* (radar[ilaw].harmcosine.local[c][f].C.val + 1)
* pow( cosinc, 2*radar[ilaw].harmcosine.local[c][f].C.val - 1);
}
break;
case INHOCOSINE_DIFF:
if (f < 0) {
diffxsec_proj = 0.0; /* blank sky */
} else {
diffxsec_proj = radar[ilaw].inhocosine.local[c][f].R.val
* (radar[ilaw].inhocosine.local[c][f].C.val + 1)
* pow( cosinc, 2*radar[ilaw].inhocosine.local[c][f].C.val - 1);
}
break;
case NOLAW:
printf("\n\npos2doppler-cuda.cu: can't set radar scattering law.\n\n");
default:
printf("\n\npos2doppler-cuda.cu: Unspecified error.\n\n");
}
return diffxsec_proj;
}
__device__ double dev_radlaw( struct photo_t *photo, int ilaw, double cosinc, int c, int f)
{
int i;
double tan2inc, sin2inc, hagforsarg, incidence, rho1, rho2, angres, angratio;
double diffxsec_proj = -9.99; /* dummy value */
switch (photo->radtype[ilaw]) {
case COSINELAW_DIFF:
diffxsec_proj = photo->radar[ilaw].RC.R.val*(photo->radar[ilaw].RC.C.val + 1)
* pow( cosinc, 2*photo->radar[ilaw].RC.C.val - 1);
break;
case TABULARLAW:
if (f < 0) {
diffxsec_proj = 0.0; /* blank sky */
} else {
incidence = acos(cosinc);
angres = (PIE/2) / (photo->radar[ilaw].tabular.n - 1);
angratio = incidence/angres;
i = (int) floor(angratio);
rho1 = photo->radar[ilaw].tabular.rho[i].val;
rho2 = photo->radar[ilaw].tabular.rho[i+1].val;
diffxsec_proj = (rho1 + (angratio - i)*(rho2 - rho1)) / cosinc;
}
break;
case GAUSSIANLAW:
if (cosinc >= photo->radar[ilaw].quasispec.cos_cutoff) {
tan2inc = 1/(cosinc*cosinc) - 1;
diffxsec_proj = photo->radar[ilaw].quasispec.R.val*photo->radar[ilaw].quasispec.C.val
* exp( -photo->radar[ilaw].quasispec.C.val * tan2inc)
/ (cosinc*cosinc*cosinc*cosinc*cosinc);
} else {
diffxsec_proj = 0.0;
}
break;
case HAGFORSLAW:
if (cosinc >= photo->radar[ilaw].quasispec.cos_cutoff) {
sin2inc = 1 - cosinc*cosinc;
hagforsarg = cosinc*cosinc*cosinc*cosinc + photo->radar[ilaw].quasispec.C.val*sin2inc;
diffxsec_proj = 0.5*photo->radar[ilaw].quasispec.R.val*photo->radar[ilaw].quasispec.C.val
* pow( hagforsarg, -1.5) / cosinc;
} else {
diffxsec_proj = 0.0;
}
break;
case COSINELAW_QS:
if (cosinc >= photo->radar[ilaw].quasispec.cos_cutoff)
diffxsec_proj = photo->radar[ilaw].quasispec.R.val*(photo->radar[ilaw].quasispec.C.val + 1)
* pow( cosinc, 2*photo->radar[ilaw].quasispec.C.val - 1);
else
diffxsec_proj = 0.0;
break;
case GAUSSIAN_COSINE:
diffxsec_proj = photo->radar[ilaw].hybrid.diff.R.val*(photo->radar[ilaw].hybrid.diff.C.val + 1)
* pow( cosinc, 2*photo->radar[ilaw].hybrid.diff.C.val - 1);
if (cosinc >= photo->radar[ilaw].hybrid.qs.cos_cutoff) {
tan2inc = 1/(cosinc*cosinc) - 1;
diffxsec_proj += photo->radar[ilaw].hybrid.qs.R.val*photo->radar[ilaw].hybrid.qs.C.val
* exp( -photo->radar[ilaw].hybrid.qs.C.val * tan2inc)
/ (cosinc*cosinc*cosinc*cosinc*cosinc);
}
break;
case HAGFORS_COSINE:
diffxsec_proj = photo->radar[ilaw].hybrid.diff.R.val*(photo->radar[ilaw].hybrid.diff.C.val + 1)
* pow( cosinc, 2*photo->radar[ilaw].hybrid.diff.C.val - 1);
if (cosinc >= photo->radar[ilaw].hybrid.qs.cos_cutoff) {
sin2inc = 1 - cosinc*cosinc;
hagforsarg = cosinc*cosinc*cosinc*cosinc + photo->radar[ilaw].hybrid.qs.C.val*sin2inc;
diffxsec_proj += 0.5*photo->radar[ilaw].hybrid.qs.R.val*photo->radar[ilaw].hybrid.qs.C.val
* pow( hagforsarg, -1.5) / cosinc;
}
break;
case COSINE_COSINE:
diffxsec_proj = photo->radar[ilaw].hybrid.diff.R.val*(photo->radar[ilaw].hybrid.diff.C.val + 1)
* pow( cosinc, 2*photo->radar[ilaw].hybrid.diff.C.val - 1);
if (cosinc >= photo->radar[ilaw].hybrid.qs.cos_cutoff)
diffxsec_proj += photo->radar[ilaw].hybrid.qs.R.val*(photo->radar[ilaw].hybrid.qs.C.val + 1)
* pow( cosinc, 2*photo->radar[ilaw].hybrid.qs.C.val - 1);
break;
case HARMCOSINE_DIFF:
if (f < 0) {
diffxsec_proj = 0.0; /* blank sky */
} else {
diffxsec_proj = photo->radar[ilaw].harmcosine.local[c][f].R.val
* (photo->radar[ilaw].harmcosine.local[c][f].C.val + 1)
* pow( cosinc, 2*photo->radar[ilaw].harmcosine.local[c][f].C.val - 1);
}
break;
case INHOCOSINE_DIFF:
if (f < 0) {
diffxsec_proj = 0.0; /* blank sky */
} else {
diffxsec_proj = photo->radar[ilaw].inhocosine.local[c][f].R.val
* (photo->radar[ilaw].inhocosine.local[c][f].C.val + 1)
* pow( cosinc, 2*photo->radar[ilaw].inhocosine.local[c][f].C.val - 1);
}
break;
case NOLAW:
printf("radlaw.c: can't set radar scattering law = \"none\" when radar data are used\n");
break;
default:
printf("radlaw.c: can't handle that radar scattering law yet\n");
}
return diffxsec_proj;
}
|
4cb7e6a3f9e647d31e0aceb0d8cf4ead52f228b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__device__ void mul(float a, float b, float *res)
{
*res = a * b;
// overflow
*res = (*res) * (1e30f * 1e10f);
}
__global__ void dot_prod(float *x, float *y, int size)
{
	float d = 0.0f; // start the accumulator at zero
for (int i=0; i < size; ++i)
{
float tmp;
mul(x[i], y[i], &tmp);
d += tmp;
}
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0) {
printf("dot: %f\n", d);
}
}
| 4cb7e6a3f9e647d31e0aceb0d8cf4ead52f228b9.cu |
#include <stdio.h>
__device__ void mul(float a, float b, float *res)
{
*res = a * b;
// overflow
*res = (*res) * (1e30f * 1e10f);
}
__global__ void dot_prod(float *x, float *y, int size)
{
	float d = 0.0f; // start the accumulator at zero
for (int i=0; i < size; ++i)
{
float tmp;
mul(x[i], y[i], &tmp);
d += tmp;
}
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0) {
printf("dot: %f\n", d);
}
}
|
61a31a0ce39ddbe40a3e0e9ea08ecf0f001c7395.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <unistd.h>
#include <sys/time.h>
#include <stdlib.h>
#include "matrixfunctions.h"
#include <cblas.h>
/* ******** NAIVE FUNCTION FOR TESTING PURPOSE *********/
void scalMatrix(double *B,int incB,double *A,int incA,int rows,int cols,double scalar){
memcpy(B,A,rows*cols*sizeof(double));
cblas_dscal(rows*cols,scalar,B,1);
}
void addDiag(double *B,int incB,double *A,int incA,int rows,int cols,double scalar){
memcpy(B,A,rows*cols*sizeof(double));
int i,j;
for(i=0;i<rows;i++)
B[i*incB+i] = scalar+B[i*incA+i];
}
void naiveMatrixAdd(double *C,int incC,double *A,int incA,double *B,int incB,int rows,int cols){
int i,j;
for(i=0;i<rows;i++)
for(j=0;j<cols;j++)
C[i*incC+j] = A[i*incA+j] + B[i*incB+j];
}
void mul(double *A, double *B, double *C,
int lda, int ldb, int ldc,
int XA, int XB, int XC,
int YA, int YB, int YC,
double alpha, double beta) {
// hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, XB, YA, XA, &alpha, B, ldb, A, lda, &beta, C, ldc);
cblas_dgemm(CblasRowMajor,CblasNoTrans,CblasNoTrans,XA,YA,XB,alpha,A,lda,B,ldb,beta,C,ldc);
}
void add(double *A, double *B, double *C,
int lda, int ldb, int ldc,
int XA, int YA,
double alpha, double beta) {
// hipblasDgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, XA, YA, &alpha, A, lda, &beta, B, ldb, C, ldc);
int i,j;
for(i=0;i<XA;i++)
for(j=0;j<YA;j++)
C[i*ldc+j] = alpha*A[i*lda+j] + beta*B[i*ldb+j];
}
void serialStrassen(double *A, double *B, double *C,
int lda, int ldb, int ldc,
int XA, int XB, int XC,
int YA, int YB, int YC,
int depth) {
int XA2 = XA / 2;
int XB2 = XB / 2;
int XC2 = XC / 2;
int YA2 = YA / 2;
int YB2 = YB / 2;
int YC2 = YC / 2;
//double *W_1, *W_2;
int lw1 = (XA2 > XC2 ? XA2 : XC2);
int lw2 = XB2;
//hipMalloc((void **)&W_1, lw1 * YA2 * sizeof(double));
double* W_1 = (double*)malloc(lw1 * YA2 * sizeof(double));
// hipMalloc((void **)&W_2, lw2 * YB2 * sizeof(double));
double* W_2 = (double*)malloc(lw2 * YB2 * sizeof(double));
if( W_1 == NULL ) printf("Error\n");
if( W_2 == NULL ) printf("Error2\n");
int dXA = XA2;
int dYA = YA2 * lda;
int dXB = XB2;
int dYB = YB2 * ldb;
int dXC = XC2;
int dYC = YC2 * ldc;
double *A11, *A12, *A21, *A22;
double *B11, *B12, *B21, *B22;
double *C11, *C12, *C21, *C22;
A11 = A;
A12 = A + dXA;
A21 = A + dYA;
A22 = A + dXA + dYA;
B11 = B;
B12 = B + dXB;
B21 = B + dYB;
B22 = B + dXB + dYB;
C11 = C;
C12 = C + dXC;
C21 = C + dYC;
C22 = C + dXC + dYC;
/* cutoff criteria */
bool stop = false;
#if 0
int cutoff = 2048;
float mm = cutoff / XB2;
float nn = cutoff / YA2;
float kk = cutoff / XA2;
if ((mm + nn + kk) >= 3) {
stop = true;
}
#endif
if (depth <= 1 || stop) {
add( A11, A21, W_1, lda, lda, lw1, XA2, YA2, 1.0, -1.0); // W_1 = A11 - A21
add( B22, B12, W_2, ldb, ldb, lw2, XB2, YB2, 1.0, -1.0); // W_2 = B22 - B12
mul( W_1, W_2, C21, lw1, lw2, ldc, XA2, XB2, XC2, YA2, YB2, YC2, 1.0, 0.0); // C21 = W_1 * W_2
add( A21, A22, W_1, lda, lda, lw1, XA2, YA2, 1.0, 1.0); // W_1 = A21 + A22
add( B12, B11, W_2, ldb, ldb, lw2, XB2, YB2, 1.0, -1.0); // W_2 = B12 - B11
mul( W_1, W_2, C22, lw1, lw2, ldc, XA2, XB2, XC2, YA2, YB2, YC2, 1.0, 0.0); // C22 = W_1 * W_2
add( W_1, A11, W_1, lw1, lda, lw1, XA2, YA2, 1.0, -1.0); // W_1 = W_1- A11
add( B22, W_2, W_2, ldb, lw2, lw2, XB2, YB2, 1.0, -1.0); // W_2 = B22 - W_2
mul( W_1, W_2, C11, lw1, lw2, ldc, XA2, XB2, XC2, YA2, YB2, YC2, 1.0, 0.0); // C11 = W_1 * W_2
add( A12, W_1, W_1, lda, lw1, lw1, XA2, YA2, 1.0, -1.0); // W_1 = A12 - W_1
mul( W_1, B22, C12, lw1, ldb, ldc, XA2, XB2, XC2, YA2, YB2, YC2, 1.0, 0.0); // C12 = W_1 * B22
add( C22, C12, C12, ldc, ldc, ldc, XC2, YC2, 1.0, 1.0); // C12 = C22 + C12
mul( A11, B11, W_1, lda, ldb, lw1, XA2, XB2, XC2, YA2, YB2, YC2, 1.0, 0.0); // W_1= A11 * B11
add( W_1, C11, C11, lw1, ldc, ldc, XC2, YC2, 1.0, 1.0); // C11 = W_1 + C11
add( C11, C12, C12, ldc, ldc, ldc, XC2, YC2, 1.0, 1.0); // C12 = C11 + C12
add( C11, C21, C11, ldc, ldc, ldc, XC2, YC2, 1.0, 1.0); // C11 = C11 + C21
add( W_2, B21, W_2, lw2, ldb, lw2, XB2, YB2, 1.0, -1.0); // W_2 = W_2- B21
mul( A22, W_2, C21, lda, lw2, ldc, XA2, XB2, XC2, YA2, YB2, YC2, 1.0, 0.0); // C21 = A22 * W_2
add( C11, C21, C21, ldc, ldc, ldc, XC2, YC2, 1.0, -1.0); // C11 = C11 - C21
add( C11, C22, C22, ldc, ldc, ldc, XC2, YC2, 1.0, 1.0); // C22 = C11 + C22
mul( A12, B21, C11, lda, ldb, ldc, XA2, XB2, XC2, YA2, YB2, YC2, 1.0, 0.0); // C11 = A12 * B21
add( W_1, C11, C11, lw1, ldc, ldc, XC2, YC2, 1.0, 1.0); // C11 = W_1+ C11
}
else {
add( A11, A21, W_1, lda, lda, lw1, XA2, YA2, 1.0, -1.0); // W_1 = A11 - A21
add( B22, B12, W_2, ldb, ldb, lw2, XB2, YB2, 1.0, -1.0); // W_2 = B22 - B12
serialStrassen( W_1, W_2, C21, lw1, lw2, ldc, XA2, XB2, XC2, YA2, YB2, YC2, depth - 1);
add( A21, A22, W_1, lda, lda, lw1, XA2, YA2, 1.0, 1.0); // W_1 = A21 + A22
add( B12, B11, W_2, ldb, ldb, lw2, XB2, YB2, 1.0, -1.0); // W_2 = B12 - B11
serialStrassen( W_1, W_2, C22, lw1, lw2, ldc, XA2, XB2, XC2, YA2, YB2, YC2, depth - 1);
add( W_1, A11, W_1, lw1, lda, lw1, XA2, YA2, 1.0, -1.0); // W_1 = W_1- A11
add( B22, W_2, W_2, ldb, lw2, lw2, XB2, YB2, 1.0, -1.0); // W_2 = B22 - W_2
serialStrassen( W_1, W_2, C11, lw1, lw2, ldc, XA2, XB2, XC2, YA2, YB2, YC2, depth - 1);
add( A12, W_1, W_1, lda, lw1, lw1, XA2, YA2, 1.0, -1.0); // W_1 = A12 - W_1
serialStrassen( W_1, B22, C12, lw1, ldb, ldc, XA2, XB2, XC2, YA2, YB2, YC2, depth - 1);
add( C22, C12, C12, ldc, ldc, ldc, XC2, YC2, 1.0, 1.0); // C12 = C22 + C12
serialStrassen( A11, B11, W_1, lda, ldb, lw1, XA2, XB2, XC2, YA2, YB2, YC2, depth - 1);
add( W_1, C11, C11, lw1, ldc, ldc, XC2, YC2, 1.0, 1.0); // C11 = W_1 + C11
add( C11, C12, C12, ldc, ldc, ldc, XC2, YC2, 1.0, 1.0); // C12 = C11 + C12
add( C11, C21, C11, ldc, ldc, ldc, XC2, YC2, 1.0, 1.0); // C11 = C11 + C21
add( W_2, B21, W_2, lw2, ldb, lw2, XB2, YB2, 1.0, -1.0); // W_2 = W_2- B21
serialStrassen( A22, W_2, C21, lda, lw2, ldc, XA2, XB2, XC2, YA2, YB2, YC2, depth - 1);
add( C11, C21, C21, ldc, ldc, ldc, XC2, YC2, 1.0, -1.0); // C11 = C11 - C21
add( C11, C22, C22, ldc, ldc, ldc, XC2, YC2, 1.0, 1.0); // C22 = C11 + C22
serialStrassen( A12, B21, C11, lda, ldb, ldc, XA2, XB2, XC2, YA2, YB2, YC2, depth - 1);
add( W_1, C11, C11, lw1, ldc, ldc, XC2, YC2, 1.0, 1.0); // C11 = W_1+ C11
}
free(W_1);
free(W_2);
/* dynamic peeling fix-up */
int pxa = XA % 2;
int pya = YA % 2;
int pxb = XB % 2;
int pyb = YB % 2;
int pxc = XC % 2;
int pyc = YC % 2;
int nxa = XA - pxa;
int nya = YA - pya;
int nxb = XB - pxb;
int nyb = YB - pyb;
int nxc = XC - pxc;
int nyc = YC - pyc;
double *a12, *a21;
double *b12, *b21;
double *c12, *c21;
int dxa = nxa;
int dya = nya * lda;
int dxb = nxb;
int dyb = nyb * ldb;
int dxc = nxc;
int dyc = nyc * ldc;
a12 = A + dxa;
a21 = A + dya;
// a22 = A + dxa + dya;
b12 = B + dxb;
b21 = B + dyb;
// b22 = B + dxb + dyb;
c12 = C + dxc;
c21 = C + dyc;
// c22 = C + dxc + dyc;
/*
A11 = nxa x nya
a12 = pxa x nya
a21 = nxa x pya
a22 = pxa x pya
*/
mul( a21, B11, c21, lda, ldb, ldc, nxa, XB, XC, pya, nyb, pyc, 1.0, 0.0);
mul( A11, b12, c12, lda, ldb, ldc, nxa, pxb, pxc, YA, nyb, YC, 1.0, 0.0);
mul( a12, b21, C11, lda, ldb, ldc, pxa, XB, XC, YA, pyb, YC, 1.0, 1.0);
}
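/* Dynamic peeling: the recursion above only covers the even-sized leading
 * blocks (nxa x nya etc.), so for odd dimensions the trailing row/column
 * strips a21, b12 and the a12*b21 correction are folded back in with the three
 * plain mul() calls at the end; e.g. for 5x5 inputs the 4x4 core goes through
 * Strassen and the remaining strips are patched afterwards. */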
void naiveMatrixMul(double* C,int incC,double* A,int incA,double *B,int incB,int m,int k,int n){
double *tmp = (double*)malloc(m*n*sizeof(double));
memset(tmp,0,m*n*sizeof(double));
//serialStrassen(A,B,tmp,incA,incB,m,m,k,m,k,n,n,1);
///serialStrassen(A,B,C,incA,incB,incC,m,k,m,k,n,n,1);
cblas_dgemm(CblasRowMajor,CblasNoTrans,CblasNoTrans,m,n,k,1.0,A,incA,B,incB,0.0,tmp,n);
//printf("error strassen\n");
memcpy(C,tmp,m*n*sizeof(double));
free(tmp);
}
void naivePolComp(double *B,int incB,double *A,int incA,int rows,int cols,double *coef,int coefNum){
double *tmpMatrix = (double*)malloc(rows*cols*sizeof(double));
double *tmpMatrix2 = (double*)malloc(rows*cols*sizeof(double));
double *A_2 = (double*)malloc(rows*cols*sizeof(double));
naiveMatrixMul(A_2,cols,A,incA,A,incA,rows,cols,cols) ;
//matrixMul(A_2,cols,A,incA,A,incA,rows,cols,cols); /* A_2 = A*A */
/*
double e=0,normA_2=0;
for(int i =0;i<rows*cols;i++){
e = abs(A_2[i] - tmpMatrix[i])*abs(A_2[i] - tmpMatrix[i]);
normA_2 = A_2[i]*A_2[i];
}
printf("sfalma= %e\n",(double)e/normA_2);
return;
*/
int loopStart;
if( (coefNum % 2) == 0 ) {
/* if polynomial order is even compute the aI + bX */
scalMatrix(B,incB,A,incA,rows,cols,coef[coefNum-1]);
addDiag(B,incB,B,incB,rows,cols,coef[coefNum-2]);
loopStart=coefNum-3;
}else{
/* if polynomial order is odd compute the aI */
memset(tmpMatrix,0,rows*cols*sizeof(double));
addDiag(B,incB,tmpMatrix,cols,rows,cols,coef[coefNum-1]);
loopStart=coefNum-2;
}
for(int i =loopStart;i>=0;i=i-2){
naiveMatrixMul(B,incB,A_2,cols,B,incB,rows,cols,cols) ;
//matrixMul(B,incB,A_2,cols,B,incB,rows,cols,cols); /*B = X_2*B */
scalMatrix(tmpMatrix,cols,A,incA,rows,cols,coef[i]);
addDiag(tmpMatrix,cols,tmpMatrix,cols,rows,cols,coef[i-1]);
naiveMatrixAdd(B,incB,B,incB,tmpMatrix,cols,rows,cols);
}
free(tmpMatrix);
free(A_2);
}
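/* naivePolComp evaluates p(A) = coef[0]*I + coef[1]*A + ... + coef[n-1]*A^(n-1)
 * with a Horner scheme in A^2: the accumulator starts from the top pair
 * (coef[n-1]*A + coef[n-2]*I, or just coef[n-1]*I when n is odd) and every loop
 * step does B = A^2*B + coef[i]*A + coef[i-1]*I; for n = 4 this expands to
 * A^2*(c3*A + c2*I) + c1*A + c0*I = c3*A^3 + c2*A^2 + c1*A + c0*I. */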
/* **************************************************** */
int main(int argc, char **argv) {
/* this program is called like that ./test -d degree -r rows */
srand(time(0));
double time_spent;
unsigned long start,end;
struct timeval tv1,tv2;
int opt,rows=10,cols=10,deg=2;
extern char *optarg;
while ( (opt=getopt(argc,argv,"r:d:h"))!= EOF) {
switch (opt) {
case 'r': rows=atoi(optarg);
cols=rows;
break;
case 'd': deg = atoi(optarg);
break;
default: break;
}
}
double *x = (double*)malloc(rows*cols*sizeof(double));
double *xout = (double*)malloc(rows*cols*sizeof(double));
double *coef = (double*)malloc(deg*sizeof(double));
double *xtest = (double*)malloc(rows*cols*sizeof(double)); /*****/
int i,j;
for(i=0;i<rows;i++)
{
for(j=0;j<rows;j++)
{
x[i*rows+j] = (double)(rand()%10)/12;
}
}
for(i=0;i<rows*cols;i++) xout[i] = 1;//x1[i];
for(i=0;i<deg;i++)
{
coef[i] = (double)(rand()%10)/12;
}
printf("GPUs polynomial computation...\n");
cublasParallelInit();
gettimeofday(&tv1, NULL);
initializeZero(xout,cols,rows,cols);
matrixPol(xout,cols,x,cols,rows,cols,coef,deg);
gettimeofday(&tv2, NULL);
cublasParallelDestroy();
start = (unsigned long)(tv1.tv_usec + tv1.tv_sec * 1000000);
end = (unsigned long)(tv2.tv_usec + tv2.tv_sec * 1000000);
time_spent=(double)((end - start) / 1000000.0);
clock_t begin2,end2;
double time_spent2;
printf("Done in GPUs\nNaive method computation in CPU...\n");
cublasParallelInit();
begin2 = clock();
naivePolComp(xtest,cols,x,cols,rows,cols,coef,deg); /****/
end2 = clock();
cublasParallelDestroy();
printf("Done in CPU\n");
time_spent2 = (double)(end2-begin2)/CLOCKS_PER_SEC;
printf("Execution time GPUs:%lfs CPU:%lf \n",time_spent,time_spent2);
/*****/
double resDif=0;
double resX =0;
double resT =0;
for(i=0;i<rows;i++)
{
for(j=0;j<rows;j++)
{
resDif += (xout[i]-xtest[i])*(xout[i]-xtest[i]);
resX += xout[i]*xout[i];
resT += xtest[i]*xtest[i];
}
}
printf("||Xgpu-Xcpu||_2 %e\n",(double)sqrt(resDif/resX));
printf("||Xgpu||_2 %e\n",(double)sqrt(resX));
printf("||Xcpu||_@ %e\n",(double)sqrt(resT));
free(xtest); /*****/
free(xout);
free(x);
free(coef);
}
| 61a31a0ce39ddbe40a3e0e9ea08ecf0f001c7395.cu | #include <stdio.h>
#include <unistd.h>
#include <sys/time.h>
#include <stdlib.h>
#include "matrixfunctions.h"
#include <cblas.h>
/* ******** NAIVE FUNCTION FOR TESTING PURPOSE *********/
void scalMatrix(double *B,int incB,double *A,int incA,int rows,int cols,double scalar){
memcpy(B,A,rows*cols*sizeof(double));
cblas_dscal(rows*cols,scalar,B,1);
}
void addDiag(double *B,int incB,double *A,int incA,int rows,int cols,double scalar){
memcpy(B,A,rows*cols*sizeof(double));
int i,j;
for(i=0;i<rows;i++)
B[i*incB+i] = scalar+B[i*incA+i];
}
void naiveMatrixAdd(double *C,int incC,double *A,int incA,double *B,int incB,int rows,int cols){
int i,j;
for(i=0;i<rows;i++)
for(j=0;j<cols;j++)
C[i*incC+j] = A[i*incA+j] + B[i*incB+j];
}
void mul(double *A, double *B, double *C,
int lda, int ldb, int ldc,
int XA, int XB, int XC,
int YA, int YB, int YC,
double alpha, double beta) {
// cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, XB, YA, XA, &alpha, B, ldb, A, lda, &beta, C, ldc);
cblas_dgemm(CblasRowMajor,CblasNoTrans,CblasNoTrans,XA,YA,XB,alpha,A,lda,B,ldb,beta,C,ldc);
}
void add(double *A, double *B, double *C,
int lda, int ldb, int ldc,
int XA, int YA,
double alpha, double beta) {
// cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, XA, YA, &alpha, A, lda, &beta, B, ldb, C, ldc);
int i,j;
for(i=0;i<XA;i++)
for(j=0;j<YA;j++)
C[i*ldc+j] = alpha*A[i*lda+j] + beta*B[i*ldb+j];
}
void serialStrassen(double *A, double *B, double *C,
int lda, int ldb, int ldc,
int XA, int XB, int XC,
int YA, int YB, int YC,
int depth) {
int XA2 = XA / 2;
int XB2 = XB / 2;
int XC2 = XC / 2;
int YA2 = YA / 2;
int YB2 = YB / 2;
int YC2 = YC / 2;
//double *W_1, *W_2;
int lw1 = (XA2 > XC2 ? XA2 : XC2);
int lw2 = XB2;
//cudaMalloc((void **)&W_1, lw1 * YA2 * sizeof(double));
double* W_1 = (double*)malloc(lw1 * YA2 * sizeof(double));
// cudaMalloc((void **)&W_2, lw2 * YB2 * sizeof(double));
double* W_2 = (double*)malloc(lw2 * YB2 * sizeof(double));
if( W_1 == NULL ) printf("Error\n");
if( W_2 == NULL ) printf("Error2\n");
int dXA = XA2;
int dYA = YA2 * lda;
int dXB = XB2;
int dYB = YB2 * ldb;
int dXC = XC2;
int dYC = YC2 * ldc;
double *A11, *A12, *A21, *A22;
double *B11, *B12, *B21, *B22;
double *C11, *C12, *C21, *C22;
A11 = A;
A12 = A + dXA;
A21 = A + dYA;
A22 = A + dXA + dYA;
B11 = B;
B12 = B + dXB;
B21 = B + dYB;
B22 = B + dXB + dYB;
C11 = C;
C12 = C + dXC;
C21 = C + dYC;
C22 = C + dXC + dYC;
/* cutoff criteria */
bool stop = false;
#if 0
int cutoff = 2048;
float mm = cutoff / XB2;
float nn = cutoff / YA2;
float kk = cutoff / XA2;
if ((mm + nn + kk) >= 3) {
stop = true;
}
#endif
if (depth <= 1 || stop) {
add( A11, A21, W_1, lda, lda, lw1, XA2, YA2, 1.0, -1.0); // W_1 = A11 - A21
add( B22, B12, W_2, ldb, ldb, lw2, XB2, YB2, 1.0, -1.0); // W_2 = B22 - B12
mul( W_1, W_2, C21, lw1, lw2, ldc, XA2, XB2, XC2, YA2, YB2, YC2, 1.0, 0.0); // C21 = W_1 * W_2
add( A21, A22, W_1, lda, lda, lw1, XA2, YA2, 1.0, 1.0); // W_1 = A21 + A22
add( B12, B11, W_2, ldb, ldb, lw2, XB2, YB2, 1.0, -1.0); // W_2 = B12 - B11
mul( W_1, W_2, C22, lw1, lw2, ldc, XA2, XB2, XC2, YA2, YB2, YC2, 1.0, 0.0); // C22 = W_1 * W_2
add( W_1, A11, W_1, lw1, lda, lw1, XA2, YA2, 1.0, -1.0); // W_1 = W_1- A11
add( B22, W_2, W_2, ldb, lw2, lw2, XB2, YB2, 1.0, -1.0); // W_2 = B22 - W_2
mul( W_1, W_2, C11, lw1, lw2, ldc, XA2, XB2, XC2, YA2, YB2, YC2, 1.0, 0.0); // C11 = W_1 * W_2
add( A12, W_1, W_1, lda, lw1, lw1, XA2, YA2, 1.0, -1.0); // W_1 = A12 - W_1
mul( W_1, B22, C12, lw1, ldb, ldc, XA2, XB2, XC2, YA2, YB2, YC2, 1.0, 0.0); // C12 = W_1 * B22
add( C22, C12, C12, ldc, ldc, ldc, XC2, YC2, 1.0, 1.0); // C12 = C22 + C12
mul( A11, B11, W_1, lda, ldb, lw1, XA2, XB2, XC2, YA2, YB2, YC2, 1.0, 0.0); // W_1= A11 * B11
add( W_1, C11, C11, lw1, ldc, ldc, XC2, YC2, 1.0, 1.0); // C11 = W_1 + C11
add( C11, C12, C12, ldc, ldc, ldc, XC2, YC2, 1.0, 1.0); // C12 = C11 + C12
add( C11, C21, C11, ldc, ldc, ldc, XC2, YC2, 1.0, 1.0); // C11 = C11 + C21
add( W_2, B21, W_2, lw2, ldb, lw2, XB2, YB2, 1.0, -1.0); // W_2 = W_2- B21
mul( A22, W_2, C21, lda, lw2, ldc, XA2, XB2, XC2, YA2, YB2, YC2, 1.0, 0.0); // C21 = A22 * W_2
add( C11, C21, C21, ldc, ldc, ldc, XC2, YC2, 1.0, -1.0); // C11 = C11 - C21
add( C11, C22, C22, ldc, ldc, ldc, XC2, YC2, 1.0, 1.0); // C22 = C11 + C22
mul( A12, B21, C11, lda, ldb, ldc, XA2, XB2, XC2, YA2, YB2, YC2, 1.0, 0.0); // C11 = A12 * B21
add( W_1, C11, C11, lw1, ldc, ldc, XC2, YC2, 1.0, 1.0); // C11 = W_1+ C11
}
else {
add( A11, A21, W_1, lda, lda, lw1, XA2, YA2, 1.0, -1.0); // W_1 = A11 - A21
add( B22, B12, W_2, ldb, ldb, lw2, XB2, YB2, 1.0, -1.0); // W_2 = B22 - B12
serialStrassen( W_1, W_2, C21, lw1, lw2, ldc, XA2, XB2, XC2, YA2, YB2, YC2, depth - 1);
add( A21, A22, W_1, lda, lda, lw1, XA2, YA2, 1.0, 1.0); // W_1 = A21 + A22
add( B12, B11, W_2, ldb, ldb, lw2, XB2, YB2, 1.0, -1.0); // W_2 = B12 - B11
serialStrassen( W_1, W_2, C22, lw1, lw2, ldc, XA2, XB2, XC2, YA2, YB2, YC2, depth - 1);
add( W_1, A11, W_1, lw1, lda, lw1, XA2, YA2, 1.0, -1.0); // W_1 = W_1- A11
add( B22, W_2, W_2, ldb, lw2, lw2, XB2, YB2, 1.0, -1.0); // W_2 = B22 - W_2
serialStrassen( W_1, W_2, C11, lw1, lw2, ldc, XA2, XB2, XC2, YA2, YB2, YC2, depth - 1);
add( A12, W_1, W_1, lda, lw1, lw1, XA2, YA2, 1.0, -1.0); // W_1 = A12 - W_1
serialStrassen( W_1, B22, C12, lw1, ldb, ldc, XA2, XB2, XC2, YA2, YB2, YC2, depth - 1);
add( C22, C12, C12, ldc, ldc, ldc, XC2, YC2, 1.0, 1.0); // C12 = C22 + C12
serialStrassen( A11, B11, W_1, lda, ldb, lw1, XA2, XB2, XC2, YA2, YB2, YC2, depth - 1);
add( W_1, C11, C11, lw1, ldc, ldc, XC2, YC2, 1.0, 1.0); // C11 = W_1 + C11
add( C11, C12, C12, ldc, ldc, ldc, XC2, YC2, 1.0, 1.0); // C12 = C11 + C12
add( C11, C21, C11, ldc, ldc, ldc, XC2, YC2, 1.0, 1.0); // C11 = C11 + C21
add( W_2, B21, W_2, lw2, ldb, lw2, XB2, YB2, 1.0, -1.0); // W_2 = W_2- B21
serialStrassen( A22, W_2, C21, lda, lw2, ldc, XA2, XB2, XC2, YA2, YB2, YC2, depth - 1);
add( C11, C21, C21, ldc, ldc, ldc, XC2, YC2, 1.0, -1.0); // C21 = C11 - C21
add( C11, C22, C22, ldc, ldc, ldc, XC2, YC2, 1.0, 1.0); // C22 = C11 + C22
serialStrassen( A12, B21, C11, lda, ldb, ldc, XA2, XB2, XC2, YA2, YB2, YC2, depth - 1);
add( W_1, C11, C11, lw1, ldc, ldc, XC2, YC2, 1.0, 1.0); // C11 = W_1+ C11
}
free(W_1);
free(W_2);
/* dynamic peeling fix-up */
int pxa = XA % 2;
int pya = YA % 2;
int pxb = XB % 2;
int pyb = YB % 2;
int pxc = XC % 2;
int pyc = YC % 2;
int nxa = XA - pxa;
int nya = YA - pya;
int nxb = XB - pxb;
int nyb = YB - pyb;
int nxc = XC - pxc;
int nyc = YC - pyc;
double *a12, *a21;
double *b12, *b21;
double *c12, *c21;
int dxa = nxa;
int dya = nya * lda;
int dxb = nxb;
int dyb = nyb * ldb;
int dxc = nxc;
int dyc = nyc * ldc;
a12 = A + dxa;
a21 = A + dya;
// a22 = A + dxa + dya;
b12 = B + dxb;
b21 = B + dyb;
// b22 = B + dxb + dyb;
c12 = C + dxc;
c21 = C + dyc;
// c22 = C + dxc + dyc;
/*
A11 = nxa x nya
a12 = pxa x nya
a21 = nxa x pya
a22 = pxa x pya
*/
mul( a21, B11, c21, lda, ldb, ldc, nxa, XB, XC, pya, nyb, pyc, 1.0, 0.0);
mul( A11, b12, c12, lda, ldb, ldc, nxa, pxb, pxc, YA, nyb, YC, 1.0, 0.0);
mul( a12, b21, C11, lda, ldb, ldc, pxa, XB, XC, YA, pyb, YC, 1.0, 1.0);
}
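/*
Illustrative usage sketch (not part of the original build, kept inside #if 0):
it compares serialStrassen() above against a plain cblas_dgemm call on a small
square matrix. The size n, the recursion depth and the random fill below are
assumptions chosen only for this example.
*/
#if 0
static void strassenSanityCheck(void) {
int n = 128; // assumed square test size
double *A = (double*)malloc(n*n*sizeof(double));
double *B = (double*)malloc(n*n*sizeof(double));
double *C = (double*)malloc(n*n*sizeof(double));
double *Ref = (double*)malloc(n*n*sizeof(double));
for (int i = 0; i < n*n; i++) { A[i] = (double)(rand()%10)/12; B[i] = (double)(rand()%10)/12; C[i] = 0.0; }
// square case: all row/column extents and leading dimensions are n
serialStrassen(A, B, C, n, n, n, n, n, n, n, n, n, 2);
cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, n, n, n, 1.0, A, n, B, n, 0.0, Ref, n);
double maxDiff = 0.0;
for (int i = 0; i < n*n; i++) { double d = fabs(C[i] - Ref[i]); if (d > maxDiff) maxDiff = d; }
printf("strassen vs dgemm, max abs diff = %e\n", maxDiff);
free(A); free(B); free(C); free(Ref);
}
#endif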
void naiveMatrixMul(double* C,int incC,double* A,int incA,double *B,int incB,int m,int k,int n){
double *tmp = (double*)malloc(m*n*sizeof(double));
memset(tmp,0,m*n*sizeof(double));
//serialStrassen(A,B,tmp,incA,incB,m,m,k,m,k,n,n,1);
///serialStrassen(A,B,C,incA,incB,incC,m,k,m,k,n,n,1);
cblas_dgemm(CblasRowMajor,CblasNoTrans,CblasNoTrans,m,n,k,1.0,A,incA,B,incB,0.0,tmp,n);
//printf("error strassen\n");
memcpy(C,tmp,m*n*sizeof(double));
free(tmp);
}
void naivePolComp(double *B,int incB,double *A,int incA,int rows,int cols,double *coef,int coefNum){
double *tmpMatrix = (double*)malloc(rows*cols*sizeof(double));
double *tmpMatrix2 = (double*)malloc(rows*cols*sizeof(double));
double *A_2 = (double*)malloc(rows*cols*sizeof(double));
naiveMatrixMul(A_2,cols,A,incA,A,incA,rows,cols,cols) ;
//matrixMul(A_2,cols,A,incA,A,incA,rows,cols,cols); /* A_2 = A*A */
/*
double e=0,normA_2=0;
for(int i =0;i<rows*cols;i++){
e = abs(A_2[i] - tmpMatrix[i])*abs(A_2[i] - tmpMatrix[i]);
normA_2 = A_2[i]*A_2[i];
}
printf("sfalma= %e\n",(double)e/normA_2);
return;
*/
int loopStart;
if( (coefNum % 2) == 0 ) {
/* if polynomial order is even compute the aI + bX */
scalMatrix(B,incB,A,incA,rows,cols,coef[coefNum-1]);
addDiag(B,incB,B,incB,rows,cols,coef[coefNum-2]);
loopStart=coefNum-3;
}else{
/* if polynomial order is odd compute the aI */
memset(tmpMatrix,0,rows*cols*sizeof(double));
addDiag(B,incB,tmpMatrix,cols,rows,cols,coef[coefNum-1]);
loopStart=coefNum-2;
}
for(int i =loopStart;i>=0;i=i-2){
naiveMatrixMul(B,incB,A_2,cols,B,incB,rows,cols,cols) ;
//matrixMul(B,incB,A_2,cols,B,incB,rows,cols,cols); /*B = X_2*B */
scalMatrix(tmpMatrix,cols,A,incA,rows,cols,coef[i]);
addDiag(tmpMatrix,cols,tmpMatrix,cols,rows,cols,coef[i-1]);
naiveMatrixAdd(B,incB,B,incB,tmpMatrix,cols,rows,cols);
}
free(tmpMatrix);
free(tmpMatrix2); // tmpMatrix2 is allocated above, so release it as well
free(A_2);
}
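/*
Note on the scheme used by naivePolComp above (explanatory comment only): the
coefficients are consumed two at a time, so the routine evaluates
p(A) = coef[0]*I + coef[1]*A + ... + coef[coefNum-1]*A^(coefNum-1)
as a Horner recurrence in A^2: start with B = coef[n-1]*A + coef[n-2]*I
(or B = coef[n-1]*I when coefNum is odd) and repeat
B = A^2 * B + (coef[i]*A + coef[i-1]*I) for i = loopStart, loopStart-2, ...
A quick 1x1 sanity trace with A = [2] and coef = {1, 3, 5}: B = 5*I = [5],
then B = 4*5 + (3*2 + 1) = 27, which matches 1 + 3*2 + 5*4.
*/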
/* **************************************************** */
int main(int argc, char **argv) {
/* this program is called like that ./test -d degree -r rows */
srand(time(0));
double time_spent;
unsigned long start,end;
struct timeval tv1,tv2;
int opt,rows=10,cols=10,deg=2;
extern char *optarg;
while ( (opt=getopt(argc,argv,"r:d:h"))!= EOF) {
switch (opt) {
case 'r': rows=atoi(optarg);
cols=rows;
break;
case 'd': deg = atoi(optarg);
break;
default: break;
}
}
double *x = (double*)malloc(rows*cols*sizeof(double));
double *xout = (double*)malloc(rows*cols*sizeof(double));
double *coef = (double*)malloc(deg*sizeof(double));
double *xtest = (double*)malloc(rows*cols*sizeof(double)); /*****/
int i,j;
for(i=0;i<rows;i++)
{
for(j=0;j<rows;j++)
{
x[i*rows+j] = (double)(rand()%10)/12;
}
}
for(i=0;i<rows*cols;i++) xout[i] = 1;//x1[i];
for(i=0;i<deg;i++)
{
coef[i] = (double)(rand()%10)/12;
}
printf("GPUs polynomial computation...\n");
cublasParallelInit();
gettimeofday(&tv1, NULL);
initializeZero(xout,cols,rows,cols);
matrixPol(xout,cols,x,cols,rows,cols,coef,deg);
gettimeofday(&tv2, NULL);
cublasParallelDestroy();
start = (unsigned long)(tv1.tv_usec + tv1.tv_sec * 1000000);
end = (unsigned long)(tv2.tv_usec + tv2.tv_sec * 1000000);
time_spent=(double)((end - start) / 1000000.0);
clock_t begin2,end2;
double time_spent2;
printf("Done in GPUs\nNaive method computation in CPU...\n");
cublasParallelInit();
begin2 = clock();
naivePolComp(xtest,cols,x,cols,rows,cols,coef,deg); /****/
end2 = clock();
cublasParallelDestroy();
printf("Done in CPU\n");
time_spent2 = (double)(end2-begin2)/CLOCKS_PER_SEC;
printf("Execution time GPUs:%lfs CPU:%lf \n",time_spent,time_spent2);
/*****/
double resDif=0;
double resX =0;
double resT =0;
for(i=0;i<rows;i++)
{
for(j=0;j<rows;j++)
{
resDif += (xout[i*rows+j]-xtest[i*rows+j])*(xout[i*rows+j]-xtest[i*rows+j]);
resX += xout[i*rows+j]*xout[i*rows+j];
resT += xtest[i*rows+j]*xtest[i*rows+j];
}
}
printf("||Xgpu-Xcpu||_2 %e\n",(double)sqrt(resDif/resX));
printf("||Xgpu||_2 %e\n",(double)sqrt(resX));
printf("||Xcpu||_@ %e\n",(double)sqrt(resT));
free(xtest); /*****/
free(xout);
free(x);
free(coef);
}
|
d649e67d9f6b283f20a749f1fd91d534d65521a9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hipfft.h>
#include <cutil_inline.h>
#include <cudabuffer.h>
#include <defines.h>
void runTest(int x, int y, int z)
{
unsigned int timer = 0;
cutilCheckError(cutCreateTimer(&timer));
int batch = TEST_BUFFER_SIZE / (x * y * z * sizeof(complexType));
if(batch == 0)
{
printf("Resulting buffer size is too big, test skipped\n");
return;
}
hipfftHandle plan;
CudaBuffer<complexType> idata(x * y * z * batch), odata(x * y * z * batch);
printf("--- (%d, %d, %d), batch %d\n", x, y, z, batch);
float gflop = 5.0e-9 * log2((float)(x * y * z)) * x * y * z * batch;
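// 5 * N * log2(N) is the usual flop-count estimate for a complex FFT of
// length N = x*y*z; multiplied by the batch count and scaled by 1e-9 it gives
// GFLOP per launch. Illustrative numbers only: a 1024-point 1D transform with
// batch 1024 yields 5 * 1024 * 10 * 1024 * 1e-9, roughly 0.052 GFLOP per call.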
// prepare plan
int n[3] = {x, y, z};
int rank = 1;
if(y != 1) rank = 2;
if(z != 1) rank = 3;
cufftSafeCall(hipfftPlanMany(&plan, rank, n, NULL, 1, 0, NULL, 1, 0, PLAN_TYPE, batch));
cufftSafeCall(executePlan(plan, (complexType*)idata, (complexType*)odata, HIPFFT_FORWARD));
cutilSafeCall(hipDeviceSynchronize());
// measure out of place time
cutilCheckError(cutStartTimer(timer));
for(int i = 0; i < NUMITER; i++)
cufftSafeCall(executePlan(plan, (complexType*)idata, (complexType*)odata, HIPFFT_FORWARD));
cutilSafeCall(hipDeviceSynchronize());
cutilCheckError(cutStopTimer(timer));
printf("Out-of-place time: %f ms (%f GFLOPS)\n",
cutGetTimerValue(timer) / NUMITER,
gflop / (cutGetTimerValue(timer) / NUMITER / 1000));
cutilCheckError(cutResetTimer(timer));
// measure inplace
cutilCheckError(cutStartTimer(timer));
for(int i = 0; i < NUMITER; i++)
cufftSafeCall(executePlan(plan, (complexType*)idata, (complexType*)idata, HIPFFT_FORWARD));
cutilSafeCall(hipDeviceSynchronize());
cutilCheckError(cutStopTimer(timer));
printf("Inplace time: %f ms (%f GFLOPS)\n",
cutGetTimerValue(timer) / NUMITER,
gflop / (cutGetTimerValue(timer) / NUMITER / 1000));
cutilCheckError( cutDeleteTimer( timer));
hipfftDestroy(plan);
}
int main(int argc, char** argv)
{
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
hipSetDevice( cutGetMaxGflopsDeviceId() );
// 1D
runTest(16, 1, 1);
runTest(1024, 1, 1);
runTest(8192, 1, 1);
// 2D
runTest(16, 16, 1);
runTest(128, 128, 1);
runTest(1024, 1024, 1);
// 3D
runTest(8, 8, 64);
runTest(16, 16, 16);
runTest(16, 16, 128);
runTest(32, 32, 128);
runTest(128, 128, 128);
}
| d649e67d9f6b283f20a749f1fd91d534d65521a9.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cufft.h>
#include <cutil_inline.h>
#include <cudabuffer.h>
#include <defines.h>
void runTest(int x, int y, int z)
{
unsigned int timer = 0;
cutilCheckError(cutCreateTimer(&timer));
int batch = TEST_BUFFER_SIZE / (x * y * z * sizeof(complexType));
if(batch == 0)
{
printf("Resulting buffer size is too big, test skipped\n");
return;
}
cufftHandle plan;
CudaBuffer<complexType> idata(x * y * z * batch), odata(x * y * z * batch);
printf("--- (%d, %d, %d), batch %d\n", x, y, z, batch);
float gflop = 5.0e-9 * log2((float)(x * y * z)) * x * y * z * batch;
// prepare plan
int n[3] = {x, y, z};
int rank = 1;
if(y != 1) rank = 2;
if(z != 1) rank = 3;
cufftSafeCall(cufftPlanMany(&plan, rank, n, NULL, 1, 0, NULL, 1, 0, PLAN_TYPE, batch));
cufftSafeCall(executePlan(plan, (complexType*)idata, (complexType*)odata, CUFFT_FORWARD));
cutilSafeCall(cudaThreadSynchronize());
// measure out of place time
cutilCheckError(cutStartTimer(timer));
for(int i = 0; i < NUMITER; i++)
cufftSafeCall(executePlan(plan, (complexType*)idata, (complexType*)odata, CUFFT_FORWARD));
cutilSafeCall(cudaThreadSynchronize());
cutilCheckError(cutStopTimer(timer));
printf("Out-of-place time: %f ms (%f GFLOPS)\n",
cutGetTimerValue(timer) / NUMITER,
gflop / (cutGetTimerValue(timer) / NUMITER / 1000));
cutilCheckError(cutResetTimer(timer));
// measure inplace
cutilCheckError(cutStartTimer(timer));
for(int i = 0; i < NUMITER; i++)
cufftSafeCall(executePlan(plan, (complexType*)idata, (complexType*)idata, CUFFT_FORWARD));
cutilSafeCall(cudaThreadSynchronize());
cutilCheckError(cutStopTimer(timer));
printf("Inplace time: %f ms (%f GFLOPS)\n",
cutGetTimerValue(timer) / NUMITER,
gflop / (cutGetTimerValue(timer) / NUMITER / 1000));
cutilCheckError( cutDeleteTimer( timer));
cufftDestroy(plan);
}
int main(int argc, char** argv)
{
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
cudaSetDevice( cutGetMaxGflopsDeviceId() );
// 1D
runTest(16, 1, 1);
runTest(1024, 1, 1);
runTest(8192, 1, 1);
// 2D
runTest(16, 16, 1);
runTest(128, 128, 1);
runTest(1024, 1024, 1);
// 3D
runTest(8, 8, 64);
runTest(16, 16, 16);
runTest(16, 16, 128);
runTest(32, 32, 128);
runTest(128, 128, 128);
}
|
ad568a3abe57807c094f5b0c68a619eadd3835cd.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <math.h>
#include <iostream>
#include <thrust/device_vector.h>
static const size_t N = 102400;
__global__ void kernel(const thrust::device_ptr<float> A, const thrust::device_ptr<float> B, thrust::device_ptr<float> C, int N)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < N) {
C[tid] = A[tid] + B[tid];
}
}
int main() {
thrust::device_vector<float> d_A, d_B, d_C;
d_A.resize(N);
d_B.resize(N);
d_C.resize(N);
for (int i = 0; i < N; i++) {
d_A[i] = i;
d_B[i] = 0.5f * i - 2;
}
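// Launch with ceil(N / 512) blocks of 512 threads; the tid < N guard inside
// the kernel handles the final, possibly partial block.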
hipLaunchKernelGGL(( kernel), dim3(ceil(double(N) / 512)), dim3(512), 0, 0, d_A.data(), d_B.data(), d_C.data(), N);
double err = 0;
for (int i = 0; i < N; i++) {
// accumulate squared differences so that sqrt(err) below is a well-defined L2 error
double diff = (d_A[i] + d_B[i]) - d_C[i];
err += diff * diff;
}
std::cout << "Cum error: " << sqrt(err) << std::endl;
return 0;
}
| ad568a3abe57807c094f5b0c68a619eadd3835cd.cu | #include <cuda.h>
#include <math.h>
#include <iostream>
#include <thrust/device_vector.h>
static const size_t N = 102400;
__global__ void kernel(const thrust::device_ptr<float> A, const thrust::device_ptr<float> B, thrust::device_ptr<float> C, int N)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < N) {
C[tid] = A[tid] + B[tid];
}
}
int main() {
thrust::device_vector<float> d_A, d_B, d_C;
d_A.resize(N);
d_B.resize(N);
d_C.resize(N);
for (int i = 0; i < N; i++) {
d_A[i] = i;
d_B[i] = 0.5f * i - 2;
}
kernel<<<ceil(double(N) / 512), 512>>>(d_A.data(), d_B.data(), d_C.data(), N);
double err = 0;
for (int i = 0; i < N; i++) {
// accumulate squared differences so that sqrt(err) below is a well-defined L2 error
double diff = (d_A[i] + d_B[i]) - d_C[i];
err += diff * diff;
}
std::cout << "Cum error: " << sqrt(err) << std::endl;
return 0;
}
|
ef0b0005e159bd7f295fc71b8122d159bec2617e.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cfloat>
#include <iostream>
using std::cout;
using std::endl;
#define BLOCKSIZE 512
namespace soft_dice_space {
template<typename T>
class sum_op {
public:
__device__ __forceinline__ T operator()(T a, T b) const {
return a + b;
}
};
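// reduce_op performs an in-place, block-wide tree reduction over
// sdata[0..blocksize-1] with the supplied binary functor; the result is left
// in sdata[0]. It assumes blocksize is a power of two and that every thread
// of the block reaches the call, since the loop relies on __syncthreads().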
template<template<typename> class Reduction, typename scalar_t>
__device__ __forceinline__ void reduce_op(
scalar_t* sdata, int blocksize,
const Reduction<scalar_t>& oper) {
int tid = threadIdx.x;
__syncthreads();
for (int s{blocksize / 2}; s > 0; s >>= 1) {
if (tid < s) {
sdata[tid] = oper(sdata[tid], sdata[tid + s]);
}
__syncthreads();
}
}
}
// kernel function for forward and backward
template<typename scalar_t>
__global__ void compute_numer_denor(const int batchsize,
const int sample_size,
const int n_blockxs_sample,
const scalar_t *logits,
const int64_t *labels,
scalar_t *numer,
scalar_t *denor,
const float p) {
/* Tips about shared memory:
* 1. torch will instantiate the template with three types: double, float, half;
* 2. these three types should not share same definitions of shared memory;
* 3. so one method is to use static shared memory with memory size explicitly assigned,
* and another method is to allocate shared memory with same raw type, such as unsigned
* char here, and then cast the pointer according to different template types */
// method1: use static sized shared memory
// __shared__ scalar_t sdata[BLOCKSIZE * 2];
// method2: allocate with raw uchar type and then cast in different kernel
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw);
sdata = sdata + threadIdx.y * blockDim.x;
int tid = threadIdx.x;
int tstrd = blockDim.x * n_blockxs_sample;
int bid = threadIdx.y + blockIdx.x * blockDim.y;
int bstrd = gridDim.x * blockDim.y;
int n_sample_blocks = n_blockxs_sample * batchsize;
// TODO: exp use different types
const scalar_t one(1.);
for (int i{bid}; i < n_sample_blocks; i += bstrd) {
int sample_start = (i / n_blockxs_sample) * sample_size;
int local_tid = (i % n_blockxs_sample) * blockDim.x + tid;
scalar_t v_numer{0}, v_denor{0};
for (int j{local_tid}; j < sample_size; j += tstrd) {
scalar_t prob = one / (one + exp(-logits[j + sample_start]));
scalar_t lb = static_cast<scalar_t>(labels[j + sample_start]);
v_numer += prob * lb * 2;
v_denor += pow(prob, p) + lb;
}
sdata[tid] = v_numer;
__syncthreads();
soft_dice_space::reduce_op<soft_dice_space::sum_op, scalar_t>(
sdata,
blockDim.x,
soft_dice_space::sum_op<scalar_t>());
if (tid == 0) {
numer[i] = sdata[0];
}
sdata[tid] = v_denor;
__syncthreads();
soft_dice_space::reduce_op<soft_dice_space::sum_op, scalar_t>(
sdata,
blockDim.x,
soft_dice_space::sum_op<scalar_t>());
if (tid == 0) {
denor[i] = sdata[0];
}
}
}
template<typename scalar_t>
__global__ void SoftDiceForward(const int batchsize, const int n_blockxs_sample,
scalar_t *numer,
scalar_t *denor,
scalar_t *losses,
const float smooth) {
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw);
sdata = sdata + threadIdx.y * blockDim.x;
int tid = threadIdx.x;
int bid = threadIdx.y + blockIdx.x * blockDim.y;
int bstrd = gridDim.x * blockDim.y;
const scalar_t one(1.);
for (int i{bid}; i < batchsize; i += bstrd) {
scalar_t v_numer{0}, v_denor{0};
int t_start = i * n_blockxs_sample;
for (int j{tid}; j < n_blockxs_sample; j += blockDim.x) {
v_numer += numer[j + t_start];
v_denor += denor[j + t_start];
}
// reduce numer
sdata[tid] = v_numer;
__syncthreads();
soft_dice_space::reduce_op<soft_dice_space::sum_op, scalar_t>(
sdata,
blockDim.x,
soft_dice_space::sum_op<scalar_t>());
v_numer = sdata[0];
// reduce denorm
sdata[tid] = v_denor;
__syncthreads();
soft_dice_space::reduce_op<soft_dice_space::sum_op, scalar_t>(
sdata,
blockDim.x,
soft_dice_space::sum_op<scalar_t>());
v_denor = sdata[0];
if (tid == 0) {
losses[i] = one - (v_numer + smooth) / (v_denor + smooth); // index by i so each grid-stride iteration writes its own sample
}
}
}
template<typename scalar_t>
__global__ void reduce_numer_denor(const int batchsize, const int n_blockxs_sample,
scalar_t *numer,
scalar_t *denor,
const float smooth) {
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw);
sdata = sdata + threadIdx.y * blockDim.x;
int tid = threadIdx.x;
int bid = threadIdx.y + blockIdx.x * blockDim.y;
int bstrd = gridDim.x * blockDim.y;
for (int i{bid}; i < batchsize; i += bstrd) {
scalar_t v_numer{0}, v_denor{0};
int t_start = i * n_blockxs_sample;
for (int j{tid}; j < n_blockxs_sample; j += blockDim.x) {
v_numer += numer[j + t_start];
v_denor += denor[j + t_start];
}
// reduce numer
sdata[tid] = v_numer;
__syncthreads();
soft_dice_space::reduce_op<soft_dice_space::sum_op, scalar_t>(
sdata,
blockDim.x,
soft_dice_space::sum_op<scalar_t>());
if (tid == 0) {
numer[t_start] = sdata[0] + smooth;
}
// reduce denorm
sdata[tid] = v_denor;
__syncthreads();
soft_dice_space::reduce_op<soft_dice_space::sum_op, scalar_t>(
sdata,
blockDim.x,
soft_dice_space::sum_op<scalar_t>());
if (tid == 0) {
denor[t_start] = sdata[0] + smooth;
}
}
}
template<typename scalar_t>
__global__ void SoftDiceBackward(const int batchsize, const int sample_size,
const int n_blockxs_sample,
const scalar_t *logits,
const int64_t *labels,
const scalar_t *grad,
const scalar_t *numer,
const scalar_t *denor,
scalar_t *grad_logits,
const float p) {
int tid = threadIdx.x;
int tstrd = blockDim.x * n_blockxs_sample;
int bid = blockIdx.x * blockDim.y + threadIdx.y;
int bstrd = blockDim.y * gridDim.x;
const scalar_t one(1.);
const scalar_t two(2.);
int n_sample_blocks = n_blockxs_sample * batchsize;
for (int i{bid}; i < n_sample_blocks; i += bstrd) {
int sample_idx = i / n_blockxs_sample;
int sample_start = sample_idx * sample_size;
int local_tid = (i % n_blockxs_sample) * blockDim.x + tid;
scalar_t v_numer = numer[sample_idx * n_blockxs_sample];
scalar_t v_denor = denor[sample_idx * n_blockxs_sample];
scalar_t grad_val = grad[sample_idx];
for (int j{local_tid}; j < sample_size; j += tstrd) {
scalar_t prob = one / (one + exp(-logits[j + sample_start]));
int64_t lb = labels[j + sample_start];
scalar_t m = v_numer - two * (prob * static_cast<scalar_t>(lb));
scalar_t n = v_denor - powf(prob, p);
scalar_t g = -pow(prob, p - one) * p * m;
if (lb == 1L) {
g += pow(prob, p) * two * (one - p) + (n * two);
}
g = - (g / powf(powf(prob, p) + n, two)) * prob * (one - prob);
grad_logits[j + sample_start] = grad_val * g;
}
}
}
// cuda forward and backward
at::Tensor SoftDice_forward_cuda(const at::Tensor &logits,
const at::Tensor &labels,
const float p,
const float smooth) {
// CHECK type and shape
AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda");
AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda");
const int batchsize = logits.size(0);
const int num_samples = logits.numel();
const int sample_size = num_samples / batchsize;
// parallel method for numer/denor
int blockx1 = 32;
while (blockx1 < sample_size) blockx1 *= 2;
blockx1 = ::max(32, ::min(BLOCKSIZE, blockx1 / 2));
int n_blockxs_sample = ::max(1, sample_size / blockx1);
int blocky1 = ::max(1, BLOCKSIZE / blockx1);
if (blocky1 > batchsize) blocky1 = batchsize;
int gridx1 = batchsize * n_blockxs_sample / blocky1;
gridx1 = ::max(1, ::min(4096, gridx1));
dim3 block1(blockx1, blocky1);
dim3 grid1(gridx1);
// parallel method for loss
int blockx2 = 32;
while (blockx2 < n_blockxs_sample) blockx2 *= 2;
blockx2 = ::max(32, ::min(BLOCKSIZE, blockx2 / 2));
int blocky2 = ::max(1, BLOCKSIZE / blockx2);
int gridx2 = ::min(batchsize / blocky2, 4096);
gridx2 = ::max(1, gridx2);
dim3 block2(blockx2, blocky2);
dim3 grid2(gridx2);
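// Worked example of the sizing above (illustrative numbers, not from the
// original source): for sample_size = 50000 and batchsize = 4, blockx1 grows
// to 65536 and is clamped back to 512, so n_blockxs_sample = 97 and
// blocky1 = 1, i.e. each sample is reduced by 97 blocks of 512 threads; the
// second launch then folds those 97 partial sums per sample with blockx2 = 64.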
// allocate memory and cuda grid/block
// Note: should use torch::zeros rather than at::zeros, torch::zeros is variable
// and at::zeros is tensor
auto losses = torch::empty({batchsize}, logits.options());
auto numer = torch::zeros(
{batchsize * n_blockxs_sample},
logits.options());
auto denor = torch::zeros(
{batchsize * n_blockxs_sample},
logits.options());
if (losses.numel() == 0) {
THCudaCheck(hipGetLastError());
return losses;
}
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(losses.scalar_type(), "soft dice forward", [&] {
int shm_size = blockx1 * blocky1 * sizeof(scalar_t);
hipLaunchKernelGGL(( compute_numer_denor<scalar_t>), dim3(grid1), dim3(block1), shm_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
batchsize, sample_size, n_blockxs_sample,
logits.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>(),
numer.contiguous().data_ptr<scalar_t>(),
denor.contiguous().data_ptr<scalar_t>(),
p
);
shm_size = blockx2 * blocky2 * sizeof(scalar_t);
hipLaunchKernelGGL(( SoftDiceForward<scalar_t>), dim3(grid2), dim3(block2), shm_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
batchsize, n_blockxs_sample,
numer.contiguous().data_ptr<scalar_t>(),
denor.contiguous().data_ptr<scalar_t>(),
losses.contiguous().data_ptr<scalar_t>(),
smooth
);
});
THCudaCheck(hipGetLastError());
return losses;
}
at::Tensor SoftDice_backward_cuda(const at::Tensor &grad,
const at::Tensor &logits,
const at::Tensor &labels,
const float p,
const float smooth) {
// CHECK type and shape
AT_ASSERTM(grad.device().type() == c10::kCUDA, "grad should be cuda");
AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda");
AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda");
const int batchsize = logits.size(0);
const int num_samples = logits.numel();
const int sample_size = num_samples / batchsize;
// parallel settings for numer/denor
int blockx1 = 32;
while (blockx1 < sample_size) blockx1 *= 2;
blockx1 = ::max(32, ::min(BLOCKSIZE, blockx1 / 2));
int n_blockxs_sample = sample_size / blockx1;
int blocky1 = ::max(1, BLOCKSIZE / blockx1);
if (blocky1 > batchsize) blocky1 = batchsize;
int gridx1 = batchsize * n_blockxs_sample / blocky1;
gridx1 = ::max(1, ::min(4096, gridx1));
dim3 block1(blockx1, blocky1);
dim3 grid1(gridx1);
// parallel settings for reduce numer/denor
int blockx2 = 32;
while (blockx2 < n_blockxs_sample) blockx2 *= 2;
blockx2 = ::max(32, ::min(BLOCKSIZE, blockx2 / 2));
int blocky2 = ::max(1, BLOCKSIZE / blockx2);
int gridx2 = ::min(batchsize / blocky2, 4096);
gridx2 = ::max(1, gridx2);
dim3 block2(blockx2, blocky2);
dim3 grid2(gridx2);
// allocate memory and cuda grid/block
auto grad_logits = torch::empty_like(logits);
auto numer = torch::zeros(
{batchsize * n_blockxs_sample},
logits.options());
auto denor = torch::zeros(
{batchsize * n_blockxs_sample},
logits.options());
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "soft dice backward", [&] {
int shm_size = blockx1 * blocky1 * sizeof(scalar_t);
hipLaunchKernelGGL(( compute_numer_denor<scalar_t>), dim3(grid1), dim3(block1), shm_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
batchsize, sample_size, n_blockxs_sample,
logits.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>(),
numer.contiguous().data_ptr<scalar_t>(),
denor.contiguous().data_ptr<scalar_t>(),
p
);
shm_size = blockx2 * blocky2 * sizeof(scalar_t);
hipLaunchKernelGGL(( reduce_numer_denor<scalar_t>), dim3(grid2), dim3(block2), shm_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
batchsize, n_blockxs_sample,
numer.contiguous().data_ptr<scalar_t>(),
denor.contiguous().data_ptr<scalar_t>(),
smooth
);
hipLaunchKernelGGL(( SoftDiceBackward<scalar_t>), dim3(grid1), dim3(block1), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
batchsize, sample_size, n_blockxs_sample,
logits.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>(),
grad.contiguous().data_ptr<scalar_t>(),
numer.contiguous().data_ptr<scalar_t>(),
denor.contiguous().data_ptr<scalar_t>(),
grad_logits.contiguous().data_ptr<scalar_t>(),
p
);
});
THCudaCheck(hipGetLastError());
return grad_logits;
}
// python interface
at::Tensor SoftDice_forward(const at::Tensor &logits,
const at::Tensor &labels,
const float p,
const float smooth) {
if ((logits.device().type() != c10::kCUDA) || (labels.device().type() != c10::kCUDA)) {
AT_ERROR("this dice loss only supports gpu mode\n");
}
at::DeviceGuard guard(logits.device());
return SoftDice_forward_cuda(logits, labels, p, smooth);
}
at::Tensor SoftDice_backward(const at::Tensor &grad,
const at::Tensor &logits,
const at::Tensor &labels,
const float p,
const float smooth) {
if ((logits.device().type() != c10::kCUDA) || (labels.device().type() != c10::kCUDA)) {
AT_ERROR("this dice loss only supports gpu mode\n");
}
at::DeviceGuard guard(logits.device());
return SoftDice_backward_cuda(grad, logits, labels, p, smooth);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("soft_dice_forward", &SoftDice_forward, "soft-dice forward");
m.def("soft_dice_backward", &SoftDice_backward, "soft-dice backward");
}
| ef0b0005e159bd7f295fc71b8122d159bec2617e.cu |
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cfloat>
#include <iostream>
using std::cout;
using std::endl;
#define BLOCKSIZE 512
namespace soft_dice_space {
template<typename T>
class sum_op {
public:
__device__ __forceinline__ T operator()(T a, T b) const {
return a + b;
}
};
template<template<typename> class Reduction, typename scalar_t>
__device__ __forceinline__ void reduce_op(
scalar_t* sdata, int blocksize,
const Reduction<scalar_t>& oper) {
int tid = threadIdx.x;
__syncthreads();
for (int s{blocksize / 2}; s > 0; s >>= 1) {
if (tid < s) {
sdata[tid] = oper(sdata[tid], sdata[tid + s]);
}
__syncthreads();
}
}
}
// kernel function for forward and backward
template<typename scalar_t>
__global__ void compute_numer_denor(const int batchsize,
const int sample_size,
const int n_blockxs_sample,
const scalar_t *logits,
const int64_t *labels,
scalar_t *numer,
scalar_t *denor,
const float p) {
/* Tips about shared memory:
* 1. torch will instantiate the template with three types: double, float, half;
* 2. these three types should not share same definitions of shared memory;
* 3. so one method is to use static shared memory with memory size explicitly assigned,
* and another method is to allocate shared memory with same raw type, such as unsigned
* char here, and then cast the pointer according to different template types */
// method1: use static sized shared memory
// __shared__ scalar_t sdata[BLOCKSIZE * 2];
// method2: allocate with raw uchar type and then cast in different kernel
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw);
sdata = sdata + threadIdx.y * blockDim.x;
int tid = threadIdx.x;
int tstrd = blockDim.x * n_blockxs_sample;
int bid = threadIdx.y + blockIdx.x * blockDim.y;
int bstrd = gridDim.x * blockDim.y;
int n_sample_blocks = n_blockxs_sample * batchsize;
// TODO: exp use different types
const scalar_t one(1.);
for (int i{bid}; i < n_sample_blocks; i += bstrd) {
int sample_start = (i / n_blockxs_sample) * sample_size;
int local_tid = (i % n_blockxs_sample) * blockDim.x + tid;
scalar_t v_numer{0}, v_denor{0};
for (int j{local_tid}; j < sample_size; j += tstrd) {
scalar_t prob = one / (one + exp(-logits[j + sample_start]));
scalar_t lb = static_cast<scalar_t>(labels[j + sample_start]);
v_numer += prob * lb * 2;
v_denor += pow(prob, p) + lb;
}
sdata[tid] = v_numer;
__syncthreads();
soft_dice_space::reduce_op<soft_dice_space::sum_op, scalar_t>(
sdata,
blockDim.x,
soft_dice_space::sum_op<scalar_t>());
if (tid == 0) {
numer[i] = sdata[0];
}
sdata[tid] = v_denor;
__syncthreads();
soft_dice_space::reduce_op<soft_dice_space::sum_op, scalar_t>(
sdata,
blockDim.x,
soft_dice_space::sum_op<scalar_t>());
if (tid == 0) {
denor[i] = sdata[0];
}
}
}
template<typename scalar_t>
__global__ void SoftDiceForward(const int batchsize, const int n_blockxs_sample,
scalar_t *numer,
scalar_t *denor,
scalar_t *losses,
const float smooth) {
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw);
sdata = sdata + threadIdx.y * blockDim.x;
int tid = threadIdx.x;
int bid = threadIdx.y + blockIdx.x * blockDim.y;
int bstrd = gridDim.x * blockDim.y;
const scalar_t one(1.);
for (int i{bid}; i < batchsize; i += bstrd) {
scalar_t v_numer{0}, v_denor{0};
int t_start = i * n_blockxs_sample;
for (int j{tid}; j < n_blockxs_sample; j += blockDim.x) {
v_numer += numer[j + t_start];
v_denor += denor[j + t_start];
}
// reduce numer
sdata[tid] = v_numer;
__syncthreads();
soft_dice_space::reduce_op<soft_dice_space::sum_op, scalar_t>(
sdata,
blockDim.x,
soft_dice_space::sum_op<scalar_t>());
v_numer = sdata[0];
// reduce denorm
sdata[tid] = v_denor;
__syncthreads();
soft_dice_space::reduce_op<soft_dice_space::sum_op, scalar_t>(
sdata,
blockDim.x,
soft_dice_space::sum_op<scalar_t>());
v_denor = sdata[0];
if (tid == 0) {
losses[i] = one - (v_numer + smooth) / (v_denor + smooth); // index by i so each grid-stride iteration writes its own sample
}
}
}
template<typename scalar_t>
__global__ void reduce_numer_denor(const int batchsize, const int n_blockxs_sample,
scalar_t *numer,
scalar_t *denor,
const float smooth) {
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw);
sdata = sdata + threadIdx.y * blockDim.x;
int tid = threadIdx.x;
int bid = threadIdx.y + blockIdx.x * blockDim.y;
int bstrd = gridDim.x * blockDim.y;
for (int i{bid}; i < batchsize; i += bstrd) {
scalar_t v_numer{0}, v_denor{0};
int t_start = i * n_blockxs_sample;
for (int j{tid}; j < n_blockxs_sample; j += blockDim.x) {
v_numer += numer[j + t_start];
v_denor += denor[j + t_start];
}
// reduce numer
sdata[tid] = v_numer;
__syncthreads();
soft_dice_space::reduce_op<soft_dice_space::sum_op, scalar_t>(
sdata,
blockDim.x,
soft_dice_space::sum_op<scalar_t>());
if (tid == 0) {
numer[t_start] = sdata[0] + smooth;
}
// reduce denorm
sdata[tid] = v_denor;
__syncthreads();
soft_dice_space::reduce_op<soft_dice_space::sum_op, scalar_t>(
sdata,
blockDim.x,
soft_dice_space::sum_op<scalar_t>());
if (tid == 0) {
denor[t_start] = sdata[0] + smooth;
}
}
}
template<typename scalar_t>
__global__ void SoftDiceBackward(const int batchsize, const int sample_size,
const int n_blockxs_sample,
const scalar_t *logits,
const int64_t *labels,
const scalar_t *grad,
const scalar_t *numer,
const scalar_t *denor,
scalar_t *grad_logits,
const float p) {
int tid = threadIdx.x;
int tstrd = blockDim.x * n_blockxs_sample;
int bid = blockIdx.x * blockDim.y + threadIdx.y;
int bstrd = blockDim.y * gridDim.x;
const scalar_t one(1.);
const scalar_t two(2.);
int n_sample_blocks = n_blockxs_sample * batchsize;
for (int i{bid}; i < n_sample_blocks; i += bstrd) {
int sample_idx = i / n_blockxs_sample;
int sample_start = sample_idx * sample_size;
int local_tid = (i % n_blockxs_sample) * blockDim.x + tid;
scalar_t v_numer = numer[sample_idx * n_blockxs_sample];
scalar_t v_denor = denor[sample_idx * n_blockxs_sample];
scalar_t grad_val = grad[sample_idx];
for (int j{local_tid}; j < sample_size; j += tstrd) {
scalar_t prob = one / (one + exp(-logits[j + sample_start]));
int64_t lb = labels[j + sample_start];
scalar_t m = v_numer - two * (prob * static_cast<scalar_t>(lb));
scalar_t n = v_denor - powf(prob, p);
scalar_t g = -pow(prob, p - one) * p * m;
if (lb == 1L) {
g += pow(prob, p) * two * (one - p) + (n * two);
}
g = - (g / powf(powf(prob, p) + n, two)) * prob * (one - prob);
grad_logits[j + sample_start] = grad_val * g;
}
}
}
// cuda forward and backward
at::Tensor SoftDice_forward_cuda(const at::Tensor &logits,
const at::Tensor &labels,
const float p,
const float smooth) {
// CHECK type and shape
AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda");
AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda");
const int batchsize = logits.size(0);
const int num_samples = logits.numel();
const int sample_size = num_samples / batchsize;
// parallel method for numer/denor
int blockx1 = 32;
while (blockx1 < sample_size) blockx1 *= 2;
blockx1 = std::max(32, std::min(BLOCKSIZE, blockx1 / 2));
int n_blockxs_sample = std::max(1, sample_size / blockx1);
int blocky1 = std::max(1, BLOCKSIZE / blockx1);
if (blocky1 > batchsize) blocky1 = batchsize;
int gridx1 = batchsize * n_blockxs_sample / blocky1;
gridx1 = std::max(1, std::min(4096, gridx1));
dim3 block1(blockx1, blocky1);
dim3 grid1(gridx1);
// parallel method for loss
int blockx2 = 32;
while (blockx2 < n_blockxs_sample) blockx2 *= 2;
blockx2 = std::max(32, std::min(BLOCKSIZE, blockx2 / 2));
int blocky2 = std::max(1, BLOCKSIZE / blockx2);
int gridx2 = std::min(batchsize / blocky2, 4096);
gridx2 = std::max(1, gridx2);
dim3 block2(blockx2, blocky2);
dim3 grid2(gridx2);
// allocate memory and cuda grid/block
// Note: should use torch::zeros rather than at::zeros, torch::zeros is variable
// and at::zeros is tensor
auto losses = torch::empty({batchsize}, logits.options());
auto numer = torch::zeros(
{batchsize * n_blockxs_sample},
logits.options());
auto denor = torch::zeros(
{batchsize * n_blockxs_sample},
logits.options());
if (losses.numel() == 0) {
THCudaCheck(cudaGetLastError());
return losses;
}
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(losses.scalar_type(), "soft dice forward", [&] {
int shm_size = blockx1 * blocky1 * sizeof(scalar_t);
compute_numer_denor<scalar_t><<<grid1, block1, shm_size, at::cuda::getCurrentCUDAStream()>>>(
batchsize, sample_size, n_blockxs_sample,
logits.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>(),
numer.contiguous().data_ptr<scalar_t>(),
denor.contiguous().data_ptr<scalar_t>(),
p
);
shm_size = blockx2 * blocky2 * sizeof(scalar_t);
SoftDiceForward<scalar_t><<<grid2, block2, shm_size, at::cuda::getCurrentCUDAStream()>>>(
batchsize, n_blockxs_sample,
numer.contiguous().data_ptr<scalar_t>(),
denor.contiguous().data_ptr<scalar_t>(),
losses.contiguous().data_ptr<scalar_t>(),
smooth
);
});
THCudaCheck(cudaGetLastError());
return losses;
}
at::Tensor SoftDice_backward_cuda(const at::Tensor &grad,
const at::Tensor &logits,
const at::Tensor &labels,
const float p,
const float smooth) {
// CHECK type and shape
AT_ASSERTM(grad.device().type() == c10::kCUDA, "grad should be cuda");
AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda");
AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda");
const int batchsize = logits.size(0);
const int num_samples = logits.numel();
const int sample_size = num_samples / batchsize;
// parallel settings for numer/denor
int blockx1 = 32;
while (blockx1 < sample_size) blockx1 *= 2;
blockx1 = std::max(32, std::min(BLOCKSIZE, blockx1 / 2));
int n_blockxs_sample = sample_size / blockx1;
int blocky1 = std::max(1, BLOCKSIZE / blockx1);
if (blocky1 > batchsize) blocky1 = batchsize;
int gridx1 = batchsize * n_blockxs_sample / blocky1;
gridx1 = std::max(1, std::min(4096, gridx1));
dim3 block1(blockx1, blocky1);
dim3 grid1(gridx1);
// parallel settings for reduce numer/denor
int blockx2 = 32;
while (blockx2 < n_blockxs_sample) blockx2 *= 2;
blockx2 = std::max(32, std::min(BLOCKSIZE, blockx2 / 2));
int blocky2 = std::max(1, BLOCKSIZE / blockx2);
int gridx2 = std::min(batchsize / blocky2, 4096);
gridx2 = std::max(1, gridx2);
dim3 block2(blockx2, blocky2);
dim3 grid2(gridx2);
// allocate memory and cuda grid/block
auto grad_logits = torch::empty_like(logits);
auto numer = torch::zeros(
{batchsize * n_blockxs_sample},
logits.options());
auto denor = torch::zeros(
{batchsize * n_blockxs_sample},
logits.options());
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "soft dice backward", [&] {
int shm_size = blockx1 * blocky1 * sizeof(scalar_t);
compute_numer_denor<scalar_t><<<grid1, block1, shm_size, at::cuda::getCurrentCUDAStream()>>>(
batchsize, sample_size, n_blockxs_sample,
logits.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>(),
numer.contiguous().data_ptr<scalar_t>(),
denor.contiguous().data_ptr<scalar_t>(),
p
);
shm_size = blockx2 * blocky2 * sizeof(scalar_t);
reduce_numer_denor<scalar_t><<<grid2, block2, shm_size, at::cuda::getCurrentCUDAStream()>>>(
batchsize, n_blockxs_sample,
numer.contiguous().data_ptr<scalar_t>(),
denor.contiguous().data_ptr<scalar_t>(),
smooth
);
SoftDiceBackward<scalar_t><<<grid1, block1, 0, at::cuda::getCurrentCUDAStream()>>>(
batchsize, sample_size, n_blockxs_sample,
logits.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>(),
grad.contiguous().data_ptr<scalar_t>(),
numer.contiguous().data_ptr<scalar_t>(),
denor.contiguous().data_ptr<scalar_t>(),
grad_logits.contiguous().data_ptr<scalar_t>(),
p
);
});
THCudaCheck(cudaGetLastError());
return grad_logits;
}
// python interface
at::Tensor SoftDice_forward(const at::Tensor &logits,
const at::Tensor &labels,
const float p,
const float smooth) {
if ((logits.device().type() != c10::kCUDA) || (labels.device().type() != c10::kCUDA)) {
AT_ERROR("this dice loss only supports gpu mode\n");
}
at::DeviceGuard guard(logits.device());
return SoftDice_forward_cuda(logits, labels, p, smooth);
}
at::Tensor SoftDice_backward(const at::Tensor &grad,
const at::Tensor &logits,
const at::Tensor &labels,
const float p,
const float smooth) {
if ((logits.device().type() != c10::kCUDA) || (labels.device().type() != c10::kCUDA)) {
AT_ERROR("this dice loss only supports gpu mode\n");
}
at::DeviceGuard guard(logits.device());
return SoftDice_backward_cuda(grad, logits, labels, p, smooth);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("soft_dice_forward", &SoftDice_forward, "soft-dice forward");
m.def("soft_dice_backward", &SoftDice_backward, "soft-dice backward");
}
|
222f041c3e8b3073339a3ae197bad84c73328b33.hip | // !!! This is a file automatically generated by hipify!!!
#include "STLPSimulatorCUDA.h"
#include "ShaderManager.h"
#include "STLPUtils.h"
#include "Utils.h"
#include "HeightMap.h"
#include "CUDAUtils.cuh"
#include "ParticleSystem.h"
#include <random>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
__constant__ int d_const_numProfiles;
__constant__ float d_const_P0;
//__constant__ float d_const_delta_t;
__constant__ float d_const_groundHeight;
__constant__ float d_const_boxTopHeight;
__constant__ float d_const_latticeHeight;
//__constant__ glm::vec2 *d_const_ambientTempCurve;
//
//__constant__ glm::vec2 *d_const_dryAdiabatProfiles;
//__constant__ int *d_const_dryAdiabatOffsets; // since each dry adiabat can have different amount of vertices
//
//__constant__ glm::vec2 *d_const_moistAdiabatProfiles;
//__constant__ int *d_const_moistAdiabatOffsets; // since each moist adiabat can have different amount of vertices
//
//__constant__ glm::vec2 *d_const_CCLProfiles;
//__constant__ glm::vec2 *d_const_TcProfiles;
__device__ float getNormalizedTemp(float T, float y) {
return (T - MIN_TEMP) / (MAX_TEMP - MIN_TEMP) + (1.0f - y);
}
__device__ float getNormalizedPres(float P) {
return ((log10f(P) - log10f(MIN_P)) / (log10f(d_const_P0) - log10f(MIN_P)));
}
__device__ float getDenormalizedTemp(float x, float y) {
return (x + y - 1.0f) * (MAX_TEMP - MIN_TEMP) + MIN_TEMP;
}
__device__ float getDenormalizedPres(float y) {
return powf(10.0f, y * (log10f(d_const_P0) - log10f(MIN_P)) + log10f(MIN_P));
}
__device__ glm::vec2 getNormalizedCoords(glm::vec2 coords) {
glm::vec2 res;
res.y = getNormalizedPres(coords.y);
res.x = getNormalizedTemp(coords.x, res.y);
return res;
}
__device__ glm::vec2 getDenormalizedCoords(glm::vec2 coords) {
glm::vec2 res;
res.x = getDenormalizedTemp(coords.x, coords.y);
res.y = getDenormalizedPres(coords.y);
return res;
}
__device__ glm::vec2 getNormalizedCoords(float T, float P) {
return getNormalizedCoords(glm::vec2(T, P));
}
__device__ glm::vec2 getDenormalizedCoords(float x, float y) {
return getDenormalizedCoords(glm::vec2(x, y));
}
__device__ float computeThetaFromAbsoluteK_dev(float T, float P, float P0 = 1000.0f) {
float tmp = (P == P0) ? 1.0f : pow(P0 / P, k_ratio);
return T * tmp;
}
__device__ float getKelvin_dev(float T) {
return T + 273.15f;
}
__device__ float getCelsius_dev(float T) {
return T - 273.15f;
}
__device__ void toKelvin_dev(float &T) {
T += 273.15f;
}
__device__ void toCelsius_dev(float &T) {
T -= 273.15f;
}
__device__ float getPressureVal_dev(float height) {
// based on CRC Handbook of Chemistry and Physics
return pow(((44331.514f - height) / 11880.516f), 1 / 0.1902632f);
}
__device__ void normalizeFromRange_dev(float &val, float min, float max) {
val = (val - min) / (max - min);
}
__device__ void rangeToRange_dev(float &val, float origMin, float origMax, float newMin, float newMax) {
normalizeFromRange_dev(val, origMin, origMax);
val *= (newMax - newMin);
val += newMin;
}
__device__ void mapToSimulationBox_dev(float & val) {
rangeToRange_dev(val, d_const_groundHeight, d_const_boxTopHeight, 0.0f, d_const_latticeHeight);
}
__device__ void mapFromSimulationBox_dev(float & val) {
rangeToRange_dev(val, 0.0f, d_const_latticeHeight, d_const_groundHeight, d_const_boxTopHeight);
}
__device__ float getMappedFromSimulationBox_dev(float val) {
mapFromSimulationBox_dev(val);
return val;
}
//! Finds an intersection of a curve with an isobar defined by its normalized pressure value.
/*!
\param[in] curveVertices Device pointer to array of curve vertices.
\param[in] numCurveVertices Length of the array containing the curve vertices.
\param[in] normP Normalized pressure value of the isobar.
\return Intersection point clamped to diagram bounds.
*/
__device__ glm::vec2 getIntersectionWithIsobar(glm::vec2 *curveVertices, int numCurveVertices, float normP) {
#define USE_BINARY_ISOBAR_INTERSECTION_SEARCH
#ifndef USE_BINARY_ISOBAR_INTERSECTION_SEARCH
// naively search for correct interval - TODO better solutions are: binary search and direct indexation using (non-normalized) pressure - needs better design
for (int i = 0; i < numCurveVertices - 1; i += 1) {
if (curveVertices[i + 1].y > normP) {
continue;
}
if (curveVertices[i + 1].y <= normP) {
float t = (normP - curveVertices[i + 1].y) / (curveVertices[i].y - curveVertices[i + 1].y);
float normalizedTemperature = t * curveVertices[i].x + (1.0f - t) * curveVertices[i + 1].x;
return glm::vec2(normalizedTemperature, normP);
}
}
return glm::vec2();
#else
// clamp to max values - the previous (naive search) interpolates the top-most and bottom-most edges (even beyond the curve)
if (normP >= curveVertices[0].y) {
return curveVertices[0];
} else if (normP <= curveVertices[numCurveVertices - 1].y) {
return curveVertices[numCurveVertices - 1];
}
int left = 0; // largest normP here
int right = numCurveVertices - 1; // smallest normP here
int curr;
while (left <= right) {
curr = (left + right) / 2;
if (curveVertices[curr].y > normP) {
left = curr + 1;
} else if (curveVertices[curr].y < normP) {
right = curr - 1;
} else {
return curveVertices[curr]; // no need to interpolate since the values match (the particle lies on an isobar that goes through the curve vertex)
}
}
// left will now hold index to the vertex above (in the curve) normP (normalized pressure at curveVertices[left] is smaller than normP)
// right is the opposite
float t = (normP - curveVertices[left].y) / (curveVertices[right].y - curveVertices[left].y);
float normalizedTemperature = t * curveVertices[right].x + (1.0f - t) * curveVertices[left].x;
return glm::vec2(normalizedTemperature, normP);
#endif
}
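// Worked example of the binary-search branch above (illustrative values
// only): for curveVertices = {(0.3, 0.9), (0.5, 0.5), (0.8, 0.1)} and
// normP = 0.3, the search terminates with left = 2 and right = 1, so
// t = (0.3 - 0.1) / (0.5 - 0.1) = 0.5 and the returned point is
// (0.5 * 0.8 + 0.5 * 0.5, 0.3) = (0.65, 0.3): the temperature is
// interpolated between the two curve vertices straddling the isobar.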
//! Runs the STLP simulation inside a kernel.
/*!
\param[in] particleVertices Device pointer to particle vertices array.
\param[in] numParticles Number of particles that should be simulated (numActiveParticles).
\param[in] delta_t Delta time to be used by the simulator.
\param[in] verticalVelocities Device pointer to vertical velocities array of the particles.
\param[in] profileIndices Device pointer to profile indices of the particles.
\param[in] ambientTempCurve Array of vertices of the ambient temperature sounding curve.
\param[in] numAmbientTempCurveVertices Number of vertices of the ambient temperature sounding curve.
\param[in] dryAdiabatProfiles Array of all dry adiabat profile curves.
\param[in] dryAdiabatOffsetsAndLengths Array of all dry adiabat curve offsets and lengths.
\param[in] moistAdiabatProfiles Array of all moist adiabat profile curves.
\param[in] moistAdiabatOffsetsAndLengths Array of all moist adiabat curve offsets and lengths.
\param[in] CCLProfiles Array of all CCL (convective condensation level) points for all profiles.
\param[in] TcProfiles Array of all Tc (convective temperature) points for all profiles.
\param[in] diagramParticleVertices Array of particle vertices that are shown in the diagram.
\param[in] dividePrevVelocity Whether to divide previous velocity in the simulation to produce artificial damping.
\param[in] prevVelocityDivisor By how much the previous velocity is divided if dividePrevVelocity is enabled.
*/
__global__ void simulationStepKernel(glm::vec3 *particleVertices, int numParticles, float delta_t, float *verticalVelocities, int *profileIndices, /*float *particlePressures, */glm::vec2 *ambientTempCurve, int numAmbientTempCurveVertices, glm::vec2 *dryAdiabatProfiles, glm::ivec2 *dryAdiabatOffsetsAndLengths, glm::vec2 *moistAdiabatProfiles, glm::ivec2 *moistAdiabatOffsetsAndLengths, glm::vec2 *CCLProfiles, glm::vec2 *TcProfiles, glm::vec2 *diagramParticleVertices, bool dividePrevVelocity, float prevVelocityDivisor) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < numParticles) {
float particlePressure = getPressureVal_dev(particleVertices[idx].y);
float normP = getNormalizedPres(particlePressure);
///*
//Stop particles out of diagram bounds:
// - the intersection with isobar test does work even beyond the diagram bounds but
// -> but using Duarte's approach, particles that start directly on the moist adiabat
// will accelerate infinitely, thus crashing the application due to NaN and Inf operations
//*/
//if (normP > ambientTempCurve[0].y || normP < ambientTempCurve[numAmbientTempCurveVertices - 1].y) {
// verticalVelocities[idx] = 0.0f;
// //return;
//}
glm::vec2 ambientIntersection = getIntersectionWithIsobar(ambientTempCurve, numAmbientTempCurveVertices, normP);
glm::vec2 particleCurveIntersection;
if (particlePressure > CCLProfiles[profileIndices[idx]].y) {
particleCurveIntersection = getIntersectionWithIsobar(&dryAdiabatProfiles[dryAdiabatOffsetsAndLengths[profileIndices[idx]].x], dryAdiabatOffsetsAndLengths[profileIndices[idx]].y, normP);
} else {
particleCurveIntersection = getIntersectionWithIsobar(&moistAdiabatProfiles[moistAdiabatOffsetsAndLengths[profileIndices[idx]].x], moistAdiabatOffsetsAndLengths[profileIndices[idx]].y, normP);
}
float ambientTemp = getDenormalizedTemp(ambientIntersection.x, normP);
float particleTemp = getDenormalizedTemp(particleCurveIntersection.x, normP);
diagramParticleVertices[idx].x = particleCurveIntersection.x;
diagramParticleVertices[idx].y = normP;
toKelvin_dev(ambientTemp);
toKelvin_dev(particleTemp);
float ambientTheta = computeThetaFromAbsoluteK_dev(ambientTemp, particlePressure);
float particleTheta = computeThetaFromAbsoluteK_dev(particleTemp, particlePressure);
float a = 9.81f * (particleTheta - ambientTheta) / ambientTheta;
/*
Stop particles out of diagram bounds:
- the intersection with isobar test does work even beyond the diagram bounds but
-> but using Duarte's approach, particles that start directly on the moist adiabat
will accelerate infinitely, thus crashing the application due to NaN and Inf operations.
Checking here (after acceleration was computed) gives us the option to determine whether the particle
would accelerate further out of bounds and go to infinity (making the simulator unstable) or not.
If not, we continue with computation. One important thing is the fact that getIntersectionWithIsobar
function clamps the returned vector to the last valid values in the diagram, making this whole process valid.
*/
// Particle below diagram that has negative acceleration is not permitted!
if (normP > ambientTempCurve[0].y && a < 0.0f) {
return;
}
// Particle above diagram that has positive acceleration is not permitted!
if (normP < ambientTempCurve[numAmbientTempCurveVertices - 1].y && a > 0.0f) {
return;
}
//if (normP > ambientTempCurve[0].y || normP < ambientTempCurve[numAmbientTempCurveVertices - 1].y) {
// ///*
// // x * y > 0.0f returns true if x and y have the same sign
// // Note: This does not work for infinity since 0 * infinity is NaN.
// // We can have it here since this is exactly the thing preventing infinity and NaN values.
// //*/
// //if (verticalVelocities[idx] * a > 0.0f) {
// // return; // do nothing if the particle would accelerate to infinity
// //}
//}
if (dividePrevVelocity) {
verticalVelocities[idx] /= prevVelocityDivisor;
}
verticalVelocities[idx] = verticalVelocities[idx] + a * delta_t;
float deltaY = verticalVelocities[idx] * delta_t + 0.5f * a * delta_t * delta_t;
particleVertices[idx].y += deltaY;
}
}
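// Summary of the update performed by simulationStepKernel (explanatory
// comment): the particle and the ambient sounding are sampled on the
// particle's isobar, both temperatures are converted to potential
// temperatures theta, and the buoyant acceleration
// a = 9.81 * (theta_particle - theta_ambient) / theta_ambient
// drives the explicit step v = v + a * delta_t followed by
// y = y + v * delta_t + 0.5 * a * delta_t^2, with the previous velocity
// optionally divided by prevVelocityDivisor as artificial damping.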
__global__ void simulationStepKernel_backup(glm::vec3 *particleVertices, int numParticles, float delta_t, float *verticalVelocities, int *profileIndices, /*float *particlePressures, */glm::vec2 *ambientTempCurve, int numAmbientTempCurveVertices, glm::vec2 *dryAdiabatProfiles, glm::ivec2 *dryAdiabatOffsetsAndLengths, glm::vec2 *moistAdiabatProfiles, glm::ivec2 *moistAdiabatOffsetsAndLengths, glm::vec2 *CCLProfiles, glm::vec2 *TcProfiles, glm::vec2 *diagramParticleVertices, bool dividePrevVelocity, float prevVelocityDivisor) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < numParticles) {
//float particlePressure = getPressureVal_dev(getMappedFromSimulationBox_dev(particleVertices[idx].y));
float particlePressure = getPressureVal_dev(particleVertices[idx].y);
if (particlePressure > CCLProfiles[profileIndices[idx]].y) {
//printf("| pressure = %0.2f\n", particlePressures[idx]);
//particleVertices[idx].y += 0.1f;
float normP = getNormalizedPres(particlePressure);
glm::vec2 ambientIntersection = getIntersectionWithIsobar(ambientTempCurve, numAmbientTempCurveVertices, normP);
glm::vec2 dryAdiabatIntersection = getIntersectionWithIsobar(&dryAdiabatProfiles[dryAdiabatOffsetsAndLengths[profileIndices[idx]].x], dryAdiabatOffsetsAndLengths[profileIndices[idx]].y, normP);
float ambientTemp = getDenormalizedTemp(ambientIntersection.x, normP);
float particleTemp = getDenormalizedTemp(dryAdiabatIntersection.x, normP);
//printf("| ambientTemp [deg C] = %0.2f\n", ambientTemp);
//printf("| particleTemp [deg C] = %0.2f\n", particleTemp);
diagramParticleVertices[idx].x = dryAdiabatIntersection.x;
diagramParticleVertices[idx].y = normP;
toKelvin_dev(ambientTemp);
toKelvin_dev(particleTemp);
float ambientTheta = computeThetaFromAbsoluteK_dev(ambientTemp, particlePressure);
float particleTheta = computeThetaFromAbsoluteK_dev(particleTemp, particlePressure);
float a = 9.81f * (particleTheta - ambientTheta) / ambientTheta;
if (dividePrevVelocity) {
verticalVelocities[idx] /= prevVelocityDivisor;
}
verticalVelocities[idx] = verticalVelocities[idx] + a * delta_t;
float deltaY = verticalVelocities[idx] * delta_t + 0.5f * a * delta_t * delta_t;
//mapFromSimulationBox_dev(particleVertices[idx].y);
particleVertices[idx].y += deltaY;
//mapToSimulationBox_dev(particleVertices[idx].y);
} else {
float normP = getNormalizedPres(particlePressure);
glm::vec2 ambientIntersection = getIntersectionWithIsobar(ambientTempCurve, numAmbientTempCurveVertices, normP);
glm::vec2 moistAdiabatIntersection = getIntersectionWithIsobar(&moistAdiabatProfiles[moistAdiabatOffsetsAndLengths[profileIndices[idx]].x], moistAdiabatOffsetsAndLengths[profileIndices[idx]].y, normP);
float ambientTemp = getDenormalizedTemp(ambientIntersection.x, normP);
float particleTemp = getDenormalizedTemp(moistAdiabatIntersection.x, normP);
diagramParticleVertices[idx].x = moistAdiabatIntersection.x;
diagramParticleVertices[idx].y = normP;
toKelvin_dev(ambientTemp);
toKelvin_dev(particleTemp);
float ambientTheta = computeThetaFromAbsoluteK_dev(ambientTemp, particlePressure);
float particleTheta = computeThetaFromAbsoluteK_dev(particleTemp, particlePressure);
float a = 9.81f * (particleTheta - ambientTheta) / ambientTheta;
if (dividePrevVelocity) {
verticalVelocities[idx] /= prevVelocityDivisor;
}
verticalVelocities[idx] = verticalVelocities[idx] + a * delta_t;
float deltaY = verticalVelocities[idx] * delta_t + 0.5f * a * delta_t * delta_t;
//mapFromSimulationBox_dev(particleVertices[idx].y);
particleVertices[idx].y += deltaY;
//mapToSimulationBox_dev(particleVertices[idx].y);
}
}
}
STLPSimulatorCUDA::STLPSimulatorCUDA(VariableManager * vars, STLPDiagram * stlpDiagram) : vars(vars), stlpDiagram(stlpDiagram) {
heightMap = vars->heightMap;
groundHeight = getAltitudeFromPressure(stlpDiagram->P0);
boxTopHeight = groundHeight + simulationBoxHeight;
layerVisShader = ShaderManager::getShaderPtr("singleColorAlpha");
initBuffers();
profileMap = new ppmImage("profile_maps/120x80_pm_01.ppm");
}
STLPSimulatorCUDA::~STLPSimulatorCUDA() {
if (profileMap != nullptr) {
delete profileMap;
}
}
void STLPSimulatorCUDA::initBuffers() {
ShaderProgram *s = ShaderManager::getShaderPtr("pointSpriteTest");
uploadProfileIndicesUniforms(s);
s = ShaderManager::getShaderPtr("volume_1st_pass_alt2");
uploadProfileIndicesUniforms(s);
s = ShaderManager::getShaderPtr("volume_2nd_pass_alt2");
uploadProfileIndicesUniforms(s);
vector<glm::vec3> vertices;
glGenVertexArrays(1, &CCLLevelVAO);
glBindVertexArray(CCLLevelVAO);
glGenBuffers(1, &CCLLevelVBO);
glBindBuffer(GL_ARRAY_BUFFER, CCLLevelVBO);
float altitude;
float w = vars->heightMap->getWorldWidth();
float d = vars->heightMap->getWorldDepth();
if (stlpDiagram->useOrographicParameters) {
altitude = getAltitudeFromPressure(stlpDiagram->LCL.y);
} else {
altitude = getAltitudeFromPressure(stlpDiagram->CCL.y);
}
//mapToSimulationBox(altitude);
vertices.push_back(glm::vec3(0.0f, altitude, 0.0f));
vertices.push_back(glm::vec3(0.0f, altitude, d));
vertices.push_back(glm::vec3(w, altitude, d));
vertices.push_back(glm::vec3(w, altitude, 0.0f));
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * 4, &vertices[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(glm::vec3), (void *)0);
glBindVertexArray(0);
vertices.clear();
glGenVertexArrays(1, &ELLevelVAO);
glBindVertexArray(ELLevelVAO);
glGenBuffers(1, &ELLevelVBO);
glBindBuffer(GL_ARRAY_BUFFER, ELLevelVBO);
if (stlpDiagram->useOrographicParameters) {
altitude = getAltitudeFromPressure(stlpDiagram->orographicEL.y);
} else {
altitude = getAltitudeFromPressure(stlpDiagram->EL.y);
}
vertices.push_back(glm::vec3(0.0f, altitude, 0.0f));
vertices.push_back(glm::vec3(0.0f, altitude, d));
vertices.push_back(glm::vec3(w, altitude, d));
vertices.push_back(glm::vec3(w, altitude, 0.0f));
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * 4, &vertices[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(glm::vec3), (void *)0);
glBindVertexArray(0);
}
void STLPSimulatorCUDA::uploadProfileIndicesUniforms(ShaderProgram *shader) {
shader->use();
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
string fullName = "u_ProfileCCLs[" + to_string(i) + "]";
float P = stlpDiagram->CCLProfiles[i].y;
float y = getAltitudeFromPressure(P);
//mapToSimulationBox(y);
shader->setFloat(fullName, y);
}
shader->setInt("u_NumProfiles", stlpDiagram->numProfiles);
}
void STLPSimulatorCUDA::initCUDA() {
blockDim = dim3(256, 1, 1);
gridDim = dim3((int)ceil((float)particleSystem->numParticles / (float)blockDim.x), 1, 1);
// ambient temp curve can be mapped to VBO, no need for this
CHECK_ERROR(hipMalloc((void**)&d_ambientTempCurve, sizeof(glm::vec2) * stlpDiagram->ambientCurve.vertices.size()));
CHECK_ERROR(hipMemcpy(d_ambientTempCurve, &stlpDiagram->ambientCurve.vertices[0], sizeof(glm::vec2) * stlpDiagram->ambientCurve.vertices.size(), hipMemcpyHostToDevice));
CHECK_ERROR(hipMemcpyToSymbol(d_const_numProfiles, &stlpDiagram->numProfiles, sizeof(int)));
CHECK_ERROR(hipMemcpyToSymbol(d_const_P0, &stlpDiagram->P0, sizeof(float)));
//CHECK_ERROR(hipMemcpyToSymbol(d_const_delta_t, &delta_t, sizeof(float)));
CHECK_ERROR(hipMemcpyToSymbol(d_const_boxTopHeight, &boxTopHeight, sizeof(float)));
CHECK_ERROR(hipMemcpyToSymbol(d_const_groundHeight, &groundHeight, sizeof(float)));
float latticeH = (float)vars->latticeHeight;
CHECK_ERROR(hipMemcpyToSymbol(d_const_latticeHeight, &latticeH, sizeof(float)));
vector<int> itmp;
//itmp.clear();
vector<glm::vec2> tmp;
vector<glm::ivec2> ivectmp;
tmp.reserve(stlpDiagram->numProfiles * stlpDiagram->dryAdiabatProfiles[0].vertices.size()); // probably the largest possible collection
// DRY ADIABAT OFFSETS
tmp.clear();
ivectmp.clear();
int sum = 0;
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
itmp.push_back(sum);
int prevSum = sum;
sum += (int)stlpDiagram->dryAdiabatProfiles[i].vertices.size();
ivectmp.push_back(glm::ivec2(prevSum, sum - prevSum)); // x = offset, y = length
//cout << stlpDiagram->dryAdiabatProfiles[i].vertices.size() << endl;
}
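// Illustrative layout (hypothetical vertex counts): if profile 0 has 4 vertices and profile 1 has 6,
// ivectmp holds {(0, 4), (4, 6)}, so profile i's vertices occupy d_dryAdiabatProfiles[offset .. offset + length - 1].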
//CHECK_ERROR(hipMemcpyToSymbol(d_const_dryAdiabatOffsets, &itmp[0], sizeof(int) * itmp.size()));
//hipMalloc((void**)&d_dryAdiabatOffsets, sizeof(int) * itmp.size());
//CHECK_ERROR(hipMemcpy(d_dryAdiabatOffsets, &itmp[0], sizeof(int) * itmp.size(), hipMemcpyHostToDevice));
CHECK_ERROR(hipMalloc((void**)&d_dryAdiabatOffsetsAndLengths, sizeof(glm::ivec2) * ivectmp.size()));
CHECK_ERROR(hipMemcpy(d_dryAdiabatOffsetsAndLengths, &ivectmp[0], sizeof(glm::ivec2) * ivectmp.size(), hipMemcpyHostToDevice));
// MOIST ADIABAT OFFSETS
itmp.clear();
tmp.clear();
ivectmp.clear();
sum = 0;
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
itmp.push_back(sum);
int prevSum = sum;
sum += (int)stlpDiagram->moistAdiabatProfiles[i].vertices.size();
ivectmp.push_back(glm::ivec2(prevSum, sum - prevSum)); // x = offset, y = length
//cout << stlpDiagram->moistAdiabatProfiles[i].vertices.size() << endl;
}
//CHECK_ERROR(hipMemcpyToSymbol(d_const_moistAdiabatOffsets, &itmp[0], sizeof(int) * itmp.size()));
//hipMalloc((void**)&d_moistAdiabatOffsets, sizeof(int) * itmp.size());
//CHECK_ERROR(hipMemcpy(d_moistAdiabatOffsets, &itmp[0], sizeof(int) * itmp.size(), hipMemcpyHostToDevice));
CHECK_ERROR(hipMalloc((void**)&d_moistAdiabatOffsetsAndLengths, sizeof(glm::ivec2) * ivectmp.size()));
CHECK_ERROR(hipMemcpy(d_moistAdiabatOffsetsAndLengths, &ivectmp[0], sizeof(glm::ivec2) * ivectmp.size(), hipMemcpyHostToDevice));
// DRY ADIABATS
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
for (int j = 0; j < stlpDiagram->dryAdiabatProfiles[i].vertices.size(); j++) {
tmp.push_back(stlpDiagram->dryAdiabatProfiles[i].vertices[j]);
}
}
//CHECK_ERROR(hipMemcpyToSymbol(d_const_dryAdiabatProfiles, &tmp[0], sizeof(glm::vec2) * tmp.size()));
CHECK_ERROR(hipMalloc((void**)&d_dryAdiabatProfiles, sizeof(glm::vec2) * tmp.size()));
CHECK_ERROR(hipMemcpy(d_dryAdiabatProfiles, &tmp[0], sizeof(glm::vec2) * tmp.size(), hipMemcpyHostToDevice));
// MOIST ADIABATS
tmp.clear();
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
for (int j = 0; j < stlpDiagram->moistAdiabatProfiles[i].vertices.size(); j++) {
tmp.push_back(stlpDiagram->moistAdiabatProfiles[i].vertices[j]);
}
}
//CHECK_ERROR(hipMemcpyToSymbol(d_const_moistAdiabatProfiles, &tmp[0], sizeof(glm::vec2) * tmp.size()));
CHECK_ERROR(hipMalloc((void**)&d_moistAdiabatProfiles, sizeof(glm::vec2) * tmp.size()));
CHECK_ERROR(hipMemcpy(d_moistAdiabatProfiles, &tmp[0], sizeof(glm::vec2) * tmp.size(), hipMemcpyHostToDevice));
// CCL Profiles
tmp.clear();
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
tmp.push_back(stlpDiagram->CCLProfiles[i]);
}
//CHECK_ERROR(hipMemcpyToSymbol(d_const_CCLProfiles, &tmp[0], sizeof(glm::vec2) * tmp.size()));
CHECK_ERROR(hipMalloc((void**)&d_CCLProfiles, sizeof(glm::vec2) * tmp.size()));
CHECK_ERROR(hipMemcpy(d_CCLProfiles, &tmp[0], sizeof(glm::vec2) * tmp.size(), hipMemcpyHostToDevice));
// Tc Profiles
tmp.clear();
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
tmp.push_back(stlpDiagram->TcProfiles[i]);
}
//CHECK_ERROR(hipMemcpyToSymbol(d_const_TcProfiles, &tmp[0], sizeof(glm::vec2) * tmp.size()));
CHECK_ERROR(hipMalloc((void**)&d_TcProfiles, sizeof(glm::vec2) * tmp.size()));
CHECK_ERROR(hipMemcpy(d_TcProfiles, &tmp[0], sizeof(glm::vec2) * tmp.size(), hipMemcpyHostToDevice));
}
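// initCUDAGeneral: allocates worst-case device buffers once (vars->stlpMaxProfiles profiles with up to
// stlpDiagram->maxVerticesPerCurve vertices each) so uploadDataFromDiagramToGPU can refill them without reallocating.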
void STLPSimulatorCUDA::initCUDAGeneral() {
blockDim = dim3(256, 1, 1);
gridDim = dim3((int)ceil((float)particleSystem->numParticles / (float)blockDim.x), 1, 1);
CHECK_ERROR(hipMalloc((void**)&d_ambientTempCurve, sizeof(glm::vec2) * stlpDiagram->ambientCurve.vertices.size()));
currAmbientCurveVertexCount = (int)stlpDiagram->ambientCurve.vertices.size();
CHECK_ERROR(hipMalloc((void**)&d_dryAdiabatOffsetsAndLengths, sizeof(glm::ivec2) * vars->stlpMaxProfiles));
CHECK_ERROR(hipMalloc((void**)&d_moistAdiabatOffsetsAndLengths, sizeof(glm::ivec2) * vars->stlpMaxProfiles));
CHECK_ERROR(hipMalloc((void**)&d_dryAdiabatProfiles, sizeof(glm::vec2) * vars->stlpMaxProfiles * stlpDiagram->maxVerticesPerCurve));
CHECK_ERROR(hipMalloc((void**)&d_moistAdiabatProfiles, sizeof(glm::vec2) * vars->stlpMaxProfiles * stlpDiagram->maxVerticesPerCurve));
CHECK_ERROR(hipMalloc((void**)&d_CCLProfiles, sizeof(glm::vec2) * vars->stlpMaxProfiles));
CHECK_ERROR(hipMalloc((void**)&d_TcProfiles, sizeof(glm::vec2) * vars->stlpMaxProfiles));
}
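// uploadDataFromDiagramToGPU: re-uploads the (possibly edited) diagram data into the preallocated buffers;
// only the ambient curve buffer is reallocated, and only when its vertex count has changed.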
void STLPSimulatorCUDA::uploadDataFromDiagramToGPU() {
if (currAmbientCurveVertexCount != stlpDiagram->ambientCurve.vertices.size()) {
CHECK_ERROR(hipFree(d_ambientTempCurve));
CHECK_ERROR(hipMalloc((void**)&d_ambientTempCurve, sizeof(glm::vec2) * stlpDiagram->ambientCurve.vertices.size()));
currAmbientCurveVertexCount = (int)stlpDiagram->ambientCurve.vertices.size();
}
//cout << "Ambient curve length: " << stlpDiagram->ambientCurve.vertices.size() << endl;
// ambient temp curve can be mapped to VBO, no need for this
CHECK_ERROR(hipMemcpy(d_ambientTempCurve, &stlpDiagram->ambientCurve.vertices[0], sizeof(glm::vec2) * stlpDiagram->ambientCurve.vertices.size(), hipMemcpyHostToDevice));
CHECK_ERROR(hipMemcpyToSymbol(d_const_numProfiles, &stlpDiagram->numProfiles, sizeof(int)));
CHECK_ERROR(hipMemcpyToSymbol(d_const_P0, &stlpDiagram->P0, sizeof(float)));
//CHECK_ERROR(hipMemcpyToSymbol(d_const_delta_t, &delta_t, sizeof(float)));
CHECK_ERROR(hipMemcpyToSymbol(d_const_boxTopHeight, &boxTopHeight, sizeof(float)));
CHECK_ERROR(hipMemcpyToSymbol(d_const_groundHeight, &groundHeight, sizeof(float)));
float latticeH = (float)vars->latticeHeight;
CHECK_ERROR(hipMemcpyToSymbol(d_const_latticeHeight, &latticeH, sizeof(float)));
vector<int> itmp;
//itmp.clear();
vector<glm::vec2> tmp;
vector<glm::ivec2> ivectmp;
tmp.reserve(stlpDiagram->numProfiles * stlpDiagram->dryAdiabatProfiles[0].vertices.size()); // probably the largest possible collection
// DRY ADIABAT OFFSETS
tmp.clear();
ivectmp.clear();
int sum = 0;
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
itmp.push_back(sum);
int prevSum = sum;
sum += (int)stlpDiagram->dryAdiabatProfiles[i].vertices.size();
ivectmp.push_back(glm::ivec2(prevSum, sum - prevSum)); // x = offset, y = length
}
CHECK_ERROR(hipMemcpy(d_dryAdiabatOffsetsAndLengths, &ivectmp[0], sizeof(glm::ivec2) * ivectmp.size(), hipMemcpyHostToDevice));
// MOIST ADIABAT OFFSETS
itmp.clear();
tmp.clear();
ivectmp.clear();
sum = 0;
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
itmp.push_back(sum);
int prevSum = sum;
sum += (int)stlpDiagram->moistAdiabatProfiles[i].vertices.size();
ivectmp.push_back(glm::ivec2(prevSum, sum - prevSum)); // x = offset, y = length
}
CHECK_ERROR(hipMemcpy(d_moistAdiabatOffsetsAndLengths, &ivectmp[0], sizeof(glm::ivec2) * ivectmp.size(), hipMemcpyHostToDevice));
// DRY ADIABATS
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
for (int j = 0; j < stlpDiagram->dryAdiabatProfiles[i].vertices.size(); j++) {
tmp.push_back(stlpDiagram->dryAdiabatProfiles[i].vertices[j]);
}
}
//cout << vars->stlpMaxProfiles << ", " << tmp.size() << endl;
CHECK_ERROR(hipMemcpy(d_dryAdiabatProfiles, &tmp[0], sizeof(glm::vec2) * tmp.size(), hipMemcpyHostToDevice));
// MOIST ADIABATS
tmp.clear();
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
for (int j = 0; j < stlpDiagram->moistAdiabatProfiles[i].vertices.size(); j++) {
tmp.push_back(stlpDiagram->moistAdiabatProfiles[i].vertices[j]);
}
}
//cout << (vars->stlpMaxProfiles * stlpDiagram->maxVerticesPerCurve) << ", " << tmp.size() << endl;
CHECK_ERROR(hipMemcpy(d_moistAdiabatProfiles, &tmp[0], sizeof(glm::vec2) * tmp.size(), hipMemcpyHostToDevice));
// CCL Profiles
tmp.clear();
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
tmp.push_back(stlpDiagram->CCLProfiles[i]);
}
CHECK_ERROR(hipMemcpy(d_CCLProfiles, &tmp[0], sizeof(glm::vec2) * tmp.size(), hipMemcpyHostToDevice));
// Tc Profiles
tmp.clear();
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
tmp.push_back(stlpDiagram->TcProfiles[i]);
}
CHECK_ERROR(hipMemcpy(d_TcProfiles, &tmp[0], sizeof(glm::vec2) * tmp.size(), hipMemcpyHostToDevice));
ShaderProgram *s = ShaderManager::getShaderPtr("pointSpriteTest");
uploadProfileIndicesUniforms(s);
s = ShaderManager::getShaderPtr("volume_1st_pass_alt2");
uploadProfileIndicesUniforms(s);
s = ShaderManager::getShaderPtr("volume_2nd_pass_alt2");
uploadProfileIndicesUniforms(s);
refreshLevelVisualizationBuffers();
}
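// doStep: maps the OpenGL particle VBOs into CUDA device pointers, runs one simulationStepKernel pass
// over the active particles and unmaps the resources again so OpenGL can render the updated positions.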
void STLPSimulatorCUDA::doStep() {
size_t num_bytes;
glm::vec3 *d_mappedParticleVerticesVBO;
glm::vec2 *d_mappedDiagramParticleVerticesVBO;
int *d_mappedParticleProfilesVBO;
CHECK_ERROR(hipGraphicsMapResources(1, &particleSystem->cudaParticleVerticesVBO, 0));
CHECK_ERROR(hipGraphicsResourceGetMappedPointer((void **)&d_mappedParticleVerticesVBO, &num_bytes, particleSystem->cudaParticleVerticesVBO));
CHECK_ERROR(hipGraphicsMapResources(1, &particleSystem->cudaParticleProfilesVBO, 0));
CHECK_ERROR(hipGraphicsResourceGetMappedPointer((void **)&d_mappedParticleProfilesVBO, &num_bytes, particleSystem->cudaParticleProfilesVBO));
CHECK_ERROR(hipGraphicsMapResources(1, &particleSystem->cudaDiagramParticleVerticesVBO, 0));
CHECK_ERROR(hipGraphicsResourceGetMappedPointer((void **)&d_mappedDiagramParticleVerticesVBO, &num_bytes, particleSystem->cudaDiagramParticleVerticesVBO));
hipLaunchKernelGGL(( simulationStepKernel), dim3(gridDim.x), dim3(blockDim.x), 0, 0, d_mappedParticleVerticesVBO, particleSystem->numActiveParticles, delta_t, particleSystem->d_verticalVelocities, d_mappedParticleProfilesVBO, d_ambientTempCurve, (int)stlpDiagram->ambientCurve.vertices.size(), d_dryAdiabatProfiles, d_dryAdiabatOffsetsAndLengths, d_moistAdiabatProfiles, d_moistAdiabatOffsetsAndLengths, d_CCLProfiles, d_TcProfiles, d_mappedDiagramParticleVerticesVBO, vars->dividePrevVelocity != 0, vars->prevVelocityDivisor * 0.01f);
CHECK_ERROR(hipPeekAtLastError());
hipGraphicsUnmapResources(1, &particleSystem->cudaParticleVerticesVBO, 0);
hipGraphicsUnmapResources(1, &particleSystem->cudaParticleProfilesVBO, 0);
hipGraphicsUnmapResources(1, &particleSystem->cudaDiagramParticleVerticesVBO, 0);
}
/*
void STLPSimulatorCUDA::generateParticle() {
// testing generation in circle
float randx;
float randz;
bool incircle = false;
if (incircle) {
float R = 10.0f;
static std::random_device rd;
static std::mt19937 mt(rd());
static std::uniform_real_distribution<float> dist(0.0f, 1.0f);
float a = dist(mt) * 2.0f * (float)PI;
float r = R * sqrtf(dist(mt));
randx = r * cos(a);
randz = r * sin(a);
randx += heightMap->width / 2;
randz += heightMap->height / 2;
} else {
randx = (float)(rand() / (float)(RAND_MAX / ((float)heightMap->width - 2.0f)));
randz = (float)(rand() / (float)(RAND_MAX / ((float)heightMap->height - 2.0f)));
}
// interpolate
int leftx = (int)randx;
int rightx = leftx + 1;
int leftz = (int)randz;
int rightz = leftz + 1;
// leftx and leftz cannot be < 0 and rightx and rightz cannot be >= GRID_WIDTH or GRID_DEPTH
float xRatio = randx - leftx;
float zRatio = randz - leftz;
float y1 = heightMap->data[leftx + leftz * heightMap->width];
float y2 = heightMap->data[leftx + rightz * heightMap->width];
float y3 = heightMap->data[rightx + leftz * heightMap->width];
float y4 = heightMap->data[rightx + rightz * heightMap->width];
float yLeftx = zRatio * y2 + (1.0f - zRatio) * y1;
float yRightx = zRatio * y4 + (1.0f - zRatio) * y3;
float y = yRightx * xRatio + (1.0f - xRatio) * yLeftx;
//y = 5.0f; //////////////////////////////////////////////////////// FORCE Y to dry adiabat
particlePositions.push_back(glm::vec3(randx, y, randz));
mapFromSimulationBox(y);
Particle p;
p.position = glm::vec3(randx, y, randz);
p.velocity = glm::vec3(0.0f);
if (profileMap && profileMap->height >= heightMap->height && profileMap->width >= heightMap->width) {
glm::vec2 p1 = profileMap->data[leftx][leftz];
glm::vec2 p2 = profileMap->data[leftx][rightz];
glm::vec2 p3 = profileMap->data[rightx][leftz];
glm::vec2 p4 = profileMap->data[rightx][rightz];
glm::vec2 pi1 = zRatio * p2 + (1.0f - zRatio) * p1;
glm::vec2 pi2 = zRatio * p4 + (1.0f - zRatio) * p3;
glm::vec2 pif = xRatio * pi2 + (1.0f - xRatio) * pi1;
glm::ivec2 pii = (glm::ivec2)pif;
if (pii.y != pii.x) {
p.profileIndex = (rand() % (pii.y - pii.x) + pii.x) % (stlpDiagram->numProfiles - 1);
} else {
p.profileIndex = pii.x % (stlpDiagram->numProfiles - 1);
}
} else {
p.profileIndex = rand() % (stlpDiagram->numProfiles - 1);
}
p.updatePressureVal();
particles.push_back(p);
numParticles++;
}
*/
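// draw: renders the translucent CCL and EL level quads; face culling and blending state are saved and restored around the draw calls.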
void STLPSimulatorCUDA::draw() {
if (vars->showCCLLevelLayer || vars->showELLevelLayer) {
GLboolean cullFaceEnabled;
glGetBooleanv(GL_CULL_FACE, &cullFaceEnabled);
glDisable(GL_CULL_FACE);
GLboolean blendEnabled;
glGetBooleanv(GL_BLEND, &blendEnabled);
glEnable(GL_BLEND);
layerVisShader->use();
if (vars->showCCLLevelLayer) {
layerVisShader->setVec4("u_Color", glm::vec4(1.0f, 0.0f, 0.0f, 0.2f));
glBindVertexArray(CCLLevelVAO);
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
}
if (vars->showELLevelLayer) {
layerVisShader->setVec4("u_Color", glm::vec4(0.0f, 1.0f, 0.0f, 0.2f));
glBindVertexArray(ELLevelVAO);
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
}
if (cullFaceEnabled) {
glEnable(GL_CULL_FACE);
}
if (!blendEnabled) {
glDisable(GL_BLEND);
}
}
}
void STLPSimulatorCUDA::mapToSimulationBox(float & val) {
rangeToRange(val, groundHeight, boxTopHeight, 0.0f, (float)vars->latticeHeight);
}
void STLPSimulatorCUDA::mapFromSimulationBox(float & val) {
rangeToRange(val, 0.0f, (float)vars->latticeHeight, groundHeight, boxTopHeight);
}
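// refreshLevelVisualizationBuffers: recomputes the CCL/EL (or LCL/orographic EL) altitudes and re-uploads both visualization quads.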
void STLPSimulatorCUDA::refreshLevelVisualizationBuffers() {
vector<glm::vec3> vertices;
float altitude;
float w = vars->heightMap->getWorldWidth();
float d = vars->heightMap->getWorldDepth();
if (stlpDiagram->useOrographicParameters) {
altitude = getAltitudeFromPressure(stlpDiagram->LCL.y);
} else {
altitude = getAltitudeFromPressure(stlpDiagram->CCL.y);
}
vertices.push_back(glm::vec3(0.0f, altitude, 0.0f));
vertices.push_back(glm::vec3(0.0f, altitude, d));
vertices.push_back(glm::vec3(w, altitude, d));
vertices.push_back(glm::vec3(w, altitude, 0.0f));
glBindBuffer(GL_ARRAY_BUFFER, CCLLevelVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * 4, &vertices[0], GL_STATIC_DRAW);
vertices.clear();
if (stlpDiagram->useOrographicParameters) {
altitude = getAltitudeFromPressure(stlpDiagram->orographicEL.y);
} else {
altitude = getAltitudeFromPressure(stlpDiagram->EL.y);
}
vertices.push_back(glm::vec3(0.0f, altitude, 0.0f));
vertices.push_back(glm::vec3(0.0f, altitude, d));
vertices.push_back(glm::vec3(w, altitude, d));
vertices.push_back(glm::vec3(w, altitude, 0.0f));
glBindBuffer(GL_ARRAY_BUFFER, ELLevelVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * 4, &vertices[0], GL_STATIC_DRAW);
}
| 222f041c3e8b3073339a3ae197bad84c73328b33.cu | #include "STLPSimulatorCUDA.h"
#include "ShaderManager.h"
#include "STLPUtils.h"
#include "Utils.h"
#include "HeightMap.h"
#include "CUDAUtils.cuh"
#include "ParticleSystem.h"
#include <random>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
__constant__ int d_const_numProfiles;
__constant__ float d_const_P0;
//__constant__ float d_const_delta_t;
__constant__ float d_const_groundHeight;
__constant__ float d_const_boxTopHeight;
__constant__ float d_const_latticeHeight;
//__constant__ glm::vec2 *d_const_ambientTempCurve;
//
//__constant__ glm::vec2 *d_const_dryAdiabatProfiles;
//__constant__ int *d_const_dryAdiabatOffsets; // since each dry adiabat can have different amount of vertices
//
//__constant__ glm::vec2 *d_const_moistAdiabatProfiles;
//__constant__ int *d_const_moistAdiabatOffsets; // since each moist adiabat can have different amount of vertices
//
//__constant__ glm::vec2 *d_const_CCLProfiles;
//__constant__ glm::vec2 *d_const_TcProfiles;
__device__ float getNormalizedTemp(float T, float y) {
return (T - MIN_TEMP) / (MAX_TEMP - MIN_TEMP) + (1.0f - y);
}
__device__ float getNormalizedPres(float P) {
return ((log10f(P) - log10f(MIN_P)) / (log10f(d_const_P0) - log10f(MIN_P)));
}
__device__ float getDenormalizedTemp(float x, float y) {
return (x + y - 1.0f) * (MAX_TEMP - MIN_TEMP) + MIN_TEMP;
}
__device__ float getDenormalizedPres(float y) {
return powf(10.0f, y * (log10f(d_const_P0) - log10f(MIN_P)) + log10f(MIN_P));
}
__device__ glm::vec2 getNormalizedCoords(glm::vec2 coords) {
glm::vec2 res;
res.y = getNormalizedPres(coords.y);
res.x = getNormalizedTemp(coords.x, res.y);
return res;
}
__device__ glm::vec2 getDenormalizedCoords(glm::vec2 coords) {
glm::vec2 res;
res.x = getDenormalizedTemp(coords.x, coords.y);
res.y = getDenormalizedPres(coords.y);
return res;
}
__device__ glm::vec2 getNormalizedCoords(float T, float P) {
return getNormalizedCoords(glm::vec2(T, P));
}
__device__ glm::vec2 getDenormalizedCoords(float x, float y) {
return getDenormalizedCoords(glm::vec2(x, y));
}
__device__ float computeThetaFromAbsoluteK_dev(float T, float P, float P0 = 1000.0f) {
float tmp = (P == P0) ? 1.0f : pow(P0 / P, k_ratio);
return T * tmp;
}
__device__ float getKelvin_dev(float T) {
return T + 273.15f;
}
__device__ float getCelsius_dev(float T) {
return T - 273.15f;
}
__device__ void toKelvin_dev(float &T) {
T += 273.15f;
}
__device__ void toCelsius_dev(float &T) {
T -= 273.15f;
}
__device__ float getPressureVal_dev(float height) {
// based on CRC Handbook of Chemistry and Physics
return pow(((44331.514f - height) / 11880.516f), 1 / 0.1902632f);
}
__device__ void normalizeFromRange_dev(float &val, float min, float max) {
val = (val - min) / (max - min);
}
__device__ void rangeToRange_dev(float &val, float origMin, float origMax, float newMin, float newMax) {
normalizeFromRange_dev(val, origMin, origMax);
val *= (newMax - newMin);
val += newMin;
}
__device__ void mapToSimulationBox_dev(float & val) {
rangeToRange_dev(val, d_const_groundHeight, d_const_boxTopHeight, 0.0f, d_const_latticeHeight);
}
__device__ void mapFromSimulationBox_dev(float & val) {
rangeToRange_dev(val, 0.0f, d_const_latticeHeight, d_const_groundHeight, d_const_boxTopHeight);
}
__device__ float getMappedFromSimulationBox_dev(float val) {
mapFromSimulationBox_dev(val);
return val;
}
//! Finds an intersection of a curve with an isobar defined by its normalized pressure value.
/*!
\param[in] curveVertices Device pointer to array of curve vertices.
\param[in] numCurveVertices Length of the array containing the curve vertices.
\param[in] normP Normalized pressure value of the isobar.
\return Intersection point clamped to diagram bounds.
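Example (hypothetical values): for curveVertices = {(0.30, 0.90), (0.40, 0.60), (0.55, 0.20)} and normP = 0.40,
the binary search ends with left = 2 and right = 1, so t = (0.40 - 0.20) / (0.60 - 0.20) = 0.5
and the returned point is (0.475, 0.40).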
*/
__device__ glm::vec2 getIntersectionWithIsobar(glm::vec2 *curveVertices, int numCurveVertices, float normP) {
#define USE_BINARY_ISOBAR_INTERSECTION_SEARCH
#ifndef USE_BINARY_ISOBAR_INTERSECTION_SEARCH
// naively search for the correct interval - TODO: better solutions are binary search and direct indexing using (non-normalized) pressure - needs a better design
for (int i = 0; i < numCurveVertices - 1; i += 1) {
if (curveVertices[i + 1].y > normP) {
continue;
}
if (curveVertices[i + 1].y <= normP) {
float t = (normP - curveVertices[i + 1].y) / (curveVertices[i].y - curveVertices[i + 1].y);
float normalizedTemperature = t * curveVertices[i].x + (1.0f - t) * curveVertices[i + 1].x;
return glm::vec2(normalizedTemperature, normP);
}
}
return glm::vec2();
#else
// clamp to max values - the previous (naive search) interpolates the top-most and bottom-most edges (even beyond the curve)
if (normP >= curveVertices[0].y) {
return curveVertices[0];
} else if (normP <= curveVertices[numCurveVertices - 1].y) {
return curveVertices[numCurveVertices - 1];
}
int left = 0; // largest normP here
int right = numCurveVertices - 1; // smallest normP here
int curr;
while (left <= right) {
curr = (left + right) / 2;
if (curveVertices[curr].y > normP) {
left = curr + 1;
} else if (curveVertices[curr].y < normP) {
right = curr - 1;
} else {
return curveVertices[curr]; // no need to interpolate since the values match (the particle lies on an isobar that goes through the curve vertex)
}
}
// left now holds the index of the curve vertex above normP (its normalized pressure is smaller than normP)
// right holds the index of the vertex below it
float t = (normP - curveVertices[left].y) / (curveVertices[right].y - curveVertices[left].y);
float normalizedTemperature = t * curveVertices[right].x + (1.0f - t) * curveVertices[left].x;
return glm::vec2(normalizedTemperature, normP);
#endif
}
//! Runs the STLP simulation inside a kernel.
/*!
\param[in] particleVertices Device pointer to particle vertices array.
\param[in] numParticles Number of particles that should be simulated (numActiveParticles).
\param[in] delta_t Delta time to be used by the simulator.
\param[in] verticalVelocities Device pointer to vertical velocities array of the particles.
\param[in] profileIndices Device pointer to profile indices of the particles.
\param[in] ambientTempCurve Array of vertices of the ambient temperature sounding curve.
\param[in] numAmbientTempCurveVertices Number of vertices of the ambient temperature sounding curve.
\param[in] dryAdiabatProfiles Array of all dry adiabat profile curves.
\param[in] dryAdiabatOffsetsAndLengths Array of all dry adiabat curve offsets and lengths.
\param[in] moistAdiabatProfiles Array of all moist adiabat profile curves.
\param[in] moistAdiabatOffsetsAndLengths Array of all moist adiabat curve offsets and lengths.
\param[in] CCLProfiles Array of all CCL (convective condensation level) points for all profiles.
\param[in] TcProfiles Array of all Tc (convective temperature) points for all profiles.
\param[in] diagramParticleVertices Array of particle vertices that are shown in the diagram.
\param[in] dividePrevVelocity Whether to divide previous velocity in the simulation to produce artificial damping.
\param[in] prevVelocityDivisor By how much the previous velocity is divided if dividePrevVelocity is enabled.
*/
__global__ void simulationStepKernel(glm::vec3 *particleVertices, int numParticles, float delta_t, float *verticalVelocities, int *profileIndices, /*float *particlePressures, */glm::vec2 *ambientTempCurve, int numAmbientTempCurveVertices, glm::vec2 *dryAdiabatProfiles, glm::ivec2 *dryAdiabatOffsetsAndLengths, glm::vec2 *moistAdiabatProfiles, glm::ivec2 *moistAdiabatOffsetsAndLengths, glm::vec2 *CCLProfiles, glm::vec2 *TcProfiles, glm::vec2 *diagramParticleVertices, bool dividePrevVelocity, float prevVelocityDivisor) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < numParticles) {
float particlePressure = getPressureVal_dev(particleVertices[idx].y);
float normP = getNormalizedPres(particlePressure);
///*
//Stop particles out of diagram bounds:
// - the intersection with isobar test does work even beyond the diagram bounds but
// -> but using Duarte's approach, particles that start directly on the moist adiabat
// will accelerate infinitely, thus crashing the application due to NaN and Inf operations
//*/
//if (normP > ambientTempCurve[0].y || normP < ambientTempCurve[numAmbientTempCurveVertices - 1].y) {
// verticalVelocities[idx] = 0.0f;
// //return;
//}
glm::vec2 ambientIntersection = getIntersectionWithIsobar(ambientTempCurve, numAmbientTempCurveVertices, normP);
glm::vec2 particleCurveIntersection;
if (particlePressure > CCLProfiles[profileIndices[idx]].y) {
particleCurveIntersection = getIntersectionWithIsobar(&dryAdiabatProfiles[dryAdiabatOffsetsAndLengths[profileIndices[idx]].x], dryAdiabatOffsetsAndLengths[profileIndices[idx]].y, normP);
} else {
particleCurveIntersection = getIntersectionWithIsobar(&moistAdiabatProfiles[moistAdiabatOffsetsAndLengths[profileIndices[idx]].x], moistAdiabatOffsetsAndLengths[profileIndices[idx]].y, normP);
}
float ambientTemp = getDenormalizedTemp(ambientIntersection.x, normP);
float particleTemp = getDenormalizedTemp(particleCurveIntersection.x, normP);
diagramParticleVertices[idx].x = particleCurveIntersection.x;
diagramParticleVertices[idx].y = normP;
toKelvin_dev(ambientTemp);
toKelvin_dev(particleTemp);
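// Buoyant acceleration from the potential-temperature difference at the particle's pressure level:
// a = g * (theta_particle - theta_ambient) / theta_ambient.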
float ambientTheta = computeThetaFromAbsoluteK_dev(ambientTemp, particlePressure);
float particleTheta = computeThetaFromAbsoluteK_dev(particleTemp, particlePressure);
float a = 9.81f * (particleTheta - ambientTheta) / ambientTheta;
/*
Stop particles out of diagram bounds:
- the intersection-with-isobar test works even beyond the diagram bounds,
but with Duarte's approach, particles that start directly on the moist adiabat
would accelerate infinitely, crashing the application through NaN and Inf operations.
Checking here (after the acceleration has been computed) lets us determine whether the particle
would keep accelerating out of bounds towards infinity (making the simulator unstable) or not.
If not, we continue with the computation. Importantly, the getIntersectionWithIsobar
function clamps the returned point to the last valid values in the diagram, which makes this whole process valid.
*/
// Particle below diagram that has negative acceleration is not permitted!
if (normP > ambientTempCurve[0].y && a < 0.0f) {
return;
}
// Particle above diagram that has positive acceleration is not permitted!
if (normP < ambientTempCurve[numAmbientTempCurveVertices - 1].y && a > 0.0f) {
return;
}
//if (normP > ambientTempCurve[0].y || normP < ambientTempCurve[numAmbientTempCurveVertices - 1].y) {
// ///*
// // x * y > 0.0f returns true if x and y have the same sign
// // Note: This does not work for infinity since 0 * infinity is NaN.
// // We can have it here since this is exactly the thing preventing infinity and NaN values.
// //*/
// //if (verticalVelocities[idx] * a > 0.0f) {
// // return; // do nothing if the particle would accelerate to infinity
// //}
//}
if (dividePrevVelocity) {
verticalVelocities[idx] /= prevVelocityDivisor;
}
verticalVelocities[idx] = verticalVelocities[idx] + a * delta_t;
float deltaY = verticalVelocities[idx] * delta_t + 0.5f * a * delta_t * delta_t;
particleVertices[idx].y += deltaY;
}
}
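// Older kernel variant kept for reference: it branches between the dry and moist adiabat before the
// intersection test and duplicates the integration code, instead of sharing the common path above.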
__global__ void simulationStepKernel_backup(glm::vec3 *particleVertices, int numParticles, float delta_t, float *verticalVelocities, int *profileIndices, /*float *particlePressures, */glm::vec2 *ambientTempCurve, int numAmbientTempCurveVertices, glm::vec2 *dryAdiabatProfiles, glm::ivec2 *dryAdiabatOffsetsAndLengths, glm::vec2 *moistAdiabatProfiles, glm::ivec2 *moistAdiabatOffsetsAndLengths, glm::vec2 *CCLProfiles, glm::vec2 *TcProfiles, glm::vec2 *diagramParticleVertices, bool dividePrevVelocity, float prevVelocityDivisor) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < numParticles) {
//float particlePressure = getPressureVal_dev(getMappedFromSimulationBox_dev(particleVertices[idx].y));
float particlePressure = getPressureVal_dev(particleVertices[idx].y);
if (particlePressure > CCLProfiles[profileIndices[idx]].y) {
//printf("| pressure = %0.2f\n", particlePressures[idx]);
//particleVertices[idx].y += 0.1f;
float normP = getNormalizedPres(particlePressure);
glm::vec2 ambientIntersection = getIntersectionWithIsobar(ambientTempCurve, numAmbientTempCurveVertices, normP);
glm::vec2 dryAdiabatIntersection = getIntersectionWithIsobar(&dryAdiabatProfiles[dryAdiabatOffsetsAndLengths[profileIndices[idx]].x], dryAdiabatOffsetsAndLengths[profileIndices[idx]].y, normP);
float ambientTemp = getDenormalizedTemp(ambientIntersection.x, normP);
float particleTemp = getDenormalizedTemp(dryAdiabatIntersection.x, normP);
//printf("| ambientTemp [deg C] = %0.2f\n", ambientTemp);
//printf("| particleTemp [deg C] = %0.2f\n", particleTemp);
diagramParticleVertices[idx].x = dryAdiabatIntersection.x;
diagramParticleVertices[idx].y = normP;
toKelvin_dev(ambientTemp);
toKelvin_dev(particleTemp);
float ambientTheta = computeThetaFromAbsoluteK_dev(ambientTemp, particlePressure);
float particleTheta = computeThetaFromAbsoluteK_dev(particleTemp, particlePressure);
float a = 9.81f * (particleTheta - ambientTheta) / ambientTheta;
if (dividePrevVelocity) {
verticalVelocities[idx] /= prevVelocityDivisor;
}
verticalVelocities[idx] = verticalVelocities[idx] + a * delta_t;
float deltaY = verticalVelocities[idx] * delta_t + 0.5f * a * delta_t * delta_t;
//mapFromSimulationBox_dev(particleVertices[idx].y);
particleVertices[idx].y += deltaY;
//mapToSimulationBox_dev(particleVertices[idx].y);
} else {
float normP = getNormalizedPres(particlePressure);
glm::vec2 ambientIntersection = getIntersectionWithIsobar(ambientTempCurve, numAmbientTempCurveVertices, normP);
glm::vec2 moistAdiabatIntersection = getIntersectionWithIsobar(&moistAdiabatProfiles[moistAdiabatOffsetsAndLengths[profileIndices[idx]].x], moistAdiabatOffsetsAndLengths[profileIndices[idx]].y, normP);
float ambientTemp = getDenormalizedTemp(ambientIntersection.x, normP);
float particleTemp = getDenormalizedTemp(moistAdiabatIntersection.x, normP);
diagramParticleVertices[idx].x = moistAdiabatIntersection.x;
diagramParticleVertices[idx].y = normP;
toKelvin_dev(ambientTemp);
toKelvin_dev(particleTemp);
float ambientTheta = computeThetaFromAbsoluteK_dev(ambientTemp, particlePressure);
float particleTheta = computeThetaFromAbsoluteK_dev(particleTemp, particlePressure);
float a = 9.81f * (particleTheta - ambientTheta) / ambientTheta;
if (dividePrevVelocity) {
verticalVelocities[idx] /= prevVelocityDivisor;
}
verticalVelocities[idx] = verticalVelocities[idx] + a * delta_t;
float deltaY = verticalVelocities[idx] * delta_t + 0.5f * a * delta_t * delta_t;
//mapFromSimulationBox_dev(particleVertices[idx].y);
particleVertices[idx].y += deltaY;
//mapToSimulationBox_dev(particleVertices[idx].y);
}
}
}
STLPSimulatorCUDA::STLPSimulatorCUDA(VariableManager * vars, STLPDiagram * stlpDiagram) : vars(vars), stlpDiagram(stlpDiagram) {
heightMap = vars->heightMap;
groundHeight = getAltitudeFromPressure(stlpDiagram->P0);
boxTopHeight = groundHeight + simulationBoxHeight;
layerVisShader = ShaderManager::getShaderPtr("singleColorAlpha");
initBuffers();
profileMap = new ppmImage("profile_maps/120x80_pm_01.ppm");
}
STLPSimulatorCUDA::~STLPSimulatorCUDA() {
if (profileMap != nullptr) {
delete profileMap;
}
}
void STLPSimulatorCUDA::initBuffers() {
ShaderProgram *s = ShaderManager::getShaderPtr("pointSpriteTest");
uploadProfileIndicesUniforms(s);
s = ShaderManager::getShaderPtr("volume_1st_pass_alt2");
uploadProfileIndicesUniforms(s);
s = ShaderManager::getShaderPtr("volume_2nd_pass_alt2");
uploadProfileIndicesUniforms(s);
vector<glm::vec3> vertices;
glGenVertexArrays(1, &CCLLevelVAO);
glBindVertexArray(CCLLevelVAO);
glGenBuffers(1, &CCLLevelVBO);
glBindBuffer(GL_ARRAY_BUFFER, CCLLevelVBO);
float altitude;
float w = vars->heightMap->getWorldWidth();
float d = vars->heightMap->getWorldDepth();
if (stlpDiagram->useOrographicParameters) {
altitude = getAltitudeFromPressure(stlpDiagram->LCL.y);
} else {
altitude = getAltitudeFromPressure(stlpDiagram->CCL.y);
}
//mapToSimulationBox(altitude);
vertices.push_back(glm::vec3(0.0f, altitude, 0.0f));
vertices.push_back(glm::vec3(0.0f, altitude, d));
vertices.push_back(glm::vec3(w, altitude, d));
vertices.push_back(glm::vec3(w, altitude, 0.0f));
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * 4, &vertices[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(glm::vec3), (void *)0);
glBindVertexArray(0);
vertices.clear();
glGenVertexArrays(1, &ELLevelVAO);
glBindVertexArray(ELLevelVAO);
glGenBuffers(1, &ELLevelVBO);
glBindBuffer(GL_ARRAY_BUFFER, ELLevelVBO);
if (stlpDiagram->useOrographicParameters) {
altitude = getAltitudeFromPressure(stlpDiagram->orographicEL.y);
} else {
altitude = getAltitudeFromPressure(stlpDiagram->EL.y);
}
vertices.push_back(glm::vec3(0.0f, altitude, 0.0f));
vertices.push_back(glm::vec3(0.0f, altitude, d));
vertices.push_back(glm::vec3(w, altitude, d));
vertices.push_back(glm::vec3(w, altitude, 0.0f));
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * 4, &vertices[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(glm::vec3), (void *)0);
glBindVertexArray(0);
}
void STLPSimulatorCUDA::uploadProfileIndicesUniforms(ShaderProgram *shader) {
shader->use();
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
string fullName = "u_ProfileCCLs[" + to_string(i) + "]";
float P = stlpDiagram->CCLProfiles[i].y;
float y = getAltitudeFromPressure(P);
//mapToSimulationBox(y);
shader->setFloat(fullName, y);
}
shader->setInt("u_NumProfiles", stlpDiagram->numProfiles);
}
void STLPSimulatorCUDA::initCUDA() {
blockDim = dim3(256, 1, 1);
gridDim = dim3((int)ceil((float)particleSystem->numParticles / (float)blockDim.x), 1, 1);
// ambient temp curve can be mapped to VBO, no need for this
CHECK_ERROR(cudaMalloc((void**)&d_ambientTempCurve, sizeof(glm::vec2) * stlpDiagram->ambientCurve.vertices.size()));
CHECK_ERROR(cudaMemcpy(d_ambientTempCurve, &stlpDiagram->ambientCurve.vertices[0], sizeof(glm::vec2) * stlpDiagram->ambientCurve.vertices.size(), cudaMemcpyHostToDevice));
CHECK_ERROR(cudaMemcpyToSymbol(d_const_numProfiles, &stlpDiagram->numProfiles, sizeof(int)));
CHECK_ERROR(cudaMemcpyToSymbol(d_const_P0, &stlpDiagram->P0, sizeof(float)));
//CHECK_ERROR(cudaMemcpyToSymbol(d_const_delta_t, &delta_t, sizeof(float)));
CHECK_ERROR(cudaMemcpyToSymbol(d_const_boxTopHeight, &boxTopHeight, sizeof(float)));
CHECK_ERROR(cudaMemcpyToSymbol(d_const_groundHeight, &groundHeight, sizeof(float)));
float latticeH = (float)vars->latticeHeight;
CHECK_ERROR(cudaMemcpyToSymbol(d_const_latticeHeight, &latticeH, sizeof(float)));
vector<int> itmp;
//itmp.clear();
vector<glm::vec2> tmp;
vector<glm::ivec2> ivectmp;
tmp.reserve(stlpDiagram->numProfiles * stlpDiagram->dryAdiabatProfiles[0].vertices.size()); // probably the largest possible collection
// DRY ADIABAT OFFSETS
tmp.clear();
ivectmp.clear();
int sum = 0;
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
itmp.push_back(sum);
int prevSum = sum;
sum += (int)stlpDiagram->dryAdiabatProfiles[i].vertices.size();
ivectmp.push_back(glm::ivec2(prevSum, sum - prevSum)); // x = offset, y = length
//cout << stlpDiagram->dryAdiabatProfiles[i].vertices.size() << endl;
}
//CHECK_ERROR(cudaMemcpyToSymbol(d_const_dryAdiabatOffsets, &itmp[0], sizeof(int) * itmp.size()));
//cudaMalloc((void**)&d_dryAdiabatOffsets, sizeof(int) * itmp.size());
//CHECK_ERROR(cudaMemcpy(d_dryAdiabatOffsets, &itmp[0], sizeof(int) * itmp.size(), cudaMemcpyHostToDevice));
CHECK_ERROR(cudaMalloc((void**)&d_dryAdiabatOffsetsAndLengths, sizeof(glm::ivec2) * ivectmp.size()));
CHECK_ERROR(cudaMemcpy(d_dryAdiabatOffsetsAndLengths, &ivectmp[0], sizeof(glm::ivec2) * ivectmp.size(), cudaMemcpyHostToDevice));
// MOIST ADIABAT OFFSETS
itmp.clear();
tmp.clear();
ivectmp.clear();
sum = 0;
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
itmp.push_back(sum);
int prevSum = sum;
sum += (int)stlpDiagram->moistAdiabatProfiles[i].vertices.size();
ivectmp.push_back(glm::ivec2(prevSum, sum - prevSum)); // x = offset, y = length
//cout << stlpDiagram->moistAdiabatProfiles[i].vertices.size() << endl;
}
//CHECK_ERROR(cudaMemcpyToSymbol(d_const_moistAdiabatOffsets, &itmp[0], sizeof(int) * itmp.size()));
//cudaMalloc((void**)&d_moistAdiabatOffsets, sizeof(int) * itmp.size());
//CHECK_ERROR(cudaMemcpy(d_moistAdiabatOffsets, &itmp[0], sizeof(int) * itmp.size(), cudaMemcpyHostToDevice));
CHECK_ERROR(cudaMalloc((void**)&d_moistAdiabatOffsetsAndLengths, sizeof(glm::ivec2) * ivectmp.size()));
CHECK_ERROR(cudaMemcpy(d_moistAdiabatOffsetsAndLengths, &ivectmp[0], sizeof(glm::ivec2) * ivectmp.size(), cudaMemcpyHostToDevice));
// DRY ADIABATS
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
for (int j = 0; j < stlpDiagram->dryAdiabatProfiles[i].vertices.size(); j++) {
tmp.push_back(stlpDiagram->dryAdiabatProfiles[i].vertices[j]);
}
}
//CHECK_ERROR(cudaMemcpyToSymbol(d_const_dryAdiabatProfiles, &tmp[0], sizeof(glm::vec2) * tmp.size()));
CHECK_ERROR(cudaMalloc((void**)&d_dryAdiabatProfiles, sizeof(glm::vec2) * tmp.size()));
CHECK_ERROR(cudaMemcpy(d_dryAdiabatProfiles, &tmp[0], sizeof(glm::vec2) * tmp.size(), cudaMemcpyHostToDevice));
// MOIST ADIABATS
tmp.clear();
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
for (int j = 0; j < stlpDiagram->moistAdiabatProfiles[i].vertices.size(); j++) {
tmp.push_back(stlpDiagram->moistAdiabatProfiles[i].vertices[j]);
}
}
//CHECK_ERROR(cudaMemcpyToSymbol(d_const_moistAdiabatProfiles, &tmp[0], sizeof(glm::vec2) * tmp.size()));
CHECK_ERROR(cudaMalloc((void**)&d_moistAdiabatProfiles, sizeof(glm::vec2) * tmp.size()));
CHECK_ERROR(cudaMemcpy(d_moistAdiabatProfiles, &tmp[0], sizeof(glm::vec2) * tmp.size(), cudaMemcpyHostToDevice));
// CCL Profiles
tmp.clear();
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
tmp.push_back(stlpDiagram->CCLProfiles[i]);
}
//CHECK_ERROR(cudaMemcpyToSymbol(d_const_CCLProfiles, &tmp[0], sizeof(glm::vec2) * tmp.size()));
CHECK_ERROR(cudaMalloc((void**)&d_CCLProfiles, sizeof(glm::vec2) * tmp.size()));
CHECK_ERROR(cudaMemcpy(d_CCLProfiles, &tmp[0], sizeof(glm::vec2) * tmp.size(), cudaMemcpyHostToDevice));
// Tc Profiles
tmp.clear();
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
tmp.push_back(stlpDiagram->TcProfiles[i]);
}
//CHECK_ERROR(cudaMemcpyToSymbol(d_const_TcProfiles, &tmp[0], sizeof(glm::vec2) * tmp.size()));
CHECK_ERROR(cudaMalloc((void**)&d_TcProfiles, sizeof(glm::vec2) * tmp.size()));
CHECK_ERROR(cudaMemcpy(d_TcProfiles, &tmp[0], sizeof(glm::vec2) * tmp.size(), cudaMemcpyHostToDevice));
}
void STLPSimulatorCUDA::initCUDAGeneral() {
blockDim = dim3(256, 1, 1);
gridDim = dim3((int)ceil((float)particleSystem->numParticles / (float)blockDim.x), 1, 1);
CHECK_ERROR(cudaMalloc((void**)&d_ambientTempCurve, sizeof(glm::vec2) * stlpDiagram->ambientCurve.vertices.size()));
currAmbientCurveVertexCount = (int)stlpDiagram->ambientCurve.vertices.size();
CHECK_ERROR(cudaMalloc((void**)&d_dryAdiabatOffsetsAndLengths, sizeof(glm::ivec2) * vars->stlpMaxProfiles));
CHECK_ERROR(cudaMalloc((void**)&d_moistAdiabatOffsetsAndLengths, sizeof(glm::ivec2) * vars->stlpMaxProfiles));
CHECK_ERROR(cudaMalloc((void**)&d_dryAdiabatProfiles, sizeof(glm::vec2) * vars->stlpMaxProfiles * stlpDiagram->maxVerticesPerCurve));
CHECK_ERROR(cudaMalloc((void**)&d_moistAdiabatProfiles, sizeof(glm::vec2) * vars->stlpMaxProfiles * stlpDiagram->maxVerticesPerCurve));
CHECK_ERROR(cudaMalloc((void**)&d_CCLProfiles, sizeof(glm::vec2) * vars->stlpMaxProfiles));
CHECK_ERROR(cudaMalloc((void**)&d_TcProfiles, sizeof(glm::vec2) * vars->stlpMaxProfiles));
}
void STLPSimulatorCUDA::uploadDataFromDiagramToGPU() {
if (currAmbientCurveVertexCount != stlpDiagram->ambientCurve.vertices.size()) {
CHECK_ERROR(cudaFree(d_ambientTempCurve));
CHECK_ERROR(cudaMalloc((void**)&d_ambientTempCurve, sizeof(glm::vec2) * stlpDiagram->ambientCurve.vertices.size()));
currAmbientCurveVertexCount = (int)stlpDiagram->ambientCurve.vertices.size();
}
//cout << "Ambient curve length: " << stlpDiagram->ambientCurve.vertices.size() << endl;
// ambient temp curve can be mapped to VBO, no need for this
CHECK_ERROR(cudaMemcpy(d_ambientTempCurve, &stlpDiagram->ambientCurve.vertices[0], sizeof(glm::vec2) * stlpDiagram->ambientCurve.vertices.size(), cudaMemcpyHostToDevice));
CHECK_ERROR(cudaMemcpyToSymbol(d_const_numProfiles, &stlpDiagram->numProfiles, sizeof(int)));
CHECK_ERROR(cudaMemcpyToSymbol(d_const_P0, &stlpDiagram->P0, sizeof(float)));
//CHECK_ERROR(cudaMemcpyToSymbol(d_const_delta_t, &delta_t, sizeof(float)));
CHECK_ERROR(cudaMemcpyToSymbol(d_const_boxTopHeight, &boxTopHeight, sizeof(float)));
CHECK_ERROR(cudaMemcpyToSymbol(d_const_groundHeight, &groundHeight, sizeof(float)));
float latticeH = (float)vars->latticeHeight;
CHECK_ERROR(cudaMemcpyToSymbol(d_const_latticeHeight, &latticeH, sizeof(float)));
vector<int> itmp;
//itmp.clear();
vector<glm::vec2> tmp;
vector<glm::ivec2> ivectmp;
tmp.reserve(stlpDiagram->numProfiles * stlpDiagram->dryAdiabatProfiles[0].vertices.size()); // probably the largest possible collection
// DRY ADIABAT OFFSETS
tmp.clear();
ivectmp.clear();
int sum = 0;
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
itmp.push_back(sum);
int prevSum = sum;
sum += (int)stlpDiagram->dryAdiabatProfiles[i].vertices.size();
ivectmp.push_back(glm::ivec2(prevSum, sum - prevSum)); // x = offset, y = length
}
CHECK_ERROR(cudaMemcpy(d_dryAdiabatOffsetsAndLengths, &ivectmp[0], sizeof(glm::ivec2) * ivectmp.size(), cudaMemcpyHostToDevice));
// MOIST ADIABAT OFFSETS
itmp.clear();
tmp.clear();
ivectmp.clear();
sum = 0;
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
itmp.push_back(sum);
int prevSum = sum;
sum += (int)stlpDiagram->moistAdiabatProfiles[i].vertices.size();
ivectmp.push_back(glm::ivec2(prevSum, sum - prevSum)); // x = offset, y = length
}
CHECK_ERROR(cudaMemcpy(d_moistAdiabatOffsetsAndLengths, &ivectmp[0], sizeof(glm::ivec2) * ivectmp.size(), cudaMemcpyHostToDevice));
// DRY ADIABATS
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
for (int j = 0; j < stlpDiagram->dryAdiabatProfiles[i].vertices.size(); j++) {
tmp.push_back(stlpDiagram->dryAdiabatProfiles[i].vertices[j]);
}
}
//cout << vars->stlpMaxProfiles << ", " << tmp.size() << endl;
CHECK_ERROR(cudaMemcpy(d_dryAdiabatProfiles, &tmp[0], sizeof(glm::vec2) * tmp.size(), cudaMemcpyHostToDevice));
// MOIST ADIABATS
tmp.clear();
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
for (int j = 0; j < stlpDiagram->moistAdiabatProfiles[i].vertices.size(); j++) {
tmp.push_back(stlpDiagram->moistAdiabatProfiles[i].vertices[j]);
}
}
//cout << (vars->stlpMaxProfiles * stlpDiagram->maxVerticesPerCurve) << ", " << tmp.size() << endl;
CHECK_ERROR(cudaMemcpy(d_moistAdiabatProfiles, &tmp[0], sizeof(glm::vec2) * tmp.size(), cudaMemcpyHostToDevice));
// CCL Profiles
tmp.clear();
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
tmp.push_back(stlpDiagram->CCLProfiles[i]);
}
CHECK_ERROR(cudaMemcpy(d_CCLProfiles, &tmp[0], sizeof(glm::vec2) * tmp.size(), cudaMemcpyHostToDevice));
// Tc Profiles
tmp.clear();
for (int i = 0; i < stlpDiagram->numProfiles; i++) {
tmp.push_back(stlpDiagram->TcProfiles[i]);
}
CHECK_ERROR(cudaMemcpy(d_TcProfiles, &tmp[0], sizeof(glm::vec2) * tmp.size(), cudaMemcpyHostToDevice));
ShaderProgram *s = ShaderManager::getShaderPtr("pointSpriteTest");
uploadProfileIndicesUniforms(s);
s = ShaderManager::getShaderPtr("volume_1st_pass_alt2");
uploadProfileIndicesUniforms(s);
s = ShaderManager::getShaderPtr("volume_2nd_pass_alt2");
uploadProfileIndicesUniforms(s);
refreshLevelVisualizationBuffers();
}
void STLPSimulatorCUDA::doStep() {
size_t num_bytes;
glm::vec3 *d_mappedParticleVerticesVBO;
glm::vec2 *d_mappedDiagramParticleVerticesVBO;
int *d_mappedParticleProfilesVBO;
CHECK_ERROR(cudaGraphicsMapResources(1, &particleSystem->cudaParticleVerticesVBO, 0));
CHECK_ERROR(cudaGraphicsResourceGetMappedPointer((void **)&d_mappedParticleVerticesVBO, &num_bytes, particleSystem->cudaParticleVerticesVBO));
CHECK_ERROR(cudaGraphicsMapResources(1, &particleSystem->cudaParticleProfilesVBO, 0));
CHECK_ERROR(cudaGraphicsResourceGetMappedPointer((void **)&d_mappedParticleProfilesVBO, &num_bytes, particleSystem->cudaParticleProfilesVBO));
CHECK_ERROR(cudaGraphicsMapResources(1, &particleSystem->cudaDiagramParticleVerticesVBO, 0));
CHECK_ERROR(cudaGraphicsResourceGetMappedPointer((void **)&d_mappedDiagramParticleVerticesVBO, &num_bytes, particleSystem->cudaDiagramParticleVerticesVBO));
simulationStepKernel << <gridDim.x, blockDim.x >> > (d_mappedParticleVerticesVBO, particleSystem->numActiveParticles, delta_t, particleSystem->d_verticalVelocities, d_mappedParticleProfilesVBO, d_ambientTempCurve, (int)stlpDiagram->ambientCurve.vertices.size(), d_dryAdiabatProfiles, d_dryAdiabatOffsetsAndLengths, d_moistAdiabatProfiles, d_moistAdiabatOffsetsAndLengths, d_CCLProfiles, d_TcProfiles, d_mappedDiagramParticleVerticesVBO, vars->dividePrevVelocity != 0, vars->prevVelocityDivisor * 0.01f);
CHECK_ERROR(cudaPeekAtLastError());
cudaGraphicsUnmapResources(1, &particleSystem->cudaParticleVerticesVBO, 0);
cudaGraphicsUnmapResources(1, &particleSystem->cudaParticleProfilesVBO, 0);
cudaGraphicsUnmapResources(1, &particleSystem->cudaDiagramParticleVerticesVBO, 0);
}
/*
void STLPSimulatorCUDA::generateParticle() {
// testing generation in circle
float randx;
float randz;
bool incircle = false;
if (incircle) {
float R = 10.0f;
static std::random_device rd;
static std::mt19937 mt(rd());
static std::uniform_real_distribution<float> dist(0.0f, 1.0f);
float a = dist(mt) * 2.0f * (float)PI;
float r = R * sqrtf(dist(mt));
randx = r * cos(a);
randz = r * sin(a);
randx += heightMap->width / 2;
randz += heightMap->height / 2;
} else {
randx = (float)(rand() / (float)(RAND_MAX / ((float)heightMap->width - 2.0f)));
randz = (float)(rand() / (float)(RAND_MAX / ((float)heightMap->height - 2.0f)));
}
// interpolate
int leftx = (int)randx;
int rightx = leftx + 1;
int leftz = (int)randz;
int rightz = leftz + 1;
// leftx and leftz cannot be < 0 and rightx and rightz cannot be >= GRID_WIDTH or GRID_DEPTH
float xRatio = randx - leftx;
float zRatio = randz - leftz;
float y1 = heightMap->data[leftx + leftz * heightMap->width];
float y2 = heightMap->data[leftx + rightz * heightMap->width];
float y3 = heightMap->data[rightx + leftz * heightMap->width];
float y4 = heightMap->data[rightx + rightz * heightMap->width];
float yLeftx = zRatio * y2 + (1.0f - zRatio) * y1;
float yRightx = zRatio * y4 + (1.0f - zRatio) * y3;
float y = yRightx * xRatio + (1.0f - xRatio) * yLeftx;
//y = 5.0f; //////////////////////////////////////////////////////// FORCE Y to dry adiabat
particlePositions.push_back(glm::vec3(randx, y, randz));
mapFromSimulationBox(y);
Particle p;
p.position = glm::vec3(randx, y, randz);
p.velocity = glm::vec3(0.0f);
if (profileMap && profileMap->height >= heightMap->height && profileMap->width >= heightMap->width) {
glm::vec2 p1 = profileMap->data[leftx][leftz];
glm::vec2 p2 = profileMap->data[leftx][rightz];
glm::vec2 p3 = profileMap->data[rightx][leftz];
glm::vec2 p4 = profileMap->data[rightx][rightz];
glm::vec2 pi1 = zRatio * p2 + (1.0f - zRatio) * p1;
glm::vec2 pi2 = zRatio * p4 + (1.0f - zRatio) * p3;
glm::vec2 pif = xRatio * pi2 + (1.0f - xRatio) * pi1;
glm::ivec2 pii = (glm::ivec2)pif;
if (pii.y != pii.x) {
p.profileIndex = (rand() % (pii.y - pii.x) + pii.x) % (stlpDiagram->numProfiles - 1);
} else {
p.profileIndex = pii.x % (stlpDiagram->numProfiles - 1);
}
} else {
p.profileIndex = rand() % (stlpDiagram->numProfiles - 1);
}
p.updatePressureVal();
particles.push_back(p);
numParticles++;
}
*/
void STLPSimulatorCUDA::draw() {
if (vars->showCCLLevelLayer || vars->showELLevelLayer) {
GLboolean cullFaceEnabled;
glGetBooleanv(GL_CULL_FACE, &cullFaceEnabled);
glDisable(GL_CULL_FACE);
GLboolean blendEnabled;
glGetBooleanv(GL_BLEND, &blendEnabled);
glEnable(GL_BLEND);
layerVisShader->use();
if (vars->showCCLLevelLayer) {
layerVisShader->setVec4("u_Color", glm::vec4(1.0f, 0.0f, 0.0f, 0.2f));
glBindVertexArray(CCLLevelVAO);
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
}
if (vars->showELLevelLayer) {
layerVisShader->setVec4("u_Color", glm::vec4(0.0f, 1.0f, 0.0f, 0.2f));
glBindVertexArray(ELLevelVAO);
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
}
if (cullFaceEnabled) {
glEnable(GL_CULL_FACE);
}
if (!blendEnabled) {
glDisable(GL_BLEND);
}
}
}
void STLPSimulatorCUDA::mapToSimulationBox(float & val) {
rangeToRange(val, groundHeight, boxTopHeight, 0.0f, (float)vars->latticeHeight);
}
void STLPSimulatorCUDA::mapFromSimulationBox(float & val) {
rangeToRange(val, 0.0f, (float)vars->latticeHeight, groundHeight, boxTopHeight);
}
void STLPSimulatorCUDA::refreshLevelVisualizationBuffers() {
vector<glm::vec3> vertices;
float altitude;
float w = vars->heightMap->getWorldWidth();
float d = vars->heightMap->getWorldDepth();
if (stlpDiagram->useOrographicParameters) {
altitude = getAltitudeFromPressure(stlpDiagram->LCL.y);
} else {
altitude = getAltitudeFromPressure(stlpDiagram->CCL.y);
}
vertices.push_back(glm::vec3(0.0f, altitude, 0.0f));
vertices.push_back(glm::vec3(0.0f, altitude, d));
vertices.push_back(glm::vec3(w, altitude, d));
vertices.push_back(glm::vec3(w, altitude, 0.0f));
glBindBuffer(GL_ARRAY_BUFFER, CCLLevelVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * 4, &vertices[0], GL_STATIC_DRAW);
vertices.clear();
if (stlpDiagram->useOrographicParameters) {
altitude = getAltitudeFromPressure(stlpDiagram->orographicEL.y);
} else {
altitude = getAltitudeFromPressure(stlpDiagram->EL.y);
}
vertices.push_back(glm::vec3(0.0f, altitude, 0.0f));
vertices.push_back(glm::vec3(0.0f, altitude, d));
vertices.push_back(glm::vec3(w, altitude, d));
vertices.push_back(glm::vec3(w, altitude, 0.0f));
glBindBuffer(GL_ARRAY_BUFFER, ELLevelVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * 4, &vertices[0], GL_STATIC_DRAW);
}
|
102398ebbb9e29f724816566f761126d51e1de68.hip | // !!! This is a file automatically generated by hipify!!!
/* Based on the NVIDIA example
* Yekta & Dogukan
*/
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
// Initialize host memory
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
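// With A filled with 1.0f and B filled with valB, every element of C should equal dimsA.x * valB,
// which is what the correctness check after the kernel runs relies on.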
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
hipError_t error;
error = hipMalloc((void **) &d_A, mem_size_A);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_B, mem_size_B);
if (error != hipSuccess)
{
printf("hipMalloc d_B returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_C, mem_size_C);
if (error != hipSuccess)
{
printf("hipMalloc d_C returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16)
{
hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
printf("done\n");
hipDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
hipEvent_t start;
error = hipEventCreate(&start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipEvent_t stop;
error = hipEventCreate(&stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = hipEventRecord(start, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Execute the kernel
int nIter = 300;
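// Note: the host-to-device copies and the device-to-host result copy are inside the timed loop,
// so the measured time includes the memory transfers as well as the kernel execution.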
for (int j = 0; j < nIter; j++)
{
// copy host memory to device
error = hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
if (block_size == 16)
{
hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
// Copy result from device to host
error = hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);
if (error != hipSuccess)
{
printf("hipMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
}
// Record the stop event
error = hipEventRecord(stop, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = hipEventSynchronize(stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = hipEventElapsedTime(&msecTotal, start, stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
error = hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);
if (error != hipSuccess)
{
printf("hipMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6 ; // machine zero
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err/abs_val/dot_length ;
if (rel_err > eps)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n");
if (correct)
{
return EXIT_SUCCESS;
}
else
{
return EXIT_FAILURE;
}
}
/**
* Program main
*/
int main(int argc, char **argv)
{
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?"))
{
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
exit(EXIT_SUCCESS);
}
// By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
int devID = 0;
if (checkCmdLineFlag(argc, (const char **)argv, "device"))
{
devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
hipSetDevice(devID);
}
hipError_t error;
hipDeviceProp_t deviceProp;
error = hipGetDevice(&devID);
if (error != hipSuccess)
{
printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
}
error = hipGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == hipComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != hipSuccess)
{
printf("hipGetDeviceProperties returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
int block_size = 32;
dim3 dimsA(5*2*block_size, 5*2*block_size, 1);
dim3 dimsB(5*4*block_size, 5*2*block_size, 1);
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
{
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
{
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
// width of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
{
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
// height of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
{
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
if (dimsA.x != dimsB.y)
{
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
| 102398ebbb9e29f724816566f761126d51e1de68.cu | /* Based on the NVIDIA example
* Yekta & Dogukan
*/
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
// Initialize host memory
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
cudaError_t error;
error = cudaMalloc((void **) &d_A, mem_size_A);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **) &d_B, mem_size_B);
if (error != cudaSuccess)
{
printf("cudaMalloc d_B returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **) &d_C, mem_size_C);
if (error != cudaSuccess)
{
printf("cudaMalloc d_C returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
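// Note: the integer division assumes dimsB.x and dimsA.y are exact multiples of block_size;
// the kernel performs no bounds checks, so any remainder rows/columns would simply not be computed.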
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16)
{
matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
printf("done\n");
cudaDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
cudaEvent_t start;
error = cudaEventCreate(&start);
/* if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
} */
// Keep the stop event live: it is recorded and synchronized after the timed loop below.
cudaEvent_t stop;
error = cudaEventCreate(&stop);
/* if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
} */
// Record the start event
error = cudaEventRecord(start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Execute the kernel
int nIter = 300;
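// Note: every timed iteration below also re-copies A and B to the device and copies C back,
// so the reported GFLOP/s includes transfer time, not just kernel time.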
for (int j = 0; j < nIter; j++)
{
// copy host memory to device
error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
if (block_size == 16)
{
matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
// Copy result from device to host
error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
printf("cudaMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
}
// Record the stop event
error = cudaEventRecord(stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = cudaEventSynchronize(stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = cudaEventElapsedTime(&msecTotal, start, stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////v
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
printf("cudaMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6 ; // machine zero
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err/abs_val/dot_length ;
if (rel_err > eps)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n");
if (correct)
{
return EXIT_SUCCESS;
}
else
{
return EXIT_FAILURE;
}
}
/**
* Program main
*/
int main(int argc, char **argv)
{
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?"))
{
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
exit(EXIT_SUCCESS);
}
// By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
int devID = 0;
if (checkCmdLineFlag(argc, (const char **)argv, "device"))
{
devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
cudaSetDevice(devID);
}
cudaError_t error;
cudaDeviceProp deviceProp;
error = cudaGetDevice(&devID);
if (error != cudaSuccess)
{
printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
}
error = cudaGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == cudaComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != cudaSuccess)
{
printf("cudaGetDeviceProperties returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
int block_size = 32;
dim3 dimsA(5*2*block_size, 5*2*block_size, 1);
dim3 dimsB(5*4*block_size, 5*2*block_size, 1);
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
{
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
{
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
// width of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
{
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
// height of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
{
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
if (dimsA.x != dimsB.y)
{
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
|
69eb16824bde1b7535a2bbfd926eaecec20b88ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Author: Dongwei Shi
//Created: 06/15/2016
//Description: this program performs template matching with CUDA. It is expected to match several templates simultaneously.
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <vector>
#include <unistd.h>
#include <string>
#include <opencv2/opencv.hpp>
#include <opencv2/nonfree/features2d.hpp>
#include </usr/local/cuda-8.0/include/cuda.h>
#include </usr/local/cuda-8.0/include/hipfft.h>
#include </usr/local/cuda-8.0/include/hipfft.h>
#define KERNEL_WIDTH 31
#define KERNEL_RADIUS (KERNEL_WIDTH/2)
#define TILE_WIDTH (33-KERNEL_WIDTH)
#define BLK_SIZE (TILE_WIDTH+KERNEL_WIDTH-1)
#define TMP_NUM 8
#define ACCURATE_MODE KERNEL_WIDTH
#define SPEED_MODE 1
#define RECORD 0
#define CROP_PARAM 2.2
using namespace std;
using namespace cv;
//global image and templates
Mat img, gray_img, prev_img;
Mat templs[TMP_NUM];
Mat img_vec[TMP_NUM];
Point kpt_vec[TMP_NUM];
Point ext_vec[TMP_NUM];
vector<Point2f > corners;
int dis[TMP_NUM];
//deviceKernel for storing the templates
__constant__ float deviceKernel[TMP_NUM*KERNEL_WIDTH*KERNEL_WIDTH];
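// 8 templates of 31 x 31 floats is about 30 KB, which fits comfortably in the 64 KB constant memory budget.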
///////////////////////////////////////////////////////////////////
/* conv2d
* Description: This function is a CUDA kernel that performs the 2D convolution of the images and templates,
* using the CV_TM_CCOEFF_NORMED method for template matching. It simultaneously performs the convolution
* on several images with specific templates.
* Input: A -- the input data of images
* x_size -- the image width
* y_size -- the image height
* template_num -- the total number of templates to be matched.
* Output: B -- the convolution results of the images.
*
*
*/
///////////////////////////////////////////////////////////////////
__global__ void conv2d(float* A, float* B, const int x_size, const int y_size, const int template_num)
{
//allocated shared memory for storing the image
__shared__ float Nds[BLK_SIZE][BLK_SIZE];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int bz = blockIdx.z;
int x_out = bx*TILE_WIDTH + tx;
int y_out = by*TILE_WIDTH + ty;
int x_in = x_out - KERNEL_RADIUS;
int y_in = y_out - KERNEL_RADIUS;
float res = 0.0;
float templ_res = 0.0;
float img_res = 0.0;
//copy the image tile to shared memory
if((x_in>=0) && (x_in<x_size) && (y_in>=0) && (y_in<y_size) && (bz>=0) && (bz<template_num) )
{
Nds[ty][tx] = A[bz*x_size*y_size + y_in*x_size + x_in];
}
else
{
Nds[ty][tx] = 0.0;
}
__syncthreads();
//perform convolution below using CV_TM_CCOEFF_NORMED method for template matching
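// For each candidate position the score is sum(T*I) / sqrt(sum(T^2) * sum(I^2)),
// where T is the template and I the image patch under it; SPEED_MODE restricts the
// inner sum to the first SPEED_MODE columns of the kernel window.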
if( (tx<TILE_WIDTH) && (ty<TILE_WIDTH) && (x_out<x_size) && (y_out<y_size) && (bz>=0) && (bz<template_num))
{
res = 0.0;
templ_res = 0.0;
img_res = 0.0;
for( int idx_y=0; idx_y<KERNEL_WIDTH; idx_y++ )
{
for( int idx_x=0; idx_x<SPEED_MODE; idx_x++ )
{
templ_res += pow(deviceKernel[bz*KERNEL_WIDTH*KERNEL_WIDTH+idx_y*KERNEL_WIDTH+idx_x],2);
img_res += pow(Nds[ty+idx_y][tx+idx_x],2);
res += Nds[ty+idx_y][tx+idx_x] * deviceKernel[bz*KERNEL_WIDTH*KERNEL_WIDTH+idx_y*KERNEL_WIDTH+idx_x];
}
}
//copy the result into the output data
__syncthreads();
if((x_out<x_size) && (y_out<y_size) && (bz<template_num))
{
B[bz*x_size*y_size + y_out*x_size + x_out] = res/sqrt(templ_res*img_res);
}
__syncthreads();
}
}
///////////////////////////////////////////////////////////////////
/* cuda_tp_img
* Description: This function handles the preparation steps for the
* CUDA kernel. It allocates several memory buffers
* on both the GPU and the CPU. It is also used to select the
* peak value of the convolution results.
* Input: templates number -- the total number of templates that need to
* be matched.
* Output: 0 -- success, -1 -- failure
*
*
*/
///////////////////////////////////////////////////////////////////
int cuda_tp_img(int template_num)
{
//get size of templates and images.
int x_size = img_vec[0].cols;
int y_size = img_vec[0].rows;
int tmp_x_size = KERNEL_WIDTH;//templs[0].cols;
int tmp_y_size = KERNEL_WIDTH;//templs[0].rows;
int img_size = x_size * y_size;
int tmpl_size = tmp_x_size * tmp_y_size;
//allocate a space to store the image intensity
float* host_img = (float*) malloc(sizeof(float)*img_size*template_num);
float* host_templ = (float*) malloc(sizeof(float)*tmpl_size*template_num);
float* gpu_out = (float*) malloc(sizeof(float)*img_size*template_num);
float* device_img_input;
float* device_img_output;
//copy the intensity value from image
for(int img_idx=0; img_idx<template_num; img_idx++)
{
for(int y=0; y<y_size; y++)
{
for(int x=0; x<x_size; x++)
{
Scalar intensity = img_vec[img_idx].at<uchar>(y,x);
host_img[y*x_size+x + img_idx*img_size] = intensity.val[0];
}
}
}
//copy the intensity value from templates
for(int tmpl_idx=0; tmpl_idx<template_num; tmpl_idx++)
{
for(int y=0; y<tmp_y_size; y++)
{
for(int x=0; x<tmp_x_size; x++)
{
Scalar intensity = templs[tmpl_idx].at<uchar>(y,x);
host_templ[y*tmp_x_size+x+tmpl_idx*tmpl_size] = intensity.val[0];
}
}
}
//allocate memory in cuda global memory
hipMalloc( (void**)&device_img_input, img_size*sizeof(float)*template_num );
hipMalloc( (void**)&device_img_output, img_size*sizeof(float)*template_num );
hipMemcpy( device_img_input, host_img, img_size*sizeof(float)*template_num, hipMemcpyHostToDevice);
hipMemcpyToSymbol( deviceKernel, host_templ, tmpl_size*sizeof(float)*template_num);
//assign blocks and threads
dim3 Dimblock(BLK_SIZE, BLK_SIZE, 1);
dim3 DimGrid((x_size + TILE_WIDTH - 1) / TILE_WIDTH, (y_size + TILE_WIDTH - 1) / TILE_WIDTH, template_num); // ceiling division so every output pixel is covered
//calling the convolution gpu function
hipLaunchKernelGGL(( conv2d) , dim3(DimGrid), dim3(Dimblock) , 0, 0, device_img_input, device_img_output, x_size, y_size, template_num);
hipDeviceSynchronize();
hipMemcpy( gpu_out, device_img_output, img_size*sizeof(float)*template_num, hipMemcpyDeviceToHost);
//Select the peak value of each image's convolution result and label it on the image.
float res = 0;
int y_pos;
for(int idx=0; idx<template_num; idx++)
{
y_pos = 0;
res = 0;
for(int y=0; y<y_size; y++)
{
for(int x=0; x<x_size; x++)
{
if(gpu_out[idx*img_size+y*x_size+x]>res)
{
res = gpu_out[idx*img_size+y*x_size+x];
y_pos = y;
}
}
}
ext_vec[idx].x = kpt_vec[idx].x;
ext_vec[idx].y = (img.rows/CROP_PARAM)+dis[idx]+y_pos;
rectangle(img, Point(kpt_vec[idx].x-KERNEL_RADIUS,(img.rows/CROP_PARAM)+dis[idx]+y_pos-KERNEL_RADIUS), Point(kpt_vec[idx].x+KERNEL_RADIUS,(img.rows/CROP_PARAM)+dis[idx]+y_pos+KERNEL_RADIUS), Scalar(0,255,0 ), 1, 4);
line(img,kpt_vec[idx],Point(kpt_vec[idx].x,(img.rows/CROP_PARAM)+dis[idx]+y_pos),Scalar(0,0,255),1,8,0);
}
//Free the memory allocated above
hipFree(device_img_input);
hipFree(device_img_output);
free(host_img);
free(host_templ);
free(gpu_out);
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char*argv[])
{
//declare variables here
int template_num;
int start = 0;
vector<Point2f > pred_vec;
vector<Point2f > ref_pred_vec;
Mat status;
Mat ref_status;
Mat err;
Mat ref_err;
//VideoWriter video("reflection_matching.avi", CV_FOURCC('M','J','P','G'), 10, Size(800, 600),true);
char filename[256];
while(fscanf(stdin, "%s", filename)!=EOF)
{
cout << filename << endl;
template_num = TMP_NUM;
img = imread(filename, -1);
img = img(Rect(30,15,img.cols-65,img.rows-45));
//imshow("input",img);
//waitKey(0);
if(!img.data)
{
cout << "Problem loading image !!!" << endl;
return -1;
}
//convert the image to gray scale in order to only have one pointer
cvtColor(img, gray_img, CV_BGR2GRAY);
//cropping the image
Mat hf_img = gray_img(Rect(0,0,gray_img.cols,gray_img.rows/CROP_PARAM));
Mat mask;
bool useHarrisDetector = false;
goodFeaturesToTrack(hf_img, corners, TMP_NUM, 0.01, 20.0, mask, 3, useHarrisDetector, 0.04);
//imshow("hf_img", hf_img);
//waitKey(0);
if(corners.size() == 0)
{
cout << "bad frame" << endl;
continue;
}
Point kpt;
for(int temp_generate_idx = 0; temp_generate_idx<template_num; temp_generate_idx++)
{
kpt = corners[temp_generate_idx];
//get the predict distance
dis[temp_generate_idx] = gray_img.rows/CROP_PARAM-kpt.y;
//boundary check for the images
if( kpt.x < KERNEL_RADIUS)
kpt.x = KERNEL_RADIUS;
if( kpt.x > (img.cols-KERNEL_WIDTH) )
kpt.x = img.cols-KERNEL_WIDTH;
if( kpt.y < KERNEL_RADIUS)
kpt.y = KERNEL_RADIUS;
if( kpt.y > ((img.rows/CROP_PARAM+dis[temp_generate_idx])-KERNEL_WIDTH) )
kpt.y = (img.rows/CROP_PARAM+dis[temp_generate_idx])-KERNEL_WIDTH;
//label the original feature point of the image
rectangle(img, Point(kpt.x-KERNEL_RADIUS,kpt.y-KERNEL_RADIUS), Point(kpt.x+KERNEL_RADIUS,kpt.y+KERNEL_RADIUS), Scalar(255,0,0 ), 1, 4);
Mat curr_tmpl = hf_img(Rect(kpt.x-KERNEL_RADIUS,kpt.y-KERNEL_RADIUS,KERNEL_WIDTH,KERNEL_WIDTH));
//flip the template in order to find the reflections
flip(curr_tmpl,templs[temp_generate_idx],0);
/*
imshow("img", img);
waitKey(0);
printf("%d:%d\n", temp_generate_idx,dis[temp_generate_idx]);
*/
//cropping the image
img_vec[temp_generate_idx] = gray_img(Rect(kpt.x-KERNEL_RADIUS,gray_img.rows/CROP_PARAM+dis[temp_generate_idx],KERNEL_WIDTH,gray_img.rows-(gray_img.rows/CROP_PARAM+dis[temp_generate_idx])));
/*
imshow("temp_img",img_vec[temp_generate_idx]);
waitKey(0);
*/
kpt_vec[temp_generate_idx] = kpt;
}
cuda_tp_img(template_num);
if( start == 0 )
{
start = 1;
prev_img = img;
continue;
}
/////**optical flow track starts here**/////
calcOpticalFlowPyrLK(prev_img, img, corners, pred_vec, status, err);
//calcOpticalFlowPyrLK(prev_img, img, ref_corners, ref_pred_vec, ref_status, ref_err);
prev_img = img;
//video.write(img);
//line(img, Point(0,img.rows/CROP_PARAM), Point(img.cols,img.rows/CROP_PARAM), Scalar(110,220,0));
imshow("img", img);
waitKey(1);
}
}
| 69eb16824bde1b7535a2bbfd926eaecec20b88ce.cu | //Author: Dongwei Shi
//Created: 06/15/2016
//Description: this program performs template matching with CUDA. It is expected to match several templates simultaneously.
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <vector>
#include <unistd.h>
#include <string>
#include <opencv2/opencv.hpp>
#include <opencv2/nonfree/features2d.hpp>
#include </usr/local/cuda-8.0/include/cuda.h>
#include </usr/local/cuda-8.0/include/cufft.h>
#include </usr/local/cuda-8.0/include/cufft.h>
#define KERNEL_WIDTH 31
#define KERNEL_RADIUS (KERNEL_WIDTH/2)
#define TILE_WIDTH (33-KERNEL_WIDTH)
#define BLK_SIZE (TILE_WIDTH+KERNEL_WIDTH-1)
#define TMP_NUM 8
#define ACCURATE_MODE KERNEL_WIDTH
#define SPEED_MODE 1
#define RECORD 0
#define CROP_PARAM 2.2
using namespace std;
using namespace cv;
//global image and templates
Mat img, gray_img, prev_img;
Mat templs[TMP_NUM];
Mat img_vec[TMP_NUM];
Point kpt_vec[TMP_NUM];
Point ext_vec[TMP_NUM];
vector<Point2f > corners;
int dis[TMP_NUM];
//deviceKernel for storing the templates
__constant__ float deviceKernel[TMP_NUM*KERNEL_WIDTH*KERNEL_WIDTH];
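// 8 templates of 31 x 31 floats is about 30 KB, which fits comfortably in the 64 KB constant memory budget.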
///////////////////////////////////////////////////////////////////
/* conv2d
* Description: This function is a CUDA kernel that performs the 2D convolution of the images and templates,
* using the CV_TM_CCOEFF_NORMED method for template matching. It simultaneously performs the convolution
* on several images with specific templates.
* Input: A -- the input data of images
* x_size -- the image width
* y_size -- the image height
* template_num -- the total number of templates to be matched.
* Output: B -- the convolution results of the images.
*
*
*/
///////////////////////////////////////////////////////////////////
__global__ void conv2d(float* A, float* B, const int x_size, const int y_size, const int template_num)
{
//allocated shared memory for storing the image
__shared__ float Nds[BLK_SIZE][BLK_SIZE];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int bz = blockIdx.z;
int x_out = bx*TILE_WIDTH + tx;
int y_out = by*TILE_WIDTH + ty;
int x_in = x_out - KERNEL_RADIUS;
int y_in = y_out - KERNEL_RADIUS;
float res = 0.0;
float templ_res = 0.0;
float img_res = 0.0;
//copy the image tile to shared memory
if((x_in>=0) && (x_in<x_size) && (y_in>=0) && (y_in<y_size) && (bz>=0) && (bz<template_num) )
{
Nds[ty][tx] = A[bz*x_size*y_size + y_in*x_size + x_in];
}
else
{
Nds[ty][tx] = 0.0;
}
__syncthreads();
//perform convolution below using CV_TM_CCOEFF_NORMED method for template matching
if( (tx<TILE_WIDTH) && (ty<TILE_WIDTH) && (x_out<x_size) && (y_out<y_size) && (bz>=0) && (bz<template_num))
{
res = 0.0;
templ_res = 0.0;
img_res = 0.0;
for( int idx_y=0; idx_y<KERNEL_WIDTH; idx_y++ )
{
for( int idx_x=0; idx_x<SPEED_MODE; idx_x++ )
{
templ_res += pow(deviceKernel[bz*KERNEL_WIDTH*KERNEL_WIDTH+idx_y*KERNEL_WIDTH+idx_x],2);
img_res += pow(Nds[ty+idx_y][tx+idx_x],2);
res += Nds[ty+idx_y][tx+idx_x] * deviceKernel[bz*KERNEL_WIDTH*KERNEL_WIDTH+idx_y*KERNEL_WIDTH+idx_x];
}
}
//copy the result into the output data
__syncthreads();
if((x_out<x_size) && (y_out<y_size) && (bz<template_num))
{
B[bz*x_size*y_size + y_out*x_size + x_out] = res/sqrt(templ_res*img_res);
}
__syncthreads();
}
}
///////////////////////////////////////////////////////////////////
/* cuda_tp_img
* Description: This function handles the preparation steps for the
* CUDA kernel. It allocates several memory buffers
* on both the GPU and the CPU. It is also used to select the
* peak value of the convolution results.
* Input: templates number -- the total number of templates that need to
* be matched.
* Output: 0 -- success, -1 -- failure
*
*
*/
///////////////////////////////////////////////////////////////////
int cuda_tp_img(int template_num)
{
//get size of templates and images.
int x_size = img_vec[0].cols;
int y_size = img_vec[0].rows;
int tmp_x_size = KERNEL_WIDTH;//templs[0].cols;
int tmp_y_size = KERNEL_WIDTH;//templs[0].rows;
int img_size = x_size * y_size;
int tmpl_size = tmp_x_size * tmp_y_size;
//allocate a space to store the image intensity
float* host_img = (float*) malloc(sizeof(float)*img_size*template_num);
float* host_templ = (float*) malloc(sizeof(float)*tmpl_size*template_num);
float* gpu_out = (float*) malloc(sizeof(float)*img_size*template_num);
float* device_img_input;
float* device_img_output;
//copy the intensity value from image
for(int img_idx=0; img_idx<template_num; img_idx++)
{
for(int y=0; y<y_size; y++)
{
for(int x=0; x<x_size; x++)
{
Scalar intensity = img_vec[img_idx].at<uchar>(y,x);
host_img[y*x_size+x + img_idx*img_size] = intensity.val[0];
}
}
}
//copy the intensity value from templates
for(int tmpl_idx=0; tmpl_idx<template_num; tmpl_idx++)
{
for(int y=0; y<tmp_y_size; y++)
{
for(int x=0; x<tmp_x_size; x++)
{
Scalar intensity = templs[tmpl_idx].at<uchar>(y,x);
host_templ[y*tmp_x_size+x+tmpl_idx*tmpl_size] = intensity.val[0];
}
}
}
//allocate memory in cuda global memory
cudaMalloc( (void**)&device_img_input, img_size*sizeof(float)*template_num );
cudaMalloc( (void**)&device_img_output, img_size*sizeof(float)*template_num );
cudaMemcpy( device_img_input, host_img, img_size*sizeof(float)*template_num, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol( deviceKernel, host_templ, tmpl_size*sizeof(float)*template_num);
//assign blocks and threads
dim3 Dimblock(BLK_SIZE, BLK_SIZE, 1);
dim3 DimGrid((x_size + TILE_WIDTH - 1) / TILE_WIDTH, (y_size + TILE_WIDTH - 1) / TILE_WIDTH, template_num); // ceiling division so every output pixel is covered
//calling the convolution gpu function
conv2d <<< DimGrid, Dimblock >>>( device_img_input, device_img_output, x_size, y_size, template_num);
cudaDeviceSynchronize();
cudaMemcpy( gpu_out, device_img_output, img_size*sizeof(float)*template_num, cudaMemcpyDeviceToHost);
//Select the peak value of each image's convolution result and label it on the image.
float res = 0;
int y_pos;
for(int idx=0; idx<template_num; idx++)
{
y_pos = 0;
res = 0;
for(int y=0; y<y_size; y++)
{
for(int x=0; x<x_size; x++)
{
if(gpu_out[idx*img_size+y*x_size+x]>res)
{
res = gpu_out[idx*img_size+y*x_size+x];
y_pos = y;
}
}
}
ext_vec[idx].x = kpt_vec[idx].x;
ext_vec[idx].y = (img.rows/CROP_PARAM)+dis[idx]+y_pos;
rectangle(img, Point(kpt_vec[idx].x-KERNEL_RADIUS,(img.rows/CROP_PARAM)+dis[idx]+y_pos-KERNEL_RADIUS), Point(kpt_vec[idx].x+KERNEL_RADIUS,(img.rows/CROP_PARAM)+dis[idx]+y_pos+KERNEL_RADIUS), Scalar(0,255,0 ), 1, 4);
line(img,kpt_vec[idx],Point(kpt_vec[idx].x,(img.rows/CROP_PARAM)+dis[idx]+y_pos),Scalar(0,0,255),1,8,0);
}
//Free the memory allocated above
cudaFree(device_img_input);
cudaFree(device_img_output);
free(host_img);
free(host_templ);
free(gpu_out);
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char*argv[])
{
//declare variables here
int template_num;
int start = 0;
vector<Point2f > pred_vec;
vector<Point2f > ref_pred_vec;
Mat status;
Mat ref_status;
Mat err;
Mat ref_err;
//VideoWriter video("reflection_matching.avi", CV_FOURCC('M','J','P','G'), 10, Size(800, 600),true);
char filename[256];
while(fscanf(stdin, "%s", filename)!=EOF)
{
cout << filename << endl;
template_num = TMP_NUM;
img = imread(filename, -1);
img = img(Rect(30,15,img.cols-65,img.rows-45));
//imshow("input",img);
//waitKey(0);
if(!img.data)
{
cout << "Problem loading image !!!" << endl;
return -1;
}
//convert the image to gray scale in order to only have one pointer
cvtColor(img, gray_img, CV_BGR2GRAY);
//cropping the image
Mat hf_img = gray_img(Rect(0,0,gray_img.cols,gray_img.rows/CROP_PARAM));
Mat mask;
bool useHarrisDetector = false;
goodFeaturesToTrack(hf_img, corners, TMP_NUM, 0.01, 20.0, mask, 3, useHarrisDetector, 0.04);
//imshow("hf_img", hf_img);
//waitKey(0);
if(corners.size() == 0)
{
cout << "bad frame" << endl;
continue;
}
Point kpt;
for(int temp_generate_idx = 0; temp_generate_idx<template_num; temp_generate_idx++)
{
kpt = corners[temp_generate_idx];
//get the predict distance
dis[temp_generate_idx] = gray_img.rows/CROP_PARAM-kpt.y;
//boundary check for the images
if( kpt.x < KERNEL_RADIUS)
kpt.x = KERNEL_RADIUS;
if( kpt.x > (img.cols-KERNEL_WIDTH) )
kpt.x = img.cols-KERNEL_WIDTH;
if( kpt.y < KERNEL_RADIUS)
kpt.y = KERNEL_RADIUS;
if( kpt.y > ((img.rows/CROP_PARAM+dis[temp_generate_idx])-KERNEL_WIDTH) )
kpt.y = (img.rows/CROP_PARAM+dis[temp_generate_idx])-KERNEL_WIDTH;
//label the original feature point of the image
rectangle(img, Point(kpt.x-KERNEL_RADIUS,kpt.y-KERNEL_RADIUS), Point(kpt.x+KERNEL_RADIUS,kpt.y+KERNEL_RADIUS), Scalar(255,0,0 ), 1, 4);
Mat curr_tmpl = hf_img(Rect(kpt.x-KERNEL_RADIUS,kpt.y-KERNEL_RADIUS,KERNEL_WIDTH,KERNEL_WIDTH));
//flip the template in order to find the reflections
flip(curr_tmpl,templs[temp_generate_idx],0);
/*
imshow("img", img);
waitKey(0);
printf("%d:%d\n", temp_generate_idx,dis[temp_generate_idx]);
*/
//cropping the image
img_vec[temp_generate_idx] = gray_img(Rect(kpt.x-KERNEL_RADIUS,gray_img.rows/CROP_PARAM+dis[temp_generate_idx],KERNEL_WIDTH,gray_img.rows-(gray_img.rows/CROP_PARAM+dis[temp_generate_idx])));
/*
imshow("temp_img",img_vec[temp_generate_idx]);
waitKey(0);
*/
kpt_vec[temp_generate_idx] = kpt;
}
cuda_tp_img(template_num);
if( start == 0 )
{
start = 1;
prev_img = img;
continue;
}
/////**optical flow track starts here**/////
calcOpticalFlowPyrLK(prev_img, img, corners, pred_vec, status, err);
//calcOpticalFlowPyrLK(prev_img, img, ref_corners, ref_pred_vec, ref_status, ref_err);
prev_img = img;
//video.write(img);
//line(img, Point(0,img.rows/CROP_PARAM), Point(img.cols,img.rows/CROP_PARAM), Scalar(110,220,0));
imshow("img", img);
waitKey(1);
}
}
|
85128f76d957d64399ee4cf9059d902d696dd0a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define NUM_BLOCKS 16
#define BLOCK_WIDTH 1
__global__ void hello()
{
printf("Hello world! I'm a thread in block %d\n", blockIdx.x);
}
int main(int argc,char **argv)
{
// launch the kernel
hipLaunchKernelGGL(( hello), dim3(NUM_BLOCKS), dim3(BLOCK_WIDTH), 0, 0, );
// force the printf()s to flush
hipDeviceSynchronize();
printf("That's all!\n");
return 0;
}
| 85128f76d957d64399ee4cf9059d902d696dd0a3.cu | #include <stdio.h>
#define NUM_BLOCKS 16
#define BLOCK_WIDTH 1
__global__ void hello()
{
printf("Hello world! I'm a thread in block %d\n", blockIdx.x);
}
int main(int argc,char **argv)
{
// launch the kernel
hello<<<NUM_BLOCKS, BLOCK_WIDTH>>>();
// force the printf()s to flush
cudaDeviceSynchronize();
printf("That's all!\n");
return 0;
}
|
b55804fe3ba32e6b21778a2d3f52af8c6f566ade.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "dynamicconv_cuda.cuh"
#include "dynamicconv_cuda_forward.cu"
#include "dynamicconv_cuda_backward.cu"
#include "hip_utils.hip"
// FS is filter size and kernels are specialized for filter sizes
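// SB is the tile width along the sequence dimension handled by a thread block (blockDim.x == SB);
// padding_l is the left padding of the convolution window (the backward pass derives the right
// padding as FS - padding_l - 1).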
template<int FS, int SB, int padding_l, typename scalar_t>
__global__
void dynamicconv_forward_kernel(const scalar_t* input,
const scalar_t* weight,
int minibatch,
int sequenceLength,
int numFeatures,
int numFiltersInBlock,
int numHeads,
scalar_t* output) {
assert(blockDim.x == SB);
const int tid = threadIdx.x;
const int batchIdx = blockIdx.x;
const int featureIdx = blockIdx.y;
const int head = featureIdx / numFiltersInBlock;
const int IOOffset = batchIdx * numFeatures * sequenceLength
+ featureIdx * sequenceLength;
const scalar_t* inputFeature = &input[IOOffset];
scalar_t* outputFeature = &output[IOOffset];
scalar_t filter[FS];
__shared__ scalar_t tempInput[SB + FS];
zeroSharedMem<FS, SB, padding_l>(tempInput);
const int numIterations = divUp<int, int>(sequenceLength, SB);
for (int i = 0; i < numIterations; ++i) {
__syncthreads();
const int inputOffset = i * SB;
load_input_to_shared<FS, SB, padding_l>(inputFeature, inputOffset,
sequenceLength, i,
numIterations, false, tempInput);
__syncthreads();
if (inputOffset + tid < sequenceLength) {
#pragma unroll
for (int k = 0; k < FS; ++k) {
const int filterOffset = batchIdx * numHeads * FS * sequenceLength
+ head * FS * sequenceLength
+ k * sequenceLength
+ i * SB + tid;
filter[k] = weight[filterOffset];
}
scalar_t out = scalar_t(0.0);
#pragma unroll
for (int k = 0; k < FS; ++k) {
out += filter[k] * tempInput[tid + k];
}
outputFeature[inputOffset + tid] = out;
}
}
}
template<int FS, int SB, int padding_l, typename scalar_t>
__global__
void dynamicconv_backward_kernel(
const scalar_t* gradOutput, // B * C * T
const scalar_t* input, // B * C * T
const scalar_t* weight,
int minibatch,
int sequenceLength,
int numFeatures,
int numFiltersInBlock,
int numHeads,
scalar_t* gradWeight,
scalar_t* gradInput) { // B * H * k * T
assert(blockDim.x == SB);
// each block operates on a single batch and filter head
const int tid = threadIdx.x;
const int batchIdx = blockIdx.x;
const int headIdx = blockIdx.y;
const int chunkIdx = blockIdx.z;
const int numChunks = divUp<int, int>(sequenceLength, SB);
const int inputOffset = chunkIdx * SB;
// initialize shared memory for output gradient and input
__shared__ scalar_t tempGradOutput[SB + FS];
__shared__ scalar_t tempInput[SB + FS];
const int padding = FS - padding_l - 1;
zeroSharedMem<FS, SB, padding>(tempGradOutput);
zeroSharedMem<FS, SB, padding_l>(tempInput);
// initialize local filter and weight gradient sum arrays
scalar_t tempGradSum[FS];
scalar_t bfilter[FS];
for (int k = 0; k < FS; ++k) {
tempGradSum[k] = scalar_t(0.0);
int idxOffset = inputOffset + tid + k - padding;
if (idxOffset >= 0 && idxOffset < sequenceLength) {
int bfilterOffset = batchIdx * numHeads * FS * sequenceLength
+ headIdx * FS * sequenceLength
+ (FS - k - 1) * sequenceLength
+ idxOffset;
bfilter[k] = weight[bfilterOffset];
} else {
bfilter[k] = scalar_t(0.0);
}
}
// iterate over filter block
for (int featureIdx = 0; featureIdx < numFiltersInBlock; ++featureIdx) {
__syncthreads();
// load input and output gradient for this channel and chunk
const int IOOffset = batchIdx * numFeatures * sequenceLength
+ (headIdx * numFiltersInBlock + featureIdx) * sequenceLength;
const scalar_t* inputFeature = &input[IOOffset];
const scalar_t* gradOutputFeature = &gradOutput[IOOffset];
scalar_t* gradInputFeature = &gradInput[IOOffset];
load_input_to_shared<FS, SB, padding>(gradOutputFeature, inputOffset,
sequenceLength, chunkIdx,
numChunks, true, tempGradOutput);
load_input_to_shared<FS, SB, padding_l>(inputFeature, inputOffset,
sequenceLength, chunkIdx,
numChunks, true, tempInput);
__syncthreads();
// sum input and weight gradients
scalar_t out = scalar_t(0.0);
#pragma unroll
for (int k = 0; k < FS; ++k) {
tempGradSum[k] += tempInput[tid + k] * tempGradOutput[tid + padding];
out += bfilter[k] * tempGradOutput[tid + k];
}
if (inputOffset + tid < sequenceLength) {
gradInputFeature[inputOffset + tid] = out;
}
}
const int gradOffset = batchIdx * numHeads * FS * sequenceLength
+ headIdx * FS * sequenceLength;
scalar_t *gradWeightFeature = &gradWeight[gradOffset];
// write weight gradient
if (inputOffset + tid < sequenceLength) {
for (int k = 0; k < FS; ++k) {
const int outputOffset = k * sequenceLength + inputOffset + tid;
gradWeightFeature[outputOffset] = tempGradSum[k];
}
}
}
| b55804fe3ba32e6b21778a2d3f52af8c6f566ade.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "dynamicconv_cuda.cuh"
#include "dynamicconv_cuda_forward.cu"
#include "dynamicconv_cuda_backward.cu"
#include "cuda_utils.cu"
// FS is filter size and kernels are specialized for filter sizes
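// SB is the tile width along the sequence dimension handled by a thread block (blockDim.x == SB);
// padding_l is the left padding of the convolution window (the backward pass derives the right
// padding as FS - padding_l - 1).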
template<int FS, int SB, int padding_l, typename scalar_t>
__global__
void dynamicconv_forward_kernel(const scalar_t* input,
const scalar_t* weight,
int minibatch,
int sequenceLength,
int numFeatures,
int numFiltersInBlock,
int numHeads,
scalar_t* output) {
assert(blockDim.x == SB);
const int tid = threadIdx.x;
const int batchIdx = blockIdx.x;
const int featureIdx = blockIdx.y;
const int head = featureIdx / numFiltersInBlock;
const int IOOffset = batchIdx * numFeatures * sequenceLength
+ featureIdx * sequenceLength;
const scalar_t* inputFeature = &input[IOOffset];
scalar_t* outputFeature = &output[IOOffset];
scalar_t filter[FS];
__shared__ scalar_t tempInput[SB + FS];
zeroSharedMem<FS, SB, padding_l>(tempInput);
const int numIterations = divUp<int, int>(sequenceLength, SB);
for (int i = 0; i < numIterations; ++i) {
__syncthreads();
const int inputOffset = i * SB;
load_input_to_shared<FS, SB, padding_l>(inputFeature, inputOffset,
sequenceLength, i,
numIterations, false, tempInput);
__syncthreads();
if (inputOffset + tid < sequenceLength) {
#pragma unroll
for (int k = 0; k < FS; ++k) {
const int filterOffset = batchIdx * numHeads * FS * sequenceLength
+ head * FS * sequenceLength
+ k * sequenceLength
+ i * SB + tid;
filter[k] = weight[filterOffset];
}
scalar_t out = scalar_t(0.0);
#pragma unroll
for (int k = 0; k < FS; ++k) {
out += filter[k] * tempInput[tid + k];
}
outputFeature[inputOffset + tid] = out;
}
}
}
template<int FS, int SB, int padding_l, typename scalar_t>
__global__
void dynamicconv_backward_kernel(
const scalar_t* gradOutput, // B * C * T
const scalar_t* input, // B * C * T
const scalar_t* weight,
int minibatch,
int sequenceLength,
int numFeatures,
int numFiltersInBlock,
int numHeads,
scalar_t* gradWeight,
scalar_t* gradInput) { // B * H * k * T
assert(blockDim.x == SB);
// each block operates on a single batch and filter head
const int tid = threadIdx.x;
const int batchIdx = blockIdx.x;
const int headIdx = blockIdx.y;
const int chunkIdx = blockIdx.z;
const int numChunks = divUp<int, int>(sequenceLength, SB);
const int inputOffset = chunkIdx * SB;
// initialize shared memory for output gradient and input
__shared__ scalar_t tempGradOutput[SB + FS];
__shared__ scalar_t tempInput[SB + FS];
const int padding = FS - padding_l - 1;
zeroSharedMem<FS, SB, padding>(tempGradOutput);
zeroSharedMem<FS, SB, padding_l>(tempInput);
// initialize local filter and weight gradient sum arrays
scalar_t tempGradSum[FS];
scalar_t bfilter[FS];
for (int k = 0; k < FS; ++k) {
tempGradSum[k] = scalar_t(0.0);
int idxOffset = inputOffset + tid + k - padding;
if (idxOffset >= 0 && idxOffset < sequenceLength) {
int bfilterOffset = batchIdx * numHeads * FS * sequenceLength
+ headIdx * FS * sequenceLength
+ (FS - k - 1) * sequenceLength
+ idxOffset;
bfilter[k] = weight[bfilterOffset];
} else {
bfilter[k] = scalar_t(0.0);
}
}
// iterate over filter block
for (int featureIdx = 0; featureIdx < numFiltersInBlock; ++featureIdx) {
__syncthreads();
// load input and output gradient for this channel and chunk
const int IOOffset = batchIdx * numFeatures * sequenceLength
+ (headIdx * numFiltersInBlock + featureIdx) * sequenceLength;
const scalar_t* inputFeature = &input[IOOffset];
const scalar_t* gradOutputFeature = &gradOutput[IOOffset];
scalar_t* gradInputFeature = &gradInput[IOOffset];
load_input_to_shared<FS, SB, padding>(gradOutputFeature, inputOffset,
sequenceLength, chunkIdx,
numChunks, true, tempGradOutput);
load_input_to_shared<FS, SB, padding_l>(inputFeature, inputOffset,
sequenceLength, chunkIdx,
numChunks, true, tempInput);
__syncthreads();
// sum input and weight gradients
scalar_t out = scalar_t(0.0);
#pragma unroll
for (int k = 0; k < FS; ++k) {
tempGradSum[k] += tempInput[tid + k] * tempGradOutput[tid + padding];
out += bfilter[k] * tempGradOutput[tid + k];
}
if (inputOffset + tid < sequenceLength) {
gradInputFeature[inputOffset + tid] = out;
}
}
const int gradOffset = batchIdx * numHeads * FS * sequenceLength
+ headIdx * FS * sequenceLength;
scalar_t *gradWeightFeature = &gradWeight[gradOffset];
// write weight gradient
if (inputOffset + tid < sequenceLength) {
for (int k = 0; k < FS; ++k) {
const int outputOffset = k * sequenceLength + inputOffset + tid;
gradWeightFeature[outputOffset] = tempGradSum[k];
}
}
}
|
95fb6dd1b2c51b9cbed95890353bbbe3511f1cde.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Implementing the FFT algorithm for general input
* Input should be fp32 vectors with size equals to the power of 4
* Number of vectors is given by BATCH (B)
* Recursive algorithm
* Base case is fft4
* Combine all components in one file
*/
// C includes
#include <stdio.h>
#include <assert.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
// CUDA includes
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <hip/hip_fp16.h>
#include "nvidia_helper/checkCudaErrors.h"
// Matrix and vector
#include "helper/my_vector.h"
#include "helper/my_matrix.h"
#include "helper/my_const.h"
#define PI 3.14159265
#define EPS 0.0000001192f
const float UPPER_BOUND = 1.0f;
const int BATCH = 8;
const int SIZE = 1024;
// Utility function declaration
FFT_S init_F4();
__global__ void mySplit(float* X, half* Xhi, half* Xlo, float* s1, float* s2, int N, int B, float* Xtemp);
__global__ void myAccumulate(int N, float* X1, float* X2, float* alpha, float* R1, float* R2, int B);
FFT_S fft4(int B, fft::MatrixF X_re, fft::MatrixF X_im, fft::MatrixF FX_re, fft::MatrixF FX_im);
__global__ void mySplit_transposed(float* X, half* Xhi, half* Xlo, float* s1, float* s2, int n, int M, int B, float* Xtemp);
__global__ void myAccumulate_transposed(float* X1, float* X2, float* alpha, float* R1, float* R2, int n, int M, int B);
FFT_S fft4_transposed(int M, int B, fft::MatrixF X_re, fft::MatrixF X_im, fft::MatrixF FX_re, fft::MatrixF FX_im);
__global__ void myTranspose(int m, int n, float* input, float* output, int B);
__global__ void multiply_twiddle(int N, int m, int n, float* matrix_re, float* matrix_im, int B);
FFT_S gfft(int N, int B, fft::MatrixF& X_re, fft::MatrixF& X_im, fft::MatrixF& FX_re, fft::MatrixF& FX_im);
// Global variables
fft::MatrixH F4_re;
fft::MatrixH F4_im;
float* buffer;
float* X_temp;
int main()
{
int mem_size;
// Set device heap size
hipDeviceSetLimit(hipLimitMallocHeapSize, 1024 * 1024 * 64);
// allocate unified memory for input matrix
fft::MatrixF input_re;
input_re.width = BATCH;
input_re.height = SIZE;
mem_size = input_re.width * input_re.height * sizeof(float);
checkCudaErrors(hipMallocManaged((void **) &(input_re.array), mem_size));
fft::MatrixF input_im;
input_im.width = BATCH;
input_im.height = SIZE;
mem_size = input_im.width * input_im.height * sizeof(float);
checkCudaErrors(hipMallocManaged((void **) &(input_im.array), mem_size));
// Initialize the input matrix
srand(time(NULL));
printf("The input is: \n");
for (int j = 1; j <= BATCH; j++){
printf("Vector %d: \n", j);
for (int i = 1; i <= SIZE; i++){
input_re.element(i, j) = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND;
input_im.element(i, j) = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND;
input_re.element(i, j) = (float)i;
input_im.element(i, j) = 0.0f;
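// NOTE: the random values generated above are immediately overwritten with a
// deterministic ramp (re = i, im = 0) so the output is easy to check by hand.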
printf("X[%d] = (%.10f, %.10f) \n", i, input_re.element(i, j), input_im.element(i, j));
}
printf("\n");
}
// allocate unified memory for output matrix
fft::MatrixF output_re;
output_re.width = BATCH;
output_re.height = SIZE;
mem_size = output_re.width * output_re.height * sizeof(float);
checkCudaErrors(hipMallocManaged((void **) &(output_re.array), mem_size));
fft::MatrixF output_im;
output_im.width = BATCH;
output_im.height = SIZE;
mem_size = output_im.width * output_im.height * sizeof(float);
checkCudaErrors(hipMallocManaged((void **) &(output_im.array), mem_size));
// allocate unified memory for the buffer (array of float)
mem_size = SIZE * BATCH * sizeof(float);
checkCudaErrors(hipMallocManaged((void **) &buffer, mem_size));
checkCudaErrors(hipMallocManaged((void **) &X_temp, mem_size));
FFT_S status;
// Initialize Fourier matrix
status = init_F4();
if (status != FFT_SUCCESS){
fprintf(stderr, "!!!!! Matrix initialization error (init Fourier matrix).\n");
return FFT_FAILURE;
}
// Call gfft function
status = gfft(SIZE, BATCH, input_re, input_im, output_re, output_im);
if (status != FFT_SUCCESS){
printf("Error in running fft algorithm\n");
exit(1);
}
printf("Result: \n");
for (int j = 1; j <= BATCH; j++){
printf("Resulting vector %d: \n", j);
for (int i = 1; i <= SIZE; i++){
printf("FX[%d] = (%.10f, %.10f) \n", i, output_re.element(i, j), output_im.element(i, j));
}
}
checkCudaErrors(hipFree(input_re.array));
checkCudaErrors(hipFree(input_im.array));
checkCudaErrors(hipFree(output_re.array));
checkCudaErrors(hipFree(output_im.array));
return 0;
}
FFT_S gfft(int N, int B, fft::MatrixF& X_re, fft::MatrixF& X_im, fft::MatrixF& FX_re, fft::MatrixF& FX_im)
{
if (N == 4) {
return fft4(B, X_re, X_im, FX_re, FX_im);
}
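// For N > 4, use the four-step decomposition: reshape each batch column to 4 x (N/4),
// transpose to (N/4) x 4, run the N/4-point FFTs through the recursive call, multiply by
// the twiddle factors, and finish with 4-point FFTs across the rows (fft4_transposed).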
// Status variable declaration
hipblasStatus_t status;
hipblasHandle_t handle;
FFT_S fft_status;
hipError_t cerror;
// Initialize cublas
status = hipblasCreate(&handle);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS initialization error\n");
return FFT_FAILURE;
}
// Reshape the output matrix: (N -(Reshape)->4*(N/4)) * B
FX_re.width = N / 4 * B; FX_re.height = 4;
FX_im.width = N / 4 * B; FX_im.height = 4;
// Transpose input matrix: (4*(N/4) -(Transpose)-> (N/4)*4) * B
// Store result directly in FX_re.array and FX_im.array
//// Set grid and block size
dim3 threadsPerBlock1(4, 16);
dim3 blockPerGrid1(B, (N / 4 + 15)/16); // Make sure blocks are enough
//// Real matrix
hipLaunchKernelGGL(( myTranspose), dim3(blockPerGrid1), dim3(threadsPerBlock1), 0, 0, 4, N / 4, X_re.array, FX_re.array, B);
cerror = hipGetLastError();
if (cerror != hipSuccess)
{
printf("CUDA error: %s during first transposition of real matrix\n", hipGetErrorString(cerror));
return FFT_FAILURE;
}
////// Set dimension (Note that the transpose happens batch-wisely)
FX_re.height = N / 4; FX_re.width = 4 * B;
//// Imaginary matrix
hipLaunchKernelGGL(( myTranspose), dim3(blockPerGrid1), dim3(threadsPerBlock1), 0, 0, 4, N / 4, X_im.array, FX_im.array, B);
cerror = hipGetLastError();
if (cerror != hipSuccess)
{
printf("CUDA error: %s during first transposition of imaginary matrix\n", hipGetErrorString(cerror));
return FFT_FAILURE;
}
////// Set dimension
FX_im.height = N / 4; FX_im.width = B * 4;
hipDeviceSynchronize();
// Recursively call the gfft function, operating in place (the buffer matrix is not used here)
//// Call gfft, storing the result directly back into FX_re and FX_im
fft_status = gfft(N / 4, 4 * B, FX_re, FX_im, FX_re, FX_im);
if (fft_status != FFT_SUCCESS){
fprintf(stderr, "!!!!! Execution error (recursively call gfft).\n");
return FFT_FAILURE;
}
// Multiplication with twiddle factors
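// The multiply_twiddle kernel is expected to scale element (j, k) of each (N/4) x 4 block by
// exp(-2*pi*i*j*k/N) (real and imaginary parts handled separately), the standard Cooley-Tukey
// twiddle step between the two FFT stages.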
//// Set grid and block size
dim3 threadsPerBlock2(4, 16);
dim3 blockPerGrid2(B, (N / 4 + 15)/16); // Make sure blocks are enough
//// Call kernel function
hipLaunchKernelGGL(( multiply_twiddle), dim3(blockPerGrid2), dim3(threadsPerBlock2), 0, 0, N, N/4, 4, FX_re.array, FX_im.array, B);
cerror = hipGetLastError();
if (cerror != hipSuccess)
{
printf("CUDA error: %s during twiddle multiplication\n", hipGetErrorString(cerror));
return FFT_FAILURE;
}
hipDeviceSynchronize();
// Using the improved algorithm without transposition
fft_status = fft4_transposed(N / 4, B, FX_re, FX_im, FX_re, FX_im);
hipDeviceSynchronize();
if (fft_status != FFT_SUCCESS){
fprintf(stderr, "!!!!! Execution error (calling fft4_transposed).\n");
return FFT_FAILURE;
}
// Reshape back input and output matrix: (4*(N/4) --Reshape--> N) * B
FX_re.width = B; FX_re.height = N;
FX_im.width = B; FX_im.height = N;
// Shutdown cublas
status = hipblasDestroy(handle);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! shutdown error (A)\n");
return FFT_FAILURE;
}
return FFT_SUCCESS;
}
__global__ void myTranspose(int m, int n, float* input, float* output, int B)
{
/*
* Transpose the B input matrices with size m * n
* Every matrix in a batch is transposed independently
* Input should be matrix of size m * (n * B)
* Output should be matrix of size n * (m * B)
* The grid size is expected to be B * 1
* Use case: first transpose, from 4 * (N / 4) to (N / 4) * 4
* */
// Calculate position in the OUTPUT matrix (0 based)
int j = threadIdx.x; // Column number within a matrix, expected to be 0, 1, 2, 3
int i = blockIdx.y * blockDim.y + threadIdx.y; // Row number within a matrix
int matrix_id = blockIdx.x;
if (i < n && j < m && matrix_id < B){
output[matrix_id * m * n + j * n + i] = input[matrix_id * m * n + i * m + j];
}
}
FFT_S fft4(int B, fft::MatrixF X_re, fft::MatrixF X_im, fft::MatrixF FX_re, fft::MatrixF FX_im)
{
// Variable declaration
hipblasStatus_t status;
hipblasHandle_t handle;
hipError_t cerror;
//// Unified variables
float *scales; // = *re_s1, *re_s2, *im_s1, *im_s2;
half *X_split; // = *X_re_hi, *X_re_lo, *X_im_hi, *X_im_lo;
float *result1, *result2; // Store the intermediate result
//// Scaling variables
float alpha = 1.0f, beta = 0.0f;
// Initialize cublas
status = hipblasCreate(&handle);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS initialization error\n");
return FFT_FAILURE;
}
status = cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH); // allow Tensor Core
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS setting math mode error\n");
return FFT_FAILURE;
}
// Allocate unified memory with 0 initialization
checkCudaErrors(hipMallocManaged((void **) &scales, B * 4 * sizeof(float)));
checkCudaErrors(hipMemset(scales, 0.0f, B * 4 * sizeof(float)));
checkCudaErrors(hipMallocManaged((void **) &X_split, 4 * B * 4 * sizeof(half)));
checkCudaErrors(hipMemset(X_split, 0.0f, 4 * B * 4 * sizeof(half)));
checkCudaErrors(hipMallocManaged((void **) &result1, 4 * B * 4 * sizeof(result1[0])));
checkCudaErrors(hipMemset(result1, 0.0f, 4 * B * 4 * sizeof(result1[0])));
checkCudaErrors(hipMallocManaged((void **) &result2, 4 * B * 4 * sizeof(result2[0])));
checkCudaErrors(hipMemset(result2, 0.0f, 4 * B * 4 * sizeof(result2[0])));
// Split input
//// Initialize Matrix and Vector data structure to store split result
fft::MatrixH X_re_hi;
X_re_hi.width = B;
X_re_hi.height = 4;
X_re_hi.array = X_split + 4 * B * 0;
fft::MatrixH X_re_lo;
X_re_lo.width = B;
X_re_lo.height = 4;
X_re_lo.array = X_split + 4 * B * 1;
fft::MatrixH X_im_hi;
X_im_hi.width = B;
X_im_hi.height = 4;
X_im_hi.array = X_split + 4 * B * 2;
fft::MatrixH X_im_lo;
X_im_lo.width = B;
X_im_lo.height = 4;
X_im_lo.array = X_split + 4 * B * 3;
fft::VectorF re_s1;
re_s1.size = B;
re_s1.array = scales + B * 0;
fft::VectorF re_s2;
re_s2.size = B;
re_s2.array = scales + B * 1;
fft::VectorF im_s1;
im_s1.size = B;
im_s1.array = scales + B * 2;
fft::VectorF im_s2;
im_s2.size = B;
im_s2.array = scales + B * 3;
//// Call splitting function
int numThreads = 64;
int numBlocks = (B + 63) / 64;
hipLaunchKernelGGL(( mySplit), dim3(numBlocks), dim3(numThreads), 0, 0, X_re.array, X_re_hi.array, X_re_lo.array, re_s1.array, re_s2.array, 4, B, X_temp);
hipLaunchKernelGGL(( mySplit), dim3(numBlocks), dim3(numThreads), 0, 0, X_im.array, X_im_hi.array, X_im_lo.array, im_s1.array, im_s2.array, 4, B, X_temp);
cerror = hipGetLastError();
if (cerror != hipSuccess)
{
printf("CUDA error: %s during splitting\n", hipGetErrorString(cerror));
return FFT_FAILURE;
}
// Call cublas function and finish Matrix multiplication calculation
//// Call cublas gemm on F4_re
status = hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, 4, B * 4, 4, &alpha, F4_re.array,
HIP_R_16F, 4, X_split, HIP_R_16F, 4, &beta, result1, HIP_R_32F, 4, HIP_R_32F, HIPBLAS_GEMM_DEFAULT);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS kernel execution error (a * (c, d)).\n");
return FFT_FAILURE;
}
//// Call cublas gemm on F4_im
status = hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, 4, B * 4, 4, &alpha, F4_im.array,
HIP_R_16F, 4, X_split, HIP_R_16F, 4, &beta, result2, HIP_R_32F, 4, HIP_R_32F, HIPBLAS_GEMM_DEFAULT);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS kernel execution error (b * (c, d)).\n");
return FFT_FAILURE;
}
// Scale, combine and get result, add to output
//// Set grid and block size
dim3 threadsPerBlock(16, 4);
dim3 BlocksPerGrid((B+15)/16, 1);
//// call kernel function (buffer is zero-initialized inside)
hipLaunchKernelGGL(( myAccumulate), dim3(BlocksPerGrid), dim3(threadsPerBlock), 0, 0, 4, result1, result2, scales, FX_re.array, FX_im.array, B);
cerror = hipGetLastError();
if (cerror != hipSuccess)
{
printf("CUDA error: %s during accumulation\n", hipGetErrorString(cerror));
return FFT_FAILURE;
}
// Deallocate unified memory
if (hipFree(scales) != hipSuccess) {
fprintf(stderr, "!!!! unified memory free error (free scales vector)\n");
return FFT_FAILURE;
}
if (hipFree(X_split) != hipSuccess) {
fprintf(stderr, "!!!! unified memory free error (free split result matrix)\n");
return FFT_FAILURE;
}
if (hipFree(result1) != hipSuccess) {
fprintf(stderr, "!!!! unified memory free error (free result 1 Matrix)\n");
return FFT_FAILURE;
}
if (hipFree(result2) != hipSuccess) {
fprintf(stderr, "!!!! unified memory free error (free result 2 Matrix)\n");
return FFT_FAILURE;
}
// Shutdown cublas
status = hipblasDestroy(handle);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! shutdown error (A)\n");
return FFT_FAILURE;
}
hipDeviceSynchronize();
return FFT_SUCCESS;
}
FFT_S init_F4()
{
// Allocate unified memory for Fourier Matrix
int mem_size;
F4_re.width = 4;
F4_re.height = 4;
mem_size = F4_re.width * F4_re.height * sizeof(half);
checkCudaErrors(hipMallocManaged((void **) &(F4_re.array), mem_size));
F4_im.width = 4;
F4_im.height = 4;
mem_size = F4_im.width * F4_im.height * sizeof(half);
checkCudaErrors(hipMallocManaged((void **) &(F4_im.array), mem_size));
F4_re.element(1, 1) = 1.0f;
F4_re.element(2, 1) = 1.0f;
F4_re.element(3, 1) = 1.0f;
F4_re.element(4, 1) = 1.0f;
F4_re.element(1, 2) = 1.0f;
F4_re.element(2, 2) = 0.0f;
F4_re.element(3, 2) =-1.0f;
F4_re.element(4, 2) = 0.0f;
F4_re.element(1, 3) = 1.0f;
F4_re.element(2, 3) =-1.0f;
F4_re.element(3, 3) = 1.0f;
F4_re.element(4, 3) =-1.0f;
F4_re.element(1, 4) = 1.0f;
F4_re.element(2, 4) = 0.0f;
F4_re.element(3, 4) =-1.0f;
F4_re.element(4, 4) = 0.0f;
F4_im.element(1, 1) = 0.0f;
F4_im.element(2, 1) = 0.0f;
F4_im.element(3, 1) = 0.0f;
F4_im.element(4, 1) = 0.0f;
F4_im.element(1, 2) = 0.0f;
F4_im.element(2, 2) =-1.0f;
F4_im.element(3, 2) = 0.0f;
F4_im.element(4, 2) = 1.0f;
F4_im.element(1, 3) = 0.0f;
F4_im.element(2, 3) = 0.0f;
F4_im.element(3, 3) = 0.0f;
F4_im.element(4, 3) = 0.0f;
F4_im.element(1, 4) = 0.0f;
F4_im.element(2, 4) = 1.0f;
F4_im.element(3, 4) = 0.0f;
F4_im.element(4, 4) =-1.0f;
return FFT_SUCCESS;
}
__global__ void mySplit(float* X, half* Xhi, half* Xlo, float* s1, float* s2, int N, int B, float* Xtemp)
{
/*
* fft::MatrixF X (N*B), fft::MatrixH Xhi (N*B), fft::MatrixH Xlo (N*B)
* fft::VectorF s1, fft::VectorF s2
* int N, int B. N is always 4
* Grid and dim size should be 1D, total size = B
* All data should be in unified memory or device memory
* */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < B){
// Calculate scaling factor 1
float scale1 = 0.0f;
for (int i = 0; i < N; i++){
float norm = (float) fabs(X[i + idx * N]);
if (norm > scale1) scale1 = norm;
}
// If all numbers are zero, skip
if (scale1 == 0.0f){
s1[idx] = 0.0f;
s2[idx] = 0.0f;
for (int i = 0; i < N; i++){
Xhi[i + idx * N] = Xlo[i + idx * N] = 0.0f;
}
}
else
{
// Restrict scale range
if (scale1 < EPS) scale1 = EPS;
if (scale1 > 1.0f/EPS) scale1 = 1.0f/EPS;
s1[idx] = scale1;
// Scale the high half
for (int i = 0; i < N; i++){
Xtemp[i + idx * N] = X[i + idx * N]/scale1;
Xhi[i + idx * N] = (half)(Xtemp[i + idx * N]);
// Use Xtemp to store the residual
Xtemp[i + idx * N] = X[i + idx * N] - scale1 * (float)(Xhi[i + idx * N]);
}
// Calculate the lower scaling factor
float scale2 = 0.0f;
for (int i = 0; i < N; i++){
float norm = (float) fabs(Xtemp[i + idx * N]);
if (norm > scale2) scale2 = norm;
}
// If all numbers are zero, skip
if (scale2 == 0.0f){
s2[idx] = 0.0f;
for (int i = 0; i < N; i++){
Xlo[i + idx * N] = 0.0f;
}
}
else
{
// Restrict scale range
if (scale2 < EPS) scale2 = EPS;
if (scale2 > 1.0f/EPS) scale2 = 1.0f/EPS;
s2[idx] = scale2;
for (int i = 0; i < N; i++){
Xlo[i + idx * N] = (half) (Xtemp[i + idx * N] / scale2);
}
}
}
}
}
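/*
 * Split scheme sketch (illustrative): each fp32 column X is represented as
 *     X ~= s1 * (float)Xhi + s2 * (float)Xlo
 * where Xhi = (half)(X / s1) keeps the leading bits,
 * Xlo = (half)((X - s1 * (float)Xhi) / s2) keeps the residual, and s1, s2
 * are the per-column maximum absolute values clamped to [EPS, 1/EPS] so the
 * half conversions stay in range.
 */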
__global__ void myAccumulate(int N, float* X1, float* X2, float* alpha, float* R1, float* R2, int B)
{
/*
* N is number of elements in one column (expected to be 4)
* X1, X2 are 4 * (B * 4) column-major matrix. Inner order is by batch. Outer order is Re_hi, Re_lo, Im_hi, Im_lo
* alpha is B * 4 array. Inner order is by batch. Outer order is re_s1, re_s2, im_s1, im_s2
* R1, R2 are 4 * B matrix
* B is batch size
* */
int i = blockIdx.y * blockDim.y + threadIdx.y; // row number
int j = blockIdx.x * blockDim.x + threadIdx.x; // column number
if (i < N && j < B){
R1[i + j * N] = R2[i + j * N] = 0.0f;
R1[i + j * N] += alpha[j] * X1[i + j * N];
R1[i + j * N] += alpha[j + B] * X1[i + j * N + N * B];
R1[i + j * N] += -1.0f * alpha[j + 2*B] * X2[i + j * N + N * 2 * B];
R1[i + j * N] += -1.0f * alpha[j + 3*B] * X2[i + j * N + N * 3 * B];
R2[i + j * N] += alpha[j] * X2[i + j * N];
R2[i + j * N] += alpha[j + B] * X2[i + j * N + N * B];
R2[i + j * N] += alpha[j + 2*B] * X1[i + j * N + N * 2 * B];
R2[i + j * N] += alpha[j + 3*B] * X1[i + j * N + N * 3 * B];
}
}
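/*
 * Derivation sketch (illustrative): write the input column as
 *     X = re_s1*Re_hi + re_s2*Re_lo + i*(im_s1*Im_hi + im_s2*Im_lo)
 * and the Fourier matrix as F4 = A + i*B. Then
 *     Re(F4*X) = re_s1*A*Re_hi + re_s2*A*Re_lo - im_s1*B*Im_hi - im_s2*B*Im_lo
 *     Im(F4*X) = re_s1*B*Re_hi + re_s2*B*Re_lo + im_s1*A*Im_hi + im_s2*A*Im_lo
 * X1 holds the four A*(...) blocks and X2 the four B*(...) blocks, so the
 * scaled accumulation above is exactly this expansion.
 */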
__global__ void multiply_twiddle(int N, int m, int n, float* matrix_re, float* matrix_im, int B)
{
/*
* Multiply every element of the input matrix by its twiddle factor
* Every matrix in a batch is scaled independently
* Block and thread layout should be 2D
* Re.element(i, j) [0 based] = xre * cos(2pi/N * i * j) + xim * sin(2pi/N * i * j)
* Im.element(i, j) [0 based] = -xre * sin(2pi/N * i * j) + xim * cos(2pi/N * i * j)
* */
// Calculate position (0 based)
int j = threadIdx.x; // Column number within a matrix, 0 to 3 in radix 4
int i = blockIdx.y * blockDim.y + threadIdx.y; // Row number within a matrix
int matrix_id = blockIdx.x;
if (i < m && j < n && matrix_id < B){
// Per-thread local variables
int index = matrix_id * N + j * m + i;
float tw_re = cos(2 * PI / N * i * j);
float tw_im = sin(2 * PI / N * i * j);
float result_re = matrix_re[index] * tw_re + matrix_im[index] * tw_im;
float result_im = -1.0f * matrix_re[index] * tw_im + matrix_im[index] * tw_re;
matrix_re[index] = result_re;
matrix_im[index] = result_im;
}
}
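/*
 * Worked twiddle example (illustrative): for N = 16 and (i, j) = (1, 1) the
 * angle is 2*pi/16, so tw_re = cos(pi/8) ~= 0.9239 and tw_im = sin(pi/8)
 * ~= 0.3827; the element is rotated by -2*pi*i*j/N, i.e. multiplied by
 * w_N^(i*j), the standard Cooley-Tukey twiddle factor.
 */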
FFT_S fft4_transposed(int M, int B, fft::MatrixF X_re, fft::MatrixF X_im, fft::MatrixF FX_re, fft::MatrixF FX_im)
{
/*
* Perform fft4 assuming the input is in the transposed layout
* M is the number of rows
* 4 * B is the number of columns
* Note that the fourier matrix is symmetric
*/
// Variable declaration
hipblasStatus_t status;
hipblasHandle_t handle;
hipError_t cerror;
//// Unified variables
float *scales; // = *re_s1, *re_s2, *im_s1, *im_s2;
half *X_split; // = *X_re_hi, *X_re_lo, *X_im_hi, *X_im_lo;
float *result1, *result2; // Store the intermediate result
//// Scaling variables
float alpha = 1.0f, beta = 0.0f;
// Initialize cublas
status = hipblasCreate(&handle);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS initialization error\n");
return FFT_FAILURE;
}
status = cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH); // allow Tensor Core
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS setting math mode error\n");
return FFT_FAILURE;
}
// Allocate unified memory with 0 initialization
checkCudaErrors(hipMallocManaged((void **) &scales, M * B * 4 * sizeof(float)));
checkCudaErrors(hipMemset(scales, 0.0f, M * B * 4 * sizeof(float)));
checkCudaErrors(hipMallocManaged((void **) &X_split, M * 4 * B * 4 * sizeof(half)));
checkCudaErrors(hipMemset(X_split, 0.0f, M * 4 * B * 4 * sizeof(half)));
checkCudaErrors(hipMallocManaged((void **) &result1, M * 4 * B * 4 * sizeof(result1[0])));
checkCudaErrors(hipMemset(result1, 0.0f, M * 4 * B * 4 * sizeof(result1[0])));
checkCudaErrors(hipMallocManaged((void **) &result2, M * 4 * B * 4 * sizeof(result2[0])));
checkCudaErrors(hipMemset(result2, 0.0f, M * 4 * B * 4 * sizeof(result2[0])));
// Split input
//// Initialize Matrix and Vector data structure to store split result
fft::MatrixH X_re_hi;
X_re_hi.width = 4 * B;
X_re_hi.height = M;
X_re_hi.array = X_split + M * 4 * B * 0;
fft::MatrixH X_re_lo;
X_re_lo.width = 4 * B;
X_re_lo.height = M;
X_re_lo.array = X_split + M * 4 * B * 1;
fft::MatrixH X_im_hi;
X_im_hi.width = 4 * B;
X_im_hi.height = M;
X_im_hi.array = X_split + M * 4 * B * 2;
fft::MatrixH X_im_lo;
X_im_lo.width = 4 * B;
X_im_lo.height = M;
X_im_lo.array = X_split + M * 4 * B * 3;
fft::VectorF re_s1;
re_s1.size = M * B;
re_s1.array = scales + M * B * 0;
fft::VectorF re_s2;
re_s2.size = M * B;
re_s2.array = scales + M * B * 1;
fft::VectorF im_s1;
im_s1.size = M * B;
im_s1.array = scales + M * B * 2;
fft::VectorF im_s2;
im_s2.size = M * B;
im_s2.array = scales + M * B * 3;
//// Call splitting function
dim3 threadsPerBlock1(4, 16);
dim3 BlocksPerGrid1((B + 3)/4, (M + 15)/16);
hipLaunchKernelGGL(( mySplit_transposed), dim3(BlocksPerGrid1), dim3(threadsPerBlock1), 0, 0, X_re.array, X_re_hi.array, X_re_lo.array, re_s1.array, re_s2.array, 4, M, B, X_temp);
hipLaunchKernelGGL(( mySplit_transposed), dim3(BlocksPerGrid1), dim3(threadsPerBlock1), 0, 0, X_im.array, X_im_hi.array, X_im_lo.array, im_s1.array, im_s2.array, 4, M, B, X_temp);
cerror = hipGetLastError();
if (cerror != hipSuccess)
{
printf("CUDA error: %s during splitting in fft4_transposed\n", hipGetErrorString(cerror));
return FFT_FAILURE;
}
// Call cublas function and finish Matrix multiplication calculation
// The order of multiplicands are reversed
//// Define batched offset
long long int stride = M * 4;
//// Call cublas batched gemm on F4_re
status = hipblasGemmStridedBatchedEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, M, 4, 4, &alpha, X_split,
HIP_R_16F, M, stride, F4_re.array, HIP_R_16F, 4, 0, &beta, result1, HIP_R_32F, M, stride, B * 4, HIP_R_32F, HIPBLAS_GEMM_DEFAULT);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS kernel execution error in fft4_transposed ((c, d) * a).\n");
return FFT_FAILURE;
}
//// Call cublas gemm on F4_im
status = hipblasGemmStridedBatchedEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, M, 4, 4, &alpha, X_split,
HIP_R_16F, M, stride, F4_im.array, HIP_R_16F, 4, 0, &beta, result2, HIP_R_32F, M, stride, B * 4, HIP_R_32F, HIPBLAS_GEMM_DEFAULT);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS kernel execution error in fft4_transposed ((c, d) * b).\n");
return FFT_FAILURE;
}
// Scale, combine and get result, add to output
//// Set grid and block size
dim3 threadsPerBlock2(16, 16);
dim3 BlocksPerGrid2((4 * B + 15)/16, (M + 15)/16);
//// call kernel function (buffer is zero-initialized inside)
hipLaunchKernelGGL(( myAccumulate_transposed), dim3(BlocksPerGrid2), dim3(threadsPerBlock2), 0, 0, result1, result2, scales, FX_re.array, FX_im.array, 4, M, B);
cerror = hipGetLastError();
if (cerror != hipSuccess)
{
printf("CUDA error: %s during accumulation in fft4_transposed\n", hipGetErrorString(cerror));
return FFT_FAILURE;
}
// Deallocate unified memory
if (hipFree(scales) != hipSuccess) {
fprintf(stderr, "!!!! unified memory free error (free scales vector)\n");
return FFT_FAILURE;
}
if (hipFree(X_split) != hipSuccess) {
fprintf(stderr, "!!!! unified memory free error (free split result matrix)\n");
return FFT_FAILURE;
}
if (hipFree(result1) != hipSuccess) {
fprintf(stderr, "!!!! unified memory free error (free result 1 Matrix)\n");
return FFT_FAILURE;
}
if (hipFree(result2) != hipSuccess) {
fprintf(stderr, "!!!! unified memory free error (free result 2 Matrix)\n");
return FFT_FAILURE;
}
// Shutdown cublas
status = hipblasDestroy(handle);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! shutdown error (A)\n");
return FFT_FAILURE;
}
return FFT_SUCCESS;
}
__global__ void mySplit_transposed(float* X, half* Xhi, half* Xlo, float* s1, float* s2, int n, int M, int B, float* Xtemp)
{
/*
* fft::MatrixF X (M * (n * B)), fft::MatrixH Xhi (M * (n * B)), fft::MatrixH Xlo (M * (n * B))
* fft::VectorF s1 of size M * B, fft::VectorF s2 of size M * B
* int n, int M, int B. n is expected to be 4, M = N / 4
* Grid and dim size should be 2D, total size = M * B
* All data should be in unified memory or device memory
* */
int rowIdx = blockIdx.y * blockDim.y + threadIdx.y; // Row number (max M)
int blockNum = blockIdx.x * blockDim.x + threadIdx.x; // 'Column number' (max B)
if (rowIdx < M && blockNum < B){
/* Data to be manipulated:
* X, Xhi, Xlo (rowIdx, blockIdx * n +0+1+2+3) = X, Xhi, Xlo[rowIdx + blockIdx * n * M + 0/1/2/3 * M]
* s1, s2 (rowIdx, blockIdx) = s1, s2[rowIdx + blockIdx * M]
*/
int offset = rowIdx + blockNum * n * M;
int stride = M;
int factor_idx = rowIdx + blockNum * M;
// Calculate scaling factor 1
float scale1 = 0.0f;
for (int i = 0; i < n; i++){
float norm = (float) fabs(X[offset + i * stride]);
if (norm > scale1) scale1 = norm;
}
// If all numbers are zero, skip
if (scale1 == 0.0f){
s1[factor_idx] = 0.0f;
s2[factor_idx] = 0.0f;
for (int i = 0; i < n; i++){
Xhi[offset + i * stride] = Xlo[offset + i * stride] = 0.0f;
}
}
else
{
// Restrict scale range
if (scale1 < EPS) scale1 = EPS;
if (scale1 > 1.0f/EPS) scale1 = 1.0f/EPS;
s1[factor_idx] = scale1;
// Scale the high half
for (int i = 0; i < n; i++){
Xtemp[offset + i * stride] = X[offset + i * stride]/scale1;
Xhi[offset + i * stride] = (half)(Xtemp[offset + i * stride]);
// Use Xtemp to store the residual
Xtemp[offset + i * stride] = X[offset + i * stride] - scale1 * (float)(Xhi[offset + i * stride]);
}
// Calculate the lower scaling factor
float scale2 = 0.0f;
for (int i = 0; i < n; i++){
float norm = (float) fabs(Xtemp[offset + i * stride]);
if (norm > scale2) scale2 = norm;
}
// If all numbers are zero, skip
if (scale2 == 0.0f){
s2[factor_idx] = 0.0f;
for (int i = 0; i < n; i++){
Xlo[offset + i * stride] = 0.0f;
}
}
else
{
// Restrict scale range
if (scale2 < EPS) scale2 = EPS;
if (scale2 > 1.0f/EPS) scale2 = 1.0f/EPS;
s2[factor_idx] = scale2;
for (int i = 0; i < n; i++){
Xlo[offset + i * stride] = (half) (Xtemp[offset + i * stride] / scale2);
}
}
}
}
}
__global__ void myAccumulate_transposed(float* X1, float* X2, float* alpha, float* R1, float* R2, int n, int M, int B)
{
/*
* X1, X2 are M * (4 * B * 4) matrix. The inner-most column order is by element in a unit. Then by batch. Outer order is Re_hi, Re_lo, Im_hi, Im_lo
* alpha is a M * B * 4 array. Inner most order is by rows. Then by batch. Outer order is re_s1, re_s2, im_s1, im_s2
* R1, R2 are M * (4 * B) matrix
* n is number of elements in one unit (expected to be 4)
* M is number of rows, B is batch size
* */
int i = blockIdx.y * blockDim.y + threadIdx.y; // row number
int j = blockIdx.x * blockDim.x + threadIdx.x; // column number
if (i < M && j < 4 * B){
int result_idx = i + j * M;
int e_stride = M * 4 * B; // Stride for elements, e.g. from Re_hi to Re_lo
int factor_idx = i + j / 4 * M;
int f_stride = M * B; // Stride for factors, e.g. from re_s1 to re_s2
R1[result_idx] = R2[result_idx] = 0.0f;
R1[result_idx] += alpha[factor_idx] * X1[result_idx];
R1[result_idx] += alpha[factor_idx + f_stride] * X1[result_idx + e_stride];
R1[result_idx] += -1.0f * alpha[factor_idx + 2*f_stride] * X2[result_idx + 2*e_stride];
R1[result_idx] += -1.0f * alpha[factor_idx + 3*f_stride] * X2[result_idx + 3*e_stride];
R2[result_idx] += alpha[factor_idx] * X2[result_idx];
R2[result_idx] += alpha[factor_idx + f_stride] * X2[result_idx + e_stride];
R2[result_idx] += alpha[factor_idx + 2*f_stride] * X1[result_idx + 2*e_stride];
R2[result_idx] += alpha[factor_idx + 3*f_stride] * X1[result_idx + 3*e_stride];
}
}
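/*
 * Index example (illustrative): output column j belongs to batch j / 4, so
 * for M = N/4 rows the scale factors for element (i, j) sit at
 * alpha[i + (j/4)*M + k*M*B], k = 0..3, while the four split products sit
 * at X1/X2[i + j*M + k*M*4*B]; e.g. column j = 6 uses the factors of batch 1.
 */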
| 95fb6dd1b2c51b9cbed95890353bbbe3511f1cde.cu | /*
* Implementing the FFT algorithm for general input
* Input should be fp32 vectors with size equals to the power of 4
* Number of vectors is given by BATCH (B)
* Recursive algorithm
* Base case is fft4
* Combine all components in one file
*/
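// Decomposition sketch (illustrative): each length-N batch is reshaped to
// 4 x (N/4), transposed batch-wise to (N/4) x 4, the four length-(N/4)
// sub-FFTs are computed recursively, every element is multiplied by its
// twiddle factor, and a final radix-4 pass (fft4_transposed) combines the
// results. With SIZE = 1024 the recursion chain is
// 1024 -> 256 -> 64 -> 16 -> 4, where length 4 is the fft4 base case.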
// C includes
#include <stdio.h>
#include <assert.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
// CUDA includes
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cuda_fp16.h>
#include "nvidia_helper/checkCudaErrors.h"
// Matrix and vector
#include "helper/my_vector.h"
#include "helper/my_matrix.h"
#include "helper/my_const.h"
#define PI 3.14159265
#define EPS 0.0000001192f
const float UPPER_BOUND = 1.0f;
const int BATCH = 8;
const int SIZE = 1024;
// Utility function declaration
FFT_S init_F4();
__global__ void mySplit(float* X, half* Xhi, half* Xlo, float* s1, float* s2, int N, int B, float* Xtemp);
__global__ void myAccumulate(int N, float* X1, float* X2, float* alpha, float* R1, float* R2, int B);
FFT_S fft4(int B, fft::MatrixF X_re, fft::MatrixF X_im, fft::MatrixF FX_re, fft::MatrixF FX_im);
__global__ void mySplit_transposed(float* X, half* Xhi, half* Xlo, float* s1, float* s2, int n, int M, int B, float* Xtemp);
__global__ void myAccumulate_transposed(float* X1, float* X2, float* alpha, float* R1, float* R2, int n, int M, int B);
FFT_S fft4_transposed(int M, int B, fft::MatrixF X_re, fft::MatrixF X_im, fft::MatrixF FX_re, fft::MatrixF FX_im);
__global__ void myTranspose(int m, int n, float* input, float* output, int B);
__global__ void multiply_twiddle(int N, int m, int n, float* matrix_re, float* matrix_im, int B);
FFT_S gfft(int N, int B, fft::MatrixF& X_re, fft::MatrixF& X_im, fft::MatrixF& FX_re, fft::MatrixF& FX_im);
// Global variables
fft::MatrixH F4_re;
fft::MatrixH F4_im;
float* buffer;
float* X_temp;
int main()
{
int mem_size;
// Set device heap size
cudaDeviceSetLimit(cudaLimitMallocHeapSize, 1024 * 1024 * 64);
// allocate unified memory for input matrix
fft::MatrixF input_re;
input_re.width = BATCH;
input_re.height = SIZE;
mem_size = input_re.width * input_re.height * sizeof(float);
checkCudaErrors(cudaMallocManaged((void **) &(input_re.array), mem_size));
fft::MatrixF input_im;
input_im.width = BATCH;
input_im.height = SIZE;
mem_size = input_im.width * input_im.height * sizeof(float);
checkCudaErrors(cudaMallocManaged((void **) &(input_im.array), mem_size));
// Initialize the input matrix
srand(time(NULL));
printf("The input is: \n");
for (int j = 1; j <= BATCH; j++){
printf("Vector %d: \n", j);
for (int i = 1; i <= SIZE; i++){
input_re.element(i, j) = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND;
input_im.element(i, j) = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND;
input_re.element(i, j) = (float)i;
input_im.element(i, j) = 0.0f;
printf("X[%d] = (%.10f, %.10f) \n", i, input_re.element(i, j), input_im.element(i, j));
}
printf("\n");
}
// allocate unified memory for output matrix
fft::MatrixF output_re;
output_re.width = BATCH;
output_re.height = SIZE;
mem_size = output_re.width * output_re.height * sizeof(float);
checkCudaErrors(cudaMallocManaged((void **) &(output_re.array), mem_size));
fft::MatrixF output_im;
output_im.width = BATCH;
output_im.height = SIZE;
mem_size = output_im.width * output_im.height * sizeof(float);
checkCudaErrors(cudaMallocManaged((void **) &(output_im.array), mem_size));
// allocate unified memory for the buffer (array of float)
mem_size = SIZE * BATCH * sizeof(float);
checkCudaErrors(cudaMallocManaged((void **) &buffer, mem_size));
checkCudaErrors(cudaMallocManaged((void **) &X_temp, mem_size));
FFT_S status;
// Initialize Fourier matrix
status = init_F4();
if (status != FFT_SUCCESS){
fprintf(stderr, "!!!!! Matrix initialization error (init Fourier matrix).\n");
return FFT_FAILURE;
}
// Call gfft function
status = gfft(SIZE, BATCH, input_re, input_im, output_re, output_im);
if (status != FFT_SUCCESS){
printf("Error in running fft algorithm\n");
exit(1);
}
printf("Result: \n");
for (int j = 1; j <= BATCH; j++){
printf("Resulting vector %d: \n", j);
for (int i = 1; i <= SIZE; i++){
printf("FX[%d] = (%.10f, %.10f) \n", i, output_re.element(i, j), output_im.element(i, j));
}
}
checkCudaErrors(cudaFree(input_re.array));
checkCudaErrors(cudaFree(input_im.array));
checkCudaErrors(cudaFree(output_re.array));
checkCudaErrors(cudaFree(output_im.array));
return 0;
}
FFT_S gfft(int N, int B, fft::MatrixF& X_re, fft::MatrixF& X_im, fft::MatrixF& FX_re, fft::MatrixF& FX_im)
{
if (N == 4) {
return fft4(B, X_re, X_im, FX_re, FX_im);
}
// Status variable declaration
cublasStatus_t status;
cublasHandle_t handle;
FFT_S fft_status;
cudaError_t cerror;
// Initialize cublas
status = cublasCreate(&handle);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS initialization error\n");
return FFT_FAILURE;
}
// Reshape the output matrix: (N -(Reshape)->4*(N/4)) * B
FX_re.width = N / 4 * B; FX_re.height = 4;
FX_im.width = N / 4 * B; FX_im.height = 4;
// Transpose input matrix: (4*(N/4) -(Transpose)-> (N/4)*4) * B
// Store result directly in FX_re.array and FX_im.array
//// Set grid and block size
dim3 threadsPerBlock1(4, 16);
dim3 blockPerGrid1(B, (N / 4 + 15)/16); // Make sure blocks are enough
//// Real matrix
myTranspose<<<blockPerGrid1, threadsPerBlock1>>>(4, N / 4, X_re.array, FX_re.array, B);
cerror = cudaGetLastError();
if (cerror != cudaSuccess)
{
printf("CUDA error: %s during first transposition of real matrix\n", cudaGetErrorString(cerror));
return FFT_FAILURE;
}
////// Set dimensions (note that the transpose happens batch-wise)
FX_re.height = N / 4; FX_re.width = 4 * B;
//// Imaginary matrix
myTranspose<<<blockPerGrid1, threadsPerBlock1>>>(4, N / 4, X_im.array, FX_im.array, B);
cerror = cudaGetLastError();
if (cerror != cudaSuccess)
{
printf("CUDA error: %s during first transposition of imaginary matrix\n", cudaGetErrorString(cerror));
return FFT_FAILURE;
}
////// Set dimension
FX_im.height = N / 4; FX_im.width = B * 4;
cudaDeviceSynchronize();
// Recursively call gfft, computing the sub-FFTs in place (no separate buffer matrix)
//// Call gfft, storing the result back into FX_re / FX_im
fft_status = gfft(N / 4, 4 * B, FX_re, FX_im, FX_re, FX_im);
if (fft_status != FFT_SUCCESS){
fprintf(stderr, "!!!!! Execution error (recursively call gfft).\n");
return FFT_FAILURE;
}
// Multiplication with twiddle factors
//// Set grid and block size
dim3 threadsPerBlock2(4, 16);
dim3 blockPerGrid2(B, (N / 4 + 15)/16); // Make sure blocks are enough
//// Call kernel function
multiply_twiddle<<<blockPerGrid2, threadsPerBlock2>>>(N, N/4, 4, FX_re.array, FX_im.array, B);
cerror = cudaGetLastError();
if (cerror != cudaSuccess)
{
printf("CUDA error: %s during twiddle multiplication\n", cudaGetErrorString(cerror));
return FFT_FAILURE;
}
cudaDeviceSynchronize();
// Using the improved algorithm without transposition
fft_status = fft4_transposed(N / 4, B, FX_re, FX_im, FX_re, FX_im);
cudaDeviceSynchronize();
if (fft_status != FFT_SUCCESS){
fprintf(stderr, "!!!!! Execution error (calling fft4_transposed).\n");
return FFT_FAILURE;
}
// Reshape back input and output matrix: (4*(N/4) --Reshape--> N) * B
FX_re.width = B; FX_re.height = N;
FX_im.width = B; FX_im.height = N;
// Shutdown cublas
status = cublasDestroy(handle);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! shutdown error (A)\n");
return FFT_FAILURE;
}
return FFT_SUCCESS;
}
__global__ void myTranspose(int m, int n, float* input, float* output, int B)
{
/*
* Transpose the B input matrices with size m * n
* Every matrix in a batch is transposed independently
* Input should be matrix of size m * (n * B)
* Output should be matrix of size n * (m * B)
* The grid is expected to be (B, ceil(n / blockDim.y)), with blockDim.x covering the m columns
* Used case: first transpose, from 4 * (N / 4) to (N / 4) * 4
* */
// Calculate position in the OUTPUT matrix (0 based)
int j = threadIdx.x; // Column number within a matrix, expected to be 0, 1, 2, 3
int i = blockIdx.y * blockDim.y + threadIdx.y; // Row number within a matrix
int matrix_id = blockIdx.x;
if (i < n && j < m && matrix_id < B){
output[matrix_id * m * n + j * n + i] = input[matrix_id * m * n + i * m + j];
}
}
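/*
 * Worked index example (illustrative): storage is column-major within each
 * batch. For batch k, the input element at (row r, column c) of the m x n
 * matrix, stored at input[k*m*n + c*m + r], is written to
 * output[k*m*n + r*n + c], i.e. position (row c, column r) of the n x m
 * output matrix, so every batch is transposed independently.
 */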
FFT_S fft4(int B, fft::MatrixF X_re, fft::MatrixF X_im, fft::MatrixF FX_re, fft::MatrixF FX_im)
{
// Variable declaration
cublasStatus_t status;
cublasHandle_t handle;
cudaError_t cerror;
//// Unified variables
float *scales; // = *re_s1, *re_s2, *im_s1, *im_s2;
half *X_split; // = *X_re_hi, *X_re_lo, *X_im_hi, *X_im_lo;
float *result1, *result2; // Store the intermediate result
//// Scaling variables
float alpha = 1.0f, beta = 0.0f;
// Initialize cublas
status = cublasCreate(&handle);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS initialization error\n");
return FFT_FAILURE;
}
status = cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH); // allow Tensor Core
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS setting math mode error\n");
return FFT_FAILURE;
}
// Allocate unified memory with 0 initialization
checkCudaErrors(cudaMallocManaged((void **) &scales, B * 4 * sizeof(float)));
checkCudaErrors(cudaMemset(scales, 0.0f, B * 4 * sizeof(float)));
checkCudaErrors(cudaMallocManaged((void **) &X_split, 4 * B * 4 * sizeof(half)));
checkCudaErrors(cudaMemset(X_split, 0.0f, 4 * B * 4 * sizeof(half)));
checkCudaErrors(cudaMallocManaged((void **) &result1, 4 * B * 4 * sizeof(result1[0])));
checkCudaErrors(cudaMemset(result1, 0.0f, 4 * B * 4 * sizeof(result1[0])));
checkCudaErrors(cudaMallocManaged((void **) &result2, 4 * B * 4 * sizeof(result2[0])));
checkCudaErrors(cudaMemset(result2, 0.0f, 4 * B * 4 * sizeof(result2[0])));
// Split input
//// Initialize Matrix and Vector data structure to store split result
fft::MatrixH X_re_hi;
X_re_hi.width = B;
X_re_hi.height = 4;
X_re_hi.array = X_split + 4 * B * 0;
fft::MatrixH X_re_lo;
X_re_lo.width = B;
X_re_lo.height = 4;
X_re_lo.array = X_split + 4 * B * 1;
fft::MatrixH X_im_hi;
X_im_hi.width = B;
X_im_hi.height = 4;
X_im_hi.array = X_split + 4 * B * 2;
fft::MatrixH X_im_lo;
X_im_lo.width = B;
X_im_lo.height = 4;
X_im_lo.array = X_split + 4 * B * 3;
fft::VectorF re_s1;
re_s1.size = B;
re_s1.array = scales + B * 0;
fft::VectorF re_s2;
re_s2.size = B;
re_s2.array = scales + B * 1;
fft::VectorF im_s1;
im_s1.size = B;
im_s1.array = scales + B * 2;
fft::VectorF im_s2;
im_s2.size = B;
im_s2.array = scales + B * 3;
//// Call splitting function
int numThreads = 64;
int numBlocks = (B + 63) / 64;
mySplit<<<numBlocks, numThreads>>>(X_re.array, X_re_hi.array, X_re_lo.array, re_s1.array, re_s2.array, 4, B, X_temp);
mySplit<<<numBlocks, numThreads>>>(X_im.array, X_im_hi.array, X_im_lo.array, im_s1.array, im_s2.array, 4, B, X_temp);
cerror = cudaGetLastError();
if (cerror != cudaSuccess)
{
printf("CUDA error: %s during splitting\n", cudaGetErrorString(cerror));
return FFT_FAILURE;
}
// Call cublas function and finish Matrix multiplication calculation
//// Call cublas gemm on F4_re
status = cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, 4, B * 4, 4, &alpha, F4_re.array,
CUDA_R_16F, 4, X_split, CUDA_R_16F, 4, &beta, result1, CUDA_R_32F, 4, CUDA_R_32F, CUBLAS_GEMM_DEFAULT);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS kernel execution error (a * (c, d)).\n");
return FFT_FAILURE;
}
//// Call cublas gemm on F4_im
status = cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, 4, B * 4, 4, &alpha, F4_im.array,
CUDA_R_16F, 4, X_split, CUDA_R_16F, 4, &beta, result2, CUDA_R_32F, 4, CUDA_R_32F, CUBLAS_GEMM_DEFAULT);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS kernel execution error (b * (c, d)).\n");
return FFT_FAILURE;
}
// Scale, combine and get result, add to output
//// Set grid and block size
dim3 threadsPerBlock(16, 4);
dim3 BlocksPerGrid((B+15)/16, 1);
//// call kernel function (buffer is zero-initialized inside)
myAccumulate<<<BlocksPerGrid, threadsPerBlock>>>(4, result1, result2, scales, FX_re.array, FX_im.array, B);
cerror = cudaGetLastError();
if (cerror != cudaSuccess)
{
printf("CUDA error: %s during accumulation\n", cudaGetErrorString(cerror));
return FFT_FAILURE;
}
// Deallocate unified memory
if (cudaFree(scales) != cudaSuccess) {
fprintf(stderr, "!!!! unified memory free error (free scales vector)\n");
return FFT_FAILURE;
}
if (cudaFree(X_split) != cudaSuccess) {
fprintf(stderr, "!!!! unified memory free error (free split result matrix)\n");
return FFT_FAILURE;
}
if (cudaFree(result1) != cudaSuccess) {
fprintf(stderr, "!!!! unified memory free error (free result 1 Matrix)\n");
return FFT_FAILURE;
}
if (cudaFree(result2) != cudaSuccess) {
fprintf(stderr, "!!!! unified memory free error (free result 2 Matrix)\n");
return FFT_FAILURE;
}
// Shutdown cublas
status = cublasDestroy(handle);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! shutdown error (A)\n");
return FFT_FAILURE;
}
cudaDeviceSynchronize();
return FFT_SUCCESS;
}
FFT_S init_F4()
{
// Allocate unified memory for Fourier Matrix
int mem_size;
F4_re.width = 4;
F4_re.height = 4;
mem_size = F4_re.width * F4_re.height * sizeof(half);
checkCudaErrors(cudaMallocManaged((void **) &(F4_re.array), mem_size));
F4_im.width = 4;
F4_im.height = 4;
mem_size = F4_im.width * F4_im.height * sizeof(half);
checkCudaErrors(cudaMallocManaged((void **) &(F4_im.array), mem_size));
F4_re.element(1, 1) = 1.0f;
F4_re.element(2, 1) = 1.0f;
F4_re.element(3, 1) = 1.0f;
F4_re.element(4, 1) = 1.0f;
F4_re.element(1, 2) = 1.0f;
F4_re.element(2, 2) = 0.0f;
F4_re.element(3, 2) =-1.0f;
F4_re.element(4, 2) = 0.0f;
F4_re.element(1, 3) = 1.0f;
F4_re.element(2, 3) =-1.0f;
F4_re.element(3, 3) = 1.0f;
F4_re.element(4, 3) =-1.0f;
F4_re.element(1, 4) = 1.0f;
F4_re.element(2, 4) = 0.0f;
F4_re.element(3, 4) =-1.0f;
F4_re.element(4, 4) = 0.0f;
F4_im.element(1, 1) = 0.0f;
F4_im.element(2, 1) = 0.0f;
F4_im.element(3, 1) = 0.0f;
F4_im.element(4, 1) = 0.0f;
F4_im.element(1, 2) = 0.0f;
F4_im.element(2, 2) =-1.0f;
F4_im.element(3, 2) = 0.0f;
F4_im.element(4, 2) = 1.0f;
F4_im.element(1, 3) = 0.0f;
F4_im.element(2, 3) = 0.0f;
F4_im.element(3, 3) = 0.0f;
F4_im.element(4, 3) = 0.0f;
F4_im.element(1, 4) = 0.0f;
F4_im.element(2, 4) = 1.0f;
F4_im.element(3, 4) = 0.0f;
F4_im.element(4, 4) =-1.0f;
return FFT_SUCCESS;
}
__global__ void mySplit(float* X, half* Xhi, half* Xlo, float* s1, float* s2, int N, int B, float* Xtemp)
{
/*
* fft::MatrixF X (N*B), fft::MatrixH Xhi (N*B), fft::MatrixH Xlo (N*B)
* fft::VectorF s1, fft::VectorF s2
* int N, int B. N is always 4
* Grid and dim size should be 1D, total size = B
* All data should be in unified memory or device memory
* */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < B){
// Calculate scaling factor 1
float scale1 = 0.0f;
for (int i = 0; i < N; i++){
float norm = (float) fabs(X[i + idx * N]);
if (norm > scale1) scale1 = norm;
}
// If all numbers are zero, skip
if (scale1 == 0.0f){
s1[idx] = 0.0f;
s2[idx] = 0.0f;
for (int i = 0; i < N; i++){
Xhi[i + idx * N] = Xlo[i + idx * N] = 0.0f;
}
}
else
{
// Restrict scale range
if (scale1 < EPS) scale1 = EPS;
if (scale1 > 1.0f/EPS) scale1 = 1.0f/EPS;
s1[idx] = scale1;
// Scale the high half
for (int i = 0; i < N; i++){
Xtemp[i + idx * N] = X[i + idx * N]/scale1;
Xhi[i + idx * N] = (half)(Xtemp[i + idx * N]);
// Use Xtemp to store the residual
Xtemp[i + idx * N] = X[i + idx * N] - scale1 * (float)(Xhi[i + idx * N]);
}
// Calculate the lower scaling factor
float scale2 = 0.0f;
for (int i = 0; i < N; i++){
float norm = (float) fabs(Xtemp[i + idx * N]);
if (norm > scale2) scale2 = norm;
}
// If all numbers are zero, skip
if (scale2 == 0.0f){
s2[idx] = 0.0f;
for (int i = 0; i < N; i++){
Xlo[i + idx * N] = 0.0f;
}
}
else
{
// Restrict scale range
if (scale2 < EPS) scale2 = EPS;
if (scale2 > 1.0f/EPS) scale2 = 1.0f/EPS;
s2[idx] = scale2;
for (int i = 0; i < N; i++){
Xlo[i + idx * N] = (half) (Xtemp[i + idx * N] / scale2);
}
}
}
}
}
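/*
 * Split scheme sketch (illustrative): each fp32 column X is represented as
 *     X ~= s1 * (float)Xhi + s2 * (float)Xlo
 * where Xhi = (half)(X / s1) keeps the leading bits,
 * Xlo = (half)((X - s1 * (float)Xhi) / s2) keeps the residual, and s1, s2
 * are the per-column maximum absolute values clamped to [EPS, 1/EPS] so the
 * half conversions stay in range.
 */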
__global__ void myAccumulate(int N, float* X1, float* X2, float* alpha, float* R1, float* R2, int B)
{
/*
* N is number of elements in one column (expected to be 4)
* X1, X2 are 4 * (B * 4) column-major matrix. Inner order is by batch. Outer order is Re_hi, Re_lo, Im_hi, Im_lo
* alpha is B * 4 array. Inner order is by batch. Outer order is re_s1, re_s2, im_s1, im_s2
* R1, R2 are 4 * B matrix
* B is batch size
* */
int i = blockIdx.y * blockDim.y + threadIdx.y; // row number
int j = blockIdx.x * blockDim.x + threadIdx.x; // column number
if (i < N && j < B){
R1[i + j * N] = R2[i + j * N] = 0.0f;
R1[i + j * N] += alpha[j] * X1[i + j * N];
R1[i + j * N] += alpha[j + B] * X1[i + j * N + N * B];
R1[i + j * N] += -1.0f * alpha[j + 2*B] * X2[i + j * N + N * 2 * B];
R1[i + j * N] += -1.0f * alpha[j + 3*B] * X2[i + j * N + N * 3 * B];
R2[i + j * N] += alpha[j] * X2[i + j * N];
R2[i + j * N] += alpha[j + B] * X2[i + j * N + N * B];
R2[i + j * N] += alpha[j + 2*B] * X1[i + j * N + N * 2 * B];
R2[i + j * N] += alpha[j + 3*B] * X1[i + j * N + N * 3 * B];
}
}
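/*
 * Derivation sketch (illustrative): write the input column as
 *     X = re_s1*Re_hi + re_s2*Re_lo + i*(im_s1*Im_hi + im_s2*Im_lo)
 * and the Fourier matrix as F4 = A + i*B. Then
 *     Re(F4*X) = re_s1*A*Re_hi + re_s2*A*Re_lo - im_s1*B*Im_hi - im_s2*B*Im_lo
 *     Im(F4*X) = re_s1*B*Re_hi + re_s2*B*Re_lo + im_s1*A*Im_hi + im_s2*A*Im_lo
 * X1 holds the four A*(...) blocks and X2 the four B*(...) blocks, so the
 * scaled accumulation above is exactly this expansion.
 */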
__global__ void multiply_twiddle(int N, int m, int n, float* matrix_re, float* matrix_im, int B)
{
/*
* Multiply every element of the input matrix by its twiddle factor
* Every matrix in a batch is scaled independently
* Block and thread layout should be 2D
* Re.element(i, j) [0 based] = xre * cos(2pi/N * i * j) + xim * sin(2pi/N * i * j)
* Im.element(i, j) [0 based] = -xre * sin(2pi/N * i * j) + xim * cos(2pi/N * i * j)
* */
// Calculate position (0 based)
int j = threadIdx.x; // Column number within a matrix, 0 to 3 in radix 4
int i = blockIdx.y * blockDim.y + threadIdx.y; // Row number within a matrix
int matrix_id = blockIdx.x;
if (i < m && j < n && matrix_id < B){
// Per-thread local variables
int index = matrix_id * N + j * m + i;
float tw_re = cos(2 * PI / N * i * j);
float tw_im = sin(2 * PI / N * i * j);
float result_re = matrix_re[index] * tw_re + matrix_im[index] * tw_im;
float result_im = -1.0f * matrix_re[index] * tw_im + matrix_im[index] * tw_re;
matrix_re[index] = result_re;
matrix_im[index] = result_im;
}
}
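/*
 * Worked twiddle example (illustrative): for N = 16 and (i, j) = (1, 1) the
 * angle is 2*pi/16, so tw_re = cos(pi/8) ~= 0.9239 and tw_im = sin(pi/8)
 * ~= 0.3827; the element is rotated by -2*pi*i*j/N, i.e. multiplied by
 * w_N^(i*j), the standard Cooley-Tukey twiddle factor.
 */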
FFT_S fft4_transposed(int M, int B, fft::MatrixF X_re, fft::MatrixF X_im, fft::MatrixF FX_re, fft::MatrixF FX_im)
{
/*
* Perform fft4 assuming the input is in the transposed layout
* M is the number of rows
* 4 * B is the number of columns
* Note that the fourier matrix is symmetric
*/
// Variable declaration
cublasStatus_t status;
cublasHandle_t handle;
cudaError_t cerror;
//// Unified variables
float *scales; // = *re_s1, *re_s2, *im_s1, *im_s2;
half *X_split; // = *X_re_hi, *X_re_lo, *X_im_hi, *X_im_lo;
float *result1, *result2; // Store the intermediate result
//// Scaling variables
float alpha = 1.0f, beta = 0.0f;
// Initialize cublas
status = cublasCreate(&handle);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS initialization error\n");
return FFT_FAILURE;
}
status = cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH); // allow Tensor Core
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS setting math mode error\n");
return FFT_FAILURE;
}
// Allocate unified memory with 0 initialization
checkCudaErrors(cudaMallocManaged((void **) &scales, M * B * 4 * sizeof(float)));
checkCudaErrors(cudaMemset(scales, 0.0f, M * B * 4 * sizeof(float)));
checkCudaErrors(cudaMallocManaged((void **) &X_split, M * 4 * B * 4 * sizeof(half)));
checkCudaErrors(cudaMemset(X_split, 0.0f, M * 4 * B * 4 * sizeof(half)));
checkCudaErrors(cudaMallocManaged((void **) &result1, M * 4 * B * 4 * sizeof(result1[0])));
checkCudaErrors(cudaMemset(result1, 0.0f, M * 4 * B * 4 * sizeof(result1[0])));
checkCudaErrors(cudaMallocManaged((void **) &result2, M * 4 * B * 4 * sizeof(result2[0])));
checkCudaErrors(cudaMemset(result2, 0.0f, M * 4 * B * 4 * sizeof(result2[0])));
// Split input
//// Initialize Matrix and Vector data structure to store split result
fft::MatrixH X_re_hi;
X_re_hi.width = 4 * B;
X_re_hi.height = M;
X_re_hi.array = X_split + M * 4 * B * 0;
fft::MatrixH X_re_lo;
X_re_lo.width = 4 * B;
X_re_lo.height = M;
X_re_lo.array = X_split + M * 4 * B * 1;
fft::MatrixH X_im_hi;
X_im_hi.width = 4 * B;
X_im_hi.height = M;
X_im_hi.array = X_split + M * 4 * B * 2;
fft::MatrixH X_im_lo;
X_im_lo.width = 4 * B;
X_im_lo.height = M;
X_im_lo.array = X_split + M * 4 * B * 3;
fft::VectorF re_s1;
re_s1.size = M * B;
re_s1.array = scales + M * B * 0;
fft::VectorF re_s2;
re_s2.size = M * B;
re_s2.array = scales + M * B * 1;
fft::VectorF im_s1;
im_s1.size = M * B;
im_s1.array = scales + M * B * 2;
fft::VectorF im_s2;
im_s2.size = M * B;
im_s2.array = scales + M * B * 3;
//// Call splitting function
dim3 threadsPerBlock1(4, 16);
dim3 BlocksPerGrid1((B + 3)/4, (M + 15)/16);
mySplit_transposed<<<BlocksPerGrid1, threadsPerBlock1>>>(X_re.array, X_re_hi.array, X_re_lo.array, re_s1.array, re_s2.array, 4, M, B, X_temp);
mySplit_transposed<<<BlocksPerGrid1, threadsPerBlock1>>>(X_im.array, X_im_hi.array, X_im_lo.array, im_s1.array, im_s2.array, 4, M, B, X_temp);
cerror = cudaGetLastError();
if (cerror != cudaSuccess)
{
printf("CUDA error: %s during splitting in fft4_transposed\n", cudaGetErrorString(cerror));
return FFT_FAILURE;
}
// Call cublas function and finish Matrix multiplication calculation
// The order of multiplicands are reversed
//// Define batched offset
long long int stride = M * 4;
//// Call cublas batched gemm on F4_re
status = cublasGemmStridedBatchedEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, M, 4, 4, &alpha, X_split,
CUDA_R_16F, M, stride, F4_re.array, CUDA_R_16F, 4, 0, &beta, result1, CUDA_R_32F, M, stride, B * 4, CUDA_R_32F, CUBLAS_GEMM_DEFAULT);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS kernel execution error in fft4_transposed ((c, d) * a).\n");
return FFT_FAILURE;
}
//// Call cublas gemm on F4_im
status = cublasGemmStridedBatchedEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, M, 4, 4, &alpha, X_split,
CUDA_R_16F, M, stride, F4_im.array, CUDA_R_16F, 4, 0, &beta, result2, CUDA_R_32F, M, stride, B * 4, CUDA_R_32F, CUBLAS_GEMM_DEFAULT);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS kernel execution error in fft4_transposed ((c, d) * b).\n");
return FFT_FAILURE;
}
// Scale, combine and get result, add to output
//// Set grid and block size
dim3 threadsPerBlock2(16, 16);
dim3 BlocksPerGrid2((4 * B + 15)/16, (M + 15)/16);
//// call kernel function (buffer is zero-initialized inside)
myAccumulate_transposed<<<BlocksPerGrid2, threadsPerBlock2>>>(result1, result2, scales, FX_re.array, FX_im.array, 4, M, B);
cerror = cudaGetLastError();
if (cerror != cudaSuccess)
{
printf("CUDA error: %s during accumulation in fft4_transposed\n", cudaGetErrorString(cerror));
return FFT_FAILURE;
}
// Deallocate unified memory
if (cudaFree(scales) != cudaSuccess) {
fprintf(stderr, "!!!! unified memory free error (free scales vector)\n");
return FFT_FAILURE;
}
if (cudaFree(X_split) != cudaSuccess) {
fprintf(stderr, "!!!! unified memory free error (free split result matrix)\n");
return FFT_FAILURE;
}
if (cudaFree(result1) != cudaSuccess) {
fprintf(stderr, "!!!! unified memory free error (free result 1 Matrix)\n");
return FFT_FAILURE;
}
if (cudaFree(result2) != cudaSuccess) {
fprintf(stderr, "!!!! unified memory free error (free result 2 Matrix)\n");
return FFT_FAILURE;
}
// Shutdown cublas
status = cublasDestroy(handle);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! shutdown error (A)\n");
return FFT_FAILURE;
}
return FFT_SUCCESS;
}
__global__ void mySplit_transposed(float* X, half* Xhi, half* Xlo, float* s1, float* s2, int n, int M, int B, float* Xtemp)
{
/*
* fft::MatrixF X (M * (n * B)), fft::MatrixH Xhi (M * (n * B)), fft::MatrixH Xlo (M * (n * B))
* fft::VectorF s1 of size M * B, fft::VectorF s2 of size M * B
* int n, int M, int B. n is expected to be 4, M = N / 4
* Grid and dim size should be 2D, total size = M * B
* All data should be in unified memory or device memory
* */
int rowIdx = blockIdx.y * blockDim.y + threadIdx.y; // Row number (max M)
int blockNum = blockIdx.x * blockDim.x + threadIdx.x; // 'Column number' (max B)
if (rowIdx < M && blockNum < B){
/* Data to be manipulated:
* X, Xhi, Xlo (rowIdx, blockIdx * n +0+1+2+3) = X, Xhi, Xlo[rowIdx + blockIdx * n * M + 0/1/2/3 * M]
* s1, s2 (rowIdx, blockIdx) = s1, s2[rowIdx + blockIdx * M]
*/
int offset = rowIdx + blockNum * n * M;
int stride = M;
int factor_idx = rowIdx + blockNum * M;
// Calculate scaling factor 1
float scale1 = 0.0f;
for (int i = 0; i < n; i++){
float norm = (float) fabs(X[offset + i * stride]);
if (norm > scale1) scale1 = norm;
}
// If all numbers are zero, skip
if (scale1 == 0.0f){
s1[factor_idx] = 0.0f;
s2[factor_idx] = 0.0f;
for (int i = 0; i < n; i++){
Xhi[offset + i * stride] = Xlo[offset + i * stride] = 0.0f;
}
}
else
{
// Restrict scale range
if (scale1 < EPS) scale1 = EPS;
if (scale1 > 1.0f/EPS) scale1 = 1.0f/EPS;
s1[factor_idx] = scale1;
// Scale the high half
for (int i = 0; i < n; i++){
Xtemp[offset + i * stride] = X[offset + i * stride]/scale1;
Xhi[offset + i * stride] = (half)(Xtemp[offset + i * stride]);
// Use Xtemp to store the residual
Xtemp[offset + i * stride] = X[offset + i * stride] - scale1 * (float)(Xhi[offset + i * stride]);
}
// Calculate the lower scaling factor
float scale2 = 0.0f;
for (int i = 0; i < n; i++){
float norm = (float) fabs(Xtemp[offset + i * stride]);
if (norm > scale2) scale2 = norm;
}
// If all numbers are zero, skip
if (scale2 == 0.0f){
s2[factor_idx] = 0.0f;
for (int i = 0; i < n; i++){
Xlo[offset + i * stride] = 0.0f;
}
}
else
{
// Restrict scale range
if (scale2 < EPS) scale2 = EPS;
if (scale2 > 1.0f/EPS) scale2 = 1.0f/EPS;
s2[factor_idx] = scale2;
for (int i = 0; i < n; i++){
Xlo[offset + i * stride] = (half) (Xtemp[offset + i * stride] / scale2);
}
}
}
}
}
__global__ void myAccumulate_transposed(float* X1, float* X2, float* alpha, float* R1, float* R2, int n, int M, int B)
{
/*
* X1, X2 are M * (4 * B * 4) matrix. The inner-most column order is by element in a unit. Then by batch. Outer order is Re_hi, Re_lo, Im_hi, Im_lo
* alpha is a M * B * 4 array. Inner most order is by rows. Then by batch. Outer order is re_s1, re_s2, im_s1, im_s2
* R1, R2 are M * (4 * B) matrix
* n is number of elements in one unit (expected to be 4)
* M is number of rows, B is batch size
* */
int i = blockIdx.y * blockDim.y + threadIdx.y; // row number
int j = blockIdx.x * blockDim.x + threadIdx.x; // column number
if (i < M && j < 4 * B){
int result_idx = i + j * M;
int e_stride = M * 4 * B; // Stride for elements, e.g. from Re_hi to Re_lo
int factor_idx = i + j / 4 * M;
int f_stride = M * B; // Stride for factors, e.g. from re_s1 to re_s2
R1[result_idx] = R2[result_idx] = 0.0f;
R1[result_idx] += alpha[factor_idx] * X1[result_idx];
R1[result_idx] += alpha[factor_idx + f_stride] * X1[result_idx + e_stride];
R1[result_idx] += -1.0f * alpha[factor_idx + 2*f_stride] * X2[result_idx + 2*e_stride];
R1[result_idx] += -1.0f * alpha[factor_idx + 3*f_stride] * X2[result_idx + 3*e_stride];
R2[result_idx] += alpha[factor_idx] * X2[result_idx];
R2[result_idx] += alpha[factor_idx + f_stride] * X2[result_idx + e_stride];
R2[result_idx] += alpha[factor_idx + 2*f_stride] * X1[result_idx + 2*e_stride];
R2[result_idx] += alpha[factor_idx + 3*f_stride] * X1[result_idx + 3*e_stride];
}
}
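/*
 * Index example (illustrative): output column j belongs to batch j / 4, so
 * for M = N/4 rows the scale factors for element (i, j) sit at
 * alpha[i + (j/4)*M + k*M*B], k = 0..3, while the four split products sit
 * at X1/X2[i + j*M + k*M*4*B]; e.g. column j = 6 uses the factors of batch 1.
 */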
|
fe0abd00c7eff7d1196eafe293ce0da7a90632a6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Memory object sizes:
// 1. hash table build: 2 * 8 * RLEN + 2 * 32 * 1024 * RBUCKETS
// 2. after hash_build before hash_join: 8 * RLEN
// 3. each hash_join: 8 * S_CHUNK_LEN + 8 * RLEN + 8 * n_results
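// Illustrative sizing with the constants below (RLEN = 10M records of 8 bytes,
// RBUCKETS = 8192, 64 blocks x 128 threads for the hash build):
// 1. build: 2 * 8 B * 10M = 160 MiB of records plus
// 2 * 4 B * 8192 threads * 8192 buckets = 512 MiB of histogram/offset arrays
// 2. resident hash table: 8 B * 10M = 80 MiB (plus a small bucket index)
// 3. per S chunk: 8 B * 64K = 512 KiB plus the table and the result buffer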
#include <stdio.h>
#include <stdlib.h>
#include "../md5/defs.h"
#include "../md5/kernels.cu"
#include "hj.cu"
// #include "hj_kernels.cu"
#ifndef CUDA_SAFE_CALL
#define CUDA_SAFE_CALL(call) \
do { \
hipError_t err = call; \
if(hipSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, hipGetErrorString(err)); \
exit(EXIT_FAILURE); \
} \
} while (0)
#endif
//#define NR_BUCKETS_DEFAULT 256
// number of records in R
//#define RLEN (40 * 1024L * 1024L)
#define RLEN (10L * 1024L * 1024L)
// max of R's keys
#define RKEY_MAX (1024 * 256)
// seed of R's keys
#define RKEY_SEED 1
// number of buckets for R's hash table; should not be larger than RKEY_MAX
#define RBUCKETS (1024 * 8) // must be power of 2
// max of S's keys
#define SKEY_MAX (1024 * 256)
// seed of S's keys
#define SKEY_SEED 2
// number of records in each chunk read from S
#define S_CHUNK_LEN (64L * 1024L)
// how many chunks to be read from S
#define S_CHUNK_CNT 5
void save_data_to_disk(unsigned char *data, size_t size)
{
FILE *fout = fopen("md5.output", "w");
if (!fout) {
perror("Failed to create output file");
exit(1);
}
fwrite(data, sizeof(unsigned char), size, fout);
fclose(fout);
}
// Fills a matrix with random float entries.
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
// Naive float matrix-multiplication kernel; both operands are packed back to back in A
__global__ void MatMulKernel(float *A, float *C, int Width)
{
// Each thread computes one element of C by accumulating results into Cvalue
float Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int e = 0; e < Width; ++e)
Cvalue += A[row * Width + e] * A[Width * Width + e * Width + col];
C[row * Width + col] = Cvalue;
}
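// Launch sketch (illustrative; buffer names are placeholders): with
// width = height = 2048 and the 8x8 thread block configured in main(),
//     MatMulKernel<<<dim3(width / 8, height / 8), dim3(8, 8)>>>(d_in, d_out, width);
// computes d_out[row * width + col], where d_in packs both operands back to
// back and the second matrix starts at offset width * width.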
#define N 3
enum DataType { dt_chr, dt_int, dt_flt };
struct ElementAttr{
enum DataType type;
int dataSize;
int resultSize;
};
struct ElementSet {
union {
unsigned char *chr_data;
int *int_data;
float *flt_data;
};
};
int mallocMemory(struct ElementSet *Data, enum DataType Type, int DataSize){
switch(Type){
case dt_chr:
CUDA_SAFE_CALL( hipHostMalloc((void**)&Data->chr_data, sizeof(char) * DataSize) ); // host pinned
// Data->chr_data = (unsigned char *)malloc(sizeof(char) * DataSize);
break;
case dt_int:
CUDA_SAFE_CALL( hipHostMalloc((void**)&Data->int_data, sizeof(int) * DataSize) ); // host pinned
// Data->int_data = (int *)malloc(sizeof(int) * DataSize);
break;
case dt_flt:
CUDA_SAFE_CALL( hipHostMalloc((void**)&Data->flt_data, sizeof(float) * DataSize) ); // host pinned
// Data->flt_data = (float *)malloc(sizeof(float) * DataSize);
break;
}
return 0;
}
int mallocMemoryOnDevice(struct ElementSet *Data, enum DataType Type, int DataSize){
switch(Type){
case dt_chr:
// printf("%s\n", "1-1");
CUDA_SAFE_CALL(hipMalloc((void **)&Data->chr_data, sizeof(char) * DataSize));
break;
case dt_int:
// printf("%s\n", "1-2");
CUDA_SAFE_CALL(hipMalloc((void **)&Data->int_data, sizeof(int) * DataSize));
break;
case dt_flt:
// printf("%s\n", "1-3");
CUDA_SAFE_CALL(hipMalloc((void **)&Data->flt_data, sizeof(float) * DataSize));
break;
}
return 0;
}
int printElement(struct ElementSet Data, struct ElementAttr Job){
switch(Job.type){
case dt_chr:
for (int j = 0; j < Job.dataSize; ++j)
printf("%c\t", Data.chr_data[j]);
printf("\n");
break;
case dt_int:
for (int j = 0; j < Job.dataSize; ++j)
printf("%d\t", Data.int_data[j]);
printf("\n");
break;
case dt_flt:
for (int j = 0; j < Job.dataSize; ++j)
printf("%f\t", Data.flt_data[j]);
printf("\n");
break;
}
return 0;
}
int read_r(record_t *r_tmp, int *rlen)
{
// hipError_t res;
// record_t *r_tmp = (record_t *)malloc(size_r);
// if(!r_tmp) {
// fprintf(stderr, "malloc failed for R\n");
// return -1;
// }
// record_t *r_tmp;
unsigned int seed = RKEY_SEED;
for(int i = 0; i < RLEN; i++) {
r_tmp[i].y = rand_r(&seed) % RKEY_MAX;
r_tmp[i].x = i;
}
// *r = r_tmp;
*rlen = RLEN;
return 0;
}
// return the number of records actually read
int read_s(record_t *s, int slen, int skey_start)
{
static unsigned int seed = SKEY_SEED;
for(int i = 0; i < slen; i++) {
s[i].y = rand_r(&seed) % (SKEY_MAX - skey_start) + skey_start;
s[i].x = skey_start + i;
}
return slen;
}
// Assume R is the small table, upon which a hash table is built and kept in
// GPU memory. Assume S is the large table, for which data are fetched chunk
// by chunk, with one chunk, after another, joined with R.
// A problem with hash join is that, even though the joined results may be few,
// the number of buckets and sparse memory regions touched by the join can
// still be large.
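// Flow sketch (illustrative, based on the steps visible below): main() reads R,
// builds its hash table on the GPU (per-thread bucket histogram via
// hash_build_hist, then prefix_sum over the histogram to obtain write
// offsets), while independent md5 and matrix-multiply jobs are issued on
// separate CUDA streams; chunks of S are then meant to be joined against the
// resident table as described above.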
int main()
{
hipStream_t *stream = (hipStream_t *) malloc(15 * sizeof(hipStream_t));
record_t *h_r[2], *h_s[2][S_CHUNK_CNT];
hash_table_t ht_r[2];
int rlen, slen[2][S_CHUNK_CNT];
struct timeval t1, t2, t_start, t_end;
gettimeofday(&t_start, NULL);
printf("Time of starting hj: %lf \n", TVAL(t_start));
// Create cuda stream
for (int i = 0; i < 15; ++i)
CUDA_SAFE_CALL( hipStreamCreate(&stream[i]) );
int build_hash_blocks = 64, build_hash_threads_per_block = 128;
int scan_blocks = 512, scan_chunks;
int scan_threads_per_block = 128;
int scan_elems_per_block = 2 * scan_threads_per_block;
int bytes_smem = sizeof(int) * scan_elems_per_block;
int mm_block = 8;
int width = 8 * 256;
int height = width;
// threads per block and blocks per grid for each job
dim3 dimBlock[N],dimGrid[N];
// dimBlock[0].x = 64, dimBlock[0].y = 1, dimBlock[0].z = 1;
// dimGrid[0].x = 4096 / dimBlock[0].x, dimGrid[0].y = 1, dimGrid[0].z = 1;
dimBlock[0].x = 32, dimBlock[0].y = 1, dimBlock[0].z = 1;
dimGrid[0].x = 4096 / dimBlock[0].x, dimGrid[0].y = 1, dimGrid[0].z = 1;
dimBlock[1].x = 8, dimBlock[1].y = 8, dimBlock[1].z = 1;
dimGrid[1].x = width / dimBlock[1].x, dimGrid[1].y = height / dimBlock[1].y, dimGrid[1].z = 1;
dimBlock[2].x = 32, dimBlock[2].y = 1, dimBlock[2].z = 1;
dimGrid[2].x = 4096 / dimBlock[2].x, dimGrid[2].y = 1, dimGrid[2].z = 1;
// Declare vars for host data and results
struct ElementAttr job[N];
struct ElementSet h_data[N], h_result[N];
// Declare vars for device data and results
struct ElementSet d_data[N], d_result[N];
// Set job attributes
job[0].type = dt_chr, job[0].dataSize = BYTES_INPUT, job[0].resultSize = dimGrid[0].x * dimBlock[0].x * MD5_LEN / sizeof(char);
job[1].type = dt_flt, job[1].dataSize = 2 * height * width, job[1].resultSize = dimGrid[1].x * dimBlock[1].x;
job[2].type = dt_chr, job[2].dataSize = BYTES_INPUT, job[2].resultSize = dimGrid[2].x * dimBlock[2].x * MD5_LEN / sizeof(char);
int n = 1;
gettimeofday(&t1, NULL);
// Allocate memory
for(int i = 0; i < n; i++){
// printf("%s\n", "0-loop-allocateMem");
switch(job[i].type){
case dt_chr:
// printf("%s\n", "0-1");
CUDA_SAFE_CALL( hipHostMalloc((void**)&h_data[i].chr_data, sizeof(char) * job[i].dataSize) ); // host pinned
CUDA_SAFE_CALL( hipHostMalloc((void**)&h_result[i].chr_data, sizeof(char) * job[i].resultSize) ); // host pinned
// Data->chr_data = (unsigned char *)malloc(sizeof(char) * DataSize);
break;
case dt_int:
// printf("%s\n", "0-2");
CUDA_SAFE_CALL( hipHostMalloc((void**)&h_data[i].int_data, sizeof(int) * job[i].dataSize) ); // host pinned
CUDA_SAFE_CALL( hipHostMalloc((void**)&h_result[i].int_data, sizeof(int) * job[i].resultSize) ); // host pinned
// Data->int_data = (int *)malloc(sizeof(int) * DataSize);
break;
case dt_flt:
// printf("%s\n", "0-3");
CUDA_SAFE_CALL( hipHostMalloc((void**)&h_data[i].flt_data, sizeof(float) * job[i].dataSize) ); // host pinned
CUDA_SAFE_CALL( hipHostMalloc((void**)&h_result[i].flt_data, sizeof(float) * job[i].resultSize) ); // host pinned
// Data->flt_data = (float *)malloc(sizeof(float) * DataSize);
break;
}
}
// init
srand(2018);
// initialize host data
for (int i = 0; i < job[0].dataSize; i++)
h_data[0].chr_data[i] = (unsigned char)(rand() % 256);
// Allocate memory
for(int i = 0; i < n; i++){
// printf("%s\n", "1-loop-allocateDeviceMem");
switch(job[i].type){
case dt_chr:
// printf("%s\n", "1-1");
CUDA_SAFE_CALL(hipMalloc((void **)&d_data[i].chr_data, sizeof(char) * job[i].dataSize));
CUDA_SAFE_CALL(hipMalloc((void **)&d_result[i].chr_data, sizeof(char) * job[i].resultSize));
break;
case dt_int:
// printf("%s\n", "1-2");
CUDA_SAFE_CALL(hipMalloc((void **)&d_data[i].int_data, sizeof(int) * job[i].dataSize));
CUDA_SAFE_CALL(hipMalloc((void **)&d_result[i].int_data, sizeof(int) * job[i].resultSize));
break;
case dt_flt:
// printf("%s\n", "1-3");
CUDA_SAFE_CALL(hipMalloc((void **)&d_data[i].flt_data, sizeof(float) * job[i].dataSize));
CUDA_SAFE_CALL(hipMalloc((void **)&d_result[i].flt_data, sizeof(float) * job[i].resultSize));
break;
}
// mallocMemoryOnDevice(&d_data[i], job[i].type, job[i].dataSize);
// mallocMemoryOnDevice(&d_result[i], job[i].type, job[i].resultSize);
}
// read r and build hash table
gettimeofday(&t1, NULL);
CUDA_SAFE_CALL(hipHostMalloc((void**)&h_r[0], sizeof(record_t) * RLEN));
CUDA_SAFE_CALL(hipHostMalloc((void**)&h_r[1], sizeof(record_t) * RLEN));
if(read_r(h_r[0], &rlen)) {
fprintf(stderr, "failed to read r\n");
return -1;
}
if(read_r(h_r[1], &rlen)) {
fprintf(stderr, "failed to read r\n");
return -1;
}
gettimeofday(&t2, NULL);
printf("Time on reading R: %lf ms\n", TIME_DIFF(t1, t2));
gettimeofday(&t1, NULL);
// printf("Begin build_hash_table(r)\n");
// variables for building the hash table
int *d_hist[2] = {NULL, NULL}, *d_loc[2] = {NULL, NULL};
record_t *d_r[2] = {NULL, NULL};
int ret = 0;
for(int i = 0; i < 1; i++){
ht_r[i].n_buckets = RBUCKETS;
ht_r[i].d_rec = NULL;
ht_r[i].d_idx = NULL;
ht_r[i].n_records = rlen;
if(!ht_r[i].n_buckets) {
ht_r[i].n_buckets = NR_BUCKETS_DEFAULT;
}
}
// for scan
int *d_sumbuf[2]; // the buffer used to store sum updates across subarrays
int *h_sumbuf[2];
int sum_tot[2], sum_delta[2];
// step 1: partition the array into many subarrays,
// each of which is scanned separately
scan_chunks = build_hash_blocks * build_hash_threads_per_block * ht_r[0].n_buckets / scan_elems_per_block;
scan_chunks += (build_hash_blocks * build_hash_threads_per_block * ht_r[0].n_buckets % scan_elems_per_block) ? 1 : 0;
for(int i = 0; i < 1; i++){
// copy records to GPU device memory
CUDA_SAFE_CALL(hipMalloc((void **)&d_r[i], rlen * sizeof(record_t)));
// build histogram matrix to collect how many
// records each thread generates in each bucket
CUDA_SAFE_CALL(hipMalloc((void **)&d_hist[i],
sizeof(int) * build_hash_blocks * build_hash_threads_per_block * ht_r[i].n_buckets));
// prefix sum to get the offsets
CUDA_SAFE_CALL(hipMalloc((void **)&d_loc[i],
sizeof(int) * build_hash_blocks * build_hash_threads_per_block * ht_r[i].n_buckets));
// build the hash table
CUDA_SAFE_CALL(hipMalloc((void **)&ht_r[i].d_rec, rlen * sizeof(record_t)));
CUDA_SAFE_CALL(hipMalloc((void **)&ht_r[i].d_idx, (ht_r[i].n_buckets + 1) * sizeof(int)));
CUDA_SAFE_CALL(hipMalloc((void **)&d_sumbuf[i], sizeof(int) * scan_chunks));
// printf("scan: begin hipHostMalloc\n");
CUDA_SAFE_CALL(hipHostMalloc((void**)&h_sumbuf[i], sizeof(int) * scan_chunks));
// printf("scan: finish hipHostMalloc\n");
}
for(int i = 0; i < 1; i++){
// printf("build_hash_table: begin hipMemcpyAsync(r)\n");
CUDA_SAFE_CALL(hipMemcpyAsync(d_r[i], h_r[i], rlen * sizeof(record_t), hipMemcpyHostToDevice, stream[i]));
}
// printf("build_hash_table: finish hipMemcpyAsync(r)\n");
for (int i = 0; i < n; ++i) {
// printf("%s\n", "2-loop-copyHtoD");
switch(job[i].type){
case dt_chr:
// printf("%s\n", "2-1-0");
// CUDA_SAFE_CALL(hipMemcpy(d_data[i].chr_data, h_data[i].chr_data, sizeof(char) * job[i].dataSize, hipMemcpyHostToDevice));
// printf("%s\n", "2-1");
CUDA_SAFE_CALL(hipMemcpyAsync(d_data[i].chr_data, h_data[i].chr_data, sizeof(char) * job[i].dataSize, hipMemcpyHostToDevice, stream[i+2]));
break;
case dt_int:
// CUDA_SAFE_CALL(hipMemcpy(d_data[i].int_data, h_data[i].int_data, sizeof(int) * job[i].dataSize, hipMemcpyHostToDevice));
// printf("%s\n", "2-2");
CUDA_SAFE_CALL(hipMemcpyAsync(d_data[i].int_data, h_data[i].int_data, sizeof(int) * job[i].dataSize, hipMemcpyHostToDevice, stream[i+2]));
break;
case dt_flt:
// CUDA_SAFE_CALL(hipMemcpy(d_data[i].flt_data, h_data[i].flt_data, sizeof(float) * job[i].dataSize, hipMemcpyHostToDevice));
// printf("%s\n", "2-3");
CUDA_SAFE_CALL(hipMemcpyAsync(d_data[i].flt_data, h_data[i].flt_data, sizeof(float) * job[i].dataSize, hipMemcpyHostToDevice, stream[i+2]));
break;
}
}
for(int i = 0; i < 1; i++){
hipLaunchKernelGGL(( hash_build_hist), dim3(build_hash_blocks), dim3(build_hash_threads_per_block), 0, stream[i], d_hist[i], d_r[i], rlen,
ht_r[i].n_buckets);
// printf("build_hash_table: finish hash_build_hist\n");
if(hipStreamSynchronize(stream[i]) != hipSuccess) {
fprintf(stderr, "kernel failed at hash_build_hist\n");
ret = -1;
goto failed;
}
}
for (int i = 0; i < n; ++i) {
// printf("%s\n", "3-loop-execute-kernel");
switch(i){
case 0:
// printf("%s\n", "3-1");
hipLaunchKernelGGL(( md5_kernel), dim3(dimGrid[i]), dim3(dimBlock[i]), 0, stream[i+2], d_data[i].chr_data, d_result[i].chr_data, job[i].dataSize);
CUDA_SAFE_CALL(hipStreamSynchronize(stream[i+2]));
break;
case 1:
// printf("%s\n", "3-2");
hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid[i]), dim3(dimBlock[i]), 0, stream[i+2], d_data[i].flt_data, d_result[i].flt_data, width);
break;
}
}
for(int i = 0; i < 1; i++){
// printf("scan: begin prefix_sum\n");
hipLaunchKernelGGL(( prefix_sum), dim3(scan_blocks), dim3(scan_threads_per_block), bytes_smem, stream[i],
d_loc[i], d_sumbuf[i], d_hist[i], scan_chunks, build_hash_blocks * build_hash_threads_per_block * ht_r[i].n_buckets);
// printf("scan: finish prefix_sum\n");
// printf("scan: begin hipDeviceSynchronize\n");
if(hipStreamSynchronize(stream[i]) != hipSuccess) {
fprintf(stderr, "kernel failed at prefix_sum\n");
			ret = -1;
			goto failed;
}
}
for (int i = 0; i < n; ++i) {
// printf("%s\n", "4-copy DtoH");
switch(job[i].type){
case dt_chr:
// CUDA_SAFE_CALL(hipMemcpy(h_result[i].chr_data, d_result[i].chr_data, sizeof(char) * job[i].resultSize, hipMemcpyDeviceToHost));
// printf("%s\n", "4-1");
CUDA_SAFE_CALL(hipMemcpyAsync(h_result[i].chr_data, d_result[i].chr_data, sizeof(char) * job[i].resultSize, hipMemcpyDeviceToHost, stream[i+2]));
break;
case dt_int:
// CUDA_SAFE_CALL(hipMemcpy(h_result[i].int_data, d_result[i].int_data, sizeof(int) * job[i].resultSize, hipMemcpyDeviceToHost));
// printf("%s\n", "4-2");
CUDA_SAFE_CALL(hipMemcpyAsync(h_result[i].int_data, d_result[i].int_data, sizeof(int) * job[i].resultSize, hipMemcpyDeviceToHost, stream[i+2]));
break;
case dt_flt:
// CUDA_SAFE_CALL(hipMemcpy(h_result[i].flt_data, d_result[i].flt_data, sizeof(float) * job[i].resultSize, hipMemcpyDeviceToHost));
// printf("%s\n", "4-3");
CUDA_SAFE_CALL(hipMemcpyAsync(h_result[i].flt_data, d_result[i].flt_data, sizeof(float) * job[i].resultSize, hipMemcpyDeviceToHost, stream[i+2]));
break;
}
}
// printf("scan: finish hipDeviceSynchronize\n");
// free(h_sumbuf);
// hipFree(d_sumbuf);
// step 2: update all scanned subarrays to derive the final result
// res = hipMemcpy(h_sumbuf, d_sumbuf, sizeof(int) * nr_chunks,
// hipMemcpyDeviceToHost);
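	// Illustration (hypothetical 2-chunk case) of the host-side fix-up done below:
	//   per-chunk sums produced by prefix_sum:  h_sumbuf = {5, 3}
	//   after the exclusive scan on the host:   h_sumbuf = {0, 5}, sum_tot = 8
	//   prefix_sum_update then adds h_sumbuf[c] to every element of chunk c on the device.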
for(int i = 0; i < 1; i++){
// printf("scan: begin hipMemcpyAsync\n");
CUDA_SAFE_CALL(hipMemcpyAsync(h_sumbuf[i], d_sumbuf[i], sizeof(int) * scan_chunks,
hipMemcpyDeviceToHost, stream[i]));
// printf("scan: finish hipMemcpyAsync\n");
}
for(int j = 0; j < 1; j++){
sum_tot[j] = 0;
sum_delta[j] = h_sumbuf[j][0];
for(int i = 1; i < scan_chunks; i++) {
sum_tot[j] += sum_delta[j];
sum_delta[j] = h_sumbuf[j][i];
h_sumbuf[j][i] = sum_tot[j];
}
h_sumbuf[j][0] = 0;
sum_tot[j] += sum_delta[j];
}
for(int i = 0; i < 1; i++){
// printf("scan: begin hipMemcpyAsync\n");
CUDA_SAFE_CALL(hipMemcpyAsync(d_sumbuf[i], h_sumbuf[i], sizeof(int) * scan_chunks,
hipMemcpyHostToDevice, stream[i]));
}
// printf("scan: finish hipMemcpyAsync\n");
for(int i = 0; i < 1; i++){
// printf("scan: begin prefix_sum_update\n");
hipLaunchKernelGGL(( prefix_sum_update), dim3(scan_blocks), dim3(scan_threads_per_block), 0, stream[i], d_loc[i], d_sumbuf[i],
scan_chunks, build_hash_blocks * build_hash_threads_per_block * ht_r[i].n_buckets);
// printf("scan: finish prefix_sum_update\n");
// printf("scan: begin hipDeviceSynchronize\n");
if(hipStreamSynchronize(stream[i]) != hipSuccess) {
fprintf(stderr, "kernel failed at prefix_sum_update\n");
			ret = -1;
			goto failed;
}
// printf("scan: finish hipDeviceSynchronize\n");
hipLaunchKernelGGL(( hash_build), dim3(build_hash_blocks), dim3(build_hash_threads_per_block), 0, stream[i], ht_r[i].d_rec, ht_r[i].d_idx,
d_r[i], rlen, d_loc[i], ht_r[i].n_buckets);
if(hipStreamSynchronize(stream[i]) != hipSuccess) {
fprintf(stderr, "kernel failed at hash_build\n");
ret = -1;
goto failed;
}
}
goto finish;
failed:
	// On failure, release the partially built hash tables; the shared buffers are
	// freed below in the finish path, which this falls through to.
	free_hash_table(&ht_r[0]);
	free_hash_table(&ht_r[1]);
finish:
	// Only index 0 of these per-stream buffers was actually allocated above.
	CUDA_FREE(d_r[0]);
	CUDA_FREE(d_hist[0]);
	CUDA_FREE(d_loc[0]);
	hipHostFree(h_sumbuf[0]); // pinned with hipHostMalloc, so use hipHostFree
	hipFree(d_sumbuf[0]);
	// printf("build_hash_table: finish scan\n");
	hipHostFree(h_r[0]); // table R on the host is not needed any more
	hipHostFree(h_r[1]);
gettimeofday(&t2, NULL);
printf("Time on building hash table for R: %lf ms\n", TIME_DIFF(t1, t2));
// for each chunk of s, join with r
// h_s = (record_t *)malloc(sizeof(record_t) * S_CHUNK_LEN);
// if(!h_s) {
// fprintf(stderr, "malloc failed for s\n");
// free_hash_table(&ht_r);
// return -1;
// }
gettimeofday(&t1, NULL);
for(int k = 0; k < 1; k++){
for(int i = 0; i < S_CHUNK_CNT; i++){
CUDA_SAFE_CALL(hipHostMalloc((void**)&h_s[k][i], sizeof(record_t) * S_CHUNK_LEN));
slen[k][i] = read_s(h_s[k][i], S_CHUNK_LEN, 0);
}
}
gettimeofday(&t2, NULL);
printf("Time on reading S: %lf ms ( %lf ms per join )\n", TIME_DIFF(t1, t2), TIME_DIFF(t1, t2)/S_CHUNK_CNT);
record_t *h_z[S_CHUNK_CNT];
int zlen[S_CHUNK_CNT];
// The number of result records joined per chunk is approximately:
// RLEN * S_CHUNK_LEN / max(RKEY_MAX, SKEY_MAX)
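	// Editorial note: assuming the constants defined at the top of this file
	// (RLEN = 10*1024*1024, S_CHUNK_LEN = 64*1024, RKEY_MAX = SKEY_MAX = 256*1024)
	// and uniformly distributed keys, that estimate works out to about
	// 10485760 * 65536 / 262144 = 2621440, i.e. roughly 2.6 million joined records per chunk of S.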
gettimeofday(&t1, NULL);
for(int i = 0; i < S_CHUNK_CNT; i++) {
// printf("%d\n", i);
// join with r
if(slen[0][i] > 0) {
// printf("Begin hash_join\n");
if(hash_join(NULL, NULL, &ht_r[0], h_s[0][i], slen[0][i], stream, i)) {
fprintf(stderr, "hash join failed for the %dth chunk of S\n",
i);
break;
}
// printf("Finish hash_join\n");
}
}
gettimeofday(&t2, NULL);
printf("Time on hash join: %lf ms ( %lf ms per join )\n", TIME_DIFF(t1, t2), TIME_DIFF(t1, t2)/S_CHUNK_CNT);
free_hash_table(&ht_r[0]);
free_hash_table(&ht_r[1]);
	for(int i = 0; i < S_CHUNK_CNT; i++)
		hipHostFree(h_s[0][i]); // chunks of S were pinned with hipHostMalloc
gettimeofday(&t_end, NULL);
printf("Total time taken: %lf ms\n", TIME_DIFF(t_start, t_end));
printf("Time of ending hj: %lf \n", TVAL(t_end));
	return ret;
}
| fe0abd00c7eff7d1196eafe293ce0da7a90632a6.cu |
// Memory object sizes:
// 1. hash table build: 2 * 8 * RLEN + 2 * 32 * 1024 * RBUCKETS
// 2. after hash_build before hash_join: 8 * RLEN
// 3. each hash_join: 8 * S_CHUNK_LEN + 8 * RLEN + 8 * n_results
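// Editorial worked example, assuming record_t is 8 bytes and that the 32 * 1024 factor stands
// for build_hash_blocks (64) * build_hash_threads_per_block (128) * sizeof(int):
//   1. hash table build: 2 * 8 * 10485760 + 2 * 32768 * 8192 bytes, roughly 160 MiB + 512 MiB = 672 MiB
//   2. after hash_build, before hash_join: 8 * 10485760 bytes, roughly 80 MiB
//   3. each hash_join: 8 * 65536 + 8 * 10485760 + 8 * n_results bytes, roughly 80 MiB plus the result buffer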
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h> // for gettimeofday()/struct timeval used in the timing code
#include "../md5/defs.h"
#include "../md5/kernels.cu"
#include "hj.cu"
// #include "hj_kernels.cu"
#ifndef CUDA_SAFE_CALL
#define CUDA_SAFE_CALL(call) \
do { \
cudaError_t err = call; \
if(cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString(err)); \
exit(EXIT_FAILURE); \
} \
} while (0)
#endif
//#define NR_BUCKETS_DEFAULT 256
// number of records in R
//#define RLEN (40 * 1024L * 1024L)
#define RLEN (10L * 1024L * 1024L)
// max of R's keys
#define RKEY_MAX (1024 * 256)
// seed of R's keys
#define RKEY_SEED 1
// number of buckets for R's hash table; should not be larger than RKEY_MAX
#define RBUCKETS (1024 * 8) // must be power of 2
// max of S's keys
#define SKEY_MAX (1024 * 256)
// seed of S's keys
#define SKEY_SEED 2
// number of records in each chunk read from S
#define S_CHUNK_LEN (64L * 1024L)
// how many chunks to be read from S
#define S_CHUNK_CNT 5
void save_data_to_disk(unsigned char *data, size_t size)
{
FILE *fout = fopen("md5.output", "w");
if (!fout) {
perror("Failed to create output file");
exit(1);
}
fwrite(data, sizeof(unsigned char), size, fout);
fclose(fout);
}
// Allocates a matrix with random float entries.
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
//float multiplication kernel called by MatMul()
__global__ void MatMulKernel(float *A, float *C, int Width)
{
// Each thread computes one element of C by accumulating results into Cvalue
float Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int e = 0; e < Width; ++e)
Cvalue += A[row * Width + e] * A[Width * Width + e * Width + col - 1];
C[row * Width + col] = Cvalue;
}
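// Editorial sketch (not part of the original program): a plain host-side reference for
// MatMulKernel above, assuming the same packed layout in which the second Width x Width
// operand is stored immediately after the first in the same input buffer. It mirrors the
// kernel's indexing (including the "col - 1" offset) so it can be used to spot-check a few
// entries of the GPU result against the host data.
static inline void MatMulReference(const float *A, float *C, int Width)
{
	for (int row = 0; row < Width; ++row) {
		for (int col = 0; col < Width; ++col) {
			float Cvalue = 0;
			for (int e = 0; e < Width; ++e)
				Cvalue += A[row * Width + e] * A[Width * Width + e * Width + col - 1];
			C[row * Width + col] = Cvalue;
		}
	}
}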
#define N 3
enum DataType { dt_chr, dt_int, dt_flt };
struct ElementAttr{
enum DataType type;
int dataSize;
int resultSize;
};
struct ElementSet {
union {
unsigned char *chr_data;
int *int_data;
float *flt_data;
};
};
int mallocMemory(struct ElementSet *Data, enum DataType Type, int DataSize){
switch(Type){
case dt_chr:
CUDA_SAFE_CALL( cudaMallocHost((void**)&Data->chr_data, sizeof(char) * DataSize) ); // host pinned
// Data->chr_data = (unsigned char *)malloc(sizeof(char) * DataSize);
break;
case dt_int:
CUDA_SAFE_CALL( cudaMallocHost((void**)&Data->int_data, sizeof(int) * DataSize) ); // host pinned
// Data->int_data = (int *)malloc(sizeof(int) * DataSize);
break;
case dt_flt:
CUDA_SAFE_CALL( cudaMallocHost((void**)&Data->flt_data, sizeof(float) * DataSize) ); // host pinned
// Data->flt_data = (float *)malloc(sizeof(float) * DataSize);
break;
}
return 0;
}
int mallocMemoryOnDevice(struct ElementSet *Data, enum DataType Type, int DataSize){
switch(Type){
case dt_chr:
// printf("%s\n", "1-1");
		CUDA_SAFE_CALL(cudaMalloc((void **)&Data->chr_data, sizeof(char) * DataSize));
break;
case dt_int:
// printf("%s\n", "1-2");
		CUDA_SAFE_CALL(cudaMalloc((void **)&Data->int_data, sizeof(int) * DataSize));
break;
case dt_flt:
// printf("%s\n", "1-3");
		CUDA_SAFE_CALL(cudaMalloc((void **)&Data->flt_data, sizeof(float) * DataSize));
break;
}
return 0;
}
int printElement(struct ElementSet Data, struct ElementAttr Job){
switch(Job.type){
case dt_chr:
for (int j = 0; j < Job.dataSize; ++j)
printf("%c\t", Data.chr_data[j]);
printf("\n");
break;
case dt_int:
for (int j = 0; j < Job.dataSize; ++j)
printf("%d\t", Data.int_data[j]);
printf("\n");
break;
case dt_flt:
for (int j = 0; j < Job.dataSize; ++j)
printf("%f\t", Data.flt_data[j]);
printf("\n");
break;
}
return 0;
}
int read_r(record_t *r_tmp, int *rlen)
{
// cudaError_t res;
// record_t *r_tmp = (record_t *)malloc(size_r);
// if(!r_tmp) {
// fprintf(stderr, "malloc failed for R\n");
// return -1;
// }
// record_t *r_tmp;
unsigned int seed = RKEY_SEED;
for(int i = 0; i < RLEN; i++) {
r_tmp[i].y = rand_r(&seed) % RKEY_MAX;
r_tmp[i].x = i;
}
// *r = r_tmp;
*rlen = RLEN;
return 0;
}
// return the number of records actually read
int read_s(record_t *s, int slen, int skey_start)
{
static unsigned int seed = SKEY_SEED;
for(int i = 0; i < slen; i++) {
s[i].y = rand_r(&seed) % (SKEY_MAX - skey_start) + skey_start;
s[i].x = skey_start + i;
}
return slen;
}
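// Note on the generators above: read_r reinitializes its seed from RKEY_SEED on every call,
// so h_r[0] and h_r[1] receive identical copies of R; read_s keeps a static seed, so
// successive calls produce different chunks of S even though skey_start is always 0 here.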
// Assume R is the small table: a hash table is built on it once and kept in
// GPU memory. Assume S is the large table: its data are fetched chunk by
// chunk, and each chunk is joined with R in turn.
// A problem with hash join is that, even when the joined result is small,
// the number of buckets and sparse memory regions touched by the join can
// still be large.
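// Rough outline of main() below (editorial summary): pin and generate R, build the hash
// table on the GPU (histogram -> prefix sum -> scatter), interleave the md5/matmul jobs on
// separate streams, then read S in S_CHUNK_CNT pinned chunks and probe each chunk against
// the R hash table with hash_join().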
int main()
{
cudaStream_t *stream = (cudaStream_t *) malloc(15 * sizeof(cudaStream_t));
record_t *h_r[2], *h_s[2][S_CHUNK_CNT];
hash_table_t ht_r[2];
int rlen, slen[2][S_CHUNK_CNT];
struct timeval t1, t2, t_start, t_end;
gettimeofday(&t_start, NULL);
printf("Time of starting hj: %lf \n", TVAL(t_start));
// Create cuda stream
for (int i = 0; i < 15; ++i)
CUDA_SAFE_CALL( cudaStreamCreate(&stream[i]) );
int build_hash_blocks = 64, build_hash_threads_per_block = 128;
int scan_blocks = 512, scan_chunks;
int scan_threads_per_block = 128;
int scan_elems_per_block = 2 * scan_threads_per_block;
int bytes_smem = sizeof(int) * scan_elems_per_block;
int mm_block = 8;
int width = 8 * 256;
int height = width;
// thread per block and block per grid for job n
dim3 dimBlock[N],dimGrid[N];
// dimBlock[0].x = 64, dimBlock[0].y = 1, dimBlock[0].z = 1;
// dimGrid[0].x = 4096 / dimBlock[0].x, dimGrid[0].y = 1, dimGrid[0].z = 1;
dimBlock[0].x = 32, dimBlock[0].y = 1, dimBlock[0].z = 1;
dimGrid[0].x = 4096 / dimBlock[0].x, dimGrid[0].y = 1, dimGrid[0].z = 1;
dimBlock[1].x = 8, dimBlock[1].y = 8, dimBlock[1].z = 1;
dimGrid[1].x = width / dimBlock[1].x, dimGrid[1].y = height / dimBlock[1].y, dimGrid[1].z = 1;
dimBlock[2].x = 32, dimBlock[2].y = 1, dimBlock[2].z = 1;
dimGrid[2].x = 4096 / dimBlock[2].x, dimGrid[2].y = 1, dimGrid[2].z = 1;
// Declare vars for host data and results
struct ElementAttr job[N];
struct ElementSet h_data[N], h_result[N];
// Declare vars for device data and results
struct ElementSet d_data[N], d_result[N];
// Set job attributes
job[0].type = dt_chr, job[0].dataSize = BYTES_INPUT, job[0].resultSize = dimGrid[0].x * dimBlock[0].x * MD5_LEN / sizeof(char);
job[1].type = dt_flt, job[1].dataSize = 2 * height * width, job[1].resultSize = dimGrid[1].x * dimBlock[1].x;
job[2].type = dt_chr, job[2].dataSize = BYTES_INPUT, job[2].resultSize = dimGrid[2].x * dimBlock[2].x * MD5_LEN / sizeof(char);
int n = 1;
gettimeofday(&t1, NULL);
// Allocate memory
for(int i = 0; i < n; i++){
// printf("%s\n", "0-loop-allocateMem");
switch(job[i].type){
case dt_chr:
// printf("%s\n", "0-1");
CUDA_SAFE_CALL( cudaMallocHost((void**)&h_data[i].chr_data, sizeof(char) * job[i].dataSize) ); // host pinned
CUDA_SAFE_CALL( cudaMallocHost((void**)&h_result[i].chr_data, sizeof(char) * job[i].resultSize) ); // host pinned
// Data->chr_data = (unsigned char *)malloc(sizeof(char) * DataSize);
break;
case dt_int:
// printf("%s\n", "0-2");
CUDA_SAFE_CALL( cudaMallocHost((void**)&h_data[i].int_data, sizeof(int) * job[i].dataSize) ); // host pinned
CUDA_SAFE_CALL( cudaMallocHost((void**)&h_result[i].int_data, sizeof(int) * job[i].resultSize) ); // host pinned
// Data->int_data = (int *)malloc(sizeof(int) * DataSize);
break;
case dt_flt:
// printf("%s\n", "0-3");
CUDA_SAFE_CALL( cudaMallocHost((void**)&h_data[i].flt_data, sizeof(float) * job[i].dataSize) ); // host pinned
CUDA_SAFE_CALL( cudaMallocHost((void**)&h_result[i].flt_data, sizeof(float) * job[i].resultSize) ); // host pinned
// Data->flt_data = (float *)malloc(sizeof(float) * DataSize);
break;
}
}
// init
srand(2018);
// initialize host data
for (int i = 0; i < job[0].dataSize; i++)
h_data[0].chr_data[i] = (unsigned char)(rand() % 256);
// Allocate memory
for(int i = 0; i < n; i++){
// printf("%s\n", "1-loop-allocateDeviceMem");
switch(job[i].type){
case dt_chr:
// printf("%s\n", "1-1");
CUDA_SAFE_CALL(cudaMalloc((void **)&d_data[i].chr_data, sizeof(char) * job[i].dataSize));
CUDA_SAFE_CALL(cudaMalloc((void **)&d_result[i].chr_data, sizeof(char) * job[i].resultSize));
break;
case dt_int:
// printf("%s\n", "1-2");
CUDA_SAFE_CALL(cudaMalloc((void **)&d_data[i].int_data, sizeof(int) * job[i].dataSize));
CUDA_SAFE_CALL(cudaMalloc((void **)&d_result[i].int_data, sizeof(int) * job[i].resultSize));
break;
case dt_flt:
// printf("%s\n", "1-3");
CUDA_SAFE_CALL(cudaMalloc((void **)&d_data[i].flt_data, sizeof(float) * job[i].dataSize));
CUDA_SAFE_CALL(cudaMalloc((void **)&d_result[i].flt_data, sizeof(float) * job[i].resultSize));
break;
}
// mallocMemoryOnDevice(&d_data[i], job[i].type, job[i].dataSize);
// mallocMemoryOnDevice(&d_result[i], job[i].type, job[i].resultSize);
}
// read r and build hash table
gettimeofday(&t1, NULL);
CUDA_SAFE_CALL(cudaMallocHost((void**)&h_r[0], sizeof(record_t) * RLEN));
CUDA_SAFE_CALL(cudaMallocHost((void**)&h_r[1], sizeof(record_t) * RLEN));
if(read_r(h_r[0], &rlen)) {
fprintf(stderr, "failed to read r\n");
return -1;
}
if(read_r(h_r[1], &rlen)) {
fprintf(stderr, "failed to read r\n");
return -1;
}
gettimeofday(&t2, NULL);
printf("Time on reading R: %lf ms\n", TIME_DIFF(t1, t2));
gettimeofday(&t1, NULL);
// printf("Begin build_hash_table(r)\n");
	// variables for building the hash table
int *d_hist[2] = {NULL, NULL}, *d_loc[2] = {NULL, NULL};
record_t *d_r[2] = {NULL, NULL};
int ret = 0;
for(int i = 0; i < 1; i++){
ht_r[i].n_buckets = RBUCKETS;
ht_r[i].d_rec = NULL;
ht_r[i].d_idx = NULL;
ht_r[i].n_records = rlen;
if(!ht_r[i].n_buckets) {
ht_r[i].n_buckets = NR_BUCKETS_DEFAULT;
}
}
// for scan
int *d_sumbuf[2]; // the buffer used to store sum updates across subarrays
int *h_sumbuf[2];
int sum_tot[2], sum_delta[2];
// step 1: partition the array into many subarrays,
// each of which is scanned separately
scan_chunks = build_hash_blocks * build_hash_threads_per_block * ht_r[0].n_buckets / scan_elems_per_block;
scan_chunks += (build_hash_blocks * build_hash_threads_per_block * ht_r[0].n_buckets % scan_elems_per_block) ? 1 : 0;
scan_chunks = build_hash_blocks * build_hash_threads_per_block * ht_r[1].n_buckets / scan_elems_per_block;
scan_chunks += (build_hash_blocks * build_hash_threads_per_block * ht_r[1].n_buckets % scan_elems_per_block) ? 1 : 0;
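	// With the parameters above (64 blocks * 128 threads, RBUCKETS = 8192 buckets, 256 elements
	// per scan block) this is 64 * 128 * 8192 / 256 = 262144 chunks; the second pair of statements
	// recomputes the same value for ht_r[1] and overwrites the first result.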
for(int i = 0; i < 1; i++){
// copy records to GPU device memory
CUDA_SAFE_CALL(cudaMalloc((void **)&d_r[i], rlen * sizeof(record_t)));
// build histogram matrix to collect how many
// records each thread generates in each bucket
CUDA_SAFE_CALL(cudaMalloc((void **)&d_hist[i],
sizeof(int) * build_hash_blocks * build_hash_threads_per_block * ht_r[i].n_buckets));
// prefix sum to get the offsets
CUDA_SAFE_CALL(cudaMalloc((void **)&d_loc[i],
sizeof(int) * build_hash_blocks * build_hash_threads_per_block * ht_r[i].n_buckets));
// build the hash table
CUDA_SAFE_CALL(cudaMalloc((void **)&ht_r[i].d_rec, rlen * sizeof(record_t)));
CUDA_SAFE_CALL(cudaMalloc((void **)&ht_r[i].d_idx, (ht_r[i].n_buckets + 1) * sizeof(int)));
CUDA_SAFE_CALL(cudaMalloc((void **)&d_sumbuf[i], sizeof(int) * scan_chunks));
// printf("scan: begin cudaMallocHost\n");
CUDA_SAFE_CALL(cudaMallocHost((void**)&h_sumbuf[i], sizeof(int) * scan_chunks));
// printf("scan: finish cudaMallocHost\n");
}
for(int i = 0; i < 1; i++){
// printf("build_hash_table: begin cudaMemcpyAsync(r)\n");
CUDA_SAFE_CALL(cudaMemcpyAsync(d_r[i], h_r[i], rlen * sizeof(record_t), cudaMemcpyHostToDevice, stream[i]));
}
// printf("build_hash_table: finish cudaMemcpyAsync(r)\n");
for (int i = 0; i < n; ++i) {
// printf("%s\n", "2-loop-copyHtoD");
switch(job[i].type){
case dt_chr:
// printf("%s\n", "2-1-0");
// CUDA_SAFE_CALL(cudaMemcpy(d_data[i].chr_data, h_data[i].chr_data, sizeof(char) * job[i].dataSize, cudaMemcpyHostToDevice));
// printf("%s\n", "2-1");
CUDA_SAFE_CALL(cudaMemcpyAsync(d_data[i].chr_data, h_data[i].chr_data, sizeof(char) * job[i].dataSize, cudaMemcpyHostToDevice, stream[i+2]));
break;
case dt_int:
// CUDA_SAFE_CALL(cudaMemcpy(d_data[i].int_data, h_data[i].int_data, sizeof(int) * job[i].dataSize, cudaMemcpyHostToDevice));
// printf("%s\n", "2-2");
CUDA_SAFE_CALL(cudaMemcpyAsync(d_data[i].int_data, h_data[i].int_data, sizeof(int) * job[i].dataSize, cudaMemcpyHostToDevice, stream[i+2]));
break;
case dt_flt:
// CUDA_SAFE_CALL(cudaMemcpy(d_data[i].flt_data, h_data[i].flt_data, sizeof(float) * job[i].dataSize, cudaMemcpyHostToDevice));
// printf("%s\n", "2-3");
CUDA_SAFE_CALL(cudaMemcpyAsync(d_data[i].flt_data, h_data[i].flt_data, sizeof(float) * job[i].dataSize, cudaMemcpyHostToDevice, stream[i+2]));
break;
}
}
for(int i = 0; i < 1; i++){
hash_build_hist<<<build_hash_blocks, build_hash_threads_per_block, 0, stream[i]>>>(d_hist[i], d_r[i], rlen,
ht_r[i].n_buckets);
// printf("build_hash_table: finish hash_build_hist\n");
if(cudaStreamSynchronize(stream[i]) != cudaSuccess) {
fprintf(stderr, "kernel failed at hash_build_hist\n");
ret = -1;
goto failed;
}
}
for (int i = 0; i < n; ++i) {
// printf("%s\n", "3-loop-execute-kernel");
switch(i){
case 0:
// printf("%s\n", "3-1");
md5_kernel<<<dimGrid[i], dimBlock[i], 0, stream[i+2]>>>(d_data[i].chr_data, d_result[i].chr_data, job[i].dataSize);
CUDA_SAFE_CALL(cudaStreamSynchronize(stream[i+2]));
break;
case 1:
// printf("%s\n", "3-2");
MatMulKernel<<<dimGrid[i], dimBlock[i], 0, stream[i+2]>>>(d_data[i].flt_data, d_result[i].flt_data, width);
break;
}
}
for(int i = 0; i < 1; i++){
// printf("scan: begin prefix_sum\n");
prefix_sum<<<scan_blocks, scan_threads_per_block, bytes_smem, stream[i]>>>(
d_loc[i], d_sumbuf[i], d_hist[i], scan_chunks, build_hash_blocks * build_hash_threads_per_block * ht_r[i].n_buckets);
// printf("scan: finish prefix_sum\n");
// printf("scan: begin cudaThreadSynchronize\n");
if(cudaStreamSynchronize(stream[i]) != cudaSuccess) {
fprintf(stderr, "kernel failed at prefix_sum\n");
			ret = -1;
			goto failed;
}
}
for (int i = 0; i < n; ++i) {
// printf("%s\n", "4-copy DtoH");
switch(job[i].type){
case dt_chr:
// CUDA_SAFE_CALL(cudaMemcpy(h_result[i].chr_data, d_result[i].chr_data, sizeof(char) * job[i].resultSize, cudaMemcpyDeviceToHost));
// printf("%s\n", "4-1");
CUDA_SAFE_CALL(cudaMemcpyAsync(h_result[i].chr_data, d_result[i].chr_data, sizeof(char) * job[i].resultSize, cudaMemcpyDeviceToHost, stream[i+2]));
break;
case dt_int:
// CUDA_SAFE_CALL(cudaMemcpy(h_result[i].int_data, d_result[i].int_data, sizeof(int) * job[i].resultSize, cudaMemcpyDeviceToHost));
// printf("%s\n", "4-2");
CUDA_SAFE_CALL(cudaMemcpyAsync(h_result[i].int_data, d_result[i].int_data, sizeof(int) * job[i].resultSize, cudaMemcpyDeviceToHost, stream[i+2]));
break;
case dt_flt:
// CUDA_SAFE_CALL(cudaMemcpy(h_result[i].flt_data, d_result[i].flt_data, sizeof(float) * job[i].resultSize, cudaMemcpyDeviceToHost));
// printf("%s\n", "4-3");
CUDA_SAFE_CALL(cudaMemcpyAsync(h_result[i].flt_data, d_result[i].flt_data, sizeof(float) * job[i].resultSize, cudaMemcpyDeviceToHost, stream[i+2]));
break;
}
}
// printf("scan: finish cudaThreadSynchronize\n");
// free(h_sumbuf);
// cudaFree(d_sumbuf);
// step 2: update all scanned subarrays to derive the final result
// res = cudaMemcpy(h_sumbuf, d_sumbuf, sizeof(int) * nr_chunks,
// cudaMemcpyDeviceToHost);
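	// The host loop below turns the per-chunk totals in h_sumbuf into an exclusive prefix sum
	// (e.g. {5, 3} -> {0, 5}), which prefix_sum_update then adds back to each chunk on the device.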
for(int i = 0; i < 1; i++){
// printf("scan: begin cudaMemcpyAsync\n");
CUDA_SAFE_CALL(cudaMemcpyAsync(h_sumbuf[i], d_sumbuf[i], sizeof(int) * scan_chunks,
cudaMemcpyDeviceToHost, stream[i]));
// printf("scan: finish cudaMemcpyAsync\n");
}
for(int j = 0; j < 1; j++){
sum_tot[j] = 0;
sum_delta[j] = h_sumbuf[j][0];
for(int i = 1; i < scan_chunks; i++) {
sum_tot[j] += sum_delta[j];
sum_delta[j] = h_sumbuf[j][i];
h_sumbuf[j][i] = sum_tot[j];
}
h_sumbuf[j][0] = 0;
sum_tot[j] += sum_delta[j];
}
for(int i = 0; i < 1; i++){
// printf("scan: begin cudaMemcpyAsync\n");
CUDA_SAFE_CALL(cudaMemcpyAsync(d_sumbuf[i], h_sumbuf[i], sizeof(int) * scan_chunks,
cudaMemcpyHostToDevice, stream[i]));
}
// printf("scan: finish cudaMemcpyAsync\n");
for(int i = 0; i < 1; i++){
// printf("scan: begin prefix_sum_update\n");
prefix_sum_update<<<scan_blocks, scan_threads_per_block, 0, stream[i]>>>(d_loc[i], d_sumbuf[i],
scan_chunks, build_hash_blocks * build_hash_threads_per_block * ht_r[i].n_buckets);
// printf("scan: finish prefix_sum_update\n");
// printf("scan: begin cudaThreadSynchronize\n");
if(cudaStreamSynchronize(stream[i]) != cudaSuccess) {
fprintf(stderr, "kernel failed at prefix_sum_update\n");
			ret = -1;
			goto failed;
}
// printf("scan: finish cudaThreadSynchronize\n");
hash_build<<<build_hash_blocks, build_hash_threads_per_block, 0, stream[i]>>>(ht_r[i].d_rec, ht_r[i].d_idx,
d_r[i], rlen, d_loc[i], ht_r[i].n_buckets);
if(cudaStreamSynchronize(stream[i]) != cudaSuccess) {
fprintf(stderr, "kernel failed at hash_build\n");
ret = -1;
goto failed;
}
}
goto finish;
failed:
	// On failure, release the partially built hash tables; the shared buffers are
	// freed below in the finish path, which this falls through to.
	free_hash_table(&ht_r[0]);
	free_hash_table(&ht_r[1]);
finish:
	// Only index 0 of these per-stream buffers was actually allocated above.
	CUDA_FREE(d_r[0]);
	CUDA_FREE(d_hist[0]);
	CUDA_FREE(d_loc[0]);
	cudaFreeHost(h_sumbuf[0]); // pinned with cudaMallocHost, so use cudaFreeHost
	cudaFree(d_sumbuf[0]);
	// printf("build_hash_table: finish scan\n");
	cudaFreeHost(h_r[0]); // table R on the host is not needed any more
	cudaFreeHost(h_r[1]);
gettimeofday(&t2, NULL);
printf("Time on building hash table for R: %lf ms\n", TIME_DIFF(t1, t2));
// for each chunk of s, join with r
// h_s = (record_t *)malloc(sizeof(record_t) * S_CHUNK_LEN);
// if(!h_s) {
// fprintf(stderr, "malloc failed for s\n");
// free_hash_table(&ht_r);
// return -1;
// }
gettimeofday(&t1, NULL);
for(int k = 0; k < 1; k++){
for(int i = 0; i < S_CHUNK_CNT; i++){
CUDA_SAFE_CALL(cudaMallocHost((void**)&h_s[k][i], sizeof(record_t) * S_CHUNK_LEN));
slen[k][i] = read_s(h_s[k][i], S_CHUNK_LEN, 0);
}
}
gettimeofday(&t2, NULL);
printf("Time on reading S: %lf ms ( %lf ms per join )\n", TIME_DIFF(t1, t2), TIME_DIFF(t1, t2)/S_CHUNK_CNT);
record_t *h_z[S_CHUNK_CNT];
int zlen[S_CHUNK_CNT];
// The number of result records joined per chunk is approximately:
// RLEN * S_CHUNK_LEN / max(RKEY_MAX, SKEY_MAX)
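	// With the #defines above (RLEN = 10*1024*1024, S_CHUNK_LEN = 64*1024,
	// RKEY_MAX = SKEY_MAX = 256*1024) and uniform keys, that is about
	// 10485760 * 65536 / 262144 = 2621440 joined records per chunk.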
gettimeofday(&t1, NULL);
for(int i = 0; i < S_CHUNK_CNT; i++) {
// printf("%d\n", i);
// join with r
if(slen[0][i] > 0) {
// printf("Begin hash_join\n");
if(hash_join(NULL, NULL, &ht_r[0], h_s[0][i], slen[0][i], stream, i)) {
fprintf(stderr, "hash join failed for the %dth chunk of S\n",
i);
break;
}
// printf("Finish hash_join\n");
}
}
gettimeofday(&t2, NULL);
printf("Time on hash join: %lf ms ( %lf ms per join )\n", TIME_DIFF(t1, t2), TIME_DIFF(t1, t2)/S_CHUNK_CNT);
free_hash_table(&ht_r[0]);
free_hash_table(&ht_r[1]);
	for(int i = 0; i < S_CHUNK_CNT; i++)
		cudaFreeHost(h_s[0][i]); // chunks of S were pinned with cudaMallocHost
gettimeofday(&t_end, NULL);
printf("Total time taken: %lf ms\n", TIME_DIFF(t_start, t_end));
printf("Time of ending hj: %lf \n", TVAL(t_end));
	return ret;
}