hip_filename (string, 5–84 chars) | hip_content (string, 79–9.69M chars) | cuda_filename (string, 4–83 chars) | cuda_content (string, 19–9.69M chars)
---|---|---|---|
32b9ad052ef82b8d65e040e276531dac1c82ba81.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef __CMERGE_CUH__
#define __CMERGE_CUH__
#include "cuCCL.cuh"
#include "GDALRead.h"
#include "compact_template.cuh"
// #define _DEBUG
/*
This class wraps a large image that must be split into blocks and processed block by block.
*/
void initCUDA(int& devIdx)
{
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
if (devIdx < 0) dev = 0;
if (devIdx > deviceCount - 1) dev = deviceCount - 1;
else dev = devIdx;
hipSetDevice(dev);
hipDeviceProp_t prop;
if (hipGetDeviceProperties(&prop, dev) == hipSuccess)
{
printf("Using device %d:\n", dev);
printf("%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
prop.name, (int)prop.totalGlobalMem, (int)prop.major,
(int)prop.minor, (int)prop.clockRate);
}
if (prop.major < 2)
{
fprintf(stderr, "ERROR: CUDPP hash tables are only supported on "
"devices with compute\n capability 2.0 or greater; "
"exiting.\n");
exit(1);
}
}
__global__ void getIsValid(int* d_label, int* d_isValid, int width, int task_height)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int gid = x + y * width;//global 1D index;
bool limits = x < width && y < task_height;
if (limits)
{
int center = d_label[gid];
if(center != NO_USE_CLASS)
{
if (d_label[gid] == gid)
{
d_isValid[gid] = 1;
}
}
}
}
__global__ void updateDevLabel(int * dev_labelMap, int labelStart, int task_height, int width)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int gid = x + y * width;//global 1D index;
bool limits = x < width && y < task_height;
if (limits)
{
int center = dev_labelMap[gid];
if( center != NO_USE_CLASS)
{
dev_labelMap[gid] += labelStart;
}
}
}
int findMerge4(int width, int BGvalue, int* Meg, int* h_subDataFirst, int *h_subDataSecond, int* lastRowLabel, int* firstRowLabel)
{
int Meg_count = 0;// count of merge pairs recorded in Meg
int center;
for (int i = 0; i < width; i++)
{
int LastLabel = -1;// label from the previous block's last row
int CurLabel = -1;
center = h_subDataFirst[i];// use each pixel of the previous block's last row as the template centre
if (center == BGvalue)
continue;
if (center == h_subDataSecond[i])// same column: the two boundary rows hold the same value
{
LastLabel = lastRowLabel[i];// label from the previous block's last row
CurLabel = firstRowLabel[i];
int repetition = 0;// has this pair already been recorded?
for (int i = 0; i < Meg_count; i++)
{
if ((Meg[2 * i] == LastLabel) && (Meg[2 * i + 1] == CurLabel))
{
repetition = 1;
break;
}
}
if (!repetition)
{
Meg[Meg_count * 2] = LastLabel;
Meg[Meg_count * 2 + 1] = CurLabel;
Meg_count++;
}
}
}
return Meg_count;
}
int findMerge8(int width, int BGvalue, int* Meg, int* h_subDataFirst, int *h_subDataSecond, int* lastRowLabel, int* firstRowLabel)
{
int Meg_count = 0;// count of merge pairs recorded in Meg
int center;
for (int i = 0; i < width; i++)
{
int LastLabel = -1;// label from the previous block's last row
int CurLabel = -1;
center = h_subDataFirst[i];// use each pixel of the previous block's last row as the template centre
if (center == BGvalue)
continue;
if (center == h_subDataSecond[i])// same column: the two boundary rows hold the same value
{
LastLabel = lastRowLabel[i];// label from the previous block's last row
CurLabel = firstRowLabel[i];
int repetition = 0;// has this pair already been recorded?
for (int i = 0; i < Meg_count; i++)
{
if ((Meg[2 * i] == LastLabel) && (Meg[2 * i + 1] == CurLabel))
{
repetition = 1;
break;
}
}
if (!repetition)
{
Meg[Meg_count * 2] = LastLabel;
Meg[Meg_count * 2 + 1] = CurLabel;
Meg_count++;
}
}
if ((i - 1 >= 0) && (center == h_subDataSecond[i - 1]))// previous block's last row matches the cell below and to the left
{
LastLabel = lastRowLabel[i];// label from the previous block's last row
CurLabel = firstRowLabel[i - 1];
int repetition = 0;// has this pair already been recorded?
for (int i = 0; i < Meg_count; i++)
{
if ((Meg[2 * i] == LastLabel) && (Meg[2 * i + 1] == CurLabel))
{
repetition = 1;
break;
}
}
if (!repetition)
{
Meg[Meg_count * 2] = LastLabel;
Meg[Meg_count * 2 + 1] = CurLabel;
Meg_count++;
}
}
if ((i + 1 < width) && (center == h_subDataSecond[i + 1]))// previous block's last row matches the cell below and to the right
{
LastLabel = lastRowLabel[i];// label from the previous block's last row
CurLabel = firstRowLabel[i + 1];
int repetition = 0;// has this pair already been recorded?
for (int i = 0; i < Meg_count; i++)
{
if ((Meg[2 * i] == LastLabel) && (Meg[2 * i + 1] == CurLabel))
{
repetition = 1;
break;
}
}
if (!repetition)
{
Meg[Meg_count * 2] = LastLabel;
Meg[Meg_count * 2 + 1] = CurLabel;
Meg_count++;
}
}
}
return Meg_count;
}
template<class PatchType, class MergeType>
void mergePatchUsingUF(int* mergeArr, int mergeCount, CuLSM::UnionFind<PatchType, MergeType> *Quf)
{
for (int i = 0; i < mergeCount; i++)
{
if (mergeArr[2 * i] != -1)
{
int cur_index = mergeArr[2 * i + 1];
int last_index = Quf->qFind(mergeArr[2 * i]);
for (int j = i + 1; j < mergeCount; j++)// scan the remaining pairs for the same cur_index (U-shaped connectivity)
{
if (mergeArr[j * 2 + 1] == cur_index)
{
//merge
int cur_lastindex = Quf->qFind(mergeArr[j * 2]);
Quf->qUnion(cur_lastindex, cur_index);// union the two labels
mergeArr[j * 2] = mergeArr[j * 2 + 1] = -1;// mark the pair as invalid
}
}
//merge
Quf->qUnion(last_index, cur_index);
mergeArr[i * 2] = mergeArr[i * 2 + 1] = -1;// mark the pair as merged
}
}
}
class CBigImgCCL
{
private:
cuCCLClass *mpCclObj;
#ifdef _DEBUG
PRead* pread;
int* m_src;
#else
CGDALRead* pread;
#endif
CuLSM::UnionFind<CuLSM::CPatchLabel, ADD<int>> *Quf;
CuLSM::dataBlock* dataBlockArray;
int blockNum;
GlobalConfiguration G_Config;
private:
int* vecOriginValRow1;
int* vecOriginValRow2;
int* vecLabelValRow1;
int* vecLabelValRow2;
dim3 blockDim1;
dim3 gridDim1;
public:
#ifdef _DEBUG
CBigImgCCL(PRead* _pread, int *_src);
#else
CBigImgCCL(const char* _filename);
#endif
void calInEachBlock();
~CBigImgCCL()
{
delete Quf;
delete pread;
free(dataBlockArray);
if(vecOriginValRow1!=NULL)
free(vecOriginValRow1);
if(vecOriginValRow2!=NULL)
free(vecOriginValRow2);
if(vecLabelValRow1!=NULL)
free(vecLabelValRow1);
if(vecLabelValRow2!=NULL)
free(vecLabelValRow2);
}
private:
int getDevideInfo();
void compactMethod(CuLSM::dataBlock *curBlock);
void recordBoundary(CuLSM::dataBlock &curBlock, int iBlock, int width);
void mergePatch();
};
#ifdef _DEBUG
CBigImgCCL::CBigImgCCL(PRead* _pread, int* _src)
{
pread = _pread;
m_src = _src;
int width = pread->cols();
int height = pread->rows();
int nodata = (int)pread->invalidValue();
blockNum = getDevideInfo();
if(blockNum > 1)
{
vecOriginValRow1 = (int*)malloc(sizeof(int)* width * (blockNum - 1));
vecOriginValRow2 = (int*)malloc(sizeof(int)* width * (blockNum - 1));
vecLabelValRow1 = (int*)malloc(sizeof(int)* width * (blockNum - 1));
vecLabelValRow2 = (int*)malloc(sizeof(int)* width * (blockNum - 1));
checkMemAlloc(vecOriginValRow1);
checkMemAlloc(vecOriginValRow2);
checkMemAlloc(vecLabelValRow1);
checkMemAlloc(vecLabelValRow2);
}
else
{
vecOriginValRow1 = NULL;
vecOriginValRow2 = NULL;
vecLabelValRow1 = NULL;
vecLabelValRow2 = NULL;
}
int2 blockSize; blockSize.x = 32; blockSize.y = 16;
blockDim1 = dim3(blockSize.x, blockSize.y, 1);
gridDim1 = dim3((width + blockSize.x - 1) / blockSize.x, (height + blockSize.y - 1) / blockSize.y, 1);
Quf = new CuLSM::UnionFind<CuLSM::CPatchLabel, ADD<int>>();
G_Config = Singleton<GlobalConfiguration>::Instance();
}
#else
CBigImgCCL::CBigImgCCL(const char* _filename)
{
pread = new CGDALRead;
if (!pread->loadMetaData(_filename))
{
cout << "load error!" << endl;
}
cout << "rows:" << pread->rows() << endl;
cout << "cols:" << pread->cols() << endl;
cout << "bandnum:" << pread->bandnum() << endl;
cout << "datalength:" << pread->datalength() << endl;
cout << "invalidValue:" << pread->invalidValue() << endl;
cout << "datatype:" << GDALGetDataTypeName(pread->datatype()) << endl;
cout << "projectionRef:" << pread->projectionRef() << endl;
cout << "perPixelSize:" << pread->perPixelSize() << endl;
int width = pread->cols();
int height = pread->rows();
int nodata = (int)pread->invalidValue();
blockNum = getDevideInfo();
if(blockNum > 1)
{
vecOriginValRow1 = (int*)malloc(sizeof(int)* width * (blockNum - 1));
vecOriginValRow2 = (int*)malloc(sizeof(int)* width * (blockNum - 1));
vecLabelValRow1 = (int*)malloc(sizeof(int)* width * (blockNum - 1));
vecLabelValRow2 = (int*)malloc(sizeof(int)* width * (blockNum - 1));
checkMemAlloc(vecOriginValRow1);
checkMemAlloc(vecOriginValRow2);
checkMemAlloc(vecLabelValRow1);
checkMemAlloc(vecLabelValRow2);
}
else
{
vecOriginValRow1 = NULL;
vecOriginValRow2 = NULL;
vecLabelValRow1 = NULL;
vecLabelValRow2 = NULL;
}
int2 blockSize; blockSize.x = 32; blockSize.y = 16;
blockDim1 = dim3(blockSize.x, blockSize.y, 1);
gridDim1 = dim3((width + blockSize.x - 1) / blockSize.x, (height + blockSize.y - 1) / blockSize.y, 1);
Quf = new CuLSM::UnionFind<CuLSM::CPatchLabel, ADD<int>>();
G_Config = Singleton<GlobalConfiguration>::Instance();
}
#endif
int CBigImgCCL::getDevideInfo()
{
int width = pread->cols();
int height = pread->rows();
int nodata = (int)pread->invalidValue();
int maxnum; // number of raster cells that can be read in
size_t freeGPU, totalGPU;
hipMemGetInfo(&freeGPU, &totalGPU);//size_t* free, size_t* total
cout << "(free,total)" << freeGPU << "," << totalGPU << endl;
maxnum = (freeGPU) / (sizeof(int)* 10);// each pixel needs roughly 6 intermediate variables, all of type int
// maxnum = (freeGPU) / (sizeof(int)* 6 * 2);// each pixel needs roughly 6 intermediate variables, all of type int
int sub_height = maxnum / width - 5; // sub_height: the height of each sub-block
// sub_height = 1000;
#ifdef _DEBUG
sub_height = 2;
#endif
int blockNum = height / sub_height + 1; // total number of blocks
//*dataBlockArray = new CuLSM::dataBlock[blockNum];
dataBlockArray = (CuLSM::dataBlock*)malloc(blockNum*sizeof(CuLSM::dataBlock));
int subIdx = 0;
for (int height_all = 0; height_all < height; height_all += sub_height)
{
int task_start = subIdx*sub_height;
int task_end;
if ((subIdx + 1)*sub_height - height <= 0)
task_end = (subIdx + 1)*sub_height - 1;
else
task_end = height - 1;
int data_start, data_end;
if (task_start - 1 <= 0)
data_start = 0;
else
data_start = task_start - 1;
if (task_end + 1 >= height - 1)
data_end = height - 1;
else
data_end = task_end + 1;
int data_height = data_end - data_start + 1;
int task_height = task_end - task_start + 1;
dataBlockArray[subIdx].mnDataStart = data_start;
dataBlockArray[subIdx].mnDataEnd = data_end;
dataBlockArray[subIdx].mnTaskStart = task_start;
dataBlockArray[subIdx].mnTaskEnd = task_end;
dataBlockArray[subIdx].mnSubTaskHeight = task_height;
dataBlockArray[subIdx].mnSubDataHeight = data_height;
dataBlockArray[subIdx].mnStartTag = task_start*width;// start tag of this block: the 1-D index of its first cell
dataBlockArray[subIdx].mnWidth = width;
dataBlockArray[subIdx].mnNodata = nodata;
subIdx++;
}
return blockNum;
}
void CBigImgCCL::recordBoundary(CuLSM::dataBlock &curBlock, int iBlock, int width)
{
int nBytePerLine = sizeof(int)*width;
if (curBlock.isFirstBlock())
{
memcpy(vecOriginValRow1 + iBlock*width, curBlock.mh_SubData + width*(curBlock.mnSubTaskHeight - 1), nBytePerLine);
memcpy(vecLabelValRow1 + iBlock*width, curBlock.mh_LabelVal + width*(curBlock.mnSubTaskHeight - 1), nBytePerLine);
}
else if (curBlock.isLastBlock())
{
memcpy(vecOriginValRow2 + (iBlock - 1)*width, curBlock.mh_SubData, nBytePerLine);
memcpy(vecLabelValRow2 + (iBlock - 1)*width, curBlock.mh_LabelVal, nBytePerLine);
}
else
{
memcpy(vecOriginValRow2 + (iBlock - 1)*width, curBlock.mh_SubData, nBytePerLine);
memcpy(vecLabelValRow2 + (iBlock - 1)*width, curBlock.mh_LabelVal, nBytePerLine);
memcpy(vecOriginValRow1 + iBlock*width, curBlock.mh_SubData + width*(curBlock.mnSubTaskHeight - 1), nBytePerLine);
memcpy(vecLabelValRow1 + iBlock*width, curBlock.mh_LabelVal + width*(curBlock.mnSubTaskHeight - 1), nBytePerLine);
}
}
void CBigImgCCL::compactMethod(CuLSM::dataBlock *_curBlock)
{
int _width = mpCclObj->width;
int _taskHeight = mpCclObj->task_height;
int _nBytes_task = sizeof(int) * _width * _taskHeight;
const int numElements = _width * _taskHeight;
int* d_outputLabelOfSubData = mpCclObj->devLabelMap->getDevData();
int* d_inputSrcSubData = mpCclObj->devSrcData->getDevData();
Array2D< Cutype<int> >* d_IsValid = new Array2D< Cutype<int> >(_taskHeight,_width);
_curBlock->mh_curPatchNum = -1; // -1 means not yet reduced, i.e. the number of patches in this block is still unknown
int* d_IsValidData = d_IsValid->getDevData();
getIsValid << <gridDim1,blockDim1 >> >(d_outputLabelOfSubData, d_IsValidData, _width, _taskHeight);
//record the 1-D positions of the rootMap entries, i.e. the positions where d_label[gid] == gid
compact_t_device(&(_curBlock->mh_RootPos), &(_curBlock->mh_curPatchNum),
d_outputLabelOfSubData, d_IsValidData, numElements);
updateDevLabel << <gridDim1,blockDim1 >> > (d_outputLabelOfSubData, _curBlock->mnStartTag, _taskHeight, _width);
_curBlock->mh_LabelVal= (int*)malloc(_nBytes_task);
checkCudaErrors(hipMemcpy(_curBlock->mh_LabelVal, d_outputLabelOfSubData, _nBytes_task, hipMemcpyDeviceToHost));
compact_t_device(&(_curBlock->mh_compactLabel), &(_curBlock->mh_curPatchNum),
d_outputLabelOfSubData, d_IsValidData, numElements);
compact_t_device(&(_curBlock->mh_compactSrc), &(_curBlock->mh_curPatchNum),
d_inputSrcSubData, d_IsValidData, numElements);
cout << "h_outputNumOfValidElements: " << _curBlock->mh_curPatchNum << endl;
for (int i = 0; i < _curBlock->mh_curPatchNum; i++)
{
CuLSM::CPatchLabel temp;
temp.nLabel = _curBlock->mh_compactLabel[i];
temp.nType = _curBlock->mh_compactSrc[i];
temp.isUseful = false;
Quf->rootMap.insert(make_pair(temp.nLabel, temp));
}
if (d_IsValid!=NULL)
{
delete d_IsValid;
d_IsValid = NULL;
}
}
void CBigImgCCL::mergePatch()
{
int*h_rowOneValue = vecOriginValRow1;
int*h_rowTwoValue = vecOriginValRow2;
int*h_rowOneLabel = vecLabelValRow1;
int*h_rowTwoLabel = vecLabelValRow2;
int width = pread->cols();
int BGvalue = (int)pread->invalidValue();
clock_t start1, end1;
start1 = clock();
int *mergeArr = NULL; // merge pair array
int mergeCount = 0; // number of merge pairs
int i;
for (i = 0; i< blockNum - 1; i++) //mergeStructArraySize = blockNum-1
{
mergeArr = (int *)malloc(sizeof(int)* width * 2 * 3); // room for up to 3 pairs per column, which findMerge8 can emit
if (mergeArr == NULL)
{
printf("\nERROR! Can not allocate space for mergeArr!");
exit(-1);
}
if (G_Config.USE_DIAGS)
{
mergeCount = findMerge8(width, BGvalue, mergeArr, h_rowOneValue + i*width, h_rowTwoValue + i*width, h_rowOneLabel + i*width, h_rowTwoLabel + i*width);
}
else
{
mergeCount = findMerge4(width, BGvalue, mergeArr, h_rowOneValue + i*width, h_rowTwoValue + i*width, h_rowOneLabel + i*width, h_rowTwoLabel + i*width);
}
mergePatchUsingUF(mergeArr, mergeCount, Quf);
free(mergeArr);
mergeArr = NULL;
mergeCount = 0;
}
end1 = clock();
double dur = (double)(end1 - start1);
printf("LineCCLNoSplit Use Time:%f\n", (dur / CLOCKS_PER_SEC));
}
void CBigImgCCL::calInEachBlock()
{
int width = pread->cols();
if(blockNum > 1)
{
for (int iBlock = 0; iBlock < blockNum; iBlock++)
{
CuLSM::dataBlock *curBlock = &(dataBlockArray[iBlock]);
//step 1 GDAL READ
#ifdef _DEBUG
curBlock->loadBlockData(pread,m_src);
#else
curBlock->loadBlockData(pread);
#endif
//step 2 run CCL save label result in -----cuCCLClass: devLabelMap;
mpCclObj = new cuCCLClass(curBlock->mh_SubData, curBlock->mnWidth, curBlock->mnSubTaskHeight, curBlock->mnNodata);
mpCclObj->gpuLineUF(blockDim1,gridDim1);
//step 3 compress the sparse matrics for output
//save every candidate root position into mh_RootPos and the label values into mh_LabelVal
//build the nodes in Quf->rootMap to prepare for merging the blocks
compactMethod(curBlock);
//step 4 record boundary between two blocks for UnionFind merge
recordBoundary(dataBlockArray[iBlock], iBlock, width);
if(mpCclObj!=NULL)
{
delete mpCclObj;
mpCclObj = NULL;
}
}
// Quf->initUF(vecAllLabel);
mergePatch();
Quf->qRelabel(); // relabel rootMap so that the generated labels are consecutive
Quf->qOutputRootMap("patchLevelResult");
//rewrite every pixel of the connected components with the consecutive labels, overwriting mh_LabelVal
cout<<"curContinueLabel======================================" << endl;
for (int iBlock = 0; iBlock < blockNum; iBlock++)
{
CuLSM::dataBlock *curBlock = &(dataBlockArray[iBlock]);
curBlock->getContinueLabelVal(Quf);
}
#ifdef _DEBUG
for (int iBlock = 0; iBlock < blockNum; iBlock++)
{
CuLSM::dataBlock *curBlock = &(dataBlockArray[iBlock]);
int *curContinueLabel = curBlock->mh_LabelVal;
for (int i = 0; i < curBlock->mnSubTaskHeight; ++i)
{
for (int j = 0; j < curBlock->mnWidth; ++j)
{
cout << curContinueLabel[i*curBlock->mnWidth+j]<<"\t";
}
cout<<endl;
}
}
#endif
}
else
{
CuLSM::dataBlock *curBlock = &dataBlockArray[0];
//step 1 GDAL READ
#ifdef _DEBUG
curBlock->loadBlockData(pread,m_src);
#else
curBlock->loadBlockData(pread);
#endif
//step 2 run CCL save label result in -----cuCCLClass: devLabelMap;
mpCclObj = new cuCCLClass(curBlock->mh_SubData, curBlock->mnWidth, curBlock->mnSubTaskHeight, curBlock->mnNodata);
mpCclObj->gpuLineUF(blockDim1,gridDim1);
//step 3 compress the sparse matrics for output
compactMethod(curBlock);
Quf->qRelabel(); // relabel rootMap so that the generated labels are consecutive
Quf->qOutputRootMap("patchLevelResult");
if(mpCclObj!=NULL)
{
delete mpCclObj;
mpCclObj = NULL;
}
//rewrite the connected components with consecutive labels
cout<<"curContinueLabel======================================" << endl;
curBlock->getContinueLabelVal(Quf);
#ifdef _DEBUG
int *curContinueLabel = curBlock->mh_LabelVal;
for (int i = 0; i < curBlock->mnSubTaskHeight; ++i)
{
for (int j = 0; j < curBlock->mnWidth; ++j)
{
cout << curContinueLabel[i*curBlock->mnWidth+j]<<"\t";
}
cout<<endl;
}
#endif
}
}
#endif
int main(int argc, char const *argv[])
{
int array[25] = { 1, 3, 3, 3, 3,
1, 3, 3, 1, 3,
1, 2, 1, 3, 2,
2, 1, 3, 2, 3,
1, 2, 2, 3, 2 };
int* srcTest = new int[25];
for (int i = 0; i < 25; i++)
{
srcTest[i] = array[i];
}
PRead *pread = new PRead(5, 5, 0);
bool useful_class[10] = {1,1,1,1,1,1,1,1,1,1};
// bool useful_class[10] = {0,0,1,1,1,1,1,0,1,1};
//3,4,5,7
// bool useful_class[10] = {0,0,0,0,1,0,0,0,0,0};
//initialize CUDA
int gpuIdx = 1;// select a GPU with compute capability above 3.5
initCUDA(gpuIdx);
//all GPU memory initialization must happen after initCUDA, otherwise random values appear
GlobalConfiguration& config = Singleton<GlobalConfiguration>::Instance();
config.set_USE(useful_class);
config.set_USE_DIAGS(true);
#ifdef _DEBUG
CBigImgCCL *ccl = new CBigImgCCL(pread, srcTest);
#else
CBigImgCCL *ccl = new CBigImgCCL(argv[1]);
#endif
ccl->calInEachBlock();
delete ccl;
return 0;
}
|
32b9ad052ef82b8d65e040e276531dac1c82ba81.cu
|
#ifndef __CMERGE_CUH__
#define __CMERGE_CUH__
#include "cuCCL.cuh"
#include "GDALRead.h"
#include "compact_template.cuh"
// #define _DEBUG
/*
This class wraps a large image that must be split into blocks and processed block by block.
*/
void initCUDA(int& devIdx)
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
if (devIdx < 0) dev = 0;
if (devIdx > deviceCount - 1) dev = deviceCount - 1;
else dev = devIdx;
cudaSetDevice(dev);
cudaDeviceProp prop;
if (cudaGetDeviceProperties(&prop, dev) == cudaSuccess)
{
printf("Using device %d:\n", dev);
printf("%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
prop.name, (int)prop.totalGlobalMem, (int)prop.major,
(int)prop.minor, (int)prop.clockRate);
}
if (prop.major < 2)
{
fprintf(stderr, "ERROR: CUDPP hash tables are only supported on "
"devices with compute\n capability 2.0 or greater; "
"exiting.\n");
exit(1);
}
}
__global__ void getIsValid(int* d_label, int* d_isValid, int width, int task_height)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int gid = x + y * width;//global 1D index;
bool limits = x < width && y < task_height;
if (limits)
{
int center = d_label[gid];
if(center != NO_USE_CLASS)
{
if (d_label[gid] == gid)
{
d_isValid[gid] = 1;
}
}
}
}
__global__ void updateDevLabel(int * dev_labelMap, int labelStart, int task_height, int width)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int gid = x + y * width;//global 1D index;
bool limits = x < width && y < task_height;
if (limits)
{
int center = dev_labelMap[gid];
if( center != NO_USE_CLASS)
{
dev_labelMap[gid] += labelStart;
}
}
}
int findMerge4(int width, int BGvalue, int* Meg, int* h_subDataFirst, int *h_subDataSecond, int* lastRowLabel, int* firstRowLabel)
{
int Meg_count = 0;// count of merge pairs recorded in Meg
int center;
for (int i = 0; i < width; i++)
{
int LastLabel = -1;// label from the previous block's last row
int CurLabel = -1;
center = h_subDataFirst[i];// use each pixel of the previous block's last row as the template centre
if (center == BGvalue)
continue;
if (center == h_subDataSecond[i])// same column: the two boundary rows hold the same value
{
LastLabel = lastRowLabel[i];// label from the previous block's last row
CurLabel = firstRowLabel[i];
int repetition = 0;// has this pair already been recorded?
for (int i = 0; i < Meg_count; i++)
{
if ((Meg[2 * i] == LastLabel) && (Meg[2 * i + 1] == CurLabel))
{
repetition = 1;
break;
}
}
if (!repetition)
{
Meg[Meg_count * 2] = LastLabel;
Meg[Meg_count * 2 + 1] = CurLabel;
Meg_count++;
}
}
}
return Meg_count;
}
int findMerge8(int width, int BGvalue, int* Meg, int* h_subDataFirst, int *h_subDataSecond, int* lastRowLabel, int* firstRowLabel)
{
int Meg_count = 0;// count of merge pairs recorded in Meg
int center;
for (int i = 0; i < width; i++)
{
int LastLabel = -1;// label from the previous block's last row
int CurLabel = -1;
center = h_subDataFirst[i];// use each pixel of the previous block's last row as the template centre
if (center == BGvalue)
continue;
if (center == h_subDataSecond[i])// same column: the two boundary rows hold the same value
{
LastLabel = lastRowLabel[i];// label from the previous block's last row
CurLabel = firstRowLabel[i];
int repetition = 0;// has this pair already been recorded?
for (int i = 0; i < Meg_count; i++)
{
if ((Meg[2 * i] == LastLabel) && (Meg[2 * i + 1] == CurLabel))
{
repetition = 1;
break;
}
}
if (!repetition)
{
Meg[Meg_count * 2] = LastLabel;
Meg[Meg_count * 2 + 1] = CurLabel;
Meg_count++;
}
}
if ((i - 1 >= 0) && (center == h_subDataSecond[i - 1]))// previous block's last row matches the cell below and to the left
{
LastLabel = lastRowLabel[i];// label from the previous block's last row
CurLabel = firstRowLabel[i - 1];
int repetition = 0;// has this pair already been recorded?
for (int i = 0; i < Meg_count; i++)
{
if ((Meg[2 * i] == LastLabel) && (Meg[2 * i + 1] == CurLabel))
{
repetition = 1;
break;
}
}
if (!repetition)
{
Meg[Meg_count * 2] = LastLabel;
Meg[Meg_count * 2 + 1] = CurLabel;
Meg_count++;
}
}
if ((i + 1 < width) && (center == h_subDataSecond[i + 1]))// previous block's last row matches the cell below and to the right
{
LastLabel = lastRowLabel[i];// label from the previous block's last row
CurLabel = firstRowLabel[i + 1];
int repetition = 0;// has this pair already been recorded?
for (int i = 0; i < Meg_count; i++)
{
if ((Meg[2 * i] == LastLabel) && (Meg[2 * i + 1] == CurLabel))
{
repetition = 1;
break;
}
}
if (!repetition)
{
Meg[Meg_count * 2] = LastLabel;
Meg[Meg_count * 2 + 1] = CurLabel;
Meg_count++;
}
}
}
return Meg_count;
}
template<class PatchType, class MergeType>
void mergePatchUsingUF(int* mergeArr, int mergeCount, CuLSM::UnionFind<PatchType, MergeType> *Quf)
{
for (int i = 0; i < mergeCount; i++)
{
if (mergeArr[2 * i] != -1)
{
int cur_index = mergeArr[2 * i + 1];
int last_index = Quf->qFind(mergeArr[2 * i]);
for (int j = i + 1; j < mergeCount; j++)// scan the remaining pairs for the same cur_index (U-shaped connectivity)
{
if (mergeArr[j * 2 + 1] == cur_index)
{
//merge
int cur_lastindex = Quf->qFind(mergeArr[j * 2]);
Quf->qUnion(cur_lastindex, cur_index);// union the two labels
mergeArr[j * 2] = mergeArr[j * 2 + 1] = -1;// mark the pair as invalid
}
}
//merge
Quf->qUnion(last_index, cur_index);
mergeArr[i * 2] = mergeArr[i * 2 + 1] = -1;// mark the pair as merged
}
}
}
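// Illustrative walk-through (hypothetical labels, not part of the original source):
// if the boundary scan produced mergeArr = {5,100, 7,100, 9,200} with mergeCount = 3,
// the pair (5,100) is processed first; the inner loop finds that (7,100) shares
// cur_index 100 (the U-shaped case), unions qFind(7) into 100 and invalidates that
// pair, then qFind(5) is unioned into 100. The remaining pair (9,200) is handled on
// its own, leaving the union-find with the two sets {5, 7, 100} and {9, 200}.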
class CBigImgCCL
{
private:
cuCCLClass *mpCclObj;
#ifdef _DEBUG
PRead* pread;
int* m_src;
#else
CGDALRead* pread;
#endif
CuLSM::UnionFind<CuLSM::CPatchLabel, ADD<int>> *Quf;
CuLSM::dataBlock* dataBlockArray;
int blockNum;
GlobalConfiguration G_Config;
private:
int* vecOriginValRow1;
int* vecOriginValRow2;
int* vecLabelValRow1;
int* vecLabelValRow2;
dim3 blockDim1;
dim3 gridDim1;
public:
#ifdef _DEBUG
CBigImgCCL(PRead* _pread, int *_src);
#else
CBigImgCCL(const char* _filename);
#endif
void calInEachBlock();
~CBigImgCCL()
{
delete Quf;
delete pread;
free(dataBlockArray);
if(vecOriginValRow1!=NULL)
free(vecOriginValRow1);
if(vecOriginValRow2!=NULL)
free(vecOriginValRow2);
if(vecLabelValRow1!=NULL)
free(vecLabelValRow1);
if(vecLabelValRow2!=NULL)
free(vecLabelValRow2);
}
private:
int getDevideInfo();
void compactMethod(CuLSM::dataBlock *curBlock);
void recordBoundary(CuLSM::dataBlock &curBlock, int iBlock, int width);
void mergePatch();
};
#ifdef _DEBUG
CBigImgCCL::CBigImgCCL(PRead* _pread, int* _src)
{
pread = _pread;
m_src = _src;
int width = pread->cols();
int height = pread->rows();
int nodata = (int)pread->invalidValue();
blockNum = getDevideInfo();
if(blockNum > 1)
{
vecOriginValRow1 = (int*)malloc(sizeof(int)* width * (blockNum - 1));
vecOriginValRow2 = (int*)malloc(sizeof(int)* width * (blockNum - 1));
vecLabelValRow1 = (int*)malloc(sizeof(int)* width * (blockNum - 1));
vecLabelValRow2 = (int*)malloc(sizeof(int)* width * (blockNum - 1));
checkMemAlloc(vecOriginValRow1);
checkMemAlloc(vecOriginValRow2);
checkMemAlloc(vecLabelValRow1);
checkMemAlloc(vecLabelValRow2);
}
else
{
vecOriginValRow1 = NULL;
vecOriginValRow2 = NULL;
vecLabelValRow1 = NULL;
vecLabelValRow2 = NULL;
}
int2 blockSize; blockSize.x = 32; blockSize.y = 16;
blockDim1 = dim3(blockSize.x, blockSize.y, 1);
gridDim1 = dim3((width + blockSize.x - 1) / blockSize.x, (height + blockSize.y - 1) / blockSize.y, 1);
Quf = new CuLSM::UnionFind<CuLSM::CPatchLabel, ADD<int>>();
G_Config = Singleton<GlobalConfiguration>::Instance();
}
#else
CBigImgCCL::CBigImgCCL(const char* _filename)
{
pread = new CGDALRead;
if (!pread->loadMetaData(_filename))
{
cout << "load error!" << endl;
}
cout << "rows:" << pread->rows() << endl;
cout << "cols:" << pread->cols() << endl;
cout << "bandnum:" << pread->bandnum() << endl;
cout << "datalength:" << pread->datalength() << endl;
cout << "invalidValue:" << pread->invalidValue() << endl;
cout << "datatype:" << GDALGetDataTypeName(pread->datatype()) << endl;
cout << "projectionRef:" << pread->projectionRef() << endl;
cout << "perPixelSize:" << pread->perPixelSize() << endl;
int width = pread->cols();
int height = pread->rows();
int nodata = (int)pread->invalidValue();
blockNum = getDevideInfo();
if(blockNum > 1)
{
vecOriginValRow1 = (int*)malloc(sizeof(int)* width * (blockNum - 1));
vecOriginValRow2 = (int*)malloc(sizeof(int)* width * (blockNum - 1));
vecLabelValRow1 = (int*)malloc(sizeof(int)* width * (blockNum - 1));
vecLabelValRow2 = (int*)malloc(sizeof(int)* width * (blockNum - 1));
checkMemAlloc(vecOriginValRow1);
checkMemAlloc(vecOriginValRow2);
checkMemAlloc(vecLabelValRow1);
checkMemAlloc(vecLabelValRow2);
}
else
{
vecOriginValRow1 = NULL;
vecOriginValRow2 = NULL;
vecLabelValRow1 = NULL;
vecLabelValRow2 = NULL;
}
int2 blockSize; blockSize.x = 32; blockSize.y = 16;
blockDim1 = dim3(blockSize.x, blockSize.y, 1);
gridDim1 = dim3((width + blockSize.x - 1) / blockSize.x, (height + blockSize.y - 1) / blockSize.y, 1);
Quf = new CuLSM::UnionFind<CuLSM::CPatchLabel, ADD<int>>();
G_Config = Singleton<GlobalConfiguration>::Instance();
}
#endif
int CBigImgCCL::getDevideInfo()
{
int width = pread->cols();
int height = pread->rows();
int nodata = (int)pread->invalidValue();
int maxnum; // number of raster cells that can be read in
size_t freeGPU, totalGPU;
cudaMemGetInfo(&freeGPU, &totalGPU);//size_t* free, size_t* total
cout << "(free,total)" << freeGPU << "," << totalGPU << endl;
maxnum = (freeGPU) / (sizeof(int)* 10);// each pixel needs roughly 6 intermediate variables, all of type int
// maxnum = (freeGPU) / (sizeof(int)* 6 * 2);// each pixel needs roughly 6 intermediate variables, all of type int
int sub_height = maxnum / width - 5; // sub_height: the height of each sub-block
// sub_height = 1000;
#ifdef _DEBUG
sub_height = 2;
#endif
int blockNum = height / sub_height + 1; // total number of blocks
//*dataBlockArray = new CuLSM::dataBlock[blockNum];
dataBlockArray = (CuLSM::dataBlock*)malloc(blockNum*sizeof(CuLSM::dataBlock));
int subIdx = 0;
for (int height_all = 0; height_all < height; height_all += sub_height)
{
int task_start = subIdx*sub_height;
int task_end;
if ((subIdx + 1)*sub_height - height <= 0)
task_end = (subIdx + 1)*sub_height - 1;
else
task_end = height - 1;
int data_start, data_end;
if (task_start - 1 <= 0)
data_start = 0;
else
data_start = task_start - 1;
if (task_end + 1 >= height - 1)
data_end = height - 1;
else
data_end = task_end + 1;
int data_height = data_end - data_start + 1;
int task_height = task_end - task_start + 1;
dataBlockArray[subIdx].mnDataStart = data_start;
dataBlockArray[subIdx].mnDataEnd = data_end;
dataBlockArray[subIdx].mnTaskStart = task_start;
dataBlockArray[subIdx].mnTaskEnd = task_end;
dataBlockArray[subIdx].mnSubTaskHeight = task_height;
dataBlockArray[subIdx].mnSubDataHeight = data_height;
dataBlockArray[subIdx].mnStartTag = task_start*width;// start tag of this block: the 1-D index of its first cell
dataBlockArray[subIdx].mnWidth = width;
dataBlockArray[subIdx].mnNodata = nodata;
subIdx++;
}
return blockNum;
}
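// Worked example with made-up numbers (not from the original source): with
// freeGPU = 4,000,000,000 bytes, maxnum = freeGPU / (sizeof(int) * 10) = 100,000,000
// cells; for width = 10,000 this gives sub_height = 100,000,000 / 10,000 - 5 = 9,995
// rows per block, so a 25,000-row raster is split into blockNum = 25,000 / 9,995 + 1 = 3
// blocks, each with its task rows padded by one extra data row on every interior side
// so that neighbouring blocks overlap for the later boundary merge.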
void CBigImgCCL::recordBoundary(CuLSM::dataBlock &curBlock, int iBlock, int width)
{
int nBytePerLine = sizeof(int)*width;
if (curBlock.isFirstBlock())
{
memcpy(vecOriginValRow1 + iBlock*width, curBlock.mh_SubData + width*(curBlock.mnSubTaskHeight - 1), nBytePerLine);
memcpy(vecLabelValRow1 + iBlock*width, curBlock.mh_LabelVal + width*(curBlock.mnSubTaskHeight - 1), nBytePerLine);
}
else if (curBlock.isLastBlock())
{
memcpy(vecOriginValRow2 + (iBlock - 1)*width, curBlock.mh_SubData, nBytePerLine);
memcpy(vecLabelValRow2 + (iBlock - 1)*width, curBlock.mh_LabelVal, nBytePerLine);
}
else
{
memcpy(vecOriginValRow2 + (iBlock - 1)*width, curBlock.mh_SubData, nBytePerLine);
memcpy(vecLabelValRow2 + (iBlock - 1)*width, curBlock.mh_LabelVal, nBytePerLine);
memcpy(vecOriginValRow1 + iBlock*width, curBlock.mh_SubData + width*(curBlock.mnSubTaskHeight - 1), nBytePerLine);
memcpy(vecLabelValRow1 + iBlock*width, curBlock.mh_LabelVal + width*(curBlock.mnSubTaskHeight - 1), nBytePerLine);
}
}
void CBigImgCCL::compactMethod(CuLSM::dataBlock *_curBlock)
{
int _width = mpCclObj->width;
int _taskHeight = mpCclObj->task_height;
int _nBytes_task = sizeof(int) * _width * _taskHeight;
const int numElements = _width * _taskHeight;
int* d_outputLabelOfSubData = mpCclObj->devLabelMap->getDevData();
int* d_inputSrcSubData = mpCclObj->devSrcData->getDevData();
Array2D< Cutype<int> >* d_IsValid = new Array2D< Cutype<int> >(_taskHeight,_width);
_curBlock->mh_curPatchNum = -1; // -1 means not yet reduced, i.e. the number of patches in this block is still unknown
int* d_IsValidData = d_IsValid->getDevData();
getIsValid << <gridDim1,blockDim1 >> >(d_outputLabelOfSubData, d_IsValidData, _width, _taskHeight);
//record the 1-D positions of the rootMap entries, i.e. the positions where d_label[gid] == gid
compact_t_device(&(_curBlock->mh_RootPos), &(_curBlock->mh_curPatchNum),
d_outputLabelOfSubData, d_IsValidData, numElements);
updateDevLabel << <gridDim1,blockDim1 >> > (d_outputLabelOfSubData, _curBlock->mnStartTag, _taskHeight, _width);
_curBlock->mh_LabelVal= (int*)malloc(_nBytes_task);
checkCudaErrors(cudaMemcpy(_curBlock->mh_LabelVal, d_outputLabelOfSubData, _nBytes_task, cudaMemcpyDeviceToHost));
compact_t_device(&(_curBlock->mh_compactLabel), &(_curBlock->mh_curPatchNum),
d_outputLabelOfSubData, d_IsValidData, numElements);
compact_t_device(&(_curBlock->mh_compactSrc), &(_curBlock->mh_curPatchNum),
d_inputSrcSubData, d_IsValidData, numElements);
cout << "h_outputNumOfValidElements: " << _curBlock->mh_curPatchNum << endl;
for (int i = 0; i < _curBlock->mh_curPatchNum; i++)
{
CuLSM::CPatchLabel temp;
temp.nLabel = _curBlock->mh_compactLabel[i];
temp.nType = _curBlock->mh_compactSrc[i];
temp.isUseful = false;
Quf->rootMap.insert(make_pair(temp.nLabel, temp));
}
if (d_IsValid!=NULL)
{
delete d_IsValid;
d_IsValid = NULL;
}
}
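// Sketch of the compaction on a hypothetical 1 x 5 block (not from the original source):
// if the labelled row is d_label = {0, 0, 2, 2, 4} and none of those values is
// NO_USE_CLASS, getIsValid marks positions 0, 2 and 4 (where d_label[gid] == gid),
// so compact_t_device yields mh_curPatchNum = 3 and mh_RootPos = {0, 2, 4}; after
// updateDevLabel adds mnStartTag, the compacted labels and source values feed one
// CPatchLabel node per root into Quf->rootMap.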
void CBigImgCCL::mergePatch()
{
int*h_rowOneValue = vecOriginValRow1;
int*h_rowTwoValue = vecOriginValRow2;
int*h_rowOneLabel = vecLabelValRow1;
int*h_rowTwoLabel = vecLabelValRow2;
int width = pread->cols();
int BGvalue = (int)pread->invalidValue();
clock_t start1, end1;
start1 = clock();
int *mergeArr = NULL; //合并数组
int mergeCount = 0; //合并计数
int i;
for (i = 0; i< blockNum - 1; i++) //mergeStructArraySize = blockNum-1
{
mergeArr = (int *)malloc(sizeof(int)* width * 2 * 3); // room for up to 3 pairs per column, which findMerge8 can emit
if (mergeArr == NULL)
{
printf("\nERROR! Can not allocate space for mergeArr!");
exit(-1);
}
if (G_Config.USE_DIAGS)
{
mergeCount = findMerge8(width, BGvalue, mergeArr, h_rowOneValue + i*width, h_rowTwoValue + i*width, h_rowOneLabel + i*width, h_rowTwoLabel + i*width);
}
else
{
mergeCount = findMerge4(width, BGvalue, mergeArr, h_rowOneValue + i*width, h_rowTwoValue + i*width, h_rowOneLabel + i*width, h_rowTwoLabel + i*width);
}
mergePatchUsingUF(mergeArr, mergeCount, Quf);
free(mergeArr);
mergeArr = NULL;
mergeCount = 0;
}
end1 = clock();
double dur = (double)(end1 - start1);
printf("LineCCLNoSplit Use Time:%f\n", (dur / CLOCKS_PER_SEC));
}
void CBigImgCCL::calInEachBlock()
{
int width = pread->cols();
if(blockNum > 1)
{
for (int iBlock = 0; iBlock < blockNum; iBlock++)
{
CuLSM::dataBlock *curBlock = &(dataBlockArray[iBlock]);
//step 1 GDAL READ
#ifdef _DEBUG
curBlock->loadBlockData(pread,m_src);
#else
curBlock->loadBlockData(pread);
#endif
//step 2 run CCL save label result in -----cuCCLClass: devLabelMap;
mpCclObj = new cuCCLClass(curBlock->mh_SubData, curBlock->mnWidth, curBlock->mnSubTaskHeight, curBlock->mnNodata);
mpCclObj->gpuLineUF(blockDim1,gridDim1);
//step 3 compress the sparse matrics for output
//save every candidate root position into mh_RootPos and the label values into mh_LabelVal
//build the nodes in Quf->rootMap to prepare for merging the blocks
compactMethod(curBlock);
//step 4 record boundary between two blocks for UnionFind merge
recordBoundary(dataBlockArray[iBlock], iBlock, width);
if(mpCclObj!=NULL)
{
delete mpCclObj;
mpCclObj = NULL;
}
}
// Quf->initUF(vecAllLabel);
mergePatch();
Quf->qRelabel(); // relabel rootMap so that the generated labels are consecutive
Quf->qOutputRootMap("patchLevelResult");
//rewrite every pixel of the connected components with the consecutive labels, overwriting mh_LabelVal
cout<<"curContinueLabel======================================" << endl;
for (int iBlock = 0; iBlock < blockNum; iBlock++)
{
CuLSM::dataBlock *curBlock = &(dataBlockArray[iBlock]);
curBlock->getContinueLabelVal(Quf);
}
#ifdef _DEBUG
for (int iBlock = 0; iBlock < blockNum; iBlock++)
{
CuLSM::dataBlock *curBlock = &(dataBlockArray[iBlock]);
int *curContinueLabel = curBlock->mh_LabelVal;
for (int i = 0; i < curBlock->mnSubTaskHeight; ++i)
{
for (int j = 0; j < curBlock->mnWidth; ++j)
{
cout << curContinueLabel[i*curBlock->mnWidth+j]<<"\t";
}
cout<<endl;
}
}
#endif
}
else
{
CuLSM::dataBlock *curBlock = &dataBlockArray[0];
//step 1 GDAL READ
#ifdef _DEBUG
curBlock->loadBlockData(pread,m_src);
#else
curBlock->loadBlockData(pread);
#endif
//step 2 run CCL save label result in -----cuCCLClass: devLabelMap;
mpCclObj = new cuCCLClass(curBlock->mh_SubData, curBlock->mnWidth, curBlock->mnSubTaskHeight, curBlock->mnNodata);
mpCclObj->gpuLineUF(blockDim1,gridDim1);
//step 3 compress the sparse matrics for output
compactMethod(curBlock);
Quf->qRelabel(); // relabel rootMap so that the generated labels are consecutive
Quf->qOutputRootMap("patchLevelResult");
if(mpCclObj!=NULL)
{
delete mpCclObj;
mpCclObj = NULL;
}
//rewrite the connected components with consecutive labels
cout<<"curContinueLabel======================================" << endl;
curBlock->getContinueLabelVal(Quf);
#ifdef _DEBUG
int *curContinueLabel = curBlock->mh_LabelVal;
for (int i = 0; i < curBlock->mnSubTaskHeight; ++i)
{
for (int j = 0; j < curBlock->mnWidth; ++j)
{
cout << curContinueLabel[i*curBlock->mnWidth+j]<<"\t";
}
cout<<endl;
}
#endif
}
}
#endif
int main(int argc, char const *argv[])
{
int array[25] = { 1, 3, 3, 3, 3,
1, 3, 3, 1, 3,
1, 2, 1, 3, 2,
2, 1, 3, 2, 3,
1, 2, 2, 3, 2 };
int* srcTest = new int[25];
for (int i = 0; i < 25; i++)
{
srcTest[i] = array[i];
}
PRead *pread = new PRead(5, 5, 0);
bool useful_class[10] = {1,1,1,1,1,1,1,1,1,1};
// bool useful_class[10] = {0,0,1,1,1,1,1,0,1,1};
//3,4,5,7
// bool useful_class[10] = {0,0,0,0,1,0,0,0,0,0};
//initialize CUDA
int gpuIdx = 1;// select a GPU with compute capability above 3.5
initCUDA(gpuIdx);
//all GPU memory initialization must happen after initCUDA, otherwise random values appear
GlobalConfiguration& config = Singleton<GlobalConfiguration>::Instance();
config.set_USE(useful_class);
config.set_USE_DIAGS(true);
#ifdef _DEBUG
CBigImgCCL *ccl = new CBigImgCCL(pread, srcTest);
#else
CBigImgCCL *ccl = new CBigImgCCL(argv[1]);
#endif
ccl->calInEachBlock();
delete ccl;
return 0;
}
|
7ef4372af58870f55e2ba96ba40c481ced6000df.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Udacity HW 4
//Radix Sorting
#include "utils.h"
#include <thrust/host_vector.h>
#include <fstream>
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
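// Illustrative host-side reference of a single LSB pass (an assumption for clarity,
// not part of the original assignment code): it splits values by the given bit using
// a 2-bin histogram, its exclusive scan, and per-digit relative offsets, mirroring
// what the kernels below do. For {5, 2, 7, 4} at bit 0 it produces {2, 4, 5, 7}.
static void radixPassHostReference(const unsigned int* in, unsigned int* out,
                                   unsigned int n, unsigned int bit)
{
    unsigned int hist[2] = {0, 0};
    for (unsigned int i = 0; i < n; ++i) hist[(in[i] >> bit) & 0x1]++;  // 1) histogram
    unsigned int scan[2] = {0, hist[0]};                                // 2) exclusive scan
    unsigned int offset[2] = {0, 0};
    for (unsigned int i = 0; i < n; ++i)                                // 3) + 4) scatter
    {
        unsigned int d = (in[i] >> bit) & 0x1;
        out[scan[d] + offset[d]++] = in[i];
    }
}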
__global__
void histogram(unsigned int* array, unsigned int* hist, const unsigned int bit, const unsigned int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= n) return;
//if(array[tid] == 0) printf("tid %d\n", tid);
int bin = ((int)array[tid] >> bit) & 0x1;
atomicAdd(&(hist[bin]), 1);
}
__global__
void preda(unsigned int* array, unsigned int* pred_array, const unsigned int bit, const unsigned int n, const unsigned int val)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int pred = ((int)array[tid] >> bit) & 0x1;
pred_array[tid] = (pred == val) ? 1 : 0;
}
__global__
void prefixSum(unsigned int* array, const unsigned int n)
{
extern __shared__ unsigned int tmp[];
int tid = threadIdx.x;
tmp[tid] = (tid>0) ? array[tid-1] : 0;
__syncthreads();
for(int offset = 1; offset < n; offset *= 2)
{
unsigned int lv = tmp[tid];
__syncthreads();
if(tid + offset < n)
{
tmp[tid + offset] += lv;
}
__syncthreads();
}
array[tid] = tmp[tid];
}
__global__
void prefixSumBlock(unsigned int* array, unsigned int* max_array, const unsigned int n)
{
extern __shared__ unsigned int tmp[];
int tid = threadIdx.x;
int toff = blockIdx.x * blockDim.x;
unsigned int orig = array[tid + toff];
tmp[tid] = (tid >0) ? array[tid + toff -1] : 0;
__syncthreads();
for(int offset = 1; offset < blockDim.x; offset *= 2)
{
unsigned int lv = tmp[tid];
__syncthreads();
if(tid + offset < blockDim.x)
{
tmp[tid + offset] += lv;
}
__syncthreads();
}
array[tid + toff] = tmp[tid];
if(tid == blockDim.x - 1) max_array[blockIdx.x] = tmp[tid] + orig;
}
__global__
void prefixSumAdd(unsigned int* array, unsigned int* max_array)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int offset = max_array[blockIdx.x];
array[tid] += offset;
}
__global__
void reorder(unsigned int* in, unsigned int* out, unsigned int* inpos, unsigned int* outpos, unsigned int* hist, unsigned int* preda, const unsigned int bit, const unsigned int val)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int pred = ((int)in[tid] >> bit) & 0x1;
if(pred == val)
{
int pos = hist[val] + preda[tid];
out[pos] = in[tid];
outpos[pos] = inpos[tid];
}
}
__global__
void pada(unsigned int* in, unsigned int numElems, unsigned int val)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= numElems)
in[tid] = val;
}
//#define SAVEI 1
void your_sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems)
{
const unsigned int threadsPerBlock = 1024;
const unsigned int n = exp2((float)((int)log2((float)numElems))+1);
const unsigned int bins = 2;
#ifdef SAVEI
std::ofstream outs;
outs.open("sf.out", std::ofstream::out);
#endif
unsigned int *d_in, *d_inp;
unsigned int *d_out, *d_outp;
hipMalloc((void**)&d_in, sizeof(unsigned int)*n);
hipMemcpy(d_in, d_inputVals, sizeof(unsigned int)*numElems, hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( pada), dim3(n / threadsPerBlock), dim3(threadsPerBlock), 0, 0, d_in, numElems, (unsigned int)(-1));
//hipMemcpy(d_in, h_in, sizeof(unsigned int)*n, hipMemcpyHostToDevice);
hipMalloc((void**)&d_inp, sizeof(unsigned int)*n);
hipMemcpy(d_inp, d_inputPos, sizeof(unsigned int)*numElems, hipMemcpyDeviceToDevice);
hipMalloc((void**)&d_out, sizeof(unsigned int)*n);
hipMalloc((void**)&d_outp, sizeof(unsigned int)*n);
#ifdef SAVEI
unsigned int *h_out = new unsigned int[n];
unsigned int* h_hist = new unsigned int[bins];
unsigned int* h_preda = new unsigned int[n];
unsigned int* h_maxa = new unsigned int[n / threadsPerBlock];
#endif
unsigned int *d_hist, *d_preda, *d_maxa;
hipMalloc((void**)&d_hist, sizeof(unsigned int) * bins);
hipMalloc((void**)&d_preda, sizeof(unsigned int) * n);
hipMalloc((void**)&d_maxa, sizeof(unsigned int) * n / threadsPerBlock);
for(unsigned int bit = 0; bit < 32; ++bit)
{
hipMemset(d_hist, 0, sizeof(unsigned int) * bins);
hipLaunchKernelGGL(( histogram), dim3(n / threadsPerBlock), dim3(threadsPerBlock), 0, 0, d_in, d_hist, bit, n);
#ifdef SAVEI
hipMemcpy(h_hist, d_hist, sizeof(unsigned int)* bins, hipMemcpyDeviceToHost);
outs << "Hist of bit " << bit << ": " << h_hist[0] << ", " << h_hist[1] << std::endl;
#endif
hipLaunchKernelGGL(( prefixSum), dim3(1), dim3(bins), sizeof(unsigned int) * bins, 0, d_hist, bins);
#ifdef SAVEI
hipMemcpy(h_hist, d_hist, sizeof(unsigned int)* bins, hipMemcpyDeviceToHost);
outs << "PrefSum Hist of bit " << bit << ": " << h_hist[0] << ", " << h_hist[1] << std::endl;
#endif
// pred val = 0
hipLaunchKernelGGL(( preda), dim3(n / threadsPerBlock), dim3(threadsPerBlock), 0, 0, d_in, d_preda, bit, n, 0);
#ifdef SAVEI
hipMemcpy(h_preda, d_preda, sizeof(unsigned int) * n, hipMemcpyDeviceToHost);
outs << "Predicate array: ";
for(unsigned int i = 0; i < n; ++i)
{
outs << h_preda[i] << ", ";
}
outs << std::endl;
#endif
hipLaunchKernelGGL(( prefixSumBlock), dim3(n / threadsPerBlock), dim3(threadsPerBlock), sizeof(unsigned int) * threadsPerBlock, 0, d_preda, d_maxa, n);
#ifdef SAVEI
hipMemcpy(h_maxa, d_maxa, sizeof(unsigned int) * n / threadsPerBlock, hipMemcpyDeviceToHost);
outs << "Max array: ";
for(unsigned int i = 0; i < n /threadsPerBlock; ++i)
{
outs << h_maxa[i] << ", ";
}
outs << std::endl;
hipMemcpy(h_preda, d_preda, sizeof(unsigned int) * n, hipMemcpyDeviceToHost);
outs << "Predicate array pref sum: ";
for(unsigned int i = 0; i < n; ++i)
{
outs << h_preda[i] << ", ";
}
outs << std::endl;
outs << n / threadsPerBlock << std::endl;
#endif
hipLaunchKernelGGL(( prefixSum), dim3(1), dim3(n / threadsPerBlock), sizeof(unsigned int) * threadsPerBlock, 0, d_maxa, n / threadsPerBlock);
#ifdef SAVEI
hipMemcpy(h_maxa, d_maxa, sizeof(unsigned int) * n / threadsPerBlock, hipMemcpyDeviceToHost);
outs << "Max array pref sum: ";
for(unsigned int i = 0; i < n /threadsPerBlock; ++i)
{
outs << h_maxa[i] << ", ";
}
outs << std::endl;
#endif
hipLaunchKernelGGL(( prefixSumAdd), dim3(n / threadsPerBlock), dim3(threadsPerBlock), 0, 0, d_preda, d_maxa);
#ifdef SAVEI
hipMemcpy(h_preda, d_preda, sizeof(unsigned int) * n, hipMemcpyDeviceToHost);
outs << "Predicate array sum: ";
for(unsigned int i = 0; i < n; ++i)
{
outs << h_preda[i] << ", ";
}
outs << std::endl;
#endif
hipLaunchKernelGGL(( reorder), dim3(n / threadsPerBlock), dim3(threadsPerBlock), 0, 0, d_in, d_out, d_inp, d_outp, d_hist, d_preda, bit, 0);
#ifdef SAVEI
hipMemcpy(h_out, d_out, sizeof(unsigned int) * n, hipMemcpyDeviceToHost);
outs << "Reordered array along bit " << bit << " pred val: " << 0 << ": " ;
for(unsigned int i = 0; i < n; ++i)
{
outs << h_out[i] << ", ";
}
outs << std::endl;
#endif
hipMemset(d_hist, 0, sizeof(unsigned int) * bins);
hipLaunchKernelGGL(( histogram), dim3(n / threadsPerBlock), dim3(threadsPerBlock), 0, 0, d_in, d_hist, bit, n);
#ifdef SAVEI
hipMemcpy(h_hist, d_hist, sizeof(unsigned int)* bins, hipMemcpyDeviceToHost);
outs << "Hist of bit " << bit << ": " << h_hist[0] << ", " << h_hist[1] << std::endl;
#endif
hipLaunchKernelGGL(( prefixSum), dim3(1), dim3(bins), sizeof(unsigned int) * bins, 0, d_hist, bins);
#ifdef SAVEI
hipMemcpy(h_hist, d_hist, sizeof(unsigned int)* bins, hipMemcpyDeviceToHost);
outs << "PrefSum Hist of bit " << bit << ": " << h_hist[0] << ", " << h_hist[1] << std::endl;
#endif
// pred val = 1
hipLaunchKernelGGL(( preda), dim3(n / threadsPerBlock), dim3(threadsPerBlock), 0, 0, d_in, d_preda, bit, n, 1);
#ifdef SAVEI
hipMemcpy(h_preda, d_preda, sizeof(unsigned int) * n, hipMemcpyDeviceToHost);
outs << "Predicate array: ";
for(unsigned int i = 0; i < n; ++i)
{
outs << h_preda[i] << ", ";
}
outs << std::endl;
#endif
hipLaunchKernelGGL(( prefixSumBlock), dim3(n / threadsPerBlock), dim3(threadsPerBlock), sizeof(unsigned int) * threadsPerBlock, 0, d_preda, d_maxa, n);
#ifdef SAVEI
hipMemcpy(h_maxa, d_maxa, sizeof(unsigned int) * n / threadsPerBlock, hipMemcpyDeviceToHost);
outs << "Max array: ";
for(unsigned int i = 0; i < n /threadsPerBlock; ++i)
{
outs << h_maxa[i] << ", ";
}
outs << std::endl;
hipMemcpy(h_preda, d_preda, sizeof(unsigned int) * n, hipMemcpyDeviceToHost);
outs << "Predicate array pref sum: ";
for(unsigned int i = 0; i < n; ++i)
{
outs << h_preda[i] << ", ";
}
outs << std::endl;
#endif
hipLaunchKernelGGL(( prefixSum), dim3(1), dim3(n / threadsPerBlock), sizeof(unsigned int) * threadsPerBlock, 0, d_maxa, n / threadsPerBlock);
#ifdef SAVEI
hipMemcpy(h_maxa, d_maxa, sizeof(unsigned int) * n / threadsPerBlock, hipMemcpyDeviceToHost);
outs << "Max array pref sum: ";
for(unsigned int i = 0; i < n /threadsPerBlock; ++i)
{
outs << h_maxa[i] << ", ";
}
outs << std::endl;
#endif
hipLaunchKernelGGL(( prefixSumAdd), dim3(n / threadsPerBlock), dim3(threadsPerBlock), 0, 0, d_preda, d_maxa);
#ifdef SAVEI
hipMemcpy(h_preda, d_preda, sizeof(unsigned int) * n, hipMemcpyDeviceToHost);
outs << "Predicate array sum: ";
for(unsigned int i = 0; i < n; ++i)
{
outs << h_preda[i] << ", ";
}
outs << std::endl;
#endif
hipLaunchKernelGGL(( reorder), dim3(n / threadsPerBlock), dim3(threadsPerBlock), 0, 0, d_in, d_out, d_inp, d_outp, d_hist, d_preda, bit, 1);
#ifdef SAVEI
hipMemcpy(h_out, d_out, sizeof(unsigned int) * n, hipMemcpyDeviceToHost);
outs << "Reordered array along bit " << bit << " pred val: " << 1 << ": " ;
for(unsigned int i = 0; i < n; ++i)
{
outs << h_out[i] << ", ";
}
outs << std::endl;
#endif
hipMemcpy(d_in, d_out, sizeof(unsigned int) * n, hipMemcpyDeviceToDevice);
hipMemcpy(d_inp, d_outp, sizeof(unsigned int) * n, hipMemcpyDeviceToDevice);
}
hipMemcpy(d_outputVals, d_out, sizeof(unsigned int)*numElems, hipMemcpyDeviceToDevice);
hipMemcpy(d_outputPos, d_outp, sizeof(unsigned int)*numElems, hipMemcpyDeviceToDevice);
#ifdef SAVEI
outs.close();
delete[] h_out;
delete[] h_hist;
delete[] h_preda;
delete[] h_maxa;
#endif
hipFree(d_in);
hipFree(d_inp);
hipFree(d_out);
hipFree(d_outp);
hipFree(d_hist);
hipFree(d_preda);
hipFree(d_maxa);
}
|
7ef4372af58870f55e2ba96ba40c481ced6000df.cu
|
//Udacity HW 4
//Radix Sorting
#include "utils.h"
#include <thrust/host_vector.h>
#include <fstream>
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
__global__
void histogram(unsigned int* array, unsigned int* hist, const unsigned int bit, const unsigned int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= n) return;
//if(array[tid] == 0) printf("tid %d\n", tid);
int bin = ((int)array[tid] >> bit) & 0x1;
atomicAdd(&(hist[bin]), 1);
}
__global__
void preda(unsigned int* array, unsigned int* pred_array, const unsigned int bit, const unsigned int n, const unsigned int val)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int pred = ((int)array[tid] >> bit) & 0x1;
pred_array[tid] = (pred == val) ? 1 : 0;
}
__global__
void prefixSum(unsigned int* array, const unsigned int n)
{
extern __shared__ unsigned int tmp[];
int tid = threadIdx.x;
tmp[tid] = (tid>0) ? array[tid-1] : 0;
__syncthreads();
for(int offset = 1; offset < n; offset *= 2)
{
unsigned int lv = tmp[tid];
__syncthreads();
if(tid + offset < n)
{
tmp[tid + offset] += lv;
}
__syncthreads();
}
array[tid] = tmp[tid];
}
__global__
void prefixSumBlock(unsigned int* array, unsigned int* max_array, const unsigned int n)
{
extern __shared__ unsigned int tmp[];
int tid = threadIdx.x;
int toff = blockIdx.x * blockDim.x;
unsigned int orig = array[tid + toff];
tmp[tid] = (tid >0) ? array[tid + toff -1] : 0;
__syncthreads();
for(int offset = 1; offset < blockDim.x; offset *= 2)
{
unsigned int lv = tmp[tid];
__syncthreads();
if(tid + offset < blockDim.x)
{
tmp[tid + offset] += lv;
}
__syncthreads();
}
array[tid + toff] = tmp[tid];
if(tid == blockDim.x - 1) max_array[blockIdx.x] = tmp[tid] + orig;
}
__global__
void prefixSumAdd(unsigned int* array, unsigned int* max_array)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int offset = max_array[blockIdx.x];
array[tid] += offset;
}
__global__
void reorder(unsigned int* in, unsigned int* out, unsigned int* inpos, unsigned int* outpos, unsigned int* hist, unsigned int* preda, const unsigned int bit, const unsigned int val)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int pred = ((int)in[tid] >> bit) & 0x1;
if(pred == val)
{
int pos = hist[val] + preda[tid];
out[pos] = in[tid];
outpos[pos] = inpos[tid];
}
}
__global__
void pada(unsigned int* in, unsigned int numElems, unsigned int val)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= numElems)
in[tid] = val;
}
//#define SAVEI 1
void your_sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems)
{
const unsigned int threadsPerBlock = 1024;
const unsigned int n = exp2((float)((int)log2((float)numElems))+1);
const unsigned int bins = 2;
#ifdef SAVEI
std::ofstream outs;
outs.open("sf.out", std::ofstream::out);
#endif
unsigned int *d_in, *d_inp;
unsigned int *d_out, *d_outp;
cudaMalloc((void**)&d_in, sizeof(unsigned int)*n);
cudaMemcpy(d_in, d_inputVals, sizeof(unsigned int)*numElems, cudaMemcpyDeviceToDevice);
pada<<<n / threadsPerBlock, threadsPerBlock>>>(d_in, numElems, (unsigned int)(-1));
//cudaMemcpy(d_in, h_in, sizeof(unsigned int)*n, cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_inp, sizeof(unsigned int)*n);
cudaMemcpy(d_inp, d_inputPos, sizeof(unsigned int)*numElems, cudaMemcpyDeviceToDevice);
cudaMalloc((void**)&d_out, sizeof(unsigned int)*n);
cudaMalloc((void**)&d_outp, sizeof(unsigned int)*n);
#ifdef SAVEI
unsigned int *h_out = new unsigned int[n];
unsigned int* h_hist = new unsigned int[bins];
unsigned int* h_preda = new unsigned int[n];
unsigned int* h_maxa = new unsigned int[n / threadsPerBlock];
#endif
unsigned int *d_hist, *d_preda, *d_maxa;
cudaMalloc((void**)&d_hist, sizeof(unsigned int) * bins);
cudaMalloc((void**)&d_preda, sizeof(unsigned int) * n);
cudaMalloc((void**)&d_maxa, sizeof(unsigned int) * n / threadsPerBlock);
for(unsigned int bit = 0; bit < 32; ++bit)
{
cudaMemset(d_hist, 0, sizeof(unsigned int) * bins);
histogram<<<n / threadsPerBlock, threadsPerBlock>>>(d_in, d_hist, bit, n);
#ifdef SAVEI
cudaMemcpy(h_hist, d_hist, sizeof(unsigned int)* bins, cudaMemcpyDeviceToHost);
outs << "Hist of bit " << bit << ": " << h_hist[0] << ", " << h_hist[1] << std::endl;
#endif
prefixSum<<<1, bins, sizeof(unsigned int) * bins>>>(d_hist, bins);
#ifdef SAVEI
cudaMemcpy(h_hist, d_hist, sizeof(unsigned int)* bins, cudaMemcpyDeviceToHost);
outs << "PrefSum Hist of bit " << bit << ": " << h_hist[0] << ", " << h_hist[1] << std::endl;
#endif
// pred val = 0
preda<<<n / threadsPerBlock, threadsPerBlock>>>(d_in, d_preda, bit, n, 0);
#ifdef SAVEI
cudaMemcpy(h_preda, d_preda, sizeof(unsigned int) * n, cudaMemcpyDeviceToHost);
outs << "Predicate array: ";
for(unsigned int i = 0; i < n; ++i)
{
outs << h_preda[i] << ", ";
}
outs << std::endl;
#endif
prefixSumBlock<<<n / threadsPerBlock, threadsPerBlock, sizeof(unsigned int) * threadsPerBlock>>>(d_preda, d_maxa, n);
#ifdef SAVEI
cudaMemcpy(h_maxa, d_maxa, sizeof(unsigned int) * n / threadsPerBlock, cudaMemcpyDeviceToHost);
outs << "Max array: ";
for(unsigned int i = 0; i < n /threadsPerBlock; ++i)
{
outs << h_maxa[i] << ", ";
}
outs << std::endl;
cudaMemcpy(h_preda, d_preda, sizeof(unsigned int) * n, cudaMemcpyDeviceToHost);
outs << "Predicate array pref sum: ";
for(unsigned int i = 0; i < n; ++i)
{
outs << h_preda[i] << ", ";
}
outs << std::endl;
outs << n / threadsPerBlock << std::endl;
#endif
prefixSum<<<1, n / threadsPerBlock, sizeof(unsigned int) * threadsPerBlock>>>(d_maxa, n / threadsPerBlock);
#ifdef SAVEI
cudaMemcpy(h_maxa, d_maxa, sizeof(unsigned int) * n / threadsPerBlock, cudaMemcpyDeviceToHost);
outs << "Max array pref sum: ";
for(unsigned int i = 0; i < n /threadsPerBlock; ++i)
{
outs << h_maxa[i] << ", ";
}
outs << std::endl;
#endif
prefixSumAdd<<<n / threadsPerBlock, threadsPerBlock>>>(d_preda, d_maxa);
#ifdef SAVEI
cudaMemcpy(h_preda, d_preda, sizeof(unsigned int) * n, cudaMemcpyDeviceToHost);
outs << "Predicate array sum: ";
for(unsigned int i = 0; i < n; ++i)
{
outs << h_preda[i] << ", ";
}
outs << std::endl;
#endif
reorder<<<n / threadsPerBlock, threadsPerBlock>>>(d_in, d_out, d_inp, d_outp, d_hist, d_preda, bit, 0);
#ifdef SAVEI
cudaMemcpy(h_out, d_out, sizeof(unsigned int) * n, cudaMemcpyDeviceToHost);
outs << "Reordered array along bit " << bit << " pred val: " << 0 << ": " ;
for(unsigned int i = 0; i < n; ++i)
{
outs << h_out[i] << ", ";
}
outs << std::endl;
#endif
cudaMemset(d_hist, 0, sizeof(unsigned int) * bins);
histogram<<<n / threadsPerBlock, threadsPerBlock>>>(d_in, d_hist, bit, n);
#ifdef SAVEI
cudaMemcpy(h_hist, d_hist, sizeof(unsigned int)* bins, cudaMemcpyDeviceToHost);
outs << "Hist of bit " << bit << ": " << h_hist[0] << ", " << h_hist[1] << std::endl;
#endif
prefixSum<<<1, bins, sizeof(unsigned int) * bins>>>(d_hist, bins);
#ifdef SAVEI
cudaMemcpy(h_hist, d_hist, sizeof(unsigned int)* bins, cudaMemcpyDeviceToHost);
outs << "PrefSum Hist of bit " << bit << ": " << h_hist[0] << ", " << h_hist[1] << std::endl;
#endif
// pred val = 1
preda<<<n / threadsPerBlock, threadsPerBlock>>>(d_in, d_preda, bit, n, 1);
#ifdef SAVEI
cudaMemcpy(h_preda, d_preda, sizeof(unsigned int) * n, cudaMemcpyDeviceToHost);
outs << "Predicate array: ";
for(unsigned int i = 0; i < n; ++i)
{
outs << h_preda[i] << ", ";
}
outs << std::endl;
#endif
prefixSumBlock<<<n / threadsPerBlock, threadsPerBlock, sizeof(unsigned int) * threadsPerBlock>>>(d_preda, d_maxa, n);
#ifdef SAVEI
cudaMemcpy(h_maxa, d_maxa, sizeof(unsigned int) * n / threadsPerBlock, cudaMemcpyDeviceToHost);
outs << "Max array: ";
for(unsigned int i = 0; i < n /threadsPerBlock; ++i)
{
outs << h_maxa[i] << ", ";
}
outs << std::endl;
cudaMemcpy(h_preda, d_preda, sizeof(unsigned int) * n, cudaMemcpyDeviceToHost);
outs << "Predicate array pref sum: ";
for(unsigned int i = 0; i < n; ++i)
{
outs << h_preda[i] << ", ";
}
outs << std::endl;
#endif
prefixSum<<<1, n / threadsPerBlock, sizeof(unsigned int) * threadsPerBlock>>>(d_maxa, n / threadsPerBlock);
#ifdef SAVEI
cudaMemcpy(h_maxa, d_maxa, sizeof(unsigned int) * n / threadsPerBlock, cudaMemcpyDeviceToHost);
outs << "Max array pref sum: ";
for(unsigned int i = 0; i < n /threadsPerBlock; ++i)
{
outs << h_maxa[i] << ", ";
}
outs << std::endl;
#endif
prefixSumAdd<<<n / threadsPerBlock, threadsPerBlock>>>(d_preda, d_maxa);
#ifdef SAVEI
cudaMemcpy(h_preda, d_preda, sizeof(unsigned int) * n, cudaMemcpyDeviceToHost);
outs << "Predicate array sum: ";
for(unsigned int i = 0; i < n; ++i)
{
outs << h_preda[i] << ", ";
}
outs << std::endl;
#endif
reorder<<<n / threadsPerBlock, threadsPerBlock>>>(d_in, d_out, d_inp, d_outp, d_hist, d_preda, bit, 1);
#ifdef SAVEI
cudaMemcpy(h_out, d_out, sizeof(unsigned int) * n, cudaMemcpyDeviceToHost);
outs << "Reordered array along bit " << bit << " pred val: " << 1 << ": " ;
for(unsigned int i = 0; i < n; ++i)
{
outs << h_out[i] << ", ";
}
outs << std::endl;
#endif
cudaMemcpy(d_in, d_out, sizeof(unsigned int) * n, cudaMemcpyDeviceToDevice);
cudaMemcpy(d_inp, d_outp, sizeof(unsigned int) * n, cudaMemcpyDeviceToDevice);
}
cudaMemcpy(d_outputVals, d_out, sizeof(unsigned int)*numElems, cudaMemcpyDeviceToDevice);
cudaMemcpy(d_outputPos, d_outp, sizeof(unsigned int)*numElems, cudaMemcpyDeviceToDevice);
#ifdef SAVEI
outs.close();
delete[] h_out;
delete[] h_hist;
delete[] h_preda;
delete[] h_maxa;
#endif
cudaFree(d_in);
cudaFree(d_inp);
cudaFree(d_out);
cudaFree(d_outp);
cudaFree(d_hist);
cudaFree(d_preda);
cudaFree(d_maxa);
}
|
52387a1d95700bb9d06888e8aeb7e086178f9252.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "worldCreator.cuh"
#include "worldDefinition.h"
#include <cstdlib>
#include <iostream>
#include <cutil.h>
using namespace std;
/*
Initialize variables
*/
template <class T>
worldCreator<T>::worldCreator(){
this->field = NULL;
worldDefinition<T> world;
this->min_x = world.getMin_X();
this->max_x = world.getMax_X();
this->min_y = world.getMin_Y();
this->max_y = world.getMax_Y();
T x_range = this->max_x - this->min_x + 1;
T y_range = this->max_y - this->min_y + 1;
this->N = x_range * y_range;
this->field = (int*) malloc(sizeof(T)*(this->N));// freed in the destructor
if (this->field == NULL){
cout << "Simulator can't get a pointer in worldCreator." << endl;
}
}
template worldCreator<int>::worldCreator();
template worldCreator<float>::worldCreator();
template worldCreator<double>::worldCreator();
/*
Free the malloced pointer
*/
template <class T>
worldCreator<T>::~worldCreator(){
if (field != NULL){
free(field);
}
}
template worldCreator<int>::~worldCreator();
template worldCreator<float>::~worldCreator();
template worldCreator<double>::~worldCreator();
/*
Set the range defined in worldDefinition
*/
template <class T>
void worldCreator<T>::setRange(T minX, T maxX, T minY, T maxY){
this->min_x = minX;
this->max_x = maxX;
this->min_y = minY;
this->max_y = maxY;
}
template void worldCreator<int>::setRange(int minX, int maxX, int minY, int maxY);
template void worldCreator<float>::setRange(float minX, float maxX, float minY, float maxY);
template void worldCreator<double>::setRange(double minX, double maxX, double minY, double maxY);
template <class T>
__global__ void fieldSet(T* field, int N){
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < N){
field[tid] = 0;
}
}
template __global__ void fieldSet<int>(int* field, int N);
template __global__ void fieldSet<float>(float* field, int N);
template __global__ void fieldSet<double>(double* field, int N);
/*
Make field with the range!
We must set detail information at the other place.
*/
template <class T>
void worldCreator<T>::make_2D_World(){
T* dev_field;
CUDA_SAFE_CALL(hipMalloc((void**)&dev_field, sizeof(T)*N));
dim3 blocks(1024, 1 ,1);
dim3 grids((N + 1023)/1024, 1, 1);
hipLaunchKernelGGL(( fieldSet), dim3(grids), dim3(blocks), 0, 0, dev_field, N);
CUDA_SAFE_CALL(hipMemcpy(field, dev_field, sizeof(T)*N, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipFree(dev_field));
}
template void worldCreator<int>::make_2D_World();
template void worldCreator<float>::make_2D_World();
template void worldCreator<double>::make_2D_World();
template <class T>
T worldCreator<T>::getMax_X(){
return this->max_x;
}
template int worldCreator<int>::getMax_X();
template float worldCreator<float>::getMax_X();
template double worldCreator<double>::getMax_X();
template <class T>
T worldCreator<T>::getMin_X(){
return this->min_x;
}
template int worldCreator<int>::getMin_X();
template float worldCreator<float>::getMin_X();
template double worldCreator<double>::getMin_X();
template <class T>
T worldCreator<T>::getMax_Y(){
return this->max_y;
}
template int worldCreator<int>::getMax_Y();
template float worldCreator<float>::getMax_Y();
template double worldCreator<double>::getMax_Y();
template <class T>
T worldCreator<T>::getMin_Y(){
return this->min_y;
}
template int worldCreator<int>::getMin_Y();
template float worldCreator<float>::getMin_Y();
template double worldCreator<double>::getMin_Y();
template <class T>
int* worldCreator<T>::getField(){
return &(this->field[0]);
}
template int* worldCreator<int>::getField();
template int* worldCreator<float>::getField();
template int* worldCreator<double>::getField();
template <class T>
void worldCreator<T>::setField(int* dataP, int N){
this->field = (int*)malloc(sizeof(int)*N);
for (int i=0; i<N; ++i){
this->field[i] = dataP[i];
}
}
template void worldCreator<int>::setField(int* dataP, int N);
template void worldCreator<float>::setField(int* dataP, int N);
template void worldCreator<double>::setField(int* dataP, int N);
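// --- Editor's usage sketch (not part of the original file) ---------------------
// How the class above might be driven; worldCreator.cuh is assumed to declare the
// members used in this translation unit.
//
//   worldCreator<int> world;         // range is taken from worldDefinition
//   world.make_2D_World();           // zero-initialises the N cells on the GPU
//   int* cells = world.getField();   // host pointer to (max_x-min_x+1)*(max_y-min_y+1) cells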
|
52387a1d95700bb9d06888e8aeb7e086178f9252.cu
|
#include "worldCreator.cuh"
#include "worldDefinition.h"
#include <cstdlib>
#include <iostream>
#include <cutil.h>
using namespace std;
/*
Initialize variables
*/
template <class T>
worldCreator<T>::worldCreator(){
this->field = NULL;
worldDefinition<T> world;
this->min_x = world.getMin_X();
this->max_x = world.getMax_X();
this->min_y = world.getMin_Y();
this->max_y = world.getMax_Y();
T x_range = this->max_x - this->min_x + 1;
T y_range = this->max_y - this->min_y + 1;
this->N = x_range * y_range;
this->field = (int*) malloc(sizeof(T)*(this->N));// freed in the destructor
if (this->field == NULL){
cout << "Simulator can't get a pointer in worldCreator." << endl;
}
}
template worldCreator<int>::worldCreator();
template worldCreator<float>::worldCreator();
template worldCreator<double>::worldCreator();
/*
Free the malloced pointer
*/
template <class T>
worldCreator<T>::~worldCreator(){
if (field != NULL){
free(field);
}
}
template worldCreator<int>::~worldCreator();
template worldCreator<float>::~worldCreator();
template worldCreator<double>::~worldCreator();
/*
Set the range defined in worldDefinition
*/
template <class T>
void worldCreator<T>::setRange(T minX, T maxX, T minY, T maxY){
this->min_x = minX;
this->max_x = maxX;
this->min_y = minY;
this->max_y = maxY;
}
template void worldCreator<int>::setRange(int minX, int maxX, int minY, int maxY);
template void worldCreator<float>::setRange(float minX, float maxX, float minY, float maxY);
template void worldCreator<double>::setRange(double minX, double maxX, double minY, double maxY);
template <class T>
__global__ void fieldSet(T* field, int N){
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < N){
field[tid] = 0;
}
}
template __global__ void fieldSet<int>(int* field, int N);
template __global__ void fieldSet<float>(float* field, int N);
template __global__ void fieldSet<double>(double* field, int N);
/*
Make field with the range!
We must set detail information at the other place.
*/
template <class T>
void worldCreator<T>::make_2D_World(){
T* dev_field;
CUDA_SAFE_CALL(cudaMalloc((void**)&dev_field, sizeof(T)*N));
dim3 blocks(1024, 1 ,1);
dim3 grids((N + 1023)/1024, 1, 1);
fieldSet<<<grids, blocks>>>(dev_field, N);
CUDA_SAFE_CALL(cudaMemcpy(field, dev_field, sizeof(T)*N, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaFree(dev_field));
}
template void worldCreator<int>::make_2D_World();
template void worldCreator<float>::make_2D_World();
template void worldCreator<double>::make_2D_World();
template <class T>
T worldCreator<T>::getMax_X(){
return this->max_x;
}
template int worldCreator<int>::getMax_X();
template float worldCreator<float>::getMax_X();
template double worldCreator<double>::getMax_X();
template <class T>
T worldCreator<T>::getMin_X(){
return this->min_x;
}
template int worldCreator<int>::getMin_X();
template float worldCreator<float>::getMin_X();
template double worldCreator<double>::getMin_X();
template <class T>
T worldCreator<T>::getMax_Y(){
return this->max_y;
}
template int worldCreator<int>::getMax_Y();
template float worldCreator<float>::getMax_Y();
template double worldCreator<double>::getMax_Y();
template <class T>
T worldCreator<T>::getMin_Y(){
return this->min_y;
}
template int worldCreator<int>::getMin_Y();
template float worldCreator<float>::getMin_Y();
template double worldCreator<double>::getMin_Y();
template <class T>
int* worldCreator<T>::getField(){
return &(this->field[0]);
}
template int* worldCreator<int>::getField();
template int* worldCreator<float>::getField();
template int* worldCreator<double>::getField();
template <class T>
void worldCreator<T>::setField(int* dataP, int N){
this->field = (int*)malloc(sizeof(int)*N);
for (int i=0; i<N; ++i){
this->field[i] = dataP[i];
}
}
template void worldCreator<int>::setField(int* dataP, int N);
template void worldCreator<float>::setField(int* dataP, int N);
template void worldCreator<double>::setField(int* dataP, int N);
|
c7ec1772b92dce1c218f809c878d0cca0feedea1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "AdaptRefVectorKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int cell = 1;
float *referenceVector = NULL;
hipMalloc(&referenceVector, XSIZE*YSIZE);
float oldErrorFraction = 1;
float youngErrorFraction = 1;
float decayFactor = 1;
int *winningCount = NULL;
hipMalloc(&winningCount, XSIZE*YSIZE);
float *difference = NULL;
hipMalloc(&difference, XSIZE*YSIZE);
int inputSize = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((AdaptRefVectorKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, cell, referenceVector, oldErrorFraction, youngErrorFraction, decayFactor, winningCount, difference, inputSize);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((AdaptRefVectorKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, cell, referenceVector, oldErrorFraction, youngErrorFraction, decayFactor, winningCount, difference, inputSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((AdaptRefVectorKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, cell, referenceVector, oldErrorFraction, youngErrorFraction, decayFactor, winningCount, difference, inputSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
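// --- Editor's note (not part of the original file) -----------------------------
// Kernel launches are asynchronous, and there is no hipDeviceSynchronize() between
// the 1000-iteration loop and the second steady_clock::now() above, so the printed
// time mostly reflects launch/enqueue overhead rather than completed kernel work.
// A sketch of a wall-clock measurement that includes execution, reusing the names
// from main():
//
//   auto start = steady_clock::now();
//   for (int i = 0; i < 1000; ++i)
//       hipLaunchKernelGGL((AdaptRefVectorKernel), dim3(gridBlock), dim3(threadBlock), 0, 0,
//                          cell, referenceVector, oldErrorFraction, youngErrorFraction,
//                          decayFactor, winningCount, difference, inputSize);
//   hipDeviceSynchronize();          // wait for all 1000 launches to finish
//   auto end = steady_clock::now();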
|
c7ec1772b92dce1c218f809c878d0cca0feedea1.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "AdaptRefVectorKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int cell = 1;
float *referenceVector = NULL;
cudaMalloc(&referenceVector, XSIZE*YSIZE);
float oldErrorFraction = 1;
float youngErrorFraction = 1;
float decayFactor = 1;
int *winningCount = NULL;
cudaMalloc(&winningCount, XSIZE*YSIZE);
float *difference = NULL;
cudaMalloc(&difference, XSIZE*YSIZE);
int inputSize = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
AdaptRefVectorKernel<<<gridBlock,threadBlock>>>(cell,referenceVector,oldErrorFraction,youngErrorFraction,decayFactor,winningCount,difference,inputSize);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
AdaptRefVectorKernel<<<gridBlock,threadBlock>>>(cell,referenceVector,oldErrorFraction,youngErrorFraction,decayFactor,winningCount,difference,inputSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
AdaptRefVectorKernel<<<gridBlock,threadBlock>>>(cell,referenceVector,oldErrorFraction,youngErrorFraction,decayFactor,winningCount,difference,inputSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
ad13631b46ddeaa0d577fca1f542498c12577211.hip
|
// !!! This is a file automatically generated by hipify!!!
#undef ALTCPU
#include <sys/time.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <ctime>
#include <vector>
#include <iostream>
#include "../../ml_optimiser.h"
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include "../acc_ptr.h"
#include "../acc_projector.h"
#include "../acc_backprojector.h"
#include "../acc_projector_plan.h"
#include "cuda_benchmark_utils.h"
#include "cuda_kernels/helper.cuh"
#include "cuda_kernels/diff2.cuh"
#include "cuda_kernels/wavg.cuh"
#include "cuda_mem_utils.h"
#include "cuda_fft.h"
#include "../data_types.h"
#include "../../complex.h"
#include "../../helix.h"
#include "../../error.h"
#include <fstream>
#include "../../parallel.h"
#include <signal.h>
#include <map>
#ifdef CUDA_FORCESTL
#include "cuda_utils_stl.cuh"
#else
#include "cuda_utils_cub.cuh"
#endif
#include "../utilities.h"
#include "../utilities_impl.h"
#include "../acc_ml_optimiser.h"
#include "cuda_ml_optimiser.h"
#include "../acc_helper_functions.h"
#include "../acc_ml_optimiser_impl.h"
// ------------------------------- Some explicit template instantiations
template __global__ void CudaKernels::cuda_kernel_translate2D<XFLOAT>(XFLOAT *,
XFLOAT*, int, int, int, int, int);
template __global__ void CudaKernels::cuda_kernel_translate3D<XFLOAT>(XFLOAT *,
XFLOAT *, int, int, int, int, int, int, int);
template __global__ void cuda_kernel_multi<XFLOAT>( XFLOAT *,
XFLOAT *, XFLOAT, int);
template __global__ void CudaKernels::cuda_kernel_multi<XFLOAT>( XFLOAT *,
XFLOAT, int);
template __global__ void cuda_kernel_multi<XFLOAT>( XFLOAT *, XFLOAT *,
XFLOAT *, XFLOAT, int);
// ----------------------------------------------------------------------
// High-level CUDA objects
size_t MlDeviceBundle::checkFixedSizedObjects(int shares)
{
int devCount;
size_t BoxLimit;
HANDLE_ERROR(hipGetDeviceCount(&devCount));
if(device_id >= devCount)
CRITICAL(ERR_GPUID);
HANDLE_ERROR(hipSetDevice(device_id));
size_t free(0), total(0);
DEBUG_HANDLE_ERROR(hipMemGetInfo( &free, &total ));
float margin(1.05);
BoxLimit = pow(free/(margin*2.5*sizeof(XFLOAT)*((float)shares)),(1/3.0)) / ((float) baseMLO->mymodel.padding_factor);
// printf("device_id : %d \n",device_id);
// printf("free :%ld margin %f shares %d BoxLimit %ld baseMLO->mymodel.padding_factor %f \n",free,margin, shares, BoxLimit,baseMLO->mymodel.padding_factor);
//size_t BytesNeeded = ((float)shares)*margin*2.5*sizeof(XFLOAT)*pow((baseMLO->mymodel.ori_size*baseMLO->mymodel.padding_factor),3);
return(BoxLimit);
}
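// Editor's worked example (not part of the original file): with, say, free = 8 GB,
// shares = 4 and a 4-byte XFLOAT, the per-share voxel budget is
// 8e9 / (1.05 * 2.5 * 4 * 4) ~= 1.9e8, so BoxLimit ~= cbrt(1.9e8) ~= 575 divided by
// the padding factor (a padding factor of 2 would give roughly 287 pixels per box edge).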
void MlDeviceBundle::setupFixedSizedObjects(int iter,FileName fn_out)
{
int devCount;
HANDLE_ERROR(hipGetDeviceCount(&devCount));
if(device_id >= devCount)
{
//std::cerr << " using device_id=" << device_id << " (device no. " << device_id+1 << ") which is higher than the available number of devices=" << devCount << std::endl;
CRITICAL(ERR_GPUID);
}
else
HANDLE_ERROR(hipSetDevice(device_id));
//Can we pre-generate projector plan and corresponding euler matrices for all particles
if (baseMLO->do_skip_align || baseMLO->do_skip_rotate || baseMLO->do_auto_refine || baseMLO->mymodel.orientational_prior_mode != NOPRIOR)
generateProjectionPlanOnTheFly = true;
else
generateProjectionPlanOnTheFly = false;
unsigned nr_proj = baseMLO->mymodel.PPref.size();
unsigned nr_bproj = baseMLO->wsum_model.BPref.size();
projectors.resize(nr_proj);
backprojectors.resize(nr_bproj);
/*======================================================
PROJECTOR AND BACKPROJECTOR
======================================================*/
for (int imodel = 0; imodel < nr_proj; imodel++)
{
#ifdef COMFORGPU
projectors[imodel].setcompressMdlDim(
baseMLO->mymodel.PPref[imodel].data.xdim,
baseMLO->mymodel.PPref[imodel].data.ydim,
baseMLO->mymodel.PPref[imodel].data.zdim,
baseMLO->mymodel.PPref[imodel].data.yinit,
baseMLO->mymodel.PPref[imodel].data.zinit,
baseMLO->mymodel.PPref[imodel].r_max,
baseMLO->mymodel.PPref[imodel].padding_factor,
baseMLO->mymodel.PPref[imodel].pad_size,
baseMLO->mymodel.PPref[imodel].sumalldata,
baseMLO->mymodel.PPref[imodel].yoffsetdata);
projectors[imodel].initcompressMdl(baseMLO->mymodel.PPref[imodel].compdatareal.data,
baseMLO->mymodel.PPref[imodel].compdatareal.data);
#else
projectors[imodel].setMdlDim(
baseMLO->mymodel.PPref[imodel].data.xdim,
baseMLO->mymodel.PPref[imodel].data.ydim,
baseMLO->mymodel.PPref[imodel].data.zdim,
baseMLO->mymodel.PPref[imodel].data.yinit,
baseMLO->mymodel.PPref[imodel].data.zinit,
baseMLO->mymodel.PPref[imodel].r_max,
baseMLO->mymodel.PPref[imodel].padding_factor);
projectors[imodel].initMdl(baseMLO->mymodel.PPref[imodel].data.data);
#endif
}
for (int imodel = 0; imodel < nr_bproj; imodel++)
{
printf("====padsize=======%d===\n",baseMLO->wsum_model.BPref[imodel].pad_size);
fflush(stdout);
#ifdef COMGPU
backprojectors[imodel].setcompressMdlDim(
baseMLO->wsum_model.BPref[imodel].data.xdim,
baseMLO->wsum_model.BPref[imodel].data.ydim,
baseMLO->wsum_model.BPref[imodel].data.zdim,
baseMLO->wsum_model.BPref[imodel].data.yinit,
baseMLO->wsum_model.BPref[imodel].data.zinit,
baseMLO->wsum_model.BPref[imodel].r_max,
baseMLO->wsum_model.BPref[imodel].padding_factor,
baseMLO->wsum_model.BPref[imodel].pad_size,
baseMLO->wsum_model.BPref[imodel].sumalldata,
baseMLO->wsum_model.BPref[imodel].yoffsetdata);
backprojectors[imodel].initcompressMdl();
#else
backprojectors[imodel].setMdlDim(
baseMLO->wsum_model.BPref[imodel].data.xdim,
baseMLO->wsum_model.BPref[imodel].data.ydim,
baseMLO->wsum_model.BPref[imodel].data.zdim,
baseMLO->wsum_model.BPref[imodel].data.yinit,
baseMLO->wsum_model.BPref[imodel].data.zinit,
baseMLO->wsum_model.BPref[imodel].r_max,
baseMLO->wsum_model.BPref[imodel].padding_factor
#ifdef FILTERSLICE
,baseMLO->wsum_model.BPref[imodel].fxsize,
baseMLO->wsum_model.BPref[imodel].fysize,
baseMLO->wsum_model.BPref[imodel].fzsize,
baseMLO->wsum_model.BPref[imodel].fstartx,
baseMLO->wsum_model.BPref[imodel].fstarty,
baseMLO->wsum_model.BPref[imodel].fstartz,
baseMLO->wsum_model.BPref[imodel].fendx,
baseMLO->wsum_model.BPref[imodel].fendy,
baseMLO->wsum_model.BPref[imodel].fendz
#endif
);
backprojectors[imodel].initMdl();
#endif
/* struct timeval tv1,tv2;
struct timezone tz;
gettimeofday (&tv1, &tz);
//hipMemcpy(baseMLO->wsum_model.BPref[imodel].cpuindex,backprojectors[imodel].d_index,backprojectors[imodel].mdlXYZ*sizeof(int),hipMemcpyDeviceToHost);
#ifdef PINMEM
for(int i=0;i<100;i++)
hipMemcpy(baseMLO->wsum_model.BPref[imodel].cpuindex,backprojectors[imodel].d_index,backprojectors[imodel].mdlXYZ*sizeof(int),hipMemcpyDeviceToHost);
#else
for(int i=0;i<100;i++)
hipMemcpy(baseMLO->wsum_model.BPref[imodel].cpuindex.data,backprojectors[imodel].d_index,backprojectors[imodel].mdlXYZ*sizeof(int),hipMemcpyDeviceToHost);
#endif
gettimeofday (&tv2, &tz);
printf("\nsize is %d and After first allocation : %f\n",backprojectors[imodel].mdlXYZ*sizeof(int),(float)(1000 * (tv2.tv_sec-tv1.tv_sec)+ (tv2.tv_usec-tv1.tv_usec)/1000.0));
fflush(stdout);
#endif*/
}
/*======================================================
CUSTOM ALLOCATOR
======================================================*/
int memAlignmentSize;
hipDeviceGetAttribute ( &memAlignmentSize, hipDeviceAttributeTextureAlignment, device_id );
allocator = new CudaCustomAllocator(0, memAlignmentSize);
}
void MlDeviceBundle::setupTunableSizedObjects(size_t allocationSize)
{
unsigned nr_models = baseMLO->mymodel.nr_classes;
int devCount;
HANDLE_ERROR(hipGetDeviceCount(&devCount));
if(device_id >= devCount)
{
//std::cerr << " using device_id=" << device_id << " (device no. " << device_id+1 << ") which is higher than the available number of devices=" << devCount << std::endl;
CRITICAL(ERR_GPUID);
}
else
HANDLE_ERROR(hipSetDevice(device_id));
/*======================================================
CUSTOM ALLOCATOR
======================================================*/
#ifdef DEBUG_CUDA
printf("DEBUG: Total GPU allocation size set to %zu MB on device id %d.\n", allocationSize / (1000*1000), device_id);
#endif
#ifndef CUDA_NO_CUSTOM_ALLOCATION
allocator->resize(allocationSize);
#endif
/*======================================================
PROJECTION PLAN
======================================================*/
coarseProjectionPlans.resize(nr_models, allocator);
for (int iclass = 0; iclass < nr_models; iclass++)
{
//If doing predefined projector plan at all and is this class significant
if (!generateProjectionPlanOnTheFly && baseMLO->mymodel.pdf_class[iclass] > 0.)
{
std::vector<int> exp_pointer_dir_nonzeroprior;
std::vector<int> exp_pointer_psi_nonzeroprior;
std::vector<RFLOAT> exp_directions_prior;
std::vector<RFLOAT> exp_psi_prior;
long unsigned itrans_max = baseMLO->sampling.NrTranslationalSamplings() - 1;
long unsigned nr_idir = baseMLO->sampling.NrDirections(0, &exp_pointer_dir_nonzeroprior);
long unsigned nr_ipsi = baseMLO->sampling.NrPsiSamplings(0, &exp_pointer_psi_nonzeroprior );
coarseProjectionPlans[iclass].setup(
baseMLO->sampling,
exp_directions_prior,
exp_psi_prior,
exp_pointer_dir_nonzeroprior,
exp_pointer_psi_nonzeroprior,
NULL, //Mcoarse_significant
baseMLO->mymodel.pdf_class,
baseMLO->mymodel.pdf_direction,
nr_idir,
nr_ipsi,
0, //idir_min
nr_idir - 1, //idir_max
0, //ipsi_min
nr_ipsi - 1, //ipsi_max
0, //itrans_min
itrans_max,
0, //current_oversampling
1, //nr_oversampled_rot
iclass,
true, //coarse
!IS_NOT_INV,
baseMLO->do_skip_align,
baseMLO->do_skip_rotate,
baseMLO->mymodel.orientational_prior_mode
);
}
}
};
void MlOptimiserCuda::resetData()
{
int devCount;
HANDLE_ERROR(hipGetDeviceCount(&devCount));
if(device_id >= devCount)
{
//std::cerr << " using device_id=" << device_id << " (device no. " << device_id+1 << ") which is higher than the available number of devices=" << devCount << std::endl;
CRITICAL(ERR_GPUID);
}
else
HANDLE_ERROR(hipSetDevice(device_id));
unsigned nr_classes = baseMLO->mymodel.nr_classes;
classStreams.resize(nr_classes, 0);
for (int i = 0; i < nr_classes; i++)
HANDLE_ERROR(hipStreamCreate(&classStreams[i])); //HANDLE_ERROR(hipStreamCreateWithFlags(&classStreams[i],hipStreamNonBlocking));
transformer1.clear();
transformer2.clear();
};
void MlOptimiserCuda::doThreadExpectationSomeParticles(int thread_id)
{
#ifdef TIMING
// Only time one thread
if (thread_id == 0)
baseMLO->timer.tic(baseMLO->TIMING_ESP_THR);
#endif
// CTOC(cudaMLO->timer,"interParticle");
int devCount;
HANDLE_ERROR(hipGetDeviceCount(&devCount));
if(device_id >= devCount)
{
//std::cerr << " using device_id=" << device_id << " (device no. " << device_id+1 << ") which is higher than the available number of devices=" << devCount << std::endl;
CRITICAL(ERR_GPUID);
}
else
DEBUG_HANDLE_ERROR(hipSetDevice(device_id));
//std::cerr << " calling on device " << device_id << std::endl;
//put mweight allocation here
size_t first_ipart = 0, last_ipart = 0;
while (baseMLO->exp_ipart_ThreadTaskDistributor->getTasks(first_ipart, last_ipart))
{
//CTIC(timer,"oneTask");
for (long unsigned ipart = first_ipart; ipart <= last_ipart; ipart++)
{
#ifdef TIMING
// Only time one thread
if (thread_id == 0)
baseMLO->timer.tic(baseMLO->TIMING_ESP_DIFF2_A);
#endif
unsigned my_ori_particle = baseMLO->exp_my_first_ori_particle + ipart;
AccPtrFactory ptrFactory(allocator, cudaStreamPerThread);
accDoExpectationOneParticle<MlOptimiserCuda>(this, my_ori_particle, thread_id, ptrFactory);
}
//CTOC(timer,"oneTask");
}
// CTIC(cudaMLO->timer,"interParticle");
// exit(0);
#ifdef TIMING
// Only time one thread
if (thread_id == 0)
baseMLO->timer.toc(baseMLO->TIMING_ESP_THR);
#endif
}
|
ad13631b46ddeaa0d577fca1f542498c12577211.cu
|
#undef ALTCPU
#include <sys/time.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <ctime>
#include <vector>
#include <iostream>
#include "../../ml_optimiser.h"
#include <cuda_runtime.h>
#include <curand.h>
#include <curand_kernel.h>
#include "../acc_ptr.h"
#include "../acc_projector.h"
#include "../acc_backprojector.h"
#include "../acc_projector_plan.h"
#include "cuda_benchmark_utils.h"
#include "cuda_kernels/helper.cuh"
#include "cuda_kernels/diff2.cuh"
#include "cuda_kernels/wavg.cuh"
#include "cuda_mem_utils.h"
#include "cuda_fft.h"
#include "../data_types.h"
#include "../../complex.h"
#include "../../helix.h"
#include "../../error.h"
#include <fstream>
#include "../../parallel.h"
#include <signal.h>
#include <map>
#ifdef CUDA_FORCESTL
#include "cuda_utils_stl.cuh"
#else
#include "cuda_utils_cub.cuh"
#endif
#include "../utilities.h"
#include "../utilities_impl.h"
#include "../acc_ml_optimiser.h"
#include "cuda_ml_optimiser.h"
#include "../acc_helper_functions.h"
#include "../acc_ml_optimiser_impl.h"
// ------------------------------- Some explicit template instantiations
template __global__ void CudaKernels::cuda_kernel_translate2D<XFLOAT>(XFLOAT *,
XFLOAT*, int, int, int, int, int);
template __global__ void CudaKernels::cuda_kernel_translate3D<XFLOAT>(XFLOAT *,
XFLOAT *, int, int, int, int, int, int, int);
template __global__ void cuda_kernel_multi<XFLOAT>( XFLOAT *,
XFLOAT *, XFLOAT, int);
template __global__ void CudaKernels::cuda_kernel_multi<XFLOAT>( XFLOAT *,
XFLOAT, int);
template __global__ void cuda_kernel_multi<XFLOAT>( XFLOAT *, XFLOAT *,
XFLOAT *, XFLOAT, int);
// ----------------------------------------------------------------------
// High-level CUDA objects
size_t MlDeviceBundle::checkFixedSizedObjects(int shares)
{
int devCount;
size_t BoxLimit;
HANDLE_ERROR(cudaGetDeviceCount(&devCount));
if(device_id >= devCount)
CRITICAL(ERR_GPUID);
HANDLE_ERROR(cudaSetDevice(device_id));
size_t free(0), total(0);
DEBUG_HANDLE_ERROR(cudaMemGetInfo( &free, &total ));
float margin(1.05);
BoxLimit = pow(free/(margin*2.5*sizeof(XFLOAT)*((float)shares)),(1/3.0)) / ((float) baseMLO->mymodel.padding_factor);
// printf("device_id : %d \n",device_id);
// printf("free :%ld margin %f shares %d BoxLimit %ld baseMLO->mymodel.padding_factor %f \n",free,margin, shares, BoxLimit,baseMLO->mymodel.padding_factor);
//size_t BytesNeeded = ((float)shares)*margin*2.5*sizeof(XFLOAT)*pow((baseMLO->mymodel.ori_size*baseMLO->mymodel.padding_factor),3);
return(BoxLimit);
}
void MlDeviceBundle::setupFixedSizedObjects(int iter,FileName fn_out)
{
int devCount;
HANDLE_ERROR(cudaGetDeviceCount(&devCount));
if(device_id >= devCount)
{
//std::cerr << " using device_id=" << device_id << " (device no. " << device_id+1 << ") which is higher than the available number of devices=" << devCount << std::endl;
CRITICAL(ERR_GPUID);
}
else
HANDLE_ERROR(cudaSetDevice(device_id));
//Can we pre-generate projector plan and corresponding euler matrices for all particles
if (baseMLO->do_skip_align || baseMLO->do_skip_rotate || baseMLO->do_auto_refine || baseMLO->mymodel.orientational_prior_mode != NOPRIOR)
generateProjectionPlanOnTheFly = true;
else
generateProjectionPlanOnTheFly = false;
unsigned nr_proj = baseMLO->mymodel.PPref.size();
unsigned nr_bproj = baseMLO->wsum_model.BPref.size();
projectors.resize(nr_proj);
backprojectors.resize(nr_bproj);
/*======================================================
PROJECTOR AND BACKPROJECTOR
======================================================*/
for (int imodel = 0; imodel < nr_proj; imodel++)
{
#ifdef COMFORGPU
projectors[imodel].setcompressMdlDim(
baseMLO->mymodel.PPref[imodel].data.xdim,
baseMLO->mymodel.PPref[imodel].data.ydim,
baseMLO->mymodel.PPref[imodel].data.zdim,
baseMLO->mymodel.PPref[imodel].data.yinit,
baseMLO->mymodel.PPref[imodel].data.zinit,
baseMLO->mymodel.PPref[imodel].r_max,
baseMLO->mymodel.PPref[imodel].padding_factor,
baseMLO->mymodel.PPref[imodel].pad_size,
baseMLO->mymodel.PPref[imodel].sumalldata,
baseMLO->mymodel.PPref[imodel].yoffsetdata);
projectors[imodel].initcompressMdl(baseMLO->mymodel.PPref[imodel].compdatareal.data,
baseMLO->mymodel.PPref[imodel].compdatareal.data);
#else
projectors[imodel].setMdlDim(
baseMLO->mymodel.PPref[imodel].data.xdim,
baseMLO->mymodel.PPref[imodel].data.ydim,
baseMLO->mymodel.PPref[imodel].data.zdim,
baseMLO->mymodel.PPref[imodel].data.yinit,
baseMLO->mymodel.PPref[imodel].data.zinit,
baseMLO->mymodel.PPref[imodel].r_max,
baseMLO->mymodel.PPref[imodel].padding_factor);
projectors[imodel].initMdl(baseMLO->mymodel.PPref[imodel].data.data);
#endif
}
for (int imodel = 0; imodel < nr_bproj; imodel++)
{
printf("====padsize=======%d===\n",baseMLO->wsum_model.BPref[imodel].pad_size);
fflush(stdout);
#ifdef COMGPU
backprojectors[imodel].setcompressMdlDim(
baseMLO->wsum_model.BPref[imodel].data.xdim,
baseMLO->wsum_model.BPref[imodel].data.ydim,
baseMLO->wsum_model.BPref[imodel].data.zdim,
baseMLO->wsum_model.BPref[imodel].data.yinit,
baseMLO->wsum_model.BPref[imodel].data.zinit,
baseMLO->wsum_model.BPref[imodel].r_max,
baseMLO->wsum_model.BPref[imodel].padding_factor,
baseMLO->wsum_model.BPref[imodel].pad_size,
baseMLO->wsum_model.BPref[imodel].sumalldata,
baseMLO->wsum_model.BPref[imodel].yoffsetdata);
backprojectors[imodel].initcompressMdl();
#else
backprojectors[imodel].setMdlDim(
baseMLO->wsum_model.BPref[imodel].data.xdim,
baseMLO->wsum_model.BPref[imodel].data.ydim,
baseMLO->wsum_model.BPref[imodel].data.zdim,
baseMLO->wsum_model.BPref[imodel].data.yinit,
baseMLO->wsum_model.BPref[imodel].data.zinit,
baseMLO->wsum_model.BPref[imodel].r_max,
baseMLO->wsum_model.BPref[imodel].padding_factor
#ifdef FILTERSLICE
,baseMLO->wsum_model.BPref[imodel].fxsize,
baseMLO->wsum_model.BPref[imodel].fysize,
baseMLO->wsum_model.BPref[imodel].fzsize,
baseMLO->wsum_model.BPref[imodel].fstartx,
baseMLO->wsum_model.BPref[imodel].fstarty,
baseMLO->wsum_model.BPref[imodel].fstartz,
baseMLO->wsum_model.BPref[imodel].fendx,
baseMLO->wsum_model.BPref[imodel].fendy,
baseMLO->wsum_model.BPref[imodel].fendz
#endif
);
backprojectors[imodel].initMdl();
#endif
/* struct timeval tv1,tv2;
struct timezone tz;
gettimeofday (&tv1, &tz);
//cudaMemcpy(baseMLO->wsum_model.BPref[imodel].cpuindex,backprojectors[imodel].d_index,backprojectors[imodel].mdlXYZ*sizeof(int),cudaMemcpyDeviceToHost);
#ifdef PINMEM
for(int i=0;i<100;i++)
cudaMemcpy(baseMLO->wsum_model.BPref[imodel].cpuindex,backprojectors[imodel].d_index,backprojectors[imodel].mdlXYZ*sizeof(int),cudaMemcpyDeviceToHost);
#else
for(int i=0;i<100;i++)
cudaMemcpy(baseMLO->wsum_model.BPref[imodel].cpuindex.data,backprojectors[imodel].d_index,backprojectors[imodel].mdlXYZ*sizeof(int),cudaMemcpyDeviceToHost);
#endif
gettimeofday (&tv2, &tz);
printf("\nsize is %d and After first allocation : %f\n",backprojectors[imodel].mdlXYZ*sizeof(int),(float)(1000 * (tv2.tv_sec-tv1.tv_sec)+ (tv2.tv_usec-tv1.tv_usec)/1000.0));
fflush(stdout);
#endif*/
}
/*======================================================
CUSTOM ALLOCATOR
======================================================*/
int memAlignmentSize;
cudaDeviceGetAttribute ( &memAlignmentSize, cudaDevAttrTextureAlignment, device_id );
allocator = new CudaCustomAllocator(0, memAlignmentSize);
}
void MlDeviceBundle::setupTunableSizedObjects(size_t allocationSize)
{
unsigned nr_models = baseMLO->mymodel.nr_classes;
int devCount;
HANDLE_ERROR(cudaGetDeviceCount(&devCount));
if(device_id >= devCount)
{
//std::cerr << " using device_id=" << device_id << " (device no. " << device_id+1 << ") which is higher than the available number of devices=" << devCount << std::endl;
CRITICAL(ERR_GPUID);
}
else
HANDLE_ERROR(cudaSetDevice(device_id));
/*======================================================
CUSTOM ALLOCATOR
======================================================*/
#ifdef DEBUG_CUDA
printf("DEBUG: Total GPU allocation size set to %zu MB on device id %d.\n", allocationSize / (1000*1000), device_id);
#endif
#ifndef CUDA_NO_CUSTOM_ALLOCATION
allocator->resize(allocationSize);
#endif
/*======================================================
PROJECTION PLAN
======================================================*/
coarseProjectionPlans.resize(nr_models, allocator);
for (int iclass = 0; iclass < nr_models; iclass++)
{
//If doing predefined projector plan at all and is this class significant
if (!generateProjectionPlanOnTheFly && baseMLO->mymodel.pdf_class[iclass] > 0.)
{
std::vector<int> exp_pointer_dir_nonzeroprior;
std::vector<int> exp_pointer_psi_nonzeroprior;
std::vector<RFLOAT> exp_directions_prior;
std::vector<RFLOAT> exp_psi_prior;
long unsigned itrans_max = baseMLO->sampling.NrTranslationalSamplings() - 1;
long unsigned nr_idir = baseMLO->sampling.NrDirections(0, &exp_pointer_dir_nonzeroprior);
long unsigned nr_ipsi = baseMLO->sampling.NrPsiSamplings(0, &exp_pointer_psi_nonzeroprior );
coarseProjectionPlans[iclass].setup(
baseMLO->sampling,
exp_directions_prior,
exp_psi_prior,
exp_pointer_dir_nonzeroprior,
exp_pointer_psi_nonzeroprior,
NULL, //Mcoarse_significant
baseMLO->mymodel.pdf_class,
baseMLO->mymodel.pdf_direction,
nr_idir,
nr_ipsi,
0, //idir_min
nr_idir - 1, //idir_max
0, //ipsi_min
nr_ipsi - 1, //ipsi_max
0, //itrans_min
itrans_max,
0, //current_oversampling
1, //nr_oversampled_rot
iclass,
true, //coarse
!IS_NOT_INV,
baseMLO->do_skip_align,
baseMLO->do_skip_rotate,
baseMLO->mymodel.orientational_prior_mode
);
}
}
};
void MlOptimiserCuda::resetData()
{
int devCount;
HANDLE_ERROR(cudaGetDeviceCount(&devCount));
if(device_id >= devCount)
{
//std::cerr << " using device_id=" << device_id << " (device no. " << device_id+1 << ") which is higher than the available number of devices=" << devCount << std::endl;
CRITICAL(ERR_GPUID);
}
else
HANDLE_ERROR(cudaSetDevice(device_id));
unsigned nr_classes = baseMLO->mymodel.nr_classes;
classStreams.resize(nr_classes, 0);
for (int i = 0; i < nr_classes; i++)
HANDLE_ERROR(cudaStreamCreate(&classStreams[i])); //HANDLE_ERROR(cudaStreamCreateWithFlags(&classStreams[i],cudaStreamNonBlocking));
transformer1.clear();
transformer2.clear();
};
void MlOptimiserCuda::doThreadExpectationSomeParticles(int thread_id)
{
#ifdef TIMING
// Only time one thread
if (thread_id == 0)
baseMLO->timer.tic(baseMLO->TIMING_ESP_THR);
#endif
// CTOC(cudaMLO->timer,"interParticle");
int devCount;
HANDLE_ERROR(cudaGetDeviceCount(&devCount));
if(device_id >= devCount)
{
//std::cerr << " using device_id=" << device_id << " (device no. " << device_id+1 << ") which is higher than the available number of devices=" << devCount << std::endl;
CRITICAL(ERR_GPUID);
}
else
DEBUG_HANDLE_ERROR(cudaSetDevice(device_id));
//std::cerr << " calling on device " << device_id << std::endl;
//put mweight allocation here
size_t first_ipart = 0, last_ipart = 0;
while (baseMLO->exp_ipart_ThreadTaskDistributor->getTasks(first_ipart, last_ipart))
{
//CTIC(timer,"oneTask");
for (long unsigned ipart = first_ipart; ipart <= last_ipart; ipart++)
{
#ifdef TIMING
// Only time one thread
if (thread_id == 0)
baseMLO->timer.tic(baseMLO->TIMING_ESP_DIFF2_A);
#endif
unsigned my_ori_particle = baseMLO->exp_my_first_ori_particle + ipart;
AccPtrFactory ptrFactory(allocator, cudaStreamPerThread);
accDoExpectationOneParticle<MlOptimiserCuda>(this, my_ori_particle, thread_id, ptrFactory);
}
//CTOC(timer,"oneTask");
}
// CTIC(cudaMLO->timer,"interParticle");
// exit(0);
#ifdef TIMING
// Only time one thread
if (thread_id == 0)
baseMLO->timer.toc(baseMLO->TIMING_ESP_THR);
#endif
}
|
8aaef2e0e853dbd33d0f4039f3080d502e3e49dc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include <stdlib.h>
#include "life_kernel.cu"
void init_data(int * domain, int domain_x, int domain_y)
{
for(int i = 0; i != domain_y; ++i) {
for(int j = 0; j != domain_x; ++j) {
domain[i * domain_x + j] = rand() % 3;
}
}
}
// Color display code contributed by Louis Beziaud, Simon Bihel and Rémi Hutin, PPAR 2016/2017
void print_domain(int* domain, int domain_x, int domain_y, int* red, int* blue) {
if (red != NULL) *red = 0;
if (blue != NULL) *blue = 0;
for(int y = 0; y < domain_y; y++) {
for(int x = 0; x < domain_x; x++) {
int cell = domain[y * domain_x + x];
switch(cell) {
case 0:
printf("\033[40m \033[0m");
break;
case 1:
printf("\033[41m \033[0m");
break;
case 2:
printf("\033[44m \033[0m");
break;
default:
break;
}
if(red != NULL && cell == 1) {
(*red)++;
} else if(blue != NULL && cell == 2) {
(*blue)++;
}
}
printf("\n");
}
}
int main(int argc, char ** argv)
{
// Definition of parameters
int domain_x = 128; // Multiple of threads_per_block * cells_per_word
int domain_y = 128;
int cells_per_word = 1;
int steps = 2;
int threads_per_block = 64;
int blocks_x = domain_x / (threads_per_block * cells_per_word);
int blocks_y = domain_y / (threads_per_block * cells_per_word);
dim3 grid(blocks_x, blocks_y); // CUDA grid dimensions
dim3 threads(threads_per_block); // CUDA block dimensions
// Allocation of arrays
int * domain_gpu[2] = {NULL, NULL};
// Arrays of dimensions domain.x * domain.y
size_t domain_size = domain_x * domain_y / cells_per_word * sizeof(int);
CUDA_SAFE_CALL(hipMalloc((void**)&domain_gpu[0], domain_size));
CUDA_SAFE_CALL(hipMalloc((void**)&domain_gpu[1], domain_size));
int * domain_cpu = (int*)malloc(domain_size);
// Arrays of dimensions pitch * domain.y
init_data(domain_cpu, domain_x, domain_y);
CUDA_SAFE_CALL(hipMemcpy(domain_gpu[0], domain_cpu, domain_size, hipMemcpyHostToDevice));
// Timer initialization
hipEvent_t start, stop;
CUDA_SAFE_CALL(hipEventCreate(&start));
CUDA_SAFE_CALL(hipEventCreate(&stop));
// Start timer
CUDA_SAFE_CALL(hipEventRecord(start, 0));
// Kernel execution
int shared_mem_size = (domain_x / blocks_x + 2) * (domain_y / blocks_y + 2) * sizeof(int);
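// Editor's worked example (not part of the original file): with the defaults above
// (128x128 domain, 64 threads per block, 1 cell per word) blocks_x = blocks_y = 2,
// so each block tiles a 64x64 sub-domain plus a one-cell halo:
// (64+2) * (64+2) * sizeof(int) = 17424 bytes of shared memory per block.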
for(int i = 0; i < steps; i++) {
hipLaunchKernelGGL(( life_kernel), dim3(grid), dim3(threads), shared_mem_size , 0, domain_gpu[i%2], domain_gpu[(i+1)%2], domain_x, domain_y);
}
// Stop timer
CUDA_SAFE_CALL(hipEventRecord(stop, 0));
CUDA_SAFE_CALL(hipEventSynchronize(stop));
float elapsedTime;
CUDA_SAFE_CALL(hipEventElapsedTime(&elapsedTime, start, stop)); // In ms
printf("GPU time: %f ms\n", elapsedTime);
CUDA_SAFE_CALL(hipEventDestroy(start));
CUDA_SAFE_CALL(hipEventDestroy(stop));
// Get results back
CUDA_SAFE_CALL(hipMemcpy(domain_cpu, domain_gpu[steps%2], domain_size, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipFree(domain_gpu[0]));
CUDA_SAFE_CALL(hipFree(domain_gpu[1]));
// Count colors
int red = 0;
int blue = 0;
print_domain(domain_cpu, domain_x, domain_y, &red, &blue);
printf("Red/Blue cells: %d/%d\n", red, blue);
free(domain_cpu);
return 0;
}
|
8aaef2e0e853dbd33d0f4039f3080d502e3e49dc.cu
|
#include "utils.h"
#include <stdlib.h>
#include "life_kernel.cu"
void init_data(int * domain, int domain_x, int domain_y)
{
for(int i = 0; i != domain_y; ++i) {
for(int j = 0; j != domain_x; ++j) {
domain[i * domain_x + j] = rand() % 3;
}
}
}
// Color display code contributed by Louis Beziaud, Simon Bihel and Rémi Hutin, PPAR 2016/2017
void print_domain(int* domain, int domain_x, int domain_y, int* red, int* blue) {
if (red != NULL) *red = 0;
if (blue != NULL) *blue = 0;
for(int y = 0; y < domain_y; y++) {
for(int x = 0; x < domain_x; x++) {
int cell = domain[y * domain_x + x];
switch(cell) {
case 0:
printf("\033[40m \033[0m");
break;
case 1:
printf("\033[41m \033[0m");
break;
case 2:
printf("\033[44m \033[0m");
break;
default:
break;
}
if(red != NULL && cell == 1) {
(*red)++;
} else if(blue != NULL && cell == 2) {
(*blue)++;
}
}
printf("\n");
}
}
int main(int argc, char ** argv)
{
// Definition of parameters
int domain_x = 128; // Multiple of threads_per_block * cells_per_word
int domain_y = 128;
int cells_per_word = 1;
int steps = 2;
int threads_per_block = 64;
int blocks_x = domain_x / (threads_per_block * cells_per_word);
int blocks_y = domain_y / (threads_per_block * cells_per_word);
dim3 grid(blocks_x, blocks_y); // CUDA grid dimensions
dim3 threads(threads_per_block); // CUDA block dimensions
// Allocation of arrays
int * domain_gpu[2] = {NULL, NULL};
// Arrays of dimensions domain.x * domain.y
size_t domain_size = domain_x * domain_y / cells_per_word * sizeof(int);
CUDA_SAFE_CALL(cudaMalloc((void**)&domain_gpu[0], domain_size));
CUDA_SAFE_CALL(cudaMalloc((void**)&domain_gpu[1], domain_size));
int * domain_cpu = (int*)malloc(domain_size);
// Arrays of dimensions pitch * domain.y
init_data(domain_cpu, domain_x, domain_y);
CUDA_SAFE_CALL(cudaMemcpy(domain_gpu[0], domain_cpu, domain_size, cudaMemcpyHostToDevice));
// Timer initialization
cudaEvent_t start, stop;
CUDA_SAFE_CALL(cudaEventCreate(&start));
CUDA_SAFE_CALL(cudaEventCreate(&stop));
// Start timer
CUDA_SAFE_CALL(cudaEventRecord(start, 0));
// Kernel execution
int shared_mem_size = (domain_x / blocks_x + 2) * (domain_y / blocks_y + 2) * sizeof(int);
for(int i = 0; i < steps; i++) {
life_kernel<<< grid, threads, shared_mem_size >>>(domain_gpu[i%2], domain_gpu[(i+1)%2], domain_x, domain_y);
}
// Stop timer
CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
CUDA_SAFE_CALL(cudaEventSynchronize(stop));
float elapsedTime;
CUDA_SAFE_CALL(cudaEventElapsedTime(&elapsedTime, start, stop)); // In ms
printf("GPU time: %f ms\n", elapsedTime);
CUDA_SAFE_CALL(cudaEventDestroy(start));
CUDA_SAFE_CALL(cudaEventDestroy(stop));
// Get results back
CUDA_SAFE_CALL(cudaMemcpy(domain_cpu, domain_gpu[steps%2], domain_size, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaFree(domain_gpu[0]));
CUDA_SAFE_CALL(cudaFree(domain_gpu[1]));
// Count colors
int red = 0;
int blue = 0;
print_domain(domain_cpu, domain_x, domain_y, &red, &blue);
printf("Red/Blue cells: %d/%d\n", red, blue);
free(domain_cpu);
return 0;
}
|
f8c7868a5be44ab76095a6282613bd7b729186f7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/hip/cub.h>
#include <ATen/native/hip/Randperm.cuh>
#include <limits>
namespace at {
namespace native {
// [Algorithm of randperm]
//
// randperm is implemented by sorting an arange tensor of size n with randomly
// generated keys. When random keys are different from each other, all different
// permutations have the same probability.
//
// However, there is a pitfall here:
// For better performance, these N random keys are generated independently,
// and there is no effort to make sure they are different at the time of generation.
// When two keys are identical, stable sorting algorithms will not permute these two keys.
// As a result, (0, 1) will appear more often than (1, 0).
//
// To overcome this pitfall we first carefully choose the number of bits in these keys,
// so that the probability of having duplicate keys is under a threshold. Let q be the
// threshold probability for having non-duplicate keys, then it can be proved that[1]
// the number of bits required is: ceil(log2(n - (6 n^2 + 1) / (12 log(q))))
//
// Then after sort, we launch a separate kernel that additionally shuffles any islands
// of values whose keys matched. The algorithm of this kernel is as follows:
// Each thread reads its key and the keys of its neighbors to tell if it's part of an island.
// For each island, the first thread in the island sees a key match at index i+1 but not index i-1.
// This thread considers itself the "island leader". The island leader then reads more indices to
// the right to figure out how big the island is. Most likely, the island will be very small,
// just a few values. The island leader then rolls that many RNG, uses them to additionally
// shuffle values within the island using serial Fisher-Yates, and writes them out.
//
// Reference
// [1] https://osf.io/af2hy/
// The kernels are templated on an opaque, self-aligned type of the correct
// size to avoid redundant kernels for different types of the same size.
namespace {
template <int N> struct alignas(N) OpaqueType { char data[N]; };
}
Tensor& randperm_out_cuda(int64_t n, c10::optional<Generator> generator, Tensor& result) {
TORCH_CHECK(n >= 0, "n must be non-negative, got", n);
check_supported_max_int_with_precision(n, result);
result.resize_({n});
auto range = at::arange(n, result.options());
// shuffled_data points to the underlying data of the output tensor if the tensor is contiguous; otherwise it
// points to a new tensor.
Tensor shuffled;
void *shuffled_data;
if (result.is_contiguous()) {
shuffled_data = result.data_ptr();
} else {
shuffled = at::empty(n, result.options());
shuffled_data = shuffled.data_ptr();
}
auto opt = TensorOptions().device(result.device());
// See note [Algorithm of randperm]
const double log_threshold_12 = ::log(0.9) * 12;
double nd = static_cast<double>(n);
int bits = ::min(64,
static_cast<int>(::ceil(std::log2(nd - (6 * nd * nd + 1) / log_threshold_12))));
if (n == 0) {
return result;
} else if (bits <= 32) {
// For asserting device type match of the generator and result,
// we delegate that to the 'random_' function below.
auto keys = at::empty(result.sizes(), opt.dtype(kInt)).random_(
std::numeric_limits<int>::min(), std::numeric_limits<int>::max(), generator);
auto keys_tmp = at::empty_like(keys);
auto keys_out = keys_tmp.data_ptr<int>();
AT_DISPATCH_ALL_TYPES_AND(kHalf, result.scalar_type(), "randperm_out_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
auto shuffled_data_ = reinterpret_cast<dtype*>(shuffled_data);
dtype* range_data = reinterpret_cast<dtype*>(range.data_ptr());
at::cuda::cub::radix_sort_pairs<int, dtype>(
keys.data_ptr<int>(), keys_out,
range_data, shuffled_data_,
n, false, 0, bits);
randperm_handle_duplicate_keys(keys_out, shuffled_data_, bits, n, generator);
});
} else {
auto keys = at::empty(result.sizes(), opt.dtype(kLong)).random_(
std::numeric_limits<int64_t>::min(), std::numeric_limits<int64_t>::max(), generator);
auto keys_tmp = at::empty_like(keys);
auto keys_out = keys_tmp.data_ptr<int64_t>();
AT_DISPATCH_ALL_TYPES_AND(kHalf, result.scalar_type(), "randperm_out_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
auto shuffled_data_ = reinterpret_cast<dtype*>(shuffled_data);
dtype* range_data = reinterpret_cast<dtype*>(range.data_ptr());
at::cuda::cub::radix_sort_pairs<int64_t, dtype>(
keys.data_ptr<int64_t>(), keys_out,
range_data, shuffled_data_,
n, false, 0, bits);
randperm_handle_duplicate_keys(keys_out, shuffled_data_, bits, n, generator);
});
}
if (!result.is_contiguous()) {
result.copy_(shuffled);
}
return result;
}
}} // namespace at::native
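// --- Editor's sketch (not part of the original file) ---------------------------
// The key-width rule from the note above, pulled out as a standalone helper. For
// q = 0.9 and n = 1e6 it yields ceil(log2(1e6 - (6e12 + 1) / (12 * ln 0.9))) = 43
// bits, which is why such sizes take the 64-bit key path.
//
//   static int randperm_key_bits(double n, double q = 0.9) {
//     const double denom = 12.0 * std::log(q);            // negative for q < 1
//     return std::min(64, static_cast<int>(
//         std::ceil(std::log2(n - (6.0 * n * n + 1.0) / denom))));
//   }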
|
f8c7868a5be44ab76095a6282613bd7b729186f7.cu
|
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/cuda/cub.h>
#include <ATen/native/cuda/Randperm.cuh>
#include <limits>
namespace at {
namespace native {
// [Algorithm of randperm]
//
// randperm is implemented by sorting an arange tensor of size n with randomly
// generated keys. When random keys are different from each other, all different
// permutations have the same probability.
//
// However, there is a pitfall here:
// For better performance, these N random keys are generated independently,
// and there is no effort to make sure they are different at the time of generation.
// When two keys are identical, stable sorting algorithms will not permute these two keys.
// As a result, (0, 1) will appear more often than (1, 0).
//
// To overcome this pitfall we first carefully choose the number of bits in these keys,
// so that the probability of having duplicate keys is under a threshold. Let q be the
// threshold probability for having non-duplicate keys, then it can be proved that[1]
// the number of bits required is: ceil(log2(n - (6 n^2 + 1) / (12 log(q))))
//
// Then after sort, we launch a separate kernel that additionally shuffles any islands
// of values whose keys matched. The algorithm of this kernel is as follows:
// Each thread reads its key and the keys of its neighbors to tell if it's part of an island.
// For each island, the first thread in the island sees a key match at index i+1 but not index i-1.
// This thread considers itself the "island leader". The island leader then reads more indices to
// the right to figure out how big the island is. Most likely, the island will be very small,
// just a few values. The island leader then rolls that many RNG, uses them to additionally
// shuffle values within the island using serial Fisher-Yates, and writes them out.
//
// Reference
// [1] https://osf.io/af2hy/
// The kernels are templated on an opaque, self-aligned type of the correct
// size to avoid redundant kernels for different types of the same size.
namespace {
template <int N> struct alignas(N) OpaqueType { char data[N]; };
}
Tensor& randperm_out_cuda(int64_t n, c10::optional<Generator> generator, Tensor& result) {
TORCH_CHECK(n >= 0, "n must be non-negative, got", n);
check_supported_max_int_with_precision(n, result);
result.resize_({n});
auto range = at::arange(n, result.options());
// shuffled_data points to the underlying data of the output tensor if the tensor is contiguous; otherwise it
// points to a new tensor.
Tensor shuffled;
void *shuffled_data;
if (result.is_contiguous()) {
shuffled_data = result.data_ptr();
} else {
shuffled = at::empty(n, result.options());
shuffled_data = shuffled.data_ptr();
}
auto opt = TensorOptions().device(result.device());
// See note [Algorithm of randperm]
const double log_threshold_12 = std::log(0.9) * 12;
double nd = static_cast<double>(n);
int bits = std::min(64,
static_cast<int>(std::ceil(std::log2(nd - (6 * nd * nd + 1) / log_threshold_12))));
if (n == 0) {
return result;
} else if (bits <= 32) {
// For asserting device type match of the generator and result,
// we delegate that to the 'random_' function below.
auto keys = at::empty(result.sizes(), opt.dtype(kInt)).random_(
std::numeric_limits<int>::min(), std::numeric_limits<int>::max(), generator);
auto keys_tmp = at::empty_like(keys);
auto keys_out = keys_tmp.data_ptr<int>();
AT_DISPATCH_ALL_TYPES_AND(kHalf, result.scalar_type(), "randperm_out_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
auto shuffled_data_ = reinterpret_cast<dtype*>(shuffled_data);
dtype* range_data = reinterpret_cast<dtype*>(range.data_ptr());
at::cuda::cub::radix_sort_pairs<int, dtype>(
keys.data_ptr<int>(), keys_out,
range_data, shuffled_data_,
n, false, 0, bits);
randperm_handle_duplicate_keys(keys_out, shuffled_data_, bits, n, generator);
});
} else {
auto keys = at::empty(result.sizes(), opt.dtype(kLong)).random_(
std::numeric_limits<int64_t>::min(), std::numeric_limits<int64_t>::max(), generator);
auto keys_tmp = at::empty_like(keys);
auto keys_out = keys_tmp.data_ptr<int64_t>();
AT_DISPATCH_ALL_TYPES_AND(kHalf, result.scalar_type(), "randperm_out_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
auto shuffled_data_ = reinterpret_cast<dtype*>(shuffled_data);
dtype* range_data = reinterpret_cast<dtype*>(range.data_ptr());
at::cuda::cub::radix_sort_pairs<int64_t, dtype>(
keys.data_ptr<int64_t>(), keys_out,
range_data, shuffled_data_,
n, false, 0, bits);
randperm_handle_duplicate_keys(keys_out, shuffled_data_, bits, n, generator);
});
}
if (!result.is_contiguous()) {
result.copy_(shuffled);
}
return result;
}
}} // namespace at::native
|
da7349218d4c94057496a5bbdb1437a481e4c741.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
__global__ void markFilterEdges_gpu(int * src, int * dst, int * matches, int * keepEdges, int numEdges) {
int numThreads = blockDim.x * gridDim.x; //total number of threads
int tid = blockDim.x * blockIdx.x + threadIdx.x; // global index of the thread
int i = 0;
/*this code will automatically loop through the number of threads, as long as you refer to each element in the arrays as [tid]*/
for(i = tid; i < numEdges; i += numThreads)
{
if(matches[src[i]] == -1){//check inside the src to be sure
if(matches[dst[i]] == -1){
//now we can establish that the edge should be kept
keepEdges[i] = 1;
}else{
keepEdges[i] = 0;
}
}else{
keepEdges[i] = 0;
}
}
}
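// --- Editor's launch sketch (not part of the original file) --------------------
// The kernel uses a grid-stride loop, so any launch shape covers all numEdges
// entries; the d_* names below stand for device buffers the caller has already filled.
//
//   int threadsPerBlock = 256;
//   int blocks = (numEdges + threadsPerBlock - 1) / threadsPerBlock;
//   hipLaunchKernelGGL(markFilterEdges_gpu, dim3(blocks), dim3(threadsPerBlock), 0, 0,
//                      d_src, d_dst, d_matches, d_keepEdges, numEdges);
//   hipDeviceSynchronize();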
|
da7349218d4c94057496a5bbdb1437a481e4c741.cu
|
/*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
__global__ void markFilterEdges_gpu(int * src, int * dst, int * matches, int * keepEdges, int numEdges) {
int numThreads = blockDim.x * gridDim.x; //total number of threads
int tid = blockDim.x * blockIdx.x + threadIdx.x; // global index of the thread
int i = 0;
/*this code will automatically loop through the number of threads, as long as you refer to each element in the arrays as [tid]*/
for(i = tid; i < numEdges; i += numThreads)
{
if(matches[src[i]] == -1){//check inside the src to be sure
if(matches[dst[i]] == -1){
//now we can establish that the edge should be kept
keepEdges[i] = 1;
}else{
keepEdges[i] = 0;
}
}else{
keepEdges[i] = 0;
}
}
}
|
2d7f8bc44f2b99eacf086b3b4a04883da323410c.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
TO ADD before running:
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/cuda/lib64"
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample implements a conjugate gradient solver on GPU
* using CUBLAS and CUSPARSE
*
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
/* Using updated (v2) interfaces to cublas and cusparse */
#include <hip/hip_runtime.h>
#include <cusparse_v2.h>
#include <rocblas.h>
// Utilities and system includes
#include <helper_functions.h> // helper for shared functions common to CUDA SDK samples
#include <helper_cuda.h> // helper function CUDA error checking and intialization
__global__ void
vectorAddAZPY(const float *A, const float *B, float *C, int numElements, float alpham1x)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + alpham1x*B[i];
}
}
__global__ void
vectorAddSCAL(float *A, float x, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
A[i] = A[i]*x;
}
}
__global__ void
vectorAddCOPY(float *A, float *B, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
B[i] = A[i];
}
}
__global__ void
vectorAddPoziom2(float *A, float *B, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
B[i] = A[i];
}
}
__global__ void
vectorSparseMatrix(int N, int *rowptr, int *colind, float *values, float *x, float *y) {
/*for (int i = 0; i < N; i++) { // one thread - one row (serial CPU reference)
float sub = 0.0;
for (int j = rowptr[i]; j < rowptr[i + 1]; j++)
sub += values[j] * x[colind[j]];
y[i] = sub;
}*/
//these could be named blocksPerGrid, threadsPerBlock, but probably not (compare with vectorAdd.cu)
// i indicates which thread is currently working - which row of the matrix is being computed
// the rest is as in the handout
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N) { // guard: threads past the last row must not touch rowptr/y
float sub = 0.0;
for (int j = rowptr[i]; j < rowptr[i+1]; j++)
sub += values[j] * x[colind[j]];
y[i] = sub;
}
}
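// --- Editor's launch sketch (not part of the original file) --------------------
// One thread per row of the CSR matrix; this looks like a hand-written counterpart
// of the hipsparseScsrmv call used in cgs_basic below (y = A*x with alpha = 1, beta = 0).
// Assuming N rows and the d_* buffers allocated in cgs_basic:
//
//   int tpb = 256;
//   hipLaunchKernelGGL(vectorSparseMatrix, dim3((N + tpb - 1) / tpb), dim3(tpb), 0, 0,
//                      N, d_row, d_col, d_val, d_x, d_Ax);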
const char *sSDKname = "conjugateGradient";
double mclock(){
struct timeval tp;
double sec,usec;
gettimeofday( &tp, NULL );
sec = double( tp.tv_sec );
usec = double( tp.tv_usec )/1E6;
return sec + usec;
}
#define dot_BS 32
#define kernel_BS 32
/* genTridiag: generate a random tridiagonal symmetric matrix */
void genTridiag(int *I, int *J, float *val, int N, int nz)
{
double RAND_MAXi = 1e6;
double val_r = 12.345 * 1e5;
I[0] = 0, J[0] = 0, J[1] = 1;
val[0] = (float)val_r/RAND_MAXi + 10.0f;
val[1] = (float)val_r/RAND_MAXi;
int start;
for (int i = 1; i < N; i++)
{
if (i > 1)
{
I[i] = I[i-1]+3;
}
else
{
I[1] = 2;
}
start = (i-1)*3 + 2;
J[start] = i - 1;
J[start+1] = i;
if (i < N-1)
{
J[start+2] = i + 1;
}
val[start] = val[start-1];
val[start+1] = (float)val_r/RAND_MAXi + 10.0f;
if (i < N-1)
{
val[start+2] = (float)val_r/RAND_MAXi;
}
}
I[N] = nz;
}
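// Worked example of the CSR layout produced above: for N = 4, nz = (N-2)*3 + 4 = 10 and
//   I   = {0, 2, 5, 8, 10}
//   J   = {0,1,  0,1,2,  1,2,3,  2,3}
//   val = ~11.23 on the diagonal, ~1.23 off the diagonal (val_r/RAND_MAXi = 1.2345),
// i.e. a symmetric, diagonally dominant tridiagonal matrix.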
void cgs_basic(int argc, char **argv, int N, int M){
//int M = 0, N = 0,
int nz = 0, *I = NULL, *J = NULL;
float *val = NULL;
const float tol = 1e-10f;
const int max_iter = 1000;
float *x;
float *rhs;
float a, b, na, r0, r1;
int *d_col, *d_row;
float *d_val, *d_x, dot;
float *d_r, *d_p, *d_Ax;
int k;
float alpha, beta, alpham1;
// This will pick the best possible CUDA capable device
hipDeviceProp_t deviceProp;
int devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0)
{
printf("exiting...\n");
exit(EXIT_SUCCESS);
}
checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID));
// Statistics about the GPU device
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x11)
{
printf("%s: requires a minimum CUDA compute 1.1 capability\n", sSDKname);
hipDeviceReset();
exit(EXIT_SUCCESS);
}
/* Generate a random tridiagonal symmetric matrix in CSR format */
//M = N = 32*64;//10; //1048576;
printf("M = %d, N = %d\n", M, N);
nz = (N-2)*3 + 4;
I = (int *)malloc(sizeof(int)*(N+1));
J = (int *)malloc(sizeof(int)*nz);
val = (float *)malloc(sizeof(float)*nz);
genTridiag(I, J, val, N, nz);
/*
for (int i = 0; i < nz; i++){
printf("%d\t", J[i]);
}
printf("\n");
for (int i = 0; i < nz; i++){
printf("%2f\t", val[i]);
}
*/
x = (float *)malloc(sizeof(float)*N);
rhs = (float *)malloc(sizeof(float)*N);
for (int i = 0; i < N; i++)
{
rhs[i] = 1.0;
x[i] = 0.0;
}
/* Get handle to the CUBLAS context */
hipblasHandle_t cublasHandle = 0;
hipblasStatus_t cublasStatus;
cublasStatus = hipblasCreate(&cublasHandle);
checkCudaErrors(cublasStatus);
/* Get handle to the CUSPARSE context */
hipsparseHandle_t cusparseHandle = 0;
hipsparseStatus_t cusparseStatus;
cusparseStatus = hipsparseCreate(&cusparseHandle);
checkCudaErrors(cusparseStatus);
hipsparseMatDescr_t descr = 0;
cusparseStatus = hipsparseCreateMatDescr(&descr);
checkCudaErrors(cusparseStatus);
hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);
checkCudaErrors(hipMalloc((void **)&d_col, nz*sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_row, (N+1)*sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_val, nz*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_x, N*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_r, N*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_p, N*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_Ax, N*sizeof(float)));
hipMemcpy(d_col, J, nz*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_row, I, (N+1)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_val, val, nz*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_r, rhs, N*sizeof(float), hipMemcpyHostToDevice);
alpha = 1.0;
alpham1 = -1.0;
beta = 0.0;
r0 = 0.;
double t_start = mclock();
hipsparseScsrmv(cusparseHandle,HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_x, &beta, d_Ax);
hipblasSaxpy(cublasHandle, N, &alpham1, d_Ax, 1, d_r, 1); // REPLACE THIS FUNCTION (I)
cublasStatus = hipblasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THIS FUNCTION (II)
k = 1;
while (r1 > tol*tol && k <= max_iter)
{
if (k > 1)
{
b = r1 / r0;
cublasStatus = hipblasSscal(cublasHandle, N, &b, d_p, 1); // REPLACE THIS FUNCTION (I)
cublasStatus = hipblasSaxpy(cublasHandle, N, &alpha, d_r, 1, d_p, 1); // REPLACE THIS FUNCTION (I)
}
else
{
cublasStatus = hipblasScopy(cublasHandle, N, d_r, 1, d_p, 1); // REPLACE THIS FUNCTION (I)
}
hipsparseScsrmv(cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_p, &beta, d_Ax); // REPLACE THIS FUNCTION (III)
cublasStatus = hipblasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, &dot); // REPLACE THIS FUNCTION (II)
a = r1 / dot;
cublasStatus = hipblasSaxpy(cublasHandle, N, &a, d_p, 1, d_x, 1); // REPLACE THIS FUNCTION (I)
na = -a;
cublasStatus = hipblasSaxpy(cublasHandle, N, &na, d_Ax, 1, d_r, 1); // REPLACE THIS FUNCTION (I)
r0 = r1;
cublasStatus = hipblasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THIS FUNCTION (II)
hipDeviceSynchronize();
printf("iteration = %3d, residual = %e\n", k, sqrt(r1));
k++;
}
printf("TIME OF CGS_BASIC = %f\n", mclock() - t_start);
hipMemcpy(x, d_x, N*sizeof(float), hipMemcpyDeviceToHost);
float rsum, diff, err = 0.0;
for (int i = 0; i < N; i++)
{
rsum = 0.0;
for (int j = I[i]; j < I[i+1]; j++)
{
rsum += val[j]*x[J[j]];
}
diff = fabs(rsum - rhs[i]);
if (diff > err)
{
err = diff;
}
}
hipsparseDestroy(cusparseHandle);
hipblasDestroy(cublasHandle);
free(I);
free(J);
free(val);
free(x);
free(rhs);
hipFree(d_col);
hipFree(d_row);
hipFree(d_val);
hipFree(d_x);
hipFree(d_r);
hipFree(d_p);
hipFree(d_Ax);
hipDeviceReset();
printf("Test Summary: Error amount = %e\n", err);
//exit((k <= max_iter) ? 0 : 1);
}
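// For reference, the loop above implements the standard CG recurrences:
//   beta  = r1/r0,            p = r + beta*p        (Sscal + Saxpy)
//   alpha = r1 / (p'*A*p)                           (csrmv + Sdot)
//   x     = x + alpha*p                             (Saxpy)
//   r     = r - alpha*A*p                           (Saxpy with -alpha)
//   r1    = r'*r, iterate while r1 > tol^2          (Sdot)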
void cgs_TODO(int argc, char **argv, int N, int M){
//int M = 0, N = 0,
int nz = 0, *I = NULL, *J = NULL;
float *val = NULL;
const float tol = 1e-10f;
const int max_iter = 1000;
float *x;
float *rhs;
float a, b, na, r0, r1;
int *d_col, *d_row;
float *d_val, *d_x, dot;
float *d_r, *d_p, *d_Ax;
int k;
float alpha, beta, alpham1;
// This will pick the best possible CUDA capable device
hipDeviceProp_t deviceProp;
int devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0)
{
printf("exiting...\n");
exit(EXIT_SUCCESS);
}
checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID));
// Statistics about the GPU device
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x11)
{
printf("%s: requires a minimum CUDA compute 1.1 capability\n", sSDKname);
hipDeviceReset();
exit(EXIT_SUCCESS);
}
/* Generate a random tridiagonal symmetric matrix in CSR format */
//M = N = 32*64;//10; //1048576;
printf("M = %d, N = %d\n", M, N);
nz = (N-2)*3 + 4;
I = (int *)malloc(sizeof(int)*(N+1));
J = (int *)malloc(sizeof(int)*nz);
val = (float *)malloc(sizeof(float)*nz);
genTridiag(I, J, val, N, nz);
/*
for (int i = 0; i < nz; i++){
printf("%d\t", J[i]);
}
printf("\n");
for (int i = 0; i < nz; i++){
printf("%2f\t", val[i]);
}
*/
x = (float *)malloc(sizeof(float)*N);
rhs = (float *)malloc(sizeof(float)*N);
for (int i = 0; i < N; i++)
{
rhs[i] = 1.0;
x[i] = 0.0;
}
/* Get handle to the CUBLAS context */
hipblasHandle_t cublasHandle = 0;
hipblasStatus_t cublasStatus;
cublasStatus = hipblasCreate(&cublasHandle);
checkCudaErrors(cublasStatus);
/* Get handle to the CUSPARSE context */
hipsparseHandle_t cusparseHandle = 0;
hipsparseStatus_t cusparseStatus;
cusparseStatus = hipsparseCreate(&cusparseHandle);
checkCudaErrors(cusparseStatus);
hipsparseMatDescr_t descr = 0;
cusparseStatus = hipsparseCreateMatDescr(&descr);
checkCudaErrors(cusparseStatus);
hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);
checkCudaErrors(hipMalloc((void **)&d_col, nz*sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_row, (N+1)*sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_val, nz*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_x, N*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_r, N*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_p, N*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_Ax, N*sizeof(float)));
hipMemcpy(d_col, J, nz*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_row, I, (N+1)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_val, val, nz*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_r, rhs, N*sizeof(float), hipMemcpyHostToDevice);
alpha = 1.0;
alpham1 = -1.0;
beta = 0.0;
r0 = 0.;
// sparse matrix vector product: d_Ax = A * d_x
hipsparseScsrmv(cusparseHandle,HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_x, &beta, d_Ax); // REPLACE THIS FUNCTION (TASK II)
//vectorSparseMatrix <<<blocksPerGrid, threadsPerBlock >>> (N, d_row, d_col, d_val, d_x, d_Ax);
//azpy: d_r = d_r + alpham1 * d_Ax
//hipblasSaxpy(cublasHandle, N, &alpham1, d_Ax, 1, d_r, 1); // REPLACE THIS FUNCTION (TASK I)
int threadsPerBlock = 256;
int blocksPerGrid =(N + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( vectorAddAZPY), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_r, d_Ax, d_r, N, alpham1);
//err = hipGetLastError();
//dot: r1 = d_r * d_r
cublasStatus = hipblasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THIS FUNCTION (TASK III)
k = 1;
while (r1 > tol*tol && k <= max_iter)
{
if (k > 1)
{
b = r1 / r0;
//scal: d_p = b * d_p
//cublasStatus = hipblasSscal(cublasHandle, N, &b, d_p, 1); // REPLACE THIS FUNCTION (TASK I)
hipLaunchKernelGGL(( vectorAddSCAL), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_p, b, N);
//axpy: d_p = d_p + alpha * d_r
//cublasStatus = hipblasSaxpy(cublasHandle, N, &alpha, d_r, 1, d_p, 1); // REPLACE THIS FUNCTION (TASK I)
hipLaunchKernelGGL(( vectorAddAZPY), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_p, d_r, d_p, N, alpha);
}
else
{
//cpy: d_p = d_r
//cublasStatus = hipblasScopy(cublasHandle, N, d_r, 1, d_p, 1); // REPLACE THIS FUNCTION (TASK I)
//threadsPerBlock = 256;
//blocksPerGrid =(N + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( vectorAddCOPY), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_r, d_p, N);
}
//sparse matrix-vector product: d_Ax = A * d_p
hipsparseScsrmv(cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_p, &beta, d_Ax); // REPLACE THIS FUNCTION (TASK II)
//vectorSparseMatrix <<<blocksPerGrid, threadsPerBlock >>> (N, d_row, d_col, d_val, d_p, d_Ax);
cublasStatus = hipblasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, &dot); // REPLACE THIS FUNCTION (TASK III)
a = r1 / dot;
//axpy: d_x = d_x + a*d_p
//cublasStatus = hipblasSaxpy(cublasHandle, N, &a, d_p, 1, d_x, 1); // REPLACE THIS FUNCTION (TASK I)
hipLaunchKernelGGL(( vectorAddAZPY), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_x, d_p, d_x, N, a);
na = -a;
//axpy: d_r = d_r + na * d_Ax
//cublasStatus = hipblasSaxpy(cublasHandle, N, &na, d_Ax, 1, d_r, 1); // REPLACE THIS FUNCTION (TASK I)
hipLaunchKernelGGL(( vectorAddAZPY), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_r, d_Ax, d_r, N, na);
r0 = r1;
//dot: r1 = d_r * d_r
cublasStatus = hipblasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THIS FUNCTION (TASK III)
hipDeviceSynchronize();
printf("iteration = %3d, residual = %e\n", k, sqrt(r1));
k++;
}
hipMemcpy(x, d_x, N*sizeof(float), hipMemcpyDeviceToHost);
float rsum, diff, err = 0.0;
for (int i = 0; i < N; i++)
{
rsum = 0.0;
for (int j = I[i]; j < I[i+1]; j++)
{
rsum += val[j]*x[J[j]];
}
diff = fabs(rsum - rhs[i]);
if (diff > err)
{
err = diff;
}
}
hipsparseDestroy(cusparseHandle);
hipblasDestroy(cublasHandle);
free(I);
free(J);
free(val);
free(x);
free(rhs);
hipFree(d_col);
hipFree(d_row);
hipFree(d_val);
hipFree(d_x);
hipFree(d_r);
hipFree(d_p);
hipFree(d_Ax);
hipDeviceReset();
printf("Test Summary: Error amount = %e\n", err);
//exit((k <= max_iter) ? 0 : 1);
}
int main(int argc, char **argv)
{
//int N = 1e6;//1 << 20;
//int N = 256 * (1<<10) -10 ; //1e6;//1 << 20;
int N = 1e5;
int M = N;
//cgs_basic(argc, argv, N, M);
cgs_TODO(argc, argv, N, M);
}
|
2d7f8bc44f2b99eacf086b3b4a04883da323410c.cu
|
/*
ADD THIS to your environment:
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/cuda/lib64"
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample implements a conjugate gradient solver on GPU
* using CUBLAS and CUSPARSE
*
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
/* Using updated (v2) interfaces to cublas and cusparse */
#include <cuda_runtime.h>
#include <cusparse_v2.h>
#include <cublas_v2.h>
// Utilities and system includes
#include <helper_functions.h> // helper for shared functions common to CUDA SDK samples
#include <helper_cuda.h>      // helper functions for CUDA error checking and initialization
__global__ void
vectorAddAZPY(const float *A, const float *B, float *C, int numElements, float alpham1x)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + alpham1x*B[i];
}
}
__global__ void
vectorAddSCAL(float *A, float x, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
A[i] = A[i]*x;
}
}
__global__ void
vectorAddCOPY(float *A, float *B, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
B[i] = A[i];
}
}
__global__ void
vectorAddPoziom2(float *A, float *B, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
B[i] = A[i];
}
}
__global__ void
vectorSparseMatrix(int N, int *rowptr, int *colind, float *values, float *x, float *y) {
/*for (int i = 0; i < N; i++) { // 1 thread - 1 row
float sub = 0.0;
for (int j = rowptr[i]; j < rowptr[i + 1]; j++)
sub += values[j] * x[colind[j]];
y[i] = sub;
}*/
//these could be named blocksPerGrid, threadsPerBlock, but probably not (compare with vectoradd.cu)
// i indicates which thread is currently working - which matrix row is being computed
// the rest is as in the instructions
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= N) return; // guard: the grid may be rounded up past the last row
float sub = 0.0;
for (int j = rowptr[i]; j < rowptr[i+1]; j++)
sub += values[j] * x[colind[j]];
y[i] = sub;
}
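// A minimal launch sketch for the CSR SpMV kernel above, assuming it is used
// in place of cusparseScsrmv with the device pointers declared in cgs_TODO()
// below (one thread per matrix row, grid rounded up):
//   int threadsPerBlock = 256;
//   int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
//   vectorSparseMatrix<<<blocksPerGrid, threadsPerBlock>>>(N, d_row, d_col, d_val, d_x, d_Ax);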
const char *sSDKname = "conjugateGradient";
double mclock(){
struct timeval tp;
double sec,usec;
gettimeofday( &tp, NULL );
sec = double( tp.tv_sec );
usec = double( tp.tv_usec )/1E6;
return sec + usec;
}
#define dot_BS 32
#define kernel_BS 32
/* genTridiag: generate a random tridiagonal symmetric matrix */
void genTridiag(int *I, int *J, float *val, int N, int nz)
{
double RAND_MAXi = 1e6;
double val_r = 12.345 * 1e5;
I[0] = 0, J[0] = 0, J[1] = 1;
val[0] = (float)val_r/RAND_MAXi + 10.0f;
val[1] = (float)val_r/RAND_MAXi;
int start;
for (int i = 1; i < N; i++)
{
if (i > 1)
{
I[i] = I[i-1]+3;
}
else
{
I[1] = 2;
}
start = (i-1)*3 + 2;
J[start] = i - 1;
J[start+1] = i;
if (i < N-1)
{
J[start+2] = i + 1;
}
val[start] = val[start-1];
val[start+1] = (float)val_r/RAND_MAXi + 10.0f;
if (i < N-1)
{
val[start+2] = (float)val_r/RAND_MAXi;
}
}
I[N] = nz;
}
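// Worked example of the CSR layout produced above: for N = 4, nz = (N-2)*3 + 4 = 10 and
//   I   = {0, 2, 5, 8, 10}
//   J   = {0,1,  0,1,2,  1,2,3,  2,3}
//   val = ~11.23 on the diagonal, ~1.23 off the diagonal (val_r/RAND_MAXi = 1.2345),
// i.e. a symmetric, diagonally dominant tridiagonal matrix.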
void cgs_basic(int argc, char **argv, int N, int M){
//int M = 0, N = 0,
int nz = 0, *I = NULL, *J = NULL;
float *val = NULL;
const float tol = 1e-10f;
const int max_iter = 1000;
float *x;
float *rhs;
float a, b, na, r0, r1;
int *d_col, *d_row;
float *d_val, *d_x, dot;
float *d_r, *d_p, *d_Ax;
int k;
float alpha, beta, alpham1;
// This will pick the best possible CUDA capable device
cudaDeviceProp deviceProp;
int devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0)
{
printf("exiting...\n");
exit(EXIT_SUCCESS);
}
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID));
// Statistics about the GPU device
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x11)
{
printf("%s: requires a minimum CUDA compute 1.1 capability\n", sSDKname);
cudaDeviceReset();
exit(EXIT_SUCCESS);
}
/* Generate a random tridiagonal symmetric matrix in CSR format */
//M = N = 32*64;//10; //1048576;
printf("M = %d, N = %d\n", M, N);
nz = (N-2)*3 + 4;
I = (int *)malloc(sizeof(int)*(N+1));
J = (int *)malloc(sizeof(int)*nz);
val = (float *)malloc(sizeof(float)*nz);
genTridiag(I, J, val, N, nz);
/*
for (int i = 0; i < nz; i++){
printf("%d\t", J[i]);
}
printf("\n");
for (int i = 0; i < nz; i++){
printf("%2f\t", val[i]);
}
*/
x = (float *)malloc(sizeof(float)*N);
rhs = (float *)malloc(sizeof(float)*N);
for (int i = 0; i < N; i++)
{
rhs[i] = 1.0;
x[i] = 0.0;
}
/* Get handle to the CUBLAS context */
cublasHandle_t cublasHandle = 0;
cublasStatus_t cublasStatus;
cublasStatus = cublasCreate(&cublasHandle);
checkCudaErrors(cublasStatus);
/* Get handle to the CUSPARSE context */
cusparseHandle_t cusparseHandle = 0;
cusparseStatus_t cusparseStatus;
cusparseStatus = cusparseCreate(&cusparseHandle);
checkCudaErrors(cusparseStatus);
cusparseMatDescr_t descr = 0;
cusparseStatus = cusparseCreateMatDescr(&descr);
checkCudaErrors(cusparseStatus);
cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO);
checkCudaErrors(cudaMalloc((void **)&d_col, nz*sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_row, (N+1)*sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_val, nz*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_x, N*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_r, N*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_p, N*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_Ax, N*sizeof(float)));
cudaMemcpy(d_col, J, nz*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_row, I, (N+1)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_val, val, nz*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_r, rhs, N*sizeof(float), cudaMemcpyHostToDevice);
alpha = 1.0;
alpham1 = -1.0;
beta = 0.0;
r0 = 0.;
double t_start = mclock();
cusparseScsrmv(cusparseHandle,CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_x, &beta, d_Ax);
cublasSaxpy(cublasHandle, N, &alpham1, d_Ax, 1, d_r, 1); // REPLACE THIS FUNCTION (I)
cublasStatus = cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THIS FUNCTION (II)
k = 1;
while (r1 > tol*tol && k <= max_iter)
{
if (k > 1)
{
b = r1 / r0;
cublasStatus = cublasSscal(cublasHandle, N, &b, d_p, 1); // REPLACE THIS FUNCTION (I)
cublasStatus = cublasSaxpy(cublasHandle, N, &alpha, d_r, 1, d_p, 1); // REPLACE THIS FUNCTION (I)
}
else
{
cublasStatus = cublasScopy(cublasHandle, N, d_r, 1, d_p, 1); // REPLACE THIS FUNCTION (I)
}
cusparseScsrmv(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_p, &beta, d_Ax); // REPLACE THIS FUNCTION (III)
cublasStatus = cublasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, &dot); // REPLACE THIS FUNCTION (II)
a = r1 / dot;
cublasStatus = cublasSaxpy(cublasHandle, N, &a, d_p, 1, d_x, 1); // REPLACE THIS FUNCTION (I)
na = -a;
cublasStatus = cublasSaxpy(cublasHandle, N, &na, d_Ax, 1, d_r, 1); // REPLACE THIS FUNCTION (I)
r0 = r1;
cublasStatus = cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THIS FUNCTION (II)
cudaThreadSynchronize();
printf("iteration = %3d, residual = %e\n", k, sqrt(r1));
k++;
}
printf("TIME OF CGS_BASIC = %f\n", mclock() - t_start);
cudaMemcpy(x, d_x, N*sizeof(float), cudaMemcpyDeviceToHost);
float rsum, diff, err = 0.0;
for (int i = 0; i < N; i++)
{
rsum = 0.0;
for (int j = I[i]; j < I[i+1]; j++)
{
rsum += val[j]*x[J[j]];
}
diff = fabs(rsum - rhs[i]);
if (diff > err)
{
err = diff;
}
}
cusparseDestroy(cusparseHandle);
cublasDestroy(cublasHandle);
free(I);
free(J);
free(val);
free(x);
free(rhs);
cudaFree(d_col);
cudaFree(d_row);
cudaFree(d_val);
cudaFree(d_x);
cudaFree(d_r);
cudaFree(d_p);
cudaFree(d_Ax);
cudaDeviceReset();
printf("Test Summary: Error amount = %e\n", err);
//exit((k <= max_iter) ? 0 : 1);
}
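// For reference, the loop above implements the standard CG recurrences:
//   beta  = r1/r0,            p = r + beta*p        (Sscal + Saxpy)
//   alpha = r1 / (p'*A*p)                           (csrmv + Sdot)
//   x     = x + alpha*p                             (Saxpy)
//   r     = r - alpha*A*p                           (Saxpy with -alpha)
//   r1    = r'*r, iterate while r1 > tol^2          (Sdot)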
void cgs_TODO(int argc, char **argv, int N, int M){
//int M = 0, N = 0,
int nz = 0, *I = NULL, *J = NULL;
float *val = NULL;
const float tol = 1e-10f;
const int max_iter = 1000;
float *x;
float *rhs;
float a, b, na, r0, r1;
int *d_col, *d_row;
float *d_val, *d_x, dot;
float *d_r, *d_p, *d_Ax;
int k;
float alpha, beta, alpham1;
// This will pick the best possible CUDA capable device
cudaDeviceProp deviceProp;
int devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0)
{
printf("exiting...\n");
exit(EXIT_SUCCESS);
}
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID));
// Statistics about the GPU device
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x11)
{
printf("%s: requires a minimum CUDA compute 1.1 capability\n", sSDKname);
cudaDeviceReset();
exit(EXIT_SUCCESS);
}
/* Generate a random tridiagonal symmetric matrix in CSR format */
//M = N = 32*64;//10; //1048576;
printf("M = %d, N = %d\n", M, N);
nz = (N-2)*3 + 4;
I = (int *)malloc(sizeof(int)*(N+1));
J = (int *)malloc(sizeof(int)*nz);
val = (float *)malloc(sizeof(float)*nz);
genTridiag(I, J, val, N, nz);
/*
for (int i = 0; i < nz; i++){
printf("%d\t", J[i]);
}
printf("\n");
for (int i = 0; i < nz; i++){
printf("%2f\t", val[i]);
}
*/
x = (float *)malloc(sizeof(float)*N);
rhs = (float *)malloc(sizeof(float)*N);
for (int i = 0; i < N; i++)
{
rhs[i] = 1.0;
x[i] = 0.0;
}
/* Get handle to the CUBLAS context */
cublasHandle_t cublasHandle = 0;
cublasStatus_t cublasStatus;
cublasStatus = cublasCreate(&cublasHandle);
checkCudaErrors(cublasStatus);
/* Get handle to the CUSPARSE context */
cusparseHandle_t cusparseHandle = 0;
cusparseStatus_t cusparseStatus;
cusparseStatus = cusparseCreate(&cusparseHandle);
checkCudaErrors(cusparseStatus);
cusparseMatDescr_t descr = 0;
cusparseStatus = cusparseCreateMatDescr(&descr);
checkCudaErrors(cusparseStatus);
cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO);
checkCudaErrors(cudaMalloc((void **)&d_col, nz*sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_row, (N+1)*sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_val, nz*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_x, N*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_r, N*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_p, N*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_Ax, N*sizeof(float)));
cudaMemcpy(d_col, J, nz*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_row, I, (N+1)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_val, val, nz*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_r, rhs, N*sizeof(float), cudaMemcpyHostToDevice);
alpha = 1.0;
alpham1 = -1.0;
beta = 0.0;
r0 = 0.;
// sparse matrix vector product: d_Ax = A * d_x
cusparseScsrmv(cusparseHandle,CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_x, &beta, d_Ax); // REPLACE THIS FUNCTION (TASK II)
//vectorSparseMatrix <<<blocksPerGrid, threadsPerBlock >>> (N, d_row, d_col, d_val, d_x, d_Ax);
//azpy: d_r = d_r + alpham1 * d_Ax
//cublasSaxpy(cublasHandle, N, &alpham1, d_Ax, 1, d_r, 1); // REPLACE THIS FUNCTION (TASK I)
int threadsPerBlock = 256;
int blocksPerGrid =(N + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
vectorAddAZPY<<<blocksPerGrid, threadsPerBlock>>>(d_r, d_Ax, d_r, N, alpham1);
//err = cudaGetLastError();
//dot: r1 = d_r * d_r
cublasStatus = cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THIS FUNCTION (TASK III)
k = 1;
while (r1 > tol*tol && k <= max_iter)
{
if (k > 1)
{
b = r1 / r0;
//scal: d_p = b * d_p
//cublasStatus = cublasSscal(cublasHandle, N, &b, d_p, 1); // REPLACE THIS FUNCTION (TASK I)
vectorAddSCAL<<<blocksPerGrid, threadsPerBlock>>>(d_p, b, N);
//axpy: d_p = d_p + alpha * d_r
//cublasStatus = cublasSaxpy(cublasHandle, N, &alpha, d_r, 1, d_p, 1); // REPLACE THIS FUNCTION (TASK I)
vectorAddAZPY<<<blocksPerGrid, threadsPerBlock>>>(d_p, d_r, d_p, N, alpha);
}
else
{
//cpy: d_p = d_r
//cublasStatus = cublasScopy(cublasHandle, N, d_r, 1, d_p, 1); // REPLACE THIS FUNCTION (TASK I)
//threadsPerBlock = 256;
//blocksPerGrid =(N + threadsPerBlock - 1) / threadsPerBlock;
vectorAddCOPY<<<blocksPerGrid, threadsPerBlock>>>(d_r, d_p, N);
}
//sparse matrix-vector product: d_Ax = A * d_p
cusparseScsrmv(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_p, &beta, d_Ax); // REPLACE THIS FUNCTION (TASK II)
//vectorSparseMatrix <<<blocksPerGrid, threadsPerBlock >>> (N, d_row, d_col, d_val, d_p, d_Ax);
cublasStatus = cublasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, &dot); // REPLACE THIS FUNCTION (TASK III)
a = r1 / dot;
//axpy: d_x = d_x + a*d_p
//cublasStatus = cublasSaxpy(cublasHandle, N, &a, d_p, 1, d_x, 1); // REPLACE THIS FUNCTION (TASK I)
vectorAddAZPY<<<blocksPerGrid, threadsPerBlock>>>(d_x, d_p, d_x, N, a);
na = -a;
//axpy: d_r = d_r + na * d_Ax
//cublasStatus = cublasSaxpy(cublasHandle, N, &na, d_Ax, 1, d_r, 1); // REPLACE THIS FUNCTION (TASK I)
vectorAddAZPY<<<blocksPerGrid, threadsPerBlock>>>(d_r, d_Ax, d_r, N, na);
r0 = r1;
//dot: r1 = d_r * d_r
cublasStatus = cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THIS FUNCTION (TASK III)
cudaThreadSynchronize();
printf("iteration = %3d, residual = %e\n", k, sqrt(r1));
k++;
}
cudaMemcpy(x, d_x, N*sizeof(float), cudaMemcpyDeviceToHost);
float rsum, diff, err = 0.0;
for (int i = 0; i < N; i++)
{
rsum = 0.0;
for (int j = I[i]; j < I[i+1]; j++)
{
rsum += val[j]*x[J[j]];
}
diff = fabs(rsum - rhs[i]);
if (diff > err)
{
err = diff;
}
}
cusparseDestroy(cusparseHandle);
cublasDestroy(cublasHandle);
free(I);
free(J);
free(val);
free(x);
free(rhs);
cudaFree(d_col);
cudaFree(d_row);
cudaFree(d_val);
cudaFree(d_x);
cudaFree(d_r);
cudaFree(d_p);
cudaFree(d_Ax);
cudaDeviceReset();
printf("Test Summary: Error amount = %e\n", err);
//exit((k <= max_iter) ? 0 : 1);
}
int main(int argc, char **argv)
{
//int N = 1e6;//1 << 20;
//int N = 256 * (1<<10) -10 ; //1e6;//1 << 20;
int N = 1e5;
int M = N;
//cgs_basic(argc, argv, N, M);
cgs_TODO(argc, argv, N, M);
}
|
4345c30daba086c8abd8436fedd467f12aec9b11.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
/**
* @author Brij Mohan Lal Srivastava
*/
#include <stdio.h>
#include <stdlib.h>
#include <float.h>
static const char DELIMITER = ' ';
typedef struct node node;
struct node {
int nodenum;
int fidparts[68][2];
int pose;
int nfid;
};
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
hipError_t _m_cudaStat = value; \
if (_m_cudaStat != hipSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
/**
* CUDA kernel function that calculates minsum of nodes
*/
__global__ void sum(int * nodeData, float * sum, int * combs, int * pose, int * filters, int * cparts, int nodeCount, int combsCount, int dataCount) {
int combIdx = blockIdx.x *blockDim.x + threadIdx.x;
//printf("Thread no. : %d\n", combIdx);
if (combIdx < combsCount) { // one thread per node pair; combs holds two ints per pair
//printf("pass 1\n");
int node1Idx = combs[combIdx * 2];
int node2Idx = combs[combIdx * 2 + 1];
printf("Node indexes %d, %d ... \n", node1Idx, node2Idx);
int node1startIdx = node1Idx * dataCount;
int node2startIdx = node2Idx * dataCount;
int node1pose = pose[node1Idx];
int node2pose = pose[node2Idx];
//printf("pass2\n");
if(abs(node1pose - node2pose) > 3) {
//printf("pass3\n");
sum[combIdx] = -1;
}
else
{
//printf("pass4\n");
int i, j, k;
int node1data[68][2], node2data[68][2], node1fdata[99][2], node2fdata[99][2];
int cnt = 0, start = node1startIdx, end = node1startIdx + dataCount;
for (i = start; i < end; i+=2) {
node1data[cnt][0] = nodeData[i];
node1data[cnt][1] = nodeData[i + 1];
cnt++;
}
cnt = 0; start = node2startIdx; end = node2startIdx + dataCount;
for (i = start; i < end; i+=2) {
node2data[cnt][0] = nodeData[i];
node2data[cnt][1] = nodeData[i + 1];
cnt++;
}
int node1posedata[68], node2posedata[68];
cnt = 0; start = node1pose * 68; end = node1pose * 68 + 68;
for (i = start; i < end; i++) {
node1posedata[cnt] = filters[i];
cnt++;
}
cnt = 0; start = node2pose * 68; end = node2pose * 68 + 68;
for (i = start; i < end; i++) {
node2posedata[cnt] = filters[i];
cnt++;
}
// Re-organise node data
for (i = 0; i < 68; i++) {
if (node1posedata[i] != -1) {
node1fdata[node1posedata[i]][0] = node1data[i][0];
node1fdata[node1posedata[i]][1] = node1data[i][1];
}
}
for (i = 0; i < 68; i++) {
if (node2posedata[i] != -1) {
node2fdata[node2posedata[i]][0] = node2data[i][0];
node2fdata[node2posedata[i]][1] = node2data[i][1];
}
}
// Match and calculate sum
int pose1, pose2;
if(node1pose < node2pose) {
pose1 = node1pose;
pose2 = node2pose;
}
else
{
pose1 = node2pose;
pose2 = node1pose;
}
int cpIdx;
if (pose1 < 11) {
cpIdx = ((4 * (pose1 - 1)) + (pose2 - pose1)) * 68;
}
else
{
if (pose1 == 11) {
cpIdx = 68 * (40 + pose2 - pose1);
}
else if (pose1 == 12) {
cpIdx = 68 * (43 + pose2 - pose1);
}
else
{
cpIdx = 68 * 45;
}
}
int ncparts = 0;
// count common parts up to the -1 sentinel, stepping through the 68-entry block
while (ncparts < 68 && cparts[cpIdx + ncparts] != -1) {
ncparts++;
}
int commonp[68];
int ncpIdx = 0;
for (i = cpIdx; i < cpIdx + 68; i++) {
commonp[ncpIdx] = cparts[i];
ncpIdx++;
}
float min = FLT_MAX;
float csum;
// i, j for local area survey
for (i = -4; i < 5; i++) {
for (j = -4; j < 5; j++) {
csum = 0.0;
// k for matching only common parts
for (k = 0; k < ncparts; k++) {
int x1 = node1fdata[commonp[k]][0] + i;
int x2 = node2fdata[commonp[k]][0];
int y1 = node1fdata[commonp[k]][1] + j; // index by common-part id, matching x1/x2
int y2 = node2fdata[commonp[k]][1];
csum += ((x1 - x2) * (x1 - x2)) + ((y1 - y2) * (y1 - y2));
}
csum = sqrtf(csum) / ncparts;
min = (csum < min) ? csum : min;
}
}
sum[combIdx] = min;
}
}
}
/**
* Util function to split up the string into tokens
*/
char** str_split(char* a_str, const char a_delim) {
char** result = 0;
size_t count = 0;
char* tmp = a_str;
char* last_comma = 0;
char delim[2];
delim[0] = a_delim;
delim[1] = 0;
/* Count how many elements will be extracted. */
while (*tmp) {
if (a_delim == *tmp) {
count++;
last_comma = tmp;
}
tmp++;
}
/* Add space for trailing token. */
count += last_comma < (a_str + strlen(a_str) - 1);
/* Add space for terminating null string so caller
knows where the list of returned strings ends. */
count++;
if(result) {
free(result);
}
result = (char **) malloc(sizeof(char *) * count);
if (result) {
size_t idx = 0;
char* token = strtok(a_str, delim);
while (token) {
*(result + idx++) = strdup(token);
token = strtok(0, delim);
}
*(result + idx) = 0;
}
return result;
}
/**
* Util to calculate nCr combinations
*/
int nCr(int n, int r) {
if(r > n / 2) r = n - r; // because C(n, r) == C(n, n - r)
long long ans = 1;
int i;
for(i = 1; i <= r; i++) {
ans *= n - r + i;
ans /= i;
}
return ans;
}
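// Example: nCr(5, 2) == 10; below this gives the number of unordered node
// pairs, with two ints (the pair of node indices) stored per combination.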
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
int main(int argc, char **argv) {
for (int i = 0; i < argc; ++i)
{
printf("argv[%d]: %s\n", i, argv[i]);
}
int i, j;
char line[80];
char ** tokens;
FILE * fr;
FILE * fFilters;
FILE * fCommon;
int NODE_COUNT;
node ** nodes;
int * pose;
bool meta = true;
bool first = true;
int dataidx;
int nodeidx = 0;
int x, y, w, h;
printf("Started ... \n");
printf("Reading bounding boxes ... \n");
// Read input
fr = fopen("/home/brij/Downloads/bounding_boxes.txt", "rt");
while (fgets(line, 80, fr) != NULL) {
//printf("line = %s\n", line);
if (first) {
sscanf(line, "%d", &NODE_COUNT);
//printf("1 : %d\n", NODE_COUNT);
nodes = (node **) malloc(sizeof(node *) * NODE_COUNT);
pose = (int *) malloc(sizeof(int) * NODE_COUNT);
first = false;
} else if (!first && meta) {
//printf("2\n");
nodes[nodeidx] = (node *) malloc(sizeof(node));
strtok(line, "\n");
tokens = str_split(line, DELIMITER);
if (tokens) {
sscanf(*(tokens + 0), "%d", &(nodes[nodeidx]->nodenum));
sscanf(*(tokens + 1), "%d", &(nodes[nodeidx]->pose));
sscanf(*(tokens + 2), "%d", &(nodes[nodeidx]->nfid));
pose[nodeidx] = nodes[nodeidx]->pose;
for (i = 0; *(tokens + i); i++)
{
//printf("month=[%s]\n", *(tokens + i));
free(*(tokens + i));
}
free(tokens);
//printf("%d, %d, %d\n", nodes[nodeidx]->nodenum, nodes[nodeidx]->pose, nodes[nodeidx]->nfid);
dataidx = 0;
//nodes[nodeidx]->fidparts = (int **)malloc(sizeof(int *) * 68 * 2);
memset(nodes[nodeidx]->fidparts, 0, sizeof(nodes[nodeidx]->fidparts[0][0]) * 68 * 2);
}
meta = false;
} else {
//printf("3\n");
strtok(line, "\n");
tokens = str_split(line, DELIMITER);
if (tokens) {
//printf("Printing tokens...\n");
sscanf(*(tokens + 0), "%d", &x);
sscanf(*(tokens + 1), "%d", &y);
sscanf(*(tokens + 2), "%d", &w);
sscanf(*(tokens + 3), "%d", &h);
for (i = 0; *(tokens + i); i++)
{
//printf("month=[%s]\n", *(tokens + i));
free(*(tokens + i));
}
free(tokens);
//printf("%d, %d, %d, %d\n", x, y, w, h);
}
//printf("4\n");
//nodes[nodeidx]->fidparts[dataidx] = (int *) malloc(sizeof(int) * 2);
nodes[nodeidx]->fidparts[dataidx][0] = x + w / 2;
nodes[nodeidx]->fidparts[dataidx][1] = y + h / 2;
dataidx++;
//printf("data idx : %d\n", dataidx);
if (dataidx == nodes[nodeidx]->nfid) {
meta = true;
nodeidx++;
}
}
}
printf("Reading filter ids ... \n");
fFilters = fopen("/home/brij/Downloads/filter_ids.txt", "rt");
int * filter = (int *) malloc(sizeof(int) * 68 * 13);
int filIdx = 0;
meta = true;
int dataCnt = 0, filpoints = 0, tofill = 0, temp;
while (fgets(line, 80, fFilters) != NULL) {
if (meta) {
strtok(line, "\n");
tokens = str_split(line, DELIMITER);
if (tokens) {
sscanf(*(tokens + 1), "%d", &dataCnt);
filpoints = dataCnt;
for (i = 0; *(tokens + i); i++)
{
free(*(tokens + i));
}
free(tokens);
}
meta = false;
}
else
{
sscanf(line, "%d", &temp);
filter[filIdx] = temp - 1; // To account for 1-indexing in matlab (Thanks to mallik)
dataCnt--;
filIdx++;
if (dataCnt == 0) {
meta = true;
if (filpoints < 68) {
tofill = 68 - filpoints;
for (i = 0; i < tofill; i++) {
filter[filIdx] = -1;
filIdx++;
}
}
}
}
}
fclose(fFilters);
printf("Reading common parts ... \n");
fCommon = fopen("/home/brij/Downloads/common_parts.txt", "rt");
int * cparts = (int *) malloc(sizeof(int) * 46 * 68);
meta = true; filIdx = 0;
while (fgets(line, 80, fCommon) != NULL) {
if (meta) {
strtok(line, "\n");
tokens = str_split(line, DELIMITER);
if (tokens) {
sscanf(*(tokens + 2), "%d", &dataCnt);
filpoints = dataCnt;
for (i = 0; *(tokens + i); i++)
{
free(*(tokens + i));
}
free(tokens);
}
meta = false;
}
else
{
sscanf(line, "%d", &temp);
cparts[filIdx] = temp - 1; // To account for 1-indexing in matlab (Thanks to mallik)
dataCnt--;
filIdx++;
if (dataCnt == 0) {
meta = true;
if (filpoints < 68) {
tofill = 68 - filpoints;
for (i = 0; i < tofill; i++) {
cparts[filIdx] = -1;
filIdx++;
}
}
}
}
}
fclose(fCommon);
//for (i = 0; i < 68*13; i++) {
//printf("fil : %d\n", filter[i]);
//}
int combCount = nCr(NODE_COUNT, 2);
int * combs = (int *) malloc(sizeof(int) * combCount * 2);
int combIdx = 0;
for (i = 0; i < NODE_COUNT - 1; i++) {
for (j = i + 1; j < NODE_COUNT; j++) {
combs[combIdx] = i;
combs[combIdx + 1] = j;
combIdx += 2;
}
}
//printf("combs = %d, last comb index = %d\n", combCount, combIdx);
/*
for (i = 0; i < combCount * 2; i+=2) {
printf("%d, %d\n", combs[i], combs[i + 1]);
}
*/
printf("Nodes = %d\n", NODE_COUNT);
// Flatten 3-d array
int arrSize = sizeof(int) * NODE_COUNT * 68 * 2;
int * nodeData = (int *) malloc(arrSize);
for (i = 0; i < NODE_COUNT; i++) {
for (j = 0; j < 68; j++) {
nodeData[(i * 68 * 2) + (j * 2) + 0] = nodes[i]->fidparts[j][0];
nodeData[(i * 68 * 2) + (j * 2) + 1] = nodes[i]->fidparts[j][1];
}
}
printf("Loading data into GPU ... \n");
// Nodes size
int * d_nodeData;
int * d_combs;
float * h_sums;
float * d_sums;
int * d_pose;
int * d_filters;
int * d_cparts;
h_sums = (float *) malloc(sizeof(float) * combCount);
CUDA_CHECK_RETURN(hipMalloc(&d_nodeData, arrSize));
CUDA_CHECK_RETURN(hipMalloc(&d_sums, sizeof(float) * combCount));
CUDA_CHECK_RETURN(hipMalloc(&d_combs, sizeof(int) * combCount * 2));
CUDA_CHECK_RETURN(hipMalloc(&d_pose, sizeof(int) * NODE_COUNT));
CUDA_CHECK_RETURN(hipMalloc(&d_filters, sizeof(int) * 68 * 13));
CUDA_CHECK_RETURN(hipMalloc(&d_cparts, sizeof(int) * 68 * 46));
CUDA_CHECK_RETURN(hipMemcpy(d_nodeData, nodeData, arrSize, hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(d_combs, combs, sizeof(int) * combCount * 2, hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(d_pose, pose, sizeof(int) * NODE_COUNT, hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(d_filters, filter, sizeof(int) * 68 * 13, hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(d_cparts, cparts, sizeof(int) * 68 * 46, hipMemcpyHostToDevice));
int gridSize, threads;
printf("Combination count = %d \n", combCount);
if (combCount < 1000) {
gridSize = 1;
threads = combCount;
}
else
{
gridSize = (combCount % 1000 == 0) ? combCount / 1000 : combCount / 1000 + 1;
threads = 1000;
}
printf("Launching kernel gridsize = %d, threads = %d... \n", gridSize, threads);
hipLaunchKernelGGL(( sum), dim3(gridSize), dim3(threads), 0, 0, d_nodeData, d_sums, d_combs, d_pose, d_filters, d_cparts, NODE_COUNT, combCount, 68 * 2);
CUDA_CHECK_RETURN(hipDeviceSynchronize()); // Wait for the GPU launched work to complete
CUDA_CHECK_RETURN(hipGetLastError());
CUDA_CHECK_RETURN(hipMemcpy(h_sums, d_sums, sizeof(float) * combCount, hipMemcpyDeviceToHost));
printf("Printing result ... \n");
for (i = 0; i < combCount; i++) {
printf("Sum %d = %f\n", i, h_sums[i]);
}
CUDA_CHECK_RETURN(hipFree((void* ) d_nodeData));
CUDA_CHECK_RETURN(hipFree((void* ) d_combs));
CUDA_CHECK_RETURN(hipFree((void* ) d_sums));
CUDA_CHECK_RETURN(hipFree((void* ) d_pose));
CUDA_CHECK_RETURN(hipFree((void* ) d_filters));
CUDA_CHECK_RETURN(hipFree((void* ) d_cparts));
CUDA_CHECK_RETURN(hipDeviceReset());
fclose(fr);
return 0;
}
|
4345c30daba086c8abd8436fedd467f12aec9b11.cu
|
/* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
/**
* @author Brij Mohan Lal Srivastava
*/
#include <stdio.h>
#include <stdlib.h>
#include <float.h>
static const char DELIMITER = ' ';
typedef struct node node;
struct node {
int nodenum;
int fidparts[68][2];
int pose;
int nfid;
};
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
/**
* CUDA kernel function that calculates minsum of nodes
*/
__global__ void sum(int * nodeData, float * sum, int * combs, int * pose, int * filters, int * cparts, int nodeCount, int combsCount, int dataCount) {
int combIdx = blockIdx.x *blockDim.x + threadIdx.x;
//printf("Thread no. : %d\n", combIdx);
if (combIdx < combsCount) { // one thread per node pair; combs holds two ints per pair
//printf("pass 1\n");
int node1Idx = combs[combIdx * 2];
int node2Idx = combs[combIdx * 2 + 1];
printf("Node indexes %d, %d ... \n", node1Idx, node2Idx);
int node1startIdx = node1Idx * dataCount;
int node2startIdx = node2Idx * dataCount;
int node1pose = pose[node1Idx];
int node2pose = pose[node2Idx];
//printf("pass2\n");
if(abs(node1pose - node2pose) > 3) {
//printf("pass3\n");
sum[combIdx] = -1;
}
else
{
//printf("pass4\n");
int i, j, k;
int node1data[68][2], node2data[68][2], node1fdata[99][2], node2fdata[99][2];
int cnt = 0, start = node1startIdx, end = node1startIdx + dataCount;
for (i = start; i < end; i+=2) {
node1data[cnt][0] = nodeData[i];
node1data[cnt][1] = nodeData[i + 1];
cnt++;
}
cnt = 0; start = node2startIdx; end = node2startIdx + dataCount;
for (i = start; i < end; i+=2) {
node2data[cnt][0] = nodeData[i];
node2data[cnt][1] = nodeData[i + 1];
cnt++;
}
int node1posedata[68], node2posedata[68];
cnt = 0; start = node1pose * 68; end = node1pose * 68 + 68;
for (i = start; i < end; i++) {
node1posedata[cnt] = filters[i];
cnt++;
}
cnt = 0; start = node2pose * 68; end = node2pose * 68 + 68;
for (i = start; i < end; i++) {
node2posedata[cnt] = filters[i];
cnt++;
}
// Re-organise node data
for (i = 0; i < 68; i++) {
if (node1posedata[i] != -1) {
node1fdata[node1posedata[i]][0] = node1data[i][0];
node1fdata[node1posedata[i]][1] = node1data[i][1];
}
}
for (i = 0; i < 68; i++) {
if (node2posedata[i] != -1) {
node2fdata[node2posedata[i]][0] = node2data[i][0];
node2fdata[node2posedata[i]][1] = node2data[i][1];
}
}
// Match and calculate sum
int pose1, pose2;
if(node1pose < node2pose) {
pose1 = node1pose;
pose2 = node2pose;
}
else
{
pose1 = node2pose;
pose2 = node1pose;
}
int cpIdx;
if (pose1 < 11) {
cpIdx = ((4 * (pose1 - 1)) + (pose2 - pose1)) * 68;
}
else
{
if (pose1 == 11) {
cpIdx = 68 * (40 + pose2 - pose1);
}
else if (pose1 == 12) {
cpIdx = 68 * (43 + pose2 - pose1);
}
else
{
cpIdx = 68 * 45;
}
}
int ncparts = 0;
// count common parts up to the -1 sentinel, stepping through the 68-entry block
while (ncparts < 68 && cparts[cpIdx + ncparts] != -1) {
ncparts++;
}
int commonp[68];
int ncpIdx = 0;
for (i = cpIdx; i < cpIdx + 68; i++) {
commonp[ncpIdx] = cparts[i];
ncpIdx++;
}
float min = FLT_MAX;
float csum;
// i, j for local area survey
for (i = -4; i < 5; i++) {
for (j = -4; j < 5; j++) {
csum = 0.0;
// k for matching only common parts
for (k = 0; k < ncparts; k++) {
int x1 = node1fdata[commonp[k]][0] + i;
int x2 = node2fdata[commonp[k]][0];
int y1 = node1fdata[commonp[k]][1] + j; // index by common-part id, matching x1/x2
int y2 = node2fdata[commonp[k]][1];
csum += ((x1 - x2) * (x1 - x2)) + ((y1 - y2) * (y1 - y2));
}
csum = sqrtf(csum) / ncparts;
min = (csum < min) ? csum : min;
}
}
sum[combIdx] = min;
}
}
}
/**
* Util function to split up the string into tokens
*/
char** str_split(char* a_str, const char a_delim) {
char** result = 0;
size_t count = 0;
char* tmp = a_str;
char* last_comma = 0;
char delim[2];
delim[0] = a_delim;
delim[1] = 0;
/* Count how many elements will be extracted. */
while (*tmp) {
if (a_delim == *tmp) {
count++;
last_comma = tmp;
}
tmp++;
}
/* Add space for trailing token. */
count += last_comma < (a_str + strlen(a_str) - 1);
/* Add space for terminating null string so caller
knows where the list of returned strings ends. */
count++;
if(result) {
free(result);
}
result = (char **) malloc(sizeof(char *) * count);
if (result) {
size_t idx = 0;
char* token = strtok(a_str, delim);
while (token) {
*(result + idx++) = strdup(token);
token = strtok(0, delim);
}
*(result + idx) = 0;
}
return result;
}
/**
* Util to calculate nCr combinations
*/
int nCr(int n, int r) {
if(r > n / 2) r = n - r; // because C(n, r) == C(n, n - r)
long long ans = 1;
int i;
for(i = 1; i <= r; i++) {
ans *= n - r + i;
ans /= i;
}
return ans;
}
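// Example: nCr(5, 2) == 10; below this gives the number of unordered node
// pairs, with two ints (the pair of node indices) stored per combination.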
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
int main(int argc, char **argv) {
for (int i = 0; i < argc; ++i)
{
printf("argv[%d]: %s\n", i, argv[i]);
}
int i, j;
char line[80];
char ** tokens;
FILE * fr;
FILE * fFilters;
FILE * fCommon;
int NODE_COUNT;
node ** nodes;
int * pose;
bool meta = true;
bool first = true;
int dataidx;
int nodeidx = 0;
int x, y, w, h;
printf("Started ... \n");
printf("Reading bounding boxes ... \n");
// Read input
fr = fopen("/home/brij/Downloads/bounding_boxes.txt", "rt");
while (fgets(line, 80, fr) != NULL) {
//printf("line = %s\n", line);
if (first) {
sscanf(line, "%d", &NODE_COUNT);
//printf("1 : %d\n", NODE_COUNT);
nodes = (node **) malloc(sizeof(node *) * NODE_COUNT);
pose = (int *) malloc(sizeof(int) * NODE_COUNT);
first = false;
} else if (!first && meta) {
//printf("2\n");
nodes[nodeidx] = (node *) malloc(sizeof(node));
strtok(line, "\n");
tokens = str_split(line, DELIMITER);
if (tokens) {
sscanf(*(tokens + 0), "%d", &(nodes[nodeidx]->nodenum));
sscanf(*(tokens + 1), "%d", &(nodes[nodeidx]->pose));
sscanf(*(tokens + 2), "%d", &(nodes[nodeidx]->nfid));
pose[nodeidx] = nodes[nodeidx]->pose;
for (i = 0; *(tokens + i); i++)
{
//printf("month=[%s]\n", *(tokens + i));
free(*(tokens + i));
}
free(tokens);
//printf("%d, %d, %d\n", nodes[nodeidx]->nodenum, nodes[nodeidx]->pose, nodes[nodeidx]->nfid);
dataidx = 0;
//nodes[nodeidx]->fidparts = (int **)malloc(sizeof(int *) * 68 * 2);
memset(nodes[nodeidx]->fidparts, 0, sizeof(nodes[nodeidx]->fidparts[0][0]) * 68 * 2);
}
meta = false;
} else {
//printf("3\n");
strtok(line, "\n");
tokens = str_split(line, DELIMITER);
if (tokens) {
//printf("Printing tokens...\n");
sscanf(*(tokens + 0), "%d", &x);
sscanf(*(tokens + 1), "%d", &y);
sscanf(*(tokens + 2), "%d", &w);
sscanf(*(tokens + 3), "%d", &h);
for (i = 0; *(tokens + i); i++)
{
//printf("month=[%s]\n", *(tokens + i));
free(*(tokens + i));
}
free(tokens);
//printf("%d, %d, %d, %d\n", x, y, w, h);
}
//printf("4\n");
//nodes[nodeidx]->fidparts[dataidx] = (int *) malloc(sizeof(int) * 2);
nodes[nodeidx]->fidparts[dataidx][0] = x + w / 2;
nodes[nodeidx]->fidparts[dataidx][1] = y + h / 2;
dataidx++;
//printf("data idx : %d\n", dataidx);
if (dataidx == nodes[nodeidx]->nfid) {
meta = true;
nodeidx++;
}
}
}
printf("Reading filter ids ... \n");
fFilters = fopen("/home/brij/Downloads/filter_ids.txt", "rt");
int * filter = (int *) malloc(sizeof(int) * 68 * 13);
int filIdx = 0;
meta = true;
int dataCnt = 0, filpoints = 0, tofill = 0, temp;
while (fgets(line, 80, fFilters) != NULL) {
if (meta) {
strtok(line, "\n");
tokens = str_split(line, DELIMITER);
if (tokens) {
sscanf(*(tokens + 1), "%d", &dataCnt);
filpoints = dataCnt;
for (i = 0; *(tokens + i); i++)
{
free(*(tokens + i));
}
free(tokens);
}
meta = false;
}
else
{
sscanf(line, "%d", &temp);
filter[filIdx] = temp - 1; // To account for 1-indexing in matlab (Thanks to mallik)
dataCnt--;
filIdx++;
if (dataCnt == 0) {
meta = true;
if (filpoints < 68) {
tofill = 68 - filpoints;
for (i = 0; i < tofill; i++) {
filter[filIdx] = -1;
filIdx++;
}
}
}
}
}
fclose(fFilters);
printf("Reading common parts ... \n");
fCommon = fopen("/home/brij/Downloads/common_parts.txt", "rt");
int * cparts = (int *) malloc(sizeof(int) * 46 * 68);
meta = true; filIdx = 0;
while (fgets(line, 80, fCommon) != NULL) {
if (meta) {
strtok(line, "\n");
tokens = str_split(line, DELIMITER);
if (tokens) {
sscanf(*(tokens + 2), "%d", &dataCnt);
filpoints = dataCnt;
for (i = 0; *(tokens + i); i++)
{
free(*(tokens + i));
}
free(tokens);
}
meta = false;
}
else
{
sscanf(line, "%d", &temp);
cparts[filIdx] = temp - 1; // To account for 1-indexing in matlab (Thanks to mallik)
dataCnt--;
filIdx++;
if (dataCnt == 0) {
meta = true;
if (filpoints < 68) {
tofill = 68 - filpoints;
for (i = 0; i < tofill; i++) {
cparts[filIdx] = -1;
filIdx++;
}
}
}
}
}
fclose(fCommon);
//for (i = 0; i < 68*13; i++) {
//printf("fil : %d\n", filter[i]);
//}
int combCount = nCr(NODE_COUNT, 2);
int * combs = (int *) malloc(sizeof(int) * combCount * 2);
int combIdx = 0;
for (i = 0; i < NODE_COUNT - 1; i++) {
for (j = i + 1; j < NODE_COUNT; j++) {
combs[combIdx] = i;
combs[combIdx + 1] = j;
combIdx += 2;
}
}
//printf("combs = %d, last comb index = %d\n", combCount, combIdx);
/*
for (i = 0; i < combCount * 2; i+=2) {
printf("%d, %d\n", combs[i], combs[i + 1]);
}
*/
printf("Nodes = %d\n", NODE_COUNT);
// Flatten 3-d array
int arrSize = sizeof(int) * NODE_COUNT * 68 * 2;
int * nodeData = (int *) malloc(arrSize);
for (i = 0; i < NODE_COUNT; i++) {
for (j = 0; j < 68; j++) {
nodeData[(i * 68 * 2) + (j * 2) + 0] = nodes[i]->fidparts[j][0];
nodeData[(i * 68 * 2) + (j * 2) + 1] = nodes[i]->fidparts[j][1];
}
}
printf("Loading data into GPU ... \n");
// Nodes size
int * d_nodeData;
int * d_combs;
float * h_sums;
float * d_sums;
int * d_pose;
int * d_filters;
int * d_cparts;
h_sums = (float *) malloc(sizeof(float) * combCount);
CUDA_CHECK_RETURN(cudaMalloc(&d_nodeData, arrSize));
CUDA_CHECK_RETURN(cudaMalloc(&d_sums, sizeof(float) * combCount));
CUDA_CHECK_RETURN(cudaMalloc(&d_combs, sizeof(int) * combCount * 2));
CUDA_CHECK_RETURN(cudaMalloc(&d_pose, sizeof(int) * NODE_COUNT));
CUDA_CHECK_RETURN(cudaMalloc(&d_filters, sizeof(int) * 68 * 13));
CUDA_CHECK_RETURN(cudaMalloc(&d_cparts, sizeof(int) * 68 * 46));
CUDA_CHECK_RETURN(cudaMemcpy(d_nodeData, nodeData, arrSize, cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(d_combs, combs, sizeof(int) * combCount * 2, cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(d_pose, pose, sizeof(int) * NODE_COUNT, cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(d_filters, filter, sizeof(int) * 68 * 13, cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(d_cparts, cparts, sizeof(int) * 68 * 46, cudaMemcpyHostToDevice));
int gridSize, threads;
printf("Combination count = %d \n", combCount);
if (combCount < 1000) {
gridSize = 1;
threads = combCount;
}
else
{
gridSize = (combCount % 1000 == 0) ? combCount / 1000 : combCount / 1000 + 1;
threads = 1000;
}
printf("Launching kernel gridsize = %d, threads = %d... \n", gridSize, threads);
sum<<<gridSize, threads>>> (d_nodeData, d_sums, d_combs, d_pose, d_filters, d_cparts, NODE_COUNT, combCount, 68 * 2);
CUDA_CHECK_RETURN(cudaDeviceSynchronize()); // Wait for the GPU launched work to complete
CUDA_CHECK_RETURN(cudaGetLastError());
CUDA_CHECK_RETURN(cudaMemcpy(h_sums, d_sums, sizeof(float) * combCount, cudaMemcpyDeviceToHost));
printf("Printing result ... \n");
for (i = 0; i < combCount; i++) {
printf("Sum %d = %f\n", i, h_sums[i]);
}
CUDA_CHECK_RETURN(cudaFree((void* ) d_nodeData));
CUDA_CHECK_RETURN(cudaFree((void* ) d_combs));
CUDA_CHECK_RETURN(cudaFree((void* ) d_sums));
CUDA_CHECK_RETURN(cudaFree((void* ) d_pose));
CUDA_CHECK_RETURN(cudaFree((void* ) d_filters));
CUDA_CHECK_RETURN(cudaFree((void* ) d_cparts));
CUDA_CHECK_RETURN(cudaDeviceReset());
fclose(fr);
return 0;
}
|
e73d9d2b34d1f1cd9486dfb75f1817150524ac5d.hip
|
// !!! This is a file automatically generated by hipify!!!
//pass: checks a return value of type "pointer to function"
//--blockDim=1024 --gridDim=1 --no-inline
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#define N 2//1024
typedef float(*funcType)(float*, unsigned int);
__device__ float multiplyByTwo(float *v, unsigned int tid)
{
return v[tid] * 2.0f;
}
__device__ float divideByTwo(float *v, unsigned int tid)
{
return v[tid] * 0.5f;
}
__device__ funcType grabFunction(int i) {
__requires(i != 0);
//__ensures(__return_val_funptr(funcType) == divideByTwo);
if (i == 0)
return multiplyByTwo;
else
return divideByTwo;
}
__global__ void foo(float *v, unsigned int size, int i)
{
__requires(i != 0);
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
funcType f = grabFunction(i);
if (tid < size)
{
float x = (*f)(v, tid);
x += multiplyByTwo(v, tid);
v[threadIdx.x] = x;
}
}
|
e73d9d2b34d1f1cd9486dfb75f1817150524ac5d.cu
|
//pass: checks a return value of type "pointer to function"
//--blockDim=1024 --gridDim=1 --no-inline
#include <stdio.h>
#include <cuda.h>
#include <assert.h>
#define N 2//1024
typedef float(*funcType)(float*, unsigned int);
__device__ float multiplyByTwo(float *v, unsigned int tid)
{
return v[tid] * 2.0f;
}
__device__ float divideByTwo(float *v, unsigned int tid)
{
return v[tid] * 0.5f;
}
__device__ funcType grabFunction(int i) {
__requires(i != 0);
//__ensures(__return_val_funptr(funcType) == divideByTwo);
if (i == 0)
return multiplyByTwo;
else
return divideByTwo;
}
__global__ void foo(float *v, unsigned int size, int i)
{
__requires(i != 0);
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
funcType f = grabFunction(i);
if (tid < size)
{
float x = (*f)(v, tid);
x += multiplyByTwo(v, tid);
v[threadIdx.x] = x;
}
}
|
8f3af7a3f036a00ca727805b22940fa47a67322f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define THREADS 256
__global__ void select_matches(
unsigned* idx_ptr,
int* dist_ptr,
const unsigned* in_idx,
const int* in_dist,
const unsigned nfeat,
const unsigned nelem,
const int max_dist)
{
unsigned f = blockIdx.x * blockDim.x + threadIdx.x;
unsigned sid = threadIdx.x * blockDim.y + threadIdx.y;
__shared__ int s_dist[THREADS];
__shared__ unsigned s_idx[THREADS];
s_dist[sid] = max_dist;
if (f < nfeat) {
for (unsigned i = threadIdx.y; i < nelem; i += blockDim.y) {
int dist = in_dist[f * nelem + i];
// Copy all best matches previously found in nearest_neighbour() to
// shared memory
if (dist < s_dist[sid]) {
s_dist[sid] = dist;
s_idx[sid] = in_idx[f * nelem + i];
}
}
}
__syncthreads();
// Reduce best matches and find the best of them all
for (unsigned i = blockDim.y / 2; i > 0; i >>= 1) {
if (threadIdx.y < i) {
int dist = s_dist[sid + i];
if (dist < s_dist[sid]) {
s_dist[sid] = dist;
s_idx[sid] = s_idx[sid + i];
}
}
// the barrier must be reached by every thread in the block, so it sits outside the divergent branch
__syncthreads();
}
// Store best matches and indexes to training dataset
if (threadIdx.y == 0 && f < nfeat) {
dist_ptr[f] = s_dist[threadIdx.x * blockDim.y];
idx_ptr[f] = s_idx[threadIdx.x * blockDim.y];
}
}
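// Expected launch shape (a sketch inferred from the shared-memory layout, not
// from any host code in this file): a 2D block with
// blockDim.x * blockDim.y == THREADS and blockDim.y a power of two for the
// reduction, e.g.
//   dim3 threads(8, 32);                                   // 8*32 == THREADS
//   dim3 blocks((nfeat + threads.x - 1) / threads.x);
//   hipLaunchKernelGGL(select_matches, blocks, threads, 0, 0,
//                      idx_ptr, dist_ptr, in_idx, in_dist, nfeat, nelem, max_dist);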
|
8f3af7a3f036a00ca727805b22940fa47a67322f.cu
|
#define THREADS 256
__global__ void select_matches(
unsigned* idx_ptr,
int* dist_ptr,
const unsigned* in_idx,
const int* in_dist,
const unsigned nfeat,
const unsigned nelem,
const int max_dist)
{
unsigned f = blockIdx.x * blockDim.x + threadIdx.x;
unsigned sid = threadIdx.x * blockDim.y + threadIdx.y;
__shared__ int s_dist[THREADS];
__shared__ unsigned s_idx[THREADS];
s_dist[sid] = max_dist;
if (f < nfeat) {
for (unsigned i = threadIdx.y; i < nelem; i += blockDim.y) {
int dist = in_dist[f * nelem + i];
// Copy all best matches previously found in nearest_neighbour() to
// shared memory
if (dist < s_dist[sid]) {
s_dist[sid] = dist;
s_idx[sid] = in_idx[f * nelem + i];
}
}
}
__syncthreads();
// Reduce best matches and find the best of them all
for (unsigned i = blockDim.y / 2; i > 0; i >>= 1) {
if (threadIdx.y < i) {
int dist = s_dist[sid + i];
if (dist < s_dist[sid]) {
s_dist[sid] = dist;
s_idx[sid] = s_idx[sid + i];
}
}
// the barrier must be reached by every thread in the block, so it sits outside the divergent branch
__syncthreads();
}
// Store best matches and indexes to training dataset
if (threadIdx.y == 0 && f < nfeat) {
dist_ptr[f] = s_dist[threadIdx.x * blockDim.y];
idx_ptr[f] = s_idx[threadIdx.x * blockDim.y];
}
}
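// Expected launch shape (a sketch inferred from the shared-memory layout, not
// from any host code in this file): a 2D block with
// blockDim.x * blockDim.y == THREADS and blockDim.y a power of two for the
// reduction, e.g.
//   dim3 threads(8, 32);                                   // 8*32 == THREADS
//   dim3 blocks((nfeat + threads.x - 1) / threads.x);
//   select_matches<<<blocks, threads>>>(idx_ptr, dist_ptr, in_idx, in_dist, nfeat, nelem, max_dist);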
|
7be31df37bd8fc4ea78e6a4ad4b550893685ff1f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <chrono>
#include "../constants_bench_3d.h"
#include <hip/hip_runtime.h>
#include <iostream>
#include "../../../../utils.h"
// Dataset
constexpr auto N = mean_shift::cuda::bench_3d::case_2000::N;
constexpr auto D = mean_shift::cuda::bench_3d::D;
constexpr auto M = mean_shift::cuda::bench_3d::M;
const auto PATH_TO_DATA = mean_shift::cuda::bench_3d::case_2000::PATH_TO_DATA;
const auto PATH_TO_CENTROIDS = mean_shift::cuda::bench_3d::case_2000::PATH_TO_CENTROIDS;
const auto LOG_SM = mean_shift::cuda::bench_3d::case_2000::LOG_SM;
// Hyperparams
constexpr auto RADIUS = mean_shift::cuda::bench_3d::case_2000::RADIUS;
constexpr auto NUM_ITER = mean_shift::cuda::bench_3d::NUM_ITER;
constexpr auto DBL_SIGMA_SQ = mean_shift::cuda::bench_3d::case_2000::DBL_SIGMA_SQ;
constexpr auto MIN_DISTANCE = mean_shift::cuda::bench_3d::case_2000::MIN_DISTANCE;
// Device
constexpr auto THREADS = mean_shift::cuda::bench_3d::THREADS;
constexpr auto BLOCKS = mean_shift::cuda::bench_3d::case_2000::BLOCKS;
constexpr auto TILE_WIDTH = mean_shift::cuda::bench_3d::TILE_WIDTH;
// Benchmarking
constexpr auto NUM_TRIALS = mean_shift::cuda::bench_3d::NUM_TRIALS;
__global__ void mean_shift_tiling(const float* data, float* data_next) {
// Shared memory allocation
__shared__ float local_data[TILE_WIDTH * D];
__shared__ float valid_data[TILE_WIDTH];
// A few convenient variables
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
int row = tid * D;
int local_row = threadIdx.x * D;
float new_position[D] = {0.};
float tot_weight = 0.;
// Load data in shared memory
for (int t = 0; t < BLOCKS; ++t) {
int tid_in_tile = t * TILE_WIDTH + threadIdx.x;
if (tid_in_tile < N) {
int row_in_tile = tid_in_tile * D;
for (int j = 0; j < D; ++j) {
local_data[local_row + j] = data[row_in_tile + j];
}
valid_data[threadIdx.x] = 1;
}
else {
for (int j = 0; j < D; ++j) {
local_data[local_row + j] = 0;
valid_data[threadIdx.x] = 0;
}
}
__syncthreads();
for (int i = 0; i < TILE_WIDTH; ++i) {
int local_row_tile = i * D;
float valid_radius = RADIUS * valid_data[i];
float sq_dist = 0.;
for (int j = 0; j < D; ++j) {
sq_dist += (data[row + j] - local_data[local_row_tile + j]) * (data[row + j] - local_data[local_row_tile + j]);
}
if (sq_dist <= valid_radius) {
float weight = expf(-sq_dist / DBL_SIGMA_SQ);
for (int j = 0; j < D; ++j) {
new_position[j] += (weight * local_data[local_row_tile + j]);
}
tot_weight += (weight * valid_data[i]);
}
}
__syncthreads();
}
if (tid < N) {
for (int j = 0; j < D; ++j) {
data_next[row + j] = new_position[j] / tot_weight;
}
}
return;
}
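// In formula form, the kernel above moves each point x to the weighted mean
//   x' = sum_j w_j * x_j / sum_j w_j,  with  w_j = exp(-||x - x_j||^2 / DBL_SIGMA_SQ),
// where the sum runs over points x_j with ||x - x_j||^2 <= RADIUS; the tiling
// only stages those candidate points through shared memory.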
double run_once() {
// Load data
std::array<float, N * D> data = mean_shift::cuda::utils::load_csv<N, D>(PATH_TO_DATA, ',');
std::array<float, N * D> data_next {};
float *dev_data;
float *dev_data_next;
// Allocate GPU memory
size_t data_bytes = N * D * sizeof(float);
hipMalloc(&dev_data, data_bytes);
hipMalloc(&dev_data_next, data_bytes);
// Copy to GPU memory
hipMemcpy(dev_data, data.data(), data_bytes, hipMemcpyHostToDevice);
hipMemcpy(dev_data_next, data_next.data(), data_bytes, hipMemcpyHostToDevice);
// Run mean shift clustering
auto start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < NUM_ITER; ++i) {
hipLaunchKernelGGL(( mean_shift_tiling), dim3(BLOCKS), dim3(THREADS), 0, 0, dev_data, dev_data_next);
hipDeviceSynchronize();
mean_shift::cuda::utils::swap(dev_data, dev_data_next);
}
hipMemcpy(data.data(), dev_data, data_bytes, hipMemcpyDeviceToHost);
const auto centroids = mean_shift::cuda::utils::reduce_to_centroids<N, D>(data, MIN_DISTANCE);
auto end = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
// Check if correct number
assert(centroids.size() == M);
return duration;
}
int main() {
std::array<double, NUM_TRIALS> exec_times;
for (auto i = 0; i < NUM_TRIALS; ++i)
exec_times[i] = run_once();
mean_shift::cuda::utils::write_csv<double, NUM_TRIALS>(exec_times, LOG_SM, ',');
return 0;
}
|
7be31df37bd8fc4ea78e6a4ad4b550893685ff1f.cu
|
#include <cassert>
#include <chrono>
#include "../constants_bench_3d.h"
#include <cuda.h>
#include <iostream>
#include "../../../../utils.h"
// Dataset
constexpr auto N = mean_shift::cuda::bench_3d::case_2000::N;
constexpr auto D = mean_shift::cuda::bench_3d::D;
constexpr auto M = mean_shift::cuda::bench_3d::M;
const auto PATH_TO_DATA = mean_shift::cuda::bench_3d::case_2000::PATH_TO_DATA;
const auto PATH_TO_CENTROIDS = mean_shift::cuda::bench_3d::case_2000::PATH_TO_CENTROIDS;
const auto LOG_SM = mean_shift::cuda::bench_3d::case_2000::LOG_SM;
// Hyperparams
constexpr auto RADIUS = mean_shift::cuda::bench_3d::case_2000::RADIUS;
constexpr auto NUM_ITER = mean_shift::cuda::bench_3d::NUM_ITER;
constexpr auto DBL_SIGMA_SQ = mean_shift::cuda::bench_3d::case_2000::DBL_SIGMA_SQ;
constexpr auto MIN_DISTANCE = mean_shift::cuda::bench_3d::case_2000::MIN_DISTANCE;
// Device
constexpr auto THREADS = mean_shift::cuda::bench_3d::THREADS;
constexpr auto BLOCKS = mean_shift::cuda::bench_3d::case_2000::BLOCKS;
constexpr auto TILE_WIDTH = mean_shift::cuda::bench_3d::TILE_WIDTH;
// Benchmarking
constexpr auto NUM_TRIALS = mean_shift::cuda::bench_3d::NUM_TRIALS;
__global__ void mean_shift_tiling(const float* data, float* data_next) {
// Shared memory allocation
__shared__ float local_data[TILE_WIDTH * D];
__shared__ float valid_data[TILE_WIDTH];
// A few convenient variables
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
int row = tid * D;
int local_row = threadIdx.x * D;
float new_position[D] = {0.};
float tot_weight = 0.;
// Load data in shared memory
for (int t = 0; t < BLOCKS; ++t) {
int tid_in_tile = t * TILE_WIDTH + threadIdx.x;
if (tid_in_tile < N) {
int row_in_tile = tid_in_tile * D;
for (int j = 0; j < D; ++j) {
local_data[local_row + j] = data[row_in_tile + j];
}
valid_data[threadIdx.x] = 1;
}
        else {
            for (int j = 0; j < D; ++j) {
                local_data[local_row + j] = 0;
            }
            valid_data[threadIdx.x] = 0;
        }
__syncthreads();
        // Only threads that own a real point accumulate; this also prevents
        // out-of-bounds reads of data[row + j] when tid >= N
        if (tid < N) {
            for (int i = 0; i < TILE_WIDTH; ++i) {
                int local_row_tile = i * D;
                float valid_radius = RADIUS * valid_data[i];
                float sq_dist = 0.;
                for (int j = 0; j < D; ++j) {
                    sq_dist += (data[row + j] - local_data[local_row_tile + j]) * (data[row + j] - local_data[local_row_tile + j]);
                }
                if (sq_dist <= valid_radius) {
                    float weight = expf(-sq_dist / DBL_SIGMA_SQ);
                    for (int j = 0; j < D; ++j) {
                        new_position[j] += (weight * local_data[local_row_tile + j]);
                    }
                    tot_weight += (weight * valid_data[i]);
                }
            }
        }
__syncthreads();
}
if (tid < N) {
for (int j = 0; j < D; ++j) {
data_next[row + j] = new_position[j] / tot_weight;
}
}
return;
}
double run_once() {
// Load data
std::array<float, N * D> data = mean_shift::cuda::utils::load_csv<N, D>(PATH_TO_DATA, ',');
std::array<float, N * D> data_next {};
float *dev_data;
float *dev_data_next;
// Allocate GPU memory
size_t data_bytes = N * D * sizeof(float);
cudaMalloc(&dev_data, data_bytes);
cudaMalloc(&dev_data_next, data_bytes);
// Copy to GPU memory
cudaMemcpy(dev_data, data.data(), data_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(dev_data_next, data_next.data(), data_bytes, cudaMemcpyHostToDevice);
// Run mean shift clustering
auto start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < NUM_ITER; ++i) {
mean_shift_tiling<<<BLOCKS, THREADS>>>(dev_data, dev_data_next);
cudaDeviceSynchronize();
mean_shift::cuda::utils::swap(dev_data, dev_data_next);
}
cudaMemcpy(data.data(), dev_data, data_bytes, cudaMemcpyDeviceToHost);
const auto centroids = mean_shift::cuda::utils::reduce_to_centroids<N, D>(data, MIN_DISTANCE);
auto end = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
// Check if correct number
assert(centroids.size() == M);
return duration;
}
int main() {
std::array<double, NUM_TRIALS> exec_times;
for (auto i = 0; i < NUM_TRIALS; ++i)
exec_times[i] = run_once();
mean_shift::cuda::utils::write_csv<double, NUM_TRIALS>(exec_times, LOG_SM, ',');
return 0;
}
|
788adbdf4d79d751fd17fcaa394c06f5d0f5b492.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/ztrtri_lower.cu, normal z -> s, Mon Jun 25 18:24:13 2018
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
This file implements lower case, and is called by strtri_kernel.cu.
It's convenient to have separate files for lower & upper, to diff the sources.
*/
#include "magma_internal.h"
#define TRTRI_NONBATCHED
#include "strtri.cuh"
#include "strtri_lower_device.cuh"
/******************************************************************************/
__global__ void
strtri_diag_lower_kernel(
magma_diag_t diag, int n, const float *A, int lda, float *d_dinvA)
{
strtri_diag_lower_device(diag, n, A, lda, d_dinvA);
}
/******************************************************************************/
__global__ void
triple_sgemm16_part1_lower_kernel(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
triple_sgemm16_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_sgemm16_part2_lower_kernel(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
triple_sgemm16_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_sgemm32_part1_lower_kernel(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
triple_sgemm32_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_sgemm32_part2_lower_kernel(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
triple_sgemm32_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_sgemm64_part1_lower_kernel(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
triple_sgemm64_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_sgemm64_part2_lower_kernel(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
triple_sgemm64_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_sgemm_above64_part1_lower_kernel(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
triple_sgemm_above64_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_sgemm_above64_part2_lower_kernel(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
triple_sgemm_above64_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_sgemm_above64_part3_lower_kernel(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
triple_sgemm_above64_part3_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
|
788adbdf4d79d751fd17fcaa394c06f5d0f5b492.cu
|
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/ztrtri_lower.cu, normal z -> s, Mon Jun 25 18:24:13 2018
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
This file implements lower case, and is called by strtri_kernel.cu.
It's convenient to have separate files for lower & upper, to diff the sources.
*/
#include "magma_internal.h"
#define TRTRI_NONBATCHED
#include "strtri.cuh"
#include "strtri_lower_device.cuh"
/******************************************************************************/
__global__ void
strtri_diag_lower_kernel(
magma_diag_t diag, int n, const float *A, int lda, float *d_dinvA)
{
strtri_diag_lower_device(diag, n, A, lda, d_dinvA);
}
/******************************************************************************/
__global__ void
triple_sgemm16_part1_lower_kernel(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
triple_sgemm16_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_sgemm16_part2_lower_kernel(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
triple_sgemm16_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_sgemm32_part1_lower_kernel(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
triple_sgemm32_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_sgemm32_part2_lower_kernel(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
triple_sgemm32_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_sgemm64_part1_lower_kernel(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
triple_sgemm64_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_sgemm64_part2_lower_kernel(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
triple_sgemm64_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_sgemm_above64_part1_lower_kernel(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
triple_sgemm_above64_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_sgemm_above64_part2_lower_kernel(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
triple_sgemm_above64_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_sgemm_above64_part3_lower_kernel(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
triple_sgemm_above64_part3_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
|
3da726ebfda375768e46bbf30924b84b17afe3d5.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include "./knn_utils.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/neighbors/brute_force.cuh>
#include <raft/random/rng.cuh>
#include <raft/spatial/knn/knn.cuh>
#include <raft/distance/distance.cuh>
#include <rmm/device_buffer.hpp>
#include <gtest/gtest.h>
#include <cstddef>
#include <iostream>
#include <vector>
namespace raft {
namespace spatial {
namespace knn {
struct FusedL2KNNInputs {
int num_queries;
int num_db_vecs;
int dim;
int k;
raft::distance::DistanceType metric_;
};
template <typename T>
class FusedL2KNNTest : public ::testing::TestWithParam<FusedL2KNNInputs> {
public:
FusedL2KNNTest()
: stream_(resource::get_cuda_stream(handle_)),
params_(::testing::TestWithParam<FusedL2KNNInputs>::GetParam()),
database(params_.num_db_vecs * params_.dim, stream_),
search_queries(params_.num_queries * params_.dim, stream_),
raft_indices_(params_.num_queries * params_.k, stream_),
raft_distances_(params_.num_queries * params_.k, stream_),
ref_indices_(params_.num_queries * params_.k, stream_),
ref_distances_(params_.num_queries * params_.k, stream_)
{
RAFT_CUDA_TRY(hipMemsetAsync(database.data(), 0, database.size() * sizeof(T), stream_));
RAFT_CUDA_TRY(
hipMemsetAsync(search_queries.data(), 0, search_queries.size() * sizeof(T), stream_));
RAFT_CUDA_TRY(
hipMemsetAsync(raft_indices_.data(), 0, raft_indices_.size() * sizeof(int64_t), stream_));
RAFT_CUDA_TRY(
hipMemsetAsync(raft_distances_.data(), 0, raft_distances_.size() * sizeof(T), stream_));
RAFT_CUDA_TRY(
hipMemsetAsync(ref_indices_.data(), 0, ref_indices_.size() * sizeof(int64_t), stream_));
RAFT_CUDA_TRY(
hipMemsetAsync(ref_distances_.data(), 0, ref_distances_.size() * sizeof(T), stream_));
}
protected:
void testBruteForce()
{
// calculate the naive knn, by calculating the full pairwise distances and doing a k-select
rmm::device_uvector<T> temp_distances(num_db_vecs * num_queries, stream_);
distance::pairwise_distance(
handle_,
raft::make_device_matrix_view<T, int32_t>(search_queries.data(), num_queries, dim),
raft::make_device_matrix_view<T, int32_t>(database.data(), num_db_vecs, dim),
raft::make_device_matrix_view<T, int32_t>(temp_distances.data(), num_queries, num_db_vecs),
metric);
spatial::knn::select_k<int64_t, T>(temp_distances.data(),
nullptr,
num_queries,
num_db_vecs,
ref_distances_.data(),
ref_indices_.data(),
true,
k_,
stream_);
auto index_view =
raft::make_device_matrix_view<const T, int64_t>(database.data(), num_db_vecs, dim);
auto query_view =
raft::make_device_matrix_view<const T, int64_t>(search_queries.data(), num_queries, dim);
auto out_indices_view =
raft::make_device_matrix_view<int64_t, int64_t>(raft_indices_.data(), num_queries, k_);
auto out_dists_view =
raft::make_device_matrix_view<T, int64_t>(raft_distances_.data(), num_queries, k_);
raft::neighbors::brute_force::fused_l2_knn(
handle_, index_view, query_view, out_indices_view, out_dists_view, metric);
// verify.
ASSERT_TRUE(devArrMatchKnnPair(ref_indices_.data(),
raft_indices_.data(),
ref_distances_.data(),
raft_distances_.data(),
num_queries,
k_,
float(0.001),
stream_));
}
void SetUp() override
{
num_queries = params_.num_queries;
num_db_vecs = params_.num_db_vecs;
dim = params_.dim;
k_ = params_.k;
metric = params_.metric_;
unsigned long long int seed = 1234ULL;
raft::random::RngState r(seed);
uniform(handle_, r, database.data(), num_db_vecs * dim, T(-1.0), T(1.0));
uniform(handle_, r, search_queries.data(), num_queries * dim, T(-1.0), T(1.0));
}
private:
raft::resources handle_;
hipStream_t stream_ = 0;
FusedL2KNNInputs params_;
int num_queries;
int num_db_vecs;
int dim;
rmm::device_uvector<T> database;
rmm::device_uvector<T> search_queries;
rmm::device_uvector<int64_t> raft_indices_;
rmm::device_uvector<T> raft_distances_;
rmm::device_uvector<int64_t> ref_indices_;
rmm::device_uvector<T> ref_distances_;
int k_;
raft::distance::DistanceType metric;
};
const std::vector<FusedL2KNNInputs> inputs = {
{100, 1000, 16, 10, raft::distance::DistanceType::L2Expanded},
{256, 256, 30, 10, raft::distance::DistanceType::L2Expanded},
{1000, 10000, 16, 10, raft::distance::DistanceType::L2Expanded},
{100, 1000, 16, 50, raft::distance::DistanceType::L2Expanded},
{20, 10000, 16, 10, raft::distance::DistanceType::L2Expanded},
{1000, 10000, 16, 50, raft::distance::DistanceType::L2Expanded},
{1000, 10000, 32, 50, raft::distance::DistanceType::L2Expanded},
{10000, 40000, 32, 30, raft::distance::DistanceType::L2Expanded},
// L2 unexpanded
{100, 1000, 16, 10, raft::distance::DistanceType::L2Unexpanded},
{1000, 10000, 16, 10, raft::distance::DistanceType::L2Unexpanded},
{100, 1000, 16, 50, raft::distance::DistanceType::L2Unexpanded},
{20, 10000, 16, 50, raft::distance::DistanceType::L2Unexpanded},
{1000, 10000, 16, 50, raft::distance::DistanceType::L2Unexpanded},
{1000, 10000, 32, 50, raft::distance::DistanceType::L2Unexpanded},
{10000, 40000, 32, 30, raft::distance::DistanceType::L2Unexpanded},
};
typedef FusedL2KNNTest<float> FusedL2KNNTestF;
TEST_P(FusedL2KNNTestF, FusedBruteForce) { this->testBruteForce(); }
INSTANTIATE_TEST_CASE_P(FusedL2KNNTest, FusedL2KNNTestF, ::testing::ValuesIn(inputs));
} // namespace knn
} // namespace spatial
} // namespace raft
|
3da726ebfda375768e46bbf30924b84b17afe3d5.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include "./knn_utils.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/neighbors/brute_force.cuh>
#include <raft/random/rng.cuh>
#include <raft/spatial/knn/knn.cuh>
#include <raft/distance/distance.cuh>
#include <rmm/device_buffer.hpp>
#include <gtest/gtest.h>
#include <cstddef>
#include <iostream>
#include <vector>
namespace raft {
namespace spatial {
namespace knn {
struct FusedL2KNNInputs {
int num_queries;
int num_db_vecs;
int dim;
int k;
raft::distance::DistanceType metric_;
};
template <typename T>
class FusedL2KNNTest : public ::testing::TestWithParam<FusedL2KNNInputs> {
public:
FusedL2KNNTest()
: stream_(resource::get_cuda_stream(handle_)),
params_(::testing::TestWithParam<FusedL2KNNInputs>::GetParam()),
database(params_.num_db_vecs * params_.dim, stream_),
search_queries(params_.num_queries * params_.dim, stream_),
raft_indices_(params_.num_queries * params_.k, stream_),
raft_distances_(params_.num_queries * params_.k, stream_),
ref_indices_(params_.num_queries * params_.k, stream_),
ref_distances_(params_.num_queries * params_.k, stream_)
{
RAFT_CUDA_TRY(cudaMemsetAsync(database.data(), 0, database.size() * sizeof(T), stream_));
RAFT_CUDA_TRY(
cudaMemsetAsync(search_queries.data(), 0, search_queries.size() * sizeof(T), stream_));
RAFT_CUDA_TRY(
cudaMemsetAsync(raft_indices_.data(), 0, raft_indices_.size() * sizeof(int64_t), stream_));
RAFT_CUDA_TRY(
cudaMemsetAsync(raft_distances_.data(), 0, raft_distances_.size() * sizeof(T), stream_));
RAFT_CUDA_TRY(
cudaMemsetAsync(ref_indices_.data(), 0, ref_indices_.size() * sizeof(int64_t), stream_));
RAFT_CUDA_TRY(
cudaMemsetAsync(ref_distances_.data(), 0, ref_distances_.size() * sizeof(T), stream_));
}
protected:
void testBruteForce()
{
// calculate the naive knn, by calculating the full pairwise distances and doing a k-select
rmm::device_uvector<T> temp_distances(num_db_vecs * num_queries, stream_);
distance::pairwise_distance(
handle_,
raft::make_device_matrix_view<T, int32_t>(search_queries.data(), num_queries, dim),
raft::make_device_matrix_view<T, int32_t>(database.data(), num_db_vecs, dim),
raft::make_device_matrix_view<T, int32_t>(temp_distances.data(), num_queries, num_db_vecs),
metric);
spatial::knn::select_k<int64_t, T>(temp_distances.data(),
nullptr,
num_queries,
num_db_vecs,
ref_distances_.data(),
ref_indices_.data(),
true,
k_,
stream_);
auto index_view =
raft::make_device_matrix_view<const T, int64_t>(database.data(), num_db_vecs, dim);
auto query_view =
raft::make_device_matrix_view<const T, int64_t>(search_queries.data(), num_queries, dim);
auto out_indices_view =
raft::make_device_matrix_view<int64_t, int64_t>(raft_indices_.data(), num_queries, k_);
auto out_dists_view =
raft::make_device_matrix_view<T, int64_t>(raft_distances_.data(), num_queries, k_);
raft::neighbors::brute_force::fused_l2_knn(
handle_, index_view, query_view, out_indices_view, out_dists_view, metric);
// verify.
ASSERT_TRUE(devArrMatchKnnPair(ref_indices_.data(),
raft_indices_.data(),
ref_distances_.data(),
raft_distances_.data(),
num_queries,
k_,
float(0.001),
stream_));
}
void SetUp() override
{
num_queries = params_.num_queries;
num_db_vecs = params_.num_db_vecs;
dim = params_.dim;
k_ = params_.k;
metric = params_.metric_;
unsigned long long int seed = 1234ULL;
raft::random::RngState r(seed);
uniform(handle_, r, database.data(), num_db_vecs * dim, T(-1.0), T(1.0));
uniform(handle_, r, search_queries.data(), num_queries * dim, T(-1.0), T(1.0));
}
private:
raft::resources handle_;
cudaStream_t stream_ = 0;
FusedL2KNNInputs params_;
int num_queries;
int num_db_vecs;
int dim;
rmm::device_uvector<T> database;
rmm::device_uvector<T> search_queries;
rmm::device_uvector<int64_t> raft_indices_;
rmm::device_uvector<T> raft_distances_;
rmm::device_uvector<int64_t> ref_indices_;
rmm::device_uvector<T> ref_distances_;
int k_;
raft::distance::DistanceType metric;
};
const std::vector<FusedL2KNNInputs> inputs = {
{100, 1000, 16, 10, raft::distance::DistanceType::L2Expanded},
{256, 256, 30, 10, raft::distance::DistanceType::L2Expanded},
{1000, 10000, 16, 10, raft::distance::DistanceType::L2Expanded},
{100, 1000, 16, 50, raft::distance::DistanceType::L2Expanded},
{20, 10000, 16, 10, raft::distance::DistanceType::L2Expanded},
{1000, 10000, 16, 50, raft::distance::DistanceType::L2Expanded},
{1000, 10000, 32, 50, raft::distance::DistanceType::L2Expanded},
{10000, 40000, 32, 30, raft::distance::DistanceType::L2Expanded},
// L2 unexpanded
{100, 1000, 16, 10, raft::distance::DistanceType::L2Unexpanded},
{1000, 10000, 16, 10, raft::distance::DistanceType::L2Unexpanded},
{100, 1000, 16, 50, raft::distance::DistanceType::L2Unexpanded},
{20, 10000, 16, 50, raft::distance::DistanceType::L2Unexpanded},
{1000, 10000, 16, 50, raft::distance::DistanceType::L2Unexpanded},
{1000, 10000, 32, 50, raft::distance::DistanceType::L2Unexpanded},
{10000, 40000, 32, 30, raft::distance::DistanceType::L2Unexpanded},
};
typedef FusedL2KNNTest<float> FusedL2KNNTestF;
TEST_P(FusedL2KNNTestF, FusedBruteForce) { this->testBruteForce(); }
INSTANTIATE_TEST_CASE_P(FusedL2KNNTest, FusedL2KNNTestF, ::testing::ValuesIn(inputs));
} // namespace knn
} // namespace spatial
} // namespace raft
|
e8518a42e21f903feea25d5c72a55accb7efb20f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
//Number of elements of the inpu layers, that correspond to the number of pixels of a picture
#define PIXELS 3073
//Number of elements of the first hidden layer
#define HIDDEN_LAYER_1 2000
//Number of elements of the second hidden layer
#define HIDDEN_LAYER_2 450
//Number of elements of the output layer
#define OUTPUT_LAYER 10
//Learning rate of the algorithm
#define LEARNING_RATE 0.01
//Numbers of elements to use for training
#define ELEMENTS 1000
//Blocks
#define BLOCKS 32
/*
 * Kernel that adds the gradient matrix to the transition matrix element-wise
 * In:
 * transition: N x M matrix, updated in place (transition += gradient)
 * gradient: N x M matrix holding the values to add
 * N, M: number of rows and columns of both matrices
 */
__global__ void sum_matrix(double *transition, double *gradient, int N, int M){
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < N && y < M){
transition[x*M+y] = transition[x*M+y] + gradient[x*M+y];
}
}
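/*
  A minimal host-side launch sketch, not part of the original file: it shows
  one way to cover an N x M matrix with a 2D grid so that the bounds check in
  sum_matrix handles the partial blocks at the edges. The helper name
  launch_sum_matrix is hypothetical and exists only for this example.
*/
static void launch_sum_matrix(double *d_transition, double *d_gradient, int N, int M)
{
    dim3 threads(BLOCKS, BLOCKS);                       // BLOCKS x BLOCKS threads per block (32 x 32 = 1024)
    dim3 blocks((N + BLOCKS - 1) / BLOCKS,              // enough blocks to cover all N rows
                (M + BLOCKS - 1) / BLOCKS);             // and all M columns
    hipLaunchKernelGGL(sum_matrix, blocks, threads, 0, 0, d_transition, d_gradient, N, M);
    hipDeviceSynchronize();                             // wait for the element-wise addition to finish
}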
|
e8518a42e21f903feea25d5c72a55accb7efb20f.cu
|
#include "includes.h"
//Number of elements of the inpu layers, that correspond to the number of pixels of a picture
#define PIXELS 3073
//Number of elements of the first hidden layer
#define HIDDEN_LAYER_1 2000
//Number of elements of the second hidden layer
#define HIDDEN_LAYER_2 450
//Number of elements of the output layer
#define OUTPUT_LAYER 10
//Learning rate of the algorithm
#define LEARNING_RATE 0.01
//Numbers of elements to use for training
#define ELEMENTS 1000
//Blocks
#define BLOCKS 32
/*
 * Kernel that adds the gradient matrix to the transition matrix element-wise
 * In:
 * transition: N x M matrix, updated in place (transition += gradient)
 * gradient: N x M matrix holding the values to add
 * N, M: number of rows and columns of both matrices
 */
__global__ void sum_matrix(double *transition, double *gradient, int N, int M){
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < N && y < M){
transition[x*M+y] = transition[x*M+y] + gradient[x*M+y];
}
}
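/*
  A minimal host-side launch sketch, not part of the original file: it shows
  one way to cover an N x M matrix with a 2D grid so that the bounds check in
  sum_matrix handles the partial blocks at the edges. The helper name
  launch_sum_matrix is hypothetical and exists only for this example.
*/
static void launch_sum_matrix(double *d_transition, double *d_gradient, int N, int M)
{
    dim3 threads(BLOCKS, BLOCKS);                        // BLOCKS x BLOCKS threads per block (32 x 32 = 1024)
    dim3 blocks((N + BLOCKS - 1) / BLOCKS,               // enough blocks to cover all N rows
                (M + BLOCKS - 1) / BLOCKS);              // and all M columns
    sum_matrix<<<blocks, threads>>>(d_transition, d_gradient, N, M);
    cudaDeviceSynchronize();                             // wait for the element-wise addition to finish
}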
|
BIT07_N-Queen.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
Algorithms and Data Structures Learned with CUDA
Optimizing the N-Queens problem step by step
Kyodo News, Information Technology Bureau, Iichiro Suzuki ([email protected])
Compile and run:
$ nvcc -O3 CUDA**_N-Queen.cu && ./a.out (-c|-r|-g|-s)
-c: CPU
-r: CPU, recursive
-g: GPU
-s: SGPU (believed to be the Somers version)
1. Brute force
2. :
3. Backtracking (array)
4. :
5. :
6. Backtracking (bitmap)
7. Mirror
8. Symmetry removal
9. Branching on the queen's position, BOUND1
10. Branching on the queen's position, BOUND1,2
11. Pruning
12. Optimization
13. Parallel processing
7. Mirror + bit (n27)
After queens have been placed on the two border rows on each side (bit (n27)),
the remaining solutions are counted with the mirror technique.
Because queen placements come in left-right mirror pairs, the mirror technique
places a queen only in the right half of the first row and doubles the count.
Whether n is even or odd, queens are placed in the right half and the result
is doubled. When n is odd and the queen sits in the center column the placement
is not mirror-symmetric, so that case is not doubled (x1).
Bit (n27)
Speed-up through bit operations: the board state is packed into bitmaps and
processed there, which is 20 to 30 times faster than plain backtracking.
With a bitmap, data can be moved quickly by shifting: a flag array needs O(N)
time to move data, a bitmap needs O(1).
Instead of preparing 2*N-1 elements for the diagonals as a flag array does,
N bits are sufficient.
The bit pattern of placeable squares is kept in flags, and bits are extracted
and processed one at a time with -flags & flags.
This is 20-30 times faster than plain backtracking.
===================
Idea 1
===================
Represent the N x N chessboard with N bit fields, one bit field per row
(the bit at a queen's position is turned ON).
Backtracking then proceeds "downwards" from bit field 0, turning exactly one
bit ON in each field as it goes.
-----Q-- 00000100 bit field 0
---Q---- 00010000 bit field 1
------Q- 00000010 bit field 2
Q------- 10000000 bit field 3
-------Q 00000001 bit field 4
-Q------ 01000000 bit field 5
----Q--- 00001000 bit field 6
--Q----- 00100000 bit field 7
===================
Idea 2
===================
Next, prepare three more bit fields to check the attacked squares:
1. attacks running down-left : left
2. attacks running straight down: down
3. attacks running down-right: right
Now consider the diagonal attacks.
In the board above, the up-right diagonal attack in row 1 is bit 3 (0x08) and
in row 2 it is bit 2 (0x04). These values are obtained by shifting the queen's
position in row 0, 0x10, right by one bit per row. Likewise, the up-left
diagonal attack is bit 5 (0x20) in row 1 and bit 6 (0x40) in row 2, so it is
obtained by shifting left by one bit per row.
In other words, by writing the right-shifted attacks as right and the
left-shifted attacks as left, the queen's attacks can be obtained simply by
shifting right and left one bit at a time.
*-------------
|. . . . . .
|. . . -3. . 0x02 -|
|. . -2. . . 0x04 |(1-bit right shift: right)
|. -1. . . . 0x08 -|
|Q . . . . . 0x10 (the queen's position is 4: down)
|. +1. . . . 0x20 -|
|. . +2. . . 0x40 |(1-bit left shift: left)
|. . . +3. . 0x80 -|
*-------------
Figure: checking the diagonal attacks
When the search advances from bit field n to bit field n+1, OR each of the
three bit fields with the bit chosen in field n (bit): left is shifted one to
the left, down stays as is, and right is shifted one to the right before being
handed to the search of field n+1.
left :(left |bit)<<1
right:(right|bit)>>1
down : down|bit
===================
Idea 3
===================
In the search of bit field n+1, OR these three bit fields together; any
position that is ON is attacked and cannot be used. Then invert that ORed
field, turning it into "a bit field whose ON bits are the placeable
positions". Calling this placeable bit field bitmap, perform the following
operation:
bit=-bitmap & bitmap;// extract the rightmost ON bit
To understand this expression you need to know how negative values are
represented in binary on a computer. Written out in binary, negative values
look like this:
00000011 3
00000010 2
00000001 1
00000000 0
11111111 -1
11111110 -2
11111101 -3
Turning a positive value n into -n inverts the bits of n and then adds 1.
For example, with n=22, ANDing n and -n gives the result below: exactly the
lowest ON bit of n's binary representation is extracted. Achieving single-bit
extraction with such a simple operation is the important point.
00010110 22
AND 11101010 -22
------------------
00000010
With a while loop like the one below, the body runs exactly once per ON bit
of bitmap, generating the placeable patterns one by one with no waste.
while(bitmap) {
bit=-bitmap & bitmap;
bitmap ^= bit;
// here each placeable pattern is produced one at a time (bit)
}
Execution results
$ nvcc -O3 CUDA06_N-Queen.cu && ./a.out -r
6. CPUR recursive backtracking + bitmap
N: Total Unique hh:mm:ss.ms
4: 2 0 0.00
5: 10 0 0.00
6: 4 0 0.00
7: 40 0 0.00
8: 92 0 0.00
9: 352 0 0.00
10: 724 0 0.00
11: 2680 0 0.00
12: 14200 0 0.01
13: 73712 0 0.04
14: 365596 0 0.19
15: 2279184 0 1.24
16: 14772512 0 7.79
17: 95815104 0 57.57
$ nvcc -O3 CUDA06_N-Queen.cu && ./a.out -c
6. CPU non-recursive backtracking + bitmap
N: Total Unique hh:mm:ss.ms
4: 2 0 0.00
5: 10 0 0.00
6: 4 0 0.00
7: 40 0 0.00
8: 92 0 0.00
9: 352 0 0.00
10: 724 0 0.00
11: 2680 0 0.00
12: 14200 0 0.01
13: 73712 0 0.04
14: 365596 0 0.21
15: 2279184 0 1.40
16: 14772512 0 8.78
17: 95815104 0 1:05.00
$ nvcc -O3 CUDA06_N-Queen.cu && ./a.out -s
6. SGPU non-recursive backtracking + bitmap
N: Total Unique dd:hh:mm:ss.ms
4: 2 0 00:00:00:00.02
5: 10 0 00:00:00:00.00
6: 4 0 00:00:00:00.00
7: 40 0 00:00:00:00.00
8: 92 0 00:00:00:00.00
9: 352 0 00:00:00:00.00
10: 724 0 00:00:00:00.00
11: 2680 0 00:00:00:00.01
12: 14200 0 00:00:00:00.02
13: 73712 0 00:00:00:00.03
14: 365596 0 00:00:00:00.08
15: 2279184 0 00:00:00:00.48
16: 14772512 0 00:00:00:02.41
17: 95815104 0 00:00:00:18.30
$ nvcc -O3 CUDA06_N-Queen.cu && ./a.out -g
6. GPU non-recursive backtracking + bitmap
N: Total Unique dd:hh:mm:ss.ms
4: 2 0 00:00:00:00.02
5: 10 0 00:00:00:00.00
6: 4 0 00:00:00:00.00
7: 40 0 00:00:00:00.00
8: 92 0 00:00:00:00.00
9: 352 0 00:00:00:00.00
10: 724 0 00:00:00:00.00
11: 2680 0 00:00:00:00.01
12: 14200 0 00:00:00:00.05
13: 73712 0 00:00:00:00.07
14: 365596 0 00:00:00:00.07
15: 2279184 0 00:00:00:00.37
16: 14772512 0 00:00:00:02.30
17: 95815104 0 00:00:00:18.07
*/
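/*
  A minimal illustrative sketch, not part of the original program: it applies
  the "bit=-bitmap & bitmap" extraction loop from Idea 3 to count how many
  placeable columns a bitmap encodes. The helper name count_placeable_columns
  is hypothetical and exists only for this example.
*/
static inline int count_placeable_columns(unsigned int bitmap)
{
  int count=0;
  while(bitmap){
    unsigned int bit=(-bitmap)&bitmap; // isolate the lowest ON bit: one placeable column
    bitmap^=bit;                       // clear that bit and continue with the next candidate
    count++;
  }
  return count;
}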
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#define THREAD_NUM 96
#define MAX 27
//
long TOTAL=0; //CPU,CPUR
long UNIQUE=0; //CPU,CPUR
typedef unsigned long long uint64;
typedef struct{
uint64 bv;
uint64 down;
uint64 left;
uint64 right;
int x[MAX];
int y[MAX];
}Board ;
//
Board B;
Board b1[3915200];
Board b2[3915200];
// GPU
__global__ void cuda_kernel(int size,int mark,unsigned int* totalDown,unsigned int* totalLeft,unsigned int* totalRight,unsigned int* d_results,int totalCond);
long long solve_nqueen_cuda(int size,int steps);
void NQueenG(int size,int mask,int row,int steps);
// SGPU
__global__ void sgpu_cuda_kernel(int size,int mark,unsigned int* totalDown,unsigned int* totalLeft,unsigned int* totalRight,unsigned int* results,int totalCond);
long long sgpu_solve_nqueen_cuda(int size,int steps);
// CPU
void TimeFormat(clock_t utime,char *form);
// CPU
void NQueen(int size,int mask,int row,uint64 b,uint64 l,uint64 d,uint64 r);
// CPUR
void NQueenR(int size,int mask,int row,uint64 bv,uint64 left,uint64 down,uint64 right);
//
//GPU
__global__
void cuda_kernel(
int size,
int mark,
unsigned int* totalDown,
unsigned int* totalLeft,
unsigned int* totalRight,
unsigned int* d_results,
int totalCond)
{
register const unsigned int mask=(1<<size)-1;
register unsigned int total=0;
//row is 0 here, but that does not mean the search starts from the first row:
//it starts from the mask-th row onward
//for n=8, mask==2, so the search starts from there
register int row=0;
register unsigned int bit;
//
//threads
//
//thread ID within the block
register unsigned const int tid=threadIdx.x;
//block ID within the grid
register unsigned const int bid=blockIdx.x;
//global ID across the grid
register unsigned const int idx=bid*blockDim.x+tid;
//
//shared memory
//
//use shared memory, shared by the threads within a block
//the fixed size of 10 comes from the current mask setting:
//the GPU executes at most 10 rows
//THREAD_NUM is the number of threads per block
__shared__ unsigned int down[THREAD_NUM][10];
down[tid][row]=totalDown[idx];
__shared__ unsigned int left[THREAD_NUM][10];
left[tid][row]=totalLeft[idx];
__shared__ unsigned int right[THREAD_NUM][10];
right[tid][row]=totalRight[idx];
__shared__ unsigned int bitmap[THREAD_NUM][10];
//derive bitmap from down, left and right
bitmap[tid][row]
=mask&~(
down[tid][row]
|left[tid][row]
|right[tid][row]);
__shared__ unsigned int sum[THREAD_NUM];
//
//do not let the surplus threads do any work
//the GPU is launched with steps threads, but those at or beyond totalCond just run empty
if(idx<totalCond){
//repack the totalDown, totalLeft and totalRight information
//into down, left and right
//the t_ arrays packed on the CPU hold steps entries, but
//within a block we are limited to the threads per block,
//so idx is sufficient
//
/**06 replaced with scalar variables **********/
register unsigned int bitmap_tid_row;
register unsigned int down_tid_row;
register unsigned int left_tid_row;
register unsigned int right_tid_row;
while(row>=0){
//bitmap[tid][row]
bitmap_tid_row=bitmap[tid][row];
down_tid_row=down[tid][row];
left_tid_row=left[tid][row];
right_tid_row=right[tid][row];
/***************************************/
//
//bitmap[tid][row]=00000000
//1
/**06 **********/
//if(bitmap[tid][row]==0){
if(bitmap_tid_row==0){
/***************************************/
row--;
}else{
//
bitmap[tid][row]
^=bit
/**06 **********/
//=(-bitmap[tid][row]&bitmap[tid][row]);
=(-bitmap_tid_row&bitmap_tid_row);
/***************************************/
//
if((bit&mask)!=0){
//?
//
if(row+1==mark){
total++;
row--;
}else{
int rowP=row+1;
/**07register ****/
//down[tid][rowP]=down[tid][row]|bit;
down[tid][rowP]=down_tid_row|bit;
//left[tid][rowP]=(left[tid][row]|bit)<<1;
left[tid][rowP]=(left_tid_row|bit)<<1;
//right[tid][rowP]=(right[tid][row]|bit)>>1;
right[tid][rowP]=(right_tid_row|bit)>>1;
bitmap[tid][rowP]
=mask&~(
down[tid][rowP]
|left[tid][rowP]
|right[tid][rowP]);
row++;
}
}else{
//
row--;
}
}
}
//sum[tid]
sum[tid]=total;
}else{
//totalCondtotal
sum[tid]=0;
}
//__syncthreads()
//__syncthreads()
__syncthreads();if(tid<64&&tid+64<THREAD_NUM){
sum[tid]+=sum[tid+64];
}
__syncthreads();if(tid<32){
sum[tid]+=sum[tid+32];
}
__syncthreads();if(tid<16){
sum[tid]+=sum[tid+16];
}
__syncthreads();if(tid<8){
sum[tid]+=sum[tid+8];
}
__syncthreads();if(tid<4){
sum[tid]+=sum[tid+4];
}
__syncthreads();if(tid<2){
sum[tid]+=sum[tid+2];
}
__syncthreads();if(tid<1){
sum[tid]+=sum[tid+1];
}
__syncthreads();if(tid==0){
d_results[bid]=sum[0];
}
}
//
// GPU
long solve_nqueen_cuda(int size,int mask,int row,int n_left,int n_down,int n_right,int steps)
{
//GPUGPU
const unsigned int mark=size>11?size-10:2;
const unsigned int h_mark=row;
long total=0;
int totalCond=0;
bool matched=false;
//host
unsigned int down[32]; down[row]=n_down;
unsigned int right[32]; right[row]=n_right;
unsigned int left[32]; left[row]=n_left;
//bitmap
//stack1
unsigned int bitmap[32];
//bitmap[row]=(left[row]|down[row]|right[row]);
/***06 bitGPU*********************/
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
/************************/
unsigned int bit;
//unsigned int* totalDown=new unsigned int[steps];
unsigned int* totalDown;
hipHostMalloc((void**) &totalDown,sizeof(int)*steps);
//unsigned int* totalLeft=new unsigned int[steps];
unsigned int* totalLeft;
hipHostMalloc((void**) &totalLeft,sizeof(int)*steps);
//unsigned int* totalRight=new unsigned int[steps];
unsigned int* totalRight;
hipHostMalloc((void**) &totalRight,sizeof(int)*steps);
//unsigned int* h_results=new unsigned int[steps];
unsigned int* h_results;
hipHostMalloc((void**) &h_results,sizeof(int)*steps);
//device
unsigned int* downCuda;
hipMalloc((void**) &downCuda,sizeof(int)*steps);
unsigned int* leftCuda;
hipMalloc((void**) &leftCuda,sizeof(int)*steps);
unsigned int* rightCuda;
hipMalloc((void**) &rightCuda,sizeof(int)*steps);
unsigned int* resultsCuda;
hipMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM);
//123CPU->row==mark 3
//down,left,right totalDown,totalLeft,totalRight
//
//->3GPU
//13CPU
//n15row=5CPU
//GPU(GPU10
//)
//while(row>=0) {
register int rowP=0;
while(row>=h_mark) {
//bitmap[row]=00000000
//1
/***06 bit*********************/
//06GPU
if(bitmap[row]==0){ row--; }
/************************/
/***06 bit*********************/
//06SGPU
//if((bitmap[row]&mask)==mask){row--;}
/************************/
else{//
//06SGPU
/***06 bit*********************/
//bit=(bitmap[row]+1)&~bitmap[row];
//bitmap[row]|=bit;
/************************/
//06GPU
bitmap[row]^=bit=(-bitmap[row]&bitmap[row]); //
if((bit&mask)!=0){//
rowP=row+1;
down[rowP]=down[row]|bit;
left[rowP]=(left[row]|bit)<<1;
right[rowP]=(right[row]|bit)>>1;
/***06 bit*********************/
//bitmap[rowP]=(down[rowP]|left[rowP]|right[rowP]);
/************************/
/***06 bit*********************/
bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]);
/************************/
row++;
if(row==mark){
//3(mark)
//down,left,right
//
//GPU
//totalCond threadId down,left,right
//row=2(13n15row=5)
//totalDown,totalLeft,totalRight
totalDown[totalCond]=down[row];
totalLeft[totalCond]=left[row];
totalRight[totalCond]=right[row];
//
totalCond++;
//GPUGPUstepsGPU
//
//ntotalCondstepsn
//
//totalCond==steps
if(totalCond==steps){
//matched=trueCOUNT //GPUGPU
//matched=true
if(matched){
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
}
matched=false;
}
hipMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
/** backTrack+bitmap*/
//size-mark GPU totalCond
hipLaunchKernelGGL(( cuda_kernel), dim3(steps/THREAD_NUM),dim3(THREAD_NUM)
, 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
//steps
//totalCond
//GPUGPUmatched=true
matched=true;
//totalCond==stepsGPU0
//(stepsGPU)
totalCond=0;
}
//totalDown,totalLeft,totalRight1
// row=2
//totalDown,totalLeft,totalRight
row--;
}
}else{
//row==markCPU
//nqueen
row--;
}
}
}
//matched=trueCOUNT //GPUGPU
//matched=true
if(matched){
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
}
matched=false;
}
hipMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
/** backTrack+bitmap*/
//size-mark GPU totalCond
//steps
//totalCond
hipLaunchKernelGGL(( cuda_kernel), dim3(steps/THREAD_NUM),dim3(THREAD_NUM)
, 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
}
//
hipFree(downCuda);
hipFree(leftCuda);
hipFree(rightCuda);
hipFree(resultsCuda);
/***06 cudaFreeHost**/
//delete[] totalDown;
hipHostFree(totalDown);
//delete[] totalLeft;
hipHostFree(totalLeft);
//delete[] totalRight;
hipHostFree(totalRight);
//delete[] h_results;
hipHostFree(h_results);
/************************/
return total;
}
//GPU
void NQueenG(int size,int steps)
{
register int sizeE=size-1;
register int bit=0;
register int mask=((1<<size)-1);
if(size<=0||size>32){return;}
//
int lim=(size%2==0)?size/2:sizeE/2;
for(int col=0;col<lim;col++){
bit=(1<<col);
TOTAL+=solve_nqueen_cuda(size,mask,1,bit<<1,bit,bit>>1,steps);
}
//TOTAL
TOTAL=TOTAL*2;
//
if(size%2==1){
bit=(1<<(sizeE)/2);
TOTAL+=solve_nqueen_cuda(size,mask,1,bit<<1,bit,bit>>1,steps);
}
}
//
//SGPU
__global__
void sgpu_cuda_kernel(int size,int mark,unsigned int* totalDown,unsigned int* totalLeft,unsigned int* totalRight,unsigned int* d_results,int totalCond)
{
//
const int tid=threadIdx.x;//ID
const int bid=blockIdx.x;//ID
const int idx=bid*blockDim.x+tid;//ID
//
__shared__ unsigned int down[THREAD_NUM][10];//shared
__shared__ unsigned int left[THREAD_NUM][10];//THREAD_NUM
__shared__ unsigned int right[THREAD_NUM][10];//10maskGPU10
__shared__ unsigned int bitmap[THREAD_NUM][10];
__shared__ unsigned int sum[THREAD_NUM];
//
const unsigned int mask=(1<<size)-1;
int total=0;
int row=0;//row=01mask n=8 mask==2
unsigned int bit;
if(idx<totalCond){// GPUstepstotalCond
down[tid][row]=totalDown[idx];//totalDown,totalLeft,totalRightdown,left,right
left[tid][row]=totalLeft[idx];//CPU t_steps idx
right[tid][row]=totalRight[idx];
bitmap[tid][row]=down[tid][row]|left[tid][row]|right[tid][row];//down,left,rightbitmap
while(row>=0){
//
//06GPU
//if(bitmap[tid][row]==0){//bitmap[tid][row]=00000000 1
//06SGPU
if((bitmap[tid][row]&mask)==mask){//bitmap[tid][row]=00000000 1
//
row--;
}else{
//
//06GPU
//bitmap[tid][row]^=bit=(-bitmap[tid][row]&bitmap[tid][row]); //
//06SGPU
bit=(bitmap[tid][row]+1)&~bitmap[tid][row];
bitmap[tid][row]|=bit;
//
if((bit&mask)!=0){//
if(row+1==mark){//?
total++;
row--;
}
else{
down[tid][row+1]=down[tid][row]|bit;
left[tid][row+1]=(left[tid][row]|bit)<<1;
right[tid][row+1]=(right[tid][row]|bit)>>1;
bitmap[tid][row+1]=(down[tid][row+1]|left[tid][row+1]|right[tid][row+1]);
row++;
}
}else{//
row--;
}
}
}
sum[tid]=total;//sum[tid]
}else{//totalCond total
sum[tid]=0;
}
//__syncthreads()
//__syncthreads()
__syncthreads();if(tid<64&&tid+64<THREAD_NUM){sum[tid]+=sum[tid+64];}//__syncthreads();1
__syncthreads();if(tid<32){sum[tid]+=sum[tid+32];}
__syncthreads();if(tid<16){sum[tid]+=sum[tid+16];}
__syncthreads();if(tid<8){sum[tid]+=sum[tid+8];}
__syncthreads();if(tid<4){sum[tid]+=sum[tid+4];}
__syncthreads();if(tid<2){sum[tid]+=sum[tid+2];}
__syncthreads();if(tid<1){sum[tid]+=sum[tid+1];}
__syncthreads();if(tid==0){d_results[bid]=sum[0];}
}
//
//SGPU
long long sgpu_solve_nqueen_cuda(int size,int steps)
{
unsigned int down[32];
unsigned int left[32];
unsigned int right[32];
unsigned int bitmap[32];
unsigned int bit;
if(size<=0||size>32){return 0;}
unsigned int* totalDown=new unsigned int[steps];
unsigned int* totalLeft=new unsigned int[steps];
unsigned int* totalRight=new unsigned int[steps];
unsigned int* h_results=new unsigned int[steps];
//device
unsigned int* downCuda;
hipMalloc((void**) &downCuda,sizeof(int)*steps);
unsigned int* leftCuda;
hipMalloc((void**) &leftCuda,sizeof(int)*steps);
unsigned int* rightCuda;
hipMalloc((void**) &rightCuda,sizeof(int)*steps);
unsigned int* resultsCuda;
hipMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM);
const unsigned int mask=(1<<size)-1;
const unsigned int mark=size>11?size-10:2;
long long total=0;
int totalCond=0;
int row=0;
down[0]=0;
left[0]=0;
right[0]=0;
bitmap[0]=0;
bool matched=false;
for(int col=0;col<size/2;col++){
bit=(1<<col);
bitmap[0]|=bit;
down[1]=bit;
left[1]=bit<<1;
right[1]=bit>>1;
bitmap[1]=(down[1]|left[1]|right[1]);
row=1;
while(row>0){
if((bitmap[row]&mask)==mask){row--;}
else{
bit=(bitmap[row]+1)&~bitmap[row];
bitmap[row]|=bit;
if((bit&mask)!=0){
down[row+1]=down[row]|bit;
left[row+1]=(left[row]|bit)<<1;
right[row+1]=(right[row]|bit)>>1;
bitmap[row+1]=(down[row+1]|left[row+1]|right[row+1]);
row++;
if(row==mark){
totalDown[totalCond]=down[row];
totalLeft[totalCond]=left[row];
totalRight[totalCond]=right[row];
totalCond++;
if(totalCond==steps){
if(matched){
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
matched=false;
}
hipMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
/** backTrack+bitmap*/
hipLaunchKernelGGL(( sgpu_cuda_kernel), dim3(steps/THREAD_NUM),dim3(THREAD_NUM)
, 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
matched=true;
totalCond=0;
}
row--;
}
}else{row--;}
}
}
}
if(matched){
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
matched=false;
}
hipMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
/** backTrack+bitmap*/
hipLaunchKernelGGL(( sgpu_cuda_kernel), dim3(steps/THREAD_NUM),dim3(THREAD_NUM)
, 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
total*=2;
if(size%2==1){
matched=false;
totalCond=0;
bit=(1<<(size-1)/2);
bitmap[0]|=bit;
down[1]=bit;
left[1]=bit<<1;
right[1]=bit>>1;
bitmap[1]=(down[1]|left[1]|right[1]);
row=1;
while(row>0){
if((bitmap[row]&mask)==mask){row--;}
else{
bit=(bitmap[row]+1)&~bitmap[row];
bitmap[row]|=bit;
if((bit&mask)!=0){
down[row+1]=down[row]|bit;
left[row+1]=(left[row]|bit)<<1;
right[row+1]=(right[row]|bit)>>1;
bitmap[row+1]=(down[row+1]|left[row+1]|right[row+1]);
row++;
if(row==mark){
totalDown[totalCond]=down[row];
totalLeft[totalCond]=left[row];
totalRight[totalCond]=right[row];
totalCond++;
if(totalCond==steps){
if(matched){
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
matched=false;
}
hipMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
/** backTrack+bitmap*/
hipLaunchKernelGGL(( sgpu_cuda_kernel), dim3(steps/THREAD_NUM),dim3(THREAD_NUM)
, 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
matched=true;
totalCond=0;
}
row--;
}
}else{row--;}
}
}
if(matched){
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
matched=false;
}
hipMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
/** backTrack+bitmap*/
hipLaunchKernelGGL(( sgpu_cuda_kernel), dim3(steps/THREAD_NUM),dim3(THREAD_NUM)
, 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
}
hipFree(downCuda);
hipFree(leftCuda);
hipFree(rightCuda);
hipFree(resultsCuda);
delete[] totalDown;
delete[] totalLeft;
delete[] totalRight;
delete[] h_results;
return total;
}
//
//CUDA
bool InitCUDA()
{
int count;
hipGetDeviceCount(&count);
if(count==0){fprintf(stderr,"There is no device.\n");return false;}
int i;
for(i=0;i<count;i++){
hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop,i)==hipSuccess){if(prop.major>=1){break;} }
}
if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;}
hipSetDevice(i);
return true;
}
//
//hh:mm:ss.ms
void TimeFormat(clock_t utime,char *form){
int dd,hh,mm;
float ftime,ss;
ftime=(float)utime/CLOCKS_PER_SEC;
mm=(int)ftime/60;
ss=ftime-(int)(mm*60);
dd=mm/(24*60);
mm=mm%(24*60);
hh=mm/60;
mm=mm%60;
if(dd)
sprintf(form,"%4d %02d:%02d:%05.2f",dd,hh,mm,ss);
else if(hh)
sprintf(form," %2d:%02d:%05.2f",hh,mm,ss);
else if(mm)
sprintf(form," %2d:%05.2f",mm,ss);
else
sprintf(form," %5.2f",ss);
}
//
bool board_placement(int si,int x,int y)
{
//
//printf("i:%d:x:%d:y:%d\n",i,B.x[i],B.y[i]);
if(B.x[x]==y){
//printf("Duplicate x:%d:y:%d\n",x,y);
////OK
return true;
}
B.x[x]=y;
//x y p.N-1-x+y x+y
uint64 bv=1<<x;
uint64 down=1<<y;
B.y[x]=B.y[x]+down;
uint64 left=1<<(si-1-x+y);
uint64 right=1<<(x+y);
//printf("check valid x:%d:y:%d:p.N-1-x+y:%d;x+y:%d\n",x,y,si-1-x+y,x+y);
//printf("check valid pbv:%d:bv:%d:pbh:%d:bh:%d:pbu:%d:bu:%d:pbd:%d:bd:%d\n",B.bv,bv,B.bh,bh,B.bu,bu,B.bd,bd);
//printf("bvcheck:%d:bhcheck:%d:bucheck:%d:bdcheck:%d\n",B.bv&bv,B.bh&bh,B.bu&bu,B.bd&bd);
if((B.bv&bv)||(B.down&down)||(B.left&left)||(B.right&right)){
//printf("valid_false\n");
return false;
}
//printf("before pbv:%d:bv:%d:pbh:%d:bh:%d:pbu:%d:bu:%d:pbd:%d:bd:%d\n",B.bv,bv,B.bh,bh,B.bu,bu,B.bd,bd);
B.bv|=bv;
B.down|=down;
B.left|=left;
B.right|=right;
//printf("after pbv:%d:bv:%d:pbh:%d:bh:%d:pbu:%d:bu:%d:pbd:%d:bd:%d\n",B.bv,bv,B.bh,bh,B.bu,bu,B.bd,bd);
//printf("valid_true\n");
return true;
}
//
//CPU
void NQueen(int size,int mask,int row,uint64 b,uint64 l,uint64 d,uint64 r){
int sizeE=size-1;
int n;
uint64 bitmap[size];
uint64 bv[size];
uint64 left[size];
uint64 down[size];
uint64 right[size];
uint64 bit=0;
bitmap[row]=mask&~(l|d|r);
bv[row]=b;
down[row]=d;
left[row]=l;
right[row]=r;
while(row>=2){
//printf("row:%d,bv:%d,left:%d,down:%d,right:%d\n",row,bv[row],left[row],down[row],right[row]);
while((bv[row]&1)!=0) {
n=row++;
bv[row]=bv[n]>>1;//
left[row]=left[n]<<1;//left
right[row]=right[n]>>1;//right
down[row]=down[n];
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
}
bv[row+1]=bv[row]>>1;
if(bitmap[row]==0){
--row;
}else{
bitmap[row]^=bit=(-bitmap[row]&bitmap[row]);
if((bit&mask)!=0||row>=sizeE){
//if((bit)!=0){
if(row>=sizeE){
TOTAL++;
--row;
}else{
n=row++;
left[row]=(left[n]|bit)<<1;
down[row]=down[n]|bit;
right[row]=(right[n]|bit)>>1;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
//bitmap[row]=~(left[row]|down[row]|right[row]);
}
}else{
--row;
}
}
}
}
//
//
//CPUR
void NQueenR(int size,uint64 mask, int row,uint64 bv,uint64 left,uint64 down,uint64 right){
uint64 bitmap=0;
uint64 bit=0;
//
while((bv&1)!=0) {
bv>>=1;//
left<<=1;//left
right>>=1;//right
row++;
}
bv>>=1;
if(row==size){
TOTAL++;
}else{
//bitmap=mask&~(left|down|right);//mask10
bitmap=~(left|down|right);
while(bitmap>0){
bit=(-bitmap&bitmap);
bitmap=(bitmap^bit);
NQueenR(size,mask,row+1,bv,(left|bit)<<1,down|bit,(right|bit)>>1);
}
}
}
//
long prepare_1(int size){
//CPUR
int pres_a[930];
int pres_b[930];
int idx=0;
long bcnt=0;
for(int a=0;a<size;a++){
for(int b=0;b<size;b++){
if((a>=b&&(a-b)<=1)||(b>a&&(b-a)<=1)){
continue;
}
pres_a[idx]=a;
pres_b[idx]=b;
idx++;
}
}
Board wB=B;
for(int w=0;w<=(size/2)*(size-3);w++){
B=wB;
B.bv=B.down=B.left=B.right=0;
for(int j=0;j<size;j++){
B.x[j]=-1;
}
board_placement(size,0,pres_a[w]);
board_placement(size,1,pres_b[w]);
Board nB=B;
//int lsize=(size-2)*(size-1)-w;
//for(int n=w;n<lsize;n++){
for(int n=0;n<idx;n++){
B=nB;
if(board_placement(size,pres_a[n],size-1)==false){
continue;
}
if(board_placement(size,pres_b[n],size-2)==false){
continue;
}
Board eB=B;
//for(int e=w;e<lsize;e++){
for(int e=0;e<idx;e++){
B=eB;
if(board_placement(size,size-1,size-1-pres_a[e])==false){
continue;
}
if(board_placement(size,size-2,size-1-pres_b[e])==false){
continue;
}
Board sB=B;
//for(int s=w;s<lsize;s++){
for(int s=0;s<idx;s++){
B=sB;
if(board_placement(size,size-1-pres_a[s],0)==false){
continue;
}
if(board_placement(size,size-1-pres_b[s],1)==false){
continue;
}
b1[bcnt]=B;
bcnt++;
}
}
}
}
return bcnt;
}
//
long prepare_2(int size){
//CPUR
int pres_a[930];
int pres_b[930];
int idx=0;
long bcnt=0;
for(int a=0;a<size;a++){
for(int b=0;b<size;b++){
if((a>=b&&(a-b)<=1)||(b>a&&(b-a)<=1)){
continue;
}
pres_a[idx]=a;
pres_b[idx]=b;
idx++;
}
}
Board wB=B;
for(int w=(size/2)*(size-3)+1;w<=(size/2+1)*(size-3);w++){
B=wB;
B.bv=B.down=B.left=B.right=0;
for(int j=0;j<size;j++){
B.x[j]=-1;
}
board_placement(size,0,pres_a[w]);
board_placement(size,1,pres_b[w]);
Board nB=B;
//int lsize=(size-2)*(size-1)-w;
//for(int n=w;n<lsize;n++){
for(int n=0;n<idx;n++){
B=nB;
if(board_placement(size,pres_a[n],size-1)==false){
continue;
}
if(board_placement(size,pres_b[n],size-2)==false){
continue;
}
Board eB=B;
//for(int e=w;e<lsize;e++){
for(int e=0;e<idx;e++){
B=eB;
if(board_placement(size,size-1,size-1-pres_a[e])==false){
continue;
}
if(board_placement(size,size-2,size-1-pres_b[e])==false){
continue;
}
Board sB=B;
//for(int s=w;s<lsize;s++){
for(int s=0;s<idx;s++){
B=sB;
if(board_placement(size,size-1-pres_a[s],0)==false){
continue;
}
if(board_placement(size,size-1-pres_b[s],1)==false){
continue;
}
b2[bcnt]=B;
bcnt++;
}
}
}
}
return bcnt;
}
//
int main(int argc,char** argv) {
bool cpu=false,cpur=false,gpu=false,sgpu=false;
int argstart=1,steps=24576;
/** */
if(argc>=2&&argv[1][0]=='-'){
if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;}
else if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;}
else if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;}
else if(argv[1][1]=='s'||argv[1][1]=='S'){sgpu=true;}
else
cpur=true;
argstart=2;
}
if(argc<argstart){
printf("Usage: %s [-c|-g|-r|-s]\n",argv[0]);
printf(" -c: CPU only\n");
printf(" -r: CPUR only\n");
printf(" -g: GPU only\n");
printf(" -s: SGPU only\n");
printf("Default to 8 queen\n");
}
/** */
if(cpu){
printf("\n\nCPU \n");
}else if(cpur){
printf("\n\nCPUR \n");
}else if(gpu){
printf("\n\nGPU \n");
}else if(sgpu){
printf("\n\nSGPU \n");
}
if(cpu||cpur){
printf("%s\n"," N: Total Unique hh:mm:ss.ms");
clock_t st; //
char t[20]; //hh:mm:ss.ms
int min=5;
int targetN=14;
uint64 mask;
for(int i=min;i<=targetN;i++){
TOTAL=0;
UNIQUE=0;
mask=((1<<i)-1);
int size=i;
long bcnt1=prepare_1(size);
long bcnt2=0;
if ( size%2==1){
bcnt2=prepare_2(size);
}
st=clock();
for (long bc=0;bc<=bcnt1;bc++){
B=b1[bc];
if(cpur){
//CPUR
NQueenR(i,mask,2,B.bv >> 2,
B.left>>4,
((((B.down>>2)|(~0<<(size-4)))+1)<<(size-5))-1,
(B.right>>4)<<(size-5));
}else if(cpu){
//CPU
NQueen(i,mask,2,B.bv >> 2,
B.left>>4,
((((B.down>>2)|(~0<<(size-4)))+1)<<(size-5))-1,
(B.right>>4)<<(size-5));
}
}
TOTAL=TOTAL*2;
if ( size%2==1){
for (long bc=0;bc<=bcnt2;bc++){
B=b2[bc];
if(cpur){
//CPUR
NQueenR(i,mask,2,B.bv >> 2,
B.left>>4,
((((B.down>>2)|(~0<<(size-4)))+1)<<(size-5))-1,
(B.right>>4)<<(size-5));
}else if(cpu){
//CPU
NQueen(i,mask,2,B.bv >> 2,
B.left>>4,
((((B.down>>2)|(~0<<(size-4)))+1)<<(size-5))-1,
(B.right>>4)<<(size-5));
}
}
}
//
//
TimeFormat(clock()-st,t);
printf("%2d:%13ld%16ld%s\n",i,TOTAL,UNIQUE,t);
}
}
if(gpu||sgpu){
if(!InitCUDA()){return 0;}
int min=4;int targetN=17;
struct timeval t0;struct timeval t1;
int ss;int ms;int dd;
printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
for(int i=min;i<=targetN;i++){
gettimeofday(&t0,NULL); //
if(gpu){
TOTAL=0;
UNIQUE=0;
NQueenG(i,steps);
}else if(sgpu){
TOTAL=sgpu_solve_nqueen_cuda(i,steps);
UNIQUE=0;
}
gettimeofday(&t1,NULL); //
if(t1.tv_usec<t0.tv_usec) {
dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400;
ss=(t1.tv_sec-t0.tv_sec-1)%86400;
ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
} else {
dd=(int)(t1.tv_sec-t0.tv_sec)/86400;
ss=(t1.tv_sec-t0.tv_sec)%86400;
ms=(t1.tv_usec-t0.tv_usec+500)/10000;
}
int hh=ss/3600;
int mm=(ss-hh*3600)/60;
ss%=60;
printf("%2d:%13ld%16ld%4.2d:%02d:%02d:%02d.%02d\n", i,TOTAL,UNIQUE,dd,hh,mm,ss,ms);
}
}
return 0;
}
|
BIT07_N-Queen.cu
|
/**
Algorithms and Data Structures Learned with CUDA
Optimizing the N-Queens problem step by step
Kyodo News, Information Technology Bureau, Iichiro Suzuki ([email protected])
Compile and run:
$ nvcc -O3 CUDA**_N-Queen.cu && ./a.out (-c|-r|-g|-s)
-c: CPU
-r: CPU, recursive
-g: GPU
-s: SGPU (believed to be the Somers version)
1. Brute force
2. :
3. Backtracking (array)
4. :
5. :
6. Backtracking (bitmap)
7. Mirror
8. Symmetry removal
9. Branching on the queen's position, BOUND1
10. Branching on the queen's position, BOUND1,2
11. Pruning
12. Optimization
13. Parallel processing
7. Mirror + bit (n27)
After queens have been placed on the two border rows on each side (bit (n27)),
the remaining solutions are counted with the mirror technique.
Because queen placements come in left-right mirror pairs, the mirror technique
places a queen only in the right half of the first row and doubles the count.
Whether n is even or odd, queens are placed in the right half and the result
is doubled. When n is odd and the queen sits in the center column the placement
is not mirror-symmetric, so that case is not doubled (x1).
Bit (n27)
Speed-up through bit operations: the board state is packed into bitmaps and
processed there, which is 20 to 30 times faster than plain backtracking.
With a bitmap, data can be moved quickly by shifting: a flag array needs O(N)
time to move data, a bitmap needs O(1).
Instead of preparing 2*N-1 elements for the diagonals as a flag array does,
N bits are sufficient.
The bit pattern of placeable squares is kept in flags, and bits are extracted
and processed one at a time with -flags & flags.
This is 20-30 times faster than plain backtracking.
===================
Idea 1
===================
Represent the N x N chessboard with N bit fields, one bit field per row
(the bit at a queen's position is turned ON).
Backtracking then proceeds "downwards" from bit field 0, turning exactly one
bit ON in each field as it goes.
-----Q-- 00000100 bit field 0
---Q---- 00010000 bit field 1
------Q- 00000010 bit field 2
Q------- 10000000 bit field 3
-------Q 00000001 bit field 4
-Q------ 01000000 bit field 5
----Q--- 00001000 bit field 6
--Q----- 00100000 bit field 7
===================
Idea 2
===================
Next, prepare three more bit fields to check the attacked squares:
1. attacks running down-left : left
2. attacks running straight down: down
3. attacks running down-right: right
Now consider the diagonal attacks.
In the board above, the up-right diagonal attack in row 1 is bit 3 (0x08) and
in row 2 it is bit 2 (0x04). These values are obtained by shifting the queen's
position in row 0, 0x10, right by one bit per row. Likewise, the up-left
diagonal attack is bit 5 (0x20) in row 1 and bit 6 (0x40) in row 2, so it is
obtained by shifting left by one bit per row.
In other words, by writing the right-shifted attacks as right and the
left-shifted attacks as left, the queen's attacks can be obtained simply by
shifting right and left one bit at a time.
*-------------
|. . . . . .
|. . . -3. . 0x02 -|
|. . -2. . . 0x04 |(1-bit right shift: right)
|. -1. . . . 0x08 -|
|Q . . . . . 0x10 (the queen's position is 4: down)
|. +1. . . . 0x20 -|
|. . +2. . . 0x40 |(1-bit left shift: left)
|. . . +3. . 0x80 -|
*-------------
Figure: checking the diagonal attacks
When the search advances from bit field n to bit field n+1, OR each of the
three bit fields with the bit chosen in field n (bit): left is shifted one to
the left, down stays as is, and right is shifted one to the right before being
handed to the search of field n+1.
left :(left |bit)<<1
right:(right|bit)>>1
down : down|bit
===================
Idea 3
===================
In the search of bit field n+1, OR these three bit fields together; any
position that is ON is attacked and cannot be used. Then invert that ORed
field, turning it into "a bit field whose ON bits are the placeable
positions". Calling this placeable bit field bitmap, perform the following
operation:
bit=-bitmap & bitmap;// extract the rightmost ON bit
To understand this expression you need to know how negative values are
represented in binary on a computer. Written out in binary, negative values
look like this:
00000011 3
00000010 2
00000001 1
00000000 0
11111111 -1
11111110 -2
11111101 -3
Turning a positive value n into -n inverts the bits of n and then adds 1.
For example, with n=22, ANDing n and -n gives the result below: exactly the
lowest ON bit of n's binary representation is extracted. Achieving single-bit
extraction with such a simple operation is the important point.
00010110 22
AND 11101010 -22
------------------
00000010
With a while loop like the one below, the body runs exactly once per ON bit
of bitmap, generating the placeable patterns one by one with no waste.
while(bitmap) {
bit=-bitmap & bitmap;
bitmap ^= bit;
// here each placeable pattern is produced one at a time (bit)
}
Execution results
$ nvcc -O3 CUDA06_N-Queen.cu && ./a.out -r
6. CPUR recursive backtracking + bitmap
N: Total Unique hh:mm:ss.ms
4: 2 0 0.00
5: 10 0 0.00
6: 4 0 0.00
7: 40 0 0.00
8: 92 0 0.00
9: 352 0 0.00
10: 724 0 0.00
11: 2680 0 0.00
12: 14200 0 0.01
13: 73712 0 0.04
14: 365596 0 0.19
15: 2279184 0 1.24
16: 14772512 0 7.79
17: 95815104 0 57.57
$ nvcc -O3 CUDA06_N-Queen.cu && ./a.out -c
6. CPU non-recursive backtracking + bitmap
N: Total Unique hh:mm:ss.ms
4: 2 0 0.00
5: 10 0 0.00
6: 4 0 0.00
7: 40 0 0.00
8: 92 0 0.00
9: 352 0 0.00
10: 724 0 0.00
11: 2680 0 0.00
12: 14200 0 0.01
13: 73712 0 0.04
14: 365596 0 0.21
15: 2279184 0 1.40
16: 14772512 0 8.78
17: 95815104 0 1:05.00
$ nvcc -O3 CUDA06_N-Queen.cu && ./a.out -s
6. SGPU non-recursive backtracking + bitmap
N: Total Unique dd:hh:mm:ss.ms
4: 2 0 00:00:00:00.02
5: 10 0 00:00:00:00.00
6: 4 0 00:00:00:00.00
7: 40 0 00:00:00:00.00
8: 92 0 00:00:00:00.00
9: 352 0 00:00:00:00.00
10: 724 0 00:00:00:00.00
11: 2680 0 00:00:00:00.01
12: 14200 0 00:00:00:00.02
13: 73712 0 00:00:00:00.03
14: 365596 0 00:00:00:00.08
15: 2279184 0 00:00:00:00.48
16: 14772512 0 00:00:00:02.41
17: 95815104 0 00:00:00:18.30
$ nvcc -O3 CUDA06_N-Queen.cu && ./a.out -g
6. GPU non-recursive backtracking + bitmap
N: Total Unique dd:hh:mm:ss.ms
4: 2 0 00:00:00:00.02
5: 10 0 00:00:00:00.00
6: 4 0 00:00:00:00.00
7: 40 0 00:00:00:00.00
8: 92 0 00:00:00:00.00
9: 352 0 00:00:00:00.00
10: 724 0 00:00:00:00.00
11: 2680 0 00:00:00:00.01
12: 14200 0 00:00:00:00.05
13: 73712 0 00:00:00:00.07
14: 365596 0 00:00:00:00.07
15: 2279184 0 00:00:00:00.37
16: 14772512 0 00:00:00:02.30
17: 95815104 0 00:00:00:18.07
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define THREAD_NUM 96
#define MAX 27
//variable declarations
long TOTAL=0; //CPU,CPUR
long UNIQUE=0; //CPU,CPUR
typedef unsigned long long uint64;
typedef struct{
uint64 bv;
uint64 down;
uint64 left;
uint64 right;
int x[MAX];
int y[MAX];
}Board ;
//
Board B;
Board b1[3915200];
Board b2[3915200];
//function prototypes: GPU
__global__ void cuda_kernel(int size,int mark,unsigned int* totalDown,unsigned int* totalLeft,unsigned int* totalRight,unsigned int* d_results,int totalCond);
long solve_nqueen_cuda(int size,int mask,int row,int n_left,int n_down,int n_right,int steps);
void NQueenG(int size,int steps);
//function prototypes: SGPU
__global__ void sgpu_cuda_kernel(int size,int mark,unsigned int* totalDown,unsigned int* totalLeft,unsigned int* totalRight,unsigned int* results,int totalCond);
long long sgpu_solve_nqueen_cuda(int size,int steps);
//function prototypes: utility
void TimeFormat(clock_t utime,char *form);
//function prototypes: CPU
void NQueen(int size,int mask,int row,uint64 b,uint64 l,uint64 d,uint64 r);
//function prototypes: CPUR
void NQueenR(int size,uint64 mask,int row,uint64 bv,uint64 left,uint64 down,uint64 right);
//
//GPU
__global__
void cuda_kernel(
int size,
int mark,
unsigned int* totalDown,
unsigned int* totalLeft,
unsigned int* totalRight,
unsigned int* d_results,
int totalCond)
{
register const unsigned int mask=(1<<size)-1;
register unsigned int total=0;
//row=0 here does not mean the search starts from the first row;
//it starts from row "mark" onwards
//(for n=8, mark==2, so the search starts there)
register int row=0;
register unsigned int bit;
//
//threads
//
//thread ID within the block
register unsigned const int tid=threadIdx.x;
//block ID within the grid
register unsigned const int bid=blockIdx.x;
//global thread ID
register unsigned const int idx=bid*blockDim.x+tid;
//
//shared memory
//
//shared memory is used, shared by the threads of a block
//the fixed size of 10 is because, with the current mark setting,
//the GPU executes at most 10 rows
//THREAD_NUM is the number of threads per block
__shared__ unsigned int down[THREAD_NUM][10];
down[tid][row]=totalDown[idx];
__shared__ unsigned int left[THREAD_NUM][10];
left[tid][row]=totalLeft[idx];
__shared__ unsigned int right[THREAD_NUM][10];
right[tid][row]=totalRight[idx];
__shared__ unsigned int bitmap[THREAD_NUM][10];
//derive bitmap from down, left and right
bitmap[tid][row]
=mask&~(
down[tid][row]
|left[tid][row]
|right[tid][row]);
__shared__ unsigned int sum[THREAD_NUM];
//
//surplus threads do no work:
//the GPU launches "steps" threads, but anything beyond totalCond just idles
if(idx<totalCond){
//repack the totalDown, totalLeft and totalRight information
//into down, left and right
//the arrays packed on the CPU hold "steps" entries, but within a block
//we are limited to that block's threads, so indexing by idx is fine
//
/**06 replaced with scalar variables**********/
register unsigned int bitmap_tid_row;
register unsigned int down_tid_row;
register unsigned int left_tid_row;
register unsigned int right_tid_row;
while(row>=0){
//replace bitmap[tid][row] with a scalar variable
bitmap_tid_row=bitmap[tid][row];
down_tid_row=down[tid][row];
left_tid_row=left[tid][row];
right_tid_row=right[tid][row];
/***************************************/
//
//bitmap[tid][row]==00000000: the queen cannot be placed
//anywhere in this row, so go back up one row
/**06 replaced with scalar variables**********/
//if(bitmap[tid][row]==0){
if(bitmap_tid_row==0){
/***************************************/
row--;
}else{
//place a queen
bitmap[tid][row]
^=bit
/**06 replaced with scalar variables**********/
//=(-bitmap[tid][row]&bitmap[tid][row]);
=(-bitmap_tid_row&bitmap_tid_row);
/***************************************/
//is there a square to place on?
if((bit&mask)!=0){
//final row? once we have safely reached the row just before the
//last one handled here, add to the total
if(row+1==mark){
total++;
row--;
}else{
int rowP=row+1;
/**07 replaced with scalar variables for register use ****/
//down[tid][rowP]=down[tid][row]|bit;
down[tid][rowP]=down_tid_row|bit;
//left[tid][rowP]=(left[tid][row]|bit)<<1;
left[tid][rowP]=(left_tid_row|bit)<<1;
//right[tid][rowP]=(right[tid][row]|bit)>>1;
right[tid][rowP]=(right_tid_row|bit)>>1;
bitmap[tid][rowP]
=mask&~(
down[tid][rowP]
|left[tid][rowP]
|right[tid][rowP]);
row++;
}
}else{
//no square to place on: go back up one row
row--;
}
}
}
//finally store the result in sum[tid]
sum[tid]=total;
}else{
//threads beyond totalCond just idle, so they contribute nothing to total
sum[tid]=0;
}
//__syncthreads() synchronizes the threads within a block:
//it waits until every thread has reached the __syncthreads() call
__syncthreads();if(tid<64&&tid+64<THREAD_NUM){
sum[tid]+=sum[tid+64];
}
__syncthreads();if(tid<32){
sum[tid]+=sum[tid+32];
}
__syncthreads();if(tid<16){
sum[tid]+=sum[tid+16];
}
__syncthreads();if(tid<8){
sum[tid]+=sum[tid+8];
}
__syncthreads();if(tid<4){
sum[tid]+=sum[tid+4];
}
__syncthreads();if(tid<2){
sum[tid]+=sum[tid+2];
}
__syncthreads();if(tid<1){
sum[tid]+=sum[tid+1];
}
__syncthreads();if(tid==0){
d_results[bid]=sum[0];
}
}
//
// GPU
long solve_nqueen_cuda(int size,int mask,int row,int n_left,int n_down,int n_right,int steps)
{
//from which row onward the GPU takes over; this setting can be changed, and the larger the value the more work runs in parallel on the GPU
const unsigned int mark=size>11?size-10:2;
const unsigned int h_mark=row;
long total=0;
int totalCond=0;
bool matched=false;
//host
unsigned int down[32]; down[row]=n_down;
unsigned int right[32]; right[row]=n_right;
unsigned int left[32]; left[row]=n_left;
//keeping bitmap as an array lets us go back one row
//without using a stack
unsigned int bitmap[32];
//bitmap[row]=(left[row]|down[row]|right[row]);
/***06 bit handling moved to the GPU*********************/
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
/************************/
unsigned int bit;
//unsigned int* totalDown=new unsigned int[steps];
unsigned int* totalDown;
cudaMallocHost((void**) &totalDown,sizeof(int)*steps);
//unsigned int* totalLeft=new unsigned int[steps];
unsigned int* totalLeft;
cudaMallocHost((void**) &totalLeft,sizeof(int)*steps);
//unsigned int* totalRight=new unsigned int[steps];
unsigned int* totalRight;
cudaMallocHost((void**) &totalRight,sizeof(int)*steps);
//unsigned int* h_results=new unsigned int[steps];
unsigned int* h_results;
cudaMallocHost((void**) &h_results,sizeof(int)*steps);
//device
unsigned int* downCuda;
cudaMalloc((void**) &downCuda,sizeof(int)*steps);
unsigned int* leftCuda;
cudaMalloc((void**) &leftCuda,sizeof(int)*steps);
unsigned int* rightCuda;
cudaMalloc((void**) &rightCuda,sizeof(int)*steps);
unsigned int* resultsCuda;
cudaMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM);
//up to n=12: the CPU handles the first 3 rows -> while row<=mark the
//down, left and right information of those first 3 rows is stored in
//totalDown, totalLeft and totalRight
//-> the rows after that run as GPU multithreads and the results are collected
//from n=13 on, the number of rows handled by the CPU grows by one per n
//for example with n=15 the CPU runs up to row=5,
//and everything after that runs on the GPU (with the current settings the
//GPU executes at most 10 rows)
//while(row>=0) {
register int rowP=0;
while(row>=h_mark) {
//bitmap[row]==00000000: the queen cannot be placed anywhere
//in this row, so go back up one row
/***06 changed bit handling*********************/
//06GPU: this variant performs better
if(bitmap[row]==0){ row--; }
/************************/
/***06 commented out after the bit-handling change*********************/
//06SGPU
//if((bitmap[row]&mask)==mask){row--;}
/************************/
else{//if there is a square to place on, move forward
//06SGPU
/***06 commented out after the bit-handling change*********************/
//bit=(bitmap[row]+1)&~bitmap[row];
//bitmap[row]|=bit;
/************************/
//06GPU: this variant performs better
bitmap[row]^=bit=(-bitmap[row]&bitmap[row]); //place a queen
if((bit&mask)!=0){//if there is a square, advance
rowP=row+1;
down[rowP]=down[row]|bit;
left[rowP]=(left[row]|bit)<<1;
right[rowP]=(right[row]|bit)>>1;
/***06 commented out after the bit-handling change*********************/
//bitmap[rowP]=(down[rowP]|left[rowP]|right[rowP]);
/************************/
/***06 changed bit handling*********************/
bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]);
/************************/
row++;
if(row==mark){
//queens are placed one by one on row "mark" (row 3) and the
//down, left and right information is stored;
//we do not advance to the next row. Once a queen has been tried on every
//possible square of that row, the GPU runs in parallel.
//totalCond becomes the thread id; each thread receives its down/left/right data
//the information of row=2 (it grows from n=13 on; e.g. row=5 for n=15)
//is stored in totalDown, totalLeft and totalRight
totalDown[totalCond]=down[row];
totalLeft[totalCond]=left[row];
totalRight[totalCond]=right[row];
//increment the thread count
totalCond++;
//once the GPU capacity is reached, run the GPU here; steps controls how
//many GPU threads run concurrently
//while n is small, totalCond never exceeds steps, but as n grows
//it eventually does.
//this branch is entered only when totalCond==steps
if(totalCond==steps){
//add to COUNT when matched==true: counting happens inside the GPU, so
//matched is true once we have returned from a GPU run
if(matched){
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
}
matched=false;
}
cudaMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
/** backTrack+bitmap*/
//size-mark is the number of rows the GPU executes; totalCond is the thread count
cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM
>>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
//steps threads are launched, but only totalCond of them actually compute;
//the rest just idle
//counting happens inside the GPU, so matched is set to true after the GPU run
matched=true;
//after running the GPU via the totalCond==steps path, restart the thread
//count from 0 (this allows the GPU to be launched again and again in
//batches of steps threads)
totalCond=0;
}
//after storing the information in totalDown, totalLeft and totalRight, go up one row;
//repeating this places a queen on every possible square of row=2 and stores
//the corresponding information in totalDown, totalLeft and totalRight
row--;
}
}else{
//if there is no square to place on, go back up; until row==mark is reached,
//the CPU runs an ordinary N-Queens search
row--;
}
}
}
//add to COUNT when matched==true: counting happens inside the GPU, so
//matched is true once we have returned from a GPU run
if(matched){
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
}
matched=false;
}
cudaMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
/** backTrack+bitmap*/
//size-mark is the number of rows the GPU executes; totalCond is the thread count
//steps threads are launched, but only totalCond of them actually compute;
//the rest just idle
cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM
>>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
}
//
cudaFree(downCuda);
cudaFree(leftCuda);
cudaFree(rightCuda);
cudaFree(resultsCuda);
/***06 changed to cudaFreeHost**/
//delete[] totalDown;
cudaFreeHost(totalDown);
//delete[] totalLeft;
cudaFreeHost(totalLeft);
//delete[] totalRight;
cudaFreeHost(totalRight);
//delete[] h_results;
cudaFreeHost(h_results);
/************************/
return total;
}
//GPU
void NQueenG(int size,int steps)
{
register int sizeE=size-1;
register int bit=0;
register int mask=((1<<size)-1);
if(size<=0||size>32){return;}
//common to even and odd N: place queens only in the right half
int lim=(size%2==0)?size/2:sizeE/2;
for(int col=0;col<lim;col++){
bit=(1<<col);
TOTAL+=solve_nqueen_cuda(size,mask,1,bit<<1,bit,bit>>1,steps);
}
//mirror: double TOTAL
TOTAL=TOTAL*2;
//for odd N, additionally place a queen in the centre column
if(size%2==1){
bit=(1<<(sizeE)/2);
TOTAL+=solve_nqueen_cuda(size,mask,1,bit<<1,bit,bit>>1,steps);
}
}
//
//SGPU
__global__
void sgpu_cuda_kernel(int size,int mark,unsigned int* totalDown,unsigned int* totalLeft,unsigned int* totalRight,unsigned int* d_results,int totalCond)
{
//threads
const int tid=threadIdx.x;//thread ID within the block
const int bid=blockIdx.x;//block ID within the grid
const int idx=bid*blockDim.x+tid;//global thread ID
//shared memory
__shared__ unsigned int down[THREAD_NUM][10];//shared memory, shared by the threads of a block
__shared__ unsigned int left[THREAD_NUM][10];//THREAD_NUM is the number of threads per block
__shared__ unsigned int right[THREAD_NUM][10];//fixed at 10 because, with the current mark setting, the GPU executes at most 10 rows
__shared__ unsigned int bitmap[THREAD_NUM][10];
__shared__ unsigned int sum[THREAD_NUM];
//
const unsigned int mask=(1<<size)-1;
int total=0;
int row=0;//row=0 does not mean the first row; the search starts from row "mark" (mark==2 for n=8)
unsigned int bit;
if(idx<totalCond){//surplus threads do no work: steps threads are launched but anything beyond totalCond just idles
down[tid][row]=totalDown[idx];//repack totalDown, totalLeft and totalRight into down, left and right
left[tid][row]=totalLeft[idx];//the arrays packed on the CPU hold steps entries, but within a block we are limited to its threads, so idx is fine
right[tid][row]=totalRight[idx];
bitmap[tid][row]=down[tid][row]|left[tid][row]|right[tid][row];//derive bitmap from down, left and right
while(row>=0){
//
//06 GPU version
//if(bitmap[tid][row]==0){//bitmap[tid][row]==00000000: no square for the queen, go back up one row
//06 SGPU version
if((bitmap[tid][row]&mask)==mask){//no square left for the queen in this row, go back up one row
//
row--;
}else{
//
//06GPU
//bitmap[tid][row]^=bit=(-bitmap[tid][row]&bitmap[tid][row]); //place a queen
//06SGPU
bit=(bitmap[tid][row]+1)&~bitmap[tid][row];
bitmap[tid][row]|=bit;
//
if((bit&mask)!=0){//is there a square to place on?
if(row+1==mark){//final row? once we have safely reached the row before the last one handled here, add to the total
total++;
row--;
}
else{
down[tid][row+1]=down[tid][row]|bit;
left[tid][row+1]=(left[tid][row]|bit)<<1;
right[tid][row+1]=(right[tid][row]|bit)>>1;
bitmap[tid][row+1]=(down[tid][row+1]|left[tid][row+1]|right[tid][row+1]);
row++;
}
}else{//no square to place on: go back up one row
row--;
}
}
}
sum[tid]=total;//finally store the result in sum[tid]
}else{//threads beyond totalCond just idle, so of course they contribute nothing to total
sum[tid]=0;
}
//__syncthreads() synchronizes the threads within a block;
//synchronizing means waiting until every thread has reached the __syncthreads() call
__syncthreads();if(tid<64&&tid+64<THREAD_NUM){sum[tid]+=sum[tid+64];}//multiple __syncthreads() calls are needed; with only one the count came out wrong
__syncthreads();if(tid<32){sum[tid]+=sum[tid+32];}
__syncthreads();if(tid<16){sum[tid]+=sum[tid+16];}
__syncthreads();if(tid<8){sum[tid]+=sum[tid+8];}
__syncthreads();if(tid<4){sum[tid]+=sum[tid+4];}
__syncthreads();if(tid<2){sum[tid]+=sum[tid+2];}
__syncthreads();if(tid<1){sum[tid]+=sum[tid+1];}
__syncthreads();if(tid==0){d_results[bid]=sum[0];}
}
//
//SGPU
long long sgpu_solve_nqueen_cuda(int size,int steps)
{
unsigned int down[32];
unsigned int left[32];
unsigned int right[32];
unsigned int bitmap[32];
unsigned int bit;
if(size<=0||size>32){return 0;}
unsigned int* totalDown=new unsigned int[steps];
unsigned int* totalLeft=new unsigned int[steps];
unsigned int* totalRight=new unsigned int[steps];
unsigned int* h_results=new unsigned int[steps];
//device
unsigned int* downCuda;
cudaMalloc((void**) &downCuda,sizeof(int)*steps);
unsigned int* leftCuda;
cudaMalloc((void**) &leftCuda,sizeof(int)*steps);
unsigned int* rightCuda;
cudaMalloc((void**) &rightCuda,sizeof(int)*steps);
unsigned int* resultsCuda;
cudaMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM);
const unsigned int mask=(1<<size)-1;
const unsigned int mark=size>11?size-10:2;
long long total=0;
int totalCond=0;
int row=0;
down[0]=0;
left[0]=0;
right[0]=0;
bitmap[0]=0;
bool matched=false;
for(int col=0;col<size/2;col++){
bit=(1<<col);
bitmap[0]|=bit;
down[1]=bit;
left[1]=bit<<1;
right[1]=bit>>1;
bitmap[1]=(down[1]|left[1]|right[1]);
row=1;
while(row>0){
if((bitmap[row]&mask)==mask){row--;}
else{
bit=(bitmap[row]+1)&~bitmap[row];
bitmap[row]|=bit;
if((bit&mask)!=0){
down[row+1]=down[row]|bit;
left[row+1]=(left[row]|bit)<<1;
right[row+1]=(right[row]|bit)>>1;
bitmap[row+1]=(down[row+1]|left[row+1]|right[row+1]);
row++;
if(row==mark){
totalDown[totalCond]=down[row];
totalLeft[totalCond]=left[row];
totalRight[totalCond]=right[row];
totalCond++;
if(totalCond==steps){
if(matched){
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
matched=false;
}
cudaMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
/** backTrack+bitmap*/
sgpu_cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM
>>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
matched=true;
totalCond=0;
}
row--;
}
}else{row--;}
}
}
}
if(matched){
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
matched=false;
}
cudaMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
/** backTrack+bitmap*/
sgpu_cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM
>>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
total*=2;
if(size%2==1){
matched=false;
totalCond=0;
bit=(1<<(size-1)/2);
bitmap[0]|=bit;
down[1]=bit;
left[1]=bit<<1;
right[1]=bit>>1;
bitmap[1]=(down[1]|left[1]|right[1]);
row=1;
while(row>0){
if((bitmap[row]&mask)==mask){row--;}
else{
bit=(bitmap[row]+1)&~bitmap[row];
bitmap[row]|=bit;
if((bit&mask)!=0){
down[row+1]=down[row]|bit;
left[row+1]=(left[row]|bit)<<1;
right[row+1]=(right[row]|bit)>>1;
bitmap[row+1]=(down[row+1]|left[row+1]|right[row+1]);
row++;
if(row==mark){
totalDown[totalCond]=down[row];
totalLeft[totalCond]=left[row];
totalRight[totalCond]=right[row];
totalCond++;
if(totalCond==steps){
if(matched){
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
matched=false;
}
cudaMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
/** backTrack+bitmap*/
sgpu_cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM
>>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
matched=true;
totalCond=0;
}
row--;
}
}else{row--;}
}
}
if(matched){
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
matched=false;
}
cudaMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
/** backTrack+bitmap*/
sgpu_cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM
>>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
}
cudaFree(downCuda);
cudaFree(leftCuda);
cudaFree(rightCuda);
cudaFree(resultsCuda);
delete[] totalDown;
delete[] totalLeft;
delete[] totalRight;
delete[] h_results;
return total;
}
//
//CUDA initialization
bool InitCUDA()
{
int count;
cudaGetDeviceCount(&count);
if(count==0){fprintf(stderr,"There is no device.\n");return false;}
int i;
for(i=0;i<count;i++){
cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop,i)==cudaSuccess){if(prop.major>=1){break;} }
}
if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;}
cudaSetDevice(i);
return true;
}
//
//output the elapsed time in hh:mm:ss.ms format
void TimeFormat(clock_t utime,char *form){
int dd,hh,mm;
float ftime,ss;
ftime=(float)utime/CLOCKS_PER_SEC;
mm=(int)ftime/60;
ss=ftime-(int)(mm*60);
dd=mm/(24*60);
mm=mm%(24*60);
hh=mm/60;
mm=mm%60;
if(dd)
sprintf(form,"%4d %02d:%02d:%05.2f",dd,hh,mm,ss);
else if(hh)
sprintf(form," %2d:%02d:%05.2f",hh,mm,ss);
else if(mm)
sprintf(form," %2d:%05.2f",mm,ss);
else
sprintf(form," %5.2f",ss);
}
//
bool board_placement(int si,int x,int y)
{
//check whether a queen is being placed on the same square
//printf("i:%d:x:%d:y:%d\n",i,B.x[i],B.y[i]);
if(B.x[x]==y){
//printf("Duplicate x:%d:y:%d\n",x,y);
////placing on the same square is OK
return true;
}
B.x[x]=y;
//x is the row, y is the column; p.N-1-x+y runs from upper right to lower left, x+y from upper left to lower right
uint64 bv=1<<x;
uint64 down=1<<y;
B.y[x]=B.y[x]+down;
uint64 left=1<<(si-1-x+y);
uint64 right=1<<(x+y);
//printf("check valid x:%d:y:%d:p.N-1-x+y:%d;x+y:%d\n",x,y,si-1-x+y,x+y);
//printf("check valid pbv:%d:bv:%d:pbh:%d:bh:%d:pbu:%d:bu:%d:pbd:%d:bd:%d\n",B.bv,bv,B.bh,bh,B.bu,bu,B.bd,bd);
//printf("bvcheck:%d:bhcheck:%d:bucheck:%d:bdcheck:%d\n",B.bv&bv,B.bh&bh,B.bu&bu,B.bd&bd);
if((B.bv&bv)||(B.down&down)||(B.left&left)||(B.right&right)){
//printf("valid_false\n");
return false;
}
//printf("before pbv:%d:bv:%d:pbh:%d:bh:%d:pbu:%d:bu:%d:pbd:%d:bd:%d\n",B.bv,bv,B.bh,bh,B.bu,bu,B.bd,bd);
B.bv|=bv;
B.down|=down;
B.left|=left;
B.right|=right;
//printf("after pbv:%d:bv:%d:pbh:%d:bh:%d:pbu:%d:bu:%d:pbd:%d:bd:%d\n",B.bv,bv,B.bh,bh,B.bu,bu,B.bd,bd);
//printf("valid_true\n");
return true;
}
//
//CPU non-recursive logic method
void NQueen(int size,int mask,int row,uint64 b,uint64 l,uint64 d,uint64 r){
int sizeE=size-1;
int n;
uint64 bitmap[size];
uint64 bv[size];
uint64 left[size];
uint64 down[size];
uint64 right[size];
uint64 bit=0;
bitmap[row]=mask&~(l|d|r);
bv[row]=b;
down[row]=d;
left[row]=l;
right[row]=r;
while(row>=2){
//printf("row:%d,bv:%d,left:%d,down:%d,right:%d\n",row,bv[row],left[row],down[row],right[row]);
while((bv[row]&1)!=0) {
n=row++;
bv[row]=bv[n]>>1;//shift right by one bit
left[row]=left[n]<<1;//left: shift left by one bit
right[row]=right[n]>>1;//right: shift right by one bit
down[row]=down[n];
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
}
bv[row+1]=bv[row]>>1;
if(bitmap[row]==0){
--row;
}else{
bitmap[row]^=bit=(-bitmap[row]&bitmap[row]);
if((bit&mask)!=0||row>=sizeE){
//if((bit)!=0){
if(row>=sizeE){
TOTAL++;
--row;
}else{
n=row++;
left[row]=(left[n]|bit)<<1;
down[row]=down[n]|bit;
right[row]=(right[n]|bit)>>1;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
//bitmap[row]=~(left[row]|down[row]|right[row]);
}
}else{
--row;
}
}
}
}
//
//
//CPUR recursive logic method
void NQueenR(int size,uint64 mask, int row,uint64 bv,uint64 left,uint64 down,uint64 right){
uint64 bitmap=0;
uint64 bit=0;
//skip rows on which a queen has already been placed
while((bv&1)!=0) {
bv>>=1;//shift right by one bit
left<<=1;//left: shift left by one bit
right>>=1;//right: shift right by one bit
row++;
}
bv>>=1;
if(row==size){
TOTAL++;
}else{
//bitmap=mask&~(left|down|right);//dropped the mask: with it, no counts came out beyond the 10th size
bitmap=~(left|down|right);
while(bitmap>0){
bit=(-bitmap&bitmap);
bitmap=(bitmap^bit);
NQueenR(size,mask,row+1,bv,(left|bit)<<1,down|bit,(right|bit)>>1);
}
}
}
//
long prepare_1(int size){
//CPUR
int pres_a[930];
int pres_b[930];
int idx=0;
long bcnt=0;
for(int a=0;a<size;a++){
for(int b=0;b<size;b++){
if((a>=b&&(a-b)<=1)||(b>a&&(b-a)<=1)){
continue;
}
pres_a[idx]=a;
pres_b[idx]=b;
idx++;
}
}
Board wB=B;
for(int w=0;w<=(size/2)*(size-3);w++){
B=wB;
B.bv=B.down=B.left=B.right=0;
for(int j=0;j<size;j++){
B.x[j]=-1;
}
board_placement(size,0,pres_a[w]);
board_placement(size,1,pres_b[w]);
Board nB=B;
//int lsize=(size-2)*(size-1)-w;
//for(int n=w;n<lsize;n++){
for(int n=0;n<idx;n++){
B=nB;
if(board_placement(size,pres_a[n],size-1)==false){
continue;
}
if(board_placement(size,pres_b[n],size-2)==false){
continue;
}
Board eB=B;
//for(int e=w;e<lsize;e++){
for(int e=0;e<idx;e++){
B=eB;
if(board_placement(size,size-1,size-1-pres_a[e])==false){
continue;
}
if(board_placement(size,size-2,size-1-pres_b[e])==false){
continue;
}
Board sB=B;
//for(int s=w;s<lsize;s++){
for(int s=0;s<idx;s++){
B=sB;
if(board_placement(size,size-1-pres_a[s],0)==false){
continue;
}
if(board_placement(size,size-1-pres_b[s],1)==false){
continue;
}
b1[bcnt]=B;
bcnt++;
}
}
}
}
return bcnt;
}
//
long prepare_2(int size){
//CPUR
int pres_a[930];
int pres_b[930];
int idx=0;
long bcnt=0;
for(int a=0;a<size;a++){
for(int b=0;b<size;b++){
if((a>=b&&(a-b)<=1)||(b>a&&(b-a)<=1)){
continue;
}
pres_a[idx]=a;
pres_b[idx]=b;
idx++;
}
}
Board wB=B;
for(int w=(size/2)*(size-3)+1;w<=(size/2+1)*(size-3);w++){
B=wB;
B.bv=B.down=B.left=B.right=0;
for(int j=0;j<size;j++){
B.x[j]=-1;
}
board_placement(size,0,pres_a[w]);
board_placement(size,1,pres_b[w]);
Board nB=B;
//int lsize=(size-2)*(size-1)-w;
//for(int n=w;n<lsize;n++){
for(int n=0;n<idx;n++){
B=nB;
if(board_placement(size,pres_a[n],size-1)==false){
continue;
}
if(board_placement(size,pres_b[n],size-2)==false){
continue;
}
Board eB=B;
//for(int e=w;e<lsize;e++){
for(int e=0;e<idx;e++){
B=eB;
if(board_placement(size,size-1,size-1-pres_a[e])==false){
continue;
}
if(board_placement(size,size-2,size-1-pres_b[e])==false){
continue;
}
Board sB=B;
//for(int s=w;s<lsize;s++){
for(int s=0;s<idx;s++){
B=sB;
if(board_placement(size,size-1-pres_a[s],0)==false){
continue;
}
if(board_placement(size,size-1-pres_b[s],1)==false){
continue;
}
b2[bcnt]=B;
bcnt++;
}
}
}
}
return bcnt;
}
//main method
int main(int argc,char** argv) {
bool cpu=false,cpur=false,gpu=false,sgpu=false;
int argstart=1,steps=24576;
/** parameter handling */
if(argc>=2&&argv[1][0]=='-'){
if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;}
else if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;}
else if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;}
else if(argv[1][1]=='s'||argv[1][1]=='S'){sgpu=true;}
else
cpur=true;
argstart=2;
}
if(argc<argstart){
printf("Usage: %s [-c|-g|-r|-s]\n",argv[0]);
printf(" -c: CPU only\n");
printf(" -r: CPUR only\n");
printf(" -g: GPU only\n");
printf(" -s: SGPU only\n");
printf("Default to 8 queen\n");
}
/** output and execution */
if(cpu){
printf("\n\n6. CPU non-recursive backtracking + bitmap\n");
}else if(cpur){
printf("\n\n6. CPUR recursive backtracking + bitmap\n");
}else if(gpu){
printf("\n\n6. GPU non-recursive backtracking + bitmap\n");
}else if(sgpu){
printf("\n\n6. SGPU non-recursive backtracking + bitmap\n");
}
if(cpu||cpur){
printf("%s\n"," N: Total Unique hh:mm:ss.ms");
clock_t st; //for timing
char t[20]; //holds hh:mm:ss.ms
int min=5;
int targetN=14;
uint64 mask;
for(int i=min;i<=targetN;i++){
TOTAL=0;
UNIQUE=0;
mask=((1<<i)-1);
int size=i;
long bcnt1=prepare_1(size);
long bcnt2=0;
if ( size%2==1){
bcnt2=prepare_2(size);
}
st=clock();
for (long bc=0;bc<=bcnt1;bc++){
B=b1[bc];
if(cpur){
//CPUR
NQueenR(i,mask,2,B.bv >> 2,
B.left>>4,
((((B.down>>2)|(~0<<(size-4)))+1)<<(size-5))-1,
(B.right>>4)<<(size-5));
}else if(cpu){
//CPU
NQueen(i,mask,2,B.bv >> 2,
B.left>>4,
((((B.down>>2)|(~0<<(size-4)))+1)<<(size-5))-1,
(B.right>>4)<<(size-5));
}
}
TOTAL=TOTAL*2;
if ( size%2==1){
for (long bc=0;bc<=bcnt2;bc++){
B=b2[bc];
if(cpur){
//CPUR
NQueenR(i,mask,2,B.bv >> 2,
B.left>>4,
((((B.down>>2)|(~0<<(size-4)))+1)<<(size-5))-1,
(B.right>>4)<<(size-5));
}else if(cpu){
//CPU
NQueen(i,mask,2,B.bv >> 2,
B.left>>4,
((((B.down>>2)|(~0<<(size-4)))+1)<<(size-5))-1,
(B.right>>4)<<(size-5));
}
}
}
//
//
TimeFormat(clock()-st,t);
printf("%2d:%13ld%16ld%s\n",i,TOTAL,UNIQUE,t);
}
}
if(gpu||sgpu){
if(!InitCUDA()){return 0;}
int min=4;int targetN=17;
struct timeval t0;struct timeval t1;
int ss;int ms;int dd;
printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
for(int i=min;i<=targetN;i++){
gettimeofday(&t0,NULL); // start timing
if(gpu){
TOTAL=0;
UNIQUE=0;
NQueenG(i,steps);
}else if(sgpu){
TOTAL=sgpu_solve_nqueen_cuda(i,steps);
UNIQUE=0;
}
gettimeofday(&t1,NULL); // stop timing
if(t1.tv_usec<t0.tv_usec) {
dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400;
ss=(t1.tv_sec-t0.tv_sec-1)%86400;
ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
} else {
dd=(int)(t1.tv_sec-t0.tv_sec)/86400;
ss=(t1.tv_sec-t0.tv_sec)%86400;
ms=(t1.tv_usec-t0.tv_usec+500)/10000;
}
int hh=ss/3600;
int mm=(ss-hh*3600)/60;
ss%=60;
printf("%2d:%13ld%16ld%4.2d:%02d:%02d:%02d.%02d\n", i,TOTAL,UNIQUE,dd,hh,mm,ss,ms);
}
}
return 0;
}
|
8fee339703852a21cd5e24f090a04d2bcc438f8e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* PullingPlanePotential.cu
*
* Created on: Sep 19, 2013
* Author: zip
*/
#include "../Core/global.h"
#include "../Util/Log.h"
#include "PullingPlanePotential.cuh"
namespace pulling_plane_potential
{
class Log: public ILog
{
virtual void Write(const char* message) const
{
std::cout << makeTimePrefix() << "<pulling_plane_potential> " << message << std::endl;
}
} log;
#define LOG LogStream(log)
void create()
{
if(getYesNoParameter(PARAMETER_PULLING_PLANE, DEFAULT_PULLING_PLANE))
{
potential.destroy = &destroy;
sprintf(potential.name, "Pulling-Plane potential");
potentials[potentialsCount] = &potential;
potentialsCount++;
init();
char pullProtocol[PARAMETER_LENGTH];
getMaskedParameter(pullProtocol, PARAMETER_PULLING_PLANE_PROTOCOL);
if(strcmp(pullProtocol, PARAMETER_VALUE_PULLING_PLANE_PROTOCOL_FCONST) == 0)
{
potential.compute = &computePlanePulling;
potentialData.pullForce = getFloatParameter(PARAMETER_PULLING_PLANE_PULLFORCE, 0, 0);
LOG << "Constant Force Plane pulling will be performed. Pulling force value is set to " << potentialData.pullForce << "pN.";
//convert pN to kJ/(mol*nm)
potentialData.pullForce = 0.6*potentialData.pullForce;
potentialData.pullSpring = 0;
potentialData.pullSpeed = 0;
sprintf(planeLocationUpdater.name, "Pulling-Plane Constant Force updater");
planeLocationUpdater.update = updatePlaneLocation;
planeLocationUpdater.destroy = destroyPlaneLocationUpdater;
planeLocationUpdater.frequency = getIntegerParameter(PARAMETER_PULLING_PLANE_UPDATE_FREQ);
updaters[updatersCount] = &planeLocationUpdater;
updatersCount ++;
}
else
if(strcmp(pullProtocol, PARAMETER_VALUE_PULLING_PLANE_PROTOCOL_FRAMP) == 0)
{
potential.compute = &computePlanePulling;
potentialData.pullForce = 0;
potentialData.pullSpring = getFloatParameter(PARAMETER_PULLING_PLANE_PULLSPRING, 0, 0);
potentialData.pullSpeed = getFloatParameter(PARAMETER_PULLING_PLANE_PULLSPEED, 0, 0);
LOG << "Constant Velocity Plane pulling will be performed. Pulling speed is set to " << potentialData.pullSpeed << " um/s, spring constant is set to " << potentialData.pullSpring << " pN/nm.";
//convert um/s to nm/ps
potentialData.pullSpeed = potentialData.pullSpeed / 1000000000.0;
//convert pN/nm to kJ/(mol*nm^2)
potentialData.pullSpring = 0.6*potentialData.pullSpring;
sprintf(planeLocationUpdater.name, "Pulling-Plane Constant Velocity updater");
planeLocationUpdater.update = updatePlaneLocation;
planeLocationUpdater.destroy = destroyPlaneLocationUpdater;
planeLocationUpdater.frequency = getIntegerParameter(PARAMETER_PULLING_PLANE_UPDATE_FREQ);
updaters[updatersCount] = &planeLocationUpdater;
updatersCount ++;
}
else
{
DIE("Pulling-Plane protocol parameter should be either %s for constant force pulling or %s for force-ramp (constant speed) pulling.",
PARAMETER_VALUE_PULLING_PLANE_PROTOCOL_FCONST,
PARAMETER_VALUE_PULLING_PLANE_PROTOCOL_FRAMP);
}
hipMemcpy(potentialData.d_workList, potentialData.h_workList, potentialData.Ntotal*sizeof(WorkList), hipMemcpyHostToDevice);
hipMemcpy(potentialData.d_force, potentialData.h_force, potentialData.Ntotal*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(potentialData.d_planeDisplacement, potentialData.h_planeDisplacement, potentialData.Nplane*sizeof(float), hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_potentialData, &potentialData, sizeof(PotentialData), 0, hipMemcpyHostToDevice);
//init output
allocateCPU((void**)&outputFilename, parameters.Ntr*sizeof(char*));
char trajnum[10];
int traj;
for(traj = 0; traj < parameters.Ntr; traj++)
{
sprintf(trajnum, "%d", traj + parameters.firstrun);
outputFilename[traj] = (char*)calloc(PARAMETER_LENGTH, sizeof(char));
getMaskedParameterWithReplacement(outputFilename[traj], PARAMETER_PULLING_PLANE_OUTPUT_FILE, trajnum, "<run>");
outputFile = safe_fopen(outputFilename[traj], "w");
fclose(outputFile);
}
outputFreq = getIntegerParameter(PARAMETER_PULLING_PLANE_OUTPUT_FREQ);
logOutputFreq = getIntegerParameter(PARAMETER_ENERGYOUTPUT_FREQ);
planePullingGridSize = potentialData.Ntotal/BLOCK_SIZE + 1;
planePullingBlockSize = BLOCK_SIZE;
LOG << "Done initializing Pulling-Plane potential.";
}
else
{
LOG << "No Plane pulling will be performed.";
}
}
void init()
{
LOG << "Initializing Pulling-Plane potential...";
//1tr
//allocateCPU((void**)&potentialData.prc, gsystem.N*sizeof(int));
//ntr
allocateCPU((void**)&potentialData.prc, gsystem.Ntot*sizeof(int));
potentialData.Npulled = 0;
potentialData.Nfixed = 0;
int traj, i, itot;
char refFilename[100];
char trajnum[10];
PDB refFile;
//1tr
//traj = 0;
//if(parameters.Ntr != 1)
// DIE("Pulling-Plane protocol can not yet perform pulling for more than 1 trajectory.");
//ntr
for(traj = 0; traj < parameters.Ntr; traj ++)
{
sprintf(trajnum, "%d", traj + parameters.firstrun);
getMaskedParameterWithReplacement(refFilename, PARAMETER_PULLING_PLANE_REFFILE, trajnum, "<run>");
readPDB(refFilename, &refFile);
for(i = 0; i < gsystem.N; i++)
{
itot = i + traj*gsystem.N;
if(refFile.atoms[i].beta > 0)
{
potentialData.prc[itot] = PLANE_ATOM_FIXED;
potentialData.Nfixed++;
}
else
if(refFile.atoms[i].occupancy > 0)
{
potentialData.prc[itot] = PLANE_ATOM_PULLED;
potentialData.Npulled++;
}
else
{
potentialData.prc[itot] = PLANE_ATOM_FREE;
}
}
}
if(potentialData.Npulled + potentialData.Nfixed != 0)
{
potentialData.Ntotal = potentialData.Npulled + potentialData.Nfixed;
LOG << "Total amount of atoms, included in Plane-pulling worklist is " << potentialData.Ntotal << ".";
potentialData.Nplane = 2*parameters.Ntr;
LOG << "Total amount of planes is " << potentialData.Nplane << ".";
getVectorParameter(PARAMETER_PULLING_PLANE_NORM, &potentialData.planeNorm.x, &potentialData.planeNorm.y, &potentialData.planeNorm.z, 0, 0, 0, 0);
potentialData.planeNorm.x = potentialData.planeNorm.x / sqrt(potentialData.planeNorm.x*potentialData.planeNorm.x + potentialData.planeNorm.y*potentialData.planeNorm.y + potentialData.planeNorm.z*potentialData.planeNorm.z);
potentialData.planeNorm.y = potentialData.planeNorm.y / sqrt(potentialData.planeNorm.x*potentialData.planeNorm.x + potentialData.planeNorm.y*potentialData.planeNorm.y + potentialData.planeNorm.z*potentialData.planeNorm.z);
potentialData.planeNorm.z = potentialData.planeNorm.z / sqrt(potentialData.planeNorm.x*potentialData.planeNorm.x + potentialData.planeNorm.y*potentialData.planeNorm.y + potentialData.planeNorm.z*potentialData.planeNorm.z);
LOG << "Plane normal is set to (" << potentialData.planeNorm.x << " " << potentialData.planeNorm.y << " " << potentialData.planeNorm.z << ").";
getVectorParameter(PARAMETER_PULLING_PLANE_POINT, &potentialData.planePoint.x, &potentialData.planePoint.y, &potentialData.planePoint.z, 0, 0, 0, 0);
LOG << "Plane point is set to (" << potentialData.planePoint.x << " " << potentialData.planePoint.y << " " << potentialData.planePoint.z << ").";
potentialData.planeMass = getFloatParameter(PARAMETER_PULLING_PLANE_MASS, 0, 0);
LOG << "Plane mass is set to " << potentialData.planeMass << ".";
potentialData.fixSpring = getFloatParameter(PARAMETER_PULLING_PLANE_FIXSPRING, 0, 0);
LOG << "Plane fixing spring is set to " << potentialData.fixSpring << " pN/nm.";
potentialData.fixSpring = 0.6*potentialData.fixSpring;
allocateCPU((void**)&potentialData.h_force, potentialData.Ntotal*sizeof(float));
allocateGPU((void**)&potentialData.d_force, potentialData.Ntotal*sizeof(float));
for(i = 0; i < potentialData.Ntotal; i++)
{
potentialData.h_force[i] = 0;
}
allocateCPU((void**)&potentialData.h_planeDisplacement, potentialData.Nplane*sizeof(float));
allocateGPU((void**)&potentialData.d_planeDisplacement, potentialData.Nplane*sizeof(float));
for(i = 0; i < potentialData.Nplane; i++)
{
potentialData.h_planeDisplacement[i] = 0;
}
allocateCPU((void**)&potentialData.h_workList, potentialData.Ntotal*sizeof(WorkList));
allocateGPU((void**)&potentialData.d_workList, potentialData.Ntotal*sizeof(WorkList));
initWorkList();
}
else
{
DIE("No atoms were selected for Pulling-Plane protocol, while it was switched on.");
}
}
void initWorkList()
{
int i, j, traj, itot;
float4 d;
j = 0;
//1tr
//traj = 0;
//ntr
for(traj = 0; traj < parameters.Ntr; traj++)
{
for(i = 0; i < gsystem.N; i++)
{
itot = i + traj*gsystem.N;
if(potentialData.prc[i] == PLANE_ATOM_FIXED)
{
DPRINTF("Atom %d-%d\t(%s%d-%s) in trajectory %d will be connected to the fixed plane.\n",
itot, i, topology.atoms[i].resName, topology.atoms[i].resid, topology.atoms[i].name, traj+parameters.firstrun);
LOG << "Atom " << itot << "-" << i << "\t(" << topology.atoms[i].resName << topology.atoms[i].resid << " - " << topology.atoms[i].name << ") " << " will be connected to the fixed plane.";
potentialData.h_workList[j].atomID = itot;
potentialData.h_workList[j].planeID = traj*2;
j++;
}
if(potentialData.prc[i] == PLANE_ATOM_PULLED)
{
DPRINTF("Atom %d-%d\t(%s%d-%s) in trajectory %d will be connected to the pulled plane.\n",
itot, i, topology.atoms[i].resName, topology.atoms[i].resid, topology.atoms[i].name, traj+parameters.firstrun);
LOG << "Atom " << itot << "-" << i << "\t(" << topology.atoms[i].resName << topology.atoms[i].resid << " - " << topology.atoms[i].name << ") " << " will be connected to the pulled plane.";
potentialData.h_workList[j].atomID = itot;
potentialData.h_workList[j].planeID = traj*2 + 1;
j++;
}
}
}
if(getYesNoParameter(PARAMETER_PULLING_PLANE_USE_PDB, 0))
{
float3* refCoords = (float3*)calloc(gsystem.Ntot, sizeof(float3));
PDB refPDB;
char trajnum[10];
char* refPDBFilename = (char*)calloc(PARAMETER_LENGTH, sizeof(char));
for(traj = 0; traj < parameters.Ntr; traj++)
{
sprintf(trajnum, "%d", traj + parameters.firstrun);
getMaskedParameterWithReplacement(refPDBFilename, PARAMETER_PULLING_PLANE_COORDS_PDB, trajnum, "<run>");
readPDB(refPDBFilename, &refPDB);
for(i = 0; i < gsystem.N; i++)
{
itot = i + traj*gsystem.N;
refCoords[itot].x = refPDB.atoms[i].x;
refCoords[itot].y = refPDB.atoms[i].y;
refCoords[itot].z = refPDB.atoms[i].z;
}
}
for(i = 0; i < potentialData.Ntotal; i++)
{
j = potentialData.h_workList[i].atomID;
d.x = potentialData.planePoint.x - refCoords[j].x;
d.y = potentialData.planePoint.y - refCoords[j].y;
d.z = potentialData.planePoint.z - refCoords[j].z;
potentialData.h_workList[i].bDistance = d.x*potentialData.planeNorm.x + d.y*potentialData.planeNorm.y + d.z*potentialData.planeNorm.z;
}
} else {
for(i = 0; i < potentialData.Ntotal; i++)
{
j = potentialData.h_workList[i].atomID;
d.x = potentialData.planePoint.x - gsystem.h_coord[j].x;
d.y = potentialData.planePoint.y - gsystem.h_coord[j].y;
d.z = potentialData.planePoint.z - gsystem.h_coord[j].z;
potentialData.h_workList[i].bDistance = d.x*potentialData.planeNorm.x + d.y*potentialData.planeNorm.y + d.z*potentialData.planeNorm.z;
}
}
}
__global__ void computePlanePulling_kernel()
{
int d_i = blockIdx.x*blockDim.x + threadIdx.x;
if(d_i < c_potentialData.Ntotal)
{
int atomID = c_potentialData.d_workList[d_i].atomID;
int planeID = c_potentialData.d_workList[d_i].planeID;
float4 coord = tex1Dfetch(t_coord, atomID);
float4 currentpoint = c_potentialData.planePoint;
float4 norm = c_potentialData.planeNorm;
//here dis is a plane displacement
float dis = c_potentialData.d_planeDisplacement[planeID];
//calculating current plane position
currentpoint.x += dis*norm.x;
currentpoint.y += dis*norm.y;
currentpoint.z += dis*norm.z;
//here dis is the distance between atom and plane
dis = (currentpoint.x - coord.x)*norm.x + (currentpoint.y - coord.y)*norm.y + (currentpoint.z - coord.z)*norm.z;
//here dis is atom's displacement from its balanced state
dis = dis - c_potentialData.d_workList[d_i].bDistance;
//here dis becomes a force value
dis = dis*c_potentialData.fixSpring;
//here we write the force value to use it for integrating plane movement
c_potentialData.d_force[d_i] += dis;
//here we add the forces to all the other forces
//we use currentpoint to store them
currentpoint = c_gsystem.d_forces[atomID];
currentpoint.x += dis*norm.x;
currentpoint.y += dis*norm.y;
currentpoint.z += dis*norm.z;
c_gsystem.d_forces[atomID] = currentpoint;
}
}
inline void computePlanePulling()
{
hipLaunchKernelGGL(( computePlanePulling_kernel), dim3(planePullingGridSize), dim3(planePullingBlockSize), 0, 0, );
}
void updatePlaneLocation()
{
//float4 force;
hipMemcpy(potentialData.h_force, potentialData.d_force, potentialData.Ntotal*sizeof(float),hipMemcpyDeviceToHost);
float force, pullforce, atomforce;
int i, j, traj;
traj = 0;
for(i = 1; i < potentialData.Nplane; i = i + 2)
{
force = 0;
atomforce = 0;
for(j = 0; j < potentialData.Ntotal; j++)
{
if(potentialData.h_workList[j].planeID == i)
{
atomforce += potentialData.h_force[j];
potentialData.h_force[j] = 0;
}
}
//here take - instead of +, because this is a force applied to the plane
//LOG << -force*potentialData.planeNorm.x/planeLocationUpdater.frequency <<" "<< -force*potentialData.planeNorm.y/planeLocationUpdater.frequency <<" "<< -force*potentialData.planeNorm.z/planeLocationUpdater.frequency <<" ";
//LOG << "Plane " << i << ": atomic force increased to\t"
// << -force*potentialData.planeNorm.x/planeLocationUpdater.frequency <<" "<< -force*potentialData.planeNorm.y/planeLocationUpdater.frequency <<" "<< -force*potentialData.planeNorm.z/planeLocationUpdater.frequency <<" ";
//compute force for CF
if(potentialData.pullForce != 0)
{
atomforce = -atomforce/planeLocationUpdater.frequency;
force = atomforce + potentialData.pullForce;
}
//compute force for CV
else
{
pullforce = potentialData.pullSpring*(potentialData.pullSpeed*step*integrator->h - potentialData.h_planeDisplacement[i]);
atomforce = -atomforce/planeLocationUpdater.frequency;
force = atomforce + pullforce;
}
//the force divided by the plane mass is used as the plane velocity (first-order update)
force = force/potentialData.planeMass;
//compute current displacement
potentialData.h_planeDisplacement[i] += force*planeLocationUpdater.frequency*integrator->h;
//LOG << "Plane " << i << ": relocating point position to\t"
// << potentialData.planePoint.x + potentialData.planeNorm.x*potentialData.h_planeDisplacement[i] << "(" << potentialData.planeNorm.x*potentialData.h_planeDisplacement[i] << ") "
// << potentialData.planePoint.y + potentialData.planeNorm.y*potentialData.h_planeDisplacement[i] << "(" << potentialData.planeNorm.y*potentialData.h_planeDisplacement[i] << ") "
// << potentialData.planePoint.z + potentialData.planeNorm.z*potentialData.h_planeDisplacement[i] << "(" << potentialData.planeNorm.z*potentialData.h_planeDisplacement[i] << ").";
outputFile = safe_fopen(outputFilename[traj], "a");
if(potentialData.pullForce != 0)
{
if(step%outputFreq == 0 && step != 0)
{
fprintf(outputFile, "%lld\t%f\t%f\t%f\t%f\t%f\n",
step,
potentialData.planePoint.x + potentialData.planeNorm.x*potentialData.h_planeDisplacement[i],
potentialData.planePoint.y + potentialData.planeNorm.y*potentialData.h_planeDisplacement[i],
potentialData.planePoint.z + potentialData.planeNorm.z*potentialData.h_planeDisplacement[i],
potentialData.h_planeDisplacement[i],
atomforce/0.6);
if((step%logOutputFreq - outputFreq == 0) && traj == 0)
{
printf("Pulling-plane CF output:\n");
printf("%*s%*s%*s%*s%*s%*s%*s\n",
ENERGY_OUTPUT_WIDTH, "Step",
ENERGY_OUTPUT_WIDTH, "Plane",
ENERGY_OUTPUT_WIDTH, "X",
ENERGY_OUTPUT_WIDTH, "Y",
ENERGY_OUTPUT_WIDTH, "Z",
ENERGY_OUTPUT_WIDTH, "Displacement",
ENERGY_OUTPUT_WIDTH, "Atom Force");
}
printf("%*lld%*d%*f%*f%*f%*f%*f\n",
ENERGY_OUTPUT_WIDTH, step,
ENERGY_OUTPUT_WIDTH, i,
ENERGY_OUTPUT_WIDTH, potentialData.planePoint.x + potentialData.planeNorm.x*potentialData.h_planeDisplacement[i],
ENERGY_OUTPUT_WIDTH, potentialData.planePoint.y + potentialData.planeNorm.y*potentialData.h_planeDisplacement[i],
ENERGY_OUTPUT_WIDTH, potentialData.planePoint.z + potentialData.planeNorm.z*potentialData.h_planeDisplacement[i],
ENERGY_OUTPUT_WIDTH, potentialData.h_planeDisplacement[i],
ENERGY_OUTPUT_WIDTH, atomforce/0.6);
}
}
else
{
if(step%outputFreq == 0 && step != 0)
{
fprintf(outputFile, "%lld\t%f\t%f\t%f\t%f\t%f\t%f\n",
step,
potentialData.planePoint.x + potentialData.planeNorm.x*potentialData.h_planeDisplacement[i],
potentialData.planePoint.y + potentialData.planeNorm.y*potentialData.h_planeDisplacement[i],
potentialData.planePoint.z + potentialData.planeNorm.z*potentialData.h_planeDisplacement[i],
potentialData.h_planeDisplacement[i],
atomforce/0.6,
pullforce/0.6);
if((step%logOutputFreq - outputFreq == 0) && traj == 0)
{
printf("Pulling-plane CV output:\n");
printf("%*s%*s%*s%*s%*s%*s%*s%*s\n",
ENERGY_OUTPUT_WIDTH, "Step",
ENERGY_OUTPUT_WIDTH, "Plane",
ENERGY_OUTPUT_WIDTH, "X",
ENERGY_OUTPUT_WIDTH, "Y",
ENERGY_OUTPUT_WIDTH, "Z",
ENERGY_OUTPUT_WIDTH, "Displacement",
ENERGY_OUTPUT_WIDTH, "Atom Force",
ENERGY_OUTPUT_WIDTH, "Pull Force");
}
printf("%*lld%*d%*f%*f%*f%*f%*f%*f\n",
ENERGY_OUTPUT_WIDTH, step,
ENERGY_OUTPUT_WIDTH, i,
ENERGY_OUTPUT_WIDTH, potentialData.planePoint.x + potentialData.planeNorm.x*potentialData.h_planeDisplacement[i],
ENERGY_OUTPUT_WIDTH, potentialData.planePoint.y + potentialData.planeNorm.y*potentialData.h_planeDisplacement[i],
ENERGY_OUTPUT_WIDTH, potentialData.planePoint.z + potentialData.planeNorm.z*potentialData.h_planeDisplacement[i],
ENERGY_OUTPUT_WIDTH, potentialData.h_planeDisplacement[i],
ENERGY_OUTPUT_WIDTH, atomforce/0.6,
ENERGY_OUTPUT_WIDTH, pullforce/0.6);
}
}
fclose(outputFile);
traj++;
}
hipMemcpy(potentialData.d_force, potentialData.h_force, potentialData.Ntotal*sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(potentialData.d_planeDisplacement, potentialData.h_planeDisplacement, potentialData.Nplane*sizeof(float),hipMemcpyHostToDevice);
}
void destroyPlaneLocationUpdater()
{
}
void destroy()
{
}
#undef LOG
} //namespace pulling_plane_potential
|
8fee339703852a21cd5e24f090a04d2bcc438f8e.cu
|
/*
* PullingPlanePotential.cu
*
* Created on: Sep 19, 2013
* Author: zip
*/
#include "../Core/global.h"
#include "../Util/Log.h"
#include "PullingPlanePotential.cuh"
namespace pulling_plane_potential
{
class Log: public ILog
{
virtual void Write(const char* message) const
{
std::cout << makeTimePrefix() << "<pulling_plane_potential> " << message << std::endl;
}
} log;
#define LOG LogStream(log)
void create()
{
if(getYesNoParameter(PARAMETER_PULLING_PLANE, DEFAULT_PULLING_PLANE))
{
potential.destroy = &destroy;
sprintf(potential.name, "Pulling-Plane potential");
potentials[potentialsCount] = &potential;
potentialsCount++;
init();
char pullProtocol[PARAMETER_LENGTH];
getMaskedParameter(pullProtocol, PARAMETER_PULLING_PLANE_PROTOCOL);
if(strcmp(pullProtocol, PARAMETER_VALUE_PULLING_PLANE_PROTOCOL_FCONST) == 0)
{
potential.compute = &computePlanePulling;
potentialData.pullForce = getFloatParameter(PARAMETER_PULLING_PLANE_PULLFORCE, 0, 0);
LOG << "Constant Force Plane pulling will be performed. Pulling force value is set to " << potentialData.pullForce << "pN.";
//convert pN to kJ/(mol*nm)
potentialData.pullForce = 0.6*potentialData.pullForce;
potentialData.pullSpring = 0;
potentialData.pullSpeed = 0;
sprintf(planeLocationUpdater.name, "Pulling-Plane Constant Force updater");
planeLocationUpdater.update = updatePlaneLocation;
planeLocationUpdater.destroy = destroyPlaneLocationUpdater;
planeLocationUpdater.frequency = getIntegerParameter(PARAMETER_PULLING_PLANE_UPDATE_FREQ);
updaters[updatersCount] = &planeLocationUpdater;
updatersCount ++;
}
else
if(strcmp(pullProtocol, PARAMETER_VALUE_PULLING_PLANE_PROTOCOL_FRAMP) == 0)
{
potential.compute = &computePlanePulling;
potentialData.pullForce = 0;
potentialData.pullSpring = getFloatParameter(PARAMETER_PULLING_PLANE_PULLSPRING, 0, 0);
potentialData.pullSpeed = getFloatParameter(PARAMETER_PULLING_PLANE_PULLSPEED, 0, 0);
LOG << "Constant Velocity Plane pulling will be performed. Pulling speed is set to " << potentialData.pullSpeed << " um/s, spring constant is set to " << potentialData.pullSpring << " pN/nm.";
//convert um/s to nm/ps
potentialData.pullSpeed = potentialData.pullSpeed / 1000000000.0;
//convert pN/nm to kJ/(mol*nm^2)
potentialData.pullSpring = 0.6*potentialData.pullSpring;
sprintf(planeLocationUpdater.name, "Pulling-Plane Constant Velocity updater");
planeLocationUpdater.update = updatePlaneLocation;
planeLocationUpdater.destroy = destroyPlaneLocationUpdater;
planeLocationUpdater.frequency = getIntegerParameter(PARAMETER_PULLING_PLANE_UPDATE_FREQ);
updaters[updatersCount] = &planeLocationUpdater;
updatersCount ++;
}
else
{
DIE("Pulling-Plane protocol parameter should be either %s for constant force pulling or %s for force-ramp (constant speed) pulling.",
PARAMETER_VALUE_PULLING_PLANE_PROTOCOL_FCONST,
PARAMETER_VALUE_PULLING_PLANE_PROTOCOL_FRAMP);
}
cudaMemcpy(potentialData.d_workList, potentialData.h_workList, potentialData.Ntotal*sizeof(WorkList), cudaMemcpyHostToDevice);
cudaMemcpy(potentialData.d_force, potentialData.h_force, potentialData.Ntotal*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(potentialData.d_planeDisplacement, potentialData.h_planeDisplacement, potentialData.Nplane*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_potentialData, &potentialData, sizeof(PotentialData), 0, cudaMemcpyHostToDevice);
//init output
allocateCPU((void**)&outputFilename, parameters.Ntr*sizeof(char*));
char trajnum[10];
int traj;
for(traj = 0; traj < parameters.Ntr; traj++)
{
sprintf(trajnum, "%d", traj + parameters.firstrun);
outputFilename[traj] = (char*)calloc(PARAMETER_LENGTH, sizeof(char));
getMaskedParameterWithReplacement(outputFilename[traj], PARAMETER_PULLING_PLANE_OUTPUT_FILE, trajnum, "<run>");
outputFile = safe_fopen(outputFilename[traj], "w");
fclose(outputFile);
}
outputFreq = getIntegerParameter(PARAMETER_PULLING_PLANE_OUTPUT_FREQ);
logOutputFreq = getIntegerParameter(PARAMETER_ENERGYOUTPUT_FREQ);
planePullingGridSize = potentialData.Ntotal/BLOCK_SIZE + 1;
planePullingBlockSize = BLOCK_SIZE;
LOG << "Done initializing Pulling-Plane potential.";
}
else
{
LOG << "No Plane pulling will be performed.";
}
}
void init()
{
LOG << "Initializing Pulling-Plane potential...";
//1tr
//allocateCPU((void**)&potentialData.prc, gsystem.N*sizeof(int));
//ntr
allocateCPU((void**)&potentialData.prc, gsystem.Ntot*sizeof(int));
potentialData.Npulled = 0;
potentialData.Nfixed = 0;
int traj, i, itot;
char refFilename[100];
char trajnum[10];
PDB refFile;
//1tr
//traj = 0;
//if(parameters.Ntr != 1)
// DIE("Pulling-Plane protocol can not yet perform pulling for more than 1 trajectory.");
//ntr
for(traj = 0; traj < parameters.Ntr; traj ++)
{
sprintf(trajnum, "%d", traj + parameters.firstrun);
getMaskedParameterWithReplacement(refFilename, PARAMETER_PULLING_PLANE_REFFILE, trajnum, "<run>");
readPDB(refFilename, &refFile);
for(i = 0; i < gsystem.N; i++)
{
itot = i + traj*gsystem.N;
if(refFile.atoms[i].beta > 0)
{
potentialData.prc[itot] = PLANE_ATOM_FIXED;
potentialData.Nfixed++;
}
else
if(refFile.atoms[i].occupancy > 0)
{
potentialData.prc[itot] = PLANE_ATOM_PULLED;
potentialData.Npulled++;
}
else
{
potentialData.prc[itot] = PLANE_ATOM_FREE;
}
}
}
if(potentialData.Npulled + potentialData.Nfixed != 0)
{
potentialData.Ntotal = potentialData.Npulled + potentialData.Nfixed;
LOG << "Total amount of atoms, included in Plane-pulling worklist is " << potentialData.Ntotal << ".";
potentialData.Nplane = 2*parameters.Ntr;
LOG << "Total amount of planes is " << potentialData.Nplane << ".";
getVectorParameter(PARAMETER_PULLING_PLANE_NORM, &potentialData.planeNorm.x, &potentialData.planeNorm.y, &potentialData.planeNorm.z, 0, 0, 0, 0);
potentialData.planeNorm.x = potentialData.planeNorm.x / sqrt(potentialData.planeNorm.x*potentialData.planeNorm.x + potentialData.planeNorm.y*potentialData.planeNorm.y + potentialData.planeNorm.z*potentialData.planeNorm.z);
potentialData.planeNorm.y = potentialData.planeNorm.y / sqrt(potentialData.planeNorm.x*potentialData.planeNorm.x + potentialData.planeNorm.y*potentialData.planeNorm.y + potentialData.planeNorm.z*potentialData.planeNorm.z);
potentialData.planeNorm.z = potentialData.planeNorm.z / sqrt(potentialData.planeNorm.x*potentialData.planeNorm.x + potentialData.planeNorm.y*potentialData.planeNorm.y + potentialData.planeNorm.z*potentialData.planeNorm.z);
LOG << "Plane normal is set to (" << potentialData.planeNorm.x << " " << potentialData.planeNorm.y << " " << potentialData.planeNorm.z << ").";
getVectorParameter(PARAMETER_PULLING_PLANE_POINT, &potentialData.planePoint.x, &potentialData.planePoint.y, &potentialData.planePoint.z, 0, 0, 0, 0);
LOG << "Plane point is set to (" << potentialData.planePoint.x << " " << potentialData.planePoint.y << " " << potentialData.planePoint.z << ").";
potentialData.planeMass = getFloatParameter(PARAMETER_PULLING_PLANE_MASS, 0, 0);
LOG << "Plane mass is set to " << potentialData.planeMass << ".";
potentialData.fixSpring = getFloatParameter(PARAMETER_PULLING_PLANE_FIXSPRING, 0, 0);
LOG << "Plane fixing spring is set to " << potentialData.fixSpring << " pN/nm.";
potentialData.fixSpring = 0.6*potentialData.fixSpring;
allocateCPU((void**)&potentialData.h_force, potentialData.Ntotal*sizeof(float));
allocateGPU((void**)&potentialData.d_force, potentialData.Ntotal*sizeof(float));
for(i = 0; i < potentialData.Ntotal; i++)
{
potentialData.h_force[i] = 0;
}
allocateCPU((void**)&potentialData.h_planeDisplacement, potentialData.Nplane*sizeof(float));
allocateGPU((void**)&potentialData.d_planeDisplacement, potentialData.Nplane*sizeof(float));
for(i = 0; i < potentialData.Nplane; i++)
{
potentialData.h_planeDisplacement[i] = 0;
}
allocateCPU((void**)&potentialData.h_workList, potentialData.Ntotal*sizeof(WorkList));
allocateGPU((void**)&potentialData.d_workList, potentialData.Ntotal*sizeof(WorkList));
initWorkList();
}
else
{
DIE("No atoms were selected for Pulling-Plane protocol, while it was switched on.");
}
}
void initWorkList()
{
int i, j, traj, itot;
float4 d;
j = 0;
//1tr
//traj = 0;
//ntr
for(traj = 0; traj < parameters.Ntr; traj++)
{
for(i = 0; i < gsystem.N; i++)
{
itot = i + traj*gsystem.N;
if(potentialData.prc[i] == PLANE_ATOM_FIXED)
{
DPRINTF("Atom %d-%d\t(%s%d-%s) in trajectory %d will be connected to the fixed plane.\n",
itot, i, topology.atoms[i].resName, topology.atoms[i].resid, topology.atoms[i].name, traj+parameters.firstrun);
LOG << "Atom " << itot << "-" << i << "\t(" << topology.atoms[i].resName << topology.atoms[i].resid << " - " << topology.atoms[i].name << ") " << " will be connected to the fixed plane.";
potentialData.h_workList[j].atomID = itot;
potentialData.h_workList[j].planeID = traj*2;
j++;
}
if(potentialData.prc[i] == PLANE_ATOM_PULLED)
{
DPRINTF("Atom %d-%d\t(%s%d-%s) in trajectory %d will be connected to the pulled plane.\n",
itot, i, topology.atoms[i].resName, topology.atoms[i].resid, topology.atoms[i].name, traj+parameters.firstrun);
LOG << "Atom " << itot << "-" << i << "\t(" << topology.atoms[i].resName << topology.atoms[i].resid << " - " << topology.atoms[i].name << ") " << " will be connected to the pulled plane.";
potentialData.h_workList[j].atomID = itot;
potentialData.h_workList[j].planeID = traj*2 + 1;
j++;
}
}
}
if(getYesNoParameter(PARAMETER_PULLING_PLANE_USE_PDB, 0))
{
float3* refCoords = (float3*)calloc(gsystem.Ntot, sizeof(float3));
PDB refPDB;
char trajnum[10];
char* refPDBFilename = (char*)calloc(PARAMETER_LENGTH, sizeof(char));
for(traj = 0; traj < parameters.Ntr; traj++)
{
sprintf(trajnum, "%d", traj + parameters.firstrun);
getMaskedParameterWithReplacement(refPDBFilename, PARAMETER_PULLING_PLANE_COORDS_PDB, trajnum, "<run>");
readPDB(refPDBFilename, &refPDB);
for(i = 0; i < gsystem.N; i++)
{
itot = i + traj*gsystem.N;
refCoords[itot].x = refPDB.atoms[i].x;
refCoords[itot].y = refPDB.atoms[i].y;
refCoords[itot].z = refPDB.atoms[i].z;
}
}
for(i = 0; i < potentialData.Ntotal; i++)
{
j = potentialData.h_workList[i].atomID;
d.x = potentialData.planePoint.x - refCoords[j].x;
d.y = potentialData.planePoint.y - refCoords[j].y;
d.z = potentialData.planePoint.z - refCoords[j].z;
potentialData.h_workList[i].bDistance = d.x*potentialData.planeNorm.x + d.y*potentialData.planeNorm.y + d.z*potentialData.planeNorm.z;
}
} else {
for(i = 0; i < potentialData.Ntotal; i++)
{
j = potentialData.h_workList[i].atomID;
d.x = potentialData.planePoint.x - gsystem.h_coord[j].x;
d.y = potentialData.planePoint.y - gsystem.h_coord[j].y;
d.z = potentialData.planePoint.z - gsystem.h_coord[j].z;
potentialData.h_workList[i].bDistance = d.x*potentialData.planeNorm.x + d.y*potentialData.planeNorm.y + d.z*potentialData.planeNorm.z;
}
}
}
__global__ void computePlanePulling_kernel()
{
int d_i = blockIdx.x*blockDim.x + threadIdx.x;
if(d_i < c_potentialData.Ntotal)
{
int atomID = c_potentialData.d_workList[d_i].atomID;
int planeID = c_potentialData.d_workList[d_i].planeID;
float4 coord = tex1Dfetch(t_coord, atomID);
float4 currentpoint = c_potentialData.planePoint;
float4 norm = c_potentialData.planeNorm;
//here dis is a plane displacement
float dis = c_potentialData.d_planeDisplacement[planeID];
//calculating current plane position
currentpoint.x += dis*norm.x;
currentpoint.y += dis*norm.y;
currentpoint.z += dis*norm.z;
//here dis is the distance between atom and plane
dis = (currentpoint.x - coord.x)*norm.x + (currentpoint.y - coord.y)*norm.y + (currentpoint.z - coord.z)*norm.z;
//here dis is atom's displacement from its balanced state
dis = dis - c_potentialData.d_workList[d_i].bDistance;
//here dis becomes a force value
dis = dis*c_potentialData.fixSpring;
//here we write the force value to use it for integrating plane movement
c_potentialData.d_force[d_i] += dis;
//here we add the forces to all the other forces
//we use currentpoint to store them
currentpoint = c_gsystem.d_forces[atomID];
currentpoint.x += dis*norm.x;
currentpoint.y += dis*norm.y;
currentpoint.z += dis*norm.z;
c_gsystem.d_forces[atomID] = currentpoint;
}
}
inline void computePlanePulling()
{
computePlanePulling_kernel<<<planePullingGridSize, planePullingBlockSize>>>();
}
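//Host-side updater: sums the plane forces accumulated on the GPU since the last call,
//combines them with the external pulling term (constant force or constant velocity),
//advances the displacement of each pulled plane and writes the output.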
void updatePlaneLocation()
{
//float4 force;
cudaMemcpy(potentialData.h_force, potentialData.d_force, potentialData.Ntotal*sizeof(float),cudaMemcpyDeviceToHost);
float force, pullforce, atomforce;
int i, j, traj;
traj = 0;
for(i = 1; i < potentialData.Nplane; i = i + 2)
{
force = 0;
atomforce = 0;
for(j = 0; j < potentialData.Ntotal; j++)
{
if(potentialData.h_workList[j].planeID == i)
{
atomforce += potentialData.h_force[j];
potentialData.h_force[j] = 0;
}
}
//the sign is flipped below (minus instead of plus) because this is the reaction force the atoms apply to the plane
//LOG << -force*potentialData.planeNorm.x/planeLocationUpdater.frequency <<" "<< -force*potentialData.planeNorm.y/planeLocationUpdater.frequency <<" "<< -force*potentialData.planeNorm.z/planeLocationUpdater.frequency <<" ";
//LOG << "Plane " << i << ": atomic force increased to\t"
// << -force*potentialData.planeNorm.x/planeLocationUpdater.frequency <<" "<< -force*potentialData.planeNorm.y/planeLocationUpdater.frequency <<" "<< -force*potentialData.planeNorm.z/planeLocationUpdater.frequency <<" ";
//compute force for CF
if(potentialData.pullForce != 0)
{
atomforce = -atomforce/planeLocationUpdater.frequency;
force = atomforce + potentialData.pullForce;
}
//compute force for CV
else
{
pullforce = potentialData.pullSpring*(potentialData.pullSpeed*step*integrator->h - potentialData.h_planeDisplacement[i]);
atomforce = -atomforce/planeLocationUpdater.frequency;
force = atomforce + pullforce;
}
//dividing by the plane mass turns the net force into an effective velocity (planeMass acts as a friction-like coefficient here)
force = force/potentialData.planeMass;
//compute current displacement
potentialData.h_planeDisplacement[i] += force*planeLocationUpdater.frequency*integrator->h;
//LOG << "Plane " << i << ": relocating point position to\t"
// << potentialData.planePoint.x + potentialData.planeNorm.x*potentialData.h_planeDisplacement[i] << "(" << potentialData.planeNorm.x*potentialData.h_planeDisplacement[i] << ") "
// << potentialData.planePoint.y + potentialData.planeNorm.y*potentialData.h_planeDisplacement[i] << "(" << potentialData.planeNorm.y*potentialData.h_planeDisplacement[i] << ") "
// << potentialData.planePoint.z + potentialData.planeNorm.z*potentialData.h_planeDisplacement[i] << "(" << potentialData.planeNorm.z*potentialData.h_planeDisplacement[i] << ").";
outputFile = safe_fopen(outputFilename[traj], "a");
if(potentialData.pullForce != 0)
{
if(step%outputFreq == 0 && step != 0)
{
fprintf(outputFile, "%lld\t%f\t%f\t%f\t%f\t%f\n",
step,
potentialData.planePoint.x + potentialData.planeNorm.x*potentialData.h_planeDisplacement[i],
potentialData.planePoint.y + potentialData.planeNorm.y*potentialData.h_planeDisplacement[i],
potentialData.planePoint.z + potentialData.planeNorm.z*potentialData.h_planeDisplacement[i],
potentialData.h_planeDisplacement[i],
atomforce/0.6);
if((step%logOutputFreq - outputFreq == 0) && traj == 0)
{
printf("Pulling-plane CF output:\n");
printf("%*s%*s%*s%*s%*s%*s%*s\n",
ENERGY_OUTPUT_WIDTH, "Step",
ENERGY_OUTPUT_WIDTH, "Plane",
ENERGY_OUTPUT_WIDTH, "X",
ENERGY_OUTPUT_WIDTH, "Y",
ENERGY_OUTPUT_WIDTH, "Z",
ENERGY_OUTPUT_WIDTH, "Displacement",
ENERGY_OUTPUT_WIDTH, "Atom Force");
}
printf("%*lld%*d%*f%*f%*f%*f%*f\n",
ENERGY_OUTPUT_WIDTH, step,
ENERGY_OUTPUT_WIDTH, i,
ENERGY_OUTPUT_WIDTH, potentialData.planePoint.x + potentialData.planeNorm.x*potentialData.h_planeDisplacement[i],
ENERGY_OUTPUT_WIDTH, potentialData.planePoint.y + potentialData.planeNorm.y*potentialData.h_planeDisplacement[i],
ENERGY_OUTPUT_WIDTH, potentialData.planePoint.z + potentialData.planeNorm.z*potentialData.h_planeDisplacement[i],
ENERGY_OUTPUT_WIDTH, potentialData.h_planeDisplacement[i],
ENERGY_OUTPUT_WIDTH, atomforce/0.6);
}
}
else
{
if(step%outputFreq == 0 && step != 0)
{
fprintf(outputFile, "%lld\t%f\t%f\t%f\t%f\t%f\t%f\n",
step,
potentialData.planePoint.x + potentialData.planeNorm.x*potentialData.h_planeDisplacement[i],
potentialData.planePoint.y + potentialData.planeNorm.y*potentialData.h_planeDisplacement[i],
potentialData.planePoint.z + potentialData.planeNorm.z*potentialData.h_planeDisplacement[i],
potentialData.h_planeDisplacement[i],
atomforce/0.6,
pullforce/0.6);
if((step%logOutputFreq - outputFreq == 0) && traj == 0)
{
printf("Pulling-plane CV output:\n");
printf("%*s%*s%*s%*s%*s%*s%*s%*s\n",
ENERGY_OUTPUT_WIDTH, "Step",
ENERGY_OUTPUT_WIDTH, "Plane",
ENERGY_OUTPUT_WIDTH, "X",
ENERGY_OUTPUT_WIDTH, "Y",
ENERGY_OUTPUT_WIDTH, "Z",
ENERGY_OUTPUT_WIDTH, "Displacement",
ENERGY_OUTPUT_WIDTH, "Atom Force",
ENERGY_OUTPUT_WIDTH, "Pull Force");
}
printf("%*lld%*d%*f%*f%*f%*f%*f%*f\n",
ENERGY_OUTPUT_WIDTH, step,
ENERGY_OUTPUT_WIDTH, i,
ENERGY_OUTPUT_WIDTH, potentialData.planePoint.x + potentialData.planeNorm.x*potentialData.h_planeDisplacement[i],
ENERGY_OUTPUT_WIDTH, potentialData.planePoint.y + potentialData.planeNorm.y*potentialData.h_planeDisplacement[i],
ENERGY_OUTPUT_WIDTH, potentialData.planePoint.z + potentialData.planeNorm.z*potentialData.h_planeDisplacement[i],
ENERGY_OUTPUT_WIDTH, potentialData.h_planeDisplacement[i],
ENERGY_OUTPUT_WIDTH, atomforce/0.6,
ENERGY_OUTPUT_WIDTH, pullforce/0.6);
}
}
fclose(outputFile);
traj++;
}
cudaMemcpy(potentialData.d_force, potentialData.h_force, potentialData.Ntotal*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(potentialData.d_planeDisplacement, potentialData.h_planeDisplacement, potentialData.Nplane*sizeof(float),cudaMemcpyHostToDevice);
}
void destroyPlaneLocationUpdater()
{
}
void destroy()
{
}
#undef LOG
} //namespace pulling_plane_potential
|
a7d1fa5a7df5967de7d5e67cb36924861f385f3b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
__device__ int counter;
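// counter lives in device global memory; accessing it directly from host code
// (see the commented-out lines in main) is not supported.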
__host__ __device__ void fun() {
++counter;
}
__global__ void printk() {
fun();
printf("printk (after fun): %d\n", counter);
}
int main() {
//counter = 0;
//printf("main: %d\n", counter);
hipLaunchKernelGGL(( printk) , dim3(1), dim3(1), 0, 0, );
hipDeviceSynchronize();
//fun();
//printf("main (after fun): %d\n", counter);
return 0;
}
|
a7d1fa5a7df5967de7d5e67cb36924861f385f3b.cu
|
#include <cuda.h>
#include <stdio.h>
__device__ int counter;
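// counter lives in device global memory; accessing it directly from host code
// (see the commented-out lines in main) is not supported.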
__host__ __device__ void fun() {
++counter;
}
__global__ void printk() {
fun();
printf("printk (after fun): %d\n", counter);
}
int main() {
//counter = 0;
//printf("main: %d\n", counter);
printk <<<1, 1>>>();
cudaDeviceSynchronize();
//fun();
//printf("main (after fun): %d\n", counter);
return 0;
}
|
2ac04251ee73050ebcd3c5eca42e1d98ec96ae5b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/native/sparse/SparseTensorMath.h>
#include <ATen/native/sparse/hip/SparseHIPApplyUtils.cuh>
#include <ATen/native/sparse/hip/SparseHIPBlas.cuh>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPUtils.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/WrapDimUtilsMulti.h>
#include <ATen/ExpandUtils.h>
#include <THH/THHTensorMathPointwise.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/binary_search.h>
#include <thrust/sort.h>
#include <thrust/system/hip/execution_policy.h>
#include <bitset>
#include <hipsparse.h>
#include <hip/hip_runtime_api.h>
#include <memory>
#define I_INFO(tensor) cuda::detail::getTensorInfo<int64_t, uint64_t>(tensor)
#define V_INFO(tensor) cuda::detail::getTensorInfo<scalar_t, uint64_t>(tensor)
namespace at { namespace native {
using namespace at::sparse;
using at::cuda::detail::TensorInfo;
using at::cuda::detail::getTensorInfo;
// --------------------------------------------------------------------
// Utility functions
// --------------------------------------------------------------------
namespace {
Tensor _to_csr_int(const Tensor& rowIndices, int64_t dim, int64_t nnz) {
Tensor csr = at::empty({dim+1}, CUDA(kInt));
Tensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt));
rowIndicesInt.copy_(rowIndices);
sparse::cuda::Xcoo2csr(rowIndicesInt.data_ptr<int32_t>(), nnz, dim, csr.data_ptr<int32_t>());
return csr;
}
}
// NB: Deleted spaddcmul (aka addcmul_, but not actually wired up), spaddcdiv (not
// wired at all)
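// Computes r_ = beta * t + alpha * (sparse @ dense): the COO row indices are converted to a
// CSR pointer array and the product is dispatched to the csrmm2 wrapper, which expects a
// column-major output buffer (hence the r__ view/copy below).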
template <typename scalar_t>
void s_addmm_out_sparse_dense_cuda_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, const Scalar& beta, const Tensor& t, const Scalar& alpha, Tensor& indices, Tensor& values, const Tensor& dense) {
scalar_t cast_beta = beta.to<scalar_t>();
scalar_t cast_alpha = alpha.to<scalar_t>();
Tensor rowIndices = indices.select(0, 0);
Tensor colIndices = indices.select(0, 1);
Tensor csr = _to_csr_int(rowIndices, m, nnz);
Tensor colIndicesInt = at::empty({colIndices.size(0)}, indices.options().dtype(kInt));
colIndicesInt.copy_(colIndices);
Tensor r__;
if (cast_beta == 0) {
r_.zero_();
} else if (cast_beta == 1) {
if (!is_same_tensor(t, r_)) {
r_.copy_(t);
}
} else {
at::mul_out(r_, t, scalar_to_tensor(beta));
}
if(r_.stride(0) == 1 && r_.stride(1) == r_.size(0)) {
r__ = r_;
} else {
// TODO: how... strange
r__ = r_.transpose(0, 1).clone(at::MemoryFormat::Contiguous);
r__.transpose_(0, 1);
}
if (nnz > 0) {
Tensor dense_;
char transpose_dense;
if(dense.stride(0) == 1 && dense.stride(1) == dense.size(0)) {
transpose_dense = 'n';
dense_ = dense;
} else if(dense.stride(1) == 1 && dense.stride(0) != dense.size(1)) {
transpose_dense = 't';
dense_ = dense;
} else {
transpose_dense = 't';
dense_ = dense.contiguous();
}
sparse::cuda::csrmm2(
'n',
transpose_dense,
m,
n,
k,
nnz,
cast_alpha,
values.data_ptr<scalar_t>(),
csr.data_ptr<int32_t>(),
colIndicesInt.data_ptr<int32_t>(),
dense_.data_ptr<scalar_t>(),
(transpose_dense == 'n' ? dense_.stride(1) : dense_.stride(0)),
cast_beta,
r__.data_ptr<scalar_t>(),
r__.stride(1));
}
r_.copy_(r__);
}
// --------------------------------------------------------------------
// addmm(Tensor, SparseTensor, Tensor, Scalar, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& s_addmm_out_sparse_dense_cuda(Tensor& r_, const Tensor& t, const SparseTensor& sparse_, const Tensor& dense, const Scalar& beta, const Scalar& alpha) {
TORCH_CHECK(t.is_cuda(), "addmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "addmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(sparse_.is_cuda(), "addmm: expected 'mat1' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "addmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({sparse_, r_, t, dense}));
TORCH_CHECK(dense.dim() == 2, "addmm: 2D tensor expected, got ", dense.dim(), "D tensor");
TORCH_CHECK(sparse_.sparse_dim() == 2, "addmm: expected first two dims to be sparse (indices has size 2 at first dim), but got ", sparse_.sparse_dim(), " sparse dims");
// no need to check dense_dim because dense_dim + sparse_dim = dim
// mxk * kxn = mxn
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(t.size(0) == m,
"addmm: Argument #1 (t): Expected dim 0 size ", m, ", got ", t.size(0));
TORCH_CHECK(t.size(1) == n,
"addmm: Argument #1 (t): Expected dim 1 size ", n, ", got ", t.size(1));
TORCH_CHECK(dense.size(0) == k,
"addmm: Argument #3 (dense): Expected dim 0 size ", k, ", got ", dense.size(0));
r_.resize_({m, n});
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
Tensor indices = sparse._indices();
Tensor values = sparse._values();
// No half support, so we don't have to use CUDATypeConversion
AT_DISPATCH_FLOATING_TYPES(
values.scalar_type(), "addmm_sparse_cuda", [&] {
s_addmm_out_sparse_dense_cuda_worker<scalar_t>(nnz, m, n, k, r_, beta, t, alpha, indices, values, dense);
}
);
return r_;
}
Tensor& addmm_out_sparse_dense_cuda(
Tensor& result,
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
const Scalar& beta,
const Scalar& alpha
) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_addmm_out_sparse_dense_cuda(result, b_self, mat1, mat2, beta, alpha);
}
Tensor s_addmm_sparse_dense_cuda(
const Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
const Scalar& beta,
const Scalar& alpha
) {
Tensor r = at::empty({0}, t.options());
s_addmm_out_sparse_dense_cuda(r, t, sparse, dense, beta, alpha);
return r;
}
Tensor addmm_sparse_dense_cuda(
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
const Scalar& beta,
const Scalar& alpha
) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_addmm_sparse_dense_cuda(b_self, mat1, mat2, beta, alpha);
}
Tensor& s_addmm_sparse_dense_cuda_(
Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
const Scalar& beta,
const Scalar& alpha
) {
return s_addmm_out_sparse_dense_cuda(t, t, sparse, dense, beta, alpha);
}
// NB: Purposely no broadcasting version of addmm inplace
// Deleted sspaddmm (sparse, dense) -> sparse
// --------------------------------------------------------------------
// hspmm(SparseTensor mat1, Tensor mat2)
// --------------------------------------------------------------------
SparseTensor& hspmm_out_sparse_cuda(SparseTensor& r_, const SparseTensor& sparse_, const Tensor& dense/* , const Scalar& alpha */) {
TORCH_CHECK(sparse_.is_cuda(), "hspmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, sparse_, dense}));
TORCH_CHECK(sparse_.sparse_dim() == 2,
"hspmm: Argument #2: 2D tensor expected, got ", sparse_.sparse_dim(), "D tensor");
TORCH_CHECK(sparse_.dense_dim() == 0,
"hspmm: Argument #2: scalar values expected, got ", sparse_.dense_dim(), "D values");
TORCH_CHECK(dense.dim() == 2,
"hspmm: Argument #3: 2D tensor expected, got ", dense.dim(), "D tensor");
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(dense.size(0) == k,
"hspmm: Argument #3: Expected dim 0 size ", k, ", got ", dense.size(0));
get_sparse_impl(r_)->resize_and_clear_(1, 1, {m, n});
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
Tensor indices = at::empty({1, nnz}, CUDA(kLong));
// create values in column-major format to avoid copying in spaddmm
Tensor values = at::empty({n, nnz}, dense.options());
values.transpose_(0, 1);
// why does sparse need to be cloned? If this is really necessary maybe we
// need to fuse this with newCoalesce
SparseTensor newSparse = sparse.clone();
Tensor spIndices = newSparse._indices();
Tensor dstIndices = spIndices.select(0, 0);
// Save destination indices to output hybrid tensor
indices.copy_(dstIndices);
// Replace destination indices with 0, 1, 2, 3, ... and compute output values
// tensor with sparse * dense multiplication
thrust::device_ptr<int64_t> indicesIter(dstIndices.data_ptr<int64_t>());
thrust::sequence(policy, indicesIter, indicesIter + nnz);
std::vector<int64_t> new_size = get_sparse_impl(newSparse)->sizes().vec();
new_size[0] = nnz;
get_sparse_impl(newSparse)->raw_resize_(get_sparse_impl(newSparse)->sparse_dim(), get_sparse_impl(newSparse)->dense_dim(), new_size);
s_addmm_out_sparse_dense_cuda(values, values, newSparse, dense, 0, /*alpha*/ 1);
get_sparse_impl(r_)->set_indices_and_values_unsafe(indices, values);
return r_;
}
SparseTensor hspmm_sparse_cuda(const SparseTensor& sparse, const Tensor& dense) {
SparseTensor r = at::empty({0}, sparse.options());
hspmm_out_sparse_cuda(r, sparse, dense);
return r;
}
// --------------------------------------------------------------------
// add(Tensor, SparseTensor, Scalar)
// formerly known as spcadd
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r_, const Tensor& dense, const SparseTensor& sparse, const at::Scalar& value) {
TORCH_CHECK(dense.is_cuda(), "add: expected 'self' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(sparse.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(cuda::check_device({sparse, r_, dense}));
TORCH_CHECK(dense.sizes().equals(sparse.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ",
dense.sizes(), " while other has size ", sparse.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)");
const int64_t nnz = sparse._nnz();
if (nnz == 0) {
r_.resize_as_(dense);
r_.copy_(dense);
return r_;
}
auto commonDtype = at::result_type(dense, sparse);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
Tensor r = r_;
if (r_.scalar_type() != commonDtype) {
r = at::empty_like(dense, r_.options().dtype(commonDtype));
}
Tensor dense_buffer = dense.to(commonDtype);
Tensor values = sparse._values().to(commonDtype);
if (is_same_tensor(r, dense_buffer)) {
TORCH_CHECK(r_.is_contiguous(), "add: CUDA dense-sparse addition with a non-contiguous output tensor does not work; shout if you need it (see https://github.com/pytorch/pytorch/issues/1521 )");
} else {
r.resize_as_(dense);
r.copy_(dense_buffer);
}
Tensor indices = sparse._indices();
int64_t nDim = dense.dim();
int64_t nDimI = sparse.sparse_dim();
if (values.numel() == 0) {
return r_;
}
if (sparse.is_coalesced()) {
// TODO benchmark to decide whether to remove this special case
const dim3 block = cuda::getApplyBlock();
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
if (sparse.dense_dim() == 0) {
TORCH_CHECK(cuda::getApplyGrid(nnz, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND3(
at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::sparseElementwiseKernelScalar<TensorCAddOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
C10_HIP_KERNEL_LAUNCH_CHECK();
});
} else {
TORCH_CHECK(cuda::getApplyGrid(nnz * block.x, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
// sparseElementwiseKernel needs values to be contiguous too
values = values.contiguous();
AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::sparseElementwiseKernel<TensorCAddOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
} else {
Tensor indices1D = flatten_indices(indices, sparse.sizes(), 0);
// FIXME: at some point we can wrap the scale into indexAdd
// NB: Purposely not inplace!
AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
if (value.to<scalar_t>() != static_cast<scalar_t>(1)) {
values = values.mul(value);
}
});
int64_t view_rows = 1;
int64_t view_columns = 1;
for (int i = 0; i < nDimI; i++) {
view_rows *= r.size(i);
}
for (int i = nDimI; i < nDim; i++) {
view_columns *= r.size(i);
}
Tensor r_view = r.view({view_rows, view_columns});
values = values.reshape({nnz, view_columns});
r_view.index_add_(0, indices1D, values);
}
THCudaCheck(hipGetLastError());
r_.copy_(r);
return r_;
}
// --------------------------------------------------------------------
// add(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r, const Tensor& dense, const SparseTensor& sparse_, const Scalar& value);
SparseTensor& add_out_sparse_cuda(const SparseTensor& t, const SparseTensor& src, const Scalar& value, SparseTensor& r_) {
if (!t.is_sparse()) {
return add_out_dense_sparse_cuda(r_, t, src, value);
}
// TODO: This test seems a bit goofy
TORCH_CHECK(src.is_sparse(), "add(sparse, dense) is not supported. Use add(dense, sparse) instead.");
TORCH_CHECK(t.is_cuda(), "add: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t, src}));
auto commonDtype = at::result_type(t, src);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
TORCH_CHECK(t.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but ", t.sizes(), " != ", src.sizes());
if (src._nnz() == 0) {
return copy_sparse_to_sparse_(r_, t);
}
if (t._nnz() == 0) {
return mul_out_sparse_scalar(r_, src, value);
}
TORCH_CHECK(is_same_density(t, src), "add: expected 'self' and 'other' to have same density, but 'self' has ", t.sparse_dim(), " sparse dimensions while 'other' has ", src.sparse_dim(), " sparse dimensions");
// We deliberately choose to simply concat the indices and values tensors
// rather than merging them. This removes the need to synchronously fetch nnz
// at the end of the operation, at the cost of having a non-coalesced result.
// This trade-off is preferable for the common use-case of gradient accumulation.
Tensor t_indices_ = t._indices();
Tensor s_indices_ = src._indices();
Tensor t_values_ = t._values().to(commonDtype);
Tensor s_values_ = src._values().to(commonDtype);
AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_sparse_cuda", [&] {
if (value.to<scalar_t>() != static_cast<scalar_t>(1)) {
s_values_ = s_values_.mul(value);
}
});
Tensor r_indices_ = at::cat({t_indices_, s_indices_}, 1);
Tensor r_values_ = at::cat({t_values_, s_values_}, 0);
if (r_.scalar_type() != commonDtype) {
SparseTensor promoted = at::empty({0}, r_.options().dtype(commonDtype));
promoted.resize_as_(src);
alias_into_sparse(promoted, r_indices_, r_values_);
// performs the addition under the common dtype.
promoted = promoted.coalesce();
r_values_ = promoted._values().to(r_.scalar_type());
r_indices_ = promoted._indices();
} else {
r_.resize_as_(src);
}
alias_into_sparse(r_, r_indices_, r_values_);
// Prevent unbounded growth of nnz
// TODO: Improved heuristic on when to coalesce or remove need to coalesce
if (r_._nnz() > r_.numel()) {
auto c = r_.coalesce();
alias_into_sparse(r_, c._indices(), c._values());
}
return r_;
}
// --------------------------------------------------------------------
// mul(SparseTensor, SparseTensor) [broadcasts]
// --------------------------------------------------------------------
SparseTensor& mul_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t_, const SparseTensor& src_) {
if (src_.dim() == 0) {
return mul_out_sparse_zerodim(r_, t_, src_);
} else if (t_.dim() == 0) {
return mul_out_sparse_zerodim(r_, src_, t_);
}
TORCH_CHECK(t_.is_cuda(), "mul: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src_.is_cuda(), "mul: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "mul: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t_, src_}));
TORCH_CHECK(t_.sizes().equals(src_.sizes()), "mul: expected 'self' and 'other' to have same size, but ", t_.sizes(), " != ", src_.sizes());
SparseTensor t = t_.coalesce();
SparseTensor src = src_.coalesce();
if (src_._nnz() == 0 || t_._nnz() == 0) {
r_.resize_as_(src_);
return r_.zero_();
}
// saving those because they can be overwritten when doing in-place operations
int64_t t_nnz = t._nnz(), s_nnz = src._nnz();
int64_t max_nnz = ::min(t_nnz, s_nnz); // multiply by zero is zero, and can be dropped
int64_t sparse_dim = src.sparse_dim();
auto commonDtype = at::result_type(t, src);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
Tensor t_indices_ = t._indices().contiguous();
Tensor t_values_ = t._values().to(commonDtype);
Tensor s_indices_ = src._indices().contiguous();
Tensor s_values_ = src._values().to(commonDtype);
Tensor r_indices_ = at::empty({sparse_dim, max_nnz}, t_indices_.options());
r_.resize_as_(src);
Tensor r_values_ = new_values_with_size_of(t_values_, max_nnz).zero_();
int64_t valueSize = t_values_.stride(0);
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), valueSize));
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
TORCH_CHECK(cuda::getApplyGrid(valueSize, grid, curDevice), "mul: Argument #0: tensor too large or too many dimensions");
Tensor resultNnz = at::empty({1}, CUDA(kLong));
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, commonDtype, "mul_out_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::valueSparseIntersectionKernel<TensorMulOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorMulOp<scalar_t>(),
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_),
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz));
C10_HIP_KERNEL_LAUNCH_CHECK();
hipLaunchKernelGGL(( apply::indexSparseIntersectionKernel<uint64_t, scalar_t>)
, dim3(1), dim3(1), 0, stream,
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
// reinterpret_cast shenanigans, because we don't actually have
// unsigned tensors...
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz), reinterpret_cast<uint64_t*>(resultNnz.data_ptr()));
C10_HIP_KERNEL_LAUNCH_CHECK();
});
r_values_ = r_values_.to(r_.scalar_type());
get_sparse_impl(r_)->set_indices_and_values_unsafe(r_indices_, r_values_);
// sync! (surely there is a more idiomatic way to do this...)
Tensor cpu_resultNnz = at::empty({1}, CPU(kLong));
cpu_resultNnz.copy_(resultNnz);
get_sparse_impl(r_)->set_nnz_and_narrow(cpu_resultNnz.accessor<int64_t, 1>()[0]);
return r_._coalesced_(true);
}
// --------------------------------------------------------------------
// sparse.sum() backward
//
// see NOTE [ sparse.sum() backward ]
// --------------------------------------------------------------------
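// One thread per input nonzero: input_indices_pos holds the lower_bound of each flattened
// input index within the sorted grad indices; on an exact match the corresponding row of
// grad values is copied into the output, otherwise the row is zero-filled.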
template <typename scalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void _sparse_sum_backward_cuda_kernel(
int64_t total_threads,
const TensorInfo<int64_t, int64_t> grad_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_pos_ti,
const TensorInfo<scalar_t, int64_t> grad_values_expand_ti,
TensorInfo<scalar_t, int64_t> grad_input_values_ti
) {
const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= total_threads) return;
const int64_t j = input_indices_pos_ti.data[i];
bool has_match = false;
if (grad_indices_ti.data[j] == input_indices_ti.data[i]) {
has_match = true;
}
int64_t grad_input_values_stride0 = grad_input_values_ti.strides[0];
int64_t out_start = i * grad_input_values_stride0;
int64_t out_end = (i + 1) * grad_input_values_stride0;
int64_t in_start = j * grad_values_expand_ti.strides[0];
if (has_match) {
for (int64_t out_i = out_start, in_i = in_start; out_i < out_end; out_i++, in_i++) {
grad_input_values_ti.data[out_i] = grad_values_expand_ti.data[in_i];
}
}
else {
for (int64_t out_i = out_start; out_i < out_end; out_i++) {
grad_input_values_ti.data[out_i] = scalar_t(0);
}
}
}
Tensor _sparse_sum_backward_cuda(const Tensor& grad_, const SparseTensor& input_, IntArrayRef dims_to_sum) {
TORCH_CHECK(grad_.is_cuda(), "_sparse_sum_backward_cuda: expected 'grad_' to be CUDA tensor, but got CPU tensor");
TORCH_CHECK(input_.is_cuda(), "_sparse_sum_backward_cuda: expected 'input_' to be CUDA tensor, but got CPU tensor");
auto input = input_.coalesce();
const int64_t input_dim = input.dim();
auto dims_to_sum_b = dim_list_to_bitset(dims_to_sum, input_dim);
auto dims_to_sum_v = dims_to_sum.vec();
maybe_wrap_dims(dims_to_sum_v, input_dim);
Tensor input_indices = input._indices();
Tensor input_values = input._values();
IntArrayRef input_sizes = input.sizes();
const int64_t input_sparse_dim = input.sparse_dim();
const int64_t input_dense_dim = input.dense_dim();
const int64_t input_nnz = input._nnz();
int64_t sparse_dims_to_sum_size = 0;
auto sparse_dims_to_keep_v = std::vector<int64_t>();
auto dense_dims_to_sum_v = std::vector<int64_t>();
for (int64_t d = 0; d < input_dim; d++) {
if (dims_to_sum_b[d]) {
if (d < input_sparse_dim) sparse_dims_to_sum_size ++;
else dense_dims_to_sum_v.emplace_back(d + 1 - input_sparse_dim);
}
else {
if (d < input_sparse_dim) sparse_dims_to_keep_v.emplace_back(d);
}
}
const bool sum_all_sparse_dim = (input_sparse_dim == sparse_dims_to_sum_size);
const bool sum_dense_dim = (dense_dims_to_sum_v.size() > 0);
const bool sum_sparse_dim = (sparse_dims_to_sum_size > 0);
if (sum_all_sparse_dim) {
TORCH_CHECK(!grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad Tensor to be dense since all sparse dims are summed");
auto grad_input_values = grad_;
auto expand_size = input_values.sizes().vec();
if (sum_dense_dim) {
auto dense_expand_size = std::vector<int64_t>(expand_size);
dense_expand_size.erase(dense_expand_size.begin()); // remove nnz dim
for (auto d : dense_dims_to_sum_v) grad_input_values = grad_input_values.unsqueeze(d - 1); // -1 since grad has no nnz dim
grad_input_values = grad_input_values.expand(dense_expand_size);
}
grad_input_values = grad_input_values.expand(expand_size).clone(at::MemoryFormat::Contiguous);
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, input.options().dtype(grad_.dtype())); // convert to grad dtype
}
else {
TORCH_CHECK(grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad_ Tensor to be sparse, but got dense");
auto grad = grad_.coalesce();
Tensor grad_indices = grad._indices();
Tensor grad_values = grad._values();
const int64_t grad_sparse_dim = grad.sparse_dim();
const int64_t grad_nnz = grad._nnz();
Tensor grad_values_expand = grad_values;
if (sum_dense_dim) {
auto expand_size = input_values.sizes().vec();
if (sum_sparse_dim) expand_size[0] = grad_values.size(0); // update nnz
for (auto d : dense_dims_to_sum_v) grad_values_expand = grad_values_expand.unsqueeze(d);
grad_values_expand = grad_values_expand.expand(expand_size).clone(at::MemoryFormat::Contiguous);
}
Tensor grad_input_values;
if (!sum_sparse_dim) {
grad_input_values = grad_values_expand;
}
else {
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
typedef thrust::device_ptr<int64_t> thrust_ptr;
grad_input_values = at::empty_like(input_values, grad_values.options(), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
AT_ASSERT(grad_input_values.is_cuda());
// get 1D indices
auto grad_sparse_dim_to_keep_v = std::vector<int64_t>(grad_sparse_dim);
std::iota(grad_sparse_dim_to_keep_v.begin(), grad_sparse_dim_to_keep_v.end(), 0);
auto grad_indices_1D = flatten_indices_by_dims(grad_indices, grad.sizes(), grad_sparse_dim_to_keep_v); // flatten indices on all sparse_dim of grad, output indices is coalesced and sorted
auto input_indices_1D = flatten_indices_by_dims(input_indices, input_sizes, sparse_dims_to_keep_v);
thrust_ptr grad_indices_iter(grad_indices_1D.data_ptr<int64_t>());
thrust_ptr input_indices_iter(input_indices_1D.data_ptr<int64_t>());
// store lower_bound of input indices at grad indices
Tensor input_indices_pos = at::empty_like(input_indices_1D, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
thrust_ptr input_indices_pos_iter(input_indices_pos.data_ptr<int64_t>());
thrust::lower_bound(policy,
grad_indices_iter, grad_indices_iter + grad_nnz,
input_indices_iter, input_indices_iter + input_nnz,
input_indices_pos_iter);
// config to run cuda kernel
int64_t total_threads = input_nnz;
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), total_threads));
dim3 grid;
TORCH_CHECK(cuda::getApplyGrid(total_threads, grid, curDevice), "_sparse_sum_backward_cuda: input too large or too many dimensions");
auto grad_indices_ti = getTensorInfo<int64_t, int64_t>(grad_indices_1D);
auto input_indices_ti = getTensorInfo<int64_t, int64_t>(input_indices_1D);
auto input_indices_pos_ti = getTensorInfo<int64_t, int64_t>(input_indices_pos);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_values.scalar_type(), "_sparse_sum_backward_cuda", [&] {
auto grad_values_expand_ti = getTensorInfo<scalar_t, int64_t>(grad_values_expand);
auto grad_input_values_ti = getTensorInfo<scalar_t, int64_t>(grad_input_values);
hipLaunchKernelGGL(( _sparse_sum_backward_cuda_kernel<scalar_t>), dim3(grid), dim3(block), 0, stream,
total_threads,
grad_indices_ti,
input_indices_ti,
input_indices_pos_ti,
grad_values_expand_ti,
grad_input_values_ti
);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, grad.options());
}
}
Tensor bmm_sparse_cuda(const SparseTensor& self, const Tensor& mat2) {
Tensor result = at::empty({self.size(0), mat2.size(2), self.size(1)}, mat2.options(), at::MemoryFormat::Contiguous);
return _bmm_out_sparse_cuda(result, self, mat2, false);
}
Tensor _bmm_sparse_cuda(const SparseTensor& self, const Tensor& mat2, bool deterministic) {
Tensor result = at::empty({self.size(0), mat2.size(2), self.size(1)}, mat2.options(), at::MemoryFormat::Contiguous);
return _bmm_out_sparse_cuda(result, self, mat2, deterministic);
}
#if !(defined(__HIP_PLATFORM_HCC__) || (defined(_MSC_VER) && CUSPARSE_VERSION < 11000))
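// One thread per matrix in the batch: binary-search the sorted 1D batch indices for the
// last element belonging to this matrix; -1 is stored when the matrix has no nonzeros.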
__global__ void search_end_matrix_indices_cuda_kernel(
int64_t* mat_el_end_indices,
int64_t num_matrices,
const TensorInfo<int64_t, int64_t> indices_1D_ti,
const int64_t num_elements
){
const int64_t target_mat_num = blockIdx.x * blockDim.x + threadIdx.x;
if (target_mat_num >= num_matrices) return;
const int64_t* indices_1D = indices_1D_ti.data;
const int64_t indices_1D_stride = indices_1D_ti.strides[0];
int64_t start_idx = 0;
int64_t end_idx = num_elements - 1;
int64_t mid_idx = (start_idx + end_idx) >> 1;
int64_t mid_val = indices_1D[mid_idx*indices_1D_stride];
bool found;
while (
start_idx <= end_idx
) {
bool trim_right = mid_val > target_mat_num;
int64_t mid_idx_minus_1 = mid_idx - 1;
int64_t mid_idx_plus_1 = mid_idx + 1;
end_idx = trim_right ? mid_idx_minus_1 : end_idx;
start_idx = trim_right ? start_idx : mid_idx_plus_1;
mid_idx = (start_idx + end_idx) >> 1;
mid_val = indices_1D[mid_idx*indices_1D_stride];
}
found = (mid_val == target_mat_num)
&& (
(mid_idx == (num_elements-1))
|| (indices_1D[(mid_idx+1)*indices_1D_stride] != target_mat_num)
);
mat_el_end_indices[target_mat_num] = found ? mid_idx : -1;
}
// Search through a 1D tensor of sorted sparse matrix
// indices to find the end index for each matrix
void search_end_matrix_indices(int64_t* mat_el_end_indices, int64_t num_matrices, const Tensor& indices_1D) {
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
auto indices_1D_ti = getTensorInfo<int64_t, int64_t>(indices_1D);
int64_t grid_size = (num_matrices / 64)+1;
int64_t block_size = 64;
int64_t num_elements = indices_1D.size(0);
hipLaunchKernelGGL(( search_end_matrix_indices_cuda_kernel), dim3(grid_size), dim3(block_size), 0, stream,
mat_el_end_indices,
num_matrices,
indices_1D_ti,
num_elements
);
C10_HIP_KERNEL_LAUNCH_CHECK();
hipDeviceSynchronize();
}
hipDataType getTensorCudaDataType(Tensor self) {
hipDataType cuda_data_type;
switch (self.scalar_type()) {
case ScalarType::Float:
cuda_data_type = HIP_R_32F;
break;
case ScalarType::Double:
cuda_data_type = HIP_R_64F;
break;
default:
TORCH_CHECK(false, "Tensor types must be either float32 or float64");
break;
}
return cuda_data_type;
}
#endif
Tensor& bmm_out_sparse_cuda(Tensor& result, const SparseTensor& self, const Tensor& mat2) {
return _bmm_out_sparse_cuda(result, self, mat2, false);
}
Tensor& _bmm_out_sparse_cuda(Tensor& result, const SparseTensor& self, const Tensor& mat2, bool deterministic) {
#if defined __HIP_PLATFORM_HCC__
TORCH_CHECK(false, "bmm sparse-dense is not supported on HIP");
#elif defined(_MSC_VER) && (CUSPARSE_VERSION < 11000)
TORCH_CHECK(false, "bmm sparse-dense CUDA is not supported on Windows with cuda before 11.0");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 10010) // linux cuda >= 10.1 or windows cuda >= 11.0
TORCH_CHECK(!mat2.is_sparse(), "bmm_sparse: Tensor 'mat2' must be dense");
TORCH_CHECK(self.dense_dim() == 0, "bmm_sparse: Tensor 'self' must have 0 dense dims, but has ", self.dense_dim());
TORCH_CHECK(self.sparse_dim() == 3, "bmm_sparse: Tensor 'self' must have 3 sparse dims, but has ", self.sparse_dim());
TORCH_CHECK(mat2.dim() == 3, "bmm_sparse: Tensor 'mat2' must have 3 dims, but has ", mat2.dim());
TORCH_CHECK(self.size(0) == mat2.size(0), "bmm_sparse: 'self.size(0)' and 'mat2.size(0)' must match");
TORCH_CHECK(self.size(2) == mat2.size(1), "bmm_sparse: 'self.size(2)' and 'mat2.size(1)' must match");
int64_t num_matrices = self.size(0);
int64_t dim_i = self.size(1);
int64_t dim_j = self.size(2);
int64_t dim_k = mat2.size(2);
result.resize_({num_matrices, dim_k, dim_i});
if ((self._nnz() == 0) || (dim_j == 0) || (dim_k == 0)) {
result.zero_().transpose_(1, 2);
return result;
}
Tensor tmp_result;
bool need_copy_result;
// If the result tensor is contiguous, we can just write results directly to it.
// Otherwise, we'll need to write results to a temp buffer and then copy.
if (result.is_contiguous()) {
tmp_result = result;
need_copy_result = false;
} else {
tmp_result = at::empty({num_matrices, dim_k, dim_i}, result.options(), at::MemoryFormat::Contiguous);
need_copy_result = true;
}
// Dense matrices have to be contiguous for hipsparseSpMM to work
const Tensor mat2_contig = mat2.contiguous();
auto cusparse_handle = at::cuda::getCurrentCUDASparseHandle();
// First need to coalesce to get all of the first dimension indices
// in order since we'll be sending each matrix into the MM operation
SparseTensor self_coalesced = coalesce_sparse_cuda(self);
int64_t nnz = self_coalesced._nnz();
Tensor indices = self_coalesced._indices();
Tensor values = self_coalesced._values();
Tensor indices_dim0 = indices[0];
// Need to convert dim1 and dim2 indices to 32-bit since hipsparseSpMM
// only supports 32-bit indices
Tensor indices_dim1 = indices[1].to(ScalarType::Int);
Tensor indices_dim2 = indices[2].to(ScalarType::Int);
std::unique_ptr<int64_t[]> mat_el_end_indices_host(new int64_t[num_matrices]);
int64_t* mat_el_end_indices_device;
hipMalloc(&mat_el_end_indices_device, num_matrices*sizeof(int64_t));
search_end_matrix_indices(mat_el_end_indices_device, num_matrices, indices_dim0);
hipMemcpy(
mat_el_end_indices_host.get(),
mat_el_end_indices_device,
num_matrices*sizeof(int64_t),
hipMemcpyDeviceToHost
);
hipFree(mat_el_end_indices_device);
// Need a pointer to an array to access within a lambda
int64_t* mat_el_end_indices = &mat_el_end_indices_host[0];
Scalar beta = 0;
Scalar alpha = 1;
int64_t mat_el_begin_idx = 0;
size_t workspace_buffer_size = 0;
void* workspace_buffer = nullptr;
// See Note [Enabling Deterministic Operations]
deterministic = deterministic || globalContext().deterministicAlgorithms();
hipsparseSpMMAlg_t mm_alg = deterministic ? HIPSPARSE_COOMM_ALG2 : HIPSPARSE_COOMM_ALG1;
// Iterate through each set of 2D matrices within the 3D
// tensor inputs, performing a matrix multiply with each
AT_DISPATCH_FLOATING_TYPES(
values.scalar_type(), "bmm_sparse_cuda", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
uint32_t* row_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim1.data_ptr());
uint32_t* col_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim2.data_ptr());
scalar_t* values_start_ptr = reinterpret_cast<scalar_t*>(values.data_ptr());
scalar_t* mat2_start_ptr = reinterpret_cast<scalar_t*>(mat2_contig.data_ptr());
scalar_t* result_start_ptr = reinterpret_cast<scalar_t*>(tmp_result.data_ptr());
for (
int64_t cur_mat_num = 0;
(cur_mat_num < num_matrices);
cur_mat_num++
) {
int64_t mat_el_end_idx = mat_el_end_indices[cur_mat_num];
if (mat_el_end_idx != -1) {
mat_el_end_idx++;
// Create tensors to view just the current set of matrices
int64_t sparse_nnz = mat_el_end_idx - mat_el_begin_idx;
hipDataType cuda_data_type = getTensorCudaDataType(mat2_contig);
uint32_t* row_indices_ptr = &row_indices_start_ptr[mat_el_begin_idx];
uint32_t* col_indices_ptr = &col_indices_start_ptr[mat_el_begin_idx];
scalar_t* values_ptr = &values_start_ptr[mat_el_begin_idx];
hipsparseSpMatDescr_t sparse_descr;
TORCH_CUDASPARSE_CHECK(hipsparseCreateCoo(
&sparse_descr,
dim_i,
dim_j,
sparse_nnz,
reinterpret_cast<void*>(row_indices_ptr),
reinterpret_cast<void*>(col_indices_ptr),
reinterpret_cast<void*>(values_ptr),
HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO,
cuda_data_type
));
scalar_t* mat2_ptr = &mat2_start_ptr[dim_k*dim_j*cur_mat_num];
hipsparseDnMatDescr_t dense_descr;
TORCH_CUDASPARSE_CHECK(hipsparseCreateDnMat(
&dense_descr,
dim_k,
dim_j,
dim_k,
reinterpret_cast<void*>(mat2_ptr),
cuda_data_type,
HIPSPARSE_ORDER_COL
));
scalar_t* result_ptr = &result_start_ptr[dim_i*dim_k*cur_mat_num];
hipsparseDnMatDescr_t result_descr;
TORCH_CUDASPARSE_CHECK(hipsparseCreateDnMat(
&result_descr,
dim_i,
dim_k,
dim_i,
reinterpret_cast<void*>(result_ptr),
cuda_data_type,
HIPSPARSE_ORDER_COL
));
size_t required_workspace_buffer_size = 0;
TORCH_CUDASPARSE_CHECK(hipsparseSpMM_bufferSize(
cusparse_handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_TRANSPOSE,
(void*)&alpha_val,
sparse_descr,
dense_descr,
(void*)&beta_val,
result_descr,
cuda_data_type,
mm_alg,
&required_workspace_buffer_size
));
if (required_workspace_buffer_size > workspace_buffer_size) {
if (workspace_buffer != nullptr) {
hipFree(workspace_buffer);
}
workspace_buffer_size = required_workspace_buffer_size;
hipMallocManaged(&workspace_buffer, workspace_buffer_size);
}
TORCH_CUDASPARSE_CHECK(hipsparseSpMM(
cusparse_handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_TRANSPOSE,
(void*)&alpha_val,
sparse_descr,
dense_descr,
(void*)&beta_val,
result_descr,
cuda_data_type,
mm_alg,
workspace_buffer
));
TORCH_CUDASPARSE_CHECK(hipsparseDestroySpMat(sparse_descr));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyDnMat(dense_descr));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyDnMat(result_descr));
mat_el_begin_idx = mat_el_end_idx;
} else {
tmp_result[cur_mat_num].zero_();
}
}
}
);
if (need_copy_result) {
result.copy_(tmp_result);
}
// Need to transpose the result matrices since cusparse stores
// them in column-major order in memory
result.transpose_(1,2);
if (workspace_buffer != nullptr) {
hipFree(workspace_buffer);
}
#else
TORCH_CHECK(false, "bmm sparse-dense requires CUDA 10.1 or greater");
#endif
return result;
}
}} // namespace at::native
|
2ac04251ee73050ebcd3c5eca42e1d98ec96ae5b.cu
|
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/native/sparse/SparseTensorMath.h>
#include <ATen/native/sparse/cuda/SparseCUDAApplyUtils.cuh>
#include <ATen/native/sparse/cuda/SparseCUDABlas.cuh>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAUtils.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/WrapDimUtilsMulti.h>
#include <ATen/ExpandUtils.h>
#include <THC/THCTensorMathPointwise.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/binary_search.h>
#include <thrust/sort.h>
#include <thrust/system/cuda/execution_policy.h>
#include <bitset>
#include <cusparse.h>
#include <cuda_runtime_api.h>
#include <memory>
#define I_INFO(tensor) cuda::detail::getTensorInfo<int64_t, uint64_t>(tensor)
#define V_INFO(tensor) cuda::detail::getTensorInfo<scalar_t, uint64_t>(tensor)
namespace at { namespace native {
using namespace at::sparse;
using at::cuda::detail::TensorInfo;
using at::cuda::detail::getTensorInfo;
// --------------------------------------------------------------------
// Utility functions
// --------------------------------------------------------------------
namespace {
Tensor _to_csr_int(const Tensor& rowIndices, int64_t dim, int64_t nnz) {
Tensor csr = at::empty({dim+1}, CUDA(kInt));
Tensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt));
rowIndicesInt.copy_(rowIndices);
sparse::cuda::Xcoo2csr(rowIndicesInt.data_ptr<int32_t>(), nnz, dim, csr.data_ptr<int32_t>());
return csr;
}
}
// NB: Deleted spaddcmul (aka addcmul_, but not actually wired up), spaddcdiv (not
// wired at all)
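// Computes r_ = beta * t + alpha * (sparse @ dense): the COO row indices are converted to a
// CSR pointer array and the product is dispatched to the csrmm2 wrapper, which expects a
// column-major output buffer (hence the r__ view/copy below).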
template <typename scalar_t>
void s_addmm_out_sparse_dense_cuda_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, const Scalar& beta, const Tensor& t, const Scalar& alpha, Tensor& indices, Tensor& values, const Tensor& dense) {
scalar_t cast_beta = beta.to<scalar_t>();
scalar_t cast_alpha = alpha.to<scalar_t>();
Tensor rowIndices = indices.select(0, 0);
Tensor colIndices = indices.select(0, 1);
Tensor csr = _to_csr_int(rowIndices, m, nnz);
Tensor colIndicesInt = at::empty({colIndices.size(0)}, indices.options().dtype(kInt));
colIndicesInt.copy_(colIndices);
Tensor r__;
if (cast_beta == 0) {
r_.zero_();
} else if (cast_beta == 1) {
if (!is_same_tensor(t, r_)) {
r_.copy_(t);
}
} else {
at::mul_out(r_, t, scalar_to_tensor(beta));
}
if(r_.stride(0) == 1 && r_.stride(1) == r_.size(0)) {
r__ = r_;
} else {
// TODO: how... strange
r__ = r_.transpose(0, 1).clone(at::MemoryFormat::Contiguous);
r__.transpose_(0, 1);
}
if (nnz > 0) {
Tensor dense_;
char transpose_dense;
if(dense.stride(0) == 1 && dense.stride(1) == dense.size(0)) {
transpose_dense = 'n';
dense_ = dense;
} else if(dense.stride(1) == 1 && dense.stride(0) != dense.size(1)) {
transpose_dense = 't';
dense_ = dense;
} else {
transpose_dense = 't';
dense_ = dense.contiguous();
}
sparse::cuda::csrmm2(
'n',
transpose_dense,
m,
n,
k,
nnz,
cast_alpha,
values.data_ptr<scalar_t>(),
csr.data_ptr<int32_t>(),
colIndicesInt.data_ptr<int32_t>(),
dense_.data_ptr<scalar_t>(),
(transpose_dense == 'n' ? dense_.stride(1) : dense_.stride(0)),
cast_beta,
r__.data_ptr<scalar_t>(),
r__.stride(1));
}
r_.copy_(r__);
}
// --------------------------------------------------------------------
// addmm(Tensor, SparseTensor, Tensor, Scalar, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& s_addmm_out_sparse_dense_cuda(Tensor& r_, const Tensor& t, const SparseTensor& sparse_, const Tensor& dense, const Scalar& beta, const Scalar& alpha) {
TORCH_CHECK(t.is_cuda(), "addmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "addmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(sparse_.is_cuda(), "addmm: expected 'mat1' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "addmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({sparse_, r_, t, dense}));
TORCH_CHECK(dense.dim() == 2, "addmm: 2D tensor expected, got ", dense.dim(), "D tensor");
TORCH_CHECK(sparse_.sparse_dim() == 2, "addmm: expected first two dims to be sparse (indices has size 2 at first dim), but got ", sparse_.sparse_dim(), " sparse dims");
// no need to check dense_dim because dense_dim + sparse_dim = dim
// mxk * kxn = mxn
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(t.size(0) == m,
"addmm: Argument #1 (t): Expected dim 0 size ", m, ", got ", t.size(0));
TORCH_CHECK(t.size(1) == n,
"addmm: Argument #1 (t): Expected dim 1 size ", n, ", got ", t.size(1));
TORCH_CHECK(dense.size(0) == k,
"addmm: Argument #3 (dense): Expected dim 0 size ", k, ", got ", dense.size(0));
r_.resize_({m, n});
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
Tensor indices = sparse._indices();
Tensor values = sparse._values();
// No half support, so we don't have to use CUDATypeConversion
AT_DISPATCH_FLOATING_TYPES(
values.scalar_type(), "addmm_sparse_cuda", [&] {
s_addmm_out_sparse_dense_cuda_worker<scalar_t>(nnz, m, n, k, r_, beta, t, alpha, indices, values, dense);
}
);
return r_;
}
Tensor& addmm_out_sparse_dense_cuda(
Tensor& result,
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
const Scalar& beta,
const Scalar& alpha
) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_addmm_out_sparse_dense_cuda(result, b_self, mat1, mat2, beta, alpha);
}
Tensor s_addmm_sparse_dense_cuda(
const Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
const Scalar& beta,
const Scalar& alpha
) {
Tensor r = at::empty({0}, t.options());
s_addmm_out_sparse_dense_cuda(r, t, sparse, dense, beta, alpha);
return r;
}
Tensor addmm_sparse_dense_cuda(
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
const Scalar& beta,
const Scalar& alpha
) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_addmm_sparse_dense_cuda(b_self, mat1, mat2, beta, alpha);
}
Tensor& s_addmm_sparse_dense_cuda_(
Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
const Scalar& beta,
const Scalar& alpha
) {
return s_addmm_out_sparse_dense_cuda(t, t, sparse, dense, beta, alpha);
}
// NB: Purposely no broadcasting version of addmm inplace
// Deleted sspaddmm (sparse, dense) -> sparse
// --------------------------------------------------------------------
// hspmm(SparseTensor mat1, Tensor mat2)
// --------------------------------------------------------------------
SparseTensor& hspmm_out_sparse_cuda(SparseTensor& r_, const SparseTensor& sparse_, const Tensor& dense/* , const Scalar& alpha */) {
TORCH_CHECK(sparse_.is_cuda(), "hspmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, sparse_, dense}));
TORCH_CHECK(sparse_.sparse_dim() == 2,
"hspmm: Argument #2: 2D tensor expected, got ", sparse_.sparse_dim(), "D tensor");
TORCH_CHECK(sparse_.dense_dim() == 0,
"hspmm: Argument #2: scalar values expected, got ", sparse_.dense_dim(), "D values");
TORCH_CHECK(dense.dim() == 2,
"hspmm: Argument #3: 2D tensor expected, got ", dense.dim(), "D tensor");
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(dense.size(0) == k,
"hspmm: Argument #3: Expected dim 0 size ", k, ", got ", dense.size(0));
get_sparse_impl(r_)->resize_and_clear_(1, 1, {m, n});
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
Tensor indices = at::empty({1, nnz}, CUDA(kLong));
// create values in column-major format to avoid copying in spaddmm
Tensor values = at::empty({n, nnz}, dense.options());
values.transpose_(0, 1);
// why does sparse need to be cloned? If this is really necessary maybe we
// need to fuse this with newCoalesce
SparseTensor newSparse = sparse.clone();
Tensor spIndices = newSparse._indices();
Tensor dstIndices = spIndices.select(0, 0);
// Save destination indices to output hybrid tensor
indices.copy_(dstIndices);
// Replace destination indices with 0, 1, 2, 3, ... and compute output values
// tensor with sparse * dense multiplication
thrust::device_ptr<int64_t> indicesIter(dstIndices.data_ptr<int64_t>());
thrust::sequence(policy, indicesIter, indicesIter + nnz);
std::vector<int64_t> new_size = get_sparse_impl(newSparse)->sizes().vec();
new_size[0] = nnz;
get_sparse_impl(newSparse)->raw_resize_(get_sparse_impl(newSparse)->sparse_dim(), get_sparse_impl(newSparse)->dense_dim(), new_size);
s_addmm_out_sparse_dense_cuda(values, values, newSparse, dense, 0, /*alpha*/ 1);
get_sparse_impl(r_)->set_indices_and_values_unsafe(indices, values);
return r_;
}
SparseTensor hspmm_sparse_cuda(const SparseTensor& sparse, const Tensor& dense) {
SparseTensor r = at::empty({0}, sparse.options());
hspmm_out_sparse_cuda(r, sparse, dense);
return r;
}
// --------------------------------------------------------------------
// add(Tensor, SparseTensor, Scalar)
// formerly known as spcadd
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r_, const Tensor& dense, const SparseTensor& sparse, const at::Scalar& value) {
TORCH_CHECK(dense.is_cuda(), "add: expected 'self' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(sparse.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(cuda::check_device({sparse, r_, dense}));
TORCH_CHECK(dense.sizes().equals(sparse.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ",
dense.sizes(), " while other has size ", sparse.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)");
const int64_t nnz = sparse._nnz();
if (nnz == 0) {
r_.resize_as_(dense);
r_.copy_(dense);
return r_;
}
auto commonDtype = at::result_type(dense, sparse);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
Tensor r = r_;
if (r_.scalar_type() != commonDtype) {
r = at::empty_like(dense, r_.options().dtype(commonDtype));
}
Tensor dense_buffer = dense.to(commonDtype);
Tensor values = sparse._values().to(commonDtype);
if (is_same_tensor(r, dense_buffer)) {
TORCH_CHECK(r_.is_contiguous(), "add: CUDA dense-sparse addition with a non-contiguous output tensor does not work; shout if you need it (see https://github.com/pytorch/pytorch/issues/1521 )");
} else {
r.resize_as_(dense);
r.copy_(dense_buffer);
}
Tensor indices = sparse._indices();
int64_t nDim = dense.dim();
int64_t nDimI = sparse.sparse_dim();
if (values.numel() == 0) {
return r_;
}
if (sparse.is_coalesced()) {
// TODO benchmark to decide whether to remove this special case
const dim3 block = cuda::getApplyBlock();
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
if (sparse.dense_dim() == 0) {
TORCH_CHECK(cuda::getApplyGrid(nnz, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND3(
at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
apply::sparseElementwiseKernelScalar<TensorCAddOp<scalar_t>, uint64_t, scalar_t>
<<<grid, block, 0, stream>>>(
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
} else {
TORCH_CHECK(cuda::getApplyGrid(nnz * block.x, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
// sparseElementwiseKernel needs values to be contiguous too
values = values.contiguous();
AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
apply::sparseElementwiseKernel<TensorCAddOp<scalar_t>, uint64_t, scalar_t>
<<<grid, block, 0, stream>>>(
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
} else {
Tensor indices1D = flatten_indices(indices, sparse.sizes(), 0);
// FIXME: at some point we can wrap the scale into indexAdd
// NB: Purposely not inplace!
AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
if (value.to<scalar_t>() != static_cast<scalar_t>(1)) {
values = values.mul(value);
}
});
int64_t view_rows = 1;
int64_t view_columns = 1;
for (int i = 0; i < nDimI; i++) {
view_rows *= r.size(i);
}
for (int i = nDimI; i < nDim; i++) {
view_columns *= r.size(i);
}
Tensor r_view = r.view({view_rows, view_columns});
values = values.reshape({nnz, view_columns});
r_view.index_add_(0, indices1D, values);
}
THCudaCheck(cudaGetLastError());
r_.copy_(r);
return r_;
}
// --------------------------------------------------------------------
// add(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r, const Tensor& dense, const SparseTensor& sparse_, const Scalar& value);
SparseTensor& add_out_sparse_cuda(const SparseTensor& t, const SparseTensor& src, const Scalar& value, SparseTensor& r_) {
if (!t.is_sparse()) {
return add_out_dense_sparse_cuda(r_, t, src, value);
}
// TODO: This test seems a bit goofy
TORCH_CHECK(src.is_sparse(), "add(sparse, dense) is not supported. Use add(dense, sparse) instead.");
TORCH_CHECK(t.is_cuda(), "add: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t, src}));
auto commonDtype = at::result_type(t, src);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
TORCH_CHECK(t.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but ", t.sizes(), " != ", src.sizes());
if (src._nnz() == 0) {
return copy_sparse_to_sparse_(r_, t);
}
if (t._nnz() == 0) {
return mul_out_sparse_scalar(r_, src, value);
}
TORCH_CHECK(is_same_density(t, src), "add: expected 'self' and 'other' to have same density, but 'self' has ", t.sparse_dim(), " sparse dimensions while 'other' has ", src.sparse_dim(), " sparse dimensions");
// We deliberately choose to simply concat the indices and values tensors
// rather than merging them. This removes the need to synchronously fetch nnz
// at the end of the operation, at the cost of having a non-coalesced result.
// This trade-off is preferable for the common use-case of gradient accumulation.
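// A minimal worked example of the concat path, assuming value == 1 and two
// coalesced 1-D inputs of size 4:
//   t  : indices = [[0, 2]], values = [1, 2]
//   src: indices = [[2, 3]], values = [10, 20]
//   r_ : indices = [[0, 2, 2, 3]], values = [1, 2, 10, 20]   (uncoalesced)
// A later coalesce() merges the duplicate index 2 into a single entry 12.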
Tensor t_indices_ = t._indices();
Tensor s_indices_ = src._indices();
Tensor t_values_ = t._values().to(commonDtype);
Tensor s_values_ = src._values().to(commonDtype);
AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_sparse_cuda", [&] {
if (value.to<scalar_t>() != static_cast<scalar_t>(1)) {
s_values_ = s_values_.mul(value);
}
});
Tensor r_indices_ = at::cat({t_indices_, s_indices_}, 1);
Tensor r_values_ = at::cat({t_values_, s_values_}, 0);
if (r_.scalar_type() != commonDtype) {
SparseTensor promoted = at::empty({0}, r_.options().dtype(commonDtype));
promoted.resize_as_(src);
alias_into_sparse(promoted, r_indices_, r_values_);
// performs the addition under the common dtype.
promoted = promoted.coalesce();
r_values_ = promoted._values().to(r_.scalar_type());
r_indices_ = promoted._indices();
} else {
r_.resize_as_(src);
}
alias_into_sparse(r_, r_indices_, r_values_);
// Prevent unbounded growth of nnz
// TODO: Improved heuristic on when to coalesce or remove need to coalesce
if (r_._nnz() > r_.numel()) {
auto c = r_.coalesce();
alias_into_sparse(r_, c._indices(), c._values());
}
return r_;
}
// --------------------------------------------------------------------
// mul(SparseTensor, SparseTensor) [broadcasts]
// --------------------------------------------------------------------
SparseTensor& mul_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t_, const SparseTensor& src_) {
if (src_.dim() == 0) {
return mul_out_sparse_zerodim(r_, t_, src_);
} else if (t_.dim() == 0) {
return mul_out_sparse_zerodim(r_, src_, t_);
}
TORCH_CHECK(t_.is_cuda(), "mul: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src_.is_cuda(), "mul: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "mul: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t_, src_}));
TORCH_CHECK(t_.sizes().equals(src_.sizes()), "mul: expected 'self' and 'other' to have same size, but ", t_.sizes(), " != ", src_.sizes());
SparseTensor t = t_.coalesce();
SparseTensor src = src_.coalesce();
if (src_._nnz() == 0 || t_._nnz() == 0) {
r_.resize_as_(src_);
return r_.zero_();
}
// saving those because they can be overwritten when doing in-place operations
int64_t t_nnz = t._nnz(), s_nnz = src._nnz();
int64_t max_nnz = std::min(t_nnz, s_nnz); // multiply by zero is zero, and can be dropped
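// e.g. if t has indices {0, 2, 5} and src has indices {2, 5, 7}, only the
// intersection {2, 5} can be nonzero; max_nnz bounds the allocation and the
// kernels below report the actual count (2) through resultNnz.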
int64_t sparse_dim = src.sparse_dim();
auto commonDtype = at::result_type(t, src);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
Tensor t_indices_ = t._indices().contiguous();
Tensor t_values_ = t._values().to(commonDtype);
Tensor s_indices_ = src._indices().contiguous();
Tensor s_values_ = src._values().to(commonDtype);
Tensor r_indices_ = at::empty({sparse_dim, max_nnz}, t_indices_.options());
r_.resize_as_(src);
Tensor r_values_ = new_values_with_size_of(t_values_, max_nnz).zero_();
int64_t valueSize = t_values_.stride(0);
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), valueSize));
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
TORCH_CHECK(cuda::getApplyGrid(valueSize, grid, curDevice), "mul: Argument #0: tensor too large or too many dimensions");
Tensor resultNnz = at::empty({1}, CUDA(kLong));
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, commonDtype, "mul_out_sparse_cuda", [&] {
apply::valueSparseIntersectionKernel<TensorMulOp<scalar_t>, uint64_t, scalar_t>
<<<grid, block, 0, stream>>>(
TensorMulOp<scalar_t>(),
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_),
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz));
C10_CUDA_KERNEL_LAUNCH_CHECK();
apply::indexSparseIntersectionKernel<uint64_t, scalar_t>
<<<1, 1, 0, stream>>>(
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
// reinterpret_cast shenanigans, because we don't actually have
// unsigned tensors...
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz), reinterpret_cast<uint64_t*>(resultNnz.data_ptr()));
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
r_values_ = r_values_.to(r_.scalar_type());
get_sparse_impl(r_)->set_indices_and_values_unsafe(r_indices_, r_values_);
// sync! (surely there is a more idiomatic way to do this...)
Tensor cpu_resultNnz = at::empty({1}, CPU(kLong));
cpu_resultNnz.copy_(resultNnz);
get_sparse_impl(r_)->set_nnz_and_narrow(cpu_resultNnz.accessor<int64_t, 1>()[0]);
return r_._coalesced_(true);
}
// --------------------------------------------------------------------
// sparse.sum() backward
//
// see NOTE [ sparse.sum() backward ]
// --------------------------------------------------------------------
template <typename scalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void _sparse_sum_backward_cuda_kernel(
int64_t total_threads,
const TensorInfo<int64_t, int64_t> grad_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_pos_ti,
const TensorInfo<scalar_t, int64_t> grad_values_expand_ti,
TensorInfo<scalar_t, int64_t> grad_input_values_ti
) {
const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= total_threads) return;
const int64_t j = input_indices_pos_ti.data[i];
bool has_match = false;
if (grad_indices_ti.data[j] == input_indices_ti.data[i]) {
has_match = true;
}
int64_t grad_input_values_stride0 = grad_input_values_ti.strides[0];
int64_t out_start = i * grad_input_values_stride0;
int64_t out_end = (i + 1) * grad_input_values_stride0;
int64_t in_start = j * grad_values_expand_ti.strides[0];
if (has_match) {
for (int64_t out_i = out_start, in_i = in_start; out_i < out_end; out_i++, in_i++) {
grad_input_values_ti.data[out_i] = grad_values_expand_ti.data[in_i];
}
}
else {
for (int64_t out_i = out_start; out_i < out_end; out_i++) {
grad_input_values_ti.data[out_i] = scalar_t(0);
}
}
}
Tensor _sparse_sum_backward_cuda(const Tensor& grad_, const SparseTensor& input_, IntArrayRef dims_to_sum) {
TORCH_CHECK(grad_.is_cuda(), "_sparse_sum_backward_cuda: expected 'grad_' to be CUDA tensor, but got CPU tensor");
TORCH_CHECK(input_.is_cuda(), "_sparse_sum_backward_cuda: expected 'input_' to be CUDA tensor, but got CPU tensor");
auto input = input_.coalesce();
const int64_t input_dim = input.dim();
auto dims_to_sum_b = dim_list_to_bitset(dims_to_sum, input_dim);
auto dims_to_sum_v = dims_to_sum.vec();
maybe_wrap_dims(dims_to_sum_v, input_dim);
Tensor input_indices = input._indices();
Tensor input_values = input._values();
IntArrayRef input_sizes = input.sizes();
const int64_t input_sparse_dim = input.sparse_dim();
const int64_t input_dense_dim = input.dense_dim();
const int64_t input_nnz = input._nnz();
int64_t sparse_dims_to_sum_size = 0;
auto sparse_dims_to_keep_v = std::vector<int64_t>();
auto dense_dims_to_sum_v = std::vector<int64_t>();
for (int64_t d = 0; d < input_dim; d++) {
if (dims_to_sum_b[d]) {
if (d < input_sparse_dim) sparse_dims_to_sum_size ++;
else dense_dims_to_sum_v.emplace_back(d + 1 - input_sparse_dim);
}
else {
if (d < input_sparse_dim) sparse_dims_to_keep_v.emplace_back(d);
}
}
const bool sum_all_sparse_dim = (input_sparse_dim == sparse_dims_to_sum_size);
const bool sum_dense_dim = (dense_dims_to_sum_v.size() > 0);
const bool sum_sparse_dim = (sparse_dims_to_sum_size > 0);
if (sum_all_sparse_dim) {
TORCH_CHECK(!grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad Tensor to be dense since all sparse dims are summed");
auto grad_input_values = grad_;
auto expand_size = input_values.sizes().vec();
if (sum_dense_dim) {
auto dense_expand_size = std::vector<int64_t>(expand_size);
dense_expand_size.erase(dense_expand_size.begin()); // remove nnz dim
for (auto d : dense_dims_to_sum_v) grad_input_values = grad_input_values.unsqueeze(d - 1); // -1 since grad has no nnz dim
grad_input_values = grad_input_values.expand(dense_expand_size);
}
grad_input_values = grad_input_values.expand(expand_size).clone(at::MemoryFormat::Contiguous);
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, input.options().dtype(grad_.dtype())); // convert to grad dtype
}
else {
TORCH_CHECK(grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad_ Tensor to be sparse, but got dense");
auto grad = grad_.coalesce();
Tensor grad_indices = grad._indices();
Tensor grad_values = grad._values();
const int64_t grad_sparse_dim = grad.sparse_dim();
const int64_t grad_nnz = grad._nnz();
Tensor grad_values_expand = grad_values;
if (sum_dense_dim) {
auto expand_size = input_values.sizes().vec();
if (sum_sparse_dim) expand_size[0] = grad_values.size(0); // update nnz
for (auto d : dense_dims_to_sum_v) grad_values_expand = grad_values_expand.unsqueeze(d);
grad_values_expand = grad_values_expand.expand(expand_size).clone(at::MemoryFormat::Contiguous);
}
Tensor grad_input_values;
if (!sum_sparse_dim) {
grad_input_values = grad_values_expand;
}
else {
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
typedef thrust::device_ptr<int64_t> thrust_ptr;
grad_input_values = at::empty_like(input_values, grad_values.options(), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
AT_ASSERT(grad_input_values.is_cuda());
// get 1D indices
auto grad_sparse_dim_to_keep_v = std::vector<int64_t>(grad_sparse_dim);
std::iota(grad_sparse_dim_to_keep_v.begin(), grad_sparse_dim_to_keep_v.end(), 0);
auto grad_indices_1D = flatten_indices_by_dims(grad_indices, grad.sizes(), grad_sparse_dim_to_keep_v); // flatten indices on all sparse_dim of grad, output indices is coalesced and sorted
auto input_indices_1D = flatten_indices_by_dims(input_indices, input_sizes, sparse_dims_to_keep_v);
thrust_ptr grad_indices_iter(grad_indices_1D.data_ptr<int64_t>());
thrust_ptr input_indices_iter(input_indices_1D.data_ptr<int64_t>());
// store lower_bound of input indices at grad indices
Tensor input_indices_pos = at::empty_like(input_indices_1D, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
thrust_ptr input_indices_pos_iter(input_indices_pos.data_ptr<int64_t>());
thrust::lower_bound(policy,
grad_indices_iter, grad_indices_iter + grad_nnz,
input_indices_iter, input_indices_iter + input_nnz,
input_indices_pos_iter);
// config to run cuda kernel
int64_t total_threads = input_nnz;
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), total_threads));
dim3 grid;
TORCH_CHECK(cuda::getApplyGrid(total_threads, grid, curDevice), "_sparse_sum_backward_cuda: input too large or too many dimensions");
auto grad_indices_ti = getTensorInfo<int64_t, int64_t>(grad_indices_1D);
auto input_indices_ti = getTensorInfo<int64_t, int64_t>(input_indices_1D);
auto input_indices_pos_ti = getTensorInfo<int64_t, int64_t>(input_indices_pos);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_values.scalar_type(), "_sparse_sum_backward_cuda", [&] {
auto grad_values_expand_ti = getTensorInfo<scalar_t, int64_t>(grad_values_expand);
auto grad_input_values_ti = getTensorInfo<scalar_t, int64_t>(grad_input_values);
_sparse_sum_backward_cuda_kernel<scalar_t><<<grid, block, 0, stream>>>(
total_threads,
grad_indices_ti,
input_indices_ti,
input_indices_pos_ti,
grad_values_expand_ti,
grad_input_values_ti
);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, grad.options());
}
}
Tensor bmm_sparse_cuda(const SparseTensor& self, const Tensor& mat2) {
Tensor result = at::empty({self.size(0), mat2.size(2), self.size(1)}, mat2.options(), at::MemoryFormat::Contiguous);
return _bmm_out_sparse_cuda(result, self, mat2, false);
}
Tensor _bmm_sparse_cuda(const SparseTensor& self, const Tensor& mat2, bool deterministic) {
Tensor result = at::empty({self.size(0), mat2.size(2), self.size(1)}, mat2.options(), at::MemoryFormat::Contiguous);
return _bmm_out_sparse_cuda(result, self, mat2, deterministic);
}
#if !(defined(__HIP_PLATFORM_HCC__) || (defined(_MSC_VER) && CUSPARSE_VERSION < 11000))
__global__ void search_end_matrix_indices_cuda_kernel(
int64_t* mat_el_end_indices,
int64_t num_matrices,
const TensorInfo<int64_t, int64_t> indices_1D_ti,
const int64_t num_elements
){
const int64_t target_mat_num = blockIdx.x * blockDim.x + threadIdx.x;
if (target_mat_num >= num_matrices) return;
const int64_t* indices_1D = indices_1D_ti.data;
const int64_t indices_1D_stride = indices_1D_ti.strides[0];
int64_t start_idx = 0;
int64_t end_idx = num_elements - 1;
int64_t mid_idx = (start_idx + end_idx) >> 1;
int64_t mid_val = indices_1D[mid_idx*indices_1D_stride];
bool found;
while (
start_idx <= end_idx
) {
bool trim_right = mid_val > target_mat_num;
int64_t mid_idx_minus_1 = mid_idx - 1;
int64_t mid_idx_plus_1 = mid_idx + 1;
end_idx = trim_right ? mid_idx_minus_1 : end_idx;
start_idx = trim_right ? start_idx : mid_idx_plus_1;
mid_idx = (start_idx + end_idx) >> 1;
mid_val = indices_1D[mid_idx*indices_1D_stride];
}
found = (mid_val == target_mat_num)
&& (
(mid_idx == (num_elements-1))
|| (indices_1D[(mid_idx+1)*indices_1D_stride] != target_mat_num)
);
mat_el_end_indices[target_mat_num] = found ? mid_idx : -1;
}
// Search through a 1D tensor of sorted sparse matrix
// indices to find the end index for each matrix
void search_end_matrix_indices(int64_t* mat_el_end_indices, int64_t num_matrices, const Tensor& indices_1D) {
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
auto indices_1D_ti = getTensorInfo<int64_t, int64_t>(indices_1D);
int64_t grid_size = (num_matrices / 64)+1;
int64_t block_size = 64;
int64_t num_elements = indices_1D.size(0);
search_end_matrix_indices_cuda_kernel<<<grid_size, block_size, 0, stream>>>(
mat_el_end_indices,
num_matrices,
indices_1D_ti,
num_elements
);
C10_CUDA_KERNEL_LAUNCH_CHECK();
cudaDeviceSynchronize();
}
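// A small worked example of the search above: for the sorted batch-index row
// indices_1D = [0, 0, 0, 1, 1, 3] with num_matrices = 4, the kernel writes
// mat_el_end_indices = [2, 4, -1, 5], i.e. the position of the last element
// of each matrix, or -1 for a matrix with no nonzeros.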
cudaDataType getTensorCudaDataType(Tensor self) {
cudaDataType cuda_data_type;
switch (self.scalar_type()) {
case ScalarType::Float:
cuda_data_type = CUDA_R_32F;
break;
case ScalarType::Double:
cuda_data_type = CUDA_R_64F;
break;
default:
TORCH_CHECK(false, "Tensor types must be either float32 or float64");
break;
}
return cuda_data_type;
}
#endif
Tensor& bmm_out_sparse_cuda(Tensor& result, const SparseTensor& self, const Tensor& mat2) {
return _bmm_out_sparse_cuda(result, self, mat2, false);
}
Tensor& _bmm_out_sparse_cuda(Tensor& result, const SparseTensor& self, const Tensor& mat2, bool deterministic) {
#if defined __HIP_PLATFORM_HCC__
TORCH_CHECK(false, "bmm sparse-dense is not supported on HIP");
#elif defined(_MSC_VER) && (CUSPARSE_VERSION < 11000)
TORCH_CHECK(false, "bmm sparse-dense CUDA is not supported on Windows with cuda before 11.0");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 10010) // linux cuda >= 10.1 or windows cuda >= 11.0
TORCH_CHECK(!mat2.is_sparse(), "bmm_sparse: Tensor 'mat2' must be dense");
TORCH_CHECK(self.dense_dim() == 0, "bmm_sparse: Tensor 'self' must have 0 dense dims, but has ", self.dense_dim());
TORCH_CHECK(self.sparse_dim() == 3, "bmm_sparse: Tensor 'self' must have 3 sparse dims, but has ", self.sparse_dim());
TORCH_CHECK(mat2.dim() == 3, "bmm_sparse: Tensor 'mat2' must have 3 dims, but has ", mat2.dim());
TORCH_CHECK(self.size(0) == mat2.size(0), "bmm_sparse: 'self.size(0)' and 'mat2.size(0)' must match");
TORCH_CHECK(self.size(2) == mat2.size(1), "bmm_sparse: 'self.size(2)' and 'mat2.size(1)' must match");
int64_t num_matrices = self.size(0);
int64_t dim_i = self.size(1);
int64_t dim_j = self.size(2);
int64_t dim_k = mat2.size(2);
result.resize_({num_matrices, dim_k, dim_i});
if ((self._nnz() == 0) || (dim_j == 0) || (dim_k == 0)) {
result.zero_().transpose_(1, 2);
return result;
}
Tensor tmp_result;
bool need_copy_result;
// If the result tensor is contiguous, we can just write results directly to it.
// Otherwise, we'll need to write results to a temp buffer and then copy.
if (result.is_contiguous()) {
tmp_result = result;
need_copy_result = false;
} else {
tmp_result = at::empty({num_matrices, dim_k, dim_i}, result.options(), at::MemoryFormat::Contiguous);
need_copy_result = true;
}
// Dense matrices have to be contiguous for cusparseSpMM to work
const Tensor mat2_contig = mat2.contiguous();
auto cusparse_handle = at::cuda::getCurrentCUDASparseHandle();
// First need to coalesce to get all of the first dimension indices
// in order since we'll be sending each matrix into the MM operation
SparseTensor self_coalesced = coalesce_sparse_cuda(self);
int64_t nnz = self_coalesced._nnz();
Tensor indices = self_coalesced._indices();
Tensor values = self_coalesced._values();
Tensor indices_dim0 = indices[0];
// Need to convert dim1 and dim2 indices to 32-bit since cusparseSpMM
// only supports 32-bit indices
Tensor indices_dim1 = indices[1].to(ScalarType::Int);
Tensor indices_dim2 = indices[2].to(ScalarType::Int);
std::unique_ptr<int64_t[]> mat_el_end_indices_host(new int64_t[num_matrices]);
int64_t* mat_el_end_indices_device;
cudaMalloc(&mat_el_end_indices_device, num_matrices*sizeof(int64_t));
search_end_matrix_indices(mat_el_end_indices_device, num_matrices, indices_dim0);
cudaMemcpy(
mat_el_end_indices_host.get(),
mat_el_end_indices_device,
num_matrices*sizeof(int64_t),
cudaMemcpyDeviceToHost
);
cudaFree(mat_el_end_indices_device);
// Need a pointer to an array to access within a lambda
int64_t* mat_el_end_indices = &mat_el_end_indices_host[0];
Scalar beta = 0;
Scalar alpha = 1;
int64_t mat_el_begin_idx = 0;
size_t workspace_buffer_size = 0;
void* workspace_buffer = nullptr;
// See Note [Enabling Deterministic Operations]
deterministic = deterministic || globalContext().deterministicAlgorithms();
cusparseSpMMAlg_t mm_alg = deterministic ? CUSPARSE_COOMM_ALG2 : CUSPARSE_COOMM_ALG1;
// Iterate through each set of 2D matrices within the 3D
// tensor inputs, performing a matrix multiply with each
AT_DISPATCH_FLOATING_TYPES(
values.scalar_type(), "bmm_sparse_cuda", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
uint32_t* row_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim1.data_ptr());
uint32_t* col_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim2.data_ptr());
scalar_t* values_start_ptr = reinterpret_cast<scalar_t*>(values.data_ptr());
scalar_t* mat2_start_ptr = reinterpret_cast<scalar_t*>(mat2_contig.data_ptr());
scalar_t* result_start_ptr = reinterpret_cast<scalar_t*>(tmp_result.data_ptr());
for (
int64_t cur_mat_num = 0;
(cur_mat_num < num_matrices);
cur_mat_num++
) {
int64_t mat_el_end_idx = mat_el_end_indices[cur_mat_num];
if (mat_el_end_idx != -1) {
mat_el_end_idx++;
// Create tensors to view just the current set of matrices
int64_t sparse_nnz = mat_el_end_idx - mat_el_begin_idx;
cudaDataType cuda_data_type = getTensorCudaDataType(mat2_contig);
uint32_t* row_indices_ptr = &row_indices_start_ptr[mat_el_begin_idx];
uint32_t* col_indices_ptr = &col_indices_start_ptr[mat_el_begin_idx];
scalar_t* values_ptr = &values_start_ptr[mat_el_begin_idx];
cusparseSpMatDescr_t sparse_descr;
TORCH_CUDASPARSE_CHECK(cusparseCreateCoo(
&sparse_descr,
dim_i,
dim_j,
sparse_nnz,
reinterpret_cast<void*>(row_indices_ptr),
reinterpret_cast<void*>(col_indices_ptr),
reinterpret_cast<void*>(values_ptr),
CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO,
cuda_data_type
));
scalar_t* mat2_ptr = &mat2_start_ptr[dim_k*dim_j*cur_mat_num];
cusparseDnMatDescr_t dense_descr;
TORCH_CUDASPARSE_CHECK(cusparseCreateDnMat(
&dense_descr,
dim_k,
dim_j,
dim_k,
reinterpret_cast<void*>(mat2_ptr),
cuda_data_type,
CUSPARSE_ORDER_COL
));
scalar_t* result_ptr = &result_start_ptr[dim_i*dim_k*cur_mat_num];
cusparseDnMatDescr_t result_descr;
TORCH_CUDASPARSE_CHECK(cusparseCreateDnMat(
&result_descr,
dim_i,
dim_k,
dim_i,
reinterpret_cast<void*>(result_ptr),
cuda_data_type,
CUSPARSE_ORDER_COL
));
size_t required_workspace_buffer_size = 0;
TORCH_CUDASPARSE_CHECK(cusparseSpMM_bufferSize(
cusparse_handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_TRANSPOSE,
(void*)&alpha_val,
sparse_descr,
dense_descr,
(void*)&beta_val,
result_descr,
cuda_data_type,
mm_alg,
&required_workspace_buffer_size
));
if (required_workspace_buffer_size > workspace_buffer_size) {
if (workspace_buffer != nullptr) {
cudaFree(workspace_buffer);
}
workspace_buffer_size = required_workspace_buffer_size;
cudaMallocManaged(&workspace_buffer, workspace_buffer_size);
}
TORCH_CUDASPARSE_CHECK(cusparseSpMM(
cusparse_handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_TRANSPOSE,
(void*)&alpha_val,
sparse_descr,
dense_descr,
(void*)&beta_val,
result_descr,
cuda_data_type,
mm_alg,
workspace_buffer
));
TORCH_CUDASPARSE_CHECK(cusparseDestroySpMat(sparse_descr));
TORCH_CUDASPARSE_CHECK(cusparseDestroyDnMat(dense_descr));
TORCH_CUDASPARSE_CHECK(cusparseDestroyDnMat(result_descr));
mat_el_begin_idx = mat_el_end_idx;
} else {
tmp_result[cur_mat_num].zero_();
}
}
}
);
if (need_copy_result) {
result.copy_(tmp_result);
}
// Need to transpose the result matrices since cusparse stores
// them in column-major order in memory
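// Each output matrix was written as a dim_i x dim_k column-major block, which
// the row-major result buffer of shape {b, dim_k, dim_i} stores as its
// transpose, so swapping dims 1 and 2 yields the logical {b, dim_i, dim_k}
// result without copying.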
result.transpose_(1,2);
if (workspace_buffer != nullptr) {
cudaFree(workspace_buffer);
}
#else
TORCH_CHECK(false, "bmm sparse-dense requires CUDA 10.1 or greater");
#endif
return result;
}
}} // namespace at::native
|
d0eba117fd73d23bf6c7bbfc7420ef28e303ccad.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <fstream>
#include <time.h>
#include <cmath>
#include <cstdlib>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
using namespace std;
__global__ void setup_kernel ( hiprandState_t * state, unsigned long seed )
{
unsigned int id = blockIdx.x*blockDim.x+threadIdx.x;
hiprand_init ( seed, id, 0, &state[id] );
}
__device__ float generate( hiprandState_t* globalState, int ind )
{
//int ind = threadIdx.x;
hiprandState_t localState = globalState[ind];
float RANDOM = hiprand_uniform( &localState );
globalState[ind] = localState;
return RANDOM;
}
__device__ float expo(double mu, hiprandState_t* globalState, int ind)
{
double u;
do{
u = generate(globalState, ind);
} while(u==1.0);
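// Inverse-transform sampling: for u ~ U(0,1), -mu*log(1-u) follows an
// exponential distribution with mean mu (the free-path length).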
return -log(1- u) * mu;
}
__global__ void model(double *dd, double *pos, double *dmu, double *dh, hiprandState_t* globalState)
{
unsigned int id = blockIdx.x*blockDim.x+threadIdx.x;
double mu = *dmu;
double h = *dh;
double d = *dd;
double pi = 3.1416;
double x=0, y=0; // initial particle coordinates
double l; // distance the particle travels between interaction events
//int n = 0;
double Theta = 0; // direction of particle motion
double dTh;
int n = 0;
while (n<10)
{
l = expo(mu, globalState, id);
y = y + l*cos(Theta);
x = x + l*sin(Theta);
if(y>=d) break;
if(y<0)
{x = h*10;
break;}
if(x<-h) break;
if(y>h) break;
dTh = generate(globalState, id)*(4*pi)-(2*pi);
Theta = Theta + dTh;
n++;
}
//printf("position: %f \n", x);
pos[id] = x;
}
int main() {
int N = 1; // number of particles
//cin >> N;
double d; // plate thickness
cin >> d;
double mu=0.07; // mean free path
double h = 100;
double *dmu, *dh, *dd;
//hipMalloc((void**)&dN, sizeof(int));
hipMalloc((void**)&dmu, sizeof(double));
hipMalloc((void**)&dh, sizeof(double));
hipMalloc((void**)&dd, sizeof(double));
//hipMemcpy(dN, &N, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dmu, &mu, sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dh, &h, sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dd, &d, sizeof(double), hipMemcpyHostToDevice);
double xpos[N];
double *pos;
hipMalloc((void**) &pos, N*sizeof(double));
hiprandState_t* devStates;
hipMalloc ( &devStates, N*sizeof( hiprandState_t ) );
// one thread per particle: pos and devStates each hold N entries
hipLaunchKernelGGL(setup_kernel, dim3(N), dim3(1), 0, 0, devStates, unsigned(time(NULL)));
hipLaunchKernelGGL(model, dim3(N), dim3(1), 0, 0, dd, pos, dmu, dh, devStates);
hipMemcpy(xpos, pos, N*sizeof(double), hipMemcpyDeviceToHost);
for (int j = 0; j<20; j++)
{
int n = 0;
for(int k = 0; k<N;k++){
if( (0.1*j -1)<=xpos[k] && (0.1*j -0.9) > xpos[k]) n++;
}
cout<<n<<endl;
}
// write results to a file
ofstream f;
f.open("E:\\data.txt");
for (int i = 0; i < N; i++) f << xpos[i] << endl;
f.close();
return 0;}
|
d0eba117fd73d23bf6c7bbfc7420ef28e303ccad.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <fstream>
#include <time.h>
#include <cmath>
#include <cstdlib>
#include <curand.h>
#include <curand_kernel.h>
#include <cuda.h>
#include <stdio.h>
using namespace std;
__global__ void setup_kernel ( curandState * state, unsigned long seed )
{
unsigned int id = blockIdx.x*blockDim.x+threadIdx.x;
curand_init ( seed, id, 0, &state[id] );
}
__device__ float generate( curandState* globalState, int ind )
{
//int ind = threadIdx.x;
curandState localState = globalState[ind];
float RANDOM = curand_uniform( &localState );
globalState[ind] = localState;
return RANDOM;
}
__device__ float expo(double mu, curandState* globalState, int ind)
{
double u;
do{
u = generate(globalState, ind);
} while(u==1.0);
return -log(1- u) * mu;
}
__global__ void model(double *dd, double *pos, double *dmu, double *dh, curandState* globalState)
{
unsigned int id = blockIdx.x*blockDim.x+threadIdx.x;
double mu = *dmu;
double h = *dh;
double d = *dd;
double pi = 3.1416;
double x=0, y=0; // initial particle coordinates
double l; // distance the particle travels between interaction events
//int n = 0;
double Theta = 0; // direction of particle motion
double dTh;
int n = 0;
while (n<10)
{
l = expo(mu, globalState, id);
y = y + l*cos(Theta);
x = x + l*sin(Theta);
if(y>=d) break;
if(y<0)
{x = h*10;
break;}
if(x<-h) break;
if(y>h) break;
dTh = generate(globalState, id)*(4*pi)-(2*pi);
Theta = Theta + dTh;
n++;
}
//printf("position: %f \n", x);
pos[id] = x;
}
int main() {
int N = 1; // number of particles
//cin >> N;
double d; // plate thickness
cin >> d;
double mu=0.07; // mean free path
double h = 100;
double *dmu, *dh, *dd;
//cudaMalloc((void**)&dN, sizeof(int));
cudaMalloc((void**)&dmu, sizeof(double));
cudaMalloc((void**)&dh, sizeof(double));
cudaMalloc((void**)&dd, sizeof(double));
//cudaMemcpy(dN, &N, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dmu, &mu, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dh, &h, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dd, &d, sizeof(double), cudaMemcpyHostToDevice);
double xpos[N];
double *pos;
cudaMalloc((void**) &pos, N*sizeof(double));
curandState* devStates;
cudaMalloc ( &devStates, N*sizeof( curandState ) );
// one thread per particle: pos and devStates each hold N entries
setup_kernel <<< N, 1 >>> ( devStates, unsigned(time(NULL)) );
model <<< N, 1 >>> ( dd, pos, dmu, dh, devStates );
cudaMemcpy(xpos, pos, N*sizeof(double), cudaMemcpyDeviceToHost);
for (int j = 0; j<20; j++)
{
int n = 0;
for(int k = 0; k<N;k++){
if( (0.1*j -1)<=xpos[k] && (0.1*j -0.9) > xpos[k]) n++;
}
cout<<n<<endl;
}
// write results to a file
ofstream f;
f.open("E:\\data.txt");
for (int i = 0; i < N; i++) f << xpos[i] << endl;
f.close();
return 0;}
|
651b2f44a303c27cd87416865453146c4907a375.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file alg4.cu
* @brief CUDA device code for GPU-Efficient Recursive Filtering Algorithm 4
* @author Rodolfo Lima
* @date September, 2011
*/
//== INCLUDES =================================================================
#include <cmath>
#include <cstdio>
#include <cfloat>
#include <cassert>
#include <iostream>
#include <algorithm>
#include <util.h>
#include <gpufilter.h>
#include <gpuconsts.cuh>
#include <alg4.cuh>
//== NAMESPACES ===============================================================
namespace gpufilter {
//== IMPLEMENTATION ===========================================================
//-- Utilities ----------------------------------------------------------------
template <class T>
__device__ inline void swap(T& a, T& b) {
T c = a;
a = b;
b = c;
}
__device__ float2 operator + ( const float2 &a,
const float2 &b ) {
return make_float2(a.x+b.x, a.y+b.y);
}
__device__ float2& operator += ( float2& a,
const float2& b ) {
a.x += b.x;
a.y += b.y;
return a;
}
__device__ float2 operator * ( const float2& a,
float b ) {
return make_float2(a.x*b, a.y*b);
}
__device__ float2 operator * ( float a,
const float2& b ) {
return b*a;
}
__device__ float2 operator / ( const float2& a,
float b ) {
return make_float2(a.x/b, a.y/b);
}
__device__ float2 mul2x2( const float2& v,
Matrix<float,2,2> mat) {
return make_float2(v.x*mat[0][0] + v.y*mat[1][0],
v.x*mat[0][1] + v.y*mat[1][1]);
}
//-- Algorithm 4_2 Stage 1 ----------------------------------------------------
__global__ __launch_bounds__(WS*DW, DNB)
void alg4_stage1( float2 *g_transp_pybar,
float2 *g_transp_ezhat )
{
int tx = threadIdx.x, ty = threadIdx.y, m = blockIdx.x*2, n = blockIdx.y;
// Each cuda block will work on two horizontally adjacent WSxWS
// input data blocks, so allocate enough shared memory for these.
__shared__ float s_block[WS*2][WS+1];
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty][tx],
(*bdata2)[WS+1] = (float (*)[WS+1])&s_block[ty+WS][tx];
// Load data into shared memory
float tu = ((m-c_border)*WS+tx+.5f)*c_inv_width,
tv = ((n-c_border)*WS+ty+.5f)*c_inv_height;
#pragma unroll
for (int i=0; i<WS-(WS%DW); i+=DW)
{
**bdata = tex2D(t_in, tu, tv);
bdata += DW;
**bdata2 = tex2D(t_in, tu+WS*c_inv_width, tv);
bdata2 += DW;
tv += DW*c_inv_height;
}
if (ty < WS%DW)
{
**bdata = tex2D(t_in, tu, tv);
**bdata2 = tex2D(t_in, tu+WS*c_inv_width, tv);
}
m += ty;
if (m >= c_m_size)
return;
// We use a transposed matrix for pybar and ezhat to have
// coalesced memory accesses. This is the index for these
// transposed buffers.
g_transp_pybar += m*c_carry_height + n*WS + tx;
g_transp_ezhat += m*c_carry_height + n*WS + tx;
__syncthreads();
float2 prev; // .x -> p0, .y -> p1
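// The scans below evaluate the second-order recurrence
//   y[j] = x[j] - c_a1*y[j-1] - c_a2*y[j-2]
// keeping the two most recent outputs in prev and swapping them each step.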
if (ty < 2)
{
float *bdata = s_block[tx+ty*WS];
// calculate pybar, scan left -> right
prev = make_float2(0,*bdata++);
#pragma unroll
for (int j=1; j<WS; ++j, ++bdata)
{
*bdata = prev.x = *bdata - prev.y*c_a1 - prev.x*c_a2;
swap(prev.x, prev.y);
}
if (m < c_m_size-1)
*g_transp_pybar = prev*c_b0;
if (m > 0)
{
// calculate ezhat, scan right -> left
prev = make_float2(*--bdata, 0);
--bdata;
#pragma unroll
for (int j=WS-2; j>=0; --j, --bdata)
{
*bdata = prev.y = *bdata - prev.x*c_a1 - prev.y*c_a2;
swap(prev.x, prev.y);
}
*g_transp_ezhat = prev*(c_b0*c_b0);
}
}
}
//-- Algorithm 4_2 Stage 2 and 3 or Stage 5 and 6 -----------------------------
__device__
void alg4_stage2_3v5_6( float2 *g_transp_pybar,
float2 *g_transp_ezhat )
{
int tx = threadIdx.x, ty = threadIdx.y, n = blockIdx.y;
__shared__ float2 s_transp_block[DW][WS];
float2 *bdata = &s_transp_block[ty][tx];
// P(ybar) -> P(y) processing --------------------------------------
float2 *transp_pybar = g_transp_pybar + ty*c_carry_height + n*WS+tx;
// first column-block
// read P(ybar)
*bdata = *transp_pybar;
float2 py; // P(Y), .x = p0, .y = p1
__syncthreads();
if (ty == 0)
{
float2 (*bdata)[WS] = (float2 (*)[WS]) &s_transp_block[0][tx];
// (24): P_m(y) = P_m(ybar) + A^b_F * P_{m-1}(y)
py = **bdata++;
#pragma unroll
for (int m=1; m<blockDim.y; ++m, ++bdata)
**bdata = py = **bdata + mul2x2(py,c_AbF2);
}
__syncthreads();
// write P(y)
if (ty > 0) // first one doesn't need fixing
*transp_pybar = *bdata;
transp_pybar += c_carry_height*blockDim.y;
// middle column-blocks
int m = blockDim.y;
if (m == DW)
{
int mmax = c_m_size-(c_m_size%DW)-1;
for (; m<mmax; m+=DW)
{
*bdata = *transp_pybar;
__syncthreads();
if (ty == 0)
{
float2 (*bdata)[WS] = (float2 (*)[WS]) &s_transp_block[0][tx];
#pragma unroll
for (int dm=0; dm<DW; ++dm, ++bdata)
**bdata = py = **bdata + mul2x2(py,c_AbF2);
}
__syncthreads();
*transp_pybar = *bdata;
transp_pybar += c_carry_height*DW;
}
}
// remaining column-blocks
if (m < c_m_size-1)
{
if (m+ty < c_m_size-1)
*bdata = *transp_pybar;
int remaining = c_m_size-1 - m;
__syncthreads();
if (ty == 0)
{
float2 (*bdata)[WS] = (float2 (*)[WS]) &s_transp_block[0][tx];
#pragma unroll
for (int dm=0; dm<remaining; ++dm, ++bdata)
**bdata = py = **bdata + mul2x2(py,c_AbF2);
}
__syncthreads();
if (m+ty < c_m_size-1)
*transp_pybar = *bdata;
}
// E(zhat) -> E(z) processing --------------------------------------
int idx = (c_m_size-1-ty)*c_carry_height + n*WS+tx;
const float2 *transp_pm1y = g_transp_pybar + idx - c_carry_height;
// last column-block
float2 *transp_ezhat = g_transp_ezhat + idx;
m = c_m_size-1;
// all pybars must be updated!
__syncthreads();
float2 ez;
if (m-ty > 0)
{
*bdata = *transp_ezhat;
*bdata += mul2x2(*transp_pm1y,c_AFP_HARB);
__syncthreads();
if (ty == 0)
{
float2 (*bdata)[WS] = (float2 (*)[WS]) &s_transp_block[0][tx];
ez = **bdata++;
for (int dm=1; dm<blockDim.y; ++dm, ++bdata)
**bdata = ez = **bdata + mul2x2(ez,c_AbR2);
}
__syncthreads();
*transp_ezhat = *bdata;
}
transp_ezhat -= c_carry_height*blockDim.y;
transp_pm1y -= c_carry_height*blockDim.y;
// middle column-blocks
m = c_m_size-1 - blockDim.y;
if (blockDim.y == DW)
{
int mmin = c_m_size%DW;
for (; m>=mmin; m-=DW)
{
if (m > 0)
{
*bdata = *transp_ezhat;
if (m-ty > 0)
*bdata += mul2x2(*transp_pm1y,c_AFP_HARB);
__syncthreads();
if (ty == 0)
{
float2 (*bdata)[WS] = (float2 (*)[WS]) &s_transp_block[0][tx];
#pragma unroll
for (int dm=0; dm<DW; ++dm, ++bdata)
**bdata = ez = **bdata + mul2x2(ez,c_AbR2);
}
__syncthreads();
*transp_ezhat = *bdata;
}
transp_ezhat -= DW*c_carry_height;
transp_pm1y -= DW*c_carry_height;
}
}
// remaining column-blocks
if (m > 0)
{
int remaining = m+1;
if (m-ty >= 0)
{
*bdata = *transp_ezhat;
if (m-ty > 0)
*bdata += mul2x2(*transp_pm1y,c_AFP_HARB);
}
__syncthreads();
if (ty == 0)
{
float2 (*bdata)[WS] = (float2 (*)[WS]) &s_transp_block[0][tx];
// (24): P_m(y) = P_m(ybar) + A^b_F * P_{m-1}(y)
#pragma unroll
for (int dm=0; dm<remaining; ++dm, ++bdata)
**bdata = ez = **bdata + mul2x2(ez,c_AbR2);
}
__syncthreads();
if (m-ty > 0)
*transp_ezhat = *bdata;
}
}
//-- Algorithm 4_2 Stage 4 or Stage 7 -----------------------------------------
template <bool p_fusion>
__device__
void alg4_stage4v7( float *g_transp_out,
float2 *g_transp_py,
float2 *g_transp_ez,
float2 *g_pubar,
float2 *g_evhat,
int out_stride )
{
int tx = threadIdx.x, ty = threadIdx.y, m = blockIdx.x*2, n = blockIdx.y;
// Each cuda block will work on two horizontally adjacent WSxWS
// input data blocks, so allocate enough shared memory for these.
__shared__ float s_block[WS*2][WS+1];
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty][tx],
(*bdata2)[WS+1] = (float (*)[WS+1])&s_block[ty+WS][tx];
// Load data into shared memory
float tu = ((m-c_border)*WS+tx + 0.5f)*c_inv_width,
tv = ((n-c_border)*WS+ty + 0.5f)*c_inv_height;
#pragma unroll
for (int i=0; i<WS-(WS%DW); i+=DW)
{
**bdata = tex2D(t_in, tu, tv);
bdata += DW;
**bdata2 = tex2D(t_in, tu+WS*c_inv_width, tv);
bdata2 += DW;
tv += DW*c_inv_height;
}
if (ty < WS%DW)
{
**bdata = tex2D(t_in, tu, tv);
**bdata2 = tex2D(t_in, tu+WS*c_inv_width, tv);
}
m += ty;
if (m >= c_m_size)
return;
// We use a transposed matrix for py and ez to have coalesced
// memory accesses. This is the index for these transposed
// buffers.
g_transp_py += (m-1)*c_carry_height + n*WS + tx;
g_transp_ez += (m+1)*c_carry_height + n*WS + tx;
__syncthreads();
if (ty < 2)
{
float2 prev; // .x -> p0, .y -> p1
float *bdata = s_block[tx+ty*WS];
// calculate pybar, scan left -> right
if (m > 0)
prev = *g_transp_py * c_inv_b0;
else
prev = make_float2(0,0);
#pragma unroll
for (int j=0; j<WS; ++j, ++bdata)
{
*bdata = prev.x = *bdata - prev.y*c_a1 - prev.x*c_a2;
swap(prev.x, prev.y);
}
--bdata;
// calculate ez, scan right -> left
if (m < c_m_size-1)
prev = *g_transp_ez;
else
prev = make_float2(0,0);
float b0_2 = c_b0*c_b0;
// For some reason it's faster when this is here than inside
// the next if block
int x = (m-c_border+1)*WS-1;
int y = (n-c_border)*WS+tx;
// current block intersects transp_out's area?
if (m >= c_border && m <= c_last_m && n >= c_border && n <= c_last_n)
{
// image's end is in the middle of the block and we're outside
// the image width?
if (x >= c_width)
{
// process data until we get into the image
int j;
#pragma unroll
for (j=x; j>=c_width; --j, --bdata)
{
prev.y = *bdata*b0_2 - prev.x*c_a1 - prev.y*c_a2;
if (p_fusion)
*bdata = prev.y;
swap(prev.x, prev.y);
}
// now we're inside the image, we must write to transp_out
float *out = g_transp_out + (c_width-1)*out_stride + y;
int mmin = x-(WS-1);
#pragma unroll
for (;j>=mmin; --j, --bdata, out -= out_stride)
{
prev.y = *bdata*b0_2 - prev.x*c_a1 - prev.y*c_a2;
if (p_fusion)
*bdata = prev.y;
if (y < c_height)
*out = prev.y;
swap(prev.x, prev.y);
}
}
else
{
float *out = g_transp_out + x*out_stride + y;
#pragma unroll
for (int j=WS-1; j>=0; --j, --bdata, out -= out_stride)
{
prev.y = *bdata*b0_2 - prev.x*c_a1 - prev.y*c_a2;
if (p_fusion)
*bdata = prev.y;
if (y < c_height)
*out = prev.y;
swap(prev.x, prev.y);
}
}
}
else
{
#pragma unroll
for (int j=WS-1; j>=0; --j, --bdata)
{
prev.y = *bdata*b0_2 - prev.x*c_a1 - prev.y*c_a2;
if (p_fusion)
*bdata = prev.y;
swap(prev.x, prev.y);
}
}
if (p_fusion)
{
g_pubar += n*c_carry_width + m*WS + tx;
g_evhat += n*c_carry_width + m*WS + tx;
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty*WS][tx];
// calculate pubar, scan left -> right
float2 prev = make_float2(0,**bdata++);
#pragma unroll
for (int i=1; i<WS; ++i, ++bdata)
{
**bdata = prev.x = **bdata - prev.y*c_a1 - prev.x*c_a2;
swap(prev.x, prev.y);
}
if (n < c_n_size-1)
*g_pubar = prev*c_b0;
if (n > 0)
{
// calculate evhat, scan right -> left
prev = make_float2(**--bdata, 0);
--bdata;
#pragma unroll
for (int i=WS-2; i>=0; --i, --bdata)
{
prev.y = **bdata - prev.x*c_a1 - prev.y*c_a2;
swap(prev.x, prev.y);
}
*g_evhat = prev*b0_2;
}
}
}
}
//-- Algorithm 4_2 Stage 2 and 3 ----------------------------------------------
__global__ __launch_bounds__(WS*DW, DNB)
void alg4_stage2_3( float2 *g_transp_pybar,
float2 *g_transp_ezhat ) {
alg4_stage2_3v5_6( g_transp_pybar, g_transp_ezhat );
}
//-- Algorithm 4_2 Stage 4 ----------------------------------------------------
__global__ __launch_bounds__(WS*DW, DNB)
void alg4_stage4( float *g_transp_out,
float2 *g_transp_py,
float2 *g_transp_ez,
float2 *g_pubar,
float2 *g_evhat,
int out_stride ) {
alg4_stage4v7<true>( g_transp_out, g_transp_py, g_transp_ez, g_pubar,
g_evhat, out_stride );
}
//-- Algorithm 4_2 Stage 5 and 6 ----------------------------------------------
__global__ __launch_bounds__(WS*DW, DNB)
void alg4_stage5_6( float2 *g_transp_pybar,
float2 *g_transp_ezhat ) {
alg4_stage2_3v5_6( g_transp_pybar, g_transp_ezhat );
}
//-- Algorithm 4_2 Stage 7 ----------------------------------------------------
__global__ __launch_bounds__(WS*DW, DNB)
void alg4_stage7( float *g_out,
float2 *g_transp_py,
float2 *g_transp_ez,
int out_stride ) {
alg4_stage4v7<false>( g_out, g_transp_py, g_transp_ez, 0, 0,
out_stride );
}
//-- Host ---------------------------------------------------------------------
__host__
inline int transp_out_height( const int& h ) {
// hipBindTexture2D chokes when memory block stride isn't
// multiple of 256 bytes, let's add some padding.
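// e.g. assuming WS == 32, transp_out_height(100) == 128, i.e. a row
// stride of 128 floats (512 bytes).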
return ((h+WS-1)/WS)*WS;
}
__host__
void prepare_alg4( alg_setup& algs,
alg_setup& algs_transp,
dvector<float>& d_out,
dvector<float>& d_transp_out,
dvector<float2>& d_transp_pybar,
dvector<float2>& d_transp_ezhat,
dvector<float2>& d_pubar,
dvector<float2>& d_evhat,
hipArray *& a_in,
const float *h_in,
const int& w,
const int& h,
const float& b0,
const float& a1,
const float& a2,
const int& extb,
const initcond& ic )
{
up_constants_coefficients2( b0, a1, a2 );
calc_alg_setup( algs, w, h, extb );
calc_alg_setup( algs_transp, h, w, extb );
d_out.resize( w * h );
d_transp_out.resize( transp_out_height(h) * w );
d_transp_pybar.resize( algs.m_size * algs.carry_height );
d_transp_ezhat.resize( algs.m_size * algs.carry_height );
d_pubar.resize( algs.n_size * algs.carry_width );
d_evhat.resize( algs.n_size * algs.carry_width );
d_transp_pybar.fill_zero();
d_transp_ezhat.fill_zero();
d_pubar.fill_zero();
d_evhat.fill_zero();
up_texture( a_in, h_in, w, h, ic );
}
__host__
void alg4( dvector<float>& d_out,
dvector<float>& d_transp_out,
dvector<float2>& d_transp_pybar,
dvector<float2>& d_transp_ezhat,
dvector<float2>& d_pubar,
dvector<float2>& d_evhat,
const hipArray *a_in,
const alg_setup& algs,
const alg_setup& algs_transp )
{
dvector<float2> d_transp_py, d_transp_ez, d_pu, d_ev;
hipBindTextureToArray( t_in, a_in );
up_alg_setup( algs );
hipLaunchKernelGGL(( alg4_stage1),
dim3(dim3((algs.m_size+2-1)/2, algs.n_size)), dim3(dim3(WS, DW)) , 0, 0,
d_transp_pybar, d_transp_ezhat );
hipLaunchKernelGGL(( alg4_stage2_3),
dim3(dim3(1, algs.n_size)), dim3(dim3(WS, std::min<int>(algs.m_size, DW))) , 0, 0,
d_transp_pybar, d_transp_ezhat );
swap( d_transp_pybar, d_transp_py );
swap( d_transp_ezhat, d_transp_ez );
hipLaunchKernelGGL(( alg4_stage4),
dim3(dim3((algs.m_size+2-1)/2, algs.n_size)), dim3(dim3(WS, DW)) , 0, 0,
d_transp_out, d_transp_py, d_transp_ez, d_pubar, d_evhat,
transp_out_height(algs.height) );
up_alg_setup( algs_transp );
hipLaunchKernelGGL(( alg4_stage5_6),
dim3(dim3(1, algs.m_size)), dim3(dim3(WS, std::min<int>(algs.n_size, DW))) , 0, 0,
d_pubar, d_evhat );
swap( d_pubar, d_pu );
swap( d_evhat, d_ev );
hipUnbindTexture( t_in );
size_t offset;
hipBindTexture2D( &offset, t_in, d_transp_out, algs.height, algs.width,
transp_out_height(algs.height)*sizeof(float) );
hipLaunchKernelGGL(( alg4_stage7),
dim3(dim3((algs.n_size+2-1)/2, algs.m_size)), dim3(dim3(WS, DW)) , 0, 0,
d_out, d_pu, d_ev, algs.width );
swap( d_ev, d_evhat );
swap( d_pu, d_pubar );
swap( d_transp_ez, d_transp_ezhat );
swap( d_transp_py, d_transp_pybar );
hipUnbindTexture( t_in );
}
__host__
void alg4( float *h_inout,
const int& w,
const int& h,
const float& b0,
const float& a1,
const float& a2,
const int& extb,
const initcond& ic )
{
alg_setup algs, algs_transp;
dvector<float> d_out, d_transp_out;
dvector<float2> d_transp_pybar, d_transp_ezhat, d_pubar, d_evhat;
hipArray *a_in;
prepare_alg4( algs, algs_transp, d_out, d_transp_out, d_transp_pybar,
d_transp_ezhat, d_pubar, d_evhat, a_in, h_inout, w, h,
b0, a1, a2, extb, ic );
alg4( d_out, d_transp_out, d_transp_pybar, d_transp_ezhat, d_pubar,
d_evhat, a_in, algs, algs_transp );
d_out.copy_to( h_inout, w * h );
hipFreeArray( a_in );
}
//=============================================================================
} // namespace gpufilter
//=============================================================================
// vi: ai ts=4 sw=4
|
651b2f44a303c27cd87416865453146c4907a375.cu
|
/**
* @file alg4.cu
* @brief CUDA device code for GPU-Efficient Recursive Filtering Algorithm 4
* @author Rodolfo Lima
* @date September, 2011
*/
//== INCLUDES =================================================================
#include <cmath>
#include <cstdio>
#include <cfloat>
#include <cassert>
#include <iostream>
#include <algorithm>
#include <util.h>
#include <gpufilter.h>
#include <gpuconsts.cuh>
#include <alg4.cuh>
//== NAMESPACES ===============================================================
namespace gpufilter {
//== IMPLEMENTATION ===========================================================
//-- Utilities ----------------------------------------------------------------
template <class T>
__device__ inline void swap(T& a, T& b) {
T c = a;
a = b;
b = c;
}
__device__ float2 operator + ( const float2 &a,
const float2 &b ) {
return make_float2(a.x+b.x, a.y+b.y);
}
__device__ float2& operator += ( float2& a,
const float2& b ) {
a.x += b.x;
a.y += b.y;
return a;
}
__device__ float2 operator * ( const float2& a,
float b ) {
return make_float2(a.x*b, a.y*b);
}
__device__ float2 operator * ( float a,
const float2& b ) {
return b*a;
}
__device__ float2 operator / ( const float2& a,
float b ) {
return make_float2(a.x/b, a.y/b);
}
__device__ float2 mul2x2( const float2& v,
Matrix<float,2,2> mat) {
return make_float2(v.x*mat[0][0] + v.y*mat[1][0],
v.x*mat[0][1] + v.y*mat[1][1]);
}
//-- Algorithm 4_2 Stage 1 ----------------------------------------------------
__global__ __launch_bounds__(WS*DW, DNB)
void alg4_stage1( float2 *g_transp_pybar,
float2 *g_transp_ezhat )
{
int tx = threadIdx.x, ty = threadIdx.y, m = blockIdx.x*2, n = blockIdx.y;
// Each cuda block will work on two horizontally adjacent WSxWS
// input data blocks, so allocate enough shared memory for these.
__shared__ float s_block[WS*2][WS+1];
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty][tx],
(*bdata2)[WS+1] = (float (*)[WS+1])&s_block[ty+WS][tx];
// Load data into shared memory
float tu = ((m-c_border)*WS+tx+.5f)*c_inv_width,
tv = ((n-c_border)*WS+ty+.5f)*c_inv_height;
#pragma unroll
for (int i=0; i<WS-(WS%DW); i+=DW)
{
**bdata = tex2D(t_in, tu, tv);
bdata += DW;
**bdata2 = tex2D(t_in, tu+WS*c_inv_width, tv);
bdata2 += DW;
tv += DW*c_inv_height;
}
if (ty < WS%DW)
{
**bdata = tex2D(t_in, tu, tv);
**bdata2 = tex2D(t_in, tu+WS*c_inv_width, tv);
}
m += ty;
if (m >= c_m_size)
return;
// We use a transposed matrix for pybar and ezhat to have
// coalesced memory accesses. This is the index for these
// transposed buffers.
g_transp_pybar += m*c_carry_height + n*WS + tx;
g_transp_ezhat += m*c_carry_height + n*WS + tx;
__syncthreads();
float2 prev; // .x -> p0, .y -> p1
if (ty < 2)
{
float *bdata = s_block[tx+ty*WS];
// calculate pybar, scan left -> right
prev = make_float2(0,*bdata++);
#pragma unroll
for (int j=1; j<WS; ++j, ++bdata)
{
*bdata = prev.x = *bdata - prev.y*c_a1 - prev.x*c_a2;
swap(prev.x, prev.y);
}
if (m < c_m_size-1)
*g_transp_pybar = prev*c_b0;
if (m > 0)
{
// calculate ezhat, scan right -> left
prev = make_float2(*--bdata, 0);
--bdata;
#pragma unroll
for (int j=WS-2; j>=0; --j, --bdata)
{
*bdata = prev.y = *bdata - prev.x*c_a1 - prev.y*c_a2;
swap(prev.x, prev.y);
}
*g_transp_ezhat = prev*(c_b0*c_b0);
}
}
}
//-- Algorithm 4_2 Stage 2 and 3 or Stage 5 and 6 -----------------------------
__device__
void alg4_stage2_3v5_6( float2 *g_transp_pybar,
float2 *g_transp_ezhat )
{
int tx = threadIdx.x, ty = threadIdx.y, n = blockIdx.y;
__shared__ float2 s_transp_block[DW][WS];
float2 *bdata = &s_transp_block[ty][tx];
// P(ybar) -> P(y) processing --------------------------------------
float2 *transp_pybar = g_transp_pybar + ty*c_carry_height + n*WS+tx;
// first column-block
// read P(ybar)
*bdata = *transp_pybar;
float2 py; // P(Y), .x = p0, .y = p1
__syncthreads();
if (ty == 0)
{
float2 (*bdata)[WS] = (float2 (*)[WS]) &s_transp_block[0][tx];
// (24): P_m(y) = P_m(ybar) + A^b_F * P_{m-1}(y)
py = **bdata++;
#pragma unroll
for (int m=1; m<blockDim.y; ++m, ++bdata)
**bdata = py = **bdata + mul2x2(py,c_AbF2);
}
__syncthreads();
// write P(y)
if (ty > 0) // first one doesn't need fixing
*transp_pybar = *bdata;
transp_pybar += c_carry_height*blockDim.y;
// middle column-blocks
int m = blockDim.y;
if (m == DW)
{
int mmax = c_m_size-(c_m_size%DW)-1;
for (; m<mmax; m+=DW)
{
*bdata = *transp_pybar;
__syncthreads();
if (ty == 0)
{
float2 (*bdata)[WS] = (float2 (*)[WS]) &s_transp_block[0][tx];
#pragma unroll
for (int dm=0; dm<DW; ++dm, ++bdata)
**bdata = py = **bdata + mul2x2(py,c_AbF2);
}
__syncthreads();
*transp_pybar = *bdata;
transp_pybar += c_carry_height*DW;
}
}
// remaining column-blocks
if (m < c_m_size-1)
{
if (m+ty < c_m_size-1)
*bdata = *transp_pybar;
int remaining = c_m_size-1 - m;
__syncthreads();
if (ty == 0)
{
float2 (*bdata)[WS] = (float2 (*)[WS]) &s_transp_block[0][tx];
#pragma unroll
for (int dm=0; dm<remaining; ++dm, ++bdata)
**bdata = py = **bdata + mul2x2(py,c_AbF2);
}
__syncthreads();
if (m+ty < c_m_size-1)
*transp_pybar = *bdata;
}
// E(zhat) -> E(z) processing --------------------------------------
int idx = (c_m_size-1-ty)*c_carry_height + n*WS+tx;
const float2 *transp_pm1y = g_transp_pybar + idx - c_carry_height;
// last column-block
float2 *transp_ezhat = g_transp_ezhat + idx;
m = c_m_size-1;
// all pybars must be updated!
__syncthreads();
float2 ez;
if (m-ty > 0)
{
*bdata = *transp_ezhat;
*bdata += mul2x2(*transp_pm1y,c_AFP_HARB);
__syncthreads();
if (ty == 0)
{
float2 (*bdata)[WS] = (float2 (*)[WS]) &s_transp_block[0][tx];
ez = **bdata++;
for (int dm=1; dm<blockDim.y; ++dm, ++bdata)
**bdata = ez = **bdata + mul2x2(ez,c_AbR2);
}
__syncthreads();
*transp_ezhat = *bdata;
}
transp_ezhat -= c_carry_height*blockDim.y;
transp_pm1y -= c_carry_height*blockDim.y;
// middle column-blocks
m = c_m_size-1 - blockDim.y;
if (blockDim.y == DW)
{
int mmin = c_m_size%DW;
for (; m>=mmin; m-=DW)
{
if (m > 0)
{
*bdata = *transp_ezhat;
if (m-ty > 0)
*bdata += mul2x2(*transp_pm1y,c_AFP_HARB);
__syncthreads();
if (ty == 0)
{
float2 (*bdata)[WS] = (float2 (*)[WS]) &s_transp_block[0][tx];
#pragma unroll
for (int dm=0; dm<DW; ++dm, ++bdata)
**bdata = ez = **bdata + mul2x2(ez,c_AbR2);
}
__syncthreads();
*transp_ezhat = *bdata;
}
transp_ezhat -= DW*c_carry_height;
transp_pm1y -= DW*c_carry_height;
}
}
// remaining column-blocks
if (m > 0)
{
int remaining = m+1;
if (m-ty >= 0)
{
*bdata = *transp_ezhat;
if (m-ty > 0)
*bdata += mul2x2(*transp_pm1y,c_AFP_HARB);
}
__syncthreads();
if (ty == 0)
{
float2 (*bdata)[WS] = (float2 (*)[WS]) &s_transp_block[0][tx];
// (24): P_m(y) = P_m(ybar) + A^b_F * P_{m-1}(y)
#pragma unroll
for (int dm=0; dm<remaining; ++dm, ++bdata)
**bdata = ez = **bdata + mul2x2(ez,c_AbR2);
}
__syncthreads();
if (m-ty > 0)
*transp_ezhat = *bdata;
}
}
//-- Algorithm 4_2 Stage 4 or Stage 7 -----------------------------------------
template <bool p_fusion>
__device__
void alg4_stage4v7( float *g_transp_out,
float2 *g_transp_py,
float2 *g_transp_ez,
float2 *g_pubar,
float2 *g_evhat,
int out_stride )
{
int tx = threadIdx.x, ty = threadIdx.y, m = blockIdx.x*2, n = blockIdx.y;
// Each cuda block will work on two horizontally adjacent WSxWS
// input data blocks, so allocate enough shared memory for these.
__shared__ float s_block[WS*2][WS+1];
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty][tx],
(*bdata2)[WS+1] = (float (*)[WS+1])&s_block[ty+WS][tx];
// Load data into shared memory
float tu = ((m-c_border)*WS+tx + 0.5f)*c_inv_width,
tv = ((n-c_border)*WS+ty + 0.5f)*c_inv_height;
#pragma unroll
for (int i=0; i<WS-(WS%DW); i+=DW)
{
**bdata = tex2D(t_in, tu, tv);
bdata += DW;
**bdata2 = tex2D(t_in, tu+WS*c_inv_width, tv);
bdata2 += DW;
tv += DW*c_inv_height;
}
if (ty < WS%DW)
{
**bdata = tex2D(t_in, tu, tv);
**bdata2 = tex2D(t_in, tu+WS*c_inv_width, tv);
}
m += ty;
if (m >= c_m_size)
return;
// We use a transposed matrix for py and ez to have coalesced
// memory accesses. This is the index for these transposed
// buffers.
g_transp_py += (m-1)*c_carry_height + n*WS + tx;
g_transp_ez += (m+1)*c_carry_height + n*WS + tx;
__syncthreads();
if (ty < 2)
{
float2 prev; // .x -> p0, .y -> p1
float *bdata = s_block[tx+ty*WS];
// calculate pybar, scan left -> right
if (m > 0)
prev = *g_transp_py * c_inv_b0;
else
prev = make_float2(0,0);
#pragma unroll
for (int j=0; j<WS; ++j, ++bdata)
{
*bdata = prev.x = *bdata - prev.y*c_a1 - prev.x*c_a2;
swap(prev.x, prev.y);
}
--bdata;
// calculate ez, scan right -> left
if (m < c_m_size-1)
prev = *g_transp_ez;
else
prev = make_float2(0,0);
float b0_2 = c_b0*c_b0;
// For some reason it's faster when this is here than inside
// the next if block
int x = (m-c_border+1)*WS-1;
int y = (n-c_border)*WS+tx;
// current block intersects transp_out's area?
if (m >= c_border && m <= c_last_m && n >= c_border && n <= c_last_n)
{
// image's end is in the middle of the block and we're outside
// the image width?
if (x >= c_width)
{
// process data until we get into the image
int j;
#pragma unroll
for (j=x; j>=c_width; --j, --bdata)
{
prev.y = *bdata*b0_2 - prev.x*c_a1 - prev.y*c_a2;
if (p_fusion)
*bdata = prev.y;
swap(prev.x, prev.y);
}
// now we're inside the image, we must write to transp_out
float *out = g_transp_out + (c_width-1)*out_stride + y;
int mmin = x-(WS-1);
#pragma unroll
for (;j>=mmin; --j, --bdata, out -= out_stride)
{
prev.y = *bdata*b0_2 - prev.x*c_a1 - prev.y*c_a2;
if (p_fusion)
*bdata = prev.y;
if (y < c_height)
*out = prev.y;
swap(prev.x, prev.y);
}
}
else
{
float *out = g_transp_out + x*out_stride + y;
#pragma unroll
for (int j=WS-1; j>=0; --j, --bdata, out -= out_stride)
{
prev.y = *bdata*b0_2 - prev.x*c_a1 - prev.y*c_a2;
if (p_fusion)
*bdata = prev.y;
if (y < c_height)
*out = prev.y;
swap(prev.x, prev.y);
}
}
}
else
{
#pragma unroll
for (int j=WS-1; j>=0; --j, --bdata)
{
prev.y = *bdata*b0_2 - prev.x*c_a1 - prev.y*c_a2;
if (p_fusion)
*bdata = prev.y;
swap(prev.x, prev.y);
}
}
if (p_fusion)
{
g_pubar += n*c_carry_width + m*WS + tx;
g_evhat += n*c_carry_width + m*WS + tx;
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty*WS][tx];
// calculate pubar, scan left -> right
float2 prev = make_float2(0,**bdata++);
#pragma unroll
for (int i=1; i<WS; ++i, ++bdata)
{
**bdata = prev.x = **bdata - prev.y*c_a1 - prev.x*c_a2;
swap(prev.x, prev.y);
}
if (n < c_n_size-1)
*g_pubar = prev*c_b0;
if (n > 0)
{
// calculate evhat, scan right -> left
prev = make_float2(**--bdata, 0);
--bdata;
#pragma unroll
for (int i=WS-2; i>=0; --i, --bdata)
{
prev.y = **bdata - prev.x*c_a1 - prev.y*c_a2;
swap(prev.x, prev.y);
}
*g_evhat = prev*b0_2;
}
}
}
}
//-- Algorithm 4_2 Stage 2 and 3 ----------------------------------------------
__global__ __launch_bounds__(WS*DW, DNB)
void alg4_stage2_3( float2 *g_transp_pybar,
float2 *g_transp_ezhat ) {
alg4_stage2_3v5_6( g_transp_pybar, g_transp_ezhat );
}
//-- Algorithm 4_2 Stage 4 ----------------------------------------------------
__global__ __launch_bounds__(WS*DW, DNB)
void alg4_stage4( float *g_transp_out,
float2 *g_transp_py,
float2 *g_transp_ez,
float2 *g_pubar,
float2 *g_evhat,
int out_stride ) {
alg4_stage4v7<true>( g_transp_out, g_transp_py, g_transp_ez, g_pubar,
g_evhat, out_stride );
}
//-- Algorithm 4_2 Stage 5 and 6 ----------------------------------------------
__global__ __launch_bounds__(WS*DW, DNB)
void alg4_stage5_6( float2 *g_transp_pybar,
float2 *g_transp_ezhat ) {
alg4_stage2_3v5_6( g_transp_pybar, g_transp_ezhat );
}
//-- Algorithm 4_2 Stage 7 ----------------------------------------------------
__global__ __launch_bounds__(WS*DW, DNB)
void alg4_stage7( float *g_out,
float2 *g_transp_py,
float2 *g_transp_ez,
int out_stride ) {
alg4_stage4v7<false>( g_out, g_transp_py, g_transp_ez, 0, 0,
out_stride );
}
//-- Host ---------------------------------------------------------------------
__host__
inline int transp_out_height( const int& h ) {
// cudaBindTexture2D chokes when memory block stride isn't
// multiple of 256 bytes, let's add some padding.
return ((h+WS-1)/WS)*WS;
}
__host__
void prepare_alg4( alg_setup& algs,
alg_setup& algs_transp,
dvector<float>& d_out,
dvector<float>& d_transp_out,
dvector<float2>& d_transp_pybar,
dvector<float2>& d_transp_ezhat,
dvector<float2>& d_pubar,
dvector<float2>& d_evhat,
cudaArray *& a_in,
const float *h_in,
const int& w,
const int& h,
const float& b0,
const float& a1,
const float& a2,
const int& extb,
const initcond& ic )
{
up_constants_coefficients2( b0, a1, a2 );
calc_alg_setup( algs, w, h, extb );
calc_alg_setup( algs_transp, h, w, extb );
d_out.resize( w * h );
d_transp_out.resize( transp_out_height(h) * w );
d_transp_pybar.resize( algs.m_size * algs.carry_height );
d_transp_ezhat.resize( algs.m_size * algs.carry_height );
d_pubar.resize( algs.n_size * algs.carry_width );
d_evhat.resize( algs.n_size * algs.carry_width );
d_transp_pybar.fill_zero();
d_transp_ezhat.fill_zero();
d_pubar.fill_zero();
d_evhat.fill_zero();
up_texture( a_in, h_in, w, h, ic );
}
__host__
void alg4( dvector<float>& d_out,
dvector<float>& d_transp_out,
dvector<float2>& d_transp_pybar,
dvector<float2>& d_transp_ezhat,
dvector<float2>& d_pubar,
dvector<float2>& d_evhat,
const cudaArray *a_in,
const alg_setup& algs,
const alg_setup& algs_transp )
{
dvector<float2> d_transp_py, d_transp_ez, d_pu, d_ev;
cudaBindTextureToArray( t_in, a_in );
up_alg_setup( algs );
alg4_stage1<<<
dim3((algs.m_size+2-1)/2, algs.n_size), dim3(WS, DW) >>>(
d_transp_pybar, d_transp_ezhat );
alg4_stage2_3<<<
dim3(1, algs.n_size), dim3(WS, std::min<int>(algs.m_size, DW)) >>>(
d_transp_pybar, d_transp_ezhat );
swap( d_transp_pybar, d_transp_py );
swap( d_transp_ezhat, d_transp_ez );
alg4_stage4<<<
dim3((algs.m_size+2-1)/2, algs.n_size), dim3(WS, DW) >>>(
d_transp_out, d_transp_py, d_transp_ez, d_pubar, d_evhat,
transp_out_height(algs.height) );
up_alg_setup( algs_transp );
alg4_stage5_6<<<
dim3(1, algs.m_size), dim3(WS, std::min<int>(algs.n_size, DW)) >>>(
d_pubar, d_evhat );
swap( d_pubar, d_pu );
swap( d_evhat, d_ev );
cudaUnbindTexture( t_in );
size_t offset;
cudaBindTexture2D( &offset, t_in, d_transp_out, algs.height, algs.width,
transp_out_height(algs.height)*sizeof(float) );
alg4_stage7<<<
dim3((algs.n_size+2-1)/2, algs.m_size), dim3(WS, DW) >>>(
d_out, d_pu, d_ev, algs.width );
swap( d_ev, d_evhat );
swap( d_pu, d_pubar );
swap( d_transp_ez, d_transp_ezhat );
swap( d_transp_py, d_transp_pybar );
cudaUnbindTexture( t_in );
}
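// Added note (not part of the original library): the host wrapper above runs
// the two passes of algorithm 4 back to back. Stages 1-4 filter one dimension
// while reading the input through t_in and writing a transposed intermediate
// (d_transp_out) plus the carries for the second pass; the texture is then
// rebound to that intermediate and stages 5-7 filter the remaining dimension
// into d_out, so a full 2D recursive filter costs two 1D passes plus the
// carry fix-up kernels.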
__host__
void alg4( float *h_inout,
const int& w,
const int& h,
const float& b0,
const float& a1,
const float& a2,
const int& extb,
const initcond& ic )
{
alg_setup algs, algs_transp;
dvector<float> d_out, d_transp_out;
dvector<float2> d_transp_pybar, d_transp_ezhat, d_pubar, d_evhat;
cudaArray *a_in;
prepare_alg4( algs, algs_transp, d_out, d_transp_out, d_transp_pybar,
d_transp_ezhat, d_pubar, d_evhat, a_in, h_inout, w, h,
b0, a1, a2, extb, ic );
alg4( d_out, d_transp_out, d_transp_pybar, d_transp_ezhat, d_pubar,
d_evhat, a_in, algs, algs_transp );
d_out.copy_to( h_inout, w * h );
cudaFreeArray( a_in );
}
//=============================================================================
} // namespace gpufilter
//=============================================================================
// vi: ai ts=4 sw=4
|
e9ae654f7bd4fb77c556acce55da41e0f5df31bf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// nvcc -o Blocks.dll --shared Blocks.cu
__global__ void mainImage(uchar4 *fragColor, float iTime)
{
asm volatile(
".reg .b16 %rs<5>;"
".reg .f32 %f<92>;"
".reg .b32 %r<16>;"
".reg .b64 %rd<5>;"
"ld.param.u64 %rd1, [_Z9mainImageP6uchar4f_param_0];"
"ld.param.f32 %f1, [_Z9mainImageP6uchar4f_param_1];"
"cvta.to.global.u64 %rd2, %rd1;"
"mov.u32 %r1, %ctaid.x;"
"mov.u32 %r2, %ntid.x;"
"mov.u32 %r3, %tid.x;"
"mad.lo.s32 %r4, %r2, %r1, %r3;"
"mov.u32 %r5, %ctaid.y;"
"mov.u32 %r6, %ntid.y;"
"mov.u32 %r7, %tid.y;"
"mad.lo.s32 %r8, %r6, %r5, %r7;"
"mov.u32 %r9, 4195327;"
"sub.s32 %r10, %r9, %r8;"
"shl.b32 %r11, %r10, 10;"
"add.s32 %r12, %r11, %r4;"
"cvt.rn.f32.u32 %f2, %r4;"
"cvt.rn.f32.u32 %f3, %r8;"
"mul.f32 %f4, %f3, 0f3A800000;"
"fma.rn.f32 %f5, %f4, 0f40000000, %f1;"
"add.f32 %f6, %f5, 0fBFC90FDA;"
"div.rn.f32 %f7, %f6, 0f40C90FDA;"
"cvt.rmi.f32.f32 %f8, %f7;"
"sub.f32 %f9, %f7, %f8;"
"fma.rn.f32 %f10, %f9, 0f40000000, 0fBF800000;"
"abs.f32 %f11, %f10;"
"mul.f32 %f12, %f11, %f11;"
"add.f32 %f13, %f11, %f11;"
"mov.f32 %f14, 0f40400000;"
"sub.f32 %f15, %f14, %f13;"
"mul.f32 %f16, %f12, %f15;"
"fma.rn.f32 %f17, %f16, 0f40000000, 0fBF800000;"
"div.rn.f32 %f18, %f17, 0f40A00000;"
"fma.rn.f32 %f19, %f2, 0f3A800000, %f18;"
"fma.rn.f32 %f20, %f19, 0f40000000, %f1;"
"add.f32 %f21, %f20, 0f3FC90FDB;"
"add.f32 %f22, %f21, 0fBFC90FDA;"
"div.rn.f32 %f23, %f22, 0f40C90FDA;"
"cvt.rmi.f32.f32 %f24, %f23;"
"sub.f32 %f25, %f23, %f24;"
"fma.rn.f32 %f26, %f25, 0f40000000, 0fBF800000;"
"abs.f32 %f27, %f26;"
"mul.f32 %f28, %f27, %f27;"
"add.f32 %f29, %f27, %f27;"
"sub.f32 %f30, %f14, %f29;"
"mul.f32 %f31, %f28, %f30;"
"fma.rn.f32 %f32, %f31, 0f40000000, 0fBF800000;"
"div.rn.f32 %f33, %f32, 0f40A00000;"
"add.f32 %f34, %f4, %f33;"
"mul.f32 %f35, %f19, 0f41800000;"
"cvt.rmi.f32.f32 %f36, %f35;"
"mul.f32 %f37, %f36, 0f3D800000;"
"mul.f32 %f38, %f34, 0f41800000;"
"cvt.rmi.f32.f32 %f39, %f38;"
"mul.f32 %f40, %f39, 0f3D800000;"
"mul.f32 %f41, %f40, 0f439BD99A;"
"fma.rn.f32 %f42, %f37, 0f42FE3333, %f41;"
"mul.f32 %f43, %f40, 0f43374CCD;"
"fma.rn.f32 %f44, %f37, 0f4386C000, %f43;"
"mul.f32 %f45, %f40, 0f43B9F333;"
"fma.rn.f32 %f46, %f37, 0f43D1999A, %f45;"
"add.f32 %f47, %f42, 0fBFC90FDA;"
"div.rn.f32 %f48, %f47, 0f40C90FDA;"
"cvt.rmi.f32.f32 %f49, %f48;"
"sub.f32 %f50, %f48, %f49;"
"fma.rn.f32 %f51, %f50, 0f40000000, 0fBF800000;"
"abs.f32 %f52, %f51;"
"mul.f32 %f53, %f52, %f52;"
"add.f32 %f54, %f52, %f52;"
"sub.f32 %f55, %f14, %f54;"
"mul.f32 %f56, %f53, %f55;"
"fma.rn.f32 %f57, %f56, 0f40000000, 0fBF800000;"
"mul.f32 %f58, %f57, 0f472AEE8C;"
"add.f32 %f59, %f44, 0fBFC90FDA;"
"div.rn.f32 %f60, %f59, 0f40C90FDA;"
"cvt.rmi.f32.f32 %f61, %f60;"
"sub.f32 %f62, %f60, %f61;"
"fma.rn.f32 %f63, %f62, 0f40000000, 0fBF800000;"
"abs.f32 %f64, %f63;"
"mul.f32 %f65, %f64, %f64;"
"add.f32 %f66, %f64, %f64;"
"sub.f32 %f67, %f14, %f66;"
"mul.f32 %f68, %f65, %f67;"
"fma.rn.f32 %f69, %f68, 0f40000000, 0fBF800000;"
"mul.f32 %f70, %f69, 0f472AEE8C;"
"add.f32 %f71, %f46, 0fBFC90FDA;"
"div.rn.f32 %f72, %f71, 0f40C90FDA;"
"cvt.rmi.f32.f32 %f73, %f72;"
"sub.f32 %f74, %f72, %f73;"
"fma.rn.f32 %f75, %f74, 0f40000000, 0fBF800000;"
"abs.f32 %f76, %f75;"
"mul.f32 %f77, %f76, %f76;"
"add.f32 %f78, %f76, %f76;"
"sub.f32 %f79, %f14, %f78;"
"mul.f32 %f80, %f77, %f79;"
"fma.rn.f32 %f81, %f80, 0f40000000, 0fBF800000;"
"mul.f32 %f82, %f81, 0f472AEE8C;"
"cvt.rmi.f32.f32 %f83, %f58;"
"sub.f32 %f84, %f58, %f83;"
"mul.f32 %f85, %f84, 0f437F0000;"
"cvt.rzi.u32.f32 %r13, %f85;"
"cvt.rmi.f32.f32 %f86, %f70;"
"sub.f32 %f87, %f70, %f86;"
"mul.f32 %f88, %f87, 0f437F0000;"
"cvt.rzi.u32.f32 %r14, %f88;"
"cvt.rmi.f32.f32 %f89, %f82;"
"sub.f32 %f90, %f82, %f89;"
"mul.f32 %f91, %f90, 0f437F0000;"
"cvt.rzi.u32.f32 %r15, %f91;"
"mul.wide.u32 %rd3, %r12, 4;"
"add.s64 %rd4, %rd2, %rd3;"
"cvt.u16.u32 %rs1, %r15;"
"cvt.u16.u32 %rs2, %r14;"
"cvt.u16.u32 %rs3, %r13;"
"mov.u16 %rs4, 255;"
"st.global.v4.u8 [%rd4], {%rs3, %rs2, %rs1, %rs4};"
"ret;"
);
}
extern "C"
{
__declspec(dllexport) unsigned char* Render(float time);
__declspec(dllexport) void Clear();
unsigned char* host;
uchar4 *device;
unsigned char* Render(float time) //render procedural image
{
host = (unsigned char*) malloc(1024*1024*sizeof(uchar4));
hipMalloc((void**)&device, 1024*1024*sizeof(uchar4));
dim3 block(8, 8);
dim3 grid(128, 128);
hipLaunchKernelGGL(( mainImage), dim3(grid), dim3(block), 0, 0, device, time);
hipDeviceSynchronize();
hipMemcpy(host, device, 1024 * 1024 * sizeof(uchar4), hipMemcpyDeviceToHost);
return host;
}
void Clear()
{
free(host);
hipFree(device);
}
}
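// Added usage sketch (not part of the original file): Render() allocates a new
// host buffer and a new device buffer on every call, so Clear() should follow
// each frame to avoid leaking both. A hypothetical single-frame driver using
// the exports above could look like this:
static void renderOneFrameSketch(float time)
{
    unsigned char* pixels = Render(time); // 1024x1024 RGBA8 pixels in host memory
    // ... consume `pixels` here (write to a file, upload to a texture, ...) ...
    (void)pixels;
    Clear(); // frees the host buffer and the device buffer
}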
|
e9ae654f7bd4fb77c556acce55da41e0f5df31bf.cu
|
// nvcc -o Blocks.dll --shared Blocks.cu
__global__ void mainImage(uchar4 *fragColor, float iTime)
{
asm volatile(
".reg .b16 %rs<5>;"
".reg .f32 %f<92>;"
".reg .b32 %r<16>;"
".reg .b64 %rd<5>;"
"ld.param.u64 %rd1, [_Z9mainImageP6uchar4f_param_0];"
"ld.param.f32 %f1, [_Z9mainImageP6uchar4f_param_1];"
"cvta.to.global.u64 %rd2, %rd1;"
"mov.u32 %r1, %ctaid.x;"
"mov.u32 %r2, %ntid.x;"
"mov.u32 %r3, %tid.x;"
"mad.lo.s32 %r4, %r2, %r1, %r3;"
"mov.u32 %r5, %ctaid.y;"
"mov.u32 %r6, %ntid.y;"
"mov.u32 %r7, %tid.y;"
"mad.lo.s32 %r8, %r6, %r5, %r7;"
"mov.u32 %r9, 4195327;"
"sub.s32 %r10, %r9, %r8;"
"shl.b32 %r11, %r10, 10;"
"add.s32 %r12, %r11, %r4;"
"cvt.rn.f32.u32 %f2, %r4;"
"cvt.rn.f32.u32 %f3, %r8;"
"mul.f32 %f4, %f3, 0f3A800000;"
"fma.rn.f32 %f5, %f4, 0f40000000, %f1;"
"add.f32 %f6, %f5, 0fBFC90FDA;"
"div.rn.f32 %f7, %f6, 0f40C90FDA;"
"cvt.rmi.f32.f32 %f8, %f7;"
"sub.f32 %f9, %f7, %f8;"
"fma.rn.f32 %f10, %f9, 0f40000000, 0fBF800000;"
"abs.f32 %f11, %f10;"
"mul.f32 %f12, %f11, %f11;"
"add.f32 %f13, %f11, %f11;"
"mov.f32 %f14, 0f40400000;"
"sub.f32 %f15, %f14, %f13;"
"mul.f32 %f16, %f12, %f15;"
"fma.rn.f32 %f17, %f16, 0f40000000, 0fBF800000;"
"div.rn.f32 %f18, %f17, 0f40A00000;"
"fma.rn.f32 %f19, %f2, 0f3A800000, %f18;"
"fma.rn.f32 %f20, %f19, 0f40000000, %f1;"
"add.f32 %f21, %f20, 0f3FC90FDB;"
"add.f32 %f22, %f21, 0fBFC90FDA;"
"div.rn.f32 %f23, %f22, 0f40C90FDA;"
"cvt.rmi.f32.f32 %f24, %f23;"
"sub.f32 %f25, %f23, %f24;"
"fma.rn.f32 %f26, %f25, 0f40000000, 0fBF800000;"
"abs.f32 %f27, %f26;"
"mul.f32 %f28, %f27, %f27;"
"add.f32 %f29, %f27, %f27;"
"sub.f32 %f30, %f14, %f29;"
"mul.f32 %f31, %f28, %f30;"
"fma.rn.f32 %f32, %f31, 0f40000000, 0fBF800000;"
"div.rn.f32 %f33, %f32, 0f40A00000;"
"add.f32 %f34, %f4, %f33;"
"mul.f32 %f35, %f19, 0f41800000;"
"cvt.rmi.f32.f32 %f36, %f35;"
"mul.f32 %f37, %f36, 0f3D800000;"
"mul.f32 %f38, %f34, 0f41800000;"
"cvt.rmi.f32.f32 %f39, %f38;"
"mul.f32 %f40, %f39, 0f3D800000;"
"mul.f32 %f41, %f40, 0f439BD99A;"
"fma.rn.f32 %f42, %f37, 0f42FE3333, %f41;"
"mul.f32 %f43, %f40, 0f43374CCD;"
"fma.rn.f32 %f44, %f37, 0f4386C000, %f43;"
"mul.f32 %f45, %f40, 0f43B9F333;"
"fma.rn.f32 %f46, %f37, 0f43D1999A, %f45;"
"add.f32 %f47, %f42, 0fBFC90FDA;"
"div.rn.f32 %f48, %f47, 0f40C90FDA;"
"cvt.rmi.f32.f32 %f49, %f48;"
"sub.f32 %f50, %f48, %f49;"
"fma.rn.f32 %f51, %f50, 0f40000000, 0fBF800000;"
"abs.f32 %f52, %f51;"
"mul.f32 %f53, %f52, %f52;"
"add.f32 %f54, %f52, %f52;"
"sub.f32 %f55, %f14, %f54;"
"mul.f32 %f56, %f53, %f55;"
"fma.rn.f32 %f57, %f56, 0f40000000, 0fBF800000;"
"mul.f32 %f58, %f57, 0f472AEE8C;"
"add.f32 %f59, %f44, 0fBFC90FDA;"
"div.rn.f32 %f60, %f59, 0f40C90FDA;"
"cvt.rmi.f32.f32 %f61, %f60;"
"sub.f32 %f62, %f60, %f61;"
"fma.rn.f32 %f63, %f62, 0f40000000, 0fBF800000;"
"abs.f32 %f64, %f63;"
"mul.f32 %f65, %f64, %f64;"
"add.f32 %f66, %f64, %f64;"
"sub.f32 %f67, %f14, %f66;"
"mul.f32 %f68, %f65, %f67;"
"fma.rn.f32 %f69, %f68, 0f40000000, 0fBF800000;"
"mul.f32 %f70, %f69, 0f472AEE8C;"
"add.f32 %f71, %f46, 0fBFC90FDA;"
"div.rn.f32 %f72, %f71, 0f40C90FDA;"
"cvt.rmi.f32.f32 %f73, %f72;"
"sub.f32 %f74, %f72, %f73;"
"fma.rn.f32 %f75, %f74, 0f40000000, 0fBF800000;"
"abs.f32 %f76, %f75;"
"mul.f32 %f77, %f76, %f76;"
"add.f32 %f78, %f76, %f76;"
"sub.f32 %f79, %f14, %f78;"
"mul.f32 %f80, %f77, %f79;"
"fma.rn.f32 %f81, %f80, 0f40000000, 0fBF800000;"
"mul.f32 %f82, %f81, 0f472AEE8C;"
"cvt.rmi.f32.f32 %f83, %f58;"
"sub.f32 %f84, %f58, %f83;"
"mul.f32 %f85, %f84, 0f437F0000;"
"cvt.rzi.u32.f32 %r13, %f85;"
"cvt.rmi.f32.f32 %f86, %f70;"
"sub.f32 %f87, %f70, %f86;"
"mul.f32 %f88, %f87, 0f437F0000;"
"cvt.rzi.u32.f32 %r14, %f88;"
"cvt.rmi.f32.f32 %f89, %f82;"
"sub.f32 %f90, %f82, %f89;"
"mul.f32 %f91, %f90, 0f437F0000;"
"cvt.rzi.u32.f32 %r15, %f91;"
"mul.wide.u32 %rd3, %r12, 4;"
"add.s64 %rd4, %rd2, %rd3;"
"cvt.u16.u32 %rs1, %r15;"
"cvt.u16.u32 %rs2, %r14;"
"cvt.u16.u32 %rs3, %r13;"
"mov.u16 %rs4, 255;"
"st.global.v4.u8 [%rd4], {%rs3, %rs2, %rs1, %rs4};"
"ret;"
);
}
extern "C"
{
__declspec(dllexport) unsigned char* Render(float time);
__declspec(dllexport) void Clear();
unsigned char* host;
uchar4 *device;
unsigned char* Render(float time) //render procedural image
{
host = (unsigned char*) malloc(1024*1024*sizeof(uchar4));
cudaMalloc((void**)&device, 1024*1024*sizeof(uchar4));
dim3 block(8, 8);
dim3 grid(128, 128);
mainImage<<<grid, block>>>(device, time);
cudaDeviceSynchronize();
cudaMemcpy(host, device, 1024 * 1024 * sizeof(uchar4), cudaMemcpyDeviceToHost);
return host;
}
void Clear()
{
free(host);
cudaFree(device);
}
}
|
c18735329baff559549c64d08bd4b8e0371af742.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* calcTurbulentViscositySO.cu
*
* Created on: 10-04-2015
* Author: Kamil Szewc
*
*/
#include "../../sph.h"
#include "../../hlp.h"
__global__ void calcTurbulentViscositySO(Particle *p, Parameters *par)
{
unsigned int tid = threadIdx.x + blockIdx.x*blockDim.x;
while (tid < par->N) {
if (p[tid].phaseType == 0)
{
p[tid].nut = pow2(0.12 * par->DR) * sqrt(2.0) * sqrt( pow2(p[tid].str.x) + pow2(p[tid].str.y) + pow2(p[tid].str.z) + pow2(p[tid].str.w) );
}
tid += blockDim.x * gridDim.x;
}
}
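// Added note (not part of the original file): the expression above has the
// shape of a Smagorinsky-type eddy viscosity, nu_t = (C * delta)^2 * |S| with
// |S| = sqrt(2 * S_ij S_ij), where the constant C appears to be 0.12 and the
// filter width delta the particle spacing par->DR; str.x..str.w would then
// hold the strain-rate tensor components. This reading is an assumption based
// on the formula alone.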
|
c18735329baff559549c64d08bd4b8e0371af742.cu
|
/*
* calcTurbulentViscositySO.cu
*
* Created on: 10-04-2015
* Author: Kamil Szewc
*
*/
#include "../../sph.h"
#include "../../hlp.h"
__global__ void calcTurbulentViscositySO(Particle *p, Parameters *par)
{
unsigned int tid = threadIdx.x + blockIdx.x*blockDim.x;
while (tid < par->N) {
if (p[tid].phaseType == 0)
{
p[tid].nut = pow2(0.12 * par->DR) * sqrt(2.0) * sqrt( pow2(p[tid].str.x) + pow2(p[tid].str.y) + pow2(p[tid].str.z) + pow2(p[tid].str.w) );
}
tid += blockDim.x * gridDim.x;
}
}
|
efd7adef59e4a782c7c0c4c62b327e111008fb34.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2022 The Regents of the University of Michigan.
// Part of HOOMD-blue, released under the BSD 3-Clause License.
#include "ParticleData.cuh"
/*! \file ParticleData.cu
    \brief Implements GPU kernel code and data structure functions used by ParticleData
*/
#ifdef ENABLE_MPI
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#include <hipcub/hipcub.hpp>
#include <thrust/device_ptr.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/scatter.h>
#pragma GCC diagnostic pop
namespace hoomd
{
namespace kernel
{
//! Kernel to partition particle data
__global__ void gpu_scatter_particle_data_kernel(const unsigned int nwork,
const Scalar4* d_pos,
const Scalar4* d_vel,
const Scalar3* d_accel,
const Scalar* d_charge,
const Scalar* d_diameter,
const int3* d_image,
const unsigned int* d_body,
const Scalar4* d_orientation,
const Scalar4* d_angmom,
const Scalar3* d_inertia,
const Scalar4* d_net_force,
const Scalar4* d_net_torque,
const Scalar* d_net_virial,
unsigned int net_virial_pitch,
const unsigned int* d_tag,
unsigned int* d_rtag,
Scalar4* d_pos_alt,
Scalar4* d_vel_alt,
Scalar3* d_accel_alt,
Scalar* d_charge_alt,
Scalar* d_diameter_alt,
int3* d_image_alt,
unsigned int* d_body_alt,
Scalar4* d_orientation_alt,
Scalar4* d_angmom_alt,
Scalar3* d_inertia_alt,
Scalar4* d_net_force_alt,
Scalar4* d_net_torque_alt,
Scalar* d_net_virial_alt,
unsigned int* d_tag_alt,
detail::pdata_element* d_out,
unsigned int* d_comm_flags,
unsigned int* d_comm_flags_out,
const unsigned int* d_scan,
const unsigned int offset)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= nwork)
return;
idx += offset;
bool remove = d_comm_flags[idx];
unsigned int scan_remove = d_scan[idx];
unsigned int scan_keep = idx - scan_remove;
if (remove)
{
detail::pdata_element p;
p.pos = d_pos[idx];
p.vel = d_vel[idx];
p.accel = d_accel[idx];
p.charge = d_charge[idx];
p.diameter = d_diameter[idx];
p.image = d_image[idx];
p.body = d_body[idx];
p.orientation = d_orientation[idx];
p.angmom = d_angmom[idx];
p.inertia = d_inertia[idx];
p.net_force = d_net_force[idx];
p.net_torque = d_net_torque[idx];
for (unsigned int j = 0; j < 6; ++j)
p.net_virial[j] = d_net_virial[j * net_virial_pitch + idx];
p.tag = d_tag[idx];
d_out[scan_remove] = p;
d_comm_flags_out[scan_remove] = d_comm_flags[idx];
// reset communication flags
d_comm_flags[idx] = 0;
// reset rtag
d_rtag[p.tag] = NOT_LOCAL;
}
else
{
d_pos_alt[scan_keep] = d_pos[idx];
d_vel_alt[scan_keep] = d_vel[idx];
d_accel_alt[scan_keep] = d_accel[idx];
d_charge_alt[scan_keep] = d_charge[idx];
d_diameter_alt[scan_keep] = d_diameter[idx];
d_image_alt[scan_keep] = d_image[idx];
d_body_alt[scan_keep] = d_body[idx];
d_orientation_alt[scan_keep] = d_orientation[idx];
d_angmom_alt[scan_keep] = d_angmom[idx];
d_inertia_alt[scan_keep] = d_inertia[idx];
d_net_force_alt[scan_keep] = d_net_force[idx];
d_net_torque_alt[scan_keep] = d_net_torque[idx];
for (unsigned int j = 0; j < 6; ++j)
d_net_virial_alt[j * net_virial_pitch + scan_keep]
= d_net_virial[j * net_virial_pitch + idx];
unsigned int tag = d_tag[idx];
d_tag_alt[scan_keep] = tag;
// update rtag
d_rtag[tag] = scan_keep;
}
}
__global__ void
gpu_select_sent_particles(unsigned int N, unsigned int* d_comm_flags, unsigned int* d_tmp)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
d_tmp[idx] = d_comm_flags[idx] ? 1 : 0;
}
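// Added worked example (not part of the original source) for the compaction
// scheme used by the two kernels above: with comm flags [0, 1, 0, 1, 1] the
// 0/1 array d_tmp is [0, 1, 0, 1, 1] and its exclusive prefix sum d_scan is
// [0, 0, 1, 1, 2]. A removed particle at index idx is written to slot
// scan_remove = d_scan[idx] of the packed output (indices 1, 3, 4 land in
// slots 0, 1, 2), while a kept particle is written to slot idx - d_scan[idx]
// of the *_alt arrays (indices 0, 2 land in slots 0, 1). Both outputs are
// therefore dense and order-preserving without any atomics.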
/*! \param N Number of local particles
\param d_pos Device array of particle positions
\param d_vel Device array of particle velocities
\param d_accel Device array of particle accelerations
\param d_charge Device array of particle charges
\param d_diameter Device array of particle diameters
\param d_image Device array of particle images
\param d_body Device array of particle body tags
\param d_orientation Device array of particle orientations
\param d_angmom Device array of particle angular momenta
\param d_inertia Device array of particle moments of inertia
\param d_net_force Net force
\param d_net_torque Net torque
\param d_net_virial Net virial
\param net_virial_pitch Pitch of net virial array
\param d_tag Device array of particle tags
\param d_rtag Device array for reverse-lookup table
\param d_pos_alt Device array of particle positions (output)
\param d_vel_alt Device array of particle velocities (output)
\param d_accel_alt Device array of particle accelerations (output)
\param d_charge_alt Device array of particle charges (output)
\param d_diameter_alt Device array of particle diameters (output)
\param d_image_alt Device array of particle images (output)
\param d_body_alt Device array of particle body tags (output)
\param d_orientation_alt Device array of particle orientations (output)
\param d_angmom_alt Device array of particle angular momenta (output)
    \param d_inertia_alt Device array of particle moments of inertia (output)
    \param d_net_force_alt Net force (output)
    \param d_net_torque_alt Net torque (output)
    \param d_net_virial_alt Net virial (output)
\param d_out Output array for packed particle data
\param max_n_out Maximum number of elements to write to output array
\returns Number of elements marked for removal
*/
unsigned int gpu_pdata_remove(const unsigned int N,
const Scalar4* d_pos,
const Scalar4* d_vel,
const Scalar3* d_accel,
const Scalar* d_charge,
const Scalar* d_diameter,
const int3* d_image,
const unsigned int* d_body,
const Scalar4* d_orientation,
const Scalar4* d_angmom,
const Scalar3* d_inertia,
const Scalar4* d_net_force,
const Scalar4* d_net_torque,
const Scalar* d_net_virial,
unsigned int net_virial_pitch,
const unsigned int* d_tag,
unsigned int* d_rtag,
Scalar4* d_pos_alt,
Scalar4* d_vel_alt,
Scalar3* d_accel_alt,
Scalar* d_charge_alt,
Scalar* d_diameter_alt,
int3* d_image_alt,
unsigned int* d_body_alt,
Scalar4* d_orientation_alt,
Scalar4* d_angmom_alt,
Scalar3* d_inertia_alt,
Scalar4* d_net_force_alt,
Scalar4* d_net_torque_alt,
Scalar* d_net_virial_alt,
unsigned int* d_tag_alt,
detail::pdata_element* d_out,
unsigned int* d_comm_flags,
unsigned int* d_comm_flags_out,
unsigned int max_n_out,
unsigned int* d_tmp,
CachedAllocator& alloc,
GPUPartition& gpu_partition)
{
if (!N)
return 0;
assert(d_pos);
assert(d_vel);
assert(d_accel);
assert(d_charge);
assert(d_diameter);
assert(d_image);
assert(d_body);
assert(d_orientation);
assert(d_angmom);
assert(d_inertia);
assert(d_net_force);
assert(d_net_torque);
assert(d_net_virial);
assert(d_tag);
assert(d_rtag);
assert(d_pos_alt);
assert(d_vel_alt);
assert(d_accel_alt);
assert(d_charge_alt);
assert(d_diameter_alt);
assert(d_image_alt);
assert(d_body_alt);
assert(d_orientation_alt);
assert(d_angmom_alt);
assert(d_inertia_alt);
assert(d_net_force_alt);
assert(d_net_torque_alt);
assert(d_net_virial_alt);
assert(d_tag_alt);
assert(d_out);
assert(d_comm_flags);
assert(d_comm_flags_out);
assert(d_tmp);
unsigned int n_out;
// partition particle data into local and removed particles
unsigned int block_size = 256;
unsigned int n_blocks = N / block_size + 1;
// select nonzero communication flags
hipLaunchKernelGGL(gpu_select_sent_particles,
dim3(n_blocks),
dim3(block_size),
0,
0,
N,
d_comm_flags,
d_tmp);
// perform a scan over the array of ones and zeroes
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
// determine size of temporary storage
unsigned int* d_scan = alloc.getTemporaryBuffer<unsigned int>(N);
assert(d_scan);
hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_tmp, d_scan, N);
d_temp_storage = alloc.getTemporaryBuffer<char>(temp_storage_bytes);
hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_tmp, d_scan, N);
alloc.deallocate((char*)d_temp_storage);
// determine total number of sent particles
d_temp_storage = NULL;
temp_storage_bytes = 0;
unsigned int* d_n_out = (unsigned int*)alloc.getTemporaryBuffer<unsigned int>(1);
assert(d_n_out);
hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_tmp, d_n_out, N);
d_temp_storage = alloc.allocate(temp_storage_bytes);
hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_tmp, d_n_out, N);
alloc.deallocate((char*)d_temp_storage);
hipMemcpy(&n_out, d_n_out, sizeof(unsigned int), hipMemcpyDeviceToHost);
alloc.deallocate((char*)d_n_out);
// Don't write past end of buffer
if (n_out <= max_n_out)
{
// partition particle data into local and removed particles
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
unsigned int nwork = range.second - range.first;
unsigned int offset = range.first;
unsigned int block_size = 256;
unsigned int n_blocks = nwork / block_size + 1;
hipLaunchKernelGGL(gpu_scatter_particle_data_kernel,
dim3(n_blocks),
dim3(block_size),
0,
0,
nwork,
d_pos,
d_vel,
d_accel,
d_charge,
d_diameter,
d_image,
d_body,
d_orientation,
d_angmom,
d_inertia,
d_net_force,
d_net_torque,
d_net_virial,
net_virial_pitch,
d_tag,
d_rtag,
d_pos_alt,
d_vel_alt,
d_accel_alt,
d_charge_alt,
d_diameter_alt,
d_image_alt,
d_body_alt,
d_orientation_alt,
d_angmom_alt,
d_inertia_alt,
d_net_force_alt,
d_net_torque_alt,
d_net_virial_alt,
d_tag_alt,
d_out,
d_comm_flags,
d_comm_flags_out,
d_scan,
offset);
}
}
// free temp buf
alloc.deallocate((char*)d_scan);
// return elements written to output stream
return n_out;
}
__global__ void gpu_pdata_add_particles_kernel(unsigned int old_nparticles,
unsigned int num_add_ptls,
Scalar4* d_pos,
Scalar4* d_vel,
Scalar3* d_accel,
Scalar* d_charge,
Scalar* d_diameter,
int3* d_image,
unsigned int* d_body,
Scalar4* d_orientation,
Scalar4* d_angmom,
Scalar3* d_inertia,
Scalar4* d_net_force,
Scalar4* d_net_torque,
Scalar* d_net_virial,
unsigned int net_virial_pitch,
unsigned int* d_tag,
unsigned int* d_rtag,
const detail::pdata_element* d_in,
unsigned int* d_comm_flags)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_add_ptls)
return;
detail::pdata_element p = d_in[idx];
unsigned int add_idx = old_nparticles + idx;
d_pos[add_idx] = p.pos;
d_vel[add_idx] = p.vel;
d_accel[add_idx] = p.accel;
d_charge[add_idx] = p.charge;
d_diameter[add_idx] = p.diameter;
d_image[add_idx] = p.image;
d_body[add_idx] = p.body;
d_orientation[add_idx] = p.orientation;
d_angmom[add_idx] = p.angmom;
d_inertia[add_idx] = p.inertia;
d_net_force[add_idx] = p.net_force;
d_net_torque[add_idx] = p.net_torque;
for (unsigned int j = 0; j < 6; ++j)
d_net_virial[j * net_virial_pitch + add_idx] = p.net_virial[j];
d_tag[add_idx] = p.tag;
d_rtag[p.tag] = add_idx;
d_comm_flags[add_idx] = 0;
}
/*! \param old_nparticles old local particle count
\param num_add_ptls Number of particles in input array
\param d_pos Device array of particle positions
    \param d_vel Device array of particle velocities
\param d_accel Device array of particle accelerations
\param d_charge Device array of particle charges
\param d_diameter Device array of particle diameters
\param d_image Device array of particle images
\param d_body Device array of particle body tags
\param d_orientation Device array of particle orientations
\param d_angmom Device array of particle angular momenta
\param d_inertia Device array of particle moments of inertia
\param d_net_force Net force
\param d_net_torque Net torque
\param d_net_virial Net virial
\param d_tag Device array of particle tags
\param d_rtag Device array for reverse-lookup table
\param d_in Device array of packed input particle data
\param d_comm_flags Device array of communication flags (pdata)
*/
void gpu_pdata_add_particles(const unsigned int old_nparticles,
const unsigned int num_add_ptls,
Scalar4* d_pos,
Scalar4* d_vel,
Scalar3* d_accel,
Scalar* d_charge,
Scalar* d_diameter,
int3* d_image,
unsigned int* d_body,
Scalar4* d_orientation,
Scalar4* d_angmom,
Scalar3* d_inertia,
Scalar4* d_net_force,
Scalar4* d_net_torque,
Scalar* d_net_virial,
unsigned int net_virial_pitch,
unsigned int* d_tag,
unsigned int* d_rtag,
const detail::pdata_element* d_in,
unsigned int* d_comm_flags)
{
assert(d_pos);
assert(d_vel);
assert(d_accel);
assert(d_charge);
assert(d_diameter);
assert(d_image);
assert(d_body);
assert(d_orientation);
assert(d_angmom);
assert(d_inertia);
assert(d_net_force);
assert(d_net_torque);
assert(d_net_virial);
assert(d_tag);
assert(d_rtag);
assert(d_in);
unsigned int block_size = 256;
unsigned int n_blocks = num_add_ptls / block_size + 1;
hipLaunchKernelGGL(gpu_pdata_add_particles_kernel,
dim3(n_blocks),
dim3(block_size),
0,
0,
old_nparticles,
num_add_ptls,
d_pos,
d_vel,
d_accel,
d_charge,
d_diameter,
d_image,
d_body,
d_orientation,
d_angmom,
d_inertia,
d_net_force,
d_net_torque,
d_net_virial,
net_virial_pitch,
d_tag,
d_rtag,
d_in,
d_comm_flags);
}
} // end namespace kernel
} // end namespace hoomd
#endif // ENABLE_MPI
|
efd7adef59e4a782c7c0c4c62b327e111008fb34.cu
|
// Copyright (c) 2009-2022 The Regents of the University of Michigan.
// Part of HOOMD-blue, released under the BSD 3-Clause License.
#include "ParticleData.cuh"
/*! \file ParticleData.cu
    \brief Implements GPU kernel code and data structure functions used by ParticleData
*/
#ifdef ENABLE_MPI
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#include <hipcub/hipcub.hpp>
#include <thrust/device_ptr.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/scatter.h>
#pragma GCC diagnostic pop
namespace hoomd
{
namespace kernel
{
//! Kernel to partition particle data
__global__ void gpu_scatter_particle_data_kernel(const unsigned int nwork,
const Scalar4* d_pos,
const Scalar4* d_vel,
const Scalar3* d_accel,
const Scalar* d_charge,
const Scalar* d_diameter,
const int3* d_image,
const unsigned int* d_body,
const Scalar4* d_orientation,
const Scalar4* d_angmom,
const Scalar3* d_inertia,
const Scalar4* d_net_force,
const Scalar4* d_net_torque,
const Scalar* d_net_virial,
unsigned int net_virial_pitch,
const unsigned int* d_tag,
unsigned int* d_rtag,
Scalar4* d_pos_alt,
Scalar4* d_vel_alt,
Scalar3* d_accel_alt,
Scalar* d_charge_alt,
Scalar* d_diameter_alt,
int3* d_image_alt,
unsigned int* d_body_alt,
Scalar4* d_orientation_alt,
Scalar4* d_angmom_alt,
Scalar3* d_inertia_alt,
Scalar4* d_net_force_alt,
Scalar4* d_net_torque_alt,
Scalar* d_net_virial_alt,
unsigned int* d_tag_alt,
detail::pdata_element* d_out,
unsigned int* d_comm_flags,
unsigned int* d_comm_flags_out,
const unsigned int* d_scan,
const unsigned int offset)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= nwork)
return;
idx += offset;
bool remove = d_comm_flags[idx];
unsigned int scan_remove = d_scan[idx];
unsigned int scan_keep = idx - scan_remove;
if (remove)
{
detail::pdata_element p;
p.pos = d_pos[idx];
p.vel = d_vel[idx];
p.accel = d_accel[idx];
p.charge = d_charge[idx];
p.diameter = d_diameter[idx];
p.image = d_image[idx];
p.body = d_body[idx];
p.orientation = d_orientation[idx];
p.angmom = d_angmom[idx];
p.inertia = d_inertia[idx];
p.net_force = d_net_force[idx];
p.net_torque = d_net_torque[idx];
for (unsigned int j = 0; j < 6; ++j)
p.net_virial[j] = d_net_virial[j * net_virial_pitch + idx];
p.tag = d_tag[idx];
d_out[scan_remove] = p;
d_comm_flags_out[scan_remove] = d_comm_flags[idx];
// reset communication flags
d_comm_flags[idx] = 0;
// reset rtag
d_rtag[p.tag] = NOT_LOCAL;
}
else
{
d_pos_alt[scan_keep] = d_pos[idx];
d_vel_alt[scan_keep] = d_vel[idx];
d_accel_alt[scan_keep] = d_accel[idx];
d_charge_alt[scan_keep] = d_charge[idx];
d_diameter_alt[scan_keep] = d_diameter[idx];
d_image_alt[scan_keep] = d_image[idx];
d_body_alt[scan_keep] = d_body[idx];
d_orientation_alt[scan_keep] = d_orientation[idx];
d_angmom_alt[scan_keep] = d_angmom[idx];
d_inertia_alt[scan_keep] = d_inertia[idx];
d_net_force_alt[scan_keep] = d_net_force[idx];
d_net_torque_alt[scan_keep] = d_net_torque[idx];
for (unsigned int j = 0; j < 6; ++j)
d_net_virial_alt[j * net_virial_pitch + scan_keep]
= d_net_virial[j * net_virial_pitch + idx];
unsigned int tag = d_tag[idx];
d_tag_alt[scan_keep] = tag;
// update rtag
d_rtag[tag] = scan_keep;
}
}
__global__ void
gpu_select_sent_particles(unsigned int N, unsigned int* d_comm_flags, unsigned int* d_tmp)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
d_tmp[idx] = d_comm_flags[idx] ? 1 : 0;
}
/*! \param N Number of local particles
\param d_pos Device array of particle positions
\param d_vel Device array of particle velocities
\param d_accel Device array of particle accelerations
\param d_charge Device array of particle charges
\param d_diameter Device array of particle diameters
\param d_image Device array of particle images
\param d_body Device array of particle body tags
\param d_orientation Device array of particle orientations
\param d_angmom Device array of particle angular momenta
\param d_inertia Device array of particle moments of inertia
\param d_net_force Net force
\param d_net_torque Net torque
\param d_net_virial Net virial
\param net_virial_pitch Pitch of net virial array
\param d_tag Device array of particle tags
\param d_rtag Device array for reverse-lookup table
\param d_pos_alt Device array of particle positions (output)
\param d_vel_alt Device array of particle velocities (output)
\param d_accel_alt Device array of particle accelerations (output)
\param d_charge_alt Device array of particle charges (output)
\param d_diameter_alt Device array of particle diameters (output)
\param d_image_alt Device array of particle images (output)
\param d_body_alt Device array of particle body tags (output)
\param d_orientation_alt Device array of particle orientations (output)
\param d_angmom_alt Device array of particle angular momenta (output)
    \param d_inertia_alt Device array of particle moments of inertia (output)
    \param d_net_force_alt Net force (output)
    \param d_net_torque_alt Net torque (output)
    \param d_net_virial_alt Net virial (output)
\param d_out Output array for packed particle data
\param max_n_out Maximum number of elements to write to output array
\returns Number of elements marked for removal
*/
unsigned int gpu_pdata_remove(const unsigned int N,
const Scalar4* d_pos,
const Scalar4* d_vel,
const Scalar3* d_accel,
const Scalar* d_charge,
const Scalar* d_diameter,
const int3* d_image,
const unsigned int* d_body,
const Scalar4* d_orientation,
const Scalar4* d_angmom,
const Scalar3* d_inertia,
const Scalar4* d_net_force,
const Scalar4* d_net_torque,
const Scalar* d_net_virial,
unsigned int net_virial_pitch,
const unsigned int* d_tag,
unsigned int* d_rtag,
Scalar4* d_pos_alt,
Scalar4* d_vel_alt,
Scalar3* d_accel_alt,
Scalar* d_charge_alt,
Scalar* d_diameter_alt,
int3* d_image_alt,
unsigned int* d_body_alt,
Scalar4* d_orientation_alt,
Scalar4* d_angmom_alt,
Scalar3* d_inertia_alt,
Scalar4* d_net_force_alt,
Scalar4* d_net_torque_alt,
Scalar* d_net_virial_alt,
unsigned int* d_tag_alt,
detail::pdata_element* d_out,
unsigned int* d_comm_flags,
unsigned int* d_comm_flags_out,
unsigned int max_n_out,
unsigned int* d_tmp,
CachedAllocator& alloc,
GPUPartition& gpu_partition)
{
if (!N)
return 0;
assert(d_pos);
assert(d_vel);
assert(d_accel);
assert(d_charge);
assert(d_diameter);
assert(d_image);
assert(d_body);
assert(d_orientation);
assert(d_angmom);
assert(d_inertia);
assert(d_net_force);
assert(d_net_torque);
assert(d_net_virial);
assert(d_tag);
assert(d_rtag);
assert(d_pos_alt);
assert(d_vel_alt);
assert(d_accel_alt);
assert(d_charge_alt);
assert(d_diameter_alt);
assert(d_image_alt);
assert(d_body_alt);
assert(d_orientation_alt);
assert(d_angmom_alt);
assert(d_inertia_alt);
assert(d_net_force_alt);
assert(d_net_torque_alt);
assert(d_net_virial_alt);
assert(d_tag_alt);
assert(d_out);
assert(d_comm_flags);
assert(d_comm_flags_out);
assert(d_tmp);
unsigned int n_out;
// partition particle data into local and removed particles
unsigned int block_size = 256;
unsigned int n_blocks = N / block_size + 1;
// select nonzero communication flags
hipLaunchKernelGGL(gpu_select_sent_particles,
dim3(n_blocks),
dim3(block_size),
0,
0,
N,
d_comm_flags,
d_tmp);
// perform a scan over the array of ones and zeroes
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
// determine size of temporary storage
unsigned int* d_scan = alloc.getTemporaryBuffer<unsigned int>(N);
assert(d_scan);
hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_tmp, d_scan, N);
d_temp_storage = alloc.getTemporaryBuffer<char>(temp_storage_bytes);
hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_tmp, d_scan, N);
alloc.deallocate((char*)d_temp_storage);
// determine total number of sent particles
d_temp_storage = NULL;
temp_storage_bytes = 0;
unsigned int* d_n_out = (unsigned int*)alloc.getTemporaryBuffer<unsigned int>(1);
assert(d_n_out);
hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_tmp, d_n_out, N);
d_temp_storage = alloc.allocate(temp_storage_bytes);
hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_tmp, d_n_out, N);
alloc.deallocate((char*)d_temp_storage);
hipMemcpy(&n_out, d_n_out, sizeof(unsigned int), hipMemcpyDeviceToHost);
alloc.deallocate((char*)d_n_out);
// Don't write past end of buffer
if (n_out <= max_n_out)
{
// partition particle data into local and removed particles
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
unsigned int nwork = range.second - range.first;
unsigned int offset = range.first;
unsigned int block_size = 256;
unsigned int n_blocks = nwork / block_size + 1;
hipLaunchKernelGGL(gpu_scatter_particle_data_kernel,
dim3(n_blocks),
dim3(block_size),
0,
0,
nwork,
d_pos,
d_vel,
d_accel,
d_charge,
d_diameter,
d_image,
d_body,
d_orientation,
d_angmom,
d_inertia,
d_net_force,
d_net_torque,
d_net_virial,
net_virial_pitch,
d_tag,
d_rtag,
d_pos_alt,
d_vel_alt,
d_accel_alt,
d_charge_alt,
d_diameter_alt,
d_image_alt,
d_body_alt,
d_orientation_alt,
d_angmom_alt,
d_inertia_alt,
d_net_force_alt,
d_net_torque_alt,
d_net_virial_alt,
d_tag_alt,
d_out,
d_comm_flags,
d_comm_flags_out,
d_scan,
offset);
}
}
// free temp buf
alloc.deallocate((char*)d_scan);
// return elements written to output stream
return n_out;
}
__global__ void gpu_pdata_add_particles_kernel(unsigned int old_nparticles,
unsigned int num_add_ptls,
Scalar4* d_pos,
Scalar4* d_vel,
Scalar3* d_accel,
Scalar* d_charge,
Scalar* d_diameter,
int3* d_image,
unsigned int* d_body,
Scalar4* d_orientation,
Scalar4* d_angmom,
Scalar3* d_inertia,
Scalar4* d_net_force,
Scalar4* d_net_torque,
Scalar* d_net_virial,
unsigned int net_virial_pitch,
unsigned int* d_tag,
unsigned int* d_rtag,
const detail::pdata_element* d_in,
unsigned int* d_comm_flags)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_add_ptls)
return;
detail::pdata_element p = d_in[idx];
unsigned int add_idx = old_nparticles + idx;
d_pos[add_idx] = p.pos;
d_vel[add_idx] = p.vel;
d_accel[add_idx] = p.accel;
d_charge[add_idx] = p.charge;
d_diameter[add_idx] = p.diameter;
d_image[add_idx] = p.image;
d_body[add_idx] = p.body;
d_orientation[add_idx] = p.orientation;
d_angmom[add_idx] = p.angmom;
d_inertia[add_idx] = p.inertia;
d_net_force[add_idx] = p.net_force;
d_net_torque[add_idx] = p.net_torque;
for (unsigned int j = 0; j < 6; ++j)
d_net_virial[j * net_virial_pitch + add_idx] = p.net_virial[j];
d_tag[add_idx] = p.tag;
d_rtag[p.tag] = add_idx;
d_comm_flags[add_idx] = 0;
}
/*! \param old_nparticles old local particle count
\param num_add_ptls Number of particles in input array
\param d_pos Device array of particle positions
    \param d_vel Device array of particle velocities
\param d_accel Device array of particle accelerations
\param d_charge Device array of particle charges
\param d_diameter Device array of particle diameters
\param d_image Device array of particle images
\param d_body Device array of particle body tags
\param d_orientation Device array of particle orientations
\param d_angmom Device array of particle angular momenta
\param d_inertia Device array of particle moments of inertia
\param d_net_force Net force
\param d_net_torque Net torque
\param d_net_virial Net virial
\param d_tag Device array of particle tags
\param d_rtag Device array for reverse-lookup table
\param d_in Device array of packed input particle data
\param d_comm_flags Device array of communication flags (pdata)
*/
void gpu_pdata_add_particles(const unsigned int old_nparticles,
const unsigned int num_add_ptls,
Scalar4* d_pos,
Scalar4* d_vel,
Scalar3* d_accel,
Scalar* d_charge,
Scalar* d_diameter,
int3* d_image,
unsigned int* d_body,
Scalar4* d_orientation,
Scalar4* d_angmom,
Scalar3* d_inertia,
Scalar4* d_net_force,
Scalar4* d_net_torque,
Scalar* d_net_virial,
unsigned int net_virial_pitch,
unsigned int* d_tag,
unsigned int* d_rtag,
const detail::pdata_element* d_in,
unsigned int* d_comm_flags)
{
assert(d_pos);
assert(d_vel);
assert(d_accel);
assert(d_charge);
assert(d_diameter);
assert(d_image);
assert(d_body);
assert(d_orientation);
assert(d_angmom);
assert(d_inertia);
assert(d_net_force);
assert(d_net_torque);
assert(d_net_virial);
assert(d_tag);
assert(d_rtag);
assert(d_in);
unsigned int block_size = 256;
unsigned int n_blocks = num_add_ptls / block_size + 1;
hipLaunchKernelGGL(gpu_pdata_add_particles_kernel,
dim3(n_blocks),
dim3(block_size),
0,
0,
old_nparticles,
num_add_ptls,
d_pos,
d_vel,
d_accel,
d_charge,
d_diameter,
d_image,
d_body,
d_orientation,
d_angmom,
d_inertia,
d_net_force,
d_net_torque,
d_net_virial,
net_virial_pitch,
d_tag,
d_rtag,
d_in,
d_comm_flags);
}
} // end namespace kernel
} // end namespace hoomd
#endif // ENABLE_MPI
|
77fa4b8044b3172bfc415eae71a4c96641ddfe97.hip
|
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwish<
float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
|
77fa4b8044b3172bfc415eae71a4c96641ddfe97.cu
|
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwish<
float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
3a810966d31595219352b1bab45c7f718c369db2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
/* Modifications Copyright (c) Microsoft. */
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include "non_max_suppression_impl.h"
#include "core/providers/cpu/object_detection/non_max_suppression_helper.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include <hipcub/hipcub.hpp>
// TODO: fix the warnings
#ifdef _MSC_VER
#pragma warning(disable : 4244)
#endif
namespace onnxruntime {
namespace cuda {
using namespace nms_helpers;
namespace {
struct __align__(16) Box {
float x1, y1, x2, y2;
};
// This is the width of the bitmask for masking boxes for each thread.
// This needs to be a power of two (a POD width, usually) so that division and
// modulo can be implemented as bit operations during host selection.
constexpr int kNmsBoxesPerThread = 8 * sizeof(int);
// Helper to calculate modulo mask and shift bits.
// For kNmsBoxesPerThread=32 ModuloMask will be 31, i.e 0x1F thus
// i % 32 == i & 31. Similarly ShiftBits will be 5 so that
// i / 32 == i >> 5. Using these bit operations should reduce the stall on host
// thread.
__device__ constexpr int NumBits(int n) { return (n == 0) ? 0 : NumBits(n >> 1) + 1; }
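// Added illustration (not part of the original source): for
// kNmsBoxesPerThread == 32, NumBits(32) == 6, so the shift length used in
// CheckBit below is 5 and the remainder mask is 31 (0x1F); hence
// i / 32 == i >> 5 and i % 32 == i & 31 for non-negative i.
static_assert((kNmsBoxesPerThread & (kNmsBoxesPerThread - 1)) == 0,
              "kNmsBoxesPerThread must be a power of two for these bit tricks");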
constexpr int kNmsBlockDim = 16;
constexpr int kNmsBlockDimMax = 128;
// Check whether two boxes have an IoU greater than threshold.
template <typename T>
__device__ inline bool OverThreshold(const Box* a, const Box* b,
const float a_area,
const T iou_threshold) {
const float b_area = (b->x2 - b->x1) * (b->y2 - b->y1);
if (a_area == 0.0f || b_area == 0.0f) return false;
const float xx1 = fmaxf(a->x1, b->x1);
const float yy1 = fmaxf(a->y1, b->y1);
const float xx2 = fminf(a->x2, b->x2);
const float yy2 = fminf(a->y2, b->y2);
  // fdimf computes the positive difference between xx2 and xx1.
const float w = fdimf(xx2, xx1);
const float h = fdimf(yy2, yy1);
const float intersection = w * h;
  // Testing aa/bb > t is equivalent to aa > bb*t (since bb != 0),
  // which avoids the division.
const float aa = intersection;
const float bb = a_area + b_area - intersection;
const float bt = bb * iou_threshold;
return aa >= bt;
}
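// Added note (not part of the original source): the final comparison rewrites
// intersection / union >= iou_threshold as intersection >= union * iou_threshold,
// with union = a_area + b_area - intersection. Assuming well-formed boxes
// (x2 >= x1, y2 >= y1) and given the zero-area early-out above, the union is
// positive, so the two forms agree while the multiplication form avoids a
// per-pair division and tolerates low-precision (e.g. fp16) thresholds.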
template <typename T>
__device__ inline bool CheckBit(T* bit_mask, int bit) {
constexpr int kShiftLen = NumBits(8 * sizeof(T)) - 1;
constexpr int kRemainderMask = 8 * sizeof(T) - 1;
int bin = bit >> kShiftLen;
return (bit_mask[bin] >> (bit & kRemainderMask)) & 1;
}
// Produce a global bitmask (result_mask) of selected boxes from bitmask
// generated by NMSKernel Abort early if max_boxes boxes are selected. Bitmask
// is num_boxes*bit_mask_len bits indicating whether to keep or remove a box.
__global__ void NMSReduce(const int* bitmask, const int bit_mask_len,
const int num_boxes, const int max_boxes,
char* result_mask) {
extern __shared__ int local[];
// set global mask to accept all boxes
for (int box = blockIdx.x * blockDim.x + threadIdx.x; box < bit_mask_len; box += blockDim.x * gridDim.x) {
local[box] = 0xFFFFFFFF;
}
__syncthreads();
int accepted_boxes = 0;
for (int box = 0; box < num_boxes - 1; ++box) {
// if current box is masked by an earlier box, skip it.
if (!CheckBit(local, box)) {
continue;
}
accepted_boxes += 1;
int offset = box * bit_mask_len;
// update global mask with current box's mask
for (int b = blockIdx.x * blockDim.x + threadIdx.x; b < bit_mask_len; b += blockDim.x * gridDim.x) {
local[b] &= ~bitmask[offset + b];
}
__syncthreads();
if (accepted_boxes > max_boxes) break;
}
  // copy global mask to the result_mask char array. A char array is needed for
// hipcub::DeviceSelect later.
for (int box = blockIdx.x * blockDim.x + threadIdx.x; box < num_boxes; box += blockDim.x * gridDim.x) {
result_mask[box] = CheckBit(local, box);
}
}
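// Added note (not part of the original source): NMSReduce is launched further
// down (in NmsGpu) with a single block, 1024 threads and
// bit_mask_len * sizeof(int) bytes of dynamic shared memory, so the whole
// global acceptance mask fits in `local` and every __syncthreads() gives all
// threads a consistent view of it before the next box is processed.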
// For each box, compute a bitmask of boxes which has an overlap with given box
// above threshold.
//
// Starting from the highest scoring box, mark any box which has IoU>threshold with
// the given box. Each thread processes kNmsBoxesPerThread boxes per stride, and
// each box has a bitmask of overlaps of length bit_mask_len.
//
__launch_bounds__(kNmsBlockDim* kNmsBlockDim, 4) __global__
void NMSKernel(
const int64_t center_point_box,
const Box* d_desc_sorted_boxes,
const int num_boxes,
const float iou_threshold,
const int bit_mask_len,
int* d_delete_mask) {
for (int i_block_offset = blockIdx.x * blockDim.x; i_block_offset < num_boxes;
i_block_offset += blockDim.x * gridDim.x) {
const int i = i_block_offset + threadIdx.x;
if (i < num_boxes) {
for (int j_thread_offset =
kNmsBoxesPerThread * (blockIdx.y * blockDim.y + threadIdx.y);
j_thread_offset < num_boxes;
j_thread_offset += kNmsBoxesPerThread * blockDim.y * gridDim.y) {
// Note : We can do everything using multiplication,
// and use fp16 - we are comparing against a low precision
// threshold.
int above_threshold = 0;
// Make sure that threads are within valid domain.
bool valid = false;
// Loop over the next kNmsBoxesPerThread boxes and set corresponding bit
// if it is overlapping with current box
for (int ib = 0; ib < kNmsBoxesPerThread; ++ib) {
// This thread will compare Box i and Box j.
const int j = j_thread_offset + ib;
if (i >= j || i >= num_boxes || j >= num_boxes) continue;
valid = true;
if (SuppressByIOU(reinterpret_cast<const float*>(d_desc_sorted_boxes),
i, j, center_point_box, iou_threshold)) {
// we have score[j] <= score[i].
above_threshold |= (1U << ib);
}
}
if (valid) {
d_delete_mask[i * bit_mask_len + j_thread_offset / kNmsBoxesPerThread] =
above_threshold;
}
}
}
}
}
// Variadic template helpers for Index selecting multiple arrays at the same
// time
template <typename Index>
__device__ inline void SelectHelper(const Index i_selected,
const Index i_original) {}
template <typename Index, typename T, typename... Args>
__device__ inline void SelectHelper(const Index i_selected,
const Index i_original,
const T* original, T* selected,
Args... args) {
selected[i_selected] = original[i_original];
SelectHelper(i_selected, i_original, args...);
}
// Helper template to select elements from original arrays using the index
// mapping and store into selected array. Each array sharing same mapping need
// to be passed as pairs of pointers to original and selected arrays. For
// selecting 2 arrays call would be
// IndexMultiSelect(num_elements, indices, original1 ,selected1, original2,
// selected2).
template <typename Index, typename T, typename... Args>
__global__ void IndexMultiSelect(const int num_elements, const Index* indices,
const T* original, T* selected, Args... args) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < num_elements; idx += blockDim.x * gridDim.x) {
SelectHelper(idx, indices[idx], original, selected, args...);
}
}
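// Added usage sketch (not part of the original source; buffer names are
// hypothetical): gathering a float array and an int array through the same
// sorted-index mapping in one launch, following the call shape described in
// the comment above. Args... is deduced from the trailing pointer pair.
//
//   hipLaunchKernelGGL((IndexMultiSelect<int, float>),
//       dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream,
//       n, d_sorted_indices,
//       d_scores_in, d_scores_out,   // first original/selected pair
//       d_labels_in, d_labels_out);  // second pair, picked up by Args...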
template <typename T>
__global__ void SetZero(const int count, T* __restrict__ ptr) {
// Check that the grid is one dimensional and index doesn't overflow.
assert(blockDim.y == 1);
assert(blockDim.z == 1);
assert(blockDim.x * gridDim.x / blockDim.x == gridDim.x);
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
ptr[i] = T(0);
}
}
template <typename T>
__global__ void Iota(const int num_elements, const T offset, T* to_fill) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < num_elements; idx += blockDim.x * gridDim.x) {
to_fill[idx] = static_cast<T>(idx) + offset;
}
}
__global__ void NormalizeOutput(const int num_elements, const int* original, int64_t* to_normalize, int64_t batch_index, int64_t class_index) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < num_elements; idx += blockDim.x * gridDim.x) {
to_normalize[idx * 3] = batch_index;
to_normalize[idx * 3 + 1] = class_index;
to_normalize[idx * 3 + 2] = static_cast<int64_t>(original[idx]);
}
}
Status NmsGpu(hipStream_t stream,
std::function<IAllocatorUniquePtr<void>(size_t)> allocator,
const int64_t center_point_box,
const float* d_sorted_boxes_float_ptr,
const int num_boxes,
const float iou_threshold,
int* d_selected_indices,
int* h_nkeep,
const int max_boxes) {
  // Making sure we respect the __align__(16)
  // we promised to the compiler.
auto iptr = reinterpret_cast<std::uintptr_t>(d_sorted_boxes_float_ptr);
ORT_ENFORCE((iptr & 15) == 0);
const int bit_mask_len =
(num_boxes + kNmsBoxesPerThread - 1) / kNmsBoxesPerThread;
int max_nms_mask_size = num_boxes * bit_mask_len;
IAllocatorUniquePtr<void> d_nms_mask_ptr{allocator(max_nms_mask_size * sizeof(int))};
auto* d_nms_mask = static_cast<int*>(d_nms_mask_ptr.get());
int blocksPerGrid = (int)(ceil(static_cast<float>(max_nms_mask_size) / GridDim::maxThreadsPerBlock));
hipLaunchKernelGGL(( SetZero<int>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, max_nms_mask_size, d_nms_mask);
int* d_delete_mask = d_nms_mask;
int* h_selected_count = h_nkeep;
const Box* d_sorted_boxes =
reinterpret_cast<const Box*>(d_sorted_boxes_float_ptr);
dim3 block_dim, thread_block;
int num_blocks = (num_boxes + kNmsBlockDim - 1) / kNmsBlockDim;
num_blocks = ::max(::min(num_blocks, kNmsBlockDimMax), 1);
block_dim.x = num_blocks;
block_dim.y = num_blocks;
block_dim.z = 1;
thread_block.x = kNmsBlockDim;
thread_block.y = kNmsBlockDim;
thread_block.z = 1;
hipLaunchKernelGGL(( NMSKernel), dim3(block_dim), dim3(thread_block), 0, stream, center_point_box,
d_sorted_boxes,
num_boxes,
iou_threshold,
bit_mask_len,
d_delete_mask);
IAllocatorUniquePtr<void> d_selected_boxes_ptr{allocator(num_boxes * sizeof(char))};
auto* d_selected_boxes = static_cast<char*>(d_selected_boxes_ptr.get());
IAllocatorUniquePtr<void> d_indices_ptr{allocator(num_boxes * sizeof(int))};
auto* d_indices = static_cast<int*>(d_indices_ptr.get());
blocksPerGrid = (int)(ceil(static_cast<float>(num_boxes) / GridDim::maxThreadsPerBlock));
hipLaunchKernelGGL(( Iota<int>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, num_boxes, 0, d_indices);
hipLaunchKernelGGL(( NMSReduce), dim3(1), dim3(1024), bit_mask_len * sizeof(int), stream, d_delete_mask, bit_mask_len, num_boxes, max_boxes, d_selected_boxes);
size_t flagged_buffer_size = 0;
CUDA_RETURN_IF_ERROR(hipcub::DeviceSelect::Flagged(static_cast<void*>(nullptr), // temp_storage
flagged_buffer_size,
static_cast<int*>(nullptr), // input
static_cast<char*>(nullptr), // selection flag
static_cast<int*>(nullptr), // selected items
static_cast<int*>(nullptr), // num_selected
num_boxes,
stream));
IAllocatorUniquePtr<void> d_cub_scratch_buffer_ptr{allocator(flagged_buffer_size)};
auto* d_cub_scratch_buffer = static_cast<uint8_t*>(d_cub_scratch_buffer_ptr.get());
IAllocatorUniquePtr<void> d_num_selected_ptr{allocator(sizeof(int))};
auto* d_num_selected = static_cast<int*>(d_num_selected_ptr.get());
CUDA_RETURN_IF_ERROR(hipcub::DeviceSelect::Flagged(
d_cub_scratch_buffer, // temp_storage
flagged_buffer_size,
d_indices, // input
d_selected_boxes, // selection flag
d_selected_indices, // selected items
d_num_selected, num_boxes, stream));
CUDA_RETURN_IF_ERROR(hipMemcpyAsync(h_selected_count, d_num_selected, sizeof(int), hipMemcpyDeviceToHost, stream));
// hipStreamSynchronize is needed since the value of h_selected_count will be used by host after this function.
CUDA_RETURN_IF_ERROR(hipStreamSynchronize(stream));
return Status::OK();
}
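// Added summary (not part of the original source): NmsGpu above is a
// four-step pipeline: (1) zero a num_boxes x bit_mask_len overlap bitmask,
// (2) NMSKernel fills it with pairwise IoU decisions, (3) NMSReduce collapses
// it into a per-box keep/drop char flag, and (4) hipcub::DeviceSelect::Flagged
// compacts the kept indices and copies their count back to the host through
// h_nkeep.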
struct DeviceGreaterThan {
float threshold_;
__host__ __device__ __forceinline__ DeviceGreaterThan(float threshold)
: threshold_(threshold) {}
__host__ __device__ __forceinline__ bool operator()(const float& val) const {
return (val > threshold_);
}
};
} // namespace
Status NonMaxSuppressionImpl(
hipStream_t stream,
std::function<IAllocatorUniquePtr<void>(size_t)> allocator,
const PrepareContext& pc,
const int64_t center_point_box,
int64_t batch_index,
int64_t class_index,
int max_output_boxes_per_class,
float iou_threshold,
float score_threshold,
IAllocatorUniquePtr<void>& selected_indices,
int* h_number_selected) {
// STEP 1. Prepare data
int num_boxes = pc.num_boxes_;
const float* boxes_data = pc.boxes_data_ + batch_index * num_boxes * 4;
const float* scores_data = pc.scores_data_ + (batch_index * pc.num_classes_ + class_index) * num_boxes;
// prepare temporary memory for sorting scores
// calculate temporary size that used for sorting
size_t cub_sort_temp_storage_bytes = 0;
CUDA_RETURN_IF_ERROR(hipcub::DeviceRadixSort::SortPairsDescending(
nullptr, cub_sort_temp_storage_bytes,
static_cast<float*>(nullptr), // scores
static_cast<float*>(nullptr), // sorted scores
static_cast<int*>(nullptr), // input indices
static_cast<int*>(nullptr), // sorted indices
num_boxes, // num items
0, 8 * sizeof(float), // sort all bits
stream));
// allocate temporary memory
IAllocatorUniquePtr<void> d_cub_sort_buffer_ptr{allocator(cub_sort_temp_storage_bytes)};
auto* d_cub_sort_buffer = static_cast<uint8_t*>(d_cub_sort_buffer_ptr.get());
IAllocatorUniquePtr<void> d_indices_ptr{allocator(num_boxes * sizeof(int))};
auto* d_indices = static_cast<int*>(d_indices_ptr.get());
IAllocatorUniquePtr<void> d_sorted_indices_ptr{allocator(num_boxes * sizeof(int))};
auto* d_sorted_indices = static_cast<int*>(d_sorted_indices_ptr.get());
IAllocatorUniquePtr<void> d_selected_indices_ptr{allocator(num_boxes * sizeof(int))};
auto* d_selected_indices = static_cast<int*>(d_selected_indices_ptr.get());
IAllocatorUniquePtr<void> d_sorted_scores_ptr{allocator(num_boxes * sizeof(float))};
auto* d_sorted_scores = static_cast<float*>(d_sorted_scores_ptr.get());
IAllocatorUniquePtr<void> d_sorted_boxes_ptr{allocator(num_boxes * 4 * sizeof(float))};
auto* d_sorted_boxes = static_cast<float*>(d_sorted_boxes_ptr.get());
  // create a sequence of indices
int blocksPerGrid = (int)(ceil(static_cast<float>(num_boxes) / GridDim::maxThreadsPerBlock));
hipLaunchKernelGGL(( Iota<int>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, num_boxes, 0, d_indices);
CUDA_RETURN_IF_ERROR(hipGetLastError());
// sort scores
CUDA_RETURN_IF_ERROR(hipcub::DeviceRadixSort::SortPairsDescending(
d_cub_sort_buffer,
cub_sort_temp_storage_bytes,
scores_data,
d_sorted_scores,
d_indices,
d_sorted_indices,
num_boxes,
0,
8 * sizeof(float), // sort all bits
stream));
// pick sorted scores
const Box* original_boxes = reinterpret_cast<const Box*>(boxes_data);
Box* sorted_boxes = reinterpret_cast<Box*>(d_sorted_boxes);
hipLaunchKernelGGL(( IndexMultiSelect<int, Box>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, num_boxes, d_sorted_indices, original_boxes, sorted_boxes);
CUDA_RETURN_IF_ERROR(hipGetLastError());
// STEP 2. filter boxes by scores
int limited_num_boxes = num_boxes;
if (pc.score_threshold_ != nullptr) {
thrust::device_ptr<float> sorted_scores_device_ptr(d_sorted_scores);
limited_num_boxes = thrust::count_if(
thrust::hip::par.on(stream),
sorted_scores_device_ptr,
sorted_scores_device_ptr + num_boxes,
DeviceGreaterThan(score_threshold));
CUDA_RETURN_IF_ERROR(hipGetLastError());
if (limited_num_boxes == 0) {
*h_number_selected = 0;
return Status::OK();
}
}
// STEP 3. launch NMS kernels
ORT_RETURN_IF_ERROR(NmsGpu(stream,
allocator,
center_point_box,
d_sorted_boxes,
limited_num_boxes,
iou_threshold,
d_selected_indices,
h_number_selected,
max_output_boxes_per_class));
CUDA_RETURN_IF_ERROR(hipGetLastError());
// STEP 4. map back to sorted indices
*h_number_selected = ::min(*h_number_selected, max_output_boxes_per_class);
int num_to_keep = *h_number_selected;
if (num_to_keep > 0) {
IAllocatorUniquePtr<void> d_output_indices_ptr{allocator(num_to_keep * sizeof(int))};
auto* d_output_indices = static_cast<int*>(d_output_indices_ptr.get());
IAllocatorUniquePtr<void> d_normalized_output_indices_ptr{allocator(num_to_keep * 3 * sizeof(int64_t))};
auto* d_normalized_output_indices = static_cast<int64_t*>(d_normalized_output_indices_ptr.get());
blocksPerGrid = (int)(ceil(static_cast<float>(num_to_keep) / GridDim::maxThreadsPerBlock));
hipLaunchKernelGGL(( IndexMultiSelect<int, int>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, num_to_keep, d_selected_indices, d_sorted_indices, d_output_indices);
hipLaunchKernelGGL(( NormalizeOutput), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, num_to_keep, d_output_indices, d_normalized_output_indices, batch_index, class_index);
CUDA_RETURN_IF_ERROR(hipGetLastError());
selected_indices = std::move(d_normalized_output_indices_ptr);
}
return Status::OK();
}
} // namespace cuda
} // namespace onnxruntime
|
3a810966d31595219352b1bab45c7f718c369db2.cu
|
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
/* Modifications Copyright (c) Microsoft. */
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include "non_max_suppression_impl.h"
#include "core/providers/cpu/object_detection/non_max_suppression_helper.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include <cub/cub.cuh>
//TODO:fix the warnings
#ifdef _MSC_VER
#pragma warning(disable : 4244)
#endif
namespace onnxruntime {
namespace cuda {
using namespace nms_helpers;
namespace {
struct __align__(16) Box {
float x1, y1, x2, y2;
};
// This is the width of the bitmask for masking boxes for each thread.
// This needs to be a power of 2 (a POD width, usually) so that division and
// modulo can be implemented as bit operations during host selection.
constexpr int kNmsBoxesPerThread = 8 * sizeof(int);
// Helper to calculate modulo mask and shift bits.
// For kNmsBoxesPerThread=32 ModuloMask will be 31, i.e 0x1F thus
// i % 32 == i & 31. Similarly ShiftBits will be 5 so that
// i / 32 == i >> 5. Using these bit operations should reduce the stall on host
// thread.
__device__ constexpr int NumBits(int n) { return (n == 0) ? 0 : NumBits(n >> 1) + 1; }
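// Worked example: for an int mask, kNmsBoxesPerThread == 8 * sizeof(int) == 32 and
// NumBits(32) - 1 == 5, so i / 32 == i >> 5 (mask word) and i % 32 == i & 31 (bit
// within the word); e.g. box i = 70 maps to word 70 >> 5 == 2, bit 70 & 31 == 6.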
constexpr int kNmsBlockDim = 16;
constexpr int kNmsBlockDimMax = 128;
// Check whether two boxes have an IoU greater than threshold.
template <typename T>
__device__ inline bool OverThreshold(const Box* a, const Box* b,
const float a_area,
const T iou_threshold) {
const float b_area = (b->x2 - b->x1) * (b->y2 - b->y1);
if (a_area == 0.0f || b_area == 0.0f) return false;
const float xx1 = fmaxf(a->x1, b->x1);
const float yy1 = fmaxf(a->y1, b->y1);
const float xx2 = fminf(a->x2, b->x2);
const float yy2 = fminf(a->y2, b->y2);
  // fdimf computes the positive difference between xx2 and xx1 (0 when xx2 < xx1).
const float w = fdimf(xx2, xx1);
const float h = fdimf(yy2, yy1);
const float intersection = w * h;
  // Testing aa / bb >= t as aa >= bb * t (bb != 0),
  // avoiding a division.
const float aa = intersection;
const float bb = a_area + b_area - intersection;
const float bt = bb * iou_threshold;
return aa >= bt;
}
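// Note: the comparison above is the division-free form of the IoU test; e.g.
// intersection = 6 and union = 10 with threshold 0.5 gives 6 >= 10 * 0.5,
// matching IoU = 0.6 >= 0.5, with no divide in the inner loop.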
template <typename T>
__device__ inline bool CheckBit(T* bit_mask, int bit) {
constexpr int kShiftLen = NumBits(8 * sizeof(T)) - 1;
constexpr int kRemainderMask = 8 * sizeof(T) - 1;
int bin = bit >> kShiftLen;
return (bit_mask[bin] >> (bit & kRemainderMask)) & 1;
}
// Produce a global bitmask (result_mask) of selected boxes from the bitmask
// generated by NMSKernel. Abort early if max_boxes boxes are selected. Bitmask
// is num_boxes*bit_mask_len bits indicating whether to keep or remove a box.
__global__ void NMSReduce(const int* bitmask, const int bit_mask_len,
const int num_boxes, const int max_boxes,
char* result_mask) {
extern __shared__ int local[];
// set global mask to accept all boxes
for (int box = blockIdx.x * blockDim.x + threadIdx.x; box < bit_mask_len; box += blockDim.x * gridDim.x) {
local[box] = 0xFFFFFFFF;
}
__syncthreads();
int accepted_boxes = 0;
for (int box = 0; box < num_boxes - 1; ++box) {
// if current box is masked by an earlier box, skip it.
if (!CheckBit(local, box)) {
continue;
}
accepted_boxes += 1;
int offset = box * bit_mask_len;
// update global mask with current box's mask
for (int b = blockIdx.x * blockDim.x + threadIdx.x; b < bit_mask_len; b += blockDim.x * gridDim.x) {
local[b] &= ~bitmask[offset + b];
}
__syncthreads();
if (accepted_boxes > max_boxes) break;
}
  // copy global mask to the result_mask char array. char array is needed for
// cub::DeviceSelect later.
for (int box = blockIdx.x * blockDim.x + threadIdx.x; box < num_boxes; box += blockDim.x * gridDim.x) {
result_mask[box] = CheckBit(local, box);
}
}
// For each box, compute a bitmask of boxes which has an overlap with given box
// above threshold.
//
// Starting from the highest-scoring box, mark any box which has IoU>threshold with
// given box. Each thread processes a kNmsBoxesPerThread boxes per stride, and
// each box has bitmask of overlaps of length bit_mask_len.
//
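// Note on the grid layout used by this kernel: x (block/thread) covers the query
// box i and y covers groups of kNmsBoxesPerThread candidate boxes j; each thread
// packs the overlap bits of its group into one int of row i of d_delete_mask.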
__launch_bounds__(kNmsBlockDim* kNmsBlockDim, 4) __global__
void NMSKernel(
const int64_t center_point_box,
const Box* d_desc_sorted_boxes,
const int num_boxes,
const float iou_threshold,
const int bit_mask_len,
int* d_delete_mask) {
for (int i_block_offset = blockIdx.x * blockDim.x; i_block_offset < num_boxes;
i_block_offset += blockDim.x * gridDim.x) {
const int i = i_block_offset + threadIdx.x;
if (i < num_boxes) {
for (int j_thread_offset =
kNmsBoxesPerThread * (blockIdx.y * blockDim.y + threadIdx.y);
j_thread_offset < num_boxes;
j_thread_offset += kNmsBoxesPerThread * blockDim.y * gridDim.y) {
// Note : We can do everything using multiplication,
// and use fp16 - we are comparing against a low precision
// threshold.
int above_threshold = 0;
// Make sure that threads are within valid domain.
bool valid = false;
// Loop over the next kNmsBoxesPerThread boxes and set corresponding bit
// if it is overlapping with current box
for (int ib = 0; ib < kNmsBoxesPerThread; ++ib) {
// This thread will compare Box i and Box j.
const int j = j_thread_offset + ib;
if (i >= j || i >= num_boxes || j >= num_boxes) continue;
valid = true;
if (SuppressByIOU(reinterpret_cast<const float*>(d_desc_sorted_boxes),
i, j, center_point_box, iou_threshold)) {
// we have score[j] <= score[i].
above_threshold |= (1U << ib);
}
}
if (valid) {
d_delete_mask[i * bit_mask_len + j_thread_offset / kNmsBoxesPerThread] =
above_threshold;
}
}
}
}
}
// Variadic template helpers for Index selecting multiple arrays at the same
// time
template <typename Index>
__device__ inline void SelectHelper(const Index i_selected,
const Index i_original) {}
template <typename Index, typename T, typename... Args>
__device__ inline void SelectHelper(const Index i_selected,
const Index i_original,
const T* original, T* selected,
Args... args) {
selected[i_selected] = original[i_original];
SelectHelper(i_selected, i_original, args...);
}
// Helper template to select elements from original arrays using the index
// mapping and store into selected array. Each array sharing same mapping need
// to be passed as pairs of pointers to original and selected arrays. For
// selecting 2 arrays call would be
// IndexMultiSelect(num_elements, indices, original1 ,selected1, original2,
// selected2).
template <typename Index, typename T, typename... Args>
__global__ void IndexMultiSelect(const int num_elements, const Index* indices,
const T* original, T* selected, Args... args) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < num_elements; idx += blockDim.x * gridDim.x) {
SelectHelper(idx, indices[idx], original, selected, args...);
}
}
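// Usage sketch (hypothetical names, for illustration only): two arrays sharing one
// index mapping can be gathered in a single launch, e.g.
//   IndexMultiSelect<int, float><<<grid, block, 0, stream>>>(
//       n, d_indices, d_scores_in, d_scores_out, d_areas_in, d_areas_out);
// where the trailing (d_areas_in, d_areas_out) pair is forwarded through Args...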
template <typename T>
__global__ void SetZero(const int count, T* __restrict__ ptr) {
// Check that the grid is one dimensional and index doesn't overflow.
assert(blockDim.y == 1);
assert(blockDim.z == 1);
assert(blockDim.x * gridDim.x / blockDim.x == gridDim.x);
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
ptr[i] = T(0);
}
}
template <typename T>
__global__ void Iota(const int num_elements, const T offset, T* to_fill) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < num_elements; idx += blockDim.x * gridDim.x) {
to_fill[idx] = static_cast<T>(idx) + offset;
}
}
__global__ void NormalizeOutput(const int num_elements, const int* original, int64_t* to_normalize, int64_t batch_index, int64_t class_index) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < num_elements; idx += blockDim.x * gridDim.x) {
to_normalize[idx * 3] = batch_index;
to_normalize[idx * 3 + 1] = class_index;
to_normalize[idx * 3 + 2] = static_cast<int64_t>(original[idx]);
}
}
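// Note: each selected box becomes a (batch_index, class_index, box_index) triple,
// i.e. one row of the operator's [num_selected, 3] selected_indices output.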
Status NmsGpu(cudaStream_t stream,
std::function<IAllocatorUniquePtr<void>(size_t)> allocator,
const int64_t center_point_box,
const float* d_sorted_boxes_float_ptr,
const int num_boxes,
const float iou_threshold,
int* d_selected_indices,
int* h_nkeep,
const int max_boxes) {
  // Making sure we respect the __align__(16)
// we promised to the compiler.
auto iptr = reinterpret_cast<std::uintptr_t>(d_sorted_boxes_float_ptr);
ORT_ENFORCE((iptr & 15) == 0);
const int bit_mask_len =
(num_boxes + kNmsBoxesPerThread - 1) / kNmsBoxesPerThread;
int max_nms_mask_size = num_boxes * bit_mask_len;
IAllocatorUniquePtr<void> d_nms_mask_ptr{allocator(max_nms_mask_size * sizeof(int))};
auto* d_nms_mask = static_cast<int*>(d_nms_mask_ptr.get());
int blocksPerGrid = (int)(ceil(static_cast<float>(max_nms_mask_size) / GridDim::maxThreadsPerBlock));
SetZero<int><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(max_nms_mask_size, d_nms_mask);
int* d_delete_mask = d_nms_mask;
int* h_selected_count = h_nkeep;
const Box* d_sorted_boxes =
reinterpret_cast<const Box*>(d_sorted_boxes_float_ptr);
dim3 block_dim, thread_block;
int num_blocks = (num_boxes + kNmsBlockDim - 1) / kNmsBlockDim;
num_blocks = std::max(std::min(num_blocks, kNmsBlockDimMax), 1);
block_dim.x = num_blocks;
block_dim.y = num_blocks;
block_dim.z = 1;
thread_block.x = kNmsBlockDim;
thread_block.y = kNmsBlockDim;
thread_block.z = 1;
NMSKernel<<<block_dim, thread_block, 0, stream>>>(center_point_box,
d_sorted_boxes,
num_boxes,
iou_threshold,
bit_mask_len,
d_delete_mask);
IAllocatorUniquePtr<void> d_selected_boxes_ptr{allocator(num_boxes * sizeof(char))};
auto* d_selected_boxes = static_cast<char*>(d_selected_boxes_ptr.get());
IAllocatorUniquePtr<void> d_indices_ptr{allocator(num_boxes * sizeof(int))};
auto* d_indices = static_cast<int*>(d_indices_ptr.get());
blocksPerGrid = (int)(ceil(static_cast<float>(num_boxes) / GridDim::maxThreadsPerBlock));
Iota<int><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(num_boxes, 0, d_indices);
NMSReduce<<<1, 1024, bit_mask_len * sizeof(int), stream>>>(d_delete_mask, bit_mask_len, num_boxes, max_boxes, d_selected_boxes);
size_t flagged_buffer_size = 0;
CUDA_RETURN_IF_ERROR(cub::DeviceSelect::Flagged(static_cast<void*>(nullptr), // temp_storage
flagged_buffer_size,
static_cast<int*>(nullptr), // input
static_cast<char*>(nullptr), // selection flag
static_cast<int*>(nullptr), // selected items
static_cast<int*>(nullptr), // num_selected
num_boxes,
stream));
IAllocatorUniquePtr<void> d_cub_scratch_buffer_ptr{allocator(flagged_buffer_size)};
auto* d_cub_scratch_buffer = static_cast<uint8_t*>(d_cub_scratch_buffer_ptr.get());
IAllocatorUniquePtr<void> d_num_selected_ptr{allocator(sizeof(int))};
auto* d_num_selected = static_cast<int*>(d_num_selected_ptr.get());
CUDA_RETURN_IF_ERROR(cub::DeviceSelect::Flagged(
d_cub_scratch_buffer, // temp_storage
flagged_buffer_size,
d_indices, // input
d_selected_boxes, // selection flag
d_selected_indices, // selected items
d_num_selected, num_boxes, stream));
CUDA_RETURN_IF_ERROR(cudaMemcpyAsync(h_selected_count, d_num_selected, sizeof(int), cudaMemcpyDeviceToHost, stream));
// cudaStreamSynchronize is needed since the value of h_selected_count will be used by host after this function.
CUDA_RETURN_IF_ERROR(cudaStreamSynchronize(stream));
return Status::OK();
}
struct DeviceGreaterThan {
float threshold_;
__host__ __device__ __forceinline__ DeviceGreaterThan(float threshold)
: threshold_(threshold) {}
__host__ __device__ __forceinline__ bool operator()(const float& val) const {
return (val > threshold_);
}
};
} // namespace
Status NonMaxSuppressionImpl(
cudaStream_t stream,
std::function<IAllocatorUniquePtr<void>(size_t)> allocator,
const PrepareContext& pc,
const int64_t center_point_box,
int64_t batch_index,
int64_t class_index,
int max_output_boxes_per_class,
float iou_threshold,
float score_threshold,
IAllocatorUniquePtr<void>& selected_indices,
int* h_number_selected) {
// STEP 1. Prepare data
int num_boxes = pc.num_boxes_;
const float* boxes_data = pc.boxes_data_ + batch_index * num_boxes * 4;
const float* scores_data = pc.scores_data_ + (batch_index * pc.num_classes_ + class_index) * num_boxes;
// prepare temporary memory for sorting scores
  // calculate the temporary storage size needed for sorting
size_t cub_sort_temp_storage_bytes = 0;
CUDA_RETURN_IF_ERROR(cub::DeviceRadixSort::SortPairsDescending(
nullptr, cub_sort_temp_storage_bytes,
static_cast<float*>(nullptr), // scores
static_cast<float*>(nullptr), // sorted scores
static_cast<int*>(nullptr), // input indices
static_cast<int*>(nullptr), // sorted indices
num_boxes, // num items
0, 8 * sizeof(float), // sort all bits
stream));
// allocate temporary memory
IAllocatorUniquePtr<void> d_cub_sort_buffer_ptr{allocator(cub_sort_temp_storage_bytes)};
auto* d_cub_sort_buffer = static_cast<uint8_t*>(d_cub_sort_buffer_ptr.get());
IAllocatorUniquePtr<void> d_indices_ptr{allocator(num_boxes * sizeof(int))};
auto* d_indices = static_cast<int*>(d_indices_ptr.get());
IAllocatorUniquePtr<void> d_sorted_indices_ptr{allocator(num_boxes * sizeof(int))};
auto* d_sorted_indices = static_cast<int*>(d_sorted_indices_ptr.get());
IAllocatorUniquePtr<void> d_selected_indices_ptr{allocator(num_boxes * sizeof(int))};
auto* d_selected_indices = static_cast<int*>(d_selected_indices_ptr.get());
IAllocatorUniquePtr<void> d_sorted_scores_ptr{allocator(num_boxes * sizeof(float))};
auto* d_sorted_scores = static_cast<float*>(d_sorted_scores_ptr.get());
IAllocatorUniquePtr<void> d_sorted_boxes_ptr{allocator(num_boxes * 4 * sizeof(float))};
auto* d_sorted_boxes = static_cast<float*>(d_sorted_boxes_ptr.get());
  // create sequence of indices
int blocksPerGrid = (int)(ceil(static_cast<float>(num_boxes) / GridDim::maxThreadsPerBlock));
Iota<int><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(num_boxes, 0, d_indices);
CUDA_RETURN_IF_ERROR(cudaGetLastError());
// sort scores
CUDA_RETURN_IF_ERROR(cub::DeviceRadixSort::SortPairsDescending(
d_cub_sort_buffer,
cub_sort_temp_storage_bytes,
scores_data,
d_sorted_scores,
d_indices,
d_sorted_indices,
num_boxes,
0,
8 * sizeof(float), // sort all bits
stream));
  // gather the boxes in score-sorted order
const Box* original_boxes = reinterpret_cast<const Box*>(boxes_data);
Box* sorted_boxes = reinterpret_cast<Box*>(d_sorted_boxes);
IndexMultiSelect<int, Box><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(num_boxes, d_sorted_indices, original_boxes, sorted_boxes);
CUDA_RETURN_IF_ERROR(cudaGetLastError());
// STEP 2. filter boxes by scores
int limited_num_boxes = num_boxes;
if (pc.score_threshold_ != nullptr) {
thrust::device_ptr<float> sorted_scores_device_ptr(d_sorted_scores);
limited_num_boxes = thrust::count_if(
thrust::cuda::par.on(stream),
sorted_scores_device_ptr,
sorted_scores_device_ptr + num_boxes,
DeviceGreaterThan(score_threshold));
CUDA_RETURN_IF_ERROR(cudaGetLastError());
if (limited_num_boxes == 0) {
*h_number_selected = 0;
return Status::OK();
}
}
// STEP 3. launch NMS kernels
ORT_RETURN_IF_ERROR(NmsGpu(stream,
allocator,
center_point_box,
d_sorted_boxes,
limited_num_boxes,
iou_threshold,
d_selected_indices,
h_number_selected,
max_output_boxes_per_class));
CUDA_RETURN_IF_ERROR(cudaGetLastError());
// STEP 4. map back to sorted indices
*h_number_selected = std::min(*h_number_selected, max_output_boxes_per_class);
int num_to_keep = *h_number_selected;
if (num_to_keep > 0) {
IAllocatorUniquePtr<void> d_output_indices_ptr{allocator(num_to_keep * sizeof(int))};
auto* d_output_indices = static_cast<int*>(d_output_indices_ptr.get());
IAllocatorUniquePtr<void> d_normalized_output_indices_ptr{allocator(num_to_keep * 3 * sizeof(int64_t))};
auto* d_normalized_output_indices = static_cast<int64_t*>(d_normalized_output_indices_ptr.get());
blocksPerGrid = (int)(ceil(static_cast<float>(num_to_keep) / GridDim::maxThreadsPerBlock));
IndexMultiSelect<int, int><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(num_to_keep, d_selected_indices, d_sorted_indices, d_output_indices);
NormalizeOutput<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(num_to_keep, d_output_indices, d_normalized_output_indices, batch_index, class_index);
CUDA_RETURN_IF_ERROR(cudaGetLastError());
selected_indices = std::move(d_normalized_output_indices_ptr);
}
return Status::OK();
}
} // namespace cuda
} // namespace onnxruntime
|
369d4db867a5b5a9d819fd2eddc93a7522d26b86.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <new>
struct Foo
{
int value = 0x1234;
};
__global__ void kernel_simple(Foo* storage, Foo** initialized)
{
Foo* start = storage + threadIdx.x * 2;
start[0].value = 0x1234;
start[1].value = 0x1234;
initialized[threadIdx.x] = start;
}
|
369d4db867a5b5a9d819fd2eddc93a7522d26b86.cu
|
#include <new>
struct Foo
{
int value = 0x1234;
};
__global__ void kernel_simple(Foo* storage, Foo** initialized)
{
Foo* start = storage + threadIdx.x * 2;
start[0].value = 0x1234;
start[1].value = 0x1234;
initialized[threadIdx.x] = start;
}
|
6098e3c37c8773b185f33bee86e35f19b7b55ff1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************//**
* \file intermediatePressure.cu
* \author Christopher Minar ([email protected])
 * \brief kernels to generate the right hand side of the Poisson equation
*/
#include "intermediatePressure.h"
/**
* \namespace kernels
* \brief Contains all the custom-written CUDA kernels.
*/
namespace kernels
{
__global__
void intermediatePressure_luo(double *rhs2, double *detA, int *hybridTagsP, double *alpha, double *stencilCoef,
double *xv, double *yu,
double *b11, double *b12, double *b13, double *b14, double *b21, double *b22, double *b23, double *b24,
double *b31, double *b32, double *b33, double *b34, double *b41, double *b42, double *b43, double *b44,
double *q1, double *q2, double *q3, double *q4,
bool *q1flag, bool *q2flag, bool *q3flag, bool *q4flag,
int nx, int ny)
{
int ip = threadIdx.x + blockDim.x * blockIdx.x;
if (ip >= nx*ny || hybridTagsP[ip]<=0)
return;
int I = ip % nx,
J = ip / nx;
double temp = 0;
double x = xv[I],
y = yu[J];
if (q1flag[ip] == true)
{
temp = (b11[ip] + b21[ip]*x + b31[ip]*y + b41[ip]*x*y)*q1[ip]/detA[ip];
}
else if (q2flag[ip] == true)
{
temp = (b12[ip] + b22[ip]*x + b32[ip]*y + b42[ip]*x*y)*q2[ip]/detA[ip];
}
else if (q3flag[ip] == true)
{
temp = (b13[ip] + b23[ip]*x + b33[ip]*y + b43[ip]*x*y)*q3[ip]/detA[ip];
}
else if (q4flag[ip] == true)
{
temp = (b14[ip] + b24[ip]*x + b34[ip]*y + b44[ip]*x*y)*q4[ip]/detA[ip];
}
rhs2[ip] = (1-alpha[ip])*rhs2[ip]/stencilCoef[ip] + alpha[ip]*temp;
}
//calc b, det(A), q, qflag, index
__global__
void interpolate_P_HN_setup(double *detA, int *hybridTagsP, double *bx, double *by,
double *uB, double *uB0, double *vB, double *vB0,
double *yu, double *xv,
double *body_intercept_p_x, double *body_intercept_p_y, double *image_point_p_x, double *image_point_p_y,
int *i_start, int *j_start, int width, int nx, int ny, double dt, double totalPoints,
double *b11, double *b12, double *b13, double *b14, double *b21, double *b22, double *b23, double *b24,
double *b31, double *b32, double *b33, double *b34, double *b41, double *b42, double *b43, double *b44,
double *q1, double *q2, double *q3, double *q4,
bool *q1flag, bool *q2flag, bool *q3flag, bool *q4flag,
int *index1, int *index2, int *index3, int *index4,
double *x1, double *x2, double *x3, double *x4,
double *y1, double *y2, double *y3, double *y4,
double *dudt, double *ududx, double *vdudy, double *dvdt, double *udvdx, double *vdvdy)//test
{//flag u not used anymore
int idx = threadIdx.x + blockDim.x * blockIdx.x,
i = idx % (width),
j = idx / (width),
I = i_start[0] + i,
J = j_start[0] + j,
ip = J*nx + I,
ii= I-5,
jj = J-5;
//if (ip > J*nx + I) //return if we're out of bound
if (ip >= nx*ny)
return;
if (hybridTagsP[ip]<=0) //return if we're not at an interpolation point
return;
double n_x, //distance from ip_x to BI_x
n_y, //distance from ip_y to BI_y
nl, //distance from ip to BI
distance, //placeholder
distance2, //placeholder
min, //minimum distance from BI to body node (used in finding closest body node to the BI)
min2, //second closest distance
matDi, //du/dt at BN 1
matDj, //dv/dt at BN 1
matD2i, //du/dt at BN 2
matD2j, //dv/dt at BN 2
matDBIi, //du/dt at BI
matDBIj; //dv/dt at BI
int bodyindex, //index of body node 1
bodyindex2; //index of body node 2
double a11, a12, a13, a14,
a21, a22, a23, a24,
a31, a32, a33, a34,
a41, a42, a43, a44;
/*
* an example of a node setup you might find, this would occur on the top right section of the body
* In this case, we would use the field data from points 2,3 and 4 alongside the bc at BI to interpolate for a value at point 1
* In the general case, field data from 3 nodes will be used alongside the bc at the node closest to the body (node ip in this kernel)
* (x3,y3)__________(x4,y4)
* | |
* | |
* | |
* | *ip |
*\ | |
* \ (x1,y1)__________(x2,y2)
* \
* *(BI_x,BI_y)
* \
*
*Here are some references for solving the equation for bilinear interpolation of values
*http://www.cg.info.hiroshima-cu.ac.jp/~miyazaki/knowledge/teche23.html //for solving a 4x4 matrix exactly
*https://www.physicsforums.com/threads/is-normal-derivative-a-definition.706458/ //for dealing with the normal at the boundary
 *We want to solve the following equation for the a coefficients using the data from the four corners
 *p= @(X,Y) a0 + a1*X + a2*Y + a3*X*Y;
 *This results in a system of equations (before applying the Neumann BC) that looks like this:
* A a q
* |1 x1 y1 x1y1| |a0| = |q1|
* |1 x2 y2 x2y2| |a1| = |q2|
* |1 x3 y3 x3y3| |a2| = |q3|
* |1 x4 y4 x4y4| |a3| = |q4|
*
* when there is a neumann BC, the equation will become:
* |0 nx ny ny*x+nx*y| |a| |Du/Dt . n|
 * nx and ny represent the unit vector components at the BI (in the code they are n_x/nl and n_y/nl)
*
* A
* |a11 a12 a13 a14|
* |a21 a22 a23 a24|
 * |a31 a32 a33 a34|
 * |a41 a42 a43 a44|
*
*
* B
* |b11 b12 b13 b14|
* |b21 b22 b23 b24|
* |b31 b32 b33 b34|
* |b41 b42 b43 b44|
*
* Ainv = B/det(A)
* a = Ainv*q';
*/
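	/*
	 * Note: the b11..b44 values computed at the end of this kernel are the entries
	 * of the adjugate matrix B above, so a = B*q / det(A). The kernel
	 * intermediatePressure_luo then evaluates the contribution of the boundary
	 * value q_k (the corner whose qkflag is set) as
	 *   (b1k + b2k*x + b3k*y + b4k*x*y) * q_k / det(A)
	 * at the hybrid node's (x, y).
	 */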
//find x and y locations of nodes 1,2,3,4 by finding the nodes that bound the image point
while (xv[ii] < image_point_p_x[ip])
ii++;
while (yu[jj] <image_point_p_y[ip])
jj++;
//set x values at corners
x1[ip] = xv[ii-1];
x2[ip] = xv[ii];
x3[ip] = x1[ip];
x4[ip] = x2[ip];
//set y values at corners
y1[ip] = yu[jj-1];
y2[ip] = y1[ip];
y3[ip] = yu[jj];
y4[ip] = y3[ip];
//set index values
index1[ip] = (jj-1)*nx+ii-1,
index2[ip] = (jj-1)*nx+ii,
index3[ip] = jj*nx+ii-1,
index4[ip] = jj*nx+ii;
//
q1flag[ip] = false;
q2flag[ip] = false;
q3flag[ip] = false;
q4flag[ip] = false;
a11 = 1, a12 = x1[ip], a13 = y1[ip], a14 = x1[ip]*y1[ip];
a21 = 1, a22 = x2[ip], a23 = y2[ip], a24 = x2[ip]*y2[ip];
a31 = 1, a32 = x3[ip], a33 = y3[ip], a34 = x3[ip]*y3[ip];
a41 = 1, a42 = x4[ip], a43 = y4[ip], a44 = x4[ip]*y4[ip];
	//setup for Neumann BC
	//move the closest node to the body onto the surface, then calculate the Neumann boundary condition for it
//point 1
if (hybridTagsP[index1[ip]] == ip)
{
//setup
x1[ip] = body_intercept_p_x[ip];
y1[ip] = body_intercept_p_y[ip];
n_x = image_point_p_x[ip] - x1[ip];
n_y = image_point_p_y[ip] - y1[ip];
nl = sqrt(n_x*n_x+n_y*n_y);
//find two closest body nodes
min = 1;
min2 = 1;
for (int k=0; k<totalPoints; k++)
{
distance = sqrt(pow(bx[k]-x1[ip],2)+pow(by[k]-y1[ip],2));
if (distance<min)
{
min = distance;
bodyindex = k;
}
}
for (int k=0; k<totalPoints; k++)
{
distance = sqrt(pow(bx[k]-x1[ip],2)+pow(by[k]-y1[ip],2));
distance2 = sqrt(pow(bx[bodyindex]-bx[k],2)+pow(by[bodyindex]-bx[k],2));
if (distance<min2 && distance2>0)
{
min2 = distance;
bodyindex2 = k;
}
}
//calc Du/Dt at body nodes
matDi = (uB[bodyindex]-uB0[bodyindex])/dt;
matDj = (vB[bodyindex]-vB0[bodyindex])/dt;
matD2i = (uB[bodyindex2]-uB0[bodyindex2])/dt;
matD2j = (vB[bodyindex2]-vB0[bodyindex2])/dt;
//interp to BI
matDBIi = matDi + (matD2i-matDi)/(min+min2)*min;
matDBIj = matDj + (matD2j-matDj)/(min+min2)*min;
q1flag[ip] = true;
q1[ip] = - ( matDBIi*n_x/nl + matDBIj*n_y/nl ) ;
a11 = 0;
a12 = n_x/nl;
a13 = n_y/nl;
a14 = a13*x1[ip]+a12*y1[ip];
}
//point 2
else if (hybridTagsP[index2[ip]] == ip)
{
x2[ip] = body_intercept_p_x[ip];
y2[ip] = body_intercept_p_y[ip];
n_x = image_point_p_x[ip] - x2[ip];
n_y = image_point_p_y[ip] - y2[ip];
nl = sqrt(n_x*n_x+n_y*n_y);
//find two closest body nodes
min = 1;
min2 = 1;
for (int k=0; k<totalPoints; k++)
{
distance = sqrt(pow(bx[k]-x2[ip],2)+pow(by[k]-y2[ip],2));
if (distance<min)
{
min = distance;
bodyindex = k;
}
}
for (int k=0; k<totalPoints; k++)
{
distance = sqrt(pow(bx[k]-x2[ip],2)+pow(by[k]-y2[ip],2));
distance2 = sqrt(pow(bx[bodyindex]-bx[k],2)+pow(by[bodyindex]-bx[k],2));
if (distance<min2 && distance2>0)
{
min2 = distance;
bodyindex2 = k;
}
}
//calc Du/Dt at body nodes
matDi = (uB[bodyindex]-uB0[bodyindex])/dt;
matDj = (vB[bodyindex]-vB0[bodyindex])/dt;
matD2i = (uB[bodyindex2]-uB0[bodyindex2])/dt;
matD2j = (vB[bodyindex2]-vB0[bodyindex2])/dt;
//interp to BI
matDBIi = matDi + (matD2i-matDi)/(min+min2)*min;
matDBIj = matDj + (matD2j-matDj)/(min+min2)*min;
q2flag[ip] = true;
q2[ip] = - ( matDBIi*n_x/nl + matDBIj*n_y/nl ) ;
a21 = 0;
a22 = n_x/nl;
a23 = n_y/nl;
a24 = a23*x2[ip]+a22*y2[ip];
}
//point 3
else if (hybridTagsP[index3[ip]] == ip)
{
x3[ip] = body_intercept_p_x[ip];
y3[ip] = body_intercept_p_y[ip];
n_x = image_point_p_x[ip] - x3[ip];
n_y = image_point_p_y[ip] - y3[ip];
nl = sqrt(n_x*n_x+n_y*n_y);
//find two closest body nodes
min = 1;
min2 = 1;
for (int k=0; k<totalPoints; k++)
{
distance = sqrt(pow(bx[k]-x3[ip],2)+pow(by[k]-y3[ip],2));
if (distance<min)
{
min = distance;
bodyindex = k;
}
}
for (int k=0; k<totalPoints; k++)
{
distance = sqrt(pow(bx[k]-x3[ip],2)+pow(by[k]-y3[ip],2));
distance2 = sqrt(pow(bx[bodyindex]-bx[k],2)+pow(by[bodyindex]-bx[k],2));
if (distance<min2 && distance2>0)
{
min2 = distance;
bodyindex2 = k;
}
}
//calc Du/Dt at body nodes
matDi = (uB[bodyindex]-uB0[bodyindex])/dt;
matDj = (vB[bodyindex]-vB0[bodyindex])/dt;
matD2i = (uB[bodyindex2]-uB0[bodyindex2])/dt;
matD2j = (vB[bodyindex2]-vB0[bodyindex2])/dt;
//interp to BI
matDBIi = matDi + (matD2i-matDi)/(min+min2)*min;
matDBIj = matDj + (matD2j-matDj)/(min+min2)*min;
q3flag[ip] = true;
q3[ip] = - ( matDBIi*n_x/nl + matDBIj*n_y/nl ) ;
a31 = 0;
a32 = n_x/nl;
a33 = n_y/nl;
a34 = a33*x3[ip]+a32*y3[ip];
}
//4
if (hybridTagsP[index4[ip]] == ip)
{
x4[ip] = body_intercept_p_x[ip];
y4[ip] = body_intercept_p_y[ip];
n_x = image_point_p_x[ip] - x4[ip];
n_y = image_point_p_y[ip] - y4[ip];
nl = sqrt(n_x*n_x+n_y*n_y);
//find two closest body nodes
min = 1;
min2 = 1;
for (int k=0; k<totalPoints; k++)
{
distance = sqrt(pow(bx[k]-x4[ip],2)+pow(by[k]-y4[ip],2));
if (distance<min)
{
min = distance;
bodyindex = k;
}
}
for (int k=0; k<totalPoints; k++)
{
distance = sqrt(pow(bx[k]-x4[ip],2)+pow(by[k]-y4[ip],2));
distance2 = sqrt(pow(bx[bodyindex]-bx[k],2)+pow(by[bodyindex]-bx[k],2));
if (distance<min2 && distance2>0)
{
min2 = distance;
bodyindex2 = k;
}
}
//calc Du/Dt at body nodes
matDi = (uB[bodyindex]-uB0[bodyindex])/dt;
matDj = (vB[bodyindex]-vB0[bodyindex])/dt;
matD2i = (uB[bodyindex2]-uB0[bodyindex2])/dt;
matD2j = (vB[bodyindex2]-vB0[bodyindex2])/dt;
//interp to BI
matDBIi = matDi + (matD2i-matDi)/(min+min2)*min;
matDBIj = matDj + (matD2j-matDj)/(min+min2)*min;
q4flag[ip] = true;
q4[ip] = - ( matDBIi*n_x/nl + matDBIj*n_y/nl ) ;
a41 = 0;
a42 = n_x/nl;
a43 = n_y/nl;
a44 = a43*x4[ip]+a42*y4[ip];
}
detA[ip] = a11*a22*a33*a44 + a11*a23*a34*a42 + a11*a24*a32*a43
+a12*a21*a34*a43 + a12*a23*a31*a44 + a12*a24*a33*a41
+a13*a21*a32*a44 + a13*a22*a34*a41 + a13*a24*a31*a42
+a14*a21*a33*a42 + a14*a22*a31*a43 + a14*a23*a32*a41
-a11*a22*a34*a43 - a11*a23*a32*a44 - a11*a24*a33*a42
-a12*a21*a33*a44 - a12*a23*a34*a41 - a12*a24*a31*a43
-a13*a21*a34*a42 - a13*a22*a31*a44 - a13*a24*a32*a41
-a14*a21*a32*a43 - a14*a22*a33*a41 - a14*a23*a31*a42;
b11[ip] = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42;
b12[ip] = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43;
b13[ip] = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42;
b14[ip] = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33;
b21[ip] = a21*a34*a43 + a23*a31*a44 + a24*a33*a41 - a21*a33*a44 - a23*a34*a41 - a24*a31*a43;
b22[ip] = a11*a33*a44 + a13*a34*a41 + a14*a31*a43 - a11*a34*a43 - a13*a31*a44 - a14*a33*a41;
b23[ip] = a11*a24*a43 + a13*a21*a44 + a14*a23*a41 - a11*a23*a44 - a13*a24*a41 - a14*a21*a43;
b24[ip] = a11*a23*a34 + a13*a24*a31 + a14*a21*a33 - a11*a24*a33 - a13*a21*a34 - a14*a23*a31;
b31[ip] = a21*a32*a44 + a22*a34*a41 + a24*a31*a42 - a21*a34*a42 - a22*a31*a44 - a24*a32*a41;
b32[ip] = a11*a34*a42 + a12*a31*a44 + a14*a32*a41 - a11*a32*a44 - a12*a34*a41 - a14*a31*a42;
b33[ip] = a11*a22*a44 + a12*a24*a41 + a14*a21*a42 - a11*a24*a42 - a12*a21*a44 - a14*a22*a41;
b34[ip] = a11*a24*a32 + a12*a21*a34 + a14*a22*a31 - a11*a22*a34 - a12*a24*a31 - a14*a21*a32;
b41[ip] = a21*a33*a42 + a22*a31*a43 + a23*a32*a41 - a21*a32*a43 - a22*a33*a41 - a23*a31*a42;
b42[ip] = a11*a32*a43 + a12*a33*a41 + a13*a31*a42 - a11*a33*a42 - a12*a31*a43 - a13*a32*a41;
b43[ip] = a11*a23*a42 + a12*a21*a43 + a13*a22*a41 - a11*a22*a43 - a12*a23*a41 - a13*a21*a42;
b44[ip] = a11*a22*a33 + a12*a23*a31 + a13*a21*a32 - a11*a23*a32 - a12*a21*a33 - a13*a22*a31;
}
//this kernel is serial: it loops over the whole window without using thread indices
__global__
void hybridPressureNodeCount(int *countD, int *index1, int *index2, int *index3, int *index4, int *hybridTagsP,
int *i_start, int *j_start, int width, int height, int nx, int ny)
{
int ip = 0, count = 0;
for (int j=j_start[0]; j<j_start[0]+height; j++)
{
for (int i=i_start[0]; i<i_start[0]+width; i++)
{
ip = j*nx+i;
if (hybridTagsP[ip]>0)
{
if (index1[ip] != ip+nx && index1[ip] != ip-1 && index1[ip] != ip && index1[ip] != ip+1 && index1[ip] != ip-nx)
{
count+=1;
}
if (index2[ip] != ip+nx && index2[ip] != ip-1 && index2[ip] != ip && index2[ip] != ip+1 && index2[ip] != ip-nx)
{
count+=1;
}
if (index3[ip] != ip+nx && index3[ip] != ip-1 && index3[ip] != ip && index3[ip] != ip+1 && index3[ip] != ip-nx)
{
count+=1;
}
if (index4[ip] != ip+nx && index4[ip] != ip-1 && index4[ip] != ip && index4[ip] != ip+1 && index4[ip] != ip-nx)
{
count+=1;
}
countD[ip] = count;
}
}
}
}
}
|
6098e3c37c8773b185f33bee86e35f19b7b55ff1.cu
|
/***************************************************************************//**
* \file intermediatePressure.cu
* \author Christopher Minar ([email protected])
 * \brief kernels to generate the right hand side of the Poisson equation
*/
#include "intermediatePressure.h"
/**
* \namespace kernels
* \brief Contains all the custom-written CUDA kernels.
*/
namespace kernels
{
__global__
void intermediatePressure_luo(double *rhs2, double *detA, int *hybridTagsP, double *alpha, double *stencilCoef,
double *xv, double *yu,
double *b11, double *b12, double *b13, double *b14, double *b21, double *b22, double *b23, double *b24,
double *b31, double *b32, double *b33, double *b34, double *b41, double *b42, double *b43, double *b44,
double *q1, double *q2, double *q3, double *q4,
bool *q1flag, bool *q2flag, bool *q3flag, bool *q4flag,
int nx, int ny)
{
int ip = threadIdx.x + blockDim.x * blockIdx.x;
if (ip >= nx*ny || hybridTagsP[ip]<=0)
return;
int I = ip % nx,
J = ip / nx;
double temp = 0;
double x = xv[I],
y = yu[J];
if (q1flag[ip] == true)
{
temp = (b11[ip] + b21[ip]*x + b31[ip]*y + b41[ip]*x*y)*q1[ip]/detA[ip];
}
else if (q2flag[ip] == true)
{
temp = (b12[ip] + b22[ip]*x + b32[ip]*y + b42[ip]*x*y)*q2[ip]/detA[ip];
}
else if (q3flag[ip] == true)
{
temp = (b13[ip] + b23[ip]*x + b33[ip]*y + b43[ip]*x*y)*q3[ip]/detA[ip];
}
else if (q4flag[ip] == true)
{
temp = (b14[ip] + b24[ip]*x + b34[ip]*y + b44[ip]*x*y)*q4[ip]/detA[ip];
}
rhs2[ip] = (1-alpha[ip])*rhs2[ip]/stencilCoef[ip] + alpha[ip]*temp;
}
//calc b, det(A), q, qflag, index
__global__
void interpolate_P_HN_setup(double *detA, int *hybridTagsP, double *bx, double *by,
double *uB, double *uB0, double *vB, double *vB0,
double *yu, double *xv,
double *body_intercept_p_x, double *body_intercept_p_y, double *image_point_p_x, double *image_point_p_y,
int *i_start, int *j_start, int width, int nx, int ny, double dt, double totalPoints,
double *b11, double *b12, double *b13, double *b14, double *b21, double *b22, double *b23, double *b24,
double *b31, double *b32, double *b33, double *b34, double *b41, double *b42, double *b43, double *b44,
double *q1, double *q2, double *q3, double *q4,
bool *q1flag, bool *q2flag, bool *q3flag, bool *q4flag,
int *index1, int *index2, int *index3, int *index4,
double *x1, double *x2, double *x3, double *x4,
double *y1, double *y2, double *y3, double *y4,
double *dudt, double *ududx, double *vdudy, double *dvdt, double *udvdx, double *vdvdy)//test
{//flag u not used anymore
int idx = threadIdx.x + blockDim.x * blockIdx.x,
i = idx % (width),
j = idx / (width),
I = i_start[0] + i,
J = j_start[0] + j,
ip = J*nx + I,
ii= I-5,
jj = J-5;
//if (ip > J*nx + I) //return if we're out of bound
if (ip >= nx*ny)
return;
if (hybridTagsP[ip]<=0) //return if we're not at an interpolation point
return;
double n_x, //distance from ip_x to BI_x
n_y, //distance from ip_y to BI_y
nl, //distance from ip to BI
distance, //placeholder
distance2, //placeholder
min, //minimum distance from BI to body node (used in finding closest body node to the BI)
min2, //second closest distance
matDi, //du/dt at BN 1
matDj, //dv/dt at BN 1
matD2i, //du/dt at BN 2
matD2j, //dv/dt at BN 2
matDBIi, //du/dt at BI
matDBIj; //dv/dt at BI
int bodyindex, //index of body node 1
bodyindex2; //index of body node 2
double a11, a12, a13, a14,
a21, a22, a23, a24,
a31, a32, a33, a34,
a41, a42, a43, a44;
/*
* an example of a node setup you might find, this would occur on the top right section of the body
* In this case, we would use the field data from points 2,3 and 4 alongside the bc at BI to interpolate for a value at point 1
* In the general case, field data from 3 nodes will be used alongside the bc at the node closest to the body (node ip in this kernel)
* (x3,y3)__________(x4,y4)
* | |
* | |
* | |
* | *ip |
*\ | |
* \ (x1,y1)__________(x2,y2)
* \
* *(BI_x,BI_y)
* \
*
*Here are some references for solving the equation for bilinear interpolation of values
*http://www.cg.info.hiroshima-cu.ac.jp/~miyazaki/knowledge/teche23.html //for solving a 4x4 matrix exactly
*https://www.physicsforums.com/threads/is-normal-derivative-a-definition.706458/ //for dealing with the normal at the boundary
 *We want to solve the following equation for the a coefficients using the data from the four corners
 *p= @(X,Y) a0 + a1*X + a2*Y + a3*X*Y;
 *This results in a system of equations (before applying the Neumann BC) that looks like this:
* A a q
* |1 x1 y1 x1y1| |a0| = |q1|
* |1 x2 y2 x2y2| |a1| = |q2|
* |1 x3 y3 x3y3| |a2| = |q3|
* |1 x4 y4 x4y4| |a3| = |q4|
*
* when there is a neumann BC, the equation will become:
* |0 nx ny ny*x+nx*y| |a| |Du/Dt . n|
 * nx and ny represent the unit vector components at the BI (in the code they are n_x/nl and n_y/nl)
*
* A
* |a11 a12 a13 a14|
* |a21 a22 a23 a24|
 * |a31 a32 a33 a34|
 * |a41 a42 a43 a44|
*
*
* B
* |b11 b12 b13 b14|
* |b21 b22 b23 b24|
* |b31 b32 b33 b34|
* |b41 b42 b43 b44|
*
* Ainv = B/det(A)
* a = Ainv*q';
*/
//find x and y locations of nodes 1,2,3,4 by finding the nodes that bound the image point
while (xv[ii] < image_point_p_x[ip])
ii++;
while (yu[jj] <image_point_p_y[ip])
jj++;
//set x values at corners
x1[ip] = xv[ii-1];
x2[ip] = xv[ii];
x3[ip] = x1[ip];
x4[ip] = x2[ip];
//set y values at corners
y1[ip] = yu[jj-1];
y2[ip] = y1[ip];
y3[ip] = yu[jj];
y4[ip] = y3[ip];
//set index values
index1[ip] = (jj-1)*nx+ii-1,
index2[ip] = (jj-1)*nx+ii,
index3[ip] = jj*nx+ii-1,
index4[ip] = jj*nx+ii;
//
q1flag[ip] = false;
q2flag[ip] = false;
q3flag[ip] = false;
q4flag[ip] = false;
a11 = 1, a12 = x1[ip], a13 = y1[ip], a14 = x1[ip]*y1[ip];
a21 = 1, a22 = x2[ip], a23 = y2[ip], a24 = x2[ip]*y2[ip];
a31 = 1, a32 = x3[ip], a33 = y3[ip], a34 = x3[ip]*y3[ip];
a41 = 1, a42 = x4[ip], a43 = y4[ip], a44 = x4[ip]*y4[ip];
	//setup for Neumann BC
	//move the closest node to the body onto the surface, then calculate the Neumann boundary condition for it
//point 1
if (hybridTagsP[index1[ip]] == ip)
{
//setup
x1[ip] = body_intercept_p_x[ip];
y1[ip] = body_intercept_p_y[ip];
n_x = image_point_p_x[ip] - x1[ip];
n_y = image_point_p_y[ip] - y1[ip];
nl = sqrt(n_x*n_x+n_y*n_y);
//find two closest body nodes
min = 1;
min2 = 1;
for (int k=0; k<totalPoints; k++)
{
distance = sqrt(pow(bx[k]-x1[ip],2)+pow(by[k]-y1[ip],2));
if (distance<min)
{
min = distance;
bodyindex = k;
}
}
for (int k=0; k<totalPoints; k++)
{
distance = sqrt(pow(bx[k]-x1[ip],2)+pow(by[k]-y1[ip],2));
distance2 = sqrt(pow(bx[bodyindex]-bx[k],2)+pow(by[bodyindex]-bx[k],2));
if (distance<min2 && distance2>0)
{
min2 = distance;
bodyindex2 = k;
}
}
//calc Du/Dt at body nodes
matDi = (uB[bodyindex]-uB0[bodyindex])/dt;
matDj = (vB[bodyindex]-vB0[bodyindex])/dt;
matD2i = (uB[bodyindex2]-uB0[bodyindex2])/dt;
matD2j = (vB[bodyindex2]-vB0[bodyindex2])/dt;
//interp to BI
matDBIi = matDi + (matD2i-matDi)/(min+min2)*min;
matDBIj = matDj + (matD2j-matDj)/(min+min2)*min;
q1flag[ip] = true;
q1[ip] = - ( matDBIi*n_x/nl + matDBIj*n_y/nl ) ;
a11 = 0;
a12 = n_x/nl;
a13 = n_y/nl;
a14 = a13*x1[ip]+a12*y1[ip];
}
//point 2
else if (hybridTagsP[index2[ip]] == ip)
{
x2[ip] = body_intercept_p_x[ip];
y2[ip] = body_intercept_p_y[ip];
n_x = image_point_p_x[ip] - x2[ip];
n_y = image_point_p_y[ip] - y2[ip];
nl = sqrt(n_x*n_x+n_y*n_y);
//find two closest body nodes
min = 1;
min2 = 1;
for (int k=0; k<totalPoints; k++)
{
distance = sqrt(pow(bx[k]-x2[ip],2)+pow(by[k]-y2[ip],2));
if (distance<min)
{
min = distance;
bodyindex = k;
}
}
for (int k=0; k<totalPoints; k++)
{
distance = sqrt(pow(bx[k]-x2[ip],2)+pow(by[k]-y2[ip],2));
distance2 = sqrt(pow(bx[bodyindex]-bx[k],2)+pow(by[bodyindex]-bx[k],2));
if (distance<min2 && distance2>0)
{
min2 = distance;
bodyindex2 = k;
}
}
//calc Du/Dt at body nodes
matDi = (uB[bodyindex]-uB0[bodyindex])/dt;
matDj = (vB[bodyindex]-vB0[bodyindex])/dt;
matD2i = (uB[bodyindex2]-uB0[bodyindex2])/dt;
matD2j = (vB[bodyindex2]-vB0[bodyindex2])/dt;
//interp to BI
matDBIi = matDi + (matD2i-matDi)/(min+min2)*min;
matDBIj = matDj + (matD2j-matDj)/(min+min2)*min;
q2flag[ip] = true;
q2[ip] = - ( matDBIi*n_x/nl + matDBIj*n_y/nl ) ;
a21 = 0;
a22 = n_x/nl;
a23 = n_y/nl;
a24 = a23*x2[ip]+a22*y2[ip];
}
//point 3
else if (hybridTagsP[index3[ip]] == ip)
{
x3[ip] = body_intercept_p_x[ip];
y3[ip] = body_intercept_p_y[ip];
n_x = image_point_p_x[ip] - x3[ip];
n_y = image_point_p_y[ip] - y3[ip];
nl = sqrt(n_x*n_x+n_y*n_y);
//find two closest body nodes
min = 1;
min2 = 1;
for (int k=0; k<totalPoints; k++)
{
distance = sqrt(pow(bx[k]-x3[ip],2)+pow(by[k]-y3[ip],2));
if (distance<min)
{
min = distance;
bodyindex = k;
}
}
for (int k=0; k<totalPoints; k++)
{
distance = sqrt(pow(bx[k]-x3[ip],2)+pow(by[k]-y3[ip],2));
distance2 = sqrt(pow(bx[bodyindex]-bx[k],2)+pow(by[bodyindex]-bx[k],2));
if (distance<min2 && distance2>0)
{
min2 = distance;
bodyindex2 = k;
}
}
//calc Du/Dt at body nodes
matDi = (uB[bodyindex]-uB0[bodyindex])/dt;
matDj = (vB[bodyindex]-vB0[bodyindex])/dt;
matD2i = (uB[bodyindex2]-uB0[bodyindex2])/dt;
matD2j = (vB[bodyindex2]-vB0[bodyindex2])/dt;
//interp to BI
matDBIi = matDi + (matD2i-matDi)/(min+min2)*min;
matDBIj = matDj + (matD2j-matDj)/(min+min2)*min;
q3flag[ip] = true;
q3[ip] = - ( matDBIi*n_x/nl + matDBIj*n_y/nl ) ;
a31 = 0;
a32 = n_x/nl;
a33 = n_y/nl;
a34 = a33*x3[ip]+a32*y3[ip];
}
//4
if (hybridTagsP[index4[ip]] == ip)
{
x4[ip] = body_intercept_p_x[ip];
y4[ip] = body_intercept_p_y[ip];
n_x = image_point_p_x[ip] - x4[ip];
n_y = image_point_p_y[ip] - y4[ip];
nl = sqrt(n_x*n_x+n_y*n_y);
//find two closest body nodes
min = 1;
min2 = 1;
for (int k=0; k<totalPoints; k++)
{
distance = sqrt(pow(bx[k]-x4[ip],2)+pow(by[k]-y4[ip],2));
if (distance<min)
{
min = distance;
bodyindex = k;
}
}
for (int k=0; k<totalPoints; k++)
{
distance = sqrt(pow(bx[k]-x4[ip],2)+pow(by[k]-y4[ip],2));
distance2 = sqrt(pow(bx[bodyindex]-bx[k],2)+pow(by[bodyindex]-bx[k],2));
if (distance<min2 && distance2>0)
{
min2 = distance;
bodyindex2 = k;
}
}
//calc Du/Dt at body nodes
matDi = (uB[bodyindex]-uB0[bodyindex])/dt;
matDj = (vB[bodyindex]-vB0[bodyindex])/dt;
matD2i = (uB[bodyindex2]-uB0[bodyindex2])/dt;
matD2j = (vB[bodyindex2]-vB0[bodyindex2])/dt;
//interp to BI
matDBIi = matDi + (matD2i-matDi)/(min+min2)*min;
matDBIj = matDj + (matD2j-matDj)/(min+min2)*min;
q4flag[ip] = true;
q4[ip] = - ( matDBIi*n_x/nl + matDBIj*n_y/nl ) ;
a41 = 0;
a42 = n_x/nl;
a43 = n_y/nl;
a44 = a43*x4[ip]+a42*y4[ip];
}
detA[ip] = a11*a22*a33*a44 + a11*a23*a34*a42 + a11*a24*a32*a43
+a12*a21*a34*a43 + a12*a23*a31*a44 + a12*a24*a33*a41
+a13*a21*a32*a44 + a13*a22*a34*a41 + a13*a24*a31*a42
+a14*a21*a33*a42 + a14*a22*a31*a43 + a14*a23*a32*a41
-a11*a22*a34*a43 - a11*a23*a32*a44 - a11*a24*a33*a42
-a12*a21*a33*a44 - a12*a23*a34*a41 - a12*a24*a31*a43
-a13*a21*a34*a42 - a13*a22*a31*a44 - a13*a24*a32*a41
-a14*a21*a32*a43 - a14*a22*a33*a41 - a14*a23*a31*a42;
b11[ip] = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42;
b12[ip] = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43;
b13[ip] = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42;
b14[ip] = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33;
b21[ip] = a21*a34*a43 + a23*a31*a44 + a24*a33*a41 - a21*a33*a44 - a23*a34*a41 - a24*a31*a43;
b22[ip] = a11*a33*a44 + a13*a34*a41 + a14*a31*a43 - a11*a34*a43 - a13*a31*a44 - a14*a33*a41;
b23[ip] = a11*a24*a43 + a13*a21*a44 + a14*a23*a41 - a11*a23*a44 - a13*a24*a41 - a14*a21*a43;
b24[ip] = a11*a23*a34 + a13*a24*a31 + a14*a21*a33 - a11*a24*a33 - a13*a21*a34 - a14*a23*a31;
b31[ip] = a21*a32*a44 + a22*a34*a41 + a24*a31*a42 - a21*a34*a42 - a22*a31*a44 - a24*a32*a41;
b32[ip] = a11*a34*a42 + a12*a31*a44 + a14*a32*a41 - a11*a32*a44 - a12*a34*a41 - a14*a31*a42;
b33[ip] = a11*a22*a44 + a12*a24*a41 + a14*a21*a42 - a11*a24*a42 - a12*a21*a44 - a14*a22*a41;
b34[ip] = a11*a24*a32 + a12*a21*a34 + a14*a22*a31 - a11*a22*a34 - a12*a24*a31 - a14*a21*a32;
b41[ip] = a21*a33*a42 + a22*a31*a43 + a23*a32*a41 - a21*a32*a43 - a22*a33*a41 - a23*a31*a42;
b42[ip] = a11*a32*a43 + a12*a33*a41 + a13*a31*a42 - a11*a33*a42 - a12*a31*a43 - a13*a32*a41;
b43[ip] = a11*a23*a42 + a12*a21*a43 + a13*a22*a41 - a11*a22*a43 - a12*a23*a41 - a13*a21*a42;
b44[ip] = a11*a22*a33 + a12*a23*a31 + a13*a21*a32 - a11*a23*a32 - a12*a21*a33 - a13*a22*a31;
}
//this kernel is serial: it loops over the whole window without using thread indices
__global__
void hybridPressureNodeCount(int *countD, int *index1, int *index2, int *index3, int *index4, int *hybridTagsP,
int *i_start, int *j_start, int width, int height, int nx, int ny)
{
int ip = 0, count = 0;
for (int j=j_start[0]; j<j_start[0]+height; j++)
{
for (int i=i_start[0]; i<i_start[0]+width; i++)
{
ip = j*nx+i;
if (hybridTagsP[ip]>0)
{
if (index1[ip] != ip+nx && index1[ip] != ip-1 && index1[ip] != ip && index1[ip] != ip+1 && index1[ip] != ip-nx)
{
count+=1;
}
if (index2[ip] != ip+nx && index2[ip] != ip-1 && index2[ip] != ip && index2[ip] != ip+1 && index2[ip] != ip-nx)
{
count+=1;
}
if (index3[ip] != ip+nx && index3[ip] != ip-1 && index3[ip] != ip && index3[ip] != ip+1 && index3[ip] != ip-nx)
{
count+=1;
}
if (index4[ip] != ip+nx && index4[ip] != ip-1 && index4[ip] != ip && index4[ip] != ip+1 && index4[ip] != ip-nx)
{
count+=1;
}
countD[ip] = count;
}
}
}
}
}
|
4ec2f7348f1a1f599fc7df90753277ad46b3a795.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/multi_class_accuracy_op.h"
#include "caffe2/utils/GpuAtomics.cuh"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
__global__ void MultiClassAccuracyKernel(const int N, const int D, const float* Xdata,
const int* labeldata, float* accuracies, int* amounts) {
CUDA_1D_KERNEL_LOOP(i, N) {
float maxval = Xdata[i * D];
int maxid = 0;
for (int j = 1; j < D; ++j) {
if (Xdata[i * D + j] > maxval) {
maxval = Xdata[i * D + j];
maxid = j;
}
}
int labelid = labeldata[i];
if (maxid == labelid) {
gpu_atomic_add(accuracies + labelid, static_cast<float>(1));
}
gpu_atomic_add(amounts + labelid, static_cast<int>(1));
}
}
__global__ void MultiClassAccuracyDivideKernel(
const int D, float* accuracies, const int* amounts) {
CUDA_1D_KERNEL_LOOP(i, D) {
if (amounts[i]) {
accuracies[i] /= amounts[i];
}
}
}
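// Note: per-class accuracy is produced in two passes; the first kernel atomically
// accumulates correct hits and per-class sample counts, and this kernel divides
// the two while skipping classes with amounts[i] == 0.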
} // namespace
template <>
bool MultiClassAccuracyOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(PREDICTION);
auto& label = Input(LABEL);
TORCH_DCHECK_EQ(X.dim(), 2);
// amount, number of instances
int N = X.dim32(0);
// dimension, number of classes
int D = X.dim32(1);
TORCH_DCHECK_EQ(label.dim(), 1);
TORCH_DCHECK_EQ(label.dim32(0), N);
auto* Y0 = Output(0, {D}, at::dtype<float>());
auto* Y1 = Output(1, {D}, at::dtype<int>());
const float* Xdata = X.data<float>();
const int* labeldata = label.data<int>();
float* accuracies = Y0->template mutable_data<float>();
int* amounts = Y1->template mutable_data<int>();
math::Set<float, CUDAContext>(D, 0.0, accuracies, &context_);
math::Set<int, CUDAContext>(D, 0, amounts, &context_);
hipLaunchKernelGGL(( MultiClassAccuracyKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
N, D, Xdata, labeldata, accuracies, amounts);
C10_HIP_KERNEL_LAUNCH_CHECK();
hipLaunchKernelGGL(( MultiClassAccuracyDivideKernel), dim3(CAFFE_GET_BLOCKS(D)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
D, accuracies, amounts);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
MultiClassAccuracy, MultiClassAccuracyOp<float, CUDAContext>);
} // namespace caffe2
|
4ec2f7348f1a1f599fc7df90753277ad46b3a795.cu
|
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/multi_class_accuracy_op.h"
#include "caffe2/utils/GpuAtomics.cuh"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
__global__ void MultiClassAccuracyKernel(const int N, const int D, const float* Xdata,
const int* labeldata, float* accuracies, int* amounts) {
CUDA_1D_KERNEL_LOOP(i, N) {
float maxval = Xdata[i * D];
int maxid = 0;
for (int j = 1; j < D; ++j) {
if (Xdata[i * D + j] > maxval) {
maxval = Xdata[i * D + j];
maxid = j;
}
}
int labelid = labeldata[i];
if (maxid == labelid) {
gpu_atomic_add(accuracies + labelid, static_cast<float>(1));
}
gpu_atomic_add(amounts + labelid, static_cast<int>(1));
}
}
__global__ void MultiClassAccuracyDivideKernel(
const int D, float* accuracies, const int* amounts) {
CUDA_1D_KERNEL_LOOP(i, D) {
if (amounts[i]) {
accuracies[i] /= amounts[i];
}
}
}
} // namespace
template <>
bool MultiClassAccuracyOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(PREDICTION);
auto& label = Input(LABEL);
TORCH_DCHECK_EQ(X.dim(), 2);
// amount, number of instances
int N = X.dim32(0);
// dimension, number of classes
int D = X.dim32(1);
TORCH_DCHECK_EQ(label.dim(), 1);
TORCH_DCHECK_EQ(label.dim32(0), N);
auto* Y0 = Output(0, {D}, at::dtype<float>());
auto* Y1 = Output(1, {D}, at::dtype<int>());
const float* Xdata = X.data<float>();
const int* labeldata = label.data<int>();
float* accuracies = Y0->template mutable_data<float>();
int* amounts = Y1->template mutable_data<int>();
math::Set<float, CUDAContext>(D, 0.0, accuracies, &context_);
math::Set<int, CUDAContext>(D, 0, amounts, &context_);
MultiClassAccuracyKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
N, D, Xdata, labeldata, accuracies, amounts);
C10_CUDA_KERNEL_LAUNCH_CHECK();
MultiClassAccuracyDivideKernel<<<CAFFE_GET_BLOCKS(D), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
D, accuracies, amounts);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
MultiClassAccuracy, MultiClassAccuracyOp<float, CUDAContext>);
} // namespace caffe2
|
ed6ee6c5dc2d64d4253529f8ea2875821c6c075b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h> // hipMalloc, hipMemcpy, etc.
#include <hipsparse.h> // hipsparseSpMV
#include <stdio.h> // printf
#include <stdlib.h> // EXIT_FAILURE
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <iostream>
#include <string.h>
#define CHECK_CUDA(func) \
{ \
hipError_t status = (func); \
if (status != hipSuccess) { \
printf("CUDA API failed at line %d with error: %s (%d)\n", \
__LINE__, hipGetErrorString(status), status); \
return EXIT_FAILURE; \
} \
}
#define CHECK_CUSPARSE(func) \
{ \
hipsparseStatus_t status = (func); \
if (status != HIPSPARSE_STATUS_SUCCESS) { \
printf("CUSPARSE API failed at line %d with error: %s (%d)\n", \
__LINE__, hipsparseGetErrorString(status), status); \
return EXIT_FAILURE; \
} \
}
struct BS_DiffEq { //struct for parameters of BS
double _nu1, _nu2, _dt, _sigma, _intRate;
long long int _NS, _NT;
};
__global__
void calc_coef(double* _S, double* A, double* B, double* C, double* nA, double* nB, double* nC, BS_DiffEq* pbs_diff_eq) //kernel to calculate coefficient matrices
{
int th = threadIdx.x; //thread number in block
int blk_sz = blockDim.x; //block size
int blk_id = blockIdx.x; //block number in grid
int index = blk_sz * blk_id + th; // global index
double _nu1 = pbs_diff_eq->_nu1; // dt / dS^2
double _nu2 = pbs_diff_eq->_nu2; // dt / dS
double _dt = pbs_diff_eq->_dt; // time step size
double _volatility = pbs_diff_eq->_sigma;
double _intRate = pbs_diff_eq->_intRate;
long int N_S = pbs_diff_eq->_NS; //number of stock levels
long int N_t = pbs_diff_eq->_NT; //number of time levels
double a_k = 0.5 * pow(_volatility, 2) * pow(_S[index], 2);
double b_k = _intRate * _S[index];
double c_k = -_intRate;
double Ak = 0.0;
double Bk = 0.0;
double Ck = 0.0;
if (index < N_S) //conditional for fitting FD column size with GPU grid
{
Ak = 0.5 * _nu1 * a_k - 0.25 * _nu2 * b_k;
Bk = -_nu1 * a_k + 0.5 * _dt * c_k;
Ck = 0.5 * _nu1 * a_k + 0.25 * _nu2 * b_k;
A[index] = -Ak; //lhs A
B[index] = 1.0 - Bk; //lhs B
C[index] = -Ck; //lhs C
nA[index] = Ak; //rhs A
nB[index] = 1.0 + Bk; //rhs B
nC[index] = Ck; //rhs C
if (index == N_S - 1) // lower boundary condition
{
A[index] = -Ak + Ck; //lower boundary condition for lhs A
B[index] = 1.0 - Bk - 2 * Ck; //lower boundary condition for lhs B
nA[index] = Ak - Ck; //lower boundary condition for rhs A
nB[index] = 1.0 + Bk + 2 * Ck; //lower boundary condition for rhs B
}
}
}
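// Note (sketch of the scheme): calc_coef effectively builds a Crank-Nicolson-type
// splitting, an implicit left-hand tridiagonal (A, B, C) = (-Ak, 1 - Bk, -Ck) and an
// explicit right-hand tridiagonal (nA, nB, nC) = (Ak, 1 + Bk, Ck). The last-row
// modification eliminates the ghost node, which corresponds to assuming linearity
// (V_{N+1} ~ 2*V_N - V_{N-1}) at the far (high-S) boundary.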
//function to compute the mean of the option value array, sampled at roughly 100 points
double mean(double *V, double Vf, double Vo, long long int N_S)
{
    double acum=0.0; //accumulator
double mn=0.0; //mean
int inc=N_S/100;
for (long long int i = 0; i < N_S; i+=inc)
{
if(i==0)
{
acum += Vo; //Initial Value
}
acum+=V[i];
if(i==N_S-1)
{
acum += Vf; //Last Value
}
}
mn=acum/100; //Normalize
return mn;
}
__global__
void sp_matvec(double* nA, double* nB, double* nC, double* V, double* Vmul, long int N_S)
{
int th = threadIdx.x; //thread number in block
int blk_sz = blockDim.x; //block size
int blk_id = blockIdx.x; //block number in grid
int index = blk_sz * blk_id + th; // global index
extern __shared__ double shdV[]; //dynamic shared memory array
int thp1 = th + 1;
if (index < N_S) //conditional for fitting FD column size with GPU grid
{
shdV[thp1] = V[index]; // each thread stores its current value
shdV[0] = 0.0; // initialize lower neighbour
shdV[blk_sz + 1] = 0.0; // initialize upper neighbour
if (blk_id > 0) // if not in first block
{
//lower neighbour comes from lower neighbouring block
shdV[0] = V[blk_id * blk_sz - 1];
}
if (blk_id < (gridDim.x - 1)) // if not in last block
{
//upper neighbour comes from upper neighbouring block
shdV[blk_sz + 1] = V[(blk_id + 1) * blk_sz];
}
double s = shdV[thp1], sm1 = 0.0, sp1 = 0.0; //define FD stencil
__syncthreads(); //wait for all threads in block to gather their values
if ((index - 1) > 0)
{
sm1 = shdV[thp1 - 1]; //read level i-1 from shared memory
}
if ((index + 1) < N_S)
{
sp1 = shdV[thp1 + 1]; //read level i+1 from shared memory
}
if (index == 0) //is first row
{
//First row tridiagonal matrix - dense vector product
Vmul[index] = nB[index] * s + nC[index] * sp1;
}
else
{
if (index == N_S - 1) //is last row
{
//Last row tridiagonal matrix - dense vector product
Vmul[index] = nA[index] * sm1 + nB[index] * s;
}
else
{
//Middle row tridiagonal matrix - dense vector product
Vmul[index] = nA[index] * sm1 + nB[index] * s + nC[index] *sp1;
}
}
}
}
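// Note: sp_matvec evaluates the explicit tridiagonal product
//   Vmul[i] = nA[i]*V[i-1] + nB[i]*V[i] + nC[i]*V[i+1]
// staging each block's V values plus one halo element on each side in shared
// memory, so neighbouring reads inside a block avoid repeated global loads.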
int main(int argc, char** argv)
//int main(void)
{
// Host problem definition
int gridsize = atoi(argv[1]); // grid size (number of asset levels)
double volatility = atof(argv[2]);
double expiration = atof(argv[3]);
int blksize = atoi(argv[4]); //block size
int tstsize = atoi(argv[5]); //number of time levels
double Vol = volatility, Int_Rate = 0.05, Expiration = expiration, Strike = 100.0; //params of BS
int block_size = blksize;
long long int N_Sp = gridsize; // total number of asset levels
long long int N_S = N_Sp - 2; // total number of asset levels in FD matrices without boundary elements
clock_t t0, t1, t2; //timing variables
double t1sum = 0.0; //timing sum
double t2sum = 0.0; //timing sum
double dS = (2 * Strike) / N_Sp; //asset step
long long int N_t = tstsize;
double dt = Expiration / N_t; //time step
const int nrh = 1; // number of right hand sides in algebraic solver
const float h_one = 1;
const float h_zero = 0;
size_t lworkInBytes = 0;
char* d_work = NULL;
t0 = clock(); //start timer for setup
double* hX = (double*)malloc(N_Sp * sizeof(*hX)); //host V^k array
double* hY = (double*)malloc(N_S * sizeof(*hY)); //host V^k+1 array
double* S = (double*)malloc(N_Sp * sizeof(*S)); // host stock array
double* A = (double*)malloc(N_S * sizeof(*A)); // host coefficient lhs A array
double* B = (double*)malloc(N_S * sizeof(*B)); // host coefficient lhs B array
double* C = (double*)malloc(N_S * sizeof(*C)); // host coefficient lhs C array
double* nA = (double*)malloc(N_S * sizeof(*A)); // host coefficient rhs A array
double* nB = (double*)malloc(N_S * sizeof(*B)); // host coefficient rhs B array
double* nC = (double*)malloc(N_S * sizeof(*C)); // host coefficient rhs C array
BS_DiffEq* pbs_diff_eq = (BS_DiffEq*)malloc(sizeof(*pbs_diff_eq)); // params structure
double alpha = 1.0f; // alpha in y= alpha *Ax + beta*y
double beta = 0.0f; // beta in y= alpha *Ax + beta*y
//--------------------------------------------------------------------------
// Device memory management
double* d_S; // device stock array
double* d_A; // device coefficient lhs A array
double* d_B; // device coefficient lhs B array
double* d_C; // device coefficient lhs C array
double* d_nA; // device coefficient rhs A array
double* d_nB; // device coefficient rhs B array
double* d_nC; // device coefficient rhs C array
BS_DiffEq* d_pbs_diff_eq;
double * dX, * dY; // device V^k, V^k+1 arrays
// memory allocation of all device arrays
CHECK_CUDA(hipMalloc((void**)&d_S, N_S * sizeof(double)))
CHECK_CUDA(hipMalloc((void**)&d_A, N_S * sizeof(double)))
CHECK_CUDA(hipMalloc((void**)&d_B, N_S * sizeof(double)))
CHECK_CUDA(hipMalloc((void**)&d_C, N_S * sizeof(double)))
CHECK_CUDA(hipMalloc((void**)&d_nA, N_S * sizeof(double)))
CHECK_CUDA(hipMalloc((void**)&d_nB, N_S * sizeof(double)))
CHECK_CUDA(hipMalloc((void**)&d_nC, N_S * sizeof(double)))
CHECK_CUDA(hipMalloc((void**)&d_pbs_diff_eq, N_S * sizeof(BS_DiffEq)))
CHECK_CUDA(hipMalloc((void**)&dX, N_S * sizeof(double)))
CHECK_CUDA(hipMalloc((void**)&dY, N_S * sizeof(double)))
for (int i = 0; i < N_Sp; i++) // fill in stock value array
{
S[i] = i * dS;
//printf("%lf\n", S[i]);
}
//printf("\n");
/*for (int i = 0; i < N_Sp; i++)
{
S[i] = i*dS;
}*/
printf("%lf\n", S[N_Sp - 1]);
// set initial condition
for (int i = 0; i < N_Sp; i++) //initial V^k array
{
hX[i] = fmax(S[i] - Strike, 0.0); //payoff max(S - Strike, 0)
}
printf("%lf\n", hX[N_Sp - 1]);
// evaluate coefficients that are needed in finite difference approximation
double nu1 = (dt / (dS * dS)); // dt / dS^2
double nu2 = (dt / dS); // dt / dS
//store in params struct
pbs_diff_eq->_nu1 = nu1;
pbs_diff_eq->_nu2 = nu2;
pbs_diff_eq->_dt = dt;
pbs_diff_eq->_sigma = Vol;
pbs_diff_eq->_intRate = Int_Rate;
pbs_diff_eq->_NS = N_S;
pbs_diff_eq->_NT = N_t;
int numBlocks = (N_S + block_size - 1) / block_size; //number of blocks
// copy and set initial values for device arrays from host arrays
CHECK_CUDA(hipMemcpy(d_pbs_diff_eq, pbs_diff_eq, sizeof(BS_DiffEq), hipMemcpyHostToDevice))
CHECK_CUDA(hipMemcpy(dX, &hX[1], N_S * sizeof(double), hipMemcpyHostToDevice))
CHECK_CUDA(hipMemcpy(d_S, &S[1], N_S * sizeof(double), hipMemcpyHostToDevice))
CHECK_CUDA(hipMemset(d_A, 0, N_S * sizeof(double)))
CHECK_CUDA(hipMemset(d_B, 0, N_S * sizeof(double)))
CHECK_CUDA(hipMemset(d_C, 0, N_S * sizeof(double)))
CHECK_CUDA(hipMemset(dY, 0, N_S * sizeof(double)))
CHECK_CUDA(hipMemset(d_nA, 0, N_S * sizeof(double)))
CHECK_CUDA(hipMemset(d_nB, 0, N_S * sizeof(double)))
CHECK_CUDA(hipMemset(d_nC, 0, N_S * sizeof(double)))
//printf("%lld\n", N_t);
hipsparseHandle_t handle = NULL; //handle to cuSPARSE
CHECK_CUSPARSE(hipsparseCreate(&handle)) //create cuSPARSE library handle
double V_o = 0.0; // first value in V^k array (upper boundary condition)
double V_lo = hX[0]; // first value in V^k array first time step
double V_f = 0.0; // last value in V^k array (lower boundary condition)
double* V_fi = (double*)malloc(2*sizeof(double)); // two final values in V^k array required for computing the lower boundary condition
t1 = clock(); //setup time
t1sum = ((double)(t1 - t0)) / CLOCKS_PER_SEC;
printf("Init took %f seconds. Begin compute\n", t1sum);
//launch calculate coefficients kernel
hipLaunchKernelGGL(( calc_coef) , dim3(numBlocks), dim3(block_size) , 0, 0, d_S, d_A, d_B, d_C, d_nA, d_nB, d_nC, d_pbs_diff_eq);
//device synchronization after kernel execution
hipDeviceSynchronize();
// Query the workspace size required by the tridiagonal solver
CHECK_CUSPARSE(hipsparseDgtsv2_nopivot_bufferSizeExt(
handle, N_S,
nrh, d_A, d_B, d_C, dY, N_S,
&lworkInBytes))
CHECK_CUDA(hipMalloc((void**)&d_work, lworkInBytes))
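// Each time step: sp_matvec forms the explicit right-hand side dY = M_rhs * dX from (d_nA, d_nB, d_nC),
// hipsparseDgtsv2_nopivot solves the implicit tridiagonal system (d_A, d_B, d_C) in place on dY,
// then dY replaces dX and the boundary values are updated on the host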
for (int i = 0; i < N_t; i++) //time step loop
{
//call sparse matrix vector product kernel
hipLaunchKernelGGL(( sp_matvec) , dim3(numBlocks), dim3(block_size), (block_size + 2) * sizeof(double) , 0, d_nA, d_nB, d_nC, dX, dY, N_S);
CHECK_CUDA(hipDeviceSynchronize()) //device synchronization after kernel execution
//solve tridiagonal system using CR-PCR algorithm
CHECK_CUSPARSE(hipsparseDgtsv2_nopivot(
handle, N_S,
nrh, d_A, d_B, d_C, dY, N_S,
d_work))
// synchronize device after product
CHECK_CUDA(hipDeviceSynchronize())
V_o = V_lo * (1 - Int_Rate * dt); //Calculate upper boundary condition
V_lo = V_o; // update first value for next iteration
//--------------------------------------------------------------------------
// device result check
CHECK_CUDA(hipMemcpy(dX, dY, N_S * sizeof(double), hipMemcpyDeviceToDevice)) //update V^k with V^k+1
//Copy last two values of V^k to compute the lower boundary condition
CHECK_CUDA(hipMemcpy(V_fi, &dY[N_S-2], 2 * sizeof(double), hipMemcpyDeviceToHost))
V_f = 2 * V_fi[1] - V_fi[0]; // calculate lower boundary conditions
}
t2 = clock(); // computation time of full solution
t2sum = ((double)(t2 - t1)) / CLOCKS_PER_SEC;
printf("Computing took %f seconds. Finish to compute\n", t2sum);
CHECK_CUDA(hipMemcpy(hY, dY, N_S * sizeof(double), hipMemcpyDeviceToHost)) //copy solution
printf("%lf\n", V_f); // print final value of V^k
printf("\n");
double mn=0.0; //initialize sampled-mean variable
mn=mean(hY,V_f,V_o,N_Sp); // average roughly 100 sampled solution values as a summary check
printf("%lf\n",mn); // print sampled mean
printf("End\n"); // print END
//--------------------------------------------------------------------------
//
// device memory deallocation
CHECK_CUSPARSE(hipsparseDestroy(handle))
CHECK_CUDA(hipFree(d_work))
CHECK_CUDA(hipFree(dX))
CHECK_CUDA(hipFree(dY))
CHECK_CUDA(hipFree(d_A))
CHECK_CUDA(hipFree(d_B))
CHECK_CUDA(hipFree(d_C))
CHECK_CUDA(hipFree(d_nA))
CHECK_CUDA(hipFree(d_nB))
CHECK_CUDA(hipFree(d_nC))
CHECK_CUDA(hipFree(d_pbs_diff_eq))
// host memory deallocation
free(S);
free(A);
free(B);
free(C);
free(nA);
free(nB);
free(nC);
free(hX);
free(hY);
free(V_fi);
free(pbs_diff_eq);
return EXIT_SUCCESS;
}
|
ed6ee6c5dc2d64d4253529f8ea2875821c6c075b.cu
|
#include <cuda_runtime_api.h> // cudaMalloc, cudaMemcpy, etc.
#include <cusparse.h> // cusparseSpMV
#include <stdio.h> // printf
#include <stdlib.h> // EXIT_FAILURE
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <iostream>
#include <string.h>
#define CHECK_CUDA(func) \
{ \
cudaError_t status = (func); \
if (status != cudaSuccess) { \
printf("CUDA API failed at line %d with error: %s (%d)\n", \
__LINE__, cudaGetErrorString(status), status); \
return EXIT_FAILURE; \
} \
}
#define CHECK_CUSPARSE(func) \
{ \
cusparseStatus_t status = (func); \
if (status != CUSPARSE_STATUS_SUCCESS) { \
printf("CUSPARSE API failed at line %d with error: %s (%d)\n", \
__LINE__, cusparseGetErrorString(status), status); \
return EXIT_FAILURE; \
} \
}
struct BS_DiffEq { //struct for parameters of BS
double _nu1, _nu2, _dt, _sigma, _intRate;
long long int _NS, _NT;
};
__global__
void calc_coef(double* _S, double* A, double* B, double* C, double* nA, double* nB, double* nC, BS_DiffEq* pbs_diff_eq) //kernel to calculate coefficient matrices
{
int th = threadIdx.x; //thread number in block
int blk_sz = blockDim.x; //block size
int blk_id = blockIdx.x; //block number in grid
int index = blk_sz * blk_id + th; // global index
double _nu1 = pbs_diff_eq->_nu1; // dt / dS^2
double _nu2 = pbs_diff_eq->_nu2; // dt / dS
double _dt = pbs_diff_eq->_dt; // time step size
double _volatility = pbs_diff_eq->_sigma;
double _intRate = pbs_diff_eq->_intRate;
long int N_S = pbs_diff_eq->_NS; //number of stock levels
long int N_t = pbs_diff_eq->_NT; //number of time levels
double a_k = 0.5 * pow(_volatility, 2) * pow(_S[index], 2);
double b_k = _intRate * _S[index];
double c_k = -_intRate;
double Ak = 0.0;
double Bk = 0.0;
double Ck = 0.0;
if (index < N_S) //conditional for fitting FD column size with GPU grid
{
Ak = 0.5 * _nu1 * a_k - 0.25 * _nu2 * b_k;
Bk = -_nu1 * a_k + 0.5 * _dt * c_k;
Ck = 0.5 * _nu1 * a_k + 0.25 * _nu2 * b_k;
A[index] = -Ak; //lhs A
B[index] = 1.0 - Bk; //lhs B
C[index] = -Ck; //lhs C
nA[index] = Ak; //rhs A
nB[index] = 1.0 + Bk; //rhs B
nC[index] = Ck; //rhs C
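// At the last interior node the off-grid value is linearly extrapolated, V_(N+1) = 2*V_N - V_(N-1),
// which folds the C coefficient into the A and B entries of that row below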
if (index == N_S - 1) // lower boundary condition
{
A[index] = -Ak + Ck; //lower boundary condition for lhs A
B[index] = 1.0 - Bk - 2 * Ck; //lower boundary condition for lhs B
nA[index] = Ak - Ck; //lower boundary condition for rhs A
nB[index] = 1.0 + Bk + 2 * Ck; //lower boundary condition for rhs B
}
}
}
//function to compute the mean of roughly 100 evenly sampled solution values (simple summary check)
double mean(double *V, double Vf, double Vo, long long int N_S)
{
double acum=0.0; //acumulator
double mn=0.0; //mean
int inc=N_S/100;
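// note: assumes N_S >= 100 so that inc >= 1; with inc == 0 the loop below would never advance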
for (long long int i = 0; i < N_S; i+=inc)
{
if(i==0)
{
acum += Vo; //Initial Value
}
acum+=V[i];
if(i==N_S-1)
{
acum += Vf; //Last Value
}
}
mn=acum/100; //Normalize
return mn;
}
__global__
void sp_matvec(double* nA, double* nB, double* nC, double* V, double* Vmul, long int N_S)
{
int th = threadIdx.x; //thread number in block
int blk_sz = blockDim.x; //block size
int blk_id = blockIdx.x; //block number in grid
int index = blk_sz * blk_id + th; // global index
extern __shared__ double shdV[]; //dynamic shared memory array
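// shared layout: shdV[0] holds the halo value from the previous block, shdV[1..blockDim.x] this
// block's values of V, and shdV[blockDim.x + 1] the halo value from the next block; note that the
// __syncthreads() below sits inside the index < N_S guard, so the launch effectively assumes
// N_S is a multiple of the block size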
int thp1 = th + 1;
if (index < N_S) //conditional for fitting FD column size with GPU grid
{
shdV[thp1] = V[index]; // each thread stores its current value
shdV[0] = 0.0; // initialize lower neighbour
shdV[blk_sz + 1] = 0.0; // initialize upper neighbour
if (blk_id > 0) // if not in first block
{
//lower neighbour comes from lower neighbouring block
shdV[0] = V[blk_id * blk_sz - 1];
}
if (blk_id < (gridDim.x - 1)) // if not in last block
{
//upper neighbour comes from upper neighbouring block
shdV[blk_sz + 1] = V[(blk_id + 1) * blk_sz];
}
double s = shdV[thp1], sm1 = 0.0, sp1 = 0.0; //define FD stencil
__syncthreads(); //wait for all threads in block to gather their values
if (index > 0) // every row except the first needs the i-1 value
{
sm1 = shdV[thp1 - 1]; //read level i-1 from shared memory
}
if ((index + 1) < N_S)
{
sp1 = shdV[thp1 + 1]; //read level i+1 from shared memory
}
if (index == 0) //is first row
{
//First row tridiagonal matrix - dense vector product
Vmul[index] = nB[index] * s + nC[index] * sp1;
}
else
{
if (index == N_S - 1) //is last row
{
//Last row tridiagonal matrix - dense vector product
Vmul[index] = nA[index] * sm1 + nB[index] * s;
}
else
{
//Middle row tridiagonal matrix - dense vector product
Vmul[index] = nA[index] * sm1 + nB[index] * s + nC[index] *sp1;
}
}
}
}
int main(int argc, char** argv)
//int main(void)
{
// Host problem definition
int gridsize = atoi(argv[1]); // grid size (number of asset levels)
double volatility = atof(argv[2]);
double expiration = atof(argv[3]);
int blksize = atoi(argv[4]); //block size
int tstsize = atoi(argv[5]); //number of time levels
double Vol = volatility, Int_Rate = 0.05, Expiration = expiration, Strike = 100.0; //params of BS
int block_size = blksize;
long long int N_Sp = gridsize; // total number of asset levels
long long int N_S = N_Sp - 2; // total number of asset levels in FD matrices without boundary elements
clock_t t0, t1, t2; //timing variables
double t1sum = 0.0; //timing sum
double t2sum = 0.0; //timing sum
double dS = (2 * Strike) / N_Sp; //asset step
long long int N_t = tstsize;
double dt = Expiration / N_t; //time step
const int nrh = 1; // number of right hand sides in algebraic solver
const float h_one = 1;
const float h_zero = 0;
size_t lworkInBytes = 0;
char* d_work = NULL;
t0 = clock(); //start timer for setup
double* hX = (double*)malloc(N_Sp * sizeof(*hX)); //host V^k array
double* hY = (double*)malloc(N_S * sizeof(*hY)); //host V^k+1 array
double* S = (double*)malloc(N_Sp * sizeof(*S)); // host stock array
double* A = (double*)malloc(N_S * sizeof(*A)); // host coefficient lhs A array
double* B = (double*)malloc(N_S * sizeof(*B)); // host coefficient lhs B array
double* C = (double*)malloc(N_S * sizeof(*C)); // host coefficient lhs C array
double* nA = (double*)malloc(N_S * sizeof(*A)); // host coefficient rhs A array
double* nB = (double*)malloc(N_S * sizeof(*B)); // host coefficient rhs B array
double* nC = (double*)malloc(N_S * sizeof(*C)); // host coefficient rhs C array
BS_DiffEq* pbs_diff_eq = (BS_DiffEq*)malloc(sizeof(*pbs_diff_eq)); // params structure
double alpha = 1.0f; // alpha in y= alpha *Ax + beta*y
double beta = 0.0f; // beta in y= alpha *Ax + beta*y
//--------------------------------------------------------------------------
// Device memory management
double* d_S; // device stock array
double* d_A; // device coefficient lhs A array
double* d_B; // device coefficient lhs B array
double* d_C; // device coefficient lhs C array
double* d_nA; // device coefficient rhs A array
double* d_nB; // device coefficient rhs B array
double* d_nC; // device coefficient rhs C array
BS_DiffEq* d_pbs_diff_eq;
double * dX, * dY; // device V^k, V^k+1 arrays
// memory allocation of all device arrays
CHECK_CUDA(cudaMalloc((void**)&d_S, N_S * sizeof(double)))
CHECK_CUDA(cudaMalloc((void**)&d_A, N_S * sizeof(double)))
CHECK_CUDA(cudaMalloc((void**)&d_B, N_S * sizeof(double)))
CHECK_CUDA(cudaMalloc((void**)&d_C, N_S * sizeof(double)))
CHECK_CUDA(cudaMalloc((void**)&d_nA, N_S * sizeof(double)))
CHECK_CUDA(cudaMalloc((void**)&d_nB, N_S * sizeof(double)))
CHECK_CUDA(cudaMalloc((void**)&d_nC, N_S * sizeof(double)))
CHECK_CUDA(cudaMalloc((void**)&d_pbs_diff_eq, N_S * sizeof(BS_DiffEq)))
CHECK_CUDA(cudaMalloc((void**)&dX, N_S * sizeof(double)))
CHECK_CUDA(cudaMalloc((void**)&dY, N_S * sizeof(double)))
for (int i = 0; i < N_Sp; i++) // fill in stock value array
{
S[i] = i * dS;
//printf("%lf\n", S[i]);
}
//printf("\n");
/*for (int i = 0; i < N_Sp; i++)
{
S[i] = i*dS;
}*/
printf("%lf\n", S[N_Sp - 1]);
// set initial condition
for (int i = 0; i < N_Sp; i++) //initial V^k array
{
hX[i] = fmax(S[i] - Strike, 0.0); //payoff max(S - Strike, 0)
}
printf("%lf\n", hX[N_Sp - 1]);
// evaluate coefficients that are needed in finite difference approximation
double nu1 = (dt / (dS * dS)); // dt / dS^2
double nu2 = (dt / dS); // dt / dS
//store in params struct
pbs_diff_eq->_nu1 = nu1;
pbs_diff_eq->_nu2 = nu2;
pbs_diff_eq->_dt = dt;
pbs_diff_eq->_sigma = Vol;
pbs_diff_eq->_intRate = Int_Rate;
pbs_diff_eq->_NS = N_S;
pbs_diff_eq->_NT = N_t;
int numBlocks = (N_S + block_size - 1) / block_size; //number of blocks
// copy and set initial values for device arrays from host arrays
CHECK_CUDA(cudaMemcpy(d_pbs_diff_eq, pbs_diff_eq, sizeof(BS_DiffEq), cudaMemcpyHostToDevice))
CHECK_CUDA(cudaMemcpy(dX, &hX[1], N_S * sizeof(double), cudaMemcpyHostToDevice))
CHECK_CUDA(cudaMemcpy(d_S, &S[1], N_S * sizeof(double), cudaMemcpyHostToDevice))
CHECK_CUDA(cudaMemset(d_A, 0, N_S * sizeof(double)))
CHECK_CUDA(cudaMemset(d_B, 0, N_S * sizeof(double)))
CHECK_CUDA(cudaMemset(d_C, 0, N_S * sizeof(double)))
CHECK_CUDA(cudaMemset(dY, 0, N_S * sizeof(double)))
CHECK_CUDA(cudaMemset(d_nA, 0, N_S * sizeof(double)))
CHECK_CUDA(cudaMemset(d_nB, 0, N_S * sizeof(double)))
CHECK_CUDA(cudaMemset(d_nC, 0, N_S * sizeof(double)))
//printf("%lld\n", N_t);
cusparseHandle_t handle = NULL; //handle to cuSPARSE
CHECK_CUSPARSE(cusparseCreate(&handle)) //create cuSPARSE library handle
double V_o = 0.0; // first value in V^k array (upper boundary condition)
double V_lo = hX[0]; // first value in V^k array first time step
double V_f = 0.0; // last value in V^k array (lower boundary condition)
double* V_fi = (double*)malloc(2*sizeof(double)); // two final values in V^k array required for computing the lower boundary condition
t1 = clock(); //setup time
t1sum = ((double)(t1 - t0)) / CLOCKS_PER_SEC;
printf("Init took %f seconds. Begin compute\n", t1sum);
//launch calculate coefficients kernel
calc_coef <<<numBlocks, block_size >>> (d_S, d_A, d_B, d_C, d_nA, d_nB, d_nC, d_pbs_diff_eq);
//device synchronization after kernel execution
cudaDeviceSynchronize();
// Query the workspace size required by the tridiagonal solver
CHECK_CUSPARSE(cusparseDgtsv2_nopivot_bufferSizeExt(
handle, N_S,
nrh, d_A, d_B, d_C, dY, N_S,
&lworkInBytes))
CHECK_CUDA(cudaMalloc((void**)&d_work, lworkInBytes))
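// Each time step: sp_matvec forms the explicit right-hand side dY = M_rhs * dX from (d_nA, d_nB, d_nC),
// cusparseDgtsv2_nopivot solves the implicit tridiagonal system (d_A, d_B, d_C) in place on dY,
// then dY replaces dX and the boundary values are updated on the host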
for (int i = 0; i < N_t; i++) //time step loop
{
//call sparse matrix vector product kernel
sp_matvec <<<numBlocks, block_size, (block_size + 2) * sizeof(double) >>> (d_nA, d_nB, d_nC, dX, dY, N_S);
CHECK_CUDA(cudaDeviceSynchronize()) //device synchronization after kernel execution
//solve tridiagonal system using CR-PCR algorithm
CHECK_CUSPARSE(cusparseDgtsv2_nopivot(
handle, N_S,
nrh, d_A, d_B, d_C, dY, N_S,
d_work))
// synchronize device after product
CHECK_CUDA(cudaDeviceSynchronize())
V_o = V_lo * (1 - Int_Rate * dt); //Calculate upper boundary condition
V_lo = V_o; // update first value for next iteration
//--------------------------------------------------------------------------
// device result check
CHECK_CUDA(cudaMemcpy(dX, dY, N_S * sizeof(double), cudaMemcpyDeviceToDevice)) //update V^k with V^k+1
//Copy last two values of V^k to compute the lower boundary condition
CHECK_CUDA(cudaMemcpy(V_fi, &dY[N_S-2], 2 * sizeof(double), cudaMemcpyDeviceToHost))
V_f = 2 * V_fi[1] - V_fi[0]; // calculate lower boundary conditions
}
t2 = clock(); // computation time of full solution
t2sum = ((double)(t2 - t1)) / CLOCKS_PER_SEC;
printf("Computing took %f seconds. Finish to compute\n", t2sum);
CHECK_CUDA(cudaMemcpy(hY, dY, N_S * sizeof(double), cudaMemcpyDeviceToHost)) //copy solution
printf("%lf\n", V_f); // print final value of V^k
printf("\n");
double mn=0.0; //initialize sampled-mean variable
mn=mean(hY,V_f,V_o,N_Sp); // average roughly 100 sampled solution values as a summary check
printf("%lf\n",mn); // print sampled mean
printf("End\n"); // print END
//--------------------------------------------------------------------------
//
// device memory deallocation
CHECK_CUSPARSE(cusparseDestroy(handle))
CHECK_CUDA(cudaFree(d_work))
CHECK_CUDA(cudaFree(dX))
CHECK_CUDA(cudaFree(dY))
CHECK_CUDA(cudaFree(d_A))
CHECK_CUDA(cudaFree(d_B))
CHECK_CUDA(cudaFree(d_C))
CHECK_CUDA(cudaFree(d_nA))
CHECK_CUDA(cudaFree(d_nB))
CHECK_CUDA(cudaFree(d_nC))
CHECK_CUDA(cudaFree(d_pbs_diff_eq))
// host memory deallocation
free(S);
free(A);
free(B);
free(C);
free(nA);
free(nB);
free(nC);
free(hX);
free(hY);
free(V_fi);
free(pbs_diff_eq);
return EXIT_SUCCESS;
}
|
d504bb43a27aaf16e7b76c6661f6fc8750ad29e9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "rocblas.h"
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
#define n 6
#define k 1
int main(void){
hipblasHandle_t handle;
int i,j;
float* a;
float* x;
float* y;
hipMallocManaged(&a,n*n*sizeof(float));
hipMallocManaged(&x,n*sizeof(float));
hipMallocManaged(&y,n*sizeof(float));
int ind=11;
for(i = 0; i < n; i++) a[i*n] = (float)ind++;
for(i = 0; i < n-1; i++) a[i*n+1] = (float)ind++;
for(i = 0; i < n; i++){x[i]=1.0f; y[i]=0.0f;};
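// a holds a symmetric band matrix with bandwidth k=1 in lower banded storage (lda=n):
// a[i*n] is the diagonal entry of column i and a[i*n+1] the sub-diagonal entry;
// ssbmv below computes y = al*A*x + bet*y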
hipblasCreate(&handle);
float al = 1.0f;
float bet = 1.0f;
hipblasSsbmv(handle, HIPBLAS_FILL_MODE_LOWER,n,k,&al,a,n,x,1,&bet,y,1);
hipDeviceSynchronize();
printf("y after ssbmv:\n");
for(j = 0; j < n; j++){
printf("%7.0f\n",y[j]);
}
hipFree(a);
hipFree(x);
hipFree(y);
hipblasDestroy(handle);
return EXIT_SUCCESS;
}
|
d504bb43a27aaf16e7b76c6661f6fc8750ad29e9.cu
|
#include <stdio.h>
#include "cublas_v2.h"
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
#define n 6
#define k 1
int main(void){
cublasHandle_t handle;
int i,j;
float* a;
float* x;
float* y;
cudaMallocManaged(&a,n*n*sizeof(float));
cudaMallocManaged(&x,n*sizeof(float));
cudaMallocManaged(&y,n*sizeof(float));
int ind=11;
for(i = 0; i < n; i++) a[i*n] = (float)ind++;
for(i = 0; i < n-1; i++) a[i*n+1] = (float)ind++;
for(i = 0; i < n; i++){x[i]=1.0f; y[i]=0.0f;};
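// a holds a symmetric band matrix with bandwidth k=1 in lower banded storage (lda=n):
// a[i*n] is the diagonal entry of column i and a[i*n+1] the sub-diagonal entry;
// ssbmv below computes y = al*A*x + bet*y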
cublasCreate(&handle);
float al = 1.0f;
float bet = 1.0f;
cublasSsbmv(handle, CUBLAS_FILL_MODE_LOWER,n,k,&al,a,n,x,1,&bet,y,1);
cudaDeviceSynchronize();
printf("y after ssbmv:\n");
for(j = 0; j < n; j++){
printf("%7.0f\n",y[j]);
}
cudaFree(a);
cudaFree(x);
cudaFree(y);
cublasDestroy(handle);
return EXIT_SUCCESS;
}
|
39580d62be9ff88a397353737d245b4b55637c48.hip
|
// !!! This is a file automatically generated by hipify!!!
//Doesn't sieve for >1024 threads
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
typedef unsigned __int64 integer;
const size_t size = sizeof(integer);
const integer pow2_32 = 4294967296;
const int threads = 256;
//Utility function to calculate positive integer-powers
integer power(integer val, integer exp)
{
integer temp = val;
for (integer i = 1; i < exp; i++) temp *= val;
return temp;
}
//Utility function to approximate no. of primes between 1->n as n/ln(n)
integer trimSize(integer n)
{
long double e = 2.7183;
integer exp = 1;
while (pow(e, exp) < n)
exp++;
return n / (exp - 2);
}
///////////////////////////KERNEL START///////////////////////////
__device__ void SievePrime(bool *mark, integer p, integer min, integer minb, integer max)
{
integer j;
for (j = minb;j <= max;j += (p << 1))
mark[(j - min) >> 1] = true;
}
__global__ void SieveBlock(integer *P, integer completed)
{
integer id = threadIdx.x, i, j, minb;
integer segsize = pow2_32 / threads;
integer min = (completed * pow2_32) + (id * segsize) + 1;
integer max = min + segsize - 2;
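// note: mark is a per-thread local array of (2^31)/threads bools (about 8 MB when threads == 256)
// and is never cleared before use; this likely exceeds the per-thread local-memory limit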
bool mark[(pow2_32 >> 1) / threads];
for (i = 0;P[i] * P[i] <= max;i++)
{
minb = (min / P[i]) * (P[i]) + P[i];
if (~minb & 1) minb += P[i];
for (j = minb;j <= max;j += (P[i] << 1))
mark[(j - min + 1) >> 1] = true;
}
//printf("Kernel %3llu stopped at %llu [Max: %llu]\n", id + completed, j - (P[i] << 2), max);
for (j = max; j >= min; j -= 2)
{
if (!mark[(j - min + 1) >> 1])
{
printf("Test\n");
//printf("Kernel %llu: %llu\n", id , j);
break;
}
}
}
////////////////////////////KERNEL END////////////////////////////
// SEGMENTED SIEVE
// n RAM Time
// E07 552KB 0.026s
// E08 620KB 0.206s
// E09 704KB 1.895s
// E10 668KB 20.02s
// E11 904KB 205.2s
//PARALLEL SEGMENTED SIEVE
// n RAM Time
// E07 203MB 0.481s
// E08 202MB 4.405s
// E09
// E10
// E11
//Stats logged via Visual Studio Performance Profiler on i7 4790K @4.00GHz w/ 16GB DDR3 RAM and GTX 1070Ti
//Can't take n>
//Driver function
int main(int argc, char* argv[])
{
//Range: Data-type dependent
integer n;
printf("Enter n: ");
scanf("%llu", &n);
integer m = sqrt(n);
integer marklen = n >> 1;
bool smallsieve = false; //Use serial sieve for n<2^32
if (n <= pow2_32)
smallsieve = true;
else if (n % pow2_32 > 0) //If n>2^32 then round n to nearest multiple of 2^32
{
printf("Rounded %llu to ", n);
n = ((n / pow2_32) + 1) * pow2_32;
printf("%llu\n\n", n);
m = 65536; //sqrt(pow2_32)
marklen = pow2_32 >> 1;
}
integer limit = (smallsieve) ? n : pow2_32;
integer plen = trimSize(pow2_32);
if (~n & 1) n--;
if (~m & 1) m--;
//Boolean array initialized to false
bool *mark = (bool *)calloc(marklen + 1, sizeof(bool)); //Represents [2,3,5,7,9,11,...,sqrt(n)]
//Array to store primes b/w [2,m]
integer *P = (integer *)calloc(plen + 1, (size_t)size);
if (mark == NULL || P == NULL) { printf("Memory Allocation Failed!\n"); exit(1); }
integer i, j, k, offset;
//Log execution time
clock_t START_TIME, END_TIME;
double CPU_TIME = 0.0;
float GPU_TIME = 0.0;
float temp_t;
//Setup-Phase: Calculate all primes in the range [3,m]
START_TIME = clock();
for (i = 5, k = 1, offset = 2; i < m; i += offset, offset = 6 - offset) //i->[3,5,7,9...,sqrt(n)] | i corresponds to mark[(i-3)/2]
{
if (!mark[i >> 1])
{
if (i*i <= limit)
for (j = i * i; j <= limit; j += (i << 1)) //j->[i^2,n] | increments by 2*i
mark[j >> 1] = 1;
P[k++] = i;
}
}
END_TIME = clock();
CPU_TIME = ((double)(END_TIME - START_TIME)) / CLOCKS_PER_SEC;
printf("Stopped primary sieve at prime %llu\n", P[k - 1]);
for (;i <= limit;i += offset, offset = 6 - offset)
{
if(!mark[i >> 1])
P[k++] = i;
}
P[0] = 3;
plen = k;
free(mark);
printf("Last prime: %llu @ index [%llu]\n\n", P[plen - 1], plen - 1);
if (smallsieve) goto end;
integer chunksize = pow2_32 >> 1; //Elements per chunk of 2^32 digits
integer chunkcount = (n - pow2_32 - 1) / chunksize; //No. of chunks
integer completed = 1;
printf("%llu chunk(s) for [%llu->%llu]\n", chunkcount, pow2_32 - 1, n);
integer *d_P;
//CUDA Malloc
hipMalloc(&d_P, (plen + 1) * (size));
//Calculate dimensions
dim3 TPB(threads);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipMemcpy(d_P, P, plen * size, hipMemcpyHostToDevice);
while (completed <= chunkcount)
{
hipEventRecord(start);
hipLaunchKernelGGL(( SieveBlock), dim3(1), dim3(TPB) , 0, 0, d_P, completed); //Execute sieving kernel
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&temp_t, start, stop);
GPU_TIME += temp_t;
completed++;
}
free(P);
hipFree(d_P);
GPU_TIME /= 1000;
end:printf("\nSETUP-PHASE CPU Time: %0.3f seconds\n", CPU_TIME);
printf("COMPUTE-PHASE GPU Time: %0.3f seconds\n", GPU_TIME);
//printf("FILE_WRITE CPU Time: %0.3f seconds\n", F_CPU_TIME);
return 0;
}
|
39580d62be9ff88a397353737d245b4b55637c48.cu
|
//Doesn't sieve for >1024 threads
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
typedef unsigned __int64 integer;
const size_t size = sizeof(integer);
const integer pow2_32 = 4294967296;
const int threads = 256;
//Utility function to calculate positive integer-powers
integer power(integer val, integer exp)
{
integer temp = val;
for (integer i = 1; i < exp; i++) temp *= val;
return temp;
}
//Utility function to approximate no. of primes between 1->n as n/ln(n)
integer trimSize(integer n)
{
long double e = 2.7183;
integer exp = 1;
while (pow(e, exp) < n)
exp++;
return n / (exp - 2);
}
///////////////////////////KERNEL START///////////////////////////
__device__ void SievePrime(bool *mark, integer p, integer min, integer minb, integer max)
{
integer j;
for (j = minb;j <= max;j += (p << 1))
mark[(j - min) >> 1] = true;
}
__global__ void SieveBlock(integer *P, integer completed)
{
integer id = threadIdx.x, i, j, minb;
integer segsize = pow2_32 / threads;
integer min = (completed * pow2_32) + (id * segsize) + 1;
integer max = min + segsize - 2;
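// note: mark is a per-thread local array of (2^31)/threads bools (about 8 MB when threads == 256)
// and is never cleared before use; this likely exceeds the per-thread local-memory limit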
bool mark[(pow2_32 >> 1) / threads];
for (i = 0;P[i] * P[i] <= max;i++)
{
minb = (min / P[i]) * (P[i]) + P[i];
if (~minb & 1) minb += P[i];
for (j = minb;j <= max;j += (P[i] << 1))
mark[(j - min + 1) >> 1] = true;
}
//printf("Kernel %3llu stopped at %llu [Max: %llu]\n", id + completed, j - (P[i] << 2), max);
for (j = max; j >= min; j -= 2)
{
if (!mark[(j - min + 1) >> 1])
{
printf("Test\n");
//printf("Kernel %llu: %llu\n", id , j);
break;
}
}
}
////////////////////////////KERNEL END////////////////////////////
// SEGMENTED SIEVE
// n RAM Time
// E07 552KB 0.026s
// E08 620KB 0.206s
// E09 704KB 1.895s
// E10 668KB 20.02s
// E11 904KB 205.2s
//PARALLEL SEGMENTED SIEVE
// n RAM Time
// E07 203MB 0.481s
// E08 202MB 4.405s
// E09
// E10
// E11
//Stats logged via Visual Studio Performance Profiler on i7 4790K @4.00GHz w/ 16GB DDR3 RAM and GTX 1070Ti
//Can't take n>
//Driver function
int main(int argc, char* argv[])
{
//Range: Data-type dependent
integer n;
printf("Enter n: ");
scanf("%llu", &n);
integer m = sqrt(n);
integer marklen = n >> 1;
bool smallsieve = false; //Use serial sieve for n<2^32
if (n <= pow2_32)
smallsieve = true;
else if (n % pow2_32 > 0) //If n>2^32 then round n to nearest multiple of 2^32
{
printf("Rounded %llu to ", n);
n = ((n / pow2_32) + 1) * pow2_32;
printf("%llu\n\n", n);
m = 65536; //sqrt(pow2_32)
marklen = pow2_32 >> 1;
}
integer limit = (smallsieve) ? n : pow2_32;
integer plen = trimSize(pow2_32);
if (~n & 1) n--;
if (~m & 1) m--;
//Boolean array initialized to false
bool *mark = (bool *)calloc(marklen + 1, sizeof(bool)); //Represents [2,3,5,7,9,11,...,sqrt(n)]
//Array to store primes b/w [2,m]
integer *P = (integer *)calloc(plen + 1, (size_t)size);
if (mark == NULL || P == NULL) { printf("Memory Allocation Failed!\n"); exit(1); }
integer i, j, k, offset;
//Log execution time
clock_t START_TIME, END_TIME;
double CPU_TIME = 0.0;
float GPU_TIME = 0.0;
float temp_t;
//Setup-Phase: Calculate all primes in the range [3,m]
START_TIME = clock();
for (i = 5, k = 1, offset = 2; i < m; i += offset, offset = 6 - offset) //i->[3,5,7,9...,sqrt(n)] | i corresponds to mark[(i-3)/2]
{
if (!mark[i >> 1])
{
if (i*i <= limit)
for (j = i * i; j <= limit; j += (i << 1)) //j->[i^2,n] | increments by 2*i
mark[j >> 1] = 1;
P[k++] = i;
}
}
END_TIME = clock();
CPU_TIME = ((double)(END_TIME - START_TIME)) / CLOCKS_PER_SEC;
printf("Stopped primary sieve at prime %llu\n", P[k - 1]);
for (;i <= limit;i += offset, offset = 6 - offset)
{
if(!mark[i >> 1])
P[k++] = i;
}
P[0] = 3;
plen = k;
free(mark);
printf("Last prime: %llu @ index [%llu]\n\n", P[plen - 1], plen - 1);
if (smallsieve) goto end;
integer chunksize = pow2_32 >> 1; //Elements per chunk of 2^32 digits
integer chunkcount = (n - pow2_32 - 1) / chunksize; //No. of chunks
integer completed = 1;
printf("%llu chunk(s) for [%llu->%llu]\n", chunkcount, pow2_32 - 1, n);
integer *d_P;
//CUDA Malloc
cudaMalloc(&d_P, (plen + 1) * (size));
//Calculate dimensions
dim3 TPB(threads);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMemcpy(d_P, P, plen * size, cudaMemcpyHostToDevice);
while (completed <= chunkcount)
{
cudaEventRecord(start);
SieveBlock<<<1, TPB >>> (d_P, completed); //Execute sieving kernel
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&temp_t, start, stop);
GPU_TIME += temp_t;
completed++;
}
free(P);
cudaFree(d_P);
GPU_TIME /= 1000;
end:printf("\nSETUP-PHASE CPU Time: %0.3f seconds\n", CPU_TIME);
printf("COMPUTE-PHASE GPU Time: %0.3f seconds\n", GPU_TIME);
//printf("FILE_WRITE CPU Time: %0.3f seconds\n", F_CPU_TIME);
return 0;
}
|
c5fe26e7c67ec4be9133172779fd4bb206067212.hip
|
// !!! This is a file automatically generated by hipify!!!
//
// Created by liang on 2/16/18.
//
#include <vector>
#include <algorithm>
#include <thread>
#include <memory>
#include <random>
#include <hip/hip_runtime.h>
#include <gflags/gflags.h>
#include <groute/event_pool.h>
#include <groute/graphs/csr_graph.h>
#include <groute/dwl/work_source.cuh>
#include <groute/device/cta_scheduler.cuh>
#include <utils/parser.h>
#include <utils/utils.h>
#include <utils/stopwatch.h>
#include <device_launch_parameters.h>
#include <utils/graphs/traversal.h>
#include <glog/logging.h>
#include "pr_common.h"
DECLARE_double(wl_alloc_factor);
DECLARE_uint64(wl_alloc_abs);
DECLARE_int32(max_pr_iterations);
DECLARE_double(epsilon);
namespace data_driven_append_warp_pr {
template<typename WorkSource,
typename WorkTarget,
typename TGraph,
template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankInit__Single__(
WorkSource work_source, WorkTarget work_target,
float EPSILON, TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
for (index_t ii = 0 + tid; ii < work_source.get_size(); ii += nthreads) {
index_t node = work_source.get_work(ii);
current_ranks[node] = 1.0 - ALPHA;
index_t
begin_edge = graph.begin_edge(node),
end_edge = graph.end_edge(node),
out_degree = end_edge - begin_edge;
if (out_degree == 0) continue;
rank_t update = ((1.0 - ALPHA) * ALPHA) / out_degree;
for (index_t edge = begin_edge; edge < end_edge; ++edge) {
index_t dest = graph.edge_dest(edge);
rank_t prev = atomicAdd(residual.get_item_ptr(dest), update);
if (prev <= EPSILON && prev + update > EPSILON)
work_target.append_warp(dest);
}
}
}
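// Relaxation kernel below: each active node drains its residual into current_ranks and pushes
// ALPHA * res / out_degree to its neighbours; a neighbour is appended to the output worklist
// (warp-aggregated append) the first time its residual crosses EPSILON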
template<
typename WorkSource, typename WorkTarget,
typename TGraph, template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankKernel__Single__(
WorkSource work_source, WorkTarget work_target,
float EPSILON, TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
for (uint32_t i = 0 + tid; i < work_size; i += nthreads) {
index_t node = work_source.get_work(i);
rank_t res = atomicExch(residual.get_item_ptr(node), 0);
if (res == 0) continue; // might happen if work_source has duplicates
current_ranks[node] += res;
index_t
begin_edge = graph.begin_edge(node),
end_edge = graph.end_edge(node),
out_degree = end_edge - begin_edge;
if (out_degree == 0) continue;
rank_t update = res * ALPHA / out_degree;
for (index_t edge = begin_edge; edge < end_edge; ++edge) {
index_t dest = graph.edge_dest(edge);
rank_t prev = atomicAdd(residual.get_item_ptr(dest), update);
if (prev <= EPSILON && prev + update > EPSILON) {
work_target.append_warp(dest);
}
}
}
}
/*
* The per-device Page Rank problem
*/
template<typename TGraph,
template<typename> class ResidualDatum,
template<typename> class RankDatum>
struct Problem {
TGraph m_graph;
ResidualDatum<rank_t> m_residual;
RankDatum<rank_t> m_current_ranks;
Problem(const TGraph &graph, RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) :
m_graph(graph), m_residual(residual), m_current_ranks(current_ranks) {
}
template<typename WorkSource, typename WorkTarget>
void Init__Single__(const WorkSource &workSource, WorkTarget workTarget, groute::Stream &stream) const {
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, m_graph.owned_nnodes());
Marker::MarkWorkitems(m_graph.owned_nnodes(), "PageRankInit__Single__");
PageRankInit__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >
(workSource, workTarget, FLAGS_epsilon, m_graph, m_current_ranks, m_residual);
}
template<typename WorkSource,
typename WorkTarget>
void
Relax__Single__(const WorkSource &work_source, WorkTarget &output_worklist, groute::Stream &stream) {
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, work_source.get_size());
float EPSILON = FLAGS_epsilon;
Marker::MarkWorkitems(work_source.get_size(), "PageRankKernel__Single__");
PageRankKernel__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >
(work_source, output_worklist.DeviceObject(), EPSILON, m_graph, m_current_ranks, m_residual);
}
};
struct Algo {
static const char *NameLower() { return "pr"; }
static const char *Name() { return "PR"; }
template<
typename TGraphAllocator, typename ResidualDatum, typename RankDatum, typename...UnusedData>
static const std::vector<rank_t> &Gather(
TGraphAllocator &graph_allocator, ResidualDatum &residual, RankDatum ¤t_ranks,
UnusedData &... data) {
graph_allocator.GatherDatum(current_ranks);
return current_ranks.GetHostData();
}
template<
typename ResidualDatum, typename RankDatum, typename...UnusedData>
static std::vector<rank_t> Host(
groute::graphs::host::CSRGraph &graph, ResidualDatum &residual, RankDatum ¤t_ranks,
UnusedData &... data) {
return PageRankHost(graph);
}
static int Output(const char *file, const std::vector<rank_t> &ranks) {
return PageRankOutput(file, ranks);
}
static int CheckErrors(std::vector<rank_t> &ranks, std::vector<rank_t> ®ression) {
return PageRankCheckErrors(ranks, regression);
}
};
}
bool DataDrivenAppendWarpPR() {
VLOG(0) << "DataDrivenAppendWarpPR";
typedef groute::Queue<index_t> Worklist;
groute::graphs::single::NodeOutputDatum<rank_t> residual;
groute::graphs::single::NodeOutputDatum<rank_t> current_ranks;
utils::traversal::Context<data_driven_append_warp_pr::Algo> context(1);
groute::graphs::single::CSRGraphAllocator
dev_graph_allocator(context.host_graph);
context.SetDevice(0);
dev_graph_allocator.AllocateDatumObjects(residual, current_ranks);
context.SyncDevice(0); // graph allocations are on default streams, must sync device
data_driven_append_warp_pr::Problem<
groute::graphs::dev::CSRGraph,
groute::graphs::dev::GraphDatum, groute::graphs::dev::GraphDatum>
solver(
dev_graph_allocator.DeviceObject(),
current_ranks.DeviceObject(),
residual.DeviceObject());
size_t max_work_size = context.host_graph.nedges * FLAGS_wl_alloc_factor;
if (FLAGS_wl_alloc_abs > 0)
max_work_size = FLAGS_wl_alloc_abs;
groute::Stream stream = context.CreateStream(0);
Worklist wl1(max_work_size, 0, "input queue"), wl2(max_work_size, 0, "output queue");
wl1.ResetAsync(stream.cuda_stream);
wl2.ResetAsync(stream.cuda_stream);
stream.Sync();
Stopwatch sw(true);
Worklist *in_wl = &wl1, *out_wl = &wl2;
solver.Init__Single__(groute::dev::WorkSourceRange<index_t>(
dev_graph_allocator.DeviceObject().owned_start_node(),
dev_graph_allocator.DeviceObject().owned_nnodes()),
in_wl->DeviceObject(), stream);
groute::Segment<index_t> work_seg;
work_seg = in_wl->GetSeg(stream);
int iteration = 0;
while (work_seg.GetSegmentSize() > 0) {
solver.Relax__Single__(
groute::dev::WorkSourceArray<index_t>(
work_seg.GetSegmentPtr(),
work_seg.GetSegmentSize()),
*out_wl, stream);
VLOG(1) << "INPUT " << work_seg.GetSegmentSize() << " OUTPUT " << out_wl->GetCount(stream);
if (++iteration > FLAGS_max_pr_iterations) {
LOG(WARNING) << "maximum iterations reached";
break;
}
in_wl->ResetAsync(stream.cuda_stream);
std::swap(in_wl, out_wl);
work_seg = in_wl->GetSeg(stream);
}
sw.stop();
VLOG(1) << data_driven_append_warp_pr::Algo::Name() << " terminated after " << iteration << " iterations (max: "
<< FLAGS_max_pr_iterations << ")";
VLOG(0) << "EPSILON: " << FLAGS_epsilon;
VLOG(0) << data_driven_append_warp_pr::Algo::Name() << ": " << sw.ms() << " ms. <filter>";
// Gather
auto gathered_output = data_driven_append_warp_pr::Algo::Gather(dev_graph_allocator, residual, current_ranks);
if (FLAGS_output.length() != 0)
data_driven_append_warp_pr::Algo::Output(FLAGS_output.c_str(), gathered_output);
if (FLAGS_check) {
auto regression = data_driven_append_warp_pr::Algo::Host(context.host_graph, residual, current_ranks);
return data_driven_append_warp_pr::Algo::CheckErrors(gathered_output, regression) == 0;
} else {
LOG(WARNING) << "Result not checked";
return true;
}
}
|
c5fe26e7c67ec4be9133172779fd4bb206067212.cu
|
//
// Created by liang on 2/16/18.
//
#include <vector>
#include <algorithm>
#include <thread>
#include <memory>
#include <random>
#include <cuda.h>
#include <gflags/gflags.h>
#include <groute/event_pool.h>
#include <groute/graphs/csr_graph.h>
#include <groute/dwl/work_source.cuh>
#include <groute/device/cta_scheduler.cuh>
#include <utils/parser.h>
#include <utils/utils.h>
#include <utils/stopwatch.h>
#include <device_launch_parameters.h>
#include <utils/graphs/traversal.h>
#include <glog/logging.h>
#include "pr_common.h"
DECLARE_double(wl_alloc_factor);
DECLARE_uint64(wl_alloc_abs);
DECLARE_int32(max_pr_iterations);
DECLARE_double(epsilon);
namespace data_driven_append_warp_pr {
template<typename WorkSource,
typename WorkTarget,
typename TGraph,
template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankInit__Single__(
WorkSource work_source, WorkTarget work_target,
float EPSILON, TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
for (index_t ii = 0 + tid; ii < work_source.get_size(); ii += nthreads) {
index_t node = work_source.get_work(ii);
current_ranks[node] = 1.0 - ALPHA;
index_t
begin_edge = graph.begin_edge(node),
end_edge = graph.end_edge(node),
out_degree = end_edge - begin_edge;
if (out_degree == 0) continue;
rank_t update = ((1.0 - ALPHA) * ALPHA) / out_degree;
for (index_t edge = begin_edge; edge < end_edge; ++edge) {
index_t dest = graph.edge_dest(edge);
rank_t prev = atomicAdd(residual.get_item_ptr(dest), update);
if (prev <= EPSILON && prev + update > EPSILON)
work_target.append_warp(dest);
}
}
}
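// Relaxation kernel below: each active node drains its residual into current_ranks and pushes
// ALPHA * res / out_degree to its neighbours; a neighbour is appended to the output worklist
// (warp-aggregated append) the first time its residual crosses EPSILON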
template<
typename WorkSource, typename WorkTarget,
typename TGraph, template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankKernel__Single__(
WorkSource work_source, WorkTarget work_target,
float EPSILON, TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
for (uint32_t i = 0 + tid; i < work_size; i += nthreads) {
index_t node = work_source.get_work(i);
rank_t res = atomicExch(residual.get_item_ptr(node), 0);
if (res == 0) continue; // might happen if work_source has duplicates
current_ranks[node] += res;
index_t
begin_edge = graph.begin_edge(node),
end_edge = graph.end_edge(node),
out_degree = end_edge - begin_edge;
if (out_degree == 0) continue;
rank_t update = res * ALPHA / out_degree;
for (index_t edge = begin_edge; edge < end_edge; ++edge) {
index_t dest = graph.edge_dest(edge);
rank_t prev = atomicAdd(residual.get_item_ptr(dest), update);
if (prev <= EPSILON && prev + update > EPSILON) {
work_target.append_warp(dest);
}
}
}
}
/*
* The per-device Page Rank problem
*/
template<typename TGraph,
template<typename> class ResidualDatum,
template<typename> class RankDatum>
struct Problem {
TGraph m_graph;
ResidualDatum<rank_t> m_residual;
RankDatum<rank_t> m_current_ranks;
Problem(const TGraph &graph, RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) :
m_graph(graph), m_residual(residual), m_current_ranks(current_ranks) {
}
template<typename WorkSource, typename WorkTarget>
void Init__Single__(const WorkSource &workSource, WorkTarget workTarget, groute::Stream &stream) const {
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, m_graph.owned_nnodes());
Marker::MarkWorkitems(m_graph.owned_nnodes(), "PageRankInit__Single__");
PageRankInit__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >
(workSource, workTarget, FLAGS_epsilon, m_graph, m_current_ranks, m_residual);
}
template<typename WorkSource,
typename WorkTarget>
void
Relax__Single__(const WorkSource &work_source, WorkTarget &output_worklist, groute::Stream &stream) {
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, work_source.get_size());
float EPSILON = FLAGS_epsilon;
Marker::MarkWorkitems(work_source.get_size(), "PageRankKernel__Single__");
PageRankKernel__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >
(work_source, output_worklist.DeviceObject(), EPSILON, m_graph, m_current_ranks, m_residual);
}
};
struct Algo {
static const char *NameLower() { return "pr"; }
static const char *Name() { return "PR"; }
template<
typename TGraphAllocator, typename ResidualDatum, typename RankDatum, typename...UnusedData>
static const std::vector<rank_t> &Gather(
TGraphAllocator &graph_allocator, ResidualDatum &residual, RankDatum ¤t_ranks,
UnusedData &... data) {
graph_allocator.GatherDatum(current_ranks);
return current_ranks.GetHostData();
}
template<
typename ResidualDatum, typename RankDatum, typename...UnusedData>
static std::vector<rank_t> Host(
groute::graphs::host::CSRGraph &graph, ResidualDatum &residual, RankDatum ¤t_ranks,
UnusedData &... data) {
return PageRankHost(graph);
}
static int Output(const char *file, const std::vector<rank_t> &ranks) {
return PageRankOutput(file, ranks);
}
static int CheckErrors(std::vector<rank_t> &ranks, std::vector<rank_t> ®ression) {
return PageRankCheckErrors(ranks, regression);
}
};
}
bool DataDrivenAppendWarpPR() {
VLOG(0) << "DataDrivenAppendWarpPR";
typedef groute::Queue<index_t> Worklist;
groute::graphs::single::NodeOutputDatum<rank_t> residual;
groute::graphs::single::NodeOutputDatum<rank_t> current_ranks;
utils::traversal::Context<data_driven_append_warp_pr::Algo> context(1);
groute::graphs::single::CSRGraphAllocator
dev_graph_allocator(context.host_graph);
context.SetDevice(0);
dev_graph_allocator.AllocateDatumObjects(residual, current_ranks);
context.SyncDevice(0); // graph allocations are on default streams, must sync device
data_driven_append_warp_pr::Problem<
groute::graphs::dev::CSRGraph,
groute::graphs::dev::GraphDatum, groute::graphs::dev::GraphDatum>
solver(
dev_graph_allocator.DeviceObject(),
current_ranks.DeviceObject(),
residual.DeviceObject());
size_t max_work_size = context.host_graph.nedges * FLAGS_wl_alloc_factor;
if (FLAGS_wl_alloc_abs > 0)
max_work_size = FLAGS_wl_alloc_abs;
groute::Stream stream = context.CreateStream(0);
Worklist wl1(max_work_size, 0, "input queue"), wl2(max_work_size, 0, "output queue");
wl1.ResetAsync(stream.cuda_stream);
wl2.ResetAsync(stream.cuda_stream);
stream.Sync();
Stopwatch sw(true);
Worklist *in_wl = &wl1, *out_wl = &wl2;
solver.Init__Single__(groute::dev::WorkSourceRange<index_t>(
dev_graph_allocator.DeviceObject().owned_start_node(),
dev_graph_allocator.DeviceObject().owned_nnodes()),
in_wl->DeviceObject(), stream);
groute::Segment<index_t> work_seg;
work_seg = in_wl->GetSeg(stream);
int iteration = 0;
while (work_seg.GetSegmentSize() > 0) {
solver.Relax__Single__(
groute::dev::WorkSourceArray<index_t>(
work_seg.GetSegmentPtr(),
work_seg.GetSegmentSize()),
*out_wl, stream);
VLOG(1) << "INPUT " << work_seg.GetSegmentSize() << " OUTPUT " << out_wl->GetCount(stream);
if (++iteration > FLAGS_max_pr_iterations) {
LOG(WARNING) << "maximum iterations reached";
break;
}
in_wl->ResetAsync(stream.cuda_stream);
std::swap(in_wl, out_wl);
work_seg = in_wl->GetSeg(stream);
}
sw.stop();
VLOG(1) << data_driven_append_warp_pr::Algo::Name() << " terminated after " << iteration << " iterations (max: "
<< FLAGS_max_pr_iterations << ")";
VLOG(0) << "EPSILON: " << FLAGS_epsilon;
VLOG(0) << data_driven_append_warp_pr::Algo::Name() << ": " << sw.ms() << " ms. <filter>";
// Gather
auto gathered_output = data_driven_append_warp_pr::Algo::Gather(dev_graph_allocator, residual, current_ranks);
if (FLAGS_output.length() != 0)
data_driven_append_warp_pr::Algo::Output(FLAGS_output.c_str(), gathered_output);
if (FLAGS_check) {
auto regression = data_driven_append_warp_pr::Algo::Host(context.host_graph, residual, current_ranks);
return data_driven_append_warp_pr::Algo::CheckErrors(gathered_output, regression) == 0;
} else {
LOG(WARNING) << "Result not checked";
return true;
}
}
|
d33127e8a5fb66fc3c894b7ca91afee5012428f7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//-----------------------------------------------------------------------------
// NVIDIA(R) GVDB VOXELS
// Copyright 2017 NVIDIA Corporation
// SPDX-License-Identifier: Apache-2.0
//
// Version 1.0: Rama Hoetzlein, 5/1/2017
//-----------------------------------------------------------------------------
// File: cuda_gvdb_copydata.cu
//
// GVDB Data Transfers
// - CopyData 3D volume into sub-volume
// - CopyDataZYX 3D volume into sub-volume with ZYX swizzle
// - RetreiveData 3D sub-volume into cuda buffer
// - CopyTexToBuf 2D texture into cuda buffer
// - CopyBufToTex cuda buffer into 2D texture
//-----------------------------------------------
#include "cuda_math.cuh"
// Zero memory of 3D volume
extern "C" __global__ void kernelFillTex ( int3 res, int dsize, hipSurfaceObject volTexOut )
{
uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx;
if ( t.x >= res.x || t.y >= res.y || t.z >= res.z ) return;
surf3Dwrite ( 0, volTexOut, t.x*dsize, t.y, t.z );
}
// Copy 3D texture into sub-volume of another 3D texture (char)
extern "C" __global__ void kernelCopyTexC ( int3 offs, int3 res, hipSurfaceObject volTexOut )
{
uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx;
if ( t.x >= res.x || t.y >= res.y || t.z >= res.z ) return;
uchar val = surf3Dread<uchar>(volTexOut, t.x * sizeof(uchar), t.y, t.z);
surf3Dwrite ( val, volTexOut, (t.x+offs.x)*sizeof(uchar), (t.y+offs.y), (t.z+offs.z) );
}
// Copy 3D texture into sub-volume of another 3D texture (float)
extern "C" __global__ void kernelCopyTexF ( int3 offs, int3 res, hipSurfaceObject volTexOut )
{
uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx;
if ( t.x >= res.x || t.y >= res.y || t.z >= res.z ) return;
float val = surf3Dread<float>(volTexOut, t.x * sizeof(float), t.y, t.z);
surf3Dwrite ( val, volTexOut, (t.x+offs.x)*sizeof(float), (t.y+offs.y), (t.z+offs.z) );
}
// Copy linear memory as 3D volume into sub-volume of a 3D texture
extern "C" __global__ void kernelCopyBufToTexC ( int3 offs, int3 res, uchar* inbuf, hipSurfaceObject volTexOut)
{
uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx;
if ( t.x >= res.x || t.y >= res.y || t.z >= res.z ) return;
unsigned char val = inbuf[ (t.z*res.y + t.y)*res.x + t.x ];
surf3Dwrite ( val, volTexOut, (t.x+offs.x)*sizeof(uchar), (t.y+offs.y), (t.z+offs.z) );
}
// Copy linear memory as 3D volume into sub-volume of a 3D texture
extern "C" __global__ void kernelCopyBufToTexF ( int3 offs, int3 res, float* inbuf, hipSurfaceObject volTexOut)
{
uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx;
if ( t.x >= res.x || t.y >= res.y || t.z >= res.z ) return;
float val = inbuf[ (t.z*res.y + t.y)*res.x + t.x ];
surf3Dwrite ( val, volTexOut, (t.x+offs.x)*sizeof(float), (t.y+offs.y), (t.z+offs.z) );
}
// Copy 3D texture into sub-volume of another 3D texture with ZYX swizzle (float)
extern "C" __global__ void kernelCopyTexZYX ( int3 offs, int3 res, hipSurfaceObject volTexInF, hipSurfaceObject volTexOut )
{
uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx;
if ( t.x >= res.x || t.y >= res.y || t.z >= res.z ) return;
float val = surf3Dread<float>(volTexInF, t.z * sizeof(float), t.y, t.x);
surf3Dwrite ( val, volTexOut, (t.x+offs.x)*sizeof(float), (t.y+offs.y), (t.z+offs.z) );
}
// Retrieve 3D texture into linear memory (float)
extern "C" __global__ void kernelRetrieveTexXYZ ( int3 offs, int3 brickRes, float* buf, hipSurfaceObject volTexInF )
{
uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx;
if ( t.x >= brickRes.x || t.y >= brickRes.y || t.z >= brickRes.z ) return;
float val = surf3Dread<float>(volTexInF, (t.x + offs.x) * sizeof(float), t.y + offs.y, t.z + offs.z);
buf[ (t.x*brickRes.y + t.y)*brickRes.x + t.z ] = val;
}
|
d33127e8a5fb66fc3c894b7ca91afee5012428f7.cu
|
//-----------------------------------------------------------------------------
// NVIDIA(R) GVDB VOXELS
// Copyright 2017 NVIDIA Corporation
// SPDX-License-Identifier: Apache-2.0
//
// Version 1.0: Rama Hoetzlein, 5/1/2017
//-----------------------------------------------------------------------------
// File: cuda_gvdb_copydata.cu
//
// GVDB Data Transfers
// - CopyData 3D volume into sub-volume
// - CopyDataZYX 3D volume into sub-volume with ZYX swizzle
// - RetreiveData 3D sub-volume into cuda buffer
// - CopyTexToBuf 2D texture into cuda buffer
// - CopyBufToTex cuda buffer into 2D texture
//-----------------------------------------------
#include "cuda_math.cuh"
// Zero memory of 3D volume
extern "C" __global__ void kernelFillTex ( int3 res, int dsize, CUsurfObject volTexOut )
{
uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx;
if ( t.x >= res.x || t.y >= res.y || t.z >= res.z ) return;
surf3Dwrite ( 0, volTexOut, t.x*dsize, t.y, t.z );
}
// Copy 3D texture into sub-volume of another 3D texture (char)
extern "C" __global__ void kernelCopyTexC ( int3 offs, int3 res, CUsurfObject volTexOut )
{
uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx;
if ( t.x >= res.x || t.y >= res.y || t.z >= res.z ) return;
uchar val = surf3Dread<uchar>(volTexOut, t.x * sizeof(uchar), t.y, t.z);
surf3Dwrite ( val, volTexOut, (t.x+offs.x)*sizeof(uchar), (t.y+offs.y), (t.z+offs.z) );
}
// Copy 3D texture into sub-volume of another 3D texture (float)
extern "C" __global__ void kernelCopyTexF ( int3 offs, int3 res, CUsurfObject volTexOut )
{
uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx;
if ( t.x >= res.x || t.y >= res.y || t.z >= res.z ) return;
float val = surf3Dread<float>(volTexOut, t.x * sizeof(float), t.y, t.z);
surf3Dwrite ( val, volTexOut, (t.x+offs.x)*sizeof(float), (t.y+offs.y), (t.z+offs.z) );
}
// Copy linear memory as 3D volume into sub-volume of a 3D texture
extern "C" __global__ void kernelCopyBufToTexC ( int3 offs, int3 res, uchar* inbuf, CUsurfObject volTexOut)
{
uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx;
if ( t.x >= res.x || t.y >= res.y || t.z >= res.z ) return;
unsigned char val = inbuf[ (t.z*res.y + t.y)*res.x + t.x ];
surf3Dwrite ( val, volTexOut, (t.x+offs.x)*sizeof(uchar), (t.y+offs.y), (t.z+offs.z) );
}
// Copy linear memory as 3D volume into sub-volume of a 3D texture
extern "C" __global__ void kernelCopyBufToTexF ( int3 offs, int3 res, float* inbuf, CUsurfObject volTexOut)
{
uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx;
if ( t.x >= res.x || t.y >= res.y || t.z >= res.z ) return;
float val = inbuf[ (t.z*res.y + t.y)*res.x + t.x ];
surf3Dwrite ( val, volTexOut, (t.x+offs.x)*sizeof(float), (t.y+offs.y), (t.z+offs.z) );
}
// Copy 3D texture into sub-volume of another 3D texture with ZYX swizzle (float)
extern "C" __global__ void kernelCopyTexZYX ( int3 offs, int3 res, CUsurfObject volTexInF, CUsurfObject volTexOut )
{
uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx;
if ( t.x >= res.x || t.y >= res.y || t.z >= res.z ) return;
float val = surf3Dread<float>(volTexInF, t.z * sizeof(float), t.y, t.x);
surf3Dwrite ( val, volTexOut, (t.x+offs.x)*sizeof(float), (t.y+offs.y), (t.z+offs.z) );
}
// Retrieve 3D texture into linear memory (float)
extern "C" __global__ void kernelRetrieveTexXYZ ( int3 offs, int3 brickRes, float* buf, CUsurfObject volTexInF )
{
uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx;
if ( t.x >= brickRes.x || t.y >= brickRes.y || t.z >= brickRes.z ) return;
float val = surf3Dread<float>(volTexInF, (t.x + offs.x) * sizeof(float), t.y + offs.y, t.z + offs.z);
buf[ (t.x*brickRes.y + t.y)*brickRes.x + t.z ] = val;
}
|
bb695d315076e970175b9825815abbb6c72221df.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/******************************************************************************
* Mathias Bourgoin, Université Pierre et Marie Curie (2011)
*
* [email protected]
*
* This software is a computer program whose purpose is to allow
* GPU programming with the OCaml language.
*
* This software is governed by the CeCILL-B license under French law and
* abiding by the rules of distribution of free software. You can use,
* modify and/ or redistribute the software under the terms of the CeCILL-B
* license as circulated by CEA, CNRS and INRIA at the following URL
* "http://www.cecill.info".
*
* As a counterpart to the access to the source code and rights to copy,
* modify and redistribute granted by the license, users are provided only
* with a limited warranty and the software's author, the holder of the
* economic rights, and the successive licensors have only limited
* liability.
*
* In this respect, the user's attention is drawn to the risks associated
* with loading, using, modifying and/or developing or reproducing the
* software by the user in light of its specific status of free software,
* that may mean that it is complicated to manipulate, and that also
* therefore means that it is reserved for developers and experienced
* professionals having in-depth computer knowledge. Users are therefore
* encouraged to load and test the software's suitability as regards their
* requirements in conditions enabling the security of their systems and/or
* data to be ensured and, more generally, to use and operate it in the
* same conditions as regards security.
*
* The fact that you are presently reading this means that you have had
* knowledge of the CeCILL-B license and that you accept its terms.
*******************************************************************************/
#ifdef __cplusplus
extern "C" {
#endif
/****** Single precision *****/
/****** Double precision *****/
#ifdef __cplusplus
}
#endif
__global__ void vec_add(const float* A, const float* B, float* C, int N) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
|
bb695d315076e970175b9825815abbb6c72221df.cu
|
#include "includes.h"
/******************************************************************************
* Mathias Bourgoin, Université Pierre et Marie Curie (2011)
*
* [email protected]
*
* This software is a computer program whose purpose is to allow
* GPU programming with the OCaml language.
*
* This software is governed by the CeCILL-B license under French law and
* abiding by the rules of distribution of free software. You can use,
* modify and/ or redistribute the software under the terms of the CeCILL-B
* license as circulated by CEA, CNRS and INRIA at the following URL
* "http://www.cecill.info".
*
* As a counterpart to the access to the source code and rights to copy,
* modify and redistribute granted by the license, users are provided only
* with a limited warranty and the software's author, the holder of the
* economic rights, and the successive licensors have only limited
* liability.
*
* In this respect, the user's attention is drawn to the risks associated
* with loading, using, modifying and/or developing or reproducing the
* software by the user in light of its specific status of free software,
* that may mean that it is complicated to manipulate, and that also
* therefore means that it is reserved for developers and experienced
* professionals having in-depth computer knowledge. Users are therefore
* encouraged to load and test the software's suitability as regards their
* requirements in conditions enabling the security of their systems and/or
* data to be ensured and, more generally, to use and operate it in the
* same conditions as regards security.
*
* The fact that you are presently reading this means that you have had
* knowledge of the CeCILL-B license and that you accept its terms.
*******************************************************************************/
#ifdef __cplusplus
extern "C" {
#endif
/****** Single precision *****/
/****** Double precision *****/
#ifdef __cplusplus
}
#endif
__global__ void vec_add(const float* A, const float* B, float* C, int N) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
    C[i] = A[i] + B[i];
}
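/* Illustrative host-side launch (assumed names and sizes, not from this file):
 *   int threads = 256, blocks = (N + threads - 1) / threads;
 *   vec_add<<<blocks, threads>>>(dA, dB, dC, N);   // dA, dB, dC are device buffers of N floats
 *   cudaDeviceSynchronize();
 */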
|
3fa6f9f560992511ab05c3b87941012012cdf22c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*************************************************************************
* Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "core.h"
#include "common_coll.h"
#include "enqueue.h"
#include "primitives.h"
#define NUM_SUBSTEPS 4
#define NUM_BUFCHUNKS 2
// Increase Step and boffset for buffer sync
#define NEXT_STEP \
step++; \
boffset += sliceSize; \
if (boffset == buffSize) boffset = 0;
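// Worked example of NEXT_STEP (illustrative numbers only): with NUM_BUFCHUNKS = 2 and,
// say, buffSize = 8 elements, sliceSize = buffSize / NUM_BUFCHUNKS = 4, so boffset
// cycles 0 -> 4 -> 0 -> 4 ... while step keeps increasing and is passed to the
// Prims::Copy / Prims::DoubleCopy calls together with the wait/post flags.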
#define ALIGN_SIZE(size, align) \
size = ((size + (align) - 1) / (align)) * (align);
template<int THREADS, int UNROLL, class FUNC, typename T>
__launch_bounds__(THREADS+WARP_SIZE, 1)
__global__ void BroadcastKernel(const KernelArgs<T> args) {
const int tid = threadIdx.x;
__shared__ T* sharedNextOutput;
__shared__ DevRing<T> ring;
bool pushrecv = args.pushrecv;
LoadRing<THREADS>(args.ring, &ring);
__syncthreads();
if (tid == 0) {
WaitFlag prevCommOp(ring.prevOpCounter, 0);
WaitFlag nextCommOp(ring.nextOpCounter, 0);
prevCommOp.wait(args.opIndex);
nextCommOp.wait(args.opIndex);
if (pushrecv) {
*ring.sendPtrToPrev = (T*)args.ThisOutput;
Wait([=] {
return *ring.recvPtrFromNext != nullptr;
});
sharedNextOutput = *ring.recvPtrFromNext;
*ring.recvPtrFromNext = nullptr;
}
}
__syncthreads();
WaitFlag waitDoneFromNext(ring.recvFlagFromNext, (1-NUM_BUFCHUNKS)*NUM_SUBSTEPS);
WaitFlag waitReadyFromPrev(ring.recvFlagFromPrev, 0);
PostFlag postDoneToPrev(ring.sendFlagToPrev, 0);
PostFlag postReadyToNext(ring.sendFlagToNext, 0);
typedef Primitives<THREADS, UNROLL, NUM_SUBSTEPS, T> Prims;
const int size = args.N;
const int rank = ring.userRank[0];
const int nextRank = ring.userRank[1];
const int root = args.root;
const int buffSize = args.buffSize / sizeof(T);
const int sliceSize = buffSize / NUM_BUFCHUNKS;
int step = 0;
int boffset = 0;
// Compute pointers
const T * __restrict__ thisInput = args.ThisInput;
T * __restrict__ thisOutput = args.ThisOutput;
T * __restrict__ prevInput = ring.recvBuffer;
T * __restrict__ nextOutput = ring.sendBuffer;
for (int offset = 0; offset < size; offset += sliceSize) {
int maxOffset = size-offset;
if (rank == root) {
Prims::Copy(
thisInput + offset,
pushrecv ? sharedNextOutput + offset : nextOutput + boffset,
sliceSize, maxOffset,
step,
waitDoneFromNext,
postReadyToNext);
} else if (nextRank == root) {
if (pushrecv) maxOffset = 0; // Only wait for signals
Prims::Copy(
prevInput + boffset,
thisOutput + offset,
sliceSize, maxOffset,
step,
waitReadyFromPrev,
postDoneToPrev);
} else {
if (pushrecv) {
Prims::Copy(
thisOutput + offset,
sharedNextOutput + offset,
sliceSize, maxOffset,
step,
waitDoneFromNext, waitReadyFromPrev,
postReadyToNext, postDoneToPrev);
} else {
Prims::DoubleCopy(
prevInput + boffset,
thisOutput + offset,
nextOutput + boffset,
sliceSize, maxOffset,
step,
waitDoneFromNext, waitReadyFromPrev,
postReadyToNext, postDoneToPrev);
}
}
NEXT_STEP; // Increases step, boffset
}
// wait for the last data to be pushed to us
if (tid == 0) {
if (nextRank != root) {
// Wait for last update from next then reset the flag
waitDoneFromNext.wait(NUM_SUBSTEPS*(step+NUM_BUFCHUNKS-1));
*ring.recvFlagFromNext = 0;
}
if (rank != root) {
// reset the flag
*ring.recvFlagFromPrev = 0;
}
incrementOpCounter(&args);
}
}
#define THREADS 256
#define UNROLL 8
template<class FUNC, typename T>
ncclResult_t RingBroadcast(void* buff, const int count, const int root,
ncclComm* comm, hipStream_t stream) {
if (comm->nRanks != 1) {
KernelArgs<T> args;
ArgsSetup(&args, buff, buff, root, count, comm);
LAUNCH_KERNEL(BroadcastKernel, THREADS, UNROLL, FUNC, T, args, stream);
}
return ncclSuccess;
}
template<typename T, template<typename> class RedOp>
class Broadcast {
public:
static ncclResult_t entry(const void* sendbuff, void* recvbuff,
int count, int root, ncclComm* comm, hipStream_t stream) {
return RingBroadcast<RedOp<T>, T>(recvbuff, count, root, comm, stream);
}
};
NCCL_API(ncclResult_t, ncclBcast, void* buff, int count, ncclDataType_t datatype, int root,
ncclComm_t comm, hipStream_t stream);
ncclResult_t ncclBcast(void* buff, int count, ncclDataType_t datatype, int root,
ncclComm_t comm, hipStream_t stream) {
NCCLCHECK(ArgsCheck(buff, buff, count, datatype, ncclSum, root, comm, "Bcast"));
return enqueue<Broadcast, FuncNull>(nullptr, buff, count, datatype, root, comm, stream);
}
|
3fa6f9f560992511ab05c3b87941012012cdf22c.cu
|
/*************************************************************************
* Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "core.h"
#include "common_coll.h"
#include "enqueue.h"
#include "primitives.h"
#define NUM_SUBSTEPS 4
#define NUM_BUFCHUNKS 2
// Increase Step and boffset for buffer sync
#define NEXT_STEP \
step++; \
boffset += sliceSize; \
if (boffset == buffSize) boffset = 0;
#define ALIGN_SIZE(size, align) \
size = ((size + (align) - 1) / (align)) * (align);
template<int THREADS, int UNROLL, class FUNC, typename T>
__launch_bounds__(THREADS+WARP_SIZE, 1)
__global__ void BroadcastKernel(const KernelArgs<T> args) {
const int tid = threadIdx.x;
__shared__ T* sharedNextOutput;
__shared__ DevRing<T> ring;
bool pushrecv = args.pushrecv;
LoadRing<THREADS>(args.ring, &ring);
__syncthreads();
if (tid == 0) {
WaitFlag prevCommOp(ring.prevOpCounter, 0);
WaitFlag nextCommOp(ring.nextOpCounter, 0);
prevCommOp.wait(args.opIndex);
nextCommOp.wait(args.opIndex);
if (pushrecv) {
*ring.sendPtrToPrev = (T*)args.ThisOutput;
Wait([=] {
return *ring.recvPtrFromNext != nullptr;
});
sharedNextOutput = *ring.recvPtrFromNext;
*ring.recvPtrFromNext = nullptr;
}
}
__syncthreads();
WaitFlag waitDoneFromNext(ring.recvFlagFromNext, (1-NUM_BUFCHUNKS)*NUM_SUBSTEPS);
WaitFlag waitReadyFromPrev(ring.recvFlagFromPrev, 0);
PostFlag postDoneToPrev(ring.sendFlagToPrev, 0);
PostFlag postReadyToNext(ring.sendFlagToNext, 0);
typedef Primitives<THREADS, UNROLL, NUM_SUBSTEPS, T> Prims;
const int size = args.N;
const int rank = ring.userRank[0];
const int nextRank = ring.userRank[1];
const int root = args.root;
const int buffSize = args.buffSize / sizeof(T);
const int sliceSize = buffSize / NUM_BUFCHUNKS;
int step = 0;
int boffset = 0;
// Compute pointers
const T * __restrict__ thisInput = args.ThisInput;
T * __restrict__ thisOutput = args.ThisOutput;
T * __restrict__ prevInput = ring.recvBuffer;
T * __restrict__ nextOutput = ring.sendBuffer;
for (int offset = 0; offset < size; offset += sliceSize) {
int maxOffset = size-offset;
if (rank == root) {
Prims::Copy(
thisInput + offset,
pushrecv ? sharedNextOutput + offset : nextOutput + boffset,
sliceSize, maxOffset,
step,
waitDoneFromNext,
postReadyToNext);
} else if (nextRank == root) {
if (pushrecv) maxOffset = 0; // Only wait for signals
Prims::Copy(
prevInput + boffset,
thisOutput + offset,
sliceSize, maxOffset,
step,
waitReadyFromPrev,
postDoneToPrev);
} else {
if (pushrecv) {
Prims::Copy(
thisOutput + offset,
sharedNextOutput + offset,
sliceSize, maxOffset,
step,
waitDoneFromNext, waitReadyFromPrev,
postReadyToNext, postDoneToPrev);
} else {
Prims::DoubleCopy(
prevInput + boffset,
thisOutput + offset,
nextOutput + boffset,
sliceSize, maxOffset,
step,
waitDoneFromNext, waitReadyFromPrev,
postReadyToNext, postDoneToPrev);
}
}
NEXT_STEP; // Increases step, boffset
}
// wait for the last data to be pushed to us
if (tid == 0) {
if (nextRank != root) {
// Wait for last update from next then reset the flag
waitDoneFromNext.wait(NUM_SUBSTEPS*(step+NUM_BUFCHUNKS-1));
*ring.recvFlagFromNext = 0;
}
if (rank != root) {
// reset the flag
*ring.recvFlagFromPrev = 0;
}
incrementOpCounter(&args);
}
}
#define THREADS 256
#define UNROLL 8
template<class FUNC, typename T>
ncclResult_t RingBroadcast(void* buff, const int count, const int root,
ncclComm* comm, cudaStream_t stream) {
if (comm->nRanks != 1) {
KernelArgs<T> args;
ArgsSetup(&args, buff, buff, root, count, comm);
LAUNCH_KERNEL(BroadcastKernel, THREADS, UNROLL, FUNC, T, args, stream);
}
return ncclSuccess;
}
template<typename T, template<typename> class RedOp>
class Broadcast {
public:
static ncclResult_t entry(const void* sendbuff, void* recvbuff,
int count, int root, ncclComm* comm, cudaStream_t stream) {
return RingBroadcast<RedOp<T>, T>(recvbuff, count, root, comm, stream);
}
};
NCCL_API(ncclResult_t, ncclBcast, void* buff, int count, ncclDataType_t datatype, int root,
ncclComm_t comm, cudaStream_t stream);
ncclResult_t ncclBcast(void* buff, int count, ncclDataType_t datatype, int root,
ncclComm_t comm, cudaStream_t stream) {
NCCLCHECK(ArgsCheck(buff, buff, count, datatype, ncclSum, root, comm, "Bcast"));
return enqueue<Broadcast, FuncNull>(nullptr, buff, count, datatype, root, comm, stream);
}
|
95a21a3f5e2e7b3d94a3b231bad6bb0a00c81862.hip
|
// !!! This is a file automatically generated by hipify!!!
//GTX 750, ( 4) Multiprocessors, (192) CUDA Cores/MP: 768 CUDA Cores
// OpenCV and I/O libraries
#include <bits/stdc++.h>
#include <fstream>
#include <opencv2/highgui.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/opencv.hpp>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
using namespace std;
using namespace chrono;
using namespace cv;
int total_blocks;
int total_threads;
Mat h_Original;
Mat h_Resized;
const int output_height = 480;
const int output_width = 720;
void my_cudaError(hipError_t err, string errorMessage){
if(err != hipSuccess){
fprintf(stderr, "\nError: %s: %s", errorMessage.c_str(), hipGetErrorString(err));
}
}
__global__ void downSizeImage(const unsigned char *original, unsigned char *resized, int W, int H, int w, int h, int all_threats){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int start = idx * ((h * w + all_threats - 1) / all_threats);
int end = min(h * w, (idx + 1) * ((h * w + all_threats - 1) / all_threats));
for(int i = start; i < end; ++i){
#pragma unroll
for(int k = 0; k < 3; ++k){
*(resized + i*3 + k) = *(original + (((H * (i / w)) / h)*W + ((W * (i % w)) / w))*3 + k);
}
}
}
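/* Index-mapping note (illustrative values, not from this file): the kernel does
 * nearest-neighbor downscaling. Output pixel i sits at row r = i / w, column c = i % w,
 * and reads source pixel (H * r / h, W * c / w). For example, with W = 1440, H = 960 and
 * the fixed 720x480 output, output (r, c) simply reads source (2*r, 2*c); the *3 + k
 * indexing walks the three interleaved channels of the OpenCV Mat.
 */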
/* Host main routine */
int main(int argc, char** argv){
// Command line input
string input_name = "../images/" + string(argv[1]);
total_blocks = atoi(argv[3]);
total_threads = atoi(argv[4]);
ofstream fout;
fout.open("informe_cuda.txt", ios_base::app);
hipError_t err = hipSuccess;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Read images
h_Original = imread(input_name);
h_Resized = Mat(output_height, output_width, CV_8UC3);
if(!h_Original.data){
return cout << "Couldn't open or find the image: " << input_name << '\n', -1;
}
size_t og_size = h_Original.cols * h_Original.rows * 3 * sizeof(unsigned char);
size_t re_size = output_height * output_width * 3 * sizeof(unsigned char);
// Allocate the device input arrays
unsigned char* d_Original;
unsigned char* d_Resized;
err = hipMalloc((void **)&d_Original, og_size);
my_cudaError(err, "Fallo el malloc en el device del array de la imagen original");
err = hipMalloc((void **)&d_Resized, re_size);
my_cudaError(err, "Fallo el malloc en el device del array de la imagen de salida");
// Copy the host input arrays in host memory to the device input arrays in device memory
err = hipMemcpy(d_Original, h_Original.ptr(), og_size, hipMemcpyHostToDevice);
my_cudaError(err, "Fallo en el memcpy del devie para la imagen original");
err = hipMemcpy(d_Resized, h_Resized.ptr(), re_size, hipMemcpyHostToDevice);
my_cudaError(err, "Fallo en el memcpy del devie para la imagen de salida");
//-------------------------------------- Launch the downsize CUDA Kernel-------------------------------------------------------------------------------------------
hipEventRecord(start);
hipLaunchKernelGGL(( downSizeImage), dim3(total_blocks), dim3(total_threads), 0, 0, d_Original, d_Resized, h_Original.cols, h_Original.rows, output_width, output_height, total_blocks * total_threads);
hipEventRecord(stop);
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------
// Copy the device result array in device memory to the host result array in host memory
err = hipMemcpy(h_Resized.ptr(), d_Resized, re_size, hipMemcpyDeviceToHost);
my_cudaError(err, "Fallo al traer la imagen del device");
imwrite(string(argv[2]), h_Resized);
// Gather cuda time
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
// Free device global memory
err = hipFree(d_Original);
my_cudaError(err, "Error al liberar la memoria global de device");
err = hipFree(d_Resized);
my_cudaError(err, "Error al liberar la memoria global de device");
// Prints
fout << fixed << setprecision(12);
fout << "----------------------------------------------------------------------------\n";
fout << "Nmero total de hilos: " << total_blocks * total_threads << " = (" << total_blocks << " bloque(s) x " << total_threads << " hilo(s)/bloque)\n";
fout << "Tiempo de respuesta (CUDA): " << milliseconds / 1000 << '\n';
fout << "Dimensiones de la imagen de entrada: " << h_Original.cols << "," << h_Original.rows << "\n";
fout << "----------------------------------------------------------------------------\n\n";
return 0;
}
|
95a21a3f5e2e7b3d94a3b231bad6bb0a00c81862.cu
|
//GTX 750, ( 4) Multiprocessors, (192) CUDA Cores/MP: 768 CUDA Cores
// OpenCV and I/O libraries
#include <bits/stdc++.h>
#include <fstream>
#include <opencv2/highgui.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/opencv.hpp>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <helper_cuda.h>
using namespace std;
using namespace chrono;
using namespace cv;
int total_blocks;
int total_threads;
Mat h_Original;
Mat h_Resized;
const int output_height = 480;
const int output_width = 720;
void my_cudaError(cudaError_t err, string errorMessage){
if(err != cudaSuccess){
fprintf(stderr, "\nError: %s: %s", errorMessage.c_str(), cudaGetErrorString(err));
}
}
__global__ void downSizeImage(const unsigned char *original, unsigned char *resized, int W, int H, int w, int h, int all_threats){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int start = idx * ((h * w + all_threats - 1) / all_threats);
int end = min(h * w, (idx + 1) * ((h * w + all_threats - 1) / all_threats));
for(int i = start; i < end; ++i){
#pragma unroll
for(int k = 0; k < 3; ++k){
*(resized + i*3 + k) = *(original + (((H * (i / w)) / h)*W + ((W * (i % w)) / w))*3 + k);
}
}
}
/* Host main routine */
int main(int argc, char** argv){
// Command line input
string input_name = "../images/" + string(argv[1]);
total_blocks = atoi(argv[3]);
total_threads = atoi(argv[4]);
ofstream fout;
fout.open("informe_cuda.txt", ios_base::app);
cudaError_t err = cudaSuccess;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Read images
h_Original = imread(input_name);
h_Resized = Mat(output_height, output_width, CV_8UC3);
if(!h_Original.data){
return cout << "Couldn't open or find the image: " << input_name << '\n', -1;
}
size_t og_size = h_Original.cols * h_Original.rows * 3 * sizeof(unsigned char);
size_t re_size = output_height * output_width * 3 * sizeof(unsigned char);
// Allocate the device input arrays
unsigned char* d_Original;
unsigned char* d_Resized;
err = cudaMalloc((void **)&d_Original, og_size);
my_cudaError(err, "Fallo el malloc en el device del array de la imagen original");
err = cudaMalloc((void **)&d_Resized, re_size);
my_cudaError(err, "Fallo el malloc en el device del array de la imagen de salida");
// Copy the host input arrays in host memory to the device input arrays in device memory
err = cudaMemcpy(d_Original, h_Original.ptr(), og_size, cudaMemcpyHostToDevice);
my_cudaError(err, "Fallo en el memcpy del devie para la imagen original");
err = cudaMemcpy(d_Resized, h_Resized.ptr(), re_size, cudaMemcpyHostToDevice);
my_cudaError(err, "Fallo en el memcpy del devie para la imagen de salida");
//-------------------------------------- Launch the downsize CUDA Kernel-------------------------------------------------------------------------------------------
cudaEventRecord(start);
downSizeImage<<<total_blocks, total_threads>>>(d_Original, d_Resized, h_Original.cols, h_Original.rows, output_width, output_height, total_blocks * total_threads);
cudaEventRecord(stop);
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------
// Copy the device result array in device memory to the host result array in host memory
err = cudaMemcpy(h_Resized.ptr(), d_Resized, re_size, cudaMemcpyDeviceToHost);
my_cudaError(err, "Fallo al traer la imagen del device");
imwrite(string(argv[2]), h_Resized);
// Gather cuda time
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
// Free device global memory
err = cudaFree(d_Original);
my_cudaError(err, "Error al liberar la memoria global de device");
err = cudaFree(d_Resized);
my_cudaError(err, "Error al liberar la memoria global de device");
// Prints
fout << fixed << setprecision(12);
fout << "----------------------------------------------------------------------------\n";
fout << "Número total de hilos: " << total_blocks * total_threads << " = (" << total_blocks << " bloque(s) x " << total_threads << " hilo(s)/bloque)\n";
fout << "Tiempo de respuesta (CUDA): " << milliseconds / 1000 << '\n';
fout << "Dimensiones de la imagen de entrada: " << h_Original.cols << "," << h_Original.rows << "\n";
fout << "----------------------------------------------------------------------------\n\n";
return 0;
}
|
c9a869e21d735f28d2cc53038f529e92fc685f43.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011 Chen-Hsiung Liu, Lung-Sheng Chien, Cheng-Hung Lin,and Shih-Chieh Chang
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* F = number of final states, we label final states from s{1}, s{2}, ... s{F}
* and initial state is s{F+1}. s{0} is of no use.
*
* if maximum pattern length is less than 512, then we will load transition function
 * of the initial state to shared memory, so we require BLOCK_SIZE * k = 256 such that
 * each thread loads several transition pairs into shared memory
*/
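/* Worked example of the state numbering (illustrative only): with F = 3 final states,
 * the final states are 1, 2, 3 and the initial state is 4; a state number satisfying
 * state <= num_finalState is recorded as a match by the SUBSEG_MATCH* macros below,
 * while reaching TRAP_STATE terminates that thread's walk over the input.
 */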
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#include <ctype.h>
#include "thrust/device_vector.h"
#include "thrust/scan.h"
#include "../include/PFAC_P.h"
#ifdef __cplusplus
extern "C" {
PFAC_status_t PFAC_reduce_kernel( PFAC_handle_t handle, int *d_input_string, int input_size,
int *d_match_result, int *d_pos, int *h_num_matched, int *h_match_result, int *h_pos );
}
#endif // __cplusplus
#define BLOCK_EXP (7)
#define BLOCK_SIZE (1 << BLOCK_EXP)
#define EXTRA_SIZE_PER_TB (128)
#define NUM_INTS_PER_THREAD (2)
#define BLOCK_SIZE_DIV_256 (2)
#define NUM_WARPS_PER_BLOCK (4)
#if 256 != (BLOCK_SIZE_DIV_256 * BLOCK_SIZE)
#error 256 != BLOCK_SIZE_DIV_256 * BLOCK_SIZE
#endif
#if BLOCK_SIZE != 32 * NUM_WARPS_PER_BLOCK
#error BLOCK_SIZE != 32 * NUM_WARPS_PER_BLOCK
#endif
texture < int, 1, hipReadModeElementType > tex_PFAC_table_reduce;
static __inline__ __device__ int tex_lookup(int state, int inputChar)
{
return tex1Dfetch(tex_PFAC_table_reduce, state*CHAR_SET + inputChar);
}
/* declaration */
template <int TEXTURE_ON , int SMEM_ON >
__global__ void PFAC_reduce_kernel_device(
int *d_PFAC_table,
int *d_input_string,
int input_size,
int n_hat,
int num_finalState,
int initial_state,
int num_blocks_minus1,
int *d_pos,
int *d_match_result,
int *d_nnz_per_block ) ;
__host__ PFAC_status_t PFAC_reduce_kernel_stage1(
PFAC_handle_t handle,
int *d_input_string,
int input_size,
int n_hat,
int num_blocks,
dim3 dimBlock,
dim3 dimGrid,
int *d_match_result,
int *d_pos,
int *d_nnz_per_block,
int *h_num_matched );
__global__ void zip_kernel(
int *d_pos,
int *d_match_result,
int *d_nnz_per_block,
int num_blocks_minus1,
int elements_per_block,
int *d_pos_zip,
int *d_match_result_zip);
/* end of declaration */
// ---------------------------- main ----------------------
/*
*
* Input -
* handle
* pointer to a legal PFAC context
* d_input_string
* input stream in device memory, its size is "input_size" bytes
* input_size
* size of input stream
*
* Output -
* h_num_matched
* pointer to a host memory, it denotes number of matched patterns in the input stream
* for example, if device mode is set, and h_num_matched = 5, then
 * d_pos[0:4] contains the starting position of each matched pattern in the input stream
* d_match_result[0:4] contains pattern ID of matched pattern
*
* NOTE: if h_num_matched = 0, then d_pos and d_match_result are not touched,
 * their values are undefined.
* also at this time, (d_pos_zip, d_match_result_zip) is not allocated, so
* space is efficient.
*
* support 2 mode:
*
* Device mode:
* (d_pos, d_match_result) pair is device memory
* (h_pos, h_match_result) is (NULL,NULL)
*
* 1) (d_pos, d_match_result) is used as working space, store local compressed (match,pos)
* 2) zip (d_pos, d_match_result) to working space (d_pos_zip, d_match_result_zip)
* 3) copy (d_pos_zip, d_match_result_zip) to (d_pos, d_match_result) via DeviceToDevice
*
* Host mode:
* (d_pos, d_match_result) pair is working space
* (h_pos, h_match_result) is not (NULL,NULL)
*
* 1) (d_pos, d_match_result) is used as working space, store local compressed (match,pos)
* 2) zip (d_pos, d_match_result) to working space (d_pos_zip, d_match_result_zip)
* 3) copy (d_pos_zip, d_match_result_zip) to (h_pos, h_match_result) via DeviceToHost
*
* We can combine two modes in a simple way,
* (d_pos, h_pos) is mutually exclusive, so is (d_match_result, h_match_result).
* i.e.
* if ( h_pos ) then
* h_pos <-- d_pos_zip
* else
* d_pos <-- d_pos_zip
* end
*
* if ( h_match_result ) then
* h_match_result <-- d_match_result_zip
* else
* d_match_result <-- d_match_result_zip
* end
*
*/
__host__ PFAC_status_t PFAC_reduce_kernel(
PFAC_handle_t handle,
int *d_input_string,
int input_size,
int *d_match_result,
int *d_pos,
int *h_num_matched,
int *h_match_result,
int *h_pos )
{
int *d_nnz_per_block = NULL ; // working space, d_nnz_per_block[j] = nnz of block j
int *d_pos_zip = NULL ; // working space, compression of initial d_pos
int *d_match_result_zip = NULL ; // working space, compression of initial d_match_result
hipError_t cuda_status ;
PFAC_status_t PFAC_status ;
// n_hat = (input_size + 3)/4 = number of integers of input string
int n_hat = (input_size + sizeof(int)-1)/sizeof(int) ;
// num_blocks = # of thread blocks to cover input stream
int num_blocks = (n_hat + BLOCK_SIZE*NUM_INTS_PER_THREAD-1)/(BLOCK_SIZE*NUM_INTS_PER_THREAD) ;
cuda_status = hipMalloc((void **)&d_nnz_per_block, num_blocks*sizeof(int) );
if ( hipSuccess != cuda_status ){
return PFAC_STATUS_CUDA_ALLOC_FAILED ;
}
dim3 dimBlock( BLOCK_SIZE, 1 ) ;
dim3 dimGrid ;
/*
 * hardware limitation of a 2-D grid is (65535, 65535),
* 1-D grid is not enough to cover large input stream.
* For example, input_size = 1G (input stream has 1Gbyte), then
* num_blocks = # of thread blocks = 1G / 1024 = 1M > 65535
*
 * However, when using a 2-D grid, the number of invoked blocks = dimGrid.x * dimGrid.y,
 * which is bigger than num_blocks
*
* we need to check this boundary condition inside kernel because
* size of d_nnz_per_block is num_blocks
*
* trick: decompose num_blocks = p * 2^15 + q
*/
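/* Worked example of the decomposition (illustrative numbers): if num_blocks = 100000,
 * then p = num_blocks >> 15 = 3, so dimGrid = (32768, 4) and dimGrid.x * dimGrid.y =
 * 131072 >= num_blocks; the surplus blocks are filtered out inside the kernels by the
 * "gbid > num_blocks_minus1" check.
 */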
int p = num_blocks >> 15 ;
dimGrid.x = num_blocks ;
if ( p ){
dimGrid.x = 1<<15 ;
dimGrid.y = p+1 ;
}
PFAC_status = PFAC_reduce_kernel_stage1( handle, d_input_string, input_size,
n_hat, num_blocks, dimBlock, dimGrid,
d_match_result, d_pos, d_nnz_per_block, h_num_matched );
if ( PFAC_STATUS_SUCCESS != PFAC_status ){
hipFree(d_nnz_per_block);
return PFAC_STATUS_INTERNAL_ERROR ;
}
if ( 0 == *h_num_matched ){
hipFree(d_nnz_per_block);
return PFAC_STATUS_SUCCESS;
}
/*
* stage 3: compression (d_match_result, d_pos) to working space (d_pos_zip, d_match_result_zip)
* by information of d_nnz_per_block
*
* after stage 3, d_nnz_per_block is useless
*/
hipError_t cuda_status1 = hipMalloc((void **) &d_pos_zip, (*h_num_matched)*sizeof(int) );
hipError_t cuda_status2 = hipMalloc((void **) &d_match_result_zip, (*h_num_matched)*sizeof(int) );
if ( (hipSuccess != cuda_status1) || (hipSuccess != cuda_status2) ){
if ( NULL != d_pos_zip ) { hipFree(d_pos_zip); }
if ( NULL != d_match_result_zip ) { hipFree(d_match_result_zip); }
hipFree(d_nnz_per_block);
return PFAC_STATUS_CUDA_ALLOC_FAILED ;
}
int elements_per_block = BLOCK_SIZE * NUM_INTS_PER_THREAD * 4 ;
hipLaunchKernelGGL(( zip_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_pos, d_match_result, d_nnz_per_block,
num_blocks - 1, elements_per_block,
d_pos_zip, d_match_result_zip );
cuda_status = hipGetLastError() ;
if ( hipSuccess != cuda_status ){
hipFree(d_pos_zip);
hipFree(d_match_result_zip);
hipFree(d_nnz_per_block);
return PFAC_STATUS_INTERNAL_ERROR ;
}
hipFree(d_nnz_per_block);
/*
* stage 4: copy data back to d_pos and d_match_result
* we can write hand-copy kernel to copy (d_pos_zip, d_match_result)
* this should be efficient
*/
if ( NULL != h_pos ){
cuda_status1 = hipMemcpy(h_pos, d_pos_zip, (*h_num_matched)*sizeof(int), hipMemcpyDeviceToHost);
}else{
cuda_status1 = hipMemcpy(d_pos, d_pos_zip, (*h_num_matched)*sizeof(int), hipMemcpyDeviceToDevice);
}
if ( NULL != h_match_result ){
cuda_status2 = hipMemcpy(h_match_result, d_match_result_zip, (*h_num_matched)*sizeof(int), hipMemcpyDeviceToHost);
}else{
cuda_status2 = hipMemcpy(d_match_result, d_match_result_zip, (*h_num_matched)*sizeof(int), hipMemcpyDeviceToDevice);
}
if ( (hipSuccess != cuda_status1) ||
(hipSuccess != cuda_status2) )
{
hipFree(d_pos_zip);
hipFree(d_match_result_zip);
return PFAC_STATUS_INTERNAL_ERROR ;
}
hipFree(d_pos_zip);
hipFree(d_match_result_zip);
return PFAC_STATUS_SUCCESS;
}
/*
* stage 1: perform matching process and zip non-zero (matched thread) into continuous
 * memory block and keep order. Moreover, the nnz of each thread block is stored in d_nnz_per_block
*
* d_nnz_per_block[j] = nnz of thread block j
*
 * since each thread block processes 1024 substrings, the range of d_nnz_per_block[j] is [0,1024]
*/
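/* The 1024 figure follows from the launch configuration: each block runs BLOCK_SIZE = 128
 * threads and each thread scans 4*NUM_INTS_PER_THREAD = 8 starting positions, so a block
 * covers 128 * 8 = 1024 substrings and 0 <= d_nnz_per_block[j] <= 1024.
 */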
__host__ PFAC_status_t PFAC_reduce_kernel_stage1(
PFAC_handle_t handle,
int *d_input_string,
int input_size,
int n_hat,
int num_blocks,
dim3 dimBlock,
dim3 dimGrid,
int *d_match_result,
int *d_pos,
int *d_nnz_per_block,
int *h_num_matched )
{
hipError_t cuda_status ;
PFAC_status_t pfac_status = PFAC_STATUS_SUCCESS;
int num_finalState = handle->numOfFinalStates;
int initial_state = handle->initial_state;
bool smem_on = ((4*EXTRA_SIZE_PER_TB-1) >= handle->maxPatternLen) ;
bool texture_on = (PFAC_TEXTURE_ON == handle->textureMode );
if ( texture_on ){
// #### lock mutex, only one thread can bind texture
pfac_status = PFAC_tex_mutex_lock();
if ( PFAC_STATUS_SUCCESS != pfac_status ){
return pfac_status ;
}
textureReference *texRefTable ;
hipGetTextureReference( (const struct textureReference**)&texRefTable, &tex_PFAC_table_reduce );
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<int>();
// set texture parameters
tex_PFAC_table_reduce.addressMode[0] = hipAddressModeClamp;
tex_PFAC_table_reduce.addressMode[1] = hipAddressModeClamp;
tex_PFAC_table_reduce.filterMode = hipFilterModePoint;
tex_PFAC_table_reduce.normalized = 0;
size_t offset ;
cuda_status = hipBindTexture( &offset, (const struct textureReference*) texRefTable,
(const void*) handle->d_PFAC_table, (const struct hipChannelFormatDesc*) &channelDesc, handle->sizeOfTableInBytes ) ;
// #### unlock mutex
pfac_status = PFAC_tex_mutex_unlock();
if ( PFAC_STATUS_SUCCESS != pfac_status ){
return pfac_status ;
}
if ( hipSuccess != cuda_status ){
PFAC_PRINTF("Error: cannot bind texture, %s\n", hipGetErrorString(cuda_status) );
return PFAC_STATUS_CUDA_ALLOC_FAILED ;
}
if ( 0 != offset ){
return PFAC_STATUS_INTERNAL_ERROR ;
}
}
if (smem_on) {
if ( texture_on ){
hipLaunchKernelGGL(( PFAC_reduce_kernel_device<1, 1>) , dim3(dimGrid), dim3(dimBlock) , 0, 0, handle->d_PFAC_table,
d_input_string, input_size, n_hat, num_finalState, initial_state, num_blocks - 1,
d_pos, d_match_result, d_nnz_per_block );
}else{
hipLaunchKernelGGL(( PFAC_reduce_kernel_device<0, 1>) , dim3(dimGrid), dim3(dimBlock) , 0, 0, handle->d_PFAC_table,
d_input_string, input_size, n_hat, num_finalState, initial_state, num_blocks - 1,
d_pos, d_match_result, d_nnz_per_block );
}
}else{
if ( texture_on ){
hipLaunchKernelGGL(( PFAC_reduce_kernel_device<1, 0>) , dim3(dimGrid), dim3(dimBlock) , 0, 0, handle->d_PFAC_table,
d_input_string, input_size, n_hat, num_finalState, initial_state, num_blocks - 1,
d_pos, d_match_result, d_nnz_per_block );
}else{
hipLaunchKernelGGL(( PFAC_reduce_kernel_device<0, 0>) , dim3(dimGrid), dim3(dimBlock) , 0, 0, handle->d_PFAC_table,
d_input_string, input_size, n_hat, num_finalState, initial_state, num_blocks - 1,
d_pos, d_match_result, d_nnz_per_block );
}
}
cuda_status = hipGetLastError() ;
if ( texture_on ){
// #### lock mutex, only one thread can unbind texture
pfac_status = PFAC_tex_mutex_lock();
if ( PFAC_STATUS_SUCCESS != pfac_status ){
return pfac_status ;
}
hipUnbindTexture(tex_PFAC_table_reduce);
// #### unlock mutex
pfac_status = PFAC_tex_mutex_unlock();
if ( PFAC_STATUS_SUCCESS != pfac_status ){
return pfac_status ;
}
}
if ( hipSuccess != cuda_status ){
return PFAC_STATUS_INTERNAL_ERROR ;
}
/*
* stage 2: use Thrust to do in-place prefix_sum( d_nnz_per_block[0:num_blocks-1] )
*
* after inclusive_scan, then
*
* d_nnz_per_block[j] = prefix_sum( d_nnz_per_block[0:j] )
*
* d_nnz_per_block[num_blocks-1] = total number of non-zero = h_num_matched
*
*/
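/* Tiny illustration (made-up values): if the per-block counts are [3, 0, 5, 2], the
 * in-place inclusive_scan turns d_nnz_per_block into [3, 3, 8, 10]; the last entry (10)
 * is copied back as *h_num_matched, and entry j-1 gives block j its write offset
 * in the zip_kernel below.
 */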
thrust::device_ptr<int> dev_nnz_per_block ( d_nnz_per_block ) ;
thrust::inclusive_scan(dev_nnz_per_block, dev_nnz_per_block + num_blocks, dev_nnz_per_block );
cuda_status = hipMemcpy( h_num_matched, d_nnz_per_block + num_blocks-1, sizeof(int), hipMemcpyDeviceToHost) ;
if ( hipSuccess != cuda_status ){
return PFAC_STATUS_INTERNAL_ERROR ;
}
return PFAC_STATUS_SUCCESS ;
}
__global__ void zip_kernel(
int *d_pos,
int *d_match_result,
int *d_nnz_per_block,
int num_blocks_minus1,
int elements_per_block,
int *d_pos_zip,
int *d_match_result_zip)
{
int tid = threadIdx.x ;
int gbid = blockIdx.y * gridDim.x + blockIdx.x ;
if ( gbid > num_blocks_minus1 ){
return ; // d_nnz_per_block[0:num_blocks-1]
}
int start = 0 ;
if ( 0 < gbid ){
start = d_nnz_per_block[gbid - 1] ;
}
int nnz = d_nnz_per_block[gbid] - start ;
int base = gbid * elements_per_block ;
for( int colIdx = tid ; colIdx < nnz ; colIdx += BLOCK_SIZE ){
d_pos_zip[ start + colIdx ] = d_pos[ base + colIdx ] ;
d_match_result_zip[ start + colIdx ] = d_match_result[ base + colIdx ] ;
}
}
/*
* (1) transition table of initial state is in the shared memory phi_s02s1
* we don't need to look up table in texture tex_PFAC_table
*
* (2) final states are reordered as 0, 1, 2, ..., k -1
* so state number < k (number of final states) means final state
*/
#define SUBSEG_MATCH( j, match ) \
pos = tid + j * BLOCK_SIZE ;\
if ( pos < bdy ){ \
inputChar = s_char[pos]; \
state = phi_s02s1[ inputChar ]; \
if ( TRAP_STATE != state ){ \
if ( state <= num_finalState ){ \
match = state;\
} \
pos = pos + 1; \
while ( pos < bdy ) { \
inputChar = s_char[pos]; \
state = tex_lookup(state, inputChar); \
if ( TRAP_STATE == state ){ break ;} \
if ( state <= num_finalState ){ \
match = state;\
}\
pos = pos + 1;\
}\
}\
}
// end macro
#define SUBSEG_MATCH_NOSMEM( j, match ) \
pos = ( gbid * BLOCK_SIZE * NUM_INTS_PER_THREAD * 4 ) + tid + j * BLOCK_SIZE ;\
if ( pos < input_size ){ \
inputChar = (unsigned char) char_d_input_string[pos]; \
state = phi_s02s1[ inputChar ]; \
if ( TRAP_STATE != state ){ \
if ( state <= num_finalState ){ \
match = state;\
} \
pos = pos + 1; \
while ( pos < input_size ) { \
inputChar = (unsigned char) char_d_input_string[pos]; \
state = tex_lookup(state, inputChar); \
if ( TRAP_STATE == state ){ break ;} \
if ( state <= num_finalState ){ \
match = state;\
}\
pos = pos + 1;\
}\
}\
}
// end macro
#define SUBSEG_MATCH_NOTEX( j, match ) \
pos = tid + j * BLOCK_SIZE ;\
if ( pos < bdy ){ \
inputChar = s_char[pos]; \
state = phi_s02s1[ inputChar ]; \
if ( TRAP_STATE != state ){ \
if ( state <= num_finalState ){ \
match = state;\
} \
pos = pos + 1; \
while ( pos < bdy ) { \
inputChar = s_char[pos]; \
state = *(d_PFAC_table + state*CHAR_SET + inputChar); \
if ( TRAP_STATE == state ){ break ;} \
if ( state <= num_finalState ){ \
match = state;\
}\
pos = pos + 1;\
}\
}\
}
// end macro
#define SUBSEG_MATCH_NOSMEM_NOTEX( j, match ) \
pos = ( gbid * BLOCK_SIZE * NUM_INTS_PER_THREAD * 4 ) + tid + j * BLOCK_SIZE ;\
if ( pos < input_size ){ \
inputChar = (unsigned char) char_d_input_string[pos]; \
state = phi_s02s1[ inputChar ]; \
if ( TRAP_STATE != state ){ \
if ( state <= num_finalState ){ \
match = state;\
} \
pos = pos + 1; \
while ( pos < input_size ) { \
inputChar = (unsigned char) char_d_input_string[pos]; \
state = *(d_PFAC_table + state*CHAR_SET + inputChar); \
if ( TRAP_STATE == state ){ break ;} \
if ( state <= num_finalState ){ \
match = state;\
}\
pos = pos + 1;\
}\
}\
}
// end macro
/*
* caller must reset working space s_Data first
*
* This device function comes from SDK/scan
*
* original code
* [code]
* //assuming size <= WARP_SIZE
* inline __device__ uint warpScanInclusive(uint idata, uint *s_Data, uint size){
* uint pos = 2 * threadIdx.x - (threadIdx.x & (size - 1));
* s_Data[pos] = 0;
* pos += size;
* s_Data[pos] = idata;
*
* for(uint offset = 1; offset < size; offset <<= 1)
* s_Data[pos] += s_Data[pos - offset];
*
* return s_Data[pos];
* }
* [/code]
*
* Question: one may wonder why volatile keyword is missing?
* nvcc 3.2 will keep "s_Data[pos] = ..." and cuobjdump shows
* [code]
* int pos = 2 * id - (id &31);
* s_Data[pos] = 0 ;
* pos += 32 ;
* s_Data[pos] = idata ;
* R0 = idata ;
* for( int offset = 1 ; offset < 32 ; offset <<= 1 ){
* R0 += s_Data[pos - offset];
* s_Data[pos] = R0 ;
* }
* return s_Data[pos];
* [/code]
*
* check http://forums.nvidia.com/index.php?showtopic=193730
*
*/
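/* Example of the intended result (illustrative lane values): for a 32-wide warp whose
 * idata values start 1, 0, 1, 1, 0, ..., the inclusive scan returns 1, 1, 2, 3, 3, ...;
 * lane 31 therefore holds the number of non-zero entries in the whole warp, which is
 * what the caller stores into acc_warp[].
 */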
inline __device__ int warpScanInclusive(int idata, int id, int *s_Data)
{
int pos = 2 * id - (id &31);
// s_Data[pos] = 0 ;
pos += 32 ;
s_Data[pos] = idata ;
for( int offset = 1 ; offset < 32 ; offset <<= 1 ){
s_Data[pos] += s_Data[pos - offset];
}
return s_Data[pos];
}
#define MANUAL_EXPAND_2( X ) { X ; X ; }
#define MANUAL_EXPAND_4( X ) { MANUAL_EXPAND_2( MANUAL_EXPAND_2( X ) ) }
#define MANUAL_EXPAND_8( X ) { MANUAL_EXPAND_4( MANUAL_EXPAND_4( X ) ) }
/*
* resource usage
* sm20:
* 1) use smem
* 32 regs, 5120B smem, 96B cmem, => 1024 threads / SM
* 2) no smem
* 40 regs, 5120B smem, 96B cmem, => 768 threads / SM
* sm13:
* 1) use smem
* 24 regs, 5200B smem, 52B cmem, => 256 threads / SM
* 2) no smem
* 32 regs, 5200B smem, 52B cmem, => 256 threads / SM
*
* sm11:
* 1) use smem
* 24 regs, 5200B smem, 52B cmem, => 256 threads / SM
* 2) no smem
* 32 regs, 5200B smem, 8B cmem, => 256 threads / SM
*/
template <int TEXTURE_ON , int SMEM_ON >
__global__ void PFAC_reduce_kernel_device(
int *d_PFAC_table,
int *d_input_string,
int input_size,
int n_hat,
int num_finalState,
int initial_state,
int num_blocks_minus1,
int *d_pos,
int *d_match_result,
int *d_nnz_per_block )
{
int tid = threadIdx.x ;
int gbid = blockIdx.y * gridDim.x + blockIdx.x ;
int start ;
int pos;
int state;
int inputChar;
int match[4*NUM_INTS_PER_THREAD] ;
__shared__ int s_input[ BLOCK_SIZE*NUM_INTS_PER_THREAD*4];
__shared__ int phi_s02s1[ 256 ] ;
volatile unsigned char *s_char;
char * char_d_input_string ;
if ( gbid > num_blocks_minus1 ){
return ; // whole block is outside input stream
}
#pragma unroll
for(int i = 0 ; i < 4*NUM_INTS_PER_THREAD ; i++){
match[i] = 0 ;
}
// load transition table of initial state to shared memory
if ( TEXTURE_ON ){
#pragma unroll
for(int i = 0 ; i < BLOCK_SIZE_DIV_256 ; i++){
phi_s02s1[ tid + i*BLOCK_SIZE ] = tex_lookup(initial_state, tid + i*BLOCK_SIZE);
}
}else{
#pragma unroll
for(int i = 0 ; i < BLOCK_SIZE_DIV_256 ; i++){
phi_s02s1[ tid + i*BLOCK_SIZE ] = *(d_PFAC_table + initial_state*CHAR_SET + (tid + i*BLOCK_SIZE) );
}
}
#if BLOCK_SIZE < EXTRA_SIZE_PER_TB
#error BLOCK_SIZE should be bigger than EXTRA_SIZE_PER_TB
#endif
if ( SMEM_ON ){
// legal thread block which contains some input stream
s_char = (unsigned char *)s_input;
// read global data to shared memory
start = gbid * (BLOCK_SIZE*NUM_INTS_PER_THREAD) + tid ;
#pragma unroll
for(int i = 0 ; i < NUM_INTS_PER_THREAD ; i++){
if ( start < n_hat ){
s_input[tid + i*BLOCK_SIZE] = d_input_string[start];
}
start += BLOCK_SIZE ;
}
if ( (start < n_hat) && (tid < EXTRA_SIZE_PER_TB) ){
s_input[tid + NUM_INTS_PER_THREAD*BLOCK_SIZE] = d_input_string[start];
}
}
__syncthreads();
// bdy = number of legal characters starting at gbid*BLOCK_SIZE*NUM_INTS_PER_THREAD*4
int bdy = input_size - gbid*(BLOCK_SIZE * NUM_INTS_PER_THREAD * 4);
#if 2 != NUM_INTS_PER_THREAD
#error NUM_INTS_PER_THREAD must be 2, or MANUAL_EXPAND_8 is wrong
#endif
if ( SMEM_ON ){
if ( TEXTURE_ON ){
int j = 0 ;
MANUAL_EXPAND_8( SUBSEG_MATCH(j, match[j]) ; j++ ; )
}else{
int j = 0 ;
MANUAL_EXPAND_8( SUBSEG_MATCH_NOTEX(j, match[j]) ; j++ ;)
}
}else{
char_d_input_string = (char*)d_input_string ; // used only when SMEM_ON = 0
if ( TEXTURE_ON ){
int j = 0 ;
MANUAL_EXPAND_8( SUBSEG_MATCH_NOSMEM(j, match[j]) ; j++ ; )
}else{
int j = 0 ;
MANUAL_EXPAND_8( SUBSEG_MATCH_NOSMEM_NOTEX(j, match[j]) ; j++ ;)
}
}
// matching is done, we can re-use shared memory s_input and phi_s02s1
// to do inclusive_scan
// we have 128 thread per block (4 warps per block) and each thread needs to
// process 8 (4*NUM_INTS_PER_THREAD) substrings. It is equivalent to say
// 4 x 8 = 32 warps processing 1024 substrings.
// if we concatenate match[j] to a linear array of 1024 entries, then
// acc_pos[j] of lane_id = number of non-zero of match[j] of thread k, k <= lane_id
// = prefix_sum( match[32*j:32*j+lane_id] )
// acc_warp[j] is number of nonzero of match[32*j:32*j+31]
// = prefix_sum( match[32*j:32*j+31] )
//
// stage 1: inclusive scan inside a warp
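// Illustration of the Fermi (__ballot/__popc) path below, with made-up lane values:
// if, within one warp, only lanes 0, 3 and 4 have match[k] > 0, __ballot returns the
// mask 0b11001; lane 4 shifts it left by 31-4 = 27 bits so that only the bits of
// lanes 0..4 survive, and __popc of the shifted mask gives acc_pos[k] = 3, i.e. lane 4
// holds the 3rd non-zero in lane order. Lane 31 keeps the full-warp count and writes
// it to acc_warp[].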
int warp_id = tid >> 5 ;
int lane_id = tid & 31 ;
int acc_pos[4*NUM_INTS_PER_THREAD] ;
int *acc_warp = phi_s02s1 ; // alias acc_warp[32] to phi_s02s1
// reuse phi_s02s1
#if 32 != (NUM_WARPS_PER_BLOCK * 4*NUM_INTS_PER_THREAD)
#error 32 != (NUM_WARPS_PER_BLOCK * 4*NUM_INTS_PER_THREAD)
#endif
__syncthreads(); // s_input and phi_s02s1 can be re-used
#if 200 <= __CUDA_ARCH__
if ( 0 == warp_id ){
s_input[lane_id] = 0 ;
}
int k = 0 ;
unsigned int match_pattern ;
MANUAL_EXPAND_8( match_pattern = __ballot( match[k] > 0 ); \
match_pattern <<= (31-lane_id); \
acc_pos[k] = __popc(match_pattern); \
if ( 31 == lane_id ){ \
acc_warp[ warp_id + k * NUM_WARPS_PER_BLOCK ] = acc_pos[k] ;\
}\
k++ ; )
__syncthreads();
#else
// clear supplemet area of s_input
#pragma unroll
for (int k = 0 ; k < 4 ; k++ ){
int id = tid + k*BLOCK_SIZE ;
int pos = 2 * id - (id &31);
s_input[pos] = 0 ;
}
__syncthreads();
int k = 0 ;
int idata ;
MANUAL_EXPAND_4( idata = match[k] > 0 ; \
acc_pos[k] = warpScanInclusive(idata, tid + k*BLOCK_SIZE, s_input); \
if ( 31 == lane_id ){ \
acc_warp[ warp_id + k * NUM_WARPS_PER_BLOCK ] = acc_pos[k] ;\
} \
k++ ; )
// __syncthreads(); // not necessary
k = 0 ;
MANUAL_EXPAND_4( idata = match[4+k] > 0 ; \
acc_pos[4+k] = warpScanInclusive(idata, tid + k*BLOCK_SIZE, s_input); \
if ( 31 == lane_id ){ \
acc_warp[ warp_id + (4+k) * NUM_WARPS_PER_BLOCK ] = acc_pos[4+k] ;\
} \
k++ ; )
__syncthreads();
#endif
// stage 2: acc_pos[0:7] and acc_warp[0:31] are done, we can re-use s_input again
// s_input[32+j] = prefix_sum( acc_warp[0:j] )
// note that s_input[0:31] always keeps zero
if ( 0 == warp_id ){
warpScanInclusive(acc_warp[lane_id], lane_id, s_input);
}
__syncthreads();
// stage 3: s_input[32:63] contains information as
// s_input[32+j+1] - s_input[32+j] = nnz of warp j
// s_input[63] = prefix_sum( match[0:1023] )
// correct local position of each matched substring
// note that position starts from 0, so we need minus 1,
// for example, suppose acc_pos[0] of warp 0 is
// 1 1 1 2 3 ...
// then t0, t3, t4 match, and should write to position of matched result
// 0 0 0 1 2 ...
// d_match_result[ t0, t3, t4, ...]
// d_pos[ 0, 3, 4, ...]
#pragma unroll
for (int j = 0 ; j < 4*NUM_INTS_PER_THREAD ; j++ ){
acc_pos[j] += ( s_input[31 + warp_id + j * NUM_WARPS_PER_BLOCK] - 1 ) ;
}
int nnz = s_input[63];
__syncthreads();
// stage 4: all data are in acc_pos[] and match[], s_input can be reused again
// collect non-zero data to s_input, then do coalesced write
start = gbid * (BLOCK_SIZE * NUM_INTS_PER_THREAD * 4) ;
#pragma unroll
for (int j = 0 ; j < 4*NUM_INTS_PER_THREAD ; j++ ){
if ( match[j] ){
s_input[ acc_pos[j] ] = match[j];
}
}
__syncthreads();
for (int j = tid ; j < nnz; j+= BLOCK_SIZE ){
d_match_result[start + j ] = s_input[j] ;
}
__syncthreads();
#pragma unroll
for (int j = 0 ; j < 4*NUM_INTS_PER_THREAD ; j++ ){
if ( match[j] ){
s_input[ acc_pos[j] ] = start + tid + j * BLOCK_SIZE ;
}
}
__syncthreads();
for (int j = tid ; j < nnz; j+= BLOCK_SIZE ){
d_pos[start + j ] = s_input[j] ;
}
if ( 0 == tid ){
d_nnz_per_block[ gbid ] = nnz ;
}
}
/*
******************* technical note of PFAC_reduce_kernel_device:
----------------------------------------------------------------------------------------
1) nvcc uses lmem on following code, so
[code]
__syncthreads();
#pragma unroll
for (int k = 0 ; k < 4 ; k++ ){
int idata = match[k] > 0 ;
acc_pos[k] = warpScanInclusive(idata, tid + k*BLOCK_SIZE, s_input);
if ( 31 == lane_id ){
acc_warp[ warp_id + k * NUM_WARPS_PER_BLOCK ] = acc_pos[k] ;
}
}
#pragma unroll
for (int k = 0 ; k < 4 ; k++ ){
int idata = match[4+k] > 0 ;
acc_pos[4+k] = warpScanInclusive(idata, tid + k*BLOCK_SIZE, s_input);
if ( 31 == lane_id ){
acc_warp[ warp_id + (4+k) * NUM_WARPS_PER_BLOCK ] = acc_pos[4+k] ;
}
}
__syncthreads();
[/code]
is manually unrolled as
[code]
__syncthreads();
int k = 0 ;
int idata ;
MANUAL_EXPAND_4( idata = match[k] > 0 ; \
acc_pos[k] = warpScanInclusive(idata, tid + k*BLOCK_SIZE, s_input); \
if ( 31 == lane_id ){ \
acc_warp[ warp_id + k * NUM_WARPS_PER_BLOCK ] = acc_pos[k] ;\
} \
k++ ; )
// __syncthreads(); // not necessary
k = 0 ;
MANUAL_EXPAND_4( idata = match[4+k] > 0 ; \
acc_pos[4+k] = warpScanInclusive(idata, tid + k*BLOCK_SIZE, s_input); \
if ( 31 == lane_id ){ \
acc_warp[ warp_id + (4+k) * NUM_WARPS_PER_BLOCK ] = acc_pos[4+k] ;\
} \
k++ ; )
__syncthreads();
[/code]
---------------------------------------------------------------------------------
2) simplify following code
[code]
if ( TEXTURE_ON ){
SUBSEG_MATCH(0, match[0]) ;
SUBSEG_MATCH(1, match[1]) ;
SUBSEG_MATCH(2, match[2]) ;
SUBSEG_MATCH(3, match[3]) ;
#if 2 == NUM_INTS_PER_THREAD
SUBSEG_MATCH(4, match[4]) ;
SUBSEG_MATCH(5, match[5]) ;
SUBSEG_MATCH(6, match[6]) ;
SUBSEG_MATCH(7, match[7]) ;
#endif
}else{
SUBSEG_MATCH_NOTEX(0, match[0]) ;
SUBSEG_MATCH_NOTEX(1, match[1]) ;
SUBSEG_MATCH_NOTEX(2, match[2]) ;
SUBSEG_MATCH_NOTEX(3, match[3]) ;
#if 2 == NUM_INTS_PER_THREAD
SUBSEG_MATCH_NOTEX(4, match[4]) ;
SUBSEG_MATCH_NOTEX(5, match[5]) ;
SUBSEG_MATCH_NOTEX(6, match[6]) ;
SUBSEG_MATCH_NOTEX(7, match[7]) ;
#endif
}
[/code]
by compact macro
[code]
if ( TEXTURE_ON ){
int j = 0 ;
MANUAL_EXPAND_8( SUBSEG_MATCH(j, match[j]) ; j++ ; )
}else{
int j = 0 ;
MANUAL_EXPAND_8( SUBSEG_MATCH_NOTEX(j, match[j]) ; j++ ;)
}
[/code]
-----------------------------------------------------------------------------------------
3. optimization on Fermi
[code]
#pragma unroll
for(int k = 0 ; k < 4*NUM_INTS_PER_THREAD ; k++ ){
unsigned int match_pattern = __ballot( match[k] > 0 ) ;
match_pattern <<= (31 - lane_id);
acc_pos[k] = __popc(match_pattern) ;
if ( 31 == lane_id ){
acc_warp[ warp_id + k * NUM_WARPS_PER_BLOCK ] = acc_pos[k] ;
}
}
[/code]
becomes
[code]
int k = 0 ;
unsigned int match_pattern ;
MANUAL_EXPAND_8( match_pattern = __ballot( match[k] > 0 ); \
match_pattern <<= (31-lane_id); \
acc_pos[k] = __popc(match_pattern); \
if ( 31 == lane_id ){ \
acc_warp[ warp_id + k * NUM_WARPS_PER_BLOCK ] = acc_pos[k] ;\
}\
k++ ; )
[/code]
-----------------------------------------------------------------------------------------
*/
|
c9a869e21d735f28d2cc53038f529e92fc685f43.cu
|
/*
* Copyright 2011 Chen-Hsiung Liu, Lung-Sheng Chien, Cheng-Hung Lin,and Shih-Chieh Chang
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* F = number of final states, we label final states from s{1}, s{2}, ... s{F}
* and initial state is s{F+1}. s{0} is of no use.
*
* if maximum pattern length is less than 512, then we will load transition function
 * of the initial state to shared memory, so we require BLOCK_SIZE * k = 256 such that
 * each thread loads several transition pairs into shared memory
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda_runtime.h>
#include <assert.h>
#include <ctype.h>
#include "thrust/device_vector.h"
#include "thrust/scan.h"
#include "../include/PFAC_P.h"
#ifdef __cplusplus
extern "C" {
PFAC_status_t PFAC_reduce_kernel( PFAC_handle_t handle, int *d_input_string, int input_size,
int *d_match_result, int *d_pos, int *h_num_matched, int *h_match_result, int *h_pos );
}
#endif // __cplusplus
#define BLOCK_EXP (7)
#define BLOCK_SIZE (1 << BLOCK_EXP)
#define EXTRA_SIZE_PER_TB (128)
#define NUM_INTS_PER_THREAD (2)
#define BLOCK_SIZE_DIV_256 (2)
#define NUM_WARPS_PER_BLOCK (4)
#if 256 != (BLOCK_SIZE_DIV_256 * BLOCK_SIZE)
#error 256 != BLOCK_SIZE_DIV_256 * BLOCK_SIZE
#endif
#if BLOCK_SIZE != 32 * NUM_WARPS_PER_BLOCK
#error BLOCK_SIZE != 32 * NUM_WARPS_PER_BLOCK
#endif
texture < int, 1, cudaReadModeElementType > tex_PFAC_table_reduce;
static __inline__ __device__ int tex_lookup(int state, int inputChar)
{
return tex1Dfetch(tex_PFAC_table_reduce, state*CHAR_SET + inputChar);
}
/* declaration */
template <int TEXTURE_ON , int SMEM_ON >
__global__ void PFAC_reduce_kernel_device(
int *d_PFAC_table,
int *d_input_string,
int input_size,
int n_hat,
int num_finalState,
int initial_state,
int num_blocks_minus1,
int *d_pos,
int *d_match_result,
int *d_nnz_per_block ) ;
__host__ PFAC_status_t PFAC_reduce_kernel_stage1(
PFAC_handle_t handle,
int *d_input_string,
int input_size,
int n_hat,
int num_blocks,
dim3 dimBlock,
dim3 dimGrid,
int *d_match_result,
int *d_pos,
int *d_nnz_per_block,
int *h_num_matched );
__global__ void zip_kernel(
int *d_pos,
int *d_match_result,
int *d_nnz_per_block,
int num_blocks_minus1,
int elements_per_block,
int *d_pos_zip,
int *d_match_result_zip);
/* end of declaration */
// ---------------------------- main ----------------------
/*
*
* Input -
* handle
* pointer to a legal PFAC context
* d_input_string
* input stream in device memory, its size is "input_size" bytes
* input_size
* size of input stream
*
* Output -
* h_num_matched
* pointer to a host memory, it denotes number of matched patterns in the input stream
* for example, if device mode is set, and h_num_matched = 5, then
 * d_pos[0:4] contains the starting position of each matched pattern in the input stream
* d_match_result[0:4] contains pattern ID of matched pattern
*
* NOTE: if h_num_matched = 0, then d_pos and d_match_result are not touched,
 * their values are undefined.
* also at this time, (d_pos_zip, d_match_result_zip) is not allocated, so
* space is efficient.
*
* support 2 mode:
*
* Device mode:
* (d_pos, d_match_result) pair is device memory
* (h_pos, h_match_result) is (NULL,NULL)
*
* 1) (d_pos, d_match_result) is used as working space, store local compressed (match,pos)
* 2) zip (d_pos, d_match_result) to working space (d_pos_zip, d_match_result_zip)
* 3) copy (d_pos_zip, d_match_result_zip) to (d_pos, d_match_result) via DeviceToDevice
*
* Host mode:
* (d_pos, d_match_result) pair is working space
* (h_pos, h_match_result) is not (NULL,NULL)
*
* 1) (d_pos, d_match_result) is used as working space, store local compressed (match,pos)
* 2) zip (d_pos, d_match_result) to working space (d_pos_zip, d_match_result_zip)
* 3) copy (d_pos_zip, d_match_result_zip) to (h_pos, h_match_result) via DeviceToHost
*
* We can combine two modes in a simple way,
* (d_pos, h_pos) is mutually exclusive, so is (d_match_result, h_match_result).
* i.e.
* if ( h_pos ) then
* h_pos <-- d_pos_zip
* else
* d_pos <-- d_pos_zip
* end
*
* if ( h_match_result ) then
* h_match_result <-- d_match_result_zip
* else
* d_match_result <-- d_match_result_zip
* end
*
*/
__host__ PFAC_status_t PFAC_reduce_kernel(
PFAC_handle_t handle,
int *d_input_string,
int input_size,
int *d_match_result,
int *d_pos,
int *h_num_matched,
int *h_match_result,
int *h_pos )
{
int *d_nnz_per_block = NULL ; // working space, d_nnz_per_block[j] = nnz of block j
int *d_pos_zip = NULL ; // working space, compression of initial d_pos
int *d_match_result_zip = NULL ; // working space, compression of initial d_match_result
cudaError_t cuda_status ;
PFAC_status_t PFAC_status ;
// n_hat = (input_size + 3)/4 = number of integers of input string
int n_hat = (input_size + sizeof(int)-1)/sizeof(int) ;
// num_blocks = # of thread blocks to cover input stream
int num_blocks = (n_hat + BLOCK_SIZE*NUM_INTS_PER_THREAD-1)/(BLOCK_SIZE*NUM_INTS_PER_THREAD) ;
cuda_status = cudaMalloc((void **)&d_nnz_per_block, num_blocks*sizeof(int) );
if ( cudaSuccess != cuda_status ){
return PFAC_STATUS_CUDA_ALLOC_FAILED ;
}
dim3 dimBlock( BLOCK_SIZE, 1 ) ;
dim3 dimGrid ;
/*
 * hardware limitation of a 2-D grid is (65535, 65535),
* 1-D grid is not enough to cover large input stream.
* For example, input_size = 1G (input stream has 1Gbyte), then
* num_blocks = # of thread blocks = 1G / 1024 = 1M > 65535
*
 * However, when using a 2-D grid, the number of invoked blocks = dimGrid.x * dimGrid.y,
 * which is bigger than num_blocks
*
* we need to check this boundary condition inside kernel because
* size of d_nnz_per_block is num_blocks
*
* trick: decompose num_blocks = p * 2^15 + q
*/
int p = num_blocks >> 15 ;
dimGrid.x = num_blocks ;
if ( p ){
dimGrid.x = 1<<15 ;
dimGrid.y = p+1 ;
}
PFAC_status = PFAC_reduce_kernel_stage1( handle, d_input_string, input_size,
n_hat, num_blocks, dimBlock, dimGrid,
d_match_result, d_pos, d_nnz_per_block, h_num_matched );
if ( PFAC_STATUS_SUCCESS != PFAC_status ){
cudaFree(d_nnz_per_block);
return PFAC_STATUS_INTERNAL_ERROR ;
}
if ( 0 == *h_num_matched ){
cudaFree(d_nnz_per_block);
return PFAC_STATUS_SUCCESS;
}
/*
* stage 3: compression (d_match_result, d_pos) to working space (d_pos_zip, d_match_result_zip)
* by information of d_nnz_per_block
*
* after stage 3, d_nnz_per_block is useless
*/
cudaError_t cuda_status1 = cudaMalloc((void **) &d_pos_zip, (*h_num_matched)*sizeof(int) );
cudaError_t cuda_status2 = cudaMalloc((void **) &d_match_result_zip, (*h_num_matched)*sizeof(int) );
if ( (cudaSuccess != cuda_status1) || (cudaSuccess != cuda_status2) ){
if ( NULL != d_pos_zip ) { cudaFree(d_pos_zip); }
if ( NULL != d_match_result_zip ) { cudaFree(d_match_result_zip); }
cudaFree(d_nnz_per_block);
return PFAC_STATUS_CUDA_ALLOC_FAILED ;
}
int elements_per_block = BLOCK_SIZE * NUM_INTS_PER_THREAD * 4 ;
zip_kernel<<< dimGrid, dimBlock >>>(d_pos, d_match_result, d_nnz_per_block,
num_blocks - 1, elements_per_block,
d_pos_zip, d_match_result_zip );
cuda_status = cudaGetLastError() ;
if ( cudaSuccess != cuda_status ){
cudaFree(d_pos_zip);
cudaFree(d_match_result_zip);
cudaFree(d_nnz_per_block);
return PFAC_STATUS_INTERNAL_ERROR ;
}
cudaFree(d_nnz_per_block);
/*
* stage 4: copy data back to d_pos and d_match_result
* we can write hand-copy kernel to copy (d_pos_zip, d_match_result)
* this should be efficient
*/
if ( NULL != h_pos ){
cuda_status1 = cudaMemcpy(h_pos, d_pos_zip, (*h_num_matched)*sizeof(int), cudaMemcpyDeviceToHost);
}else{
cuda_status1 = cudaMemcpy(d_pos, d_pos_zip, (*h_num_matched)*sizeof(int), cudaMemcpyDeviceToDevice);
}
if ( NULL != h_match_result ){
cuda_status2 = cudaMemcpy(h_match_result, d_match_result_zip, (*h_num_matched)*sizeof(int), cudaMemcpyDeviceToHost);
}else{
cuda_status2 = cudaMemcpy(d_match_result, d_match_result_zip, (*h_num_matched)*sizeof(int), cudaMemcpyDeviceToDevice);
}
if ( (cudaSuccess != cuda_status1) ||
(cudaSuccess != cuda_status2) )
{
cudaFree(d_pos_zip);
cudaFree(d_match_result_zip);
return PFAC_STATUS_INTERNAL_ERROR ;
}
cudaFree(d_pos_zip);
cudaFree(d_match_result_zip);
return PFAC_STATUS_SUCCESS;
}
/*
* stage 1: perform matching process and zip non-zero (matched thread) into continuous
 * memory block and keep order. Moreover, the nnz of each thread block is stored in d_nnz_per_block
*
* d_nnz_per_block[j] = nnz of thread block j
*
 * since each thread block processes 1024 substrings, the range of d_nnz_per_block[j] is [0,1024]
*/
__host__ PFAC_status_t PFAC_reduce_kernel_stage1(
PFAC_handle_t handle,
int *d_input_string,
int input_size,
int n_hat,
int num_blocks,
dim3 dimBlock,
dim3 dimGrid,
int *d_match_result,
int *d_pos,
int *d_nnz_per_block,
int *h_num_matched )
{
cudaError_t cuda_status ;
PFAC_status_t pfac_status = PFAC_STATUS_SUCCESS;
int num_finalState = handle->numOfFinalStates;
int initial_state = handle->initial_state;
bool smem_on = ((4*EXTRA_SIZE_PER_TB-1) >= handle->maxPatternLen) ;
bool texture_on = (PFAC_TEXTURE_ON == handle->textureMode );
if ( texture_on ){
// #### lock mutex, only one thread can bind texture
pfac_status = PFAC_tex_mutex_lock();
if ( PFAC_STATUS_SUCCESS != pfac_status ){
return pfac_status ;
}
textureReference *texRefTable ;
cudaGetTextureReference( (const struct textureReference**)&texRefTable, &tex_PFAC_table_reduce );
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<int>();
// set texture parameters
tex_PFAC_table_reduce.addressMode[0] = cudaAddressModeClamp;
tex_PFAC_table_reduce.addressMode[1] = cudaAddressModeClamp;
tex_PFAC_table_reduce.filterMode = cudaFilterModePoint;
tex_PFAC_table_reduce.normalized = 0;
size_t offset ;
cuda_status = cudaBindTexture( &offset, (const struct textureReference*) texRefTable,
(const void*) handle->d_PFAC_table, (const struct cudaChannelFormatDesc*) &channelDesc, handle->sizeOfTableInBytes ) ;
// #### unlock mutex
pfac_status = PFAC_tex_mutex_unlock();
if ( PFAC_STATUS_SUCCESS != pfac_status ){
return pfac_status ;
}
if ( cudaSuccess != cuda_status ){
PFAC_PRINTF("Error: cannot bind texture, %s\n", cudaGetErrorString(cuda_status) );
return PFAC_STATUS_CUDA_ALLOC_FAILED ;
}
if ( 0 != offset ){
return PFAC_STATUS_INTERNAL_ERROR ;
}
}
if (smem_on) {
if ( texture_on ){
PFAC_reduce_kernel_device<1, 1> <<< dimGrid, dimBlock >>>( handle->d_PFAC_table,
d_input_string, input_size, n_hat, num_finalState, initial_state, num_blocks - 1,
d_pos, d_match_result, d_nnz_per_block );
}else{
PFAC_reduce_kernel_device<0, 1> <<< dimGrid, dimBlock >>>( handle->d_PFAC_table,
d_input_string, input_size, n_hat, num_finalState, initial_state, num_blocks - 1,
d_pos, d_match_result, d_nnz_per_block );
}
}else{
if ( texture_on ){
PFAC_reduce_kernel_device<1, 0> <<< dimGrid, dimBlock >>>( handle->d_PFAC_table,
d_input_string, input_size, n_hat, num_finalState, initial_state, num_blocks - 1,
d_pos, d_match_result, d_nnz_per_block );
}else{
PFAC_reduce_kernel_device<0, 0> <<< dimGrid, dimBlock >>>( handle->d_PFAC_table,
d_input_string, input_size, n_hat, num_finalState, initial_state, num_blocks - 1,
d_pos, d_match_result, d_nnz_per_block );
}
}
cuda_status = cudaGetLastError() ;
if ( texture_on ){
// #### lock mutex, only one thread can unbind texture
pfac_status = PFAC_tex_mutex_lock();
if ( PFAC_STATUS_SUCCESS != pfac_status ){
return pfac_status ;
}
cudaUnbindTexture(tex_PFAC_table_reduce);
// #### unlock mutex
pfac_status = PFAC_tex_mutex_unlock();
if ( PFAC_STATUS_SUCCESS != pfac_status ){
return pfac_status ;
}
}
if ( cudaSuccess != cuda_status ){
return PFAC_STATUS_INTERNAL_ERROR ;
}
/*
* stage 2: use Thrust to do in-place prefix_sum( d_nnz_per_block[0:num_blocks-1] )
*
* after inclusive_scan, then
*
* d_nnz_per_block[j] = prefix_sum( d_nnz_per_block[0:j] )
*
* d_nnz_per_block[num_blocks-1] = total number of non-zero = h_num_matched
*
*/
thrust::device_ptr<int> dev_nnz_per_block ( d_nnz_per_block ) ;
thrust::inclusive_scan(dev_nnz_per_block, dev_nnz_per_block + num_blocks, dev_nnz_per_block );
cuda_status = cudaMemcpy( h_num_matched, d_nnz_per_block + num_blocks-1, sizeof(int), cudaMemcpyDeviceToHost) ;
if ( cudaSuccess != cuda_status ){
return PFAC_STATUS_INTERNAL_ERROR ;
}
return PFAC_STATUS_SUCCESS ;
}
__global__ void zip_kernel(
int *d_pos,
int *d_match_result,
int *d_nnz_per_block,
int num_blocks_minus1,
int elements_per_block,
int *d_pos_zip,
int *d_match_result_zip)
{
int tid = threadIdx.x ;
int gbid = blockIdx.y * gridDim.x + blockIdx.x ;
if ( gbid > num_blocks_minus1 ){
return ; // d_nnz_per_block[0:num_blocks-1]
}
int start = 0 ;
if ( 0 < gbid ){
start = d_nnz_per_block[gbid - 1] ;
}
int nnz = d_nnz_per_block[gbid] - start ;
int base = gbid * elements_per_block ;
for( int colIdx = tid ; colIdx < nnz ; colIdx += BLOCK_SIZE ){
d_pos_zip[ start + colIdx ] = d_pos[ base + colIdx ] ;
d_match_result_zip[ start + colIdx ] = d_match_result[ base + colIdx ] ;
}
}
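/*
 * A minimal host-side sketch of how zip_kernel could be driven once stage 2 has finished.
 * The grid shape and elements_per_block value below are assumptions inferred from the
 * launch parameters visible in this file, not the library's exact driver code.
 * [code]
 *     int elements_per_block = BLOCK_SIZE * NUM_INTS_PER_THREAD * 4 ;   // substrings per thread block
 *     dim3 zipBlock( BLOCK_SIZE ) ;
 *     dim3 zipGrid( dimGrid.x, dimGrid.y ) ;   // assumed: same block layout as stage 1
 *     zip_kernel<<< zipGrid, zipBlock >>>( d_pos, d_match_result, d_nnz_per_block,
 *         num_blocks - 1, elements_per_block, d_pos_zip, d_match_result_zip ) ;
 * [/code]
 * d_pos_zip and d_match_result_zip then hold the compacted results that the driver copies
 * back to the caller's buffers.
 */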
/*
 * (1) the transition table of the initial state is kept in shared memory (phi_s02s1),
 * so we don't need to look it up in the texture tex_PFAC_table
*
 * (2) final states are reordered as 0, 1, 2, ..., k-1,
 * so a state number less than k (the number of final states) indicates a final state
*/
#define SUBSEG_MATCH( j, match ) \
pos = tid + j * BLOCK_SIZE ;\
if ( pos < bdy ){ \
inputChar = s_char[pos]; \
state = phi_s02s1[ inputChar ]; \
if ( TRAP_STATE != state ){ \
if ( state <= num_finalState ){ \
match = state;\
} \
pos = pos + 1; \
while ( pos < bdy ) { \
inputChar = s_char[pos]; \
state = tex_lookup(state, inputChar); \
if ( TRAP_STATE == state ){ break ;} \
if ( state <= num_finalState ){ \
match = state;\
}\
pos = pos + 1;\
}\
}\
}
// end macro
#define SUBSEG_MATCH_NOSMEM( j, match ) \
pos = ( gbid * BLOCK_SIZE * NUM_INTS_PER_THREAD * 4 ) + tid + j * BLOCK_SIZE ;\
if ( pos < input_size ){ \
inputChar = (unsigned char) char_d_input_string[pos]; \
state = phi_s02s1[ inputChar ]; \
if ( TRAP_STATE != state ){ \
if ( state <= num_finalState ){ \
match = state;\
} \
pos = pos + 1; \
while ( pos < input_size ) { \
inputChar = (unsigned char) char_d_input_string[pos]; \
state = tex_lookup(state, inputChar); \
if ( TRAP_STATE == state ){ break ;} \
if ( state <= num_finalState ){ \
match = state;\
}\
pos = pos + 1;\
}\
}\
}
// end macro
#define SUBSEG_MATCH_NOTEX( j, match ) \
pos = tid + j * BLOCK_SIZE ;\
if ( pos < bdy ){ \
inputChar = s_char[pos]; \
state = phi_s02s1[ inputChar ]; \
if ( TRAP_STATE != state ){ \
if ( state <= num_finalState ){ \
match = state;\
} \
pos = pos + 1; \
while ( pos < bdy ) { \
inputChar = s_char[pos]; \
state = *(d_PFAC_table + state*CHAR_SET + inputChar); \
if ( TRAP_STATE == state ){ break ;} \
if ( state <= num_finalState ){ \
match = state;\
}\
pos = pos + 1;\
}\
}\
}
// end macro
#define SUBSEG_MATCH_NOSMEM_NOTEX( j, match ) \
pos = ( gbid * BLOCK_SIZE * NUM_INTS_PER_THREAD * 4 ) + tid + j * BLOCK_SIZE ;\
if ( pos < input_size ){ \
inputChar = (unsigned char) char_d_input_string[pos]; \
state = phi_s02s1[ inputChar ]; \
if ( TRAP_STATE != state ){ \
if ( state <= num_finalState ){ \
match = state;\
} \
pos = pos + 1; \
while ( pos < input_size ) { \
inputChar = (unsigned char) char_d_input_string[pos]; \
state = *(d_PFAC_table + state*CHAR_SET + inputChar); \
if ( TRAP_STATE == state ){ break ;} \
if ( state <= num_finalState ){ \
match = state;\
}\
pos = pos + 1;\
}\
}\
}
// end macro
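/*
 * For reference, the four SUBSEG_MATCH variants above map onto the four template
 * instantiations of PFAC_reduce_kernel_device launched in stage 1 (this mapping is
 * implied by the kernel body below):
 * [code]
 *     TEXTURE_ON = 1, SMEM_ON = 1   -->  SUBSEG_MATCH
 *     TEXTURE_ON = 0, SMEM_ON = 1   -->  SUBSEG_MATCH_NOTEX
 *     TEXTURE_ON = 1, SMEM_ON = 0   -->  SUBSEG_MATCH_NOSMEM
 *     TEXTURE_ON = 0, SMEM_ON = 0   -->  SUBSEG_MATCH_NOSMEM_NOTEX
 * [/code]
 */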
/*
* caller must reset working space s_Data first
*
* This device function comes from SDK/scan
*
* original code
* [code]
* //assuming size <= WARP_SIZE
* inline __device__ uint warpScanInclusive(uint idata, uint *s_Data, uint size){
* uint pos = 2 * threadIdx.x - (threadIdx.x & (size - 1));
* s_Data[pos] = 0;
* pos += size;
* s_Data[pos] = idata;
*
* for(uint offset = 1; offset < size; offset <<= 1)
* s_Data[pos] += s_Data[pos - offset];
*
* return s_Data[pos];
* }
* [/code]
*
 * Question: one may wonder why the volatile keyword is missing.
 * nvcc 3.2 keeps the "s_Data[pos] = ..." stores, and cuobjdump shows
* [code]
* int pos = 2 * id - (id &31);
* s_Data[pos] = 0 ;
* pos += 32 ;
* s_Data[pos] = idata ;
* R0 = idata ;
* for( int offset = 1 ; offset < 32 ; offset <<= 1 ){
* R0 += s_Data[pos - offset];
* s_Data[pos] = R0 ;
* }
* return s_Data[pos];
* [/code]
*
* check http://forums.nvidia.com/index.php?showtopic=193730
*
*/
inline __device__ int warpScanInclusive(int idata, int id, int *s_Data)
{
int pos = 2 * id - (id &31);
// s_Data[pos] = 0 ;
pos += 32 ;
s_Data[pos] = idata ;
for( int offset = 1 ; offset < 32 ; offset <<= 1 ){
s_Data[pos] += s_Data[pos - offset];
}
return s_Data[pos];
}
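/*
 * Usage sketch (illustrative only): the caller must zero the lower half of the working
 * space before calling warpScanInclusive, exactly as the non-Fermi path of
 * PFAC_reduce_kernel_device does:
 * [code]
 *     int id  = tid + k*BLOCK_SIZE ;
 *     int pos = 2 * id - (id & 31) ;
 *     s_input[pos] = 0 ;                                   // clear supplement area
 *     __syncthreads();
 *     int sum = warpScanInclusive( idata, id, s_input );   // inclusive prefix sum within a warp
 * [/code]
 */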
#define MANUAL_EXPAND_2( X ) { X ; X ; }
#define MANUAL_EXPAND_4( X ) { MANUAL_EXPAND_2( MANUAL_EXPAND_2( X ) ) }
#define MANUAL_EXPAND_8( X ) { MANUAL_EXPAND_4( X ) MANUAL_EXPAND_4( X ) }
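/*
 * Expansion example (illustrative): with the j-counter idiom used in the kernel below,
 * [code]
 *     int j = 0 ;
 *     MANUAL_EXPAND_4( SUBSEG_MATCH(j, match[j]) ; j++ ; )
 * [/code]
 * behaves like the four consecutive statements SUBSEG_MATCH(0, match[0]) ... SUBSEG_MATCH(3, match[3]),
 * and MANUAL_EXPAND_8 does the same for j = 0..7. The macros exist only to force full unrolling
 * (see the technical note at the end of this file).
 */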
/*
* resource usage
* sm20:
* 1) use smem
* 32 regs, 5120B smem, 96B cmem, => 1024 threads / SM
* 2) no smem
* 40 regs, 5120B smem, 96B cmem, => 768 threads / SM
* sm13:
* 1) use smem
* 24 regs, 5200B smem, 52B cmem, => 256 threads / SM
* 2) no smem
* 32 regs, 5200B smem, 52B cmem, => 256 threads / SM
*
* sm11:
* 1) use smem
* 24 regs, 5200B smem, 52B cmem, => 256 threads / SM
* 2) no smem
* 32 regs, 5200B smem, 8B cmem, => 256 threads / SM
*/
template <int TEXTURE_ON , int SMEM_ON >
__global__ void PFAC_reduce_kernel_device(
int *d_PFAC_table,
int *d_input_string,
int input_size,
int n_hat,
int num_finalState,
int initial_state,
int num_blocks_minus1,
int *d_pos,
int *d_match_result,
int *d_nnz_per_block )
{
int tid = threadIdx.x ;
int gbid = blockIdx.y * gridDim.x + blockIdx.x ;
int start ;
int pos;
int state;
int inputChar;
int match[4*NUM_INTS_PER_THREAD] ;
__shared__ int s_input[ BLOCK_SIZE*NUM_INTS_PER_THREAD*4];
__shared__ int phi_s02s1[ 256 ] ;
volatile unsigned char *s_char;
char * char_d_input_string ;
if ( gbid > num_blocks_minus1 ){
return ; // whole block is outside input stream
}
#pragma unroll
for(int i = 0 ; i < 4*NUM_INTS_PER_THREAD ; i++){
match[i] = 0 ;
}
// load transition table of initial state to shared memory
if ( TEXTURE_ON ){
#pragma unroll
for(int i = 0 ; i < BLOCK_SIZE_DIV_256 ; i++){
phi_s02s1[ tid + i*BLOCK_SIZE ] = tex_lookup(initial_state, tid + i*BLOCK_SIZE);
}
}else{
#pragma unroll
for(int i = 0 ; i < BLOCK_SIZE_DIV_256 ; i++){
phi_s02s1[ tid + i*BLOCK_SIZE ] = *(d_PFAC_table + initial_state*CHAR_SET + (tid + i*BLOCK_SIZE) );
}
}
#if BLOCK_SIZE < EXTRA_SIZE_PER_TB
#error BLOCK_SIZE should be bigger than EXTRA_SIZE_PER_TB
#endif
if ( SMEM_ON ){
// legal thread block which contains some input stream
s_char = (unsigned char *)s_input;
// read global data to shared memory
start = gbid * (BLOCK_SIZE*NUM_INTS_PER_THREAD) + tid ;
#pragma unroll
for(int i = 0 ; i < NUM_INTS_PER_THREAD ; i++){
if ( start < n_hat ){
s_input[tid + i*BLOCK_SIZE] = d_input_string[start];
}
start += BLOCK_SIZE ;
}
if ( (start < n_hat) && (tid < EXTRA_SIZE_PER_TB) ){
s_input[tid + NUM_INTS_PER_THREAD*BLOCK_SIZE] = d_input_string[start];
}
}
__syncthreads();
// bdy = number of legal characters starting at gbid*BLOCK_SIZE*NUM_INTS_PER_THREAD*4
int bdy = input_size - gbid*(BLOCK_SIZE * NUM_INTS_PER_THREAD * 4);
#if 2 != NUM_INTS_PER_THREAD
#error NUM_INTS_PER_THREAD must be 2, or MANUAL_EXPAND_8 is wrong
#endif
if ( SMEM_ON ){
if ( TEXTURE_ON ){
int j = 0 ;
MANUAL_EXPAND_8( SUBSEG_MATCH(j, match[j]) ; j++ ; )
}else{
int j = 0 ;
MANUAL_EXPAND_8( SUBSEG_MATCH_NOTEX(j, match[j]) ; j++ ;)
}
}else{
char_d_input_string = (char*)d_input_string ; // used only when SMEM_ON = 0
if ( TEXTURE_ON ){
int j = 0 ;
MANUAL_EXPAND_8( SUBSEG_MATCH_NOSMEM(j, match[j]) ; j++ ; )
}else{
int j = 0 ;
MANUAL_EXPAND_8( SUBSEG_MATCH_NOSMEM_NOTEX(j, match[j]) ; j++ ;)
}
}
// matching is done, we can re-use shared memory s_input and phi_s02s1
// to do inclusive_scan
// we have 128 threads per block (4 warps per block) and each thread needs to
// process 8 (4*NUM_INTS_PER_THREAD) substrings, which is equivalent to
// 4 x 8 = 32 virtual warps processing 1024 substrings.
// if we concatenate match[j] into a linear array of 1024 entries, then
// acc_pos[j] of lane_id = number of non-zeros of match[j] over threads k, k <= lane_id
// = prefix_sum( match[32*j:32*j+lane_id] )
// acc_warp[j] = number of non-zeros of match[32*j:32*j+31]
// = prefix_sum( match[32*j:32*j+31] )
//
// stage 1: inclusive scan inside a warp
int warp_id = tid >> 5 ;
int lane_id = tid & 31 ;
int acc_pos[4*NUM_INTS_PER_THREAD] ;
int *acc_warp = phi_s02s1 ; // alias acc_warp[32] to phi_s02s1
// reuse phi_s02s1
#if 32 != (NUM_WARPS_PER_BLOCK * 4*NUM_INTS_PER_THREAD)
#error 32 != (NUM_WARPS_PER_BLOCK * 4*NUM_INTS_PER_THREAD)
#endif
__syncthreads(); // s_input and phi_s02s1 can be re-used
#if 200 <= __CUDA_ARCH__
if ( 0 == warp_id ){
s_input[lane_id] = 0 ;
}
int k = 0 ;
unsigned int match_pattern ;
MANUAL_EXPAND_8( match_pattern = __ballot( match[k] > 0 ); \
match_pattern <<= (31-lane_id); \
acc_pos[k] = __popc(match_pattern); \
if ( 31 == lane_id ){ \
acc_warp[ warp_id + k * NUM_WARPS_PER_BLOCK ] = acc_pos[k] ;\
}\
k++ ; )
__syncthreads();
#else
// clear supplement area of s_input
#pragma unroll
for (int k = 0 ; k < 4 ; k++ ){
int id = tid + k*BLOCK_SIZE ;
int pos = 2 * id - (id &31);
s_input[pos] = 0 ;
}
__syncthreads();
int k = 0 ;
int idata ;
MANUAL_EXPAND_4( idata = match[k] > 0 ; \
acc_pos[k] = warpScanInclusive(idata, tid + k*BLOCK_SIZE, s_input); \
if ( 31 == lane_id ){ \
acc_warp[ warp_id + k * NUM_WARPS_PER_BLOCK ] = acc_pos[k] ;\
} \
k++ ; )
// __syncthreads(); // not necessary
k = 0 ;
MANUAL_EXPAND_4( idata = match[4+k] > 0 ; \
acc_pos[4+k] = warpScanInclusive(idata, tid + k*BLOCK_SIZE, s_input); \
if ( 31 == lane_id ){ \
acc_warp[ warp_id + (4+k) * NUM_WARPS_PER_BLOCK ] = acc_pos[4+k] ;\
} \
k++ ; )
__syncthreads();
#endif
// stage 2: acc_pos[0:7] and acc_warp[0:31] are done, we can re-use s_input again
// s_input[32+j] = prefix_sum( acc_warp[0:j] )
// note that s_input[0:31] always keeps zero
if ( 0 == warp_id ){
warpScanInclusive(acc_warp[lane_id], lane_id, s_input);
}
__syncthreads();
// stage 3: s_input[32:63] contains information as
// s_input[32+j+1] - s_input[32+j] = nnz of warp j
// s_input[63] = prefix_sum( match[0:1023] )
// correct local position of each matched substring
// note that positions start from 0, so we need to subtract 1;
// for example, suppose acc_pos[0] of warp 0 is
// 1 1 1 2 3 ...
// then t0, t3, t4 match, and should write to position of matched result
// 0 0 0 1 2 ...
// d_match_result[ t0, t3, t4, ...]
// d_pos[ 0, 3, 4, ...]
#pragma unroll
for (int j = 0 ; j < 4*NUM_INTS_PER_THREAD ; j++ ){
acc_pos[j] += ( s_input[31 + warp_id + j * NUM_WARPS_PER_BLOCK] - 1 ) ;
}
int nnz = s_input[63];
__syncthreads();
// stage 4: all data are in acc_pos[] and match[], s_input can be reused again
// collect non-zero data to s_input, then do coalesced write
start = gbid * (BLOCK_SIZE * NUM_INTS_PER_THREAD * 4) ;
#pragma unroll
for (int j = 0 ; j < 4*NUM_INTS_PER_THREAD ; j++ ){
if ( match[j] ){
s_input[ acc_pos[j] ] = match[j];
}
}
__syncthreads();
for (int j = tid ; j < nnz; j+= BLOCK_SIZE ){
d_match_result[start + j ] = s_input[j] ;
}
__syncthreads();
#pragma unroll
for (int j = 0 ; j < 4*NUM_INTS_PER_THREAD ; j++ ){
if ( match[j] ){
s_input[ acc_pos[j] ] = start + tid + j * BLOCK_SIZE ;
}
}
__syncthreads();
for (int j = tid ; j < nnz; j+= BLOCK_SIZE ){
d_pos[start + j ] = s_input[j] ;
}
if ( 0 == tid ){
d_nnz_per_block[ gbid ] = nnz ;
}
}
/*
******************* technical note of PFAC_reduce_kernel_device:
----------------------------------------------------------------------------------------
1) nvcc uses lmem (local memory) for the following code, so
[code]
__syncthreads();
#pragma unroll
for (int k = 0 ; k < 4 ; k++ ){
int idata = match[k] > 0 ;
acc_pos[k] = warpScanInclusive(idata, tid + k*BLOCK_SIZE, s_input);
if ( 31 == lane_id ){
acc_warp[ warp_id + k * NUM_WARPS_PER_BLOCK ] = acc_pos[k] ;
}
}
#pragma unroll
for (int k = 0 ; k < 4 ; k++ ){
int idata = match[4+k] > 0 ;
acc_pos[4+k] = warpScanInclusive(idata, tid + k*BLOCK_SIZE, s_input);
if ( 31 == lane_id ){
acc_warp[ warp_id + (4+k) * NUM_WARPS_PER_BLOCK ] = acc_pos[4+k] ;
}
}
__syncthreads();
[/code]
is manually unrolled as
[code]
__syncthreads();
int k = 0 ;
int idata ;
MANUAL_EXPAND_4( idata = match[k] > 0 ; \
acc_pos[k] = warpScanInclusive(idata, tid + k*BLOCK_SIZE, s_input); \
if ( 31 == lane_id ){ \
acc_warp[ warp_id + k * NUM_WARPS_PER_BLOCK ] = acc_pos[k] ;\
} \
k++ ; )
// __syncthreads(); // not necessary
k = 0 ;
MANUAL_EXPAND_4( idata = match[4+k] > 0 ; \
acc_pos[4+k] = warpScanInclusive(idata, tid + k*BLOCK_SIZE, s_input); \
if ( 31 == lane_id ){ \
acc_warp[ warp_id + (4+k) * NUM_WARPS_PER_BLOCK ] = acc_pos[4+k] ;\
} \
k++ ; )
__syncthreads();
[/code]
---------------------------------------------------------------------------------
2) simplify the following code
[code]
if ( TEXTURE_ON ){
SUBSEG_MATCH(0, match[0]) ;
SUBSEG_MATCH(1, match[1]) ;
SUBSEG_MATCH(2, match[2]) ;
SUBSEG_MATCH(3, match[3]) ;
#if 2 == NUM_INTS_PER_THREAD
SUBSEG_MATCH(4, match[4]) ;
SUBSEG_MATCH(5, match[5]) ;
SUBSEG_MATCH(6, match[6]) ;
SUBSEG_MATCH(7, match[7]) ;
#endif
}else{
SUBSEG_MATCH_NOTEX(0, match[0]) ;
SUBSEG_MATCH_NOTEX(1, match[1]) ;
SUBSEG_MATCH_NOTEX(2, match[2]) ;
SUBSEG_MATCH_NOTEX(3, match[3]) ;
#if 2 == NUM_INTS_PER_THREAD
SUBSEG_MATCH_NOTEX(4, match[4]) ;
SUBSEG_MATCH_NOTEX(5, match[5]) ;
SUBSEG_MATCH_NOTEX(6, match[6]) ;
SUBSEG_MATCH_NOTEX(7, match[7]) ;
#endif
}
[/code]
by compact macro
[code]
if ( TEXTURE_ON ){
int j = 0 ;
MANUAL_EXPAND_8( SUBSEG_MATCH(j, match[j]) ; j++ ; )
}else{
int j = 0 ;
MANUAL_EXPAND_8( SUBSEG_MATCH_NOTEX(j, match[j]) ; j++ ;)
}
[/code]
-----------------------------------------------------------------------------------------
3) optimization on Fermi
[code]
#pragma unroll
for(int k = 0 ; k < 4*NUM_INTS_PER_THREAD ; k++ ){
unsigned int match_pattern = __ballot( match[k] > 0 ) ;
match_pattern <<= (31 - lane_id);
acc_pos[k] = __popc(match_pattern) ;
if ( 31 == lane_id ){
acc_warp[ warp_id + k * NUM_WARPS_PER_BLOCK ] = acc_pos[k] ;
}
}
[/code]
becomes
[code]
int k = 0 ;
unsigned int match_pattern ;
MANUAL_EXPAND_8( match_pattern = __ballot( match[k] > 0 ); \
match_pattern <<= (31-lane_id); \
acc_pos[k] = __popc(match_pattern); \
if ( 31 == lane_id ){ \
acc_warp[ warp_id + k * NUM_WARPS_PER_BLOCK ] = acc_pos[k] ;\
}\
k++ ; )
[/code]
-----------------------------------------------------------------------------------------
*/
|
e36105e05e88a52c0386cf316d93c2ee36cf51b0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "track_ellipse_kernel.h"
#include "misc_math.h"
// #include <cutil.h>
// Constants used in the MGVF computation
#define ONE_OVER_PI (1.0 / PI)
#define MU 0.5
#define LAMBDA (8.0 * MU + 1.0)
// Host and device arrays to hold device pointers to input matrices
float **host_I_array, **host_IMGVF_array;
float **device_I_array, **device_IMGVF_array;
// Host and device arrays to hold sizes of input matrices
int *host_m_array, *host_n_array;
int *device_m_array, *device_n_array;
// Host array to hold matrices for all cells
// (so we can copy to and from the device in a single transfer)
float *host_I_all;
int total_mem_size;
// The number of threads per thread block
const int threads_per_block = 320;
// next_lowest_power_of_two = 2^(floor(log2(threads_per_block)))
const int next_lowest_power_of_two = 256;
// Regularized version of the Heaviside step function:
// He(x) = (atan(x) / pi) + 0.5
__device__ float heaviside(float x) {
return (atan(x) * ONE_OVER_PI) + 0.5;
// A simpler, faster approximation of the Heaviside function
/* float out = 0.0;
if (x > -0.0001) out = 0.5;
if (x > 0.0001) out = 1.0;
return out; */
}
// Kernel to compute the Motion Gradient Vector Field (MGVF) matrix for multiple cells
__global__ void IMGVF_kernel(float **IMGVF_array, float **I_array, int *m_array, int *n_array,
float vx, float vy, float e, int max_iterations, float cutoff) {
// Shared copy of the matrix being computed
__shared__ float IMGVF[41 * 81];
// Shared buffer used for two purposes:
// 1) To temporarily store newly computed matrix values so that only
// values from the previous iteration are used in the computation.
// 2) To store partial sums during the tree reduction which is performed
// at the end of each iteration to determine if the computation has converged.
__shared__ float buffer[threads_per_block];
// Figure out which cell this thread block is working on
int cell_num = blockIdx.x;
// Get pointers to the current cell's input image and initial matrix
float *IMGVF_global = IMGVF_array[cell_num];
float *I = I_array[cell_num];
// Get current cell's matrix dimensions
int m = m_array[cell_num];
int n = n_array[cell_num];
// Compute the number of virtual thread blocks
int max = (m * n + threads_per_block - 1) / threads_per_block;
// Load the initial IMGVF matrix into shared memory
int thread_id = threadIdx.x, thread_block, i, j;
for (thread_block = 0; thread_block < max; thread_block++) {
int offset = thread_block * threads_per_block;
i = (thread_id + offset) / n;
j = (thread_id + offset) % n;
if (i < m) IMGVF[(i * n) + j] = IMGVF_global[(i * n) + j];
}
__syncthreads();
// Set the converged flag to false
__shared__ int cell_converged;
if (threadIdx.x == 0) cell_converged = 0;
__syncthreads();
// Constants used to iterate through virtual thread blocks
const float one_nth = 1.f / (float) n;
const int tid_mod = thread_id % n;
const int tbsize_mod = threads_per_block % n;
// Constant used in the computation of Heaviside values
float one_over_e = 1.0 / e;
// Iteratively compute the IMGVF matrix until the computation has
// converged or we have reached the maximum number of iterations
int iterations = 0;
while ((! cell_converged) && (iterations < max_iterations)) {
// The total change to this thread's matrix elements in the current iteration
float total_diff = 0.0f;
int old_i = 0, old_j = 0;
j = tid_mod - tbsize_mod;
// Iterate over virtual thread blocks
for (thread_block = 0; thread_block < max; thread_block++) {
// Store the index of this thread's previous matrix element
// (used in the buffering scheme below)
old_i = i;
old_j = j;
// Determine the index of this thread's current matrix element
int offset = thread_block * threads_per_block;
i = (thread_id + offset) * one_nth;
j += tbsize_mod;
if (j >= n) j -= n;
float new_val = 0.0, old_val = 0.0;
// Make sure the thread has not gone off the end of the matrix
if (i < m) {
// Compute neighboring matrix element indices
int rowU = (i == 0) ? 0 : i - 1;
int rowD = (i == m - 1) ? m - 1 : i + 1;
int colL = (j == 0) ? 0 : j - 1;
int colR = (j == n - 1) ? n - 1 : j + 1;
// Compute the difference between the matrix element and its eight neighbors
old_val = IMGVF[(i * n) + j];
float U = IMGVF[(rowU * n) + j ] - old_val;
float D = IMGVF[(rowD * n) + j ] - old_val;
float L = IMGVF[(i * n) + colL] - old_val;
float R = IMGVF[(i * n) + colR] - old_val;
float UR = IMGVF[(rowU * n) + colR] - old_val;
float DR = IMGVF[(rowD * n) + colR] - old_val;
float UL = IMGVF[(rowU * n) + colL] - old_val;
float DL = IMGVF[(rowD * n) + colL] - old_val;
// Compute the regularized heaviside value for these differences
float UHe = heaviside((U * -vy) * one_over_e);
float DHe = heaviside((D * vy) * one_over_e);
float LHe = heaviside((L * -vx ) * one_over_e);
float RHe = heaviside((R * vx ) * one_over_e);
float URHe = heaviside((UR * ( vx - vy)) * one_over_e);
float DRHe = heaviside((DR * ( vx + vy)) * one_over_e);
float ULHe = heaviside((UL * (-vx - vy)) * one_over_e);
float DLHe = heaviside((DL * (-vx + vy)) * one_over_e);
// Update the IMGVF value in two steps:
// 1) Compute IMGVF += (mu / lambda)(UHe .*U + DHe .*D + LHe .*L + RHe .*R +
// URHe.*UR + DRHe.*DR + ULHe.*UL + DLHe.*DL);
new_val = old_val + (MU / LAMBDA) * (UHe * U + DHe * D + LHe * L + RHe * R +
URHe * UR + DRHe * DR + ULHe * UL + DLHe * DL);
// 2) Compute IMGVF -= (1 / lambda)(I .* (IMGVF - I))
float vI = I[(i * n) + j];
new_val -= ((1.0 / LAMBDA) * vI * (new_val - vI));
}
// Save the previous virtual thread block's value (if it exists)
if (thread_block > 0) {
offset = (thread_block - 1) * threads_per_block;
if (old_i < m) IMGVF[(old_i * n) + old_j] = buffer[thread_id];
}
if (thread_block < max - 1) {
// Write the new value to the buffer
buffer[thread_id] = new_val;
} else {
// We've reached the final virtual thread block,
// so write directly to the matrix
if (i < m) IMGVF[(i * n) + j] = new_val;
}
// Keep track of the total change of this thread's matrix elements
total_diff += fabs(new_val - old_val);
// We need to synchronize between virtual thread blocks to prevent
// threads from writing the values from the buffer to the actual
// IMGVF matrix too early
__syncthreads();
}
// We need to compute the overall sum of the change at each matrix element
// by performing a tree reduction across the whole threadblock
buffer[thread_id] = total_diff;
__syncthreads();
// Account for thread block sizes that are not a power of 2
if (thread_id >= next_lowest_power_of_two) {
buffer[thread_id - next_lowest_power_of_two] += buffer[thread_id];
}
__syncthreads();
// Perform the tree reduction
int th;
for (th = next_lowest_power_of_two / 2; th > 0; th /= 2) {
if (thread_id < th) {
buffer[thread_id] += buffer[thread_id + th];
}
__syncthreads();
}
// Figure out if we have converged
if(thread_id == 0) {
float mean = buffer[thread_id] / (float) (m * n);
if (mean < cutoff) {
// We have converged, so set the appropriate flag
cell_converged = 1;
}
}
// We need to synchronize to ensure that all threads
// read the correct value of the convergence flag
__syncthreads();
// Keep track of the number of iterations we have performed
iterations++;
}
// Save the final IMGVF matrix to global memory
for (thread_block = 0; thread_block < max; thread_block++) {
int offset = thread_block * threads_per_block;
i = (thread_id + offset) / n;
j = (thread_id + offset) % n;
if (i < m) IMGVF_global[(i * n) + j] = IMGVF[(i * n) + j];
}
}
// Host function that launches a CUDA kernel to compute the MGVF matrices for the specified cells
void IMGVF_cuda(MAT **I, MAT **IMGVF, double vx, double vy, double e, int max_iterations, double cutoff, int num_cells) {
// Initialize the data on the GPU
IMGVF_cuda_init(I, num_cells);
// Compute the MGVF on the GPU
hipLaunchKernelGGL(( IMGVF_kernel) , dim3(num_cells), dim3(threads_per_block) , 0, 0,
device_IMGVF_array, device_I_array, device_m_array, device_n_array,
(float) vx, (float) vy, (float) e, max_iterations, (float) cutoff );
// Check for kernel errors
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
printf("MGVF kernel error: %s\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Copy back the final results from the GPU
IMGVF_cuda_cleanup(IMGVF, num_cells);
}
// Initializes data on the GPU for the MGVF kernel
void IMGVF_cuda_init(MAT **IE, int num_cells) {
// Allocate arrays of pointers to device memory
host_I_array = (float **) malloc(sizeof(float *) * num_cells);
host_IMGVF_array = (float **) malloc(sizeof(float *) * num_cells);
hipMalloc( (void**) &device_I_array, num_cells * sizeof(float *));
hipMalloc( (void**) &device_IMGVF_array, num_cells * sizeof(float *));
// Allocate arrays of memory dimensions
host_m_array = (int *) malloc(sizeof(int) * num_cells);
host_n_array = (int *) malloc(sizeof(int) * num_cells);
hipMalloc( (void**) &device_m_array, num_cells * sizeof(int));
hipMalloc( (void**) &device_n_array, num_cells * sizeof(int));
// Figure out the size of all of the matrices combined
int i, j, cell_num;
int total_size = 0;
for (cell_num = 0; cell_num < num_cells; cell_num++) {
MAT *I = IE[cell_num];
int size = I->m * I->n;
total_size += size;
}
total_mem_size = total_size * sizeof(float);
// Allocate host memory just once for all cells
host_I_all = (float *) malloc(total_mem_size);
// Allocate device memory just once for all cells
float *device_I_all, *device_IMGVF_all;
hipMalloc( (void**) &device_I_all, total_mem_size);
hipMalloc( (void**) &device_IMGVF_all, total_mem_size);
// Copy each initial matrix into the allocated host memory
int offset = 0;
for (cell_num = 0; cell_num < num_cells; cell_num++) {
MAT *I = IE[cell_num];
// Determine the size of the matrix
int m = I->m, n = I->n;
int size = m * n;
// Store memory dimensions
host_m_array[cell_num] = m;
host_n_array[cell_num] = n;
// Store pointers to allocated memory
float *device_I = &(device_I_all[offset]);
float *device_IMGVF = &(device_IMGVF_all[offset]);
host_I_array[cell_num] = device_I;
host_IMGVF_array[cell_num] = device_IMGVF;
// Copy matrix I (which is also the initial IMGVF matrix) into the overall array
for (i = 0; i < m; i++)
for (j = 0; j < n; j++)
host_I_all[offset + (i * n) + j] = (float) m_get_val(I, i, j);
offset += size;
}
// Copy I matrices (which are also the initial IMGVF matrices) to device
hipMemcpy(device_I_all, host_I_all, total_mem_size, hipMemcpyHostToDevice);
hipMemcpy(device_IMGVF_all, host_I_all, total_mem_size, hipMemcpyHostToDevice);
// Copy pointer arrays to device
hipMemcpy(device_I_array, host_I_array, num_cells * sizeof(float *), hipMemcpyHostToDevice);
hipMemcpy(device_IMGVF_array, host_IMGVF_array, num_cells * sizeof(float *), hipMemcpyHostToDevice);
// Copy memory dimension arrays to device
hipMemcpy(device_m_array, host_m_array, num_cells * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(device_n_array, host_n_array, num_cells * sizeof(int), hipMemcpyHostToDevice);
}
// Copies the results of the MGVF kernel back to the host
void IMGVF_cuda_cleanup(MAT **IMGVF_out_array, int num_cells) {
// Copy the result matrices from the device to the host
hipMemcpy(host_I_all, host_IMGVF_array[0], total_mem_size, hipMemcpyDeviceToHost);
// Copy each result matrix into its appropriate host matrix
int cell_num, offset = 0;
for (cell_num = 0; cell_num < num_cells; cell_num++) {
MAT *IMGVF_out = IMGVF_out_array[cell_num];
// Determine the size of the matrix
int m = IMGVF_out->m, n = IMGVF_out->n, i, j;
// Pack the result into the matrix
for (i = 0; i < m; i++)
for (j = 0; j < n; j++)
m_set_val(IMGVF_out, i, j, (double) host_I_all[offset + (i * n) + j]);
offset += (m * n);
}
// Free device memory
hipFree(device_m_array);
hipFree(device_n_array);
hipFree(device_IMGVF_array);
hipFree(device_I_array);
hipFree(host_IMGVF_array[0]);
hipFree(host_I_array[0]);
// Free host memory
free(host_m_array);
free(host_n_array);
free(host_IMGVF_array);
free(host_I_array);
free(host_I_all);
}
|
e36105e05e88a52c0386cf316d93c2ee36cf51b0.cu
|
#include "track_ellipse_kernel.h"
#include "misc_math.h"
// #include <cutil.h>
// Constants used in the MGVF computation
#define ONE_OVER_PI (1.0 / PI)
#define MU 0.5
#define LAMBDA (8.0 * MU + 1.0)
// Host and device arrays to hold device pointers to input matrices
float **host_I_array, **host_IMGVF_array;
float **device_I_array, **device_IMGVF_array;
// Host and device arrays to hold sizes of input matrices
int *host_m_array, *host_n_array;
int *device_m_array, *device_n_array;
// Host array to hold matrices for all cells
// (so we can copy to and from the device in a single transfer)
float *host_I_all;
int total_mem_size;
// The number of threads per thread block
const int threads_per_block = 320;
// next_lowest_power_of_two = 2^(floor(log2(threads_per_block)))
const int next_lowest_power_of_two = 256;
// Regularized version of the Heaviside step function:
// He(x) = (atan(x) / pi) + 0.5
__device__ float heaviside(float x) {
return (atan(x) * ONE_OVER_PI) + 0.5;
// A simpler, faster approximation of the Heaviside function
/* float out = 0.0;
if (x > -0.0001) out = 0.5;
if (x > 0.0001) out = 1.0;
return out; */
}
// Kernel to compute the Motion Gradient Vector Field (MGVF) matrix for multiple cells
__global__ void IMGVF_kernel(float **IMGVF_array, float **I_array, int *m_array, int *n_array,
float vx, float vy, float e, int max_iterations, float cutoff) {
// Shared copy of the matrix being computed
__shared__ float IMGVF[41 * 81];
// Shared buffer used for two purposes:
// 1) To temporarily store newly computed matrix values so that only
// values from the previous iteration are used in the computation.
// 2) To store partial sums during the tree reduction which is performed
// at the end of each iteration to determine if the computation has converged.
__shared__ float buffer[threads_per_block];
// Figure out which cell this thread block is working on
int cell_num = blockIdx.x;
// Get pointers to the current cell's input image and initial matrix
float *IMGVF_global = IMGVF_array[cell_num];
float *I = I_array[cell_num];
// Get current cell's matrix dimensions
int m = m_array[cell_num];
int n = n_array[cell_num];
// Compute the number of virtual thread blocks
int max = (m * n + threads_per_block - 1) / threads_per_block;
// Load the initial IMGVF matrix into shared memory
int thread_id = threadIdx.x, thread_block, i, j;
for (thread_block = 0; thread_block < max; thread_block++) {
int offset = thread_block * threads_per_block;
i = (thread_id + offset) / n;
j = (thread_id + offset) % n;
if (i < m) IMGVF[(i * n) + j] = IMGVF_global[(i * n) + j];
}
__syncthreads();
// Set the converged flag to false
__shared__ int cell_converged;
if (threadIdx.x == 0) cell_converged = 0;
__syncthreads();
// Constants used to iterate through virtual thread blocks
const float one_nth = 1.f / (float) n;
const int tid_mod = thread_id % n;
const int tbsize_mod = threads_per_block % n;
// Constant used in the computation of Heaviside values
float one_over_e = 1.0 / e;
// Iteratively compute the IMGVF matrix until the computation has
// converged or we have reached the maximum number of iterations
int iterations = 0;
while ((! cell_converged) && (iterations < max_iterations)) {
// The total change to this thread's matrix elements in the current iteration
float total_diff = 0.0f;
int old_i = 0, old_j = 0;
j = tid_mod - tbsize_mod;
// Iterate over virtual thread blocks
for (thread_block = 0; thread_block < max; thread_block++) {
// Store the index of this thread's previous matrix element
// (used in the buffering scheme below)
old_i = i;
old_j = j;
// Determine the index of this thread's current matrix element
int offset = thread_block * threads_per_block;
i = (thread_id + offset) * one_nth;
j += tbsize_mod;
if (j >= n) j -= n;
float new_val = 0.0, old_val = 0.0;
// Make sure the thread has not gone off the end of the matrix
if (i < m) {
// Compute neighboring matrix element indices
int rowU = (i == 0) ? 0 : i - 1;
int rowD = (i == m - 1) ? m - 1 : i + 1;
int colL = (j == 0) ? 0 : j - 1;
int colR = (j == n - 1) ? n - 1 : j + 1;
// Compute the difference between the matrix element and its eight neighbors
old_val = IMGVF[(i * n) + j];
float U = IMGVF[(rowU * n) + j ] - old_val;
float D = IMGVF[(rowD * n) + j ] - old_val;
float L = IMGVF[(i * n) + colL] - old_val;
float R = IMGVF[(i * n) + colR] - old_val;
float UR = IMGVF[(rowU * n) + colR] - old_val;
float DR = IMGVF[(rowD * n) + colR] - old_val;
float UL = IMGVF[(rowU * n) + colL] - old_val;
float DL = IMGVF[(rowD * n) + colL] - old_val;
// Compute the regularized heaviside value for these differences
float UHe = heaviside((U * -vy) * one_over_e);
float DHe = heaviside((D * vy) * one_over_e);
float LHe = heaviside((L * -vx ) * one_over_e);
float RHe = heaviside((R * vx ) * one_over_e);
float URHe = heaviside((UR * ( vx - vy)) * one_over_e);
float DRHe = heaviside((DR * ( vx + vy)) * one_over_e);
float ULHe = heaviside((UL * (-vx - vy)) * one_over_e);
float DLHe = heaviside((DL * (-vx + vy)) * one_over_e);
// Update the IMGVF value in two steps:
// 1) Compute IMGVF += (mu / lambda)(UHe .*U + DHe .*D + LHe .*L + RHe .*R +
// URHe.*UR + DRHe.*DR + ULHe.*UL + DLHe.*DL);
new_val = old_val + (MU / LAMBDA) * (UHe * U + DHe * D + LHe * L + RHe * R +
URHe * UR + DRHe * DR + ULHe * UL + DLHe * DL);
// 2) Compute IMGVF -= (1 / lambda)(I .* (IMGVF - I))
float vI = I[(i * n) + j];
new_val -= ((1.0 / LAMBDA) * vI * (new_val - vI));
}
// Save the previous virtual thread block's value (if it exists)
if (thread_block > 0) {
offset = (thread_block - 1) * threads_per_block;
if (old_i < m) IMGVF[(old_i * n) + old_j] = buffer[thread_id];
}
if (thread_block < max - 1) {
// Write the new value to the buffer
buffer[thread_id] = new_val;
} else {
// We've reached the final virtual thread block,
// so write directly to the matrix
if (i < m) IMGVF[(i * n) + j] = new_val;
}
// Keep track of the total change of this thread's matrix elements
total_diff += fabs(new_val - old_val);
// We need to synchronize between virtual thread blocks to prevent
// threads from writing the values from the buffer to the actual
// IMGVF matrix too early
__syncthreads();
}
// We need to compute the overall sum of the change at each matrix element
// by performing a tree reduction across the whole threadblock
buffer[thread_id] = total_diff;
__syncthreads();
// Account for thread block sizes that are not a power of 2
if (thread_id >= next_lowest_power_of_two) {
buffer[thread_id - next_lowest_power_of_two] += buffer[thread_id];
}
__syncthreads();
// Perform the tree reduction
int th;
for (th = next_lowest_power_of_two / 2; th > 0; th /= 2) {
if (thread_id < th) {
buffer[thread_id] += buffer[thread_id + th];
}
__syncthreads();
}
// Figure out if we have converged
if(thread_id == 0) {
float mean = buffer[thread_id] / (float) (m * n);
if (mean < cutoff) {
// We have converged, so set the appropriate flag
cell_converged = 1;
}
}
// We need to synchronize to ensure that all threads
// read the correct value of the convergence flag
__syncthreads();
// Keep track of the number of iterations we have performed
iterations++;
}
// Save the final IMGVF matrix to global memory
for (thread_block = 0; thread_block < max; thread_block++) {
int offset = thread_block * threads_per_block;
i = (thread_id + offset) / n;
j = (thread_id + offset) % n;
if (i < m) IMGVF_global[(i * n) + j] = IMGVF[(i * n) + j];
}
}
// Host function that launches a CUDA kernel to compute the MGVF matrices for the specified cells
void IMGVF_cuda(MAT **I, MAT **IMGVF, double vx, double vy, double e, int max_iterations, double cutoff, int num_cells) {
// Initialize the data on the GPU
IMGVF_cuda_init(I, num_cells);
// Compute the MGVF on the GPU
IMGVF_kernel <<< num_cells, threads_per_block >>>
( device_IMGVF_array, device_I_array, device_m_array, device_n_array,
(float) vx, (float) vy, (float) e, max_iterations, (float) cutoff );
// Check for kernel errors
cudaThreadSynchronize();
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
printf("MGVF kernel error: %s\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Copy back the final results from the GPU
IMGVF_cuda_cleanup(IMGVF, num_cells);
}
// Initializes data on the GPU for the MGVF kernel
void IMGVF_cuda_init(MAT **IE, int num_cells) {
// Allocate arrays of pointers to device memory
host_I_array = (float **) malloc(sizeof(float *) * num_cells);
host_IMGVF_array = (float **) malloc(sizeof(float *) * num_cells);
cudaMalloc( (void**) &device_I_array, num_cells * sizeof(float *));
cudaMalloc( (void**) &device_IMGVF_array, num_cells * sizeof(float *));
// Allocate arrays of memory dimensions
host_m_array = (int *) malloc(sizeof(int) * num_cells);
host_n_array = (int *) malloc(sizeof(int) * num_cells);
cudaMalloc( (void**) &device_m_array, num_cells * sizeof(int));
cudaMalloc( (void**) &device_n_array, num_cells * sizeof(int));
// Figure out the size of all of the matrices combined
int i, j, cell_num;
int total_size = 0;
for (cell_num = 0; cell_num < num_cells; cell_num++) {
MAT *I = IE[cell_num];
int size = I->m * I->n;
total_size += size;
}
total_mem_size = total_size * sizeof(float);
// Allocate host memory just once for all cells
host_I_all = (float *) malloc(total_mem_size);
// Allocate device memory just once for all cells
float *device_I_all, *device_IMGVF_all;
cudaMalloc( (void**) &device_I_all, total_mem_size);
cudaMalloc( (void**) &device_IMGVF_all, total_mem_size);
// Copy each initial matrix into the allocated host memory
int offset = 0;
for (cell_num = 0; cell_num < num_cells; cell_num++) {
MAT *I = IE[cell_num];
// Determine the size of the matrix
int m = I->m, n = I->n;
int size = m * n;
// Store memory dimensions
host_m_array[cell_num] = m;
host_n_array[cell_num] = n;
// Store pointers to allocated memory
float *device_I = &(device_I_all[offset]);
float *device_IMGVF = &(device_IMGVF_all[offset]);
host_I_array[cell_num] = device_I;
host_IMGVF_array[cell_num] = device_IMGVF;
// Copy matrix I (which is also the initial IMGVF matrix) into the overall array
for (i = 0; i < m; i++)
for (j = 0; j < n; j++)
host_I_all[offset + (i * n) + j] = (float) m_get_val(I, i, j);
offset += size;
}
// Copy I matrices (which are also the initial IMGVF matrices) to device
cudaMemcpy(device_I_all, host_I_all, total_mem_size, cudaMemcpyHostToDevice);
cudaMemcpy(device_IMGVF_all, host_I_all, total_mem_size, cudaMemcpyHostToDevice);
// Copy pointer arrays to device
cudaMemcpy(device_I_array, host_I_array, num_cells * sizeof(float *), cudaMemcpyHostToDevice);
cudaMemcpy(device_IMGVF_array, host_IMGVF_array, num_cells * sizeof(float *), cudaMemcpyHostToDevice);
// Copy memory dimension arrays to device
cudaMemcpy(device_m_array, host_m_array, num_cells * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(device_n_array, host_n_array, num_cells * sizeof(int), cudaMemcpyHostToDevice);
}
// Copies the results of the MGVF kernel back to the host
void IMGVF_cuda_cleanup(MAT **IMGVF_out_array, int num_cells) {
// Copy the result matrices from the device to the host
cudaMemcpy(host_I_all, host_IMGVF_array[0], total_mem_size, cudaMemcpyDeviceToHost);
// Copy each result matrix into its appropriate host matrix
int cell_num, offset = 0;
for (cell_num = 0; cell_num < num_cells; cell_num++) {
MAT *IMGVF_out = IMGVF_out_array[cell_num];
// Determine the size of the matrix
int m = IMGVF_out->m, n = IMGVF_out->n, i, j;
// Pack the result into the matrix
for (i = 0; i < m; i++)
for (j = 0; j < n; j++)
m_set_val(IMGVF_out, i, j, (double) host_I_all[offset + (i * n) + j]);
offset += (m * n);
}
// Free device memory
cudaFree(device_m_array);
cudaFree(device_n_array);
cudaFree(device_IMGVF_array);
cudaFree(device_I_array);
cudaFree(host_IMGVF_array[0]);
cudaFree(host_I_array[0]);
// Free host memory
free(host_m_array);
free(host_n_array);
free(host_IMGVF_array);
free(host_I_array);
free(host_I_all);
}
|
c226e39836e95a99833388e5fbfaeb9e9e4669a1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
__global__ void ge_mul (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, const REAL* b, const int offset_b, const int ld_b, REAL* c, const int offset_c, const int ld_c) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < fd);
if (valid) {
c[offset_c + gid_0 + gid_1 * ld_c] =
a[offset_a + gid_0 + gid_1 * ld_a] * b[offset_b + gid_0 + gid_1 * ld_b];
}
}
|
c226e39836e95a99833388e5fbfaeb9e9e4669a1.cu
|
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
__global__ void ge_mul (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, const REAL* b, const int offset_b, const int ld_b, REAL* c, const int offset_c, const int ld_c) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < fd);
if (valid) {
c[offset_c + gid_0 + gid_1 * ld_c] =
a[offset_a + gid_0 + gid_1 * ld_a] * b[offset_b + gid_0 + gid_1 * ld_b];
}
}
|
f5215778b183c75d92cb3f2e08793b1cf489a187.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void device_len_dot ()
{
__shared__ float partial_len[REDUC_THREADS], partial_dot[REDUC_THREADS] ;
int i, n, index ;
float sum_len, sum_dot ;
index = threadIdx.x ;
n = d_n_inputs_cols * d_nhid ;
sum_len = sum_dot = 0.0f ;
for (i=blockIdx.x*blockDim.x+index ; i<n ; i+=blockDim.x*gridDim.x) {
sum_len += d_w_grad[i] * d_w_grad[i] ;
sum_dot += d_w_grad[i] * d_prev_grad[i] ;
d_prev_grad[i] = d_w_grad[i] ;
}
partial_len[index] = sum_len ;
partial_dot[index] = sum_dot ;
__syncthreads() ;
for (i=blockDim.x>>1 ; i ; i>>=1) {
if (index < i) {
partial_len[index] += partial_len[index+i] ;
partial_dot[index] += partial_dot[index+i] ;
}
__syncthreads() ;
}
if (index == 0) {
d_len_out[blockIdx.x] = partial_len[0] ;
d_dot_out[blockIdx.x] = partial_dot[0] ;
}
}
|
f5215778b183c75d92cb3f2e08793b1cf489a187.cu
|
#include "includes.h"
__global__ void device_len_dot ()
{
__shared__ float partial_len[REDUC_THREADS], partial_dot[REDUC_THREADS] ;
int i, n, index ;
float sum_len, sum_dot ;
index = threadIdx.x ;
n = d_n_inputs_cols * d_nhid ;
sum_len = sum_dot = 0.0f ;
for (i=blockIdx.x*blockDim.x+index ; i<n ; i+=blockDim.x*gridDim.x) {
sum_len += d_w_grad[i] * d_w_grad[i] ;
sum_dot += d_w_grad[i] * d_prev_grad[i] ;
d_prev_grad[i] = d_w_grad[i] ;
}
partial_len[index] = sum_len ;
partial_dot[index] = sum_dot ;
__syncthreads() ;
for (i=blockDim.x>>1 ; i ; i>>=1) {
if (index < i) {
partial_len[index] += partial_len[index+i] ;
partial_dot[index] += partial_dot[index+i] ;
}
__syncthreads() ;
}
if (index == 0) {
d_len_out[blockIdx.x] = partial_len[0] ;
d_dot_out[blockIdx.x] = partial_dot[0] ;
}
}
|
69d4dbdac5eee3e7e46b25ec3914e89dda5b624a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <thrust/gather.h>
#include <thrust/iterator/discard_iterator.h>
#include "cupoch/geometry/kdtree_flann.h"
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/utility/console.h"
#include "cupoch/utility/helper.h"
#include "cupoch/utility/platform.h"
#include "cupoch/utility/range.h"
using namespace cupoch;
using namespace cupoch::geometry;
namespace {
void SelectDownSampleImpl(const geometry::PointCloud &src,
geometry::PointCloud &dst,
const utility::device_vector<size_t> &indices) {
const bool has_normals = src.HasNormals();
const bool has_colors = src.HasColors();
if (has_normals) dst.normals_.resize(indices.size());
if (has_colors) dst.colors_.resize(indices.size());
dst.points_.resize(indices.size());
thrust::gather(utility::exec_policy(utility::GetStream(0))
->on(utility::GetStream(0)),
indices.begin(), indices.end(), src.points_.begin(),
dst.points_.begin());
if (has_normals) {
thrust::gather(utility::exec_policy(utility::GetStream(1))
->on(utility::GetStream(1)),
indices.begin(), indices.end(), src.normals_.begin(),
dst.normals_.begin());
}
if (has_colors) {
thrust::gather(utility::exec_policy(utility::GetStream(2))
->on(utility::GetStream(2)),
indices.begin(), indices.end(), src.colors_.begin(),
dst.colors_.begin());
}
cudaSafeCall(hipDeviceSynchronize());
}
struct compute_key_functor {
compute_key_functor(const Eigen::Vector3f &voxel_min_bound,
float voxel_size)
: voxel_min_bound_(voxel_min_bound), voxel_size_(voxel_size){};
const Eigen::Vector3f voxel_min_bound_;
const float voxel_size_;
__device__ Eigen::Vector3i operator()(const Eigen::Vector3f &pt) {
auto ref_coord = (pt - voxel_min_bound_) / voxel_size_;
return Eigen::Vector3i(int(floor(ref_coord(0))),
int(floor(ref_coord(1))),
int(floor(ref_coord(2))));
}
};
template <typename OutputIterator, class... Args>
__host__ int CalcAverageByKey(utility::device_vector<Eigen::Vector3i> &keys,
OutputIterator buf_begins,
OutputIterator output_begins) {
const size_t n = keys.size();
thrust::sort_by_key(keys.begin(), keys.end(), buf_begins);
utility::device_vector<int> counts(n);
auto end1 = thrust::reduce_by_key(
keys.begin(), keys.end(), thrust::make_constant_iterator(1),
thrust::make_discard_iterator(), counts.begin());
int n_out = thrust::distance(counts.begin(), end1.second);
counts.resize(n_out);
thrust::equal_to<Eigen::Vector3i> binary_pred;
add_tuple_functor<Args...> add_func;
auto end2 = thrust::reduce_by_key(keys.begin(), keys.end(), buf_begins,
thrust::make_discard_iterator(),
output_begins, binary_pred, add_func);
devided_tuple_functor<Args...> dv_func;
thrust::transform(output_begins, output_begins + n_out, counts.begin(),
output_begins, dv_func);
return n_out;
}
struct has_radius_points_functor {
has_radius_points_functor(const int *indices, int n_points, int knn)
: indices_(indices), n_points_(n_points), knn_(knn){};
const int *indices_;
const int n_points_;
const int knn_;
__device__ bool operator()(int idx) const {
int count = 0;
for (int i = 0; i < knn_; ++i) {
if (indices_[idx * knn_ + i] >= 0) count++;
}
return (count > n_points_);
}
};
struct average_distance_functor {
average_distance_functor(const float *distance, int knn)
: distance_(distance), knn_(knn){};
const float *distance_;
const int knn_;
__device__ float operator()(int idx) const {
int count = 0;
float avg = 0;
for (int i = 0; i < knn_; ++i) {
const float d = distance_[idx * knn_ + i];
if (std::isinf(d) || d < 0.0) continue;
avg += d;
count++;
}
return (count == 0) ? -1.0 : avg / (float)count;
}
};
struct check_distance_threshold_functor {
check_distance_threshold_functor(const float *distances,
float distance_threshold)
: distances_(distances), distance_threshold_(distance_threshold){};
const float *distances_;
const float distance_threshold_;
__device__ bool operator()(int idx) const {
return (distances_[idx] > 0 && distances_[idx] < distance_threshold_);
}
};
} // namespace
std::shared_ptr<PointCloud> PointCloud::SelectDownSample(
const utility::device_vector<size_t> &indices, bool invert) const {
auto output = std::make_shared<PointCloud>();
if (invert) {
size_t n_out = points_.size() - indices.size();
utility::device_vector<size_t> sorted_indices = indices;
thrust::sort(sorted_indices.begin(), sorted_indices.end());
utility::device_vector<size_t> inv_indices(n_out);
thrust::set_difference(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(points_.size()),
sorted_indices.begin(), sorted_indices.end(),
inv_indices.begin());
SelectDownSampleImpl(*this, *output, inv_indices);
} else {
SelectDownSampleImpl(*this, *output, indices);
}
return output;
}
std::shared_ptr<PointCloud> PointCloud::VoxelDownSample(
float voxel_size) const {
auto output = std::make_shared<PointCloud>();
if (voxel_size <= 0.0) {
utility::LogWarning("[VoxelDownSample] voxel_size <= 0.\n");
return output;
}
const Eigen::Vector3f voxel_size3 =
Eigen::Vector3f(voxel_size, voxel_size, voxel_size);
const Eigen::Vector3f voxel_min_bound = GetMinBound() - voxel_size3 * 0.5;
const Eigen::Vector3f voxel_max_bound = GetMaxBound() + voxel_size3 * 0.5;
if (voxel_size * std::numeric_limits<int>::max() <
(voxel_max_bound - voxel_min_bound).maxCoeff()) {
utility::LogWarning("[VoxelDownSample] voxel_size is too small.\n");
return output;
}
const int n = points_.size();
const bool has_normals = HasNormals();
const bool has_colors = HasColors();
compute_key_functor ck_func(voxel_min_bound, voxel_size);
utility::device_vector<Eigen::Vector3i> keys(n);
thrust::transform(points_.begin(), points_.end(), keys.begin(), ck_func);
utility::device_vector<Eigen::Vector3f> sorted_points = points_;
output->points_.resize(n);
if (!has_normals && !has_colors) {
typedef thrust::tuple<utility::device_vector<Eigen::Vector3f>::iterator>
IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
auto n_out = CalcAverageByKey<ZipIterator, Eigen::Vector3f>(
keys, make_tuple_iterator(sorted_points.begin()),
make_tuple_iterator(output->points_.begin()));
output->points_.resize(n_out);
} else if (has_normals && !has_colors) {
utility::device_vector<Eigen::Vector3f> sorted_normals = normals_;
output->normals_.resize(n);
typedef thrust::tuple<utility::device_vector<Eigen::Vector3f>::iterator,
utility::device_vector<Eigen::Vector3f>::iterator>
IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
auto n_out =
CalcAverageByKey<ZipIterator, Eigen::Vector3f, Eigen::Vector3f>(
keys,
make_tuple_iterator(sorted_points.begin(),
sorted_normals.begin()),
make_tuple_iterator(output->points_.begin(),
output->normals_.begin()));
output->points_.resize(n_out);
output->normals_.resize(n_out);
thrust::for_each(
output->normals_.begin(), output->normals_.end(),
[] __device__(Eigen::Vector3f & nl) { nl.normalize(); });
} else if (!has_normals && has_colors) {
utility::device_vector<Eigen::Vector3f> sorted_colors = colors_;
output->colors_.resize(n);
typedef thrust::tuple<utility::device_vector<Eigen::Vector3f>::iterator,
utility::device_vector<Eigen::Vector3f>::iterator>
IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
auto n_out =
CalcAverageByKey<ZipIterator, Eigen::Vector3f, Eigen::Vector3f>(
keys,
make_tuple_iterator(sorted_points.begin(),
sorted_colors.begin()),
make_tuple_iterator(output->points_.begin(),
output->colors_.begin()));
output->points_.resize(n_out);
output->colors_.resize(n_out);
} else {
utility::device_vector<Eigen::Vector3f> sorted_normals = normals_;
utility::device_vector<Eigen::Vector3f> sorted_colors = colors_;
output->normals_.resize(n);
output->colors_.resize(n);
typedef thrust::tuple<utility::device_vector<Eigen::Vector3f>::iterator,
utility::device_vector<Eigen::Vector3f>::iterator,
utility::device_vector<Eigen::Vector3f>::iterator>
IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
auto n_out = CalcAverageByKey<ZipIterator, Eigen::Vector3f,
Eigen::Vector3f, Eigen::Vector3f>(
keys,
make_tuple_iterator(sorted_points.begin(),
sorted_normals.begin(),
sorted_colors.begin()),
make_tuple_iterator(output->points_.begin(),
output->normals_.begin(),
output->colors_.begin()));
output->points_.resize(n_out);
output->normals_.resize(n_out);
output->colors_.resize(n_out);
thrust::for_each(
output->normals_.begin(), output->normals_.end(),
[] __device__(Eigen::Vector3f & nl) { nl.normalize(); });
}
utility::LogDebug(
"Pointcloud down sampled from {:d} points to {:d} points.\n",
(int)points_.size(), (int)output->points_.size());
return output;
}
std::shared_ptr<PointCloud> PointCloud::UniformDownSample(
size_t every_k_points) const {
const bool has_normals = HasNormals();
const bool has_colors = HasColors();
auto output = std::make_shared<PointCloud>();
if (every_k_points == 0) {
utility::LogError("[UniformDownSample] Illegal sample rate.");
return output;
}
const int n_out = points_.size() / every_k_points;
output->points_.resize(n_out);
if (has_normals) output->normals_.resize(n_out);
if (has_colors) output->colors_.resize(n_out);
thrust::strided_range<
utility::device_vector<Eigen::Vector3f>::const_iterator>
range_points(points_.begin(), points_.end(), every_k_points);
thrust::copy(utility::exec_policy(utility::GetStream(0))
->on(utility::GetStream(0)),
range_points.begin(), range_points.end(),
output->points_.begin());
if (has_normals) {
thrust::strided_range<
utility::device_vector<Eigen::Vector3f>::const_iterator>
range_normals(normals_.begin(), normals_.end(), every_k_points);
thrust::copy(utility::exec_policy(utility::GetStream(1))
->on(utility::GetStream(1)),
range_normals.begin(), range_normals.end(),
output->normals_.begin());
}
if (has_colors) {
thrust::strided_range<
utility::device_vector<Eigen::Vector3f>::const_iterator>
range_colors(colors_.begin(), colors_.end(), every_k_points);
thrust::copy(utility::exec_policy(utility::GetStream(2))
->on(utility::GetStream(2)),
range_colors.begin(), range_colors.end(),
output->colors_.begin());
}
cudaSafeCall(hipDeviceSynchronize());
return output;
}
std::tuple<std::shared_ptr<PointCloud>, utility::device_vector<size_t>>
PointCloud::RemoveRadiusOutliers(size_t nb_points, float search_radius) const {
if (nb_points < 1 || search_radius <= 0) {
utility::LogError(
"[RemoveRadiusOutliers] Illegal input parameters,"
"number of points and radius must be positive");
}
KDTreeFlann kdtree;
kdtree.SetGeometry(*this);
utility::device_vector<int> tmp_indices;
utility::device_vector<float> dist;
kdtree.SearchRadius(points_, search_radius, tmp_indices, dist);
const size_t n_pt = points_.size();
utility::device_vector<size_t> indices(n_pt);
has_radius_points_functor func(thrust::raw_pointer_cast(tmp_indices.data()),
nb_points, NUM_MAX_NN);
auto end = thrust::copy_if(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(n_pt),
indices.begin(), func);
indices.resize(thrust::distance(indices.begin(), end));
return std::make_tuple(SelectDownSample(indices), indices);
}
std::tuple<std::shared_ptr<PointCloud>, utility::device_vector<size_t>>
PointCloud::RemoveStatisticalOutliers(size_t nb_neighbors,
float std_ratio) const {
if (nb_neighbors < 1 || std_ratio <= 0) {
utility::LogError(
"[RemoveStatisticalOutliers] Illegal input parameters, number "
"of neighbors and standard deviation ratio must be positive");
}
if (points_.empty()) {
return std::make_tuple(std::make_shared<PointCloud>(),
utility::device_vector<size_t>());
}
KDTreeFlann kdtree;
kdtree.SetGeometry(*this);
const int n_pt = points_.size();
utility::device_vector<float> avg_distances(n_pt);
utility::device_vector<size_t> indices(n_pt);
utility::device_vector<int> tmp_indices;
utility::device_vector<float> dist;
kdtree.SearchKNN(points_, int(nb_neighbors), tmp_indices, dist);
average_distance_functor avg_func(thrust::raw_pointer_cast(dist.data()),
nb_neighbors);
thrust::transform(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator((size_t)n_pt),
avg_distances.begin(), avg_func);
const size_t valid_distances =
thrust::count_if(avg_distances.begin(), avg_distances.end(),
[] __device__(float x) { return (x >= 0.0); });
if (valid_distances == 0) {
return std::make_tuple(std::make_shared<PointCloud>(),
utility::device_vector<size_t>());
}
float cloud_mean =
thrust::reduce(avg_distances.begin(), avg_distances.end(), 0.0,
[] __device__(float const &x, float const &y) {
return (y > 0) ? x + y : x;
});
cloud_mean /= valid_distances;
const float sq_sum = thrust::transform_reduce(
avg_distances.begin(), avg_distances.end(),
[cloud_mean] __device__(const float x) {
return (x > 0) ? (x - cloud_mean) * (x - cloud_mean) : 0;
},
0.0, thrust::plus<float>());
// Bessel's correction
const float std_dev = std::sqrt(sq_sum / (valid_distances - 1));
const float distance_threshold = cloud_mean + std_ratio * std_dev;
check_distance_threshold_functor th_func(
thrust::raw_pointer_cast(avg_distances.data()), distance_threshold);
auto end = thrust::copy_if(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator((size_t)n_pt),
indices.begin(), th_func);
indices.resize(thrust::distance(indices.begin(), end));
return std::make_tuple(SelectDownSample(indices), indices);
}
|
69d4dbdac5eee3e7e46b25ec3914e89dda5b624a.cu
|
#include <thrust/gather.h>
#include <thrust/iterator/discard_iterator.h>
#include "cupoch/geometry/kdtree_flann.h"
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/utility/console.h"
#include "cupoch/utility/helper.h"
#include "cupoch/utility/platform.h"
#include "cupoch/utility/range.h"
using namespace cupoch;
using namespace cupoch::geometry;
namespace {
void SelectDownSampleImpl(const geometry::PointCloud &src,
geometry::PointCloud &dst,
const utility::device_vector<size_t> &indices) {
const bool has_normals = src.HasNormals();
const bool has_colors = src.HasColors();
if (has_normals) dst.normals_.resize(indices.size());
if (has_colors) dst.colors_.resize(indices.size());
dst.points_.resize(indices.size());
thrust::gather(utility::exec_policy(utility::GetStream(0))
->on(utility::GetStream(0)),
indices.begin(), indices.end(), src.points_.begin(),
dst.points_.begin());
if (has_normals) {
thrust::gather(utility::exec_policy(utility::GetStream(1))
->on(utility::GetStream(1)),
indices.begin(), indices.end(), src.normals_.begin(),
dst.normals_.begin());
}
if (has_colors) {
thrust::gather(utility::exec_policy(utility::GetStream(2))
->on(utility::GetStream(2)),
indices.begin(), indices.end(), src.colors_.begin(),
dst.colors_.begin());
}
cudaSafeCall(cudaDeviceSynchronize());
}
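// Converts a point into the integer coordinate of the voxel that contains it,
// relative to the grid origin voxel_min_bound_.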
struct compute_key_functor {
compute_key_functor(const Eigen::Vector3f &voxel_min_bound,
float voxel_size)
: voxel_min_bound_(voxel_min_bound), voxel_size_(voxel_size){};
const Eigen::Vector3f voxel_min_bound_;
const float voxel_size_;
__device__ Eigen::Vector3i operator()(const Eigen::Vector3f &pt) {
auto ref_coord = (pt - voxel_min_bound_) / voxel_size_;
return Eigen::Vector3i(int(floor(ref_coord(0))),
int(floor(ref_coord(1))),
int(floor(ref_coord(2))));
}
};
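// Sorts the zipped point attributes by voxel key, sums each key group, and divides
// by the group size, i.e. averages all points that fall into the same voxel.
// Returns the number of occupied voxels.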
template <typename OutputIterator, class... Args>
__host__ int CalcAverageByKey(utility::device_vector<Eigen::Vector3i> &keys,
OutputIterator buf_begins,
OutputIterator output_begins) {
const size_t n = keys.size();
thrust::sort_by_key(keys.begin(), keys.end(), buf_begins);
utility::device_vector<int> counts(n);
auto end1 = thrust::reduce_by_key(
keys.begin(), keys.end(), thrust::make_constant_iterator(1),
thrust::make_discard_iterator(), counts.begin());
int n_out = thrust::distance(counts.begin(), end1.second);
counts.resize(n_out);
thrust::equal_to<Eigen::Vector3i> binary_pred;
add_tuple_functor<Args...> add_func;
auto end2 = thrust::reduce_by_key(keys.begin(), keys.end(), buf_begins,
thrust::make_discard_iterator(),
output_begins, binary_pred, add_func);
devided_tuple_functor<Args...> dv_func;
thrust::transform(output_begins, output_begins + n_out, counts.begin(),
output_begins, dv_func);
return n_out;
}
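// A point is kept when more than n_points_ of its radius-search result slots hold
// a valid (non-negative) neighbor index.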
struct has_radius_points_functor {
has_radius_points_functor(const int *indices, int n_points, int knn)
: indices_(indices), n_points_(n_points), knn_(knn){};
const int *indices_;
const int n_points_;
const int knn_;
__device__ bool operator()(int idx) const {
int count = 0;
for (int i = 0; i < knn_; ++i) {
if (indices_[idx * knn_ + i] >= 0) count++;
}
return (count > n_points_);
}
};
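// Mean of the finite, non-negative k-NN distances of one point; returns -1 when the
// point has no valid neighbor.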
struct average_distance_functor {
average_distance_functor(const float *distance, int knn)
: distance_(distance), knn_(knn){};
const float *distance_;
const int knn_;
__device__ float operator()(int idx) const {
int count = 0;
float avg = 0;
for (int i = 0; i < knn_; ++i) {
const float d = distance_[idx * knn_ + i];
if (std::isinf(d) || d < 0.0) continue;
avg += d;
count++;
}
return (count == 0) ? -1.0 : avg / (float)count;
}
};
struct check_distance_threshold_functor {
check_distance_threshold_functor(const float *distances,
float distance_threshold)
: distances_(distances), distance_threshold_(distance_threshold){};
const float *distances_;
const float distance_threshold_;
__device__ bool operator()(int idx) const {
return (distances_[idx] > 0 && distances_[idx] < distance_threshold_);
}
};
} // namespace
std::shared_ptr<PointCloud> PointCloud::SelectDownSample(
const utility::device_vector<size_t> &indices, bool invert) const {
auto output = std::make_shared<PointCloud>();
if (invert) {
size_t n_out = points_.size() - indices.size();
utility::device_vector<size_t> sorted_indices = indices;
thrust::sort(sorted_indices.begin(), sorted_indices.end());
utility::device_vector<size_t> inv_indices(n_out);
thrust::set_difference(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(points_.size()),
sorted_indices.begin(), sorted_indices.end(),
inv_indices.begin());
SelectDownSampleImpl(*this, *output, inv_indices);
} else {
SelectDownSampleImpl(*this, *output, indices);
}
return output;
}
std::shared_ptr<PointCloud> PointCloud::VoxelDownSample(
float voxel_size) const {
auto output = std::make_shared<PointCloud>();
if (voxel_size <= 0.0) {
utility::LogWarning("[VoxelDownSample] voxel_size <= 0.\n");
return output;
}
const Eigen::Vector3f voxel_size3 =
Eigen::Vector3f(voxel_size, voxel_size, voxel_size);
const Eigen::Vector3f voxel_min_bound = GetMinBound() - voxel_size3 * 0.5;
const Eigen::Vector3f voxel_max_bound = GetMaxBound() + voxel_size3 * 0.5;
if (voxel_size * std::numeric_limits<int>::max() <
(voxel_max_bound - voxel_min_bound).maxCoeff()) {
utility::LogWarning("[VoxelDownSample] voxel_size is too small.\n");
return output;
}
const int n = points_.size();
const bool has_normals = HasNormals();
const bool has_colors = HasColors();
compute_key_functor ck_func(voxel_min_bound, voxel_size);
utility::device_vector<Eigen::Vector3i> keys(n);
thrust::transform(points_.begin(), points_.end(), keys.begin(), ck_func);
utility::device_vector<Eigen::Vector3f> sorted_points = points_;
output->points_.resize(n);
if (!has_normals && !has_colors) {
typedef thrust::tuple<utility::device_vector<Eigen::Vector3f>::iterator>
IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
auto n_out = CalcAverageByKey<ZipIterator, Eigen::Vector3f>(
keys, make_tuple_iterator(sorted_points.begin()),
make_tuple_iterator(output->points_.begin()));
output->points_.resize(n_out);
} else if (has_normals && !has_colors) {
utility::device_vector<Eigen::Vector3f> sorted_normals = normals_;
output->normals_.resize(n);
typedef thrust::tuple<utility::device_vector<Eigen::Vector3f>::iterator,
utility::device_vector<Eigen::Vector3f>::iterator>
IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
auto n_out =
CalcAverageByKey<ZipIterator, Eigen::Vector3f, Eigen::Vector3f>(
keys,
make_tuple_iterator(sorted_points.begin(),
sorted_normals.begin()),
make_tuple_iterator(output->points_.begin(),
output->normals_.begin()));
output->points_.resize(n_out);
output->normals_.resize(n_out);
thrust::for_each(
output->normals_.begin(), output->normals_.end(),
[] __device__(Eigen::Vector3f & nl) { nl.normalize(); });
} else if (!has_normals && has_colors) {
utility::device_vector<Eigen::Vector3f> sorted_colors = colors_;
output->colors_.resize(n);
typedef thrust::tuple<utility::device_vector<Eigen::Vector3f>::iterator,
utility::device_vector<Eigen::Vector3f>::iterator>
IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
auto n_out =
CalcAverageByKey<ZipIterator, Eigen::Vector3f, Eigen::Vector3f>(
keys,
make_tuple_iterator(sorted_points.begin(),
sorted_colors.begin()),
make_tuple_iterator(output->points_.begin(),
output->colors_.begin()));
output->points_.resize(n_out);
output->colors_.resize(n_out);
} else {
utility::device_vector<Eigen::Vector3f> sorted_normals = normals_;
utility::device_vector<Eigen::Vector3f> sorted_colors = colors_;
output->normals_.resize(n);
output->colors_.resize(n);
typedef thrust::tuple<utility::device_vector<Eigen::Vector3f>::iterator,
utility::device_vector<Eigen::Vector3f>::iterator,
utility::device_vector<Eigen::Vector3f>::iterator>
IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
auto n_out = CalcAverageByKey<ZipIterator, Eigen::Vector3f,
Eigen::Vector3f, Eigen::Vector3f>(
keys,
make_tuple_iterator(sorted_points.begin(),
sorted_normals.begin(),
sorted_colors.begin()),
make_tuple_iterator(output->points_.begin(),
output->normals_.begin(),
output->colors_.begin()));
output->points_.resize(n_out);
output->normals_.resize(n_out);
output->colors_.resize(n_out);
thrust::for_each(
output->normals_.begin(), output->normals_.end(),
[] __device__(Eigen::Vector3f & nl) { nl.normalize(); });
}
utility::LogDebug(
"Pointcloud down sampled from {:d} points to {:d} points.\n",
(int)points_.size(), (int)output->points_.size());
return output;
}
std::shared_ptr<PointCloud> PointCloud::UniformDownSample(
size_t every_k_points) const {
const bool has_normals = HasNormals();
const bool has_colors = HasColors();
auto output = std::make_shared<PointCloud>();
if (every_k_points == 0) {
utility::LogError("[UniformDownSample] Illegal sample rate.");
return output;
}
const int n_out = points_.size() / every_k_points;
output->points_.resize(n_out);
if (has_normals) output->normals_.resize(n_out);
if (has_colors) output->colors_.resize(n_out);
thrust::strided_range<
utility::device_vector<Eigen::Vector3f>::const_iterator>
range_points(points_.begin(), points_.end(), every_k_points);
thrust::copy(utility::exec_policy(utility::GetStream(0))
->on(utility::GetStream(0)),
range_points.begin(), range_points.end(),
output->points_.begin());
if (has_normals) {
thrust::strided_range<
utility::device_vector<Eigen::Vector3f>::const_iterator>
range_normals(normals_.begin(), normals_.end(), every_k_points);
thrust::copy(utility::exec_policy(utility::GetStream(1))
->on(utility::GetStream(1)),
range_normals.begin(), range_normals.end(),
output->normals_.begin());
}
if (has_colors) {
thrust::strided_range<
utility::device_vector<Eigen::Vector3f>::const_iterator>
range_colors(colors_.begin(), colors_.end(), every_k_points);
thrust::copy(utility::exec_policy(utility::GetStream(2))
->on(utility::GetStream(2)),
range_colors.begin(), range_colors.end(),
output->colors_.begin());
}
cudaSafeCall(cudaDeviceSynchronize());
return output;
}
std::tuple<std::shared_ptr<PointCloud>, utility::device_vector<size_t>>
PointCloud::RemoveRadiusOutliers(size_t nb_points, float search_radius) const {
if (nb_points < 1 || search_radius <= 0) {
utility::LogError(
"[RemoveRadiusOutliers] Illegal input parameters,"
"number of points and radius must be positive");
}
KDTreeFlann kdtree;
kdtree.SetGeometry(*this);
utility::device_vector<int> tmp_indices;
utility::device_vector<float> dist;
kdtree.SearchRadius(points_, search_radius, tmp_indices, dist);
const size_t n_pt = points_.size();
utility::device_vector<size_t> indices(n_pt);
has_radius_points_functor func(thrust::raw_pointer_cast(tmp_indices.data()),
nb_points, NUM_MAX_NN);
auto end = thrust::copy_if(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(n_pt),
indices.begin(), func);
indices.resize(thrust::distance(indices.begin(), end));
return std::make_tuple(SelectDownSample(indices), indices);
}
std::tuple<std::shared_ptr<PointCloud>, utility::device_vector<size_t>>
PointCloud::RemoveStatisticalOutliers(size_t nb_neighbors,
float std_ratio) const {
if (nb_neighbors < 1 || std_ratio <= 0) {
utility::LogError(
"[RemoveStatisticalOutliers] Illegal input parameters, number "
"of neighbors and standard deviation ratio must be positive");
}
if (points_.empty()) {
return std::make_tuple(std::make_shared<PointCloud>(),
utility::device_vector<size_t>());
}
KDTreeFlann kdtree;
kdtree.SetGeometry(*this);
const int n_pt = points_.size();
utility::device_vector<float> avg_distances(n_pt);
utility::device_vector<size_t> indices(n_pt);
utility::device_vector<int> tmp_indices;
utility::device_vector<float> dist;
kdtree.SearchKNN(points_, int(nb_neighbors), tmp_indices, dist);
average_distance_functor avg_func(thrust::raw_pointer_cast(dist.data()),
nb_neighbors);
thrust::transform(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator((size_t)n_pt),
avg_distances.begin(), avg_func);
const size_t valid_distances =
thrust::count_if(avg_distances.begin(), avg_distances.end(),
[] __device__(float x) { return (x >= 0.0); });
if (valid_distances == 0) {
return std::make_tuple(std::make_shared<PointCloud>(),
utility::device_vector<size_t>());
}
    float cloud_mean =
            thrust::reduce(avg_distances.begin(), avg_distances.end(), 0.0f,
                           [] __device__(float const &x, float const &y) {
                               // Treat the -1 sentinels as 0 so the operator is
                               // commutative/associative for the parallel reduction.
                               return ((x > 0) ? x : 0.0f) + ((y > 0) ? y : 0.0f);
                           });
cloud_mean /= valid_distances;
const float sq_sum = thrust::transform_reduce(
avg_distances.begin(), avg_distances.end(),
[cloud_mean] __device__(const float x) {
return (x > 0) ? (x - cloud_mean) * (x - cloud_mean) : 0;
},
0.0, thrust::plus<float>());
// Bessel's correction
const float std_dev = std::sqrt(sq_sum / (valid_distances - 1));
const float distance_threshold = cloud_mean + std_ratio * std_dev;
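    // Points whose mean neighbor distance reaches this threshold (or that had no
    // valid neighbors at all) are dropped as outliers below.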
check_distance_threshold_functor th_func(
thrust::raw_pointer_cast(avg_distances.data()), distance_threshold);
auto end = thrust::copy_if(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator((size_t)n_pt),
indices.begin(), th_func);
indices.resize(thrust::distance(indices.begin(), end));
return std::make_tuple(SelectDownSample(indices), indices);
}
|
5f2774d8f2f9a1b1156f5d9f3041cd5b4a950d20.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <fstream>
// measure time
#include <windows.h>
#include <time.h>
#include <Constants.h>
#include <ComputationalModel.h>
#include <ArrayAdditionModel.h>
#include <DotMultiplicationModel.h>
#include <MatrixMultiplicationModel.h>
#include <random>
#include <string>
#include <thread>
#include <future>
#include<cmath>
using namespace std;
int main()
{
LARGE_INTEGER start, stop, clockFreq;
ofstream outfile;
QueryPerformanceFrequency(&clockFreq);
double delay;
int elapsedTime;
// ---Random Seed Value---
srand(5);
MatrixMultiplicationModel<numericalType1> matmulmodel(4);
matmulmodel.clearLogs(); // empty the performance matrix log file
const int step = 32;
const int levels = 3;
const int spaceLength = pow(levels, 3);
myDim3**matrixSpace = new myDim3*[spaceLength];
int lengthX, lengthY, lengthZ, counter = 0;
for (int l = step; l <= levels * step; l += step)
for (int m = step; m <= levels * step; m += step)
for (int n = step; n <= levels * step; n += step) {
matrixSpace[counter++] = new myDim3(l,m,n);
}
// for (int space = 0; space < spaceLength; space++) {
// cout << matrixSpace[space].x << ", " << matrixSpace[space].y << ", " << matrixSpace[space].z << endl;
// }
numericalType1** arraySet1 = new numericalType1 * [EXPERIMENT_COUNT];
numericalType1** arraySet2 = new numericalType1 * [EXPERIMENT_COUNT];
int x, k, fileNum, length, widthCount, width;
int arrayLength[EXPERIMENT_COUNT];
myDim3* dimension;
numericalType1* mat1, * mat2;
numericalType1* matOut = nullptr; // only allocated in case 1; keep null otherwise so the final delete[] is safe
bool iSmall;
switch (INPUT_NATURE) {
case 1:
/*********Generate Aligned Binary Input Stream*********/
widthCount = 0, width = rand() % MAX_WIDTH_ALIGNED + 1;
iSmall = true;
for (x = 0; x < spaceLength; x++) {
if (++widthCount > width) {
//cout << "width: " << width << endl << endl;
widthCount = 0;
width = rand() % (MAX_WIDTH_ALIGNED - MIN_WIDTH_ALIGNED) + MIN_WIDTH_ALIGNED;
iSmall = !iSmall;
}
if (iSmall) dimension = matrixSpace[0];
else dimension = matrixSpace[4];
//cout << "length: " << length << endl;
int l1 = matrixSpace[x]->x * matrixSpace[x]->y, l2 = matrixSpace[x]->z * matrixSpace[x]->y, l3 = matrixSpace[x]->x * matrixSpace[x]->z;
numericalType1* temp1 = new numericalType1[l1];
numericalType1* temp2 = new numericalType1[l2];
matOut = new numericalType1[l3];
arraySet1[x] = temp1;
arraySet2[x] = temp2;
for (k = 0; k < l1; k++)
temp1[k] = rand() % RANGE_OF_INT_VALUES;
for (k = 0; k < l2; k++)
temp2[k] = rand() % RANGE_OF_INT_VALUES;
}
break;
case 2:
// *********Generate Aligned Square Wave Input Stream*********
widthCount = 0, width = rand() % MAX_WIDTH_ALIGNED + 1;
iSmall = true;
for (x = 0; x < EXPERIMENT_COUNT; x++) {
if (++widthCount > width) {
//cout << "width: " << width << endl << endl;
widthCount = 0;
width = rand() % (MAX_WIDTH_ALIGNED - MIN_WIDTH_ALIGNED) + MIN_WIDTH_ALIGNED;
iSmall = !iSmall;
}
//cout << "length: " << length << endl;
arrayLength[x] = length;
numericalType1* temp1 = new numericalType1[length];
numericalType1* temp2 = new numericalType1[length];
arraySet1[x] = temp1;
arraySet2[x] = temp2;
for (k = 0; k < length; k++) {
temp1[k] = rand() % RANGE_OF_INT_VALUES;
temp2[k] = rand() % RANGE_OF_INT_VALUES;
}
}
break;
case 3:
/*********Generate Odd Input Stream*********/
for (x = 0; x < EXPERIMENT_COUNT; x++) {
length = rand() % ARRAY_MAX_LENGTH + 1;
//cout << "length: " << length << endl;
arrayLength[x] = length;
numericalType1* temp1 = new numericalType1[length];
numericalType1* temp2 = new numericalType1[length];
arraySet1[x] = temp1;
arraySet2[x] = temp2;
for (k = 0; k < length; k++) {
temp1[k] = rand() % RANGE_OF_INT_VALUES;
temp2[k] = rand() % RANGE_OF_INT_VALUES;
}
}
break;
case 4:
/*********Generate GPU Specific Input Stream*********/
for (x = 0; x < EXPERIMENT_COUNT; x++) {
length = rand() % (ARRAY_MAX_LENGTH - SMALL_ARRAY_MAX_LENGTH) + SMALL_ARRAY_MAX_LENGTH + 1;
//cout << "length: " << length << endl;
arrayLength[x] = length;
numericalType1* temp1 = new numericalType1[length];
numericalType1* temp2 = new numericalType1[length];
arraySet1[x] = temp1;
arraySet2[x] = temp2;
for (k = 0; k < length; k++) {
temp1[k] = rand() % RANGE_OF_INT_VALUES;
temp2[k] = rand() % RANGE_OF_INT_VALUES;
}
}
break;
case 5:
/*********Generate CPU Specific Input Stream*********/
for (x = 0; x < EXPERIMENT_COUNT; x++) {
length = rand() % SMALL_ARRAY_MAX_LENGTH + 1;
//cout << "length: " << length << endl;
arrayLength[x] = length;
numericalType1* temp1 = new numericalType1[length];
numericalType1* temp2 = new numericalType1[length];
arraySet1[x] = temp1;
arraySet2[x] = temp2;
for (k = 0; k < length; k++) {
temp1[k] = rand() % RANGE_OF_INT_VALUES;
temp2[k] = rand() % RANGE_OF_INT_VALUES;
}
}
break;
}
matmulmodel.setData(arraySet1[0], arraySet2[0], matOut, matrixSpace[0]); // to initialise GPU to avoid initialization overhead
matmulmodel.executeAndLogging(2); // to initialise GPU to avoid initialization overhead
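// Timed pass: force every multiplication onto the GPU (mode 2) over the whole matrix space.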
QueryPerformanceCounter(&start);
for (x = 0; x < spaceLength; x++) {
matmulmodel.setData(arraySet1[x], arraySet2[x], matOut, matrixSpace[x]);
matmulmodel.executeAndLogging(2);
}
QueryPerformanceCounter(&stop);
delay = (double)(stop.QuadPart - start.QuadPart) / (double)clockFreq.QuadPart;
int elapsedTimeGPU = int(delay * 1000);
matmulmodel.logExTime("\n\n"); // add new line in logging file
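// Timed pass: force CPU execution (mode 1) over the same matrix space.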
QueryPerformanceCounter(&start);
for (x = 0; x < spaceLength; x++) {
matmulmodel.setData(arraySet1[x], arraySet2[x], matOut, matrixSpace[x]);
matmulmodel.executeAndLogging(1);
}
QueryPerformanceCounter(&stop);
delay = (double)(stop.QuadPart - start.QuadPart) / (double)clockFreq.QuadPart;
int elapsedTimeCPU = int(delay * 1000);
matmulmodel.logExTime("\n\n"); // add new line in logging file
matmulmodel.setData(arraySet1[0], arraySet2[0], matOut, matrixSpace[0]); // to initialise GPU to avoid initialization overhead
matmulmodel.executeAndLogging(2); // to initialise GPU to avoid initialization overhead
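// Timed pass: let the model choose the processor itself (no mode argument); reported as SelfFlow.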
QueryPerformanceCounter(&start);
for (x = 0; x < spaceLength; x++) {
matmulmodel.setData(arraySet1[x], arraySet2[x], matOut, matrixSpace[x]);
matmulmodel.executeAndLogging();
}
QueryPerformanceCounter(&stop);
delay = (double)(stop.QuadPart - start.QuadPart) / (double)clockFreq.QuadPart;
int elapsedAutoTime = int(delay * 1000);
matmulmodel.logExTime("\n\n"); // add new line in logging file
matmulmodel.setData(arraySet1[0], arraySet2[0], matOut, matrixSpace[0]); // to initialise GPU to avoid initialization overhead
matmulmodel.executeAndLogging(2); // to initialise GPU to avoid initialization overhead
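// Timed pass: processor selection driven by the ML model via executeByMLAndLogging(); reported as ML Flow.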
QueryPerformanceCounter(&start);
for (x = 0; x < spaceLength; x++) {
matmulmodel.setData(arraySet1[x], arraySet2[x], matOut, matrixSpace[x]);
matmulmodel.executeByMLAndLogging();
}
QueryPerformanceCounter(&stop);
delay = (double)(stop.QuadPart - start.QuadPart) / (double)clockFreq.QuadPart;
int elapsedML = int(delay * 1000);
matmulmodel.logExTime("\n\n"); // add new line in logging file
cout << "CPU:\t" << elapsedTimeCPU << "\tGPU:\t" << elapsedTimeGPU << "\tSelfFlow:\t" << elapsedAutoTime<< "\tML Flow:\t" << elapsedML << endl;
for (int ex = 0; ex < spaceLength; ex++) {
	delete[] arraySet1[ex];  // allocated with new[], so release with delete[], not free()
	delete[] arraySet2[ex];
}
for (int space = 0; space < spaceLength; space++)
	delete matrixSpace[space];
delete[] matrixSpace;
delete[] matOut;
return 0;
}
|
5f2774d8f2f9a1b1156f5d9f3041cd5b4a950d20.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <fstream>
// measure time
#include <windows.h>
#include <time.h>
#include <Constants.h>
#include <ComputationalModel.h>
#include <ArrayAdditionModel.h>
#include <DotMultiplicationModel.h>
#include <MatrixMultiplicationModel.h>
#include <random>
#include <string>
#include <thread>
#include <future>
#include<cmath>
using namespace std;
int main()
{
LARGE_INTEGER start, stop, clockFreq;
ofstream outfile;
QueryPerformanceFrequency(&clockFreq);
double delay;
int elapsedTime;
// ---Random Seed Value---
srand(5);
MatrixMultiplicationModel<numericalType1> matmulmodel(4);
matmulmodel.clearLogs(); // empty the performance matrix log file
const int step = 32;
const int levels = 3;
const int spaceLength = pow(levels, 3);
myDim3**matrixSpace = new myDim3*[spaceLength];
int lengthX, lengthY, lengthZ, counter = 0;
for (int l = step; l <= levels * step; l += step)
for (int m = step; m <= levels * step; m += step)
for (int n = step; n <= levels * step; n += step) {
matrixSpace[counter++] = new myDim3(l,m,n);
}
// for (int space = 0; space < spaceLength; space++) {
// cout << matrixSpace[space].x << ", " << matrixSpace[space].y << ", " << matrixSpace[space].z << endl;
// }
numericalType1** arraySet1 = new numericalType1 * [EXPERIMENT_COUNT];
numericalType1** arraySet2 = new numericalType1 * [EXPERIMENT_COUNT];
int x, k, fileNum, length, widthCount, width;
int arrayLength[EXPERIMENT_COUNT];
myDim3* dimension;
numericalType1* mat1, * mat2;
numericalType1* matOut = nullptr; // only allocated in case 1; keep null otherwise so the final delete[] is safe
bool iSmall;
switch (INPUT_NATURE) {
case 1:
/*********Generate Aligned Binary Input Stream*********/
widthCount = 0, width = rand() % MAX_WIDTH_ALIGNED + 1;
iSmall = true;
for (x = 0; x < spaceLength; x++) {
if (++widthCount > width) {
//cout << "width: " << width << endl << endl;
widthCount = 0;
width = rand() % (MAX_WIDTH_ALIGNED - MIN_WIDTH_ALIGNED) + MIN_WIDTH_ALIGNED;
iSmall = !iSmall;
}
if (iSmall) dimension = matrixSpace[0];
else dimension = matrixSpace[4];
//cout << "length: " << length << endl;
int l1 = matrixSpace[x]->x * matrixSpace[x]->y, l2 = matrixSpace[x]->z * matrixSpace[x]->y, l3 = matrixSpace[x]->x * matrixSpace[x]->z;
numericalType1* temp1 = new numericalType1[l1];
numericalType1* temp2 = new numericalType1[l2];
matOut = new numericalType1[l3];
arraySet1[x] = temp1;
arraySet2[x] = temp2;
for (k = 0; k < l1; k++)
temp1[k] = rand() % RANGE_OF_INT_VALUES;
for (k = 0; k < l2; k++)
temp2[k] = rand() % RANGE_OF_INT_VALUES;
}
break;
case 2:
// *********Generate Aligned Square Wave Input Stream*********
widthCount = 0, width = rand() % MAX_WIDTH_ALIGNED + 1;
iSmall = true;
for (x = 0; x < EXPERIMENT_COUNT; x++) {
if (++widthCount > width) {
//cout << "width: " << width << endl << endl;
widthCount = 0;
width = rand() % (MAX_WIDTH_ALIGNED - MIN_WIDTH_ALIGNED) + MIN_WIDTH_ALIGNED;
iSmall = !iSmall;
}
//cout << "length: " << length << endl;
arrayLength[x] = length;
numericalType1* temp1 = new numericalType1[length];
numericalType1* temp2 = new numericalType1[length];
arraySet1[x] = temp1;
arraySet2[x] = temp2;
for (k = 0; k < length; k++) {
temp1[k] = rand() % RANGE_OF_INT_VALUES;
temp2[k] = rand() % RANGE_OF_INT_VALUES;
}
}
break;
case 3:
/*********Generate Odd Input Stream*********/
for (x = 0; x < EXPERIMENT_COUNT; x++) {
length = rand() % ARRAY_MAX_LENGTH + 1;
//cout << "length: " << length << endl;
arrayLength[x] = length;
numericalType1* temp1 = new numericalType1[length];
numericalType1* temp2 = new numericalType1[length];
arraySet1[x] = temp1;
arraySet2[x] = temp2;
for (k = 0; k < length; k++) {
temp1[k] = rand() % RANGE_OF_INT_VALUES;
temp2[k] = rand() % RANGE_OF_INT_VALUES;
}
}
break;
case 4:
/*********Generate GPU Specific Input Stream*********/
for (x = 0; x < EXPERIMENT_COUNT; x++) {
length = rand() % (ARRAY_MAX_LENGTH - SMALL_ARRAY_MAX_LENGTH) + SMALL_ARRAY_MAX_LENGTH + 1;
//cout << "length: " << length << endl;
arrayLength[x] = length;
numericalType1* temp1 = new numericalType1[length];
numericalType1* temp2 = new numericalType1[length];
arraySet1[x] = temp1;
arraySet2[x] = temp2;
for (k = 0; k < length; k++) {
temp1[k] = rand() % RANGE_OF_INT_VALUES;
temp2[k] = rand() % RANGE_OF_INT_VALUES;
}
}
break;
case 5:
/*********Generate CPU Specific Input Stream*********/
for (x = 0; x < EXPERIMENT_COUNT; x++) {
length = rand() % SMALL_ARRAY_MAX_LENGTH + 1;
//cout << "length: " << length << endl;
arrayLength[x] = length;
numericalType1* temp1 = new numericalType1[length];
numericalType1* temp2 = new numericalType1[length];
arraySet1[x] = temp1;
arraySet2[x] = temp2;
for (k = 0; k < length; k++) {
temp1[k] = rand() % RANGE_OF_INT_VALUES;
temp2[k] = rand() % RANGE_OF_INT_VALUES;
}
}
break;
}
matmulmodel.setData(arraySet1[0], arraySet2[0], matOut, matrixSpace[0]); // to initialise GPU to avoid initialization overhead
matmulmodel.executeAndLogging(2); // to initialise GPU to avoid initialization overhead
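// Timed pass: force every multiplication onto the GPU (mode 2) over the whole matrix space.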
QueryPerformanceCounter(&start);
for (x = 0; x < spaceLength; x++) {
matmulmodel.setData(arraySet1[x], arraySet2[x], matOut, matrixSpace[x]);
matmulmodel.executeAndLogging(2);
}
QueryPerformanceCounter(&stop);
delay = (double)(stop.QuadPart - start.QuadPart) / (double)clockFreq.QuadPart;
int elapsedTimeGPU = int(delay * 1000);
matmulmodel.logExTime("\n\n"); // add new line in logging file
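// Timed pass: force CPU execution (mode 1) over the same matrix space.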
QueryPerformanceCounter(&start);
for (x = 0; x < spaceLength; x++) {
matmulmodel.setData(arraySet1[x], arraySet2[x], matOut, matrixSpace[x]);
matmulmodel.executeAndLogging(1);
}
QueryPerformanceCounter(&stop);
delay = (double)(stop.QuadPart - start.QuadPart) / (double)clockFreq.QuadPart;
int elapsedTimeCPU = int(delay * 1000);
matmulmodel.logExTime("\n\n"); // add new line in logging file
matmulmodel.setData(arraySet1[0], arraySet2[0], matOut, matrixSpace[0]); // to initialise GPU to avoid initialization overhead
matmulmodel.executeAndLogging(2); // to initialise GPU to avoid initialization overhead
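// Timed pass: let the model choose the processor itself (no mode argument); reported as SelfFlow.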
QueryPerformanceCounter(&start);
for (x = 0; x < spaceLength; x++) {
matmulmodel.setData(arraySet1[x], arraySet2[x], matOut, matrixSpace[x]);
matmulmodel.executeAndLogging();
}
QueryPerformanceCounter(&stop);
delay = (double)(stop.QuadPart - start.QuadPart) / (double)clockFreq.QuadPart;
int elapsedAutoTime = int(delay * 1000);
matmulmodel.logExTime("\n\n"); // add new line in logging file
matmulmodel.setData(arraySet1[0], arraySet2[0], matOut, matrixSpace[0]); // to initialise GPU to avoid initialization overhead
matmulmodel.executeAndLogging(2); // to initialise GPU to avoid initialization overhead
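// Timed pass: processor selection driven by the ML model via executeByMLAndLogging(); reported as ML Flow.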
QueryPerformanceCounter(&start);
for (x = 0; x < spaceLength; x++) {
matmulmodel.setData(arraySet1[x], arraySet2[x], matOut, matrixSpace[x]);
matmulmodel.executeByMLAndLogging();
}
QueryPerformanceCounter(&stop);
delay = (double)(stop.QuadPart - start.QuadPart) / (double)clockFreq.QuadPart;
int elapsedML = int(delay * 1000);
matmulmodel.logExTime("\n\n"); // add new line in logging file
cout << "CPU:\t" << elapsedTimeCPU << "\tGPU:\t" << elapsedTimeGPU << "\tSelfFlow:\t" << elapsedAutoTime<< "\tML Flow:\t" << elapsedML << endl;
for (int ex = 0; ex < spaceLength; ex++) {
	delete[] arraySet1[ex];  // allocated with new[], so release with delete[], not free()
	delete[] arraySet2[ex];
}
for (int space = 0; space < spaceLength; space++)
	delete matrixSpace[space];
delete[] matrixSpace;
delete[] matOut;
return 0;
}
|
07d761cd68a98c367b305586b84db3e0334af5a3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2020 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
Tensor FFModel::exp(const Tensor& x)
{
ElementUnary *ele = new ElementUnary(*this, ElementUnary::EW_EXP, x);
layers.push_back(ele);
return ele->outputs[0];
}
ElementUnary* FFModel::exp()
{
ElementUnary* ele = new ElementUnary(*this, ElementUnary::EW_EXP);
layers.push_back(ele);
return ele;
}
Tensor FFModel::relu(const Tensor& x)
{
ElementUnary *ele = new ElementUnary(*this, ElementUnary::EW_RELU, x);
layers.push_back(ele);
return ele->outputs[0];
}
ElementUnary* FFModel::relu()
{
ElementUnary* ele = new ElementUnary(*this, ElementUnary::EW_RELU);
layers.push_back(ele);
return ele;
}
Tensor FFModel::sigmoid(const Tensor& x)
{
ElementUnary *ele = new ElementUnary(*this, ElementUnary::EW_SIGMOID, x);
layers.push_back(ele);
return ele->outputs[0];
}
ElementUnary* FFModel::sigmoid()
{
ElementUnary* ele = new ElementUnary(*this, ElementUnary::EW_SIGMOID);
layers.push_back(ele);
return ele;
}
Tensor FFModel::tanh(const Tensor& x)
{
ElementUnary *ele = new ElementUnary(*this, ElementUnary::EW_TANH, x);
layers.push_back(ele);
return ele->outputs[0];
}
ElementUnary* FFModel::tanh()
{
ElementUnary* ele = new ElementUnary(*this, ElementUnary::EW_TANH);
layers.push_back(ele);
return ele;
}
Tensor FFModel::elu(const Tensor& x)
{
ElementUnary *ele = new ElementUnary(*this, ElementUnary::EW_ELU, x);
layers.push_back(ele);
return ele->outputs[0];
}
ElementUnary* FFModel::elu()
{
ElementUnary* ele = new ElementUnary(*this, ElementUnary::EW_ELU);
layers.push_back(ele);
return ele;
}
ElementUnary::ElementUnary(FFModel& model,
ElementUnary::OpType _op_type,
const Tensor& x)
: Op(model, "ElementUnary_"+std::to_string(_op_type), x), op_type(_op_type)
{
outputs[0].numDim = inputs[0].numDim;
for (int i = 0; i < outputs[0].numDim; i++)
outputs[0].adim[i] = inputs[0].adim[i];
}
ElementUnary::ElementUnary(FFModel& model,
ElementUnary::OpType _op_type)
: Op(model, "ElementUnary_"+std::to_string(_op_type), 1), op_type(_op_type)
{}
Tensor ElementUnary::init_inout(FFModel& model,
const Tensor& input)
{
inputs[0] = input;
create_output_and_partition(model);
return outputs[0];
}
bool ElementUnary::use_cudnn() const
{
if (op_type == EW_RELU)
return true;
if (op_type == EW_SIGMOID)
return true;
if (op_type == EW_TANH)
return true;
if (op_type == EW_ELU)
return true;
return false;
}
/*
void ElementUnary::add_to_model(FFModel& model)
{
model.layers.push_back(this);
}
*/
void ElementUnary::create_weights(FFModel& model)
{
// Do nothing
}
void ElementUnary::create_output_and_partition(FFModel& model)
{
int dim = inputs[0].numDim;
switch (dim) {
case 1:
{
task_is = model.get_or_create_task_is(1, name);
create_output_and_partition_with_dim<1>(model);
break;
}
case 2:
{
task_is = model.get_or_create_task_is(2, name);
create_output_and_partition_with_dim<2>(model);
break;
}
case 3:
{
task_is = model.get_or_create_task_is(3, name);
create_output_and_partition_with_dim<3>(model);
break;
}
case 4:
{
task_is = model.get_or_create_task_is(4, name);
create_output_and_partition_with_dim<4>(model);
break;
}
default:
{
// Unsupported dim for ElementWiseUnary operator
assert(false);
}
}
}
template<int NDIM>
void ElementUnary::create_output_and_partition_with_dim(FFModel& model)
{
// Retrive the task indexspace for the op
task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, name));
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is);
int dims[NDIM];
for (int i = 0; i < NDIM; i++)
dims[i] = inputs[0].adim[NDIM-1-i];
outputs[0] = model.create_tensor<NDIM>(dims, IndexSpaceT<NDIM>(task_is), DT_FLOAT);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
Rect<NDIM> input_rect;
input_rect = runtime->get_index_partition_color_space(
ctx, inputs[0].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[0] = inputs[0].part;
input_grad_lps[0] = inputs[0].part_grad;
} else {
model.create_disjoint_partition<NDIM>(
inputs[0], IndexSpaceT<NDIM>(task_is), input_lps[0], input_grad_lps[0]);
}
}
OpMeta* ElementUnary::init_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
ElementUnary* eu = (ElementUnary*) task->args;
FFHandler handle = *((FFHandler*) task->local_args);
ElementUnaryMeta* m = new ElementUnaryMeta(handle);
checkCUDNN(cudnnCreateTensorDescriptor(&m->inputTensor));
checkCUDNN(cudnnCreateTensorDescriptor(&m->outputTensor));
checkCUDNN(cudnnCreateActivationDescriptor(&m->actiDesc));
if (eu->use_cudnn())
{
cudnnActivationMode_t mode;
switch (eu->op_type) {
case EW_SIGMOID:
mode = CUDNN_ACTIVATION_SIGMOID;
break;
case EW_RELU:
mode = CUDNN_ACTIVATION_RELU;
break;
case EW_TANH:
mode = CUDNN_ACTIVATION_TANH;
break;
case EW_ELU:
mode = CUDNN_ACTIVATION_ELU;
break;
default:
assert(false);
}
checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, mode,
CUDNN_PROPAGATE_NAN, 0.0));
Domain input_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
    Domain output_domain = runtime->get_index_space_domain(
      ctx, task->regions[1].region.get_index_space());
checkCUDNN(cudnnSetTensorDescriptorFromDomain(m->inputTensor, input_domain));
checkCUDNN(cudnnSetTensorDescriptorFromDomain(m->outputTensor, output_domain));
}
return m;
}
void ElementUnary::init(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
switch (domain.get_dim()) {
case 1:
{
Rect<1> rect = domain;
int idx = 0;
for (PointInRectIterator<1> it(rect); it(); it++) {
FFHandler handle = ff.handlers[idx++];
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler)));
}
break;
}
case 2:
{
Rect<2> rect = domain;
int idx = 0;
for (PointInRectIterator<2> it(rect); it(); it++) {
FFHandler handle = ff.handlers[idx++];
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler)));
}
break;
}
case 3:
{
Rect<3> rect = domain;
int idx = 0;
for (PointInRectIterator<3> it(rect); it(); it++) {
FFHandler handle = ff.handlers[idx++];
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler)));
}
break;
}
case 4:
{
Rect<4> rect = domain;
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
FFHandler handle = ff.handlers[idx++];
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler)));
}
break;
}
default:
assert(false);
}
IndexLauncher init_launcher(ELEMENTUNARY_INIT_TASK_ID, task_is,
TaskArgument(this, sizeof(ElementUnary)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
init_launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
init_launcher.add_field(0, FID_DATA);
init_launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
init_launcher.add_field(1, FID_DATA);
FutureMap fm = runtime->execute_index_space(ctx, init_launcher);
fm.wait_all_results();
switch (domain.get_dim()) {
case 1:
{
Rect<1> rect = domain;
int idx = 0;
for (PointInRectIterator<1> it(rect); it(); it++) {
meta[idx++] = fm.get_result<OpMeta*>(*it);
}
break;
}
case 2:
{
Rect<2> rect = domain;
int idx = 0;
for (PointInRectIterator<2> it(rect); it(); it++) {
meta[idx++] = fm.get_result<OpMeta*>(*it);
}
break;
}
case 3:
{
Rect<3> rect = domain;
int idx = 0;
for (PointInRectIterator<3> it(rect); it(); it++) {
meta[idx++] = fm.get_result<OpMeta*>(*it);
}
break;
}
case 4:
{
Rect<4> rect = domain;
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
meta[idx++] = fm.get_result<OpMeta*>(*it);
}
break;
}
}
}
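// Element-wise forward kernel: only EW_EXP is computed here; the activations with
// cuDNN support (relu/sigmoid/tanh/elu) are dispatched to cudnnActivationForward
// in forward_task instead.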
__global__
void elewise_unary_forward_kernel(coord_t volume,
const float alpha,
const float beta,
ElementUnary::OpType type,
const float* in,
float* out)
{
CUDA_KERNEL_LOOP(i, volume)
{
switch (type) {
case ElementUnary::EW_EXP:
{
out[i] = alpha * exp(in[i]) + beta * out[i];
break;
}
default:
assert(false);
}
}
}
/*
regions[0](I): input
regions[1](O): output
*/
__host__
void ElementUnary::forward_task(const Task* task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime* runtime)
{
float alpha = 1.0f, beta = 0.0f;
assert(regions.size() == 2);
assert(task->regions.size() == 2);
const ElementUnary* ele = (const ElementUnary*) task->args;
const ElementUnaryMeta* m = *((ElementUnaryMeta**) task->local_args);
Domain input_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Domain output_domain = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
assert(output_domain == input_domain);
const float* input_ptr = helperGetTensorPointerRO<float>(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
float* output_ptr = helperGetTensorPointerWO<float>(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
#ifndef DISABLE_LEGION_CUDA_HIJACK
hipStream_t stream;
checkCUDA(hipStreamCreate(&stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
if (ele->use_cudnn()) {
checkCUDNN(cudnnActivationForward(m->handle.dnn, m->actiDesc,
&alpha, m->inputTensor, input_ptr,
&beta, m->outputTensor, output_ptr));
} else {
hipLaunchKernelGGL(( elewise_unary_forward_kernel), dim3(GET_BLOCKS(output_domain.get_volume())), dim3(CUDA_NUM_THREADS), 0, 0,
output_domain.get_volume(), alpha, beta, ele->op_type, input_ptr, output_ptr);
}
}
void ElementUnary::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
switch (domain.get_dim()) {
case 1:
{
Rect<1> rect = domain;
int idx = 0;
for (PointInRectIterator<1> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
break;
}
case 2:
{
Rect<2> rect = domain;
int idx = 0;
for (PointInRectIterator<2> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
break;
}
case 3:
{
Rect<3> rect = domain;
int idx = 0;
for (PointInRectIterator<3> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
break;
}
case 4:
{
Rect<4> rect = domain;
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
break;
}
default:
assert(false);
}
IndexLauncher launcher(ELEMENTUNARY_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(ElementUnary)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
__global__
void elewise_unary_backward_kernel(coord_t volume,
const float alpha,
const float beta,
ElementUnary::OpType type,
const float* output_grad,
const float* input,
float* input_grad)
{
CUDA_KERNEL_LOOP(i, volume)
{
switch (type) {
case ElementUnary::EW_EXP:
{
//TODO: change to use output instead of recomputing
input_grad[i] = alpha * output_grad[i] * exp(input[i]) + beta * input_grad[i];
break;
}
default:
assert(false);
}
}
}
/*
regions[0](I): input
regions[1](I/O): input_grad
regions[2](I): output
regions[3](I): output_grad
*/
__host__
void ElementUnary::backward_task(const Task* task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime* runtime)
{
float alpha = 1.0f;
assert(regions.size() == 4);
assert(task->regions.size() == 4);
const ElementUnary* ele = (const ElementUnary*) task->args;
const ElementUnaryMeta* m = *((ElementUnaryMeta**) task->local_args);
Domain input_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Domain input_grad_domain = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Domain output_domain = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
Domain output_grad_domain = runtime->get_index_space_domain(
ctx, task->regions[3].region.get_index_space());
assert(output_grad_domain == input_domain);
assert(output_grad_domain == output_domain);
assert(output_grad_domain == input_grad_domain);
const float* input_ptr = helperGetTensorPointerRO<float>(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
float* input_grad_ptr = helperGetTensorPointerRW<float>(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
const float* output_ptr = helperGetTensorPointerRO<float>(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
const float* output_grad_ptr = helperGetTensorPointerRO<float>(
regions[3], task->regions[3], FID_DATA, ctx, runtime);
if (ele->use_cudnn()) {
checkCUDNN(cudnnActivationBackward(m->handle.dnn, m->actiDesc,
&alpha, m->outputTensor, output_ptr, m->outputTensor, output_grad_ptr,
m->inputTensor, input_ptr, &alpha, m->inputTensor, input_grad_ptr));
} else {
hipLaunchKernelGGL(( elewise_unary_backward_kernel), dim3(GET_BLOCKS(input_domain.get_volume())), dim3(CUDA_NUM_THREADS), 0, 0,
input_domain.get_volume(), alpha, alpha, ele->op_type, output_grad_ptr, input_ptr, input_grad_ptr);
}
}
void ElementUnary::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
switch (domain.get_dim()) {
case 1:
{
Rect<1> rect = domain;
int idx = 0;
for (PointInRectIterator<1> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
break;
}
case 2:
{
Rect<2> rect = domain;
int idx = 0;
for (PointInRectIterator<2> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
break;
}
case 3:
{
Rect<3> rect = domain;
int idx = 0;
for (PointInRectIterator<3> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
break;
}
case 4:
{
Rect<4> rect = domain;
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
break;
}
}
IndexLauncher launcher(ELEMENTUNARY_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(ElementUnary)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
// regions[0](I): input
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
// regions[1](I/O): input_grad
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[0], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(1, FID_DATA);
  // regions[2](I): output
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(2, FID_DATA);
// regions[3](I): output_grad
launcher.add_region_requirement(
RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(3, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
bool ElementUnary::measure_compute_time(Simulator* sim,
const ParallelConfig& pc,
float& forward_time,
float& backward_time)
{
//TODO: implement measure_forward
return false;
}
|
07d761cd68a98c367b305586b84db3e0334af5a3.cu
|
/* Copyright 2020 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
Tensor FFModel::exp(const Tensor& x)
{
ElementUnary *ele = new ElementUnary(*this, ElementUnary::EW_EXP, x);
layers.push_back(ele);
return ele->outputs[0];
}
ElementUnary* FFModel::exp()
{
ElementUnary* ele = new ElementUnary(*this, ElementUnary::EW_EXP);
layers.push_back(ele);
return ele;
}
Tensor FFModel::relu(const Tensor& x)
{
ElementUnary *ele = new ElementUnary(*this, ElementUnary::EW_RELU, x);
layers.push_back(ele);
return ele->outputs[0];
}
ElementUnary* FFModel::relu()
{
ElementUnary* ele = new ElementUnary(*this, ElementUnary::EW_RELU);
layers.push_back(ele);
return ele;
}
Tensor FFModel::sigmoid(const Tensor& x)
{
ElementUnary *ele = new ElementUnary(*this, ElementUnary::EW_SIGMOID, x);
layers.push_back(ele);
return ele->outputs[0];
}
ElementUnary* FFModel::sigmoid()
{
ElementUnary* ele = new ElementUnary(*this, ElementUnary::EW_SIGMOID);
layers.push_back(ele);
return ele;
}
Tensor FFModel::tanh(const Tensor& x)
{
ElementUnary *ele = new ElementUnary(*this, ElementUnary::EW_TANH, x);
layers.push_back(ele);
return ele->outputs[0];
}
ElementUnary* FFModel::tanh()
{
ElementUnary* ele = new ElementUnary(*this, ElementUnary::EW_TANH);
layers.push_back(ele);
return ele;
}
Tensor FFModel::elu(const Tensor& x)
{
ElementUnary *ele = new ElementUnary(*this, ElementUnary::EW_ELU, x);
layers.push_back(ele);
return ele->outputs[0];
}
ElementUnary* FFModel::elu()
{
ElementUnary* ele = new ElementUnary(*this, ElementUnary::EW_ELU);
layers.push_back(ele);
return ele;
}
ElementUnary::ElementUnary(FFModel& model,
ElementUnary::OpType _op_type,
const Tensor& x)
: Op(model, "ElementUnary_"+std::to_string(_op_type), x), op_type(_op_type)
{
outputs[0].numDim = inputs[0].numDim;
for (int i = 0; i < outputs[0].numDim; i++)
outputs[0].adim[i] = inputs[0].adim[i];
}
ElementUnary::ElementUnary(FFModel& model,
ElementUnary::OpType _op_type)
: Op(model, "ElementUnary_"+std::to_string(_op_type), 1), op_type(_op_type)
{}
Tensor ElementUnary::init_inout(FFModel& model,
const Tensor& input)
{
inputs[0] = input;
create_output_and_partition(model);
return outputs[0];
}
bool ElementUnary::use_cudnn() const
{
if (op_type == EW_RELU)
return true;
if (op_type == EW_SIGMOID)
return true;
if (op_type == EW_TANH)
return true;
if (op_type == EW_ELU)
return true;
return false;
}
/*
void ElementUnary::add_to_model(FFModel& model)
{
model.layers.push_back(this);
}
*/
void ElementUnary::create_weights(FFModel& model)
{
// Do nothing
}
void ElementUnary::create_output_and_partition(FFModel& model)
{
int dim = inputs[0].numDim;
switch (dim) {
case 1:
{
task_is = model.get_or_create_task_is(1, name);
create_output_and_partition_with_dim<1>(model);
break;
}
case 2:
{
task_is = model.get_or_create_task_is(2, name);
create_output_and_partition_with_dim<2>(model);
break;
}
case 3:
{
task_is = model.get_or_create_task_is(3, name);
create_output_and_partition_with_dim<3>(model);
break;
}
case 4:
{
task_is = model.get_or_create_task_is(4, name);
create_output_and_partition_with_dim<4>(model);
break;
}
default:
{
// Unsupported dim for ElementWiseUnary operator
assert(false);
}
}
}
template<int NDIM>
void ElementUnary::create_output_and_partition_with_dim(FFModel& model)
{
// Retrive the task indexspace for the op
task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, name));
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is);
int dims[NDIM];
for (int i = 0; i < NDIM; i++)
dims[i] = inputs[0].adim[NDIM-1-i];
outputs[0] = model.create_tensor<NDIM>(dims, IndexSpaceT<NDIM>(task_is), DT_FLOAT);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
Rect<NDIM> input_rect;
input_rect = runtime->get_index_partition_color_space(
ctx, inputs[0].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[0] = inputs[0].part;
input_grad_lps[0] = inputs[0].part_grad;
} else {
model.create_disjoint_partition<NDIM>(
inputs[0], IndexSpaceT<NDIM>(task_is), input_lps[0], input_grad_lps[0]);
}
}
OpMeta* ElementUnary::init_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
ElementUnary* eu = (ElementUnary*) task->args;
FFHandler handle = *((FFHandler*) task->local_args);
ElementUnaryMeta* m = new ElementUnaryMeta(handle);
checkCUDNN(cudnnCreateTensorDescriptor(&m->inputTensor));
checkCUDNN(cudnnCreateTensorDescriptor(&m->outputTensor));
checkCUDNN(cudnnCreateActivationDescriptor(&m->actiDesc));
if (eu->use_cudnn())
{
cudnnActivationMode_t mode;
switch (eu->op_type) {
case EW_SIGMOID:
mode = CUDNN_ACTIVATION_SIGMOID;
break;
case EW_RELU:
mode = CUDNN_ACTIVATION_RELU;
break;
case EW_TANH:
mode = CUDNN_ACTIVATION_TANH;
break;
case EW_ELU:
mode = CUDNN_ACTIVATION_ELU;
break;
default:
assert(false);
}
checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, mode,
CUDNN_PROPAGATE_NAN, 0.0));
Domain input_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
    Domain output_domain = runtime->get_index_space_domain(
      ctx, task->regions[1].region.get_index_space());
checkCUDNN(cudnnSetTensorDescriptorFromDomain(m->inputTensor, input_domain));
checkCUDNN(cudnnSetTensorDescriptorFromDomain(m->outputTensor, output_domain));
}
return m;
}
void ElementUnary::init(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
switch (domain.get_dim()) {
case 1:
{
Rect<1> rect = domain;
int idx = 0;
for (PointInRectIterator<1> it(rect); it(); it++) {
FFHandler handle = ff.handlers[idx++];
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler)));
}
break;
}
case 2:
{
Rect<2> rect = domain;
int idx = 0;
for (PointInRectIterator<2> it(rect); it(); it++) {
FFHandler handle = ff.handlers[idx++];
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler)));
}
break;
}
case 3:
{
Rect<3> rect = domain;
int idx = 0;
for (PointInRectIterator<3> it(rect); it(); it++) {
FFHandler handle = ff.handlers[idx++];
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler)));
}
break;
}
case 4:
{
Rect<4> rect = domain;
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
FFHandler handle = ff.handlers[idx++];
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler)));
}
break;
}
default:
assert(false);
}
IndexLauncher init_launcher(ELEMENTUNARY_INIT_TASK_ID, task_is,
TaskArgument(this, sizeof(ElementUnary)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
init_launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
init_launcher.add_field(0, FID_DATA);
init_launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
init_launcher.add_field(1, FID_DATA);
FutureMap fm = runtime->execute_index_space(ctx, init_launcher);
fm.wait_all_results();
switch (domain.get_dim()) {
case 1:
{
Rect<1> rect = domain;
int idx = 0;
for (PointInRectIterator<1> it(rect); it(); it++) {
meta[idx++] = fm.get_result<OpMeta*>(*it);
}
break;
}
case 2:
{
Rect<2> rect = domain;
int idx = 0;
for (PointInRectIterator<2> it(rect); it(); it++) {
meta[idx++] = fm.get_result<OpMeta*>(*it);
}
break;
}
case 3:
{
Rect<3> rect = domain;
int idx = 0;
for (PointInRectIterator<3> it(rect); it(); it++) {
meta[idx++] = fm.get_result<OpMeta*>(*it);
}
break;
}
case 4:
{
Rect<4> rect = domain;
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
meta[idx++] = fm.get_result<OpMeta*>(*it);
}
break;
}
}
}
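// Element-wise forward kernel: only EW_EXP is computed here; the activations with
// cuDNN support (relu/sigmoid/tanh/elu) are dispatched to cudnnActivationForward
// in forward_task instead.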
__global__
void elewise_unary_forward_kernel(coord_t volume,
const float alpha,
const float beta,
ElementUnary::OpType type,
const float* in,
float* out)
{
CUDA_KERNEL_LOOP(i, volume)
{
switch (type) {
case ElementUnary::EW_EXP:
{
out[i] = alpha * exp(in[i]) + beta * out[i];
break;
}
default:
assert(false);
}
}
}
/*
regions[0](I): input
regions[1](O): output
*/
__host__
void ElementUnary::forward_task(const Task* task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime* runtime)
{
float alpha = 1.0f, beta = 0.0f;
assert(regions.size() == 2);
assert(task->regions.size() == 2);
const ElementUnary* ele = (const ElementUnary*) task->args;
const ElementUnaryMeta* m = *((ElementUnaryMeta**) task->local_args);
Domain input_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Domain output_domain = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
assert(output_domain == input_domain);
const float* input_ptr = helperGetTensorPointerRO<float>(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
float* output_ptr = helperGetTensorPointerWO<float>(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
#ifndef DISABLE_LEGION_CUDA_HIJACK
cudaStream_t stream;
checkCUDA(cudaStreamCreate(&stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
if (ele->use_cudnn()) {
checkCUDNN(cudnnActivationForward(m->handle.dnn, m->actiDesc,
&alpha, m->inputTensor, input_ptr,
&beta, m->outputTensor, output_ptr));
} else {
elewise_unary_forward_kernel<<<GET_BLOCKS(output_domain.get_volume()), CUDA_NUM_THREADS>>>(
output_domain.get_volume(), alpha, beta, ele->op_type, input_ptr, output_ptr);
}
}
void ElementUnary::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
switch (domain.get_dim()) {
case 1:
{
Rect<1> rect = domain;
int idx = 0;
for (PointInRectIterator<1> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
break;
}
case 2:
{
Rect<2> rect = domain;
int idx = 0;
for (PointInRectIterator<2> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
break;
}
case 3:
{
Rect<3> rect = domain;
int idx = 0;
for (PointInRectIterator<3> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
break;
}
case 4:
{
Rect<4> rect = domain;
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
break;
}
default:
assert(false);
}
IndexLauncher launcher(ELEMENTUNARY_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(ElementUnary)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
__global__
void elewise_unary_backward_kernel(coord_t volume,
const float alpha,
const float beta,
ElementUnary::OpType type,
const float* output_grad,
const float* input,
float* input_grad)
{
CUDA_KERNEL_LOOP(i, volume)
{
switch (type) {
case ElementUnary::EW_EXP:
{
//TODO: change to use output instead of recomputing
input_grad[i] = alpha * output_grad[i] * exp(input[i]) + beta * input_grad[i];
break;
}
default:
assert(false);
}
}
}
/*
regions[0](I): input
regions[1](I/O): input_grad
regions[2](I): output
regions[3](I): output_grad
*/
__host__
void ElementUnary::backward_task(const Task* task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime* runtime)
{
float alpha = 1.0f;
assert(regions.size() == 4);
assert(task->regions.size() == 4);
const ElementUnary* ele = (const ElementUnary*) task->args;
const ElementUnaryMeta* m = *((ElementUnaryMeta**) task->local_args);
Domain input_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Domain input_grad_domain = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Domain output_domain = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
Domain output_grad_domain = runtime->get_index_space_domain(
ctx, task->regions[3].region.get_index_space());
assert(output_grad_domain == input_domain);
assert(output_grad_domain == output_domain);
assert(output_grad_domain == input_grad_domain);
const float* input_ptr = helperGetTensorPointerRO<float>(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
float* input_grad_ptr = helperGetTensorPointerRW<float>(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
const float* output_ptr = helperGetTensorPointerRO<float>(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
const float* output_grad_ptr = helperGetTensorPointerRO<float>(
regions[3], task->regions[3], FID_DATA, ctx, runtime);
if (ele->use_cudnn()) {
checkCUDNN(cudnnActivationBackward(m->handle.dnn, m->actiDesc,
&alpha, m->outputTensor, output_ptr, m->outputTensor, output_grad_ptr,
m->inputTensor, input_ptr, &alpha, m->inputTensor, input_grad_ptr));
} else {
elewise_unary_backward_kernel<<<GET_BLOCKS(input_domain.get_volume()), CUDA_NUM_THREADS>>>(
input_domain.get_volume(), alpha, alpha, ele->op_type, output_grad_ptr, input_ptr, input_grad_ptr);
}
}
void ElementUnary::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
switch (domain.get_dim()) {
case 1:
{
Rect<1> rect = domain;
int idx = 0;
for (PointInRectIterator<1> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
break;
}
case 2:
{
Rect<2> rect = domain;
int idx = 0;
for (PointInRectIterator<2> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
break;
}
case 3:
{
Rect<3> rect = domain;
int idx = 0;
for (PointInRectIterator<3> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
break;
}
case 4:
{
Rect<4> rect = domain;
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
break;
}
    default:
      assert(false);
  }
IndexLauncher launcher(ELEMENTUNARY_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(ElementUnary)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
// regions[0](I): input
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
// regions[1](I/O): input_grad
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[0], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(1, FID_DATA);
  // regions[2](I): output
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(2, FID_DATA);
// regions[3](I): output_grad
launcher.add_region_requirement(
RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(3, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
bool ElementUnary::measure_compute_time(Simulator* sim,
const ParallelConfig& pc,
float& forward_time,
float& backward_time)
{
  //TODO: implement measure_compute_time (forward and backward)
return false;
}
|
b9d94940a4d5139e2bdf3d46abdde2e6a528ebf8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <vector>
#include "caffe/layers/batch_norm_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<typename Dtype>
void BatchNormLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int_tp num = bottom[0]->shape(0);
int_tp spatial_dim = bottom[0]->count() / (channels_ * bottom[0]->shape(0));
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
if (bottom[0] != top[0]) {
caffe_copy(bottom[0]->count(), bottom_data, top_data);
}
if (use_global_stats_) {
// use the stored mean/variance estimates.
const Dtype scale_factor =
this->blobs_[2]->cpu_data()[0] == 0 ?
0 : 1 / this->blobs_[2]->cpu_data()[0];
caffe_gpu_scale(variance_.count(), scale_factor,
this->blobs_[0]->gpu_data(), mean_.mutable_gpu_data());
caffe_gpu_scale(variance_.count(), scale_factor,
this->blobs_[1]->gpu_data(),
variance_.mutable_gpu_data());
} else {
// compute mean
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim,
1. / (num * spatial_dim), bottom_data,
spatial_sum_multiplier_.gpu_data(), 0.,
num_by_chans_.mutable_gpu_data());
caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
num_by_chans_.gpu_data(),
batch_sum_multiplier_.gpu_data(), 0.,
mean_.mutable_gpu_data());
}
// subtract mean
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
batch_sum_multiplier_.gpu_data(), mean_.gpu_data(),
0., num_by_chans_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
spatial_dim, 1, -1, num_by_chans_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), 1., top_data);
if (!use_global_stats_) {
// compute variance using var(X) = E((X-EX)^2)
caffe_gpu_powx(top[0]->count(), top_data, Dtype(2),
temp_.mutable_gpu_data()); // (X-EX)^2
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim,
1. / (num * spatial_dim), temp_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), 0.,
num_by_chans_.mutable_gpu_data());
caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
num_by_chans_.gpu_data(),
batch_sum_multiplier_.gpu_data(), 0.,
                          variance_.mutable_gpu_data()); // E((X-EX)^2)
// compute and save moving average
this->blobs_[2]->mutable_cpu_data()[0] *= moving_average_fraction_;
this->blobs_[2]->mutable_cpu_data()[0] += 1;
caffe_gpu_axpby(mean_.count(), Dtype(1), mean_.gpu_data(),
moving_average_fraction_,
this->blobs_[0]->mutable_gpu_data());
int_tp m = bottom[0]->count() / channels_;
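      // Bessel's correction: the running (moving-average) variance stores the
      // unbiased estimate, so the biased batch variance is scaled by m/(m-1).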
Dtype bias_correction_factor = m > 1 ? Dtype(m) / (m - 1) : 1;
caffe_gpu_axpby(variance_.count(), bias_correction_factor,
variance_.gpu_data(), moving_average_fraction_,
this->blobs_[1]->mutable_gpu_data());
}
// normalize variance
caffe_gpu_add_scalar(variance_.count(), eps_, variance_.mutable_gpu_data());
caffe_gpu_powx(variance_.count(), variance_.gpu_data(), Dtype(0.5),
variance_.mutable_gpu_data());
// replicate variance to input size
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
batch_sum_multiplier_.gpu_data(),
variance_.gpu_data(), 0.,
num_by_chans_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
spatial_dim, 1, 1., num_by_chans_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), 0.,
temp_.mutable_gpu_data());
caffe_gpu_div(temp_.count(), top_data, temp_.gpu_data(), top_data);
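    // temp_ now holds sqrt(var(X) + eps) broadcast to the input shape; the
    // backward pass reuses it, so it must stay intact until then.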
// TODO(cdoersch): The caching is only needed because later in-place layers
// might clobber the data. Can we skip this if they won't?
caffe_copy(x_norm_.count(), top_data, x_norm_.mutable_gpu_data());
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
if (use_global_stats_) {
const Dtype scale_factor =
this->blobs_[2]->cpu_data()[0] == 0 ?
0 : 1 / this->blobs_[2]->cpu_data()[0];
viennacl::ocl::program &program = this->device_->program();
cl_uint argIdx = 0;
size_t global_work_size_[3] = {(size_t)num,
(size_t)channels_,
(size_t)spatial_dim};
if (bottom[0] == top[0]) {
viennacl::ocl::kernel &oclk_bn_use_global_stats = program.get_kernel(
CL_KERNEL_SELECT("batch_norm_use_global_stats_in_place"));
oclk_bn_use_global_stats.arg(argIdx++, num);
oclk_bn_use_global_stats.arg(argIdx++, channels_);
oclk_bn_use_global_stats.arg(argIdx++, spatial_dim);
oclk_bn_use_global_stats.arg(argIdx++, scale_factor);
oclk_bn_use_global_stats.arg(argIdx++, eps_);
oclk_bn_use_global_stats.arg(argIdx++,
WrapHandle((cl_mem) this->blobs_[0]->gpu_data(), &ctx));
oclk_bn_use_global_stats.arg(argIdx++,
WrapHandle((cl_mem) this->blobs_[1]->gpu_data(), &ctx));
oclk_bn_use_global_stats.arg(argIdx++,
WrapHandle((cl_mem) top_data, &ctx));
OCL_CHECK(clEnqueueNDRangeKernel(ctx.get_queue().handle().get(),
oclk_bn_use_global_stats.handle().get(), 3, NULL,
global_work_size_, NULL, 0, NULL,
NULL));
} else {
viennacl::ocl::kernel &oclk_bn_use_global_stats =
program.get_kernel(
CL_KERNEL_SELECT("batch_norm_use_global_stats"));
oclk_bn_use_global_stats.arg(argIdx++, num);
oclk_bn_use_global_stats.arg(argIdx++, channels_);
oclk_bn_use_global_stats.arg(argIdx++, spatial_dim);
oclk_bn_use_global_stats.arg(argIdx++, scale_factor);
oclk_bn_use_global_stats.arg(argIdx++, eps_);
oclk_bn_use_global_stats.arg(argIdx++,
WrapHandle((cl_mem) this->blobs_[0]->gpu_data(), &ctx));
oclk_bn_use_global_stats.arg(argIdx++,
WrapHandle((cl_mem) this->blobs_[1]->gpu_data(), &ctx));
oclk_bn_use_global_stats.arg(argIdx++,
WrapHandle((cl_mem) bottom_data, &ctx));
oclk_bn_use_global_stats.arg(argIdx++,
WrapHandle((cl_mem) top_data, &ctx));
OCL_CHECK(clEnqueueNDRangeKernel(ctx.get_queue().handle().get(),
oclk_bn_use_global_stats.handle().get(), 3, NULL,
global_work_size_, NULL, 0, NULL,
NULL));
}
} else {
if (bottom[0] != top[0]) {
greentea_copy<Dtype>(bottom[0]->count(), (cl_mem) bottom_data, 0,
(cl_mem) top_data, 0, &ctx);
}
// compute mean
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasNoTrans,
channels_ * num, spatial_dim,
1. / (num * spatial_dim), (cl_mem) bottom_data,
0, (cl_mem) (spatial_sum_multiplier_.gpu_data()),
0, 0.,
(cl_mem) (num_by_chans_.mutable_gpu_data()), 0);
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasTrans, num, channels_,
1., (cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0,
0., (cl_mem) (mean_.mutable_gpu_data()), 0);
// subtract mean
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
num, channels_, 1, 1,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0,
(cl_mem) (mean_.gpu_data()), 0, 0.,
(cl_mem) (num_by_chans_.mutable_gpu_data()), 0);
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
channels_ * num, spatial_dim, 1, -1,
(cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0,
1., (cl_mem) top_data, 0);
// compute variance using var(X) = E((X-EX)^2)
greentea_gpu_powx<Dtype>(this->device_->id(), top[0]->count(),
(cl_mem) top_data, 0, Dtype(2),
(cl_mem) (temp_.mutable_gpu_data()), 0);
// (X-EX)^2
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasNoTrans,
channels_ * num, spatial_dim,
1. / (num * spatial_dim),
(cl_mem) (temp_.gpu_data()), 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0,
0., (cl_mem) (num_by_chans_.mutable_gpu_data()),
0);
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasTrans, num, channels_,
1., (cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0,
0., (cl_mem) (variance_.mutable_gpu_data()), 0);
      // E((X-EX)^2)
// compute and save moving average
this->blobs_[2]->mutable_cpu_data()[0] *= moving_average_fraction_;
this->blobs_[2]->mutable_cpu_data()[0] += 1;
greentea_gpu_axpby<Dtype>(this->device_->id(), mean_.count(), Dtype(1),
(cl_mem) (mean_.gpu_data()), 0,
moving_average_fraction_,
(cl_mem) (this->blobs_[0]->mutable_gpu_data()),
0);
int_tp m = bottom[0]->count() / channels_;
Dtype bias_correction_factor = m > 1 ? Dtype(m) / (m - 1) : 1;
greentea_gpu_axpby<Dtype>(this->device_->id(), variance_.count(),
bias_correction_factor,
(cl_mem) (variance_.gpu_data()), 0,
moving_average_fraction_,
(cl_mem) (this->blobs_[1]->mutable_gpu_data()),
0);
// normalize variance
greentea_gpu_add_scalar<Dtype>(this->device_->id(),
variance_.count(), eps_,
(cl_mem) (variance_.mutable_gpu_data()), 0);
greentea_gpu_powx<Dtype>(this->device_->id(), variance_.count(),
(cl_mem) (variance_.gpu_data()), 0, Dtype(0.5),
(cl_mem) (variance_.mutable_gpu_data()), 0);
// replicate variance to input size
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
num, channels_, 1, 1,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0,
(cl_mem) (variance_.gpu_data()), 0, 0.,
(cl_mem) (num_by_chans_.mutable_gpu_data()), 0);
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
channels_ * num, spatial_dim, 1, 1.,
(cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0,
0., (cl_mem) (temp_.mutable_gpu_data()), 0);
greentea_gpu_div<Dtype>(this->device_->id(), temp_.count(),
(cl_mem) top_data, 0, (cl_mem) (temp_.gpu_data()),
0, (cl_mem) top_data, 0);
// TODO(cdoersch): The caching is only needed because later in-place layers
// might clobber the data. Can we skip this if they won't?
greentea_copy<Dtype>(x_norm_.count(), (cl_mem) top_data, 0,
(cl_mem) (x_norm_.mutable_gpu_data()), 0, &ctx);
}
#endif // USE_GREENTEA
}
}
template<typename Dtype>
void BatchNormLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff;
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
if (bottom[0] != top[0]) {
top_diff = top[0]->gpu_diff();
} else {
caffe_copy(x_norm_.count(), top[0]->gpu_diff(),
x_norm_.mutable_gpu_diff());
top_diff = x_norm_.gpu_diff();
}
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
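    // With global statistics the normalization is a fixed per-channel scaling,
    // so dE/dX is just dE/dY divided by sqrt(var + eps), which temp_ holds.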
if (use_global_stats_) {
caffe_gpu_div(temp_.count(), top_diff, temp_.gpu_data(), bottom_diff);
return;
}
const Dtype* top_data = x_norm_.gpu_data();
int_tp num = bottom[0]->shape()[0];
int_tp spatial_dim = bottom[0]->count() / (channels_ * bottom[0]->shape(0));
// if Y = (X-mean(X))/(sqrt(var(X)+eps)), then
//
// dE(Y)/dX =
// (dE/dY - mean(dE/dY) - mean(dE/dY \cdot Y) \cdot Y)
// ./ sqrt(var(X) + eps)
//
// where \cdot and ./ are hadamard product and elementwise division,
// respectively, dE/dY is the top diff, and mean/var/sum are all computed
// along all dimensions except the channels dimension. In the above
// equation, the operations allow for expansion (i.e. broadcast) along all
// dimensions except the channels dimension where required.
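    // Per element i (within one channel, with m = num * spatial_dim):
    //   dE/dX_i = ( dE/dY_i - (1/m) * sum_j dE/dY_j
    //               - Y_i * (1/m) * sum_j (dE/dY_j * Y_j) ) / sqrt(var(X) + eps)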
// sum(dE/dY \cdot Y)
caffe_gpu_mul<Dtype>(temp_.count(), top_data, top_diff, bottom_diff);
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim, 1.,
bottom_diff, spatial_sum_multiplier_.gpu_data(), 0.,
num_by_chans_.mutable_gpu_data());
caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
num_by_chans_.gpu_data(),
batch_sum_multiplier_.gpu_data(), 0.,
mean_.mutable_gpu_data());
// reshape (broadcast) the above
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
batch_sum_multiplier_.gpu_data(), mean_.gpu_data(),
0., num_by_chans_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
spatial_dim, 1, 1., num_by_chans_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), 0., bottom_diff);
// sum(dE/dY \cdot Y) \cdot Y
caffe_gpu_mul<Dtype>(temp_.count(), top_data, bottom_diff, bottom_diff);
// sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim, 1.,
top_diff, spatial_sum_multiplier_.gpu_data(), 0.,
num_by_chans_.mutable_gpu_data());
caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
num_by_chans_.gpu_data(),
batch_sum_multiplier_.gpu_data(), 0.,
mean_.mutable_gpu_data());
// reshape (broadcast) the above to make
// sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
batch_sum_multiplier_.gpu_data(), mean_.gpu_data(),
0., num_by_chans_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num * channels_,
spatial_dim, 1, 1., num_by_chans_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), 1., bottom_diff);
// dE/dY - mean(dE/dY)-mean(dE/dY \cdot Y) \cdot Y
caffe_gpu_axpby<Dtype>(temp_.count(), Dtype(1), top_diff,
Dtype(-1. / (num * spatial_dim)), bottom_diff);
// note: temp_ still contains sqrt(var(X)+eps), computed during the forward
// pass.
caffe_gpu_div<Dtype>(temp_.count(), bottom_diff, temp_.gpu_data(),
bottom_diff);
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
if (bottom[0] != top[0]) {
top_diff = top[0]->gpu_diff();
} else {
greentea_copy<Dtype>(x_norm_.count(), (cl_mem) (top[0]->gpu_diff()), 0,
(cl_mem) (x_norm_.mutable_gpu_diff()), 0, &ctx);
top_diff = x_norm_.gpu_diff();
}
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (use_global_stats_) {
greentea_gpu_div<Dtype>(this->device_->id(), temp_.count(),
(cl_mem) top_diff, 0, (cl_mem) (temp_.gpu_data()),
0, (cl_mem) bottom_diff, 0);
return;
}
const Dtype* top_data = x_norm_.gpu_data();
int_tp num = bottom[0]->shape()[0];
int_tp spatial_dim = bottom[0]->count() / (channels_ * bottom[0]->shape(0));
// if Y = (X-mean(X))/(sqrt(var(X)+eps)), then
//
// dE(Y)/dX =
// (dE/dY - mean(dE/dY) - mean(dE/dY \cdot Y) \cdot Y)
// ./ sqrt(var(X) + eps)
//
// where \cdot and ./ are hadamard product and elementwise division,
// respectively, dE/dY is the top diff, and mean/var/sum are all computed
// along all dimensions except the channels dimension. In the above
// equation, the operations allow for expansion (i.e. broadcast) along all
// dimensions except the channels dimension where required.
// sum(dE/dY \cdot Y)
greentea_gpu_mul<Dtype>(this->device_->id(), temp_.count(),
(cl_mem) top_data, 0, (cl_mem) top_diff, 0,
(cl_mem) bottom_diff, 0);
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasNoTrans, channels_ * num,
spatial_dim, 1., (cl_mem) bottom_diff, 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0,
0., (cl_mem) (num_by_chans_.mutable_gpu_data()),
0);
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasTrans, num, channels_,
1., (cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0, 0.,
(cl_mem) (mean_.mutable_gpu_data()), 0);
// reshape (broadcast) the above
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
num, channels_, 1, 1,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0,
(cl_mem) (mean_.gpu_data()), 0, 0.,
(cl_mem) (num_by_chans_.mutable_gpu_data()), 0);
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
channels_ * num, spatial_dim, 1, 1.,
(cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0,
0., (cl_mem) bottom_diff, 0);
// sum(dE/dY \cdot Y) \cdot Y
greentea_gpu_mul<Dtype>(this->device_->id(), temp_.count(),
(cl_mem) top_data, 0, (cl_mem) bottom_diff, 0,
(cl_mem) bottom_diff, 0);
// sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasNoTrans, channels_ * num,
spatial_dim, 1., (cl_mem) top_diff, 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0,
0., (cl_mem) (num_by_chans_.mutable_gpu_data()),
0);
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasTrans, num, channels_,
1., (cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0, 0.,
(cl_mem) (mean_.mutable_gpu_data()), 0);
// reshape (broadcast) the above to make
// sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
num, channels_, 1, 1,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0,
(cl_mem) (mean_.gpu_data()), 0, 0.,
(cl_mem) (num_by_chans_.mutable_gpu_data()), 0);
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
num * channels_, spatial_dim, 1, 1.,
(cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0,
1., (cl_mem) bottom_diff, 0);
// dE/dY - mean(dE/dY)-mean(dE/dY \cdot Y) \cdot Y
greentea_gpu_axpby<Dtype>(this->device_->id(), temp_.count(), Dtype(1),
(cl_mem) top_diff, 0,
Dtype(-1. / (num * spatial_dim)),
(cl_mem) bottom_diff, 0);
// note: temp_ still contains sqrt(var(X)+eps), computed during the forward
// pass.
greentea_gpu_div<Dtype>(this->device_->id(), temp_.count(),
(cl_mem) bottom_diff, 0,
(cl_mem) (temp_.gpu_data()), 0,
(cl_mem) bottom_diff, 0);
#endif // USE_GREENTEA
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BatchNormLayer);
} // namespace caffe
|
b9d94940a4d5139e2bdf3d46abdde2e6a528ebf8.cu
|
#include <algorithm>
#include <vector>
#include "caffe/layers/batch_norm_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<typename Dtype>
void BatchNormLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int_tp num = bottom[0]->shape(0);
int_tp spatial_dim = bottom[0]->count() / (channels_ * bottom[0]->shape(0));
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
if (bottom[0] != top[0]) {
caffe_copy(bottom[0]->count(), bottom_data, top_data);
}
if (use_global_stats_) {
// use the stored mean/variance estimates.
const Dtype scale_factor =
this->blobs_[2]->cpu_data()[0] == 0 ?
0 : 1 / this->blobs_[2]->cpu_data()[0];
caffe_gpu_scale(variance_.count(), scale_factor,
this->blobs_[0]->gpu_data(), mean_.mutable_gpu_data());
caffe_gpu_scale(variance_.count(), scale_factor,
this->blobs_[1]->gpu_data(),
variance_.mutable_gpu_data());
} else {
// compute mean
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim,
1. / (num * spatial_dim), bottom_data,
spatial_sum_multiplier_.gpu_data(), 0.,
num_by_chans_.mutable_gpu_data());
caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
num_by_chans_.gpu_data(),
batch_sum_multiplier_.gpu_data(), 0.,
mean_.mutable_gpu_data());
}
// subtract mean
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
batch_sum_multiplier_.gpu_data(), mean_.gpu_data(),
0., num_by_chans_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
spatial_dim, 1, -1, num_by_chans_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), 1., top_data);
if (!use_global_stats_) {
// compute variance using var(X) = E((X-EX)^2)
caffe_gpu_powx(top[0]->count(), top_data, Dtype(2),
temp_.mutable_gpu_data()); // (X-EX)^2
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim,
1. / (num * spatial_dim), temp_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), 0.,
num_by_chans_.mutable_gpu_data());
caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
num_by_chans_.gpu_data(),
batch_sum_multiplier_.gpu_data(), 0.,
                          variance_.mutable_gpu_data()); // E((X-EX)^2)
// compute and save moving average
this->blobs_[2]->mutable_cpu_data()[0] *= moving_average_fraction_;
this->blobs_[2]->mutable_cpu_data()[0] += 1;
caffe_gpu_axpby(mean_.count(), Dtype(1), mean_.gpu_data(),
moving_average_fraction_,
this->blobs_[0]->mutable_gpu_data());
int_tp m = bottom[0]->count() / channels_;
Dtype bias_correction_factor = m > 1 ? Dtype(m) / (m - 1) : 1;
caffe_gpu_axpby(variance_.count(), bias_correction_factor,
variance_.gpu_data(), moving_average_fraction_,
this->blobs_[1]->mutable_gpu_data());
}
// normalize variance
caffe_gpu_add_scalar(variance_.count(), eps_, variance_.mutable_gpu_data());
caffe_gpu_powx(variance_.count(), variance_.gpu_data(), Dtype(0.5),
variance_.mutable_gpu_data());
// replicate variance to input size
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
batch_sum_multiplier_.gpu_data(),
variance_.gpu_data(), 0.,
num_by_chans_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
spatial_dim, 1, 1., num_by_chans_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), 0.,
temp_.mutable_gpu_data());
caffe_gpu_div(temp_.count(), top_data, temp_.gpu_data(), top_data);
// TODO(cdoersch): The caching is only needed because later in-place layers
// might clobber the data. Can we skip this if they won't?
caffe_copy(x_norm_.count(), top_data, x_norm_.mutable_gpu_data());
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
if (use_global_stats_) {
const Dtype scale_factor =
this->blobs_[2]->cpu_data()[0] == 0 ?
0 : 1 / this->blobs_[2]->cpu_data()[0];
viennacl::ocl::program &program = this->device_->program();
cl_uint argIdx = 0;
size_t global_work_size_[3] = {(size_t)num,
(size_t)channels_,
(size_t)spatial_dim};
if (bottom[0] == top[0]) {
viennacl::ocl::kernel &oclk_bn_use_global_stats = program.get_kernel(
CL_KERNEL_SELECT("batch_norm_use_global_stats_in_place"));
oclk_bn_use_global_stats.arg(argIdx++, num);
oclk_bn_use_global_stats.arg(argIdx++, channels_);
oclk_bn_use_global_stats.arg(argIdx++, spatial_dim);
oclk_bn_use_global_stats.arg(argIdx++, scale_factor);
oclk_bn_use_global_stats.arg(argIdx++, eps_);
oclk_bn_use_global_stats.arg(argIdx++,
WrapHandle((cl_mem) this->blobs_[0]->gpu_data(), &ctx));
oclk_bn_use_global_stats.arg(argIdx++,
WrapHandle((cl_mem) this->blobs_[1]->gpu_data(), &ctx));
oclk_bn_use_global_stats.arg(argIdx++,
WrapHandle((cl_mem) top_data, &ctx));
OCL_CHECK(clEnqueueNDRangeKernel(ctx.get_queue().handle().get(),
oclk_bn_use_global_stats.handle().get(), 3, NULL,
global_work_size_, NULL, 0, NULL,
NULL));
} else {
viennacl::ocl::kernel &oclk_bn_use_global_stats =
program.get_kernel(
CL_KERNEL_SELECT("batch_norm_use_global_stats"));
oclk_bn_use_global_stats.arg(argIdx++, num);
oclk_bn_use_global_stats.arg(argIdx++, channels_);
oclk_bn_use_global_stats.arg(argIdx++, spatial_dim);
oclk_bn_use_global_stats.arg(argIdx++, scale_factor);
oclk_bn_use_global_stats.arg(argIdx++, eps_);
oclk_bn_use_global_stats.arg(argIdx++,
WrapHandle((cl_mem) this->blobs_[0]->gpu_data(), &ctx));
oclk_bn_use_global_stats.arg(argIdx++,
WrapHandle((cl_mem) this->blobs_[1]->gpu_data(), &ctx));
oclk_bn_use_global_stats.arg(argIdx++,
WrapHandle((cl_mem) bottom_data, &ctx));
oclk_bn_use_global_stats.arg(argIdx++,
WrapHandle((cl_mem) top_data, &ctx));
OCL_CHECK(clEnqueueNDRangeKernel(ctx.get_queue().handle().get(),
oclk_bn_use_global_stats.handle().get(), 3, NULL,
global_work_size_, NULL, 0, NULL,
NULL));
}
} else {
if (bottom[0] != top[0]) {
greentea_copy<Dtype>(bottom[0]->count(), (cl_mem) bottom_data, 0,
(cl_mem) top_data, 0, &ctx);
}
// compute mean
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasNoTrans,
channels_ * num, spatial_dim,
1. / (num * spatial_dim), (cl_mem) bottom_data,
0, (cl_mem) (spatial_sum_multiplier_.gpu_data()),
0, 0.,
(cl_mem) (num_by_chans_.mutable_gpu_data()), 0);
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasTrans, num, channels_,
1., (cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0,
0., (cl_mem) (mean_.mutable_gpu_data()), 0);
// subtract mean
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
num, channels_, 1, 1,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0,
(cl_mem) (mean_.gpu_data()), 0, 0.,
(cl_mem) (num_by_chans_.mutable_gpu_data()), 0);
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
channels_ * num, spatial_dim, 1, -1,
(cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0,
1., (cl_mem) top_data, 0);
// compute variance using var(X) = E((X-EX)^2)
greentea_gpu_powx<Dtype>(this->device_->id(), top[0]->count(),
(cl_mem) top_data, 0, Dtype(2),
(cl_mem) (temp_.mutable_gpu_data()), 0);
// (X-EX)^2
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasNoTrans,
channels_ * num, spatial_dim,
1. / (num * spatial_dim),
(cl_mem) (temp_.gpu_data()), 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0,
0., (cl_mem) (num_by_chans_.mutable_gpu_data()),
0);
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasTrans, num, channels_,
1., (cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0,
0., (cl_mem) (variance_.mutable_gpu_data()), 0);
      // E((X-EX)^2)
// compute and save moving average
this->blobs_[2]->mutable_cpu_data()[0] *= moving_average_fraction_;
this->blobs_[2]->mutable_cpu_data()[0] += 1;
greentea_gpu_axpby<Dtype>(this->device_->id(), mean_.count(), Dtype(1),
(cl_mem) (mean_.gpu_data()), 0,
moving_average_fraction_,
(cl_mem) (this->blobs_[0]->mutable_gpu_data()),
0);
int_tp m = bottom[0]->count() / channels_;
Dtype bias_correction_factor = m > 1 ? Dtype(m) / (m - 1) : 1;
greentea_gpu_axpby<Dtype>(this->device_->id(), variance_.count(),
bias_correction_factor,
(cl_mem) (variance_.gpu_data()), 0,
moving_average_fraction_,
(cl_mem) (this->blobs_[1]->mutable_gpu_data()),
0);
// normalize variance
greentea_gpu_add_scalar<Dtype>(this->device_->id(),
variance_.count(), eps_,
(cl_mem) (variance_.mutable_gpu_data()), 0);
greentea_gpu_powx<Dtype>(this->device_->id(), variance_.count(),
(cl_mem) (variance_.gpu_data()), 0, Dtype(0.5),
(cl_mem) (variance_.mutable_gpu_data()), 0);
// replicate variance to input size
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
num, channels_, 1, 1,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0,
(cl_mem) (variance_.gpu_data()), 0, 0.,
(cl_mem) (num_by_chans_.mutable_gpu_data()), 0);
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
channels_ * num, spatial_dim, 1, 1.,
(cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0,
0., (cl_mem) (temp_.mutable_gpu_data()), 0);
greentea_gpu_div<Dtype>(this->device_->id(), temp_.count(),
(cl_mem) top_data, 0, (cl_mem) (temp_.gpu_data()),
0, (cl_mem) top_data, 0);
// TODO(cdoersch): The caching is only needed because later in-place layers
// might clobber the data. Can we skip this if they won't?
greentea_copy<Dtype>(x_norm_.count(), (cl_mem) top_data, 0,
(cl_mem) (x_norm_.mutable_gpu_data()), 0, &ctx);
}
#endif // USE_GREENTEA
}
}
template<typename Dtype>
void BatchNormLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff;
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
if (bottom[0] != top[0]) {
top_diff = top[0]->gpu_diff();
} else {
caffe_copy(x_norm_.count(), top[0]->gpu_diff(),
x_norm_.mutable_gpu_diff());
top_diff = x_norm_.gpu_diff();
}
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (use_global_stats_) {
caffe_gpu_div(temp_.count(), top_diff, temp_.gpu_data(), bottom_diff);
return;
}
const Dtype* top_data = x_norm_.gpu_data();
int_tp num = bottom[0]->shape()[0];
int_tp spatial_dim = bottom[0]->count() / (channels_ * bottom[0]->shape(0));
// if Y = (X-mean(X))/(sqrt(var(X)+eps)), then
//
// dE(Y)/dX =
// (dE/dY - mean(dE/dY) - mean(dE/dY \cdot Y) \cdot Y)
// ./ sqrt(var(X) + eps)
//
// where \cdot and ./ are hadamard product and elementwise division,
// respectively, dE/dY is the top diff, and mean/var/sum are all computed
// along all dimensions except the channels dimension. In the above
// equation, the operations allow for expansion (i.e. broadcast) along all
// dimensions except the channels dimension where required.
// sum(dE/dY \cdot Y)
caffe_gpu_mul<Dtype>(temp_.count(), top_data, top_diff, bottom_diff);
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim, 1.,
bottom_diff, spatial_sum_multiplier_.gpu_data(), 0.,
num_by_chans_.mutable_gpu_data());
caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
num_by_chans_.gpu_data(),
batch_sum_multiplier_.gpu_data(), 0.,
mean_.mutable_gpu_data());
// reshape (broadcast) the above
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
batch_sum_multiplier_.gpu_data(), mean_.gpu_data(),
0., num_by_chans_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
spatial_dim, 1, 1., num_by_chans_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), 0., bottom_diff);
// sum(dE/dY \cdot Y) \cdot Y
caffe_gpu_mul<Dtype>(temp_.count(), top_data, bottom_diff, bottom_diff);
// sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim, 1.,
top_diff, spatial_sum_multiplier_.gpu_data(), 0.,
num_by_chans_.mutable_gpu_data());
caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
num_by_chans_.gpu_data(),
batch_sum_multiplier_.gpu_data(), 0.,
mean_.mutable_gpu_data());
// reshape (broadcast) the above to make
// sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
batch_sum_multiplier_.gpu_data(), mean_.gpu_data(),
0., num_by_chans_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num * channels_,
spatial_dim, 1, 1., num_by_chans_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), 1., bottom_diff);
// dE/dY - mean(dE/dY)-mean(dE/dY \cdot Y) \cdot Y
caffe_gpu_axpby<Dtype>(temp_.count(), Dtype(1), top_diff,
Dtype(-1. / (num * spatial_dim)), bottom_diff);
// note: temp_ still contains sqrt(var(X)+eps), computed during the forward
// pass.
caffe_gpu_div<Dtype>(temp_.count(), bottom_diff, temp_.gpu_data(),
bottom_diff);
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
if (bottom[0] != top[0]) {
top_diff = top[0]->gpu_diff();
} else {
greentea_copy<Dtype>(x_norm_.count(), (cl_mem) (top[0]->gpu_diff()), 0,
(cl_mem) (x_norm_.mutable_gpu_diff()), 0, &ctx);
top_diff = x_norm_.gpu_diff();
}
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (use_global_stats_) {
greentea_gpu_div<Dtype>(this->device_->id(), temp_.count(),
(cl_mem) top_diff, 0, (cl_mem) (temp_.gpu_data()),
0, (cl_mem) bottom_diff, 0);
return;
}
const Dtype* top_data = x_norm_.gpu_data();
int_tp num = bottom[0]->shape()[0];
int_tp spatial_dim = bottom[0]->count() / (channels_ * bottom[0]->shape(0));
// if Y = (X-mean(X))/(sqrt(var(X)+eps)), then
//
// dE(Y)/dX =
// (dE/dY - mean(dE/dY) - mean(dE/dY \cdot Y) \cdot Y)
// ./ sqrt(var(X) + eps)
//
// where \cdot and ./ are hadamard product and elementwise division,
// respectively, dE/dY is the top diff, and mean/var/sum are all computed
// along all dimensions except the channels dimension. In the above
// equation, the operations allow for expansion (i.e. broadcast) along all
// dimensions except the channels dimension where required.
// sum(dE/dY \cdot Y)
greentea_gpu_mul<Dtype>(this->device_->id(), temp_.count(),
(cl_mem) top_data, 0, (cl_mem) top_diff, 0,
(cl_mem) bottom_diff, 0);
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasNoTrans, channels_ * num,
spatial_dim, 1., (cl_mem) bottom_diff, 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0,
0., (cl_mem) (num_by_chans_.mutable_gpu_data()),
0);
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasTrans, num, channels_,
1., (cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0, 0.,
(cl_mem) (mean_.mutable_gpu_data()), 0);
// reshape (broadcast) the above
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
num, channels_, 1, 1,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0,
(cl_mem) (mean_.gpu_data()), 0, 0.,
(cl_mem) (num_by_chans_.mutable_gpu_data()), 0);
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
channels_ * num, spatial_dim, 1, 1.,
(cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0,
0., (cl_mem) bottom_diff, 0);
// sum(dE/dY \cdot Y) \cdot Y
greentea_gpu_mul<Dtype>(this->device_->id(), temp_.count(),
(cl_mem) top_data, 0, (cl_mem) bottom_diff, 0,
(cl_mem) bottom_diff, 0);
// sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasNoTrans, channels_ * num,
spatial_dim, 1., (cl_mem) top_diff, 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0,
0., (cl_mem) (num_by_chans_.mutable_gpu_data()),
0);
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasTrans, num, channels_,
1., (cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0, 0.,
(cl_mem) (mean_.mutable_gpu_data()), 0);
// reshape (broadcast) the above to make
// sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
num, channels_, 1, 1,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0,
(cl_mem) (mean_.gpu_data()), 0, 0.,
(cl_mem) (num_by_chans_.mutable_gpu_data()), 0);
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
num * channels_, spatial_dim, 1, 1.,
(cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0,
1., (cl_mem) bottom_diff, 0);
// dE/dY - mean(dE/dY)-mean(dE/dY \cdot Y) \cdot Y
greentea_gpu_axpby<Dtype>(this->device_->id(), temp_.count(), Dtype(1),
(cl_mem) top_diff, 0,
Dtype(-1. / (num * spatial_dim)),
(cl_mem) bottom_diff, 0);
// note: temp_ still contains sqrt(var(X)+eps), computed during the forward
// pass.
greentea_gpu_div<Dtype>(this->device_->id(), temp_.count(),
(cl_mem) bottom_diff, 0,
(cl_mem) (temp_.gpu_data()), 0,
(cl_mem) bottom_diff, 0);
#endif // USE_GREENTEA
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BatchNormLayer);
} // namespace caffe
|
34761b7869029b755042fe63ce61d5d1c59222f3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************************************
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
////////////////////////////////////////////////////////////////////////////////////////////////////
// Guard conditions around the entire file.
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
////////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass_unit_tests.h"
#include "tools/util/half.h"
#include "tools/test/unit/gemm/gemm_testbed.h"
#include "cutlass/gemm/wmma_gemm_traits.h"
#include "cutlass/gemm/wmma_gemm_epilogue.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename GemmTraits, typename EpilogueTraits, typename LoadAccumulatorIterator>
__global__ void test_epilogue_kernel(
typename EpilogueTraits::Params params,
cutlass::Coord<3> problem,
typename EpilogueTraits::AccumulatorScalar *accum_ptr,
int ldm) {
// Shared memory allocation
__shared__ typename EpilogueTraits::SharedStorage shared_storage;
//
// Load accumulators from memory - normally, a GEMM would compute these
//
// Traits class defines tiling
GemmTraits traits;
int warp_id = (threadIdx.x / 32);
cutlass::Coord<3> warp_offset = traits(warp_id);
// Accumulator fragment
typename EpilogueTraits::AccumulatorFragment accumulator;
// Construct an out-of-band LoadIterator for accumulators to initialize them
LoadAccumulatorIterator load_accum_iterator(accum_ptr, ldm, warp_offset);
load_accum_iterator.load(accumulator);
__syncthreads();
//
// Test the epilogue itself
//
typedef cutlass::gemm::WmmaGemmEpilogue<EpilogueTraits> Epilogue;
Epilogue epilogue(params, problem, warp_offset);
// Perform the epilogue operation
epilogue.update(shared_storage, accumulator);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ThreadBlockTile,
typename WarpTile,
typename WmmaTile,
typename EpilogueTile,
typename StreamTile,
typename AccumulatorType,
typename ScalarC
>
struct TestWmmaGemmEpilogue {
typedef cutlass::gemm::WmmaGemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
ThreadBlockTile,
WarpTile,
WmmaTile,
AccumulatorType,
AccumulatorType,
1,
AccumulatorType,
EpilogueTile,
StreamTile
> Traits;
// Construct an actual epilogue
typedef cutlass::gemm::EpilogueLinearScaling<ScalarC, ScalarC, ScalarC, ScalarC> EpilogueLinearScaling;
/// Define some traits
typedef cutlass::gemm::WmmaGemmEpilogueTraitsBasic<
ScalarC,
typename Traits::WarpMultiplyAdd::StoreIteratorC,
ScalarC,
ThreadBlockTile,
32 * Traits::Warps::kCount,
WarpTile,
WmmaTile,
EpilogueTile,
StreamTile,
EpilogueLinearScaling
> WmmaGemmEpilogueTraits;
/// Type alias for EpilogueTraits type
typedef typename WmmaGemmEpilogueTraits::Traits EpilogueTraits;
TestWmmaGemmEpilogue() {
}
void run(cutlass::Coord<3> problem) {
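    // Test flow: fill the accumulator tile with a known sequence on the host,
    // load it into fragments inside test_epilogue_kernel, run the
    // linear-scaling epilogue (initialized below with scales 1 and 0), and
    // expect the written destination to bit-match the accumulators.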
//
// Prepare accumulator tile
//
cutlass::HostTensor<ScalarC> accumulator_matrix;
cutlass::HostTensor<ScalarC> source_matrix;
cutlass::HostTensor<ScalarC> destination_matrix;
accumulator_matrix.resize_matrix(
ThreadBlockTile::kW,
ThreadBlockTile::kH,
cutlass::MatrixLayout::kColumnMajor);
source_matrix.resize_matrix(
problem[2],
problem[1],
cutlass::MatrixLayout::kColumnMajor);
destination_matrix.resize_matrix(
problem[2],
problem[1],
cutlass::MatrixLayout::kColumnMajor);
accumulator_matrix.fill_sequential();
source_matrix.fill_sequential();
int value = 0;
for (int row = 0; row < ThreadBlockTile::kW; ++row) {
for (int col = 0; col < ThreadBlockTile::kH; ++col, ++value) {
if (row < problem[2] && col < problem[1]) {
source_matrix.at(cutlass::make_Coord(0, row, col, 0)) = ScalarC(value);
}
}
}
destination_matrix.fill(0);
//
// Launch test kernel
//
dim3 grid(1,1);
dim3 block(32 * Traits::Warps::kCount, 1, 1);
EpilogueLinearScaling functor;
functor.initialize(1, 0);
typename EpilogueTraits::Params params;
params.initialize(
functor,
source_matrix.device_data(),
source_matrix.leading_dim(),
destination_matrix.device_data(),
destination_matrix.leading_dim()
);
hipLaunchKernelGGL(( test_epilogue_kernel<
Traits,
EpilogueTraits,
typename Traits::WarpMultiplyAdd::LoadIteratorC
>), dim3(grid), dim3(block) , 0, 0,
params,
problem,
accumulator_matrix.device_data(),
accumulator_matrix.leading_dim()
);
destination_matrix.sync_host();
EXPECT_TRUE(accumulator_matrix.bit_equals(destination_matrix))
<< "Accumulators:\n" << accumulator_matrix << "\nDestination:\n" << destination_matrix;
}
void run() {
run(cutlass::make_Coord(ThreadBlockTile::kD, ThreadBlockTile::kH, ThreadBlockTile::kW));
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Small epilogue
TEST(WmmaGemm_16x16x16, wmma_epilogue_basic) {
// GEMM threadblock structure
typedef cutlass::Shape<16, 16, 16> ThreadBlockTile;
typedef cutlass::Shape<16, 16, 16> WarpTile;
typedef cutlass::Shape<16, 16, 16> WmmaTile;
// Epilogue shapes
typedef cutlass::Shape<1, 16, 16> EpilogueTile;
typedef cutlass::Shape<1, 16, 16> StreamTile;
typedef float AccumulatorType;
typedef float ScalarC;
TestWmmaGemmEpilogue<
ThreadBlockTile,
WarpTile,
WmmaTile,
EpilogueTile,
StreamTile,
AccumulatorType,
ScalarC
>().run();
}
TEST(WmmaGemm_16x16x16, wmma_epilogue_ragged) {
// GEMM threadblock structure
typedef cutlass::Shape<16, 16, 16> ThreadBlockTile;
typedef cutlass::Shape<16, 16, 16> WarpTile;
typedef cutlass::Shape<16, 16, 16> WmmaTile;
// Epilogue shapes
typedef cutlass::Shape<1, 16, 16> EpilogueTile;
typedef cutlass::Shape<1, 16, 16> StreamTile;
typedef float AccumulatorType;
typedef float ScalarC;
TestWmmaGemmEpilogue<
ThreadBlockTile,
WarpTile,
WmmaTile,
EpilogueTile,
StreamTile,
AccumulatorType,
ScalarC
>().run(cutlass::make_Coord(0, 15, 15));
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Small epilogue
TEST(WmmaGemm_32x32x16, wmma_epilogue_basic_32x32_32x32) {
// GEMM threadblock structure
typedef cutlass::Shape<16, 32, 32> ThreadBlockTile;
typedef cutlass::Shape<16, 32, 32> WarpTile;
typedef cutlass::Shape<16, 16, 16> WmmaTile;
// Epilogue shapes
typedef cutlass::Shape<1, 32, 32> EpilogueTile;
typedef cutlass::Shape<1, 4, 32> StreamTile;
typedef float AccumulatorType;
typedef float ScalarC;
TestWmmaGemmEpilogue<
ThreadBlockTile,
WarpTile,
WmmaTile,
EpilogueTile,
StreamTile,
AccumulatorType,
ScalarC
>().run();
}
/// Small epilogue
TEST(WmmaGemm_32x32x16, wmma_epilogue_basic_32x32_32x32_ragged) {
// GEMM threadblock structure
typedef cutlass::Shape<16, 32, 32> ThreadBlockTile;
typedef cutlass::Shape<16, 32, 32> WarpTile;
typedef cutlass::Shape<16, 16, 16> WmmaTile;
// Epilogue shapes
typedef cutlass::Shape<1, 32, 32> EpilogueTile;
typedef cutlass::Shape<1, 4, 32> StreamTile;
typedef float AccumulatorType;
typedef float ScalarC;
TestWmmaGemmEpilogue<
ThreadBlockTile,
WarpTile,
WmmaTile,
EpilogueTile,
StreamTile,
AccumulatorType,
ScalarC
>().run(cutlass::make_Coord(0, 14, 17));
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Small epilogue
TEST(WmmaGemm_32x32x16, wmma_epilogue_basic_32x32_16x16) {
// GEMM threadblock structure
typedef cutlass::Shape<16, 32, 32> ThreadBlockTile;
typedef cutlass::Shape<16, 16, 16> WarpTile;
typedef cutlass::Shape<16, 16, 16> WmmaTile;
// Epilogue shapes
typedef cutlass::Shape<1, 32, 32> EpilogueTile;
typedef cutlass::Shape<1, 4, 32> StreamTile;
typedef float AccumulatorType;
typedef float ScalarC;
TestWmmaGemmEpilogue<
ThreadBlockTile,
WarpTile,
WmmaTile,
EpilogueTile,
StreamTile,
AccumulatorType,
ScalarC
>().run();
}
/// Small epilogue
TEST(WmmaGemm_32x32x16, wmma_epilogue_basic_32x32_16x16_ragged) {
// GEMM threadblock structure
typedef cutlass::Shape<16, 32, 32> ThreadBlockTile;
typedef cutlass::Shape<16, 16, 16> WarpTile;
typedef cutlass::Shape<16, 16, 16> WmmaTile;
// Epilogue shapes
typedef cutlass::Shape<1, 32, 32> EpilogueTile;
typedef cutlass::Shape<1, 4, 32> StreamTile;
typedef float AccumulatorType;
typedef float ScalarC;
TestWmmaGemmEpilogue<
ThreadBlockTile,
WarpTile,
WmmaTile,
EpilogueTile,
StreamTile,
AccumulatorType,
ScalarC
>().run(cutlass::make_Coord(0, 23, 19));
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Large epilogue
TEST(WmmaGemm_128x128x16, wmma_epilogue_basic_32x32_16x16) {
// GEMM threadblock structure
typedef cutlass::Shape<16, 128, 128> ThreadBlockTile;
typedef cutlass::Shape<16, 32, 64> WarpTile;
typedef cutlass::Shape<16, 16, 16> WmmaTile;
// Epilogue shapes
typedef cutlass::Shape<1, 64, 64> EpilogueTile;
typedef cutlass::Shape<1, 4, 64> StreamTile;
typedef float AccumulatorType;
typedef float ScalarC;
typedef cutlass::gemm::WmmaGemmEpilogueStructure<
ThreadBlockTile,
EpilogueTile,
StreamTile,
WarpTile,
WmmaTile
> Structure;
TestWmmaGemmEpilogue<
ThreadBlockTile,
WarpTile,
WmmaTile,
EpilogueTile,
StreamTile,
AccumulatorType,
ScalarC
>().run();
}
/// Large epilogue
TEST(WmmaGemm_128x128x16, wmma_epilogue_basic_32x32_16x16_ragged) {
// GEMM threadblock structure
typedef cutlass::Shape<16, 128, 128> ThreadBlockTile;
typedef cutlass::Shape<16, 32, 64> WarpTile;
typedef cutlass::Shape<16, 16, 16> WmmaTile;
// Epilogue shapes
typedef cutlass::Shape<1, 64, 64> EpilogueTile;
typedef cutlass::Shape<1, 4, 64> StreamTile;
typedef float AccumulatorType;
typedef float ScalarC;
typedef cutlass::gemm::WmmaGemmEpilogueStructure<
ThreadBlockTile,
EpilogueTile,
StreamTile,
WarpTile,
WmmaTile
> Structure;
TestWmmaGemmEpilogue<
ThreadBlockTile,
WarpTile,
WmmaTile,
EpilogueTile,
StreamTile,
AccumulatorType,
ScalarC
>().run(cutlass::make_Coord(0, 119, 101));
}
////////////////////////////////////////////////////////////////////////////////////////////////////
#endif // end guard conditional on SM70
|
34761b7869029b755042fe63ce61d5d1c59222f3.cu
|
/***************************************************************************************************
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
////////////////////////////////////////////////////////////////////////////////////////////////////
// Guard conditions around the entire file.
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
////////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass_unit_tests.h"
#include "tools/util/half.h"
#include "tools/test/unit/gemm/gemm_testbed.h"
#include "cutlass/gemm/wmma_gemm_traits.h"
#include "cutlass/gemm/wmma_gemm_epilogue.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename GemmTraits, typename EpilogueTraits, typename LoadAccumulatorIterator>
__global__ void test_epilogue_kernel(
typename EpilogueTraits::Params params,
cutlass::Coord<3> problem,
typename EpilogueTraits::AccumulatorScalar *accum_ptr,
int ldm) {
// Shared memory allocation
__shared__ typename EpilogueTraits::SharedStorage shared_storage;
//
// Load accumulators from memory - normally, a GEMM would compute these
//
// Traits class defines tiling
GemmTraits traits;
int warp_id = (threadIdx.x / 32);
cutlass::Coord<3> warp_offset = traits(warp_id);
// Accumulator fragment
typename EpilogueTraits::AccumulatorFragment accumulator;
// Construct an out-of-band LoadIterator for accumulators to initialize them
LoadAccumulatorIterator load_accum_iterator(accum_ptr, ldm, warp_offset);
load_accum_iterator.load(accumulator);
__syncthreads();
//
// Test the epilogue itself
//
typedef cutlass::gemm::WmmaGemmEpilogue<EpilogueTraits> Epilogue;
Epilogue epilogue(params, problem, warp_offset);
// Perform the epilogue operation
epilogue.update(shared_storage, accumulator);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ThreadBlockTile,
typename WarpTile,
typename WmmaTile,
typename EpilogueTile,
typename StreamTile,
typename AccumulatorType,
typename ScalarC
>
struct TestWmmaGemmEpilogue {
typedef cutlass::gemm::WmmaGemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
ThreadBlockTile,
WarpTile,
WmmaTile,
AccumulatorType,
AccumulatorType,
1,
AccumulatorType,
EpilogueTile,
StreamTile
> Traits;
// Construct an actual epilogue
typedef cutlass::gemm::EpilogueLinearScaling<ScalarC, ScalarC, ScalarC, ScalarC> EpilogueLinearScaling;
/// Define some traits
typedef cutlass::gemm::WmmaGemmEpilogueTraitsBasic<
ScalarC,
typename Traits::WarpMultiplyAdd::StoreIteratorC,
ScalarC,
ThreadBlockTile,
32 * Traits::Warps::kCount,
WarpTile,
WmmaTile,
EpilogueTile,
StreamTile,
EpilogueLinearScaling
> WmmaGemmEpilogueTraits;
/// Type alias for EpilogueTraits type
typedef typename WmmaGemmEpilogueTraits::Traits EpilogueTraits;
TestWmmaGemmEpilogue() {
}
void run(cutlass::Coord<3> problem) {
//
// Prepare accumulator tile
//
cutlass::HostTensor<ScalarC> accumulator_matrix;
cutlass::HostTensor<ScalarC> source_matrix;
cutlass::HostTensor<ScalarC> destination_matrix;
accumulator_matrix.resize_matrix(
ThreadBlockTile::kW,
ThreadBlockTile::kH,
cutlass::MatrixLayout::kColumnMajor);
source_matrix.resize_matrix(
problem[2],
problem[1],
cutlass::MatrixLayout::kColumnMajor);
destination_matrix.resize_matrix(
problem[2],
problem[1],
cutlass::MatrixLayout::kColumnMajor);
accumulator_matrix.fill_sequential();
source_matrix.fill_sequential();
int value = 0;
for (int row = 0; row < ThreadBlockTile::kW; ++row) {
for (int col = 0; col < ThreadBlockTile::kH; ++col, ++value) {
if (row < problem[2] && col < problem[1]) {
source_matrix.at(cutlass::make_Coord(0, row, col, 0)) = ScalarC(value);
}
}
}
destination_matrix.fill(0);
//
// Launch test kernel
//
dim3 grid(1,1);
dim3 block(32 * Traits::Warps::kCount, 1, 1);
EpilogueLinearScaling functor;
functor.initialize(1, 0);
typename EpilogueTraits::Params params;
params.initialize(
functor,
source_matrix.device_data(),
source_matrix.leading_dim(),
destination_matrix.device_data(),
destination_matrix.leading_dim()
);
test_epilogue_kernel<
Traits,
EpilogueTraits,
typename Traits::WarpMultiplyAdd::LoadIteratorC
><<< grid, block >>>(
params,
problem,
accumulator_matrix.device_data(),
accumulator_matrix.leading_dim()
);
destination_matrix.sync_host();
EXPECT_TRUE(accumulator_matrix.bit_equals(destination_matrix))
<< "Accumulators:\n" << accumulator_matrix << "\nDestination:\n" << destination_matrix;
}
void run() {
run(cutlass::make_Coord(ThreadBlockTile::kD, ThreadBlockTile::kH, ThreadBlockTile::kW));
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Small epilogue
TEST(WmmaGemm_16x16x16, wmma_epilogue_basic) {
// GEMM threadblock structure
typedef cutlass::Shape<16, 16, 16> ThreadBlockTile;
typedef cutlass::Shape<16, 16, 16> WarpTile;
typedef cutlass::Shape<16, 16, 16> WmmaTile;
// Epilogue shapes
typedef cutlass::Shape<1, 16, 16> EpilogueTile;
typedef cutlass::Shape<1, 16, 16> StreamTile;
typedef float AccumulatorType;
typedef float ScalarC;
TestWmmaGemmEpilogue<
ThreadBlockTile,
WarpTile,
WmmaTile,
EpilogueTile,
StreamTile,
AccumulatorType,
ScalarC
>().run();
}
TEST(WmmaGemm_16x16x16, wmma_epilogue_ragged) {
// GEMM threadblock structure
typedef cutlass::Shape<16, 16, 16> ThreadBlockTile;
typedef cutlass::Shape<16, 16, 16> WarpTile;
typedef cutlass::Shape<16, 16, 16> WmmaTile;
// Epilogue shapes
typedef cutlass::Shape<1, 16, 16> EpilogueTile;
typedef cutlass::Shape<1, 16, 16> StreamTile;
typedef float AccumulatorType;
typedef float ScalarC;
TestWmmaGemmEpilogue<
ThreadBlockTile,
WarpTile,
WmmaTile,
EpilogueTile,
StreamTile,
AccumulatorType,
ScalarC
>().run(cutlass::make_Coord(0, 15, 15));
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Small epilogue
TEST(WmmaGemm_32x32x16, wmma_epilogue_basic_32x32_32x32) {
// GEMM threadblock structure
typedef cutlass::Shape<16, 32, 32> ThreadBlockTile;
typedef cutlass::Shape<16, 32, 32> WarpTile;
typedef cutlass::Shape<16, 16, 16> WmmaTile;
// Epilogue shapes
typedef cutlass::Shape<1, 32, 32> EpilogueTile;
typedef cutlass::Shape<1, 4, 32> StreamTile;
typedef float AccumulatorType;
typedef float ScalarC;
TestWmmaGemmEpilogue<
ThreadBlockTile,
WarpTile,
WmmaTile,
EpilogueTile,
StreamTile,
AccumulatorType,
ScalarC
>().run();
}
/// Small epilogue
TEST(WmmaGemm_32x32x16, wmma_epilogue_basic_32x32_32x32_ragged) {
// GEMM threadblock structure
typedef cutlass::Shape<16, 32, 32> ThreadBlockTile;
typedef cutlass::Shape<16, 32, 32> WarpTile;
typedef cutlass::Shape<16, 16, 16> WmmaTile;
// Epilogue shapes
typedef cutlass::Shape<1, 32, 32> EpilogueTile;
typedef cutlass::Shape<1, 4, 32> StreamTile;
typedef float AccumulatorType;
typedef float ScalarC;
TestWmmaGemmEpilogue<
ThreadBlockTile,
WarpTile,
WmmaTile,
EpilogueTile,
StreamTile,
AccumulatorType,
ScalarC
>().run(cutlass::make_Coord(0, 14, 17));
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Small epilogue
TEST(WmmaGemm_32x32x16, wmma_epilogue_basic_32x32_16x16) {
// GEMM threadblock structure
typedef cutlass::Shape<16, 32, 32> ThreadBlockTile;
typedef cutlass::Shape<16, 16, 16> WarpTile;
typedef cutlass::Shape<16, 16, 16> WmmaTile;
// Epilogue shapes
typedef cutlass::Shape<1, 32, 32> EpilogueTile;
typedef cutlass::Shape<1, 4, 32> StreamTile;
typedef float AccumulatorType;
typedef float ScalarC;
TestWmmaGemmEpilogue<
ThreadBlockTile,
WarpTile,
WmmaTile,
EpilogueTile,
StreamTile,
AccumulatorType,
ScalarC
>().run();
}
/// Small epilogue
TEST(WmmaGemm_32x32x16, wmma_epilogue_basic_32x32_16x16_ragged) {
// GEMM threadblock structure
typedef cutlass::Shape<16, 32, 32> ThreadBlockTile;
typedef cutlass::Shape<16, 16, 16> WarpTile;
typedef cutlass::Shape<16, 16, 16> WmmaTile;
// Epilogue shapes
typedef cutlass::Shape<1, 32, 32> EpilogueTile;
typedef cutlass::Shape<1, 4, 32> StreamTile;
typedef float AccumulatorType;
typedef float ScalarC;
TestWmmaGemmEpilogue<
ThreadBlockTile,
WarpTile,
WmmaTile,
EpilogueTile,
StreamTile,
AccumulatorType,
ScalarC
>().run(cutlass::make_Coord(0, 23, 19));
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Large epilogue
TEST(WmmaGemm_128x128x16, wmma_epilogue_basic_32x32_16x16) {
// GEMM threadblock structure
typedef cutlass::Shape<16, 128, 128> ThreadBlockTile;
typedef cutlass::Shape<16, 32, 64> WarpTile;
typedef cutlass::Shape<16, 16, 16> WmmaTile;
// Epilogue shapes
typedef cutlass::Shape<1, 64, 64> EpilogueTile;
typedef cutlass::Shape<1, 4, 64> StreamTile;
typedef float AccumulatorType;
typedef float ScalarC;
typedef cutlass::gemm::WmmaGemmEpilogueStructure<
ThreadBlockTile,
EpilogueTile,
StreamTile,
WarpTile,
WmmaTile
> Structure;
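// Note: the Structure typedef above is not referenced by the test body below.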
TestWmmaGemmEpilogue<
ThreadBlockTile,
WarpTile,
WmmaTile,
EpilogueTile,
StreamTile,
AccumulatorType,
ScalarC
>().run();
}
/// Large epilogue
TEST(WmmaGemm_128x128x16, wmma_epilogue_basic_32x32_16x16_ragged) {
// GEMM threadblock structure
typedef cutlass::Shape<16, 128, 128> ThreadBlockTile;
typedef cutlass::Shape<16, 32, 64> WarpTile;
typedef cutlass::Shape<16, 16, 16> WmmaTile;
// Epilogue shapes
typedef cutlass::Shape<1, 64, 64> EpilogueTile;
typedef cutlass::Shape<1, 4, 64> StreamTile;
typedef float AccumulatorType;
typedef float ScalarC;
typedef cutlass::gemm::WmmaGemmEpilogueStructure<
ThreadBlockTile,
EpilogueTile,
StreamTile,
WarpTile,
WmmaTile
> Structure;
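// Note: the Structure typedef above is not referenced by the test body below.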
TestWmmaGemmEpilogue<
ThreadBlockTile,
WarpTile,
WmmaTile,
EpilogueTile,
StreamTile,
AccumulatorType,
ScalarC
>().run(cutlass::make_Coord(0, 119, 101));
}
////////////////////////////////////////////////////////////////////////////////////////////////////
#endif // end guard conditional on SM70
|
391da143ca9c90117c0b1902dc8d55844adccabb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
void init(double *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
a[i] = i%3;
}
}
struct position {
int x;
int y;
};
/// convert a 2D position to a 1D (row-major) index
/// assumes the bottom-left corner of the image is (0,0), which maps to index 0
long get1dIndex( int width, int x, int y) {
return y * width + x;
}
/// inverse of the 2D-to-1D mapping function
/// recovers the x,y values from a 1D index and writes them into *pos
void get_Position( int width, int id, struct position *pos) {
int xx = 0;
int yy = 0;
// struct position pos;
xx = id / width;
yy = id % width;
pos->x = yy;
pos->y = xx;
// return pos;
}
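// Quick round-trip check of the two helpers above (illustrative values):
// for width = 4, get1dIndex(4, 1, 2) == 9, and get_Position(4, 9, &p)
// yields p.x == 1 and p.y == 2.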
__global__
void doubleElements(double *a, int N, int color )
{
/*
* Use a grid-stride loop so each thread does work
* on more than one element in the array.
*/
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
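// Note: the three color branches below perform the same update
// a[i] += 2*x + y*i (with x = i/4, y = i%4); the color argument only
// selects which of the identical branches runs.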
if ( color == 1 )
{
for (int i = idx; i < N; i += stride)
{
int x = i/4;
int y = i%4;
a[i] = a[i] + 2*x + y*i;
}
}
if ( color == 2 )
{
for (int i = idx; i < N; i += stride)
{
int x = i/4;
int y = i%4;
a[i] = a[i] + 2*x + y*i;
}
}
if ( color == 3 )
{
for (int i = idx; i < N; i += stride)
{
int x = i/4;
int y = i%4;
a[i] = a[i] + 2*x + y*i;
}
}
}
int main()
{
int N = 12;
double *red;
size_t size = N * sizeof(double);
hipMallocManaged(&red, size);
init(red, N);
double *green;
hipMallocManaged(&green, size);
init(green, N);
double *blue;
hipMallocManaged(&blue, size);
init(blue, N);
size_t threads_per_block = 256;
size_t number_of_blocks = 32;
hipLaunchKernelGGL(( doubleElements), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, red, N, 1);
hipLaunchKernelGGL(( doubleElements), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, blue, N, 2);
hipLaunchKernelGGL(( doubleElements), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, green, N, 3);
hipDeviceSynchronize();
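// The kernel launches and hipDeviceSynchronize() above are not error-checked.
// A minimal, illustrative check using the standard HIP error API would be:
// hipError_t err = hipDeviceSynchronize();
// if (err != hipSuccess) {
// fprintf(stderr, "HIP error: %s\n", hipGetErrorString(err));
// return 1;
// }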
printf("\nresult: ");
for ( int i = 0; i < N ; i ++ )
{
printf("%lf ", red[i]);
}
for ( int i = 0; i < N ; i ++ )
{
printf("%lf ", green[i]);
}
for ( int i = 0; i < N ; i ++ )
{
printf("%lf ", blue[i]);
}
hipFree(red);
hipFree(green);
hipFree(blue);
}
|
391da143ca9c90117c0b1902dc8d55844adccabb.cu
|
#include <stdio.h>
void init(double *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
a[i] = i%3;
}
}
struct position {
int x;
int y;
};
/// convert a 2D position to a 1D (row-major) index
/// assumes the bottom-left corner of the image is (0,0), which maps to index 0
long get1dIndex( int width, int x, int y) {
return y * width + x;
}
/// inverse of the 2D-to-1D mapping function
/// recovers the x,y values from a 1D index and writes them into *pos
void get_Position( int width, int id, struct position *pos) {
int xx = 0;
int yy = 0;
// struct position pos;
xx = id / width;
yy = id % width;
pos->x = yy;
pos->y = xx;
// return pos;
}
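// Quick round-trip check of the two helpers above (illustrative values):
// for width = 4, get1dIndex(4, 1, 2) == 9, and get_Position(4, 9, &p)
// yields p.x == 1 and p.y == 2.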
__global__
void doubleElements(double *a, int N, int color )
{
/*
* Use a grid-stride loop so each thread does work
* on more than one element in the array.
*/
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
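// Note: the three color branches below perform the same update
// a[i] += 2*x + y*i (with x = i/4, y = i%4); the color argument only
// selects which of the identical branches runs.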
if ( color == 1 )
{
for (int i = idx; i < N; i += stride)
{
int x = i/4;
int y = i%4;
a[i] = a[i] + 2*x + y*i;
}
}
if ( color == 2 )
{
for (int i = idx; i < N; i += stride)
{
int x = i/4;
int y = i%4;
a[i] = a[i] + 2*x + y*i;
}
}
if ( color == 3 )
{
for (int i = idx; i < N; i += stride)
{
int x = i/4;
int y = i%4;
a[i] = a[i] + 2*x + y*i;
}
}
}
int main()
{
int N = 12;
double *red;
size_t size = N * sizeof(double);
cudaMallocManaged(&red, size);
init(red, N);
double *green;
cudaMallocManaged(&green, size);
init(green, N);
double *blue;
cudaMallocManaged(&blue, size);
init(blue, N);
size_t threads_per_block = 256;
size_t number_of_blocks = 32;
doubleElements<<<number_of_blocks, threads_per_block>>>(red, N, 1);
doubleElements<<<number_of_blocks, threads_per_block>>>(blue, N, 2);
doubleElements<<<number_of_blocks, threads_per_block>>>(green, N, 3);
cudaDeviceSynchronize();
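// The kernel launches and cudaDeviceSynchronize() above are not error-checked.
// A minimal, illustrative check using the standard CUDA runtime error API would be:
// cudaError_t err = cudaDeviceSynchronize();
// if (err != cudaSuccess) {
// fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
// return 1;
// }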
printf("\nresult: ");
for ( int i = 0; i < N ; i ++ )
{
printf("%lf ", red[i]);
}
for ( int i = 0; i < N ; i ++ )
{
printf("%lf ", green[i]);
}
for ( int i = 0; i < N ; i ++ )
{
printf("%lf ", blue[i]);
}
cudaFree(red);
cudaFree(green);
cudaFree(blue);
}
|
f6ad66ad507fddc4d58c51bcfa7ec6704323a0c1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <math.h>
#include <time.h> // for time_t and time(), used by updateTimer()
#include <hdf5.h>
typedef struct N3 {
int x, y, z;
} N3;
typedef struct P3F3 {
float ***x, ***y, ***z;
} P3F3;
typedef struct P1F3 {
float *x, *y, *z;
} P1F3;
__host__ void updateTimer(time_t t0, int tstep, char str[]) {
int elapsedTime=(int)(time(0)-t0);
sprintf(str, "%02d:%02d:%02d", elapsedTime/3600, elapsedTime%3600/60, elapsedTime%60);
}
__host__ void exec(char *format, ...) {
char str[1024];
va_list ap;
va_start(ap, format);
vsprintf(str, format, ap);
system(str);
}
__host__ void dumpToH5(int Ni, int Nj, int Nk, int is, int js, int ks, int ie, int je, int ke, float ***f, char *format, ...) {
char filename[1024];
va_list ap;
va_start(ap, format);
vsprintf(filename, format, ap);
hid_t file, dataset, filespace, memspace;
hsize_t dimsm[3] = { Ni, Nj, Nk };
hsize_t start[3] = { is, js, ks };
hsize_t count[3] = { 1-is+ie, 1-js+je, 1-ks+ke };
memspace = H5Screate_simple(3, dimsm, 0);
filespace = H5Screate_simple(3, count, 0);
file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
dataset = H5Dcreate(file, "Data", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT);
H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start, 0, count, 0);
H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace, filespace, H5P_DEFAULT, f[0][0]);
H5Dclose(dataset);
H5Sclose(filespace);
H5Sclose(memspace);
H5Fclose(file);
}
__host__ void print_array(N3 N, float ***a) {
int j,k;
for (j=0; j<N.y; j++) {
for (k=0; k<N.z; k++) {
printf("%1.4f\t", a[N.x/2][j][k]);
}
printf("\n");
}
printf("\n");
}
__host__ float ***makeArray(N3 N) {
float ***f;
f = (float ***) calloc (N.x, sizeof(float **));
f[0] = (float **) calloc (N.y*N.x, sizeof(float *));
f[0][0] = (float *) calloc (N.z*N.y*N.x, sizeof(float));
for (int i=0; i<N.x; i++) f[i] = f[0] + i*N.y;
for (int i=0; i<N.y*N.x; i++) f[0][i] = f[0][0] + i*N.z;
return f;
}
__host__ void set_geometry(N3 N, P3F3 CE) {
int i,j,k;
for (i=0; i<N.x; i++) {
for (j=0; j<N.y; j++) {
for (k=0; k<N.z; k++) {
CE.x[i][j][k] = 0.5;
CE.y[i][j][k] = 0.5;
CE.z[i][j][k] = 0.5;
}
}
}
}
__global__ void initArrays(N3 N, int Nzpit, P1F3 E, P1F3 H) {
int idx;
idx = blockIdx.x*blockDim.x + threadIdx.x;
//printf("gridDim.x=%d\n",gridDim.x);
//printf("blockIdx.x=%d, blockDim.x=%d, threadIdx.x=%d\n", blockIdx.x, blockDim.x, threadIdx.x);
if ( idx < N.x*N.y*Nzpit ) {
E.x[idx] = 0;
E.y[idx] = 0;
E.z[idx] = 0;
H.x[idx] = 0;
H.y[idx] = 0;
H.z[idx] = 0;
}
}
__global__ void updateE(N3 N, int Nzpit, N3 Dg, N3 Db, P1F3 E, P1F3 H, P1F3 CE) {
int bk;
int tk, tj, ti;
int i, j, k;
bk = blockIdx.x;
tk = threadIdx.x;
tj = threadIdx.y;
ti = threadIdx.z;
k = Db.x*( bk%Dg.x ) + tk;
j = Db.y*( (bk/Dg.x)%Dg.y ) + tj;
i = Db.z*( bk/(Dg.x*Dg.y) ) + ti;
//printf("bk(%d),\tbk%Dg.x (%d),\t(bk/Dg.x)%Dg.y (%d),\tbk/(Dg.x*Dg.y) (%d)\n", bk, bk%Dg.x, (bk/Dg.x)%Dg.y, bk/(Dg.x*Dg.y) );
//printf("blockIdx(%d),\tthreadIdx(%d,%d,%d),\tkji(%d,%d,%d)\n", bk, tk, tj, ti, k, j, i);
int Bx = Db.x+1;
int By = Db.x+1;
int Bz = Db.x;
int BPx = (Db.y+1)*(Db.x+1);
int BPy = (Db.y)*(Db.x+1);
int BPz = (Db.y+1)*(Db.x);
int Nyzpit = N.y*Nzpit;
int idx = k + Nzpit*j + Nyzpit*i;
extern __shared__ float hs[];
float* hx = (float*)hs;
float* hy = (float*)&hx[(Db.z)*BPx];
float* hz = (float*)&hy[(Db.z+1)*BPy];
if ( i<N.x && j<N.y && k<N.z ) {
//printf("(%d),\t(%d,%d,%d),\t(%d,%d,%d),\t%d\n", bk, tk, tj, ti, k, j, i, idx);
//__shared__ float hx[(TPBz)*(TPBy+1)*(TPBx+1)];
//__shared__ float hy[(TPBz+1)*(TPBy)*(TPBx+1)];
//__shared__ float hz[(TPBz+1)*(TPBy+1)*(TPBx)];
hx[tk + Bx*tj + BPx*ti] = H.x[idx];
hy[tk + By*tj + BPy*ti] = H.y[idx];
hz[tk + Bz*tj + BPz*ti] = H.z[idx];
if ( tk==Db.x-1 && k<N.z-1 ) {
hx[(tk+1) + Bx*tj + BPx*ti] = H.x[idx+1];
hy[(tk+1) + By*tj + BPy*ti] = H.y[idx+1];
}
if ( tj==Db.y-1 && j<N.y-1 ) {
hx[tk + Bx*(tj+1) + BPx*ti] = H.x[idx+Nzpit];
hz[tk + Bz*(tj+1) + BPz*ti] = H.z[idx+Nzpit];
}
if ( ti==Db.z-1 && i<N.x-1 ) {
hy[tk + By*tj + BPy*(ti+1)] = H.y[idx+Nyzpit];
hz[tk + Bz*tj + BPz*(ti+1)] = H.z[idx+Nyzpit];
}
}
__syncthreads();
if ( i<N.x && j<N.y && k<N.z ) {
if ( j<N.y-1 && k<N.z-1 )
E.x[idx] += CE.x[idx]*(
hz[tk + Bz*(tj+1) + BPz*ti]
- hz[tk + Bz*tj + BPz*ti]
- hy[(tk+1) + By*tj + BPy*ti]
+ hy[tk + By*tj + BPy*ti] );
if ( i<N.x-1 && k<N.z-1 )
E.y[idx] += CE.y[idx]*(
hx[(tk+1) + Bx*tj + BPx*ti]
- hx[tk + Bx*tj + BPx*ti]
- hz[tk + Bz*tj + BPz*(ti+1)]
+ hz[tk + Bz*tj + BPz*ti] );
if ( i<N.x-1 && j<N.y-1 )
E.z[idx] += CE.z[idx]*(
hy[tk + By*tj + BPy*(ti+1)]
- hy[tk + By*tj + BPy*ti]
- hx[tk + Bx*(tj+1) + BPx*ti]
+ hx[tk + Bx*tj + BPx*ti] );
}
}
__global__ void updateSrc(N3 N, int Nzpit, P1F3 E, int tstep) {
int idx, ijk;
idx = blockIdx.x*blockDim.x + threadIdx.x;
ijk = idx*(N.y)*(Nzpit) + (N.y/2)*(Nzpit) + (N.z/2);
//printf("idx=%d, ijk=%d\n", idx, ijk);
//E.x[ijk] += __sinf(0.1*tstep);
if ( idx < N.x ) {
E.x[ijk] += sin(0.1*tstep);
}
}
__global__ void updateH(N3 N, int Nzpit, N3 Dg, N3 Db, P1F3 E, P1F3 H) {
int bk;
int tk, tj, ti;
int i, j, k;
bk = blockIdx.x;
tk = threadIdx.x;
tj = threadIdx.y;
ti = threadIdx.z;
k = Db.x*( bk%Dg.x ) + tk;
j = Db.y*( (bk/Dg.x)%Dg.y ) + tj;
i = Db.z*( bk/(Dg.x*Dg.y) ) + ti;
int Bx = Db.x+1;
int By = Db.x+1;
int Bz = Db.x;
int BPx = (Db.y+1)*(Db.x+1);
int BPy = (Db.y)*(Db.x+1);
int BPz = (Db.y+1)*(Db.x);
int Nyzpit = N.y*Nzpit;
int idx = k + Nzpit*j + Nyzpit*i;
extern __shared__ float es[];
float* ex = (float*)es;
float* ey = (float*)&ex[(Db.z)*BPx];
float* ez = (float*)&ey[(Db.z+1)*BPy];
if ( i<N.x && j<N.y && k<N.z ) {
ex[(tk+1) + Bx*(tj+1) + BPx*ti] = E.x[idx];
ey[(tk+1) + By*tj + BPy*(ti+1)] = E.y[idx];
ez[tk + Bz*(tj+1) + BPz*(ti+1)] = E.z[idx];
if ( tk==0 && k>0 ) {
ex[Bx*(tj+1) + BPx*ti] = E.x[idx-1];
ey[By*tj + BPy*(ti+1)] = E.y[idx-1];
}
if ( tj==0 && j>0 ) {
ex[(tk+1) + BPx*ti] = E.x[idx-Nzpit];
ez[tk + BPz*(ti+1)] = E.z[idx-Nzpit];
}
if ( ti==0 && i>0 ) {
ey[(tk+1) + By*tj] = E.y[idx-Nyzpit];
ez[tk + Bz*(tj+1)] = E.z[idx-Nyzpit];
}
}
__syncthreads();
if ( i<N.x && j<N.y && k<N.z ) {
if ( j>0 && k>0 )
H.x[idx] -= 0.5*(
ez[tk + Bz*(tj+1) + BPz*(ti+1)]
- ez[tk + Bz*tj + BPz*(ti+1)]
- ey[(tk+1) + By*tj + BPy*(ti+1)]
+ ey[tk + By*tj + BPy*(ti+1)] );
if ( i>0 && k>0 )
H.y[idx] -= 0.5*(
ex[(tk+1) + Bx*(tj+1) + BPx*ti]
- ex[tk + Bx*(tj+1) + BPx*ti]
- ez[tk + Bz*(tj+1) + BPz*(ti+1)]
+ ez[tk + Bz*(tj+1) + BPz*ti] );
if ( i>0 && j>0 )
H.z[idx] -= 0.5*(
ey[(tk+1) + By*tj + BPy*(ti+1)]
- ey[(tk+1) + By*tj + BPy*ti]
- ex[(tk+1) + Bx*(tj+1) + BPx*ti]
+ ex[(tk+1) + Bx*tj + BPx*ti] );
}
}
int main() {
int tstep;
char time_str[32];
time_t t0;
// Set the parameters
N3 N;
N.x = 100;
N.y = 200;
N.z = 500;
//N.y = 16;
//N.z = 20;
int TMAX = 1000;
printf("N(%d,%d,%d), TMAX=%d\n", N.x, N.y, N.z, TMAX);
// Allocate host memory
float ***Ex;
P3F3 CE;
Ex = makeArray(N);
CE.x = makeArray(N);
CE.y = makeArray(N);
CE.z = makeArray(N);
// Geometry
set_geometry(N, CE);
// Allocate device memory
P1F3 devE;
P1F3 devH;
P1F3 devCE;
int z_size = N.z*sizeof(float);
size_t pitch;
hipMallocPitch ( (void**) &devE.x, &pitch, z_size, N.x*N.y );
hipMallocPitch ( (void**) &devE.y, &pitch, z_size, N.x*N.y );
hipMallocPitch ( (void**) &devE.z, &pitch, z_size, N.x*N.y );
hipMallocPitch ( (void**) &devH.x, &pitch, z_size, N.x*N.y );
hipMallocPitch ( (void**) &devH.y, &pitch, z_size, N.x*N.y );
hipMallocPitch ( (void**) &devH.z, &pitch, z_size, N.x*N.y );
hipMallocPitch ( (void**) &devCE.x, &pitch, z_size, N.x*N.y );
hipMallocPitch ( (void**) &devCE.y, &pitch, z_size, N.x*N.y );
hipMallocPitch ( (void**) &devCE.z, &pitch, z_size, N.x*N.y );
// Copy arrays from host to device
hipMemcpy2D ( devCE.x, pitch, CE.x[0][0], z_size, z_size, N.x*N.y, hipMemcpyHostToDevice );
hipMemcpy2D ( devCE.y, pitch, CE.y[0][0], z_size, z_size, N.x*N.y, hipMemcpyHostToDevice );
hipMemcpy2D ( devCE.z, pitch, CE.z[0][0], z_size, z_size, N.x*N.y, hipMemcpyHostToDevice );
int Nz_pitch = pitch/4;
printf("pitch= %u, Nz_pitch= %d\n", pitch, Nz_pitch);
// Set the GPU parameters
N3 TPB; // Number of threads per block
TPB.x = 16;
TPB.y = 4;
TPB.z = 4;
N3 BPG; // Number of thread blocks per grid
BPG.x = Nz_pitch/TPB.x;
BPG.y = N.y%TPB.y == 0 ? N.y/TPB.y : N.y/TPB.y + 1;
BPG.z = N.x%TPB.z == 0 ? N.x/TPB.z : N.x/TPB.z + 1;
dim3 Dg = dim3(BPG.x*BPG.y*BPG.z);
dim3 Db = dim3(TPB.x, TPB.y, TPB.z);
//dim3 Dg = dim3(20);
//dim3 Db = dim3(16,3,4);
size_t Ns = sizeof(float)*(
(TPB.z)*(TPB.y+1)*(TPB.x+1) +
(TPB.z+1)*(TPB.y)*(TPB.x+1) +
(TPB.z+1)*(TPB.y+1)*(TPB.x) );
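// Ns is the per-block dynamic shared memory: the three terms match the padded
// tiles carved out of the extern __shared__ buffer in updateE()/updateH():
// hx/ex uses TPB.z*(TPB.y+1)*(TPB.x+1) floats, hy/ey uses (TPB.z+1)*TPB.y*(TPB.x+1),
// and hz/ez uses (TPB.z+1)*(TPB.y+1)*TPB.x.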
printf("Threads per block: %d (%d,%d,%d)\n", TPB.x*TPB.y*TPB.z, TPB.x, TPB.y, TPB.z);
if ( TPB.x*TPB.y*TPB.z > 512 ) {
printf("Error: An excessive number of threads per block.\n");
exit(1);
}
printf("Blocks per grid: %d (%d,%d,%d)\n", BPG.x*BPG.y*BPG.z, BPG.x, BPG.y, BPG.z);
if ( BPG.x*BPG.y*BPG.z > 65535 ) {
printf("Error: An excessive number of blocks per grid.\n");
exit(1);
}
printf("Number of bytes in shared memory: %d\n", Ns);
int TPBsrc = N.x;
int BPGsrc = 1;
dim3 Dgsrc(BPGsrc);
dim3 Dbsrc(TPBsrc);
int Ntot = N.x*N.y*Nz_pitch;
int TPBinit = Nz_pitch;
int BPGinit = Ntot%TPBinit == 0 ? Ntot/TPBinit : Ntot/TPBinit + 1;
dim3 Dginit(BPGinit);
dim3 Dbinit(TPBinit);
// Initialize the device arrays
hipLaunchKernelGGL(( initArrays) , dim3(Dginit),dim3(Dbinit), 0, 0, N, Nz_pitch, devE, devH );
// Main time loop
t0 = time(0);
//for ( tstep=1; tstep<=TMAX; tstep++) {
for ( tstep=1; tstep<=500; tstep++) {
// Update on the GPU
hipLaunchKernelGGL(( updateE) , dim3(Dg),dim3(Db),Ns, 0, N, Nz_pitch, BPG, TPB, devE, devH, devCE );
hipLaunchKernelGGL(( updateSrc) , dim3(Dgsrc),dim3(Dbsrc), 0, 0, N, Nz_pitch, devE, tstep );
hipLaunchKernelGGL(( updateH) , dim3(Dg),dim3(Db),Ns, 0, N, Nz_pitch, BPG, TPB, devE, devH );
if ( tstep/10*10 == tstep ) {
// Copy arrays from device to host
hipMemcpy2D( Ex[0][0], z_size, devE.x, pitch, z_size, N.x*N.y, hipMemcpyDeviceToHost );
//print_array(N, Ex);
dumpToH5(N.x, N.y, N.z, N.x/2, 0, 0, N.x/2, N.y-1, N.z-1, Ex, "gpu_png/Ex-%05d.h5", tstep);
exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ex-%05d.h5", tstep);
updateTimer(t0, tstep, time_str);
printf("tstep=%d\t%s\n", tstep, time_str);
}
}
updateTimer(t0, tstep, time_str);
printf("tstep=%d\t%s\n", tstep, time_str);
}
|
f6ad66ad507fddc4d58c51bcfa7ec6704323a0c1.cu
|
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <math.h>
#include <time.h> // for time_t and time(), used by updateTimer()
#include <hdf5.h>
typedef struct N3 {
int x, y, z;
} N3;
typedef struct P3F3 {
float ***x, ***y, ***z;
} P3F3;
typedef struct P1F3 {
float *x, *y, *z;
} P1F3;
__host__ void updateTimer(time_t t0, int tstep, char str[]) {
int elapsedTime=(int)(time(0)-t0);
sprintf(str, "%02d:%02d:%02d", elapsedTime/3600, elapsedTime%3600/60, elapsedTime%60);
}
__host__ void exec(char *format, ...) {
char str[1024];
va_list ap;
va_start(ap, format);
vsprintf(str, format, ap);
system(str);
}
__host__ void dumpToH5(int Ni, int Nj, int Nk, int is, int js, int ks, int ie, int je, int ke, float ***f, char *format, ...) {
char filename[1024];
va_list ap;
va_start(ap, format);
vsprintf(filename, format, ap);
hid_t file, dataset, filespace, memspace;
hsize_t dimsm[3] = { Ni, Nj, Nk };
hsize_t start[3] = { is, js, ks };
hsize_t count[3] = { 1-is+ie, 1-js+je, 1-ks+ke };
memspace = H5Screate_simple(3, dimsm, 0);
filespace = H5Screate_simple(3, count, 0);
file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
dataset = H5Dcreate(file, "Data", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT);
H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start, 0, count, 0);
H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace, filespace, H5P_DEFAULT, f[0][0]);
H5Dclose(dataset);
H5Sclose(filespace);
H5Sclose(memspace);
H5Fclose(file);
}
__host__ void print_array(N3 N, float ***a) {
int j,k;
for (j=0; j<N.y; j++) {
for (k=0; k<N.z; k++) {
printf("%1.4f\t", a[N.x/2][j][k]);
}
printf("\n");
}
printf("\n");
}
__host__ float ***makeArray(N3 N) {
float ***f;
f = (float ***) calloc (N.x, sizeof(float **));
f[0] = (float **) calloc (N.y*N.x, sizeof(float *));
f[0][0] = (float *) calloc (N.z*N.y*N.x, sizeof(float));
for (int i=0; i<N.x; i++) f[i] = f[0] + i*N.y;
for (int i=0; i<N.y*N.x; i++) f[0][i] = f[0][0] + i*N.z;
return f;
}
__host__ void set_geometry(N3 N, P3F3 CE) {
int i,j,k;
for (i=0; i<N.x; i++) {
for (j=0; j<N.y; j++) {
for (k=0; k<N.z; k++) {
CE.x[i][j][k] = 0.5;
CE.y[i][j][k] = 0.5;
CE.z[i][j][k] = 0.5;
}
}
}
}
__global__ void initArrays(N3 N, int Nzpit, P1F3 E, P1F3 H) {
int idx;
idx = blockIdx.x*blockDim.x + threadIdx.x;
//printf("gridDim.x=%d\n",gridDim.x);
//printf("blockIdx.x=%d, blockDim.x=%d, threadIdx.x=%d\n", blockIdx.x, blockDim.x, threadIdx.x);
if ( idx < N.x*N.y*Nzpit ) {
E.x[idx] = 0;
E.y[idx] = 0;
E.z[idx] = 0;
H.x[idx] = 0;
H.y[idx] = 0;
H.z[idx] = 0;
}
}
__global__ void updateE(N3 N, int Nzpit, N3 Dg, N3 Db, P1F3 E, P1F3 H, P1F3 CE) {
int bk;
int tk, tj, ti;
int i, j, k;
bk = blockIdx.x;
tk = threadIdx.x;
tj = threadIdx.y;
ti = threadIdx.z;
k = Db.x*( bk%Dg.x ) + tk;
j = Db.y*( (bk/Dg.x)%Dg.y ) + tj;
i = Db.z*( bk/(Dg.x*Dg.y) ) + ti;
//printf("bk(%d),\tbk%Dg.x (%d),\t(bk/Dg.x)%Dg.y (%d),\tbk/(Dg.x*Dg.y) (%d)\n", bk, bk%Dg.x, (bk/Dg.x)%Dg.y, bk/(Dg.x*Dg.y) );
//printf("blockIdx(%d),\tthreadIdx(%d,%d,%d),\tkji(%d,%d,%d)\n", bk, tk, tj, ti, k, j, i);
int Bx = Db.x+1;
int By = Db.x+1;
int Bz = Db.x;
int BPx = (Db.y+1)*(Db.x+1);
int BPy = (Db.y)*(Db.x+1);
int BPz = (Db.y+1)*(Db.x);
int Nyzpit = N.y*Nzpit;
int idx = k + Nzpit*j + Nyzpit*i;
extern __shared__ float hs[];
float* hx = (float*)hs;
float* hy = (float*)&hx[(Db.z)*BPx];
float* hz = (float*)&hy[(Db.z+1)*BPy];
if ( i<N.x && j<N.y && k<N.z ) {
//printf("(%d),\t(%d,%d,%d),\t(%d,%d,%d),\t%d\n", bk, tk, tj, ti, k, j, i, idx);
//__shared__ float hx[(TPBz)*(TPBy+1)*(TPBx+1)];
//__shared__ float hy[(TPBz+1)*(TPBy)*(TPBx+1)];
//__shared__ float hz[(TPBz+1)*(TPBy+1)*(TPBx)];
hx[tk + Bx*tj + BPx*ti] = H.x[idx];
hy[tk + By*tj + BPy*ti] = H.y[idx];
hz[tk + Bz*tj + BPz*ti] = H.z[idx];
if ( tk==Db.x-1 && k<N.z-1 ) {
hx[(tk+1) + Bx*tj + BPx*ti] = H.x[idx+1];
hy[(tk+1) + By*tj + BPy*ti] = H.y[idx+1];
}
if ( tj==Db.y-1 && j<N.y-1 ) {
hx[tk + Bx*(tj+1) + BPx*ti] = H.x[idx+Nzpit];
hz[tk + Bz*(tj+1) + BPz*ti] = H.z[idx+Nzpit];
}
if ( ti==Db.z-1 && i<N.x-1 ) {
hy[tk + By*tj + BPy*(ti+1)] = H.y[idx+Nyzpit];
hz[tk + Bz*tj + BPz*(ti+1)] = H.z[idx+Nyzpit];
}
}
__syncthreads();
if ( i<N.x && j<N.y && k<N.z ) {
if ( j<N.y-1 && k<N.z-1 )
E.x[idx] += CE.x[idx]*(
hz[tk + Bz*(tj+1) + BPz*ti]
- hz[tk + Bz*tj + BPz*ti]
- hy[(tk+1) + By*tj + BPy*ti]
+ hy[tk + By*tj + BPy*ti] );
if ( i<N.x-1 && k<N.z-1 )
E.y[idx] += CE.y[idx]*(
hx[(tk+1) + Bx*tj + BPx*ti]
- hx[tk + Bx*tj + BPx*ti]
- hz[tk + Bz*tj + BPz*(ti+1)]
+ hz[tk + Bz*tj + BPz*ti] );
if ( i<N.x-1 && j<N.y-1 )
E.z[idx] += CE.z[idx]*(
hy[tk + By*tj + BPy*(ti+1)]
- hy[tk + By*tj + BPy*ti]
- hx[tk + Bx*(tj+1) + BPx*ti]
+ hx[tk + Bx*tj + BPx*ti] );
}
}
__global__ void updateSrc(N3 N, int Nzpit, P1F3 E, int tstep) {
int idx, ijk;
idx = blockIdx.x*blockDim.x + threadIdx.x;
ijk = idx*(N.y)*(Nzpit) + (N.y/2)*(Nzpit) + (N.z/2);
//printf("idx=%d, ijk=%d\n", idx, ijk);
//E.x[ijk] += __sinf(0.1*tstep);
if ( idx < N.x ) {
E.x[ijk] += sin(0.1*tstep);
}
}
__global__ void updateH(N3 N, int Nzpit, N3 Dg, N3 Db, P1F3 E, P1F3 H) {
int bk;
int tk, tj, ti;
int i, j, k;
bk = blockIdx.x;
tk = threadIdx.x;
tj = threadIdx.y;
ti = threadIdx.z;
k = Db.x*( bk%Dg.x ) + tk;
j = Db.y*( (bk/Dg.x)%Dg.y ) + tj;
i = Db.z*( bk/(Dg.x*Dg.y) ) + ti;
int Bx = Db.x+1;
int By = Db.x+1;
int Bz = Db.x;
int BPx = (Db.y+1)*(Db.x+1);
int BPy = (Db.y)*(Db.x+1);
int BPz = (Db.y+1)*(Db.x);
int Nyzpit = N.y*Nzpit;
int idx = k + Nzpit*j + Nyzpit*i;
extern __shared__ float es[];
float* ex = (float*)es;
float* ey = (float*)&ex[(Db.z)*BPx];
float* ez = (float*)&ey[(Db.z+1)*BPy];
if ( i<N.x && j<N.y && k<N.z ) {
ex[(tk+1) + Bx*(tj+1) + BPx*ti] = E.x[idx];
ey[(tk+1) + By*tj + BPy*(ti+1)] = E.y[idx];
ez[tk + Bz*(tj+1) + BPz*(ti+1)] = E.z[idx];
if ( tk==0 && k>0 ) {
ex[Bx*(tj+1) + BPx*ti] = E.x[idx-1];
ey[By*tj + BPy*(ti+1)] = E.y[idx-1];
}
if ( tj==0 && j>0 ) {
ex[(tk+1) + BPx*ti] = E.x[idx-Nzpit];
ez[tk + BPz*(ti+1)] = E.z[idx-Nzpit];
}
if ( ti==0 && i>0 ) {
ey[(tk+1) + By*tj] = E.y[idx-Nyzpit];
ez[tk + Bz*(tj+1)] = E.z[idx-Nyzpit];
}
}
__syncthreads();
if ( i<N.x && j<N.y && k<N.z ) {
if ( j>0 && k>0 )
H.x[idx] -= 0.5*(
ez[tk + Bz*(tj+1) + BPz*(ti+1)]
- ez[tk + Bz*tj + BPz*(ti+1)]
- ey[(tk+1) + By*tj + BPy*(ti+1)]
+ ey[tk + By*tj + BPy*(ti+1)] );
if ( i>0 && k>0 )
H.y[idx] -= 0.5*(
ex[(tk+1) + Bx*(tj+1) + BPx*ti]
- ex[tk + Bx*(tj+1) + BPx*ti]
- ez[tk + Bz*(tj+1) + BPz*(ti+1)]
+ ez[tk + Bz*(tj+1) + BPz*ti] );
if ( i>0 && j>0 )
H.z[idx] -= 0.5*(
ey[(tk+1) + By*tj + BPy*(ti+1)]
- ey[(tk+1) + By*tj + BPy*ti]
- ex[(tk+1) + Bx*(tj+1) + BPx*ti]
+ ex[(tk+1) + Bx*tj + BPx*ti] );
}
}
int main() {
int tstep;
char time_str[32];
time_t t0;
// Set the parameters
N3 N;
N.x = 100;
N.y = 200;
N.z = 500;
//N.y = 16;
//N.z = 20;
int TMAX = 1000;
printf("N(%d,%d,%d), TMAX=%d\n", N.x, N.y, N.z, TMAX);
// Allocate host memory
float ***Ex;
P3F3 CE;
Ex = makeArray(N);
CE.x = makeArray(N);
CE.y = makeArray(N);
CE.z = makeArray(N);
// Geometry
set_geometry(N, CE);
// Allocate device memory
P1F3 devE;
P1F3 devH;
P1F3 devCE;
int z_size = N.z*sizeof(float);
size_t pitch;
cudaMallocPitch ( (void**) &devE.x, &pitch, z_size, N.x*N.y );
cudaMallocPitch ( (void**) &devE.y, &pitch, z_size, N.x*N.y );
cudaMallocPitch ( (void**) &devE.z, &pitch, z_size, N.x*N.y );
cudaMallocPitch ( (void**) &devH.x, &pitch, z_size, N.x*N.y );
cudaMallocPitch ( (void**) &devH.y, &pitch, z_size, N.x*N.y );
cudaMallocPitch ( (void**) &devH.z, &pitch, z_size, N.x*N.y );
cudaMallocPitch ( (void**) &devCE.x, &pitch, z_size, N.x*N.y );
cudaMallocPitch ( (void**) &devCE.y, &pitch, z_size, N.x*N.y );
cudaMallocPitch ( (void**) &devCE.z, &pitch, z_size, N.x*N.y );
// Copy arrays from host to device
cudaMemcpy2D ( devCE.x, pitch, CE.x[0][0], z_size, z_size, N.x*N.y, cudaMemcpyHostToDevice );
cudaMemcpy2D ( devCE.y, pitch, CE.y[0][0], z_size, z_size, N.x*N.y, cudaMemcpyHostToDevice );
cudaMemcpy2D ( devCE.z, pitch, CE.z[0][0], z_size, z_size, N.x*N.y, cudaMemcpyHostToDevice );
int Nz_pitch = pitch/4;
printf("pitch= %u, Nz_pitch= %d\n", pitch, Nz_pitch);
// Set the GPU parameters
N3 TPB; // Number of threads per block
TPB.x = 16;
TPB.y = 4;
TPB.z = 4;
N3 BPG; // Number of thread blocks per grid
BPG.x = Nz_pitch/TPB.x;
BPG.y = N.y%TPB.y == 0 ? N.y/TPB.y : N.y/TPB.y + 1;
BPG.z = N.x%TPB.z == 0 ? N.x/TPB.z : N.x/TPB.z + 1;
dim3 Dg = dim3(BPG.x*BPG.y*BPG.z);
dim3 Db = dim3(TPB.x, TPB.y, TPB.z);
//dim3 Dg = dim3(20);
//dim3 Db = dim3(16,3,4);
size_t Ns = sizeof(float)*(
(TPB.z)*(TPB.y+1)*(TPB.x+1) +
(TPB.z+1)*(TPB.y)*(TPB.x+1) +
(TPB.z+1)*(TPB.y+1)*(TPB.x) );
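// Ns is the per-block dynamic shared memory: the three terms match the padded
// tiles carved out of the extern __shared__ buffer in updateE()/updateH():
// hx/ex uses TPB.z*(TPB.y+1)*(TPB.x+1) floats, hy/ey uses (TPB.z+1)*TPB.y*(TPB.x+1),
// and hz/ez uses (TPB.z+1)*(TPB.y+1)*TPB.x.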
printf("Threads per block: %d (%d,%d,%d)\n", TPB.x*TPB.y*TPB.z, TPB.x, TPB.y, TPB.z);
if ( TPB.x*TPB.y*TPB.z > 512 ) {
printf("Error: An excessive number of threads per block.\n");
exit(1);
}
printf("Blocks per grid: %d (%d,%d,%d)\n", BPG.x*BPG.y*BPG.z, BPG.x, BPG.y, BPG.z);
if ( BPG.x*BPG.y*BPG.z > 65535 ) {
printf("Error: An excessive number of blocks per grid.\n");
exit(1);
}
printf("Number of bytes in shared memory: %d\n", Ns);
int TPBsrc = N.x;
int BPGsrc = 1;
dim3 Dgsrc(BPGsrc);
dim3 Dbsrc(TPBsrc);
int Ntot = N.x*N.y*Nz_pitch;
int TPBinit = Nz_pitch;
int BPGinit = Ntot%TPBinit == 0 ? Ntot/TPBinit : Ntot/TPBinit + 1;
dim3 Dginit(BPGinit);
dim3 Dbinit(TPBinit);
// Initialize the device arrays
initArrays <<<Dginit,Dbinit>>> ( N, Nz_pitch, devE, devH );
// Main time loop
t0 = time(0);
//for ( tstep=1; tstep<=TMAX; tstep++) {
for ( tstep=1; tstep<=500; tstep++) {
// Update on the GPU
updateE <<<Dg,Db,Ns>>> ( N, Nz_pitch, BPG, TPB, devE, devH, devCE );
updateSrc <<<Dgsrc,Dbsrc>>> ( N, Nz_pitch, devE, tstep );
updateH <<<Dg,Db,Ns>>> ( N, Nz_pitch, BPG, TPB, devE, devH );
if ( tstep/10*10 == tstep ) {
// Copy arrays from device to host
cudaMemcpy2D( Ex[0][0], z_size, devE.x, pitch, z_size, N.x*N.y, cudaMemcpyDeviceToHost );
//print_array(N, Ex);
dumpToH5(N.x, N.y, N.z, N.x/2, 0, 0, N.x/2, N.y-1, N.z-1, Ex, "gpu_png/Ex-%05d.h5", tstep);
exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ex-%05d.h5", tstep);
updateTimer(t0, tstep, time_str);
printf("tstep=%d\t%s\n", tstep, time_str);
}
}
updateTimer(t0, tstep, time_str);
printf("tstep=%d\t%s\n", tstep, time_str);
}
|
ff497642cc57e79766f8ab9a2d5167073b7042bd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_ROCM
#include "dragon/core/context_cuda.h"
#include "dragon/utils/math_functions.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernels {
namespace {
template <typename T, bool kUpper>
__global__ void
_SetEye(const int nthreads, const int M, const int N, const int k, T* y) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
const int i = index % M;
if (kUpper) {
const int j = i + k;
y[index * N + min(j, N - 1)] =
j < N ? convert::To<T>(1.f) : convert::To<T>(0.f);
} else {
const int j = i - k;
y[index * N + max(j, 0)] =
j < 0 ? convert::To<T>(0.f) : convert::To<T>(1.f);
}
}
}
} // namespace
/* ------------------- Launcher Separator ------------------- */
#define DEFINE_KERNEL_LAUNCHER(T) \
template <> \
void SetEye<T, CUDAContext>( \
const int batch_size, \
const int M, \
const int N, \
const int k, \
T* y, \
CUDAContext* ctx) { \
const auto nthreads = batch_size * M; \
math::Set(nthreads* N, convert::To<T>(0.f), y, ctx); \
if (k > 0) { \
hipLaunchKernelGGL(( _SetEye<T, true>) \
, dim3(CUDA_BLOCKS(nthreads)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \
nthreads, M, N, k, y); \
} else { \
hipLaunchKernelGGL(( _SetEye<T, false>) \
, dim3(CUDA_BLOCKS(nthreads)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \
nthreads, M, N, -k, y); \
} \
}
DEFINE_KERNEL_LAUNCHER(bool);
DEFINE_KERNEL_LAUNCHER(uint8_t);
DEFINE_KERNEL_LAUNCHER(int8_t);
DEFINE_KERNEL_LAUNCHER(int);
DEFINE_KERNEL_LAUNCHER(int64_t);
DEFINE_KERNEL_LAUNCHER(float16);
DEFINE_KERNEL_LAUNCHER(float);
DEFINE_KERNEL_LAUNCHER(double);
#undef DEFINE_KERNEL_LAUNCHER
} // namespace kernels
} // namespace dragon
#endif // USE_ROCM
|
ff497642cc57e79766f8ab9a2d5167073b7042bd.cu
|
#ifdef USE_CUDA
#include "dragon/core/context_cuda.h"
#include "dragon/utils/math_functions.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernels {
namespace {
template <typename T, bool kUpper>
__global__ void
_SetEye(const int nthreads, const int M, const int N, const int k, T* y) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
const int i = index % M;
if (kUpper) {
const int j = i + k;
y[index * N + min(j, N - 1)] =
j < N ? convert::To<T>(1.f) : convert::To<T>(0.f);
} else {
const int j = i - k;
y[index * N + max(j, 0)] =
j < 0 ? convert::To<T>(0.f) : convert::To<T>(1.f);
}
}
}
} // namespace
/* ------------------- Launcher Separator ------------------- */
#define DEFINE_KERNEL_LAUNCHER(T) \
template <> \
void SetEye<T, CUDAContext>( \
const int batch_size, \
const int M, \
const int N, \
const int k, \
T* y, \
CUDAContext* ctx) { \
const auto nthreads = batch_size * M; \
math::Set(nthreads* N, convert::To<T>(0.f), y, ctx); \
if (k > 0) { \
_SetEye<T, true> \
<<<CUDA_BLOCKS(nthreads), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \
nthreads, M, N, k, y); \
} else { \
_SetEye<T, false> \
<<<CUDA_BLOCKS(nthreads), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \
nthreads, M, N, -k, y); \
} \
}
DEFINE_KERNEL_LAUNCHER(bool);
DEFINE_KERNEL_LAUNCHER(uint8_t);
DEFINE_KERNEL_LAUNCHER(int8_t);
DEFINE_KERNEL_LAUNCHER(int);
DEFINE_KERNEL_LAUNCHER(int64_t);
DEFINE_KERNEL_LAUNCHER(float16);
DEFINE_KERNEL_LAUNCHER(float);
DEFINE_KERNEL_LAUNCHER(double);
#undef DEFINE_KERNEL_LAUNCHER
} // namespace kernels
} // namespace dragon
#endif // USE_CUDA
|
17ccd9b150d9bee7e960f3da90670a1e55676af0.hip
|
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombination<
float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
|
17ccd9b150d9bee7e960f3da90670a1e55676af0.cu
|
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombination<
float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
a6c845a12cf6e12eaba750202de71145fadfb543.hip
|
// !!! This is a file automatically generated by hipify!!!
vecmultKernel00.o : vecmultKernel00.cu \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/cuda_runtime.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/host_config.h \
/usr/include/features.h \
/usr/include/sys/cdefs.h \
/usr/include/bits/wordsize.h \
/usr/include/gnu/stubs.h \
/usr/include/gnu/stubs-64.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/builtin_types.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/device_types.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/host_defines.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/hip/driver_types.h \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/include/limits.h \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/include/syslimits.h \
/usr/include/limits.h \
/usr/include/bits/posix1_lim.h \
/usr/include/bits/local_lim.h \
/usr/include/linux/limits.h \
/usr/include/bits/posix2_lim.h \
/usr/include/bits/xopen_lim.h \
/usr/include/bits/stdio_lim.h \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/include/stddef.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/surface_types.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/texture_types.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/hip/hip_vector_types.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/hip/channel_descriptor.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/hip/hip_runtime_api.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/cuda_device_runtime_api.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/driver_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/vector_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/common_functions.h \
/usr/include/string.h \
/usr/include/xlocale.h \
/usr/include/time.h \
/usr/include/bits/time.h \
/usr/include/bits/types.h \
/usr/include/bits/typesizes.h \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../../../include/c++/4.4.7/new \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../../../include/c++/4.4.7/cstddef \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../../../include/c++/4.4.7/x86_64-redhat-linux/bits/c++config.h \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../../../include/c++/4.4.7/x86_64-redhat-linux/bits/os_defines.h \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../../../include/c++/4.4.7/x86_64-redhat-linux/bits/cpu_defines.h \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../../../include/c++/4.4.7/exception \
/usr/include/stdio.h \
/usr/include/libio.h \
/usr/include/_G_config.h \
/usr/include/wchar.h \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/include/stdarg.h \
/usr/include/bits/sys_errlist.h \
/usr/include/bits/stdio.h \
/usr/include/stdlib.h \
/usr/include/bits/waitflags.h \
/usr/include/bits/waitstatus.h \
/usr/include/endian.h \
/usr/include/bits/endian.h \
/usr/include/bits/byteswap.h \
/usr/include/sys/types.h \
/usr/include/sys/select.h \
/usr/include/bits/select.h \
/usr/include/bits/sigset.h \
/usr/include/sys/sysmacros.h \
/usr/include/bits/pthreadtypes.h \
/usr/include/alloca.h \
/usr/include/assert.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/math_functions.h \
/usr/include/math.h \
/usr/include/bits/huge_val.h \
/usr/include/bits/huge_valf.h \
/usr/include/bits/huge_vall.h \
/usr/include/bits/inf.h \
/usr/include/bits/nan.h \
/usr/include/bits/mathdef.h \
/usr/include/bits/mathcalls.h \
/usr/include/bits/mathinline.h \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../../../include/c++/4.4.7/cmath \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../../../include/c++/4.4.7/bits/cpp_type_traits.h \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../../../include/c++/4.4.7/ext/type_traits.h \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../../../include/c++/4.4.7/bits/cmath.tcc \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../../../include/c++/4.4.7/cstdlib \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/math_functions_dbl_ptx3.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/cuda_surface_types.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/hip/hip_texture_types.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/hip/device_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/sm_11_atomic_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/sm_12_atomic_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/sm_13_double_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/sm_20_atomic_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/sm_32_atomic_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/sm_35_atomic_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/sm_20_intrinsics.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/sm_30_intrinsics.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/sm_32_intrinsics.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/sm_35_intrinsics.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/surface_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/texture_fetch_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/texture_indirect_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/surface_indirect_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/device_launch_parameters.h \
vecmultKernel.h
|
a6c845a12cf6e12eaba750202de71145fadfb543.cu
|
vecmultKernel00.o : vecmultKernel00.cu \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/cuda_runtime.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/host_config.h \
/usr/include/features.h \
/usr/include/sys/cdefs.h \
/usr/include/bits/wordsize.h \
/usr/include/gnu/stubs.h \
/usr/include/gnu/stubs-64.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/builtin_types.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/device_types.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/host_defines.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/driver_types.h \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/include/limits.h \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/include/syslimits.h \
/usr/include/limits.h \
/usr/include/bits/posix1_lim.h \
/usr/include/bits/local_lim.h \
/usr/include/linux/limits.h \
/usr/include/bits/posix2_lim.h \
/usr/include/bits/xopen_lim.h \
/usr/include/bits/stdio_lim.h \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/include/stddef.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/surface_types.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/texture_types.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/vector_types.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/channel_descriptor.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/cuda_runtime_api.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/cuda_device_runtime_api.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/driver_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/vector_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/common_functions.h \
/usr/include/string.h \
/usr/include/xlocale.h \
/usr/include/time.h \
/usr/include/bits/time.h \
/usr/include/bits/types.h \
/usr/include/bits/typesizes.h \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../../../include/c++/4.4.7/new \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../../../include/c++/4.4.7/cstddef \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../../../include/c++/4.4.7/x86_64-redhat-linux/bits/c++config.h \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../../../include/c++/4.4.7/x86_64-redhat-linux/bits/os_defines.h \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../../../include/c++/4.4.7/x86_64-redhat-linux/bits/cpu_defines.h \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../../../include/c++/4.4.7/exception \
/usr/include/stdio.h \
/usr/include/libio.h \
/usr/include/_G_config.h \
/usr/include/wchar.h \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/include/stdarg.h \
/usr/include/bits/sys_errlist.h \
/usr/include/bits/stdio.h \
/usr/include/stdlib.h \
/usr/include/bits/waitflags.h \
/usr/include/bits/waitstatus.h \
/usr/include/endian.h \
/usr/include/bits/endian.h \
/usr/include/bits/byteswap.h \
/usr/include/sys/types.h \
/usr/include/sys/select.h \
/usr/include/bits/select.h \
/usr/include/bits/sigset.h \
/usr/include/sys/sysmacros.h \
/usr/include/bits/pthreadtypes.h \
/usr/include/alloca.h \
/usr/include/assert.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/math_functions.h \
/usr/include/math.h \
/usr/include/bits/huge_val.h \
/usr/include/bits/huge_valf.h \
/usr/include/bits/huge_vall.h \
/usr/include/bits/inf.h \
/usr/include/bits/nan.h \
/usr/include/bits/mathdef.h \
/usr/include/bits/mathcalls.h \
/usr/include/bits/mathinline.h \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../../../include/c++/4.4.7/cmath \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../../../include/c++/4.4.7/bits/cpp_type_traits.h \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../../../include/c++/4.4.7/ext/type_traits.h \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../../../include/c++/4.4.7/bits/cmath.tcc \
/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../../../include/c++/4.4.7/cstdlib \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/math_functions_dbl_ptx3.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/cuda_surface_types.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/cuda_texture_types.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/device_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/sm_11_atomic_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/sm_12_atomic_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/sm_13_double_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/sm_20_atomic_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/sm_32_atomic_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/sm_35_atomic_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/sm_20_intrinsics.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/sm_30_intrinsics.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/sm_32_intrinsics.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/sm_35_intrinsics.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/surface_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/texture_fetch_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/texture_indirect_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/surface_indirect_functions.h \
/s/chopin/e/proj/AlphaZ/waruna/cuda-6.0/installation/bin/..//include/device_launch_parameters.h \
vecmultKernel.h
|
363bc432d4b5bafa650fc76588cabad9ab0cddde.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
*/
#include "common_magma.h"
#include "commonblas_s.h"
#define PRECISION_s
#define num_threads 128
#define gemv_bs 32
#define threadSize 128
__global__ void
sgemvn_kernel1_fermi(
int m, int n, int n1, float alpha,
const float * __restrict__ A, int lda,
const float * __restrict__ x, float beta,
float * __restrict__ y)
{
#if (__CUDA_ARCH__ >= 200)
int ind = blockIdx.x*num_threads + threadIdx.x;
A += ind;
float res = 0.f;
for( int i=0; i < n1; i += gemv_bs ) {
#pragma unroll
for(int j=0; j < gemv_bs; j++) {
res += A[0] * x[j];
A += lda;
}
x += gemv_bs;
}
if ( n > n1 ) {
for(int j=0; j < (n-n1); j++) {
res += A[0] * x[j];
A += lda;
}
}
if ( ind < m )
y[ind] = alpha * res + beta * y[ind];
#endif /* (__CUDA_ARCH__ >= 200) */
}
__global__ void
sgemvn_kernel2_fermi(
int m, int n, int n1, float alpha,
const float * __restrict__ A, int lda,
const float * __restrict__ x, float beta,
float * __restrict__ y)
{
#if (__CUDA_ARCH__ >= 200)
int ind = blockIdx.x*num_threads + threadIdx.x;
A += ind;
x += threadIdx.x;
float res = 0.f;
__shared__ float buff[num_threads];
for( int i=0; i < n1; i += num_threads ) {
__syncthreads();
buff[threadIdx.x] = x[i];
__syncthreads();
#pragma unroll
for(int j=0; j < num_threads; j++) {
res += A[0]*buff[j];
A += lda;
}
}
__syncthreads();
if ( n > n1 ) {
buff[threadIdx.x] = x[n1];
__syncthreads();
for(int j=0; j<(n-n1); j++) {
res += A[0]*buff[j];
A += lda;
}
}
if ( ind < m )
y[ind] = alpha * res + beta * y[ind];
#endif /* (__CUDA_ARCH__ >= 200) */
}
extern "C" void
magmablas_sgemvn_fermi(
magma_int_t m, magma_int_t n, float alpha,
const float *A, magma_int_t lda,
const float *x, float beta,
float *y)
{
/* -- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
Purpose
=======
This routine computes Y = alpha A x on the GPU.
M (input) INTEGER.
On entry, M specifies the number of rows of the matrix A.
N (input) INTEGER.
On entry, N specifies the number of columns of the matrix A
A (input) REAL array of dimension ( LDA, n ) on the GPU.
LDA (input) INTEGER.
LDA specifies the leading dimension of A.
X (input) REAL array of dimension n.
Y (output) REAL array of dimension m.
On exit Y = alpha A X.
===================================================================== */
magma_int_t blocks = (m - 1)/num_threads + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(num_threads, 1, 1);
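// For m <= 8500, kernel1 has each thread read x directly from global memory;
// for larger m, kernel2 stages x through a num_threads-wide shared-memory buffer.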
if ( m <= 8500 )
hipLaunchKernelGGL(( sgemvn_kernel1_fermi), dim3(grid), dim3(threads), 0, magma_stream ,
m, n, (n / gemv_bs)*gemv_bs, alpha, A, lda, x, beta, y);
else
hipLaunchKernelGGL(( sgemvn_kernel2_fermi), dim3(grid), dim3(threads), 0, magma_stream ,
m, n, (n / num_threads)*num_threads, alpha, A, lda, x, beta, y);
}
__global__ void
sgemvt_kernel1_fermi(
int m, int n, float alpha, int m1,
const float * __restrict__ A, int lda,
const float * __restrict__ x, float beta,
float * __restrict__ y)
{
#if (__CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
__shared__ float sdata[threadSize];
volatile float *smem;
float res;
res = 0.0f;
for(int i=0; i < m1; i += threadSize) {
res += A[tx + i + lda * blockIdx.y] * x[tx + i];
}
if ( m > m1 ) {
if ( tx + m1 < m ) {
res += A[tx + m1 + lda*blockIdx.y] * x[tx + m1];
}
else {
res += 0.0f;
}
}
sdata[tx] = res;
__syncthreads();
for(int s=blockDim.x/2; s > 32; s /= 2) {
if ( tx < s ) {
sdata[tx] += sdata[tx + s];
}
__syncthreads();
}
if ( tx < 32 ) {
smem = sdata;
smem[tx] += smem[tx + 32];
smem[tx] += smem[tx + 16];
smem[tx] += smem[tx + 8];
smem[tx] += smem[tx + 4];
smem[tx] += smem[tx + 2];
smem[tx] += smem[tx + 1];
}
if ( tx == 0 ) {
if ( blockIdx.y < n ) {
y[blockIdx.y] = sdata[0] * alpha + beta * y[blockIdx.y];
}
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
__global__ void
sgemvt_kernel2_fermi(
int m, int n, float alpha, int n1,
const float * __restrict__ A, int lda,
const float * __restrict__ x, float beta,
float * __restrict__ y)
{
#if (__CUDA_ARCH__ >= 200)
const int inx = threadIdx.x;
const int iny = threadIdx.y;
int ind = iny + blockIdx.x * 16;
ind = inx + ind * lda;
int ind2 = inx + iny * 16;
if ( ind2 > 31 )
ind2 -= 32;
A += ind;
x += ind2;
float res = 0.f;
__shared__ float buff[32];
__shared__ float la[16][17];
for( int i=0; i < n1; i += 32 ) {
buff[ind2] = x[i];
#pragma unroll
for(int j=0; j < 4; j++)
la[iny + j * 4][inx] = A[j* 4 * lda];
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += la[inx][iny*4+j]*buff[j+iny*4];
A += 16;
__syncthreads();
//===========================================
#pragma unroll
for(int j=0; j < 4; j++)
la[iny+ j * 4][inx] = A[j* 4 * lda];
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += la[inx][iny*4+j]*buff[j+16+iny*4];
A += 16;
}
__syncthreads(); // 1
if ( n > n1 ) {
if ( ind2 >= (n-n1) )
buff[ind2]=0.;
else
buff[ind2] = x[n1];
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
if ( inx >= (n-n1) )
la[iny + j * 4][inx] = 0.f;
else
la[iny + j * 4][inx] = A[j* 4 * lda];
__syncthreads();
if ( n-n1 > 4 ) {
#pragma unroll
for(int j=0; j < 4; j++) {
ind = j+iny*4;
res += la[inx][ind]*buff[ind];
}
A += 16;
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
if ( inx+16>=(n-n1) )
la[iny+ j * 4][inx] = 0.f;
else
la[iny+ j * 4][inx] = A[j* 4* lda];
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++) {
ind = j+4*iny;
res += la[inx][ind]*buff[16+ind];
}
}
else {
#pragma unroll
for(int j=0; j < 4; j++) {
ind = j+iny*4;
res += la[inx][ind]*buff[ind];
}
}
}
__syncthreads();
ind = inx + blockIdx.x * 16;
la[inx][iny] = res;
__syncthreads();
if ( ind < n && iny == 0 ) {
res = la[inx][0] + la[inx][1] + la[inx][2] + la[inx][3];
y[ind] = alpha*res + beta * y[ind];
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
extern "C" void
magmablas_sgemvt1_fermi(
magma_int_t m, magma_int_t n, float alpha,
const float *A, magma_int_t lda,
const float *x, float beta,
float *y)
{
dim3 grid ( 1, n, 1 );
dim3 threads ( threadSize, 1, 1 );
hipLaunchKernelGGL(( sgemvt_kernel1_fermi), dim3(grid), dim3(threads), 0, magma_stream ,
m, n, alpha, (m / threadSize)*threadSize, A, lda, x, beta, y);
}
extern "C" void
magmablas_sgemvt2_fermi(
magma_int_t m, magma_int_t n, float alpha,
const float *A, magma_int_t lda,
const float *x, float beta,
float *y)
{
magma_int_t blocks = (n - 1)/16 + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(16, 4, 1);
hipLaunchKernelGGL(( sgemvt_kernel2_fermi), dim3(grid), dim3(threads), 0, magma_stream ,
m, n, alpha, (m / 32)*32, A, lda, x, beta, y);
}
extern "C" void
magmablas_sgemvt_fermi(
magma_int_t m, magma_int_t n, float alpha,
const float *A, magma_int_t lda,
const float *x, float beta,
float *y)
{
/* -- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
Purpose
=======
This routine computes y = alpha * A^T * x on the GPU.
M (input) INTEGER.
On entry, M specifies the number of rows of the matrix A.
N (input) INTEGER.
On entry, N specifies the number of columns of the matrix A
A (input) REAL array of dimension ( LDA, n ) on the GPU.
LDA (input) INTEGER.
LDA specifies the leading dimension of A.
X (input) REAL array of dimension m.
Y (output) REAL array of dimension n.
On exit Y = alpha A^T X.
===================================================================== */
magmablas_sgemvt1_fermi(m, n, alpha, A, lda, x, beta, y);
}
extern "C" void
magmablas_sgemv(
char trans, magma_int_t m, magma_int_t n,
float alpha,
const float *A, magma_int_t lda,
const float *x, magma_int_t incx,
float beta,
float *y, magma_int_t incy)
{
/* -- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
Purpose
=======
This routine computes:
1) y = A x if trans == 'N' or 'n', alpha == 1, beta == 0,
and incx == incy == 1 (using magmablas code)
2) y = alpha A^T x if trans == 'T' or 't', beta == 0,
and incx == incy == 1 (using magmablas code)
3) y = alpha A^trans x + beta y
otherwise, using CUBLAS.
Arguments
==========
TRANS CHARACTER*1
On entry, TRANS specifies the operation to be performed as
follows:
TRANS = 'N' or 'n' y := alpha*A *x + beta*y
TRANS = 'T' or 't' y := alpha*A^T*x + beta*y
M (input) INTEGER
On entry, m specifies the number of rows of the matrix A.
N (input) INTEGER
On entry, n specifies the number of columns of the matrix A
ALPHA REAL
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
A (input) REAL array of dimension ( LDA, n ) on the GPU.
LDA (input) INTEGER
LDA specifies the leading dimension of A.
X (input) REAL array of dimension
n if trans == 'n'
m if trans == 't'
INCX (input) Specifies the increment for the elements of X.
INCX must not be zero. Unchanged on exit.
BETA REAL
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
Unchanged on exit.
Y (output) REAL array of dimension
m if trans == 'n'
n if trans == 't'
INCY (input) Specifies the increment for the elements of Y.
INCY must not be zero. Unchanged on exit.
===================================================================== */
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x version
// magmablas for [sd] precisions, cublas for [zc] precisions.
#if defined(PRECISION_z) || defined(PRECISION_c)
hipblasSgemv( trans, m, n, alpha, A, lda, x, incx, beta, y, incy );
#else
magmablas_sgemv_tesla( trans, m, n, alpha, A, lda, x, incx, beta, y, incy );
#endif
return;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
if ( incx == 1 && incy == 1 ) {
if ( trans == 'n' || trans == 'N' )
magmablas_sgemvn_fermi(m, n, alpha, A, lda, x, beta, y);
else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C')
magmablas_sgemvt_fermi(m, n, alpha, A, lda, x, beta, y);
else
fprintf( stderr, "trans = %c is invalid\n", trans );
}
else {
hipblasSgemv(trans, m, n, alpha, A, lda, x, incx, beta, y, incy);
}
}
#undef num_threads
#undef gemv_bs
#undef threadSize
|
363bc432d4b5bafa650fc76588cabad9ab0cddde.cu
|
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
*/
#include "common_magma.h"
#include "commonblas_s.h"
#define PRECISION_s
#define num_threads 128
#define gemv_bs 32
#define threadSize 128
__global__ void
sgemvn_kernel1_fermi(
int m, int n, int n1, float alpha,
const float * __restrict__ A, int lda,
const float * __restrict__ x, float beta,
float * __restrict__ y)
{
#if (__CUDA_ARCH__ >= 200)
int ind = blockIdx.x*num_threads + threadIdx.x;
A += ind;
float res = 0.f;
for( int i=0; i < n1; i += gemv_bs ) {
#pragma unroll
for(int j=0; j < gemv_bs; j++) {
res += A[0] * x[j];
A += lda;
}
x += gemv_bs;
}
if ( n > n1 ) {
for(int j=0; j < (n-n1); j++) {
res += A[0] * x[j];
A += lda;
}
}
if ( ind < m )
y[ind] = alpha * res + beta * y[ind];
#endif /* (__CUDA_ARCH__ >= 200) */
}
__global__ void
sgemvn_kernel2_fermi(
int m, int n, int n1, float alpha,
const float * __restrict__ A, int lda,
const float * __restrict__ x, float beta,
float * __restrict__ y)
{
#if (__CUDA_ARCH__ >= 200)
int ind = blockIdx.x*num_threads + threadIdx.x;
A += ind;
x += threadIdx.x;
float res = 0.f;
__shared__ float buff[num_threads];
for( int i=0; i < n1; i += num_threads ) {
__syncthreads();
buff[threadIdx.x] = x[i];
__syncthreads();
#pragma unroll
for(int j=0; j < num_threads; j++) {
res += A[0]*buff[j];
A += lda;
}
}
__syncthreads();
if ( n > n1 ) {
buff[threadIdx.x] = x[n1];
__syncthreads();
for(int j=0; j<(n-n1); j++) {
res += A[0]*buff[j];
A += lda;
}
}
if ( ind < m )
y[ind] = alpha * res + beta * y[ind];
#endif /* (__CUDA_ARCH__ >= 200) */
}
extern "C" void
magmablas_sgemvn_fermi(
magma_int_t m, magma_int_t n, float alpha,
const float *A, magma_int_t lda,
const float *x, float beta,
float *y)
{
/* -- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
Purpose
=======
This routine computes Y = alpha A X + beta Y on the GPU.
M (input) INTEGER.
On entry, M specifies the number of rows of the matrix A.
N (input) INTEGER.
On entry, N specifies the number of columns of the matrix A
A (input) REAL array of dimension ( LDA, n ) on the GPU.
LDA (input) INTEGER.
LDA specifies the leading dimension of A.
X (input) REAL array of dimension n.
Y (input/output) REAL array of dimension m.
On exit Y = alpha A X + beta Y.
===================================================================== */
magma_int_t blocks = (m - 1)/num_threads + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(num_threads, 1, 1);
if ( m <= 8500 )
sgemvn_kernel1_fermi<<< grid, threads, 0, magma_stream >>>
(m, n, (n / gemv_bs)*gemv_bs, alpha, A, lda, x, beta, y);
else
sgemvn_kernel2_fermi<<< grid, threads, 0, magma_stream >>>
(m, n, (n / num_threads)*num_threads, alpha, A, lda, x, beta, y);
}
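/* Illustrative usage sketch: a minimal, hypothetical helper (its name, the
sizes m and n, and the zero-filled buffers are assumptions made only for
demonstration) showing one way the non-transposed driver above can be called
on raw device buffers, computing y = alpha*A*x + beta*y for a column-major
m-by-n matrix A with leading dimension lda >= m. */
static void example_sgemvn_fermi_usage(void)
{
magma_int_t m = 1024, n = 512, lda = m;
float *dA, *dX, *dY;
cudaMalloc((void**)&dA, (size_t)lda * n * sizeof(float)); // m-by-n matrix A
cudaMalloc((void**)&dX, (size_t)n * sizeof(float)); // x has n entries
cudaMalloc((void**)&dY, (size_t)m * sizeof(float)); // y has m entries
cudaMemset(dA, 0, (size_t)lda * n * sizeof(float));
cudaMemset(dX, 0, (size_t)n * sizeof(float));
cudaMemset(dY, 0, (size_t)m * sizeof(float));
magmablas_sgemvn_fermi(m, n, 1.0f, dA, lda, dX, 0.0f, dY); // y = A*x
cudaFree(dA); cudaFree(dX); cudaFree(dY);
}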
__global__ void
sgemvt_kernel1_fermi(
int m, int n, float alpha, int m1,
const float * __restrict__ A, int lda,
const float * __restrict__ x, float beta,
float * __restrict__ y)
{
#if (__CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
__shared__ float sdata[threadSize];
volatile float *smem;
float res;
res = 0.0f;
for(int i=0; i < m1; i += threadSize) {
res += A[tx + i + lda * blockIdx.y] * x[tx + i];
}
if ( m > m1 ) {
if ( tx + m1 < m ) {
res += A[tx + m1 + lda*blockIdx.y] * x[tx + m1];
}
else {
res += 0.0f;
}
}
sdata[tx] = res;
__syncthreads();
for(int s=blockDim.x/2; s > 32; s /= 2) {
if ( tx < s ) {
sdata[tx] += sdata[tx + s];
}
__syncthreads();
}
if ( tx < 32 ) {
smem = sdata;
smem[tx] += smem[tx + 32];
smem[tx] += smem[tx + 16];
smem[tx] += smem[tx + 8];
smem[tx] += smem[tx + 4];
smem[tx] += smem[tx + 2];
smem[tx] += smem[tx + 1];
}
if ( tx == 0 ) {
if ( blockIdx.y < n ) {
y[blockIdx.y] = sdata[0] * alpha + beta * y[blockIdx.y];
}
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
__global__ void
sgemvt_kernel2_fermi(
int m, int n, float alpha, int n1,
const float * __restrict__ A, int lda,
const float * __restrict__ x, float beta,
float * __restrict__ y)
{
#if (__CUDA_ARCH__ >= 200)
const int inx = threadIdx.x;
const int iny = threadIdx.y;
int ind = iny + blockIdx.x * 16;
ind = inx + ind * lda;
int ind2 = inx + iny * 16;
if ( ind2 > 31 )
ind2 -= 32;
A += ind;
x += ind2;
float res = 0.f;
__shared__ float buff[32];
__shared__ float la[16][17];
for( int i=0; i < n1; i += 32 ) {
buff[ind2] = x[i];
#pragma unroll
for(int j=0; j < 4; j++)
la[iny + j * 4][inx] = A[j* 4 * lda];
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += la[inx][iny*4+j]*buff[j+iny*4];
A += 16;
__syncthreads();
//===========================================
#pragma unroll
for(int j=0; j < 4; j++)
la[iny+ j * 4][inx] = A[j* 4 * lda];
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += la[inx][iny*4+j]*buff[j+16+iny*4];
A += 16;
}
__syncthreads(); // 1
if ( n > n1 ) {
if ( ind2 >= (n-n1) )
buff[ind2]=0.;
else
buff[ind2] = x[n1];
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
if ( inx >= (n-n1) )
la[iny + j * 4][inx] = 0.f;
else
la[iny + j * 4][inx] = A[j* 4 * lda];
__syncthreads();
if ( n-n1 > 4 ) {
#pragma unroll
for(int j=0; j < 4; j++) {
ind = j+iny*4;
res += la[inx][ind]*buff[ind];
}
A += 16;
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
if ( inx+16>=(n-n1) )
la[iny+ j * 4][inx] = 0.f;
else
la[iny+ j * 4][inx] = A[j* 4* lda];
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++) {
ind = j+4*iny;
res += la[inx][ind]*buff[16+ind];
}
}
else {
#pragma unroll
for(int j=0; j < 4; j++) {
ind = j+iny*4;
res += la[inx][ind]*buff[ind];
}
}
}
__syncthreads();
ind = inx + blockIdx.x * 16;
la[inx][iny] = res;
__syncthreads();
if ( ind < n && iny == 0 ) {
res = la[inx][0] + la[inx][1] + la[inx][2] + la[inx][3];
y[ind] = alpha*res + beta * y[ind];
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
extern "C" void
magmablas_sgemvt1_fermi(
magma_int_t m, magma_int_t n, float alpha,
const float *A, magma_int_t lda,
const float *x, float beta,
float *y)
{
dim3 grid ( 1, n, 1 );
dim3 threads ( threadSize, 1, 1 );
sgemvt_kernel1_fermi<<< grid, threads, 0, magma_stream >>>
(m, n, alpha, (m / threadSize)*threadSize, A, lda, x, beta, y);
}
extern "C" void
magmablas_sgemvt2_fermi(
magma_int_t m, magma_int_t n, float alpha,
const float *A, magma_int_t lda,
const float *x, float beta,
float *y)
{
magma_int_t blocks = (n - 1)/16 + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(16, 4, 1);
sgemvt_kernel2_fermi<<< grid, threads, 0, magma_stream >>>
(m, n, alpha, (m / 32)*32, A, lda, x, beta, y);
}
extern "C" void
magmablas_sgemvt_fermi(
magma_int_t m, magma_int_t n, float alpha,
const float *A, magma_int_t lda,
const float *x, float beta,
float *y)
{
/* -- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
Purpose
=======
This routine computes y = alpha * A^T * x + beta * y on the GPU.
M (input) INTEGER.
On entry, M specifies the number of rows of the matrix A.
N (input) INTEGER.
On entry, N specifies the number of columns of the matrix A
A (input) REAL array of dimension ( LDA, n ) on the GPU.
LDA (input) INTEGER.
LDA specifies the leading dimension of A.
X (input) REAL array of dimension m.
Y (input/output) REAL array of dimension n.
On exit Y = alpha A^T X + beta Y.
===================================================================== */
magmablas_sgemvt1_fermi(m, n, alpha, A, lda, x, beta, y);
}
extern "C" void
magmablas_sgemv(
char trans, magma_int_t m, magma_int_t n,
float alpha,
const float *A, magma_int_t lda,
const float *x, magma_int_t incx,
float beta,
float *y, magma_int_t incy)
{
/* -- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
Purpose
=======
This routine computes:
1) y = A x if trans == 'N' or 'n', alpha == 1, beta == 0,
and incx == incy == 1 (using magmablas code)
2) y = alpha A^T x if trans == 'T' or 't', beta == 0,
and incx == incy == 1 (using magmablas code)
3) y = alpha A^trans x + beta y
otherwise, using CUBLAS.
Arguments
==========
TRANS CHARACTER*1
On entry, TRANS specifies the operation to be performed as
follows:
TRANS = 'N' or 'n' y := alpha*A *x + beta*y
TRANS = 'T' or 't' y := alpha*A^T*x + beta*y
M (input) INTEGER
On entry, m specifies the number of rows of the matrix A.
N (input) INTEGER
On entry, n specifies the number of columns of the matrix A
ALPHA REAL
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
A (input) REAL array of dimension ( LDA, n ) on the GPU.
LDA (input) INTEGER
LDA specifies the leading dimension of A.
X (input) REAL array of dimension
n if trans == 'n'
m if trans == 't'
INCX (input) Specifies the increment for the elements of X.
INCX must not be zero. Unchanged on exit.
BETA REAL
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
Unchanged on exit.
Y (output) REAL array of dimension
m if trans == 'n'
n if trans == 't'
INCY (input) Specifies the increment for the elements of Y.
INCY must not be zero. Unchanged on exit.
===================================================================== */
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x version
// magmablas for [sd] precisions, cublas for [zc] precisions.
#if defined(PRECISION_z) || defined(PRECISION_c)
cublasSgemv( trans, m, n, alpha, A, lda, x, incx, beta, y, incy );
#else
magmablas_sgemv_tesla( trans, m, n, alpha, A, lda, x, incx, beta, y, incy );
#endif
return;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
if ( incx == 1 && incy == 1 ) {
if ( trans == 'n' || trans == 'N' )
magmablas_sgemvn_fermi(m, n, alpha, A, lda, x, beta, y);
else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C')
magmablas_sgemvt_fermi(m, n, alpha, A, lda, x, beta, y);
else
fprintf( stderr, "trans = %c is invalid\n", trans );
}
else {
cublasSgemv(trans, m, n, alpha, A, lda, x, incx, beta, y, incy);
}
}
#undef num_threads
#undef gemv_bs
#undef threadSize
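/* Illustrative usage sketch for the dispatching wrapper above: a hypothetical
helper (its name, sizes, and zero-filled buffers are assumptions made only for
demonstration) that exercises both the 'N' and 'T' transpose cases with unit
increments, so the X/Y dimension rules documented above are visible in code. */
static void example_magmablas_sgemv_usage(void)
{
magma_int_t m = 2048, n = 1024, lda = m;
size_t maxlen = (size_t)(m > n ? m : n);
float *dA, *dX, *dY;
cudaMalloc((void**)&dA, (size_t)lda * n * sizeof(float));
cudaMalloc((void**)&dX, maxlen * sizeof(float)); // long enough for either trans case
cudaMalloc((void**)&dY, maxlen * sizeof(float));
cudaMemset(dA, 0, (size_t)lda * n * sizeof(float));
cudaMemset(dX, 0, maxlen * sizeof(float));
cudaMemset(dY, 0, maxlen * sizeof(float));
magmablas_sgemv('N', m, n, 1.0f, dA, lda, dX, 1, 0.0f, dY, 1); // y(m) = A * x(n)
magmablas_sgemv('T', m, n, 1.0f, dA, lda, dX, 1, 0.0f, dY, 1); // y(n) = A^T * x(m)
cudaFree(dA); cudaFree(dX); cudaFree(dY);
}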
|
4e58292f20ca94df2d7b2c8a282e6efc65eac2b5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_plus_4_top;
int xdim0_update_halo_kernel2_xvel_plus_4_top_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_plus_4_top;
int ydim0_update_halo_kernel2_xvel_plus_4_top_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_plus_4_top;
int xdim1_update_halo_kernel2_xvel_plus_4_top_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_plus_4_top;
int ydim1_update_halo_kernel2_xvel_plus_4_top_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_xvel_plus_4_top * (y) + \
xdim0_update_halo_kernel2_xvel_plus_4_top * \
ydim0_update_halo_kernel2_xvel_plus_4_top * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_xvel_plus_4_top * (y) + \
xdim1_update_halo_kernel2_xvel_plus_4_top * \
ydim1_update_halo_kernel2_xvel_plus_4_top * (z))
// user function
__device__
inline void
update_halo_kernel2_xvel_plus_4_top_gpu(double *xvel0, double *xvel1,
const int *fields) {
if (fields[FIELD_XVEL0] == 1)
xvel0[OPS_ACC0(0, 0, 0)] = xvel0[OPS_ACC0(0, -4, 0)];
if (fields[FIELD_XVEL1] == 1)
xvel1[OPS_ACC1(0, 0, 0)] = xvel1[OPS_ACC1(0, -4, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_xvel_plus_4_top(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_4_top +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_4_top *
ydim0_update_halo_kernel2_xvel_plus_4_top;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_4_top +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_4_top *
ydim1_update_halo_kernel2_xvel_plus_4_top;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_xvel_plus_4_top_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_plus_4_top(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_xvel_plus_4_top_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 25))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(25, "update_halo_kernel2_xvel_plus_4_top");
OPS_kernels[25].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_plus_4_top_h ||
ydim0 != ydim0_update_halo_kernel2_xvel_plus_4_top_h ||
xdim1 != xdim1_update_halo_kernel2_xvel_plus_4_top_h ||
ydim1 != ydim1_update_halo_kernel2_xvel_plus_4_top_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_plus_4_top, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_xvel_plus_4_top_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_plus_4_top, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_xvel_plus_4_top_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_plus_4_top, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_xvel_plus_4_top_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_plus_4_top, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_xvel_plus_4_top_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[25].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel2_xvel_plus_4_top), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[25].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[25].mpi_time += t2 - t1;
OPS_kernels[25].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[25].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_plus_4_top(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 25;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 25;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_xvel_plus_4_top_execute;
if (OPS_diags > 1) {
ops_timing_realloc(25, "update_halo_kernel2_xvel_plus_4_top");
}
ops_enqueue_kernel(desc);
}
#endif
|
4e58292f20ca94df2d7b2c8a282e6efc65eac2b5.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_plus_4_top;
int xdim0_update_halo_kernel2_xvel_plus_4_top_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_plus_4_top;
int ydim0_update_halo_kernel2_xvel_plus_4_top_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_plus_4_top;
int xdim1_update_halo_kernel2_xvel_plus_4_top_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_plus_4_top;
int ydim1_update_halo_kernel2_xvel_plus_4_top_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_xvel_plus_4_top * (y) + \
xdim0_update_halo_kernel2_xvel_plus_4_top * \
ydim0_update_halo_kernel2_xvel_plus_4_top * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_xvel_plus_4_top * (y) + \
xdim1_update_halo_kernel2_xvel_plus_4_top * \
ydim1_update_halo_kernel2_xvel_plus_4_top * (z))
// user function
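// each thread copies the x-velocity value from the point four rows below in y
// (relative offset (0,-4,0)) into the current top-halo point, for whichever of
// the xvel0 / xvel1 fields is enabled in the fields mask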
__device__
inline void
update_halo_kernel2_xvel_plus_4_top_gpu(double *xvel0, double *xvel1,
const int *fields) {
if (fields[FIELD_XVEL0] == 1)
xvel0[OPS_ACC0(0, 0, 0)] = xvel0[OPS_ACC0(0, -4, 0)];
if (fields[FIELD_XVEL1] == 1)
xvel1[OPS_ACC1(0, 0, 0)] = xvel1[OPS_ACC1(0, -4, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_xvel_plus_4_top(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_4_top +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_4_top *
ydim0_update_halo_kernel2_xvel_plus_4_top;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_4_top +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_4_top *
ydim1_update_halo_kernel2_xvel_plus_4_top;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_xvel_plus_4_top_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_plus_4_top(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_xvel_plus_4_top_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 25))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(25, "update_halo_kernel2_xvel_plus_4_top");
OPS_kernels[25].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_plus_4_top_h ||
ydim0 != ydim0_update_halo_kernel2_xvel_plus_4_top_h ||
xdim1 != xdim1_update_halo_kernel2_xvel_plus_4_top_h ||
ydim1 != ydim1_update_halo_kernel2_xvel_plus_4_top_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_plus_4_top, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_xvel_plus_4_top_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_plus_4_top, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_xvel_plus_4_top_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_plus_4_top, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_xvel_plus_4_top_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_plus_4_top, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_xvel_plus_4_top_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[25].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel2_xvel_plus_4_top<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[25].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[25].mpi_time += t2 - t1;
OPS_kernels[25].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[25].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_plus_4_top(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 25;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 25;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_xvel_plus_4_top_execute;
if (OPS_diags > 1) {
ops_timing_realloc(25, "update_halo_kernel2_xvel_plus_4_top");
}
ops_enqueue_kernel(desc);
}
#endif
|
4b9019b96a6ffea71223e8287e7571d84027c7ef.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "gpu.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#define IS_BETWEEN(value, lower, higher) (lower <= value && value < higher)
struct Boid {
float x,y;
#ifdef THIRDAXIS
float z;
#endif
float xVel, yVel;
#ifdef THIRDAXIS
float zVel;
#endif
};
uint32_t * gpuAlloc(int w, int h) {
uint32_t* gpu_mem;
hipError_t err = hipMalloc(&gpu_mem, w * h * sizeof(int));
if ( err != hipSuccess ) return NULL;
return gpu_mem;
};
void checkError()
{
hipError_t error = hipGetLastError();
if(error != hipSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
}
void gpuFree(void* gpu_mem) {
hipFree(gpu_mem);
}
int gpuUp(void* src, void* dst,int size){
hipError_t err = hipMemcpy(dst, src, size , hipMemcpyHostToDevice);
if ( err != hipSuccess ) return 1;
return 0;
}
int gpuBlit(void* src, void* dst, int size){
hipError_t err = hipMemcpy(dst, src, size , hipMemcpyDeviceToHost);
if ( err != hipSuccess ) return 1;
return 0;
}
// ----- i
__host__
__device__
uint32_t getPixColor(int x, int y) {
return 0xFF000000 + x + y;
}
__device__ void extractRGB_kernel(uint32_t pixel,uint32_t & a, uint32_t &r, uint32_t &g, uint32_t&b)
{
a = 0xFF & (pixel >> 24);
r = 0xFF & (pixel >> 16);
g = 0xFF & (pixel >> 8 );
b = 0xFF & pixel ;
}
__device__ void infuseRGB_kernel(uint32_t& poxil, char a, char r, char g, char b)
{
uint32_t a32 = (uint32_t) a;
uint32_t r32 = (uint32_t) r;
uint32_t g32 = (uint32_t) g;
uint32_t b32 = (uint32_t) b;
poxil =
(a32 << 24) & 0xFF000000 |
(r32 << 16) & 0x00FF0000 |
(g32 << 8) & 0x0000FF00 |
(b32 ) & 0x000000FF;
}
__device__ void normalizeVelocity(Boid& boid)
{
float magnitude = boid.xVel * boid.xVel;
magnitude += boid.yVel * boid.yVel;
#ifdef THIRDAXIS
magnitude += boid.zVel * boid.zVel;
#endif
float fixer = __frsqrt_rn(magnitude);
boid.xVel *= fixer;
boid.yVel *= fixer;
#ifdef THIRDAXIS
boid.zVel *= fixer;
#endif
}
__device__ void applyVelocity(Boid& boid, float scalar)
{
boid.x += boid.xVel * scalar;
boid.y += boid.yVel * scalar;
#ifdef THIRDAXIS
boid.z += boid.zVel * scalar;
#endif
}
__global__ void handleBoids(Boid* boids,int w, int h) {
const int xPix = blockDim.x * blockIdx.x + threadIdx.x;
const int yPix = blockDim.y * blockIdx.y + threadIdx.y;
__syncthreads();
}
void gpuRender(uint32_t* buf, int w, int h) {
checkError();
printf("The output is %d by %d\n",w,h);
int gridw = 1 + (w / TILE_WIDTH);
int gridh = 1 + (h / TILE_HEIGHT);
printf("Grid (w,h): (%d,%d)\n",gridw,gridh);
checkError();
printf("Readying dims\n");
checkError();
const dim3 blocksPerGrid(gridw,gridh);
printf("Tiles are %d by %d\n",TILE_WIDTH , TILE_HEIGHT);
const dim3 threadsPerBlock(TILE_WIDTH, TILE_HEIGHT);
checkError();
printf("For real\n");
printf("The image is %d by %d",w,h);
//my_kernel<<<blocksPerGrid, threadsPerBlock>>>(buf,w,h);
checkError();
hipDeviceSynchronize();
printf("Done\n");
}
|
4b9019b96a6ffea71223e8287e7571d84027c7ef.cu
|
#include "gpu.h"
#include <cuda.h>
#include <stdio.h>
#define IS_BETWEEN(value, lower, higher) (lower <= value && value < higher)
struct Boid {
float x,y;
#ifdef THIRDAXIS
float z;
#endif
float xVel, yVel;
#ifdef THIRDAXIS
float zVel;
#endif
};
uint32_t * gpuAlloc(int w, int h) {
uint32_t* gpu_mem;
cudaError_t err = cudaMalloc(&gpu_mem, w * h * sizeof(int));
if ( err != cudaSuccess ) return NULL;
return gpu_mem;
};
void checkError()
{
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
}
void gpuFree(void* gpu_mem) {
cudaFree(gpu_mem);
}
int gpuUp(void* src, void* dst,int size){
cudaError_t err = cudaMemcpy(dst, src, size , cudaMemcpyHostToDevice);
if ( err != cudaSuccess ) return 1;
return 0;
}
int gpuBlit(void* src, void* dst, int size){
cudaError_t err = cudaMemcpy(dst, src, size , cudaMemcpyDeviceToHost);
if ( err != cudaSuccess ) return 1;
return 0;
}
// ----- i
__host__
__device__
uint32_t getPixColor(int x, int y) {
return 0xFF000000 + x + y;
}
__device__ void extractRGB_kernel(uint32_t pixel,uint32_t & a, uint32_t &r, uint32_t &g, uint32_t&b)
{
a = 0xFF & (pixel >> 24);
r = 0xFF & (pixel >> 16);
g = 0xFF & (pixel >> 8 );
b = 0xFF & pixel ;
}
__device__ void infuseRGB_kernel(uint32_t& poxil, char a, char r, char g, char b)
{
uint32_t a32 = (uint32_t) a;
uint32_t r32 = (uint32_t) r;
uint32_t g32 = (uint32_t) g;
uint32_t b32 = (uint32_t) b;
poxil =
(a32 << 24) & 0xFF000000 |
(r32 << 16) & 0x00FF0000 |
(g32 << 8) & 0x0000FF00 |
(b32 ) & 0x000000FF;
}
__device__ void normalizeVelocity(Boid& boid)
{
float magnitude = boid.xVel * boid.xVel;
magnitude += boid.yVel * boid.yVel;
#ifdef THIRDAXIS
magnitude += boid.zVel * boid.zVel;
#endif
float fixer = __frsqrt_rn(magnitude);
boid.xVel *= fixer;
boid.yVel *= fixer;
#ifdef THIRDAXIS
boid.zVel *= fixer;
#endif
}
__device__ void applyVelocity(Boid& boid, float scalar)
{
boid.x += boid.xVel * scalar;
boid.y += boid.yVel * scalar;
#ifdef THIRDAXIS
boid.z += boid.zVel * scalar;
#endif
}
__global__ void handleBoids(Boid* boids,int w, int h) {
const int xPix = blockDim.x * blockIdx.x + threadIdx.x;
const int yPix = blockDim.y * blockIdx.y + threadIdx.y;
__syncthreads();
}
void gpuRender(uint32_t* buf, int w, int h) {
checkError();
printf("The output is %d by %d\n",w,h);
int gridw = 1 + (w / TILE_WIDTH);
int gridh = 1 + (h / TILE_HEIGHT);
printf("Grid (w,h): (%d,%d)\n",gridw,gridh);
checkError();
printf("Readying dims\n");
checkError();
const dim3 blocksPerGrid(gridw,gridh);
printf("Tiles are %d by %d\n",TILE_WIDTH , TILE_HEIGHT);
const dim3 threadsPerBlock(TILE_WIDTH, TILE_HEIGHT);
checkError();
printf("For real\n");
printf("The image is %d by %d",w,h);
//my_kernel<<<blocksPerGrid, threadsPerBlock>>>(buf,w,h);
checkError();
cudaDeviceSynchronize();
printf("Done\n");
}
|
84eafc22890f0143d3c16afa8a25ee1f591435ef.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "rocblas.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define CHECK 1
#define DATA_TYPE double
#define BLOCK_NUM 1
#define THREAD_NUM 1
#define MATRIX_NUM 10
#define MATRIX_SIZE 4
void stopwatch(int);
void cublas_error();
void pp(int p)
{printf("------------ %d-------------\n",p);}
void mat_out(DATA_TYPE**);
int main()
{
printf("BLOCK_NUM :%d\nTHREAD_NUM : %d\nMATRIX_NUM : %d\nMATRIX_SIZE : (%d)X(%d)\n",BLOCK_NUM,THREAD_NUM,MATRIX_NUM,MATRIX_SIZE,MATRIX_SIZE);
//host matrix array
DATA_TYPE **h_a,**h_b,**h_c;
//device matrix array
//dynamically allocating a 2-D array takes extra work, so the ** (array of pointers) is declared statically
// if ** itself were allocated with hipMalloc it would be a device pointer, so its elements could not be filled in from host code
DATA_TYPE *d_a[MATRIX_NUM],*d_b[MATRIX_NUM];
DATA_TYPE *d_c[MATRIX_NUM];
DATA_TYPE *s_a,*s_b,*s_c;
//blas parameters
DATA_TYPE alpha=1,beta=0;
int m,n,k,lda,ldb,ldc;
hipblasOperation_t transa,transb;
long long int stridea,strideb,stridec;
//matrix square
int s2;
int s3;
// hipblasHandle_t handle;
hipblasHandle_t handle;
//debug
hipblasStatus_t cublas_stat;
hipError_t cuda_stat;
/************************Initialization******************************************/
m=MATRIX_SIZE,n=MATRIX_SIZE,k=MATRIX_SIZE,lda=MATRIX_SIZE,ldb=MATRIX_SIZE,ldc=MATRIX_SIZE;
s2 = MATRIX_SIZE * MATRIX_SIZE;
s3 = MATRIX_SIZE * MATRIX_SIZE * MATRIX_SIZE;
transa = HIPBLAS_OP_N;
transb = HIPBLAS_OP_N;
stridea = s2;
strideb = s2;
stridec = s2;
hipHostMalloc((void**)&h_a,sizeof(DATA_TYPE*)*MATRIX_NUM);
hipHostMalloc((void**)&h_b,sizeof(DATA_TYPE*)*MATRIX_NUM);
hipHostMalloc((void**)&h_c,sizeof(DATA_TYPE*)*MATRIX_NUM);
for(int i=0;i<MATRIX_NUM;i++)hipHostMalloc((void**)&h_a[i],sizeof(DATA_TYPE)*s2);
for(int i=0;i<MATRIX_NUM;i++)hipHostMalloc((void**)&h_b[i],sizeof(DATA_TYPE)*s2);
for(int i=0;i<MATRIX_NUM;i++)hipHostMalloc((void**)&h_c[i],sizeof(DATA_TYPE)*s2);
// hipMalloc((void**)&d_a,sizeof(DATA_TYPE*)*MATRIX_NUM);
// hipMalloc((void**)&d_b,sizeof(DATA_TYPE*)*MATRIX_NUM);
// hipMalloc((void**)&d_c,sizeof(DATA_TYPE*)*MATRIX_NUM);
for(int i=0;i<MATRIX_NUM;i++)hipMalloc((void**)&d_a[i],sizeof(DATA_TYPE)*s2);
for(int i=0;i<MATRIX_NUM;i++)hipMalloc((void**)&d_b[i],sizeof(DATA_TYPE)*s2);
for(int i=0;i<MATRIX_NUM;i++)hipMalloc((void**)&d_c[i],sizeof(DATA_TYPE)*s2);
srand(time(NULL));
for(int i=0;i<MATRIX_NUM;i++)
for(int j=0;j<s2;j++)
h_a[i][j]=rand()/(DATA_TYPE)RAND_MAX;
for(int i=0;i<MATRIX_NUM;i++)
for(int j=0;j<s2;j++)
h_b[i][j]=rand()/(DATA_TYPE)RAND_MAX;
for(int i=0;i<MATRIX_NUM;i++)
for(int j=0;j<s2;j++)
h_c[i][j]=0;
hipblasCreate(&handle);
printf("a GEMM : \n");
stopwatch(0);
hipMemcpy(d_a[0],h_a[0],sizeof(DATA_TYPE)*s2,hipMemcpyHostToDevice);
hipMemcpy(d_b[0],h_b[0],sizeof(DATA_TYPE)*s2,hipMemcpyHostToDevice);
hipblasDgemm(handle,transa,transb,m,n,k,&alpha,d_a[0],lda,d_b[0],ldb,&beta,d_c[0],ldc);
hipMemcpy(h_c[0],d_c[0],sizeof(DATA_TYPE)*s2,hipMemcpyDeviceToHost);
stopwatch(1);
/****************** PLAIN GEMM ********************/
printf("GEMMs : \n");
stopwatch(0);
for(int i=0;i<MATRIX_NUM;i++)
hipMemcpy(d_a[i],h_a[i],sizeof(DATA_TYPE)*s2,hipMemcpyHostToDevice);
for(int i=0;i<MATRIX_NUM;i++)
hipMemcpy(d_b[i],h_b[i],sizeof(DATA_TYPE)*s2,hipMemcpyHostToDevice);
for(int i=0;i<MATRIX_NUM;i++)
hipblasDgemm(handle,transa,transb,m,n,k,&alpha,d_a[i],lda,d_b[i],ldb,&beta,d_c[i],ldc);
for(int i=0;i<MATRIX_NUM;i++)
hipMemcpy(h_c[i],d_c[i],sizeof(DATA_TYPE)*s2,hipMemcpyDeviceToHost);
stopwatch(1);
#if CHECK
mat_out(h_c);
#endif
for(int i=0;i<MATRIX_NUM;i++)
for(int j=0;j<MATRIX_SIZE;j++)
for(int k=0;k<MATRIX_SIZE;k++)
h_c[i][j*MATRIX_SIZE + k] = 0 ;
/******************BATCHED STRIDE GEMM ********************/
/******************BATCHED GEMM ********************/
/*
printf("BATCH : \n");
stopwatch(0);
for(int i=0;i<MATRIX_NUM;i++)
hipMemcpy(d_a[i],h_a[i],sizeof(DATA_TYPE)*s2,hipMemcpyHostToDevice);
for(int i=0;i<MATRIX_NUM;i++)
hipMemcpy(d_b[i],h_b[i],sizeof(DATA_TYPE)*s2,hipMemcpyHostToDevice);
cublas_stat = hipblasDgemmBatched(handle,transa,transb,m,n,k,&alpha,(const DATA_TYPE**)d_a,lda,(const DATA_TYPE**)d_b,ldb,&beta,d_c,ldc,MATRIX_NUM);
#if CHECK
printf("hipblasDgemmBatched : %d\n",cublas_stat);
#endif
for(int i=0;i<MATRIX_NUM;i++)
hipMemcpy(h_c[i],d_c[i],sizeof(DATA_TYPE)*s2,hipMemcpyDeviceToHost);
stopwatch(1);
#if CHECK
mat_out(h_c);
#endif
*/
// hipblasDestroy(handle);
/******************STREAMED GEMM ********************/
/*
printf("STREAM : \n");
stopwatch(0);
for(int i=0;i<MATRIX_NUM;i++)
{ cuda_stat = hipMemcpyAsync(d_a[i],h_a[i],sizeof(DATA_TYPE)*s2,hipMemcpyHostToDevice,stream[i]);
#if CHECK
printf("hipMemcpyAsync(d_a,h_a)[%d] : %d\n",i,cuda_stat);
#endif
}
for(int i=0;i<MATRIX_NUM;i++)
hipMemcpyAsync(d_b[i],h_b[i],sizeof(DATA_TYPE)*s2,hipMemcpyHostToDevice,stream[i]);
for(int i=0;i<MATRIX_NUM;i++)
{cublas_stat = hipblasDgemm(handle_s[i],transa,transb,m,n,k,&alpha,d_a[i],lda,d_b[i],ldb,&beta,d_c[i],ldc);
#if CHECK
printf("hipblasDgemm[%d] : %d\n",i,cublas_stat);
#endif
}
for(int i=0;i<MATRIX_NUM;i++)
hipMemcpyAsync(h_c[i],d_c[i],sizeof(DATA_TYPE)*s2,hipMemcpyDeviceToHost,stream[i]);
for(int i=0;i<MATRIX_NUM;i++)
hipStreamSynchronize(stream[i]);
stopwatch(1);
*/
/***********DeAllocation**********************/
for(int i=0;i<MATRIX_NUM;i++)hipHostFree(h_a[i]);
for(int i=0;i<MATRIX_NUM;i++)hipHostFree(h_b[i]);
for(int i=0;i<MATRIX_NUM;i++)hipHostFree(h_c[i]);
for(int i=0;i<MATRIX_NUM;i++)hipFree(d_a[i]);
for(int i=0;i<MATRIX_NUM;i++)hipFree(d_b[i]);
for(int i=0;i<MATRIX_NUM;i++)hipFree(d_c[i]);
hipHostFree(h_a);
hipHostFree(h_b);
hipHostFree(h_c);
// hipFree(d_a);
// hipFree(d_b);
// hipFree(d_c);
hipblasDestroy(handle);
return 0;
}
void stopwatch(int flag)
{
enum clock_unit{nano = 0, micro , milli, sec} unit;
const long long NANOS = 1000000000LL;
static struct timespec startTS,endTS;
static long long diff = 0;
/*
nano, micro, milli, sec
*/
unit = micro;
//start
if(flag == 0)
{
diff = 0;
if(-1 == clock_gettime(CLOCK_MONOTONIC,&startTS))
printf("Failed to call clock_gettime\n");
}
//end
else if(flag == 1)
{
if(-1 == clock_gettime(CLOCK_MONOTONIC,&endTS))
printf("Failed to call clock_gettime\n");
diff = NANOS * (endTS.tv_sec - startTS.tv_sec) + (endTS.tv_nsec - startTS.tv_nsec);
switch(unit)
{
case nano :
printf("elapsed time : % lld nano sec\n",diff);
break;
case micro :
printf("elapsed time : % lld micro sec\n",diff/1000);
break;
case sec :
printf("elapsed time : % lld sec\n",diff/1000000000);
break;
default :
printf("elapsed time : % lld milli sec\n",diff/100000);
break;
}
}
else
{
printf("wrong flag | 0 : start, 1 : end\n");
}
}
void mat_out(DATA_TYPE**a)
{
for(int i=0;i<MATRIX_NUM;i++)
{
printf("--- %d ---\n",i);
for(int j=0;j<MATRIX_SIZE;j++)
{
for(int k=0;k<MATRIX_SIZE;k++)
{
printf("%lf ",a[i][j*MATRIX_SIZE + k]);
}
printf("\n");
}
}
}
|
84eafc22890f0143d3c16afa8a25ee1f591435ef.cu
|
#include "cublas_v2.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define CHECK 1
#define DATA_TYPE double
#define BLOCK_NUM 1
#define THREAD_NUM 1
#define MATRIX_NUM 10
#define MATRIX_SIZE 4
void stopwatch(int);
void cublas_error();
void pp(int p)
{printf("------------ %d-------------\n",p);}
void mat_out(DATA_TYPE**);
int main()
{
printf("BLOCK_NUM :%d\nTHREAD_NUM : %d\nMATRIX_NUM : %d\nMATRIX_SIZE : (%d)X(%d)\n",BLOCK_NUM,THREAD_NUM,MATRIX_NUM,MATRIX_SIZE,MATRIX_SIZE);
//host matrix array
DATA_TYPE **h_a,**h_b,**h_c;
//device matrix array
//dynamically allocating a 2-D array takes extra work, so the ** (array of pointers) is declared statically
// if ** itself were allocated with cudaMalloc it would be a device pointer, so its elements could not be filled in from host code
DATA_TYPE *d_a[MATRIX_NUM],*d_b[MATRIX_NUM];
DATA_TYPE *d_c[MATRIX_NUM];
DATA_TYPE *s_a,*s_b,*s_c;
//blas parameters
DATA_TYPE alpha=1,beta=0;
int m,n,k,lda,ldb,ldc;
cublasOperation_t transa,transb;
long long int stridea,strideb,stridec;
//matrix square
int s2;
int s3;
// cublasHandle_t handle;
cublasHandle_t handle;
//debug
cublasStatus_t cublas_stat;
cudaError_t cuda_stat;
/************************Initialization******************************************/
m=MATRIX_SIZE,n=MATRIX_SIZE,k=MATRIX_SIZE,lda=MATRIX_SIZE,ldb=MATRIX_SIZE,ldc=MATRIX_SIZE;
s2 = MATRIX_SIZE * MATRIX_SIZE;
s3 = MATRIX_SIZE * MATRIX_SIZE * MATRIX_SIZE;
transa = CUBLAS_OP_N;
transb = CUBLAS_OP_N;
stridea = s2;
strideb = s2;
stridec = s2;
cudaMallocHost((void**)&h_a,sizeof(DATA_TYPE*)*MATRIX_NUM);
cudaMallocHost((void**)&h_b,sizeof(DATA_TYPE*)*MATRIX_NUM);
cudaMallocHost((void**)&h_c,sizeof(DATA_TYPE*)*MATRIX_NUM);
for(int i=0;i<MATRIX_NUM;i++)cudaMallocHost((void**)&h_a[i],sizeof(DATA_TYPE)*s2);
for(int i=0;i<MATRIX_NUM;i++)cudaMallocHost((void**)&h_b[i],sizeof(DATA_TYPE)*s2);
for(int i=0;i<MATRIX_NUM;i++)cudaMallocHost((void**)&h_c[i],sizeof(DATA_TYPE)*s2);
// cudaMalloc((void**)&d_a,sizeof(DATA_TYPE*)*MATRIX_NUM);
// cudaMalloc((void**)&d_b,sizeof(DATA_TYPE*)*MATRIX_NUM);
// cudaMalloc((void**)&d_c,sizeof(DATA_TYPE*)*MATRIX_NUM);
for(int i=0;i<MATRIX_NUM;i++)cudaMalloc((void**)&d_a[i],sizeof(DATA_TYPE)*s2);
for(int i=0;i<MATRIX_NUM;i++)cudaMalloc((void**)&d_b[i],sizeof(DATA_TYPE)*s2);
for(int i=0;i<MATRIX_NUM;i++)cudaMalloc((void**)&d_c[i],sizeof(DATA_TYPE)*s2);
srand(time(NULL));
for(int i=0;i<MATRIX_NUM;i++)
for(int j=0;j<s2;j++)
h_a[i][j]=rand()/(DATA_TYPE)RAND_MAX;
for(int i=0;i<MATRIX_NUM;i++)
for(int j=0;j<s2;j++)
h_b[i][j]=rand()/(DATA_TYPE)RAND_MAX;
for(int i=0;i<MATRIX_NUM;i++)
for(int j=0;j<s2;j++)
h_c[i][j]=0;
cublasCreate(&handle);
printf("a GEMM : \n");
stopwatch(0);
cudaMemcpy(d_a[0],h_a[0],sizeof(DATA_TYPE)*s2,cudaMemcpyHostToDevice);
cudaMemcpy(d_b[0],h_b[0],sizeof(DATA_TYPE)*s2,cudaMemcpyHostToDevice);
cublasDgemm(handle,transa,transb,m,n,k,&alpha,d_a[0],lda,d_b[0],ldb,&beta,d_c[0],ldc);
cudaMemcpy(h_c[0],d_c[0],sizeof(DATA_TYPE)*s2,cudaMemcpyDeviceToHost);
stopwatch(1);
/******************PLAIN GEMM ********************/
printf("GEMMs : \n");
stopwatch(0);
for(int i=0;i<MATRIX_NUM;i++)
cudaMemcpy(d_a[i],h_a[i],sizeof(DATA_TYPE)*s2,cudaMemcpyHostToDevice);
for(int i=0;i<MATRIX_NUM;i++)
cudaMemcpy(d_b[i],h_b[i],sizeof(DATA_TYPE)*s2,cudaMemcpyHostToDevice);
for(int i=0;i<MATRIX_NUM;i++)
cublasDgemm(handle,transa,transb,m,n,k,&alpha,d_a[i],lda,d_b[i],ldb,&beta,d_c[i],ldc);
for(int i=0;i<MATRIX_NUM;i++)
cudaMemcpy(h_c[i],d_c[i],sizeof(DATA_TYPE)*s2,cudaMemcpyDeviceToHost);
stopwatch(1);
#if CHECK
mat_out(h_c);
#endif
for(int i=0;i<MATRIX_NUM;i++)
for(int j=0;j<MATRIX_SIZE;j++)
for(int k=0;k<MATRIX_SIZE;k++)
h_c[i][j*MATRIX_SIZE + k] = 0 ;
/******************BATCHED STRIDE GEMM ********************/
/******************BATCHED GEMM ********************/
/*
printf("BATCH : \n");
stopwatch(0);
for(int i=0;i<MATRIX_NUM;i++)
cudaMemcpy(d_a[i],h_a[i],sizeof(DATA_TYPE)*s2,cudaMemcpyHostToDevice);
for(int i=0;i<MATRIX_NUM;i++)
cudaMemcpy(d_b[i],h_b[i],sizeof(DATA_TYPE)*s2,cudaMemcpyHostToDevice);
cublas_stat = cublasDgemmBatched(handle,transa,transb,m,n,k,&alpha,(const DATA_TYPE**)d_a,lda,(const DATA_TYPE**)d_b,ldb,&beta,d_c,ldc,MATRIX_NUM);
#if CHECK
printf("cublasDgemmBatched : %d\n",cublas_stat);
#endif
for(int i=0;i<MATRIX_NUM;i++)
cudaMemcpy(h_c[i],d_c[i],sizeof(DATA_TYPE)*s2,cudaMemcpyDeviceToHost);
stopwatch(1);
#if CHECK
mat_out(h_c);
#endif
*/
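/* Hedged sketch (names such as d_a_dev are hypothetical and not used elsewhere
in this program): cublasDgemmBatched expects the pointer arrays themselves to
reside in device memory, while d_a/d_b/d_c above are host arrays of device
pointers, so they would typically be staged to the device first:
DATA_TYPE **d_a_dev, **d_b_dev, **d_c_dev;
cudaMalloc((void**)&d_a_dev, sizeof(DATA_TYPE*)*MATRIX_NUM);
cudaMalloc((void**)&d_b_dev, sizeof(DATA_TYPE*)*MATRIX_NUM);
cudaMalloc((void**)&d_c_dev, sizeof(DATA_TYPE*)*MATRIX_NUM);
cudaMemcpy(d_a_dev, d_a, sizeof(DATA_TYPE*)*MATRIX_NUM, cudaMemcpyHostToDevice);
cudaMemcpy(d_b_dev, d_b, sizeof(DATA_TYPE*)*MATRIX_NUM, cudaMemcpyHostToDevice);
cudaMemcpy(d_c_dev, d_c, sizeof(DATA_TYPE*)*MATRIX_NUM, cudaMemcpyHostToDevice);
cublas_stat = cublasDgemmBatched(handle,transa,transb,m,n,k,&alpha,
(const DATA_TYPE**)d_a_dev,lda,(const DATA_TYPE**)d_b_dev,ldb,&beta,d_c_dev,ldc,MATRIX_NUM);
*/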
// cublasDestroy(handle);
/******************STREAMED GEMM ********************/
/*
printf("STREAM : \n");
stopwatch(0);
for(int i=0;i<MATRIX_NUM;i++)
{ cuda_stat = cudaMemcpyAsync(d_a[i],h_a[i],sizeof(DATA_TYPE)*s2,cudaMemcpyHostToDevice,stream[i]);
#if CHECK
printf("cudaMemcpyAsync(d_a,h_a)[%d] : %d\n",i,cuda_stat);
#endif
}
for(int i=0;i<MATRIX_NUM;i++)
cudaMemcpyAsync(d_b[i],h_b[i],sizeof(DATA_TYPE)*s2,cudaMemcpyHostToDevice,stream[i]);
for(int i=0;i<MATRIX_NUM;i++)
{cublas_stat = cublasDgemm(handle_s[i],transa,transb,m,n,k,&alpha,d_a[i],lda,d_b[i],ldb,&beta,d_c[i],ldc);
#if CHECK
printf("cublasDgemm[%d] : %d\n",i,cublas_stat);
#endif
}
for(int i=0;i<MATRIX_NUM;i++)
cudaMemcpyAsync(h_c[i],d_c[i],sizeof(DATA_TYPE)*s2,cudaMemcpyDeviceToHost,stream[i]);
for(int i=0;i<MATRIX_NUM;i++)
cudaStreamSynchronize(stream[i]);
stopwatch(1);
*/
/***********DeAllocation**********************/
for(int i=0;i<MATRIX_NUM;i++)cudaFreeHost(h_a[i]);
for(int i=0;i<MATRIX_NUM;i++)cudaFreeHost(h_b[i]);
for(int i=0;i<MATRIX_NUM;i++)cudaFreeHost(h_c[i]);
for(int i=0;i<MATRIX_NUM;i++)cudaFree(d_a[i]);
for(int i=0;i<MATRIX_NUM;i++)cudaFree(d_b[i]);
for(int i=0;i<MATRIX_NUM;i++)cudaFree(d_c[i]);
cudaFreeHost(h_a);
cudaFreeHost(h_b);
cudaFreeHost(h_c);
// cudaFree(d_a);
// cudaFree(d_b);
// cudaFree(d_c);
cublasDestroy(handle);
return 0;
}
void stopwatch(int flag)
{
enum clock_unit{nano = 0, micro , milli, sec} unit;
const long long NANOS = 1000000000LL;
static struct timespec startTS,endTS;
static long long diff = 0;
/*
the time unit is selected here:
nano, micro, milli, sec
*/
unit = micro;
//start
if(flag == 0)
{
diff = 0;
if(-1 == clock_gettime(CLOCK_MONOTONIC,&startTS))
printf("Failed to call clock_gettime\n");
}
//end
else if(flag == 1)
{
if(-1 == clock_gettime(CLOCK_MONOTONIC,&endTS))
printf("Failed to call clock_gettime\n");
diff = NANOS * (endTS.tv_sec - startTS.tv_sec) + (endTS.tv_nsec - startTS.tv_nsec);
switch(unit)
{
case nano :
printf("elapsed time : % lld nano sec\n",diff);
break;
case micro :
printf("elapsed time : % lld micro sec\n",diff/1000);
break;
case sec :
printf("elapsed time : % lld sec\n",diff/1000000000);
break;
default :
printf("elapsed time : % lld milli sec\n",diff/100000);
break;
}
}
else
{
printf("wrong flag | 0 : start, 1 : end\n");
}
}
void mat_out(DATA_TYPE**a)
{
for(int i=0;i<MATRIX_NUM;i++)
{
printf("--- %d ---\n",i);
for(int j=0;j<MATRIX_SIZE;j++)
{
for(int k=0;k<MATRIX_SIZE;k++)
{
printf("%lf ",a[i][j*MATRIX_SIZE + k]);
}
printf("\n");
}
}
}
|
7c62b4d2fe62ee850dd798c4e576a0095160fd41.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include "hip/hip_runtime.h"
#include "rocblas.h"
using namespace std;
#define IDX2F(i,j,ld) ((((j)-1)*(ld))+((i)-1))
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
void CPU_MatMul(double * A, double * B ,double * C,int m,int n,int k){
for(int i = 0;i < m;i++){
for(int j = 0;j < k;j++){
for(int x = 0;x < n;x++){
C[IDX2C(i,j,k)] += A[i * n + x] * B[x * k + j];
}
}
}
}
int main()
{
int m,n,k;
timeval t1, t2;
cout << "Input problem size:";
cin >> m;
n = m;
k = m;
hipblasHandle_t handle;
hipblasCreate(&handle);
double *A,*B,*C,*C1;
A = (double*)malloc(sizeof(double) * m * n);
B = (double*)malloc(sizeof(double) * k * n);
C = (double*)malloc(sizeof(double) * m * k);
C1 = (double*)malloc(sizeof(double) * m * k);
for(int i = 0;i < m;i++){
for(int j = 0;j < n;j++){
A[i * n + j] = rand() % 10;
}
}
for(int i = 0;i < n;i++){
for(int j = 0;j < k;j++){
B[i * k + j] = rand() % 10;
}
}
memset(C,0,sizeof(double) * m * k);
memset(C1,0,sizeof(double) * m * k);
double * d_A,*d_B,*d_C;
gettimeofday(&t1, NULL);
hipMalloc(&d_A, sizeof(double) * m * n);
hipMalloc(&d_B,sizeof(double) * n * k);
hipMalloc(&d_C,sizeof(double) * m * k);
hipMemcpy(d_A, A, sizeof(double) * m * n, hipMemcpyHostToDevice);
hipMemcpy(d_B, B, sizeof(double) * n * k, hipMemcpyHostToDevice);
gettimeofday(&t1, NULL);
double a = 1,b = 0;
hipblasDgemm(
handle,
HIPBLAS_OP_T, HIPBLAS_OP_T,
m, n, k,
&a, //alpha
d_A, n,
d_B, k,
&b, //beta
d_C, m
);
hipMemcpy(C, d_C, sizeof(double) * m * k, hipMemcpyDeviceToHost);
gettimeofday(&t2, NULL);
printf("GPU time is:%lds\n",t2.tv_sec*1000000 + t2.tv_usec - t1.tv_sec*1000000 - t1.tv_usec);
gettimeofday(&t1, NULL);
CPU_MatMul(A,B,C1,m,n,k);
gettimeofday(&t2, NULL);
printf("CPU time is:%lds\n",t2.tv_sec*1000000 + t2.tv_usec - t1.tv_sec*1000000 - t1.tv_usec);
int flag = 0;
for(int i = 0;i < m * k;i++){
if(fabs((C[i] - C1[i])) > 1e-4){
flag = 1;
break;
}
}
if(flag){
cout << "Wrong result." << endl;
}
else {
cout << "The results are correct. " << endl;
}
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
free(A);
free(B);
free(C);
free(C1);
hipblasDestroy(handle);
}
|
7c62b4d2fe62ee850dd798c4e576a0095160fd41.cu
|
#include <iostream>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include "cuda_runtime.h"
#include "cublas_v2.h"
using namespace std;
#define IDX2F(i,j,ld) ((((j)-1)*(ld))+((i)-1))
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
void CPU_MatMul(double * A, double * B ,double * C,int m,int n,int k){
for(int i = 0;i < m;i++){
for(int j = 0;j < k;j++){
for(int x = 0;x < n;x++){
C[IDX2C(i,j,k)] += A[i * n + x] * B[x * k + j];
}
}
}
}
int main()
{
int m,n,k;
timeval t1, t2;
cout << "Input problem size:";
cin >> m;
n = m;
k = m;
cublasHandle_t handle;
cublasCreate(&handle);
double *A,*B,*C,*C1;
A = (double*)malloc(sizeof(double) * m * n);
B = (double*)malloc(sizeof(double) * k * n);
C = (double*)malloc(sizeof(double) * m * k);
C1 = (double*)malloc(sizeof(double) * m * k);
for(int i = 0;i < m;i++){
for(int j = 0;j < n;j++){
A[i * n + j] = rand() % 10;
}
}
for(int i = 0;i < n;i++){
for(int j = 0;j < k;j++){
B[i * k + j] = rand() % 10;
}
}
memset(C,0,sizeof(double) * m * k);
memset(C1,0,sizeof(double) * m * k);
double * d_A,*d_B,*d_C;
gettimeofday(&t1, NULL);
cudaMalloc(&d_A, sizeof(double) * m * n);
cudaMalloc(&d_B,sizeof(double) * n * k);
cudaMalloc(&d_C,sizeof(double) * m * k);
cudaMemcpy(d_A, A, sizeof(double) * m * n, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, sizeof(double) * n * k, cudaMemcpyHostToDevice);
gettimeofday(&t1, NULL);
double a = 1,b = 0;
cublasDgemm(
handle,
CUBLAS_OP_T, CUBLAS_OP_T,
m, n, k,
&a, //alpha
d_A, n,
d_B, k,
&b, //beta
d_C, m
);
cudaMemcpy(C, d_C, sizeof(double) * m * k, cudaMemcpyDeviceToHost);
gettimeofday(&t2, NULL);
printf("GPU time is:%ldμs\n",t2.tv_sec*1000000 + t2.tv_usec - t1.tv_sec*1000000 - t1.tv_usec);
gettimeofday(&t1, NULL);
CPU_MatMul(A,B,C1,m,n,k);
gettimeofday(&t2, NULL);
printf("CPU time is:%ldμs\n",t2.tv_sec*1000000 + t2.tv_usec - t1.tv_sec*1000000 - t1.tv_usec);
int flag = 0;
for(int i = 0;i < m * k;i++){
if(fabs((C[i] - C1[i])) > 1e-4){
flag = 1;
break;
}
}
if(flag){
cout << "Wrong result." << endl;
}
else {
cout << "The results are correct. " << endl;
}
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(A);
free(B);
free(C);
free(C1);
cublasDestroy(handle);
}
|
89dec929a663cfc88b85e8ec91439431d300358d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__ void add(int *a, int *b, int *alpha) {
int i = blockIdx.x;
b[i]=*alpha*a[i]+b[i];
}
int main(void)
{
const int MAX = 10;
int a[MAX], b[MAX], alpha;
int *d_a, *d_b, *d_c;
int size = sizeof(int)*MAX;
hipMalloc((void**)&d_a, size);
hipMalloc((void**)&d_b, size);
hipMalloc((void**)&d_c, sizeof(int));
for (int i = 0; i < MAX; ++i)
{
a[i] = i+10;
b[i] = i*20;
}
alpha=2;
printf("Array A:\n");
for (int i = 0; i < MAX; ++i)
printf("%d\t", a[i]);
printf("\nArray B:\n");
for (int i = 0; i < MAX; ++i)
printf("%d\t", b[i]);
hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice);
hipMemcpy(d_c, &alpha, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3(MAX),dim3(1), 0, 0, d_a, d_b, d_c);
hipMemcpy(&b, d_b, size, hipMemcpyDeviceToHost);
printf("\nFinal Result:\n");
for (int i = 0; i < MAX; ++i)
printf("%d\t", b[i]);
printf("\n");
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
|
89dec929a663cfc88b85e8ec91439431d300358d.cu
|
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
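// AXPY-style kernel: launched as add<<<MAX,1>>>, so each block handles a single
// element i = blockIdx.x and computes b[i] = (*alpha) * a[i] + b[i]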
__global__ void add(int *a, int *b, int *alpha) {
int i = blockIdx.x;
b[i]=*alpha*a[i]+b[i];
}
int main(void)
{
const int MAX = 10;
int a[MAX], b[MAX], alpha;
int *d_a, *d_b, *d_c;
int size = sizeof(int)*MAX;
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMalloc((void**)&d_c, sizeof(int));
for (int i = 0; i < MAX; ++i)
{
a[i] = i+10;
b[i] = i*20;
}
alpha=2;
printf("Array A:\n");
for (int i = 0; i < MAX; ++i)
printf("%d\t", a[i]);
printf("\nArray B:\n");
for (int i = 0; i < MAX; ++i)
printf("%d\t", b[i]);
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_c, &alpha, sizeof(int), cudaMemcpyHostToDevice);
add<<<MAX,1>>>(d_a, d_b, d_c);
cudaMemcpy(&b, d_b, size, cudaMemcpyDeviceToHost);
printf("\nFinal Result:\n");
for (int i = 0; i < MAX; ++i)
printf("%d\t", b[i]);
printf("\n");
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
aaac4a4c6bc9fb6843103e3597d6897bc3fdfb6c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <time.h>
#include "cuStopwatch.cu"
#define SHIFT 27
__global__ void search_1(const uint32_t* arr, uint32_t size, uint32_t* res, uint32_t val) {
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t gridsize = blockDim.x * gridDim.x;
uint32_t warpcnt = 0;
while(tid < size){
uint32_t herecount = (arr[tid] == val) ? 1 : 0;
herecount += __shfl_down_sync(0xffffffff, herecount, 16);
herecount += __shfl_down_sync(0xffffffff, herecount, 8);
herecount += __shfl_down_sync(0xffffffff, herecount, 4);
herecount += __shfl_down_sync(0xffffffff, herecount, 2);
herecount += __shfl_down_sync(0xffffffff, herecount, 1);
warpcnt += herecount;
tid += gridsize;
}
if((threadIdx.x & 31) == 0) atomicAdd(res, warpcnt);
return;
}
__global__ void search_2(const uint32_t* arr, uint32_t size, uint32_t* res, uint32_t val) {
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t gridsize = blockDim.x * gridDim.x;
while(tid < size){
uint32_t ishere = (arr[tid] == val) ? 1 : 0;
if(__any_sync(0xffffffff, ishere))
if ((threadIdx.x & 31) == 0) atomicAdd(res, 1);
tid += gridsize;
}
return;
}
__global__ void search_3(const uint32_t* arr, uint32_t size, uint32_t* res, uint32_t val) {
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t gridsize = blockDim.x * gridDim.x;
while(tid < size){
if(arr[tid] == val) atomicAdd(res, 1);
tid += gridsize;
}
return;
}
/* In algorithm number 4, each thread checks a strided subset of the array:
thread number 10, for example, checks the indexes (i*gridsize) + 10,
where i ranges over {0 .. size/gridsize}.
*/
__global__ void search_4(const uint32_t* arr, uint32_t size, uint32_t* res, uint32_t val) {
if(*res != 0) return;
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t gridsize = blockDim.x * gridDim.x;
while((tid < size) && (*res == 0)){
if(arr[tid] == val) (*res)++;
tid += gridsize;
}
return;
}
void randgen(uint32_t* arr, size_t count, uint32_t mask){
uint32_t state = time(NULL);
for(uint32_t i = 0; i < count; i++){
state ^= state << 13;
state ^= state >> 17;
state ^= state << 5;
arr[i] = state & mask;
}
return;
}
int main() {
// Allocate memory, filling in random data and transfer to device
uint32_t *arr_host, *arr_dev, *res_dev;
uint32_t res;
const uint32_t arr_size = 1 << SHIFT;
hipHostMalloc((void**)&arr_host, arr_size*sizeof(uint32_t), hipHostMallocDefault);
hipMalloc((void**)&arr_dev, arr_size*sizeof(uint32_t));
hipMalloc((void**)&res_dev, sizeof(uint32_t));
printf("Finding 42 in %d elements\n", arr_size);
// Search the element 42 using different kernels
for(int target_shift = 12; target_shift <= 32; target_shift+=4){
randgen(arr_host, arr_size, (uint32_t)((1ULL<<target_shift) - 1));
uint32_t exactcnt = 0;
float elapsed = 0;
for(int i=0; i<arr_size; i++)
if(arr_host[i] == 42) exactcnt++;
printf("\nShift %d, with %d elements equal to 42 to be found\n", target_shift, exactcnt);
hipMemcpyAsync(arr_dev, arr_host, arr_size*sizeof(uint32_t), hipMemcpyHostToDevice);
// Performing odd-even computing on 2^25 integers
cuStopwatch sw1;
sw1.start();
res = 0;
hipMemcpyAsync(res_dev, &res, sizeof(uint32_t), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( search_1), dim3(256), dim3(1024), 0, 0, arr_dev, arr_size, res_dev, 42);
hipMemcpyAsync(&res, res_dev, sizeof(uint32_t), hipMemcpyDeviceToHost);
elapsed = sw1.stop();
if(res != 0)
printf("Method 1: %7.4fms, found, returning %u.\n", elapsed, res);
else
printf("Method 1: %7.4fms, not found, returning %u.\n", elapsed, res);
cuStopwatch sw2;
sw2.start();
res = 0;
hipMemcpyAsync(res_dev, &res, sizeof(uint32_t), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( search_2), dim3(256), dim3(1024), 0, 0, arr_dev, arr_size, res_dev, 42);
hipMemcpyAsync(&res, res_dev, sizeof(uint32_t), hipMemcpyDeviceToHost);
elapsed = sw2.stop();
if(res != 0)
printf("Method 2: %7.4fms, found, returning %u.\n", elapsed, res);
else
printf("Method 2: %7.4fms, not found, returning %u.\n", elapsed, res);
cuStopwatch sw3;
sw3.start();
res = 0;
hipMemcpyAsync(res_dev, &res, sizeof(uint32_t), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( search_3), dim3(256), dim3(1024), 0, 0, arr_dev, arr_size, res_dev, 42);
hipMemcpyAsync(&res, res_dev, sizeof(uint32_t), hipMemcpyDeviceToHost);
elapsed = sw3.stop();
if(res != 0)
printf("Method 3: %7.4fms, found, returning %u.\n", elapsed, res);
else
printf("Method 3: %7.4fms, not found, returning %u.\n", elapsed, res);
cuStopwatch sw4;
sw4.start();
res = 0;
hipMemcpyAsync(res_dev, &res, sizeof(uint32_t), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( search_4), dim3(256), dim3(1024), 0, 0, arr_dev, arr_size, res_dev, 42);
hipMemcpyAsync(&res, res_dev, sizeof(uint32_t), hipMemcpyDeviceToHost);
elapsed = sw4.stop();
if(res != 0)
printf("Method 4: %7.4fms, found, returning %u.\n", elapsed, res);
else
printf("Method 4: %7.4fms, not found, returning %u.\n", elapsed, res);
}
// Free memory
hipHostFree(arr_host);
hipFree(arr_dev);
hipFree(res_dev);
return 0;
}
|
aaac4a4c6bc9fb6843103e3597d6897bc3fdfb6c.cu
|
#include <stdio.h>
#include <cuda_runtime.h>
#include <stdint.h>
#include <time.h>
#include "cuStopwatch.cu"
#define SHIFT 27
__global__ void search_1(const uint32_t* arr, uint32_t size, uint32_t* res, uint32_t val) {
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t gridsize = blockDim.x * gridDim.x;
uint32_t warpcnt = 0;
while(tid < size){
uint32_t herecount = (arr[tid] == val) ? 1 : 0;
herecount += __shfl_down_sync(0xffffffff, herecount, 16);
herecount += __shfl_down_sync(0xffffffff, herecount, 8);
herecount += __shfl_down_sync(0xffffffff, herecount, 4);
herecount += __shfl_down_sync(0xffffffff, herecount, 2);
herecount += __shfl_down_sync(0xffffffff, herecount, 1);
warpcnt += herecount;
tid += gridsize;
}
if((threadIdx.x & 31) == 0) atomicAdd(res, warpcnt);
return;
}
__global__ void search_2(const uint32_t* arr, uint32_t size, uint32_t* res, uint32_t val) {
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t gridsize = blockDim.x * gridDim.x;
while(tid < size){
uint32_t ishere = (arr[tid] == val) ? 1 : 0;
if(__any_sync(0xffffffff, ishere))
if ((threadIdx.x & 31) == 0) atomicAdd(res, 1);
tid += gridsize;
}
return;
}
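/* Method 3: the straightforward version -- every thread that reads an element equal to
val performs its own atomicAdd, giving an exact count at the cost of one global atomic
per match. */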
__global__ void search_3(const uint32_t* arr, uint32_t size, uint32_t* res, uint32_t val) {
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t gridsize = blockDim.x * gridDim.x;
while(tid < size){
if(arr[tid] == val) atomicAdd(res, 1);
tid += gridsize;
}
return;
}
/* In the Algo number 4, each thread will check some index of the array.
The thread number 10 will check the indexes (i*gridsize) + 10.
i take the value in {0..(size(res)/gridsize)}
*/
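/* Example with the launch used below (<<<256, 1024>>>): gridsize = 262144, so thread 10
inspects indexes 10, 262154, 524298, ... Here *res serves as an early-exit flag rather
than a counter: threads stop as soon as they observe a non-zero value. The plain,
non-atomic read/increment of *res is tolerated only because the value is used as a
found / not-found flag, and the update is not guaranteed to reach other blocks promptly. */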
__global__ void search_4(const uint32_t* arr, uint32_t size, uint32_t* res, uint32_t val) {
if(*res != 0) return;
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t gridsize = blockDim.x * gridDim.x;
while((tid < size) && (*res == 0)){
if(arr[tid] == val) (*res)++;
tid += gridsize;
}
return;
}
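/* Fills arr with pseudo-random values from a 32-bit xorshift generator seeded with
time(NULL); the mask keeps only the low bits, which controls how likely the value 42
is to occur in the array. */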
void randgen(uint32_t* arr, size_t count, uint32_t mask){
uint32_t state = time(NULL);
for(uint32_t i = 0; i < count; i++){
state ^= state << 13;
state ^= state >> 17;
state ^= state << 5;
arr[i] = state & mask;
}
return;
}
int main() {
// Allocate memory, filling in random data and transfer to device
uint32_t *arr_host, *arr_dev, *res_dev;
uint32_t res;
const uint32_t arr_size = 1 << SHIFT;
cudaHostAlloc((void**)&arr_host, arr_size*sizeof(uint32_t), cudaHostAllocDefault);
cudaMalloc((void**)&arr_dev, arr_size*sizeof(uint32_t));
cudaMalloc((void**)&res_dev, sizeof(uint32_t));
printf("Finding 42 in %d elements\n", arr_size);
// Search the element 42 using different kernels
for(int target_shift = 12; target_shift <= 32; target_shift+=4){
randgen(arr_host, arr_size, (uint32_t)((1ull << target_shift) - 1));
uint32_t exactcnt = 0;
float elapsed = 0;
for(int i=0; i<arr_size; i++)
if(arr_host[i] == 42) exactcnt++;
printf("\nShift %d, with %u elements equal to 42 to be found\n", target_shift, exactcnt);
cudaMemcpyAsync(arr_dev, arr_host, arr_size*sizeof(uint32_t), cudaMemcpyHostToDevice);
// Time each of the four search kernels on the same random data
cuStopwatch sw1;
sw1.start();
res = 0;
cudaMemcpyAsync(res_dev, &res, sizeof(uint32_t), cudaMemcpyHostToDevice);
search_1<<<256, 1024>>>(arr_dev, arr_size, res_dev, 42);
cudaMemcpyAsync(&res, res_dev, sizeof(uint32_t), cudaMemcpyDeviceToHost);
elapsed = sw1.stop();
if(res != 0)
printf("Method 1: %7.4fms, found, returning %u.\n", elapsed, res);
else
printf("Method 1: %7.4fms, not found, returning %u.\n", elapsed, res);
cuStopwatch sw2;
sw2.start();
res = 0;
cudaMemcpyAsync(res_dev, &res, sizeof(uint32_t), cudaMemcpyHostToDevice);
search_2<<<256, 1024>>>(arr_dev, arr_size, res_dev, 42);
cudaMemcpyAsync(&res, res_dev, sizeof(uint32_t), cudaMemcpyDeviceToHost);
elapsed = sw2.stop();
if(res != 0)
printf("Method 2: %7.4fms, found, returning %u.\n", elapsed, res);
else
printf("Method 2: %7.4fms, not found, returning %u.\n", elapsed, res);
cuStopwatch sw3;
sw3.start();
res = 0;
cudaMemcpyAsync(res_dev, &res, sizeof(uint32_t), cudaMemcpyHostToDevice);
search_3<<<256, 1024>>>(arr_dev, arr_size, res_dev, 42);
cudaMemcpyAsync(&res, res_dev, sizeof(uint32_t), cudaMemcpyDeviceToHost);
elapsed = sw3.stop();
if(res != 0)
printf("Method 3: %7.4fms, found, returning %u.\n", elapsed, res);
else
printf("Method 3: %7.4fms, not found, returning %u.\n", elapsed, res);
cuStopwatch sw4;
sw4.start();
res = 0;
cudaMemcpyAsync(res_dev, &res, sizeof(uint32_t), cudaMemcpyHostToDevice);
search_4<<<256, 1024>>>(arr_dev, arr_size, res_dev, 42);
cudaMemcpyAsync(&res, res_dev, sizeof(uint32_t), cudaMemcpyDeviceToHost);
elapsed = sw4.stop();
if(res != 0)
printf("Method 4: %7.4fms, found, returning %u.\n", elapsed, res);
else
printf("Method 4: %7.4fms, not found, returning %u.\n", elapsed, res);
}
// Free memory
cudaFreeHost(arr_host);
cudaFree(arr_dev);
cudaFree(res_dev);
return 0;
}
|
7e828e404f573f4c82cacd3b0f18bc8586126887.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_computeModelMany1.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
int sizeImage = XSIZE*YSIZE;
double *result = NULL;
hipMalloc(&result, XSIZE*YSIZE*sizeof(double));
double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE*sizeof(double));
double *amplitude = NULL;
hipMalloc(&amplitude, XSIZE*YSIZE*sizeof(double));
double background = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
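// round the matrix dimensions up to multiples of the block size so the grid covers the whole matrix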
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(vec_computeModelMany1, dim3(gridBlock), dim3(threadBlock), 0, 0, n, sizeImage, result, x, amplitude, background);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(vec_computeModelMany1, dim3(gridBlock), dim3(threadBlock), 0, 0, n, sizeImage, result, x, amplitude, background);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(vec_computeModelMany1, dim3(gridBlock), dim3(threadBlock), 0, 0, n, sizeImage, result, x, amplitude, background);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
7e828e404f573f4c82cacd3b0f18bc8586126887.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_computeModelMany1.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
int sizeImage = XSIZE*YSIZE;
double *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE*sizeof(double));
double *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE*sizeof(double));
double *amplitude = NULL;
cudaMalloc(&amplitude, XSIZE*YSIZE*sizeof(double));
double background = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
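// round the matrix dimensions up to multiples of the block size so the grid covers the whole matrix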
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_computeModelMany1<<<gridBlock,threadBlock>>>(n,sizeImage,result,x,amplitude,background);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_computeModelMany1<<<gridBlock,threadBlock>>>(n,sizeImage,result,x,amplitude,background);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_computeModelMany1<<<gridBlock,threadBlock>>>(n,sizeImage,result,x,amplitude,background);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
3a24192c9cd26c464280fb05ec0ddbedc55a6aff.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Added by Karel Adamek
//#define MSD_DEBUG
#include "headers/params.h"
#include "headers/device_MSD_Configuration.h"
#include "device_MSD_limited_kernel.cu"
int Choose_x_dim(int grid_dim){
int seive[15] = { 32, 31, 29, 23, 19, 17, 16, 13, 11, 8, 7, 5, 4, 3, 2 };
int f, nRest, nBlocks, N, N_accepted;
N = 1;
N_accepted = 1;
for (int i = 0; i < 4; i++) {
for (f = 0; f < 15; f++) {
nBlocks = grid_dim / seive[f];
nRest = grid_dim - nBlocks*seive[f];
if (nRest == 0) {
N_accepted = N_accepted*N;
N = seive[f];
break;
}
}
if (( N_accepted*N ) > 32 || N == 1)
return ( N_accepted );
grid_dim = grid_dim / N;
}
return ( N_accepted );
}
int Choose_y_dim(int grid_dim){
int seive[5] = { 32, 16, 8, 4, 2 };
int f, nRest, nBlocks, N;
N = 1;
for (f = 0; f < 5; f++) {
nBlocks = grid_dim / seive[f];
nRest = grid_dim - nBlocks*seive[f];
if (nRest == 0) {
N = seive[f];
break;
}
}
return ( N );
}
/*
int Choose_divider(int number, int max_divider){
int seive[12]={2, 3, 4, 5, 7, 11, 13, 17, 19, 23, 29, 31};
int f, nRest, nBlocks, N, N_accepted;
N=1;N_accepted=1;
do {
N=1;
for(f=0; f<12; f++){
nBlocks=number/seive[f];
nRest=number - nBlocks*seive[f];
if(nRest==0) {
N=seive[f];
N_accepted=N_accepted*N;
break;
}
}
number=number/N;
} while ( (N_accepted)<=max_divider && N>1 );
return(N_accepted/N);
}
*/
void MSD_limited_init() {
//---------> Specific nVidia stuff
hipDeviceSetCacheConfig (hipFuncCachePreferShared);
hipDeviceSetSharedMemConfig (hipSharedMemBankSizeFourByte);
}
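/* Host wrapper computing the mean and standard deviation of d_input in two steps:
MSD_GPU_limited writes one 3-float partial result per thread block into temporary
device storage, and MSD_GPU_final_regular reduces those partials into d_MSD.
Returns the number of trailing time samples left unprocessed when that remainder
is smaller than 32, otherwise 0. */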
int MSD_limited(float *d_input, float *d_MSD, int nDMs, int nTimesamples, int offset) {
//---------> Task specific
ushort nBlocks_x, nBlocks_y;
int nBlocks_total, nSteps_x, nSteps_y, nRest;
float *d_output;
//---------> determining data block size per kernel
nSteps_x = PD_NTHREADS;
nBlocks_x = (int) ((nTimesamples-offset)/nSteps_x);
nRest = nTimesamples - offset - nBlocks_x*nSteps_x;
if(nRest>32) nBlocks_x++;
nSteps_y = Choose_divider(nDMs,64);
nBlocks_y = nDMs/nSteps_y;
nBlocks_total=nBlocks_x*nBlocks_y;
//---------> determining number of threads for final kernel
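// halve nThreads (starting from 1024) until each thread of the final reduction kernel
// has at least 32 partial results to process, with a floor of 32 threads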
int nThreads=2048;
int itemp=0;
while(itemp==0 && nThreads>32){
nThreads=(nThreads>>1);
itemp=(int) (nBlocks_total/(nThreads*32));
}
if(nThreads<32) nThreads=32;
dim3 gridSize(nBlocks_x, nBlocks_y, 1);
dim3 blockSize(PD_NTHREADS, 1, 1);
dim3 final_gridSize(1, 1, 1);
dim3 final_blockSize(nThreads, 1, 1);
#ifdef MSD_DEBUG
printf("\n\n");
printf("----------------> MSD debug: (MSD_limited)\n");
printf("Kernel for calculating partials:\n");
printf("ThreadBlocks (TB) in x:%d; Elements processed by TB in x:%d; Remainder in x:%d is processed\n", nBlocks_x, nSteps_x, nRest);
printf("ThreadBlocks (TB) in y:%d; Elements processed by TB in y:%d; Remainder in y:%d is processed\n", nBlocks_y, nSteps_y, 0);
printf("gridSize=(%d,%d,%d)\n", gridSize.x, gridSize.y, gridSize.z);
printf("blockSize=(%d,%d,%d)\n", blockSize.x, blockSize.y, blockSize.z);
printf("Shared memory required: %0.3f B\n", (float) (PD_NTHREADS*3*4));
printf("Kernel for final calculation of mean and standard deviation:\n");
size_t free_mem,total_mem;
hipMemGetInfo(&free_mem,&total_mem);
printf("Memory required for temporary storage:%0.3f MB which is %d floats\n",(nBlocks_total*3*sizeof(float))/(1024.0*1024), nBlocks_total*3);
printf("Memory available:%0.3f MB \n", ((float) free_mem)/(1024.0*1024.0) );
printf("gridSize=(%d,%d,%d)\n", final_gridSize.x, final_gridSize.y, final_gridSize.z);
printf("blockSize=(%d,%d,%d)\n", final_blockSize.x, final_blockSize.y, final_blockSize.z);
printf("---------------------------<\n");
#endif
//---------> Allocation of temporary memory
hipMalloc((void **) &d_output, nBlocks_total*3*sizeof(float));
MSD_init();
hipLaunchKernelGGL(( MSD_GPU_limited), dim3(gridSize),dim3(blockSize), 0, 0, d_input, d_output, nDMs/nBlocks_y, nTimesamples, offset);
hipLaunchKernelGGL(( MSD_GPU_final_regular), dim3(final_gridSize),dim3(final_blockSize), 0, 0, d_output, d_MSD, nBlocks_total);
hipFree(d_output);
#ifdef MSD_DEBUG
float h_MSD[3];
hipMemcpy(h_MSD, d_MSD, 3*sizeof(float), hipMemcpyDeviceToHost);
printf("Output: Mean: %e, Standard deviation: %e; Elements:%zu;\n", h_MSD[0], h_MSD[1], (size_t) h_MSD[2]);
printf("---------------------------<\n");
#endif
if (nRest < 32) return ( nRest );
else return ( 0 );
}
int MSD_limited(float *d_input, float *d_MSD, float *d_temp, MSD_Configuration *MSD_conf) {
#ifdef MSD_DEBUG
MSD_conf->print();
#endif
MSD_init();
hipLaunchKernelGGL(( MSD_GPU_limited), dim3(MSD_conf->partials_gridSize),dim3(MSD_conf->partials_blockSize), 0, 0, d_input, &d_temp[MSD_conf->address*MSD_PARTIAL_SIZE], MSD_conf->nSteps.y, (int) MSD_conf->nTimesamples, (int) MSD_conf->offset);
hipLaunchKernelGGL(( MSD_GPU_final_regular), dim3(MSD_conf->final_gridSize),dim3(MSD_conf->final_blockSize), 0, 0, &d_temp[MSD_conf->address*MSD_PARTIAL_SIZE], d_MSD, MSD_conf->nBlocks_total);
#ifdef MSD_DEBUG
float h_MSD[3];
hipMemcpy(h_MSD, d_MSD, 3*sizeof(float), hipMemcpyDeviceToHost);
printf("Output: Mean: %e, Standard deviation: %e; Elements:%zu;\n", h_MSD[0], h_MSD[1], (size_t) h_MSD[2]);
printf("---------------------------<\n");
#endif
return (0);
}
int MSD_limited_continuous(float *d_input, float *d_MSD, float *d_previous_partials, int nDMs, int nTimesamples, int offset) {
//---------> Task specific
ushort nBlocks_x, nBlocks_y;
int nBlocks_total, nSteps_x, nSteps_y, nRest;
float *d_output;
//---------> determining data block size per kernel
nSteps_x = PD_NTHREADS;
nBlocks_x = (int) ((nTimesamples-offset)/nSteps_x);
nRest = nTimesamples - offset - nBlocks_x*nSteps_x;
if(nRest>32) nBlocks_x++;
nSteps_y = Choose_divider(nDMs,64);
nBlocks_y = nDMs/nSteps_y;
nBlocks_total=nBlocks_x*nBlocks_y;
//---------> determining number of threads for final kernel
int nThreads=2048;
int itemp=0;
while(itemp==0 && nThreads>32){
nThreads=(nThreads>>1);
itemp=(int) (nBlocks_total/(nThreads*32));
}
if(nThreads<32) nThreads=32;
dim3 gridSize(nBlocks_x, nBlocks_y, 1);
dim3 blockSize(PD_NTHREADS, 1, 1);
dim3 final_gridSize(1, 1, 1);
dim3 final_blockSize(nThreads, 1, 1);
#ifdef MSD_DEBUG
printf("\n\n");
printf("----------------> MSD debug: (MSD_limited)\n");
printf("Kernel for calculating partials:\n");
printf("ThreadBlocks (TB) in x:%d; Elements processed by TB in x:%d; Remainder in x:%d is processed\n", nBlocks_x, nSteps_x, nRest);
printf("ThreadBlocks (TB) in y:%d; Elements processed by TB in y:%d; Remainder in y:%d is processed\n", nBlocks_y, nSteps_y, 0);
printf("gridSize=(%d,%d,%d)\n", gridSize.x, gridSize.y, gridSize.z);
printf("blockSize=(%d,%d,%d)\n", blockSize.x, blockSize.y, blockSize.z);
printf("Shared memory required: %0.3f B\n", (float) (PD_NTHREADS*3*4));
printf("Kernel for final calculation of mean and standard deviation:\n");
size_t free_mem,total_mem;
hipMemGetInfo(&free_mem,&total_mem);
printf("Memory required for temporary storage:%0.3f MB which is %d floats\n",(nBlocks_total*3*sizeof(float))/(1024.0*1024), nBlocks_total*3);
printf("Memory available:%0.3f MB \n", ((float) free_mem)/(1024.0*1024.0) );
printf("gridSize=(%d,%d,%d)\n", final_gridSize.x, final_gridSize.y, final_gridSize.z);
printf("blockSize=(%d,%d,%d)\n", final_blockSize.x, final_blockSize.y, final_blockSize.z);
printf("---------------------------<\n");
#endif
//---------> Allocation of temporary memory
hipMalloc((void **) &d_output, nBlocks_total*3*sizeof(float));
MSD_init();
hipLaunchKernelGGL(( MSD_GPU_limited), dim3(gridSize),dim3(blockSize), 0, 0, d_input, d_output, nDMs/nBlocks_y, nTimesamples, offset);
hipLaunchKernelGGL(( MSD_GPU_final_regular), dim3(final_gridSize),dim3(final_blockSize), 0, 0, d_output, d_MSD, d_previous_partials, nBlocks_total);
hipFree(d_output);
#ifdef MSD_DEBUG
float h_MSD[3];
hipMemcpy(h_MSD, d_MSD, 3*sizeof(float), hipMemcpyDeviceToHost);
printf("Output: Mean: %e, Standard deviation: %e; Elements:%zu;\n", h_MSD[0], h_MSD[1], (size_t) h_MSD[2]);
printf("---------------------------<\n");
#endif
if (nRest < 32) return ( nRest );
else return ( 0 );
}
int MSD_limited_continuous(float *d_input, float *d_MSD, float *d_previous_partials, float *d_temp, MSD_Configuration *MSD_conf) {
#ifdef MSD_DEBUG
MSD_conf->print();
#endif
MSD_init();
hipLaunchKernelGGL(( MSD_GPU_limited), dim3(MSD_conf->partials_gridSize),dim3(MSD_conf->partials_blockSize), 0, 0, d_input, &d_temp[MSD_conf->address*MSD_PARTIAL_SIZE], MSD_conf->nSteps.y, (int) MSD_conf->nTimesamples, (int) MSD_conf->offset);
hipLaunchKernelGGL(( MSD_GPU_final_regular), dim3(MSD_conf->final_gridSize),dim3(MSD_conf->final_blockSize), 0, 0, &d_temp[MSD_conf->address*MSD_PARTIAL_SIZE], d_MSD, d_previous_partials, MSD_conf->nBlocks_total);
#ifdef MSD_DEBUG
float h_MSD[3];
hipMemcpy(h_MSD, d_MSD, 3*sizeof(float), hipMemcpyDeviceToHost);
printf("Output: Mean: %e, Standard deviation: %e; Elements:%zu;\n", h_MSD[0], h_MSD[1], (size_t) h_MSD[2]);
printf("---------------------------<\n");
#endif
return (0);
}
int MSD_linear_approximation(float *d_input, float *d_MSD_T, int nTaps, int nDMs, int nTimesamples, int offset){
//---------> Task specific
int nBlocks_x, nBlocks_y, nBlocks_total, nSteps_x, nSteps_y, nRest, nThreads, itemp; //epw = elements per warp 32 for float 64 for float2
float *d_output;
float *d_output_taps;
float *d_MSD_T_base;
//---------> determining data block size per kernel
nSteps_x = 2*PD_NTHREADS-nTaps+4;
nBlocks_x = (int) ((nTimesamples-offset)/nSteps_x);
nRest = nTimesamples - offset - nBlocks_x*nSteps_x;
if(nRest>128) nBlocks_x++;
nSteps_y = Choose_divider(nDMs,64);
nBlocks_y=nDMs/nSteps_y;
nBlocks_total=nBlocks_x*nBlocks_y;
dim3 gridSize(nBlocks_x, nBlocks_y, 1);
dim3 blockSize(PD_NTHREADS, 1, 1);
//---------> determining number of threads for final kernel
nThreads=2048;
itemp=0;
while(itemp==0 && nThreads>32){
nThreads=(nThreads>>1);
itemp=(int) (nBlocks_total/(nThreads*32));
}
if(nThreads<32) nThreads=32;
dim3 final_gridSize(1, 1, 1);
dim3 final_blockSize(nThreads, 1, 1);
#ifdef MSD_DEBUG
printf("\n\n");
printf("----------------> MSD debug:\n");
printf("Kernel for calculating partials:\n");
printf("ThreadBlocks (TB) in x:%d; Elements processed by TB in x:%d; Remainder in x:%d", nBlocks_x, nSteps_x, nRest);
if(nRest>3*nTaps) printf(" is processed\n");
else printf(" is not processed\n");
printf("ThreadBlocks (TB) in y:%d; Elements processed by TB in y:%d; Remainder in y:%d is processed\n", nBlocks_y, nSteps_y, 0);
printf("gridSize=(%d,%d,%d)\n", gridSize.x, gridSize.y, gridSize.z);
printf("blockSize=(%d,%d,%d)\n", blockSize.x, blockSize.y, blockSize.z);
printf("Shared memory required: %0.3f B\n", (float) (PD_NTHREADS*24));
printf("Kernel for final calculation of mean and standard deviation:\n");
size_t free_mem,total_mem;
hipMemGetInfo(&free_mem,&total_mem);
printf("Memory required for temporary storage:%0.3f MB which is %d floats\n",(nBlocks_total*3*2*sizeof(float))/(1024.0*1024), nBlocks_total*3*2);
printf("Memory available:%0.3f MB \n", ((float) free_mem)/(1024.0*1024.0) );
printf("gridSize=(%d,%d,%d)\n", final_gridSize.x, final_gridSize.y, final_gridSize.z);
printf("blockSize=(%d,%d,%d)\n", final_blockSize.x, final_blockSize.y, final_blockSize.z);
printf("\n");
#endif
if(nBlocks_total>0){
//---------> Allocation of temporary memory
if ( hipSuccess != hipMalloc((void **) &d_output, nBlocks_total*3*sizeof(float))) {printf("Allocation error!\n"); exit(1001);}
if ( hipSuccess != hipMalloc((void **) &d_output_taps, nBlocks_total*3*sizeof(float))) {printf("Allocation error!\n"); exit(1001);}
if ( hipSuccess != hipMalloc((void **) &d_MSD_T_base, sizeof(float)*3)) {printf("Allocation error!\n"); exit(1001);}
//---------> MSD
MSD_init();
hipLaunchKernelGGL(( MSD_GPU_LA_ALL), dim3(gridSize),dim3(blockSize), 0, 0, d_input, d_output, d_output_taps, nSteps_y, nTaps, nTimesamples, offset);
hipLaunchKernelGGL(( MSD_GPU_final_regular), dim3(final_gridSize), dim3(final_blockSize), 0, 0, d_output, d_MSD_T_base, nBlocks_total);
hipLaunchKernelGGL(( MSD_GPU_final_create_LA), dim3(final_gridSize), dim3(final_blockSize), 0, 0, d_output_taps, d_MSD_T, d_MSD_T_base, nTaps, nBlocks_total);
#ifdef MSD_DEBUG
float h_MSD_T[3], h_MSD_T_base[3];
hipMemcpy(h_MSD_T, d_MSD_T, 3*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(h_MSD_T_base, d_MSD_T_base, 3*sizeof(float), hipMemcpyDeviceToHost);
printf("Output: Mean: %e, Standard deviation: %e; modifier:%e;\n", h_MSD_T[0], h_MSD_T[1], h_MSD_T[2]);
printf("GPU results after 1 taps: Mean: %e, Standard deviation: %e; Number of elements:%d;\n", h_MSD_T_base[0], h_MSD_T_base[1], (int) h_MSD_T_base[2]);
printf("---------------------------<\n");
#endif
//---------> De-allocation of temporary memory
hipFree(d_output);
hipFree(d_output_taps);
hipFree(d_MSD_T_base);
}
else {
printf("Number of time samples is too small! Increase the number of samples sent to the boxcar filters. (MSD_linear_approximation)\n");
exit(1002);
}
if(nRest<64) return(nRest);
else return(0);
}
int MSD_LA_Nth(float *d_input, float *d_bv_in, float *d_MSD_T, float *d_MSD_DIT, int nTaps, int nDMs, int nTimesamples, int offset, int DIT_value){
//---------> Task specific
int nBlocks_x, nBlocks_y, nBlocks_total, nSteps_x, nSteps_y, nRest, nThreads, itemp; //epw = elements per warp 32 for float 64 for float2
float *d_output;
float *d_output_FIR;
float *d_MSD_T_base;
//---------> determining data block size per kernel
nSteps_x = 2*PD_NTHREADS-nTaps+4;
nBlocks_x = (int) ((nTimesamples-offset)/nSteps_x);
nRest = nTimesamples - offset - nBlocks_x*nSteps_x;
if(nRest>0) nBlocks_x++;
nSteps_y = Choose_divider(nDMs,64);
nBlocks_y=nDMs/nSteps_y;
nBlocks_total=nBlocks_x*nBlocks_y;
dim3 gridSize(nBlocks_x, nBlocks_y, 1);
dim3 blockSize(PD_NTHREADS, 1, 1);
//---------> determining number of threads for final kernel
nThreads=2048;
itemp=0;
while(itemp==0 && nThreads>32){
nThreads=(nThreads>>1);
itemp=(int) (nBlocks_total/(nThreads*32));
}
if(nThreads<32) nThreads=32;
dim3 final_gridSize(1, 1, 1);
dim3 final_blockSize(nThreads, 1, 1);
#ifdef MSD_DEBUG
printf("\n\n");
printf("----------------> MSD debug:\n");
printf("Kernel for calculating partials: (MSD_LA_Nth)\n");
printf("nTimesamples:%d; offset:%d, nDMs:%d;\n", nTimesamples, offset, nDMs);
printf("ThreadBlocks (TB) in x:%d; Elements processed by TB in x:%d; Remainder in x:%d", nBlocks_x, nSteps_x, nRest);
if(nRest>3*nTaps) printf(" is processed\n");
else printf(" is not processed\n");
printf("ThreadBlocks (TB) in y:%d; Elements processed by TB in y:%d; Remainder in y:%d is processed\n", nBlocks_y, nSteps_y, 0);
printf("gridSize=(%d,%d,%d)\n", gridSize.x, gridSize.y, gridSize.z);
printf("blockSize=(%d,%d,%d)\n", blockSize.x, blockSize.y, blockSize.z);
printf("Shared memory required: %0.3f B\n", (float) (PD_NTHREADS*24));
printf("Kernel for final calculation of mean and standard deviation:\n");
size_t free_mem,total_mem;
hipMemGetInfo(&free_mem,&total_mem);
printf("Memory required for temporary storage:%0.3f MB which is %d floats\n",(nBlocks_total*3*2*sizeof(float))/(1024.0*1024), nBlocks_total*3*2);
printf("Memory available:%0.3f MB \n", ((float) free_mem)/(1024.0*1024.0) );
printf("gridSize=(%d,%d,%d)\n", final_gridSize.x, final_gridSize.y, final_gridSize.z);
printf("blockSize=(%d,%d,%d)\n", final_blockSize.x, final_blockSize.y, final_blockSize.z);
printf("\n");
#endif
if(nBlocks_total>0){
//---------> Allocation of temporary memory
if ( hipSuccess != hipMalloc((void **) &d_output, nBlocks_total*3*sizeof(float))) {printf("Allocation error!\n"); exit(1001);}
if ( hipSuccess != hipMalloc((void **) &d_output_FIR, nBlocks_total*3*sizeof(float))) {printf("Allocation error!\n"); exit(1001);}
if ( hipSuccess != hipMalloc((void **) &d_MSD_T_base, sizeof(float)*3)) {printf("Allocation error!\n"); exit(1001);}
//---------> MSD
MSD_init();
hipLaunchKernelGGL(( MSD_GPU_LA_ALL_Nth), dim3(gridSize),dim3(blockSize), 0, 0, d_input, d_bv_in, d_output, d_output_FIR, nSteps_y, nTaps, nTimesamples, offset);
hipLaunchKernelGGL(( MSD_GPU_final_regular), dim3(final_gridSize), dim3(final_blockSize), 0, 0, d_output, d_MSD_T_base, nBlocks_total);
hipLaunchKernelGGL(( MSD_GPU_final_create_LA_Nth), dim3(final_gridSize), dim3(final_blockSize), 0, 0, d_output_FIR, d_MSD_T, d_MSD_T_base, d_MSD_DIT, nTaps, nBlocks_total, DIT_value);
#ifdef MSD_DEBUG
float h_MSD_T[4], h_MSD_T_base[3], h_MSD_DIT[3];
hipMemcpy(h_MSD_T, d_MSD_T, 4*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(h_MSD_T_base, d_MSD_T_base, 3*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(h_MSD_DIT, d_MSD_DIT, 3*sizeof(float), hipMemcpyDeviceToHost);
printf("d_MSD_T: BV Mean: %f, Standard deviation: %f; modifier:%f; DIT Mean:%f\n", h_MSD_T[0], h_MSD_T[1], h_MSD_T[2], h_MSD_T[3]);
printf("MSD for d_bv_in: Mean: %f, Standard deviation: %f; Number of elements:%d;\n", h_MSD_T_base[0], h_MSD_T_base[1], (int) h_MSD_T_base[2]);
printf("MSD for DIT: Mean: %f, Standard deviation: %f; Number of elements:%d;\n", h_MSD_DIT[0], h_MSD_DIT[1], (int) h_MSD_DIT[2]);
printf("---------------------------<\n");
#endif
//---------> De-allocation of temporary memory
hipFree(d_output);
hipFree(d_output_FIR);
hipFree(d_MSD_T_base);
}
else {
printf("WARNING: Number of time samples is too small! Increase the number of samples sent to the boxcar filters. (MSD_LA_Nth)\n");
return(1);
}
return(0);
}
|
3a24192c9cd26c464280fb05ec0ddbedc55a6aff.cu
|
//Added by Karel Adamek
//#define MSD_DEBUG
#include "headers/params.h"
#include "headers/device_MSD_Configuration.h"
#include "device_MSD_limited_kernel.cu"
int Choose_x_dim(int grid_dim){
int seive[15] = { 32, 31, 29, 23, 19, 17, 16, 13, 11, 8, 7, 5, 4, 3, 2 };
int f, nRest, nBlocks, N, N_accepted;
N = 1;
N_accepted = 1;
for (int i = 0; i < 4; i++) {
for (f = 0; f < 15; f++) {
nBlocks = grid_dim / seive[f];
nRest = grid_dim - nBlocks*seive[f];
if (nRest == 0) {
N_accepted = N_accepted*N;
N = seive[f];
break;
}
}
if (( N_accepted*N ) > 32 || N == 1)
return ( N_accepted );
grid_dim = grid_dim / N;
}
return ( N_accepted );
}
int Choose_y_dim(int grid_dim){
int seive[5] = { 32, 16, 8, 4, 2 };
int f, nRest, nBlocks, N;
N = 1;
for (f = 0; f < 5; f++) {
nBlocks = grid_dim / seive[f];
nRest = grid_dim - nBlocks*seive[f];
if (nRest == 0) {
N = seive[f];
break;
}
}
return ( N );
}
/*
int Choose_divider(int number, int max_divider){
int seive[12]={2, 3, 4, 5, 7, 11, 13, 17, 19, 23, 29, 31};
int f, nRest, nBlocks, N, N_accepted;
N=1;N_accepted=1;
do {
N=1;
for(f=0; f<12; f++){
nBlocks=number/seive[f];
nRest=number - nBlocks*seive[f];
if(nRest==0) {
N=seive[f];
N_accepted=N_accepted*N;
break;
}
}
number=number/N;
} while ( (N_accepted)<=max_divider && N>1 );
return(N_accepted/N);
}
*/
void MSD_limited_init() {
//---------> Specific nVidia stuff
cudaDeviceSetCacheConfig (cudaFuncCachePreferShared);
cudaDeviceSetSharedMemConfig (cudaSharedMemBankSizeFourByte);
}
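/* Host wrapper computing the mean and standard deviation of d_input in two steps:
MSD_GPU_limited writes one 3-float partial result per thread block into temporary
device storage, and MSD_GPU_final_regular reduces those partials into d_MSD.
Returns the number of trailing time samples left unprocessed when that remainder
is smaller than 32, otherwise 0. */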
int MSD_limited(float *d_input, float *d_MSD, int nDMs, int nTimesamples, int offset) {
//---------> Task specific
ushort nBlocks_x, nBlocks_y;
int nBlocks_total, nSteps_x, nSteps_y, nRest;
float *d_output;
//---------> determining data block size per kernel
nSteps_x = PD_NTHREADS;
nBlocks_x = (int) ((nTimesamples-offset)/nSteps_x);
nRest = nTimesamples - offset - nBlocks_x*nSteps_x;
if(nRest>32) nBlocks_x++;
nSteps_y = Choose_divider(nDMs,64);
nBlocks_y = nDMs/nSteps_y;
nBlocks_total=nBlocks_x*nBlocks_y;
//---------> determining number of threads for final kernel
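// halve nThreads (starting from 1024) until each thread of the final reduction kernel
// has at least 32 partial results to process, with a floor of 32 threads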
int nThreads=2048;
int itemp=0;
while(itemp==0 && nThreads>32){
nThreads=(nThreads>>1);
itemp=(int) (nBlocks_total/(nThreads*32));
}
if(nThreads<32) nThreads=32;
dim3 gridSize(nBlocks_x, nBlocks_y, 1);
dim3 blockSize(PD_NTHREADS, 1, 1);
dim3 final_gridSize(1, 1, 1);
dim3 final_blockSize(nThreads, 1, 1);
#ifdef MSD_DEBUG
printf("\n\n");
printf("----------------> MSD debug: (MSD_limited)\n");
printf("Kernel for calculating partials:\n");
printf("ThreadBlocks (TB) in x:%d; Elements processed by TB in x:%d; Remainder in x:%d is processed\n", nBlocks_x, nSteps_x, nRest);
printf("ThreadBlocks (TB) in y:%d; Elements processed by TB in y:%d; Remainder in y:%d is processed\n", nBlocks_y, nSteps_y, 0);
printf("gridSize=(%d,%d,%d)\n", gridSize.x, gridSize.y, gridSize.z);
printf("blockSize=(%d,%d,%d)\n", blockSize.x, blockSize.y, blockSize.z);
printf("Shared memory required: %0.3f B\n", (float) (PD_NTHREADS*3*4));
printf("Kernel for final calculation of mean and standard deviation:\n");
size_t free_mem,total_mem;
cudaMemGetInfo(&free_mem,&total_mem);
printf("Memory required for temporary storage:%0.3f MB which is %d floats\n",(nBlocks_total*3*sizeof(float))/(1024.0*1024), nBlocks_total*3);
printf("Memory available:%0.3f MB \n", ((float) free_mem)/(1024.0*1024.0) );
printf("gridSize=(%d,%d,%d)\n", final_gridSize.x, final_gridSize.y, final_gridSize.z);
printf("blockSize=(%d,%d,%d)\n", final_blockSize.x, final_blockSize.y, final_blockSize.z);
printf("---------------------------<\n");
#endif
//---------> Allocation of temporary memory
cudaMalloc((void **) &d_output, nBlocks_total*3*sizeof(float));
MSD_init();
MSD_GPU_limited<<<gridSize,blockSize>>>(d_input, d_output, nDMs/nBlocks_y, nTimesamples, offset);
MSD_GPU_final_regular<<<final_gridSize,final_blockSize>>>(d_output, d_MSD, nBlocks_total);
cudaFree(d_output);
#ifdef MSD_DEBUG
float h_MSD[3];
cudaMemcpy(h_MSD, d_MSD, 3*sizeof(float), cudaMemcpyDeviceToHost);
printf("Output: Mean: %e, Standard deviation: %e; Elements:%zu;\n", h_MSD[0], h_MSD[1], (size_t) h_MSD[2]);
printf("---------------------------<\n");
#endif
if (nRest < 32) return ( nRest );
else return ( 0 );
}
int MSD_limited(float *d_input, float *d_MSD, float *d_temp, MSD_Configuration *MSD_conf) {
#ifdef MSD_DEBUG
MSD_conf->print();
#endif
MSD_init();
MSD_GPU_limited<<<MSD_conf->partials_gridSize,MSD_conf->partials_blockSize>>>(d_input, &d_temp[MSD_conf->address*MSD_PARTIAL_SIZE], MSD_conf->nSteps.y, (int) MSD_conf->nTimesamples, (int) MSD_conf->offset);
MSD_GPU_final_regular<<<MSD_conf->final_gridSize,MSD_conf->final_blockSize>>>(&d_temp[MSD_conf->address*MSD_PARTIAL_SIZE], d_MSD, MSD_conf->nBlocks_total);
#ifdef MSD_DEBUG
float h_MSD[3];
cudaMemcpy(h_MSD, d_MSD, 3*sizeof(float), cudaMemcpyDeviceToHost);
printf("Output: Mean: %e, Standard deviation: %e; Elements:%zu;\n", h_MSD[0], h_MSD[1], (size_t) h_MSD[2]);
printf("---------------------------<\n");
#endif
return (0);
}
int MSD_limited_continuous(float *d_input, float *d_MSD, float *d_previous_partials, int nDMs, int nTimesamples, int offset) {
//---------> Task specific
ushort nBlocks_x, nBlocks_y;
int nBlocks_total, nSteps_x, nSteps_y, nRest;
float *d_output;
//---------> determining data block size per kernel
nSteps_x = PD_NTHREADS;
nBlocks_x = (int) ((nTimesamples-offset)/nSteps_x);
nRest = nTimesamples - offset - nBlocks_x*nSteps_x;
if(nRest>32) nBlocks_x++;
nSteps_y = Choose_divider(nDMs,64);
nBlocks_y = nDMs/nSteps_y;
nBlocks_total=nBlocks_x*nBlocks_y;
//---------> determining number of threads for final kernel
int nThreads=2048;
int itemp=0;
while(itemp==0 && nThreads>32){
nThreads=(nThreads>>1);
itemp=(int) (nBlocks_total/(nThreads*32));
}
if(nThreads<32) nThreads=32;
dim3 gridSize(nBlocks_x, nBlocks_y, 1);
dim3 blockSize(PD_NTHREADS, 1, 1);
dim3 final_gridSize(1, 1, 1);
dim3 final_blockSize(nThreads, 1, 1);
#ifdef MSD_DEBUG
printf("\n\n");
printf("----------------> MSD debug: (MSD_limited)\n");
printf("Kernel for calculating partials:\n");
printf("ThreadBlocks (TB) in x:%d; Elements processed by TB in x:%d; Remainder in x:%d is processed\n", nBlocks_x, nSteps_x, nRest);
printf("ThreadBlocks (TB) in y:%d; Elements processed by TB in y:%d; Remainder in y:%d is processed\n", nBlocks_y, nSteps_y, 0);
printf("gridSize=(%d,%d,%d)\n", gridSize.x, gridSize.y, gridSize.z);
printf("blockSize=(%d,%d,%d)\n", blockSize.x, blockSize.y, blockSize.z);
printf("Shared memory required: %0.3f B\n", (float) (PD_NTHREADS*3*4));
printf("Kernel for final calculation of mean and standard deviation:\n");
size_t free_mem,total_mem;
cudaMemGetInfo(&free_mem,&total_mem);
printf("Memory required for temporary storage:%0.3f MB which is %d floats\n",(nBlocks_total*3*sizeof(float))/(1024.0*1024), nBlocks_total*3);
printf("Memory available:%0.3f MB \n", ((float) free_mem)/(1024.0*1024.0) );
printf("gridSize=(%d,%d,%d)\n", final_gridSize.x, final_gridSize.y, final_gridSize.z);
printf("blockSize=(%d,%d,%d)\n", final_blockSize.x, final_blockSize.y, final_blockSize.z);
printf("---------------------------<\n");
#endif
//---------> Allocation of temporary memory
cudaMalloc((void **) &d_output, nBlocks_total*3*sizeof(float));
MSD_init();
MSD_GPU_limited<<<gridSize,blockSize>>>(d_input, d_output, nDMs/nBlocks_y, nTimesamples, offset);
MSD_GPU_final_regular<<<final_gridSize,final_blockSize>>>(d_output, d_MSD, d_previous_partials, nBlocks_total);
cudaFree(d_output);
#ifdef MSD_DEBUG
float h_MSD[3];
cudaMemcpy(h_MSD, d_MSD, 3*sizeof(float), cudaMemcpyDeviceToHost);
printf("Output: Mean: %e, Standard deviation: %e; Elements:%zu;\n", h_MSD[0], h_MSD[1], (size_t) h_MSD[2]);
printf("---------------------------<\n");
#endif
if (nRest < 32) return ( nRest );
else return ( 0 );
}
int MSD_limited_continuous(float *d_input, float *d_MSD, float *d_previous_partials, float *d_temp, MSD_Configuration *MSD_conf) {
#ifdef MSD_DEBUG
MSD_conf->print();
#endif
MSD_init();
MSD_GPU_limited<<<MSD_conf->partials_gridSize,MSD_conf->partials_blockSize>>>(d_input, &d_temp[MSD_conf->address*MSD_PARTIAL_SIZE], MSD_conf->nSteps.y, (int) MSD_conf->nTimesamples, (int) MSD_conf->offset);
MSD_GPU_final_regular<<<MSD_conf->final_gridSize,MSD_conf->final_blockSize>>>(&d_temp[MSD_conf->address*MSD_PARTIAL_SIZE], d_MSD, d_previous_partials, MSD_conf->nBlocks_total);
#ifdef MSD_DEBUG
float h_MSD[3];
cudaMemcpy(h_MSD, d_MSD, 3*sizeof(float), cudaMemcpyDeviceToHost);
printf("Output: Mean: %e, Standard deviation: %e; Elements:%zu;\n", h_MSD[0], h_MSD[1], (size_t) h_MSD[2]);
printf("---------------------------<\n");
#endif
return (0);
}
int MSD_linear_approximation(float *d_input, float *d_MSD_T, int nTaps, int nDMs, int nTimesamples, int offset){
//---------> Task specific
int nBlocks_x, nBlocks_y, nBlocks_total, nSteps_x, nSteps_y, nRest, nThreads, itemp; //epw = elements per warp 32 for float 64 for float2
float *d_output;
float *d_output_taps;
float *d_MSD_T_base;
//---------> determining data block size per kernel
nSteps_x = 2*PD_NTHREADS-nTaps+4;
nBlocks_x = (int) ((nTimesamples-offset)/nSteps_x);
nRest = nTimesamples - offset - nBlocks_x*nSteps_x;
if(nRest>128) nBlocks_x++;
nSteps_y = Choose_divider(nDMs,64);
nBlocks_y=nDMs/nSteps_y;
nBlocks_total=nBlocks_x*nBlocks_y;
dim3 gridSize(nBlocks_x, nBlocks_y, 1);
dim3 blockSize(PD_NTHREADS, 1, 1);
//---------> determining number of threads for final kernel
nThreads=2048;
itemp=0;
while(itemp==0 && nThreads>32){
nThreads=(nThreads>>1);
itemp=(int) (nBlocks_total/(nThreads*32));
}
if(nThreads<32) nThreads=32;
dim3 final_gridSize(1, 1, 1);
dim3 final_blockSize(nThreads, 1, 1);
#ifdef MSD_DEBUG
printf("\n\n");
printf("----------------> MSD debug:\n");
printf("Kernel for calculating partials:\n");
printf("ThreadBlocks (TB) in x:%d; Elements processed by TB in x:%d; Remainder in x:%d", nBlocks_x, nSteps_x, nRest);
if(nRest>3*nTaps) printf(" is processed\n");
else printf(" is not processed\n");
printf("ThreadBlocks (TB) in y:%d; Elements processed by TB in y:%d; Remainder in y:%d is processed\n", nBlocks_y, nSteps_y, 0);
printf("gridSize=(%d,%d,%d)\n", gridSize.x, gridSize.y, gridSize.z);
printf("blockSize=(%d,%d,%d)\n", blockSize.x, blockSize.y, blockSize.z);
printf("Shared memory required: %0.3f B\n", (float) (PD_NTHREADS*24));
printf("Kernel for final calculation of mean and standard deviation:\n");
size_t free_mem,total_mem;
cudaMemGetInfo(&free_mem,&total_mem);
printf("Memory required for temporary storage:%0.3f MB which is %d floats\n",(nBlocks_total*3*2*sizeof(float))/(1024.0*1024), nBlocks_total*3*2);
printf("Memory available:%0.3f MB \n", ((float) free_mem)/(1024.0*1024.0) );
printf("gridSize=(%d,%d,%d)\n", final_gridSize.x, final_gridSize.y, final_gridSize.z);
printf("blockSize=(%d,%d,%d)\n", final_blockSize.x, final_blockSize.y, final_blockSize.z);
printf("\n");
#endif
if(nBlocks_total>0){
//---------> Allocation of temporary memory
if ( cudaSuccess != cudaMalloc((void **) &d_output, nBlocks_total*3*sizeof(float))) {printf("Allocation error!\n"); exit(1001);}
if ( cudaSuccess != cudaMalloc((void **) &d_output_taps, nBlocks_total*3*sizeof(float))) {printf("Allocation error!\n"); exit(1001);}
if ( cudaSuccess != cudaMalloc((void **) &d_MSD_T_base, sizeof(float)*3)) {printf("Allocation error!\n"); exit(1001);}
//---------> MSD
MSD_init();
MSD_GPU_LA_ALL<<<gridSize,blockSize>>>(d_input, d_output, d_output_taps, nSteps_y, nTaps, nTimesamples, offset);
MSD_GPU_final_regular<<<final_gridSize, final_blockSize>>>(d_output, d_MSD_T_base, nBlocks_total);
MSD_GPU_final_create_LA<<<final_gridSize, final_blockSize>>>(d_output_taps, d_MSD_T, d_MSD_T_base, nTaps, nBlocks_total);
#ifdef MSD_DEBUG
float h_MSD_T[3], h_MSD_T_base[3];
cudaMemcpy(h_MSD_T, d_MSD_T, 3*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(h_MSD_T_base, d_MSD_T_base, 3*sizeof(float), cudaMemcpyDeviceToHost);
printf("Output: Mean: %e, Standard deviation: %e; modifier:%e;\n", h_MSD_T[0], h_MSD_T[1], h_MSD_T[2]);
printf("GPU results after 1 taps: Mean: %e, Standard deviation: %e; Number of elements:%d;\n", h_MSD_T_base[0], h_MSD_T_base[1], (int) h_MSD_T_base[2]);
printf("---------------------------<\n");
#endif
//---------> De-allocation of temporary memory
cudaFree(d_output);
cudaFree(d_output_taps);
cudaFree(d_MSD_T_base);
}
else {
printf("Number of time samples is too small! Increase the number of samples sent to the boxcar filters. (MSD_linear_approximation)\n");
exit(1002);
}
if(nRest<64) return(nRest);
else return(0);
}
int MSD_LA_Nth(float *d_input, float *d_bv_in, float *d_MSD_T, float *d_MSD_DIT, int nTaps, int nDMs, int nTimesamples, int offset, int DIT_value){
//---------> Task specific
int nBlocks_x, nBlocks_y, nBlocks_total, nSteps_x, nSteps_y, nRest, nThreads, itemp; //epw = elements per warp 32 for float 64 for float2
float *d_output;
float *d_output_FIR;
float *d_MSD_T_base;
//---------> determining data block size per kernel
nSteps_x = 2*PD_NTHREADS-nTaps+4;
nBlocks_x = (int) ((nTimesamples-offset)/nSteps_x);
nRest = nTimesamples - offset - nBlocks_x*nSteps_x;
if(nRest>0) nBlocks_x++;
nSteps_y = Choose_divider(nDMs,64);
nBlocks_y=nDMs/nSteps_y;
nBlocks_total=nBlocks_x*nBlocks_y;
dim3 gridSize(nBlocks_x, nBlocks_y, 1);
dim3 blockSize(PD_NTHREADS, 1, 1);
//---------> determining number of threads for final kernel
nThreads=2048;
itemp=0;
while(itemp==0 && nThreads>32){
nThreads=(nThreads>>1);
itemp=(int) (nBlocks_total/(nThreads*32));
}
if(nThreads<32) nThreads=32;
dim3 final_gridSize(1, 1, 1);
dim3 final_blockSize(nThreads, 1, 1);
#ifdef MSD_DEBUG
printf("\n\n");
printf("----------------> MSD debug:\n");
printf("Kernel for calculating partials: (MSD_LA_Nth)\n");
printf("nTimesamples:%d; offset:%d, nDMs:%d;\n", nTimesamples, offset, nDMs);
printf("ThreadBlocks (TB) in x:%d; Elements processed by TB in x:%d; Remainder in x:%d", nBlocks_x, nSteps_x, nRest);
if(nRest>3*nTaps) printf(" is processed\n");
else printf(" is not processed\n");
printf("ThreadBlocks (TB) in y:%d; Elements processed by TB in y:%d; Remainder in y:%d is processed\n", nBlocks_y, nSteps_y, 0);
printf("gridSize=(%d,%d,%d)\n", gridSize.x, gridSize.y, gridSize.z);
printf("blockSize=(%d,%d,%d)\n", blockSize.x, blockSize.y, blockSize.z);
printf("Shared memory required: %0.3f B\n", (float) (PD_NTHREADS*24));
printf("Kernel for final calculation of mean and standard deviation:\n");
size_t free_mem,total_mem;
cudaMemGetInfo(&free_mem,&total_mem);
printf("Memory required for temporary storage:%0.3f MB which is %d floats\n",(nBlocks_total*3*2*sizeof(float))/(1024.0*1024), nBlocks_total*3*2);
printf("Memory available:%0.3f MB \n", ((float) free_mem)/(1024.0*1024.0) );
printf("gridSize=(%d,%d,%d)\n", final_gridSize.x, final_gridSize.y, final_gridSize.z);
printf("blockSize=(%d,%d,%d)\n", final_blockSize.x, final_blockSize.y, final_blockSize.z);
printf("\n");
#endif
if(nBlocks_total>0){
//---------> Allocation of temporary memory
if ( cudaSuccess != cudaMalloc((void **) &d_output, nBlocks_total*3*sizeof(float))) {printf("Allocation error!\n"); exit(1001);}
if ( cudaSuccess != cudaMalloc((void **) &d_output_FIR, nBlocks_total*3*sizeof(float))) {printf("Allocation error!\n"); exit(1001);}
if ( cudaSuccess != cudaMalloc((void **) &d_MSD_T_base, sizeof(float)*3)) {printf("Allocation error!\n"); exit(1001);}
//---------> MSD
MSD_init();
MSD_GPU_LA_ALL_Nth<<<gridSize,blockSize>>>(d_input, d_bv_in, d_output, d_output_FIR, nSteps_y, nTaps, nTimesamples, offset);
MSD_GPU_final_regular<<<final_gridSize, final_blockSize>>>(d_output, d_MSD_T_base, nBlocks_total);
MSD_GPU_final_create_LA_Nth<<<final_gridSize, final_blockSize>>>(d_output_FIR, d_MSD_T, d_MSD_T_base, d_MSD_DIT, nTaps, nBlocks_total, DIT_value);
#ifdef MSD_DEBUG
float h_MSD_T[4], h_MSD_T_base[3], h_MSD_DIT[3];
cudaMemcpy(h_MSD_T, d_MSD_T, 4*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(h_MSD_T_base, d_MSD_T_base, 3*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(h_MSD_DIT, d_MSD_DIT, 3*sizeof(float), cudaMemcpyDeviceToHost);
printf("d_MSD_T: BV Mean: %f, Standard deviation: %f; modifier:%f; DIT Mean:%f\n", h_MSD_T[0], h_MSD_T[1], h_MSD_T[2], h_MSD_T[3]);
printf("MSD for d_bv_in: Mean: %f, Standard deviation: %f; Number of elements:%d;\n", h_MSD_T_base[0], h_MSD_T_base[1], (int) h_MSD_T_base[2]);
printf("MSD for DIT: Mean: %f, Standard deviation: %f; Number of elements:%d;\n", h_MSD_DIT[0], h_MSD_DIT[1], (int) h_MSD_DIT[2]);
printf("---------------------------<\n");
#endif
//---------> De-allocation of temporary memory
cudaFree(d_output);
cudaFree(d_output_FIR);
cudaFree(d_MSD_T_base);
}
else {
printf("WARNING: Number of time samples is too small! Increase the number of samples sent to the boxcar filters. (MSD_LA_Nth)\n");
return(1);
}
return(0);
}
|
32f8965c7881431a93cb89f42577dfbb2795ace0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file imagemodel_k.cu
* \brief Kernel declarations for image model computation.
* \copyright 2015, Juan David Adarve, ANU. See AUTHORS for more details
* \license 3-clause BSD, see LICENSE for more details
*/
#include "flowfilter/gpu/device/image_k.h"
#include "flowfilter/gpu/device/imagemodel_k.h"
namespace flowfilter {
namespace gpu {
//######################
// 5 support
//######################
#define IMS_R 2
#define IMS_W 5
__constant__ float smooth_mask[] = {0.0625, 0.25, 0.375, 0.25, 0.0625};
__constant__ float diff_mask[] = {-0.125, -0.25, 0, 0.25, 0.125};
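// smooth_mask is the normalized 5-tap binomial kernel [1 4 6 4 1]/16 (Gaussian-like
// smoothing); diff_mask is the matching antisymmetric, smoothed central-difference
// derivative kernel [-1 -2 0 2 1]/8.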
/**
* \brief Apply a smooth mask to input image in X and Y directions.
*
* NOTE: reading float, either from a float image or a normalized
* image is faster than reading unsigned char directly.
*/
__global__ void imagePrefilter_k(hipTextureObject_t inputImage,
gpuimage_t<float2> imgPrefiltered) {
const int height = imgPrefiltered.height;
const int width = imgPrefiltered.width;
// pixel coordinate
const int2 pix = make_int2(blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y);
if(pix.x >= width || pix.y >= height) {
return;
}
//#################################
// SMOOTHING IN X
//#################################
float smooth_x = 0.0f;
#pragma unroll
for(int c = -IMS_R; c <= IMS_R; c ++) {
smooth_x += smooth_mask[c + IMS_R] * tex2D<float>(inputImage, pix.x + c, pix.y);
}
//#################################
// SMOOTHING IN Y
//#################################
float smooth_y = 0.0f;
#pragma unroll
for(int r = -IMS_R; r <= IMS_R; r ++) {
smooth_y += smooth_mask[r + IMS_R] * tex2D<float>(inputImage, pix.x, pix.y + r);
}
//#################################
// PACK RESULTS
//#################################
// {smooth_y, smooth_x}
*coordPitch(imgPrefiltered, pix) = make_float2(smooth_y, smooth_x);
}
/**
* \brief Compute image gradient and constant term from XY smoothed image.
*/
__global__ void imageModel_k(hipTextureObject_t imgPrefiltered,
gpuimage_t<float> imgConstant,
gpuimage_t<float2> imgGradient) {
const int height = imgConstant.height;
const int width = imgConstant.width;
// pixel coordinate
const int2 pix = make_int2(blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y);
if(pix.x >= width || pix.y >= height) {
return;
}
// imgPrefiltered texture element
float2 imElement;
float diff_x = 0.0;
float diff_y = 0.0;
float smooth = 0.0;
//#################################
// DIFFERENCING IN X
//#################################
#pragma unroll
for(int c = -IMS_R; c <= IMS_R; c ++) {
// texture coordinate
imElement = tex2D<float2>(imgPrefiltered, pix.x + c, pix.y);
// convolution with difference kernel
diff_x += diff_mask[c + IMS_R]*imElement.x;
// convolution with smooth kernel
smooth += smooth_mask[c + IMS_R]*imElement.x;
}
//#################################
// DIFFERENCING IN Y
//#################################
#pragma unroll
for(int r = -IMS_R; r <= IMS_R; r ++) {
imElement = tex2D<float2>(imgPrefiltered, pix.x, pix.y + r);
// convolution difference kernel
diff_y += diff_mask[r + IMS_R]*imElement.y;
}
//#################################
// PACK RESULTS
//#################################
// {diff_x, diff_y}
*coordPitch(imgGradient, pix) = make_float2(diff_x, diff_y);
*coordPitch(imgConstant, pix) = smooth;
}
}; // namespace gpu
}; // namespace flowfilter
|
32f8965c7881431a93cb89f42577dfbb2795ace0.cu
|
/**
* \file imagemodel_k.cu
* \brief Kernel declarations for image model computation.
* \copyright 2015, Juan David Adarve, ANU. See AUTHORS for more details
* \license 3-clause BSD, see LICENSE for more details
*/
#include "flowfilter/gpu/device/image_k.h"
#include "flowfilter/gpu/device/imagemodel_k.h"
namespace flowfilter {
namespace gpu {
//######################
// 5 support
//######################
#define IMS_R 2
#define IMS_W 5
__constant__ float smooth_mask[] = {0.0625, 0.25, 0.375, 0.25, 0.0625};
__constant__ float diff_mask[] = {-0.125, -0.25, 0, 0.25, 0.125};
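// smooth_mask is the normalized 5-tap binomial kernel [1 4 6 4 1]/16 (Gaussian-like
// smoothing); diff_mask is the matching antisymmetric, smoothed central-difference
// derivative kernel [-1 -2 0 2 1]/8.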
/**
* \brief Apply a smooth mask to input image in X and Y directions.
*
* NOTE: reading float, either from a float image or a normalized
* image is faster than reading unsigned char directly.
*/
__global__ void imagePrefilter_k(cudaTextureObject_t inputImage,
gpuimage_t<float2> imgPrefiltered) {
const int height = imgPrefiltered.height;
const int width = imgPrefiltered.width;
// pixel coordinate
const int2 pix = make_int2(blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y);
if(pix.x >= width || pix.y >= height) {
return;
}
//#################################
// SMOOTHING IN X
//#################################
float smooth_x = 0.0f;
#pragma unroll
for(int c = -IMS_R; c <= IMS_R; c ++) {
smooth_x += smooth_mask[c + IMS_R] * tex2D<float>(inputImage, pix.x + c, pix.y);
}
//#################################
// SMOOTHING IN Y
//#################################
float smooth_y = 0.0f;
#pragma unroll
for(int r = -IMS_R; r <= IMS_R; r ++) {
smooth_y += smooth_mask[r + IMS_R] * tex2D<float>(inputImage, pix.x, pix.y + r);
}
//#################################
// PACK RESULTS
//#################################
// {smooth_y, smooth_x}
*coordPitch(imgPrefiltered, pix) = make_float2(smooth_y, smooth_x);
}
/**
* \brief Compute image gradient and constant term from XY smoothed image.
*/
__global__ void imageModel_k(cudaTextureObject_t imgPrefiltered,
gpuimage_t<float> imgConstant,
gpuimage_t<float2> imgGradient) {
const int height = imgConstant.height;
const int width = imgConstant.width;
// pixel coordinate
const int2 pix = make_int2(blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y);
if(pix.x >= width || pix.y >= height) {
return;
}
// imgPrefiltered texture element
float2 imElement;
float diff_x = 0.0;
float diff_y = 0.0;
float smooth = 0.0;
//#################################
// DIFFERENCING IN X
//#################################
#pragma unroll
for(int c = -IMS_R; c <= IMS_R; c ++) {
// texture coordinate
imElement = tex2D<float2>(imgPrefiltered, pix.x + c, pix.y);
// convolution with difference kernel
diff_x += diff_mask[c + IMS_R]*imElement.x;
// convolution with smooth kernel
smooth += smooth_mask[c + IMS_R]*imElement.x;
}
//#################################
// DIFFERENCING IN Y
//#################################
#pragma unroll
for(int r = -IMS_R; r <= IMS_R; r ++) {
imElement = tex2D<float2>(imgPrefiltered, pix.x, pix.y + r);
// convolution difference kernel
diff_y += diff_mask[r + IMS_R]*imElement.y;
}
//#################################
// PACK RESULTS
//#################################
// {diff_x, diff_y}
*coordPitch(imgGradient, pix) = make_float2(diff_x, diff_y);
*coordPitch(imgConstant, pix) = smooth;
}
}; // namespace gpu
}; // namespace flowfilter
|
88b5ee352f78aaf6df1999586fb9dae77470581c.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include "../src/cudapoa_generate_consensus.cuh" //generateConsensusHost, CUDAPOA_MAX_NODE_EDGES, CUDAPOA_MAX_NODE_ALIGNMENTS
#include "sorted_graph.hpp" //SortedGraph
#include <claraparabricks/genomeworks/utils/cudautils.hpp> //CGA_CU_CHECK_ERR
#include <claraparabricks/genomeworks/utils/signed_integer_utils.hpp> //get_size
#include "gtest/gtest.h"
namespace claraparabricks
{
namespace genomeworks
{
namespace cudapoa
{
class BasicGenerateConsensus
{
public:
BasicGenerateConsensus(std::vector<uint8_t> nodes, std::vector<SizeT> sorted_graph, SizeTVec2D node_alignments,
SizeTVec2D outgoing_edges, std::vector<uint16_t> node_coverage_counts, Uint16Vec2D outgoing_edge_w)
: graph_(nodes, sorted_graph, node_alignments, node_coverage_counts, outgoing_edges)
, outgoing_edge_w_(outgoing_edge_w)
, outgoing_edges_(outgoing_edges)
{
}
void get_graph_buffers(uint8_t* nodes, SizeT* node_count,
SizeT* sorted_poa, SizeT* node_id_to_pos,
SizeT* incoming_edges, uint16_t* incoming_edge_count,
SizeT* outgoing_edges, uint16_t* outgoing_edge_count,
uint16_t* incoming_edge_w, uint16_t* node_coverage_counts,
SizeT* node_alignments, uint16_t* node_alignment_count) const
{
graph_.get_nodes(nodes, node_count);
graph_.get_sorted_graph(sorted_poa);
graph_.get_node_id_to_pos(node_id_to_pos);
graph_.get_node_coverage_counts(node_coverage_counts);
graph_.get_edges(incoming_edges, incoming_edge_count, outgoing_edges, outgoing_edge_count);
graph_.get_node_alignments(node_alignments, node_alignment_count);
get_incoming_edge_w(incoming_edge_w);
}
void get_incoming_edge_w(uint16_t* incoming_edge_w) const
{
auto outgoing_edges = graph_.get_outgoing_edges();
for (int i = 0; i < get_size(outgoing_edges); i++)
{
for (int j = 0; j < get_size(outgoing_edges[i]); j++)
{
SizeT to_node = outgoing_edges[i][j];
incoming_edge_w[to_node * CUDAPOA_MAX_NODE_EDGES + i] = outgoing_edge_w_[i][j];
}
}
}
protected:
SortedGraph graph_;
SizeTVec2D outgoing_edges_;
Uint16Vec2D outgoing_edge_w_;
};
typedef std::pair<std::string, BasicGenerateConsensus> GenerateConsensusTestPair;
// create a vector of test cases
std::vector<GenerateConsensusTestPair> getGenerateConsensusTestCases()
{
std::vector<GenerateConsensusTestPair> test_cases;
/*
* T
* / \
* graph A A A
* \ /
* A
*/
std::string ans_1 = "ATAA";
BasicGenerateConsensus gc_1({'A', 'A', 'A', 'A', 'T'}, //nodes
{0, 1, 2, 4, 3}, //sorted_graph
{{}, {}, {4}, {}, {2}}, //node_alignments
{{1}, {2, 4}, {3}, {}, {3}}, //outgoing_edges
{2, 2, 1, 2, 1}, //node_coverage_counts
{{5}, {4, 3}, {2}, {}, {1}}); //outgoing_edge_w
test_cases.emplace_back(std::move(ans_1), std::move(gc_1));
/*
* graph A T C G A
*/
std::string ans_2 = "AGCTA";
BasicGenerateConsensus gc_2({'A', 'T', 'C', 'G', 'A'}, //nodes
{0, 1, 2, 3, 4}, //sorted_graph
{{}, {}, {}, {}, {}}, //node_alignments
{{1}, {2}, {3}, {4}, {}}, //outgoing_edges
{1, 1, 1, 1, 1}, //node_coverage_counts
{{4}, {3}, {2}, {1}, {}});
test_cases.emplace_back(std::move(ans_2), std::move(gc_2));
/*
* T
* / \
* graph A C C G
* \ /
* A
*/
std::string ans_3 = "GCCA";
BasicGenerateConsensus gc_3({'A', 'A', 'C', 'G', 'C', 'T'}, //nodes
{0, 1, 4, 5, 2, 3}, //sorted_graph
{{}, {4, 5}, {}, {}, {1, 5}, {1, 4}}, //node_alignments
{{1, 4, 5}, {2}, {3}, {}, {2}, {2}}, //outgoing_edges
{3, 1, 3, 3, 1, 1}, //node_coverage_counts
{{7, 6, 5}, {4}, {3}, {}, {2}, {1}});
test_cases.emplace_back(std::move(ans_3), std::move(gc_3));
/*
* graph A T T G A
* \_____________/
*/
std::string ans_4 = "AGTTA";
BasicGenerateConsensus gc_4({'A', 'T', 'T', 'G', 'A'}, //nodes
{0, 1, 2, 3, 4}, //sorted_graph
{{}, {}, {}, {}, {}}, //node_alignments
{{1, 4}, {2}, {3}, {4}, {}}, //outgoing_edges
{2, 1, 1, 1, 2}, //node_coverage_counts
{{5, 4}, {3}, {2}, {1}, {}});
test_cases.emplace_back(std::move(ans_4), std::move(gc_4));
/*
* T G
* / \
* graph A C A T A
* \ /
* T
*/
std::string ans_5 = "ATTCA";
BasicGenerateConsensus gc_5({'A', 'T', 'G', 'T', 'A', 'C', 'A', 'T'}, //nodes
{0, 1, 5, 2, 6, 7, 3, 4}, //sorted_graph
{{}, {5}, {6, 7}, {}, {}, {1}, {2, 7}, {2, 6}}, //node_alignments
{{1, 5}, {2}, {3}, {4}, {}, {6, 7}, {3}, {3}}, //outgoing_edges
{3, 1, 1, 3, 3, 2, 1, 1}, //node_coverage_counts
{{9, 8}, {7}, {6}, {5}, {}, {4, 3}, {2}, {1}});
test_cases.emplace_back(std::move(ans_5), std::move(gc_5));
//add more test cases below
return test_cases;
}
// host function for calling the kernel wrapper to test the generateConsensus device function.
std::string testGenerateConsensus(const BasicGenerateConsensus& obj)
{
//declare device buffer
uint8_t* nodes;
SizeT* node_count;
SizeT* graph;
SizeT* node_id_to_pos;
SizeT* incoming_edges;
uint16_t* incoming_edge_count;
SizeT* outgoing_edges;
uint16_t* outgoing_edge_count;
uint16_t* incoming_edge_w;
uint16_t* node_coverage_counts;
SizeT* node_alignments;
uint16_t* node_alignment_count;
//buffers that don't need initialization
SizeT* predecessors;
int32_t* scores;
uint8_t* consensus;
uint16_t* coverage;
//default data size limits
BatchSize batch_size;
//allocate unified memory so they can be accessed by both host and device.
CGA_CU_CHECK_ERR(hipMallocManaged((void**)&nodes, batch_size.max_nodes_per_window * sizeof(uint8_t)));
CGA_CU_CHECK_ERR(hipMallocManaged((void**)&node_count, sizeof(SizeT)));
CGA_CU_CHECK_ERR(hipMallocManaged((void**)&graph, batch_size.max_nodes_per_window * sizeof(SizeT)));
CGA_CU_CHECK_ERR(hipMallocManaged((void**)&node_id_to_pos, batch_size.max_nodes_per_window * sizeof(SizeT)));
CGA_CU_CHECK_ERR(hipMallocManaged((void**)&incoming_edges, batch_size.max_nodes_per_window * CUDAPOA_MAX_NODE_EDGES * sizeof(SizeT)));
CGA_CU_CHECK_ERR(hipMallocManaged((void**)&incoming_edge_count, batch_size.max_nodes_per_window * sizeof(uint16_t)));
CGA_CU_CHECK_ERR(hipMallocManaged((void**)&outgoing_edges, batch_size.max_nodes_per_window * CUDAPOA_MAX_NODE_EDGES * sizeof(SizeT)));
CGA_CU_CHECK_ERR(hipMallocManaged((void**)&outgoing_edge_count, batch_size.max_nodes_per_window * sizeof(uint16_t)));
CGA_CU_CHECK_ERR(hipMallocManaged((void**)&incoming_edge_w, batch_size.max_nodes_per_window * CUDAPOA_MAX_NODE_EDGES * sizeof(uint16_t)));
CGA_CU_CHECK_ERR(hipMallocManaged((void**)&node_coverage_counts, batch_size.max_nodes_per_window * sizeof(uint16_t)));
CGA_CU_CHECK_ERR(hipMallocManaged((void**)&node_alignments, batch_size.max_nodes_per_window * CUDAPOA_MAX_NODE_ALIGNMENTS * sizeof(SizeT)));
CGA_CU_CHECK_ERR(hipMallocManaged((void**)&node_alignment_count, batch_size.max_nodes_per_window * sizeof(uint16_t)));
CGA_CU_CHECK_ERR(hipMallocManaged((void**)&predecessors, batch_size.max_nodes_per_window * sizeof(SizeT)));
CGA_CU_CHECK_ERR(hipMallocManaged((void**)&scores, batch_size.max_nodes_per_window * sizeof(int32_t)));
CGA_CU_CHECK_ERR(hipMallocManaged((void**)&consensus, batch_size.max_consensus_size * sizeof(uint8_t)));
CGA_CU_CHECK_ERR(hipMallocManaged((void**)&coverage, batch_size.max_consensus_size * sizeof(uint16_t)));
//initialize all 'count' buffers
memset(incoming_edge_count, 0, batch_size.max_nodes_per_window * sizeof(uint16_t));
memset(outgoing_edge_count, 0, batch_size.max_nodes_per_window * sizeof(uint16_t));
memset(node_coverage_counts, 0, batch_size.max_nodes_per_window * sizeof(uint16_t));
memset(node_alignment_count, 0, batch_size.max_nodes_per_window * sizeof(uint16_t));
//calculate edge counts on host
obj.get_graph_buffers(nodes, node_count,
graph, node_id_to_pos,
incoming_edges, incoming_edge_count,
outgoing_edges, outgoing_edge_count,
incoming_edge_w, node_coverage_counts,
node_alignments, node_alignment_count);
// call the host wrapper of the generateConsensus kernel
generateConsensusTestHost<SizeT>(nodes,
*node_count,
graph,
node_id_to_pos,
incoming_edges,
incoming_edge_count,
outgoing_edges,
outgoing_edge_count,
incoming_edge_w,
predecessors,
scores,
consensus,
coverage,
node_coverage_counts,
node_alignments,
node_alignment_count,
batch_size.max_consensus_size);
CGA_CU_CHECK_ERR(hipDeviceSynchronize());
//input and output buffers are the same ones in unified memory, so the results are updated in place
//create and return a new BasicGraph object that encodes the resulting graph structure after adding the alignment
std::string res((char*)consensus);
CGA_CU_CHECK_ERR(hipFree(nodes));
CGA_CU_CHECK_ERR(hipFree(node_count));
CGA_CU_CHECK_ERR(hipFree(graph));
CGA_CU_CHECK_ERR(hipFree(node_id_to_pos));
CGA_CU_CHECK_ERR(hipFree(incoming_edges));
CGA_CU_CHECK_ERR(hipFree(incoming_edge_count));
CGA_CU_CHECK_ERR(hipFree(outgoing_edges));
CGA_CU_CHECK_ERR(hipFree(outgoing_edge_count));
CGA_CU_CHECK_ERR(hipFree(incoming_edge_w));
CGA_CU_CHECK_ERR(hipFree(node_coverage_counts));
CGA_CU_CHECK_ERR(hipFree(node_alignments));
CGA_CU_CHECK_ERR(hipFree(node_alignment_count));
CGA_CU_CHECK_ERR(hipFree(predecessors));
CGA_CU_CHECK_ERR(hipFree(scores));
CGA_CU_CHECK_ERR(hipFree(consensus));
CGA_CU_CHECK_ERR(hipFree(coverage));
return res;
}
using ::testing::TestWithParam;
using ::testing::ValuesIn;
class GenerateConsensusTest : public TestWithParam<GenerateConsensusTestPair>
{
public:
void SetUp() {}
std::string runGenerateConsensus(const BasicGenerateConsensus& obj)
{
return testGenerateConsensus(obj);
}
};
TEST_P(GenerateConsensusTest, TestGenerateConsensusCorrectness)
{
const auto test_case = GetParam();
EXPECT_EQ(test_case.first, runGenerateConsensus(test_case.second));
}
INSTANTIATE_TEST_SUITE_P(TestGenerateConsensus, GenerateConsensusTest, ValuesIn(getGenerateConsensusTestCases()));
} // namespace cudapoa
} // namespace genomeworks
} // namespace claraparabricks
|
88b5ee352f78aaf6df1999586fb9dae77470581c.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include "../src/cudapoa_generate_consensus.cuh" //generateConsensusHost, CUDAPOA_MAX_NODE_EDGES, CUDAPOA_MAX_NODE_ALIGNMENTS
#include "sorted_graph.hpp" //SortedGraph
#include <claraparabricks/genomeworks/utils/cudautils.hpp> //CGA_CU_CHECK_ERR
#include <claraparabricks/genomeworks/utils/signed_integer_utils.hpp> //get_size
#include "gtest/gtest.h"
namespace claraparabricks
{
namespace genomeworks
{
namespace cudapoa
{
class BasicGenerateConsensus
{
public:
BasicGenerateConsensus(std::vector<uint8_t> nodes, std::vector<SizeT> sorted_graph, SizeTVec2D node_alignments,
SizeTVec2D outgoing_edges, std::vector<uint16_t> node_coverage_counts, Uint16Vec2D outgoing_edge_w)
: graph_(nodes, sorted_graph, node_alignments, node_coverage_counts, outgoing_edges)
, outgoing_edge_w_(outgoing_edge_w)
, outgoing_edges_(outgoing_edges)
{
}
void get_graph_buffers(uint8_t* nodes, SizeT* node_count,
SizeT* sorted_poa, SizeT* node_id_to_pos,
SizeT* incoming_edges, uint16_t* incoming_edge_count,
SizeT* outgoing_edges, uint16_t* outgoing_edge_count,
uint16_t* incoming_edge_w, uint16_t* node_coverage_counts,
SizeT* node_alignments, uint16_t* node_alignment_count) const
{
graph_.get_nodes(nodes, node_count);
graph_.get_sorted_graph(sorted_poa);
graph_.get_node_id_to_pos(node_id_to_pos);
graph_.get_node_coverage_counts(node_coverage_counts);
graph_.get_edges(incoming_edges, incoming_edge_count, outgoing_edges, outgoing_edge_count);
graph_.get_node_alignments(node_alignments, node_alignment_count);
get_incoming_edge_w(incoming_edge_w);
}
void get_incoming_edge_w(uint16_t* incoming_edge_w) const
{
auto outgoing_edges = graph_.get_outgoing_edges();
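// scatter each outgoing edge weight into the incoming-edge weight buffer of its destination node (the slot index used is the source node id)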
for (int i = 0; i < get_size(outgoing_edges); i++)
{
for (int j = 0; j < get_size(outgoing_edges[i]); j++)
{
SizeT to_node = outgoing_edges[i][j];
incoming_edge_w[to_node * CUDAPOA_MAX_NODE_EDGES + i] = outgoing_edge_w_[i][j];
}
}
}
protected:
SortedGraph graph_;
SizeTVec2D outgoing_edges_;
Uint16Vec2D outgoing_edge_w_;
};
typedef std::pair<std::string, BasicGenerateConsensus> GenerateConsensusTestPair;
// create a vector of test cases
std::vector<GenerateConsensusTestPair> getGenerateConsensusTestCases()
{
std::vector<GenerateConsensusTestPair> test_cases;
/*
* T
* / \
* graph A — A A
* \ /
* A
*/
std::string ans_1 = "ATAA";
BasicGenerateConsensus gc_1({'A', 'A', 'A', 'A', 'T'}, //nodes
{0, 1, 2, 4, 3}, //sorted_graph
{{}, {}, {4}, {}, {2}}, //node_alignments
{{1}, {2, 4}, {3}, {}, {3}}, //outgoing_edges
{2, 2, 1, 2, 1}, //node_coverage_counts
{{5}, {4, 3}, {2}, {}, {1}}); //outgoing_edge_w
test_cases.emplace_back(std::move(ans_1), std::move(gc_1));
/*
* graph A — T — C — G — A
*/
std::string ans_2 = "AGCTA";
BasicGenerateConsensus gc_2({'A', 'T', 'C', 'G', 'A'}, //nodes
{0, 1, 2, 3, 4}, //sorted_graph
{{}, {}, {}, {}, {}}, //node_alignments
{{1}, {2}, {3}, {4}, {}}, //outgoing_edges
{1, 1, 1, 1, 1}, //node_coverage_counts
{{4}, {3}, {2}, {1}, {}});
test_cases.emplace_back(std::move(ans_2), std::move(gc_2));
/*
* T
* / \
* graph A — C — C — G
* \ /
* A
*/
std::string ans_3 = "GCCA";
BasicGenerateConsensus gc_3({'A', 'A', 'C', 'G', 'C', 'T'}, //nodes
{0, 1, 4, 5, 2, 3}, //sorted_graph
{{}, {4, 5}, {}, {}, {1, 5}, {1, 4}}, //node_alignments
{{1, 4, 5}, {2}, {3}, {}, {2}, {2}}, //outgoing_edges
{3, 1, 3, 3, 1, 1}, //node_coverage_counts
{{7, 6, 5}, {4}, {3}, {}, {2}, {1}});
test_cases.emplace_back(std::move(ans_3), std::move(gc_3));
/*
* graph A — T — T — G — A
* \_____________/
*/
std::string ans_4 = "AGTTA";
BasicGenerateConsensus gc_4({'A', 'T', 'T', 'G', 'A'}, //nodes
{0, 1, 2, 3, 4}, //sorted_graph
{{}, {}, {}, {}, {}}, //node_alignments
{{1, 4}, {2}, {3}, {4}, {}}, //outgoing_edges
{2, 1, 1, 1, 2}, //node_coverage_counts
{{5, 4}, {3}, {2}, {1}, {}});
test_cases.emplace_back(std::move(ans_4), std::move(gc_4));
/*
* T — G
* / \
* graph A — C — A — T — A
* \ /
* T
*/
std::string ans_5 = "ATTCA";
BasicGenerateConsensus gc_5({'A', 'T', 'G', 'T', 'A', 'C', 'A', 'T'}, //nodes
{0, 1, 5, 2, 6, 7, 3, 4}, //sorted_graph
{{}, {5}, {6, 7}, {}, {}, {1}, {2, 7}, {2, 6}}, //node_alignments
{{1, 5}, {2}, {3}, {4}, {}, {6, 7}, {3}, {3}}, //outgoing_edges
{3, 1, 1, 3, 3, 2, 1, 1}, //node_coverage_counts
{{9, 8}, {7}, {6}, {5}, {}, {4, 3}, {2}, {1}});
test_cases.emplace_back(std::move(ans_5), std::move(gc_5));
//add more test cases below
return test_cases;
}
// host function for calling the kernel to test the generateConsensus device function.
std::string testGenerateConsensus(const BasicGenerateConsensus& obj)
{
//declare device buffer
uint8_t* nodes;
SizeT* node_count;
SizeT* graph;
SizeT* node_id_to_pos;
SizeT* incoming_edges;
uint16_t* incoming_edge_count;
SizeT* outgoing_edges;
uint16_t* outgoing_edge_count;
uint16_t* incoming_edge_w;
uint16_t* node_coverage_counts;
SizeT* node_alignments;
uint16_t* node_alignment_count;
//buffers that don't need initialization
SizeT* predecessors;
int32_t* scores;
uint8_t* consensus;
uint16_t* coverage;
//default data size limits
BatchSize batch_size;
//allocate unified memory so they can be accessed by both host and device.
CGA_CU_CHECK_ERR(cudaMallocManaged((void**)&nodes, batch_size.max_nodes_per_window * sizeof(uint8_t)));
CGA_CU_CHECK_ERR(cudaMallocManaged((void**)&node_count, sizeof(SizeT)));
CGA_CU_CHECK_ERR(cudaMallocManaged((void**)&graph, batch_size.max_nodes_per_window * sizeof(SizeT)));
CGA_CU_CHECK_ERR(cudaMallocManaged((void**)&node_id_to_pos, batch_size.max_nodes_per_window * sizeof(SizeT)));
CGA_CU_CHECK_ERR(cudaMallocManaged((void**)&incoming_edges, batch_size.max_nodes_per_window * CUDAPOA_MAX_NODE_EDGES * sizeof(SizeT)));
CGA_CU_CHECK_ERR(cudaMallocManaged((void**)&incoming_edge_count, batch_size.max_nodes_per_window * sizeof(uint16_t)));
CGA_CU_CHECK_ERR(cudaMallocManaged((void**)&outgoing_edges, batch_size.max_nodes_per_window * CUDAPOA_MAX_NODE_EDGES * sizeof(SizeT)));
CGA_CU_CHECK_ERR(cudaMallocManaged((void**)&outgoing_edge_count, batch_size.max_nodes_per_window * sizeof(uint16_t)));
CGA_CU_CHECK_ERR(cudaMallocManaged((void**)&incoming_edge_w, batch_size.max_nodes_per_window * CUDAPOA_MAX_NODE_EDGES * sizeof(uint16_t)));
CGA_CU_CHECK_ERR(cudaMallocManaged((void**)&node_coverage_counts, batch_size.max_nodes_per_window * sizeof(uint16_t)));
CGA_CU_CHECK_ERR(cudaMallocManaged((void**)&node_alignments, batch_size.max_nodes_per_window * CUDAPOA_MAX_NODE_ALIGNMENTS * sizeof(SizeT)));
CGA_CU_CHECK_ERR(cudaMallocManaged((void**)&node_alignment_count, batch_size.max_nodes_per_window * sizeof(uint16_t)));
CGA_CU_CHECK_ERR(cudaMallocManaged((void**)&predecessors, batch_size.max_nodes_per_window * sizeof(SizeT)));
CGA_CU_CHECK_ERR(cudaMallocManaged((void**)&scores, batch_size.max_nodes_per_window * sizeof(int32_t)));
CGA_CU_CHECK_ERR(cudaMallocManaged((void**)&consensus, batch_size.max_consensus_size * sizeof(uint8_t)));
CGA_CU_CHECK_ERR(cudaMallocManaged((void**)&coverage, batch_size.max_consensus_size * sizeof(uint16_t)));
//initialize all 'count' buffers
memset((void**)incoming_edge_count, 0, batch_size.max_nodes_per_window * sizeof(uint16_t));
memset((void**)outgoing_edge_count, 0, batch_size.max_nodes_per_window * sizeof(uint16_t));
memset((void**)node_coverage_counts, 0, batch_size.max_nodes_per_window * sizeof(uint16_t));
memset((void**)node_alignment_count, 0, batch_size.max_nodes_per_window * sizeof(uint16_t));
//calculate edge counts on host
obj.get_graph_buffers(nodes, node_count,
graph, node_id_to_pos,
incoming_edges, incoming_edge_count,
outgoing_edges, outgoing_edge_count,
incoming_edge_w, node_coverage_counts,
node_alignments, node_alignment_count);
// call the host wrapper of the generateConsensus kernel
generateConsensusTestHost<SizeT>(nodes,
*node_count,
graph,
node_id_to_pos,
incoming_edges,
incoming_edge_count,
outgoing_edges,
outgoing_edge_count,
incoming_edge_w,
predecessors,
scores,
consensus,
coverage,
node_coverage_counts,
node_alignments,
node_alignment_count,
batch_size.max_consensus_size);
CGA_CU_CHECK_ERR(cudaDeviceSynchronize());
//input and output buffers are the same ones in unified memory, so the results are updated in place
//create and return a new BasicGraph object that encodes the resulting graph structure after adding the alignment
std::string res((char*)consensus);
CGA_CU_CHECK_ERR(cudaFree(nodes));
CGA_CU_CHECK_ERR(cudaFree(node_count));
CGA_CU_CHECK_ERR(cudaFree(graph));
CGA_CU_CHECK_ERR(cudaFree(node_id_to_pos));
CGA_CU_CHECK_ERR(cudaFree(incoming_edges));
CGA_CU_CHECK_ERR(cudaFree(incoming_edge_count));
CGA_CU_CHECK_ERR(cudaFree(outgoing_edges));
CGA_CU_CHECK_ERR(cudaFree(outgoing_edge_count));
CGA_CU_CHECK_ERR(cudaFree(incoming_edge_w));
CGA_CU_CHECK_ERR(cudaFree(node_coverage_counts));
CGA_CU_CHECK_ERR(cudaFree(node_alignments));
CGA_CU_CHECK_ERR(cudaFree(node_alignment_count));
CGA_CU_CHECK_ERR(cudaFree(predecessors));
CGA_CU_CHECK_ERR(cudaFree(scores));
CGA_CU_CHECK_ERR(cudaFree(consensus));
CGA_CU_CHECK_ERR(cudaFree(coverage));
return res;
}
using ::testing::TestWithParam;
using ::testing::ValuesIn;
class GenerateConsensusTest : public TestWithParam<GenerateConsensusTestPair>
{
public:
void SetUp() {}
std::string runGenerateConsensus(const BasicGenerateConsensus& obj)
{
return testGenerateConsensus(obj);
}
};
TEST_P(GenerateConsensusTest, TestGenerateConsensusCorrectness)
{
const auto test_case = GetParam();
EXPECT_EQ(test_case.first, runGenerateConsensus(test_case.second));
}
INSTANTIATE_TEST_SUITE_P(TestGenerateConsensus, GenerateConsensusTest, ValuesIn(getGenerateConsensusTestCases()));
} // namespace cudapoa
} // namespace genomeworks
} // namespace claraparabricks
|
33ca7f6df4887b9e016a6f1f54c4e416dc573e48.hip
|
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 4, false>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
|
33ca7f6df4887b9e016a6f1f54c4e416dc573e48.cu
|
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 4, false>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
42455a361ff00c10ac2504df4165b3a40cd04727.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "elementwise_1D_1D_minus.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *in1 = NULL;
hipMalloc(&in1, XSIZE*YSIZE);
float *in2 = NULL;
hipMalloc(&in2, XSIZE*YSIZE);
float *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((elementwise_1D_1D_minus), dim3(gridBlock), dim3(threadBlock), 0, 0, in1, in2, out, size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((elementwise_1D_1D_minus), dim3(gridBlock), dim3(threadBlock), 0, 0, in1, in2, out, size);
}
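// Note: kernel launches are asynchronous; without a hipDeviceSynchronize() after the timed loop, the elapsed time below mostly reflects launch/enqueue overhead rather than kernel execution time.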
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((elementwise_1D_1D_minus), dim3(gridBlock), dim3(threadBlock), 0, 0, in1, in2, out, size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
42455a361ff00c10ac2504df4165b3a40cd04727.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "elementwise_1D_1D_minus.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *in1 = NULL;
cudaMalloc(&in1, XSIZE*YSIZE);
float *in2 = NULL;
cudaMalloc(&in2, XSIZE*YSIZE);
float *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
elementwise_1D_1D_minus<<<gridBlock,threadBlock>>>(in1,in2,out,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
elementwise_1D_1D_minus<<<gridBlock,threadBlock>>>(in1,in2,out,size);
}
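// Note: kernel launches are asynchronous; without a cudaDeviceSynchronize() after the timed loop, the elapsed time below mostly reflects launch/enqueue overhead rather than kernel execution time.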
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
elementwise_1D_1D_minus<<<gridBlock,threadBlock>>>(in1,in2,out,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
67e72d59597f6137e2514d2d2af1256e4cc5bbff.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* Naive sort
* used if the quicksort uses too many levels
*/
__global__ void kernel_quicksort(int* values, int n) {
#define MAX_LEVELS 1000
int pivot, L, R;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int start[MAX_LEVELS];
int end[MAX_LEVELS];
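// start/end form a per-thread explicit stack of subarray bounds, replacing recursion;
// idx doubles as the stack pointer and starts at this thread's global index, so the thread sorts values[idx .. n-1].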
start[idx] = idx;
end[idx] = n - 1;
while (idx >= 0) {
L = start[idx];
R = end[idx];
if (L < R) {
pivot = values[L];
while (L < R) {
while (values[R] >= pivot && L < R)
R--;
if(L < R)
values[L++] = values[R];
while (values[L] < pivot && L < R)
L++;
if (L < R)
values[R--] = values[L];
}
values[L] = pivot;
start[idx + 1] = L + 1;
end[idx + 1] = end[idx];
end[idx++] = L;
if (end[idx] - start[idx] > end[idx - 1] - start[idx - 1]) {
int tmp = start[idx];
start[idx] = start[idx - 1];
start[idx - 1] = tmp;
tmp = end[idx];
end[idx] = end[idx - 1];
end[idx - 1] = tmp;
}
}
else
idx--;
}
}
|
67e72d59597f6137e2514d2d2af1256e4cc5bbff.cu
|
#include "includes.h"
/*
* Naive sort
* used if the quicksort uses too many levels
*/
__global__ void kernel_quicksort(int* values, int n) {
#define MAX_LEVELS 1000
int pivot, L, R;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int start[MAX_LEVELS];
int end[MAX_LEVELS];
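// start/end form a per-thread explicit stack of subarray bounds, replacing recursion;
// idx doubles as the stack pointer and starts at this thread's global index, so the thread sorts values[idx .. n-1].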
start[idx] = idx;
end[idx] = n - 1;
while (idx >= 0) {
L = start[idx];
R = end[idx];
if (L < R) {
pivot = values[L];
while (L < R) {
while (values[R] >= pivot && L < R)
R--;
if(L < R)
values[L++] = values[R];
while (values[L] < pivot && L < R)
L++;
if (L < R)
values[R--] = values[L];
}
values[L] = pivot;
start[idx + 1] = L + 1;
end[idx + 1] = end[idx];
end[idx++] = L;
if (end[idx] - start[idx] > end[idx - 1] - start[idx - 1]) {
int tmp = start[idx];
start[idx] = start[idx - 1];
start[idx - 1] = tmp;
tmp = end[idx];
end[idx] = end[idx - 1];
end[idx - 1] = tmp;
}
}
else
idx--;
}
}
|
a1fc039787177eba7c654317df2de43cdb187bb4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
__global__ void __launch_bounds__ (128,2) sw4_1 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
// Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
double a_mux1, a_mux2, a_mux3, a_mux4, a_muy1, a_muy2, a_muy3, a_muy4, a_muz1, a_muz2, a_muz3, a_muz4;
double b_mux1, b_mux2, b_mux3, b_mux4, b_muy1, b_muy2, b_muy3, b_muy4, b_muz1, b_muz2, b_muz3, b_muz4;
double a_r1, b_r1;
if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
#pragma unroll 3
for (int k=2; k<=N-3; k+=2) {
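// two k-planes per iteration: plane k accumulates into a_r1 via the a_* coefficients, plane k+1 into b_r1 via the b_* coefficients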
a_mux1 = mu[k][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i-2] * strx[i-2];
a_mux2 = mu[k][j][i-2] * strx[i-2] + mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i] + 3.0 * mu[k][j][i-1] * strx[i-1];
a_mux3 = mu[k][j][i-1] * strx[i-1] + mu[k][j][i+2] * strx[i+2] + 3.0 * mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i];
a_mux4 = mu[k][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i+2] * strx[i+2];
a_muy1 = mu[k][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k][j][i] * stry[j] -3e0 / 4 * mu[k][j-2][i] * stry[j-2];
a_muy2 = mu[k][j-2][i] * stry[j-2] + mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j] + 3.0 * mu[k][j-1][i] * stry[j-1];
a_muy3 = mu[k][j-1][i] * stry[j-1] + mu[k][j+2][i] * stry[j+2] + 3.0 * mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j];
a_muy4 = mu[k][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k][j][i] * stry[j] - 3e0 / 4 * mu[k][j+2][i] * stry[j+2];
a_muz1 = mu[k-1][j][i] * strz[k-1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 / 4 * mu[k-2][j][i] * strz[k-2];
a_muz2 = mu[k-2][j][i] * strz[k-2] + mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k] + 3.0 * mu[k-1][j][i] * strz[k-1];
a_muz3 = mu[k-1][j][i] * strz[k-1] + mu[k+2][j][i] * strz[k+2] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k];
a_muz4 = mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 /4 * mu[k+2][j][i] * strz[k+2];
a_r1 = 1e0 / 6 * (strx[i] * ((2 * a_mux1 + la[k][j][i-1] * strx[i-1] - 3e0 / 4 * la[k][j][i] * strx[i] - 3e0 / 4 * la[k][j][i-2] * strx[i-2]) * (u_0[k][j][i-2] - u_0[k][j][i]) +
(2 * a_mux2 + la[k][j][i-2] * strx[i-2] + la[k][j][i+1] * strx[i+1] + 3 * la[k][j][i] * strx[i] + 3 * la[k][j][i-1] * strx[i-1]) * (u_0[k][j][i-1] - u_0[k][j][i]) +
(2 * a_mux3 + la[k][j][i-1] * strx[i-1] + la[k][j][i+2] * strx[i+2] + 3 * la[k][j][i+1] * strx[i+1] + 3 * la[k][j][i] * strx[i]) * (u_0[k][j][i+1] - u_0[k][j][i]) +
(2 * a_mux4 + la[k][j][i+1] * strx[i+1] - 3e0 / 4 * la[k][j][i] * strx[i] - 3e0 / 4 * la[k][j][i+2] * strx[i+2]) * (u_0[k][j][i+2] - u_0[k][j][i]))
+ stry[j] * (a_muy1 * (u_0[k][j-2][i] - u_0[k][j][i]) + a_muy2 * (u_0[k][j-1][i] - u_0[k][j][i]) + a_muy3 * (u_0[k][j+1][i] - u_0[k][j][i]) + a_muy4 * (u_0[k][j+2][i] - u_0[k][j][i])) + strz[k] * (a_muz1 * (u_0[k-2][j][i] - u_0[k][j][i]) + a_muz2 * (u_0[k-1][j][i] - u_0[k][j][i]) + a_muz3 * (u_0[k+1][j][i] - u_0[k][j][i]) + a_muz4 * (u_0[k+2][j][i] - u_0[k][j][i])));
a_r1 += strx[i] * stry[j] * (1e0 / 144) * (la[k][j][i-2] * (u_1[k][j-2][i-2] - u_1[k][j+2][i-2] + 8 * (-u_1[k][j-1][i-2] + u_1[k][j+1][i-2])) - 8 * (la[k][j][i-1] * (u_1[k][j-2][i-1] - u_1[k][j+2][i-1] + 8 * (-u_1[k][j-1][i-1] + u_1[k][j+1][i-1]))) + 8 * (la[k][j][i+1] * (u_1[k][j-2][i+1] - u_1[k][j+2][i+1] + 8 * (-u_1[k][j-1][i+1] + u_1[k][j+1][i+1]))) - (la[k][j][i+2] * (u_1[k][j-2][i+2] - u_1[k][j+2][i+2] + 8 * (-u_1[k][j-1][i+2] + u_1[k][j+1][i+2]))));
a_r1 += strx[i] * strz[k] * (1e0 / 144) * (la[k][j][i-2] * (u_2[k-2][j][i-2] - u_2[k+2][j][i-2] + 8 * (-u_2[k-1][j][i-2] + u_2[k+1][j][i-2])) - 8 * (la[k][j][i-1] * (u_2[k-2][j][i-1] - u_2[k+2][j][i-1] + 8 * (-u_2[k-1][j][i-1] + u_2[k+1][j][i-1]))) + 8 * (la[k][j][i+1] * (u_2[k-2][j][i+1] - u_2[k+2][j][i+1] + 8 * (-u_2[k-1][j][i+1] + u_2[k+1][j][i+1]))) - (la[k][j][i+2] * (u_2[k-2][j][i+2] - u_2[k+2][j][i+2] + 8 * (-u_2[k-1][j][i+2] + u_2[k+1][j][i+2]))));
a_r1 += strx[i] * stry[j] * (1e0 / 144) * (mu[k][j-2][i] * (u_1[k][j-2][i-2] - u_1[k][j-2][i+2] + 8 * (-u_1[k][j-2][i-1] + u_1[k][j-2][i+1])) - 8 * (mu[k][j-1][i] * (u_1[k][j-1][i-2] - u_1[k][j-1][i+2] + 8 * (-u_1[k][j-1][i-1] + u_1[k][j-1][i+1]))) + 8 * (mu[k][j+1][i] * (u_1[k][j+1][i-2] - u_1[k][j+1][i+2] + 8 * (-u_1[k][j+1][i-1] + u_1[k][j+1][i+1]))) - (mu[k][j+2][i] * (u_1[k][j+2][i-2] - u_1[k][j+2][i+2] + 8 * (-u_1[k][j+2][i-1] + u_1[k][j+2][i+1]))));
a_r1 += strx[i] * strz[k] * (1e0 / 144) * (mu[k-2][j][i] * (u_2[k-2][j][i-2] - u_2[k-2][j][i+2] + 8 * (-u_2[k-2][j][i-1] + u_2[k-2][j][i+1])) - 8 * (mu[k-1][j][i] * (u_2[k-1][j][i-2] - u_2[k-1][j][i+2] + 8 * (-u_2[k-1][j][i-1] + u_2[k-1][j][i+1]))) + 8 * (mu[k+1][j][i] * (u_2[k+1][j][i-2] - u_2[k+1][j][i+2] + 8 * (-u_2[k+1][j][i-1] + u_2[k+1][j][i+1]))) - (mu[k+2][j][i] * (u_2[k+2][j][i-2] - u_2[k+2][j][i+2] + 8 * (-u_2[k+2][j][i-1] + u_2[k+2][j][i+1]))));
uacc_0[k][j][i] = a1 * uacc_0[k][j][i] + cof * a_r1;
b_mux1 = mu[k+1][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k+1][j][i] * strx[i] - 3e0 / 4 * mu[k+1][j][i-2] * strx[i-2];
b_mux2 = mu[k+1][j][i-2] * strx[i-2] + mu[k+1][j][i+1] * strx[i+1] + 3.0 * mu[k+1][j][i] * strx[i] + 3.0 * mu[k+1][j][i-1] * strx[i-1];
b_mux3 = mu[k+1][j][i-1] * strx[i-1] + mu[k+1][j][i+2] * strx[i+2] + 3.0 * mu[k+1][j][i+1] * strx[i+1] + 3.0 * mu[k+1][j][i] * strx[i];
b_mux4 = mu[k+1][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k+1][j][i] * strx[i] - 3e0 / 4 * mu[k+1][j][i+2] * strx[i+2];
b_muy1 = mu[k+1][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k+1][j][i] * stry[j] -3e0 / 4 * mu[k+1][j-2][i] * stry[j-2];
b_muy2 = mu[k+1][j-2][i] * stry[j-2] + mu[k+1][j+1][i] * stry[j+1] + 3.0 * mu[k+1][j][i] * stry[j] + 3.0 * mu[k+1][j-1][i] * stry[j-1];
b_muy3 = mu[k+1][j-1][i] * stry[j-1] + mu[k+1][j+2][i] * stry[j+2] + 3.0 * mu[k+1][j+1][i] * stry[j+1] + 3.0 * mu[k+1][j][i] * stry[j];
b_muy4 = mu[k+1][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k+1][j][i] * stry[j] - 3e0 / 4 * mu[k+1][j+2][i] * stry[j+2];
b_muz1 = mu[k+1-1][j][i] * strz[k+1-1] - 3e0 / 4 * mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k+1-2][j][i] * strz[k+1-2];
b_muz2 = mu[k+1-2][j][i] * strz[k+1-2] + mu[k+1+1][j][i] * strz[k+1+1] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k+1-1][j][i] * strz[k+1-1];
b_muz3 = mu[k+1-1][j][i] * strz[k+1-1] + mu[k+1+2][j][i] * strz[k+1+2] + 3.0 * mu[k+1+1][j][i] * strz[k+1+1] + 3.0 * mu[k+1][j][i] * strz[k+1];
b_muz4 = mu[k+1+1][j][i] * strz[k+1+1] - 3e0 / 4 * mu[k+1][j][i] * strz[k+1] - 3e0 /4 * mu[k+1+2][j][i] * strz[k+1+2];
b_r1 = 1e0 / 6 * (strx[i] * ((2 * b_mux1 + la[k+1][j][i-1] * strx[i-1] - 3e0 / 4 * la[k+1][j][i] * strx[i] - 3e0 / 4 * la[k+1][j][i-2] * strx[i-2]) * (u_0[k+1][j][i-2] - u_0[k+1][j][i]) +
(2 * b_mux2 + la[k+1][j][i-2] * strx[i-2] + la[k+1][j][i+1] * strx[i+1] + 3 * la[k+1][j][i] * strx[i] + 3 * la[k+1][j][i-1] * strx[i-1]) * (u_0[k+1][j][i-1] - u_0[k+1][j][i]) +
(2 * b_mux3 + la[k+1][j][i-1] * strx[i-1] + la[k+1][j][i+2] * strx[i+2] + 3 * la[k+1][j][i+1] * strx[i+1] + 3 * la[k+1][j][i] * strx[i]) * (u_0[k+1][j][i+1] - u_0[k+1][j][i]) +
(2 * b_mux4 + la[k+1][j][i+1] * strx[i+1] - 3e0 / 4 * la[k+1][j][i] * strx[i] - 3e0 / 4 * la[k+1][j][i+2] * strx[i+2]) * (u_0[k+1][j][i+2] - u_0[k+1][j][i]))
+ stry[j] * (b_muy1 * (u_0[k+1][j-2][i] - u_0[k+1][j][i]) + b_muy2 * (u_0[k+1][j-1][i] - u_0[k+1][j][i]) + b_muy3 * (u_0[k+1][j+1][i] - u_0[k+1][j][i]) + b_muy4 * (u_0[k+1][j+2][i] - u_0[k+1][j][i])) + strz[k+1] * (b_muz1 * (u_0[k+1-2][j][i] - u_0[k+1][j][i]) + b_muz2 * (u_0[k+1-1][j][i] - u_0[k+1][j][i]) + b_muz3 * (u_0[k+1+1][j][i] - u_0[k+1][j][i]) + b_muz4 * (u_0[k+1+2][j][i] - u_0[k+1][j][i])));
b_r1 += strx[i] * stry[j] * (1e0 / 144) * (la[k+1][j][i-2] * (u_1[k+1][j-2][i-2] - u_1[k+1][j+2][i-2] + 8 * (-u_1[k+1][j-1][i-2] + u_1[k+1][j+1][i-2])) - 8 * (la[k+1][j][i-1] * (u_1[k+1][j-2][i-1] - u_1[k+1][j+2][i-1] + 8 * (-u_1[k+1][j-1][i-1] + u_1[k+1][j+1][i-1]))) + 8 * (la[k+1][j][i+1] * (u_1[k+1][j-2][i+1] - u_1[k+1][j+2][i+1] + 8 * (-u_1[k+1][j-1][i+1] + u_1[k+1][j+1][i+1]))) - (la[k+1][j][i+2] * (u_1[k+1][j-2][i+2] - u_1[k+1][j+2][i+2] + 8 * (-u_1[k+1][j-1][i+2] + u_1[k+1][j+1][i+2]))));
b_r1 += strx[i] * strz[k+1] * (1e0 / 144) * (la[k+1][j][i-2] * (u_2[k+1-2][j][i-2] - u_2[k+1+2][j][i-2] + 8 * (-u_2[k+1-1][j][i-2] + u_2[k+1+1][j][i-2])) - 8 * (la[k+1][j][i-1] * (u_2[k+1-2][j][i-1] - u_2[k+1+2][j][i-1] + 8 * (-u_2[k+1-1][j][i-1] + u_2[k+1+1][j][i-1]))) + 8 * (la[k+1][j][i+1] * (u_2[k+1-2][j][i+1] - u_2[k+1+2][j][i+1] + 8 * (-u_2[k+1-1][j][i+1] + u_2[k+1+1][j][i+1]))) - (la[k+1][j][i+2] * (u_2[k+1-2][j][i+2] - u_2[k+1+2][j][i+2] + 8 * (-u_2[k+1-1][j][i+2] + u_2[k+1+1][j][i+2]))));
b_r1 += strx[i] * stry[j] * (1e0 / 144) * (mu[k+1][j-2][i] * (u_1[k+1][j-2][i-2] - u_1[k+1][j-2][i+2] + 8 * (-u_1[k+1][j-2][i-1] + u_1[k+1][j-2][i+1])) - 8 * (mu[k+1][j-1][i] * (u_1[k+1][j-1][i-2] - u_1[k+1][j-1][i+2] + 8 * (-u_1[k+1][j-1][i-1] + u_1[k+1][j-1][i+1]))) + 8 * (mu[k+1][j+1][i] * (u_1[k+1][j+1][i-2] - u_1[k+1][j+1][i+2] + 8 * (-u_1[k+1][j+1][i-1] + u_1[k+1][j+1][i+1]))) - (mu[k+1][j+2][i] * (u_1[k+1][j+2][i-2] - u_1[k+1][j+2][i+2] + 8 * (-u_1[k+1][j+2][i-1] + u_1[k+1][j+2][i+1]))));
b_r1 += strx[i] * strz[k+1] * (1e0 / 144) * (mu[k+1-2][j][i] * (u_2[k+1-2][j][i-2] - u_2[k+1-2][j][i+2] + 8 * (-u_2[k+1-2][j][i-1] + u_2[k+1-2][j][i+1])) - 8 * (mu[k+1-1][j][i] * (u_2[k+1-1][j][i-2] - u_2[k+1-1][j][i+2] + 8 * (-u_2[k+1-1][j][i-1] + u_2[k+1-1][j][i+1]))) + 8 * (mu[k+1+1][j][i] * (u_2[k+1+1][j][i-2] - u_2[k+1+1][j][i+2] + 8 * (-u_2[k+1+1][j][i-1] + u_2[k+1+1][j][i+1]))) - (mu[k+1+2][j][i] * (u_2[k+1+2][j][i-2] - u_2[k+1+2][j][i+2] + 8 * (-u_2[k+1+2][j][i-1] + u_2[k+1+2][j][i+1]))));
uacc_0[k+1][j][i] = a1 * uacc_0[k+1][j][i] + cof * b_r1;
}
}
}
__global__ void __launch_bounds__ (128,2) sw4_2 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
// Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
double a_mux1, a_mux2, a_mux3, a_mux4, a_muy1, a_muy2, a_muy3, a_muy4, a_muz1, a_muz2, a_muz3, a_muz4;
double b_mux1, b_mux2, b_mux3, b_mux4, b_muy1, b_muy2, b_muy3, b_muy4, b_muz1, b_muz2, b_muz3, b_muz4;
double a_r2, b_r2;
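// sw4_2 updates the second displacement component (uacc_1); the same stencil is expressed through scalar _t_* temporaries, with a_r2 covering plane k and b_r2 plane k+1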
if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
#pragma unroll 3
for (int k=2; k<=N-3; k+=2) {
a_mux1 = mu[k][j][i-1] * strx[i-1];
a_mux1 -= 3.0 / 4.0 * mu[k][j][i] * strx[i];
a_mux1 -= 3.0 / 4.0 * mu[k][j][i-2] * strx[i-2];
a_mux2 = mu[k][j][i-2] * strx[i-2];
a_mux2 += mu[k][j][i+1] * strx[i+1];
a_mux2 += 3.0 * mu[k][j][i] * strx[i];
a_mux2 += 3.0 * mu[k][j][i-1] * strx[i-1];
a_mux3 = mu[k][j][i-1] * strx[i-1];
a_mux3 += mu[k][j][i+2] * strx[i+2];
a_mux3 += 3.0 * mu[k][j][i+1] * strx[i+1];
a_mux3 += 3.0 * mu[k][j][i] * strx[i];
a_mux4 = mu[k][j][i+1] * strx[i+1];
a_mux4 -= 3.0 / 4.0 * mu[k][j][i] * strx[i];
a_mux4 -= 3.0 / 4.0 * mu[k][j][i+2] * strx[i+2];
a_muy1 = mu[k][j-1][i] * stry[j-1];
a_muy1 -= 3.0 / 4.0 * mu[k][j][i] * stry[j];
a_muy1 -= 3.0 / 4.0 * mu[k][j-2][i] * stry[j-2];
a_muy2 = mu[k][j-2][i] * stry[j-2];
a_muy2 += mu[k][j+1][i] * stry[j+1];
a_muy2 += 3.0 * mu[k][j][i] * stry[j];
a_muy2 += 3.0 * mu[k][j-1][i] * stry[j-1];
a_muy3 = mu[k][j-1][i] * stry[j-1];
a_muy3 += mu[k][j+2][i] * stry[j+2];
a_muy3 += 3.0 * mu[k][j+1][i] * stry[j+1];
a_muy3 += 3.0 * mu[k][j][i] * stry[j];
a_muy4 = mu[k][j+1][i] * stry[j+1];
a_muy4 -= 3.0 / 4.0 * mu[k][j+2][i] * stry[j+2];
a_muy4 -= 3.0 / 4.0 * mu[k][j][i] * stry[j];
a_muz1 = mu[k-1][j][i] * strz[k-1];
a_muz1 -= 3.0 / 4.0 * mu[k][j][i] * strz[k];
a_muz1 -= 3.0 / 4.0 * mu[k-2][j][i] * strz[k-2];
a_muz2 = mu[k-2][j][i] * strz[k-2];
a_muz2 += mu[k+1][j][i] * strz[k+1];
a_muz2 += 3.0 * mu[k][j][i] * strz[k];
a_muz2 += 3.0 * mu[k-1][j][i] * strz[k-1];
a_muz3 = mu[k-1][j][i] * strz[k-1];
a_muz3 += mu[k+2][j][i] * strz[k+2];
a_muz3 += 3.0 * mu[k+1][j][i] * strz[k+1];
a_muz3 += 3.0 * mu[k][j][i] * strz[k];
a_muz4 = mu[k+1][j][i] * strz[k+1];
a_muz4 -= 3.0 / 4.0 * mu[k][j][i] * strz[k];
a_muz4 -= 3.0 / 4.0 * mu[k+2][j][i] * strz[k+2];
double _t_2_ = u_1[k][j][i-2];
_t_2_ -= u_1[k][j][i];
double _t_1_ = a_mux1 * _t_2_;
double _t_3_ = u_1[k][j][i-1];
_t_3_ -= u_1[k][j][i];
_t_1_ += a_mux2 * _t_3_;
double _t_4_ = u_1[k][j][i+1];
_t_4_ -= u_1[k][j][i];
_t_1_ += a_mux3 * _t_4_;
double _t_5_ = u_1[k][j][i+2];
_t_5_ -= u_1[k][j][i];
_t_1_ += a_mux4 * _t_5_;
double _t_0_ = strx[i] * _t_1_;
double _t_8_ = u_1[k][j-2][i];
_t_8_ -= u_1[k][j][i];
double _t_7_ = 2.0 * a_muy1;
double _v_23_ = la[k][j-1][i] * stry[j-1];
_t_7_ += _v_23_;
_t_7_ -= 3.0 / 4.0 * la[k][j][i] * stry[j];
double _t_9_ = 3.0 * la[k][j][i] * stry[j];
_t_9_ += 3.0 * la[k][j-1][i] * stry[j-1];
_t_9_ += 2.0 * a_muy2;
double _t_11_ = 3.0 * la[k][j][i] * stry[j];
_t_11_ += 2.0 * a_muy3;
double _t_13_ = -(3.0 / 4.0 * la[k][j][i] * stry[j]);
_t_13_ += 2.0 * a_muy4;
_t_7_ -= 3.0 / 4.0 * la[k][j-2][i] * stry[j-2];
double _t_6_ = _t_7_ * _t_8_;
_t_9_ += la[k][j-2][i] * stry[j-2];
_t_9_ += la[k][j+1][i] * stry[j+1];
double _t_10_ = u_1[k][j-1][i];
_t_10_ -= u_1[k][j][i];
_t_6_ += _t_9_ * _t_10_;
_t_11_ += _v_23_;
_t_11_ += la[k][j+2][i] * stry[j+2];
_t_11_ += 3.0 * la[k][j+1][i] * stry[j+1];
double _t_12_ = u_1[k][j+1][i];
_t_12_ -= u_1[k][j][i];
_t_6_ += _t_11_ * _t_12_;
_t_13_ += la[k][j+1][i] * stry[j+1];
_t_13_ -= 3.0 / 4.0 * la[k][j+2][i] * stry[j+2];
double _t_14_ = u_1[k][j+2][i];
_t_14_ -= u_1[k][j][i];
_t_6_ += _t_13_ * _t_14_;
_t_0_ += stry[j] * _t_6_;
double _t_16_ = u_1[k-2][j][i];
_t_16_ -= u_1[k][j][i];
double _t_15_ = a_muz1 * _t_16_;
double _t_17_ = -(u_1[k][j][i]);
_t_17_ += u_1[k-1][j][i];
_t_15_ += a_muz2 * _t_17_;
double _t_18_ = -(u_1[k][j][i]);
_t_18_ += u_1[k+1][j][i];
_t_15_ += a_muz3 * _t_18_;
double _t_19_ = -(u_1[k][j][i]);
_t_19_ += u_1[k+2][j][i];
_t_15_ += a_muz4 * _t_19_;
_t_0_ += strz[k] * _t_15_;
a_r2 = 1.0 / 6.0 * _t_0_;
double _t_25_ = -u_0[k][j-1][i-2];
_t_25_ += u_0[k][j+1][i-2];
double _t_40_ = u_0[k][j-1][i-2];
_t_40_ -= u_0[k][j-1][i+2];
double _t_43_ = u_0[k][j+1][i-2];
_t_43_ -= u_0[k][j+1][i+2];
double _t_33_ = -u_0[k][j-1][i+2];
_t_33_ += u_0[k][j+1][i+2];
double _t_24_ = 8.0 * _t_25_;
_t_24_ += u_0[k][j-2][i-2];
_t_24_ -= u_0[k][j+2][i-2];
double _t_37_ = u_0[k][j-2][i-2];
_t_37_ -= u_0[k][j-2][i+2];
double _t_45_ = u_0[k][j+2][i-2];
_t_45_ -= u_0[k][j+2][i+2];
double _t_32_ = u_0[k][j-2][i+2];
_t_32_ -= u_0[k][j+2][i+2];
_t_32_ += 8.0 * _t_33_;
double _t_22_ = mu[k][j][i-2] * _t_24_;
double _t_28_ = -u_0[k][j-1][i-1];
_t_28_ += u_0[k][j+1][i-1];
double _t_41_ = -u_0[k][j-1][i-1];
_t_41_ += u_0[k][j-1][i+1];
double _t_44_ = -u_0[k][j+1][i-1];
_t_44_ += u_0[k][j+1][i+1];
double _t_31_ = -u_0[k][j-1][i+1];
_t_31_ += u_0[k][j+1][i+1];
double _t_27_ = 8.0 * _t_28_;
_t_27_ += u_0[k][j-2][i-1];
_t_27_ -= u_0[k][j+2][i-1];
double _t_38_ = -u_0[k][j-2][i-1];
_t_38_ += u_0[k][j-2][i+1];
double _t_46_ = -u_0[k][j+2][i-1];
_t_46_ += u_0[k][j+2][i+1];
double _t_30_ = u_0[k][j-2][i+1];
_t_30_ -= u_0[k][j+2][i+1];
_t_30_ += 8.0 * _t_31_;
_t_22_ -= mu[k][j][i-1] * _t_27_;
_t_22_ += mu[k][j][i+1] * _t_30_;
_t_22_ -= mu[k][j][i+2] * _t_32_;
double _t_21_ = strx[i] * stry[j];
double _t_20_ = _t_21_ * _t_22_;
_t_37_ += 8.0 * _t_38_;
double _t_35_ = la[k][j-2][i] * _t_37_;
_t_40_ += 8.0 * _t_41_;
_t_35_ -= la[k][j-1][i] * _t_40_;
_t_43_ += 8.0 * _t_44_;
_t_35_ += la[k][j+1][i] * _t_43_;
_t_45_ += 8.0 * _t_46_;
_t_35_ -= la[k][j+2][i] * _t_45_;
double _t_34_ = strx[i] * stry[j];
_t_20_ += _t_34_ * _t_35_;
double _t_51_ = -u_2[k-1][j-2][i];
_t_51_ += u_2[k+1][j-2][i];
double _t_50_ = 8.0 * _t_51_;
_t_50_ += u_2[k-2][j-2][i];
_t_50_ -= u_2[k+2][j-2][i];
double _t_48_ = la[k][j-2][i] * _t_50_;
double _t_54_ = -u_2[k-1][j-1][i];
_t_54_ += u_2[k+1][j-1][i];
double _t_53_ = 8.0 * _t_54_;
_t_53_ += u_2[k-2][j-1][i];
_t_53_ -= u_2[k+2][j-1][i];
_t_48_ -= la[k][j-1][i] * _t_53_;
double _t_57_ = -u_2[k-1][j+1][i];
_t_57_ += u_2[k+1][j+1][i];
double _t_70_ = u_2[k+1][j+1][i];
_t_70_ += -u_2[k+1][j-1][i];
double _t_56_ = 8.0 * _t_57_;
_t_56_ += u_2[k-2][j+1][i];
_t_56_ -= u_2[k+2][j+1][i];
_t_48_ += la[k][j+1][i] * _t_56_;
double _t_59_ = -u_2[k-1][j+2][i];
_t_59_ += u_2[k+1][j+2][i];
double _t_69_ = -(u_2[k+1][j+2][i]);
_t_69_ += 8.0 * _t_70_;
_t_69_ += u_2[k+1][j-2][i];
double _t_66_ = -(u_2[k-1][j+2][i]);
_t_66_ += u_2[k-1][j-2][i];
double _t_58_ = 8.0 * _t_59_;
_t_58_ += u_2[k-2][j+2][i];
_t_58_ -= u_2[k+2][j+2][i];
_t_48_ -= la[k][j+2][i] * _t_58_;
double _t_47_ = stry[j] * strz[k];
_t_20_ += _t_47_ * _t_48_;
double _t_64_ = -u_2[k-2][j-1][i];
_t_64_ += u_2[k-2][j+1][i];
double _t_63_ = 8.0 * _t_64_;
_t_63_ += u_2[k-2][j-2][i];
_t_63_ -= u_2[k-2][j+2][i];
double _t_61_ = mu[k-2][j][i] * _t_63_;
_t_61_ += mu[k+1][j][i] * _t_69_;
double _t_67_ = -u_2[k-1][j-1][i];
_t_67_ += u_2[k-1][j+1][i];
_t_66_ += 8.0 * _t_67_;
_t_61_ -= mu[k-1][j][i] * _t_66_;
double _t_72_ = -u_2[k+2][j-1][i];
_t_72_ += u_2[k+2][j+1][i];
double _t_71_ = 8.0 * _t_72_;
_t_71_ += u_2[k+2][j-2][i];
_t_71_ -= u_2[k+2][j+2][i];
_t_61_ -= mu[k+2][j][i] * _t_71_;
double _t_60_ = stry[j] * strz[k];
_t_20_ += _t_60_ * _t_61_;
a_r2 += _t_20_;
double uacc_1kc0jc0ic0 = cof * a_r2;
uacc_1kc0jc0ic0 += a1 * uacc_1[k][j][i];
b_mux1 = mu[k+1][j][i-1] * strx[i-1];
b_mux1 -= 3.0 / 4.0 * mu[k+1][j][i] * strx[i];
b_mux1 -= 3.0 / 4.0 * mu[k+1][j][i-2] * strx[i-2];
b_mux2 = mu[k+1][j][i-2] * strx[i-2];
b_mux2 += mu[k+1][j][i+1] * strx[i+1];
b_mux2 += 3.0 * mu[k+1][j][i] * strx[i];
b_mux2 += 3.0 * mu[k+1][j][i-1] * strx[i-1];
b_mux3 = mu[k+1][j][i-1] * strx[i-1];
b_mux3 += mu[k+1][j][i+2] * strx[i+2];
b_mux3 += 3.0 * mu[k+1][j][i+1] * strx[i+1];
b_mux3 += 3.0 * mu[k+1][j][i] * strx[i];
b_mux4 = mu[k+1][j][i+1] * strx[i+1];
b_mux4 -= 3.0 / 4.0 * mu[k+1][j][i+2] * strx[i+2];
b_mux4 -= 3.0 / 4.0 * mu[k+1][j][i] * strx[i];
b_muy1 = mu[k+1][j-1][i] * stry[j-1];
b_muy1 -= 3.0 / 4.0 * mu[k+1][j][i] * stry[j];
b_muy1 -= 3.0 / 4.0 * mu[k+1][j-2][i] * stry[j-2];
b_muy2 = mu[k+1][j-2][i] * stry[j-2];
b_muy2 += mu[k+1][j+1][i] * stry[j+1];
b_muy2 += 3.0 * mu[k+1][j][i] * stry[j];
b_muy2 += 3.0 * mu[k+1][j-1][i] * stry[j-1];
b_muy3 = mu[k+1][j-1][i] * stry[j-1];
b_muy3 += mu[k+1][j+2][i] * stry[j+2];
b_muy3 += 3.0 * mu[k+1][j+1][i] * stry[j+1];
b_muy3 += 3.0 * mu[k+1][j][i] * stry[j];
b_muy4 = mu[k+1][j+1][i] * stry[j+1];
b_muy4 -= 3.0 / 4.0 * mu[k+1][j+2][i] * stry[j+2];
b_muy4 -= 3.0 / 4.0 * mu[k+1][j][i] * stry[j];
b_muz1 = mu[k][j][i] * strz[k];
b_muz1 -= 3.0 / 4.0 * mu[k+1][j][i] * strz[k+1];
b_muz1 -= 3.0 / 4.0 * mu[k-1][j][i] * strz[k-1];
b_muz2 = mu[k-1][j][i] * strz[k-1];
b_muz2 += mu[k+2][j][i] * strz[k+2];
b_muz2 += 3.0 * mu[k+1][j][i] * strz[k+1];
b_muz2 += 3.0 * mu[k][j][i] * strz[k];
b_muz3 = mu[k][j][i] * strz[k];
b_muz3 += mu[k+3][j][i] * strz[k+3];
b_muz3 += 3.0 * mu[k+2][j][i] * strz[k+2];
b_muz3 += 3.0 * mu[k+1][j][i] * strz[k+1];
b_muz4 = mu[k+2][j][i] * strz[k+2];
b_muz4 -= 3.0 / 4.0 * mu[k+1][j][i] * strz[k+1];
b_muz4 -= 3.0 / 4.0 * mu[k+3][j][i] * strz[k+3];
double _t_89_ = u_1[k-1][j][i];
_t_89_ -= u_1[k+1][j][i];
double _t_88_ = b_muz1 * _t_89_;
double _t_90_ = u_1[k][j][i];
_t_90_ -= u_1[k+1][j][i];
_t_88_ += b_muz2 * _t_90_;
double _t_91_ = u_1[k+2][j][i];
_t_91_ -= u_1[k+1][j][i];
_t_88_ += b_muz3 * _t_91_;
double _t_92_ = u_1[k+3][j][i];
_t_92_ -= u_1[k+1][j][i];
_t_88_ += b_muz4 * _t_92_;
double _t_73_ = strz[k+1] * _t_88_;
double _t_75_ = u_1[k+1][j][i-2];
_t_75_ -= u_1[k+1][j][i];
double _t_74_ = b_mux1 * _t_75_;
double _t_76_ = u_1[k+1][j][i-1];
_t_76_ -= u_1[k+1][j][i];
_t_74_ += b_mux2 * _t_76_;
double _t_77_ = u_1[k+1][j][i+1];
_t_77_ -= u_1[k+1][j][i];
_t_74_ += b_mux3 * _t_77_;
double _t_78_ = u_1[k+1][j][i+2];
_t_78_ -= u_1[k+1][j][i];
_t_74_ += b_mux4 * _t_78_;
_t_73_ += strx[i] * _t_74_;
double _t_81_ = u_1[k+1][j-2][i];
_t_81_ -= u_1[k+1][j][i];
double _t_80_ = 2.0 * b_muy1;
double _v_76_ = la[k+1][j-1][i] * stry[j-1];
_t_80_ += _v_76_;
_t_80_ -= 3.0 / 4.0 * la[k+1][j][i] * stry[j];
double _t_82_ = 3.0 * la[k+1][j][i] * stry[j];
_t_82_ += 3.0 * la[k+1][j-1][i] * stry[j-1];
_t_82_ += 2.0 * b_muy2;
double _t_84_ = 3.0 * la[k+1][j][i] * stry[j];
_t_84_ += 2.0 * b_muy3;
double _t_86_ = -(3.0 / 4.0 * la[k+1][j][i] * stry[j]);
_t_86_ += 2.0 * b_muy4;
_t_80_ -= 3.0 / 4.0 * la[k+1][j-2][i] * stry[j-2];
double _t_79_ = _t_80_ * _t_81_;
_t_82_ += la[k+1][j-2][i] * stry[j-2];
double _v_79_ = la[k+1][j+1][i] * stry[j+1];
_t_84_ += 3.0 * la[k+1][j+1][i] * stry[j+1];
_t_82_ += _v_79_;
double _t_83_ = u_1[k+1][j-1][i];
_t_83_ -= u_1[k+1][j][i];
_t_79_ += _t_82_ * _t_83_;
_t_84_ += _v_76_;
_t_84_ += la[k+1][j+2][i] * stry[j+2];
_t_86_ -= 3.0 / 4.0 * la[k+1][j+2][i] * stry[j+2];
double _t_85_ = u_1[k+1][j+1][i];
_t_85_ -= u_1[k+1][j][i];
_t_79_ += _t_84_ * _t_85_;
_t_86_ += _v_79_;
double _t_87_ = -(u_1[k+1][j][i]);
_t_87_ += u_1[k+1][j+2][i];
_t_79_ += _t_86_ * _t_87_;
_t_73_ += stry[j] * _t_79_;
b_r2 = 1.0 / 6.0 * _t_73_;
double _t_137_ = -u_2[k-1][j-1][i];
_t_137_ += u_2[k-1][j+1][i];
double _t_126_ = u_2[k-1][j-1][i];
_t_126_ -= u_2[k+3][j-1][i];
double _t_129_ = u_2[k-1][j+1][i];
_t_129_ -= u_2[k+3][j+1][i];
double _t_145_ = -u_2[k+3][j-1][i];
_t_145_ += u_2[k+3][j+1][i];
double _t_136_ = 8.0 * _t_137_;
_t_136_ += u_2[k-1][j-2][i];
_t_136_ -= u_2[k-1][j+2][i];
double _t_123_ = u_2[k-1][j-2][i];
_t_123_ -= u_2[k+3][j-2][i];
double _t_131_ = u_2[k-1][j+2][i];
_t_131_ -= u_2[k+3][j+2][i];
double _t_144_ = u_2[k+3][j-2][i];
_t_144_ -= u_2[k+3][j+2][i];
_t_144_ += 8.0 * _t_145_;
double _t_134_ = mu[k-1][j][i] * _t_136_;
double _t_143_ = -u_2[k+2][j-1][i];
_t_143_ += u_2[k+2][j+1][i];
double _t_127_ = u_2[k+2][j-1][i];
_t_127_ += -u_2[k][j-1][i];
double _t_130_ = u_2[k+2][j+1][i];
_t_130_ += -u_2[k][j+1][i];
double _t_140_ = -u_2[k][j-1][i];
_t_140_ += u_2[k][j+1][i];
double _t_142_ = 8.0 * _t_143_;
_t_142_ += u_2[k+2][j-2][i];
_t_142_ -= u_2[k+2][j+2][i];
double _t_124_ = u_2[k+2][j-2][i];
_t_124_ += -u_2[k][j-2][i];
double _t_132_ = u_2[k+2][j+2][i];
_t_132_ += -u_2[k][j+2][i];
double _t_139_ = u_2[k][j-2][i];
_t_139_ -= u_2[k][j+2][i];
_t_139_ += 8.0 * _t_140_;
_t_134_ += mu[k+2][j][i] * _t_142_;
_t_134_ -= mu[k][j][i] * _t_139_;
_t_134_ -= mu[k+3][j][i] * _t_144_;
double _t_135_ = stry[j] * strz[k+1];
double _t_96_ = strx[i] * stry[j];
double _t_133_ = _t_135_ * 1.0 / 144.0;
double _t_93_ = _t_133_ * _t_134_;
_t_123_ += 8.0 * _t_124_;
double _t_121_ = la[k+1][j-2][i] * _t_123_;
_t_126_ += 8.0 * _t_127_;
_t_121_ -= la[k+1][j-1][i] * _t_126_;
_t_129_ += 8.0 * _t_130_;
_t_121_ += la[k+1][j+1][i] * _t_129_;
_t_131_ += 8.0 * _t_132_;
_t_121_ -= la[k+1][j+2][i] * _t_131_;
double _t_120_ = _t_135_;
_t_93_ += _t_120_ * _t_121_;
double _t_98_ = -u_0[k+1][j-1][i-2];
_t_98_ += u_0[k+1][j+1][i-2];
double _t_113_ = u_0[k+1][j-1][i-2];
_t_113_ -= u_0[k+1][j-1][i+2];
double _t_116_ = u_0[k+1][j+1][i-2];
_t_116_ -= u_0[k+1][j+1][i+2];
double _t_106_ = -u_0[k+1][j-1][i+2];
_t_106_ += u_0[k+1][j+1][i+2];
double _t_97_ = 8.0 * _t_98_;
_t_97_ += u_0[k+1][j-2][i-2];
_t_97_ -= u_0[k+1][j+2][i-2];
double _t_110_ = u_0[k+1][j-2][i-2];
_t_110_ -= u_0[k+1][j-2][i+2];
double _t_118_ = u_0[k+1][j+2][i-2];
_t_118_ -= u_0[k+1][j+2][i+2];
double _t_105_ = u_0[k+1][j-2][i+2];
_t_105_ -= u_0[k+1][j+2][i+2];
_t_105_ += 8.0 * _t_106_;
double _t_95_ = mu[k+1][j][i-2] * _t_97_;
double _t_101_ = -u_0[k+1][j-1][i-1];
_t_101_ += u_0[k+1][j+1][i-1];
double _t_114_ = -u_0[k+1][j-1][i-1];
_t_114_ += u_0[k+1][j-1][i+1];
double _t_117_ = -u_0[k+1][j+1][i-1];
_t_117_ += u_0[k+1][j+1][i+1];
double _t_104_ = -u_0[k+1][j-1][i+1];
_t_104_ += u_0[k+1][j+1][i+1];
double _t_100_ = 8.0 * _t_101_;
_t_100_ += u_0[k+1][j-2][i-1];
_t_100_ -= u_0[k+1][j+2][i-1];
double _t_111_ = -u_0[k+1][j-2][i-1];
_t_111_ += u_0[k+1][j-2][i+1];
double _t_119_ = -u_0[k+1][j+2][i-1];
_t_119_ += u_0[k+1][j+2][i+1];
double _t_103_ = u_0[k+1][j-2][i+1];
_t_103_ -= u_0[k+1][j+2][i+1];
_t_103_ += 8.0 * _t_104_;
_t_95_ -= mu[k+1][j][i-1] * _t_100_;
_t_95_ += mu[k+1][j][i+1] * _t_103_;
_t_95_ -= mu[k+1][j][i+2] * _t_105_;
double _t_94_ = _t_96_ * 1.0 / 144.0;
_t_93_ += _t_94_ * _t_95_;
_t_110_ += 8.0 * _t_111_;
double _t_108_ = la[k+1][j-2][i] * _t_110_;
_t_113_ += 8.0 * _t_114_;
_t_108_ -= la[k+1][j-1][i] * _t_113_;
_t_116_ += 8.0 * _t_117_;
_t_108_ += la[k+1][j+1][i] * _t_116_;
_t_118_ += 8.0 * _t_119_;
_t_108_ -= la[k+1][j+2][i] * _t_118_;
double _t_107_ = _t_96_;
_t_93_ += _t_107_ * _t_108_;
b_r2 += _t_93_;
double _v_105_ = cof * b_r2;
double uacc_1kp1jc0ic0 = _v_105_;
uacc_1kp1jc0ic0 += a1 * uacc_1[k+1][j][i];
uacc_1[k][j][i] = uacc_1kc0jc0ic0;
uacc_1[k+1][j][i] = uacc_1kp1jc0ic0;
}
}
}
__global__ void __launch_bounds__ (128,2) sw4_3 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
// Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
double mux1, mux2, mux3, mux4, muy1, muy2, muy3, muy4, muz1, muz2, muz3, muz4;
double r1, r2, r3;
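// sw4_3 updates the third displacement component (uacc_2) through r3, one k-plane per iteration (r1 and r2 are declared but not used here)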
if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
#pragma unroll 10
for (int k=2; k<=N-3; k++) {
mux1 = mu[k][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i-2] * strx[i-2];
mux2 = mu[k][j][i-2] * strx[i-2] + mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i] + 3.0 * mu[k][j][i-1] * strx[i-1];
mux3 = mu[k][j][i-1] * strx[i-1] + mu[k][j][i+2] * strx[i+2] + 3.0 * mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i];
mux4 = mu[k][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i+2] * strx[i+2];
muy1 = mu[k][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k][j][i] * stry[j] -3e0 / 4 * mu[k][j-2][i] * stry[j-2];
muy2 = mu[k][j-2][i] * stry[j-2] + mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j] + 3.0 * mu[k][j-1][i] * stry[j-1];
muy3 = mu[k][j-1][i] * stry[j-1] + mu[k][j+2][i] * stry[j+2] + 3.0 * mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j];
muy4 = mu[k][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k][j][i] * stry[j] - 3e0 / 4 * mu[k][j+2][i] * stry[j+2];
muz1 = mu[k-1][j][i] * strz[k-1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 / 4 * mu[k-2][j][i] * strz[k-2];
muz2 = mu[k-2][j][i] * strz[k-2] + mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k] + 3.0 * mu[k-1][j][i] * strz[k-1];
muz3 = mu[k-1][j][i] * strz[k-1] + mu[k+2][j][i] * strz[k+2] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k];
muz4 = mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 /4 * mu[k+2][j][i] * strz[k+2];
r3 = 1e0 / 6 * (strx[i] * (mux1 * (u_2[k][j][i-2] - u_2[k][j][i]) + mux2 * (u_2[k][j][i-1] - u_2[k][j][i]) + mux3 * (u_2[k][j][i+1] - u_2[k][j][i]) + mux4 * (u_2[k][j][i+2] - u_2[k][j][i])) +
stry[j] * (muy1 * (u_2[k][j-2][i] - u_2[k][j][i]) + muy2 * (u_2[k][j-1][i] - u_2[k][j][i]) + muy3 * (u_2[k][j+1][i] - u_2[k][j][i]) + muy4 * (u_2[k][j+2][i] - u_2[k][j][i])) +
strz[k] * ((2 * muz1 + la[k-1][j][i] * strz[k-1] - 3e0 / 4 * la[k][j][i] * strz[k] - 3e0 / 4 * la[k-2][j][i] * strz[k-2]) * (u_2[k-2][j][i] - u_2[k][j][i]) +
(2 * muz2 + la[k-2][j][i] * strz[k-2] + la[k+1][j][i] * strz[k+1] + 3 * la[k][j][i] * strz[k] + 3 * la[k-1][j][i] * strz[k-1]) * (u_2[k-1][j][i] - u_2[k][j][i]) +
(2 * muz3 + la[k-1][j][i] * strz[k-1] + la[k+2][j][i] * strz[k+2] + 3 * la[k+1][j][i] * strz[k+1] + 3 * la[k][j][i] * strz[k]) * (u_2[k+1][j][i] - u_2[k][j][i]) +
(2 * muz4 + la[k+1][j][i] * strz[k+1] - 3e0 / 4 * la[k][j][i] * strz[k] - 3e0 / 4 * la[k+2][j][i] * strz[k+2]) * (u_2[k+2][j][i] - u_2[k][j][i])));
r3 += strx[i] * strz[k] * (1e0 / 144) * (mu[k][j][i-2] * (u_0[k-2][j][i-2] - u_0[k+2][j][i-2] + 8 * (-u_0[k-1][j][i-2] + u_0[k+1][j][i-2])) - 8 * (mu[k][j][i-1] * (u_0[k-2][j][i-1] - u_0[k+2][j][i-1] + 8 * (-u_0[k-1][j][i-1] + u_0[k+1][j][i-1]))) + 8 * (mu[k][j][i+1] * (u_0[k-2][j][i+1] - u_0[k+2][j][i+1] + 8 * (-u_0[k-1][j][i+1] + u_0[k+1][j][i+1]))) - (mu[k][j][i+2] * (u_0[k-2][j][i+2] - u_0[k+2][j][i+2] + 8 * (-u_0[k-1][j][i+2] + u_0[k+1][j][i+2]))));
r3 += stry[j] * strz[k] * (1e0 / 144) * (mu[k][j-2][i] * (u_1[k-2][j-2][i] - u_1[k+2][j-2][i] + 8 * (-u_1[k-1][j-2][i] + u_1[k+1][j-2][i])) - 8 * (mu[k][j-1][i] * (u_1[k-2][j-1][i] - u_1[k+2][j-1][i] + 8 * (-u_1[k-1][j-1][i] + u_1[k+1][j-1][i]))) + 8 * (mu[k][j+1][i] * (u_1[k-2][j+1][i] - u_1[k+2][j+1][i] + 8 * (-u_1[k-1][j+1][i] + u_1[k+1][j+1][i]))) - (mu[k][j+2][i] * (u_1[k-2][j+2][i] - u_1[k+2][j+2][i] + 8 * (-u_1[k-1][j+2][i] + u_1[k+1][j+2][i]))));
r3 += strx[i] * strz[k] * (1e0 / 144) * (la[k-2][j][i] * (u_0[k-2][j][i-2] - u_0[k-2][j][i+2] + 8 * (-u_0[k-2][j][i-1] + u_0[k-2][j][i+1])) - 8 * (la[k-1][j][i] * (u_0[k-1][j][i-2] - u_0[k-1][j][i+2] + 8 * (-u_0[k-1][j][i-1] + u_0[k-1][j][i+1]))) + 8 * (la[k+1][j][i] * (u_0[k+1][j][i-2] - u_0[k+1][j][i+2] + 8 * (-u_0[k+1][j][i-1] + u_0[k+1][j][i+1]))) - (la[k+2][j][i] * (u_0[k+2][j][i-2] - u_0[k+2][j][i+2] + 8 * (-u_0[k+2][j][i-1] + u_0[k+2][j][i+1]))));
r3 += stry[j] * strz[k] * (1e0 / 144) * (la[k-2][j][i] * (u_1[k-2][j-2][i] - u_1[k-2][j+2][i] + 8 * (-u_1[k-2][j-1][i] + u_1[k-2][j+1][i])) - 8 * (la[k-1][j][i] * (u_1[k-1][j-2][i] - u_1[k-1][j+2][i] + 8 * (-u_1[k-1][j-1][i] + u_1[k-1][j+1][i]))) + 8 * (la[k+1][j][i] * (u_1[k+1][j-2][i] - u_1[k+1][j+2][i] + 8 * (-u_1[k+1][j-1][i] + u_1[k+1][j+1][i]))) - (la[k+2][j][i] * (u_1[k+2][j-2][i] - u_1[k+2][j+2][i] + 8 * (-u_1[k+2][j-1][i] + u_1[k+2][j+1][i]))));
uacc_2[k][j][i] = a1 * uacc_2[k][j][i] + cof * r3;
}
}
}
extern "C" void host_code (double *h_uacc_0, double *h_uacc_1, double *h_uacc_2, double *h_u_0, double *h_u_1, double *h_u_2, double *h_mu, double *h_la, double *h_strx, double *h_stry, double *h_strz, int N) {
double *uacc_0;
hipMalloc (&uacc_0, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for uacc_0\n");
hipMemcpy (uacc_0, h_uacc_0, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *uacc_1;
hipMalloc (&uacc_1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for uacc_1\n");
hipMemcpy (uacc_1, h_uacc_1, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *uacc_2;
hipMalloc (&uacc_2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for uacc_2\n");
hipMemcpy (uacc_2, h_uacc_2, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u_0;
hipMalloc (&u_0, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u_0\n");
hipMemcpy (u_0, h_u_0, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u_1;
hipMalloc (&u_1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u_1\n");
hipMemcpy (u_1, h_u_1, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u_2;
hipMalloc (&u_2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u_2\n");
hipMemcpy (u_2, h_u_2, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *mu;
hipMalloc (&mu, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for mu\n");
hipMemcpy (mu, h_mu, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *la;
hipMalloc (&la, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for la\n");
hipMemcpy (la, h_la, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *strx;
hipMalloc (&strx, sizeof(double)*N);
check_error ("Failed to allocate device memory for strx\n");
hipMemcpy (strx, h_strx, sizeof(double)*N, hipMemcpyHostToDevice);
double *stry;
hipMalloc (&stry, sizeof(double)*N);
check_error ("Failed to allocate device memory for stry\n");
hipMemcpy (stry, h_stry, sizeof(double)*N, hipMemcpyHostToDevice);
double *strz;
hipMalloc (&strz, sizeof(double)*N);
check_error ("Failed to allocate device memory for strz\n");
hipMemcpy (strz, h_strz, sizeof(double)*N, hipMemcpyHostToDevice);
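// launch configuration: 16x8 thread blocks tile the (i,j) plane (grid sized by ceiling division); each kernel loops over k internally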
dim3 blockconfig (16, 8);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
hipLaunchKernelGGL((sw4_1), dim3(gridconfig), dim3(blockconfig), 0, 0, uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
hipLaunchKernelGGL((sw4_2), dim3(gridconfig), dim3(blockconfig), 0, 0, uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
hipLaunchKernelGGL((sw4_3), dim3(gridconfig), dim3(blockconfig), 0, 0, uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
hipMemcpy (h_uacc_0, uacc_0, sizeof(double)*N*N*N, hipMemcpyDeviceToHost);
hipMemcpy (h_uacc_1, uacc_1, sizeof(double)*N*N*N, hipMemcpyDeviceToHost);
hipMemcpy (h_uacc_2, uacc_2, sizeof(double)*N*N*N, hipMemcpyDeviceToHost);
hipFree (uacc_0);
hipFree (uacc_1);
hipFree (uacc_2);
hipFree (u_0);
hipFree (u_1);
hipFree (u_2);
hipFree (mu);
hipFree (la);
hipFree (strx);
hipFree (stry);
hipFree (strz);
}
|
a1fc039787177eba7c654317df2de43cdb187bb4.cu
|
#include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}
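// sw4_1, sw4_2 and sw4_3 below each update one component of the acceleration
// (uacc_0, uacc_1, uacc_2) with what appears to be the fourth-order SW4
// elastic-wave stencil; a 2D grid covers (i, j) and each thread walks the k
// dimension itself.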
__global__ void __launch_bounds__ (128,2) sw4_1 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
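// The flat pointers are reinterpreted as fixed 304x304-pitched 3D arrays,
// which evidently assumes N == 304 for the actual data layout.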
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
double a_mux1, a_mux2, a_mux3, a_mux4, a_muy1, a_muy2, a_muy3, a_muy4, a_muz1, a_muz2, a_muz3, a_muz4;
double b_mux1, b_mux2, b_mux3, b_mux4, b_muy1, b_muy2, b_muy3, b_muy4, b_muz1, b_muz2, b_muz3, b_muz4;
double a_r1, b_r1;
if (i>=2 && j>=2 && i<=N-3 && j<=N-3) {
#pragma unroll 3
for (int k=2; k<=N-3; k+=2) {
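// Each iteration updates two consecutive k-planes: the a_* terms produce
// uacc_0[k] and the b_* terms produce uacc_0[k+1].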
a_mux1 = mu[k][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i-2] * strx[i-2];
a_mux2 = mu[k][j][i-2] * strx[i-2] + mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i] + 3.0 * mu[k][j][i-1] * strx[i-1];
a_mux3 = mu[k][j][i-1] * strx[i-1] + mu[k][j][i+2] * strx[i+2] + 3.0 * mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i];
a_mux4 = mu[k][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i+2] * strx[i+2];
a_muy1 = mu[k][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k][j][i] * stry[j] -3e0 / 4 * mu[k][j-2][i] * stry[j-2];
a_muy2 = mu[k][j-2][i] * stry[j-2] + mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j] + 3.0 * mu[k][j-1][i] * stry[j-1];
a_muy3 = mu[k][j-1][i] * stry[j-1] + mu[k][j+2][i] * stry[j+2] + 3.0 * mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j];
a_muy4 = mu[k][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k][j][i] * stry[j] - 3e0 / 4 * mu[k][j+2][i] * stry[j+2];
a_muz1 = mu[k-1][j][i] * strz[k-1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 / 4 * mu[k-2][j][i] * strz[k-2];
a_muz2 = mu[k-2][j][i] * strz[k-2] + mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k] + 3.0 * mu[k-1][j][i] * strz[k-1];
a_muz3 = mu[k-1][j][i] * strz[k-1] + mu[k+2][j][i] * strz[k+2] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k];
a_muz4 = mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 /4 * mu[k+2][j][i] * strz[k+2];
a_r1 = 1e0 / 6 * (strx[i] * ((2 * a_mux1 + la[k][j][i-1] * strx[i-1] - 3e0 / 4 * la[k][j][i] * strx[i] - 3e0 / 4 * la[k][j][i-2] * strx[i-2]) * (u_0[k][j][i-2] - u_0[k][j][i]) +
(2 * a_mux2 + la[k][j][i-2] * strx[i-2] + la[k][j][i+1] * strx[i+1] + 3 * la[k][j][i] * strx[i] + 3 * la[k][j][i-1] * strx[i-1]) * (u_0[k][j][i-1] - u_0[k][j][i]) +
(2 * a_mux3 + la[k][j][i-1] * strx[i-1] + la[k][j][i+2] * strx[i+2] + 3 * la[k][j][i+1] * strx[i+1] + 3 * la[k][j][i] * strx[i]) * (u_0[k][j][i+1] - u_0[k][j][i]) +
(2 * a_mux4 + la[k][j][i+1] * strx[i+1] - 3e0 / 4 * la[k][j][i] * strx[i] - 3e0 / 4 * la[k][j][i+2] * strx[i+2]) * (u_0[k][j][i+2] - u_0[k][j][i]))
+ stry[j] * (a_muy1 * (u_0[k][j-2][i] - u_0[k][j][i]) + a_muy2 * (u_0[k][j-1][i] - u_0[k][j][i]) + a_muy3 * (u_0[k][j+1][i] - u_0[k][j][i]) + a_muy4 * (u_0[k][j+2][i] - u_0[k][j][i])) + strz[k] * (a_muz1 * (u_0[k-2][j][i] - u_0[k][j][i]) + a_muz2 * (u_0[k-1][j][i] - u_0[k][j][i]) + a_muz3 * (u_0[k+1][j][i] - u_0[k][j][i]) + a_muz4 * (u_0[k+2][j][i] - u_0[k][j][i])));
a_r1 += strx[i] * stry[j] * (1e0 / 144) * (la[k][j][i-2] * (u_1[k][j-2][i-2] - u_1[k][j+2][i-2] + 8 * (-u_1[k][j-1][i-2] + u_1[k][j+1][i-2])) - 8 * (la[k][j][i-1] * (u_1[k][j-2][i-1] - u_1[k][j+2][i-1] + 8 * (-u_1[k][j-1][i-1] + u_1[k][j+1][i-1]))) + 8 * (la[k][j][i+1] * (u_1[k][j-2][i+1] - u_1[k][j+2][i+1] + 8 * (-u_1[k][j-1][i+1] + u_1[k][j+1][i+1]))) - (la[k][j][i+2] * (u_1[k][j-2][i+2] - u_1[k][j+2][i+2] + 8 * (-u_1[k][j-1][i+2] + u_1[k][j+1][i+2]))));
a_r1 += strx[i] * strz[k] * (1e0 / 144) * (la[k][j][i-2] * (u_2[k-2][j][i-2] - u_2[k+2][j][i-2] + 8 * (-u_2[k-1][j][i-2] + u_2[k+1][j][i-2])) - 8 * (la[k][j][i-1] * (u_2[k-2][j][i-1] - u_2[k+2][j][i-1] + 8 * (-u_2[k-1][j][i-1] + u_2[k+1][j][i-1]))) + 8 * (la[k][j][i+1] * (u_2[k-2][j][i+1] - u_2[k+2][j][i+1] + 8 * (-u_2[k-1][j][i+1] + u_2[k+1][j][i+1]))) - (la[k][j][i+2] * (u_2[k-2][j][i+2] - u_2[k+2][j][i+2] + 8 * (-u_2[k-1][j][i+2] + u_2[k+1][j][i+2]))));
a_r1 += strx[i] * stry[j] * (1e0 / 144) * (mu[k][j-2][i] * (u_1[k][j-2][i-2] - u_1[k][j-2][i+2] + 8 * (-u_1[k][j-2][i-1] + u_1[k][j-2][i+1])) - 8 * (mu[k][j-1][i] * (u_1[k][j-1][i-2] - u_1[k][j-1][i+2] + 8 * (-u_1[k][j-1][i-1] + u_1[k][j-1][i+1]))) + 8 * (mu[k][j+1][i] * (u_1[k][j+1][i-2] - u_1[k][j+1][i+2] + 8 * (-u_1[k][j+1][i-1] + u_1[k][j+1][i+1]))) - (mu[k][j+2][i] * (u_1[k][j+2][i-2] - u_1[k][j+2][i+2] + 8 * (-u_1[k][j+2][i-1] + u_1[k][j+2][i+1]))));
a_r1 += strx[i] * strz[k] * (1e0 / 144) * (mu[k-2][j][i] * (u_2[k-2][j][i-2] - u_2[k-2][j][i+2] + 8 * (-u_2[k-2][j][i-1] + u_2[k-2][j][i+1])) - 8 * (mu[k-1][j][i] * (u_2[k-1][j][i-2] - u_2[k-1][j][i+2] + 8 * (-u_2[k-1][j][i-1] + u_2[k-1][j][i+1]))) + 8 * (mu[k+1][j][i] * (u_2[k+1][j][i-2] - u_2[k+1][j][i+2] + 8 * (-u_2[k+1][j][i-1] + u_2[k+1][j][i+1]))) - (mu[k+2][j][i] * (u_2[k+2][j][i-2] - u_2[k+2][j][i+2] + 8 * (-u_2[k+2][j][i-1] + u_2[k+2][j][i+1]))));
uacc_0[k][j][i] = a1 * uacc_0[k][j][i] + cof * a_r1;
b_mux1 = mu[k+1][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k+1][j][i] * strx[i] - 3e0 / 4 * mu[k+1][j][i-2] * strx[i-2];
b_mux2 = mu[k+1][j][i-2] * strx[i-2] + mu[k+1][j][i+1] * strx[i+1] + 3.0 * mu[k+1][j][i] * strx[i] + 3.0 * mu[k+1][j][i-1] * strx[i-1];
b_mux3 = mu[k+1][j][i-1] * strx[i-1] + mu[k+1][j][i+2] * strx[i+2] + 3.0 * mu[k+1][j][i+1] * strx[i+1] + 3.0 * mu[k+1][j][i] * strx[i];
b_mux4 = mu[k+1][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k+1][j][i] * strx[i] - 3e0 / 4 * mu[k+1][j][i+2] * strx[i+2];
b_muy1 = mu[k+1][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k+1][j][i] * stry[j] -3e0 / 4 * mu[k+1][j-2][i] * stry[j-2];
b_muy2 = mu[k+1][j-2][i] * stry[j-2] + mu[k+1][j+1][i] * stry[j+1] + 3.0 * mu[k+1][j][i] * stry[j] + 3.0 * mu[k+1][j-1][i] * stry[j-1];
b_muy3 = mu[k+1][j-1][i] * stry[j-1] + mu[k+1][j+2][i] * stry[j+2] + 3.0 * mu[k+1][j+1][i] * stry[j+1] + 3.0 * mu[k+1][j][i] * stry[j];
b_muy4 = mu[k+1][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k+1][j][i] * stry[j] - 3e0 / 4 * mu[k+1][j+2][i] * stry[j+2];
b_muz1 = mu[k+1-1][j][i] * strz[k+1-1] - 3e0 / 4 * mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k+1-2][j][i] * strz[k+1-2];
b_muz2 = mu[k+1-2][j][i] * strz[k+1-2] + mu[k+1+1][j][i] * strz[k+1+1] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k+1-1][j][i] * strz[k+1-1];
b_muz3 = mu[k+1-1][j][i] * strz[k+1-1] + mu[k+1+2][j][i] * strz[k+1+2] + 3.0 * mu[k+1+1][j][i] * strz[k+1+1] + 3.0 * mu[k+1][j][i] * strz[k+1];
b_muz4 = mu[k+1+1][j][i] * strz[k+1+1] - 3e0 / 4 * mu[k+1][j][i] * strz[k+1] - 3e0 /4 * mu[k+1+2][j][i] * strz[k+1+2];
b_r1 = 1e0 / 6 * (strx[i] * ((2 * b_mux1 + la[k+1][j][i-1] * strx[i-1] - 3e0 / 4 * la[k+1][j][i] * strx[i] - 3e0 / 4 * la[k+1][j][i-2] * strx[i-2]) * (u_0[k+1][j][i-2] - u_0[k+1][j][i]) +
(2 * b_mux2 + la[k+1][j][i-2] * strx[i-2] + la[k+1][j][i+1] * strx[i+1] + 3 * la[k+1][j][i] * strx[i] + 3 * la[k+1][j][i-1] * strx[i-1]) * (u_0[k+1][j][i-1] - u_0[k+1][j][i]) +
(2 * b_mux3 + la[k+1][j][i-1] * strx[i-1] + la[k+1][j][i+2] * strx[i+2] + 3 * la[k+1][j][i+1] * strx[i+1] + 3 * la[k+1][j][i] * strx[i]) * (u_0[k+1][j][i+1] - u_0[k+1][j][i]) +
(2 * b_mux4 + la[k+1][j][i+1] * strx[i+1] - 3e0 / 4 * la[k+1][j][i] * strx[i] - 3e0 / 4 * la[k+1][j][i+2] * strx[i+2]) * (u_0[k+1][j][i+2] - u_0[k+1][j][i]))
+ stry[j] * (b_muy1 * (u_0[k+1][j-2][i] - u_0[k+1][j][i]) + b_muy2 * (u_0[k+1][j-1][i] - u_0[k+1][j][i]) + b_muy3 * (u_0[k+1][j+1][i] - u_0[k+1][j][i]) + b_muy4 * (u_0[k+1][j+2][i] - u_0[k+1][j][i])) + strz[k+1] * (b_muz1 * (u_0[k+1-2][j][i] - u_0[k+1][j][i]) + b_muz2 * (u_0[k+1-1][j][i] - u_0[k+1][j][i]) + b_muz3 * (u_0[k+1+1][j][i] - u_0[k+1][j][i]) + b_muz4 * (u_0[k+1+2][j][i] - u_0[k+1][j][i])));
b_r1 += strx[i] * stry[j] * (1e0 / 144) * (la[k+1][j][i-2] * (u_1[k+1][j-2][i-2] - u_1[k+1][j+2][i-2] + 8 * (-u_1[k+1][j-1][i-2] + u_1[k+1][j+1][i-2])) - 8 * (la[k+1][j][i-1] * (u_1[k+1][j-2][i-1] - u_1[k+1][j+2][i-1] + 8 * (-u_1[k+1][j-1][i-1] + u_1[k+1][j+1][i-1]))) + 8 * (la[k+1][j][i+1] * (u_1[k+1][j-2][i+1] - u_1[k+1][j+2][i+1] + 8 * (-u_1[k+1][j-1][i+1] + u_1[k+1][j+1][i+1]))) - (la[k+1][j][i+2] * (u_1[k+1][j-2][i+2] - u_1[k+1][j+2][i+2] + 8 * (-u_1[k+1][j-1][i+2] + u_1[k+1][j+1][i+2]))));
b_r1 += strx[i] * strz[k+1] * (1e0 / 144) * (la[k+1][j][i-2] * (u_2[k+1-2][j][i-2] - u_2[k+1+2][j][i-2] + 8 * (-u_2[k+1-1][j][i-2] + u_2[k+1+1][j][i-2])) - 8 * (la[k+1][j][i-1] * (u_2[k+1-2][j][i-1] - u_2[k+1+2][j][i-1] + 8 * (-u_2[k+1-1][j][i-1] + u_2[k+1+1][j][i-1]))) + 8 * (la[k+1][j][i+1] * (u_2[k+1-2][j][i+1] - u_2[k+1+2][j][i+1] + 8 * (-u_2[k+1-1][j][i+1] + u_2[k+1+1][j][i+1]))) - (la[k+1][j][i+2] * (u_2[k+1-2][j][i+2] - u_2[k+1+2][j][i+2] + 8 * (-u_2[k+1-1][j][i+2] + u_2[k+1+1][j][i+2]))));
b_r1 += strx[i] * stry[j] * (1e0 / 144) * (mu[k+1][j-2][i] * (u_1[k+1][j-2][i-2] - u_1[k+1][j-2][i+2] + 8 * (-u_1[k+1][j-2][i-1] + u_1[k+1][j-2][i+1])) - 8 * (mu[k+1][j-1][i] * (u_1[k+1][j-1][i-2] - u_1[k+1][j-1][i+2] + 8 * (-u_1[k+1][j-1][i-1] + u_1[k+1][j-1][i+1]))) + 8 * (mu[k+1][j+1][i] * (u_1[k+1][j+1][i-2] - u_1[k+1][j+1][i+2] + 8 * (-u_1[k+1][j+1][i-1] + u_1[k+1][j+1][i+1]))) - (mu[k+1][j+2][i] * (u_1[k+1][j+2][i-2] - u_1[k+1][j+2][i+2] + 8 * (-u_1[k+1][j+2][i-1] + u_1[k+1][j+2][i+1]))));
b_r1 += strx[i] * strz[k+1] * (1e0 / 144) * (mu[k+1-2][j][i] * (u_2[k+1-2][j][i-2] - u_2[k+1-2][j][i+2] + 8 * (-u_2[k+1-2][j][i-1] + u_2[k+1-2][j][i+1])) - 8 * (mu[k+1-1][j][i] * (u_2[k+1-1][j][i-2] - u_2[k+1-1][j][i+2] + 8 * (-u_2[k+1-1][j][i-1] + u_2[k+1-1][j][i+1]))) + 8 * (mu[k+1+1][j][i] * (u_2[k+1+1][j][i-2] - u_2[k+1+1][j][i+2] + 8 * (-u_2[k+1+1][j][i-1] + u_2[k+1+1][j][i+1]))) - (mu[k+1+2][j][i] * (u_2[k+1+2][j][i-2] - u_2[k+1+2][j][i+2] + 8 * (-u_2[k+1+2][j][i-1] + u_2[k+1+2][j][i+1]))));
uacc_0[k+1][j][i] = a1 * uacc_0[k+1][j][i] + cof * b_r1;
}
}
}
__global__ void __launch_bounds__ (128,2) sw4_2 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
double a_mux1, a_mux2, a_mux3, a_mux4, a_muy1, a_muy2, a_muy3, a_muy4, a_muz1, a_muz2, a_muz3, a_muz4;
double b_mux1, b_mux2, b_mux3, b_mux4, b_muy1, b_muy2, b_muy3, b_muy4, b_muz1, b_muz2, b_muz3, b_muz4;
double a_r2, b_r2;
if (i>=2 && j>=2 && i<=N-3 && j<=N-3) {
#pragma unroll 3
for (int k=2; k<=N-3; k+=2) {
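// Same two-plane pattern for the y component (uacc_1), but here the stencil
// has been fully scalarized into the _t_* / _v_* temporaries, presumably by
// an automatic expression-reordering tool.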
a_mux1 = mu[k][j][i-1] * strx[i-1];
a_mux1 -= 3.0 / 4.0 * mu[k][j][i] * strx[i];
a_mux1 -= 3.0 / 4.0 * mu[k][j][i-2] * strx[i-2];
a_mux2 = mu[k][j][i-2] * strx[i-2];
a_mux2 += mu[k][j][i+1] * strx[i+1];
a_mux2 += 3.0 * mu[k][j][i] * strx[i];
a_mux2 += 3.0 * mu[k][j][i-1] * strx[i-1];
a_mux3 = mu[k][j][i-1] * strx[i-1];
a_mux3 += mu[k][j][i+2] * strx[i+2];
a_mux3 += 3.0 * mu[k][j][i+1] * strx[i+1];
a_mux3 += 3.0 * mu[k][j][i] * strx[i];
a_mux4 = mu[k][j][i+1] * strx[i+1];
a_mux4 -= 3.0 / 4.0 * mu[k][j][i] * strx[i];
a_mux4 -= 3.0 / 4.0 * mu[k][j][i+2] * strx[i+2];
a_muy1 = mu[k][j-1][i] * stry[j-1];
a_muy1 -= 3.0 / 4.0 * mu[k][j][i] * stry[j];
a_muy1 -= 3.0 / 4.0 * mu[k][j-2][i] * stry[j-2];
a_muy2 = mu[k][j-2][i] * stry[j-2];
a_muy2 += mu[k][j+1][i] * stry[j+1];
a_muy2 += 3.0 * mu[k][j][i] * stry[j];
a_muy2 += 3.0 * mu[k][j-1][i] * stry[j-1];
a_muy3 = mu[k][j-1][i] * stry[j-1];
a_muy3 += mu[k][j+2][i] * stry[j+2];
a_muy3 += 3.0 * mu[k][j+1][i] * stry[j+1];
a_muy3 += 3.0 * mu[k][j][i] * stry[j];
a_muy4 = mu[k][j+1][i] * stry[j+1];
a_muy4 -= 3.0 / 4.0 * mu[k][j+2][i] * stry[j+2];
a_muy4 -= 3.0 / 4.0 * mu[k][j][i] * stry[j];
a_muz1 = mu[k-1][j][i] * strz[k-1];
a_muz1 -= 3.0 / 4.0 * mu[k][j][i] * strz[k];
a_muz1 -= 3.0 / 4.0 * mu[k-2][j][i] * strz[k-2];
a_muz2 = mu[k-2][j][i] * strz[k-2];
a_muz2 += mu[k+1][j][i] * strz[k+1];
a_muz2 += 3.0 * mu[k][j][i] * strz[k];
a_muz2 += 3.0 * mu[k-1][j][i] * strz[k-1];
a_muz3 = mu[k-1][j][i] * strz[k-1];
a_muz3 += mu[k+2][j][i] * strz[k+2];
a_muz3 += 3.0 * mu[k+1][j][i] * strz[k+1];
a_muz3 += 3.0 * mu[k][j][i] * strz[k];
a_muz4 = mu[k+1][j][i] * strz[k+1];
a_muz4 -= 3.0 / 4.0 * mu[k][j][i] * strz[k];
a_muz4 -= 3.0 / 4.0 * mu[k+2][j][i] * strz[k+2];
double _t_2_ = u_1[k][j][i-2];
_t_2_ -= u_1[k][j][i];
double _t_1_ = a_mux1 * _t_2_;
double _t_3_ = u_1[k][j][i-1];
_t_3_ -= u_1[k][j][i];
_t_1_ += a_mux2 * _t_3_;
double _t_4_ = u_1[k][j][i+1];
_t_4_ -= u_1[k][j][i];
_t_1_ += a_mux3 * _t_4_;
double _t_5_ = u_1[k][j][i+2];
_t_5_ -= u_1[k][j][i];
_t_1_ += a_mux4 * _t_5_;
double _t_0_ = strx[i] * _t_1_;
double _t_8_ = u_1[k][j-2][i];
_t_8_ -= u_1[k][j][i];
double _t_7_ = 2.0 * a_muy1;
double _v_23_ = la[k][j-1][i] * stry[j-1];
_t_7_ += _v_23_;
_t_7_ -= 3.0 / 4.0 * la[k][j][i] * stry[j];
double _t_9_ = 3.0 * la[k][j][i] * stry[j];
_t_9_ += 3.0 * la[k][j-1][i] * stry[j-1];
_t_9_ += 2.0 * a_muy2;
double _t_11_ = 3.0 * la[k][j][i] * stry[j];
_t_11_ += 2.0 * a_muy3;
double _t_13_ = -(3.0 / 4.0 * la[k][j][i] * stry[j]);
_t_13_ += 2.0 * a_muy4;
_t_7_ -= 3.0 / 4.0 * la[k][j-2][i] * stry[j-2];
double _t_6_ = _t_7_ * _t_8_;
_t_9_ += la[k][j-2][i] * stry[j-2];
_t_9_ += la[k][j+1][i] * stry[j+1];
double _t_10_ = u_1[k][j-1][i];
_t_10_ -= u_1[k][j][i];
_t_6_ += _t_9_ * _t_10_;
_t_11_ += _v_23_;
_t_11_ += la[k][j+2][i] * stry[j+2];
_t_11_ += 3.0 * la[k][j+1][i] * stry[j+1];
double _t_12_ = u_1[k][j+1][i];
_t_12_ -= u_1[k][j][i];
_t_6_ += _t_11_ * _t_12_;
_t_13_ += la[k][j+1][i] * stry[j+1];
_t_13_ -= 3.0 / 4.0 * la[k][j+2][i] * stry[j+2];
double _t_14_ = u_1[k][j+2][i];
_t_14_ -= u_1[k][j][i];
_t_6_ += _t_13_ * _t_14_;
_t_0_ += stry[j] * _t_6_;
double _t_16_ = u_1[k-2][j][i];
_t_16_ -= u_1[k][j][i];
double _t_15_ = a_muz1 * _t_16_;
double _t_17_ = -(u_1[k][j][i]);
_t_17_ += u_1[k-1][j][i];
_t_15_ += a_muz2 * _t_17_;
double _t_18_ = -(u_1[k][j][i]);
_t_18_ += u_1[k+1][j][i];
_t_15_ += a_muz3 * _t_18_;
double _t_19_ = -(u_1[k][j][i]);
_t_19_ += u_1[k+2][j][i];
_t_15_ += a_muz4 * _t_19_;
_t_0_ += strz[k] * _t_15_;
a_r2 = 1.0 / 6.0 * _t_0_;
double _t_25_ = -u_0[k][j-1][i-2];
_t_25_ += u_0[k][j+1][i-2];
double _t_40_ = u_0[k][j-1][i-2];
_t_40_ -= u_0[k][j-1][i+2];
double _t_43_ = u_0[k][j+1][i-2];
_t_43_ -= u_0[k][j+1][i+2];
double _t_33_ = -u_0[k][j-1][i+2];
_t_33_ += u_0[k][j+1][i+2];
double _t_24_ = 8.0 * _t_25_;
_t_24_ += u_0[k][j-2][i-2];
_t_24_ -= u_0[k][j+2][i-2];
double _t_37_ = u_0[k][j-2][i-2];
_t_37_ -= u_0[k][j-2][i+2];
double _t_45_ = u_0[k][j+2][i-2];
_t_45_ -= u_0[k][j+2][i+2];
double _t_32_ = u_0[k][j-2][i+2];
_t_32_ -= u_0[k][j+2][i+2];
_t_32_ += 8.0 * _t_33_;
double _t_22_ = mu[k][j][i-2] * _t_24_;
double _t_28_ = -u_0[k][j-1][i-1];
_t_28_ += u_0[k][j+1][i-1];
double _t_41_ = -u_0[k][j-1][i-1];
_t_41_ += u_0[k][j-1][i+1];
double _t_44_ = -u_0[k][j+1][i-1];
_t_44_ += u_0[k][j+1][i+1];
double _t_31_ = -u_0[k][j-1][i+1];
_t_31_ += u_0[k][j+1][i+1];
double _t_27_ = 8.0 * _t_28_;
_t_27_ += u_0[k][j-2][i-1];
_t_27_ -= u_0[k][j+2][i-1];
double _t_38_ = -u_0[k][j-2][i-1];
_t_38_ += u_0[k][j-2][i+1];
double _t_46_ = -u_0[k][j+2][i-1];
_t_46_ += u_0[k][j+2][i+1];
double _t_30_ = u_0[k][j-2][i+1];
_t_30_ -= u_0[k][j+2][i+1];
_t_30_ += 8.0 * _t_31_;
_t_22_ -= mu[k][j][i-1] * _t_27_;
_t_22_ += mu[k][j][i+1] * _t_30_;
_t_22_ -= mu[k][j][i+2] * _t_32_;
double _t_21_ = strx[i] * stry[j];
double _t_20_ = _t_21_ * _t_22_;
_t_37_ += 8.0 * _t_38_;
double _t_35_ = la[k][j-2][i] * _t_37_;
_t_40_ += 8.0 * _t_41_;
_t_35_ -= la[k][j-1][i] * _t_40_;
_t_43_ += 8.0 * _t_44_;
_t_35_ += la[k][j+1][i] * _t_43_;
_t_45_ += 8.0 * _t_46_;
_t_35_ -= la[k][j+2][i] * _t_45_;
double _t_34_ = strx[i] * stry[j];
_t_20_ += _t_34_ * _t_35_;
double _t_51_ = -u_2[k-1][j-2][i];
_t_51_ += u_2[k+1][j-2][i];
double _t_50_ = 8.0 * _t_51_;
_t_50_ += u_2[k-2][j-2][i];
_t_50_ -= u_2[k+2][j-2][i];
double _t_48_ = la[k][j-2][i] * _t_50_;
double _t_54_ = -u_2[k-1][j-1][i];
_t_54_ += u_2[k+1][j-1][i];
double _t_53_ = 8.0 * _t_54_;
_t_53_ += u_2[k-2][j-1][i];
_t_53_ -= u_2[k+2][j-1][i];
_t_48_ -= la[k][j-1][i] * _t_53_;
double _t_57_ = -u_2[k-1][j+1][i];
_t_57_ += u_2[k+1][j+1][i];
double _t_70_ = u_2[k+1][j+1][i];
_t_70_ += -u_2[k+1][j-1][i];
double _t_56_ = 8.0 * _t_57_;
_t_56_ += u_2[k-2][j+1][i];
_t_56_ -= u_2[k+2][j+1][i];
_t_48_ += la[k][j+1][i] * _t_56_;
double _t_59_ = -u_2[k-1][j+2][i];
_t_59_ += u_2[k+1][j+2][i];
double _t_69_ = -(u_2[k+1][j+2][i]);
_t_69_ += 8.0 * _t_70_;
_t_69_ += u_2[k+1][j-2][i];
double _t_66_ = -(u_2[k-1][j+2][i]);
_t_66_ += u_2[k-1][j-2][i];
double _t_58_ = 8.0 * _t_59_;
_t_58_ += u_2[k-2][j+2][i];
_t_58_ -= u_2[k+2][j+2][i];
_t_48_ -= la[k][j+2][i] * _t_58_;
double _t_47_ = stry[j] * strz[k];
_t_20_ += _t_47_ * _t_48_;
double _t_64_ = -u_2[k-2][j-1][i];
_t_64_ += u_2[k-2][j+1][i];
double _t_63_ = 8.0 * _t_64_;
_t_63_ += u_2[k-2][j-2][i];
_t_63_ -= u_2[k-2][j+2][i];
double _t_61_ = mu[k-2][j][i] * _t_63_;
_t_61_ += mu[k+1][j][i] * _t_69_;
double _t_67_ = -u_2[k-1][j-1][i];
_t_67_ += u_2[k-1][j+1][i];
_t_66_ += 8.0 * _t_67_;
_t_61_ -= mu[k-1][j][i] * _t_66_;
double _t_72_ = -u_2[k+2][j-1][i];
_t_72_ += u_2[k+2][j+1][i];
double _t_71_ = 8.0 * _t_72_;
_t_71_ += u_2[k+2][j-2][i];
_t_71_ -= u_2[k+2][j+2][i];
_t_61_ -= mu[k+2][j][i] * _t_71_;
double _t_60_ = stry[j] * strz[k];
_t_20_ += _t_60_ * _t_61_;
a_r2 += _t_20_;
double uacc_1kc0jc0ic0 = cof * a_r2;
uacc_1kc0jc0ic0 += a1 * uacc_1[k][j][i];
b_mux1 = mu[k+1][j][i-1] * strx[i-1];
b_mux1 -= 3.0 / 4.0 * mu[k+1][j][i] * strx[i];
b_mux1 -= 3.0 / 4.0 * mu[k+1][j][i-2] * strx[i-2];
b_mux2 = mu[k+1][j][i-2] * strx[i-2];
b_mux2 += mu[k+1][j][i+1] * strx[i+1];
b_mux2 += 3.0 * mu[k+1][j][i] * strx[i];
b_mux2 += 3.0 * mu[k+1][j][i-1] * strx[i-1];
b_mux3 = mu[k+1][j][i-1] * strx[i-1];
b_mux3 += mu[k+1][j][i+2] * strx[i+2];
b_mux3 += 3.0 * mu[k+1][j][i+1] * strx[i+1];
b_mux3 += 3.0 * mu[k+1][j][i] * strx[i];
b_mux4 = mu[k+1][j][i+1] * strx[i+1];
b_mux4 -= 3.0 / 4.0 * mu[k+1][j][i+2] * strx[i+2];
b_mux4 -= 3.0 / 4.0 * mu[k+1][j][i] * strx[i];
b_muy1 = mu[k+1][j-1][i] * stry[j-1];
b_muy1 -= 3.0 / 4.0 * mu[k+1][j][i] * stry[j];
b_muy1 -= 3.0 / 4.0 * mu[k+1][j-2][i] * stry[j-2];
b_muy2 = mu[k+1][j-2][i] * stry[j-2];
b_muy2 += mu[k+1][j+1][i] * stry[j+1];
b_muy2 += 3.0 * mu[k+1][j][i] * stry[j];
b_muy2 += 3.0 * mu[k+1][j-1][i] * stry[j-1];
b_muy3 = mu[k+1][j-1][i] * stry[j-1];
b_muy3 += mu[k+1][j+2][i] * stry[j+2];
b_muy3 += 3.0 * mu[k+1][j+1][i] * stry[j+1];
b_muy3 += 3.0 * mu[k+1][j][i] * stry[j];
b_muy4 = mu[k+1][j+1][i] * stry[j+1];
b_muy4 -= 3.0 / 4.0 * mu[k+1][j+2][i] * stry[j+2];
b_muy4 -= 3.0 / 4.0 * mu[k+1][j][i] * stry[j];
b_muz1 = mu[k][j][i] * strz[k];
b_muz1 -= 3.0 / 4.0 * mu[k+1][j][i] * strz[k+1];
b_muz1 -= 3.0 / 4.0 * mu[k-1][j][i] * strz[k-1];
b_muz2 = mu[k-1][j][i] * strz[k-1];
b_muz2 += mu[k+2][j][i] * strz[k+2];
b_muz2 += 3.0 * mu[k+1][j][i] * strz[k+1];
b_muz2 += 3.0 * mu[k][j][i] * strz[k];
b_muz3 = mu[k][j][i] * strz[k];
b_muz3 += mu[k+3][j][i] * strz[k+3];
b_muz3 += 3.0 * mu[k+2][j][i] * strz[k+2];
b_muz3 += 3.0 * mu[k+1][j][i] * strz[k+1];
b_muz4 = mu[k+2][j][i] * strz[k+2];
b_muz4 -= 3.0 / 4.0 * mu[k+1][j][i] * strz[k+1];
b_muz4 -= 3.0 / 4.0 * mu[k+3][j][i] * strz[k+3];
double _t_89_ = u_1[k-1][j][i];
_t_89_ -= u_1[k+1][j][i];
double _t_88_ = b_muz1 * _t_89_;
double _t_90_ = u_1[k][j][i];
_t_90_ -= u_1[k+1][j][i];
_t_88_ += b_muz2 * _t_90_;
double _t_91_ = u_1[k+2][j][i];
_t_91_ -= u_1[k+1][j][i];
_t_88_ += b_muz3 * _t_91_;
double _t_92_ = u_1[k+3][j][i];
_t_92_ -= u_1[k+1][j][i];
_t_88_ += b_muz4 * _t_92_;
double _t_73_ = strz[k+1] * _t_88_;
double _t_75_ = u_1[k+1][j][i-2];
_t_75_ -= u_1[k+1][j][i];
double _t_74_ = b_mux1 * _t_75_;
double _t_76_ = u_1[k+1][j][i-1];
_t_76_ -= u_1[k+1][j][i];
_t_74_ += b_mux2 * _t_76_;
double _t_77_ = u_1[k+1][j][i+1];
_t_77_ -= u_1[k+1][j][i];
_t_74_ += b_mux3 * _t_77_;
double _t_78_ = u_1[k+1][j][i+2];
_t_78_ -= u_1[k+1][j][i];
_t_74_ += b_mux4 * _t_78_;
_t_73_ += strx[i] * _t_74_;
double _t_81_ = u_1[k+1][j-2][i];
_t_81_ -= u_1[k+1][j][i];
double _t_80_ = 2.0 * b_muy1;
double _v_76_ = la[k+1][j-1][i] * stry[j-1];
_t_80_ += _v_76_;
_t_80_ -= 3.0 / 4.0 * la[k+1][j][i] * stry[j];
double _t_82_ = 3.0 * la[k+1][j][i] * stry[j];
_t_82_ += 3.0 * la[k+1][j-1][i] * stry[j-1];
_t_82_ += 2.0 * b_muy2;
double _t_84_ = 3.0 * la[k+1][j][i] * stry[j];
_t_84_ += 2.0 * b_muy3;
double _t_86_ = -(3.0 / 4.0 * la[k+1][j][i] * stry[j]);
_t_86_ += 2.0 * b_muy4;
_t_80_ -= 3.0 / 4.0 * la[k+1][j-2][i] * stry[j-2];
double _t_79_ = _t_80_ * _t_81_;
_t_82_ += la[k+1][j-2][i] * stry[j-2];
double _v_79_ = la[k+1][j+1][i] * stry[j+1];
_t_84_ += 3.0 * la[k+1][j+1][i] * stry[j+1];
_t_82_ += _v_79_;
double _t_83_ = u_1[k+1][j-1][i];
_t_83_ -= u_1[k+1][j][i];
_t_79_ += _t_82_ * _t_83_;
_t_84_ += _v_76_;
_t_84_ += la[k+1][j+2][i] * stry[j+2];
_t_86_ -= 3.0 / 4.0 * la[k+1][j+2][i] * stry[j+2];
double _t_85_ = u_1[k+1][j+1][i];
_t_85_ -= u_1[k+1][j][i];
_t_79_ += _t_84_ * _t_85_;
_t_86_ += _v_79_;
double _t_87_ = -(u_1[k+1][j][i]);
_t_87_ += u_1[k+1][j+2][i];
_t_79_ += _t_86_ * _t_87_;
_t_73_ += stry[j] * _t_79_;
b_r2 = 1.0 / 6.0 * _t_73_;
double _t_137_ = -u_2[k-1][j-1][i];
_t_137_ += u_2[k-1][j+1][i];
double _t_126_ = u_2[k-1][j-1][i];
_t_126_ -= u_2[k+3][j-1][i];
double _t_129_ = u_2[k-1][j+1][i];
_t_129_ -= u_2[k+3][j+1][i];
double _t_145_ = -u_2[k+3][j-1][i];
_t_145_ += u_2[k+3][j+1][i];
double _t_136_ = 8.0 * _t_137_;
_t_136_ += u_2[k-1][j-2][i];
_t_136_ -= u_2[k-1][j+2][i];
double _t_123_ = u_2[k-1][j-2][i];
_t_123_ -= u_2[k+3][j-2][i];
double _t_131_ = u_2[k-1][j+2][i];
_t_131_ -= u_2[k+3][j+2][i];
double _t_144_ = u_2[k+3][j-2][i];
_t_144_ -= u_2[k+3][j+2][i];
_t_144_ += 8.0 * _t_145_;
double _t_134_ = mu[k-1][j][i] * _t_136_;
double _t_143_ = -u_2[k+2][j-1][i];
_t_143_ += u_2[k+2][j+1][i];
double _t_127_ = u_2[k+2][j-1][i];
_t_127_ += -u_2[k][j-1][i];
double _t_130_ = u_2[k+2][j+1][i];
_t_130_ += -u_2[k][j+1][i];
double _t_140_ = -u_2[k][j-1][i];
_t_140_ += u_2[k][j+1][i];
double _t_142_ = 8.0 * _t_143_;
_t_142_ += u_2[k+2][j-2][i];
_t_142_ -= u_2[k+2][j+2][i];
double _t_124_ = u_2[k+2][j-2][i];
_t_124_ += -u_2[k][j-2][i];
double _t_132_ = u_2[k+2][j+2][i];
_t_132_ += -u_2[k][j+2][i];
double _t_139_ = u_2[k][j-2][i];
_t_139_ -= u_2[k][j+2][i];
_t_139_ += 8.0 * _t_140_;
_t_134_ += mu[k+2][j][i] * _t_142_;
_t_134_ -= mu[k][j][i] * _t_139_;
_t_134_ -= mu[k+3][j][i] * _t_144_;
double _t_135_ = stry[j] * strz[k+1];
double _t_96_ = strx[i] * stry[j];
double _t_133_ = _t_135_ * 1.0 / 144.0;
double _t_93_ = _t_133_ * _t_134_;
_t_123_ += 8.0 * _t_124_;
double _t_121_ = la[k+1][j-2][i] * _t_123_;
_t_126_ += 8.0 * _t_127_;
_t_121_ -= la[k+1][j-1][i] * _t_126_;
_t_129_ += 8.0 * _t_130_;
_t_121_ += la[k+1][j+1][i] * _t_129_;
_t_131_ += 8.0 * _t_132_;
_t_121_ -= la[k+1][j+2][i] * _t_131_;
double _t_120_ = _t_135_;
_t_93_ += _t_120_ * _t_121_;
double _t_98_ = -u_0[k+1][j-1][i-2];
_t_98_ += u_0[k+1][j+1][i-2];
double _t_113_ = u_0[k+1][j-1][i-2];
_t_113_ -= u_0[k+1][j-1][i+2];
double _t_116_ = u_0[k+1][j+1][i-2];
_t_116_ -= u_0[k+1][j+1][i+2];
double _t_106_ = -u_0[k+1][j-1][i+2];
_t_106_ += u_0[k+1][j+1][i+2];
double _t_97_ = 8.0 * _t_98_;
_t_97_ += u_0[k+1][j-2][i-2];
_t_97_ -= u_0[k+1][j+2][i-2];
double _t_110_ = u_0[k+1][j-2][i-2];
_t_110_ -= u_0[k+1][j-2][i+2];
double _t_118_ = u_0[k+1][j+2][i-2];
_t_118_ -= u_0[k+1][j+2][i+2];
double _t_105_ = u_0[k+1][j-2][i+2];
_t_105_ -= u_0[k+1][j+2][i+2];
_t_105_ += 8.0 * _t_106_;
double _t_95_ = mu[k+1][j][i-2] * _t_97_;
double _t_101_ = -u_0[k+1][j-1][i-1];
_t_101_ += u_0[k+1][j+1][i-1];
double _t_114_ = -u_0[k+1][j-1][i-1];
_t_114_ += u_0[k+1][j-1][i+1];
double _t_117_ = -u_0[k+1][j+1][i-1];
_t_117_ += u_0[k+1][j+1][i+1];
double _t_104_ = -u_0[k+1][j-1][i+1];
_t_104_ += u_0[k+1][j+1][i+1];
double _t_100_ = 8.0 * _t_101_;
_t_100_ += u_0[k+1][j-2][i-1];
_t_100_ -= u_0[k+1][j+2][i-1];
double _t_111_ = -u_0[k+1][j-2][i-1];
_t_111_ += u_0[k+1][j-2][i+1];
double _t_119_ = -u_0[k+1][j+2][i-1];
_t_119_ += u_0[k+1][j+2][i+1];
double _t_103_ = u_0[k+1][j-2][i+1];
_t_103_ -= u_0[k+1][j+2][i+1];
_t_103_ += 8.0 * _t_104_;
_t_95_ -= mu[k+1][j][i-1] * _t_100_;
_t_95_ += mu[k+1][j][i+1] * _t_103_;
_t_95_ -= mu[k+1][j][i+2] * _t_105_;
double _t_94_ = _t_96_ * 1.0 / 144.0;
_t_93_ += _t_94_ * _t_95_;
_t_110_ += 8.0 * _t_111_;
double _t_108_ = la[k+1][j-2][i] * _t_110_;
_t_113_ += 8.0 * _t_114_;
_t_108_ -= la[k+1][j-1][i] * _t_113_;
_t_116_ += 8.0 * _t_117_;
_t_108_ += la[k+1][j+1][i] * _t_116_;
_t_118_ += 8.0 * _t_119_;
_t_108_ -= la[k+1][j+2][i] * _t_118_;
double _t_107_ = _t_96_;
_t_93_ += _t_107_ * _t_108_;
b_r2 += _t_93_;
double _v_105_ = cof * b_r2;
double uacc_1kp1jc0ic0 = _v_105_;
uacc_1kp1jc0ic0 += a1 * uacc_1[k+1][j][i];
uacc_1[k][j][i] = uacc_1kc0jc0ic0;
uacc_1[k+1][j][i] = uacc_1kp1jc0ic0;
}
}
}
__global__ void __launch_bounds__ (128,2) sw4_3 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
double mux1, mux2, mux3, mux4, muy1, muy2, muy3, muy4, muz1, muz2, muz3, muz4;
double r1, r2, r3;
if (i>=2 && j>=2 && i<=N-3 && j<=N-3) {
#pragma unroll 10
for (int k=2; k<=N-3; k++) {
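// sw4_3 updates the z component (uacc_2), one k-plane per iteration.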
mux1 = mu[k][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i-2] * strx[i-2];
mux2 = mu[k][j][i-2] * strx[i-2] + mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i] + 3.0 * mu[k][j][i-1] * strx[i-1];
mux3 = mu[k][j][i-1] * strx[i-1] + mu[k][j][i+2] * strx[i+2] + 3.0 * mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i];
mux4 = mu[k][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i+2] * strx[i+2];
muy1 = mu[k][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k][j][i] * stry[j] -3e0 / 4 * mu[k][j-2][i] * stry[j-2];
muy2 = mu[k][j-2][i] * stry[j-2] + mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j] + 3.0 * mu[k][j-1][i] * stry[j-1];
muy3 = mu[k][j-1][i] * stry[j-1] + mu[k][j+2][i] * stry[j+2] + 3.0 * mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j];
muy4 = mu[k][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k][j][i] * stry[j] - 3e0 / 4 * mu[k][j+2][i] * stry[j+2];
muz1 = mu[k-1][j][i] * strz[k-1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 / 4 * mu[k-2][j][i] * strz[k-2];
muz2 = mu[k-2][j][i] * strz[k-2] + mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k] + 3.0 * mu[k-1][j][i] * strz[k-1];
muz3 = mu[k-1][j][i] * strz[k-1] + mu[k+2][j][i] * strz[k+2] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k];
muz4 = mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 /4 * mu[k+2][j][i] * strz[k+2];
r3 = 1e0 / 6 * (strx[i] * (mux1 * (u_2[k][j][i-2] - u_2[k][j][i]) + mux2 * (u_2[k][j][i-1] - u_2[k][j][i]) + mux3 * (u_2[k][j][i+1] - u_2[k][j][i]) + mux4 * (u_2[k][j][i+2] - u_2[k][j][i])) +
stry[j] * (muy1 * (u_2[k][j-2][i] - u_2[k][j][i]) + muy2 * (u_2[k][j-1][i] - u_2[k][j][i]) + muy3 * (u_2[k][j+1][i] - u_2[k][j][i]) + muy4 * (u_2[k][j+2][i] - u_2[k][j][i])) +
strz[k] * ((2 * muz1 + la[k-1][j][i] * strz[k-1] - 3e0 / 4 * la[k][j][i] * strz[k] - 3e0 / 4 * la[k-2][j][i] * strz[k-2]) * (u_2[k-2][j][i] - u_2[k][j][i]) +
(2 * muz2 + la[k-2][j][i] * strz[k-2] + la[k+1][j][i] * strz[k+1] + 3 * la[k][j][i] * strz[k] + 3 * la[k-1][j][i] * strz[k-1]) * (u_2[k-1][j][i] - u_2[k][j][i]) +
(2 * muz3 + la[k-1][j][i] * strz[k-1] + la[k+2][j][i] * strz[k+2] + 3 * la[k+1][j][i] * strz[k+1] + 3 * la[k][j][i] * strz[k]) * (u_2[k+1][j][i] - u_2[k][j][i]) +
(2 * muz4 + la[k+1][j][i] * strz[k+1] - 3e0 / 4 * la[k][j][i] * strz[k] - 3e0 / 4 * la[k+2][j][i] * strz[k+2]) * (u_2[k+2][j][i] - u_2[k][j][i])));
r3 += strx[i] * strz[k] * (1e0 / 144) * (mu[k][j][i-2] * (u_0[k-2][j][i-2] - u_0[k+2][j][i-2] + 8 * (-u_0[k-1][j][i-2] + u_0[k+1][j][i-2])) - 8 * (mu[k][j][i-1] * (u_0[k-2][j][i-1] - u_0[k+2][j][i-1] + 8 * (-u_0[k-1][j][i-1] + u_0[k+1][j][i-1]))) + 8 * (mu[k][j][i+1] * (u_0[k-2][j][i+1] - u_0[k+2][j][i+1] + 8 * (-u_0[k-1][j][i+1] + u_0[k+1][j][i+1]))) - (mu[k][j][i+2] * (u_0[k-2][j][i+2] - u_0[k+2][j][i+2] + 8 * (-u_0[k-1][j][i+2] + u_0[k+1][j][i+2]))));
r3 += stry[j] * strz[k] * (1e0 / 144) * (mu[k][j-2][i] * (u_1[k-2][j-2][i] - u_1[k+2][j-2][i] + 8 * (-u_1[k-1][j-2][i] + u_1[k+1][j-2][i])) - 8 * (mu[k][j-1][i] * (u_1[k-2][j-1][i] - u_1[k+2][j-1][i] + 8 * (-u_1[k-1][j-1][i] + u_1[k+1][j-1][i]))) + 8 * (mu[k][j+1][i] * (u_1[k-2][j+1][i] - u_1[k+2][j+1][i] + 8 * (-u_1[k-1][j+1][i] + u_1[k+1][j+1][i]))) - (mu[k][j+2][i] * (u_1[k-2][j+2][i] - u_1[k+2][j+2][i] + 8 * (-u_1[k-1][j+2][i] + u_1[k+1][j+2][i]))));
r3 += strx[i] * strz[k] * (1e0 / 144) * (la[k-2][j][i] * (u_0[k-2][j][i-2] - u_0[k-2][j][i+2] + 8 * (-u_0[k-2][j][i-1] + u_0[k-2][j][i+1])) - 8 * (la[k-1][j][i] * (u_0[k-1][j][i-2] - u_0[k-1][j][i+2] + 8 * (-u_0[k-1][j][i-1] + u_0[k-1][j][i+1]))) + 8 * (la[k+1][j][i] * (u_0[k+1][j][i-2] - u_0[k+1][j][i+2] + 8 * (-u_0[k+1][j][i-1] + u_0[k+1][j][i+1]))) - (la[k+2][j][i] * (u_0[k+2][j][i-2] - u_0[k+2][j][i+2] + 8 * (-u_0[k+2][j][i-1] + u_0[k+2][j][i+1]))));
r3 += stry[j] * strz[k] * (1e0 / 144) * (la[k-2][j][i] * (u_1[k-2][j-2][i] - u_1[k-2][j+2][i] + 8 * (-u_1[k-2][j-1][i] + u_1[k-2][j+1][i])) - 8 * (la[k-1][j][i] * (u_1[k-1][j-2][i] - u_1[k-1][j+2][i] + 8 * (-u_1[k-1][j-1][i] + u_1[k-1][j+1][i]))) + 8 * (la[k+1][j][i] * (u_1[k+1][j-2][i] - u_1[k+1][j+2][i] + 8 * (-u_1[k+1][j-1][i] + u_1[k+1][j+1][i]))) - (la[k+2][j][i] * (u_1[k+2][j-2][i] - u_1[k+2][j+2][i] + 8 * (-u_1[k+2][j-1][i] + u_1[k+2][j+1][i]))));
uacc_2[k][j][i] = a1 * uacc_2[k][j][i] + cof * r3;
}
}
}
extern "C" void host_code (double *h_uacc_0, double *h_uacc_1, double *h_uacc_2, double *h_u_0, double *h_u_1, double *h_u_2, double *h_mu, double *h_la, double *h_strx, double *h_stry, double *h_strz, int N) {
double *uacc_0;
cudaMalloc (&uacc_0, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for uacc_0\n");
cudaMemcpy (uacc_0, h_uacc_0, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *uacc_1;
cudaMalloc (&uacc_1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for uacc_1\n");
cudaMemcpy (uacc_1, h_uacc_1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *uacc_2;
cudaMalloc (&uacc_2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for uacc_2\n");
cudaMemcpy (uacc_2, h_uacc_2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u_0;
cudaMalloc (&u_0, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u_0\n");
cudaMemcpy (u_0, h_u_0, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u_1;
cudaMalloc (&u_1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u_1\n");
cudaMemcpy (u_1, h_u_1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u_2;
cudaMalloc (&u_2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u_2\n");
cudaMemcpy (u_2, h_u_2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *mu;
cudaMalloc (&mu, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for mu\n");
cudaMemcpy (mu, h_mu, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *la;
cudaMalloc (&la, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for la\n");
cudaMemcpy (la, h_la, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *strx;
cudaMalloc (&strx, sizeof(double)*N);
check_error ("Failed to allocate device memory for strx\n");
cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice);
double *stry;
cudaMalloc (&stry, sizeof(double)*N);
check_error ("Failed to allocate device memory for stry\n");
cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice);
double *strz;
cudaMalloc (&strz, sizeof(double)*N);
check_error ("Failed to allocate device memory for strz\n");
cudaMemcpy (strz, h_strz, sizeof(double)*N, cudaMemcpyHostToDevice);
dim3 blockconfig (16, 8);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
sw4_1 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
sw4_2 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
sw4_3 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
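// The kernel launches above are asynchronous; the device-to-host copies below
// synchronize implicitly, but a launch failure would only surface through a
// later API call since the launches themselves are not error-checked.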
cudaMemcpy (h_uacc_0, uacc_0, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_uacc_1, uacc_1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_uacc_2, uacc_2, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
cudaFree (uacc_0);
cudaFree (uacc_1);
cudaFree (uacc_2);
cudaFree (u_0);
cudaFree (u_1);
cudaFree (u_2);
cudaFree (mu);
cudaFree (la);
cudaFree (strx);
cudaFree (stry);
cudaFree (strz);
}
|
7b4a7e9eacf1c2837b99bdd07d39351c86feba57.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
//#include <cutil.h>
// Includes
//#include <stdio.h>
// includes, project
//#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
//#define ITERATIONS 40
//#include "../include/ContAcq-IntClk.h"
// Variables
double* h_A;
double* h_B;
double* h_C;
double* d_A;
double* d_B;
double* d_C;
//bool noprompt = false;
//unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(double*, int);
//void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal1(const double* A, const double* B, double* C, int iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
double Value1=0;
double Value2=0;
double Value3=0;
double Value=0;
double I1=A[i];
double I2=B[i];
#pragma unroll 100
// Excessive Addition access
for(unsigned k=0; k<iterations;k++) {
Value1=I1+I2;
Value3=I1-I2;
Value1+=Value2;
Value1+=Value2;
Value2=Value3-Value1;
Value1=Value2+Value3;
}
__syncthreads();
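// Writing the result back through C keeps the compiler from discarding the
// dependent add/subtract chain above as dead code.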
Value=Value1;
C[i]=Value+Value2;
}
int main(int argc, char** argv)
{
int iterations;
if(argc!=2) {
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else {
iterations = atoi(argv[1]);
}
printf("Power Microbenchmarks with iterations %d\n",iterations);
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(double);
// Allocate input vectors h_A and h_B in host memory
h_A = (double*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (double*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (double*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
printf("after\n");
hipEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( PowerKernal1), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, iterations);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
printf("execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
hipDeviceSynchronize();
/*CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal1<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif*/
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random entries in [0, 1].
void RandomInit(double* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (double)RAND_MAX;
}
}
|
7b4a7e9eacf1c2837b99bdd07d39351c86feba57.cu
|
#include <stdio.h>
#include <stdlib.h>
//#include <cutil.h>
// Includes
//#include <stdio.h>
// includes, project
//#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
//#define ITERATIONS 40
//#include "../include/ContAcq-IntClk.h"
// Variables
double* h_A;
double* h_B;
double* h_C;
double* d_A;
double* d_B;
double* d_C;
//bool noprompt = false;
//unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(double*, int);
//void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal1(const double* A, const double* B, double* C, int iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
double Value1=0;
double Value2=0;
double Value3=0;
double Value=0;
double I1=A[i];
double I2=B[i];
#pragma unroll 100
// Excessive Addition access
for(unsigned k=0; k<iterations;k++) {
Value1=I1+I2;
Value3=I1-I2;
Value1+=Value2;
Value1+=Value2;
Value2=Value3-Value1;
Value1=Value2+Value3;
}
__syncthreads();
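// Writing the result back through C keeps the compiler from discarding the
// dependent add/subtract chain above as dead code.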
Value=Value1;
C[i]=Value+Value2;
}
int main(int argc, char** argv)
{
int iterations;
if(argc!=2) {
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else {
iterations = atoi(argv[1]);
}
printf("Power Microbenchmarks with iterations %d\n",iterations);
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(double);
// Allocate input vectors h_A and h_B in host memory
h_A = (double*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (double*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (double*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
printf("after\n");
cudaEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
checkCudaErrors(cudaEventRecord(start));
PowerKernal1<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, iterations);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
cudaDeviceSynchronize();
/*CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal1<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif*/
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random entries in [0, 1].
void RandomInit(double* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (double)RAND_MAX;
}
}
|
3ca14666938e3d64cc13b745f2bb18cdcd3bd5dc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "quickSort.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
int *dfirst = NULL;
hipMalloc(&dfirst, XSIZE*YSIZE);
int *dlast = NULL;
hipMalloc(&dlast, XSIZE*YSIZE);
int *list = NULL;
hipMalloc(&list, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
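// hipFree(0) forces runtime initialization before the timed loops. Note that
// the hipMalloc calls above size each buffer as XSIZE*YSIZE bytes; if quickSort
// treats them as int arrays of that many elements, a factor of sizeof(int) is
// presumably missing.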
hipFree(0);
hipLaunchKernelGGL(quickSort, dim3(gridBlock), dim3(threadBlock), 0, 0, x, dfirst, dlast, list);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(quickSort, dim3(gridBlock), dim3(threadBlock), 0, 0, x, dfirst, dlast, list);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(quickSort, dim3(gridBlock), dim3(threadBlock), 0, 0, x, dfirst, dlast, list);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
3ca14666938e3d64cc13b745f2bb18cdcd3bd5dc.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "quickSort.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
int *dfirst = NULL;
cudaMalloc(&dfirst, XSIZE*YSIZE);
int *dlast = NULL;
cudaMalloc(&dlast, XSIZE*YSIZE);
int *list = NULL;
cudaMalloc(&list, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
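// cudaFree(0) forces runtime initialization before the timed loops. Note that
// the cudaMalloc calls above size each buffer as XSIZE*YSIZE bytes; if quickSort
// treats them as int arrays of that many elements, a factor of sizeof(int) is
// presumably missing.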
cudaFree(0);
quickSort<<<gridBlock,threadBlock>>>(x,dfirst,dlast,list);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
quickSort<<<gridBlock,threadBlock>>>(x,dfirst,dlast,list);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
quickSort<<<gridBlock,threadBlock>>>(x,dfirst,dlast,list);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
07bca6399b1fc107a89c582368afd31c03745c11.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
using namespace std;
__global__
void fun(const int N)
{
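// A runtime-sized local array (double a[N]) is not valid in device code, so the
// scratch space is taken from dynamically sized shared memory instead, with the
// byte count supplied at launch time.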
extern __shared__ double a[];
a[N-1] = N-1;
}
int main()
{
int N = 10;
hipLaunchKernelGGL(fun, dim3(1), dim3(1), N * sizeof(double), 0, N);
}
|
07bca6399b1fc107a89c582368afd31c03745c11.cu
|
#include <iostream>
using namespace std;
__global__
void fun(const int N)
{
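// A runtime-sized local array (double a[N]) is not valid in device code, so the
// scratch space is taken from dynamically sized shared memory instead, with the
// byte count supplied at launch time.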
extern __shared__ double a[];
a[N-1] = N-1;
}
int main()
{
int N = 10;
fun<<<1, 1, N * sizeof(double)>>>(N);
}
|
7276575ac4b8ff8139a7888f1eb249e18a6c4527.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cl_device_assist.cuh"
#include "cl_interface_shared.h"
__global__ void clcuda_func_manualbound(
float *var_A,
int32_t var_N,
CommonKernelData data
) {
if (blockIdx.x * blockDim.x + threadIdx.x >= data.totalX) return;
if (blockIdx.y * blockDim.y + threadIdx.y >= data.totalY) return;
if (blockIdx.z * blockDim.z + threadIdx.z >= data.totalZ) return;
size_t var_global_id;
var_global_id = clcuda_builtin_get_global_id(0U, data);
if (var_global_id < var_N)
{
var_A[var_global_id] = 1.0;
}
}
KERNEL_LAUNCHER void clcuda_launcher_manualbound(
struct _cl_kernel *desc,
float *elapsedMs
) {
dim3 num_grids = dim3(desc->gridX, desc->gridY, desc->gridZ);
dim3 local_size = dim3(desc->localX, desc->localY, desc->localZ);
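// The launch is bracketed by an event pair so the measured kernel time can be
// returned to the caller through elapsedMs.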
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start);
hipLaunchKernelGGL(( clcuda_func_manualbound), dim3(num_grids), dim3(local_size), 0, 0,
(float*) desc->arg_data[0],
*(int32_t*) desc->arg_data[1],
CommonKernelData(desc->totalX, desc->totalY, desc->totalZ)
);
hipEventRecord(end);
hipEventSynchronize(end);
hipEventElapsedTime(elapsedMs, start, end);
}
|
7276575ac4b8ff8139a7888f1eb249e18a6c4527.cu
|
#include "cl_device_assist.cuh"
#include "cl_interface_shared.h"
__global__ void clcuda_func_manualbound(
float *var_A,
int32_t var_N,
CommonKernelData data
) {
if (blockIdx.x * blockDim.x + threadIdx.x >= data.totalX) return;
if (blockIdx.y * blockDim.y + threadIdx.y >= data.totalY) return;
if (blockIdx.z * blockDim.z + threadIdx.z >= data.totalZ) return;
size_t var_global_id;
var_global_id = clcuda_builtin_get_global_id(0U, data);
if (var_global_id < var_N)
{
var_A[var_global_id] = 1.0;
}
}
KERNEL_LAUNCHER void clcuda_launcher_manualbound(
struct _cl_kernel *desc,
float *elapsedMs
) {
dim3 num_grids = dim3(desc->gridX, desc->gridY, desc->gridZ);
dim3 local_size = dim3(desc->localX, desc->localY, desc->localZ);
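// The launch is bracketed by an event pair so the measured kernel time can be
// returned to the caller through elapsedMs.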
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start);
clcuda_func_manualbound<<<num_grids, local_size>>>(
(float*) desc->arg_data[0],
*(int32_t*) desc->arg_data[1],
CommonKernelData(desc->totalX, desc->totalY, desc->totalZ)
);
cudaEventRecord(end);
cudaEventSynchronize(end);
cudaEventElapsedTime(elapsedMs, start, end);
}
|
24987b4cad1799d1d87777cee5813868d470904f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
//static hipArray* tex_array;
//m_q: Number of vertical interior grid points, k_q: Number of horizontal grid points
__global__
void k_advection_2D_f32(float dt, float dy, float dx, int m_q, int k_q, float2* U, int pitch_u, hipTextureObject_t Q, float* C, int pitch_c){
const int TILE_WIDTH=8;
const int TILE_HEIGHT=8;
int idy=blockIdx.y*blockDim.y*TILE_HEIGHT+threadIdx.y*TILE_HEIGHT;
int idx=blockIdx.x*blockDim.x*TILE_WIDTH+threadIdx.x*TILE_WIDTH;
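// Each thread covers a TILE_HEIGHT x TILE_WIDTH patch and then strides by the
// full grid footprint, so the whole m_q x k_q interior is processed regardless
// of the launch geometry.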
int i=0;
float2 p;
C=(float*) ((char*)C+idy*pitch_c);
U=(float2*) ((char*)U+idy*pitch_u);
float2* U_ptr=U;
float* C_ptr=C;
while (i<m_q){
for (int i1=0;i1<TILE_HEIGHT;i1++){
int fy=idy+i1;
if ((fy+i)<m_q){
int j=0;
while(j<k_q){
//printf("y:%d\n",fy);
for (int i2=0;i2<TILE_WIDTH;i2++){
int fx=idx+i2;
if ((fx+j)<k_q){
//printf("i: %d j: %d y: %d x:%d\n",i,j,fy,fx);
float2 v=U_ptr[fx+j];
p.x=(fx+j+1.5f)-(dt*v.x*dx);
p.y=(fy+i+1.5f)-(dt*v.y*dx);
float q=tex2D<float>(Q,p.x,p.y);
C_ptr[fx+j]=q;
}
else{
break;
}
}
j+=gridDim.x*blockDim.x*TILE_WIDTH;
}
}
C_ptr=(float*) ((char*)C_ptr+pitch_c);
U_ptr=(float2*) ((char*)U_ptr+pitch_u);
}
i+=gridDim.y*blockDim.y*TILE_HEIGHT;
C_ptr=(float*) ((char*)C+i*pitch_c);
U_ptr=(float2*) ((char*)U+i*pitch_u);
}
}
__global__
void k_advection_2d_f32(float dt, float dy, float dx, int m_q, int k_q, float2* U, int pitch_u, hipTextureObject_t Q, float2* C, int pitch_c){
const int TILE_WIDTH=8;
const int TILE_HEIGHT=8;
int idy=blockIdx.y*blockDim.y*TILE_HEIGHT+threadIdx.y*TILE_HEIGHT;
int idx=blockIdx.x*blockDim.x*TILE_WIDTH+threadIdx.x*TILE_WIDTH;
int i=0;
float2 p;
C=(float2*) ((char*)C+idy*pitch_c);
U=(float2*) ((char*)U+idy*pitch_u);
float2* U_ptr=U;
float2* C_ptr=C;
while (i<m_q){
for (int i1=0;i1<TILE_HEIGHT;i1++){
int fy=idy+i1;
if ((fy+i)<m_q){
int j=0;
while(j<k_q){
//printf("y:%d\n",fy);
for (int i2=0;i2<TILE_WIDTH;i2++){
int fx=idx+i2;
if ((fx+j)<k_q){
//printf("i: %d j: %d y: %d x:%d\n",i,j,fy,fx);
float2 v=U_ptr[fx+j];
p.x=(fx+j+1.5f)-(dt*v.x*dx);// we add 1.5 because of boundary conditions offset, else it would be 0.5
p.y=(fy+i+1.5f)-(dt*v.y*dx);// we add 1.5 because of boundary conditions offset, else it would be 0.5
float2 q=tex2D<float2>(Q,p.x,p.y);
C_ptr[fx+j]=q;
}
else{
break;
}
}
j+=gridDim.x*blockDim.x*TILE_WIDTH;
}
}
C_ptr=(float2*) ((char*)C_ptr+pitch_c);
U_ptr=(float2*) ((char*)U_ptr+pitch_u);
}
i+=gridDim.y*blockDim.y*TILE_HEIGHT;
C_ptr=(float2*) ((char*)C+i*pitch_c);
U_ptr=(float2*) ((char*)U+i*pitch_u);
}
}
__host__
void advection_2D_f32_device(float dt, float dy, float dx, int m_q, int k_q, float2* U_d, int pitch_u, float* Q_d, int pitch_q, float* C_d, int pitch_c){
if ((m_q<3) || (k_q<3)){
return;
}
//Create Resource description
hipResourceDesc resDesc;
memset(&resDesc,0,sizeof(resDesc));
resDesc.resType = hipResourceTypePitch2D;
resDesc.res.pitch2D.devPtr=Q_d;
resDesc.res.pitch2D.width=k_q;
resDesc.res.pitch2D.height=m_q;
resDesc.res.pitch2D.pitchInBytes=pitch_q;
resDesc.res.pitch2D.desc=hipCreateChannelDesc<float>(); //is equivalent to hipCreateChannelDesc<float>()
/*
resDesc.res.pitch2D.desc=hipCreateChannelDesc(32,32,0,0,hipChannelFormatKindFloat); //is equivalent to hipCreateChannelDesc<float2>()
*/
//Create Texture description
hipTextureDesc texDesc;
memset(&texDesc,0,sizeof(texDesc));
texDesc.normalizedCoords = false;
texDesc.filterMode = hipFilterModeLinear;
texDesc.readMode=hipReadModeElementType;
texDesc.addressMode[0] = hipAddressModeClamp;
//Create Texture Object
hipTextureObject_t Q_tex;
hipError_t error1=hipCreateTextureObject(&Q_tex, &resDesc, &texDesc, NULL);
if (error1 !=hipSuccess){
printf("Errorcode: %d\n",error1);
}
printf("w, h: %d,%d\n",k_q,m_q);
float* C_ptr=(float*) ((char*)C_d+pitch_c)+1;
float2* U_ptr=(float2*) ((char*)U_d+pitch_u)+1;
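// The +pitch and +1 offsets skip the one-cell boundary layer. Note that the
// launch below passes dy for both the dy and dx arguments (the spacing is
// presumably isotropic here) and uses a single 8x4 block, relying on the
// kernel's grid-stride loops for full coverage.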
hipLaunchKernelGGL(k_advection_2D_f32, dim3(1,1,1), dim3(8,4,1), 0, 0, dt, dy, dy, m_q-2, k_q-2, U_ptr, pitch_u, Q_tex, C_ptr, pitch_c);
}
|
24987b4cad1799d1d87777cee5813868d470904f.cu
|
#include <stdio.h>
//static cudaArray* tex_array;
//m_q: Number of vertical interior grid points, k_q: Number of horizontal grid points
__global__
void k_advection_2D_f32(float dt, float dy, float dx, int m_q, int k_q, float2* U, int pitch_u, cudaTextureObject_t Q, float* C, int pitch_c){
const int TILE_WIDTH=8;
const int TILE_HEIGHT=8;
int idy=blockIdx.y*blockDim.y*TILE_HEIGHT+threadIdx.y*TILE_HEIGHT;
int idx=blockIdx.x*blockDim.x*TILE_WIDTH+threadIdx.x*TILE_WIDTH;
int i=0;
float2 p;
C=(float*) ((char*)C+idy*pitch_c);
U=(float2*) ((char*)U+idy*pitch_u);
float2* U_ptr=U;
float* C_ptr=C;
while (i<m_q){
for (int i1=0;i1<TILE_HEIGHT;i1++){
int fy=idy+i1;
if ((fy+i)<m_q){
int j=0;
while(j<k_q){
//printf("y:%d\n",fy);
for (int i2=0;i2<TILE_WIDTH;i2++){
int fx=idx+i2;
if ((fx+j)<k_q){
//printf("i: %d j: %d y: %d x:%d\n",i,j,fy,fx);
float2 v=U_ptr[fx+j];
p.x=(fx+j+1.5f)-(dt*v.x*dx);
p.y=(fy+i+1.5f)-(dt*v.y*dx);
float q=tex2D<float>(Q,p.x,p.y);
C_ptr[fx+j]=q;
}
else{
break;
}
}
j+=gridDim.x*blockDim.x*TILE_WIDTH;
}
}
C_ptr=(float*) ((char*)C_ptr+pitch_c);
U_ptr=(float2*) ((char*)U_ptr+pitch_u);
}
i+=gridDim.y*blockDim.y*TILE_HEIGHT;
C_ptr=(float*) ((char*)C+i*pitch_c);
U_ptr=(float2*) ((char*)U+i*pitch_u);
}
}
__global__
void k_advection_2d_f32(float dt, float dy, float dx, int m_q, int k_q, float2* U, int pitch_u, cudaTextureObject_t Q, float2* C, int pitch_c){
const int TILE_WIDTH=8;
const int TILE_HEIGHT=8;
int idy=blockIdx.y*blockDim.y*TILE_HEIGHT+threadIdx.y*TILE_HEIGHT;
int idx=blockIdx.x*blockDim.x*TILE_WIDTH+threadIdx.x*TILE_WIDTH;
int i=0;
float2 p;
C=(float2*) ((char*)C+idy*pitch_c);
U=(float2*) ((char*)U+idy*pitch_u);
float2* U_ptr=U;
float2* C_ptr=C;
while (i<m_q){
for (int i1=0;i1<TILE_HEIGHT;i1++){
int fy=idy+i1;
if ((fy+i)<m_q){
int j=0;
while(j<k_q){
//printf("y:%d\n",fy);
for (int i2=0;i2<TILE_WIDTH;i2++){
int fx=idx+i2;
if ((fx+j)<k_q){
//printf("i: %d j: %d y: %d x:%d\n",i,j,fy,fx);
float2 v=U_ptr[fx+j];
p.x=(fx+j+1.5f)-(dt*v.x*dx);// we add 1.5 because of boundary conditions offset, else it would be 0.5
p.y=(fy+i+1.5f)-(dt*v.y*dx);// we add 1.5 because of boundary conditions offset, else it would be 0.5
float2 q=tex2D<float2>(Q,p.x,p.y);
C_ptr[fx+j]=q;
}
else{
break;
}
}
j+=gridDim.x*blockDim.x*TILE_WIDTH;
}
}
C_ptr=(float2*) ((char*)C_ptr+pitch_c);
U_ptr=(float2*) ((char*)U_ptr+pitch_u);
}
i+=gridDim.y*blockDim.y*TILE_HEIGHT;
C_ptr=(float2*) ((char*)C+i*pitch_c);
U_ptr=(float2*) ((char*)U+i*pitch_u);
}
}
__host__
void advection_2D_f32_device(float dt, float dy, float dx, int m_q, int k_q, float2* U_d, int pitch_u, float* Q_d, int pitch_q, float* C_d, int pitch_c){
if ((m_q<3) || (k_q<3)){
return;
}
//Create Resource description
cudaResourceDesc resDesc;
memset(&resDesc,0,sizeof(resDesc));
resDesc.resType = cudaResourceTypePitch2D;
resDesc.res.pitch2D.devPtr=Q_d;
resDesc.res.pitch2D.width=k_q;
resDesc.res.pitch2D.height=m_q;
resDesc.res.pitch2D.pitchInBytes=pitch_q;
resDesc.res.pitch2D.desc=cudaCreateChannelDesc<float>(); //is equivalent to cudaCreateChannelDesc(32,0,0,0,cudaChannelFormatKindFloat)
/*
resDesc.res.pitch2D.desc=cudaCreateChannelDesc(32,32,0,0,cudaChannelFormatKindFloat); //is equivalent to cudaCreateChannelDesc<float2>()
*/
//Create Texture description
cudaTextureDesc texDesc;
memset(&texDesc,0,sizeof(texDesc));
texDesc.normalizedCoords = false;
texDesc.filterMode = cudaFilterModeLinear;
texDesc.readMode=cudaReadModeElementType;
texDesc.addressMode[0] = cudaAddressModeClamp;
//Create Texture Object
cudaTextureObject_t Q_tex;
cudaError_t error1=cudaCreateTextureObject(&Q_tex, &resDesc, &texDesc, NULL);
if (error1 !=cudaSuccess){
printf("Errorcode: %d\n",error1);
}
printf("w, h: %d,%d\n",k_q,m_q);
float* C_ptr=(float*) ((char*)C_d+pitch_c)+1;
float2* U_ptr=(float2*) ((char*)U_d+pitch_u)+1;
k_advection_2D_f32<<<dim3(1,1,1),dim3(8,4,1)>>>(dt,dy,dy,m_q-2,k_q-2,U_ptr,pitch_u,Q_tex,C_ptr,pitch_c);
}
|
7d3cdf705310c7205da309e1156365d1b3cc4ab8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
thrust::host_vector<int> thrustHst_idata(idata, idata+n);
thrust::device_vector<int> thrustDev_idata(thrustHst_idata);
thrust::device_vector<int> thrustDev_odata(n);
float time = 0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
thrust::exclusive_scan(thrustDev_idata.begin(), thrustDev_idata.end(), thrustDev_odata.begin());
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("Thrust scan time is %.4f ms \n", time);
thrust::copy(thrustDev_odata.begin(), thrustDev_odata.end(), odata);
}
}
}
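/*
 * Hedged usage sketch (not part of the original file): how the wrapper above
 * might be called. The array contents and size are illustrative assumptions;
 * only StreamCompaction::Thrust::scan and its exclusive-scan semantics come
 * from the code above.
 */
static void thrust_scan_example()
{
    const int n = 8;
    int idata[n] = { 3, 1, 7, 0, 4, 1, 6, 3 };
    int odata[n] = { 0 };
    StreamCompaction::Thrust::scan(n, odata, idata);
    // odata now holds the exclusive prefix sums: 0, 3, 4, 11, 11, 15, 16, 22
}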
|
7d3cdf705310c7205da309e1156365d1b3cc4ab8.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
thrust::host_vector<int> thrustHst_idata(idata, idata+n);
thrust::device_vector<int> thrustDev_idata(thrustHst_idata);
thrust::device_vector<int> thrustDev_odata(n);
float time = 0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
thrust::exclusive_scan(thrustDev_idata.begin(), thrustDev_idata.end(), thrustDev_odata.begin());
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("Thrust scan time is %.4f ms \n", time);
thrust::copy(thrustDev_odata.begin(), thrustDev_odata.end(), odata);
}
}
}
|
8d5936ef366771078726bf9e52aeb4fabee35ff1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <quda_internal.h>
#include <quda_matrix.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <launch_kernel.cuh>
#include <unitarization_links.h>
#include <comm_quda.h>
#include <gauge_fix_ovr_extra.h>
#include <gauge_fix_ovr_hit_devf.cuh>
#include <cub_helper.cuh>
#include <index_helper.cuh>
namespace quda {
#ifdef GPU_GAUGE_ALG
static int numParams = 18;
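// 18 = number of real parameters per SU(3) link (a 3x3 complex matrix); used below for rough byte-count estimates.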
#define LAUNCH_KERNEL_GAUGEFIX(kernel, tp, stream, arg, parity, ...) \
if (tp.aux.x == 0) { \
switch (tp.block.x) { \
case 256:hipLaunchKernelGGL(( kernel<0, 32, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 512:hipLaunchKernelGGL(( kernel<0, 64, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 768:hipLaunchKernelGGL(( kernel<0, 96, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 1024:hipLaunchKernelGGL(( kernel<0, 128, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
default: errorQuda("%s not implemented for %d threads", #kernel, tp.block.x); \
} \
} else if (tp.aux.x == 1) { \
switch (tp.block.x) { \
case 256:hipLaunchKernelGGL(( kernel<1, 32, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 512:hipLaunchKernelGGL(( kernel<1, 64, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 768:hipLaunchKernelGGL(( kernel<1, 96, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 1024:hipLaunchKernelGGL(( kernel<1, 128, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
default: errorQuda("%s not implemented for %d threads", #kernel, tp.block.x); \
} \
} else if (tp.aux.x == 2) { \
switch (tp.block.x) { \
case 256:hipLaunchKernelGGL(( kernel<2, 32, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 512:hipLaunchKernelGGL(( kernel<2, 64, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 768:hipLaunchKernelGGL(( kernel<2, 96, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 1024:hipLaunchKernelGGL(( kernel<2, 128, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
default: errorQuda("%s not implemented for %d threads", #kernel, tp.block.x); \
} \
} else if (tp.aux.x == 3) { \
switch (tp.block.x) { \
case 128:hipLaunchKernelGGL(( kernel<3, 32, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 256:hipLaunchKernelGGL(( kernel<3, 64, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 384:hipLaunchKernelGGL(( kernel<3, 96, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 512:hipLaunchKernelGGL(( kernel<3, 128, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 640:hipLaunchKernelGGL(( kernel<3, 160, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 768:hipLaunchKernelGGL(( kernel<3, 192, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 896:hipLaunchKernelGGL(( kernel<3, 224, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 1024:hipLaunchKernelGGL(( kernel<3, 256, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
default: errorQuda("%s not implemented for %d threads", #kernel, tp.block.x); \
} \
} else if (tp.aux.x == 4) { \
switch (tp.block.x) { \
case 128:hipLaunchKernelGGL(( kernel<4, 32, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 256:hipLaunchKernelGGL(( kernel<4, 64, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 384:hipLaunchKernelGGL(( kernel<4, 96, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 512:hipLaunchKernelGGL(( kernel<4, 128, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 640:hipLaunchKernelGGL(( kernel<4, 160, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 768:hipLaunchKernelGGL(( kernel<4, 192, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 896:hipLaunchKernelGGL(( kernel<4, 224, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 1024:hipLaunchKernelGGL(( kernel<4, 256, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
default: errorQuda("%s not implemented for %d threads", #kernel, tp.block.x); \
} \
} else if (tp.aux.x == 5) { \
switch (tp.block.x) { \
case 128:hipLaunchKernelGGL(( kernel<5, 32, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 256:hipLaunchKernelGGL(( kernel<5, 64, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 384:hipLaunchKernelGGL(( kernel<5, 96, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 512:hipLaunchKernelGGL(( kernel<5, 128, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 640:hipLaunchKernelGGL(( kernel<5, 160, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 768:hipLaunchKernelGGL(( kernel<5, 192, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 896:hipLaunchKernelGGL(( kernel<5, 224, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
case 1024:hipLaunchKernelGGL(( kernel<5, 256, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \
default: errorQuda("%s not implemented for %d threads", #kernel, tp.block.x); \
} \
} else { \
errorQuda("Not implemented for %d", tp.aux.x); \
}
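/*
 * The macro above dispatches the gauge-fixing kernels on two tuned parameters:
 * tp.aux.x selects the implementation variant (0-2 use 8 threads per lattice
 * site, 3-5 use 4 threads per site, with/without atomicAdd in the shared-memory
 * reduction) and tp.block.x selects the per-site block size that is passed as
 * the kernel's second template argument.
 */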
/**
* @brief container to pass parameters for the gauge fixing quality kernel
*/
template <typename Gauge>
struct GaugeFixQualityArg : public ReduceArg<double2> {
int threads; // number of active threads required
int X[4]; // grid dimensions
#ifdef MULTI_GPU
int border[4];
#endif
Gauge dataOr;
GaugeFixQualityArg(const Gauge &dataOr, const cudaGaugeField &data)
: ReduceArg<double2>(), dataOr(dataOr) {
for ( int dir = 0; dir < 4; ++dir ) {
X[dir] = data.X()[dir] - data.R()[dir] * 2;
#ifdef MULTI_GPU
border[dir] = data.R()[dir];
#endif
}
threads = X[0]*X[1]*X[2]*X[3]/2;
}
double getAction(){ return result_h[0].x; }
double getTheta(){ return result_h[0].y; }
};
/**
* @brief Measure gauge fixing quality
*/
template<int blockSize, typename Float, typename Gauge, int gauge_dir>
__global__ void computeFix_quality(GaugeFixQualityArg<Gauge> argQ){
typedef complex<Float> Cmplx;
int idx_cb = threadIdx.x + blockIdx.x * blockDim.x;
int parity = threadIdx.y;
double2 data = make_double2(0.0,0.0);
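// data.x accumulates this site's contribution to the gauge-fixing functional (action),
// data.y its contribution to theta (the gauge-condition violation); both are reduced
// over the lattice below and normalized in GaugeFixQuality::apply().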
while (idx_cb < argQ.threads) {
int X[4];
#pragma unroll
for ( int dr = 0; dr < 4; ++dr ) X[dr] = argQ.X[dr];
int x[4];
getCoords(x, idx_cb, X, parity);
#ifdef MULTI_GPU
#pragma unroll
for ( int dr = 0; dr < 4; ++dr ) {
x[dr] += argQ.border[dr];
X[dr] += 2 * argQ.border[dr];
}
#endif
Matrix<Cmplx,3> delta;
setZero(&delta);
//load upward links
for ( int mu = 0; mu < gauge_dir; mu++ ) {
Matrix<Cmplx,3> U;
argQ.dataOr.load((Float *)(U.data), linkIndex(x, X), mu, parity);
delta -= U;
}
//18*gauge_dir
data.x += -delta(0, 0).x - delta(1, 1).x - delta(2, 2).x;
//2
//load downward links
for ( int mu = 0; mu < gauge_dir; mu++ ) {
Matrix<Cmplx,3> U;
argQ.dataOr.load((Float*)(U.data),linkIndexM1(x,X,mu), mu, 1 - parity);
delta += U;
}
//18*gauge_dir
delta -= conj(delta);
//18
SubTraceUnit(delta);
//12
data.y += getRealTraceUVdagger(delta, delta);
//35
//T=36*gauge_dir+65
idx_cb += blockDim.x * gridDim.x;
}
reduce2d<blockSize,2>(argQ, data);
}
/**
* @brief Tunable object for the gauge fixing quality kernel
*/
template<typename Float, typename Gauge, int gauge_dir>
class GaugeFixQuality : TunableLocalParity {
GaugeFixQualityArg<Gauge> argQ;
mutable char aux_string[128]; // used as a label in the autotuner
private:
bool tuneGridDim() const { return true; }
public:
GaugeFixQuality(GaugeFixQualityArg<Gauge> &argQ) : argQ(argQ) { }
~GaugeFixQuality () { }
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
argQ.result_h[0] = make_double2(0.0,0.0);
LAUNCH_KERNEL_LOCAL_PARITY(computeFix_quality, tp, stream, argQ, Float, Gauge, gauge_dir);
qudaDeviceSynchronize();
if ( comm_size() != 1 ) comm_allreduce_array((double*)argQ.result_h, 2);
argQ.result_h[0].x /= (double)(3 * gauge_dir * 2 * argQ.threads * comm_size());
argQ.result_h[0].y /= (double)(3 * 2 * argQ.threads * comm_size());
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << argQ.X[0] << "x";
vol << argQ.X[1] << "x";
vol << argQ.X[2] << "x";
vol << argQ.X[3];
sprintf(aux_string,"threads=%d,prec=%lu,gaugedir=%d",argQ.threads, sizeof(Float),gauge_dir);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
long long flops() const {
return (36LL * gauge_dir + 65LL) * 2 * argQ.threads;
} // Only correct if there is no link reconstruction, no cub reduction accounted also
//long long bytes() const { return (1)*2*gauge_dir*argQ.dataOr.Bytes(); }//no accounting the reduction!!!! argQ.dataOr.Bytes() return 0....
long long bytes() const {
return 2LL * gauge_dir * 2 * argQ.threads * numParams * sizeof(Float);
} // not accounting for the reduction
};
/**
* @brief container to pass parameters for the gauge fixing kernel
*/
template <typename Float, typename Gauge>
struct GaugeFixArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
#ifdef MULTI_GPU
int border[4];
#endif
Gauge dataOr;
cudaGaugeField &data;
const Float relax_boost;
GaugeFixArg(Gauge & dataOr, cudaGaugeField & data, const Float relax_boost)
: dataOr(dataOr), data(data), relax_boost(relax_boost) {
for ( int dir = 0; dir < 4; ++dir ) {
X[dir] = data.X()[dir] - data.R()[dir] * 2;
#ifdef MULTI_GPU
border[dir] = data.R()[dir];
#endif
}
threads = X[0] * X[1] * X[2] * X[3] >> 1;
}
};
/**
* @brief Kernel to perform gauge fixing with overrelaxation for single-GPU
*/
template<int ImplementationType, int blockSize, typename Float, typename Gauge, int gauge_dir>
__global__ void computeFix(GaugeFixArg<Float, Gauge> arg, int parity){
typedef complex<Float> Cmplx;
int tid = (threadIdx.x + blockSize) % blockSize;
int idx = blockIdx.x * blockSize + tid;
if ( idx >= arg.threads ) return;
// 8 threads per lattice site
if ( ImplementationType < 3 ) {
int X[4];
#pragma unroll
for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr];
int x[4];
getCoords(x, idx, X, parity);
#ifdef MULTI_GPU
#pragma unroll
for ( int dr = 0; dr < 4; ++dr ) {
x[dr] += arg.border[dr];
X[dr] += 2 * arg.border[dr];
}
#endif
int mu = (threadIdx.x / blockSize);
int oddbit = parity;
if ( threadIdx.x >= blockSize * 4 ) {
mu -= 4;
x[mu] = (x[mu] - 1 + X[mu]) % X[mu];
oddbit = 1 - parity;
}
idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
Matrix<Cmplx,3> link;
arg.dataOr.load((Float*)(link.data),idx, mu, oddbit);
// 8 threads per lattice site, the reduction is performed by shared memory without using atomicadd.
// this implementation needs 8x more shared memory than the implementation using atomicadd
if ( ImplementationType == 0 ) GaugeFixHit_NoAtomicAdd<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
// 8 threads per lattice site, the reduction is performed by shared memory using atomicadd
if ( ImplementationType == 1 ) GaugeFixHit_AtomicAdd<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
// 8 threads per lattice site, the reduction is performed by shared memory without using atomicadd.
// uses the same amount of shared memory as the atomicadd implementation with more thread block synchronization
if ( ImplementationType == 2 ) GaugeFixHit_NoAtomicAdd_LessSM<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
arg.dataOr.save((Float*)(link.data),idx, mu, oddbit);
}
// 4 threads per lattice site
else{
int X[4];
#pragma unroll
for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr];
int x[4];
getCoords(x, idx, X, parity);
#ifdef MULTI_GPU
#pragma unroll
for ( int dr = 0; dr < 4; ++dr ) {
x[dr] += arg.border[dr];
X[dr] += 2 * arg.border[dr];
}
#endif
int mu = (threadIdx.x / blockSize);
idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
Matrix<Cmplx,3> link;
//load upward link
arg.dataOr.load((Float*)(link.data),idx, mu, parity);
x[mu] = (x[mu] - 1 + X[mu]) % X[mu];
int idx1 = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
Matrix<Cmplx,3> link1;
//load downward link
arg.dataOr.load((Float*)(link1.data),idx1, mu, 1 - parity);
// 4 threads per lattice site, the reduction is performed by shared memory without using atomicadd.
// this implementation needs 4x more shared memory than the implementation using atomicadd
if ( ImplementationType == 3 ) GaugeFixHit_NoAtomicAdd<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
// 4 threads per lattice site, the reduction is performed by shared memory using atomicadd
if ( ImplementationType == 4 ) GaugeFixHit_AtomicAdd<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
// 4 threads per lattice site, the reduction is performed by shared memory without using atomicadd.
// uses the same amount of shared memory as the atomicadd implementation with more thread block synchronization
if ( ImplementationType == 5 ) GaugeFixHit_NoAtomicAdd_LessSM<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
arg.dataOr.save((Float*)(link.data),idx, mu, parity);
arg.dataOr.save((Float*)(link1.data),idx1, mu, 1 - parity);
}
}
/**
* @brief Tunable object for the gauge fixing kernel
*/
template<typename Float, typename Gauge, int gauge_dir>
class GaugeFix : Tunable {
GaugeFixArg<Float, Gauge> arg;
int parity;
mutable char aux_string[128]; // used as a label in the autotuner
protected:
dim3 createGrid(const TuneParam ¶m) const
{
unsigned int blockx = param.block.x / 8;
if (param.aux.x > 2) blockx = param.block.x / 4;
unsigned int gx = (arg.threads + blockx - 1) / blockx;
return dim3(gx, 1, 1);
}
bool advanceBlockDim (TuneParam ¶m) const {
// Use param.aux.x to tune and save state for the best kernel option:
// whether or not to use atomicAdd operations, and 4 or 8 threads per lattice site.
const unsigned int min_threads0 = 32 * 8;
const unsigned int min_threads1 = 32 * 4;
const unsigned int max_threads = 1024; // FIXME: use deviceProp.maxThreadsDim[0];
const unsigned int atmadd = 0;
unsigned int min_threads = min_threads0;
param.aux.x += atmadd; // USE TO SELECT BEST KERNEL OPTION WITH/WITHOUT USING ATOMICADD
if (param.aux.x > 2) min_threads = 32 * 4;
param.block.x += min_threads;
param.block.y = 1;
param.grid = createGrid(param);
if ((param.block.x >= min_threads) && (param.block.x <= max_threads)) {
param.shared_bytes = sharedBytesPerBlock(param);
return true;
} else if (param.aux.x == 0) {
param.block.x = min_threads0;
param.block.y = 1;
param.aux.x = 1; // USE FOR ATOMIC ADD
param.grid = createGrid(param);
param.shared_bytes = param.block.x * 4 * sizeof(Float) / 8;
return true;
} else if (param.aux.x == 1) {
param.block.x = min_threads0;
param.block.y = 1;
param.aux.x = 2; // USE FOR NO ATOMIC ADD and LESS SHARED MEM
param.grid = createGrid(param);
param.shared_bytes = param.block.x * 4 * sizeof(Float) / 8;
return true;
} else if (param.aux.x == 2) {
param.block.x = min_threads1;
param.block.y = 1;
param.aux.x = 3; // USE FOR NO ATOMIC ADD
param.grid = createGrid(param);
param.shared_bytes = param.block.x * 4 * sizeof(Float);
return true;
} else if (param.aux.x == 3) {
param.block.x = min_threads1;
param.block.y = 1;
param.aux.x = 4;
param.grid = createGrid(param);
param.shared_bytes = param.block.x * sizeof(Float);
return true;
} else if (param.aux.x == 4) {
param.block.x = min_threads1;
param.block.y = 1;
param.aux.x = 5;
param.grid = createGrid(param);
param.shared_bytes = param.block.x * sizeof(Float);
return true;
} else {
return false;
}
}
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
switch (param.aux.x) {
case 0: return param.block.x * 4 * sizeof(Float);
case 1: return param.block.x * 4 * sizeof(Float) / 8;
case 2: return param.block.x * 4 * sizeof(Float) / 8;
case 3: return param.block.x * 4 * sizeof(Float);
default: return param.block.x * sizeof(Float);
}
}
bool tuneSharedBytes() const {
return false;
} // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFix(GaugeFixArg<Float, Gauge> &arg) : arg(arg), parity(0) { }
~GaugeFix () { }
void setParity(const int par){
parity = par;
}
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
LAUNCH_KERNEL_GAUGEFIX(computeFix, tp, stream, arg, parity, Float, Gauge, gauge_dir);
}
virtual void initTuneParam(TuneParam ¶m) const
{
param.block = dim3(256, 1, 1);
param.aux.x = 0;
param.grid = createGrid(param);
param.shared_bytes = sharedBytesPerBlock(param);
}
virtual void defaultTuneParam(TuneParam ¶m) const {
initTuneParam(param);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%lu,gaugedir=%d",arg.threads,sizeof(Float),gauge_dir);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps(Tunable::paramString(param));
ps << ", atomicadd=" << param.aux.x;
return ps.str();
}
//need this
void preTune() {
arg.data.backup();
}
void postTune() {
arg.data.restore();
}
long long flops() const {
return 3LL * (22 + 28 * gauge_dir + 224 * 3) * arg.threads;
} // Only correct if there is no link reconstruction
//long long bytes() const { return (1)*8*2*arg.dataOr.Bytes(); } // Only correct if there is no link reconstruction load+save
long long bytes() const {
return 8LL * 2 * arg.threads * numParams * sizeof(Float);
} // not accounting for the reduction
};
#ifdef MULTI_GPU
template <typename Float, typename Gauge>
struct GaugeFixInteriorPointsArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
#ifdef MULTI_GPU
int border[4];
#endif
Gauge dataOr;
cudaGaugeField &data;
const Float relax_boost;
GaugeFixInteriorPointsArg(Gauge & dataOr, cudaGaugeField & data, const Float relax_boost)
: dataOr(dataOr), data(data), relax_boost(relax_boost) {
#ifdef MULTI_GPU
for ( int dir = 0; dir < 4; ++dir ) {
if ( comm_dim_partitioned(dir)) border[dir] = data.R()[dir] + 1; //skip BORDER_RADIUS + face border point
else border[dir] = 0;
}
for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir] - border[dir] * 2;
#else
for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir];
#endif
threads = X[0] * X[1] * X[2] * X[3] >> 1;
}
};
/**
* @brief Kernel to perform gauge fixing with overrelaxation in the interior points for multi-GPU implementation
*/
template<int ImplementationType, int blockSize, typename Float, typename Gauge, int gauge_dir>
__global__ void computeFixInteriorPoints(GaugeFixInteriorPointsArg<Float, Gauge> arg, int parity){
int tid = (threadIdx.x + blockSize) % blockSize;
int idx = blockIdx.x * blockSize + tid;
if ( idx >= arg.threads ) return;
typedef complex<Float> Complex;
int X[4];
#pragma unroll
for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr];
int x[4];
#ifdef MULTI_GPU
int za = (idx / (X[0] / 2));
int zb = (za / X[1]);
x[1] = za - zb * X[1];
x[3] = (zb / X[2]);
x[2] = zb - x[3] * X[2];
int p = 0; for ( int dr = 0; dr < 4; ++dr ) p += arg.border[dr];
p = p & 1;
int x1odd = (x[1] + x[2] + x[3] + parity + p) & 1;
//int x1odd = (x[1] + x[2] + x[3] + parity) & 1;
x[0] = (2 * idx + x1odd) - za * X[0];
for ( int dr = 0; dr < 4; ++dr ) {
x[dr] += arg.border[dr];
X[dr] += 2 * arg.border[dr];
}
#else
getCoords(x, idx, X, parity);
#endif
int mu = (threadIdx.x / blockSize);
// 8 threads per lattice site
if ( ImplementationType < 3 ) {
if ( threadIdx.x >= blockSize * 4 ) {
mu -= 4;
x[mu] = (x[mu] - 1 + X[mu]) % X[mu];
parity = 1 - parity;
}
idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
Matrix<Complex,3> link;
arg.dataOr.load((Float*)(link.data),idx, mu, parity);
// 8 threads per lattice site, the reduction is performed by shared memory without using atomicadd.
// this implementation needs 8x more shared memory than the implementation using atomicadd
if ( ImplementationType == 0 ) GaugeFixHit_NoAtomicAdd<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
// 8 threads per lattice site, the reduction is performed by shared memory using atomicadd
if ( ImplementationType == 1 ) GaugeFixHit_AtomicAdd<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
// 8 threads per lattice site, the reduction is performed by shared memory without using atomicadd.
// uses the same amount of shared memory as the atomicadd implementation with more thread block synchronization
if ( ImplementationType == 2 ) GaugeFixHit_NoAtomicAdd_LessSM<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
arg.dataOr.save((Float*)(link.data),idx, mu, parity);
}
// 4 threads per lattice site
else{
idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
Matrix<Complex,3> link;
arg.dataOr.load((Float*)(link.data),idx, mu, parity);
x[mu] = (x[mu] - 1 + X[mu]) % X[mu];
int idx1 = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
Matrix<Complex,3> link1;
arg.dataOr.load((Float*)(link1.data),idx1, mu, 1 - parity);
// 4 threads per lattice site, the reduction is performed by shared memory without using atomicadd.
// this implementation needs 4x more shared memory than the implementation using atomicadd
if ( ImplementationType == 3 ) GaugeFixHit_NoAtomicAdd<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
// 4 threads per lattice site, the reduction is performed by shared memory using atomicadd
if ( ImplementationType == 4 ) GaugeFixHit_AtomicAdd<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
// 4 threads per lattice site, the reduction is performed by shared memory without using atomicadd.
// uses the same amount of shared memory as the atomicadd implementation with more thread block synchronization
if ( ImplementationType == 5 ) GaugeFixHit_NoAtomicAdd_LessSM<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
arg.dataOr.save((Float*)(link.data),idx, mu, parity);
arg.dataOr.save((Float*)(link1.data),idx1, mu, 1 - parity);
}
}
/**
* @brief Tunable object for the interior points of the gauge fixing
* kernel in multi-GPU implementation
*/
template<typename Float, typename Gauge, int gauge_dir>
class GaugeFixInteriorPoints : Tunable {
GaugeFixInteriorPointsArg<Float, Gauge> arg;
int parity;
mutable char aux_string[128]; // used as a label in the autotuner
protected:
dim3 createGrid(const TuneParam ¶m) const
{
unsigned int blockx = param.block.x / 8;
if (param.aux.x > 2) blockx = param.block.x / 4;
unsigned int gx = (arg.threads + blockx - 1) / blockx;
return dim3(gx, 1, 1);
}
bool advanceBlockDim (TuneParam ¶m) const {
// Use param.aux.x to tune and save state for the best kernel option:
// whether or not to use atomicAdd operations, and 4 or 8 threads per lattice site.
const unsigned int min_threads0 = 32 * 8;
const unsigned int min_threads1 = 32 * 4;
const unsigned int max_threads = 1024; // FIXME: use deviceProp.maxThreadsDim[0];
const unsigned int atmadd = 0;
unsigned int min_threads = min_threads0;
param.aux.x += atmadd; // USE TO SELECT BEST KERNEL OPTION WITH/WITHOUT USING ATOMICADD
if (param.aux.x > 2) min_threads = 32 * 4;
param.block.x += min_threads;
param.block.y = 1;
param.grid = createGrid(param);
if ((param.block.x >= min_threads) && (param.block.x <= max_threads)) {
param.shared_bytes = sharedBytesPerBlock(param);
return true;
} else if (param.aux.x == 0) {
param.block.x = min_threads0;
param.block.y = 1;
param.aux.x = 1; // USE FOR ATOMIC ADD
param.grid = createGrid(param);
param.shared_bytes = param.block.x * 4 * sizeof(Float) / 8;
return true;
} else if (param.aux.x == 1) {
param.block.x = min_threads0;
param.block.y = 1;
param.aux.x = 2; // USE FOR NO ATOMIC ADD and LESS SHARED MEM
param.grid = createGrid(param);
param.shared_bytes = param.block.x * 4 * sizeof(Float) / 8;
return true;
} else if (param.aux.x == 2) {
param.block.x = min_threads1;
param.block.y = 1;
param.aux.x = 3; // USE FOR NO ATOMIC ADD
param.grid = createGrid(param);
param.shared_bytes = param.block.x * 4 * sizeof(Float);
return true;
} else if (param.aux.x == 3) {
param.block.x = min_threads1;
param.block.y = 1;
param.aux.x = 4;
param.grid = createGrid(param);
param.shared_bytes = param.block.x * sizeof(Float);
return true;
} else if (param.aux.x == 4) {
param.block.x = min_threads1;
param.block.y = 1;
param.aux.x = 5;
param.grid = createGrid(param);
param.shared_bytes = param.block.x * sizeof(Float);
return true;
} else {
return false;
}
}
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
switch (param.aux.x) {
case 0: return param.block.x * 4 * sizeof(Float);
case 1: return param.block.x * 4 * sizeof(Float) / 8;
case 2: return param.block.x * 4 * sizeof(Float) / 8;
case 3: return param.block.x * 4 * sizeof(Float);
default: return param.block.x * sizeof(Float);
}
}
bool tuneSharedBytes() const {
return false;
} // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFixInteriorPoints(GaugeFixInteriorPointsArg<Float, Gauge> &arg) : arg(arg), parity(0) {}
~GaugeFixInteriorPoints () { }
void setParity(const int par) { parity = par; }
void apply(const hipStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
LAUNCH_KERNEL_GAUGEFIX(computeFixInteriorPoints, tp, stream, arg, parity, Float, Gauge, gauge_dir);
}
virtual void initTuneParam(TuneParam ¶m) const
{
param.block = dim3(256, 1, 1);
param.aux.x = 0;
param.grid = createGrid(param);
param.shared_bytes = sharedBytesPerBlock(param);
}
virtual void defaultTuneParam(TuneParam ¶m) const { initTuneParam(param); }
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%lu,gaugedir=%d",arg.threads,sizeof(Float),gauge_dir);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps(Tunable::paramString(param));
ps << ", atomicadd=" << param.aux.x;
return ps.str();
}
//need this
void preTune() {
arg.data.backup();
}
void postTune() {
arg.data.restore();
}
long long flops() const {
return 3LL * (22 + 28 * gauge_dir + 224 * 3) * arg.threads;
} // Only correct if there is no link reconstruction
//long long bytes() const { return (1)*8*2*arg.dataOr.Bytes(); } // Only correct if there is no link reconstruction load+save
long long bytes() const {
return 8LL * 2 * arg.threads * numParams * sizeof(Float);
} // Only correct if there is no link reconstruction load+save
};
template <typename Float, typename Gauge>
struct GaugeFixBorderPointsArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
int border[4];
int *borderpoints[2];
int *faceindicessize[2];
size_t faceVolume[4];
size_t faceVolumeCB[4];
Gauge dataOr;
cudaGaugeField &data;
const Float relax_boost;
GaugeFixBorderPointsArg(Gauge & dataOr, cudaGaugeField & data, const Float relax_boost, size_t faceVolume_[4], size_t faceVolumeCB_[4])
: dataOr(dataOr), data(data), relax_boost(relax_boost) {
for ( int dir = 0; dir < 4; ++dir ) {
X[dir] = data.X()[dir] - data.R()[dir] * 2;
border[dir] = data.R()[dir];
}
/*for(int dir=0; dir<4; ++dir){
if(comm_dim_partitioned(dir)) border[dir] = BORDER_RADIUS;
else border[dir] = 0;
}
for(int dir=0; dir<4; ++dir) X[dir] = data.X()[dir] - border[dir]*2;*/
for ( int dir = 0; dir < 4; ++dir ) {
faceVolume[dir] = faceVolume_[dir];
faceVolumeCB[dir] = faceVolumeCB_[dir];
}
if ( comm_partitioned() ) PreCalculateLatticeIndices(faceVolume, faceVolumeCB, X, border, threads, borderpoints);
}
};
/**
* @brief Kernel to perform gauge fixing with overrelaxation in the border points for multi-GPU implementation
*/
template<int ImplementationType, int blockSize, typename Float, typename Gauge, int gauge_dir>
__global__ void computeFixBorderPoints(GaugeFixBorderPointsArg<Float, Gauge> arg, int parity){
typedef complex<Float> Cmplx;
int tid = (threadIdx.x + blockSize) % blockSize;
int idx = blockIdx.x * blockSize + tid;
if ( idx >= arg.threads ) return;
int mu = (threadIdx.x / blockSize);
idx = arg.borderpoints[parity][idx];
int X[4], x[4];
x[3] = idx / (arg.X[0] * arg.X[1] * arg.X[2]);
x[2] = (idx / (arg.X[0] * arg.X[1])) % arg.X[2];
x[1] = (idx / arg.X[0]) % arg.X[1];
x[0] = idx % arg.X[0];
#pragma unroll
for ( int dr = 0; dr < 4; ++dr ) x[dr] += arg.border[dr];
#pragma unroll
for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr] + 2 * arg.border[dr];
// 8 threads per lattice site
if ( ImplementationType < 3 ) {
if ( threadIdx.x >= blockSize * 4 ) {
mu -= 4;
x[mu] = (x[mu] - 1 + X[mu]) % X[mu];
parity = 1 - parity;
}
idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
Matrix<Cmplx,3> link;
arg.dataOr.load((Float*)(link.data),idx, mu, parity);
// 8 threads per lattice site, the reduction is performed by shared memory without using atomicadd.
// this implementation needs 8x more shared memory than the implementation using atomicadd
if ( ImplementationType == 0 ) GaugeFixHit_NoAtomicAdd<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
// 8 threads per lattice site, the reduction is performed by shared memory using atomicadd
if ( ImplementationType == 1 ) GaugeFixHit_AtomicAdd<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
// 8 threads per lattice site, the reduction is performed by shared memory without using atomicadd.
// uses the same amount of shared memory as the atomicadd implementation with more thread block synchronization
if ( ImplementationType == 2 ) GaugeFixHit_NoAtomicAdd_LessSM<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
arg.dataOr.save((Float*)(link.data),idx, mu, parity);
}
// 4 threads per lattice site
else{
idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
Matrix<Cmplx,3> link;
arg.dataOr.load((Float*)(link.data),idx, mu, parity);
x[mu] = (x[mu] - 1 + X[mu]) % X[mu];
int idx1 = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
Matrix<Cmplx,3> link1;
arg.dataOr.load((Float*)(link1.data),idx1, mu, 1 - parity);
// 4 threads per lattice site, the reduction is performed by shared memory without using atomicadd.
// this implementation needs 4x more shared memory than the implementation using atomicadd
if ( ImplementationType == 3 ) GaugeFixHit_NoAtomicAdd<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
// 4 threads per lattice site, the reduction is performed by shared memory using atomicadd
if ( ImplementationType == 4 ) GaugeFixHit_AtomicAdd<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
// 4 threads per lattice site, the reduction is performed by shared memory without using atomicadd.
// uses the same amount of shared memory as the atomicadd implementation with more thread block synchronization
if ( ImplementationType == 5 ) GaugeFixHit_NoAtomicAdd_LessSM<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
arg.dataOr.save((Float*)(link.data),idx, mu, parity);
arg.dataOr.save((Float*)(link1.data),idx1, mu, 1 - parity);
}
}
/**
* @brief Tunable object for the border points of the gauge fixing kernel in multi-GPU implementation
*/
template<typename Float, typename Gauge, int gauge_dir>
class GaugeFixBorderPoints : Tunable {
GaugeFixBorderPointsArg<Float, Gauge> arg;
int parity;
mutable char aux_string[128]; // used as a label in the autotuner
protected:
dim3 createGrid(const TuneParam ¶m) const
{
unsigned int blockx = param.block.x / 8;
if (param.aux.x > 2) blockx = param.block.x / 4;
unsigned int gx = (arg.threads + blockx - 1) / blockx;
return dim3(gx, 1, 1);
}
bool advanceBlockDim(TuneParam ¶m) const
{
// Use param.aux.x to tune and save state for the best kernel option:
// whether or not to use atomicAdd operations, and 4 or 8 threads per lattice site.
const unsigned int min_threads0 = 32 * 8;
const unsigned int min_threads1 = 32 * 4;
const unsigned int max_threads = 1024; // FIXME: use deviceProp.maxThreadsDim[0];
const unsigned int atmadd = 0;
unsigned int min_threads = min_threads0;
param.aux.x += atmadd; // USE TO SELECT BEST KERNEL OPTION WITH/WITHOUT USING ATOMICADD
if (param.aux.x > 2) min_threads = 32 * 4;
param.block.x += min_threads;
param.block.y = 1;
param.grid = createGrid(param);
if ((param.block.x >= min_threads) && (param.block.x <= max_threads)) {
param.shared_bytes = sharedBytesPerBlock(param);
return true;
} else if (param.aux.x == 0) {
param.block.x = min_threads0;
param.block.y = 1;
param.aux.x = 1; // USE FOR ATOMIC ADD
param.grid = createGrid(param);
param.shared_bytes = param.block.x * 4 * sizeof(Float) / 8;
return true;
} else if (param.aux.x == 1) {
param.block.x = min_threads0;
param.block.y = 1;
param.aux.x = 2; // USE FOR NO ATOMIC ADD and LESS SHARED MEM
param.grid = createGrid(param);
param.shared_bytes = param.block.x * 4 * sizeof(Float) / 8;
return true;
} else if (param.aux.x == 2) {
param.block.x = min_threads1;
param.block.y = 1;
param.aux.x = 3; // USE FOR NO ATOMIC ADD
param.grid = createGrid(param);
param.shared_bytes = param.block.x * 4 * sizeof(Float);
return true;
} else if (param.aux.x == 3) {
param.block.x = min_threads1;
param.block.y = 1;
param.aux.x = 4;
param.grid = createGrid(param);
param.shared_bytes = param.block.x * sizeof(Float);
return true;
} else if (param.aux.x == 4) {
param.block.x = min_threads1;
param.block.y = 1;
param.aux.x = 5;
param.grid = createGrid(param);
param.shared_bytes = param.block.x * sizeof(Float);
return true;
} else {
return false;
}
}
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
switch (param.aux.x) {
case 0: return param.block.x * 4 * sizeof(Float);
case 1: return param.block.x * 4 * sizeof(Float) / 8;
case 2: return param.block.x * 4 * sizeof(Float) / 8;
case 3: return param.block.x * 4 * sizeof(Float);
default: return param.block.x * sizeof(Float);
}
}
bool tuneSharedBytes() const {
return false;
} // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFixBorderPoints(GaugeFixBorderPointsArg<Float, Gauge> &arg) : arg(arg), parity(0) { }
~GaugeFixBorderPoints () {
if ( comm_partitioned() ) for ( int i = 0; i < 2; i++ ) pool_device_free(arg.borderpoints[i]);
}
void setParity(const int par){
parity = par;
}
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
LAUNCH_KERNEL_GAUGEFIX(computeFixBorderPoints, tp, stream, arg, parity, Float, Gauge, gauge_dir);
}
virtual void initTuneParam(TuneParam ¶m) const
{
param.block = dim3(256, 1, 1);
param.aux.x = 0;
param.grid = createGrid(param);
param.shared_bytes = sharedBytesPerBlock(param);
}
virtual void defaultTuneParam(TuneParam ¶m) const {
initTuneParam(param);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%lu,gaugedir=%d",arg.threads,sizeof(Float),gauge_dir);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps(Tunable::paramString(param));
ps << ", atomicadd=" << param.aux.x;
return ps.str();
}
//need this
void preTune() {
arg.data.backup();
}
void postTune() {
arg.data.restore();
}
long long flops() const {
return 3LL * (22 + 28 * gauge_dir + 224 * 3) * arg.threads;
} // Only correct if there is no link reconstruction
//long long bytes() const { return (1)*8*2*arg.dataOr.Bytes(); } // Only correct if there is no link reconstruction load+save
long long bytes() const {
return 8LL * 2 * arg.threads * numParams * sizeof(Float);
} // Only correct if there is no link reconstruction load+save
};
template <typename Gauge>
struct GaugeFixUnPackArg {
int X[4]; // grid dimensions
#ifdef MULTI_GPU
int border[4];
#endif
Gauge dataOr;
GaugeFixUnPackArg(Gauge & dataOr, cudaGaugeField & data)
: dataOr(dataOr) {
for ( int dir = 0; dir < 4; ++dir ) {
X[dir] = data.X()[dir] - data.R()[dir] * 2;
#ifdef MULTI_GPU
border[dir] = data.R()[dir];
#endif
}
}
};
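/**
 * @brief Kernel to pack (pack == true) or unpack (pack == false) the gauge links on the bottom ghost face of direction `face` into/from a contiguous exchange buffer
 */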
template<int NElems, typename Float, typename Gauge, bool pack>
__global__ void Kernel_UnPackGhost(int size, GaugeFixUnPackArg<Gauge> arg, complex<Float> *array, int parity, int face, int dir){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx >= size ) return;
int X[4];
for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr];
int x[4];
int za, xodd;
int borderid = 0;
parity = 1 - parity;
switch ( face ) {
case 0: //X FACE
za = idx / ( X[1] / 2);
x[3] = za / X[2];
x[2] = za - x[3] * X[2];
x[0] = borderid;
xodd = (borderid + x[2] + x[3] + parity) & 1;
x[1] = (2 * idx + xodd) - za * X[1];
break;
case 1: //Y FACE
za = idx / ( X[0] / 2);
x[3] = za / X[2];
x[2] = za - x[3] * X[2];
x[1] = borderid;
xodd = (borderid + x[2] + x[3] + parity) & 1;
x[0] = (2 * idx + xodd) - za * X[0];
break;
case 2: //Z FACE
za = idx / ( X[0] / 2);
x[3] = za / X[1];
x[1] = za - x[3] * X[1];
x[2] = borderid;
xodd = (borderid + x[1] + x[3] + parity) & 1;
x[0] = (2 * idx + xodd) - za * X[0];
break;
case 3: //T FACE
za = idx / ( X[0] / 2);
x[2] = za / X[1];
x[1] = za - x[2] * X[1];
x[3] = borderid;
xodd = (borderid + x[1] + x[2] + parity) & 1;
x[0] = (2 * idx + xodd) - za * X[0];
break;
}
for ( int dr = 0; dr < 4; ++dr ) {
x[dr] += arg.border[dr];
X[dr] += 2 * arg.border[dr];
}
x[face] -= 1;
parity = 1 - parity;
int id = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
typedef complex<Float> Cmplx;
typedef typename mapper<Float>::type RegType;
RegType tmp[NElems];
RegType data[18];
if ( pack ) {
arg.dataOr.load(data, id, dir, parity);
arg.dataOr.reconstruct.Pack(tmp, data, id);
for ( int i = 0; i < NElems / 2; ++i ) array[idx + size * i] = ((Cmplx*)tmp)[i];
}
else{
for ( int i = 0; i < NElems / 2; ++i ) ((Cmplx*)tmp)[i] = array[idx + size * i];
arg.dataOr.reconstruct.Unpack(data, tmp, id, dir, 0, arg.dataOr.X, arg.dataOr.R);
arg.dataOr.save(data, id, dir, parity);
}
}
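/**
 * @brief Kernel to pack (pack == true) or unpack (pack == false) the gauge links on the top face of direction `face` into/from a contiguous exchange buffer
 */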
template<int NElems, typename Float, typename Gauge, bool pack>
__global__ void Kernel_UnPackTop(int size, GaugeFixUnPackArg<Gauge> arg, complex<Float> *array, int parity, int face, int dir){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx >= size ) return;
int X[4];
for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr];
int x[4];
int za, xodd;
int borderid = arg.X[face] - 1;
switch ( face ) {
case 0: //X FACE
za = idx / ( X[1] / 2);
x[3] = za / X[2];
x[2] = za - x[3] * X[2];
x[0] = borderid;
xodd = (borderid + x[2] + x[3] + parity) & 1;
x[1] = (2 * idx + xodd) - za * X[1];
break;
case 1: //Y FACE
za = idx / ( X[0] / 2);
x[3] = za / X[2];
x[2] = za - x[3] * X[2];
x[1] = borderid;
xodd = (borderid + x[2] + x[3] + parity) & 1;
x[0] = (2 * idx + xodd) - za * X[0];
break;
case 2: //Z FACE
za = idx / ( X[0] / 2);
x[3] = za / X[1];
x[1] = za - x[3] * X[1];
x[2] = borderid;
xodd = (borderid + x[1] + x[3] + parity) & 1;
x[0] = (2 * idx + xodd) - za * X[0];
break;
case 3: //T FACE
za = idx / ( X[0] / 2);
x[2] = za / X[1];
x[1] = za - x[2] * X[1];
x[3] = borderid;
xodd = (borderid + x[1] + x[2] + parity) & 1;
x[0] = (2 * idx + xodd) - za * X[0];
break;
}
for ( int dr = 0; dr < 4; ++dr ) {
x[dr] += arg.border[dr];
X[dr] += 2 * arg.border[dr];
}
int id = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
typedef complex<Float> Cmplx;
typedef typename mapper<Float>::type RegType;
RegType tmp[NElems];
RegType data[18];
if ( pack ) {
arg.dataOr.load(data, id, dir, parity);
arg.dataOr.reconstruct.Pack(tmp, data, id);
for ( int i = 0; i < NElems / 2; ++i ) array[idx + size * i] = ((Cmplx*)tmp)[i];
}
else{
for ( int i = 0; i < NElems / 2; ++i ) ((Cmplx*)tmp)[i] = array[idx + size * i];
arg.dataOr.reconstruct.Unpack(data, tmp, id, dir, 0, arg.dataOr.X, arg.dataOr.R);
arg.dataOr.save(data, id, dir, parity);
}
}
#endif
template<typename Float, typename Gauge, int NElems, int gauge_dir>
void gaugefixingOVR( Gauge dataOr, cudaGaugeField& data,
const int Nsteps, const int verbose_interval,
const Float relax_boost, const double tolerance,
const int reunit_interval, const int stopWtheta) {
TimeProfile profileInternalGaugeFixOVR("InternalGaugeFixQudaOVR", false);
profileInternalGaugeFixOVR.TPSTART(QUDA_PROFILE_COMPUTE);
double flop = 0;
double byte = 0;
printfQuda("\tOverrelaxation boost parameter: %lf\n", (double)relax_boost);
printfQuda("\tStop criterion: %lf\n", tolerance);
if ( stopWtheta ) printfQuda("\tStop criterion method: theta\n");
else printfQuda("\tStop criterion method: Delta\n");
printfQuda("\tMaximum number of iterations: %d\n", Nsteps);
printfQuda("\tReunitarize at every %d steps\n", reunit_interval);
printfQuda("\tPrint convergence results at every %d steps\n", verbose_interval);
const double unitarize_eps = 1e-14;
const double max_error = 1e-10;
const int reunit_allow_svd = 1;
const int reunit_svd_only = 0;
const double svd_rel_error = 1e-6;
const double svd_abs_error = 1e-6;
setUnitarizeLinksConstants(unitarize_eps, max_error,
reunit_allow_svd, reunit_svd_only,
svd_rel_error, svd_abs_error);
int num_failures = 0;
int* num_failures_dev = static_cast<int*>(pool_device_malloc(sizeof(int)));
hipMemset(num_failures_dev, 0, sizeof(int));
GaugeFixQualityArg<Gauge> argQ(dataOr, data);
GaugeFixQuality<Float,Gauge, gauge_dir> GaugeFixQuality(argQ);
GaugeFixArg<Float, Gauge> arg(dataOr, data, relax_boost);
GaugeFix<Float,Gauge, gauge_dir> gaugeFix(arg);
#ifdef MULTI_GPU
void *send[4];
void *recv[4];
void *sendg[4];
void *recvg[4];
void *send_d[4];
void *recv_d[4];
void *sendg_d[4];
void *recvg_d[4];
void *hostbuffer_h[4];
hipStream_t GFStream[9];
size_t offset[4];
size_t bytes[4];
size_t faceVolume[4];
size_t faceVolumeCB[4];
// do the exchange
MsgHandle *mh_recv_back[4];
MsgHandle *mh_recv_fwd[4];
MsgHandle *mh_send_fwd[4];
MsgHandle *mh_send_back[4];
int X[4];
dim3 block[4];
dim3 grid[4];
if ( comm_partitioned() ) {
for ( int dir = 0; dir < 4; ++dir ) {
X[dir] = data.X()[dir] - data.R()[dir] * 2;
if ( !commDimPartitioned(dir) && data.R()[dir] != 0 ) errorQuda("Not supported!\n");
}
for ( int i = 0; i < 4; i++ ) {
faceVolume[i] = 1;
for ( int j = 0; j < 4; j++ ) {
if ( i == j ) continue;
faceVolume[i] *= X[j];
}
faceVolumeCB[i] = faceVolume[i] / 2;
}
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
offset[d] = faceVolumeCB[d] * NElems;
bytes[d] = sizeof(Float) * offset[d];
send_d[d] = device_malloc(bytes[d]);
recv_d[d] = device_malloc(bytes[d]);
sendg_d[d] = device_malloc(bytes[d]);
recvg_d[d] = device_malloc(bytes[d]);
hipStreamCreate(&GFStream[d]);
hipStreamCreate(&GFStream[4 + d]);
#ifndef GPU_COMMS
hostbuffer_h[d] = (void*)pinned_malloc(4 * bytes[d]);
#endif
block[d] = make_uint3(128, 1, 1);
grid[d] = make_uint3((faceVolumeCB[d] + block[d].x - 1) / block[d].x, 1, 1);
}
hipStreamCreate(&GFStream[8]);
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
#ifdef GPU_COMMS
recv[d] = recv_d[d];
send[d] = send_d[d];
recvg[d] = recvg_d[d];
sendg[d] = sendg_d[d];
#else
recv[d] = hostbuffer_h[d];
send[d] = static_cast<char*>(hostbuffer_h[d]) + bytes[d];
recvg[d] = static_cast<char*>(hostbuffer_h[d]) + 3 * bytes[d];
sendg[d] = static_cast<char*>(hostbuffer_h[d]) + 2 * bytes[d];
#endif
mh_recv_back[d] = comm_declare_receive_relative(recv[d], d, -1, bytes[d]);
mh_recv_fwd[d] = comm_declare_receive_relative(recvg[d], d, +1, bytes[d]);
mh_send_back[d] = comm_declare_send_relative(sendg[d], d, -1, bytes[d]);
mh_send_fwd[d] = comm_declare_send_relative(send[d], d, +1, bytes[d]);
}
}
GaugeFixUnPackArg<Gauge> dataexarg(dataOr, data);
GaugeFixBorderPointsArg<Float, Gauge> argBorder(dataOr, data, relax_boost, faceVolume, faceVolumeCB);
GaugeFixBorderPoints<Float,Gauge, gauge_dir> gfixBorderPoints(argBorder);
GaugeFixInteriorPointsArg<Float, Gauge> argInt(dataOr, data, relax_boost);
GaugeFixInteriorPoints<Float,Gauge, gauge_dir> gfixIntPoints(argInt);
#endif
GaugeFixQuality.apply(0);
flop += (double)GaugeFixQuality.flops();
byte += (double)GaugeFixQuality.bytes();
double action0 = argQ.getAction();
printfQuda("Step: %d\tAction: %.16e\ttheta: %.16e\n", 0, argQ.getAction(), argQ.getTheta());
unitarizeLinks(data, data, num_failures_dev);
qudaMemcpy(&num_failures, num_failures_dev, sizeof(int), hipMemcpyDeviceToHost);
if ( num_failures > 0 ) {
pool_device_free(num_failures_dev);
errorQuda("Error in the unitarization\n");
exit(1);
}
hipMemset(num_failures_dev, 0, sizeof(int));
int iter = 0;
for ( iter = 0; iter < Nsteps; iter++ ) {
for ( int p = 0; p < 2; p++ ) {
#ifndef MULTI_GPU
gaugeFix.setParity(p);
gaugeFix.apply(0);
flop += (double)gaugeFix.flops();
byte += (double)gaugeFix.bytes();
#else
if ( !comm_partitioned() ) {
gaugeFix.setParity(p);
gaugeFix.apply(0);
flop += (double)gaugeFix.flops();
byte += (double)gaugeFix.bytes();
}
else{
gfixIntPoints.setParity(p);
gfixBorderPoints.setParity(p); //compute border points
gfixBorderPoints.apply(0);
flop += (double)gfixBorderPoints.flops();
byte += (double)gfixBorderPoints.bytes();
flop += (double)gfixIntPoints.flops();
byte += (double)gfixIntPoints.bytes();
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
comm_start(mh_recv_back[d]);
comm_start(mh_recv_fwd[d]);
}
//wait for the update to the halo points before starting to pack...
qudaDeviceSynchronize();
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
//extract top face
Kernel_UnPackTop<NElems, Float, Gauge, true><< < grid[d], block[d], 0, GFStream[d] >> > (faceVolumeCB[d], dataexarg, reinterpret_cast<complex<Float>*>(send_d[d]), p, d, d);
//extract bottom ghost
Kernel_UnPackGhost<NElems, Float, Gauge, true><< < grid[d], block[d], 0, GFStream[4 + d] >> > (faceVolumeCB[d], dataexarg, reinterpret_cast<complex<Float>*>(sendg_d[d]), 1 - p, d, d);
}
#ifdef GPU_COMMS
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
qudaStreamSynchronize(GFStream[d]);
comm_start(mh_send_fwd[d]);
qudaStreamSynchronize(GFStream[4 + d]);
comm_start(mh_send_back[d]);
}
#else
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
hipMemcpyAsync(send[d], send_d[d], bytes[d], hipMemcpyDeviceToHost, GFStream[d]);
}
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
hipMemcpyAsync(sendg[d], sendg_d[d], bytes[d], hipMemcpyDeviceToHost, GFStream[4 + d]);
}
#endif
//compute interior points
gfixIntPoints.apply(GFStream[8]);
#ifndef GPU_COMMS
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
qudaStreamSynchronize(GFStream[d]);
comm_start(mh_send_fwd[d]);
qudaStreamSynchronize(GFStream[4 + d]);
comm_start(mh_send_back[d]);
}
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
comm_wait(mh_recv_back[d]);
hipMemcpyAsync(recv_d[d], recv[d], bytes[d], hipMemcpyHostToDevice, GFStream[d]);
}
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
comm_wait(mh_recv_fwd[d]);
hipMemcpyAsync(recvg_d[d], recvg[d], bytes[d], hipMemcpyHostToDevice, GFStream[4 + d]);
}
#endif
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
#ifdef GPU_COMMS
comm_wait(mh_recv_back[d]);
#endif
Kernel_UnPackGhost<NElems, Float, Gauge, false><< < grid[d], block[d], 0, GFStream[d] >> > (faceVolumeCB[d], dataexarg, reinterpret_cast<complex<Float>*>(recv_d[d]), p, d, d);
}
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
#ifdef GPU_COMMS
comm_wait(mh_recv_fwd[d]);
#endif
Kernel_UnPackTop<NElems, Float, Gauge, false><< < grid[d], block[d], 0, GFStream[4 + d] >> > (faceVolumeCB[d], dataexarg, reinterpret_cast<complex<Float>*>(recvg_d[d]), 1 - p, d, d);
}
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
comm_wait(mh_send_back[d]);
comm_wait(mh_send_fwd[d]);
qudaStreamSynchronize(GFStream[d]);
qudaStreamSynchronize(GFStream[4 + d]);
}
qudaStreamSynchronize(GFStream[8]);
}
#endif
/*gaugeFix.setParity(p);
gaugeFix.apply(0);
flop += (double)gaugeFix.flops();
byte += (double)gaugeFix.bytes();
#ifdef MULTI_GPU
if(comm_partitioned()){//exchange updated top face links in current parity
for (int d=0; d<4; d++) {
if (!commDimPartitioned(d)) continue;
comm_start(mh_recv_back[d]);
//extract top face
Kernel_UnPackTop<NElems, Float, Gauge><<<grid[d], block[d]>>>(faceVolumeCB[d], dataexarg, reinterpret_cast<Float*>(send_d[d]), p, d, d, true);
#ifndef GPU_COMMS
hipMemcpy(send[d], send_d[d], bytes[d], hipMemcpyDeviceToHost);
#else
qudaDeviceSynchronize();
#endif
comm_start(mh_send_fwd[d]);
comm_wait(mh_recv_back[d]);
comm_wait(mh_send_fwd[d]);
#ifndef GPU_COMMS
hipMemcpy(recv_d[d], recv[d], bytes[d], hipMemcpyHostToDevice);
#endif
//inject top face in ghost
Kernel_UnPackGhost<NElems, Float, Gauge><<<grid[d], block[d]>>>(faceVolumeCB[d], dataexarg, reinterpret_cast<Float*>(recv_d[d]), p, d, d, false);
}
//exchange updated ghost links in opposite parity
for (int d=0; d<4; d++) {
if (!commDimPartitioned(d)) continue;
comm_start(mh_recv_fwd[d]);
Kernel_UnPackGhost<NElems, Float, Gauge><<<grid[d], block[d]>>>(faceVolumeCB[d], dataexarg, reinterpret_cast<Float*>(sendg_d[d]), 1-p, d, d, true);
#ifndef GPU_COMMS
hipMemcpy(sendg[d], sendg_d[d], bytes[d], hipMemcpyDeviceToHost);
#else
qudaDeviceSynchronize();
#endif
comm_start(mh_send_back[d]);
comm_wait(mh_recv_fwd[d]);
comm_wait(mh_send_back[d]);
#ifndef GPU_COMMS
hipMemcpy(recvg_d[d], recvg[d], bytes[d], hipMemcpyHostToDevice);
#endif
Kernel_UnPackTop<NElems, Float, Gauge><<<grid[d], block[d]>>>(faceVolumeCB[d], dataexarg, reinterpret_cast<Float*>(recvg_d[d]), 1-p, d, d, false);
}
}
#endif*/
}
if ((iter % reunit_interval) == (reunit_interval - 1)) {
unitarizeLinks(data, data, num_failures_dev);
qudaMemcpy(&num_failures, num_failures_dev, sizeof(int), hipMemcpyDeviceToHost);
if ( num_failures > 0 ) errorQuda("Error in the unitarization\n");
hipMemset(num_failures_dev, 0, sizeof(int));
flop += 4588.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3];
byte += 8.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3] * dataOr.Bytes();
}
GaugeFixQuality.apply(0);
flop += (double)GaugeFixQuality.flops();
byte += (double)GaugeFixQuality.bytes();
double action = argQ.getAction();
double diff = abs(action0 - action);
if ((iter % verbose_interval) == (verbose_interval - 1))
printfQuda("Step: %d\tAction: %.16e\ttheta: %.16e\tDelta: %.16e\n", iter + 1, argQ.getAction(), argQ.getTheta(), diff);
if ( stopWtheta ) {
if ( argQ.getTheta() < tolerance ) break;
}
else{
if ( diff < tolerance ) break;
}
action0 = action;
}
if ((iter % reunit_interval) != 0 ) {
unitarizeLinks(data, data, num_failures_dev);
qudaMemcpy(&num_failures, num_failures_dev, sizeof(int), hipMemcpyDeviceToHost);
if ( num_failures > 0 ) errorQuda("Error in the unitarization\n");
hipMemset(num_failures_dev, 0, sizeof(int));
flop += 4588.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3];
byte += 8.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3] * dataOr.Bytes();
}
if ((iter % verbose_interval) != 0 ) {
GaugeFixQuality.apply(0);
flop += (double)GaugeFixQuality.flops();
byte += (double)GaugeFixQuality.bytes();
double action = argQ.getAction();
double diff = abs(action0 - action);
printfQuda("Step: %d\tAction: %.16e\ttheta: %.16e\tDelta: %.16e\n", iter + 1, argQ.getAction(), argQ.getTheta(), diff);
}
pool_device_free(num_failures_dev);
#ifdef MULTI_GPU
if ( comm_partitioned() ) {
data.exchangeExtendedGhost(data.R(),false);
for ( int d = 0; d < 4; d++ ) {
if ( commDimPartitioned(d)) {
comm_free(mh_send_fwd[d]);
comm_free(mh_send_back[d]);
comm_free(mh_recv_back[d]);
comm_free(mh_recv_fwd[d]);
device_free(send_d[d]);
device_free(recv_d[d]);
device_free(sendg_d[d]);
device_free(recvg_d[d]);
hipStreamDestroy(GFStream[d]);
hipStreamDestroy(GFStream[4 + d]);
#ifndef GPU_COMMS
host_free(hostbuffer_h[d]);
#endif
}
}
hipStreamDestroy(GFStream[8]);
}
#endif
checkCudaError();
qudaDeviceSynchronize();
profileInternalGaugeFixOVR.TPSTOP(QUDA_PROFILE_COMPUTE);
if (getVerbosity() > QUDA_SUMMARIZE){
double secs = profileInternalGaugeFixOVR.Last(QUDA_PROFILE_COMPUTE);
double gflops = (flop * 1e-9) / (secs);
double gbytes = byte / (secs * 1e9);
#ifdef MULTI_GPU
printfQuda("Time: %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops * comm_size(), gbytes * comm_size());
#else
printfQuda("Time: %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops, gbytes);
#endif
}
}
template<typename Float, int NElems, typename Gauge>
void gaugefixingOVR( Gauge dataOr, cudaGaugeField& data, const int gauge_dir, const int Nsteps, const int verbose_interval,
const Float relax_boost, const double tolerance, const int reunit_interval, const int stopWtheta) {
if ( gauge_dir != 3 ) {
printfQuda("Starting Landau gauge fixing...\n");
gaugefixingOVR<Float, Gauge, NElems, 4>(dataOr, data, Nsteps, verbose_interval, relax_boost, tolerance, reunit_interval, stopWtheta);
}
else {
printfQuda("Starting Coulomb gauge fixing...\n");
gaugefixingOVR<Float, Gauge, NElems, 3>(dataOr, data, Nsteps, verbose_interval, relax_boost, tolerance, reunit_interval, stopWtheta);
}
}
template<typename Float>
void gaugefixingOVR( cudaGaugeField& data, const int gauge_dir, const int Nsteps, const int verbose_interval,
const Float relax_boost, const double tolerance, const int reunit_interval, const int stopWtheta) {
// Switching to FloatNOrder for the gauge field in order to support RECONSTRUCT_12
if ( data.isNative() ) {
if ( data.Reconstruct() == QUDA_RECONSTRUCT_NO ) {
//printfQuda("QUDA_RECONSTRUCT_NO\n");
numParams = 18;
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge;
gaugefixingOVR<Float, 18>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, relax_boost, tolerance, reunit_interval, stopWtheta);
} else if ( data.Reconstruct() == QUDA_RECONSTRUCT_12 ) {
//printfQuda("QUDA_RECONSTRUCT_12\n");
numParams = 12;
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge;
gaugefixingOVR<Float, 12>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, relax_boost, tolerance, reunit_interval, stopWtheta);
} else if ( data.Reconstruct() == QUDA_RECONSTRUCT_8 ) {
//printfQuda("QUDA_RECONSTRUCT_8\n");
numParams = 8;
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge;
gaugefixingOVR<Float, 8>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, relax_boost, tolerance, reunit_interval, stopWtheta);
} else {
errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct());
}
} else {
errorQuda("Invalid Gauge Order\n");
}
}
#endif // GPU_GAUGE_ALG
/**
* @brief Gauge fixing with overrelaxation with support for single and multi GPU.
* @param[in,out] data, quda gauge field
   * @param[in] gauge_dir, 3 for Coulomb gauge fixing, any other value for Landau gauge fixing
   * @param[in] Nsteps, maximum number of steps to perform gauge fixing
   * @param[in] verbose_interval, print gauge fixing info when iteration count is a multiple of this
   * @param[in] relax_boost, gauge fixing parameter of the overrelaxation method, most common values are 1.5 or 1.7
   * @param[in] tolerance, tolerance value to stop the method; if this value is zero the method only stops when the iteration count reaches the maximum number of steps defined by Nsteps
   * @param[in] reunit_interval, reunitarize gauge field when iteration count is a multiple of this
   * @param[in] stopWtheta, 0 for the MILC criterion and 1 to use the theta value
*/
void gaugefixingOVR( cudaGaugeField& data, const int gauge_dir, const int Nsteps, const int verbose_interval, const double relax_boost,
const double tolerance, const int reunit_interval, const int stopWtheta) {
#ifdef GPU_GAUGE_ALG
if ( data.Precision() == QUDA_HALF_PRECISION ) {
errorQuda("Half precision not supported\n");
}
if ( data.Precision() == QUDA_SINGLE_PRECISION ) {
gaugefixingOVR<float> (data, gauge_dir, Nsteps, verbose_interval, (float)relax_boost, tolerance, reunit_interval, stopWtheta);
} else if ( data.Precision() == QUDA_DOUBLE_PRECISION ) {
gaugefixingOVR<double>(data, gauge_dir, Nsteps, verbose_interval, relax_boost, tolerance, reunit_interval, stopWtheta);
} else {
errorQuda("Precision %d not supported", data.Precision());
}
#else
errorQuda("Gauge fixing has not been built");
#endif // GPU_GAUGE_ALG
}
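  /**
   * A minimal usage sketch for the gaugefixingOVR interface above (illustrative only, not part
   * of the original file). It assumes an extended cudaGaugeField named `gauge` has already been
   * created and loaded elsewhere; the field name and all parameter values below are assumptions
   * chosen for illustration.
   *
   *   // gauge_dir == 3 selects Coulomb gauge, any other value selects Landau gauge
   *   quda::gaugefixingOVR(gauge, 3, 1000, 100, 1.5, 1e-6, 10, 0);
   *
   * The arguments are, in order: gauge field, gauge_dir, Nsteps, verbose_interval,
   * relax_boost, tolerance, reunit_interval, stopWtheta.
   */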
} //namespace quda
|
8d5936ef366771078726bf9e52aeb4fabee35ff1.cu
|
#include <quda_internal.h>
#include <quda_matrix.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <launch_kernel.cuh>
#include <unitarization_links.h>
#include <comm_quda.h>
#include <gauge_fix_ovr_extra.h>
#include <gauge_fix_ovr_hit_devf.cuh>
#include <cub_helper.cuh>
#include <index_helper.cuh>
namespace quda {
#ifdef GPU_GAUGE_ALG
static int numParams = 18;
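  /**
   * The LAUNCH_KERNEL_GAUGEFIX macro below dispatches the gauge-fixing kernels on two tuning
   * parameters: tp.aux.x selects one of six kernel implementations (0-2 use 8 threads per
   * lattice site, 3-5 use 4 threads per lattice site), and tp.block.x selects the thread-block
   * size, which is translated into the blockSize template argument (block.x / 8 for
   * implementations 0-2 and block.x / 4 for implementations 3-5).
   */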
#define LAUNCH_KERNEL_GAUGEFIX(kernel, tp, stream, arg, parity, ...) \
if (tp.aux.x == 0) { \
switch (tp.block.x) { \
case 256: kernel<0, 32, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 512: kernel<0, 64, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 768: kernel<0, 96, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 1024: kernel<0, 128, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
default: errorQuda("%s not implemented for %d threads", #kernel, tp.block.x); \
} \
} else if (tp.aux.x == 1) { \
switch (tp.block.x) { \
case 256: kernel<1, 32, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 512: kernel<1, 64, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 768: kernel<1, 96, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 1024: kernel<1, 128, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
default: errorQuda("%s not implemented for %d threads", #kernel, tp.block.x); \
} \
} else if (tp.aux.x == 2) { \
switch (tp.block.x) { \
case 256: kernel<2, 32, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 512: kernel<2, 64, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 768: kernel<2, 96, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 1024: kernel<2, 128, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
default: errorQuda("%s not implemented for %d threads", #kernel, tp.block.x); \
} \
} else if (tp.aux.x == 3) { \
switch (tp.block.x) { \
case 128: kernel<3, 32, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 256: kernel<3, 64, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 384: kernel<3, 96, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 512: kernel<3, 128, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 640: kernel<3, 160, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 768: kernel<3, 192, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 896: kernel<3, 224, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 1024: kernel<3, 256, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
default: errorQuda("%s not implemented for %d threads", #kernel, tp.block.x); \
} \
} else if (tp.aux.x == 4) { \
switch (tp.block.x) { \
case 128: kernel<4, 32, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 256: kernel<4, 64, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 384: kernel<4, 96, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 512: kernel<4, 128, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 640: kernel<4, 160, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 768: kernel<4, 192, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 896: kernel<4, 224, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 1024: kernel<4, 256, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
default: errorQuda("%s not implemented for %d threads", #kernel, tp.block.x); \
} \
} else if (tp.aux.x == 5) { \
switch (tp.block.x) { \
case 128: kernel<5, 32, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 256: kernel<5, 64, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 384: kernel<5, 96, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 512: kernel<5, 128, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 640: kernel<5, 160, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 768: kernel<5, 192, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 896: kernel<5, 224, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
case 1024: kernel<5, 256, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \
default: errorQuda("%s not implemented for %d threads", #kernel, tp.block.x); \
} \
} else { \
errorQuda("Not implemented for %d", tp.aux.x); \
}
/**
* @brief container to pass parameters for the gauge fixing quality kernel
*/
template <typename Gauge>
struct GaugeFixQualityArg : public ReduceArg<double2> {
int threads; // number of active threads required
int X[4]; // grid dimensions
#ifdef MULTI_GPU
int border[4];
#endif
Gauge dataOr;
GaugeFixQualityArg(const Gauge &dataOr, const cudaGaugeField &data)
: ReduceArg<double2>(), dataOr(dataOr) {
for ( int dir = 0; dir < 4; ++dir ) {
X[dir] = data.X()[dir] - data.R()[dir] * 2;
#ifdef MULTI_GPU
border[dir] = data.R()[dir];
#endif
}
threads = X[0]*X[1]*X[2]*X[3]/2;
}
double getAction(){ return result_h[0].x; }
double getTheta(){ return result_h[0].y; }
};
/**
* @brief Measure gauge fixing quality
*/
template<int blockSize, typename Float, typename Gauge, int gauge_dir>
__global__ void computeFix_quality(GaugeFixQualityArg<Gauge> argQ){
typedef complex<Float> Cmplx;
int idx_cb = threadIdx.x + blockIdx.x * blockDim.x;
int parity = threadIdx.y;
double2 data = make_double2(0.0,0.0);
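    // Each thread accumulates two quantities per site: data.x, the gauge-fixing functional
    // ("action") built from the real trace of the forward links at the site, and data.y, the
    // theta measure of how far the site is from satisfying the gauge condition; both are
    // reduced over the whole checkerboarded lattice by reduce2d below.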
while (idx_cb < argQ.threads) {
int X[4];
#pragma unroll
for ( int dr = 0; dr < 4; ++dr ) X[dr] = argQ.X[dr];
int x[4];
getCoords(x, idx_cb, X, parity);
#ifdef MULTI_GPU
#pragma unroll
for ( int dr = 0; dr < 4; ++dr ) {
x[dr] += argQ.border[dr];
X[dr] += 2 * argQ.border[dr];
}
#endif
Matrix<Cmplx,3> delta;
setZero(&delta);
//load upward links
for ( int mu = 0; mu < gauge_dir; mu++ ) {
Matrix<Cmplx,3> U;
argQ.dataOr.load((Float *)(U.data), linkIndex(x, X), mu, parity);
delta -= U;
}
//18*gauge_dir
data.x += -delta(0, 0).x - delta(1, 1).x - delta(2, 2).x;
//2
//load downward links
for ( int mu = 0; mu < gauge_dir; mu++ ) {
Matrix<Cmplx,3> U;
argQ.dataOr.load((Float*)(U.data),linkIndexM1(x,X,mu), mu, 1 - parity);
delta += U;
}
//18*gauge_dir
delta -= conj(delta);
//18
SubTraceUnit(delta);
//12
data.y += getRealTraceUVdagger(delta, delta);
//35
//T=36*gauge_dir+65
idx_cb += blockDim.x * gridDim.x;
}
reduce2d<blockSize,2>(argQ, data);
}
/**
* @brief Tunable object for the gauge fixing quality kernel
*/
template<typename Float, typename Gauge, int gauge_dir>
class GaugeFixQuality : TunableLocalParity {
GaugeFixQualityArg<Gauge> argQ;
mutable char aux_string[128]; // used as a label in the autotuner
private:
bool tuneGridDim() const { return true; }
public:
GaugeFixQuality(GaugeFixQualityArg<Gauge> &argQ) : argQ(argQ) { }
~GaugeFixQuality () { }
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
argQ.result_h[0] = make_double2(0.0,0.0);
LAUNCH_KERNEL_LOCAL_PARITY(computeFix_quality, tp, stream, argQ, Float, Gauge, gauge_dir);
qudaDeviceSynchronize();
if ( comm_size() != 1 ) comm_allreduce_array((double*)argQ.result_h, 2);
argQ.result_h[0].x /= (double)(3 * gauge_dir * 2 * argQ.threads * comm_size());
argQ.result_h[0].y /= (double)(3 * 2 * argQ.threads * comm_size());
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << argQ.X[0] << "x";
vol << argQ.X[1] << "x";
vol << argQ.X[2] << "x";
vol << argQ.X[3];
sprintf(aux_string,"threads=%d,prec=%lu,gaugedir=%d",argQ.threads, sizeof(Float),gauge_dir);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
long long flops() const {
return (36LL * gauge_dir + 65LL) * 2 * argQ.threads;
    } // Only correct if there is no link reconstruction; the cub reduction is not accounted for either
    //long long bytes() const { return (1)*2*gauge_dir*argQ.dataOr.Bytes(); } // the reduction is not accounted for; argQ.dataOr.Bytes() returns 0
    long long bytes() const {
      return 2LL * gauge_dir * 2 * argQ.threads * numParams * sizeof(Float);
    } // the reduction is not accounted for
};
/**
* @brief container to pass parameters for the gauge fixing kernel
*/
template <typename Float, typename Gauge>
struct GaugeFixArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
#ifdef MULTI_GPU
int border[4];
#endif
Gauge dataOr;
cudaGaugeField &data;
const Float relax_boost;
GaugeFixArg(Gauge & dataOr, cudaGaugeField & data, const Float relax_boost)
: dataOr(dataOr), data(data), relax_boost(relax_boost) {
for ( int dir = 0; dir < 4; ++dir ) {
X[dir] = data.X()[dir] - data.R()[dir] * 2;
#ifdef MULTI_GPU
border[dir] = data.R()[dir];
#endif
}
threads = X[0] * X[1] * X[2] * X[3] >> 1;
}
};
/**
* @brief Kernel to perform gauge fixing with overrelaxation for single-GPU
*/
template<int ImplementationType, int blockSize, typename Float, typename Gauge, int gauge_dir>
__global__ void computeFix(GaugeFixArg<Float, Gauge> arg, int parity){
typedef complex<Float> Cmplx;
int tid = (threadIdx.x + blockSize) % blockSize;
int idx = blockIdx.x * blockSize + tid;
if ( idx >= arg.threads ) return;
// 8 threads per lattice site
if ( ImplementationType < 3 ) {
int X[4];
#pragma unroll
for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr];
int x[4];
getCoords(x, idx, X, parity);
#ifdef MULTI_GPU
#pragma unroll
for ( int dr = 0; dr < 4; ++dr ) {
x[dr] += arg.border[dr];
X[dr] += 2 * arg.border[dr];
}
#endif
int mu = (threadIdx.x / blockSize);
int oddbit = parity;
if ( threadIdx.x >= blockSize * 4 ) {
mu -= 4;
x[mu] = (x[mu] - 1 + X[mu]) % X[mu];
oddbit = 1 - parity;
}
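      // Thread groups 0-3 handle the four forward links leaving the site; groups 4-7 step one
      // site back in direction mu and handle the corresponding backward links, which live on
      // the opposite parity.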
idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
Matrix<Cmplx,3> link;
arg.dataOr.load((Float*)(link.data),idx, mu, oddbit);
      // 8 threads per lattice site, the reduction is performed in shared memory without using atomicadd.
// this implementation needs 8x more shared memory than the implementation using atomicadd
if ( ImplementationType == 0 ) GaugeFixHit_NoAtomicAdd<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
      // 8 threads per lattice site, the reduction is performed in shared memory using atomicadd
if ( ImplementationType == 1 ) GaugeFixHit_AtomicAdd<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
      // 8 threads per lattice site, the reduction is performed in shared memory without using atomicadd.
// uses the same amount of shared memory as the atomicadd implementation with more thread block synchronization
if ( ImplementationType == 2 ) GaugeFixHit_NoAtomicAdd_LessSM<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
arg.dataOr.save((Float*)(link.data),idx, mu, oddbit);
}
// 4 threads per lattice site
else{
int X[4];
#pragma unroll
for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr];
int x[4];
getCoords(x, idx, X, parity);
#ifdef MULTI_GPU
#pragma unroll
for ( int dr = 0; dr < 4; ++dr ) {
x[dr] += arg.border[dr];
X[dr] += 2 * arg.border[dr];
}
#endif
int mu = (threadIdx.x / blockSize);
idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
Matrix<Cmplx,3> link;
//load upward link
arg.dataOr.load((Float*)(link.data),idx, mu, parity);
x[mu] = (x[mu] - 1 + X[mu]) % X[mu];
int idx1 = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
Matrix<Cmplx,3> link1;
//load downward link
arg.dataOr.load((Float*)(link1.data),idx1, mu, 1 - parity);
      // 4 threads per lattice site, the reduction is performed in shared memory without using atomicadd.
// this implementation needs 4x more shared memory than the implementation using atomicadd
if ( ImplementationType == 3 ) GaugeFixHit_NoAtomicAdd<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
      // 4 threads per lattice site, the reduction is performed in shared memory using atomicadd
if ( ImplementationType == 4 ) GaugeFixHit_AtomicAdd<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
      // 4 threads per lattice site, the reduction is performed in shared memory without using atomicadd.
// uses the same amount of shared memory as the atomicadd implementation with more thread block synchronization
if ( ImplementationType == 5 ) GaugeFixHit_NoAtomicAdd_LessSM<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
arg.dataOr.save((Float*)(link.data),idx, mu, parity);
arg.dataOr.save((Float*)(link1.data),idx1, mu, 1 - parity);
}
}
/**
* @brief Tunable object for the gauge fixing kernel
*/
template<typename Float, typename Gauge, int gauge_dir>
class GaugeFix : Tunable {
GaugeFixArg<Float, Gauge> arg;
int parity;
mutable char aux_string[128]; // used as a label in the autotuner
protected:
dim3 createGrid(const TuneParam ¶m) const
{
unsigned int blockx = param.block.x / 8;
if (param.aux.x > 2) blockx = param.block.x / 4;
unsigned int gx = (arg.threads + blockx - 1) / blockx;
return dim3(gx, 1, 1);
}
bool advanceBlockDim (TuneParam ¶m) const {
      // Use param.aux.x to tune and save state for the best kernel option,
      // i.e. whether or not to use atomicAdd operations and 4 or 8 threads per lattice site.
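      // The six values of param.aux.x correspond to the kernel ImplementationType:
      //   0: 8 threads/site, shared-memory reduction without atomicAdd (largest shared-memory use)
      //   1: 8 threads/site, shared-memory reduction with atomicAdd
      //   2: 8 threads/site, no atomicAdd, same shared-memory footprint as option 1
      //   3: 4 threads/site, no atomicAdd (4x the shared memory of the atomicAdd variant)
      //   4: 4 threads/site, with atomicAdd
      //   5: 4 threads/site, no atomicAdd, same shared-memory footprint as option 4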
const unsigned int min_threads0 = 32 * 8;
const unsigned int min_threads1 = 32 * 4;
const unsigned int max_threads = 1024; // FIXME: use deviceProp.maxThreadsDim[0];
const unsigned int atmadd = 0;
unsigned int min_threads = min_threads0;
param.aux.x += atmadd; // USE TO SELECT BEST KERNEL OPTION WITH/WITHOUT USING ATOMICADD
if (param.aux.x > 2) min_threads = 32 * 4;
param.block.x += min_threads;
param.block.y = 1;
param.grid = createGrid(param);
if ((param.block.x >= min_threads) && (param.block.x <= max_threads)) {
param.shared_bytes = sharedBytesPerBlock(param);
return true;
} else if (param.aux.x == 0) {
param.block.x = min_threads0;
param.block.y = 1;
param.aux.x = 1; // USE FOR ATOMIC ADD
param.grid = createGrid(param);
param.shared_bytes = param.block.x * 4 * sizeof(Float) / 8;
return true;
} else if (param.aux.x == 1) {
param.block.x = min_threads0;
param.block.y = 1;
param.aux.x = 2; // USE FOR NO ATOMIC ADD and LESS SHARED MEM
param.grid = createGrid(param);
param.shared_bytes = param.block.x * 4 * sizeof(Float) / 8;
return true;
} else if (param.aux.x == 2) {
param.block.x = min_threads1;
param.block.y = 1;
param.aux.x = 3; // USE FOR NO ATOMIC ADD
param.grid = createGrid(param);
param.shared_bytes = param.block.x * 4 * sizeof(Float);
return true;
} else if (param.aux.x == 3) {
param.block.x = min_threads1;
param.block.y = 1;
param.aux.x = 4;
param.grid = createGrid(param);
param.shared_bytes = param.block.x * sizeof(Float);
return true;
} else if (param.aux.x == 4) {
param.block.x = min_threads1;
param.block.y = 1;
param.aux.x = 5;
param.grid = createGrid(param);
param.shared_bytes = param.block.x * sizeof(Float);
return true;
} else {
return false;
}
}
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
switch (param.aux.x) {
case 0: return param.block.x * 4 * sizeof(Float);
case 1: return param.block.x * 4 * sizeof(Float) / 8;
case 2: return param.block.x * 4 * sizeof(Float) / 8;
case 3: return param.block.x * 4 * sizeof(Float);
default: return param.block.x * sizeof(Float);
}
}
bool tuneSharedBytes() const {
return false;
} // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFix(GaugeFixArg<Float, Gauge> &arg) : arg(arg), parity(0) { }
~GaugeFix () { }
void setParity(const int par){
parity = par;
}
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
LAUNCH_KERNEL_GAUGEFIX(computeFix, tp, stream, arg, parity, Float, Gauge, gauge_dir);
}
virtual void initTuneParam(TuneParam ¶m) const
{
param.block = dim3(256, 1, 1);
param.aux.x = 0;
param.grid = createGrid(param);
param.shared_bytes = sharedBytesPerBlock(param);
}
virtual void defaultTuneParam(TuneParam ¶m) const {
initTuneParam(param);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%lu,gaugedir=%d",arg.threads,sizeof(Float),gauge_dir);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps(Tunable::paramString(param));
ps << ", atomicadd=" << param.aux.x;
return ps.str();
}
      // backup and restore the gauge field so that autotuning does not corrupt the links
void preTune() {
arg.data.backup();
}
void postTune() {
arg.data.restore();
}
long long flops() const {
return 3LL * (22 + 28 * gauge_dir + 224 * 3) * arg.threads;
} // Only correct if there is no link reconstruction
//long long bytes() const { return (1)*8*2*arg.dataOr.Bytes(); } // Only correct if there is no link reconstruction load+save
long long bytes() const {
return 8LL * 2 * arg.threads * numParams * sizeof(Float);
    } // Only correct if there is no link reconstruction (load+save)
};
#ifdef MULTI_GPU
template <typename Float, typename Gauge>
struct GaugeFixInteriorPointsArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
#ifdef MULTI_GPU
int border[4];
#endif
Gauge dataOr;
cudaGaugeField &data;
const Float relax_boost;
GaugeFixInteriorPointsArg(Gauge & dataOr, cudaGaugeField & data, const Float relax_boost)
: dataOr(dataOr), data(data), relax_boost(relax_boost) {
#ifdef MULTI_GPU
for ( int dir = 0; dir < 4; ++dir ) {
if ( comm_dim_partitioned(dir)) border[dir] = data.R()[dir] + 1; //skip BORDER_RADIUS + face border point
else border[dir] = 0;
}
for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir] - border[dir] * 2;
#else
for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir];
#endif
threads = X[0] * X[1] * X[2] * X[3] >> 1;
}
};
/**
* @brief Kernel to perform gauge fixing with overrelaxation in the interior points for multi-GPU implementation
*/
template<int ImplementationType, int blockSize, typename Float, typename Gauge, int gauge_dir>
__global__ void computeFixInteriorPoints(GaugeFixInteriorPointsArg<Float, Gauge> arg, int parity){
int tid = (threadIdx.x + blockSize) % blockSize;
int idx = blockIdx.x * blockSize + tid;
if ( idx >= arg.threads ) return;
typedef complex<Float> Complex;
int X[4];
#pragma unroll
for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr];
int x[4];
#ifdef MULTI_GPU
int za = (idx / (X[0] / 2));
int zb = (za / X[1]);
x[1] = za - zb * X[1];
x[3] = (zb / X[2]);
x[2] = zb - x[3] * X[2];
int p = 0; for ( int dr = 0; dr < 4; ++dr ) p += arg.border[dr];
p = p & 1;
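    // p is the checkerboard parity of the border offset: the interior sub-lattice is shifted
    // by border[] sites in every direction, so the requested parity has to be corrected by p
    // when reconstructing the coordinates in the extended lattice.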
int x1odd = (x[1] + x[2] + x[3] + parity + p) & 1;
//int x1odd = (x[1] + x[2] + x[3] + parity) & 1;
x[0] = (2 * idx + x1odd) - za * X[0];
for ( int dr = 0; dr < 4; ++dr ) {
x[dr] += arg.border[dr];
X[dr] += 2 * arg.border[dr];
}
#else
getCoords(x, idx, X, parity);
#endif
int mu = (threadIdx.x / blockSize);
// 8 threads per lattice site
if ( ImplementationType < 3 ) {
if ( threadIdx.x >= blockSize * 4 ) {
mu -= 4;
x[mu] = (x[mu] - 1 + X[mu]) % X[mu];
parity = 1 - parity;
}
idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
Matrix<Complex,3> link;
arg.dataOr.load((Float*)(link.data),idx, mu, parity);
      // 8 threads per lattice site, the reduction is performed in shared memory without using atomicadd.
// this implementation needs 8x more shared memory than the implementation using atomicadd
if ( ImplementationType == 0 ) GaugeFixHit_NoAtomicAdd<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
      // 8 threads per lattice site, the reduction is performed in shared memory using atomicadd
if ( ImplementationType == 1 ) GaugeFixHit_AtomicAdd<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
      // 8 threads per lattice site, the reduction is performed in shared memory without using atomicadd.
// uses the same amount of shared memory as the atomicadd implementation with more thread block synchronization
if ( ImplementationType == 2 ) GaugeFixHit_NoAtomicAdd_LessSM<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
arg.dataOr.save((Float*)(link.data),idx, mu, parity);
}
// 4 threads per lattice site
else{
idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
Matrix<Complex,3> link;
arg.dataOr.load((Float*)(link.data),idx, mu, parity);
x[mu] = (x[mu] - 1 + X[mu]) % X[mu];
int idx1 = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
Matrix<Complex,3> link1;
arg.dataOr.load((Float*)(link1.data),idx1, mu, 1 - parity);
      // 4 threads per lattice site, the reduction is performed in shared memory without using atomicadd.
// this implementation needs 4x more shared memory than the implementation using atomicadd
if ( ImplementationType == 3 ) GaugeFixHit_NoAtomicAdd<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
      // 4 threads per lattice site, the reduction is performed in shared memory using atomicadd
if ( ImplementationType == 4 ) GaugeFixHit_AtomicAdd<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
      // 4 threads per lattice site, the reduction is performed in shared memory without using atomicadd.
// uses the same amount of shared memory as the atomicadd implementation with more thread block synchronization
if ( ImplementationType == 5 ) GaugeFixHit_NoAtomicAdd_LessSM<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
arg.dataOr.save((Float*)(link.data),idx, mu, parity);
arg.dataOr.save((Float*)(link1.data),idx1, mu, 1 - parity);
}
}
/**
* @brief Tunable object for the interior points of the gauge fixing
* kernel in multi-GPU implementation
*/
template<typename Float, typename Gauge, int gauge_dir>
class GaugeFixInteriorPoints : Tunable {
GaugeFixInteriorPointsArg<Float, Gauge> arg;
int parity;
mutable char aux_string[128]; // used as a label in the autotuner
protected:
dim3 createGrid(const TuneParam ¶m) const
{
unsigned int blockx = param.block.x / 8;
if (param.aux.x > 2) blockx = param.block.x / 4;
unsigned int gx = (arg.threads + blockx - 1) / blockx;
return dim3(gx, 1, 1);
}
bool advanceBlockDim (TuneParam ¶m) const {
      // Use param.aux.x to tune and save state for the best kernel option,
      // i.e. whether or not to use atomicAdd operations and 4 or 8 threads per lattice site.
const unsigned int min_threads0 = 32 * 8;
const unsigned int min_threads1 = 32 * 4;
const unsigned int max_threads = 1024; // FIXME: use deviceProp.maxThreadsDim[0];
const unsigned int atmadd = 0;
unsigned int min_threads = min_threads0;
param.aux.x += atmadd; // USE TO SELECT BEST KERNEL OPTION WITH/WITHOUT USING ATOMICADD
if (param.aux.x > 2) min_threads = 32 * 4;
param.block.x += min_threads;
param.block.y = 1;
param.grid = createGrid(param);
if ((param.block.x >= min_threads) && (param.block.x <= max_threads)) {
param.shared_bytes = sharedBytesPerBlock(param);
return true;
} else if (param.aux.x == 0) {
param.block.x = min_threads0;
param.block.y = 1;
param.aux.x = 1; // USE FOR ATOMIC ADD
param.grid = createGrid(param);
param.shared_bytes = param.block.x * 4 * sizeof(Float) / 8;
return true;
} else if (param.aux.x == 1) {
param.block.x = min_threads0;
param.block.y = 1;
param.aux.x = 2; // USE FOR NO ATOMIC ADD and LESS SHARED MEM
param.grid = createGrid(param);
param.shared_bytes = param.block.x * 4 * sizeof(Float) / 8;
return true;
} else if (param.aux.x == 2) {
param.block.x = min_threads1;
param.block.y = 1;
param.aux.x = 3; // USE FOR NO ATOMIC ADD
param.grid = createGrid(param);
param.shared_bytes = param.block.x * 4 * sizeof(Float);
return true;
} else if (param.aux.x == 3) {
param.block.x = min_threads1;
param.block.y = 1;
param.aux.x = 4;
param.grid = createGrid(param);
param.shared_bytes = param.block.x * sizeof(Float);
return true;
} else if (param.aux.x == 4) {
param.block.x = min_threads1;
param.block.y = 1;
param.aux.x = 5;
param.grid = createGrid(param);
param.shared_bytes = param.block.x * sizeof(Float);
return true;
} else {
return false;
}
}
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
switch (param.aux.x) {
case 0: return param.block.x * 4 * sizeof(Float);
case 1: return param.block.x * 4 * sizeof(Float) / 8;
case 2: return param.block.x * 4 * sizeof(Float) / 8;
case 3: return param.block.x * 4 * sizeof(Float);
default: return param.block.x * sizeof(Float);
}
}
bool tuneSharedBytes() const {
return false;
} // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFixInteriorPoints(GaugeFixInteriorPointsArg<Float, Gauge> &arg) : arg(arg), parity(0) {}
~GaugeFixInteriorPoints () { }
void setParity(const int par) { parity = par; }
void apply(const cudaStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
LAUNCH_KERNEL_GAUGEFIX(computeFixInteriorPoints, tp, stream, arg, parity, Float, Gauge, gauge_dir);
}
virtual void initTuneParam(TuneParam ¶m) const
{
param.block = dim3(256, 1, 1);
param.aux.x = 0;
param.grid = createGrid(param);
param.shared_bytes = sharedBytesPerBlock(param);
}
virtual void defaultTuneParam(TuneParam ¶m) const { initTuneParam(param); }
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%lu,gaugedir=%d",arg.threads,sizeof(Float),gauge_dir);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps(Tunable::paramString(param));
ps << ", atomicadd=" << param.aux.x;
return ps.str();
}
      // backup and restore the gauge field so that autotuning does not corrupt the links
void preTune() {
arg.data.backup();
}
void postTune() {
arg.data.restore();
}
long long flops() const {
return 3LL * (22 + 28 * gauge_dir + 224 * 3) * arg.threads;
} // Only correct if there is no link reconstruction
//long long bytes() const { return (1)*8*2*arg.dataOr.Bytes(); } // Only correct if there is no link reconstruction load+save
long long bytes() const {
return 8LL * 2 * arg.threads * numParams * sizeof(Float);
} // Only correct if there is no link reconstruction load+save
};
template <typename Float, typename Gauge>
struct GaugeFixBorderPointsArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
int border[4];
int *borderpoints[2];
int *faceindicessize[2];
size_t faceVolume[4];
size_t faceVolumeCB[4];
Gauge dataOr;
cudaGaugeField &data;
const Float relax_boost;
GaugeFixBorderPointsArg(Gauge & dataOr, cudaGaugeField & data, const Float relax_boost, size_t faceVolume_[4], size_t faceVolumeCB_[4])
: dataOr(dataOr), data(data), relax_boost(relax_boost) {
for ( int dir = 0; dir < 4; ++dir ) {
X[dir] = data.X()[dir] - data.R()[dir] * 2;
border[dir] = data.R()[dir];
}
/*for(int dir=0; dir<4; ++dir){
if(comm_dim_partitioned(dir)) border[dir] = BORDER_RADIUS;
else border[dir] = 0;
}
for(int dir=0; dir<4; ++dir) X[dir] = data.X()[dir] - border[dir]*2;*/
for ( int dir = 0; dir < 4; ++dir ) {
faceVolume[dir] = faceVolume_[dir];
faceVolumeCB[dir] = faceVolumeCB_[dir];
}
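      // PreCalculateLatticeIndices (from gauge_fix_ovr_extra.h) builds, for each parity, the
      // list of border-site indices consumed by the border kernel and sets the number of
      // active threads accordingly.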
if ( comm_partitioned() ) PreCalculateLatticeIndices(faceVolume, faceVolumeCB, X, border, threads, borderpoints);
}
};
/**
* @brief Kernel to perform gauge fixing with overrelaxation in the border points for multi-GPU implementation
*/
template<int ImplementationType, int blockSize, typename Float, typename Gauge, int gauge_dir>
__global__ void computeFixBorderPoints(GaugeFixBorderPointsArg<Float, Gauge> arg, int parity){
typedef complex<Float> Cmplx;
int tid = (threadIdx.x + blockSize) % blockSize;
int idx = blockIdx.x * blockSize + tid;
if ( idx >= arg.threads ) return;
int mu = (threadIdx.x / blockSize);
idx = arg.borderpoints[parity][idx];
int X[4], x[4];
x[3] = idx / (arg.X[0] * arg.X[1] * arg.X[2]);
x[2] = (idx / (arg.X[0] * arg.X[1])) % arg.X[2];
x[1] = (idx / arg.X[0]) % arg.X[1];
x[0] = idx % arg.X[0];
#pragma unroll
for ( int dr = 0; dr < 4; ++dr ) x[dr] += arg.border[dr];
#pragma unroll
for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr] + 2 * arg.border[dr];
// 8 threads per lattice site
if ( ImplementationType < 3 ) {
if ( threadIdx.x >= blockSize * 4 ) {
mu -= 4;
x[mu] = (x[mu] - 1 + X[mu]) % X[mu];
parity = 1 - parity;
}
idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
Matrix<Cmplx,3> link;
arg.dataOr.load((Float*)(link.data),idx, mu, parity);
      // 8 threads per lattice site, the reduction is performed in shared memory without using atomicadd.
// this implementation needs 8x more shared memory than the implementation using atomicadd
if ( ImplementationType == 0 ) GaugeFixHit_NoAtomicAdd<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
      // 8 threads per lattice site, the reduction is performed in shared memory using atomicadd
if ( ImplementationType == 1 ) GaugeFixHit_AtomicAdd<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
      // 8 threads per lattice site, the reduction is performed in shared memory without using atomicadd.
// uses the same amount of shared memory as the atomicadd implementation with more thread block synchronization
if ( ImplementationType == 2 ) GaugeFixHit_NoAtomicAdd_LessSM<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
arg.dataOr.save((Float*)(link.data),idx, mu, parity);
}
// 4 threads per lattice site
else{
idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
Matrix<Cmplx,3> link;
arg.dataOr.load((Float*)(link.data),idx, mu, parity);
x[mu] = (x[mu] - 1 + X[mu]) % X[mu];
int idx1 = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
Matrix<Cmplx,3> link1;
arg.dataOr.load((Float*)(link1.data),idx1, mu, 1 - parity);
      // 4 threads per lattice site, the reduction is performed in shared memory without using atomicadd.
// this implementation needs 4x more shared memory than the implementation using atomicadd
if ( ImplementationType == 3 ) GaugeFixHit_NoAtomicAdd<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
      // 4 threads per lattice site, the reduction is performed in shared memory using atomicadd
if ( ImplementationType == 4 ) GaugeFixHit_AtomicAdd<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
      // 4 threads per lattice site, the reduction is performed in shared memory without using atomicadd.
// uses the same amount of shared memory as the atomicadd implementation with more thread block synchronization
if ( ImplementationType == 5 ) GaugeFixHit_NoAtomicAdd_LessSM<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
arg.dataOr.save((Float*)(link.data),idx, mu, parity);
arg.dataOr.save((Float*)(link1.data),idx1, mu, 1 - parity);
}
}
/**
* @brief Tunable object for the border points of the gauge fixing kernel in multi-GPU implementation
*/
template<typename Float, typename Gauge, int gauge_dir>
class GaugeFixBorderPoints : Tunable {
GaugeFixBorderPointsArg<Float, Gauge> arg;
int parity;
mutable char aux_string[128]; // used as a label in the autotuner
protected:
dim3 createGrid(const TuneParam ¶m) const
{
unsigned int blockx = param.block.x / 8;
if (param.aux.x > 2) blockx = param.block.x / 4;
unsigned int gx = (arg.threads + blockx - 1) / blockx;
return dim3(gx, 1, 1);
}
bool advanceBlockDim(TuneParam ¶m) const
{
      // Use param.aux.x to tune and save state for the best kernel option,
      // i.e. whether or not to use atomicAdd operations and 4 or 8 threads per lattice site.
const unsigned int min_threads0 = 32 * 8;
const unsigned int min_threads1 = 32 * 4;
const unsigned int max_threads = 1024; // FIXME: use deviceProp.maxThreadsDim[0];
const unsigned int atmadd = 0;
unsigned int min_threads = min_threads0;
param.aux.x += atmadd; // USE TO SELECT BEST KERNEL OPTION WITH/WITHOUT USING ATOMICADD
if (param.aux.x > 2) min_threads = 32 * 4;
param.block.x += min_threads;
param.block.y = 1;
param.grid = createGrid(param);
if ((param.block.x >= min_threads) && (param.block.x <= max_threads)) {
param.shared_bytes = sharedBytesPerBlock(param);
return true;
} else if (param.aux.x == 0) {
param.block.x = min_threads0;
param.block.y = 1;
param.aux.x = 1; // USE FOR ATOMIC ADD
param.grid = createGrid(param);
param.shared_bytes = param.block.x * 4 * sizeof(Float) / 8;
return true;
} else if (param.aux.x == 1) {
param.block.x = min_threads0;
param.block.y = 1;
param.aux.x = 2; // USE FOR NO ATOMIC ADD and LESS SHARED MEM
param.grid = createGrid(param);
param.shared_bytes = param.block.x * 4 * sizeof(Float) / 8;
return true;
} else if (param.aux.x == 2) {
param.block.x = min_threads1;
param.block.y = 1;
param.aux.x = 3; // USE FOR NO ATOMIC ADD
param.grid = createGrid(param);
param.shared_bytes = param.block.x * 4 * sizeof(Float);
return true;
} else if (param.aux.x == 3) {
param.block.x = min_threads1;
param.block.y = 1;
param.aux.x = 4;
param.grid = createGrid(param);
param.shared_bytes = param.block.x * sizeof(Float);
return true;
} else if (param.aux.x == 4) {
param.block.x = min_threads1;
param.block.y = 1;
param.aux.x = 5;
param.grid = createGrid(param);
param.shared_bytes = param.block.x * sizeof(Float);
return true;
} else {
return false;
}
}
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
switch (param.aux.x) {
case 0: return param.block.x * 4 * sizeof(Float);
case 1: return param.block.x * 4 * sizeof(Float) / 8;
case 2: return param.block.x * 4 * sizeof(Float) / 8;
case 3: return param.block.x * 4 * sizeof(Float);
default: return param.block.x * sizeof(Float);
}
}
bool tuneSharedBytes() const {
return false;
} // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFixBorderPoints(GaugeFixBorderPointsArg<Float, Gauge> &arg) : arg(arg), parity(0) { }
~GaugeFixBorderPoints () {
if ( comm_partitioned() ) for ( int i = 0; i < 2; i++ ) pool_device_free(arg.borderpoints[i]);
}
void setParity(const int par){
parity = par;
}
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
LAUNCH_KERNEL_GAUGEFIX(computeFixBorderPoints, tp, stream, arg, parity, Float, Gauge, gauge_dir);
}
virtual void initTuneParam(TuneParam ¶m) const
{
param.block = dim3(256, 1, 1);
param.aux.x = 0;
param.grid = createGrid(param);
param.shared_bytes = sharedBytesPerBlock(param);
}
virtual void defaultTuneParam(TuneParam ¶m) const {
initTuneParam(param);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%lu,gaugedir=%d",arg.threads,sizeof(Float),gauge_dir);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps(Tunable::paramString(param));
ps << ", atomicadd=" << param.aux.x;
return ps.str();
}
      // backup and restore the gauge field so that autotuning does not corrupt the links
void preTune() {
arg.data.backup();
}
void postTune() {
arg.data.restore();
}
long long flops() const {
return 3LL * (22 + 28 * gauge_dir + 224 * 3) * arg.threads;
} // Only correct if there is no link reconstruction
//long long bytes() const { return (1)*8*2*arg.dataOr.Bytes(); } // Only correct if there is no link reconstruction load+save
long long bytes() const {
return 8LL * 2 * arg.threads * numParams * sizeof(Float);
} // Only correct if there is no link reconstruction load+save
};
template <typename Gauge>
struct GaugeFixUnPackArg {
int X[4]; // grid dimensions
#ifdef MULTI_GPU
int border[4];
#endif
Gauge dataOr;
GaugeFixUnPackArg(Gauge & dataOr, cudaGaugeField & data)
: dataOr(dataOr) {
for ( int dir = 0; dir < 4; ++dir ) {
X[dir] = data.X()[dir] - data.R()[dir] * 2;
#ifdef MULTI_GPU
border[dir] = data.R()[dir];
#endif
}
}
};
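  // The two kernels below move face links between the gauge field and contiguous communication
  // buffers. With pack == true they gather the links of a face into the buffer (via the
  // reconstruct Pack routine); with pack == false they scatter a received buffer back into the
  // field. Kernel_UnPackGhost addresses the ghost slice just below the local volume in the
  // given dimension, while Kernel_UnPackTop addresses the topmost interior slice.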
template<int NElems, typename Float, typename Gauge, bool pack>
__global__ void Kernel_UnPackGhost(int size, GaugeFixUnPackArg<Gauge> arg, complex<Float> *array, int parity, int face, int dir){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx >= size ) return;
int X[4];
for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr];
int x[4];
int za, xodd;
int borderid = 0;
parity = 1 - parity;
switch ( face ) {
case 0: //X FACE
za = idx / ( X[1] / 2);
x[3] = za / X[2];
x[2] = za - x[3] * X[2];
x[0] = borderid;
xodd = (borderid + x[2] + x[3] + parity) & 1;
x[1] = (2 * idx + xodd) - za * X[1];
break;
case 1: //Y FACE
za = idx / ( X[0] / 2);
x[3] = za / X[2];
x[2] = za - x[3] * X[2];
x[1] = borderid;
xodd = (borderid + x[2] + x[3] + parity) & 1;
x[0] = (2 * idx + xodd) - za * X[0];
break;
case 2: //Z FACE
za = idx / ( X[0] / 2);
x[3] = za / X[1];
x[1] = za - x[3] * X[1];
x[2] = borderid;
xodd = (borderid + x[1] + x[3] + parity) & 1;
x[0] = (2 * idx + xodd) - za * X[0];
break;
case 3: //T FACE
za = idx / ( X[0] / 2);
x[2] = za / X[1];
x[1] = za - x[2] * X[1];
x[3] = borderid;
xodd = (borderid + x[1] + x[2] + parity) & 1;
x[0] = (2 * idx + xodd) - za * X[0];
break;
}
for ( int dr = 0; dr < 4; ++dr ) {
x[dr] += arg.border[dr];
X[dr] += 2 * arg.border[dr];
}
x[face] -= 1;
parity = 1 - parity;
int id = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
typedef complex<Float> Cmplx;
typedef typename mapper<Float>::type RegType;
RegType tmp[NElems];
RegType data[18];
if ( pack ) {
arg.dataOr.load(data, id, dir, parity);
arg.dataOr.reconstruct.Pack(tmp, data, id);
for ( int i = 0; i < NElems / 2; ++i ) array[idx + size * i] = ((Cmplx*)tmp)[i];
}
else{
for ( int i = 0; i < NElems / 2; ++i ) ((Cmplx*)tmp)[i] = array[idx + size * i];
arg.dataOr.reconstruct.Unpack(data, tmp, id, dir, 0, arg.dataOr.X, arg.dataOr.R);
arg.dataOr.save(data, id, dir, parity);
}
}
template<int NElems, typename Float, typename Gauge, bool pack>
__global__ void Kernel_UnPackTop(int size, GaugeFixUnPackArg<Gauge> arg, complex<Float> *array, int parity, int face, int dir){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx >= size ) return;
int X[4];
for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr];
int x[4];
int za, xodd;
int borderid = arg.X[face] - 1;
switch ( face ) {
case 0: //X FACE
za = idx / ( X[1] / 2);
x[3] = za / X[2];
x[2] = za - x[3] * X[2];
x[0] = borderid;
xodd = (borderid + x[2] + x[3] + parity) & 1;
x[1] = (2 * idx + xodd) - za * X[1];
break;
case 1: //Y FACE
za = idx / ( X[0] / 2);
x[3] = za / X[2];
x[2] = za - x[3] * X[2];
x[1] = borderid;
xodd = (borderid + x[2] + x[3] + parity) & 1;
x[0] = (2 * idx + xodd) - za * X[0];
break;
case 2: //Z FACE
za = idx / ( X[0] / 2);
x[3] = za / X[1];
x[1] = za - x[3] * X[1];
x[2] = borderid;
xodd = (borderid + x[1] + x[3] + parity) & 1;
x[0] = (2 * idx + xodd) - za * X[0];
break;
case 3: //T FACE
za = idx / ( X[0] / 2);
x[2] = za / X[1];
x[1] = za - x[2] * X[1];
x[3] = borderid;
xodd = (borderid + x[1] + x[2] + parity) & 1;
x[0] = (2 * idx + xodd) - za * X[0];
break;
}
for ( int dr = 0; dr < 4; ++dr ) {
x[dr] += arg.border[dr];
X[dr] += 2 * arg.border[dr];
}
int id = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
typedef complex<Float> Cmplx;
typedef typename mapper<Float>::type RegType;
RegType tmp[NElems];
RegType data[18];
if ( pack ) {
arg.dataOr.load(data, id, dir, parity);
arg.dataOr.reconstruct.Pack(tmp, data, id);
for ( int i = 0; i < NElems / 2; ++i ) array[idx + size * i] = ((Cmplx*)tmp)[i];
}
else{
for ( int i = 0; i < NElems / 2; ++i ) ((Cmplx*)tmp)[i] = array[idx + size * i];
arg.dataOr.reconstruct.Unpack(data, tmp, id, dir, 0, arg.dataOr.X, arg.dataOr.R);
arg.dataOr.save(data, id, dir, parity);
}
}
#endif
template<typename Float, typename Gauge, int NElems, int gauge_dir>
void gaugefixingOVR( Gauge dataOr, cudaGaugeField& data,
const int Nsteps, const int verbose_interval,
const Float relax_boost, const double tolerance,
const int reunit_interval, const int stopWtheta) {
TimeProfile profileInternalGaugeFixOVR("InternalGaugeFixQudaOVR", false);
profileInternalGaugeFixOVR.TPSTART(QUDA_PROFILE_COMPUTE);
double flop = 0;
double byte = 0;
printfQuda("\tOverrelaxation boost parameter: %lf\n", (double)relax_boost);
printfQuda("\tStop criterium: %lf\n", tolerance);
if ( stopWtheta ) printfQuda("\tStop criterium method: theta\n");
else printfQuda("\tStop criterium method: Delta\n");
printfQuda("\tMaximum number of iterations: %d\n", Nsteps);
printfQuda("\tReunitarize at every %d steps\n", reunit_interval);
printfQuda("\tPrint convergence results at every %d steps\n", verbose_interval);
const double unitarize_eps = 1e-14;
const double max_error = 1e-10;
const int reunit_allow_svd = 1;
const int reunit_svd_only = 0;
const double svd_rel_error = 1e-6;
const double svd_abs_error = 1e-6;
setUnitarizeLinksConstants(unitarize_eps, max_error,
reunit_allow_svd, reunit_svd_only,
svd_rel_error, svd_abs_error);
int num_failures = 0;
int* num_failures_dev = static_cast<int*>(pool_device_malloc(sizeof(int)));
cudaMemset(num_failures_dev, 0, sizeof(int));
GaugeFixQualityArg<Gauge> argQ(dataOr, data);
GaugeFixQuality<Float,Gauge, gauge_dir> GaugeFixQuality(argQ);
GaugeFixArg<Float, Gauge> arg(dataOr, data, relax_boost);
GaugeFix<Float,Gauge, gauge_dir> gaugeFix(arg);
#ifdef MULTI_GPU
void *send[4];
void *recv[4];
void *sendg[4];
void *recvg[4];
void *send_d[4];
void *recv_d[4];
void *sendg_d[4];
void *recvg_d[4];
void *hostbuffer_h[4];
cudaStream_t GFStream[9];
size_t offset[4];
size_t bytes[4];
size_t faceVolume[4];
size_t faceVolumeCB[4];
// do the exchange
MsgHandle *mh_recv_back[4];
MsgHandle *mh_recv_fwd[4];
MsgHandle *mh_send_fwd[4];
MsgHandle *mh_send_back[4];
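    // Halo buffers used in the gauge-fixing loop: send_d/send hold the packed top face that is
    // sent to the forward neighbour, sendg_d/sendg hold the packed bottom ghost slice sent to
    // the backward neighbour; recv_d/recv receive from the backward neighbour and are injected
    // into the local bottom ghost, while recvg_d/recvg receive from the forward neighbour and
    // are injected into the local top face.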
int X[4];
dim3 block[4];
dim3 grid[4];
if ( comm_partitioned() ) {
for ( int dir = 0; dir < 4; ++dir ) {
X[dir] = data.X()[dir] - data.R()[dir] * 2;
if ( !commDimPartitioned(dir) && data.R()[dir] != 0 ) errorQuda("Not supported!\n");
}
for ( int i = 0; i < 4; i++ ) {
faceVolume[i] = 1;
for ( int j = 0; j < 4; j++ ) {
if ( i == j ) continue;
faceVolume[i] *= X[j];
}
faceVolumeCB[i] = faceVolume[i] / 2;
}
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
offset[d] = faceVolumeCB[d] * NElems;
bytes[d] = sizeof(Float) * offset[d];
send_d[d] = device_malloc(bytes[d]);
recv_d[d] = device_malloc(bytes[d]);
sendg_d[d] = device_malloc(bytes[d]);
recvg_d[d] = device_malloc(bytes[d]);
cudaStreamCreate(&GFStream[d]);
cudaStreamCreate(&GFStream[4 + d]);
#ifndef GPU_COMMS
hostbuffer_h[d] = (void*)pinned_malloc(4 * bytes[d]);
#endif
block[d] = make_uint3(128, 1, 1);
grid[d] = make_uint3((faceVolumeCB[d] + block[d].x - 1) / block[d].x, 1, 1);
}
cudaStreamCreate(&GFStream[8]);
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
#ifdef GPU_COMMS
recv[d] = recv_d[d];
send[d] = send_d[d];
recvg[d] = recvg_d[d];
sendg[d] = sendg_d[d];
#else
recv[d] = hostbuffer_h[d];
send[d] = static_cast<char*>(hostbuffer_h[d]) + bytes[d];
recvg[d] = static_cast<char*>(hostbuffer_h[d]) + 3 * bytes[d];
sendg[d] = static_cast<char*>(hostbuffer_h[d]) + 2 * bytes[d];
#endif
mh_recv_back[d] = comm_declare_receive_relative(recv[d], d, -1, bytes[d]);
mh_recv_fwd[d] = comm_declare_receive_relative(recvg[d], d, +1, bytes[d]);
mh_send_back[d] = comm_declare_send_relative(sendg[d], d, -1, bytes[d]);
mh_send_fwd[d] = comm_declare_send_relative(send[d], d, +1, bytes[d]);
}
}
GaugeFixUnPackArg<Gauge> dataexarg(dataOr, data);
GaugeFixBorderPointsArg<Float, Gauge> argBorder(dataOr, data, relax_boost, faceVolume, faceVolumeCB);
GaugeFixBorderPoints<Float,Gauge, gauge_dir> gfixBorderPoints(argBorder);
GaugeFixInteriorPointsArg<Float, Gauge> argInt(dataOr, data, relax_boost);
GaugeFixInteriorPoints<Float,Gauge, gauge_dir> gfixIntPoints(argInt);
#endif
GaugeFixQuality.apply(0);
flop += (double)GaugeFixQuality.flops();
byte += (double)GaugeFixQuality.bytes();
double action0 = argQ.getAction();
printfQuda("Step: %d\tAction: %.16e\ttheta: %.16e\n", 0, argQ.getAction(), argQ.getTheta());
unitarizeLinks(data, data, num_failures_dev);
qudaMemcpy(&num_failures, num_failures_dev, sizeof(int), cudaMemcpyDeviceToHost);
if ( num_failures > 0 ) {
pool_device_free(num_failures_dev);
errorQuda("Error in the unitarization\n");
exit(1);
}
cudaMemset(num_failures_dev, 0, sizeof(int));
int iter = 0;
for ( iter = 0; iter < Nsteps; iter++ ) {
for ( int p = 0; p < 2; p++ ) {
#ifndef MULTI_GPU
gaugeFix.setParity(p);
gaugeFix.apply(0);
flop += (double)gaugeFix.flops();
byte += (double)gaugeFix.bytes();
#else
if ( !comm_partitioned() ) {
gaugeFix.setParity(p);
gaugeFix.apply(0);
flop += (double)gaugeFix.flops();
byte += (double)gaugeFix.bytes();
}
else{
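          // Multi-GPU scheme: relax the border points first so that their updated links can be
          // packed and exchanged with the neighbours while the interior points are relaxed
          // concurrently on GFStream[8]; the received halos are injected before synchronizing.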
gfixIntPoints.setParity(p);
gfixBorderPoints.setParity(p); //compute border points
gfixBorderPoints.apply(0);
flop += (double)gfixBorderPoints.flops();
byte += (double)gfixBorderPoints.bytes();
flop += (double)gfixIntPoints.flops();
byte += (double)gfixIntPoints.bytes();
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
comm_start(mh_recv_back[d]);
comm_start(mh_recv_fwd[d]);
}
          //wait for the update of the border points (the neighbours' halo points) to complete before packing...
qudaDeviceSynchronize();
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
//extract top face
Kernel_UnPackTop<NElems, Float, Gauge, true><< < grid[d], block[d], 0, GFStream[d] >> > (faceVolumeCB[d], dataexarg, reinterpret_cast<complex<Float>*>(send_d[d]), p, d, d);
//extract bottom ghost
Kernel_UnPackGhost<NElems, Float, Gauge, true><< < grid[d], block[d], 0, GFStream[4 + d] >> > (faceVolumeCB[d], dataexarg, reinterpret_cast<complex<Float>*>(sendg_d[d]), 1 - p, d, d);
}
#ifdef GPU_COMMS
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
qudaStreamSynchronize(GFStream[d]);
comm_start(mh_send_fwd[d]);
qudaStreamSynchronize(GFStream[4 + d]);
comm_start(mh_send_back[d]);
}
#else
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
cudaMemcpyAsync(send[d], send_d[d], bytes[d], cudaMemcpyDeviceToHost, GFStream[d]);
}
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
cudaMemcpyAsync(sendg[d], sendg_d[d], bytes[d], cudaMemcpyDeviceToHost, GFStream[4 + d]);
}
#endif
//compute interior points
gfixIntPoints.apply(GFStream[8]);
#ifndef GPU_COMMS
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
qudaStreamSynchronize(GFStream[d]);
comm_start(mh_send_fwd[d]);
qudaStreamSynchronize(GFStream[4 + d]);
comm_start(mh_send_back[d]);
}
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
comm_wait(mh_recv_back[d]);
cudaMemcpyAsync(recv_d[d], recv[d], bytes[d], cudaMemcpyHostToDevice, GFStream[d]);
}
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
comm_wait(mh_recv_fwd[d]);
cudaMemcpyAsync(recvg_d[d], recvg[d], bytes[d], cudaMemcpyHostToDevice, GFStream[4 + d]);
}
#endif
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
#ifdef GPU_COMMS
comm_wait(mh_recv_back[d]);
#endif
Kernel_UnPackGhost<NElems, Float, Gauge, false><< < grid[d], block[d], 0, GFStream[d] >> > (faceVolumeCB[d], dataexarg, reinterpret_cast<complex<Float>*>(recv_d[d]), p, d, d);
}
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
#ifdef GPU_COMMS
comm_wait(mh_recv_fwd[d]);
#endif
Kernel_UnPackTop<NElems, Float, Gauge, false><< < grid[d], block[d], 0, GFStream[4 + d] >> > (faceVolumeCB[d], dataexarg, reinterpret_cast<complex<Float>*>(recvg_d[d]), 1 - p, d, d);
}
for ( int d = 0; d < 4; d++ ) {
if ( !commDimPartitioned(d)) continue;
comm_wait(mh_send_back[d]);
comm_wait(mh_send_fwd[d]);
qudaStreamSynchronize(GFStream[d]);
qudaStreamSynchronize(GFStream[4 + d]);
}
qudaStreamSynchronize(GFStream[8]);
}
#endif
/*gaugeFix.setParity(p);
gaugeFix.apply(0);
flop += (double)gaugeFix.flops();
byte += (double)gaugeFix.bytes();
#ifdef MULTI_GPU
if(comm_partitioned()){//exchange updated top face links in current parity
for (int d=0; d<4; d++) {
if (!commDimPartitioned(d)) continue;
comm_start(mh_recv_back[d]);
//extract top face
Kernel_UnPackTop<NElems, Float, Gauge><<<grid[d], block[d]>>>(faceVolumeCB[d], dataexarg, reinterpret_cast<Float*>(send_d[d]), p, d, d, true);
#ifndef GPU_COMMS
cudaMemcpy(send[d], send_d[d], bytes[d], cudaMemcpyDeviceToHost);
#else
qudaDeviceSynchronize();
#endif
comm_start(mh_send_fwd[d]);
comm_wait(mh_recv_back[d]);
comm_wait(mh_send_fwd[d]);
#ifndef GPU_COMMS
cudaMemcpy(recv_d[d], recv[d], bytes[d], cudaMemcpyHostToDevice);
#endif
//inject top face in ghost
Kernel_UnPackGhost<NElems, Float, Gauge><<<grid[d], block[d]>>>(faceVolumeCB[d], dataexarg, reinterpret_cast<Float*>(recv_d[d]), p, d, d, false);
}
//exchange updated ghost links in opposite parity
for (int d=0; d<4; d++) {
if (!commDimPartitioned(d)) continue;
comm_start(mh_recv_fwd[d]);
Kernel_UnPackGhost<NElems, Float, Gauge><<<grid[d], block[d]>>>(faceVolumeCB[d], dataexarg, reinterpret_cast<Float*>(sendg_d[d]), 1-p, d, d, true);
#ifndef GPU_COMMS
cudaMemcpy(sendg[d], sendg_d[d], bytes[d], cudaMemcpyDeviceToHost);
#else
qudaDeviceSynchronize();
#endif
comm_start(mh_send_back[d]);
comm_wait(mh_recv_fwd[d]);
comm_wait(mh_send_back[d]);
#ifndef GPU_COMMS
cudaMemcpy(recvg_d[d], recvg[d], bytes[d], cudaMemcpyHostToDevice);
#endif
Kernel_UnPackTop<NElems, Float, Gauge><<<grid[d], block[d]>>>(faceVolumeCB[d], dataexarg, reinterpret_cast<Float*>(recvg_d[d]), 1-p, d, d, false);
}
}
#endif*/
}
if ((iter % reunit_interval) == (reunit_interval - 1)) {
unitarizeLinks(data, data, num_failures_dev);
qudaMemcpy(&num_failures, num_failures_dev, sizeof(int), cudaMemcpyDeviceToHost);
if ( num_failures > 0 ) errorQuda("Error in the unitarization\n");
cudaMemset(num_failures_dev, 0, sizeof(int));
flop += 4588.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3];
byte += 8.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3] * dataOr.Bytes();
}
GaugeFixQuality.apply(0);
flop += (double)GaugeFixQuality.flops();
byte += (double)GaugeFixQuality.bytes();
double action = argQ.getAction();
double diff = abs(action0 - action);
if ((iter % verbose_interval) == (verbose_interval - 1))
printfQuda("Step: %d\tAction: %.16e\ttheta: %.16e\tDelta: %.16e\n", iter + 1, argQ.getAction(), argQ.getTheta(), diff);
if ( stopWtheta ) {
if ( argQ.getTheta() < tolerance ) break;
}
else{
if ( diff < tolerance ) break;
}
action0 = action;
}
if ((iter % reunit_interval) != 0 ) {
unitarizeLinks(data, data, num_failures_dev);
qudaMemcpy(&num_failures, num_failures_dev, sizeof(int), cudaMemcpyDeviceToHost);
if ( num_failures > 0 ) errorQuda("Error in the unitarization\n");
cudaMemset(num_failures_dev, 0, sizeof(int));
flop += 4588.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3];
byte += 8.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3] * dataOr.Bytes();
}
if ((iter % verbose_interval) != 0 ) {
GaugeFixQuality.apply(0);
flop += (double)GaugeFixQuality.flops();
byte += (double)GaugeFixQuality.bytes();
double action = argQ.getAction();
double diff = abs(action0 - action);
printfQuda("Step: %d\tAction: %.16e\ttheta: %.16e\tDelta: %.16e\n", iter + 1, argQ.getAction(), argQ.getTheta(), diff);
}
pool_device_free(num_failures_dev);
#ifdef MULTI_GPU
if ( comm_partitioned() ) {
data.exchangeExtendedGhost(data.R(),false);
for ( int d = 0; d < 4; d++ ) {
if ( commDimPartitioned(d)) {
comm_free(mh_send_fwd[d]);
comm_free(mh_send_back[d]);
comm_free(mh_recv_back[d]);
comm_free(mh_recv_fwd[d]);
device_free(send_d[d]);
device_free(recv_d[d]);
device_free(sendg_d[d]);
device_free(recvg_d[d]);
cudaStreamDestroy(GFStream[d]);
cudaStreamDestroy(GFStream[4 + d]);
#ifndef GPU_COMMS
host_free(hostbuffer_h[d]);
#endif
}
}
cudaStreamDestroy(GFStream[8]);
}
#endif
checkCudaError();
qudaDeviceSynchronize();
profileInternalGaugeFixOVR.TPSTOP(QUDA_PROFILE_COMPUTE);
if (getVerbosity() > QUDA_SUMMARIZE){
double secs = profileInternalGaugeFixOVR.Last(QUDA_PROFILE_COMPUTE);
double gflops = (flop * 1e-9) / (secs);
double gbytes = byte / (secs * 1e9);
#ifdef MULTI_GPU
printfQuda("Time: %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops * comm_size(), gbytes * comm_size());
#else
printfQuda("Time: %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops, gbytes);
#endif
}
}
template<typename Float, int NElems, typename Gauge>
void gaugefixingOVR( Gauge dataOr, cudaGaugeField& data, const int gauge_dir, const int Nsteps, const int verbose_interval,
const Float relax_boost, const double tolerance, const int reunit_interval, const int stopWtheta) {
if ( gauge_dir != 3 ) {
printfQuda("Starting Landau gauge fixing...\n");
gaugefixingOVR<Float, Gauge, NElems, 4>(dataOr, data, Nsteps, verbose_interval, relax_boost, tolerance, reunit_interval, stopWtheta);
}
else {
printfQuda("Starting Coulomb gauge fixing...\n");
gaugefixingOVR<Float, Gauge, NElems, 3>(dataOr, data, Nsteps, verbose_interval, relax_boost, tolerance, reunit_interval, stopWtheta);
}
}
template<typename Float>
void gaugefixingOVR( cudaGaugeField& data, const int gauge_dir, const int Nsteps, const int verbose_interval,
const Float relax_boost, const double tolerance, const int reunit_interval, const int stopWtheta) {
// Switching to FloatNOrder for the gauge field in order to support RECONSTRUCT_12
if ( data.isNative() ) {
if ( data.Reconstruct() == QUDA_RECONSTRUCT_NO ) {
//printfQuda("QUDA_RECONSTRUCT_NO\n");
numParams = 18;
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge;
gaugefixingOVR<Float, 18>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, relax_boost, tolerance, reunit_interval, stopWtheta);
} else if ( data.Reconstruct() == QUDA_RECONSTRUCT_12 ) {
//printfQuda("QUDA_RECONSTRUCT_12\n");
numParams = 12;
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge;
gaugefixingOVR<Float, 12>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, relax_boost, tolerance, reunit_interval, stopWtheta);
} else if ( data.Reconstruct() == QUDA_RECONSTRUCT_8 ) {
//printfQuda("QUDA_RECONSTRUCT_8\n");
numParams = 8;
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge;
gaugefixingOVR<Float, 8>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, relax_boost, tolerance, reunit_interval, stopWtheta);
} else {
errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct());
}
} else {
errorQuda("Invalid Gauge Order\n");
}
}
#endif // GPU_GAUGE_ALG
/**
* @brief Gauge fixing with overrelaxation with support for single and multi GPU.
* @param[in,out] data, quda gauge field
* @param[in] gauge_dir, 3 for Coulomb gauge fixing, other for Landau gauge fixing
* @param[in] Nsteps, maximum number of steps to perform gauge fixing
* @param[in] verbose_interval, print gauge fixing info when iteration count is a multiple of this
* @param[in] relax_boost, gauge fixing parameter of the overrelaxation method, most common value is 1.5 or 1.7.
 * @param[in] tolerance, tolerance value used to stop the method; if this value is zero then the method stops when the iteration count reaches the maximum number of steps defined by Nsteps
 * @param[in] reunit_interval, reunitarize gauge field when iteration count is a multiple of this
 * @param[in] stopWtheta, 0 for the MILC criterion and 1 to use the theta value
 * An illustrative usage sketch follows the function definition below.
 */
void gaugefixingOVR( cudaGaugeField& data, const int gauge_dir, const int Nsteps, const int verbose_interval, const double relax_boost,
const double tolerance, const int reunit_interval, const int stopWtheta) {
#ifdef GPU_GAUGE_ALG
if ( data.Precision() == QUDA_HALF_PRECISION ) {
errorQuda("Half precision not supported\n");
}
if ( data.Precision() == QUDA_SINGLE_PRECISION ) {
gaugefixingOVR<float> (data, gauge_dir, Nsteps, verbose_interval, (float)relax_boost, tolerance, reunit_interval, stopWtheta);
} else if ( data.Precision() == QUDA_DOUBLE_PRECISION ) {
gaugefixingOVR<double>(data, gauge_dir, Nsteps, verbose_interval, relax_boost, tolerance, reunit_interval, stopWtheta);
} else {
errorQuda("Precision %d not supported", data.Precision());
}
#else
errorQuda("Gauge fixing has not been built");
#endif // GPU_GAUGE_ALG
}
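/**
 * Illustrative usage sketch for the entry point above. The numeric arguments
 * are assumptions chosen only for the example (they are not QUDA defaults),
 * and `gauge` stands for a cudaGaugeField already created and loaded by the
 * caller:
 *
 *   cudaGaugeField *gauge = ...; // prepared elsewhere by the caller
 *   gaugefixingOVR(*gauge,
 *                  3,      // gauge_dir: 3 selects Coulomb, any other value Landau
 *                  1000,   // Nsteps
 *                  100,    // verbose_interval
 *                  1.5,    // relax_boost (typical values 1.5 - 1.7)
 *                  1e-6,   // tolerance
 *                  10,     // reunit_interval
 *                  0);     // stopWtheta: 0 = MILC criterion, 1 = theta criterion
 */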
} //namespace quda
|
5a8451fde602c4e5530e3065a1a684e755ec692c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
// Compute C = A * B
__global__ void matrixMultiply(float *A, float *B, float *C, int numARows,
int numAColumns, int numBRows,
int numBColumns, int numCRows,
int numCColumns) {
//@@ Insert code to implement matrix multiplication here
int row = threadIdx.y + blockDim.y * blockIdx.y;
int col = threadIdx.x + blockDim.x * blockIdx.x;
if( (row < numCRows) && (col < numCColumns) ){
float sum = 0.0;
int n = numAColumns; // or numBRows
for(int i = 0; i < n; ++i){
sum += A[row*n + i] * B[col + i*numCColumns];
}
C[row*numCColumns + col] = sum;
}
}
int main(int argc, char **argv) {
wbArg_t args;
float *hostA; // The A matrix
float *hostB; // The B matrix
float *hostC; // The output C matrix
float *deviceA;
float *deviceB;
float *deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set
// this)
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows,
&numAColumns);
hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows,
&numBColumns);
//@@ Set numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
//@@ Allocate the hostC matrix
hostC = (float *)malloc(numCRows * numCColumns * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
wbCheck(hipMalloc((void**) &deviceA, numARows * numAColumns * sizeof(float)));
wbCheck(hipMalloc((void**) &deviceB, numBRows * numBColumns * sizeof(float)));
wbCheck(hipMalloc((void**) &deviceC, numCRows * numCColumns * sizeof(float)));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
hipMemcpy(deviceA, hostA , numARows * numAColumns * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(deviceB, hostB , numBRows * numBColumns * sizeof(float), hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
int tile_width = 2;
//@@ Initialize the grid and block dimensions here
dim3 DimGrid((numCColumns-1)/tile_width + 1,
               (numCRows-1)/tile_width + 1, 1); //number of blocks in the grid (at least 1 block per dimension)
  dim3 DimBlock(tile_width, tile_width, 1); //number of threads per block
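  // Worked example for the launch geometry above (illustrative numbers): with
  // numCRows = 100, numCColumns = 50 and tile_width = 2, DimGrid evaluates to
  // ((50-1)/2 + 1, (100-1)/2 + 1, 1) = (25, 50, 1), i.e. 25*50 = 1250 blocks of
  // 2x2 threads, enough to cover every element of C; threads landing outside C
  // are masked off by the bounds check in the kernel. tile_width = 2 keeps the
  // example tiny; 16 or 32 per dimension is the more common choice in practice.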
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
hipLaunchKernelGGL(( matrixMultiply), dim3(DimGrid),dim3(DimBlock), 0, 0, deviceA, deviceB, deviceC, numARows, numAColumns,
numBRows, numBColumns,
numCRows, numCColumns );
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
hipMemcpy(hostC, deviceC, numCRows * numCColumns * sizeof(float) , hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
hipFree(deviceA);
hipFree(deviceB);
hipFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
return 0;
}
|
5a8451fde602c4e5530e3065a1a684e755ec692c.cu
|
#include <wb.h>
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
// Compute C = A * B
__global__ void matrixMultiply(float *A, float *B, float *C, int numARows,
int numAColumns, int numBRows,
int numBColumns, int numCRows,
int numCColumns) {
//@@ Insert code to implement matrix multiplication here
int row = threadIdx.y + blockDim.y * blockIdx.y;
int col = threadIdx.x + blockDim.x * blockIdx.x;
if( (row < numCRows) && (col < numCColumns) ){
float sum = 0.0;
int n = numAColumns; // or numBRows
for(int i = 0; i < n; ++i){
sum += A[row*n + i] * B[col + i*numCColumns];
}
C[row*numCColumns + col] = sum;
}
}
int main(int argc, char **argv) {
wbArg_t args;
float *hostA; // The A matrix
float *hostB; // The B matrix
float *hostC; // The output C matrix
float *deviceA;
float *deviceB;
float *deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set
// this)
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows,
&numAColumns);
hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows,
&numBColumns);
//@@ Set numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
//@@ Allocate the hostC matrix
hostC = (float *)malloc(numCRows * numCColumns * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
wbCheck(cudaMalloc((void**) &deviceA, numARows * numAColumns * sizeof(float)));
wbCheck(cudaMalloc((void**) &deviceB, numBRows * numBColumns * sizeof(float)));
wbCheck(cudaMalloc((void**) &deviceC, numCRows * numCColumns * sizeof(float)));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
cudaMemcpy(deviceA, hostA , numARows * numAColumns * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(deviceB, hostB , numBRows * numBColumns * sizeof(float), cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
int tile_width = 2;
//@@ Initialize the grid and block dimensions here
dim3 DimGrid((numCColumns-1)/tile_width + 1,
               (numCRows-1)/tile_width + 1, 1); //number of blocks in the grid (at least 1 block per dimension)
  dim3 DimBlock(tile_width, tile_width, 1); //number of threads per block
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
matrixMultiply<<<DimGrid,DimBlock>>>(deviceA, deviceB, deviceC, numARows, numAColumns,
numBRows, numBColumns,
numCRows, numCColumns );
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
cudaMemcpy(hostC, deviceC, numCRows * numCColumns * sizeof(float) , cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
return 0;
}
|
4a13b6821d0b0d439563ae1aed9dd7e41660f725.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file bias_add_device.cu
* @author Daniel Nichols
* @version 0.1
* @date 2019-06-23
*
* @copyright Copyright (c) 2019
*/
#include "math/bias_add.h"
#define BLK_SIZE 1024
namespace magmadnn {
namespace math {
template <typename T>
__global__ void kernel_bias_add_device(T *x, T *bias, T *out, unsigned int x_rows, unsigned int x_cols) {
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < x_cols * x_rows; i += stride) {
out[i] = x[i] + bias[i / x_cols];
}
}
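// Indexing note for the kernel above (x is assumed to be stored row-major with
// shape x_rows x x_cols): element i belongs to row i / x_cols, so every element
// of a row receives the same bias entry. For example, with x_rows = 2 and
// x_cols = 3, elements 0..2 get bias[0] and elements 3..5 get bias[1]. The
// grid-stride loop lets a fixed-size launch cover any x_rows * x_cols.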
template <typename T>
void bias_add_device(Tensor<T> *x, Tensor<T> *bias, Tensor<T> *out) {
unsigned int x_rows = x->get_shape(0);
unsigned int x_cols = x->get_shape(1);
hipLaunchKernelGGL(( kernel_bias_add_device), dim3((x_rows * x_cols + BLK_SIZE - 1) / BLK_SIZE), dim3(BLK_SIZE), 0, 0, x->get_ptr(), bias->get_ptr(),
out->get_ptr(), x_rows, x_cols);
}
template void bias_add_device(Tensor<int> *x, Tensor<int> *bias, Tensor<int> *out);
template void bias_add_device(Tensor<float> *x, Tensor<float> *bias, Tensor<float> *out);
template void bias_add_device(Tensor<double> *x, Tensor<double> *bias, Tensor<double> *out);
} // namespace math
} // namespace magmadnn
#undef BLK_SIZE
|
4a13b6821d0b0d439563ae1aed9dd7e41660f725.cu
|
/**
* @file bias_add_device.cu
* @author Daniel Nichols
* @version 0.1
* @date 2019-06-23
*
* @copyright Copyright (c) 2019
*/
#include "math/bias_add.h"
#define BLK_SIZE 1024
namespace magmadnn {
namespace math {
template <typename T>
__global__ void kernel_bias_add_device(T *x, T *bias, T *out, unsigned int x_rows, unsigned int x_cols) {
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < x_cols * x_rows; i += stride) {
out[i] = x[i] + bias[i / x_cols];
}
}
template <typename T>
void bias_add_device(Tensor<T> *x, Tensor<T> *bias, Tensor<T> *out) {
unsigned int x_rows = x->get_shape(0);
unsigned int x_cols = x->get_shape(1);
kernel_bias_add_device<<<(x_rows * x_cols + BLK_SIZE - 1) / BLK_SIZE, BLK_SIZE>>>(x->get_ptr(), bias->get_ptr(),
out->get_ptr(), x_rows, x_cols);
}
template void bias_add_device(Tensor<int> *x, Tensor<int> *bias, Tensor<int> *out);
template void bias_add_device(Tensor<float> *x, Tensor<float> *bias, Tensor<float> *out);
template void bias_add_device(Tensor<double> *x, Tensor<double> *bias, Tensor<double> *out);
} // namespace math
} // namespace magmadnn
#undef BLK_SIZE
|
33f16cc6844984b48954a07cc295e010998a643b.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2019-2023 by XGBoost Contributors
*/
#include "../common/api_entry.h" // XGBAPIThreadLocalEntry
#include "../common/threading_utils.h"
#include "../data/device_adapter.cuh"
#include "../data/proxy_dmatrix.h"
#include "c_api_error.h"
#include "c_api_utils.h"
#include "xgboost/c_api.h"
#include "xgboost/data.h"
#include "xgboost/json.h"
#include "xgboost/learner.h"
namespace xgboost {
void XGBBuildInfoDevice(Json *p_info) {
auto &info = *p_info;
info["USE_ROCM"] = true;
std::vector<Json> v{Json{Integer{THRUST_MAJOR_VERSION}}, Json{Integer{THRUST_MINOR_VERSION}},
Json{Integer{THRUST_SUBMINOR_VERSION}}};
info["THRUST_VERSION"] = v;
v = {Json{Integer{dh::CUDAVersion().first}}, Json{Integer{dh::CUDAVersion().second}}};
info["TORCH_HIP_VERSION"] = v;
#if defined(XGBOOST_USE_NCCL)
info["USE_NCCL"] = Boolean{true};
v = {Json{Integer{NCCL_MAJOR}}, Json{Integer{NCCL_MINOR}}, Json{Integer{NCCL_PATCH}}};
info["NCCL_VERSION"] = v;
#else
info["USE_NCCL"] = Boolean{false};
#endif
#if defined(XGBOOST_USE_RMM)
info["USE_RMM"] = Boolean{true};
v = {Json{Integer{RMM_VERSION_MAJOR}}, Json{Integer{RMM_VERSION_MINOR}},
Json{Integer{RMM_VERSION_PATCH}}};
info["RMM_VERSION"] = v;
#else
info["USE_RMM"] = Boolean{false};
#endif
}
void XGBoostAPIGuard::SetGPUAttribute() {
// Not calling `safe_cuda` to avoid unnecessary exception handling overhead.
// If errors, do nothing, assuming running on CPU only machine.
hipGetDevice(&device_id_);
}
void XGBoostAPIGuard::RestoreGPUAttribute() {
// Not calling `safe_cuda` to avoid unnecessary exception handling overhead.
// If errors, do nothing, assuming running on CPU only machine.
hipSetDevice(device_id_);
}
} // namespace xgboost
using namespace xgboost; // NOLINT
XGB_DLL int XGDMatrixCreateFromCudaColumnar(char const *data,
char const* c_json_config,
DMatrixHandle *out) {
API_BEGIN();
xgboost_CHECK_C_ARG_PTR(c_json_config);
xgboost_CHECK_C_ARG_PTR(data);
std::string json_str{data};
auto config = Json::Load(StringView{c_json_config});
float missing = GetMissing(config);
auto n_threads = OptionalArg<Integer, std::int64_t>(config, "nthread", 0);
data::CudfAdapter adapter(json_str);
*out =
new std::shared_ptr<DMatrix>(DMatrix::Create(&adapter, missing, n_threads));
API_END();
}
XGB_DLL int XGDMatrixCreateFromCudaArrayInterface(char const *data,
char const* c_json_config,
DMatrixHandle *out) {
API_BEGIN();
std::string json_str{data};
auto config = Json::Load(StringView{c_json_config});
float missing = GetMissing(config);
auto n_threads = OptionalArg<Integer, std::int64_t>(config, "nthread", 0);
data::CupyAdapter adapter(json_str);
*out =
new std::shared_ptr<DMatrix>(DMatrix::Create(&adapter, missing, n_threads));
API_END();
}
int InplacePreidctCUDA(BoosterHandle handle, char const *c_array_interface,
char const *c_json_config, std::shared_ptr<DMatrix> p_m,
xgboost::bst_ulong const **out_shape, xgboost::bst_ulong *out_dim,
const float **out_result) {
API_BEGIN();
CHECK_HANDLE();
if (!p_m) {
p_m.reset(new data::DMatrixProxy);
}
auto proxy = dynamic_cast<data::DMatrixProxy *>(p_m.get());
CHECK(proxy) << "Invalid input type for inplace predict.";
proxy->SetCUDAArray(c_array_interface);
auto config = Json::Load(StringView{c_json_config});
auto *learner = static_cast<Learner *>(handle);
HostDeviceVector<float> *p_predt{nullptr};
auto type = PredictionType(RequiredArg<Integer>(config, "type", __func__));
float missing = GetMissing(config);
learner->InplacePredict(p_m, type, missing, &p_predt,
RequiredArg<Integer>(config, "iteration_begin", __func__),
RequiredArg<Integer>(config, "iteration_end", __func__));
CHECK(p_predt);
if (learner->Ctx()->IsCUDA()) {
CHECK(p_predt->DeviceCanRead() && !p_predt->HostCanRead());
}
p_predt->SetDevice(proxy->DeviceIdx());
auto &shape = learner->GetThreadLocal().prediction_shape;
size_t n_samples = p_m->Info().num_row_;
auto chunksize = n_samples == 0 ? 0 : p_predt->Size() / n_samples;
bool strict_shape = RequiredArg<Boolean>(config, "strict_shape", __func__);
xgboost_CHECK_C_ARG_PTR(out_result);
xgboost_CHECK_C_ARG_PTR(out_shape);
xgboost_CHECK_C_ARG_PTR(out_dim);
CalcPredictShape(strict_shape, type, n_samples, p_m->Info().num_col_, chunksize,
learner->Groups(), learner->BoostedRounds(), &shape, out_dim);
*out_shape = dmlc::BeginPtr(shape);
*out_result = p_predt->ConstDevicePointer();
API_END();
}
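// Illustrative JSON configuration for InplacePreidctCUDA above. The key names
// are taken from the RequiredArg calls in this function; "missing" is the key
// GetMissing is assumed to read, and all values are examples only:
//   {"type": 0, "iteration_begin": 0, "iteration_end": 0,
//    "missing": 0.0, "strict_shape": false}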
XGB_DLL int XGBoosterPredictFromCudaColumnar(BoosterHandle handle, char const *c_json_strs,
char const *c_json_config, DMatrixHandle m,
xgboost::bst_ulong const **out_shape,
xgboost::bst_ulong *out_dim,
const float **out_result) {
std::shared_ptr<DMatrix> p_m{nullptr};
xgboost_CHECK_C_ARG_PTR(c_json_config);
if (m) {
p_m = *static_cast<std::shared_ptr<DMatrix> *>(m);
}
return InplacePreidctCUDA(handle, c_json_strs, c_json_config, p_m, out_shape, out_dim,
out_result);
}
XGB_DLL int XGBoosterPredictFromCudaArray(BoosterHandle handle, char const *c_json_strs,
char const *c_json_config, DMatrixHandle m,
xgboost::bst_ulong const **out_shape,
xgboost::bst_ulong *out_dim, const float **out_result) {
std::shared_ptr<DMatrix> p_m{nullptr};
if (m) {
p_m = *static_cast<std::shared_ptr<DMatrix> *>(m);
}
xgboost_CHECK_C_ARG_PTR(out_result);
return InplacePreidctCUDA(handle, c_json_strs, c_json_config, p_m, out_shape, out_dim,
out_result);
}
|
33f16cc6844984b48954a07cc295e010998a643b.cu
|
/**
* Copyright 2019-2023 by XGBoost Contributors
*/
#include "../common/api_entry.h" // XGBAPIThreadLocalEntry
#include "../common/threading_utils.h"
#include "../data/device_adapter.cuh"
#include "../data/proxy_dmatrix.h"
#include "c_api_error.h"
#include "c_api_utils.h"
#include "xgboost/c_api.h"
#include "xgboost/data.h"
#include "xgboost/json.h"
#include "xgboost/learner.h"
namespace xgboost {
void XGBBuildInfoDevice(Json *p_info) {
auto &info = *p_info;
info["USE_CUDA"] = true;
std::vector<Json> v{Json{Integer{THRUST_MAJOR_VERSION}}, Json{Integer{THRUST_MINOR_VERSION}},
Json{Integer{THRUST_SUBMINOR_VERSION}}};
info["THRUST_VERSION"] = v;
v = {Json{Integer{dh::CUDAVersion().first}}, Json{Integer{dh::CUDAVersion().second}}};
info["CUDA_VERSION"] = v;
#if defined(XGBOOST_USE_NCCL)
info["USE_NCCL"] = Boolean{true};
v = {Json{Integer{NCCL_MAJOR}}, Json{Integer{NCCL_MINOR}}, Json{Integer{NCCL_PATCH}}};
info["NCCL_VERSION"] = v;
#else
info["USE_NCCL"] = Boolean{false};
#endif
#if defined(XGBOOST_USE_RMM)
info["USE_RMM"] = Boolean{true};
v = {Json{Integer{RMM_VERSION_MAJOR}}, Json{Integer{RMM_VERSION_MINOR}},
Json{Integer{RMM_VERSION_PATCH}}};
info["RMM_VERSION"] = v;
#else
info["USE_RMM"] = Boolean{false};
#endif
}
void XGBoostAPIGuard::SetGPUAttribute() {
// Not calling `safe_cuda` to avoid unnecessary exception handling overhead.
// If errors, do nothing, assuming running on CPU only machine.
cudaGetDevice(&device_id_);
}
void XGBoostAPIGuard::RestoreGPUAttribute() {
// Not calling `safe_cuda` to avoid unnecessary exception handling overhead.
// If errors, do nothing, assuming running on CPU only machine.
cudaSetDevice(device_id_);
}
} // namespace xgboost
using namespace xgboost; // NOLINT
XGB_DLL int XGDMatrixCreateFromCudaColumnar(char const *data,
char const* c_json_config,
DMatrixHandle *out) {
API_BEGIN();
xgboost_CHECK_C_ARG_PTR(c_json_config);
xgboost_CHECK_C_ARG_PTR(data);
std::string json_str{data};
auto config = Json::Load(StringView{c_json_config});
float missing = GetMissing(config);
auto n_threads = OptionalArg<Integer, std::int64_t>(config, "nthread", 0);
data::CudfAdapter adapter(json_str);
*out =
new std::shared_ptr<DMatrix>(DMatrix::Create(&adapter, missing, n_threads));
API_END();
}
XGB_DLL int XGDMatrixCreateFromCudaArrayInterface(char const *data,
char const* c_json_config,
DMatrixHandle *out) {
API_BEGIN();
std::string json_str{data};
auto config = Json::Load(StringView{c_json_config});
float missing = GetMissing(config);
auto n_threads = OptionalArg<Integer, std::int64_t>(config, "nthread", 0);
data::CupyAdapter adapter(json_str);
*out =
new std::shared_ptr<DMatrix>(DMatrix::Create(&adapter, missing, n_threads));
API_END();
}
int InplacePreidctCUDA(BoosterHandle handle, char const *c_array_interface,
char const *c_json_config, std::shared_ptr<DMatrix> p_m,
xgboost::bst_ulong const **out_shape, xgboost::bst_ulong *out_dim,
const float **out_result) {
API_BEGIN();
CHECK_HANDLE();
if (!p_m) {
p_m.reset(new data::DMatrixProxy);
}
auto proxy = dynamic_cast<data::DMatrixProxy *>(p_m.get());
CHECK(proxy) << "Invalid input type for inplace predict.";
proxy->SetCUDAArray(c_array_interface);
auto config = Json::Load(StringView{c_json_config});
auto *learner = static_cast<Learner *>(handle);
HostDeviceVector<float> *p_predt{nullptr};
auto type = PredictionType(RequiredArg<Integer>(config, "type", __func__));
float missing = GetMissing(config);
learner->InplacePredict(p_m, type, missing, &p_predt,
RequiredArg<Integer>(config, "iteration_begin", __func__),
RequiredArg<Integer>(config, "iteration_end", __func__));
CHECK(p_predt);
if (learner->Ctx()->IsCUDA()) {
CHECK(p_predt->DeviceCanRead() && !p_predt->HostCanRead());
}
p_predt->SetDevice(proxy->DeviceIdx());
auto &shape = learner->GetThreadLocal().prediction_shape;
size_t n_samples = p_m->Info().num_row_;
auto chunksize = n_samples == 0 ? 0 : p_predt->Size() / n_samples;
bool strict_shape = RequiredArg<Boolean>(config, "strict_shape", __func__);
xgboost_CHECK_C_ARG_PTR(out_result);
xgboost_CHECK_C_ARG_PTR(out_shape);
xgboost_CHECK_C_ARG_PTR(out_dim);
CalcPredictShape(strict_shape, type, n_samples, p_m->Info().num_col_, chunksize,
learner->Groups(), learner->BoostedRounds(), &shape, out_dim);
*out_shape = dmlc::BeginPtr(shape);
*out_result = p_predt->ConstDevicePointer();
API_END();
}
XGB_DLL int XGBoosterPredictFromCudaColumnar(BoosterHandle handle, char const *c_json_strs,
char const *c_json_config, DMatrixHandle m,
xgboost::bst_ulong const **out_shape,
xgboost::bst_ulong *out_dim,
const float **out_result) {
std::shared_ptr<DMatrix> p_m{nullptr};
xgboost_CHECK_C_ARG_PTR(c_json_config);
if (m) {
p_m = *static_cast<std::shared_ptr<DMatrix> *>(m);
}
return InplacePreidctCUDA(handle, c_json_strs, c_json_config, p_m, out_shape, out_dim,
out_result);
}
XGB_DLL int XGBoosterPredictFromCudaArray(BoosterHandle handle, char const *c_json_strs,
char const *c_json_config, DMatrixHandle m,
xgboost::bst_ulong const **out_shape,
xgboost::bst_ulong *out_dim, const float **out_result) {
std::shared_ptr<DMatrix> p_m{nullptr};
if (m) {
p_m = *static_cast<std::shared_ptr<DMatrix> *>(m);
}
xgboost_CHECK_C_ARG_PTR(out_result);
return InplacePreidctCUDA(handle, c_json_strs, c_json_config, p_m, out_shape, out_dim,
out_result);
}
|
ceb1ba8273766a45fa40601562860bb2b85c444a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "data_partition_mpi.h"
#include <stdio.h>
data_partition_mpi::data_partition_mpi(void *input,
size_t input_size,
Offset *offsets,
size_t offset_number
):input_(input),input_size_(input_size),offsets_(offsets),offset_number_(offset_number)
{
//allocate pinned memory copy
hipHostMalloc((void **)&input_pin_, input_size_, hipHostMallocPortable|hipHostMallocMapped);
//copy data into it
memcpy(input_pin_, input_, input_size_);
printf("=================> offset number: %ld input size: %ld\n", offset_number_, input_size_);
//allocate pinned offset copy
hipHostMalloc((void **)&offsets_pin_, offset_number_ * sizeof(Offset), hipHostMallocPortable|hipHostMallocMapped);
memcpy(offsets_pin_, offsets_, offset_number_ * sizeof(Offset));
}
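// Note on the allocation flags used above: hipHostMallocPortable marks the
// pinned buffers as usable from any device/context, and hipHostMallocMapped
// also maps them into the device address space, so the GPU can read them
// directly (zero-copy) or DMA them without an extra staging copy.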
|
ceb1ba8273766a45fa40601562860bb2b85c444a.cu
|
#include "data_partition_mpi.h"
#include <stdio.h>
data_partition_mpi::data_partition_mpi(void *input,
size_t input_size,
Offset *offsets,
size_t offset_number
):input_(input),input_size_(input_size),offsets_(offsets),offset_number_(offset_number)
{
//allocate pinned memory copy
cudaHostAlloc((void **)&input_pin_, input_size_, cudaHostAllocPortable|cudaHostAllocMapped);
//copy data into it
memcpy(input_pin_, input_, input_size_);
printf("=================> offset number: %ld input size: %ld\n", offset_number_, input_size_);
//allocate pinned offset copy
cudaHostAlloc((void **)&offsets_pin_, offset_number_ * sizeof(Offset), cudaHostAllocPortable|cudaHostAllocMapped);
memcpy(offsets_pin_, offsets_, offset_number_ * sizeof(Offset));
}
|
fb76a34603ca8c4e5e538b5512d4cbd79cef8f18.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
}
__global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX];
}
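// Usage note (illustrative): with OFFX = OFFY = 0 and INCX = INCY = 1 the
// kernel above is a plain element-wise copy of N floats; non-unit INCX/INCY
// copy strided vectors in the style of the BLAS *copy routines. Flattening
// (blockIdx.x, blockIdx.y) into one index simply lets the launcher use a 2D
// grid when a 1D grid would not be large enough for N.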
|
fb76a34603ca8c4e5e538b5512d4cbd79cef8f18.cu
|
#include "includes.h"
extern "C" {
}
__global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX];
}
|
dc9a346ba4e8b10e8db161a07e27dc2fe25facf0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/roi_align_op.h"
#include <stdio.h>
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
template <typename T>
__device__ T bilinear_interpolate(
const T* bottom_data,
const int height,
const int width,
T y,
T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void RoIAlignForward(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* bottom_rois,
int roi_cols,
T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
// RoI could have 4 or 5 columns
const T* offset_bottom_rois = bottom_rois + n * roi_cols;
int roi_batch_ind = 0;
if (roi_cols == 5) {
roi_batch_ind = offset_bottom_rois[0];
offset_bottom_rois++;
}
    // Do not use rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[0] * spatial_scale;
T roi_start_h = offset_bottom_rois[1] * spatial_scale;
T roi_end_w = offset_bottom_rois[2] * spatial_scale;
T roi_end_h = offset_bottom_rois[3] * spatial_scale;
// T roi_start_w = roundf(offset_bottom_rois[0] * spatial_scale);
// T roi_start_h = roundf(offset_bottom_rois[1] * spatial_scale);
// T roi_end_w = roundf(offset_bottom_rois[2] * spatial_scale);
// T roi_end_h = roundf(offset_bottom_rois[3] * spatial_scale);
// Force malformed ROIs to be 1x1
T roi_width = c10::hip::compat::max(roi_end_w - roi_start_w, (T)1.);
T roi_height = c10::hip::compat::max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(
offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
} // namespace
template <>
bool RoIAlignOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto* Y = Output(0); // RoI pooled data
if (R.size() == 0) {
// Handle empty rois
Y->Resize(0, X.dim32(1), pooled_height_, pooled_width_);
// The following mutable_data calls are needed to allocate the tensors
Y->template mutable_data<float>();
return true;
}
assert(sampling_ratio_ >= 0);
Y->Resize(R.dim32(0), X.dim32(1), pooled_height_, pooled_width_);
int output_size = Y->size();
hipLaunchKernelGGL(( RoIAlignForward<float>)
, dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
X.data<float>(),
spatial_scale_,
X.dim32(1),
X.dim32(2),
X.dim32(3),
pooled_height_,
pooled_width_,
sampling_ratio_,
R.data<float>(),
R.dim32(1),
Y->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(RoIAlign, RoIAlignOp<float, CUDAContext>);
} // namespace caffe2
|
dc9a346ba4e8b10e8db161a07e27dc2fe25facf0.cu
|
#include "caffe2/operators/roi_align_op.h"
#include <stdio.h>
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
template <typename T>
__device__ T bilinear_interpolate(
const T* bottom_data,
const int height,
const int width,
T y,
T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void RoIAlignForward(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* bottom_rois,
int roi_cols,
T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
// RoI could have 4 or 5 columns
const T* offset_bottom_rois = bottom_rois + n * roi_cols;
int roi_batch_ind = 0;
if (roi_cols == 5) {
roi_batch_ind = offset_bottom_rois[0];
offset_bottom_rois++;
}
    // Do not use rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[0] * spatial_scale;
T roi_start_h = offset_bottom_rois[1] * spatial_scale;
T roi_end_w = offset_bottom_rois[2] * spatial_scale;
T roi_end_h = offset_bottom_rois[3] * spatial_scale;
// T roi_start_w = roundf(offset_bottom_rois[0] * spatial_scale);
// T roi_start_h = roundf(offset_bottom_rois[1] * spatial_scale);
// T roi_end_w = roundf(offset_bottom_rois[2] * spatial_scale);
// T roi_end_h = roundf(offset_bottom_rois[3] * spatial_scale);
// Force malformed ROIs to be 1x1
T roi_width = c10::cuda::compat::max(roi_end_w - roi_start_w, (T)1.);
T roi_height = c10::cuda::compat::max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(
offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
} // namespace
template <>
bool RoIAlignOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto* Y = Output(0); // RoI pooled data
if (R.size() == 0) {
// Handle empty rois
Y->Resize(0, X.dim32(1), pooled_height_, pooled_width_);
// The following mutable_data calls are needed to allocate the tensors
Y->template mutable_data<float>();
return true;
}
assert(sampling_ratio_ >= 0);
Y->Resize(R.dim32(0), X.dim32(1), pooled_height_, pooled_width_);
int output_size = Y->size();
RoIAlignForward<float>
<<<CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
X.data<float>(),
spatial_scale_,
X.dim32(1),
X.dim32(2),
X.dim32(3),
pooled_height_,
pooled_width_,
sampling_ratio_,
R.data<float>(),
R.dim32(1),
Y->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(RoIAlign, RoIAlignOp<float, CUDAContext>);
} // namespace caffe2
|
1390fd3b74682480b37f3692eb27c47a1c1136bd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/// LSU EE 4702-1 (Fall 2016), GPU Programming
//
/// Homework 7
//
// See http://www.ece.lsu.edu/koppel/gpup/2016/hw07.pdf
/// Use this file for your solution.
#include "cuda-coord.cu"
#include "hw07.cuh"
#include <gp/cuda-util-kernel.h>
// Physical State Variables
//
__constant__ float4 *helix_position;
__constant__ float3 *helix_velocity; // Note: float4 would be faster.
__constant__ float4 *helix_orientation;
__constant__ float3 *helix_omega; // Note: float4 would be faster.
__constant__ Timing_Data *timing_data; // Measure execution time of intersect.
__constant__ Helix_Info hi; // Scalar Constants
__global__ void
time_step_intersect_1()
{
/// Homework 7 SOLUTION IN THIS ROUTINE
// Find intersections of one helix segment with some other
// segments. Each block handles several "a" segments, the threads in
// the block check for intersection with other segments, called "b"
// segments.
__shared__ clock_t time_start;
if ( !threadIdx.x ) time_start = clock64();
// Note: The size of the helix_position array is hi.phys_helix_segments.
// Compute how many "a" elements will be handled by each block.
//
const int a_per_block = hi.phys_helix_segments / gridDim.x;
// Compute how many threads handle each "a" element.
//
const int thd_per_a = blockDim.x / a_per_block;
// Compute the smallest "a" element index that this block will handle.
//
const int a_idx_block = blockIdx.x * a_per_block;
/// Assignment of "a" and "b" Values to Threads
//
// The table below is an example of how this routine
// assigns "a" and "b" elements to threads. The table
// is based upon the following values:
//
// blockDim = 8, blockIdx = 4, hi.phys_helix_segments = 1024
// m:a_per_block = 4, d:thd_per_a = 2, a_idx_block = 16
//
// tIx al a b --->
// 0 0 16 0 2 4 ... 1022
// 1 1 17 0 2 4 ... 1022
// 2 2 18 0 2 4 ... 1022
// 3 3 19 0 2 4 ... 1022
// 4 0 16 1 3 5 ... 1023
// 5 1 17 1 3 5 ... 1023
// 6 2 18 1 3 5 ... 1023
// 7 3 19 1 3 5 ... 1023
// | | | |
// | | | |
// | | | |--------> b_idx_start
// | | |--------------> a_idx
// | |--------------------> a_local_idx
// |--------------------------> threadIdx.x
// Compute a_idx and b_idx_start to realize ordering above.
//
const int a_local_idx = threadIdx.x % a_per_block;
const int a_idx = a_idx_block + a_local_idx;
const int b_idx_start = threadIdx.x / a_per_block;
const int min_idx_dist = 0.999f + hi.wire_radius / hi.helix_seg_hlength;
const float four_wire_radius_sq = 4 * hi.wire_radius * hi.wire_radius;
// Declare dynamically allocated shared memory. Will be split
// between array for forces, force, and position cache, pos_cache.
//
extern __shared__ float3 shared[];
const bool use_shared =
hi.opt_sm_option == SMO_one_iteration
|| hi.opt_sm_option == SMO_multiple_iterations;
pVect* const force = shared;
float3* const pos_cache = &shared[a_per_block];
if ( threadIdx.x < a_per_block ) force[threadIdx.x] = mv(0,0,0);
__syncthreads();
const float3 a_position = m3(helix_position[a_idx]);
for ( int j=b_idx_start; j<hi.phys_helix_segments; j += thd_per_a )
{
if ( hi.opt_sm_option == SMO_one_iteration )
{
__syncthreads();
if ( threadIdx.x < thd_per_a )
pos_cache[threadIdx.x] =
m3(helix_position[ j - b_idx_start + threadIdx.x ] );
__syncthreads();
}
else if ( hi.opt_sm_option == SMO_multiple_iterations )
{
__syncthreads();
if ( threadIdx.x < thd_per_a )
pos_cache[threadIdx.x] =
m3(helix_position[ j - b_idx_start + threadIdx.x ] );
__syncthreads();
}
float3 b_position =
use_shared ? pos_cache[b_idx_start] : m3(helix_position[j]);
pVect ab = mv(a_position,b_position);
// Skip if segment is too close.
if ( abs(a_idx-j) < min_idx_dist ) continue;
// Skip if no chance of intersection.
if ( mag_sq(ab) >= four_wire_radius_sq ) continue;
// Compute intersection force based on bounding sphere, an
// admittedly crude approximation.
//
pNorm dist = mn(ab);
const float pen = 2 * hi.wire_radius - dist.magnitude;
float3 f = pen * hi.opt_spring_constant * dist;
// Add force to shared variable. This is time consuming
// (especially in CC 3.x and older GPUs) but done
      // infrequently. (A segment can normally only intersect a few
      // other segments.)
//
atomicAdd(&force[a_local_idx].x,f.x);
atomicAdd(&force[a_local_idx].y,f.y);
atomicAdd(&force[a_local_idx].z,f.z);
//
// Optimization Note: Could acquire a lock and then update
// all three components.
}
// Wait for all threads to finish.
__syncthreads();
// Leave it to thread 0 to update velocity.
if ( threadIdx.x >= a_per_block ) return;
// Update velocity and write it.
//
float3 velocity = helix_velocity[a_idx];
velocity -= hi.delta_t_mass_inv * force[a_local_idx];
if ( hi.opt_end_fixed && a_idx + 1 == hi.phys_helix_segments )
velocity = mv(0,0,0);
helix_velocity[a_idx] = velocity;
if ( !threadIdx.x )
{
timing_data[blockIdx.x].intersect_time += clock64() - time_start;
timing_data[blockIdx.x].intersect_count++;
}
}
__global__ void
time_step_intersect_2()
{
/// DO NOT MODIFY THIS ROUTINE.
// Find intersections of one helix segment with some other
// segments. Each block handles several "a" segments, the threads in the
// block check for intersection with other segments, called "b"
// segments.
__shared__ clock_t time_start;
if ( !threadIdx.x ) time_start = clock64();
// Note: The size of the helix_position array is hi.phys_helix_segments.
// Compute how many "a" elements will be handled by each block.
//
const int a_per_block = hi.phys_helix_segments / gridDim.x;
// Compute how many threads handle each "a" element.
//
const int thd_per_a = blockDim.x / a_per_block;
// Compute the smallest "a" element index that this block will handle.
//
const int a_idx_block = blockIdx.x * a_per_block;
/// Assignment of "a" and "b" Values to Threads
//
// The table below is an example of how this routine
// assigns "a" and "b" elements to threads. The table
// is based upon the following values:
//
// blockDim = 8, blockIdx = 4, hi.phys_helix_segments = 1024
// a_per_block = 4, thd_per_a = 2, a_idx_block = 16
//
// tIx al a b --->
// 0 0 16 0 2 4 ...
// 1 0 16 1 3 5
// 2 1 17 0 2 4
// 3 1 17 1 3 5
// 4 2 18 0 2 4
// 5 2 18 1 3 5
// 6 3 19 0 2 4
// 7 3 19 1 3 5
// | | | |
// | | | |
// | | | |--------> b_idx_start
// | | |--------------> a_idx
// | |--------------------> a_local_idx
// |--------------------------> threadIdx.x
// Compute a_idx and b_idx_start to realize ordering above.
//
const int a_local_idx = threadIdx.x / thd_per_a;
const int a_idx = a_idx_block + a_local_idx;
const int b_idx_start = threadIdx.x % thd_per_a;
const float3 a_position = m3(helix_position[a_idx]);
const int min_idx_dist = 0.999f + hi.wire_radius / hi.helix_seg_hlength;
const float four_wire_radius_sq = 4 * hi.wire_radius * hi.wire_radius;
// Declare dynamically allocated shared memory. Will be split
// between array for forces, force, and position cache, pos_cache.
//
extern __shared__ float3 shared[];
pVect* const force = shared;
if ( threadIdx.x < a_per_block ) force[threadIdx.x] = mv(0,0,0);
// Wait for thread 0 to initialize force.
__syncthreads();
const bool use_shared =
hi.opt_sm_option == SMO_one_iteration
|| hi.opt_sm_option == SMO_multiple_iterations;
float3* const pos_cache = &shared[a_per_block];
/// DO NOT MODIFY THIS ROUTINE.
for ( int j=b_idx_start; j<hi.phys_helix_segments; j += thd_per_a )
{
if ( hi.opt_sm_option == SMO_one_iteration )
{
__syncthreads();
if ( threadIdx.x < thd_per_a )
pos_cache[threadIdx.x] = m3(helix_position[j]);
__syncthreads();
}
else if ( hi.opt_sm_option == SMO_multiple_iterations )
{
__syncthreads();
if ( threadIdx.x < thd_per_a )
pos_cache[threadIdx.x] = m3(helix_position[j]);
__syncthreads();
}
float3 b_position =
use_shared ? pos_cache[b_idx_start] : m3(helix_position[j]);
pVect ab = mv(a_position,b_position);
// Skip if segment is too close.
if ( abs(a_idx-j) < min_idx_dist ) continue;
// Skip if no chance of intersection.
if ( mag_sq(ab) >= four_wire_radius_sq ) continue;
// Compute intersection force based on bounding sphere, an
// admittedly crude approximation.
//
pNorm dist = mn(ab);
const float pen = 2 * hi.wire_radius - dist.magnitude;
float3 f = pen * hi.opt_spring_constant * dist;
// Add force to shared variable. This is time consuming but
      // done infrequently. (A segment can normally only intersect
      // a few other segments.)
//
atomicAdd(&force[a_local_idx].x,f.x);
atomicAdd(&force[a_local_idx].y,f.y);
atomicAdd(&force[a_local_idx].z,f.z);
//
// Optimization Note: Could acquire a lock and then update
// all three components.
}
// Wait for all threads to finish.
__syncthreads();
// Leave it to thread 0 to update velocity.
if ( threadIdx.x >= a_per_block ) return;
{
// Re-compute a_idx so that first a_per_block threads can write
// velocities.
const int a_local_idx = threadIdx.x;
const int a_idx = a_idx_block + a_local_idx;
// Update velocity and write it.
//
float3 velocity = helix_velocity[a_idx];
velocity -= hi.delta_t_mass_inv * force[a_local_idx];
if ( hi.opt_end_fixed && a_idx + 1 == hi.phys_helix_segments )
velocity = mv(0,0,0);
helix_velocity[a_idx] = velocity;
if ( !threadIdx.x )
{
timing_data[blockIdx.x].intersect_time += clock64() - time_start;
timing_data[blockIdx.x].intersect_count++;
}
}
}
__global__ void time_step();
__global__ void time_step_intersect_1();
__global__ void time_step_intersect_2();
__global__ void time_step_update_pos();
__host__ hipError_t
cuda_setup(GPU_Info *gpu_info)
{
// Pass the device address to host code. (See gp/cuda-util-kernel.h ).
CU_SYM(helix_position);
CU_SYM(helix_velocity);
CU_SYM(helix_orientation);
CU_SYM(helix_omega);
CU_SYM(timing_data);
CU_SYM(hi);
// Return attributes of CUDA functions. The code needs the
// maximum number of threads.
hipError_t e1 = hipSuccess;
gpu_info->GET_INFO(time_step);
gpu_info->GET_INFO(time_step_intersect_1);
gpu_info->GET_INFO(time_step_intersect_2);
gpu_info->GET_INFO(time_step_update_pos);
return e1;
}
__host__ void time_step_launch(int grid_size, int block_size)
{
hipLaunchKernelGGL(( time_step), dim3(grid_size),dim3(block_size), 0, 0, );
}
__device__ void
helix_apply_force_at
(float3 position, float3& force, float3& torque,
float3 force_pos, pVect dir, float magnitude);
__global__ void
time_step()
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Use tid for helix segment number.
if ( tid + 1 > hi.phys_helix_segments ) return;
// The position of segment 0 is fixed, so don't evolve it.
if ( tid == 0 ) return;
pVect vZero = mv(0,0,0);
pVect gravity_force = hi.helix_seg_mass_inv * hi.gravity_accel;
pQuat c_orientation = cq(helix_orientation[tid]);
float3 c_position = m3(helix_position[tid]);
pMatrix3x3 c_rot;
// Initialize c_rot to a rotation matrix based on quaternion c_orientation.
pMatrix_set_rotation(c_rot,c_orientation);
float3 c_u = c_rot * mv(0,0,1); // mv: Make Vector.
float3 c_v = c_rot * mv(0,1,0);
float3 c_ctr_to_right_dir = c_rot * mv(1,0,0);
pVect c_ctr_to_right = hi.helix_seg_hlength * c_ctr_to_right_dir;
float3 c_pos_right = c_position + c_ctr_to_right;
float3 c_pos_left = c_position - c_ctr_to_right;
float3 force = hi.opt_gravity ? gravity_force : vZero;
float3 torque = vZero;
const int pieces = 3;
const float delta_theta = 2 * M_PI / pieces;
/// Compute forces due to right neighbor.
//
if ( tid + 1 < hi.phys_helix_segments )
{
pQuat r_orientation = cq(helix_orientation[tid+1]);
float3 r_position = m3(helix_position[tid+1]);
pMatrix3x3 r_rot;
pMatrix_set_rotation(r_rot,r_orientation);
float3 r_u = r_rot * mv(0,0,1);
float3 r_v = r_rot * mv(0,1,0);
float3 r_ctr_to_right_dir = r_rot * mv(1,0,0);
pVect r_ctr_to_right = hi.helix_seg_hlength * r_ctr_to_right_dir;
float3 r_pos_left = r_position - r_ctr_to_right;
pQuat cn_rot_q = c_orientation * hi.helix_rn_trans;
pMatrix3x3 cn_rot;
pMatrix_set_rotation(cn_rot,cn_rot_q);
pVect n_ru = cn_rot * mv(0,0,1);
pVect n_rv = cn_rot * mv(0,1,0);
for ( int j=0; j<pieces; j++ )
{
const float theta = delta_theta * j;
pCoor c_pt = c_pos_right + cosf(theta) * n_ru + sinf(theta) * n_rv;
pCoor r_pt = r_pos_left + cosf(theta) * r_u + sinf(theta) * r_v;
pNorm dist = mn(c_pt,r_pt);
const float force_mag = dist.magnitude * hi.opt_spring_constant;
helix_apply_force_at(c_position,force,torque,c_pt,dist.v,force_mag);
}
}
/// Compute forces due to left neighbor.
//
if ( tid > 0 )
{
pQuat l_orientation = cq(helix_orientation[tid-1]);
float3 l_position = m3(helix_position[tid-1]);
pMatrix3x3 l_rot;
pMatrix_set_rotation(l_rot,l_orientation);
float3 l_u = l_rot * mv(0,0,1);
float3 l_v = l_rot * mv(0,1,0);
float3 l_ctr_to_right_dir = l_rot * mv(1,0,0);
pVect l_ctr_to_right = hi.helix_seg_hlength * l_ctr_to_right_dir;
float3 l_pos_right = l_position + l_ctr_to_right;
pQuat ln_rot_q = l_orientation * hi.helix_rn_trans;
pMatrix3x3 ln_rot;
pMatrix_set_rotation(ln_rot,ln_rot_q);
pVect n_cu = ln_rot * mv(0,0,1);
pVect n_cv = ln_rot * mv(0,1,0);
for ( int j=0; j<pieces; j++ )
{
const float theta = delta_theta * j;
pCoor c_pt = c_pos_left + cosf(theta) * c_u + sinf(theta) * c_v;
pCoor l_pt = l_pos_right + cosf(theta) * n_cu + sinf(theta) * n_cv;
pNorm dist = mn(c_pt,l_pt);
const float force_mag = dist.magnitude * hi.opt_spring_constant;
helix_apply_force_at(c_position,force,torque,c_pt,dist.v,force_mag);
}
}
float3 velocity = helix_velocity[tid];
velocity *= 0.99999f;
float3 omega = helix_omega[tid];
omega *= 0.99999f;
velocity += hi.delta_t_mass_inv * force;
const float torque_axial_mag = dot( torque, c_ctr_to_right_dir );
pVect torque_axial = torque_axial_mag * c_ctr_to_right_dir;
pVect do_axial = hi.delta_t_ma_axis * torque_axial;
pVect torque_other = torque - torque_axial;
pVect do_other = hi.delta_t_ma_perp_axis * torque_other;
omega += do_axial + do_other;
// Update velocity and omega. Don't update position or orientation
// because we don't want threads in this kernel to accidentally read
// the updated values.
helix_omega[tid] = omega;
helix_velocity[tid] = velocity;
}
__device__ void
helix_apply_force_at
(float3 position, float3& force, float3& torque,
float3 force_pos, pVect dir, float magnitude)
{
// Update force and torque of segment for a force acting on FORCE_POS
// pointing in direction DIR of magnitude MAGNITUDE.
//
force += magnitude * dir;
pVect arm = mv(position,force_pos);
pVect axis = cross( arm, dir );
pVect amt = magnitude * axis;
torque += amt;
}
__host__ void
time_step_intersect_launch
(int grid_size, int block_size, int version, int dynamic_sm_amt)
{
switch ( version ) {
  case 1:hipLaunchKernelGGL(( time_step_intersect_1), dim3(grid_size),dim3(block_size),dynamic_sm_amt, 0, ); break;
  case 2:hipLaunchKernelGGL(( time_step_intersect_2), dim3(grid_size),dim3(block_size),dynamic_sm_amt, 0, ); break;
}
}
__global__ void
time_step_update_pos()
{
// Update position and orientation of spring segments.
// Use tid for helix segment number.
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Skip out-of-range segments.
if ( tid >= hi.phys_helix_segments ) return;
if ( tid == 0 ) return;
// Update Orientation
//
pQuat orientation = cq(helix_orientation[tid]);
float3 omega = helix_omega[tid];
pNorm axis = mn(omega);
helix_orientation[tid] =
c4( quat_normalize
( quat_mult ( mq( axis, hi.delta_t * axis.magnitude ), orientation)));
// Return if at last segment and it is fixed. Note that even
// if the segment's position is fixed, it can still rotate.
//
if ( hi.opt_end_fixed && tid + 1 == hi.phys_helix_segments ) return;
  // Update Position
//
float3 position = m3(helix_position[tid]);
float3 velocity = helix_velocity[tid];
helix_position[tid] = m4(position + hi.delta_t * velocity,1);
}
__host__ void
time_step_update_pos_launch
(int grid_size, int block_size)
{
hipLaunchKernelGGL(( time_step_update_pos), dim3(grid_size),dim3(block_size), 0, 0, );
}
|
1390fd3b74682480b37f3692eb27c47a1c1136bd.cu
|
/// LSU EE 4702-1 (Fall 2016), GPU Programming
//
/// Homework 7
//
// See http://www.ece.lsu.edu/koppel/gpup/2016/hw07.pdf
/// Use this file for your solution.
#include "cuda-coord.cu"
#include "hw07.cuh"
#include <gp/cuda-util-kernel.h>
// Physical State Variables
//
__constant__ float4 *helix_position;
__constant__ float3 *helix_velocity; // Note: float4 would be faster.
__constant__ float4 *helix_orientation;
__constant__ float3 *helix_omega; // Note: float4 would be faster.
__constant__ Timing_Data *timing_data; // Measure execution time of intersect.
__constant__ Helix_Info hi; // Scalar Constants
__global__ void
time_step_intersect_1()
{
/// Homework 7 SOLUTION IN THIS ROUTINE
// Find intersections of one helix segment with some other
// segments. Each block handles several "a" segments, the threads in
// the block check for intersection with other segments, called "b"
// segments.
__shared__ clock_t time_start;
if ( !threadIdx.x ) time_start = clock64();
// Note: The size of the helix_position array is hi.phys_helix_segments.
// Compute how many "a" elements will be handled by each block.
//
const int a_per_block = hi.phys_helix_segments / gridDim.x;
// Compute how many threads handle each "a" element.
//
const int thd_per_a = blockDim.x / a_per_block;
// Compute the smallest "a" element index that this block will handle.
//
const int a_idx_block = blockIdx.x * a_per_block;
/// Assignment of "a" and "b" Values to Threads
//
// The table below is an example of how this routine
// assigns "a" and "b" elements to threads. The table
// is based upon the following values:
//
// blockDim = 8, blockIdx = 4, hi.phys_helix_segments = 1024
// m:a_per_block = 4, d:thd_per_a = 2, a_idx_block = 16
//
// tIx al a b --->
// 0 0 16 0 2 4 ... 1022
// 1 1 17 0 2 4 ... 1022
// 2 2 18 0 2 4 ... 1022
// 3 3 19 0 2 4 ... 1022
// 4 0 16 1 3 5 ... 1023
// 5 1 17 1 3 5 ... 1023
// 6 2 18 1 3 5 ... 1023
// 7 3 19 1 3 5 ... 1023
// | | | |
// | | | |
// | | | |--------> b_idx_start
// | | |--------------> a_idx
// | |--------------------> a_local_idx
// |--------------------------> threadIdx.x
// Compute a_idx and b_idx_start to realize ordering above.
//
const int a_local_idx = threadIdx.x % a_per_block;
const int a_idx = a_idx_block + a_local_idx;
const int b_idx_start = threadIdx.x / a_per_block;
const int min_idx_dist = 0.999f + hi.wire_radius / hi.helix_seg_hlength;
const float four_wire_radius_sq = 4 * hi.wire_radius * hi.wire_radius;
// Declare dynamically allocated shared memory. Will be split
// between array for forces, force, and position cache, pos_cache.
//
extern __shared__ float3 shared[];
const bool use_shared =
hi.opt_sm_option == SMO_one_iteration
|| hi.opt_sm_option == SMO_multiple_iterations;
pVect* const force = shared;
float3* const pos_cache = &shared[a_per_block];
if ( threadIdx.x < a_per_block ) force[threadIdx.x] = mv(0,0,0);
__syncthreads();
const float3 a_position = m3(helix_position[a_idx]);
for ( int j=b_idx_start; j<hi.phys_helix_segments; j += thd_per_a )
{
if ( hi.opt_sm_option == SMO_one_iteration )
{
__syncthreads();
if ( threadIdx.x < thd_per_a )
pos_cache[threadIdx.x] =
m3(helix_position[ j - b_idx_start + threadIdx.x ] );
__syncthreads();
}
else if ( hi.opt_sm_option == SMO_multiple_iterations )
{
__syncthreads();
if ( threadIdx.x < thd_per_a )
pos_cache[threadIdx.x] =
m3(helix_position[ j - b_idx_start + threadIdx.x ] );
__syncthreads();
}
float3 b_position =
use_shared ? pos_cache[b_idx_start] : m3(helix_position[j]);
pVect ab = mv(a_position,b_position);
// Skip if segment is too close.
if ( abs(a_idx-j) < min_idx_dist ) continue;
// Skip if no chance of intersection.
if ( mag_sq(ab) >= four_wire_radius_sq ) continue;
// Compute intersection force based on bounding sphere, an
// admittedly crude approximation.
//
pNorm dist = mn(ab);
const float pen = 2 * hi.wire_radius - dist.magnitude;
float3 f = pen * hi.opt_spring_constant * dist;
// Add force to shared variable. This is time consuming
// (especially in CC 3.x and older GPUs) but done
// infrequently. (A segment can normally only intersect a few
// other segments.)
//
atomicAdd(&force[a_local_idx].x,f.x);
atomicAdd(&force[a_local_idx].y,f.y);
atomicAdd(&force[a_local_idx].z,f.z);
//
// Optimization Note: Could acquire a lock and then update
// all three components.
}
// Wait for all threads to finish.
__syncthreads();
// Leave it to the first a_per_block threads to update velocity.
if ( threadIdx.x >= a_per_block ) return;
// Update velocity and write it.
//
float3 velocity = helix_velocity[a_idx];
velocity -= hi.delta_t_mass_inv * force[a_local_idx];
if ( hi.opt_end_fixed && a_idx + 1 == hi.phys_helix_segments )
velocity = mv(0,0,0);
helix_velocity[a_idx] = velocity;
if ( !threadIdx.x )
{
timing_data[blockIdx.x].intersect_time += clock64() - time_start;
timing_data[blockIdx.x].intersect_count++;
}
}
__global__ void
time_step_intersect_2()
{
/// DO NOT MODIFY THIS ROUTINE.
// Find intersections of one helix segment with some other
// segments. Each block handles several "a" segments, the threads in the
// block check for intersection with other segments, called "b"
// segments.
__shared__ clock_t time_start;
if ( !threadIdx.x ) time_start = clock64();
// Note: The size of the helix_position array is hi.phys_helix_segments.
// Compute how many "a" elements will be handled by each block.
//
const int a_per_block = hi.phys_helix_segments / gridDim.x;
// Compute how many threads handle each "a" element.
//
const int thd_per_a = blockDim.x / a_per_block;
// Compute the smallest "a" element index that this block will handle.
//
const int a_idx_block = blockIdx.x * a_per_block;
/// Assignment of "a" and "b" Values to Threads
//
// The table below is an example of how this routine
// assigns "a" and "b" elements to threads. The table
// is based upon the following values:
//
// blockDim = 8, blockIdx = 4, hi.phys_helix_segments = 1024
// a_per_block = 4, thd_per_a = 2, a_idx_block = 16
//
// tIx al a b --->
// 0 0 16 0 2 4 ...
// 1 0 16 1 3 5
// 2 1 17 0 2 4
// 3 1 17 1 3 5
// 4 2 18 0 2 4
// 5 2 18 1 3 5
// 6 3 19 0 2 4
// 7 3 19 1 3 5
// | | | |
// | | | |
// | | | |--------> b_idx_start
// | | |--------------> a_idx
// | |--------------------> a_local_idx
// |--------------------------> threadIdx.x
// Compute a_idx and b_idx_start to realize ordering above.
//
const int a_local_idx = threadIdx.x / thd_per_a;
const int a_idx = a_idx_block + a_local_idx;
const int b_idx_start = threadIdx.x % thd_per_a;
const float3 a_position = m3(helix_position[a_idx]);
const int min_idx_dist = 0.999f + hi.wire_radius / hi.helix_seg_hlength;
const float four_wire_radius_sq = 4 * hi.wire_radius * hi.wire_radius;
// Declare dynamically allocated shared memory. Will be split
// between array for forces, force, and position cache, pos_cache.
//
extern __shared__ float3 shared[];
pVect* const force = shared;
if ( threadIdx.x < a_per_block ) force[threadIdx.x] = mv(0,0,0);
// Wait for the first a_per_block threads to initialize force.
__syncthreads();
const bool use_shared =
hi.opt_sm_option == SMO_one_iteration
|| hi.opt_sm_option == SMO_multiple_iterations;
float3* const pos_cache = &shared[a_per_block];
/// DO NOT MODIFY THIS ROUTINE.
for ( int j=b_idx_start; j<hi.phys_helix_segments; j += thd_per_a )
{
if ( hi.opt_sm_option == SMO_one_iteration )
{
__syncthreads();
if ( threadIdx.x < thd_per_a )
pos_cache[threadIdx.x] = m3(helix_position[j]);
__syncthreads();
}
else if ( hi.opt_sm_option == SMO_multiple_iterations )
{
__syncthreads();
if ( threadIdx.x < thd_per_a )
pos_cache[threadIdx.x] = m3(helix_position[j]);
__syncthreads();
}
float3 b_position =
use_shared ? pos_cache[b_idx_start] : m3(helix_position[j]);
pVect ab = mv(a_position,b_position);
// Skip if segment is too close.
if ( abs(a_idx-j) < min_idx_dist ) continue;
// Skip if no chance of intersection.
if ( mag_sq(ab) >= four_wire_radius_sq ) continue;
// Compute intersection force based on bounding sphere, an
// admittedly crude approximation.
//
pNorm dist = mn(ab);
const float pen = 2 * hi.wire_radius - dist.magnitude;
float3 f = pen * hi.opt_spring_constant * dist;
// Add force to shared variable. This is time consuming but
// done infrequently. (A segment can normally only intersect a
// few other segments.)
//
atomicAdd(&force[a_local_idx].x,f.x);
atomicAdd(&force[a_local_idx].y,f.y);
atomicAdd(&force[a_local_idx].z,f.z);
//
// Optimization Note: Could acquire a lock and then update
// all three components.
}
// Wait for all threads to finish.
__syncthreads();
// Leave it to the first a_per_block threads to update velocity.
if ( threadIdx.x >= a_per_block ) return;
{
// Re-compute a_idx so that first a_per_block threads can write
// velocities.
const int a_local_idx = threadIdx.x;
const int a_idx = a_idx_block + a_local_idx;
// Update velocity and write it.
//
float3 velocity = helix_velocity[a_idx];
velocity -= hi.delta_t_mass_inv * force[a_local_idx];
if ( hi.opt_end_fixed && a_idx + 1 == hi.phys_helix_segments )
velocity = mv(0,0,0);
helix_velocity[a_idx] = velocity;
if ( !threadIdx.x )
{
timing_data[blockIdx.x].intersect_time += clock64() - time_start;
timing_data[blockIdx.x].intersect_count++;
}
}
}
__global__ void time_step();
__global__ void time_step_intersect_1();
__global__ void time_step_intersect_2();
__global__ void time_step_update_pos();
__host__ cudaError_t
cuda_setup(GPU_Info *gpu_info)
{
// Pass the device address to host code. (See gp/cuda-util-kernel.h ).
CU_SYM(helix_position);
CU_SYM(helix_velocity);
CU_SYM(helix_orientation);
CU_SYM(helix_omega);
CU_SYM(timing_data);
CU_SYM(hi);
// Return attributes of CUDA functions. The code needs the
// maximum number of threads.
cudaError_t e1 = cudaSuccess;
gpu_info->GET_INFO(time_step);
gpu_info->GET_INFO(time_step_intersect_1);
gpu_info->GET_INFO(time_step_intersect_2);
gpu_info->GET_INFO(time_step_update_pos);
return e1;
}
__host__ void time_step_launch(int grid_size, int block_size)
{
time_step<<<grid_size,block_size>>>();
}
__device__ void
helix_apply_force_at
(float3 position, float3& force, float3& torque,
float3 force_pos, pVect dir, float magnitude);
__global__ void
time_step()
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Use tid for helix segment number.
if ( tid + 1 > hi.phys_helix_segments ) return;
// The position of segment 0 is fixed, so don't evolve it.
if ( tid == 0 ) return;
pVect vZero = mv(0,0,0);
pVect gravity_force = hi.helix_seg_mass_inv * hi.gravity_accel;
pQuat c_orientation = cq(helix_orientation[tid]);
float3 c_position = m3(helix_position[tid]);
pMatrix3x3 c_rot;
// Initialize c_rot to a rotation matrix based on quaternion c_orientation.
pMatrix_set_rotation(c_rot,c_orientation);
float3 c_u = c_rot * mv(0,0,1); // mv: Make Vector.
float3 c_v = c_rot * mv(0,1,0);
float3 c_ctr_to_right_dir = c_rot * mv(1,0,0);
pVect c_ctr_to_right = hi.helix_seg_hlength * c_ctr_to_right_dir;
float3 c_pos_right = c_position + c_ctr_to_right;
float3 c_pos_left = c_position - c_ctr_to_right;
float3 force = hi.opt_gravity ? gravity_force : vZero;
float3 torque = vZero;
const int pieces = 3;
const float delta_theta = 2 * M_PI / pieces;
/// Compute forces due to right neighbor.
//
if ( tid + 1 < hi.phys_helix_segments )
{
pQuat r_orientation = cq(helix_orientation[tid+1]);
float3 r_position = m3(helix_position[tid+1]);
pMatrix3x3 r_rot;
pMatrix_set_rotation(r_rot,r_orientation);
float3 r_u = r_rot * mv(0,0,1);
float3 r_v = r_rot * mv(0,1,0);
float3 r_ctr_to_right_dir = r_rot * mv(1,0,0);
pVect r_ctr_to_right = hi.helix_seg_hlength * r_ctr_to_right_dir;
float3 r_pos_left = r_position - r_ctr_to_right;
pQuat cn_rot_q = c_orientation * hi.helix_rn_trans;
pMatrix3x3 cn_rot;
pMatrix_set_rotation(cn_rot,cn_rot_q);
pVect n_ru = cn_rot * mv(0,0,1);
pVect n_rv = cn_rot * mv(0,1,0);
for ( int j=0; j<pieces; j++ )
{
const float theta = delta_theta * j;
pCoor c_pt = c_pos_right + cosf(theta) * n_ru + sinf(theta) * n_rv;
pCoor r_pt = r_pos_left + cosf(theta) * r_u + sinf(theta) * r_v;
pNorm dist = mn(c_pt,r_pt);
const float force_mag = dist.magnitude * hi.opt_spring_constant;
helix_apply_force_at(c_position,force,torque,c_pt,dist.v,force_mag);
}
}
/// Compute forces due to left neighbor.
//
if ( tid > 0 )
{
pQuat l_orientation = cq(helix_orientation[tid-1]);
float3 l_position = m3(helix_position[tid-1]);
pMatrix3x3 l_rot;
pMatrix_set_rotation(l_rot,l_orientation);
float3 l_u = l_rot * mv(0,0,1);
float3 l_v = l_rot * mv(0,1,0);
float3 l_ctr_to_right_dir = l_rot * mv(1,0,0);
pVect l_ctr_to_right = hi.helix_seg_hlength * l_ctr_to_right_dir;
float3 l_pos_right = l_position + l_ctr_to_right;
pQuat ln_rot_q = l_orientation * hi.helix_rn_trans;
pMatrix3x3 ln_rot;
pMatrix_set_rotation(ln_rot,ln_rot_q);
pVect n_cu = ln_rot * mv(0,0,1);
pVect n_cv = ln_rot * mv(0,1,0);
for ( int j=0; j<pieces; j++ )
{
const float theta = delta_theta * j;
pCoor c_pt = c_pos_left + cosf(theta) * c_u + sinf(theta) * c_v;
pCoor l_pt = l_pos_right + cosf(theta) * n_cu + sinf(theta) * n_cv;
pNorm dist = mn(c_pt,l_pt);
const float force_mag = dist.magnitude * hi.opt_spring_constant;
helix_apply_force_at(c_position,force,torque,c_pt,dist.v,force_mag);
}
}
float3 velocity = helix_velocity[tid];
velocity *= 0.99999f;
float3 omega = helix_omega[tid];
omega *= 0.99999f;
velocity += hi.delta_t_mass_inv * force;
const float torque_axial_mag = dot( torque, c_ctr_to_right_dir );
pVect torque_axial = torque_axial_mag * c_ctr_to_right_dir;
pVect do_axial = hi.delta_t_ma_axis * torque_axial;
pVect torque_other = torque - torque_axial;
pVect do_other = hi.delta_t_ma_perp_axis * torque_other;
omega += do_axial + do_other;
// Update velocity and omega. Don't update position or orientation
// because we don't want threads in this kernel to accidentally read
// the updated values.
helix_omega[tid] = omega;
helix_velocity[tid] = velocity;
}
__device__ void
helix_apply_force_at
(float3 position, float3& force, float3& torque,
float3 force_pos, pVect dir, float magnitude)
{
// Update force and torque of segment for a force acting on FORCE_POS
// pointing in direction DIR of magnitude MAGNITUDE.
//
force += magnitude * dir;
pVect arm = mv(position,force_pos);
pVect axis = cross( arm, dir );
pVect amt = magnitude * axis;
torque += amt;
}
__host__ void
time_step_intersect_launch
(int grid_size, int block_size, int version, int dynamic_sm_amt)
{
switch ( version ) {
case 1: time_step_intersect_1<<<grid_size,block_size,dynamic_sm_amt>>>(); break;
case 2: time_step_intersect_2<<<grid_size,block_size,dynamic_sm_amt>>>(); break;
}
}
__global__ void
time_step_update_pos()
{
// Update position and orientation of spring segments.
// Use tid for helix segment number.
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Skip out-of-range segments.
if ( tid >= hi.phys_helix_segments ) return;
if ( tid == 0 ) return;
// Update Orientation
//
pQuat orientation = cq(helix_orientation[tid]);
float3 omega = helix_omega[tid];
pNorm axis = mn(omega);
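// Integrate angular velocity: compose the current orientation with a
// rotation of |omega| * delta_t about omega's direction, then renormalize
// to limit floating-point drift (assuming mq(axis,angle) builds an
// axis-angle rotation quaternion).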
helix_orientation[tid] =
c4( quat_normalize
( quat_mult ( mq( axis, hi.delta_t * axis.magnitude ), orientation)));
// Return if at last segment and it is fixed. Note that even
// if the segment's position is fixed, it can still rotate.
//
if ( hi.opt_end_fixed && tid + 1 == hi.phys_helix_segments ) return;
// Update Position
//
float3 position = m3(helix_position[tid]);
float3 velocity = helix_velocity[tid];
helix_position[tid] = m4(position + hi.delta_t * velocity,1);
}
__host__ void
time_step_update_pos_launch
(int grid_size, int block_size)
{
time_step_update_pos<<<grid_size,block_size>>>();
}
|
5204b547df66b89f96637c9d71789ec3c04e208a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "cuda_device.h"
#include <cstdlib>
#include <math.h>
#include "../common/macro.h"
namespace va_cv {
int CudaDevice::get_device_count() {
int dev_count = 0;
int err = hipGetDeviceCount( &dev_count );
return err == hipSuccess ? dev_count : -1;
}
int CudaDevice::set_device(int device) {
return hipSetDevice( device );
}
}
|
5204b547df66b89f96637c9d71789ec3c04e208a.cu
|
#include "cuda_device.h"
#include <cstdlib>
#include <math.h>
#include "../common/macro.h"
namespace va_cv {
int CudaDevice::get_device_count() {
int dev_count = 0;
int err = cudaGetDeviceCount( &dev_count );
return err == cudaSuccess ? dev_count : -1;
}
int CudaDevice::set_device(int device) {
return cudaSetDevice( device );
}
}
|
d6353039b108fa995fdc73d4ea1d0835df950544.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "header.h"
using namespace std;
/*--------------------------------------------
Function that reads the parameter file
--------------------------------------------*/
void get_param(VolumeParameter *VolParam,char *paramFile){ // function that obtains the reconstruction parameters
FILE *fp1;
if ((fp1 = fopen(paramFile, "rb")) == NULL){ //Reading Binary file! be CAREFULL with 'b'!
fprintf(stderr, "%s does not exist\n", paramFile);
exit(1);
}
/* size_t fread ( void * ptr, size_t size, size_t count, FILE * stream );
Reads an array of count elements, each one with a size of size bytes,
from the stream and stores them in the block of memory specified by ptr.
*/
fread(VolParam->picParam, sizeof(PictureParameter), 1, fp1); // common parameters
VolParam->pixel_num = VolParam->picParam->nu * VolParam->picParam->nv; // compute the number of pixels in a projection image
VolParam->pic_data_size = sizeof(float) * VolParam->pixel_num; // compute the data size of a projection image
VolParam->sub_voxel_num = VolParam->picParam->nx * VolParam->picParam->ny * Z_PIXEL; // compute the number of voxels in a sub-volume
VolParam->subVol_data_size = sizeof(float) * VolParam->sub_voxel_num; // compute the data size of a sub-volume
fclose(fp1);
/*-------------------------Allocate host memory for picture ----------------------------*/
/*----------------- Copy CT projection picture from disk to host memory ----------------------------------*/
if ((VolParam->picture.picArray = (float **)malloc(sizeof(float *) * VolParam->picParam->np)) == NULL){
fprintf(stderr, "Can't allocate projection region.\n");
exit(1);
}
/*---------------------Allocate the volume storage region----------------------------------*/
/*
if ((VolParam->volume.volArray = (float **)malloc(sizeof(float *) * VolParam->picParam->nz / Z_PIXEL)) == NULL){
fprintf(stderr, "Can't allocate volume region.\n");
exit(1);
}
for (int i = 0; i < VolParam->picParam->nz / Z_PIXEL; i++){
checkCudaErrors(hipHostMalloc((void**)&VolParam->volume.volArray[i], VolParam->subVol_data_size, hipHostMallocDefault));
}
*/
checkCudaErrors(hipHostMalloc((void**)&VolParam->subVolume, VolParam->subVol_data_size, hipHostMallocDefault));
//Calcuate_Filter(VolParam->picParam);
//printf("Parameter file name: %s\n", paramFile);
printf("SID: %f\n", VolParam->picParam->sid);
printf("SOD: %f\n", VolParam->picParam->sod);
printf("midplane: %f\n", VolParam->picParam->midplane);
printf("center: %f\n", VolParam->picParam->center);
printf("dpu: %f\n", VolParam->picParam->dpu);
printf("dpv: %f\n", VolParam->picParam->dpv);
printf("nu: %d\n", VolParam->picParam->nu);
printf("nv: %d\n", VolParam->picParam->nv);
printf("np: %d\n", VolParam->picParam->np);
printf("nx: %d\n", VolParam->picParam->nx);
printf("ny: %d\n", VolParam->picParam->ny);
printf("nz: %d\n", VolParam->picParam->nz);
printf("centerx: %f\n", VolParam->picParam->centerx);
printf("centery: %f\n", VolParam->picParam->centery);
printf("centerz: %f\n", VolParam->picParam->centerz);
printf("pixel_pitch: %f\n", VolParam->picParam->pixel_pitch);
printf("slice_pitch: %f\n", VolParam->picParam->slice_pitch);
printf("u_keisu: %f\n", VolParam->picParam->u_keisu);
printf("v_keisu: %f\n", VolParam->picParam->v_keisu);
printf("c0: %f\n", VolParam->picParam->c0);
printf("c1: %f\n", VolParam->picParam->c1);
printf("one picture data size: %d [MB]\n", VolParam->pic_data_size / 1048576);
printf("SubVolume data size: %lld [MB]\n", (long long)VolParam->subVol_data_size / 1048576);
}
/*--------------------------------------------
Function that writes out the generated volume
--------------------------------------------*/
void OutputToDisk(VolumeParameter *volPrm, int Sub_Vol_ID,char *Volname){ // function that writes the volume to a file
FILE *volFile;
char filename[1024];
sprintf(filename, "%sOutputVol_%d.dat",Volname,Sub_Vol_ID);
if ((volFile = fopen(filename, "wb")) == NULL){
printf("%s is not exist\n", filename);
exit(1);
}
float *volDump;//buffer for one piece of picture
if ((volDump = (float *)malloc(sizeof(float) * volPrm->picParam->nx * volPrm->picParam->ny)) == NULL){
fprintf(stderr, "memory allocate error!\n");
exit(1);
}
/* data is written out slice by slice as little-endian float values */
for (int z = 0; z < Z_PIXEL; z++){
for (int y = 0; y < volPrm->picParam->ny; y++){
for (int x = 0; x < volPrm->picParam->nx; x++){
volDump[x + y * volPrm->picParam->nx] =
volPrm->subVolume[x + y *volPrm->picParam->nx + z *volPrm->picParam->nx *volPrm->picParam->ny];
}
}
fwrite(volDump, sizeof(float), volPrm->picParam->nx * volPrm->picParam->ny, volFile);
if (z % 50 == 0){printf(".");}
}
free(volDump);
fclose(volFile);
}
/*--------------------------------------------
Function that frees the allocated memory regions
--------------------------------------------*/
void free_host_mem(VolumeParameter *VolPrm){
for (int i = 0; i < VolPrm->picParam->np; i++){
hipHostFree(VolPrm->picture.picArray[i]);
}
free(VolPrm->picture.picArray);
hipHostFree(VolPrm->subVolume);
}
|
d6353039b108fa995fdc73d4ea1d0835df950544.cu
|
#include "header.h"
using namespace std;
/*--------------------------------------------
Function that reads the parameter file
--------------------------------------------*/
void get_param(VolumeParameter *VolParam,char *paramFile){ // function that obtains the reconstruction parameters
FILE *fp1;
if ((fp1 = fopen(paramFile, "rb")) == NULL){ //Reading Binary file! be CAREFULL with 'b'!
fprintf(stderr, "%s does not exist\n", paramFile);
exit(1);
}
/* size_t fread ( void * ptr, size_t size, size_t count, FILE * stream );
Reads an array of count elements, each one with a size of size bytes,
from the stream and stores them in the block of memory specified by ptr.
*/
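/* Note: reading the struct with a single fread assumes the file was
   written with an identical PictureParameter layout (same packing and
   endianness). */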
fread(VolParam->picParam, sizeof(PictureParameter), 1, fp1); // common parameters
VolParam->pixel_num = VolParam->picParam->nu * VolParam->picParam->nv; // compute the number of pixels in a projection image
VolParam->pic_data_size = sizeof(float) * VolParam->pixel_num; // compute the data size of a projection image
VolParam->sub_voxel_num = VolParam->picParam->nx * VolParam->picParam->ny * Z_PIXEL; // compute the number of voxels in a sub-volume
VolParam->subVol_data_size = sizeof(float) * VolParam->sub_voxel_num; // compute the data size of a sub-volume
fclose(fp1);
/*-------------------------Allocate host memory for picture ----------------------------*/
/*----------------- Copy CT projection picture from disk to host memory ----------------------------------*/
if ((VolParam->picture.picArray = (float **)malloc(sizeof(float *) * VolParam->picParam->np)) == NULL){
fprintf(stderr, "Can't allocate projection region.\n");
exit(1);
}
/*---------------------Allocate the volume storage region----------------------------------*/
/*
if ((VolParam->volume.volArray = (float **)malloc(sizeof(float *) * VolParam->picParam->nz / Z_PIXEL)) == NULL){
fprintf(stderr, "Can't allocate volume region.\n");
exit(1);
}
for (int i = 0; i < VolParam->picParam->nz / Z_PIXEL; i++){
checkCudaErrors(cudaHostAlloc((void**)&VolParam->volume.volArray[i], VolParam->subVol_data_size, cudaHostAllocDefault));
}
*/
checkCudaErrors(cudaHostAlloc((void**)&VolParam->subVolume, VolParam->subVol_data_size, cudaHostAllocDefault));
//Calcuate_Filter(VolParam->picParam);
//printf("Parameter file name: %s\n", paramFile);
printf("SID: %f\n", VolParam->picParam->sid);
printf("SOD: %f\n", VolParam->picParam->sod);
printf("midplane: %f\n", VolParam->picParam->midplane);
printf("center: %f\n", VolParam->picParam->center);
printf("dpu: %f\n", VolParam->picParam->dpu);
printf("dpv: %f\n", VolParam->picParam->dpv);
printf("nu: %d\n", VolParam->picParam->nu);
printf("nv: %d\n", VolParam->picParam->nv);
printf("np: %d\n", VolParam->picParam->np);
printf("nx: %d\n", VolParam->picParam->nx);
printf("ny: %d\n", VolParam->picParam->ny);
printf("nz: %d\n", VolParam->picParam->nz);
printf("centerx: %f\n", VolParam->picParam->centerx);
printf("centery: %f\n", VolParam->picParam->centery);
printf("centerz: %f\n", VolParam->picParam->centerz);
printf("pixel_pitch: %f\n", VolParam->picParam->pixel_pitch);
printf("slice_pitch: %f\n", VolParam->picParam->slice_pitch);
printf("u_keisu: %f\n", VolParam->picParam->u_keisu);
printf("v_keisu: %f\n", VolParam->picParam->v_keisu);
printf("c0: %f\n", VolParam->picParam->c0);
printf("c1: %f\n", VolParam->picParam->c1);
printf("one picture data size: %d [MB]\n", VolParam->pic_data_size / 1048576);
printf("SubVolume data size: %lld [MB]\n", (long long)VolParam->subVol_data_size / 1048576);
}
/*--------------------------------------------
Function that writes out the generated volume
--------------------------------------------*/
void OutputToDisk(VolumeParameter *volPrm, int Sub_Vol_ID,char *Volname){ // function that writes the volume to a file
FILE *volFile;
char filename[1024];
sprintf(filename, "%sOutputVol_%d.dat",Volname,Sub_Vol_ID);
if ((volFile = fopen(filename, "wb")) == NULL){
printf("%s is not exist\n", filename);
exit(1);
}
float *volDump;//buffer for one piece of picture
if ((volDump = (float *)malloc(sizeof(float) * volPrm->picParam->nx * volPrm->picParam->ny)) == NULL){
fprintf(stderr, "memory allocate error!\n");
exit(1);
}
/* data is written out slice by slice as little-endian float values */
for (int z = 0; z < Z_PIXEL; z++){
for (int y = 0; y < volPrm->picParam->ny; y++){
for (int x = 0; x < volPrm->picParam->nx; x++){
volDump[x + y * volPrm->picParam->nx] =
volPrm->subVolume[x + y *volPrm->picParam->nx + z *volPrm->picParam->nx *volPrm->picParam->ny];
}
}
fwrite(volDump, sizeof(float), volPrm->picParam->nx * volPrm->picParam->ny, volFile);
if (z % 50 == 0){printf(".");}
}
free(volDump);
fclose(volFile);
}
/*--------------------------------------------
Function that frees the allocated memory regions
--------------------------------------------*/
void free_host_mem(VolumeParameter *VolPrm){
for (int i = 0; i < VolPrm->picParam->np; i++){
cudaFreeHost(VolPrm->picture.picArray[i]);
}
free(VolPrm->picture.picArray);
cudaFreeHost(VolPrm->subVolume);
}
|
2cb66d2824cc894bff37652a367a6fe9fc864240.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// common headers and config
#include "config.h"
// GPU version: all read-only input data are allocated to use global memory: val[], cols[], rowDelimiters[]
// output data out[] is also using global memory
__global__ void
spmv_kernel(const float* val,
const int * cols,
const int * rowDelimiters,
const float * vec,
const int dim, float * out)
{
// Thread ID in block
int t = threadIdx.x;
// Thread ID within warp
int id = t & (WARP_SIZE-1);
int warpsPerBlock = blockDim.x / WARP_SIZE;
// One row per warp
int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE);
__shared__ volatile float partialSums[BLOCK_SIZE];
if (myRow < dim)
{
int warpStart = rowDelimiters[myRow];
int warpEnd = rowDelimiters[myRow+1];
float mySum = 0;
for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE)
{
int col = cols[j];
mySum += val[j] *vec[col];
}
partialSums[t] = mySum;
// Reduce partial sums
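// Intra-warp tree reduction over the volatile shared array: each step
// halves the number of active lanes, and the row's sum ends up in the
// warp's first slot. No __syncthreads is needed because all lanes
// belong to one warp, but on GPUs with independent thread scheduling
// (Volta and later) this warp-synchronous pattern may need __syncwarp().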
if (id < 16) partialSums[t] += partialSums[t+16];
if (id < 8) partialSums[t] += partialSums[t+ 8];
if (id < 4) partialSums[t] += partialSums[t+ 4];
if (id < 2) partialSums[t] += partialSums[t+ 2];
if (id < 1) partialSums[t] += partialSums[t+ 1];
// Write result
if (id == 0)
{
out[myRow] = partialSums[t];
}
}
}
int main(int argc, char **argv) {
// select GPU to be used
hipSetDevice(1);
srand(2013);
float *h_spmv_val, *h_spmv_valPad;
int *h_spmv_cols, *h_spmv_colsPad;
int *h_rowDelimiters, *h_rowDelimitersPad;
float *h_spmv_vec, *h_spmv_out, *spmv_refOut;
int spmv_nItems, nItemsPadded, spmv_numRows;
spmv_numRows = MSIZE * (BLOCK_SIZE/WARP_SIZE);
// this can cause int overflow! divide first!
spmv_nItems = spmv_numRows * (spmv_numRows / SFactor);
float maxval = 200.0;
printf ("numRows =%u\n",spmv_numRows );
printf ("nItems=%u\n", spmv_nItems);
printf ("host val[] alloc size =%u\n", spmv_nItems * sizeof(float));
printf ("host cols[] alloc size =%u\n", spmv_nItems * sizeof(int));
printf ("host rowDelimiters[] alloc size =%u\n", (spmv_numRows + 1) * sizeof(int));
HANDLE_ERROR(hipHostMalloc(&h_spmv_val, spmv_nItems * sizeof(float)));
HANDLE_ERROR(hipHostMalloc(&h_spmv_cols, spmv_nItems * sizeof(int)));
HANDLE_ERROR(hipHostMalloc(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int)));
fill(h_spmv_val, spmv_nItems, maxval);
initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows);
// Set up remaining host data
int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR);
HANDLE_ERROR(hipHostMalloc(&h_spmv_vec, spmv_numRows * sizeof(float))) ;
spmv_refOut = new float[spmv_numRows];
HANDLE_ERROR(hipHostMalloc(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int)));
fill(h_spmv_vec, spmv_numRows, maxval);
HANDLE_ERROR(hipHostMalloc(&h_spmv_out, paddedSize * sizeof(float)));
convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad,
&h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded);
// Compute reference solution
spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut);
float *d_spmv_val, *d_spmv_vec, *d_spmv_out;
int *d_spmv_cols, *d_rowDelimiters;
// Allocate device memory
HANDLE_ERROR(hipMalloc(&d_spmv_val, spmv_nItems * sizeof(float)));
HANDLE_ERROR(hipMalloc(&d_spmv_cols, spmv_nItems * sizeof(int)));
HANDLE_ERROR(hipMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int)));
HANDLE_ERROR(hipMalloc(&d_spmv_vec, spmv_numRows * sizeof(float)));
HANDLE_ERROR(hipMalloc(&d_spmv_out, spmv_numRows * sizeof(float)));
// Transfer data to device
HANDLE_ERROR(hipMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), hipMemcpyHostToDevice));
// Setup thread configuration
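// One warp per matrix row: each block holds BLOCK_SIZE/WARP_SIZE warps,
// so ceil(numRows / warpsPerBlock) blocks cover every row.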
int spmv_grid = (int) ceil(spmv_numRows / (float)(BLOCK_SIZE / WARP_SIZE));
// warm up the GPU
for (int i=0; i<5; i++) // warm-up: 5 iterations
{
hipLaunchKernelGGL(( spmv_kernel) , dim3(spmv_grid), dim3(BLOCK_SIZE), 0, 0,
d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out);
}
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_stop);
float kernel_time = 0.0f;
hipEventRecord(kernel_start, 0);
for (int i=0; i<ITERATIONS; i++) // repeat ITERATIONS times
{
hipLaunchKernelGGL(( spmv_kernel) , dim3(spmv_grid), dim3(BLOCK_SIZE), 0, 0,
d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out);
}
hipDeviceSynchronize();
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
// get elapsed time
hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time/ITERATIONS << endl;
HANDLE_ERROR(hipMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), hipMemcpyDeviceToHost));
// spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows);
return 0;
}
|
2cb66d2824cc894bff37652a367a6fe9fc864240.cu
|
// common headers and config
#include "config.h"
// GPU version: all read-only input data are allocated to use global memory: val[], cols[], rowDelimiters[]
// output data out[] is also using global memory
__global__ void
spmv_kernel(const float* val,
const int * cols,
const int * rowDelimiters,
const float * vec,
const int dim, float * out)
{
// Thread ID in block
int t = threadIdx.x;
// Thread ID within warp
int id = t & (WARP_SIZE-1);
int warpsPerBlock = blockDim.x / WARP_SIZE;
// One row per warp
int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE);
__shared__ volatile float partialSums[BLOCK_SIZE];
if (myRow < dim)
{
int warpStart = rowDelimiters[myRow];
int warpEnd = rowDelimiters[myRow+1];
float mySum = 0;
for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE)
{
int col = cols[j];
mySum += val[j] *vec[col];
}
partialSums[t] = mySum;
// Reduce partial sums
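// Intra-warp tree reduction over the volatile shared array: each step
// halves the number of active lanes, and the row's sum ends up in the
// warp's first slot. No __syncthreads is needed because all lanes
// belong to one warp, but on GPUs with independent thread scheduling
// (Volta and later) this warp-synchronous pattern may need __syncwarp().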
if (id < 16) partialSums[t] += partialSums[t+16];
if (id < 8) partialSums[t] += partialSums[t+ 8];
if (id < 4) partialSums[t] += partialSums[t+ 4];
if (id < 2) partialSums[t] += partialSums[t+ 2];
if (id < 1) partialSums[t] += partialSums[t+ 1];
// Write result
if (id == 0)
{
out[myRow] = partialSums[t];
}
}
}
int main(int argc, char **argv) {
// select GPU to be used
cudaSetDevice(1);
srand(2013);
float *h_spmv_val, *h_spmv_valPad;
int *h_spmv_cols, *h_spmv_colsPad;
int *h_rowDelimiters, *h_rowDelimitersPad;
float *h_spmv_vec, *h_spmv_out, *spmv_refOut;
int spmv_nItems, nItemsPadded, spmv_numRows;
spmv_numRows = MSIZE * (BLOCK_SIZE/WARP_SIZE);
// this can cause int overflow! divide first!
spmv_nItems = spmv_numRows * (spmv_numRows / SFactor);
float maxval = 200.0;
printf ("numRows =%u\n",spmv_numRows );
printf ("nItems=%u\n", spmv_nItems);
printf ("host val[] alloc size =%u\n", spmv_nItems * sizeof(float));
printf ("host cols[] alloc size =%u\n", spmv_nItems * sizeof(int));
printf ("host rowDelimiters[] alloc size =%u\n", (spmv_numRows + 1) * sizeof(int));
HANDLE_ERROR(cudaMallocHost(&h_spmv_val, spmv_nItems * sizeof(float)));
HANDLE_ERROR(cudaMallocHost(&h_spmv_cols, spmv_nItems * sizeof(int)));
HANDLE_ERROR(cudaMallocHost(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int)));
fill(h_spmv_val, spmv_nItems, maxval);
initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows);
// Set up remaining host data
int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR);
HANDLE_ERROR(cudaMallocHost(&h_spmv_vec, spmv_numRows * sizeof(float))) ;
spmv_refOut = new float[spmv_numRows];
HANDLE_ERROR(cudaMallocHost(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int)));
fill(h_spmv_vec, spmv_numRows, maxval);
HANDLE_ERROR(cudaMallocHost(&h_spmv_out, paddedSize * sizeof(float)));
convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad,
&h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded);
// Compute reference solution
spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut);
float *d_spmv_val, *d_spmv_vec, *d_spmv_out;
int *d_spmv_cols, *d_rowDelimiters;
// Allocate device memory
HANDLE_ERROR(cudaMalloc(&d_spmv_val, spmv_nItems * sizeof(float)));
HANDLE_ERROR(cudaMalloc(&d_spmv_cols, spmv_nItems * sizeof(int)));
HANDLE_ERROR(cudaMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int)));
HANDLE_ERROR(cudaMalloc(&d_spmv_vec, spmv_numRows * sizeof(float)));
HANDLE_ERROR(cudaMalloc(&d_spmv_out, spmv_numRows * sizeof(float)));
// Transfer data to device
HANDLE_ERROR(cudaMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), cudaMemcpyHostToDevice));
// Setup thread configuration
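// One warp per matrix row: each block holds BLOCK_SIZE/WARP_SIZE warps,
// so ceil(numRows / warpsPerBlock) blocks cover every row.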
int spmv_grid = (int) ceil(spmv_numRows / (float)(BLOCK_SIZE / WARP_SIZE));
// warm up the GPU
for (int i=0; i<5; i++) // warm-up: 5 iterations
{
spmv_kernel <<<spmv_grid, BLOCK_SIZE>>>
(d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out);
}
cudaEvent_t kernel_start, kernel_stop;
cudaEventCreate(&kernel_start);
cudaEventCreate(&kernel_stop);
float kernel_time = 0.0f;
cudaEventRecord(kernel_start, 0);
for (int i=0; i<ITERATIONS; i++) // repeat ITERATIONS times
{
spmv_kernel <<<spmv_grid, BLOCK_SIZE>>>
(d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out);
}
cudaDeviceSynchronize();
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
// get elapsed time
cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time/ITERATIONS << endl;
HANDLE_ERROR(cudaMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), cudaMemcpyDeviceToHost));
// spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows);
return 0;
}
|
cc3c3d2d55ad196cdf128bc7a381947599bbf1da.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* The MIT License (MIT)
* This file is part of waifu2x-converter-cpp
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* -*- mode: c++ -*- */
#define UNROLL9(F) \
F(0); \
F(1); \
F(2); \
F(3); \
F(4); \
F(5); \
F(6); \
F(7); \
F(8);
#define UNROLL8x3x3(F) \
F(0,0,0); \
F(0,0,1); \
F(0,0,2); \
F(0,1,0); \
F(0,1,1); \
F(0,1,2); \
F(0,2,0); \
F(0,2,1); \
F(0,2,2); \
\
F(1,0,0); \
F(1,0,1); \
F(1,0,2); \
F(1,1,0); \
F(1,1,1); \
F(1,1,2); \
F(1,2,0); \
F(1,2,1); \
F(1,2,2); \
\
F(2,0,0); \
F(2,0,1); \
F(2,0,2); \
F(2,1,0); \
F(2,1,1); \
F(2,1,2); \
F(2,2,0); \
F(2,2,1); \
F(2,2,2); \
\
F(3,0,0); \
F(3,0,1); \
F(3,0,2); \
F(3,1,0); \
F(3,1,1); \
F(3,1,2); \
F(3,2,0); \
F(3,2,1); \
F(3,2,2); \
\
F(4,0,0); \
F(4,0,1); \
F(4,0,2); \
F(4,1,0); \
F(4,1,1); \
F(4,1,2); \
F(4,2,0); \
F(4,2,1); \
F(4,2,2); \
\
F(5,0,0); \
F(5,0,1); \
F(5,0,2); \
F(5,1,0); \
F(5,1,1); \
F(5,1,2); \
F(5,2,0); \
F(5,2,1); \
F(5,2,2); \
\
F(6,0,0); \
F(6,0,1); \
F(6,0,2); \
F(6,1,0); \
F(6,1,1); \
F(6,1,2); \
F(6,2,0); \
F(6,2,1); \
F(6,2,2); \
\
F(7,0,0); \
F(7,0,1); \
F(7,0,2); \
F(7,1,0); \
F(7,1,1); \
F(7,1,2); \
F(7,2,0); \
F(7,2,1); \
F(7,2,2);
#define UNROLL8(F) \
F(0); \
F(1); \
F(2); \
F(3); \
F(4); \
F(5); \
F(6); \
F(7);
#define UNROLL8x3(F) \
F(0,0); \
F(0,1); \
F(0,2); \
F(0,3); \
F(0,4); \
F(0,5); \
F(0,6); \
F(0,7); \
\
F(1,0); \
F(1,1); \
F(1,2); \
F(1,3); \
F(1,4); \
F(1,5); \
F(1,6); \
F(1,7); \
\
F(2,0); \
F(2,1); \
F(2,2); \
F(2,3); \
F(2,4); \
F(2,5); \
F(2,6); \
F(2,7);
#define UNROLL10x3(F) \
F(0,0); \
F(0,1); \
F(0,2); \
F(0,3); \
F(0,4); \
F(0,5); \
F(0,6); \
F(0,7); \
F(0,8); \
F(0,9); \
\
F(1,0); \
F(1,1); \
F(1,2); \
F(1,3); \
F(1,4); \
F(1,5); \
F(1,6); \
F(1,7); \
F(1,8); \
F(1,9); \
\
F(2,0); \
F(2,1); \
F(2,2); \
F(2,3); \
F(2,4); \
F(2,5); \
F(2,6); \
F(2,7); \
F(2,8); \
F(2,9);
#define BLOCK_SIZE 8
extern __shared__ float shared_buf[];
template <int nInputPlanes>
__device__ void
filter(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
int nOutputPlanes,
const float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
const float * __restrict__ weight)
{
unsigned int yi = blockIdx.x;
size_t in_step = wsz * nInputPlanes;
const float *inp = packed_input;
inp += yi * in_step;
const float *in0p = inp - in_step;
if (yi == 0) {
in0p = inp;
}
const float *in1p = inp;
const float *in2p = inp + in_step;
if (yi == hsz-1) {
in2p = in1p;
}
const float *in01 = in0p;
const float *in11 = in1p;
const float *in21 = in2p;
float *shared_ptr = shared_buf;
float *in_block0_base = shared_ptr;
shared_ptr += nInputPlanes*(BLOCK_SIZE+2);
float *in_block1_base = shared_ptr;
shared_ptr += nInputPlanes*(BLOCK_SIZE+2);
float *in_block2_base = shared_ptr;
shared_ptr += nInputPlanes*(BLOCK_SIZE+2);
float *in_block0 = in_block0_base + nInputPlanes;
float *in_block1 = in_block1_base + nInputPlanes;
float *in_block2 = in_block2_base + nInputPlanes;
int lid = threadIdx.x;
float bv = biases[lid];
for (int xi0=0; xi0<wsz; xi0+=BLOCK_SIZE) {
/* for (unsigned int op=0; op<nOutputPlanes; op++) -- one output plane per thread */
{
int op = lid;
int rem = wsz - xi0;
__syncthreads();
if (lid < nInputPlanes/2) {
int bi;
int lid2 = lid*2;
for (bi=0; bi<BLOCK_SIZE; bi++) {
int xi = xi0 + bi;
if (xi == wsz) {
break;
}
/* load to shared */
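/* Each of the first nInputPlanes/2 threads copies two adjacent input
   planes as one float2 for every x position of the tile, so together
   they fill all nInputPlanes values per column; this assumes the
   packed buffers are 8-byte aligned. */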
*(float2*)&in_block0[bi*nInputPlanes + lid2] = *(float2*)&in01[xi*nInputPlanes + lid2];
*(float2*)&in_block1[bi*nInputPlanes + lid2] = *(float2*)&in11[xi*nInputPlanes + lid2];
*(float2*)&in_block2[bi*nInputPlanes + lid2] = *(float2*)&in21[xi*nInputPlanes + lid2];
}
{
int xi = xi0 + bi;
if (xi == wsz) {
*(float2*)&in_block0[bi*(int)nInputPlanes + lid2] = *(float2*)&in01[(xi-1)*(int)nInputPlanes + lid2];
*(float2*)&in_block1[bi*(int)nInputPlanes + lid2] = *(float2*)&in11[(xi-1)*(int)nInputPlanes + lid2];
*(float2*)&in_block2[bi*(int)nInputPlanes + lid2] = *(float2*)&in21[(xi-1)*(int)nInputPlanes + lid2];
} else {
*(float2*)&in_block0[bi*(int)nInputPlanes + lid2] = *(float2*)&in01[xi*(int)nInputPlanes + lid2];
*(float2*)&in_block1[bi*(int)nInputPlanes + lid2] = *(float2*)&in11[xi*(int)nInputPlanes + lid2];
*(float2*)&in_block2[bi*(int)nInputPlanes + lid2] = *(float2*)&in21[xi*(int)nInputPlanes + lid2];
}
}
{
int xi = xi0-1;
if (xi == -1) {
*(float2*)&in_block0[-1*(int)nInputPlanes + (int)lid2] = *(float2*)&in01[lid2];
*(float2*)&in_block1[-1*(int)nInputPlanes + (int)lid2] = *(float2*)&in11[lid2];
*(float2*)&in_block2[-1*(int)nInputPlanes + (int)lid2] = *(float2*)&in21[lid2];
} else {
*(float2*)&in_block0[-1*(int)nInputPlanes + (int)lid2] = *(float2*)&in01[xi*(int)nInputPlanes + lid2];
*(float2*)&in_block1[-1*(int)nInputPlanes + (int)lid2] = *(float2*)&in11[xi*(int)nInputPlanes + lid2];
*(float2*)&in_block2[-1*(int)nInputPlanes + (int)lid2] = *(float2*)&in21[xi*(int)nInputPlanes + lid2];
}
}
}
__syncthreads();
if (rem >= BLOCK_SIZE) {
#define DECL_PTR(y,x) float *p##y##x = &in_block##y[nInputPlanes * (x-1)];
UNROLL10x3(DECL_PTR);
float sum0 = 0;
float sum1 = 0;
float sum2 = 0;
float sum3 = 0;
float sum4 = 0;
float sum5 = 0;
float sum6 = 0;
float sum7 = 0;
{
const float *w0 = weight + lid;
for (int ip = 0; ip < nInputPlanes; ip++) {
#define LOAD_INPUT2(y,x) float2 i##y##x##_2 = *(float2*)&p##y##x[ip];
UNROLL10x3(LOAD_INPUT2);
#define LOAD_COEF(X) float w_##X = w[X * 128];
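// Weight layout implied by this indexing:
// weight[((ip * 9) + tap) * 128 + outputPlane]. Since w already points
// at this thread's output plane, tap X of the current input plane is
// at w[X * 128].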
#define CALC(IDX,Y,I0,I1,I2,I3,I4,I5,I6,I7) \
sum0 += w_##IDX * i##Y##I0; \
sum1 += w_##IDX * i##Y##I1; \
sum2 += w_##IDX * i##Y##I2; \
sum3 += w_##IDX * i##Y##I3; \
sum4 += w_##IDX * i##Y##I4; \
sum5 += w_##IDX * i##Y##I5; \
sum6 += w_##IDX * i##Y##I6; \
sum7 += w_##IDX * i##Y##I7;
{
#define LOAD_INPUT1X(Y,X) float i##Y##X = i##Y##X##_2.x;
UNROLL10x3(LOAD_INPUT1X);
const float *w = (w0 + (ip * 128) * 9);
UNROLL9(LOAD_COEF);
{
CALC(0,0,0,1,2,3,4,5,6,7);
CALC(1,0,1,2,3,4,5,6,7,8);
CALC(2,0,2,3,4,5,6,7,8,9);
CALC(3,1,0,1,2,3,4,5,6,7);
CALC(4,1,1,2,3,4,5,6,7,8);
CALC(5,1,2,3,4,5,6,7,8,9);
CALC(6,2,0,1,2,3,4,5,6,7);
CALC(7,2,1,2,3,4,5,6,7,8);
CALC(8,2,2,3,4,5,6,7,8,9);
}
}
ip++;
{
#define LOAD_INPUT1Y(Y,X) float i##Y##X = i##Y##X##_2.y;
UNROLL10x3(LOAD_INPUT1Y);
const float *w = (w0 + (ip * 128) * 9);
UNROLL9(LOAD_COEF);
{
CALC(0,0,0,1,2,3,4,5,6,7);
CALC(1,0,1,2,3,4,5,6,7,8);
CALC(2,0,2,3,4,5,6,7,8,9);
CALC(3,1,0,1,2,3,4,5,6,7);
CALC(4,1,1,2,3,4,5,6,7,8);
CALC(5,1,2,3,4,5,6,7,8,9);
CALC(6,2,0,1,2,3,4,5,6,7);
CALC(7,2,1,2,3,4,5,6,7,8);
CALC(8,2,2,3,4,5,6,7,8,9);
}
}
}
#define RELU(BI) \
{ \
float *out = packed_output + (yi*wsz + (xi0+BI))*nOutputPlanes; \
\
{ \
int opIndex = lid; \
float v = sum##BI; \
v += bv; \
\
float mtz = max(v, 0.0f); \
float ltz = min(v, 0.0f); \
\
v = ltz * 0.1f + mtz; \
\
out[opIndex] = v; \
} \
}
UNROLL8(RELU);
#undef DECL_PTR
#undef LOAD_COEF
#undef CALC
#undef LOAD_INPUT2
#undef LOAD_INPUT1X
#undef LOAD_INPUT1Y
#undef RELU
}
} else {
for (int bi=0; bi<BLOCK_SIZE; bi++) {
int xi = xi0+bi;
if (xi == wsz) {
break;
}
const float *w0 = weight + lid;
float sum = 0;
for (int ip=0; ip<nInputPlanes; ip++) {
float i00, i01, i02;
float i10, i11, i12;
float i20, i21, i22;
i00 = in_block0[(bi-1)*nInputPlanes+ip];
i10 = in_block1[(bi-1)*nInputPlanes+ip];
i20 = in_block2[(bi-1)*nInputPlanes+ip];
i01 = in_block0[bi*nInputPlanes+ip];
i11 = in_block1[bi*nInputPlanes+ip];
i21 = in_block2[bi*nInputPlanes+ip];
i02 = in_block0[(bi+1)*nInputPlanes+ip];
i12 = in_block1[(bi+1)*nInputPlanes+ip];
i22 = in_block2[(bi+1)*nInputPlanes+ip];
const float *w = w0;
sum += w[(9*ip+0) * 128]*i00;
sum += w[(9*ip+1) * 128]*i01;
sum += w[(9*ip+2) * 128]*i02;
sum += w[(9*ip+3) * 128]*i10;
sum += w[(9*ip+4) * 128]*i11;
sum += w[(9*ip+5) * 128]*i12;
sum += w[(9*ip+6) * 128]*i20;
sum += w[(9*ip+7) * 128]*i21;
sum += w[(9*ip+8) * 128]*i22;
}
float *out = packed_output + (yi*wsz + xi)*nOutputPlanes;
{
float v = sum;
v += bv;
float mtz = max(v, 0.0f);
float ltz = min(v, 0.0f);
v = ltz * 0.1f + mtz;
out[op] = v;
}
}
}
}
}
}
extern "C" __global__ void
filter_i32(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
int nOutputPlanes,
const float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
const float * __restrict__ weight)
{
filter<32>(packed_input, packed_output, nOutputPlanes, biases, hsz, wsz, weight);
}
extern "C" __global__ void
filter_i64(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
int nOutputPlanes,
const float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
const float * __restrict__ weight)
{
filter<64>(packed_input, packed_output, nOutputPlanes, biases, hsz, wsz, weight);
}
extern "C" __global__ void
filter_i128(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
int nOutputPlanes,
const float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
const float * __restrict__ weight)
{
filter<128>(packed_input, packed_output, nOutputPlanes, biases, hsz, wsz, weight);
}
#if __CUDA_ARCH__ >= 300
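// Sum v across the 32 lanes of a warp with shuffle-down steps of
// 1, 2, 4, 8 and 16; after the last step lane 0 holds the full sum
// (the other lanes hold partial sums). Assumes a full, converged warp,
// hence the 0xFFFFFFFF mask.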
static inline __device__ float
warp_sum(float v) {
v += __shfl_down_sync(0xFFFFFFFF, v, 1);
v += __shfl_down_sync(0xFFFFFFFF, v, 2);
v += __shfl_down_sync(0xFFFFFFFF, v, 4);
v += __shfl_down_sync(0xFFFFFFFF, v, 8);
v += __shfl_down_sync(0xFFFFFFFF, v, 16);
return v;
}
#endif
template <int nInputPlanes,
int nOutputPlanes>
void __device__
filter_weight_blocking(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
const float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
const float * __restrict__ weight,
int ib0,
int ob0)
{
#define INPUT_BLOCK_SIZE 32
#define OUTPUT_BLOCK_SIZE 64 // == blockDim.x
#define X_BLOCK_SIZE 8
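// Weight-blocked variant: each call processes INPUT_BLOCK_SIZE input
// planes starting at ib0 for OUTPUT_BLOCK_SIZE output planes starting
// at ob0. Partial sums are accumulated in packed_output across calls;
// the bias and leaky ReLU are applied only on the last input block.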
unsigned int yi = blockIdx.x;
size_t in_step = wsz * nInputPlanes;
const float *inp = packed_input;
inp += yi * in_step;
const float *in0p = inp - in_step;
if (yi == 0) {
in0p = inp;
}
const float *in1p = inp;
const float *in2p = inp + in_step;
if (yi == hsz-1) {
in2p = in1p;
}
const float *in01 = in0p;
const float *in11 = in1p;
const float *in21 = in2p;
__shared__ float shared_buf_base[INPUT_BLOCK_SIZE * (X_BLOCK_SIZE+2) * 3];
float *in_block0_base = shared_buf_base + INPUT_BLOCK_SIZE * (BLOCK_SIZE+2) * 0;
float *in_block1_base = shared_buf_base + INPUT_BLOCK_SIZE * (BLOCK_SIZE+2) * 1;
float *in_block2_base = shared_buf_base + INPUT_BLOCK_SIZE * (BLOCK_SIZE+2) * 2;
float *in_block0 = in_block0_base + INPUT_BLOCK_SIZE;
float *in_block1 = in_block1_base + INPUT_BLOCK_SIZE;
float *in_block2 = in_block2_base + INPUT_BLOCK_SIZE;
int lid = threadIdx.x;
{ // ib0
{ // ob0
int op = lid + ob0;
float bv = biases[op];
for (int xi0=0; xi0<wsz; xi0+=BLOCK_SIZE) {
float *out_base = packed_output + (yi*wsz + xi0)*nOutputPlanes + op;
float *linp0 = in_block0 + lid;
float *linp1 = in_block1 + lid;
float *linp2 = in_block2 + lid;
__syncthreads();
int rem = wsz - xi0;
const float *inb0 = in01 + ib0+lid;
const float *inb1 = in11 + ib0+lid;
const float *inb2 = in21 + ib0+lid;
if (rem > 8 && xi0 != 0) {
if (lid < INPUT_BLOCK_SIZE) {
linp0[-1*INPUT_BLOCK_SIZE] = linp0[7*INPUT_BLOCK_SIZE];
linp1[-1*INPUT_BLOCK_SIZE] = linp1[7*INPUT_BLOCK_SIZE];
linp2[-1*INPUT_BLOCK_SIZE] = linp2[7*INPUT_BLOCK_SIZE];
linp0[0*INPUT_BLOCK_SIZE] = linp0[8*INPUT_BLOCK_SIZE];
linp1[0*INPUT_BLOCK_SIZE] = linp1[8*INPUT_BLOCK_SIZE];
linp2[0*INPUT_BLOCK_SIZE] = linp2[8*INPUT_BLOCK_SIZE];
}
__syncthreads();
if (lid < INPUT_BLOCK_SIZE) {
int bi;
#pragma unroll
for (bi=1; bi<X_BLOCK_SIZE+1; bi++) {
int xi = xi0 + bi;
/* load to shared */
linp0[bi*INPUT_BLOCK_SIZE] = inb0[xi*nInputPlanes];
linp1[bi*INPUT_BLOCK_SIZE] = inb1[xi*nInputPlanes];
linp2[bi*INPUT_BLOCK_SIZE] = inb2[xi*nInputPlanes];
}
}
} else {
if (lid < INPUT_BLOCK_SIZE) {
int bi;
for (bi=0; bi<X_BLOCK_SIZE; bi++) {
int xi = xi0 + bi;
if (xi == wsz) {
break;
}
/* load to shared */
linp0[bi*INPUT_BLOCK_SIZE] = inb0[xi*nInputPlanes];
linp1[bi*INPUT_BLOCK_SIZE] = inb1[xi*nInputPlanes];
linp2[bi*INPUT_BLOCK_SIZE] = inb2[xi*nInputPlanes];
}
{
int xi = xi0 + bi;
if (xi == wsz) {
linp0[bi*(int)INPUT_BLOCK_SIZE] = inb0[(xi-1)*(int)nInputPlanes];
linp1[bi*(int)INPUT_BLOCK_SIZE] = inb1[(xi-1)*(int)nInputPlanes];
linp2[bi*(int)INPUT_BLOCK_SIZE] = inb2[(xi-1)*(int)nInputPlanes];
} else {
linp0[bi*(int)INPUT_BLOCK_SIZE] = inb0[xi*(int)nInputPlanes];
linp1[bi*(int)INPUT_BLOCK_SIZE] = inb1[xi*(int)nInputPlanes];
linp2[bi*(int)INPUT_BLOCK_SIZE] = inb2[xi*(int)nInputPlanes];
}
}
{
int xi = xi0-1;
if (xi == -1) {
linp0[-1*(int)INPUT_BLOCK_SIZE] = inb0[0];
linp1[-1*(int)INPUT_BLOCK_SIZE] = inb1[0];
linp2[-1*(int)INPUT_BLOCK_SIZE] = inb2[0];
} else {
linp0[-1*(int)INPUT_BLOCK_SIZE] = inb0[xi*(int)nInputPlanes];
linp1[-1*(int)INPUT_BLOCK_SIZE] = inb1[xi*(int)nInputPlanes];
linp2[-1*(int)INPUT_BLOCK_SIZE] = inb2[xi*(int)nInputPlanes];
}
}
}
}
__syncthreads();
const float *w0 = weight + op;
if (rem >= BLOCK_SIZE) {
#define DECL_PTR(y,x) float *p##y##x = &in_block##y[INPUT_BLOCK_SIZE * (x-1)];
UNROLL10x3(DECL_PTR);
float sum0 = 0;
float sum1 = 0;
float sum2 = 0;
float sum3 = 0;
float sum4 = 0;
float sum5 = 0;
float sum6 = 0;
float sum7 = 0;
for (int ip1 = 0; ip1 < INPUT_BLOCK_SIZE; ip1+=2) {
int ip = ip1 + ib0;
#define LOAD_INPUT2(y,x) float2 i##y##x##_2 = *(float2*)&p##y##x[ip1];
UNROLL10x3(LOAD_INPUT2);
#define LOAD_COEF(X) float w_##X = w[X * 128];
#define CALC(SYM,IDX,Y,I0,I1,I2,I3,I4,I5,I6,I7) \
sum0 += w_##IDX * i##Y##I0##_2.SYM; \
sum1 += w_##IDX * i##Y##I1##_2.SYM; \
sum2 += w_##IDX * i##Y##I2##_2.SYM; \
sum3 += w_##IDX * i##Y##I3##_2.SYM; \
sum4 += w_##IDX * i##Y##I4##_2.SYM; \
sum5 += w_##IDX * i##Y##I5##_2.SYM; \
sum6 += w_##IDX * i##Y##I6##_2.SYM; \
sum7 += w_##IDX * i##Y##I7##_2.SYM;
{
const float *w = (w0 + (ip * 128) * 9);
UNROLL9(LOAD_COEF);
{
CALC(x, 0,0,0,1,2,3,4,5,6,7);
CALC(x, 1,0,1,2,3,4,5,6,7,8);
CALC(x, 2,0,2,3,4,5,6,7,8,9);
CALC(x, 3,1,0,1,2,3,4,5,6,7);
CALC(x, 4,1,1,2,3,4,5,6,7,8);
CALC(x, 5,1,2,3,4,5,6,7,8,9);
CALC(x, 6,2,0,1,2,3,4,5,6,7);
CALC(x, 7,2,1,2,3,4,5,6,7,8);
CALC(x, 8,2,2,3,4,5,6,7,8,9);
}
}
ip++;
{
const float *w = (w0 + (ip * 128) * 9);
UNROLL9(LOAD_COEF);
{
CALC(y, 0,0,0,1,2,3,4,5,6,7);
CALC(y, 1,0,1,2,3,4,5,6,7,8);
CALC(y, 2,0,2,3,4,5,6,7,8,9);
CALC(y, 3,1,0,1,2,3,4,5,6,7);
CALC(y, 4,1,1,2,3,4,5,6,7,8);
CALC(y, 5,1,2,3,4,5,6,7,8,9);
CALC(y, 6,2,0,1,2,3,4,5,6,7);
CALC(y, 7,2,1,2,3,4,5,6,7,8);
CALC(y, 8,2,2,3,4,5,6,7,8,9);
}
}
}
#define RELU(BI) \
{ \
\
{ \
float v = sum##BI + out_base[BI*nOutputPlanes]; \
v += bv; \
\
float mtz = max(v, 0.0f); \
float ltz = min(v, 0.0f); \
\
v = ltz * 0.1f + mtz; \
\
out_base[BI*nOutputPlanes] = v; \
} \
}
if ((ib0+INPUT_BLOCK_SIZE) == nInputPlanes) {
UNROLL8(RELU);
} else if (ib0 == 0) {
out_base[nOutputPlanes*0] = sum0;
out_base[nOutputPlanes*1] = sum1;
out_base[nOutputPlanes*2] = sum2;
out_base[nOutputPlanes*3] = sum3;
out_base[nOutputPlanes*4] = sum4;
out_base[nOutputPlanes*5] = sum5;
out_base[nOutputPlanes*6] = sum6;
out_base[nOutputPlanes*7] = sum7;
} else {
out_base[nOutputPlanes*0] += sum0;
out_base[nOutputPlanes*1] += sum1;
out_base[nOutputPlanes*2] += sum2;
out_base[nOutputPlanes*3] += sum3;
out_base[nOutputPlanes*4] += sum4;
out_base[nOutputPlanes*5] += sum5;
out_base[nOutputPlanes*6] += sum6;
out_base[nOutputPlanes*7] += sum7;
}
} else {
for (int bi=0; bi<X_BLOCK_SIZE; bi++) {
int xi = xi0+bi;
if (xi == wsz) {
break;
}
float sum = 0;
for (int ip1=0; ip1<INPUT_BLOCK_SIZE; ip1++) {
int ip = ib0 + ip1;
float i00, i01, i02;
float i10, i11, i12;
float i20, i21, i22;
i00 = in_block0[(bi-1)*INPUT_BLOCK_SIZE+ip1];
i10 = in_block1[(bi-1)*INPUT_BLOCK_SIZE+ip1];
i20 = in_block2[(bi-1)*INPUT_BLOCK_SIZE+ip1];
i01 = in_block0[bi*INPUT_BLOCK_SIZE+ip1];
i11 = in_block1[bi*INPUT_BLOCK_SIZE+ip1];
i21 = in_block2[bi*INPUT_BLOCK_SIZE+ip1];
i02 = in_block0[(bi+1)*INPUT_BLOCK_SIZE+ip1];
i12 = in_block1[(bi+1)*INPUT_BLOCK_SIZE+ip1];
i22 = in_block2[(bi+1)*INPUT_BLOCK_SIZE+ip1];
sum += w0[(9*ip+0) * 128]*i00;
sum += w0[(9*ip+1) * 128]*i01;
sum += w0[(9*ip+2) * 128]*i02;
sum += w0[(9*ip+3) * 128]*i10;
sum += w0[(9*ip+4) * 128]*i11;
sum += w0[(9*ip+5) * 128]*i12;
sum += w0[(9*ip+6) * 128]*i20;
sum += w0[(9*ip+7) * 128]*i21;
sum += w0[(9*ip+8) * 128]*i22;
}
float *out = packed_output + (yi*wsz + xi)*nOutputPlanes;
if ((ib0+INPUT_BLOCK_SIZE) == nInputPlanes) {
/* last */
float v = sum + out[op];
v += bv;
float mtz = max(v, 0.0f);
float ltz = min(v, 0.0f);
v = ltz * 0.1f + mtz;
out[op] = v;
} else if (ib0 == 0) {
out[op] = sum;
} else {
out[op] += sum;
}
}
}
}
}
}
}
extern "C" __global__
void
filter_i128_o128(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
const float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
const float * __restrict__ weight,
int ib0,
int ob0)
{
filter_weight_blocking<128,128>(packed_input,
packed_output,
biases,
hsz,
wsz,
weight,
ib0,
ob0);
}
extern "C" __global__
void
filter_i64_o128(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
const float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
const float * __restrict__ weight,
int ib0,
int ob0)
{
filter_weight_blocking<64,128>(packed_input,
packed_output,
biases,
hsz,
wsz,
weight,
ib0,
ob0);
}
extern "C" __global__
void
filter_i64_o64(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
const float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
const float * __restrict__ weight,
int ib0,
int ob0)
{
filter_weight_blocking<64,64>(packed_input,
packed_output,
biases,
hsz,
wsz,
weight,
ib0,
ob0);
}
extern "C" __global__ void
filter_i128_o1(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
float * __restrict__ weight)
{
int nInputPlanes = 128;
int nOutputPlanes = 1;
{
unsigned int yi = blockIdx.x;
size_t in_step = wsz * nInputPlanes;
const float *inp = packed_input;
inp += yi * in_step;
const float *in0p = inp - in_step;
if (yi == 0) {
in0p = inp;
}
const float *in1p = inp;
const float *in2p = inp + in_step;
if (yi == hsz-1) {
in2p = in1p;
}
const float *in01 = in0p;
const float *in11 = in1p;
const float *in21 = in2p;
unsigned int lid = threadIdx.x;
float bv0 = biases[0];
/* 128 item */
/* x : (1width/group) */
/* y : (2height/group) */
/* iplane : 1plane / 1item * 128plane */
__shared__ float shared_buf[128 * 10];
float lin00;
float lin01;
float lin02;
float lin10;
float lin11;
float lin12;
float lin20;
float lin21;
float lin22;
float *sum_buffer = shared_buf + 128*9;
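// Each of the 128 threads accumulates the 3x3 convolution for one
// input plane, sliding the window along x in registers (lin00..lin22);
// the 128 per-plane sums are then reduced through sum_buffer (and
// warp_sum on sm_30+) to the single output value for the pixel.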
#define OUT1_LOAD_WEIGHT(I,Y,X) float w##I##Y##X = weight[(I*16 + lid)*9 + Y*3 + X];
float w00 = weight[lid*9 + 0];
float w01 = weight[lid*9 + 1];
float w02 = weight[lid*9 + 2];
float w10 = weight[lid*9 + 3];
float w11 = weight[lid*9 + 4];
float w12 = weight[lid*9 + 5];
float w20 = weight[lid*9 + 6];
float w21 = weight[lid*9 + 7];
float w22 = weight[lid*9 + 8];
const float *pin01 = in01 + lid;
const float *pin02 = in01 + nInputPlanes + lid;
const float *pin11 = in11 + lid;
const float *pin12 = in11 + nInputPlanes + lid;
const float *pin21 = in21 + lid;
const float *pin22 = in21 + nInputPlanes + lid;
lin01 = pin01[0];
lin02 = pin01[0];
lin11 = pin11[0];
lin12 = pin11[0];
lin21 = pin21[0];
lin22 = pin21[0];
#define OUT1_BODY(LEDGE,REDGE,SUM_RELU) \
{ \
float sum = 0; \
{ \
lin00 = lin01; \
lin01 = lin02; \
\
lin10 = lin11; \
lin11 = lin12; \
\
lin20 = lin21; \
lin21 = lin22; \
\
if (REDGE) { \
lin02 = lin01; \
lin12 = lin11; \
lin22 = lin21; \
} else { \
lin02 = pin02[xi*128]; \
lin12 = pin12[xi*128]; \
lin22 = pin22[xi*128]; \
} \
\
sum += w00 * lin00; \
sum += w10 * lin10; \
sum += w20 * lin20; \
\
sum += w01 * lin01; \
sum += w11 * lin11; \
sum += w21 * lin21; \
\
sum += w02 * lin02; \
sum += w12 * lin12; \
sum += w22 * lin22; \
\
} \
__syncthreads(); \
sum_buffer[lid] = sum; \
__syncthreads(); \
if (lid < 64) { \
float v2 = sum_buffer[lid+64]; \
sum_buffer[lid] += v2; \
} \
__syncthreads(); \
SUM_RELU(0); \
}
#if __CUDA_ARCH__ >= 300
#define SUM_RELU(OI) \
if (lid < 32) { \
float v0 = sum_buffer[lid] + sum_buffer[lid+32]; \
float sum = warp_sum(v0); \
\
if (lid == 0) { \
float v = sum; \
float *out = packed_output + (yi*wsz + xi)*nOutputPlanes; \
v += bv##OI; \
float mtz = max(v, 0.0f); \
float ltz = min(v, 0.0f); \
v = ltz * 0.1f + mtz; \
out[OI] = v; \
} \
}
#else
#define SUM_RELU(OI) \
if (lid < 32) { \
sum_buffer[lid] += sum_buffer[lid+32]; \
} \
__syncthreads(); \
if (lid < 16) { \
sum_buffer[lid] += sum_buffer[lid+16]; \
} \
__syncthreads(); \
if (lid < 8) { \
sum_buffer[lid] += sum_buffer[lid+8]; \
} \
__syncthreads(); \
if (lid < 4) { \
sum_buffer[lid] += sum_buffer[lid+4]; \
} \
__syncthreads(); \
if (lid < 2) { \
sum_buffer[lid] += sum_buffer[lid+2]; \
} \
__syncthreads(); \
if (lid == 0) { \
float sum = sum_buffer[0] + sum_buffer[1]; \
float v = sum; \
float *out = packed_output + (yi*wsz + xi)*nOutputPlanes; \
v += bv##OI; \
float mtz = max(v, 0.0f); \
float ltz = min(v, 0.0f); \
v = ltz * 0.1f + mtz; \
out[OI] = v; \
}
#endif
for (int xi=0; xi<wsz-1; xi++) {
OUT1_BODY(0,0,SUM_RELU);
}
{
int xi = wsz-1;
OUT1_BODY(0,1,SUM_RELU);
}
}
}
extern "C" __global__ void
filter_i1_o32(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
float * __restrict__ weight)
{
//int nInputPlanes = 1;
int nOutputPlanes = 32;
unsigned int yi = blockIdx.x;
unsigned int lid = threadIdx.x;
size_t in_step = wsz;
const float *inp = packed_input;
inp += in_step * yi;
const float *in0p = inp - in_step;
if (yi == 0) {
in0p = inp;
}
const float *in1p = inp;
const float *in2p = inp + in_step;
if (yi == hsz-1) {
in2p = in1p;
}
const float *in01 = in0p;
const float *in11 = in1p;
const float *in21 = in2p;
__shared__ float in_block0_base[256+2];
__shared__ float in_block1_base[256+2];
__shared__ float in_block2_base[256+2];
float *in_block0 = in_block0_base + 1;
float *in_block1 = in_block1_base + 1;
float *in_block2 = in_block2_base + 1;
/* 256 item / group */
/* x : (64width/group) */
/* 32 oplane : (8weight/item * 4item)*/
unsigned int xoff = lid / 4U;
unsigned int ooff = (lid % 4U) * 8;
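// Each thread handles one x position (xoff) and 8 of the 32 output
// planes (ooff .. ooff+7); four threads cover all outputs for an x,
// and the xi1_base loop below sweeps the four 64-column strips of the
// 256-wide tile.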
#define IN1_LOAD_COEF(O,Y,X) \
float w##O##Y##X = weight[9 * (O + ooff) + (Y*3) + X];
UNROLL8x3x3(IN1_LOAD_COEF);
for (int xi0=0; xi0<wsz; xi0+=256) {
/* load */
__syncthreads();
{
int xi = xi0 + lid;
if (xi < wsz) {
in_block0[lid] = in01[xi0 + lid];
in_block1[lid] = in11[xi0 + lid];
in_block2[lid] = in21[xi0 + lid];
}
if (lid == 0) {
if (xi == 0) {
in_block0[-1] = in01[0];
in_block1[-1] = in11[0];
in_block2[-1] = in21[0];
} else {
in_block0[-1] = in01[xi-1];
in_block1[-1] = in11[xi-1];
in_block2[-1] = in21[xi-1];
}
}
if (xi == wsz-1) {
in_block0[lid+1] = in01[xi];
in_block1[lid+1] = in11[xi];
in_block2[lid+1] = in21[xi];
}
if ((lid == 255) && (xi < wsz-1)) {
in_block0[256] = in01[xi+1];
in_block1[256] = in11[xi+1];
in_block2[256] = in21[xi+1];
}
}
__syncthreads();
for (int xi1_base=0; xi1_base<4; xi1_base++) {
{
int xi1 = xi1_base*64 + xoff;
int xi = xi0 + xi1;
if (xi < wsz) {
#define IN1_DECLSUM(O) float sum##O = 0;
#define IN1_CALC(O,Y,X) sum##O += in_block##Y[xi1+X-1] * w##O##Y##X;
#define IN1_RELU(O) { \
float v = sum##O; \
int opIndex = ooff + O; \
float bv = biases[opIndex]; \
v += bv; \
float mtz = max(v, 0.0f); \
float ltz = min(v, 0.0f); \
v = ltz * 0.1f + mtz; \
out[opIndex] = v; \
}
UNROLL8(IN1_DECLSUM);
UNROLL8x3x3(IN1_CALC);
float *out = packed_output + (yi*wsz + xi) * nOutputPlanes;
UNROLL8(IN1_RELU);
}
}
}
}
}
/* blockDim.x == 192 */
extern "C" __global__ void
filter_i3_o32(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
float * __restrict__ weight)
{
int nInputPlanes = 3;
int nOutputPlanes = 32;
unsigned int yi = blockIdx.x;
unsigned int lid = threadIdx.x;
size_t in_step = wsz * nInputPlanes;
const float *inp = packed_input;
inp += in_step * yi;
const float *in0p = inp - in_step;
if (yi == 0) {
in0p = inp;
}
const float *in1p = inp;
const float *in2p = inp + in_step;
if (yi == hsz-1) {
in2p = in1p;
}
const float *in01 = in0p;
const float *in11 = in1p;
const float *in21 = in2p;
__shared__ float in_block0_base[(64+2)*3];
__shared__ float in_block1_base[(64+2)*3];
__shared__ float in_block2_base[(64+2)*3];
__shared__ float sum_buffer[192];
float *in_block0 = in_block0_base + 3;
float *in_block1 = in_block1_base + 3;
float *in_block2 = in_block2_base + 3;
/* 192 item / group */
/* load 192 item */
/* 3 iplane : */
/* x : (64width/group) */
/* 32 oplane : one (iplane,oplane) pair, 9 weights / item; 96 items active */
unsigned int ioff = lid / 32U;
unsigned int ooff = lid % 32U;
#define I3_O32_LOAD_COEF(I) \
float w##I = weight[9*nOutputPlanes*ioff+ooff+I*nOutputPlanes];
UNROLL9(I3_O32_LOAD_COEF);
for (int xi0=0; xi0<wsz; xi0+=64) {
/* load */
int nelem = min(wsz - xi0, 64);
int nload = nelem * 3;
if (lid < nload) {
int xi = xi0*3 + lid;
in_block0[lid] = in01[xi];
in_block1[lid] = in11[xi];
in_block2[lid] = in21[xi];
if (lid < 3) {
if (xi <= 2) {
/* left edge */
in_block0[-3+(int)lid] = in01[lid];
in_block1[-3+(int)lid] = in11[lid];
in_block2[-3+(int)lid] = in21[lid];
} else {
/* 0, 1, 2 */
in_block0[-3+(int)lid] = in01[-3+(int)xi];
in_block1[-3+(int)lid] = in11[-3+(int)xi];
in_block2[-3+(int)lid] = in21[-3+(int)xi];
}
}
if (xi >= wsz*3-3) {
/* right edge */
in_block0[lid+3] = in01[xi];
in_block1[lid+3] = in11[xi];
in_block2[lid+3] = in21[xi];
} else if (lid >= 189) {
/* 189, 190, 191 */
in_block0[lid+3] = in01[xi+3];
in_block1[lid+3] = in11[xi+3];
in_block2[lid+3] = in21[xi+3];
}
}
__syncthreads();
for (int xi1=0; xi1<nelem; xi1++) {
int xi = xi0 + xi1;
if (lid < 96) { // 3input x 32output
float sum = 0;
sum += w0 * in_block0[(xi1 - 1)*3+(int)ioff];
sum += w1 * in_block0[(xi1 )*3+(int)ioff];
sum += w2 * in_block0[(xi1 + 1)*3+(int)ioff];
sum += w3 * in_block1[(xi1 - 1)*3+(int)ioff];
sum += w4 * in_block1[(xi1 )*3+(int)ioff];
sum += w5 * in_block1[(xi1 + 1)*3+(int)ioff];
sum += w6 * in_block2[(xi1 - 1)*3+(int)ioff];
sum += w7 * in_block2[(xi1 )*3+(int)ioff];
sum += w8 * in_block2[(xi1 + 1)*3+(int)ioff];
sum_buffer[lid] = sum;
}
__syncthreads();
if (lid < 32) {
int oi = lid;
float v = 0;
float *out = packed_output + (yi*wsz + xi) * nOutputPlanes;
/* 96 to 32 reduction */
v += sum_buffer[32 * 0 + lid];
v += sum_buffer[32 * 1 + lid];
v += sum_buffer[32 * 2 + lid];
float bv = biases[oi];
v += bv;
float mtz = max(v, 0.0f);
float ltz = min(v, 0.0f);
v = ltz * 0.1f + mtz;
out[oi] = v;
}
__syncthreads();
}
}
}
/* blockDim.x == 128 */
extern "C" __global__ void
filter_i128_o3(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
float * __restrict__ weight)
{
int nInputPlanes = 128;
int nOutputPlanes = 3;
unsigned int yi = blockIdx.x;
unsigned int lid = threadIdx.x;
size_t in_step = wsz * nInputPlanes;
const float *inp = packed_input;
inp += in_step * yi;
const float *in0p = inp - in_step;
if (yi == 0) {
in0p = inp;
}
const float *in1p = inp;
const float *in2p = inp + in_step;
if (yi == hsz-1) {
in2p = in1p;
}
const float *in01 = in0p;
const float *in11 = in1p;
const float *in21 = in2p;
float lin00, lin01, lin02;
float lin10, lin11, lin12;
float lin20, lin21, lin22;
__shared__ float sum_buffer[128];
/* 128 item / group */
/* load 128 item (load 3elem/item) */
/* 128 iplane
* 1 input
* 3 output (27coeff)
*/
int ioff = lid;
float bv0 = biases[0];
float bv1 = biases[1];
float bv2 = biases[2];
#define I128_O3_LOAD_COEF(I) \
float w0##I = weight[9*0*nInputPlanes + I*nInputPlanes + ioff]; \
float w1##I = weight[9*1*nInputPlanes + I*nInputPlanes + ioff]; \
float w2##I = weight[9*2*nInputPlanes + I*nInputPlanes + ioff];
UNROLL9(I128_O3_LOAD_COEF);
lin01 = lin02 = in01[lid];
lin11 = lin12 = in11[lid];
lin21 = lin22 = in21[lid];
int addroff = 0;
char *p0 = (char*)(in01 + lid + nInputPlanes);
char *p1 = (char*)(in11 + lid + nInputPlanes);
char *p2 = (char*)(in21 + lid + nInputPlanes);
for (int xi=0; xi<wsz; xi++) {
lin00 = lin01;
lin01 = lin02;
lin10 = lin11;
lin11 = lin12;
lin20 = lin21;
lin21 = lin22;
if (xi == wsz-1) {
/* nop */
} else {
lin02 = *(float *)(p0 + addroff);
lin12 = *(float *)(p1 + addroff);
lin22 = *(float *)(p2 + addroff);
}
addroff += nInputPlanes * sizeof(float);
#define I128_O3(OI) \
{ \
float sum = 0; \
sum += w##OI##0 * lin00; \
sum += w##OI##1 * lin01; \
sum += w##OI##2 * lin02; \
\
sum += w##OI##3 * lin10; \
sum += w##OI##4 * lin11; \
sum += w##OI##5 * lin12; \
\
sum += w##OI##6 * lin20; \
sum += w##OI##7 * lin21; \
sum += w##OI##8 * lin22; \
\
__syncthreads(); \
sum_buffer[lid] = sum; \
\
/* 128 to 1 */ \
__syncthreads(); \
if (lid < 64) { \
sum_buffer[lid] += sum_buffer[lid + 64]; \
} \
__syncthreads(); \
\
SUM_RELU(OI); \
}
I128_O3(0);
I128_O3(1);
I128_O3(2);
}
}
|
cc3c3d2d55ad196cdf128bc7a381947599bbf1da.cu
|
/*
* The MIT License (MIT)
* This file is part of waifu2x-converter-cpp
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* -*- mode: c++ -*- */
#define UNROLL9(F) \
F(0); \
F(1); \
F(2); \
F(3); \
F(4); \
F(5); \
F(6); \
F(7); \
F(8);
#define UNROLL8x3x3(F) \
F(0,0,0); \
F(0,0,1); \
F(0,0,2); \
F(0,1,0); \
F(0,1,1); \
F(0,1,2); \
F(0,2,0); \
F(0,2,1); \
F(0,2,2); \
\
F(1,0,0); \
F(1,0,1); \
F(1,0,2); \
F(1,1,0); \
F(1,1,1); \
F(1,1,2); \
F(1,2,0); \
F(1,2,1); \
F(1,2,2); \
\
F(2,0,0); \
F(2,0,1); \
F(2,0,2); \
F(2,1,0); \
F(2,1,1); \
F(2,1,2); \
F(2,2,0); \
F(2,2,1); \
F(2,2,2); \
\
F(3,0,0); \
F(3,0,1); \
F(3,0,2); \
F(3,1,0); \
F(3,1,1); \
F(3,1,2); \
F(3,2,0); \
F(3,2,1); \
F(3,2,2); \
\
F(4,0,0); \
F(4,0,1); \
F(4,0,2); \
F(4,1,0); \
F(4,1,1); \
F(4,1,2); \
F(4,2,0); \
F(4,2,1); \
F(4,2,2); \
\
F(5,0,0); \
F(5,0,1); \
F(5,0,2); \
F(5,1,0); \
F(5,1,1); \
F(5,1,2); \
F(5,2,0); \
F(5,2,1); \
F(5,2,2); \
\
F(6,0,0); \
F(6,0,1); \
F(6,0,2); \
F(6,1,0); \
F(6,1,1); \
F(6,1,2); \
F(6,2,0); \
F(6,2,1); \
F(6,2,2); \
\
F(7,0,0); \
F(7,0,1); \
F(7,0,2); \
F(7,1,0); \
F(7,1,1); \
F(7,1,2); \
F(7,2,0); \
F(7,2,1); \
F(7,2,2);
#define UNROLL8(F) \
F(0); \
F(1); \
F(2); \
F(3); \
F(4); \
F(5); \
F(6); \
F(7);
#define UNROLL8x3(F) \
F(0,0); \
F(0,1); \
F(0,2); \
F(0,3); \
F(0,4); \
F(0,5); \
F(0,6); \
F(0,7); \
\
F(1,0); \
F(1,1); \
F(1,2); \
F(1,3); \
F(1,4); \
F(1,5); \
F(1,6); \
F(1,7); \
\
F(2,0); \
F(2,1); \
F(2,2); \
F(2,3); \
F(2,4); \
F(2,5); \
F(2,6); \
F(2,7);
#define UNROLL10x3(F) \
F(0,0); \
F(0,1); \
F(0,2); \
F(0,3); \
F(0,4); \
F(0,5); \
F(0,6); \
F(0,7); \
F(0,8); \
F(0,9); \
\
F(1,0); \
F(1,1); \
F(1,2); \
F(1,3); \
F(1,4); \
F(1,5); \
F(1,6); \
F(1,7); \
F(1,8); \
F(1,9); \
\
F(2,0); \
F(2,1); \
F(2,2); \
F(2,3); \
F(2,4); \
F(2,5); \
F(2,6); \
F(2,7); \
F(2,8); \
F(2,9);
#define BLOCK_SIZE 8
extern __shared__ float shared_buf[];
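/* The dynamically sized shared buffer is carved into three row slices of
 * nInputPlanes*(BLOCK_SIZE+2) floats each (see the pointer setup below), so the
 * host launch is expected to pass at least
 * 3*nInputPlanes*(BLOCK_SIZE+2)*sizeof(float) bytes of dynamic shared memory
 * (an assumption about the launcher, which is not shown in this file). */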
template <int nInputPlanes>
__device__ void
filter(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
int nOutputPlanes,
const float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
const float * __restrict__ weight)
{
unsigned int yi = blockIdx.x;
size_t in_step = wsz * nInputPlanes;
const float *inp = packed_input;
inp += yi * in_step;
const float *in0p = inp - in_step;
if (yi == 0) {
in0p = inp;
}
const float *in1p = inp;
const float *in2p = inp + in_step;
if (yi == hsz-1) {
in2p = in1p;
}
const float *in01 = in0p;
const float *in11 = in1p;
const float *in21 = in2p;
float *shared_ptr = shared_buf;
float *in_block0_base = shared_ptr;
shared_ptr += nInputPlanes*(BLOCK_SIZE+2);
float *in_block1_base = shared_ptr;
shared_ptr += nInputPlanes*(BLOCK_SIZE+2);
float *in_block2_base = shared_ptr;
shared_ptr += nInputPlanes*(BLOCK_SIZE+2);
float *in_block0 = in_block0_base + nInputPlanes;
float *in_block1 = in_block1_base + nInputPlanes;
float *in_block2 = in_block2_base + nInputPlanes;
int lid = threadIdx.x;
float bv = biases[lid];
for (int xi0=0; xi0<wsz; xi0+=BLOCK_SIZE) {
/*for (unsigned int op=0; op<nOutputPlanes; op++) thread */
{
int op = lid;
int rem = wsz - xi0;
__syncthreads();
if (lid < nInputPlanes/2) {
int bi;
int lid2 = lid*2;
for (bi=0; bi<BLOCK_SIZE; bi++) {
int xi = xi0 + bi;
if (xi == wsz) {
break;
}
/* load to shared */
*(float2*)&in_block0[bi*nInputPlanes + lid2] = *(float2*)&in01[xi*nInputPlanes + lid2];
*(float2*)&in_block1[bi*nInputPlanes + lid2] = *(float2*)&in11[xi*nInputPlanes + lid2];
*(float2*)&in_block2[bi*nInputPlanes + lid2] = *(float2*)&in21[xi*nInputPlanes + lid2];
}
{
int xi = xi0 + bi;
if (xi == wsz) {
*(float2*)&in_block0[bi*(int)nInputPlanes + lid2] = *(float2*)&in01[(xi-1)*(int)nInputPlanes + lid2];
*(float2*)&in_block1[bi*(int)nInputPlanes + lid2] = *(float2*)&in11[(xi-1)*(int)nInputPlanes + lid2];
*(float2*)&in_block2[bi*(int)nInputPlanes + lid2] = *(float2*)&in21[(xi-1)*(int)nInputPlanes + lid2];
} else {
*(float2*)&in_block0[bi*(int)nInputPlanes + lid2] = *(float2*)&in01[xi*(int)nInputPlanes + lid2];
*(float2*)&in_block1[bi*(int)nInputPlanes + lid2] = *(float2*)&in11[xi*(int)nInputPlanes + lid2];
*(float2*)&in_block2[bi*(int)nInputPlanes + lid2] = *(float2*)&in21[xi*(int)nInputPlanes + lid2];
}
}
{
int xi = xi0-1;
if (xi == -1) {
*(float2*)&in_block0[-1*(int)nInputPlanes + (int)lid2] = *(float2*)&in01[lid2];
*(float2*)&in_block1[-1*(int)nInputPlanes + (int)lid2] = *(float2*)&in11[lid2];
*(float2*)&in_block2[-1*(int)nInputPlanes + (int)lid2] = *(float2*)&in21[lid2];
} else {
*(float2*)&in_block0[-1*(int)nInputPlanes + (int)lid2] = *(float2*)&in01[xi*(int)nInputPlanes + lid2];
*(float2*)&in_block1[-1*(int)nInputPlanes + (int)lid2] = *(float2*)&in11[xi*(int)nInputPlanes + lid2];
*(float2*)&in_block2[-1*(int)nInputPlanes + (int)lid2] = *(float2*)&in21[xi*(int)nInputPlanes + lid2];
}
}
}
__syncthreads();
if (rem >= BLOCK_SIZE) {
#define DECL_PTR(y,x) float *p##y##x = &in_block##y[nInputPlanes * (x-1)];
UNROLL10x3(DECL_PTR);
float sum0 = 0;
float sum1 = 0;
float sum2 = 0;
float sum3 = 0;
float sum4 = 0;
float sum5 = 0;
float sum6 = 0;
float sum7 = 0;
{
const float *w0 = weight + lid;
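/* Each loop iteration consumes two input planes: the float2 loads below pack
 * planes ip and ip+1; the .x half is used first and the .y half after the
 * explicit ip++ further down. */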
for (int ip = 0; ip < nInputPlanes; ip++) {
#define LOAD_INPUT2(y,x) float2 i##y##x##_2 = *(float2*)&p##y##x[ip];
UNROLL10x3(LOAD_INPUT2);
#define LOAD_COEF(X) float w_##X = w[X * 128];
#define CALC(IDX,Y,I0,I1,I2,I3,I4,I5,I6,I7) \
sum0 += w_##IDX * i##Y##I0; \
sum1 += w_##IDX * i##Y##I1; \
sum2 += w_##IDX * i##Y##I2; \
sum3 += w_##IDX * i##Y##I3; \
sum4 += w_##IDX * i##Y##I4; \
sum5 += w_##IDX * i##Y##I5; \
sum6 += w_##IDX * i##Y##I6; \
sum7 += w_##IDX * i##Y##I7;
{
#define LOAD_INPUT1X(Y,X) float i##Y##X = i##Y##X##_2.x;
UNROLL10x3(LOAD_INPUT1X);
const float *w = (w0 + (ip * 128) * 9);
UNROLL9(LOAD_COEF);
{
CALC(0,0,0,1,2,3,4,5,6,7);
CALC(1,0,1,2,3,4,5,6,7,8);
CALC(2,0,2,3,4,5,6,7,8,9);
CALC(3,1,0,1,2,3,4,5,6,7);
CALC(4,1,1,2,3,4,5,6,7,8);
CALC(5,1,2,3,4,5,6,7,8,9);
CALC(6,2,0,1,2,3,4,5,6,7);
CALC(7,2,1,2,3,4,5,6,7,8);
CALC(8,2,2,3,4,5,6,7,8,9);
}
}
ip++;
{
#define LOAD_INPUT1Y(Y,X) float i##Y##X = i##Y##X##_2.y;
UNROLL10x3(LOAD_INPUT1Y);
const float *w = (w0 + (ip * 128) * 9);
UNROLL9(LOAD_COEF);
{
CALC(0,0,0,1,2,3,4,5,6,7);
CALC(1,0,1,2,3,4,5,6,7,8);
CALC(2,0,2,3,4,5,6,7,8,9);
CALC(3,1,0,1,2,3,4,5,6,7);
CALC(4,1,1,2,3,4,5,6,7,8);
CALC(5,1,2,3,4,5,6,7,8,9);
CALC(6,2,0,1,2,3,4,5,6,7);
CALC(7,2,1,2,3,4,5,6,7,8);
CALC(8,2,2,3,4,5,6,7,8,9);
}
}
}
#define RELU(BI) \
{ \
float *out = packed_output + (yi*wsz + (xi0+BI))*nOutputPlanes; \
\
{ \
int opIndex = lid; \
float v = sum##BI; \
v += bv; \
\
float mtz = max(v, 0.0f); \
float ltz = min(v, 0.0f); \
\
v = ltz * 0.1f + mtz; \
\
out[opIndex] = v; \
} \
}
UNROLL8(RELU);
#undef DECL_PTR
#undef LOAD_COEF
#undef CALC
#undef LOAD_INPUT2
#undef LOAD_INPUT1X
#undef LOAD_INPUT1Y
#undef RELU
}
} else {
for (int bi=0; bi<BLOCK_SIZE; bi++) {
int xi = xi0+bi;
if (xi == wsz) {
break;
}
const float *w0 = weight + lid;
float sum = 0;
for (int ip=0; ip<nInputPlanes; ip++) {
float i00, i01, i02;
float i10, i11, i12;
float i20, i21, i22;
i00 = in_block0[(bi-1)*nInputPlanes+ip];
i10 = in_block1[(bi-1)*nInputPlanes+ip];
i20 = in_block2[(bi-1)*nInputPlanes+ip];
i01 = in_block0[bi*nInputPlanes+ip];
i11 = in_block1[bi*nInputPlanes+ip];
i21 = in_block2[bi*nInputPlanes+ip];
i02 = in_block0[(bi+1)*nInputPlanes+ip];
i12 = in_block1[(bi+1)*nInputPlanes+ip];
i22 = in_block2[(bi+1)*nInputPlanes+ip];
const float *w = w0;
sum += w[(9*ip+0) * 128]*i00;
sum += w[(9*ip+1) * 128]*i01;
sum += w[(9*ip+2) * 128]*i02;
sum += w[(9*ip+3) * 128]*i10;
sum += w[(9*ip+4) * 128]*i11;
sum += w[(9*ip+5) * 128]*i12;
sum += w[(9*ip+6) * 128]*i20;
sum += w[(9*ip+7) * 128]*i21;
sum += w[(9*ip+8) * 128]*i22;
}
float *out = packed_output + (yi*wsz + xi)*nOutputPlanes;
{
float v = sum;
v += bv;
float mtz = max(v, 0.0f);
float ltz = min(v, 0.0f);
v = ltz * 0.1f + mtz;
out[op] = v;
}
}
}
}
}
}
extern "C" __global__ void
filter_i32(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
int nOutputPlanes,
const float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
const float * __restrict__ weight)
{
filter<32>(packed_input, packed_output, nOutputPlanes, biases, hsz, wsz, weight);
}
extern "C" __global__ void
filter_i64(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
int nOutputPlanes,
const float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
const float * __restrict__ weight)
{
filter<64>(packed_input, packed_output, nOutputPlanes, biases, hsz, wsz, weight);
}
extern "C" __global__ void
filter_i128(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
int nOutputPlanes,
const float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
const float * __restrict__ weight)
{
filter<128>(packed_input, packed_output, nOutputPlanes, biases, hsz, wsz, weight);
}
#if __CUDA_ARCH__ >= 300
static inline __device__ float
warp_sum(float v) {
v += __shfl_down_sync(0xFFFFFFFF, v, 1);
v += __shfl_down_sync(0xFFFFFFFF, v, 2);
v += __shfl_down_sync(0xFFFFFFFF, v, 4);
v += __shfl_down_sync(0xFFFFFFFF, v, 8);
v += __shfl_down_sync(0xFFFFFFFF, v, 16);
return v;
}
#endif
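/* warp_sum: shuffle-based tree reduction over a 32-lane warp. After the five
 * steps lane 0 holds the sum of all 32 inputs (other lanes hold partial sums),
 * which is why SUM_RELU only reads the result when lid == 0. */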
template <int nInputPlanes,
int nOutputPlanes>
void __device__
filter_weight_blocking(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
const float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
const float * __restrict__ weight,
int ib0,
int ob0)
{
#define INPUT_BLOCK_SIZE 32
#define OUTPUT_BLOCK_SIZE 64 // == blockDim.x
#define X_BLOCK_SIZE 8
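/* Weight-blocked variant: one launch handles INPUT_BLOCK_SIZE input planes
 * (starting at ib0) for OUTPUT_BLOCK_SIZE output planes (starting at ob0).
 * Partial sums are staged in packed_output: the first input block stores,
 * middle blocks accumulate, and the last block adds its partial sum to the
 * staged value, then applies bias + leaky ReLU (see the ib0 checks below). */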
unsigned int yi = blockIdx.x;
size_t in_step = wsz * nInputPlanes;
const float *inp = packed_input;
inp += yi * in_step;
const float *in0p = inp - in_step;
if (yi == 0) {
in0p = inp;
}
const float *in1p = inp;
const float *in2p = inp + in_step;
if (yi == hsz-1) {
in2p = in1p;
}
const float *in01 = in0p;
const float *in11 = in1p;
const float *in21 = in2p;
__shared__ float shared_buf_base[INPUT_BLOCK_SIZE * (X_BLOCK_SIZE+2) * 3];
float *in_block0_base = shared_buf_base + INPUT_BLOCK_SIZE * (BLOCK_SIZE+2) * 0;
float *in_block1_base = shared_buf_base + INPUT_BLOCK_SIZE * (BLOCK_SIZE+2) * 1;
float *in_block2_base = shared_buf_base + INPUT_BLOCK_SIZE * (BLOCK_SIZE+2) * 2;
float *in_block0 = in_block0_base + INPUT_BLOCK_SIZE;
float *in_block1 = in_block1_base + INPUT_BLOCK_SIZE;
float *in_block2 = in_block2_base + INPUT_BLOCK_SIZE;
int lid = threadIdx.x;
{ // ib0
{ // ob0
int op = lid + ob0;
float bv = biases[op];
for (int xi0=0; xi0<wsz; xi0+=BLOCK_SIZE) {
float *out_base = packed_output + (yi*wsz + xi0)*nOutputPlanes + op;
float *linp0 = in_block0 + lid;
float *linp1 = in_block1 + lid;
float *linp2 = in_block2 + lid;
__syncthreads();
int rem = wsz - xi0;
const float *inb0 = in01 + ib0+lid;
const float *inb1 = in11 + ib0+lid;
const float *inb2 = in21 + ib0+lid;
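/* Away from the left edge, with a full tile ahead, the previous tile's last
 * two columns are shifted into the -1 and 0 slots instead of being reloaded
 * from global memory; only columns 1..X_BLOCK_SIZE are fetched again. */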
if (rem > 8 && xi0 != 0) {
if (lid < INPUT_BLOCK_SIZE) {
linp0[-1*INPUT_BLOCK_SIZE] = linp0[7*INPUT_BLOCK_SIZE];
linp1[-1*INPUT_BLOCK_SIZE] = linp1[7*INPUT_BLOCK_SIZE];
linp2[-1*INPUT_BLOCK_SIZE] = linp2[7*INPUT_BLOCK_SIZE];
linp0[0*INPUT_BLOCK_SIZE] = linp0[8*INPUT_BLOCK_SIZE];
linp1[0*INPUT_BLOCK_SIZE] = linp1[8*INPUT_BLOCK_SIZE];
linp2[0*INPUT_BLOCK_SIZE] = linp2[8*INPUT_BLOCK_SIZE];
}
__syncthreads();
if (lid < INPUT_BLOCK_SIZE) {
int bi;
#pragma unroll
for (bi=1; bi<X_BLOCK_SIZE+1; bi++) {
int xi = xi0 + bi;
/* load to shared */
linp0[bi*INPUT_BLOCK_SIZE] = inb0[xi*nInputPlanes];
linp1[bi*INPUT_BLOCK_SIZE] = inb1[xi*nInputPlanes];
linp2[bi*INPUT_BLOCK_SIZE] = inb2[xi*nInputPlanes];
}
}
} else {
if (lid < INPUT_BLOCK_SIZE) {
int bi;
for (bi=0; bi<X_BLOCK_SIZE; bi++) {
int xi = xi0 + bi;
if (xi == wsz) {
break;
}
/* load to shared */
linp0[bi*INPUT_BLOCK_SIZE] = inb0[xi*nInputPlanes];
linp1[bi*INPUT_BLOCK_SIZE] = inb1[xi*nInputPlanes];
linp2[bi*INPUT_BLOCK_SIZE] = inb2[xi*nInputPlanes];
}
{
int xi = xi0 + bi;
if (xi == wsz) {
linp0[bi*(int)INPUT_BLOCK_SIZE] = inb0[(xi-1)*(int)nInputPlanes];
linp1[bi*(int)INPUT_BLOCK_SIZE] = inb1[(xi-1)*(int)nInputPlanes];
linp2[bi*(int)INPUT_BLOCK_SIZE] = inb2[(xi-1)*(int)nInputPlanes];
} else {
linp0[bi*(int)INPUT_BLOCK_SIZE] = inb0[xi*(int)nInputPlanes];
linp1[bi*(int)INPUT_BLOCK_SIZE] = inb1[xi*(int)nInputPlanes];
linp2[bi*(int)INPUT_BLOCK_SIZE] = inb2[xi*(int)nInputPlanes];
}
}
{
int xi = xi0-1;
if (xi == -1) {
linp0[-1*(int)INPUT_BLOCK_SIZE] = inb0[0];
linp1[-1*(int)INPUT_BLOCK_SIZE] = inb1[0];
linp2[-1*(int)INPUT_BLOCK_SIZE] = inb2[0];
} else {
linp0[-1*(int)INPUT_BLOCK_SIZE] = inb0[xi*(int)nInputPlanes];
linp1[-1*(int)INPUT_BLOCK_SIZE] = inb1[xi*(int)nInputPlanes];
linp2[-1*(int)INPUT_BLOCK_SIZE] = inb2[xi*(int)nInputPlanes];
}
}
}
}
__syncthreads();
const float *w0 = weight + op;
if (rem >= BLOCK_SIZE) {
#define DECL_PTR(y,x) float *p##y##x = &in_block##y[INPUT_BLOCK_SIZE * (x-1)];
UNROLL10x3(DECL_PTR);
float sum0 = 0;
float sum1 = 0;
float sum2 = 0;
float sum3 = 0;
float sum4 = 0;
float sum5 = 0;
float sum6 = 0;
float sum7 = 0;
for (int ip1 = 0; ip1 < INPUT_BLOCK_SIZE; ip1+=2) {
int ip = ip1 + ib0;
#define LOAD_INPUT2(y,x) float2 i##y##x##_2 = *(float2*)&p##y##x[ip1];
UNROLL10x3(LOAD_INPUT2);
#define LOAD_COEF(X) float w_##X = w[X * 128];
#define CALC(SYM,IDX,Y,I0,I1,I2,I3,I4,I5,I6,I7) \
sum0 += w_##IDX * i##Y##I0##_2.SYM; \
sum1 += w_##IDX * i##Y##I1##_2.SYM; \
sum2 += w_##IDX * i##Y##I2##_2.SYM; \
sum3 += w_##IDX * i##Y##I3##_2.SYM; \
sum4 += w_##IDX * i##Y##I4##_2.SYM; \
sum5 += w_##IDX * i##Y##I5##_2.SYM; \
sum6 += w_##IDX * i##Y##I6##_2.SYM; \
sum7 += w_##IDX * i##Y##I7##_2.SYM;
{
const float *w = (w0 + (ip * 128) * 9);
UNROLL9(LOAD_COEF);
{
CALC(x, 0,0,0,1,2,3,4,5,6,7);
CALC(x, 1,0,1,2,3,4,5,6,7,8);
CALC(x, 2,0,2,3,4,5,6,7,8,9);
CALC(x, 3,1,0,1,2,3,4,5,6,7);
CALC(x, 4,1,1,2,3,4,5,6,7,8);
CALC(x, 5,1,2,3,4,5,6,7,8,9);
CALC(x, 6,2,0,1,2,3,4,5,6,7);
CALC(x, 7,2,1,2,3,4,5,6,7,8);
CALC(x, 8,2,2,3,4,5,6,7,8,9);
}
}
ip++;
{
const float *w = (w0 + (ip * 128) * 9);
UNROLL9(LOAD_COEF);
{
CALC(y, 0,0,0,1,2,3,4,5,6,7);
CALC(y, 1,0,1,2,3,4,5,6,7,8);
CALC(y, 2,0,2,3,4,5,6,7,8,9);
CALC(y, 3,1,0,1,2,3,4,5,6,7);
CALC(y, 4,1,1,2,3,4,5,6,7,8);
CALC(y, 5,1,2,3,4,5,6,7,8,9);
CALC(y, 6,2,0,1,2,3,4,5,6,7);
CALC(y, 7,2,1,2,3,4,5,6,7,8);
CALC(y, 8,2,2,3,4,5,6,7,8,9);
}
}
}
#define RELU(BI) \
{ \
\
{ \
float v = sum##BI + out_base[BI*nOutputPlanes]; \
v += bv; \
\
float mtz = max(v, 0.0f); \
float ltz = min(v, 0.0f); \
\
v = ltz * 0.1f + mtz; \
\
out_base[BI*nOutputPlanes] = v; \
} \
}
if ((ib0+INPUT_BLOCK_SIZE) == nInputPlanes) {
UNROLL8(RELU);
} else if (ib0 == 0) {
out_base[nOutputPlanes*0] = sum0;
out_base[nOutputPlanes*1] = sum1;
out_base[nOutputPlanes*2] = sum2;
out_base[nOutputPlanes*3] = sum3;
out_base[nOutputPlanes*4] = sum4;
out_base[nOutputPlanes*5] = sum5;
out_base[nOutputPlanes*6] = sum6;
out_base[nOutputPlanes*7] = sum7;
} else {
out_base[nOutputPlanes*0] += sum0;
out_base[nOutputPlanes*1] += sum1;
out_base[nOutputPlanes*2] += sum2;
out_base[nOutputPlanes*3] += sum3;
out_base[nOutputPlanes*4] += sum4;
out_base[nOutputPlanes*5] += sum5;
out_base[nOutputPlanes*6] += sum6;
out_base[nOutputPlanes*7] += sum7;
}
} else {
for (int bi=0; bi<X_BLOCK_SIZE; bi++) {
int xi = xi0+bi;
if (xi == wsz) {
break;
}
float sum = 0;
for (int ip1=0; ip1<INPUT_BLOCK_SIZE; ip1++) {
int ip = ib0 + ip1;
float i00, i01, i02;
float i10, i11, i12;
float i20, i21, i22;
i00 = in_block0[(bi-1)*INPUT_BLOCK_SIZE+ip1];
i10 = in_block1[(bi-1)*INPUT_BLOCK_SIZE+ip1];
i20 = in_block2[(bi-1)*INPUT_BLOCK_SIZE+ip1];
i01 = in_block0[bi*INPUT_BLOCK_SIZE+ip1];
i11 = in_block1[bi*INPUT_BLOCK_SIZE+ip1];
i21 = in_block2[bi*INPUT_BLOCK_SIZE+ip1];
i02 = in_block0[(bi+1)*INPUT_BLOCK_SIZE+ip1];
i12 = in_block1[(bi+1)*INPUT_BLOCK_SIZE+ip1];
i22 = in_block2[(bi+1)*INPUT_BLOCK_SIZE+ip1];
sum += w0[(9*ip+0) * 128]*i00;
sum += w0[(9*ip+1) * 128]*i01;
sum += w0[(9*ip+2) * 128]*i02;
sum += w0[(9*ip+3) * 128]*i10;
sum += w0[(9*ip+4) * 128]*i11;
sum += w0[(9*ip+5) * 128]*i12;
sum += w0[(9*ip+6) * 128]*i20;
sum += w0[(9*ip+7) * 128]*i21;
sum += w0[(9*ip+8) * 128]*i22;
}
float *out = packed_output + (yi*wsz + xi)*nOutputPlanes;
if ((ib0+INPUT_BLOCK_SIZE) == nInputPlanes) {
/* last */
float v = sum + out[op];
v += bv;
float mtz = max(v, 0.0f);
float ltz = min(v, 0.0f);
v = ltz * 0.1f + mtz;
out[op] = v;
} else if (ib0 == 0) {
out[op] = sum;
} else {
out[op] += sum;
}
}
}
}
}
}
}
extern "C" __global__
void
filter_i128_o128(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
const float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
const float * __restrict__ weight,
int ib0,
int ob0)
{
filter_weight_blocking<128,128>(packed_input,
packed_output,
biases,
hsz,
wsz,
weight,
ib0,
ob0);
}
extern "C" __global__
void
filter_i64_o128(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
const float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
const float * __restrict__ weight,
int ib0,
int ob0)
{
filter_weight_blocking<64,128>(packed_input,
packed_output,
biases,
hsz,
wsz,
weight,
ib0,
ob0);
}
extern "C" __global__
void
filter_i64_o64(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
const float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
const float * __restrict__ weight,
int ib0,
int ob0)
{
filter_weight_blocking<64,64>(packed_input,
packed_output,
biases,
hsz,
wsz,
weight,
ib0,
ob0);
}
extern "C" __global__ void
filter_i128_o1(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
float * __restrict__ weight)
{
int nInputPlanes = 128;
int nOutputPlanes = 1;
{
unsigned int yi = blockIdx.x;
size_t in_step = wsz * nInputPlanes;
const float *inp = packed_input;
inp += yi * in_step;
const float *in0p = inp - in_step;
if (yi == 0) {
in0p = inp;
}
const float *in1p = inp;
const float *in2p = inp + in_step;
if (yi == hsz-1) {
in2p = in1p;
}
const float *in01 = in0p;
const float *in11 = in1p;
const float *in21 = in2p;
unsigned int lid = threadIdx.x;
float bv0 = biases[0];
/* 128 item */
/* x : (1width/group) */
/* y : (2height/group) */
/* iplane : 1plane / 1item * 128plane */
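/* Each of the 128 threads owns one input plane: it keeps a sliding 3x3 window
 * of that plane in the lin** registers and that plane's 9 coefficients in
 * w00..w22. The per-plane products are then reduced across the block through
 * sum_buffer / SUM_RELU to produce the single output plane. */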
__shared__ float shared_buf[128 * 10];
float lin00;
float lin01;
float lin02;
float lin10;
float lin11;
float lin12;
float lin20;
float lin21;
float lin22;
float *sum_buffer = shared_buf + 128*9;
#define OUT1_LOAD_WEIGHT(I,Y,X) float w##I##Y##X = weight[(I*16 + lid)*9 + Y*3 + X];
float w00 = weight[lid*9 + 0];
float w01 = weight[lid*9 + 1];
float w02 = weight[lid*9 + 2];
float w10 = weight[lid*9 + 3];
float w11 = weight[lid*9 + 4];
float w12 = weight[lid*9 + 5];
float w20 = weight[lid*9 + 6];
float w21 = weight[lid*9 + 7];
float w22 = weight[lid*9 + 8];
const float *pin01 = in01 + lid;
const float *pin02 = in01 + nInputPlanes + lid;
const float *pin11 = in11 + lid;
const float *pin12 = in11 + nInputPlanes + lid;
const float *pin21 = in21 + lid;
const float *pin22 = in21 + nInputPlanes + lid;
lin01 = pin01[0];
lin02 = pin01[0];
lin11 = pin11[0];
lin12 = pin11[0];
lin21 = pin21[0];
lin22 = pin21[0];
#define OUT1_BODY(LEDGE,REDGE,SUM_RELU) \
{ \
float sum = 0; \
{ \
lin00 = lin01; \
lin01 = lin02; \
\
lin10 = lin11; \
lin11 = lin12; \
\
lin20 = lin21; \
lin21 = lin22; \
\
if (REDGE) { \
lin02 = lin01; \
lin12 = lin11; \
lin22 = lin21; \
} else { \
lin02 = pin02[xi*128]; \
lin12 = pin12[xi*128]; \
lin22 = pin22[xi*128]; \
} \
\
sum += w00 * lin00; \
sum += w10 * lin10; \
sum += w20 * lin20; \
\
sum += w01 * lin01; \
sum += w11 * lin11; \
sum += w21 * lin21; \
\
sum += w02 * lin02; \
sum += w12 * lin12; \
sum += w22 * lin22; \
\
} \
__syncthreads(); \
sum_buffer[lid] = sum; \
__syncthreads(); \
if (lid < 64) { \
float v2 = sum_buffer[lid+64]; \
sum_buffer[lid] += v2; \
} \
__syncthreads(); \
SUM_RELU(0); \
}
#if __CUDA_ARCH__ >= 300
#define SUM_RELU(OI) \
if (lid < 32) { \
float v0 = sum_buffer[lid] + sum_buffer[lid+32]; \
float sum = warp_sum(v0); \
\
if (lid == 0) { \
float v = sum; \
float *out = packed_output + (yi*wsz + xi)*nOutputPlanes; \
v += bv##OI; \
float mtz = max(v, 0.0f); \
float ltz = min(v, 0.0f); \
v = ltz * 0.1f + mtz; \
out[OI] = v; \
} \
}
#else
#define SUM_RELU(OI) \
if (lid < 32) { \
sum_buffer[lid] += sum_buffer[lid+32]; \
} \
__syncthreads(); \
if (lid < 16) { \
sum_buffer[lid] += sum_buffer[lid+16]; \
} \
__syncthreads(); \
if (lid < 8) { \
sum_buffer[lid] += sum_buffer[lid+8]; \
} \
__syncthreads(); \
if (lid < 4) { \
sum_buffer[lid] += sum_buffer[lid+4]; \
} \
__syncthreads(); \
if (lid < 2) { \
sum_buffer[lid] += sum_buffer[lid+2]; \
} \
__syncthreads(); \
if (lid == 0) { \
float sum = sum_buffer[0] + sum_buffer[1]; \
float v = sum; \
float *out = packed_output + (yi*wsz + xi)*nOutputPlanes; \
v += bv##OI; \
float mtz = max(v, 0.0f); \
float ltz = min(v, 0.0f); \
v = ltz * 0.1f + mtz; \
out[OI] = v; \
}
#endif
for (int xi=0; xi<wsz-1; xi++) {
OUT1_BODY(0,0,SUM_RELU);
}
{
int xi = wsz-1;
OUT1_BODY(0,1,SUM_RELU);
}
}
}
extern "C" __global__ void
filter_i1_o32(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
float * __restrict__ weight)
{
//int nInputPlanes = 1;
int nOutputPlanes = 32;
unsigned int yi = blockIdx.x;
unsigned int lid = threadIdx.x;
size_t in_step = wsz;
const float *inp = packed_input;
inp += in_step * yi;
const float *in0p = inp - in_step;
if (yi == 0) {
in0p = inp;
}
const float *in1p = inp;
const float *in2p = inp + in_step;
if (yi == hsz-1) {
in2p = in1p;
}
const float *in01 = in0p;
const float *in11 = in1p;
const float *in21 = in2p;
__shared__ float in_block0_base[256+2];
__shared__ float in_block1_base[256+2];
__shared__ float in_block2_base[256+2];
float *in_block0 = in_block0_base + 1;
float *in_block1 = in_block1_base + 1;
float *in_block2 = in_block2_base + 1;
/* 256 item / group */
/* x : (64width/group) */
/* 32 oplane : (8weight/item * 4item)*/
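/* Thread decomposition: xoff = lid/4 picks one of 64 x positions in the tile,
 * ooff = (lid%4)*8 picks a group of 8 output planes, so 4 threads per pixel
 * cover all 32 outputs. A 256-wide tile is then swept in 4 passes of 64 pixels
 * (the xi1_base loop below). */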
unsigned int xoff = lid / 4U;
unsigned int ooff = (lid % 4U) * 8;
#define IN1_LOAD_COEF(O,Y,X) \
float w##O##Y##X = weight[9 * (O + ooff) + (Y*3) + X];
UNROLL8x3x3(IN1_LOAD_COEF);
for (int xi0=0; xi0<wsz; xi0+=256) {
/* load */
__syncthreads();
{
int xi = xi0 + lid;
if (xi < wsz) {
in_block0[lid] = in01[xi0 + lid];
in_block1[lid] = in11[xi0 + lid];
in_block2[lid] = in21[xi0 + lid];
}
if (lid == 0) {
if (xi == 0) {
in_block0[-1] = in01[0];
in_block1[-1] = in11[0];
in_block2[-1] = in21[0];
} else {
in_block0[-1] = in01[xi-1];
in_block1[-1] = in11[xi-1];
in_block2[-1] = in21[xi-1];
}
}
if (xi == wsz-1) {
in_block0[lid+1] = in01[xi];
in_block1[lid+1] = in11[xi];
in_block2[lid+1] = in21[xi];
}
if ((lid == 255) && (xi < wsz-1)) {
in_block0[256] = in01[xi+1];
in_block1[256] = in11[xi+1];
in_block2[256] = in21[xi+1];
}
}
__syncthreads();
for (int xi1_base=0; xi1_base<4; xi1_base++) {
{
int xi1 = xi1_base*64 + xoff;
int xi = xi0 + xi1;
if (xi < wsz) {
#define IN1_DECLSUM(O) float sum##O = 0;
#define IN1_CALC(O,Y,X) sum##O += in_block##Y[xi1+X-1] * w##O##Y##X;
#define IN1_RELU(O) { \
float v = sum##O; \
int opIndex = ooff + O; \
float bv = biases[opIndex]; \
v += bv; \
float mtz = max(v, 0.0f); \
float ltz = min(v, 0.0f); \
v = ltz * 0.1f + mtz; \
out[opIndex] = v; \
}
UNROLL8(IN1_DECLSUM);
UNROLL8x3x3(IN1_CALC);
float *out = packed_output + (yi*wsz + xi) * nOutputPlanes;
UNROLL8(IN1_RELU);
}
}
}
}
}
/* blockDim.x == 192 */
extern "C" __global__ void
filter_i3_o32(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
float * __restrict__ weight)
{
int nInputPlanes = 3;
int nOutputPlanes = 32;
unsigned int yi = blockIdx.x;
unsigned int lid = threadIdx.x;
size_t in_step = wsz * nInputPlanes;
const float *inp = packed_input;
inp += in_step * yi;
const float *in0p = inp - in_step;
if (yi == 0) {
in0p = inp;
}
const float *in1p = inp;
const float *in2p = inp + in_step;
if (yi == hsz-1) {
in2p = in1p;
}
const float *in01 = in0p;
const float *in11 = in1p;
const float *in21 = in2p;
__shared__ float in_block0_base[(64+2)*3];
__shared__ float in_block1_base[(64+2)*3];
__shared__ float in_block2_base[(64+2)*3];
__shared__ float sum_buffer[192];
float *in_block0 = in_block0_base + 3;
float *in_block1 = in_block1_base + 3;
float *in_block2 = in_block2_base + 3;
/* 192 item / group */
/* load 192 item */
/* 3 iplane : */
/* x : (64width/group) */
/* 32 oplane : one (iplane,oplane) pair, 9 weights / item; 96 items active */
unsigned int ioff = lid / 32U;
unsigned int ooff = lid % 32U;
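/* ioff = lid/32 selects the input plane and ooff = lid%32 the output plane;
 * only the first 96 threads (3 planes x 32 outputs) compute convolutions, each
 * with its own 9 coefficients, and the 96 partial sums are folded into 32
 * outputs in the reduction below. All 192 threads still help with the loads. */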
#define I3_O32_LOAD_COEF(I) \
float w##I = weight[9*nOutputPlanes*ioff+ooff+I*nOutputPlanes];
UNROLL9(I3_O32_LOAD_COEF);
for (int xi0=0; xi0<wsz; xi0+=64) {
/* load */
int nelem = min(wsz - xi0, 64);
int nload = nelem * 3;
if (lid < nload) {
int xi = xi0*3 + lid;
in_block0[lid] = in01[xi];
in_block1[lid] = in11[xi];
in_block2[lid] = in21[xi];
if (lid < 3) {
if (xi <= 2) {
/* left edge */
in_block0[-3+(int)lid] = in01[lid];
in_block1[-3+(int)lid] = in11[lid];
in_block2[-3+(int)lid] = in21[lid];
} else {
/* 0, 1, 2 */
in_block0[-3+(int)lid] = in01[-3+(int)xi];
in_block1[-3+(int)lid] = in11[-3+(int)xi];
in_block2[-3+(int)lid] = in21[-3+(int)xi];
}
}
if (xi >= wsz*3-3) {
/* right edge */
in_block0[lid+3] = in01[xi];
in_block1[lid+3] = in11[xi];
in_block2[lid+3] = in21[xi];
} else if (lid >= 189) {
/* 189, 190, 191 */
in_block0[lid+3] = in01[xi+3];
in_block1[lid+3] = in11[xi+3];
in_block2[lid+3] = in21[xi+3];
}
}
__syncthreads();
for (int xi1=0; xi1<nelem; xi1++) {
int xi = xi0 + xi1;
if (lid < 96) { // 3input x 32output
float sum = 0;
sum += w0 * in_block0[(xi1 - 1)*3+(int)ioff];
sum += w1 * in_block0[(xi1 )*3+(int)ioff];
sum += w2 * in_block0[(xi1 + 1)*3+(int)ioff];
sum += w3 * in_block1[(xi1 - 1)*3+(int)ioff];
sum += w4 * in_block1[(xi1 )*3+(int)ioff];
sum += w5 * in_block1[(xi1 + 1)*3+(int)ioff];
sum += w6 * in_block2[(xi1 - 1)*3+(int)ioff];
sum += w7 * in_block2[(xi1 )*3+(int)ioff];
sum += w8 * in_block2[(xi1 + 1)*3+(int)ioff];
sum_buffer[lid] = sum;
}
__syncthreads();
if (lid < 32) {
int oi = lid;
float v = 0;
float *out = packed_output + (yi*wsz + xi) * nOutputPlanes;
/* 96 to 32 reduction */
v += sum_buffer[32 * 0 + lid];
v += sum_buffer[32 * 1 + lid];
v += sum_buffer[32 * 2 + lid];
float bv = biases[oi];
v += bv;
float mtz = max(v, 0.0f);
float ltz = min(v, 0.0f);
v = ltz * 0.1f + mtz;
out[oi] = v;
}
__syncthreads();
}
}
}
/* blockDim.x == 128 */
extern "C" __global__ void
filter_i128_o3(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
float * __restrict__ weight)
{
int nInputPlanes = 128;
int nOutputPlanes = 3;
unsigned int yi = blockIdx.x;
unsigned int lid = threadIdx.x;
size_t in_step = wsz * nInputPlanes;
const float *inp = packed_input;
inp += in_step * yi;
const float *in0p = inp - in_step;
if (yi == 0) {
in0p = inp;
}
const float *in1p = inp;
const float *in2p = inp + in_step;
if (yi == hsz-1) {
in2p = in1p;
}
const float *in01 = in0p;
const float *in11 = in1p;
const float *in21 = in2p;
float lin00, lin01, lin02;
float lin10, lin11, lin12;
float lin20, lin21, lin22;
__shared__ float sum_buffer[128];
/* 128 item / group */
/* load 128 item (load 3elem/item) */
/* 128 iplane
* 1 input
* 3 output (27coeff)
*/
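/* Each of the 128 threads owns one input plane and keeps that plane's 9
 * coefficients for all 3 output planes (w0*, w1*, w2*) in registers. The
 * char*-based addroff walk advances one pixel (nInputPlanes floats) per
 * column, and I128_O3 / SUM_RELU reduce the 128 per-plane sums per output. */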
int ioff = lid;
float bv0 = biases[0];
float bv1 = biases[1];
float bv2 = biases[2];
#define I128_O3_LOAD_COEF(I) \
float w0##I = weight[9*0*nInputPlanes + I*nInputPlanes + ioff]; \
float w1##I = weight[9*1*nInputPlanes + I*nInputPlanes + ioff]; \
float w2##I = weight[9*2*nInputPlanes + I*nInputPlanes + ioff];
UNROLL9(I128_O3_LOAD_COEF);
lin01 = lin02 = in01[lid];
lin11 = lin12 = in11[lid];
lin21 = lin22 = in21[lid];
int addroff = 0;
char *p0 = (char*)(in01 + lid + nInputPlanes);
char *p1 = (char*)(in11 + lid + nInputPlanes);
char *p2 = (char*)(in21 + lid + nInputPlanes);
for (int xi=0; xi<wsz; xi++) {
lin00 = lin01;
lin01 = lin02;
lin10 = lin11;
lin11 = lin12;
lin20 = lin21;
lin21 = lin22;
if (xi == wsz-1) {
/* nop */
} else {
lin02 = *(float *)(p0 + addroff);
lin12 = *(float *)(p1 + addroff);
lin22 = *(float *)(p2 + addroff);
}
addroff += nInputPlanes * sizeof(float);
#define I128_O3(OI) \
{ \
float sum = 0; \
sum += w##OI##0 * lin00; \
sum += w##OI##1 * lin01; \
sum += w##OI##2 * lin02; \
\
sum += w##OI##3 * lin10; \
sum += w##OI##4 * lin11; \
sum += w##OI##5 * lin12; \
\
sum += w##OI##6 * lin20; \
sum += w##OI##7 * lin21; \
sum += w##OI##8 * lin22; \
\
__syncthreads(); \
sum_buffer[lid] = sum; \
\
/* 128 to 1 */ \
__syncthreads(); \
if (lid < 64) { \
sum_buffer[lid] += sum_buffer[lid + 64]; \
} \
__syncthreads(); \
\
SUM_RELU(OI); \
}
I128_O3(0);
I128_O3(1);
I128_O3(2);
}
}
|
514a4f5e65eb3920627ebc7e9cc102178802c545.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gradient_viz.h"
#include <util/helper_math.h>
namespace dart {
inline __host__ __device__ unsigned char clamp(int c) {
return min(max(0,c),255);
}
inline __host__ __device__ uchar3 hsv2rgb(float h, float s, float v) {
float c = v*s;
float hPrime = h/60.0f;
float x = c*(1 - fabs(fmodf(hPrime,2) - 1));
float m = v-c; // standard HSV->RGB offset is m = v - c; callers here pass s = 1.0f, so m == 0 either way
int hPrimeInt = hPrime;
switch (hPrimeInt) {
case 0:
return make_uchar3(255*(c+m),255*(x+m),255*(m));
case 1:
return make_uchar3(255*(x+m),255*(c+m),255*(m));
case 2:
return make_uchar3(255*(m),255*(c+m),255*(x+m));
case 3:
return make_uchar3(255*(m),255*(x+m),255*(c+m));
case 4:
return make_uchar3(255*(x+m),255*(m),255*(c+m));
case 5:
return make_uchar3(255*(c+m),255*(m),255*(x+m));
}
return make_uchar3(0,0,0);
}
// -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=-
__global__ void gpu_visualizeImageGradient(const float2 * imgGradient, uchar3 * gradientViz, const int width, const int height, const float minMag, const float maxMag) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
float2 grad = imgGradient[index];
const float angle = atan2(grad.y,grad.x);
const float mag = length(grad);
grad = grad / mag;
float h = angle * 180 / M_PI;
if (h < 0) {
h += 360;
}
const float v = min(max(0.0f,(mag-minMag)/(maxMag-minMag)),1.0f);
gradientViz[index] = hsv2rgb(h,1.0,v);
}
// -=-=-=-=-=-=-=-=-=- interface -=-=-=-=-=-=-=-=-=-
void visualizeImageGradient(const float2 * imgGradient, uchar3 * gradientViz, const int width, const int height, const float minMag, const float maxMag) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
hipLaunchKernelGGL(( gpu_visualizeImageGradient), dim3(grid),dim3(block), 0, 0, imgGradient,gradientViz,width,height,minMag,maxMag);
}
}
|
514a4f5e65eb3920627ebc7e9cc102178802c545.cu
|
#include "gradient_viz.h"
#include <util/helper_math.h>
namespace dart {
inline __host__ __device__ unsigned char clamp(int c) {
return min(max(0,c),255);
}
inline __host__ __device__ uchar3 hsv2rgb(float h, float s, float v) {
float c = v*s;
float hPrime = h/60.0f;
float x = c*(1 - fabs(fmodf(hPrime,2) - 1));
float m = v-c; // standard HSV->RGB offset is m = v - c; callers here pass s = 1.0f, so m == 0 either way
int hPrimeInt = hPrime;
switch (hPrimeInt) {
case 0:
return make_uchar3(255*(c+m),255*(x+m),255*(m));
case 1:
return make_uchar3(255*(x+m),255*(c+m),255*(m));
case 2:
return make_uchar3(255*(m),255*(c+m),255*(x+m));
case 3:
return make_uchar3(255*(m),255*(x+m),255*(c+m));
case 4:
return make_uchar3(255*(x+m),255*(m),255*(c+m));
case 5:
return make_uchar3(255*(c+m),255*(m),255*(x+m));
}
return make_uchar3(0,0,0);
}
// -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=-
__global__ void gpu_visualizeImageGradient(const float2 * imgGradient, uchar3 * gradientViz, const int width, const int height, const float minMag, const float maxMag) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
float2 grad = imgGradient[index];
const float angle = atan2(grad.y,grad.x);
const float mag = length(grad);
grad = grad / mag;
float h = angle * 180 / M_PI;
if (h < 0) {
h += 360;
}
const float v = min(max(0.0f,(mag-minMag)/(maxMag-minMag)),1.0f);
gradientViz[index] = hsv2rgb(h,1.0,v);
}
// -=-=-=-=-=-=-=-=-=- interface -=-=-=-=-=-=-=-=-=-
void visualizeImageGradient(const float2 * imgGradient, uchar3 * gradientViz, const int width, const int height, const float minMag, const float maxMag) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
gpu_visualizeImageGradient<<<grid,block>>>(imgGradient,gradientViz,width,height,minMag,maxMag);
}
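// Example host-side call (a sketch, not part of the original file; the buffer
// names are hypothetical and the caller is assumed to have filled d_grad):
//   float2 *d_grad; uchar3 *d_viz;
//   cudaMalloc(&d_grad, width*height*sizeof(float2));
//   cudaMalloc(&d_viz,  width*height*sizeof(uchar3));
//   /* ... compute image gradients into d_grad ... */
//   dart::visualizeImageGradient(d_grad, d_viz, width, height, 0.0f, 1.0f);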
}
|
39ffe9b2e022a0a9a13174eae20eb9b77ec62334.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/PinnedMemoryAllocator.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/hip/MiscUtils.h>
#include <ATen/native/hip/BatchLinearAlgebraLib.h>
#include <ATen/native/cpu/zmath.h>
#include <THH/THH.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma.h>
#include <magma_types.h>
const bool use_magma_ = true;
#else
const bool use_magma_ = false;
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolve(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSymeig(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
value_t* rwork,
magma_int_t* iwork, magma_int_t* info);
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue);
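// The explicit specializations below are thin wrappers that forward to the
// corresponding MAGMA routines (the *_gpu entry points for single matrices,
// the *_batched entry points for batches). Non-batched wrappers first take a
// MagmaStreamSyncGuard, and every wrapper surfaces asynchronous errors via
// AT_CUDA_CHECK(hipGetLastError()).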
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
return magma_get_sgetri_nb(n);
}
template<>
void magmaGetri<double>(
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetri<float>(
magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolve<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double* dA, magma_int_t ldda, double* dB, magma_int_t lddb) {
MagmaStreamSyncGuard guard;
magma_dtrsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolve<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float* dA, magma_int_t ldda, float* dB, magma_int_t lddb) {
MagmaStreamSyncGuard guard;
magma_strsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
return magma_get_dgeqrf_nb(m, n);
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
return magma_get_sgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>(
magma_int_t m,
magma_int_t n) {
return magma_get_zgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>(
magma_int_t m,
magma_int_t n) {
return magma_get_cgeqrf_nb(m, n);
}
template<>
void magmaGeqrf<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGeqrf<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGeqrf<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_zgeqrf_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
info);
} else {
magma_zgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGeqrf<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_cgeqrf_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
info);
} else {
magma_cgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaOrgqr<double>(
magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaOrgqr<float>(
magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaOrgqr<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaOrgqr<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSymeig<double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSymeig<float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSymeig<c10::complex<double>, double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA),
ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSymeig<c10::complex<float>, float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA),
ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
magma_int_t lda, double* s, double* U, magma_int_t ldu,
double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
magma_int_t lda, float* s, float* U, magma_int_t ldu,
float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
float* rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<c10::complex<float>, float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A,
magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu,
c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork,
float *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s,
reinterpret_cast<magmaFloatComplex*>(U), ldu,
reinterpret_cast<magmaFloatComplex*>(VT), ldvt,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<c10::complex<double>, double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A,
magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu,
c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s,
reinterpret_cast<magmaDoubleComplex*>(U), ldu,
reinterpret_cast<magmaDoubleComplex*>(VT), ldvt,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
double** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
float** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
#endif
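// ALLOCATE_ARRAY is a convenience macro: it allocates `size` elements of `type`
// in pinned host memory via pin_memory<type>() and points `name` at the buffer;
// the hidden storage_##name object keeps the allocation alive for the enclosing scope.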
#define ALLOCATE_ARRAY(name, type, size) \
auto storage_##name = pin_memory<type>(size); \
name = static_cast<type*>(storage_##name.data());
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
if (b.dim() == 2) {
auto ipiv = at::empty({n}, at::kInt);
magma_int_t info = 0;
magmaSolve<scalar_t>(n, nrhs, A_data, n, ipiv.data_ptr<magma_int_t>(),
b_data, n, &info);
infos[0] = info;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
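// Note: 65535 is used as a conservative per-call cap for MAGMA batched routines,
// presumably because CUDA grid dimensions in y/z are limited to 65535; larger
// batches are simply processed in chunks of batch_limit below.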
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaSolveBatched<scalar_t>(
n, nrhs, A_array_cur, n, ipiv_array_cur, b_array_cur, n,
info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaSolveBatched<scalar_t>(
n, nrhs, &A_array[mini_idx], n, &ipiv_array[mini_idx], &b_array[mini_idx], n,
&info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
std::vector<int64_t> infos(batchCount(self), 0);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "solve_cuda", [&]{
apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "solve_cuda");
} else {
singleCheckErrors(infos[0], "solve_cuda");
}
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(self.get_device());
magmaLuBatched<scalar_t>(
n, n, self_array, n, ipiv_array, info_array,
batch_size, magma_queue);
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix inversions
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaGetriBatched<scalar_t>(
n, self_array_cur, n, ipiv_array_cur, self_inv_array_cur,
n, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaGetriBatched<scalar_t>(
n, &self_array[mini_idx], n, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
n, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
#endif
}
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
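// Workspace size is n * nb (nb = tuned block size from magma_get_(d/s)getri_nb),
// which matches the usual LAPACK/MAGMA recommendation for getri.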
magma_int_t info_tmp = 0;
Tensor ipiv = at::empty({n}, at::kInt);
Tensor dwork = at::empty({lwork}, self.options());
magmaLu<scalar_t>(n, n, self_data, n, ipiv.data_ptr<magma_int_t>(), &info_tmp);
if (info_tmp != 0) {
info = info_tmp;
return;
}
magmaGetri<scalar_t>(
n, self_data, n, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, &info_tmp);
info = info_tmp;
#endif
}
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
auto self_inv_working_copy = cloneBatchedColumnMajor(self);
if (self.dim() > 2) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse<scalar_t>(
self_working_copy, self_inv_working_copy, infos);
});
batchCheckErrors(infos, "inverse_cuda");
} else {
int64_t info = 0;
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse<scalar_t>(self_inv_working_copy, info);
});
singleCheckErrors(info, "inverse_cuda");
}
return self_inv_working_copy;
}
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
if ((self.dim() == 2) || (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_) {
return _inverse_helper_cuda_lib(self); // cusolver or cublas
} else {
return _inverse_helper_cuda_legacy(self); // magma-cuda
}
#else
return _inverse_helper_cuda_legacy(self); // magma-cuda
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, n,
b_data, n, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, n, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], n, &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
});
TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
return self_working_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("cholesky: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
if (self.dim() == 2) {
magma_int_t info = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, n, &info);
infos[0] = info;
} else {
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t* info_array;
scalar_t** self_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
constexpr int64_t batch_limit = 262140;
// Compute as many batches of 262140 as possible
// 262140 is the size of the largest batch of matrices that can be run without
// violating the maximum kernel configuration
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit cholesky calls
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array_cur, n, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaCholeskyBatched<scalar_t>(
uplo, n, &self_array[mini_idx], n, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
Tensor self_working_copy;
if (upper) {
self_working_copy = cloneBatchedColumnMajor(self.transpose(-1, -2));
} else {
self_working_copy = cloneBatchedColumnMajor(self);
}
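// apply_cholesky below always runs on the lower triangle: when `upper` is true the
// input was transposed above, and the result is transposed back before returning.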
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda", [&]{
apply_cholesky<scalar_t>(self_working_copy, false, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "cholesky_cuda");
} else {
singleCheckErrors(infos[0], "cholesky_cuda");
}
if (upper) {
return self_working_copy.transpose(-1, -2);
} else {
return self_working_copy;
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) {
#ifndef USE_MAGMA
AT_ERROR("lu: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_int_t k = std::min(m, n);
if (self.dim() == 2) {
// If `pivots` is defined, then we have to compute them.
// magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute
// the partially-pivoted LU decomposition with / without pivots.
// The driver routines magma_(d/s)getrf_(nopiv_)gpu accept a tensor on the CPU for pivots.
// The data is later copied back to the appropriate output tensor.
Tensor info_tmp = at::zeros({}, at::kInt);
if (get_pivots) {
Tensor piv_tmp = at::empty({k}, at::kInt);
magmaLu<scalar_t>(
m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>());
pivots.copy_(piv_tmp);
} else {
magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>());
}
infos.copy_(info_tmp);
} else {
auto self_matrix_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
scalar_t** self_array;
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_matrix_stride];
}
MAGMAQueue magma_queue(self.get_device());
// Same comment as in the case of single matrix above.
if (get_pivots) {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto pivots_matrix_stride = pivots.size(-1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_matrix_stride];
}
magmaLuBatched<scalar_t>(
m, n, self_array, m, pivots_array,
infos.data_ptr<magma_int_t>(), batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(
m, n, self_array, m, infos.data_ptr<magma_int_t>(),
batch_size, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) {
TORCH_CHECK(self.dim() >= 2,
"expected tensor with 2 or more dimensions, got size: ", self.sizes(),
" instead");
auto m = self.size(-2);
auto n = self.size(-1);
auto k = std::min(m, n);
auto req_size = self.sizes().vec();
req_size.pop_back();
req_size.back() = k;
Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous();
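// Pivots default to the identity permutation (1..k, LAPACK's 1-based convention);
// they are only overwritten by the LU routines when `pivot` is true and self is non-empty.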
req_size.pop_back();
auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt));
Tensor self_working_copy;
if (self.numel() == 0) {
self_working_copy = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
} else {
self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_cuda", [&]{
apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot);
});
}
if (check_errors) {
if (self.dim() == 2) {
singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true);
} else {
batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true);
}
}
return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_triangular_solve(Tensor& b, Tensor& A, bool upper, bool transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
// batch_size == 1 implies that:
// 1. the RHS and LHS tensors have 2 dimensions, or
// 2. the RHS and LHS tensors have more than 2 dimensions but all batch dimensions are 1
if (batch_size == 1) {
magmaTriangularSolve<scalar_t>(uplo, trans, diag, n, nrhs, A_data, n, b_data, n);
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array_cur,
n, b_array_cur, n, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, &A_array[mini_idx],
n, &b_array[mini_idx], n, batch_size % batch_limit, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor> _triangular_solve_helper_cuda(const Tensor& self, const Tensor& A,
bool upper, bool transpose, bool unitriangular) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve<scalar_t>(self_working_copy, A_working_copy, upper, transpose, unitriangular);
});
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t n_columns, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto q_data = Q.data_ptr<scalar_t>();
auto r_data = R.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
auto r_matrix_stride = matrixStride(R);
magma_int_t m = magma_int_cast(Q.size(-2), "Q.size(-2)");
magma_int_t n = magma_int_cast(R.size(-1), "R.size(-1)");
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
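// `work` doubles as the dT buffer for magma_(d/s)geqrf_gpu / orgqr_gpu; its size,
// (2 * min(m, n) + roundup(n, 32)) * nb, follows MAGMA's documented requirement (assumed here).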
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
infos[i] = info;
if (info != 0) {
return;
}
}
// This phase computes Q (the raw version)
// We need to perform ?geqrf_gpu again due to this bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
infos[i] = info;
if (info != 0) {
return;
}
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor,Tensor> _qr_helper_cuda(const Tensor& self, bool some) {
std::vector<int64_t> infos(batchCount(self), 0);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, some);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
// Fix the number of columns of q_working_copy appropriately
q_sizes[self.dim() - 1] = n_columns_q;
q_working_copy = at::eye(q_sizes[self.dim() - 2], q_sizes[self.dim() - 1], self.options());
q_working_copy = q_working_copy.expand_as(q_working_copy);
// We repurpose the same q_sizes for r_working_copy
// Fix the number of rows and columns of r_working_copy appropriately
q_sizes[self.dim() - 1] = self.size(-1);
q_sizes[self.dim() - 2] = n_columns_q;
r_working_copy = at::empty(q_sizes, self.options());
return std::make_tuple(q_working_copy, r_working_copy);
}
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
r_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, n_columns_q, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "qr_cuda");
} else {
singleCheckErrors(infos[0], "qr_cuda");
}
return std::make_tuple(q_working_copy.narrow(-1, 0, n_columns_q),
r_working_copy.narrow(-2, 0, n_columns_q).triu());
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("symeig: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto eigvals_data = eigvals.data_ptr<value_t>();
auto self_matrix_stride = matrixStride(self);
auto eigvals_stride = eigvals.size(-1);
int64_t batch_size = batchCount(self);
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = eigenvectors ? MagmaVec : MagmaNoVec;
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, n * n);
magma_int_t info;
// Run once first to get the optimum work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magma_int_t lrwork = -1;
value_t rwkopt;
magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_data, n, eigvals_data, wA, n, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, &info);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
liwork = magma_int_cast(iwkopt, "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
value_t* rwork = nullptr;
c10::Storage storage_rwork;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
lrwork = magma_int_cast(rwkopt, "rwork_size");
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
value_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride];
magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr,
wA, n, work, lwork, rwork, lrwork, iwork, liwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_sizes = self.sizes().vec();
self_sizes.pop_back();
ScalarType dtype = toValueType(typeMetaToScalarType(self.dtype()));
// magmaSymeig uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
// The driver routine magma_(d/s)syevd_gpu accepts a tensor on the CPU for eigenvalues.
// The data is later moved to the appropriate device.
// In the case where self.numel() == 0, we just return an empty tensor of
// the required dimensions directly on the CUDA device (to avoid the unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(self_sizes, self.options().dtype(dtype))
: at::empty(self_sizes, self.options().dtype(dtype).device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_symeig<scalar_t>(self_working_copy, eigvals_working_copy, eigenvectors, upper, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos[0], "symeig_cuda");
}
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
auto batchsize = batchCount(self);
magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto mn = std::min(m, n);
c10::Storage storage_rwork;
value_t* rwork = nullptr;
magma_int_t* iwork;
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn);
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
auto lrwork = computeLRWorkDim(jobchar, m, n);
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
magma_int_t info = 0;
// Run once first to get the optimum work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, m, S_data, U_data, m, VT_data, n, &wkopt, lwork, rwork, iwork, &info);
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);
for (int64_t i = 0; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_stride];
value_t* S_working_ptr = &S_data[i * S_stride];
scalar_t* U_working_ptr = &U_data[i * U_stride];
scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
// Compute S, U (optionally), VT (optionally)
magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, m,
S_working_ptr, U_working_ptr, m, VT_working_ptr, n, work, lwork, rwork, iwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
std::vector<int64_t> infos(batchCount(self), 0);
int64_t m = self.size(-2), n = self.size(-1);
int64_t k = std::min(m, n);
char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
if (self.numel() > 0) {
// The input matrix, U, S and VT have to reside in pinned memory.
// Additionally, the input and U have to be in column major format.
// _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
// For the input matrix, these requirements are taken care of below.
// Specify strides
auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
self_col_major_strides[self.dim() - 2] = 1;
self_col_major_strides[self.dim() - 1] = m;
// Create strided tensor in pinned memory
auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] {
apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "svd_cuda");
} else {
singleCheckErrors(infos[0], "svd_cuda");
}
U_working_copy = same_stride_to(U_working_copy, self.options());
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options());
if (compute_uv) {
if (some) {
VT_working_copy = VT_working_copy.narrow(-1, 0, k);
}
} else {
VT_working_copy.zero_();
U_working_copy.zero_();
}
} else {
U_working_copy = same_stride_to(U_working_copy, self.options()).zero_();
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options()).zero_();
}
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("lu_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
auto n = lu.size(-2);
auto nrhs = b.size(-1);
int info_tmp = 0;
if (b.dim() == 2) {
Tensor pivots_tmp = pivots.cpu();
magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp);
info = info_tmp;
} else {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots.size(-1);
magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");
magma_int_t** pivots_array;
scalar_t** lu_array;
scalar_t** b_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
b_array[i] = &b_data[i * b_stride];
lu_array[i] = &lu_data[i * lu_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** lu_array_cur = &lu_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** pivots_array_cur = &pivots_array[mini_idx];
magmaLuSolveBatched<scalar_t>(
n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaLuSolveBatched<scalar_t>(
n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data);
auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous();
if (self.numel() == 0 || LU_data.numel() == 0) {
return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{
apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info);
});
TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info);
return self_working_copy;
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
|
39ffe9b2e022a0a9a13174eae20eb9b77ec62334.cu
|
#include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/PinnedMemoryAllocator.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/cuda/MiscUtils.h>
#include <ATen/native/cuda/BatchLinearAlgebraLib.h>
#include <ATen/native/cpu/zmath.h>
#include <THC/THC.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma.h>
#include <magma_types.h>
const bool use_magma_ = true;
#else
const bool use_magma_ = false;
#endif
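// use_magma_ lets dispatch helpers later in this file (e.g. _inverse_helper_cuda) fall back
// to the cuSOLVER/cuBLAS code path when MAGMA support is not compiled in.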
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolve(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSymeig(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
value_t* rwork,
magma_int_t* iwork, magma_int_t* info);
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
return magma_get_sgetri_nb(n);
}
template<>
void magmaGetri<double>(
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetri<float>(
magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolve<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double* dA, magma_int_t ldda, double* dB, magma_int_t lddb) {
MagmaStreamSyncGuard guard;
magma_dtrsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolve<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float* dA, magma_int_t ldda, float* dB, magma_int_t lddb) {
MagmaStreamSyncGuard guard;
magma_strsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
return magma_get_dgeqrf_nb(m, n);
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
return magma_get_sgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>(
magma_int_t m,
magma_int_t n) {
return magma_get_zgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>(
magma_int_t m,
magma_int_t n) {
return magma_get_cgeqrf_nb(m, n);
}
template<>
void magmaGeqrf<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGeqrf<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGeqrf<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_zgeqrf_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
info);
} else {
magma_zgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGeqrf<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_cgeqrf_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
info);
} else {
magma_cgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaOrgqr<double>(
magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaOrgqr<float>(
magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaOrgqr<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaOrgqr<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSymeig<double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSymeig<float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSymeig<c10::complex<double>, double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA),
ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSymeig<c10::complex<float>, float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA),
ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
magma_int_t lda, double* s, double* U, magma_int_t ldu,
double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
magma_int_t lda, float* s, float* U, magma_int_t ldu,
float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
float* rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<c10::complex<float>, float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A,
magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu,
c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork,
float *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s,
reinterpret_cast<magmaFloatComplex*>(U), ldu,
reinterpret_cast<magmaFloatComplex*>(VT), ldvt,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<c10::complex<double>, double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A,
magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu,
c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s,
reinterpret_cast<magmaDoubleComplex*>(U), ldu,
reinterpret_cast<magmaDoubleComplex*>(VT), ldvt,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
double** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
float** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
#endif
#define ALLOCATE_ARRAY(name, type, size) \
auto storage_##name = pin_memory<type>(size); \
name = static_cast<type*>(storage_##name.data());
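// Added note: ALLOCATE_ARRAY pins host memory through pin_memory<type>(size) and keeps
// the backing Storage alive in the local `storage_<name>` variable, so the raw pointer
// is only valid inside the enclosing scope. As a rough illustration,
//   ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
// expands to
//   auto storage_info_array = pin_memory<magma_int_t>(batch_size);
//   info_array = static_cast<magma_int_t*>(storage_info_array.data());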
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
if (b.dim() == 2) {
auto ipiv = at::empty({n}, at::kInt);
magma_int_t info = 0;
magmaSolve<scalar_t>(n, nrhs, A_data, n, ipiv.data_ptr<magma_int_t>(),
b_data, n, &info);
infos[0] = info;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
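// Illustrative example (added): with batch_size = 150000 and batch_limit = 65535,
// mini_batches = 2, so the loop below issues two batched solves of 65535 systems each,
// and the remainder call afterwards handles 150000 % 65535 = 18930 systems.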
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaSolveBatched<scalar_t>(
n, nrhs, A_array_cur, n, ipiv_array_cur, b_array_cur, n,
info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaSolveBatched<scalar_t>(
n, nrhs, &A_array[mini_idx], n, &ipiv_array[mini_idx], &b_array[mini_idx], n,
&info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
std::vector<int64_t> infos(batchCount(self), 0);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "solve_cuda", [&]{
apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "solve_cuda");
} else {
singleCheckErrors(infos[0], "solve_cuda");
}
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(self.get_device());
magmaLuBatched<scalar_t>(
n, n, self_array, n, ipiv_array, info_array,
batch_size, magma_queue);
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix inversions
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaGetriBatched<scalar_t>(
n, self_array_cur, n, ipiv_array_cur, self_inv_array_cur,
n, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaGetriBatched<scalar_t>(
n, &self_array[mini_idx], n, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
n, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
#endif
}
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
magma_int_t info_tmp = 0;
Tensor ipiv = at::empty({n}, at::kInt);
Tensor dwork = at::empty({lwork}, self.options());
magmaLu<scalar_t>(n, n, self_data, n, ipiv.data_ptr<magma_int_t>(), &info_tmp);
if (info_tmp != 0) {
info = info_tmp;
return;
}
magmaGetri<scalar_t>(
n, self_data, n, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, &info_tmp);
info = info_tmp;
#endif
}
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
auto self_inv_working_copy = cloneBatchedColumnMajor(self);
if (self.dim() > 2) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse<scalar_t>(
self_working_copy, self_inv_working_copy, infos);
});
batchCheckErrors(infos, "inverse_cuda");
} else {
int64_t info = 0;
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse<scalar_t>(self_inv_working_copy, info);
});
singleCheckErrors(info, "inverse_cuda");
}
return self_inv_working_copy;
}
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
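// Dispatch note (added): as written below, the cuSOLVER/cuBLAS path is used for a
// single matrix, for very small batches (batchCount(self) <= 2), or when MAGMA is not
// available; everything else falls through to the MAGMA-based legacy helper.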
if ((self.dim() == 2) || (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_) {
return _inverse_helper_cuda_lib(self); // cusolver or cublas
} else {
return _inverse_helper_cuda_legacy(self); // magma-cuda
}
#else
return _inverse_helper_cuda_legacy(self); // magma-cuda
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, n,
b_data, n, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, n, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], n, &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
});
TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
return self_working_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("cholesky: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
if (self.dim() == 2) {
magma_int_t info = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, n, &info);
infos[0] = info;
} else {
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t* info_array;
scalar_t** self_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
constexpr int64_t batch_limit = 262140;
// Compute as many batches of 262140 as possible
// 262140 is the size of the largest batch of matrices that can be run without
// violating the maximum kernel configuration
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit cholesky calls
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array_cur, n, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaCholeskyBatched<scalar_t>(
uplo, n, &self_array[mini_idx], n, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
Tensor self_working_copy;
if (upper) {
self_working_copy = cloneBatchedColumnMajor(self.transpose(-1, -2));
} else {
self_working_copy = cloneBatchedColumnMajor(self);
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda", [&]{
apply_cholesky<scalar_t>(self_working_copy, false, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "cholesky_cuda");
} else {
singleCheckErrors(infos[0], "cholesky_cuda");
}
if (upper) {
return self_working_copy.transpose(-1, -2);
} else {
return self_working_copy;
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) {
#ifndef USE_MAGMA
AT_ERROR("lu: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_int_t k = std::min(m, n);
if (self.dim() == 2) {
// If `pivots` is defined, then we have to compute them.
// magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute
// the partially-pivoted LU decomposition with / without pivots.
// The driver routines magma_(d/s)getrf_(nopiv_)gpu accept a tensor on the CPU for pivots.
// The data is later copied back to the appropriate output tensor.
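// Added clarification: piv_tmp and info_tmp below are plain CPU tensors because the
// hybrid getrf drivers expect host storage for pivots/info; copy_() then moves the
// results into the CUDA `pivots` / `infos` outputs.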
Tensor info_tmp = at::zeros({}, at::kInt);
if (get_pivots) {
Tensor piv_tmp = at::empty({k}, at::kInt);
magmaLu<scalar_t>(
m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>());
pivots.copy_(piv_tmp);
} else {
magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>());
}
infos.copy_(info_tmp);
} else {
auto self_matrix_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
scalar_t** self_array;
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_matrix_stride];
}
MAGMAQueue magma_queue(self.get_device());
// Same comment as in the case of single matrix above.
if (get_pivots) {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto pivots_matrix_stride = pivots.size(-1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_matrix_stride];
}
magmaLuBatched<scalar_t>(
m, n, self_array, m, pivots_array,
infos.data_ptr<magma_int_t>(), batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(
m, n, self_array, m, infos.data_ptr<magma_int_t>(),
batch_size, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) {
TORCH_CHECK(self.dim() >= 2,
"expected tensor with 2 or more dimensions, got size: ", self.sizes(),
" instead");
auto m = self.size(-2);
auto n = self.size(-1);
auto k = std::min(m, n);
auto req_size = self.sizes().vec();
req_size.pop_back();
req_size.back() = k;
Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous();
req_size.pop_back();
auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt));
Tensor self_working_copy;
if (self.numel() == 0) {
self_working_copy = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
} else {
self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_cuda", [&]{
apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot);
});
}
if (check_errors) {
if (self.dim() == 2) {
singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true);
} else {
batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true);
}
}
return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_triangular_solve(Tensor& b, Tensor& A, bool upper, bool transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
// batch_size == 1 implies that:
// 1. the RHS and LHS tensors have 2 dimensions, or
// 2. the RHS and LHS tensors have more than 2 dimensions but all batch dimensions are 1
if (batch_size == 1) {
magmaTriangularSolve<scalar_t>(uplo, trans, diag, n, nrhs, A_data, n, b_data, n);
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array_cur,
n, b_array_cur, n, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, &A_array[mini_idx],
n, &b_array[mini_idx], n, batch_size % batch_limit, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor> _triangular_solve_helper_cuda(const Tensor& self, const Tensor& A,
bool upper, bool transpose, bool unitriangular) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve<scalar_t>(self_working_copy, A_working_copy, upper, transpose, unitriangular);
});
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t n_columns, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto q_data = Q.data_ptr<scalar_t>();
auto r_data = R.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
auto r_matrix_stride = matrixStride(R);
magma_int_t m = magma_int_cast(Q.size(-2), "Q.size(-2)");
magma_int_t n = magma_int_cast(R.size(-1), "R.size(-1)");
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
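// Added note (assumption based on MAGMA's documentation, not stated in this file): the
// work buffer size above, (2*min(m,n) + roundup(n,32)) * nb, matches the dT requirement
// of ?geqrf_gpu, which lets the same buffer serve as dT for both the geqrf and orgqr
// calls below.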
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
infos[i] = info;
if (info != 0) {
return;
}
}
// This phase computes Q (the raw version)
// We need to perform ?geqrf_gpu again due to this bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
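// In short (added): the loop above produces R via ?geqrf2_gpu, and the loop below
// redoes the factorization with ?geqrf_gpu only to populate the dT workspace
// (work_data here) that ?orgqr_gpu consumes when assembling Q.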
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
infos[i] = info;
if (info != 0) {
return;
}
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor,Tensor> _qr_helper_cuda(const Tensor& self, bool some) {
std::vector<int64_t> infos(batchCount(self), 0);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, some);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
// Fix the number of columns of q_working_copy appropriately
q_sizes[self.dim() - 1] = n_columns_q;
q_working_copy = at::eye(q_sizes[self.dim() - 2], q_sizes[self.dim() - 1], self.options());
q_working_copy = q_working_copy.expand_as(q_working_copy);
// We repurpose the same q_sizes for r_working_copy
// Fix the number of rows and columns of r_working_copy appropriately
q_sizes[self.dim() - 1] = self.size(-1);
q_sizes[self.dim() - 2] = n_columns_q;
r_working_copy = at::empty(q_sizes, self.options());
return std::make_tuple(q_working_copy, r_working_copy);
}
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
r_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, n_columns_q, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "qr_cuda");
} else {
singleCheckErrors(infos[0], "qr_cuda");
}
return std::make_tuple(q_working_copy.narrow(-1, 0, n_columns_q),
r_working_copy.narrow(-2, 0, n_columns_q).triu());
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("symeig: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto eigvals_data = eigvals.data_ptr<value_t>();
auto self_matrix_stride = matrixStride(self);
auto eigvals_stride = eigvals.size(-1);
int64_t batch_size = batchCount(self);
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = eigenvectors ? MagmaVec : MagmaNoVec;
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, n * n);
magma_int_t info;
// Run once, first to get the optimum work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
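// Added note: the call below uses the LAPACK-style workspace query convention;
// lwork / lrwork / liwork == -1 asks the routine to report the optimal sizes in
// wkopt / rwkopt / iwkopt without performing the actual eigendecomposition.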
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magma_int_t lrwork = -1;
value_t rwkopt;
magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_data, n, eigvals_data, wA, n, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, &info);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
liwork = magma_int_cast(iwkopt, "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
value_t* rwork = nullptr;
c10::Storage storage_rwork;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
lrwork = magma_int_cast(rwkopt, "rwork_size");
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
value_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride];
magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr,
wA, n, work, lwork, rwork, lrwork, iwork, liwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_sizes = self.sizes().vec();
self_sizes.pop_back();
ScalarType dtype = toValueType(typeMetaToScalarType(self.dtype()));
// magmaSymeig uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
// The driver routine magma_(d/s)syevd_gpu accepts a tensor on the CPU for eigenvalues.
// The data is later moved to the appropriate device.
// In the case where self.numel() == 0, we just return an empty tensor of the
// appropriate dimensions on the CUDA device (to avoid an unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(self_sizes, self.options().dtype(dtype))
: at::empty(self_sizes, self.options().dtype(dtype).device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_symeig<scalar_t>(self_working_copy, eigvals_working_copy, eigenvectors, upper, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos[0], "symeig_cuda");
}
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
auto batchsize = batchCount(self);
magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto mn = std::min(m, n);
c10::Storage storage_rwork;
value_t* rwork = nullptr;
magma_int_t* iwork;
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn);
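// Added note: 8 * min(m, n) is the iwork size required by the LAPACK/MAGMA ?gesdd
// drivers used below (stated here as background, not taken from this file).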
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
auto lrwork = computeLRWorkDim(jobchar, m, n);
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
magma_int_t info = 0;
// Run once, first to get the optimum work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, m, S_data, U_data, m, VT_data, n, &wkopt, lwork, rwork, iwork, &info);
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);
for (int64_t i = 0; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_stride];
value_t* S_working_ptr = &S_data[i * S_stride];
scalar_t* U_working_ptr = &U_data[i * U_stride];
scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
// Compute S, U (optionally), VT (optionally)
magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, m,
S_working_ptr, U_working_ptr, m, VT_working_ptr, n, work, lwork, rwork, iwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
std::vector<int64_t> infos(batchCount(self), 0);
int64_t m = self.size(-2), n = self.size(-1);
int64_t k = std::min(m, n);
char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
if (self.numel() > 0) {
// The input matrix, U, S and VT have to reside in pinned memory.
// Additionally, the input and U have to be in column major format.
// _create_U_S_VT takes care of part of these requirements (for U, S and VT).
// For the input matrix, these requirements are taken care of below.
// Specify strides
auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
self_col_major_strides[self.dim() - 2] = 1;
self_col_major_strides[self.dim() - 1] = m;
// Create strided tensor in pinned memory
auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] {
apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "svd_cuda");
} else {
singleCheckErrors(infos[0], "svd_cuda");
}
U_working_copy = same_stride_to(U_working_copy, self.options());
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options());
if (compute_uv) {
if (some) {
VT_working_copy = VT_working_copy.narrow(-1, 0, k);
}
} else {
VT_working_copy.zero_();
U_working_copy.zero_();
}
} else {
U_working_copy = same_stride_to(U_working_copy, self.options()).zero_();
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options()).zero_();
}
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("lu_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
auto n = lu.size(-2);
auto nrhs = b.size(-1);
int info_tmp = 0;
if (b.dim() == 2) {
Tensor pivots_tmp = pivots.cpu();
magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp);
info = info_tmp;
} else {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots.size(-1);
magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");
magma_int_t** pivots_array;
scalar_t** lu_array;
scalar_t** b_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
b_array[i] = &b_data[i * b_stride];
lu_array[i] = &lu_data[i * lu_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** lu_array_cur = &lu_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** pivots_array_cur = &pivots_array[mini_idx];
magmaLuSolveBatched<scalar_t>(
n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaLuSolveBatched<scalar_t>(
n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data);
auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous();
if (self.numel() == 0 || LU_data.numel() == 0) {
return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{
apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info);
});
TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info);
return self_working_copy;
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
|
69e5c3d38813abb5795fbc972a4e4b232c29e211.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "human.cuh"
#include "zombie.cuh"
#include <cutil.h>
#include "../../world/worldCreator.cuh"
#include "../../agent/output.h"
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
using namespace std;
void run(void);
void input_agent_data(int& humanN, int& zombieN);
int main(void){
CUDA_SAFE_CALL(hipSetDevice(0));
float elapsed_time_ms = 0.0f;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
run();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time_ms, start, stop);
printf("Run time from start to end : %f [sec]\n", elapsed_time_ms/1000);
return 0;
}
void run(void){
worldCreator<int> world;
int maxX = world.getMax_X();
int minX = world.getMin_X();
int maxY = world.getMax_Y();
int minY = world.getMin_Y();
//cout << "maxX:" << maxX << ", minX:" << minX << ", maxY:" << maxY << ", minY:" << minY << endl;
world.make_2D_World();
int* field = world.getField();
int humanN;// = 10;
int zombieN;// = 5;
input_agent_data(humanN, zombieN);
human agent(humanN, minX, maxX, minY, maxY);
zombie zombies(zombieN, minX, maxX, minY, maxY, field);
printf("maxX:%d, minX:%d, maxY:%d, minY:%d \n", maxX,minX,maxY,minY);//check
printf("human=%d, zombie=%d\n", humanN, zombieN);//check
cout << "--------------------------" << endl;
cout << "Simulation starts!" << endl;
/*
cout << "initial" << endl;
for (int i=0; i<humanN; ++i){
cout << "x[" << i << "]=" << agent.getX(i) << ", ";
cout << "y[" << i << "]=" << agent.getY(i) << endl;
}
*/
//make_output_coordinates(agent.get_pointer_x(0), agent.get_pointer_y(0), N);
const int step_count = 100;
for(int i=0; i<step_count; ++i){
cout << "step " << (i+1) << endl;
//int death_counter = 0;
agent.step(zombies, field, minX, maxX, minY, maxY, humanN, zombieN);
zombies.step(zombieN, minX, maxX, minY, maxY, field);
}
cout << "fin" << endl;
cout << "------------------------------" << endl;
cout << "<result>" << endl;
printf("maxX:%d, minX:%d, maxY:%d, minY:%d \n", maxX,minX,maxY,minY);
cout << "the sum of agents=" << (humanN+zombieN) << endl;
cout << "simulation steps=" << step_count << endl;
cout << "left human=" << humanN << endl;
cout << "zombie=" << zombieN << endl;
/*
for (int i=0; i<humanN; ++i){
cout << "x[" << i << "]=" << agent.getX(i) << ", ";
cout << "y[" << i << "]=" << agent.getY(i) << endl;
}
*/
//output_agent_info(agent.get_pointer_x(0), agent.get_pointer_y(0), agent.get_pointer_infect(0), N);
agent.output_human_info(humanN);
zombies.output_zombie_info(zombieN);
cout << "Simulation end!" << endl;
}
void input_agent_data(int& humanN, int& zombieN){
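// Added note: agent.props is read as alternating label/value lines (inferred from the
// getline calls below): line 1 is skipped, line 2 holds the human count, line 3 is
// skipped, and line 4 holds the zombie count.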
string str, human, zombie;
ifstream ifs("agent.props");
getline(ifs, str);
getline(ifs, str);
human = str;
getline(ifs, str);
getline(ifs, str);
zombie = str;
/* Convert the string data to numbers */
stringstream ss;
ss << human;
ss >> humanN;
ss.clear();
ss.str("");
ss << zombie;
ss >> zombieN;
ss.clear();
ss.str("");
}
|
69e5c3d38813abb5795fbc972a4e4b232c29e211.cu
|
#include <stdio.h>
#include "human.cuh"
#include "zombie.cuh"
#include <cutil.h>
#include "../../world/worldCreator.cuh"
#include "../../agent/output.h"
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
using namespace std;
void run(void);
void input_agent_data(int& humanN, int& zombieN);
int main(void){
CUDA_SAFE_CALL(cudaSetDevice(0));
float elapsed_time_ms = 0.0f;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
run();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time_ms, start, stop);
printf("Run time from start to end : %f [sec]\n", elapsed_time_ms/1000);
return 0;
}
void run(void){
worldCreator<int> world;
int maxX = world.getMax_X();
int minX = world.getMin_X();
int maxY = world.getMax_Y();
int minY = world.getMin_Y();
//cout << "maxX:" << maxX << ", minX:" << minX << ", maxY:" << maxY << ", minY:" << minY << endl;
world.make_2D_World();
int* field = world.getField();
int humanN;// = 10;
int zombieN;// = 5;
input_agent_data(humanN, zombieN);
human agent(humanN, minX, maxX, minY, maxY);
zombie zombies(zombieN, minX, maxX, minY, maxY, field);
printf("maxX:%d, minX:%d, maxY:%d, minY:%d \n", maxX,minX,maxY,minY);//check
printf("human=%d, zombie=%d\n", humanN, zombieN);//check
cout << "--------------------------" << endl;
cout << "Simulation starts!" << endl;
/*
cout << "initial" << endl;
for (int i=0; i<humanN; ++i){
cout << "x[" << i << "]=" << agent.getX(i) << ", ";
cout << "y[" << i << "]=" << agent.getY(i) << endl;
}
*/
//make_output_coordinates(agent.get_pointer_x(0), agent.get_pointer_y(0), N);
const int step_count = 100;
for(int i=0; i<step_count; ++i){
cout << "step " << (i+1) << endl;
//int death_counter = 0;
agent.step(zombies, field, minX, maxX, minY, maxY, humanN, zombieN);
zombies.step(zombieN, minX, maxX, minY, maxY, field);
}
cout << "fin" << endl;
cout << "------------------------------" << endl;
cout << "<result>" << endl;
printf("maxX:%d, minX:%d, maxY:%d, minY:%d \n", maxX,minX,maxY,minY);
cout << "the sum of agents=" << (humanN+zombieN) << endl;
cout << "simulation steps=" << step_count << endl;
cout << "left human=" << humanN << endl;
cout << "zombie=" << zombieN << endl;
/*
for (int i=0; i<humanN; ++i){
cout << "x[" << i << "]=" << agent.getX(i) << ", ";
cout << "y[" << i << "]=" << agent.getY(i) << endl;
}
*/
//output_agent_info(agent.get_pointer_x(0), agent.get_pointer_y(0), agent.get_pointer_infect(0), N);
agent.output_human_info(humanN);
zombies.output_zombie_info(zombieN);
cout << "Simulation end!" << endl;
}
void input_agent_data(int& humanN, int& zombieN){
string str, human, zombie;
ifstream ifs("agent.props");
getline(ifs, str);
getline(ifs, str);
human = str;
getline(ifs, str);
getline(ifs, str);
zombie = str;
/* convert the string data to numbers */
stringstream ss;
ss << human;
ss >> humanN;
ss.clear();
ss.str("");
ss << zombie;
ss >> zombieN;
ss.clear();
ss.str("");
}
|
883f10428716ca41ce7b02396a109606f32113a7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void Fprop1(const float* in, const float* syn1, float* layer1)
{
int i = threadIdx.x; //256
int j = blockDim.y*blockIdx.y + threadIdx.y; //64
int k = blockIdx.x; //Data.count
atomicAdd(&layer1[256*k + i], in[64*k + j] * syn1[j*256 + i]);
}
|
883f10428716ca41ce7b02396a109606f32113a7.cu
|
#include "includes.h"
__global__ void Fprop1(const float* in, const float* syn1, float* layer1)
{
int i = threadIdx.x; //256
int j = blockDim.y*blockIdx.y + threadIdx.y; //64
int k = blockIdx.x; //Data.count
atomicAdd(&layer1[256*k + i], in[64*k + j] * syn1[j*256 + i]);
}
|
8c7105caeeee311d492530f3e3c7bb40f4dfd0e1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <d3d_cudaBase/cudaCommon.h>
#include <iostream>
namespace D3D_CUDA
{
namespace CudaDepthIntegratorMultiClassDeviceCode
{
void initTile(int xRes, int yRes, int zRes, int numClasses, float** voxelsD)
{
const int numBytes = xRes*yRes*zRes*numClasses*sizeof(float);
D3D_CUDA_CHECKED_CALL(hipMalloc(voxelsD, numBytes);)
D3D_CUDA_CHECKED_CALL(hipMemset(*voxelsD, 0, numBytes);)
}
void releaseDeviceMemory(void *addr)
{
D3D_CUDA_CHECKED_CALL(hipFree(addr);)
}
void allocateAndUploadTransformation(float* transformationH, float** transformationD)
{
D3D_CUDA_CHECKED_CALL(hipMalloc(transformationD, 4*4*sizeof(float));)
D3D_CUDA_CHECKED_CALL(hipMemcpy(*transformationD, transformationH, 4*4*sizeof(float), hipMemcpyHostToDevice);)
}
void allocateAndUploadProjection(float* projectionH, float** projectionD)
{
D3D_CUDA_CHECKED_CALL(hipMalloc(projectionD, 3*4*sizeof(float));)
D3D_CUDA_CHECKED_CALL(hipMemcpy(*projectionD, projectionH, 3*4*sizeof(float), hipMemcpyHostToDevice);)
}
void allocateAndUploadDepthData(float* depthDataH, int rows, int cols, float** depthDataD)
{
D3D_CUDA_CHECKED_CALL(hipMalloc(depthDataD, rows*cols*sizeof(float));)
D3D_CUDA_CHECKED_CALL(hipMemcpy(*depthDataD, depthDataH, rows*cols*sizeof(float), hipMemcpyHostToDevice);)
}
void allocateAndUploadClassScoresData(float* classScoresDataH, int rows, int cols, int numClasses, float** classScoresDataD)
{
D3D_CUDA_CHECKED_CALL(hipMalloc(classScoresDataD, rows*cols*numClasses*sizeof(float)); )
D3D_CUDA_CHECKED_CALL(hipMemcpy(*classScoresDataD, classScoresDataH, rows*cols*numClasses*sizeof(float), hipMemcpyHostToDevice);)
}
void downloadTile(int xRes, int yRes, int zRes, float* voxelsD, float* voxelsH)
{
const int numBytes = xRes*yRes*zRes*sizeof(float);
D3D_CUDA_CHECKED_CALL(hipMemcpy(voxelsH, voxelsD, numBytes, hipMemcpyDeviceToHost);)
}
__device__ void project(float* projMat, float* point, float* projPoint)
{
projPoint[0] = projMat[0]*point[0] + projMat[1]*point[1] + projMat[2]*point[2] + projMat[3];
projPoint[1] = projMat[4]*point[0] + projMat[5]*point[1] + projMat[6]*point[2] + projMat[7];
projPoint[2] = projMat[8]*point[0] + projMat[9]*point[1] + projMat[10]*point[2] + projMat[11];
}
__device__ void transform(float* transformMat, float* point, float* transfPoint)
{
transfPoint[0] = transformMat[0]*point[0] + transformMat[1]*point[1] + transformMat[2]*point[2] + transformMat[3];
transfPoint[1] = transformMat[4]*point[0] + transformMat[5]*point[1] + transformMat[6]*point[2] + transformMat[7];
transfPoint[2] = transformMat[8]*point[0] + transformMat[9]*point[1] + transformMat[10]*point[2] + transformMat[11];
}
__global__ void integrationKernel(float* voxelD, const float* boxToGlobalD,
int xRes, int yRes, int zRes,
int numClasses, int freeSpaceClass,
float xBegin, float yBegin, float zBegin,
float deltaX, float deltaY, float deltaZ,
const float* classScoresDataD, const float* depthDataD, int rows, int cols,
float maxDepth, float minDepth,
float epsilon, float eta, float uncertFact, float rho,
const float* projD, float centerX, float centerY, float centerZ, float beta, float skyWeight)
{
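// Cooperatively stage the 3x4 projection and 4x4 box-to-global matrices in
// shared memory; this assumes blockDim.x (== zRes) is at least 16 so every
// matrix entry gets loaded by some thread.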
__shared__ float proj[12];
__shared__ float boxToGlobal[16];
if (threadIdx.x < 12)
{
proj[threadIdx.x] = projD[threadIdx.x];
}
if (threadIdx.x < 16)
{
boxToGlobal[threadIdx.x] = boxToGlobalD[threadIdx.x];
}
__syncthreads();
const int x = blockIdx.x;
const int y = blockIdx.y;
const int z = threadIdx.x;
// voxel center in box coordinate frame
float voxCenterPoint[3];
voxCenterPoint[0] = xBegin + x*deltaX;
voxCenterPoint[1] = yBegin + y*deltaY;
voxCenterPoint[2] = zBegin + z*deltaZ;
const float voxelDiameter = sqrtf(deltaX*deltaX + deltaY*deltaY + deltaZ*deltaZ);
float voxCenterPointG[3];
transform(boxToGlobal, voxCenterPoint, voxCenterPointG);
float dir[3];
dir[0] = voxCenterPointG[0]-centerX;
dir[1] = voxCenterPointG[1]-centerY;
dir[2] = voxCenterPointG[2]-centerZ;
float maxComp = max(abs(dir[0]), max(abs(dir[1]), abs(dir[2])));
dir[0] = dir[0]/maxComp;
dir[1] = dir[1]/maxComp;
dir[2] = dir[2]/maxComp;
float directionWeight = sqrt(dir[0]*dir[0] + dir[1]*dir[1] + dir[2]*dir[2]);
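// directionWeight lies in [1, sqrt(3)]: the viewing-ray direction is rescaled
// so its largest component is 1, and its remaining length grows with how
// oblique the ray is to its dominant axis.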
float voxCenterProj[3];
project(proj, voxCenterPointG, voxCenterProj);
if (voxCenterProj[2] >= minDepth && voxCenterProj[2] <= maxDepth)
{
// perspective division
int xp,yp;
xp = round(voxCenterProj[0]/voxCenterProj[2]);
yp = round(voxCenterProj[1]/voxCenterProj[2]);
// test if inside image
if (xp >= 0 && xp < cols && yp >= 0 && yp < rows)
{
const int depthMapIdx = yp*cols + xp;
float depth = depthDataD[depthMapIdx];
// debug
// int voxInd = (z*xRes*yRes + y*xRes + x)*numClasses;
// voxelD[voxInd + freeSpaceClass] += 1;
// voxelD[voxInd] += 2;
// all transition approach
int voxInd = (z*xRes*yRes + y*xRes + x)*numClasses;
// float bestScore = classScoresDataD[depthMapIdx*numClasses];
// int bestClass = 0;
// for (int i = 1; i < numClasses; i++)
// {
// if (classScoresDataD[depthMapIdx*numClasses + i] > bestScore)
// {
// bestScore = classScoresDataD[depthMapIdx*numClasses + i];
// bestClass = i;
// }
// }
// if (bestClass < 3)
// {
int bestLabel = -1;
//int worstLabel = -1;
float bestScore = -1e6;
float secondBestScore = -1e6;
//float worstScore = 1e6;
float averageSolidScore = 0;
for (int c = 0; c < numClasses; c++)
{
if (classScoresDataD[depthMapIdx*numClasses + c] > bestScore)
{
secondBestScore = bestScore;
bestScore = classScoresDataD[depthMapIdx*numClasses + c];
bestLabel = c;
}
else if (classScoresDataD[depthMapIdx*numClasses + c] > secondBestScore)
{
secondBestScore = classScoresDataD[depthMapIdx*numClasses + c];
}
//if (c != freeSpaceClass && classScoresDataD[depthMapIdx*numClasses + c] < worstScore)
//{
//worstScore = classScoresDataD[depthMapIdx*numClasses + c];
//worstLabel = c;
//}
if (c != freeSpaceClass)
{
averageSolidScore += classScoresDataD[depthMapIdx*numClasses + c]*1.0/(numClasses - 1);
}
}
if (depth > 0)
{
float dist = voxCenterProj[2] - depth;
// float distToNextDisparityPlane = max(((depth*depth)*uncertFact), epsilon);
// float distForClassWeight = max(((depth*depth)*uncertFact), epsilon*2.0f/3.0f);
float distToNextDisparityPlane = epsilon;
float distForClassWeight1 = epsilon*1.0f/2.0f;
float distForClassWeight2 = epsilon;
float distForClassWeight3 = epsilon*1.0f/4.0f;
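// Band weighting in the spirit of a truncated signed distance function: the
// band half-width is epsilon, and the three class-weight distances below pick
// out specific voxels just behind the surface. Note that only the
// distForClassWeight2 branch contributes here, since the other two are scaled
// by 0.0f.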
if (dist < distToNextDisparityPlane)
{
// inside band
if (fabs(dist) < distToNextDisparityPlane)
{
if (dist < 0)
{
const float weight = directionWeight*epsilon/distToNextDisparityPlane;
// in front of surface
for (int i = 0; i < numClasses; i++)
{
// if (i == 4)
// continue;
if (i != freeSpaceClass)
{
voxelD[voxInd + i] += beta*weight;
}
}
}
else
{
const float weight = directionWeight*epsilon/distToNextDisparityPlane;
// behind surface
for (int i = 0; i < numClasses; i++)
{
if (i != freeSpaceClass)
{
voxelD[voxInd + i] -= beta*weight;
}
// check if it is the right voxel for dist1
if (distForClassWeight1 < dist + voxelDiameter && dist < distForClassWeight1)
{
// check if any of the neighboring voxels would be in the range too,
// if so this one is only used if it is further from the camera
float voxCenterPointNextG[3];
float voxCenterPointNext[3];
float voxCenterNextProj[3];
bool useThis = true;
for (int wx = -1; wx <= 1; wx++)
for (int wy = -1; wy <= 1; wy++)
for (int wz = -1; wz <= 1; wz++)
{
voxCenterPointNext[0] = xBegin + (x+wx)*deltaX;
voxCenterPointNext[1] = yBegin + (y+wy)*deltaY;
voxCenterPointNext[2] = zBegin + (z+wz)*deltaZ;
transform(boxToGlobal, voxCenterPointNext, voxCenterPointNextG);
project(proj, voxCenterPointNextG, voxCenterNextProj);
int xpN = round(voxCenterNextProj[0]/voxCenterNextProj[2]);
int ypN = round(voxCenterNextProj[1]/voxCenterNextProj[2]);
if (xpN == xp && ypN == yp)
{
float distN = voxCenterNextProj[2] - depth;
if (distN > dist && distForClassWeight1 < distN + voxelDiameter && distN < distForClassWeight1)
{
useThis = false;
}
}
}
if (useThis)
{
if (i != freeSpaceClass)
{
voxelD[voxInd+i] -= 0.0f*rho*(classScoresDataD[depthMapIdx*numClasses + i]);
}
else
{
voxelD[voxInd+i] += 0.0f*rho*averageSolidScore;
}
}
}
// check if it is the right voxel for dist2
if (distForClassWeight2 < dist + voxelDiameter && dist < distForClassWeight2)
{
// check if any of the neighboring voxels would be in the range too,
// if so this one is only used if it is further from the camera
float voxCenterPointNextG[3];
float voxCenterPointNext[3];
float voxCenterNextProj[3];
bool useThis = true;
for (int wx = -1; wx <= 1; wx++)
for (int wy = -1; wy <= 1; wy++)
for (int wz = -1; wz <= 1; wz++)
{
voxCenterPointNext[0] = xBegin + (x+wx)*deltaX;
voxCenterPointNext[1] = yBegin + (y+wy)*deltaY;
voxCenterPointNext[2] = zBegin + (z+wz)*deltaZ;
transform(boxToGlobal, voxCenterPointNext, voxCenterPointNextG);
project(proj, voxCenterPointNextG, voxCenterNextProj);
int xpN = round(voxCenterNextProj[0]/voxCenterNextProj[2]);
int ypN = round(voxCenterNextProj[1]/voxCenterNextProj[2]);
if (xpN == xp && ypN == yp)
{
float distN = voxCenterNextProj[2] - depth;
if (distN > dist && distForClassWeight2 < distN + voxelDiameter && distN < distForClassWeight2)
{
useThis = false;
}
}
}
if (useThis)
{
if (i != freeSpaceClass)
{
voxelD[voxInd+i] -= 1.0f*rho*(classScoresDataD[depthMapIdx*numClasses + i]);
}
else
{
voxelD[voxInd+i] += 1.0f*rho*averageSolidScore;
}
}
}
// check if it is the right voxel for dist3
if (distForClassWeight3 < dist + voxelDiameter && dist < distForClassWeight3)
{
// check if any of the neighboring voxels would be in the range too,
// if so this one is only used if it is further from the camera
float voxCenterPointNextG[3];
float voxCenterPointNext[3];
float voxCenterNextProj[3];
bool useThis = true;
for (int wx = -1; wx <= 1; wx++)
for (int wy = -1; wy <= 1; wy++)
for (int wz = -1; wz <= 1; wz++)
{
voxCenterPointNext[0] = xBegin + (x+wx)*deltaX;
voxCenterPointNext[1] = yBegin + (y+wy)*deltaY;
voxCenterPointNext[2] = zBegin + (z+wz)*deltaZ;
transform(boxToGlobal, voxCenterPointNext, voxCenterPointNextG);
project(proj, voxCenterPointNextG, voxCenterNextProj);
int xpN = round(voxCenterNextProj[0]/voxCenterNextProj[2]);
int ypN = round(voxCenterNextProj[1]/voxCenterNextProj[2]);
if (xpN == xp && ypN == yp)
{
float distN = voxCenterNextProj[2] - depth;
if (distN > dist && distForClassWeight3 < distN + voxelDiameter && distN < distForClassWeight3)
{
useThis = false;
}
}
}
if (useThis)
{
if (i != freeSpaceClass)
{
voxelD[voxInd+i] -= 0.0f*rho*(classScoresDataD[depthMapIdx*numClasses + i]);
}
else
{
voxelD[voxInd+i] += 0.0f*rho*averageSolidScore;
}
}
}
}
}
// viewing ray
}
else
{
float weight = directionWeight*eta*epsilon/distToNextDisparityPlane;
voxelD[voxInd+freeSpaceClass] -= beta*weight;
}
// else
// {
// const float weightInBand = epsilon/distToNextDisparityPlane;
// const float distBehindNextPlane = dist - distToNextDisparityPlane;
// const float weight = weightInBand*exp(-(distBehindNextPlane)/(epsilon*0.5));
// // far behind lets do an exponential decay to get this whole thing working
// for (int i = 0; i < numClasses; i++)
// {
// if (i != freeSpaceClass)
// {
// voxelD[voxInd + i] -= weight;
// }
// }
// }
}
}
// this is done even if we have a depth for the pixel
{
// enter weights for free space originating from sky
// if (exp(classScoresDataD[depthMapIdx*numClasses + freeSpaceClass]) > 0.95)
// {
if( bestLabel == freeSpaceClass)
{
// find second best class
// float secondBestScore = -1e6;
// for (int c = 0; c < numClasses; c++)
// {
// if (c != freeSpaceClass && classScoresDataD[depthMapIdx+numClasses + c] > secondBestScore)
// {
// secondBestScore = classScoresDataD[depthMapIdx+numClasses + c];
// }
// }
voxelD[voxInd+freeSpaceClass] -= directionWeight*rho*skyWeight*(bestScore - secondBestScore);
}
// float sky_weight = max(0.0f, classScoresDataD[depthMapIdx*numClasses + freeSpaceClass]);
// voxelD[voxInd+freeSpaceClass] -= (50.0f)/((xRes + yRes + zRes)) * directionWeight * rho *sky_weight;
// }
}
// }
//// only free space / occupied space transition approach
// if (depth > 0)
// {
// float dist = voxCenterProj[2] - depth;
// float distToNextDisparityPlane = max(((depth*depth)*uncertFact), epsilon);
// int voxInd = (z*xRes*yRes + y*xRes + x)*numClasses;
// if (dist < distToNextDisparityPlane)
// {
// // inside band
// if (fabs(dist) < distToNextDisparityPlane)
// {
// if (dist < 0)
// {
// const float weight = epsilon/distToNextDisparityPlane;
// voxelD[voxInd + freeSpaceClass] -= weight;
//// for (int i = 0; i < numClasses; i++)
//// {
//// if (i != freeSpaceClass)
//// {
//// voxelD[voxInd + i] += rho*(classScoresDataD[depthMapIdx*numClasses + freeSpaceClass] - classScoresDataD[depthMapIdx*numClasses + i]);
//// }
//// }
// }
// else
// {
// // behind surface
// const float weight = epsilon/distToNextDisparityPlane;
// voxelD[voxInd + freeSpaceClass] += weight;
// for (int i = 0; i < numClasses; i++)
// {
// if (i != freeSpaceClass)
// {
// voxelD[voxInd + i] += rho*(classScoresDataD[depthMapIdx*numClasses + freeSpaceClass] - classScoresDataD[depthMapIdx*numClasses + i]);
// }
// }
// }
// // viewing ray
// }
// else
// {
// float weight = eta*epsilon/distToNextDisparityPlane;
// voxelD[voxInd+freeSpaceClass] -= weight;
//// for (int i = 0; i < numClasses; i++)
//// {
//// if (i != freeSpaceClass)
//// {
//// voxelD[voxInd + i] += rho*(classScoresDataD[depthMapIdx*numClasses + freeSpaceClass] - classScoresDataD[depthMapIdx*numClasses + i]);
//// }
//// }
// }
// }
// }
}
}
}
void integrateDepthMap(float* voxelD, const float* boxToGlobalD,
int xRes, int yRes, int zRes,
int numClasses, int freeSpaceClass,
float xBegin, float yBegin, float zBegin,
float deltaX, float deltaY, float deltaZ,
const float* classScoresDataD, const float* depthDataD, int rows, int cols,
float maxDepth, float minDepth,
float epsilon, float eta, float uncertFact, float rho,
const float* projD, float centerX, float centerY, float centerZ, float beta, float skyWeight)
{
dim3 dimGrid(xRes, yRes);
dim3 dimBlock(zRes);
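// One block per (x, y) column with one thread per voxel along z, so zRes must
// not exceed the 1024 threads-per-block limit and, because of the
// shared-memory loads in the kernel, must be at least 16.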
std::cout << "Eta = " << eta << std::endl;
hipLaunchKernelGGL(( integrationKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, voxelD, boxToGlobalD,
xRes, yRes, zRes,
numClasses, freeSpaceClass,
xBegin, yBegin, zBegin,
deltaX, deltaY, deltaZ,
classScoresDataD, depthDataD, rows, cols,
maxDepth, minDepth,
epsilon, eta, uncertFact, rho,
projD, centerX, centerY, centerZ, beta, skyWeight);
D3D_CUDA_CHECK_ERROR
}
}
}
|
8c7105caeeee311d492530f3e3c7bb40f4dfd0e1.cu
|
#include <d3d_cudaBase/cudaCommon.h>
#include <iostream>
namespace D3D_CUDA
{
namespace CudaDepthIntegratorMultiClassDeviceCode
{
void initTile(int xRes, int yRes, int zRes, int numClasses, float** voxelsD)
{
const int numBytes = xRes*yRes*zRes*numClasses*sizeof(float);
D3D_CUDA_CHECKED_CALL(cudaMalloc(voxelsD, numBytes);)
D3D_CUDA_CHECKED_CALL(cudaMemset(*voxelsD, 0, numBytes);)
}
void releaseDeviceMemory(void *addr)
{
D3D_CUDA_CHECKED_CALL(cudaFree(addr);)
}
void allocateAndUploadTransformation(float* transformationH, float** transformationD)
{
D3D_CUDA_CHECKED_CALL(cudaMalloc(transformationD, 4*4*sizeof(float));)
D3D_CUDA_CHECKED_CALL(cudaMemcpy(*transformationD, transformationH, 4*4*sizeof(float), cudaMemcpyHostToDevice);)
}
void allocateAndUploadProjection(float* projectionH, float** projectionD)
{
D3D_CUDA_CHECKED_CALL(cudaMalloc(projectionD, 3*4*sizeof(float));)
D3D_CUDA_CHECKED_CALL(cudaMemcpy(*projectionD, projectionH, 3*4*sizeof(float), cudaMemcpyHostToDevice);)
}
void allocateAndUploadDepthData(float* depthDataH, int rows, int cols, float** depthDataD)
{
D3D_CUDA_CHECKED_CALL(cudaMalloc(depthDataD, rows*cols*sizeof(float));)
D3D_CUDA_CHECKED_CALL(cudaMemcpy(*depthDataD, depthDataH, rows*cols*sizeof(float), cudaMemcpyHostToDevice);)
}
void allocateAndUploadClassScoresData(float* classScoresDataH, int rows, int cols, int numClasses, float** classScoresDataD)
{
D3D_CUDA_CHECKED_CALL(cudaMalloc(classScoresDataD, rows*cols*numClasses*sizeof(float)); )
D3D_CUDA_CHECKED_CALL(cudaMemcpy(*classScoresDataD, classScoresDataH, rows*cols*numClasses*sizeof(float), cudaMemcpyHostToDevice);)
}
void downloadTile(int xRes, int yRes, int zRes, float* voxelsD, float* voxelsH)
{
const int numBytes = xRes*yRes*zRes*sizeof(float);
D3D_CUDA_CHECKED_CALL(cudaMemcpy(voxelsH, voxelsD, numBytes, cudaMemcpyDeviceToHost);)
}
__device__ void project(float* projMat, float* point, float* projPoint)
{
projPoint[0] = projMat[0]*point[0] + projMat[1]*point[1] + projMat[2]*point[2] + projMat[3];
projPoint[1] = projMat[4]*point[0] + projMat[5]*point[1] + projMat[6]*point[2] + projMat[7];
projPoint[2] = projMat[8]*point[0] + projMat[9]*point[1] + projMat[10]*point[2] + projMat[11];
}
__device__ void transform(float* transformMat, float* point, float* transfPoint)
{
transfPoint[0] = transformMat[0]*point[0] + transformMat[1]*point[1] + transformMat[2]*point[2] + transformMat[3];
transfPoint[1] = transformMat[4]*point[0] + transformMat[5]*point[1] + transformMat[6]*point[2] + transformMat[7];
transfPoint[2] = transformMat[8]*point[0] + transformMat[9]*point[1] + transformMat[10]*point[2] + transformMat[11];
}
__global__ void integrationKernel(float* voxelD, const float* boxToGlobalD,
int xRes, int yRes, int zRes,
int numClasses, int freeSpaceClass,
float xBegin, float yBegin, float zBegin,
float deltaX, float deltaY, float deltaZ,
const float* classScoresDataD, const float* depthDataD, int rows, int cols,
float maxDepth, float minDepth,
float epsilon, float eta, float uncertFact, float rho,
const float* projD, float centerX, float centerY, float centerZ, float beta, float skyWeight)
{
__shared__ float proj[12];
__shared__ float boxToGlobal[16];
if (threadIdx.x < 12)
{
proj[threadIdx.x] = projD[threadIdx.x];
}
if (threadIdx.x < 16)
{
boxToGlobal[threadIdx.x] = boxToGlobalD[threadIdx.x];
}
__syncthreads();
const int x = blockIdx.x;
const int y = blockIdx.y;
const int z = threadIdx.x;
// voxel center in box coordinate frame
float voxCenterPoint[3];
voxCenterPoint[0] = xBegin + x*deltaX;
voxCenterPoint[1] = yBegin + y*deltaY;
voxCenterPoint[2] = zBegin + z*deltaZ;
const float voxelDiameter = sqrtf(deltaX*deltaX + deltaY*deltaY + deltaZ*deltaZ);
float voxCenterPointG[3];
transform(boxToGlobal, voxCenterPoint, voxCenterPointG);
float dir[3];
dir[0] = voxCenterPointG[0]-centerX;
dir[1] = voxCenterPointG[1]-centerY;
dir[2] = voxCenterPointG[2]-centerZ;
float maxComp = max(abs(dir[0]), max(abs(dir[1]), abs(dir[2])));
dir[0] = dir[0]/maxComp;
dir[1] = dir[1]/maxComp;
dir[2] = dir[2]/maxComp;
float directionWeight = sqrt(dir[0]*dir[0] + dir[1]*dir[1] + dir[2]*dir[2]);
float voxCenterProj[3];
project(proj, voxCenterPointG, voxCenterProj);
if (voxCenterProj[2] >= minDepth && voxCenterProj[2] <= maxDepth)
{
// perspective division
int xp,yp;
xp = round(voxCenterProj[0]/voxCenterProj[2]);
yp = round(voxCenterProj[1]/voxCenterProj[2]);
// test if inside image
if (xp >= 0 && xp < cols && yp >= 0 && yp < rows)
{
const int depthMapIdx = yp*cols + xp;
float depth = depthDataD[depthMapIdx];
// debug
// int voxInd = (z*xRes*yRes + y*xRes + x)*numClasses;
// voxelD[voxInd + freeSpaceClass] += 1;
// voxelD[voxInd] += 2;
// all transition approach
int voxInd = (z*xRes*yRes + y*xRes + x)*numClasses;
// float bestScore = classScoresDataD[depthMapIdx*numClasses];
// int bestClass = 0;
// for (int i = 1; i < numClasses; i++)
// {
// if (classScoresDataD[depthMapIdx*numClasses + i] > bestScore)
// {
// bestScore = classScoresDataD[depthMapIdx*numClasses + i];
// bestClass = i;
// }
// }
// if (bestClass < 3)
// {
int bestLabel = -1;
//int worstLabel = -1;
float bestScore = -1e6;
float secondBestScore = -1e6;
//float worstScore = 1e6;
float averageSolidScore = 0;
for (int c = 0; c < numClasses; c++)
{
if (classScoresDataD[depthMapIdx*numClasses + c] > bestScore)
{
secondBestScore = bestScore;
bestScore = classScoresDataD[depthMapIdx*numClasses + c];
bestLabel = c;
}
else if (classScoresDataD[depthMapIdx*numClasses + c] > secondBestScore)
{
secondBestScore = classScoresDataD[depthMapIdx*numClasses + c];
}
//if (c != freeSpaceClass && classScoresDataD[depthMapIdx*numClasses + c] < worstScore)
//{
//worstScore = classScoresDataD[depthMapIdx*numClasses + c];
//worstLabel = c;
//}
if (c != freeSpaceClass)
{
averageSolidScore += classScoresDataD[depthMapIdx*numClasses + c]*1.0/(numClasses - 1);
}
}
if (depth > 0)
{
float dist = voxCenterProj[2] - depth;
// float distToNextDisparityPlane = max(((depth*depth)*uncertFact), epsilon);
// float distForClassWeight = max(((depth*depth)*uncertFact), epsilon*2.0f/3.0f);
float distToNextDisparityPlane = epsilon;
float distForClassWeight1 = epsilon*1.0f/2.0f;
float distForClassWeight2 = epsilon;
float distForClassWeight3 = epsilon*1.0f/4.0f;
if (dist < distToNextDisparityPlane)
{
// inside band
if (fabs(dist) < distToNextDisparityPlane)
{
if (dist < 0)
{
const float weight = directionWeight*epsilon/distToNextDisparityPlane;
// in front of surface
for (int i = 0; i < numClasses; i++)
{
// if (i == 4)
// continue;
if (i != freeSpaceClass)
{
voxelD[voxInd + i] += beta*weight;
}
}
}
else
{
const float weight = directionWeight*epsilon/distToNextDisparityPlane;
// behind surface
for (int i = 0; i < numClasses; i++)
{
if (i != freeSpaceClass)
{
voxelD[voxInd + i] -= beta*weight;
}
// check if it is the right voxel for dist1
if (distForClassWeight1 < dist + voxelDiameter && dist < distForClassWeight1)
{
// check if any of the neighboring voxels would be in the range too,
// if so this one is only used if it is further from the camera
float voxCenterPointNextG[3];
float voxCenterPointNext[3];
float voxCenterNextProj[3];
bool useThis = true;
for (int wx = -1; wx <= 1; wx++)
for (int wy = -1; wy <= 1; wy++)
for (int wz = -1; wz <= 1; wz++)
{
voxCenterPointNext[0] = xBegin + (x+wx)*deltaX;
voxCenterPointNext[1] = yBegin + (y+wy)*deltaY;
voxCenterPointNext[2] = zBegin + (z+wz)*deltaZ;
transform(boxToGlobal, voxCenterPointNext, voxCenterPointNextG);
project(proj, voxCenterPointNextG, voxCenterNextProj);
int xpN = round(voxCenterNextProj[0]/voxCenterNextProj[2]);
int ypN = round(voxCenterNextProj[1]/voxCenterNextProj[2]);
if (xpN == xp && ypN == yp)
{
float distN = voxCenterNextProj[2] - depth;
if (distN > dist && distForClassWeight1 < distN + voxelDiameter && distN < distForClassWeight1)
{
useThis = false;
}
}
}
if (useThis)
{
if (i != freeSpaceClass)
{
voxelD[voxInd+i] -= 0.0f*rho*(classScoresDataD[depthMapIdx*numClasses + i]);
}
else
{
voxelD[voxInd+i] += 0.0f*rho*averageSolidScore;
}
}
}
// check if it is the right voxel for dist2
if (distForClassWeight2 < dist + voxelDiameter && dist < distForClassWeight2)
{
// check if any of the neighboring voxels would be in the range too,
// if so this one is only used if it is further from the camera
float voxCenterPointNextG[3];
float voxCenterPointNext[3];
float voxCenterNextProj[3];
bool useThis = true;
for (int wx = -1; wx <= 1; wx++)
for (int wy = -1; wy <= 1; wy++)
for (int wz = -1; wz <= 1; wz++)
{
voxCenterPointNext[0] = xBegin + (x+wx)*deltaX;
voxCenterPointNext[1] = yBegin + (y+wy)*deltaY;
voxCenterPointNext[2] = zBegin + (z+wz)*deltaZ;
transform(boxToGlobal, voxCenterPointNext, voxCenterPointNextG);
project(proj, voxCenterPointNextG, voxCenterNextProj);
int xpN = round(voxCenterNextProj[0]/voxCenterNextProj[2]);
int ypN = round(voxCenterNextProj[1]/voxCenterNextProj[2]);
if (xpN == xp && ypN == yp)
{
float distN = voxCenterNextProj[2] - depth;
if (distN > dist && distForClassWeight2 < distN + voxelDiameter && distN < distForClassWeight2)
{
useThis = false;
}
}
}
if (useThis)
{
if (i != freeSpaceClass)
{
voxelD[voxInd+i] -= 1.0f*rho*(classScoresDataD[depthMapIdx*numClasses + i]);
}
else
{
voxelD[voxInd+i] += 1.0f*rho*averageSolidScore;
}
}
}
// check if it is the right voxel for dist3
if (distForClassWeight3 < dist + voxelDiameter && dist < distForClassWeight3)
{
// check if any of the neighboring voxels would be in the range too,
// if so this one is only used if it is further from the camera
float voxCenterPointNextG[3];
float voxCenterPointNext[3];
float voxCenterNextProj[3];
bool useThis = true;
for (int wx = -1; wx <= 1; wx++)
for (int wy = -1; wy <= 1; wy++)
for (int wz = -1; wz <= 1; wz++)
{
voxCenterPointNext[0] = xBegin + (x+wx)*deltaX;
voxCenterPointNext[1] = yBegin + (y+wy)*deltaY;
voxCenterPointNext[2] = zBegin + (z+wz)*deltaZ;
transform(boxToGlobal, voxCenterPointNext, voxCenterPointNextG);
project(proj, voxCenterPointNextG, voxCenterNextProj);
int xpN = round(voxCenterNextProj[0]/voxCenterNextProj[2]);
int ypN = round(voxCenterNextProj[1]/voxCenterNextProj[2]);
if (xpN == xp && ypN == yp)
{
float distN = voxCenterNextProj[2] - depth;
if (distN > dist && distForClassWeight3 < distN + voxelDiameter && distN < distForClassWeight3)
{
useThis = false;
}
}
}
if (useThis)
{
if (i != freeSpaceClass)
{
voxelD[voxInd+i] -= 0.0f*rho*(classScoresDataD[depthMapIdx*numClasses + i]);
}
else
{
voxelD[voxInd+i] += 0.0f*rho*averageSolidScore;
}
}
}
}
}
// viewing ray
}
else
{
float weight = directionWeight*eta*epsilon/distToNextDisparityPlane;
voxelD[voxInd+freeSpaceClass] -= beta*weight;
}
// else
// {
// const float weightInBand = epsilon/distToNextDisparityPlane;
// const float distBehindNextPlane = dist - distToNextDisparityPlane;
// const float weight = weightInBand*exp(-(distBehindNextPlane)/(epsilon*0.5));
// // far behind lets do an exponential decay to get this whole thing working
// for (int i = 0; i < numClasses; i++)
// {
// if (i != freeSpaceClass)
// {
// voxelD[voxInd + i] -= weight;
// }
// }
// }
}
}
// this is done even if we have a depth for the pixel
{
// enter weights for free space originating from sky
// if (exp(classScoresDataD[depthMapIdx*numClasses + freeSpaceClass]) > 0.95)
// {
if( bestLabel == freeSpaceClass)
{
// find second best class
// float secondBestScore = -1e6;
// for (int c = 0; c < numClasses; c++)
// {
// if (c != freeSpaceClass && classScoresDataD[depthMapIdx+numClasses + c] > secondBestScore)
// {
// secondBestScore = classScoresDataD[depthMapIdx+numClasses + c];
// }
// }
voxelD[voxInd+freeSpaceClass] -= directionWeight*rho*skyWeight*(bestScore - secondBestScore);
}
// float sky_weight = max(0.0f, classScoresDataD[depthMapIdx*numClasses + freeSpaceClass]);
// voxelD[voxInd+freeSpaceClass] -= (50.0f)/((xRes + yRes + zRes)) * directionWeight * rho *sky_weight;
// }
}
// }
//// only free space / occupied space transition approach
// if (depth > 0)
// {
// float dist = voxCenterProj[2] - depth;
// float distToNextDisparityPlane = max(((depth*depth)*uncertFact), epsilon);
// int voxInd = (z*xRes*yRes + y*xRes + x)*numClasses;
// if (dist < distToNextDisparityPlane)
// {
// // inside band
// if (fabs(dist) < distToNextDisparityPlane)
// {
// if (dist < 0)
// {
// const float weight = epsilon/distToNextDisparityPlane;
// voxelD[voxInd + freeSpaceClass] -= weight;
//// for (int i = 0; i < numClasses; i++)
//// {
//// if (i != freeSpaceClass)
//// {
//// voxelD[voxInd + i] += rho*(classScoresDataD[depthMapIdx*numClasses + freeSpaceClass] - classScoresDataD[depthMapIdx*numClasses + i]);
//// }
//// }
// }
// else
// {
// // behind surface
// const float weight = epsilon/distToNextDisparityPlane;
// voxelD[voxInd + freeSpaceClass] += weight;
// for (int i = 0; i < numClasses; i++)
// {
// if (i != freeSpaceClass)
// {
// voxelD[voxInd + i] += rho*(classScoresDataD[depthMapIdx*numClasses + freeSpaceClass] - classScoresDataD[depthMapIdx*numClasses + i]);
// }
// }
// }
// // viewing ray
// }
// else
// {
// float weight = eta*epsilon/distToNextDisparityPlane;
// voxelD[voxInd+freeSpaceClass] -= weight;
//// for (int i = 0; i < numClasses; i++)
//// {
//// if (i != freeSpaceClass)
//// {
//// voxelD[voxInd + i] += rho*(classScoresDataD[depthMapIdx*numClasses + freeSpaceClass] - classScoresDataD[depthMapIdx*numClasses + i]);
//// }
//// }
// }
// }
// }
}
}
}
void integrateDepthMap(float* voxelD, const float* boxToGlobalD,
int xRes, int yRes, int zRes,
int numClasses, int freeSpaceClass,
float xBegin, float yBegin, float zBegin,
float deltaX, float deltaY, float deltaZ,
const float* classScoresDataD, const float* depthDataD, int rows, int cols,
float maxDepth, float minDepth,
float epsilon, float eta, float uncertFact, float rho,
const float* projD, float centerX, float centerY, float centerZ, float beta, float skyWeight)
{
dim3 dimGrid(xRes, yRes);
dim3 dimBlock(zRes);
std::cout << "Eta = " << eta << std::endl;
integrationKernel<<<dimGrid,dimBlock>>>(voxelD, boxToGlobalD,
xRes, yRes, zRes,
numClasses, freeSpaceClass,
xBegin, yBegin, zBegin,
deltaX, deltaY, deltaZ,
classScoresDataD, depthDataD, rows, cols,
maxDepth, minDepth,
epsilon, eta, uncertFact, rho,
projD, centerX, centerY, centerZ, beta, skyWeight);
D3D_CUDA_CHECK_ERROR
}
}
}
|
23ea030635d798bf942d1b4540c7b9c4a3f5030f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include <hip/hip_runtime.h>
typedef struct
{
float posx;
float posy;
float range;
float temp;
}
heatsrc_t;
typedef struct
{
unsigned maxiter; // maximum number of iterations
unsigned resolution; // spatial resolution
int algorithm; // 0=>Jacobi, 1=>Gauss
unsigned visres; // visualization resolution
float *u, *uhelp;
float *uvis;
unsigned numsrcs; // number of heat sources
heatsrc_t *heatsrcs;
}
algoparam_t;
// function declarations
int read_input( FILE *infile, algoparam_t *param );
void print_params( algoparam_t *param );
int initialize( algoparam_t *param );
int finalize( algoparam_t *param );
void write_image( FILE * f, float *u,
unsigned sizex, unsigned sizey );
int coarsen(float *uold, unsigned oldx, unsigned oldy ,
float *unew, unsigned newx, unsigned newy );
__global__ void gpu_Heat (float *h, float *g, float *d,int N);
__global__ void gpu_HeatReduction(float *res, float *res1);
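// gpu_Heat and gpu_HeatReduction are only declared here; their definitions
// live in a separate kernel file that is not part of this dump. gpu_Heat is
// expected to write the per-point squared Jacobi update into d, roughly along
// these lines (an illustrative sketch, not the project's actual kernel):
// __global__ void gpu_Heat(float *h, float *g, float *d, int N) {
//     int j = blockIdx.x * blockDim.x + threadIdx.x;
//     int i = blockIdx.y * blockDim.y + threadIdx.y;
//     if (i > 0 && i < N-1 && j > 0 && j < N-1) {
//         g[i*N+j] = 0.25f * (h[i*N+(j-1)] + h[i*N+(j+1)] +
//                             h[(i-1)*N+j] + h[(i+1)*N+j]);
//         float diff = g[i*N+j] - h[i*N+j];
//         d[i*N+j] = diff * diff;
//     }
// }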
#define NB 8
#define min(a,b) ( ((a) < (b)) ? (a) : (b) )
float cpu_residual (float *u, float *utmp, unsigned sizex, unsigned sizey)
{
float diff, sum=0.0;
for (int i=1; i<sizex-1; i++)
for (int j=1; j<sizey-1; j++) {
diff = utmp[i*sizey+j] - u[i*sizey + j];
sum += diff * diff;
}
return(sum);
}
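// Blocked Jacobi sweep on the host: the grid is tiled into NB x NB blocks for
// cache locality, and the return value is the accumulated squared update,
// which the caller uses as the convergence residual.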
float cpu_jacobi (float *u, float *utmp, unsigned sizex, unsigned sizey)
{
float diff, sum=0.0;
int nbx, bx, nby, by;
nbx = NB;
bx = sizex/nbx;
nby = NB;
by = sizey/nby;
for (int ii=0; ii<nbx; ii++)
for (int jj=0; jj<nby; jj++)
for (int i=1+ii*bx; i<=min((ii+1)*bx, sizex-2); i++)
for (int j=1+jj*by; j<=min((jj+1)*by, sizey-2); j++) {
utmp[i*sizey+j]= 0.25 * (u[ i*sizey + (j-1) ]+ // left
u[ i*sizey + (j+1) ]+ // right
u[ (i-1)*sizey + j ]+ // top
u[ (i+1)*sizey + j ]); // bottom
diff = utmp[i*sizey+j] - u[i*sizey + j];
sum += diff * diff;
}
return(sum);
}
void usage( char *s )
{
fprintf(stderr, "Usage: %s <input file> -t threads -b blocks\n", s);
fprintf(stderr, " -t number of threads per block in each dimension (e.g. 16)\n");
}
int main( int argc, char *argv[] ) {
unsigned iter;
FILE *infile, *resfile;
char *resfilename;
// algorithmic parameters
algoparam_t param;
int np;
// check arguments
if( argc < 4 ) {
usage( argv[0] );
return 1;
}
// check input file
if( !(infile=fopen(argv[1], "r")) ) {
fprintf(stderr,
"\nError: Cannot open \"%s\" for reading.\n\n", argv[1]);
usage(argv[0]);
return 1;
}
// check result file
resfilename="heat.ppm";
if( !(resfile=fopen(resfilename, "w")) ) {
fprintf(stderr,
"\nError: Cannot open \"%s\" for writing.\n\n",
resfilename);
usage(argv[0]);
return 1;
}
// check input
if( !read_input(infile, ¶m) )
{
fprintf(stderr, "\nError: Error parsing input file.\n\n");
usage(argv[0]);
return 1;
}
// full size (param.resolution are only the inner points)
np = param.resolution + 2;
int Grid_Dim, Block_Dim; // Grid and Block structure values
if (strcmp(argv[2], "-t")==0) {
Block_Dim = atoi(argv[3]);
Grid_Dim = np/Block_Dim + ((np%Block_Dim)!=0);
if ((Block_Dim*Block_Dim) > 512) {
printf("Error -- too many threads in block, try again\n");
return 1;
}
}
else {
fprintf(stderr, "Usage: %s <input file> -t threads -b blocks\n", argv[0]);
fprintf(stderr, " -t number of threads per block in each dimension (e.g. 16)\n");
return 0;
}
fprintf(stderr, "\nSolving Heat equation on the CPU and the GPU\n");
fprintf(stderr, "--------------------------------------------\n");
print_params(¶m);
fprintf(stdout, "\nExecution on CPU (sequential)\n-----------------------------\n");
if( !initialize(¶m) )
{
fprintf(stderr, "Error in Solver initialization.\n\n");
return 1;
}
// starting time
float elapsed_time_ms; // which is applicable for asynchronous code also
hipEvent_t start, stop; // using cuda events to measure time
hipEventCreate( &start ); // instrument code to measure start time
hipEventCreate( &stop );
hipEventRecord( start, 0 );
hipEventSynchronize( start );
iter = 0;
float residual;
while(1) {
residual = cpu_jacobi(param.u, param.uhelp, np, np);
float * tmp = param.u;
param.u = param.uhelp;
param.uhelp = tmp;
//residual = cpu_residual (param.u, param.uhelp, np, np);
//printf("residual: %.6f \n", residual);
iter++;
// solution good enough ?
if (residual < 0.00005) break;
// max. iteration reached ? (no limit with maxiter=0)
if (iter>=param.maxiter) break;
}
hipEventRecord( stop, 0 ); // instrument code to measure end time
hipEventSynchronize( stop );
hipEventElapsedTime( &elapsed_time_ms, start, stop );
// Flop count after iter iterations
float flop = iter * 11.0 * param.resolution * param.resolution;
fprintf(stdout, "Time on CPU in ms.= %f ", elapsed_time_ms);
fprintf(stdout, "(%3.3f GFlop => %6.2f MFlop/s)\n",
flop/1000000000.0,
flop/elapsed_time_ms/1000);
fprintf(stdout, "Convergence to residual=%f: %d iterations\n", residual, iter);
finalize( ¶m );
fprintf(stdout, "\nExecution on GPU\n----------------\n");
fprintf(stderr, "Number of threads per block in each dimension = %d\n", Block_Dim);
fprintf(stderr, "Number of blocks per grid in each dimension = %d\n", Grid_Dim);
if( !initialize(¶m) )
{
fprintf(stderr, "Error in Solver initialization.\n\n");
return 1;
}
dim3 Grid(Grid_Dim, Grid_Dim);
dim3 Block(Block_Dim, Block_Dim);
// starting time
hipEventRecord( start, 0 );
hipEventSynchronize( start );
float *dev_u, *dev_uhelp, *dev_res, *res, *result, *dev_result;
res = (float*)calloc(sizeof(float), np*np);
result = (float*)calloc(sizeof(float), np);
// TODO: Allocation on GPU for matrices u and uhelp
hipMalloc( &dev_u, sizeof(float)*(np*np));
hipMalloc( &dev_uhelp, sizeof(float)*(np*np));
hipMalloc( &dev_res, sizeof(float)*(np*np));
hipMalloc( &dev_result, sizeof(float)*np);
// TODO: Copy initial values in u and uhelp from host to GPU
hipMemcpy( dev_u,param.u, sizeof(float)*(np*np), hipMemcpyHostToDevice);
hipMemcpy( dev_uhelp, param.uhelp, sizeof(float)*(np*np), hipMemcpyHostToDevice);
hipMemcpy( dev_res, res, sizeof(float)*(np*np), hipMemcpyHostToDevice);
hipMemcpy( dev_result, result, sizeof(float)*(np), hipMemcpyHostToDevice);
iter = 0;
while(1) {
hipLaunchKernelGGL(( gpu_Heat), dim3(Grid),dim3(Block), 0, 0, dev_u, dev_uhelp, dev_res, np);
hipDeviceSynchronize(); // wait for all threads to complete
// TODO: residual is computed on host, we need to get from GPU values computed in u and uhelp
hipMemcpy( res, dev_res, sizeof(float)*(np*np), hipMemcpyDeviceToHost);
//for(int i=0;i<np;i++) { printf("%.6f ", res[i*2]); }
hipLaunchKernelGGL(( gpu_HeatReduction), dim3(np),dim3(np),np*sizeof(float), 0, dev_res, dev_result);
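// The reduction is launched with np blocks of np threads and np*sizeof(float)
// bytes of dynamic shared memory, presumably one block per row of dev_res;
// the np partial sums are copied back and added up on the host below. Note
// this requires np <= 1024 (threads per block).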
hipDeviceSynchronize();
hipMemcpy( result, dev_result, sizeof(float)*np, hipMemcpyDeviceToHost);
//hipMemcpy( param.uhelp, dev_uhelp, sizeof(float)*(np*np), hipMemcpyDeviceToHost);
//residual = cpu_residual(param.u, param.uhelp, np, np);
float * tmp = dev_u;
dev_u = dev_uhelp;
dev_uhelp = tmp;
iter++;
float sum =0.0;
for(int i=0;i<np;i++) {
// printf("Result[%d]=%.6f\n",i,result[i]);
sum += result[i];
}
residual = sum;
// solution good enough ?
if (residual < 0.00005) break;
// max. iteration reached ? (no limit with maxiter=0)
if (iter>=param.maxiter) break;
}
// TODO: get result matrix from GPU
hipMemcpy( param.u, dev_u, sizeof(float)*(np*np), hipMemcpyDeviceToHost);
// TODO: free memory used in GPU
hipFree( dev_u ); hipFree( dev_uhelp ); hipFree( dev_res ); hipFree( dev_result );
free( res ); free( result );
hipEventRecord( stop, 0 ); // instrument code to measure end time
hipEventSynchronize( stop );
hipEventElapsedTime( &elapsed_time_ms, start, stop );
fprintf(stdout, "\nTime on GPU in ms. = %f ", elapsed_time_ms);
fprintf(stdout, "(%3.3f GFlop => %6.2f MFlop/s)\n",
flop/1000000000.0,
flop/elapsed_time_ms/1000);
fprintf(stdout, "Convergence to residual=%f: %d iterations\n", residual, iter);
hipEventDestroy(start);
hipEventDestroy(stop);
// for plot...
coarsen( param.u, np, np,
param.uvis, param.visres+2, param.visres+2 );
write_image( resfile, param.uvis,
param.visres+2,
param.visres+2 );
finalize( ¶m );
return 0;
}
/*
* Initialize the iterative solver
* - allocate memory for matrices
* - set boundary conditions according to configuration
*/
int initialize( algoparam_t *param )
{
int i, j;
float dist;
// total number of points (including border)
const int np = param->resolution + 2;
//
// allocate memory
//
(param->u) = (float*)calloc( sizeof(float),np*np );
(param->uhelp) = (float*)calloc( sizeof(float),np*np );
(param->uvis) = (float*)calloc( sizeof(float),
(param->visres+2) *
(param->visres+2) );
if( !(param->u) || !(param->uhelp) || !(param->uvis) )
{
fprintf(stderr, "Error: Cannot allocate memory\n");
return 0;
}
for( i=0; i<param->numsrcs; i++ )
{
/* top row */
for( j=0; j<np; j++ )
{
dist = sqrt( pow((float)j/(float)(np-1) -
param->heatsrcs[i].posx, 2)+
pow(param->heatsrcs[i].posy, 2));
if( dist <= param->heatsrcs[i].range )
{
(param->u)[j] +=
(param->heatsrcs[i].range-dist) /
param->heatsrcs[i].range *
param->heatsrcs[i].temp;
}
}
/* bottom row */
for( j=0; j<np; j++ )
{
dist = sqrt( pow((float)j/(float)(np-1) -
param->heatsrcs[i].posx, 2)+
pow(1-param->heatsrcs[i].posy, 2));
if( dist <= param->heatsrcs[i].range )
{
(param->u)[(np-1)*np+j]+=
(param->heatsrcs[i].range-dist) /
param->heatsrcs[i].range *
param->heatsrcs[i].temp;
}
}
/* leftmost column */
for( j=1; j<np-1; j++ )
{
dist = sqrt( pow(param->heatsrcs[i].posx, 2)+
pow((float)j/(float)(np-1) -
param->heatsrcs[i].posy, 2));
if( dist <= param->heatsrcs[i].range )
{
(param->u)[ j*np ]+=
(param->heatsrcs[i].range-dist) /
param->heatsrcs[i].range *
param->heatsrcs[i].temp;
}
}
/* rightmost column */
for( j=1; j<np-1; j++ )
{
dist = sqrt( pow(1-param->heatsrcs[i].posx, 2)+
pow((float)j/(float)(np-1) -
param->heatsrcs[i].posy, 2));
if( dist <= param->heatsrcs[i].range )
{
(param->u)[ j*np+(np-1) ]+=
(param->heatsrcs[i].range-dist) /
param->heatsrcs[i].range *
param->heatsrcs[i].temp;
}
}
}
// Copy u into uhelp
float *putmp, *pu;
pu = param->u;
putmp = param->uhelp;
for( j=0; j<np; j++ )
for( i=0; i<np; i++ )
*putmp++ = *pu++;
return 1;
}
/*
* free used memory
*/
int finalize( algoparam_t *param )
{
if( param->u ) {
free(param->u);
param->u = 0;
}
if( param->uhelp ) {
free(param->uhelp);
param->uhelp = 0;
}
if( param->uvis ) {
free(param->uvis);
param->uvis = 0;
}
return 1;
}
/*
* write the given temperature u matrix to rgb values
* and write the resulting image to file f
*/
void write_image( FILE * f, float *u,
unsigned sizex, unsigned sizey )
{
// RGB table
unsigned char r[1024], g[1024], b[1024];
int i, j, k;
float min, max;
j=1023;
// prepare RGB table
for( i=0; i<256; i++ )
{
r[j]=255; g[j]=i; b[j]=0;
j--;
}
for( i=0; i<256; i++ )
{
r[j]=255-i; g[j]=255; b[j]=0;
j--;
}
for( i=0; i<256; i++ )
{
r[j]=0; g[j]=255; b[j]=i;
j--;
}
for( i=0; i<256; i++ )
{
r[j]=0; g[j]=255-i; b[j]=255;
j--;
}
min=FLT_MAX;
max=-FLT_MAX;
// find minimum and maximum
for( i=0; i<sizey; i++ )
{
for( j=0; j<sizex; j++ )
{
if( u[i*sizex+j]>max )
max=u[i*sizex+j];
if( u[i*sizex+j]<min )
min=u[i*sizex+j];
}
}
fprintf(f, "P3\n");
fprintf(f, "%u %u\n", sizex, sizey);
fprintf(f, "%u\n", 255);
for( i=0; i<sizey; i++ )
{
for( j=0; j<sizex; j++ )
{
k=(int)(1023.0*(u[i*sizex+j]-min)/(max-min));
fprintf(f, "%d %d %d ", r[k], g[k], b[k]);
}
fprintf(f, "\n");
}
}
int coarsen( float *uold, unsigned oldx, unsigned oldy ,
float *unew, unsigned newx, unsigned newy )
{
int i, j;
int stepx;
int stepy;
int stopx = newx;
int stopy = newy;
if (oldx>newx)
stepx=oldx/newx;
else {
stepx=1;
stopx=oldx;
}
if (oldy>newy)
stepy=oldy/newy;
else {
stepy=1;
stopy=oldy;
}
// NOTE: this only takes the top-left corner,
// and doesn't do any real coarsening
for( i=0; i<stopy-1; i++ )
{
for( j=0; j<stopx-1; j++ )
{
unew[i*newx+j]=uold[i*oldx*stepy+j*stepx];
}
}
return 1;
}
#define BUFSIZE 100
int read_input( FILE *infile, algoparam_t *param )
{
int i, n;
char buf[BUFSIZE];
fgets(buf, BUFSIZE, infile);
n = sscanf( buf, "%u", &(param->maxiter) );
if( n!=1)
return 0;
fgets(buf, BUFSIZE, infile);
n = sscanf( buf, "%u", &(param->resolution) );
if( n!=1 )
return 0;
param->visres = param->resolution;
fgets(buf, BUFSIZE, infile);
n = sscanf(buf, "%u", &(param->numsrcs) );
if( n!=1 )
return 0;
(param->heatsrcs) =
(heatsrc_t*) malloc( sizeof(heatsrc_t) * (param->numsrcs) );
for( i=0; i<param->numsrcs; i++ )
{
fgets(buf, BUFSIZE, infile);
n = sscanf( buf, "%f %f %f %f",
&(param->heatsrcs[i].posx),
&(param->heatsrcs[i].posy),
&(param->heatsrcs[i].range),
&(param->heatsrcs[i].temp) );
if( n!=4 )
return 0;
}
return 1;
}
void print_params( algoparam_t *param )
{
int i;
fprintf(stdout, "Iterations : %u\n", param->maxiter);
fprintf(stdout, "Resolution : %u\n", param->resolution);
fprintf(stdout, "Num. Heat sources : %u\n", param->numsrcs);
for( i=0; i<param->numsrcs; i++ )
{
fprintf(stdout, " %2d: (%2.2f, %2.2f) %2.2f %2.2f \n",
i+1,
param->heatsrcs[i].posx,
param->heatsrcs[i].posy,
param->heatsrcs[i].range,
param->heatsrcs[i].temp );
}
}
|
23ea030635d798bf942d1b4540c7b9c4a3f5030f.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include <cuda.h>
typedef struct
{
float posx;
float posy;
float range;
float temp;
}
heatsrc_t;
typedef struct
{
unsigned maxiter; // maximum number of iterations
unsigned resolution; // spatial resolution
int algorithm; // 0=>Jacobi, 1=>Gauss
unsigned visres; // visualization resolution
float *u, *uhelp;
float *uvis;
unsigned numsrcs; // number of heat sources
heatsrc_t *heatsrcs;
}
algoparam_t;
// function declarations
int read_input( FILE *infile, algoparam_t *param );
void print_params( algoparam_t *param );
int initialize( algoparam_t *param );
int finalize( algoparam_t *param );
void write_image( FILE * f, float *u,
unsigned sizex, unsigned sizey );
int coarsen(float *uold, unsigned oldx, unsigned oldy ,
float *unew, unsigned newx, unsigned newy );
__global__ void gpu_Heat (float *h, float *g, float *d,int N);
__global__ void gpu_HeatReduction(float *res, float *res1);
#define NB 8
#define min(a,b) ( ((a) < (b)) ? (a) : (b) )
float cpu_residual (float *u, float *utmp, unsigned sizex, unsigned sizey)
{
float diff, sum=0.0;
for (int i=1; i<sizex-1; i++)
for (int j=1; j<sizey-1; j++) {
diff = utmp[i*sizey+j] - u[i*sizey + j];
sum += diff * diff;
}
return(sum);
}
float cpu_jacobi (float *u, float *utmp, unsigned sizex, unsigned sizey)
{
float diff, sum=0.0;
int nbx, bx, nby, by;
nbx = NB;
bx = sizex/nbx;
nby = NB;
by = sizey/nby;
for (int ii=0; ii<nbx; ii++)
for (int jj=0; jj<nby; jj++)
for (int i=1+ii*bx; i<=min((ii+1)*bx, sizex-2); i++)
for (int j=1+jj*by; j<=min((jj+1)*by, sizey-2); j++) {
utmp[i*sizey+j]= 0.25 * (u[ i*sizey + (j-1) ]+ // left
u[ i*sizey + (j+1) ]+ // right
u[ (i-1)*sizey + j ]+ // top
u[ (i+1)*sizey + j ]); // bottom
diff = utmp[i*sizey+j] - u[i*sizey + j];
sum += diff * diff;
}
return(sum);
}
void usage( char *s )
{
fprintf(stderr, "Usage: %s <input file> -t threads -b blocks\n", s);
fprintf(stderr, " -t number of threads per block in each dimension (e.g. 16)\n");
}
int main( int argc, char *argv[] ) {
unsigned iter;
FILE *infile, *resfile;
char *resfilename;
// algorithmic parameters
algoparam_t param;
int np;
// check arguments
if( argc < 4 ) {
usage( argv[0] );
return 1;
}
// check input file
if( !(infile=fopen(argv[1], "r")) ) {
fprintf(stderr,
"\nError: Cannot open \"%s\" for reading.\n\n", argv[1]);
usage(argv[0]);
return 1;
}
// check result file
resfilename="heat.ppm";
if( !(resfile=fopen(resfilename, "w")) ) {
fprintf(stderr,
"\nError: Cannot open \"%s\" for writing.\n\n",
resfilename);
usage(argv[0]);
return 1;
}
// check input
if( !read_input(infile, ¶m) )
{
fprintf(stderr, "\nError: Error parsing input file.\n\n");
usage(argv[0]);
return 1;
}
// full size (param.resolution are only the inner points)
np = param.resolution + 2;
int Grid_Dim, Block_Dim; // Grid and Block structure values
if (strcmp(argv[2], "-t")==0) {
Block_Dim = atoi(argv[3]);
Grid_Dim = np/Block_Dim + ((np%Block_Dim)!=0);
if ((Block_Dim*Block_Dim) > 512) {
printf("Error -- too many threads in block, try again\n");
return 1;
}
}
else {
fprintf(stderr, "Usage: %s <input file> -t threads -b blocks\n", argv[0]);
fprintf(stderr, " -t number of threads per block in each dimension (e.g. 16)\n");
return 0;
}
fprintf(stderr, "\nSolving Heat equation on the CPU and the GPU\n");
fprintf(stderr, "--------------------------------------------\n");
print_params(¶m);
fprintf(stdout, "\nExecution on CPU (sequential)\n-----------------------------\n");
if( !initialize(¶m) )
{
fprintf(stderr, "Error in Solver initialization.\n\n");
return 1;
}
// starting time
float elapsed_time_ms; // which is applicable for asynchronous code also
cudaEvent_t start, stop; // using cuda events to measure time
cudaEventCreate( &start ); // instrument code to measure start time
cudaEventCreate( &stop );
cudaEventRecord( start, 0 );
cudaEventSynchronize( start );
iter = 0;
float residual;
while(1) {
residual = cpu_jacobi(param.u, param.uhelp, np, np);
float * tmp = param.u;
param.u = param.uhelp;
param.uhelp = tmp;
//residual = cpu_residual (param.u, param.uhelp, np, np);
//printf("residual: %.6f \n", residual);
iter++;
// solution good enough ?
if (residual < 0.00005) break;
// max. iteration reached ? (no limit with maxiter=0)
if (iter>=param.maxiter) break;
}
cudaEventRecord( stop, 0 ); // instrument code to measure end time
cudaEventSynchronize( stop );
cudaEventElapsedTime( &elapsed_time_ms, start, stop );
// Flop count after iter iterations
float flop = iter * 11.0 * param.resolution * param.resolution;
fprintf(stdout, "Time on CPU in ms.= %f ", elapsed_time_ms);
fprintf(stdout, "(%3.3f GFlop => %6.2f MFlop/s)\n",
flop/1000000000.0,
flop/elapsed_time_ms/1000);
fprintf(stdout, "Convergence to residual=%f: %d iterations\n", residual, iter);
finalize( ¶m );
fprintf(stdout, "\nExecution on GPU\n----------------\n");
fprintf(stderr, "Number of threads per block in each dimension = %d\n", Block_Dim);
fprintf(stderr, "Number of blocks per grid in each dimension = %d\n", Grid_Dim);
if( !initialize(¶m) )
{
fprintf(stderr, "Error in Solver initialization.\n\n");
return 1;
}
dim3 Grid(Grid_Dim, Grid_Dim);
dim3 Block(Block_Dim, Block_Dim);
// starting time
cudaEventRecord( start, 0 );
cudaEventSynchronize( start );
float *dev_u, *dev_uhelp, *dev_res, *res, *result, *dev_result;
res = (float*)calloc(sizeof(float), np*np);
result = (float*)calloc(sizeof(float), np);
// TODO: Allocation on GPU for matrices u and uhelp
cudaMalloc( &dev_u, sizeof(float)*(np*np));
cudaMalloc( &dev_uhelp, sizeof(float)*(np*np));
cudaMalloc( &dev_res, sizeof(float)*(np*np));
cudaMalloc( &dev_result, sizeof(float)*np);
// TODO: Copy initial values in u and uhelp from host to GPU
cudaMemcpy( dev_u,param.u, sizeof(float)*(np*np), cudaMemcpyHostToDevice);
cudaMemcpy( dev_uhelp, param.uhelp, sizeof(float)*(np*np), cudaMemcpyHostToDevice);
cudaMemcpy( dev_res, res, sizeof(float)*(np*np), cudaMemcpyHostToDevice);
cudaMemcpy( dev_result, result, sizeof(float)*(np), cudaMemcpyHostToDevice);
iter = 0;
while(1) {
gpu_Heat<<<Grid,Block>>>(dev_u, dev_uhelp, dev_res, np);
cudaDeviceSynchronize(); // wait for all threads to complete
// TODO: residual is computed on host, we need to get from GPU values computed in u and uhelp
cudaMemcpy( res, dev_res, sizeof(float)*(np*np), cudaMemcpyDeviceToHost);
//for(int i=0;i<np;i++) { printf("%.6f ", res[i*2]); }
gpu_HeatReduction<<<np,np,np*sizeof(float)>>>(dev_res, dev_result);
cudaDeviceSynchronize();
cudaMemcpy( result, dev_result, sizeof(float)*np, cudaMemcpyDeviceToHost);
//cudaMemcpy( param.uhelp, dev_uhelp, sizeof(float)*(np*np), cudaMemcpyDeviceToHost);
//residual = cpu_residual(param.u, param.uhelp, np, np);
float * tmp = dev_u;
dev_u = dev_uhelp;
dev_uhelp = tmp;
iter++;
float sum =0.0;
for(int i=0;i<np;i++) {
// printf("Result[%d]=%.6f\n",i,result[i]);
sum += result[i];
}
residual = sum;
// solution good enough ?
if (residual < 0.00005) break;
// max. iteration reached ? (no limit with maxiter=0)
if (iter>=param.maxiter) break;
}
// TODO: get result matrix from GPU
cudaMemcpy( param.u, dev_u, sizeof(float)*(np*np), cudaMemcpyDeviceToHost);
// TODO: free memory used in GPU
cudaFree( dev_u ); cudaFree( dev_uhelp ); cudaFree( dev_res ); cudaFree( dev_result );
free( res ); free( result );
cudaEventRecord( stop, 0 ); // instrument code to measure end time
cudaEventSynchronize( stop );
cudaEventElapsedTime( &elapsed_time_ms, start, stop );
fprintf(stdout, "\nTime on GPU in ms. = %f ", elapsed_time_ms);
fprintf(stdout, "(%3.3f GFlop => %6.2f MFlop/s)\n",
flop/1000000000.0,
flop/elapsed_time_ms/1000);
fprintf(stdout, "Convergence to residual=%f: %d iterations\n", residual, iter);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// for plot...
coarsen( param.u, np, np,
param.uvis, param.visres+2, param.visres+2 );
write_image( resfile, param.uvis,
param.visres+2,
param.visres+2 );
finalize( ¶m );
return 0;
}
/*
* Initialize the iterative solver
* - allocate memory for matrices
* - set boundary conditions according to configuration
*/
int initialize( algoparam_t *param )
{
int i, j;
float dist;
// total number of points (including border)
const int np = param->resolution + 2;
//
// allocate memory
//
(param->u) = (float*)calloc( sizeof(float),np*np );
(param->uhelp) = (float*)calloc( sizeof(float),np*np );
(param->uvis) = (float*)calloc( sizeof(float),
(param->visres+2) *
(param->visres+2) );
if( !(param->u) || !(param->uhelp) || !(param->uvis) )
{
fprintf(stderr, "Error: Cannot allocate memory\n");
return 0;
}
for( i=0; i<param->numsrcs; i++ )
{
/* top row */
for( j=0; j<np; j++ )
{
dist = sqrt( pow((float)j/(float)(np-1) -
param->heatsrcs[i].posx, 2)+
pow(param->heatsrcs[i].posy, 2));
if( dist <= param->heatsrcs[i].range )
{
(param->u)[j] +=
(param->heatsrcs[i].range-dist) /
param->heatsrcs[i].range *
param->heatsrcs[i].temp;
}
}
/* bottom row */
for( j=0; j<np; j++ )
{
dist = sqrt( pow((float)j/(float)(np-1) -
param->heatsrcs[i].posx, 2)+
pow(1-param->heatsrcs[i].posy, 2));
if( dist <= param->heatsrcs[i].range )
{
(param->u)[(np-1)*np+j]+=
(param->heatsrcs[i].range-dist) /
param->heatsrcs[i].range *
param->heatsrcs[i].temp;
}
}
/* leftmost column */
for( j=1; j<np-1; j++ )
{
dist = sqrt( pow(param->heatsrcs[i].posx, 2)+
pow((float)j/(float)(np-1) -
param->heatsrcs[i].posy, 2));
if( dist <= param->heatsrcs[i].range )
{
(param->u)[ j*np ]+=
(param->heatsrcs[i].range-dist) /
param->heatsrcs[i].range *
param->heatsrcs[i].temp;
}
}
/* rightmost column */
for( j=1; j<np-1; j++ )
{
dist = sqrt( pow(1-param->heatsrcs[i].posx, 2)+
pow((float)j/(float)(np-1) -
param->heatsrcs[i].posy, 2));
if( dist <= param->heatsrcs[i].range )
{
(param->u)[ j*np+(np-1) ]+=
(param->heatsrcs[i].range-dist) /
param->heatsrcs[i].range *
param->heatsrcs[i].temp;
}
}
}
// Copy u into uhelp
float *putmp, *pu;
pu = param->u;
putmp = param->uhelp;
for( j=0; j<np; j++ )
for( i=0; i<np; i++ )
*putmp++ = *pu++;
return 1;
}
/*
* free used memory
*/
int finalize( algoparam_t *param )
{
if( param->u ) {
free(param->u);
param->u = 0;
}
if( param->uhelp ) {
free(param->uhelp);
param->uhelp = 0;
}
if( param->uvis ) {
free(param->uvis);
param->uvis = 0;
}
return 1;
}
/*
* write the given temperature u matrix to rgb values
* and write the resulting image to file f
*/
void write_image( FILE * f, float *u,
unsigned sizex, unsigned sizey )
{
// RGB table
unsigned char r[1024], g[1024], b[1024];
int i, j, k;
float min, max;
j=1023;
// prepare RGB table
for( i=0; i<256; i++ )
{
r[j]=255; g[j]=i; b[j]=0;
j--;
}
for( i=0; i<256; i++ )
{
r[j]=255-i; g[j]=255; b[j]=0;
j--;
}
for( i=0; i<256; i++ )
{
r[j]=0; g[j]=255; b[j]=i;
j--;
}
for( i=0; i<256; i++ )
{
r[j]=0; g[j]=255-i; b[j]=255;
j--;
}
min=FLT_MAX;
max=-FLT_MAX;
// find minimum and maximum
for( i=0; i<sizey; i++ )
{
for( j=0; j<sizex; j++ )
{
if( u[i*sizex+j]>max )
max=u[i*sizex+j];
if( u[i*sizex+j]<min )
min=u[i*sizex+j];
}
}
fprintf(f, "P3\n");
fprintf(f, "%u %u\n", sizex, sizey);
fprintf(f, "%u\n", 255);
for( i=0; i<sizey; i++ )
{
for( j=0; j<sizex; j++ )
{
k=(int)(1023.0*(u[i*sizex+j]-min)/(max-min));
fprintf(f, "%d %d %d ", r[k], g[k], b[k]);
}
fprintf(f, "\n");
}
}
int coarsen( float *uold, unsigned oldx, unsigned oldy ,
float *unew, unsigned newx, unsigned newy )
{
int i, j;
int stepx;
int stepy;
int stopx = newx;
int stopy = newy;
if (oldx>newx)
stepx=oldx/newx;
else {
stepx=1;
stopx=oldx;
}
if (oldy>newy)
stepy=oldy/newy;
else {
stepy=1;
stopy=oldy;
}
// NOTE: this only takes the top-left corner,
// and doesn't do any real coarsening
for( i=0; i<stopy-1; i++ )
{
for( j=0; j<stopx-1; j++ )
{
unew[i*newx+j]=uold[i*oldx*stepy+j*stepx];
}
}
return 1;
}
#define BUFSIZE 100
int read_input( FILE *infile, algoparam_t *param )
{
int i, n;
char buf[BUFSIZE];
fgets(buf, BUFSIZE, infile);
n = sscanf( buf, "%u", &(param->maxiter) );
if( n!=1)
return 0;
fgets(buf, BUFSIZE, infile);
n = sscanf( buf, "%u", &(param->resolution) );
if( n!=1 )
return 0;
param->visres = param->resolution;
fgets(buf, BUFSIZE, infile);
n = sscanf(buf, "%u", &(param->numsrcs) );
if( n!=1 )
return 0;
(param->heatsrcs) =
(heatsrc_t*) malloc( sizeof(heatsrc_t) * (param->numsrcs) );
for( i=0; i<param->numsrcs; i++ )
{
fgets(buf, BUFSIZE, infile);
n = sscanf( buf, "%f %f %f %f",
&(param->heatsrcs[i].posx),
&(param->heatsrcs[i].posy),
&(param->heatsrcs[i].range),
&(param->heatsrcs[i].temp) );
if( n!=4 )
return 0;
}
return 1;
}
void print_params( algoparam_t *param )
{
int i;
fprintf(stdout, "Iterations : %u\n", param->maxiter);
fprintf(stdout, "Resolution : %u\n", param->resolution);
fprintf(stdout, "Num. Heat sources : %u\n", param->numsrcs);
for( i=0; i<param->numsrcs; i++ )
{
fprintf(stdout, " %2d: (%2.2f, %2.2f) %2.2f %2.2f \n",
i+1,
param->heatsrcs[i].posx,
param->heatsrcs[i].posy,
param->heatsrcs[i].range,
param->heatsrcs[i].temp );
}
}
|
236ae535e99b53960d1188d71624a78acf8168f5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
//#include <stdio.h>
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
//Simple 3D volume renderer.
#ifndef _VOLUMERENDER_KERNEL_CU_
#define _VOLUMERENDER_KERNEL_CU_
#include <math.h>
#include <helper_cuda.h>
#include <helper_math.h>
#include "v9volumeRender.h"
//add on 2019/4/19
#define MINLEN 0.0001
//add on 2019/4/19
//these pointers point to the .raw data on the gpu, copied from the cpu.
//hipArray *d_aniMatrix = 0;
hipArray *d_normInten = 0;
hipArray *d_normGrad = 0;
hipArray *d_probVolume = 0;
//hipArray *d_colormap = 0;
//add on 2019/4/17.
hipArray *d_normNormalX = 0;
hipArray *d_normNormalY = 0;
hipArray *d_normNormalZ = 0;
//add on 2019/4/17.
//NOTE:
//(1)changed 'hipReadModeNormalizedFloat' to 'hipReadModeElementType',
//so that when use tex3D() to extract texture value, it returns VolumeType data whose range is the same as .raw;
//(2)'tex3DRawData.filterMode = hipFilterModeLinear' can only use for returned value being float-point, not for VolumeType;
//(3)'hipReadModeNormalizedFloat' makes .raw values (in 3D texture) are normalized to [0.0, 1.0]. Refer to http://blog.csdn.net/yanghangjun/article/details/5587269.
texture<float, 3, hipReadModeElementType> tex3D_normInten; //range: [0, 1].
texture<float, 3, hipReadModeElementType> tex3D_normGrad; //range: [0, 1].
texture<float, 3, hipReadModeElementType> tex3D_probVolume; //range: [0, n].
texture<float4, 1, hipReadModeElementType> tex1D_colormap;
texture<int, 2, hipReadModeElementType> tex2D_aniMatrix;
//add on 2019/4/17.
texture<float, 3, hipReadModeElementType> tex3D_normNormalX; //range: [0, 1].
texture<float, 3, hipReadModeElementType> tex3D_normNormalY; //range: [0, 1].
texture<float, 3, hipReadModeElementType> tex3D_normNormalZ; //range: [0, 1].
//add on 2019/4/17.
//add on 2020/3/30/
__device__ float4 lassoColor = {1.0f, 0.0f, 1.0f, 1.0f}; //opaque pink.
__device__ float4 bgColor = {1.0f, 1.0f, 1.0f, 0.0f}; //transparent white.
__device__ float4 contextColor = { 0.0f, 0.0f, 1.0f, 0.0f}; //transparent blue.
__device__ float4 tarFeatColor = { 1.0f, 0.0f, 0.0f, 0.0f}; //transparent red.
//__device__ int2 initialSeedPos = {1, 1};
//add on 2020/3/30
typedef struct
{
float4 m[3];
} float3x4;
__constant__ float3x4 c_invViewMatrix; //inverse view matrix.
struct Ray
{
float3 o; //origin.
float3 d; //direction.
};
// intersect ray with a box.
// http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm
__device__ int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar)
{
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.0f) / r.d;
float3 tbot = invR * (boxmin - r.o);
float3 ttop = invR * (boxmax - r.o);
// re-order intersections to find smallest and largest on each axis
float3 tmin = fminf(ttop, tbot);
float3 tmax = fmaxf(ttop, tbot);
// find the largest tmin and the smallest tmax
float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
*tnear = largest_tmin;
*tfar = smallest_tmax;
return smallest_tmax > largest_tmin;
}
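//worked example (slab method, assuming IEEE inf semantics for zero direction components):
//for r.o = (-2, 0, 0), r.d = (1, 0, 0) and the box [-1, 1]^3, the x slabs give t = 1 and
//t = 3 while the y/z slabs give (-inf, +inf), so *tnear = 1, *tfar = 3 and the function
//returns 1 (a hit).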
// transform vector by matrix (no translation)
__device__ float3 mul(const float3x4 &M, const float3 &v)
{
float3 r;
r.x = dot(v, make_float3(M.m[0]));
r.y = dot(v, make_float3(M.m[1]));
r.z = dot(v, make_float3(M.m[2]));
return r;
}
// transform vector by matrix with translation
__device__ float4 mul(const float3x4 &M, const float4 &v)
{
float4 r;
r.x = dot(v, M.m[0]);
r.y = dot(v, M.m[1]);
r.z = dot(v, M.m[2]);
r.w = 1.0f;
return r;
}
__device__ uint rgbaFloatToInt(float4 rgba)
{
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return (uint(rgba.w * 255) << 24) | (uint(rgba.z * 255) << 16) | (uint(rgba.y * 255) << 8) | uint(rgba.x * 255);
}
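//note: the channels are packed as 0xAABBGGRR; stored little-endian the byte order in
//memory is R, G, B, A (e.g. opaque pink {1, 0, 1, 1} packs to 0xFFFF00FF).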
extern "C" void freeCudaBuffers()
{
checkCudaErrors(hipFreeArray(d_normInten));
checkCudaErrors(hipFreeArray(d_normGrad));
checkCudaErrors(hipFreeArray(d_probVolume));
//checkCudaErrors(hipFreeArray(d_colormap));
}
extern "C" void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix)
{
checkCudaErrors(hipMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix));
}
extern "C" void initializeCuda(hipExtent volumeSize,
void *normInten, void *normGrad, void *probVolume,
void *normNormalX, void *normNormalY, void *normNormalZ)
{
//define the channel descriptor.
hipChannelFormatDesc float_ChannelDesc = hipCreateChannelDesc<float>();
//hipChannelFormatDesc float4_ChannelDesc = hipCreateChannelDesc<float4>();
//define the hipMemcpy3DParms copy parameters.
hipMemcpy3DParms copyParams = { 0 };
copyParams.extent = volumeSize;
copyParams.kind = hipMemcpyHostToDevice;
//1.1 create a 3D array on gpu, pointed by 'd_normInten'.
hipMalloc3DArray(&d_normInten, &float_ChannelDesc, volumeSize);
//1.2 copy cpu .raw data (pointed by 'normalizedIntensity') to this gpu 3D array (pointed by 'd_normalizedIntensity').
copyParams.srcPtr = make_hipPitchedPtr(normInten, volumeSize.width * sizeof(float), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_normInten;
hipMemcpy3D(©Params);
//1.3 (1)set texture parameters for a gpu 3D texture;
//(2)bind this 3D texture with above 3D array, so that we can use tex3D(tex, x, y, z) to obtain .raw's (x, y, z) voxel.
tex3D_normInten.normalized = true; //access with normalized texture coordinates [0.0, 1.0].
tex3D_normInten.filterMode = hipFilterModeLinear; //linear interpolation can only use with float-point.
tex3D_normInten.addressMode[0] = hipAddressModeClamp; //clamp texture coordinates
tex3D_normInten.addressMode[1] = hipAddressModeClamp;
//bind above 3D array to this gpu 3D texture.
hipBindTextureToArray(tex3D_normInten, d_normInten, float_ChannelDesc);
//2.1 create a 3D array on gpu, pointed by 'd_normalizedGrad'.
hipMalloc3DArray(&d_normGrad, &float_ChannelDesc, volumeSize);
//2.2 copy cpu .raw data (pointed by 'normalizedGrad') to this gpu 3D array (pointed by 'd_normalizedGrad').
//hipMemcpy3DParms is CUDA 3D memory copying parameters.
copyParams.srcPtr = make_hipPitchedPtr(normGrad, volumeSize.width * sizeof(float), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_normGrad;
hipMemcpy3D(©Params);
//2.3 (1)set texture parameters for a gpu 3D texture;
//(2)bind this 3D texture with above 3D array, so that we can use tex3D(tex, x, y, z) to obtain .raw's (x, y, z) voxel.
tex3D_normGrad.normalized = true; //access with normalized texture coordinates [0.0,1.0].
tex3D_normGrad.filterMode = hipFilterModeLinear; //linear interpolation can only use with float-point.
tex3D_normGrad.addressMode[0] = hipAddressModeClamp; //clamp texture coordinates
tex3D_normGrad.addressMode[1] = hipAddressModeClamp;
//bind above 3D array to this gpu 3D texture.
hipBindTextureToArray(tex3D_normGrad, d_normGrad, float_ChannelDesc);
//3.1 create a 3D array on gpu, pointed by 'd_probVolume'.
hipMalloc3DArray(&d_probVolume, &float_ChannelDesc, volumeSize);
//3.2 copy cpu .raw data (pointed by 'probVolume') to this gpu 3D array (pointed by 'd_probVolume').
//hipMemcpy3DParms is CUDA 3D memory copying parameters.
copyParams.srcPtr = make_hipPitchedPtr(probVolume, volumeSize.width * sizeof(float), volumeSize.width, volumeSize.height); //copyParams_resultOfSelected3DComponent.srcPtr = make_hipPitchedPtr(resultOfSelected3DComponent, volumeSize.width * sizeof(unsigned char), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_probVolume;
hipMemcpy3D(©Params);
//3.3 (1)set texture parameters for a gpu 3D texture;
//(2)bind this 3D texture with above 3D array, so that we can use tex3D(tex, x, y, z) to obtain .raw's (x, y, z) voxel.
tex3D_probVolume.normalized = true; //access with normalized texture coordinates [0.0, 1.0].
tex3D_probVolume.filterMode = hipFilterModeLinear; //linear interpolation can only use with float-point. //tex3D_resultOfSelected3DComponent.filterMode = hipFilterModePoint;
tex3D_probVolume.addressMode[0] = hipAddressModeClamp; //clamp texture coordinates
tex3D_probVolume.addressMode[1] = hipAddressModeClamp;
//bind above 3D array to this gpu 3D texture.
hipBindTextureToArray(tex3D_probVolume, d_probVolume, float_ChannelDesc);
/*
//4.1 create a 1D array on gpu, pointed by 'd_colormap'.
hipMallocArray(&d_colormap, &float4_ChannelDesc, numOfRows_colormap, 1);
//4.2 copy cpu .raw colormap (pointed by 'colormap') to this gpu 1D array.
hipMemcpyToArray(d_colormap, 0, 0, colormap, sizeof(float4) * numOfRows_colormap, hipMemcpyHostToDevice);
//4.3 (1)set texture parameters for a gpu 1D texture;
//(2)bind the 1D texture with above 1D colormap array, so that we can use tex1D(transferTex, x) to obtain the x-indexed RGBA color.
tex1D_colormap.normalized = true;
tex1D_colormap.filterMode = hipFilterModeLinear;
tex1D_colormap.addressMode[0] = hipAddressModeClamp;
//bind above 1D colormap array to this 1D texture.
hipBindTextureToArray(tex1D_colormap, d_colormap, float4_ChannelDesc);
*/
//add on 2019/4/17.
//5.1 create a 3D array on gpu, pointed by 'd_normNormalX'.
hipMalloc3DArray(&d_normNormalX, &float_ChannelDesc, volumeSize);
//5.2 copy cpu .raw data (pointed by 'normalizedGrad') to this gpu 3D array (pointed by 'd_normalizedGrad').
//hipMemcpy3DParms is CUDA 3D memory copying parameters.
copyParams.srcPtr = make_hipPitchedPtr(normNormalX, volumeSize.width * sizeof(float), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_normNormalX;
hipMemcpy3D(©Params);
//5.3 (1)set texture parameters for a gpu 3D texture;
//(2)bind this 3D texture with above 3D array, so that we can use tex3D(tex, x, y, z) to obtain .raw's (x, y, z) voxel.
tex3D_normNormalX.normalized = true; //access with normalized texture coordinates [0.0,1.0].
tex3D_normNormalX.filterMode = hipFilterModeLinear; //linear interpolation can only use with float-point.
tex3D_normNormalX.addressMode[0] = hipAddressModeClamp; //clamp texture coordinates
tex3D_normNormalX.addressMode[1] = hipAddressModeClamp;
//bind above 3D array to this gpu 3D texture.
hipBindTextureToArray(tex3D_normNormalX, d_normNormalX, float_ChannelDesc);
//6.1 create a 3D array on gpu, pointed by 'd_normNormalY'.
hipMalloc3DArray(&d_normNormalY, &float_ChannelDesc, volumeSize);
//6.2 copy cpu .raw data (pointed by 'normalizedGrad') to this gpu 3D array (pointed by 'd_normalizedGrad').
//hipMemcpy3DParms is CUDA 3D memory copying parameters.
copyParams.srcPtr = make_hipPitchedPtr(normNormalY, volumeSize.width * sizeof(float), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_normNormalY;
hipMemcpy3D(©Params);
//6.3 (1)set texture parameters for a gpu 3D texture;
//(2)bind this 3D texture with above 3D array, so that we can use tex3D(tex, x, y, z) to obtain .raw's (x, y, z) voxel.
tex3D_normNormalY.normalized = true; //access with normalized texture coordinates [0.0,1.0].
tex3D_normNormalY.filterMode = hipFilterModeLinear; // linear interpolation can only use with float-point.
tex3D_normNormalY.addressMode[0] = hipAddressModeClamp; //clamp texture coordinates
tex3D_normNormalY.addressMode[1] = hipAddressModeClamp;
//bind above 3D array to this gpu 3D texture.
hipBindTextureToArray(tex3D_normNormalY, d_normNormalY, float_ChannelDesc);
//7.1 create a 3D array on gpu, pointed by 'd_normNormalZ'.
hipMalloc3DArray(&d_normNormalZ, &float_ChannelDesc, volumeSize);
//7.2 copy cpu .raw data (pointed by 'normalizedGrad') to this gpu 3D array (pointed by 'd_normalizedGrad').
//hipMemcpy3DParms is CUDA 3D memory copying parameters.
copyParams.srcPtr = make_hipPitchedPtr(normNormalZ, volumeSize.width * sizeof(float), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_normNormalZ;
hipMemcpy3D(©Params);
//7.3 (1)set texture parameters for a gpu 3D texture;
//(2)bind this 3D texture with above 3D array, so that we can use tex3D(tex, x, y, z) to obtain .raw's (x, y, z) voxel.
tex3D_normNormalZ.normalized = true; //access with normalized texture coordinates [0.0,1.0].
tex3D_normNormalZ.filterMode = hipFilterModeLinear; // linear interpolation can only use with float-point.
tex3D_normNormalZ.addressMode[0] = hipAddressModeClamp; //clamp texture coordinates
tex3D_normNormalZ.addressMode[1] = hipAddressModeClamp;
//bind above 3D array to this gpu 3D texture.
hipBindTextureToArray(tex3D_normNormalZ, d_normNormalZ, float_ChannelDesc);
//add on 2019/4/17.
}
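/*
 * Usage sketch (hypothetical host-side caller, not part of this file): all six input
 * volumes are expected to share the same extent and to be float arrays already
 * normalized as documented above, e.g.
 *
 *   hipExtent volumeSize = make_hipExtent(width, height, depth);
 *   initializeCuda(volumeSize, normInten, normGrad, probVolume,
 *                  normNormalX, normNormalY, normNormalZ);
 */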
//add on 2020/3/30.
__device__ void clamp(float4 *sampleCol)
{
if (sampleCol->x < 0.0f)
sampleCol->x = 0.0f;
else if (sampleCol->x > 1.0f)
sampleCol->x = 1.0f;
if (sampleCol->y < 0.0f)
sampleCol->y = 0.0f;
else if (sampleCol->y > 1.0f)
sampleCol->y = 1.0f;
if (sampleCol->z < 0.0f)
sampleCol->z = 0.0f;
else if (sampleCol->z > 1.0f)
sampleCol->z = 1.0f;
if (sampleCol->w < 0.0f)
sampleCol->w = 0.0f;
else if (sampleCol->w > 1.0f)
sampleCol->w = 1.0f;
}
//add on 2020/3/30.
//launch winWidth * winHeight threads.
//volume ray casting algorithm.
__global__ void d_render(int winWidth, int winHeight, hipExtent volumeSize, int maxVolumeDim,
int totAniFrames, int *d_aniMatrix, int theta, float3 lightPos, bool contextOpen,
uint *d_output)
{
//thread(x, y) id.
int x = threadIdx.x + blockIdx.x * blockDim.x; //range: [0, winWidth - 1].
int y = threadIdx.y + blockIdx.y * blockDim.y; //range: [0, winHeight - 1].
if ((x < winWidth) && (y < winHeight))
{
const int maxSteps = 500; //max number of samples along a ray.
const float tstep = 0.01f; //sampling distance between 2 samples along a ray.
const float opacityThreshold = 0.95f;
//both boxMin and boxMax ensure the final image displays in correct dimension along 3 directions.
const float3 boxMin = make_float3(-1.0f * volumeSize.width / maxVolumeDim,
-1.0f * volumeSize.height / maxVolumeDim,
-1.0f * volumeSize.depth / maxVolumeDim);
const float3 boxMax = make_float3(1.0f * volumeSize.width / maxVolumeDim,
1.0f * volumeSize.height / maxVolumeDim,
1.0f * volumeSize.depth / maxVolumeDim);
//for each pixel(x, y) on d_output:
//map pixel(x, y) on d_output to (u, v) in world space, each in [-1.0, 1.0].
float u = (x / (float)winWidth) * 2.0f - 1.0f; //u range: [-1.0, 1.0].
float v = (y / (float)winHeight) * 2.0f - 1.0f; //v range: [-1.0, 1.0].
//calculate eye ray in world space.
Ray eyeRay;
//eyeRay origin: eyeRay.o = (0, 0, 4).
eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f)));
//eyeRay direction.
eyeRay.d = normalize(make_float3(u, v, -2.0f));
eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
//find intersection with box.
float tnear, tfar;
int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar);
if (hit) //if eyeRay intersects the box.
{
if (tnear < 0.0f) tnear = 0.0f; //clamp to near plane.
//march along ray from front to back, accumulating color.
float4 color_xy = bgColor;
float t = tnear;
float3 pos = eyeRay.o + eyeRay.d * tnear; //pos range: [-1, -1, -1] - [1, 1, 1].
float3 step = eyeRay.d * tstep;
for (int i = 0; i < maxSteps; i++)
{
//(1)fetch the pos(x, y, z) sample point's normInten + normGrad + probVolume + numOfEnhancedFrames + x/y/z normals.
//sample_normInten range: [0, 1].
float sample_normInten = tex3D(tex3D_normInten,
pos.x * maxVolumeDim / volumeSize.width * 0.5f + 0.5f,
pos.y * maxVolumeDim / volumeSize.height * 0.5f + 0.5f,
pos.z * maxVolumeDim / volumeSize.depth * 0.5f + 0.5f);
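//(the factor maxVolumeDim / volumeSize.* * 0.5f + 0.5f maps pos from
//[-volumeSize.* / maxVolumeDim, +volumeSize.* / maxVolumeDim] to the normalized
//texture coordinate range [0, 1]; the same mapping is used for the fetches below.)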
//sample_normGrad range: [0, 1].
float sample_normGrad = tex3D(tex3D_normGrad,
pos.x * maxVolumeDim / volumeSize.width * 0.5f + 0.5f,
pos.y * maxVolumeDim / volumeSize.height * 0.5f + 0.5f,
pos.z * maxVolumeDim / volumeSize.depth * 0.5f + 0.5f);
//sample_probVolume range: [0, 1].
float sample_probVolume = tex3D(tex3D_probVolume,
pos.x * maxVolumeDim / volumeSize.width * 0.5f + 0.5f,
pos.y * maxVolumeDim / volumeSize.height * 0.5f + 0.5f,
pos.z * maxVolumeDim / volumeSize.depth * 0.5f + 0.5f);
//sample_numOfEnhancedFrames range: [0, 16].
//int sample_numOfEnhancedFrames = round(sample_probVolume * totAniFrames); //ceil(sample_probVolume * totAniFrames);
//sample_normNormal.x range: [0, 1];
//sample_normNormal.y range: [0, 1];
//sample_normNormal.z range : [0, 1].
float3 sample_normNormal;
sample_normNormal.x = tex3D(tex3D_normNormalX,
pos.x * maxVolumeDim / volumeSize.width * 0.5f + 0.5f,
pos.y * maxVolumeDim / volumeSize.height * 0.5f + 0.5f,
pos.z * maxVolumeDim / volumeSize.depth * 0.5f + 0.5f);
sample_normNormal.y = tex3D(tex3D_normNormalY,
pos.x * maxVolumeDim / volumeSize.width * 0.5f + 0.5f,
pos.y * maxVolumeDim / volumeSize.height * 0.5f + 0.5f,
pos.z * maxVolumeDim / volumeSize.depth * 0.5f + 0.5f);
sample_normNormal.z = tex3D(tex3D_normNormalZ,
pos.x * maxVolumeDim / volumeSize.width * 0.5f + 0.5f,
pos.y * maxVolumeDim / volumeSize.height * 0.5f + 0.5f,
pos.z * maxVolumeDim / volumeSize.depth * 0.5f + 0.5f);
//(2)compute the pos(x, y, z) sample point's RGBA color sampleCol, according to contextOpen (true or false).
//(2.1)compute the pos(x, y, z) sample point's RGBA sampleCol without shading.
float opacityScaleFactor = 1.5;
float enhFactor = 900;
float4 sampleCol = { 0.0f, 0.0f, 0.0f, 0.0f };
switch (contextOpen)
{
case true: //show both context + tarFeat.
if (sample_probVolume == 0) //this voxel is context.
{
sampleCol = contextColor;
sampleCol.w = sample_normInten * sample_normGrad * opacityScaleFactor;
}
else if (sample_probVolume > 0) //this voxel is the target feature.
{
sampleCol = tarFeatColor;
sampleCol.w = sample_normInten * sample_normGrad * opacityScaleFactor;
sampleCol.w *= (1.0f + log(sample_probVolume + 1) * enhFactor);
}
break;
case false: //show only tarFeat.
if (sample_probVolume > 0)
{
sampleCol = tarFeatColor;
sampleCol.w = sample_normInten * sample_normGrad * opacityScaleFactor;
sampleCol.w *= (1.0f + log(sample_probVolume + 1) * enhFactor);
}
break;
}
//(2.2)add shading to pos(x, y, z) sample point RGBA sampleCol.
//compute light direction for pos(x, y, z).
float3 lightDir;
lightDir.x = lightPos.x - pos.x;
lightDir.y = lightPos.y - pos.y;
lightDir.z = lightPos.z - pos.z;
//normalize lightDir.
float len_lightDir = sqrt(lightDir.x * lightDir.x + lightDir.y * lightDir.y + lightDir.z * lightDir.z);
if (len_lightDir < MINLEN)
{
lightDir.x = 0;
lightDir.y = 0;
lightDir.z = 0;
}
else
{
lightDir.x /= len_lightDir;
lightDir.y /= len_lightDir;
lightDir.z /= len_lightDir;
}
//compute diffuse lighting.
float diffuseFactor = 10;
float diffuse = sample_normNormal.x * lightDir.x + sample_normNormal.y * lightDir.y + sample_normNormal.z * lightDir.z;
//add diffuse lighting to sampleCol.
sampleCol.x += diffuse * diffuseFactor;
sampleCol.y += diffuse * diffuseFactor;
sampleCol.z += diffuse * diffuseFactor;
//(2.3)clamp sampleCol to be [0, 1].
clamp(&sampleCol);
/*
//(3)get the pos(x, y, z) sample point's enhanced RGB (optional) + opacity.
float enhFactor = 9000000; //30;
float4 sampleCol_enhanced = sampleCol_default;
//(3.1)method 1 (as described in the rule-enhanced paper): from the pos(x, y, z) sample point's probVolume, derive the sample point's enhanced opacity Oe(v).
sampleCol_enhanced.w = sampleCol_default.w * (1.0f + log(sample_probVolume + 1) * enhFactor);
//end method 1.
*/
/*
//(3.2)method 2 (our own variant; use either method 1 or method 2): from the pos(x, y, z) sample point's probVolume, derive the sample point's new RGB + enhanced opacity Oe(v).
//(3.2.1)get the pos(x, y, z) sample point's new RGB.
if (sample_probVolume > 0)
{
//specify to be red.
sampleCol_enhanced.x = 1.0f;
sampleCol_enhanced.y = 0.0f;
sampleCol_enhanced.z = 0.0f;
}
//(3.2.2)get the pos(x, y, z) sample point's enhanced opacity Oe(v).
sampleCol_enhanced.w = sampleCol_default.w * (1 + log(sample_probVolume + 1) * enhFactor);
//end method 2.
*/
/*
//(3.3)clamp pos(x, y, z) sample point enhanced opacity Oe(v) to be [0, 1].
if (sampleCol_enhanced.w < 0.0f)
sampleCol_enhanced.w = 0.0f;
else if (sampleCol_enhanced.w > 1.0f)
sampleCol_enhanced.w = 1.0f;
*/
/*
//add on 2019/4/13.
//(4)get the pos(x, y, z) sample point's removed opacity.
float4 sampleCol_removed = sampleCol_default;
//(4.1)decide sampleCol_removed.w from this sample point's sample_probVolume.
//the larger sample_probVolume is, the smaller sampleCol_removed.w becomes;
//the smaller sample_probVolume is, the larger sampleCol_removed.w becomes.
//(Note: (a)when remFactor = 0, sampleCol_removed.w = sampleCol_default.w (use remFactor = 0 when the target feature cannot be shown in the original data);
//(b)the larger remFactor is, the more contribution is removed).
float remFactor = 0; //90000000;
sampleCol_removed.w = sampleCol_default.w * (1.0f - log(sample_probVolume + 1) * remFactor);
//end method 2.
//(4.2)clamp sampleCol_removed.w to be [0, 1].
if (sampleCol_removed.w < 0.0f)
sampleCol_removed.w = 0.0f;
else if (sampleCol_removed.w > 1.0f)
sampleCol_removed.w = 1.0f;
//(5)from d_aniMatrix(theta - 1, sample_numOfEnhancedFrames), determine the sampleCol_thetaFrame color for this theta frame.
float4 sampleCol_thetaFrame;
int enhancedOrRemovedValue = d_aniMatrix[(theta - 1) * (totAniFrames + 1) + sample_numOfEnhancedFrames];
if (enhancedOrRemovedValue == 1)
{
//in this theta frame, sampleColor_thetaFrame = sampleColor_enhanced.
sampleCol_thetaFrame = sampleCol_enhanced;
}
else if (enhancedOrRemovedValue == 0)
{
//in this theta frame, sampleColor_thetaFrame = sampleColor_removed.
sampleCol_thetaFrame = sampleCol_removed;
}
//add on 2019/4/13.
*/
//(6)accumulate pos(x, y, z) sample point sampleCol to be color_xy.
//pre-multiply alpha.
sampleCol.x *= sampleCol.w;
sampleCol.y *= sampleCol.w;
sampleCol.z *= sampleCol.w;
//"over" operator for front-to-back blending.
//color_xy = color_xy + sampleCol * (1.0f - color_xy.w);
color_xy = sampleCol + color_xy * (1.0f - sampleCol.w); //refer to https://stackoverflow.com/questions/39555050/how-to-do-the-blending-in-volume-rendering-using-glsl
//exit early if opaque.
if (color_xy.w > opacityThreshold)
break;
t += tstep;
if (t > tfar) break;
pos += step;
}
//write each [x, y]'s output color to corresponding pixel location in pbo.
d_output[x + y * winWidth] = rgbaFloatToInt(color_xy);
//printf("col = %u.\n", d_output[x + y * winWidth]);
}//end if intersection.
//add on 2020/4/2.
else //if not intersection with data.
{
d_output[x + y * winWidth] = rgbaFloatToInt(bgColor);
//printf("white = %u.\n", rgbaFloatToInt(bgColor));
}
//add on 2020/4/2.
}//end if.
}
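//note: totAniFrames, d_aniMatrix and theta are only referenced by the commented-out
//animation/enhancement path above; the active code path ignores them.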
/*
//add on 2020.4.1.
__global__ void drawLasso(int winWidth, int winHeight, int windowTitleBarHeight, int ox, int oy,
uint *d_output)
{
//here is correct 4 corners of window.
//d_output[0 + 0 * winWidth] = rgbaFloatToInt({ 1.0, 0.0, 0.0, 1.0 }); //red.
// d_output[0 + 495 * winWidth] = rgbaFloatToInt({ 0.0, 1.0, 0.0, 1.0 }); //green.
// d_output[495 + 0 * winWidth] = rgbaFloatToInt({ 1.0, 0.0, 1.0, 1.0 }); //pink.
// d_output[495 + 495 * winWidth] = rgbaFloatToInt({ 1.0, 1.0, 0.0, 1.0 }); //yellow.
//draw lasso on d_output.
d_output[ox + (winHeight - windowTitleBarHeight - oy) * winWidth] = rgbaFloatToInt(lassoColor);
}
*/
/*
//launch winWidth * winHeight threads.
__global__ void refineProbVolume(int winWidth, int winHeight, unsigned char *dev_lassoLabelImg, unsigned char *dev_bgImg,
uint *d_output)
{
//thread(x, y) id.
int x = threadIdx.x + blockIdx.x * blockDim.x; //range: [0, winWidth - 1].
int y = threadIdx.y + blockIdx.y * blockDim.y; //range: [0, winHeight - 1].
if ((x < winWidth) && (y < winHeight))
{
//1. copy d_output lasso edge to dev_lassoLabelImg (lasso edge = 1).
if (d_output[x + y * winWidth] == rgbaFloatToInt(lassoColor))
{
dev_lassoLabelImg[x + y * winWidth] = 1;
}
//2. use region-growing to fill the dev_lassoLabelImg background (background == 64),
//by using one thread.
if ((x == 0) && (y == 0))
{
backgroundGen(winWidth, winHeight, dev_lassoLabelImg, dev_bgImg, 64, 0, initialSeedPos);
}
//3. lasso pixels = the complement of the background.
}// end if.
}
//add on 2020.4.1.
*/
extern "C" void render_kernel(int winWidth, int winHeight, dim3 blockSize, dim3 gridSize, hipExtent volumeSize, int maxVolumeDim,
int totAniFrames, int *d_aniMatrix, int theta,
float3 lightPos, bool contextOpen,
uint *d_output)
{
/*
if (buttonState == 1) //mouse left button is moving while pressed.
{
//use 1 thread to draw the lasso on d_output.
drawLasso << <1, 1 >> >(winWidth, winHeight, windowTitleBarHeight, ox, oy, d_output);
}
else if (buttonState == 2) //mouse left button is released.
{
refineProbVolume << <gridSize, blockSize >> >(winWidth, winHeight, dev_lassoLabelImg, dev_bgImg,
d_output);
printf("left released.\n");
}
else
{
*/
//render image (pointed by d_output).
d_render << <gridSize, blockSize >> > (winWidth, winHeight, volumeSize, maxVolumeDim, totAniFrames, d_aniMatrix, theta, lightPos,
contextOpen, d_output);
//}
}
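/*
 * Launch-configuration sketch (hypothetical caller, not part of this file):
 * blockSize/gridSize are expected to cover the winWidth x winHeight output, e.g.
 *
 *   dim3 blockSize(16, 16);
 *   dim3 gridSize((winWidth + blockSize.x - 1) / blockSize.x,
 *                 (winHeight + blockSize.y - 1) / blockSize.y);
 *   render_kernel(winWidth, winHeight, blockSize, gridSize, volumeSize, maxVolumeDim,
 *                 totAniFrames, d_aniMatrix, theta, lightPos, contextOpen, d_output);
 */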
/*
//add on 2020/4/6.
//use region-growing to fill background pixels = 64.
__global__ void backgroundGen(int winWidth, int winHeight, uint *d_output2,
unsigned char *dev_lassoBackgroundImg, int2 initialSeedPos, int grayValue_background, int increment)
{
dev_lassoBackgroundImg[initialSeedPos.x + initialSeedPos.y * winWidth] = grayValue_background;
//int initialSeedIntensity = d_output2[initialSeedPos.x + initialSeedPos.y * winWidth];
//int minValue_Seed = initialSeedIntensity - increment;
//int maxValue_Seed = initialSeedIntensity + increment;
int oldseeds = 1;
int newseeds = 0;
while (newseeds != oldseeds)
{
oldseeds = newseeds;
newseeds = 0;
for (int y = 1; y < (winHeight - 1); y++) //i = y;
for (int x = 1; x < (winWidth - 1); x++) //x = j.
{
if (dev_lassoBackgroundImg[x + y * winWidth] > 0) //indicate dev_lassoBackgroundImg(x, y) is a seed.
{
//(1)find this seed's upper neighbor.
int intensity = d_output2[x + (y - 1) * winWidth];
if (intensity != rgbaFloatToInt({ 1.0f, 0.0f, 1.0f, 1.0f })) //((intensity >= minValue_Seed) && (intensity <= maxValue_Seed))
{
newseeds = newseeds + 1;
dev_lassoBackgroundImg[x + (y - 1) * winWidth] = grayValue_background;
}
//(2)find this seed's lower neighbor.
intensity = d_output2[x + (y + 1) * winWidth];
if (intensity != rgbaFloatToInt({ 1.0f, 0.0f, 1.0f, 1.0f })) //((intensity >= minValue_Seed) && (intensity <= maxValue_Seed))
{
newseeds = newseeds + 1;
dev_lassoBackgroundImg[x + (y + 1) * winWidth] = grayValue_background;
}
//(3)find this seed's left neighbor.
intensity = d_output2[(x - 1) + y * winWidth];
if (intensity != rgbaFloatToInt({ 1.0f, 0.0f, 1.0f, 1.0f })) //((intensity >= minValue_Seed) && (intensity <= maxValue_Seed))
{
newseeds = newseeds + 1;
dev_lassoBackgroundImg[(x - 1) + y * winWidth] = grayValue_background;
}
//(4)find this seed's right neighbor.
intensity = d_output2[(x + 1) + y * winWidth];
if (intensity != rgbaFloatToInt({ 1.0f, 0.0f, 1.0f, 1.0f })) //((intensity >= minValue_Seed) && (intensity <= maxValue_Seed))
{
newseeds = newseeds + 1;
dev_lassoBackgroundImg[(x + 1) + y * winWidth] = grayValue_background;
}
} //end if.
} //end for.
} //end while.
}
*/
/*
extern "C" void lassoBackgroundGen(int winWidth, int winHeight, uint *d_output2,
unsigned char *dev_lassoEdgeImg, unsigned char *dev_lassoBackgroundImg,
int2 initialSeedPos, int grayValue_background, int increment)
{
backgroundGen << <1, 1 >> >(winWidth, winHeight, d_output2,
dev_lassoEdgeImg, dev_lassoBackgroundImg,
initialSeedPos, grayValue_background, increment);
hipError_t cudaerr = hipDeviceSynchronize();
if (cudaerr != hipSuccess)
{
printf("lassoBackgroundGen launch failed with error \"%s\".\n", hipGetErrorString(cudaerr));
}
else
{
printf("lassoBackgroundGen launch successfully.\n");
}
}
*/
/*
//launch winWidth * winHeight threads.
__global__ void refineProbVolume(int winWidth, int winHeight,
uint *d_output2,
unsigned char *dev_lassoEdgeImg, unsigned char *dev_lassoBackgroundImg,
int2 initialSeedPos, int grayValue_lassoBackground, int increment,
int cx, int cy)
{
//thread(x, y) id.
int x = threadIdx.x + blockIdx.x * blockDim.x; //range: [0, winWidth - 1].
int y = threadIdx.y + blockIdx.y * blockDim.y; //range: [0, winHeight - 1].
if ((x < winWidth) && (y < winHeight))
{
//1. generate dev_lassoEdgeImg (lasso edge = 1), according to d_output2 lasso.
if (d_output2[x + y * winWidth] == rgbaFloatToInt({ 1.0f, 0.0f, 1.0f, 1.0f }))
{
printf("wtf.\n");
dev_lassoEdgeImg[x + y * winWidth] = 1;
}
else
{
//printf("%u.\n", d_output2[x + y * winWidth]);
}
//2. use region-growing to fill dev_lassoBackgroundImg (lasso background = 64).
if ((x == 0) && (y == 0))
{
printf("pink = %u.\n", rgbaFloatToInt({ 1.0f, 0.0f, 1.0f, 1.0f }));
//printf("white = %u.\n", rgbaFloatToInt(bgColor));
printf("col = %u.\n", d_output2[cx + (winHeight - cy - 9) * winWidth]);
}
} //end if.
}
extern "C" void getLassoPixels(int winWidth, int winHeight, dim3 gridSize, dim3 blockSize,
uint *d_output2,
unsigned char *dev_lassoEdgeImg, unsigned char *dev_lassoBackgroundImg,
int2 initialSeedPos, int grayValue_lassoBackground, int increment,
int cx, int cy)
{
//printf("gridSize: %d, %d, %d; blockSize: %d, %d, %d.\n", gridSize.x, gridSize.y, gridSize.z, blockSize.x, blockSize.y, blockSize.z);
refineProbVolume << <gridSize, blockSize>> >(winWidth, winHeight,
d_output2,
dev_lassoEdgeImg, dev_lassoBackgroundImg,
initialSeedPos, grayValue_lassoBackground, increment,
cx, cy);
hipError_t cudaerr = hipDeviceSynchronize();
if (cudaerr != hipSuccess)
{
printf("refineProbVolume launch failed with error \"%s\".\n", hipGetErrorString(cudaerr));
}
}
//launch winWidth * winHeight threads.
__global__ void lassoBackgroundGen(int winWidth, int winHeight, uint *d_output2)
{
//thread(x, y) id.
int x = threadIdx.x + blockIdx.x * blockDim.x; //range: [0, winWidth - 1].
int y = threadIdx.y + blockIdx.y * blockDim.y; //range: [0, winHeight - 1].
if ((x < winWidth) && (y < winHeight))
{
if (d_output2[x + y * winWidth] == rgbaFloatToInt({ 1.0f, 0.0f, 1.0f, 1.0f }))
{
printf("yes.\n");
}
}//end if.
}
extern "C" void proc(int winWidth, int winHeight, dim3 gridSize, dim3 blockSize, uint *d_output2)
{
lassoBackgroundGen << <gridSize, blockSize>> >(winWidth, winHeight, d_output2);
}
*/
//launch winWidth * winHeight threads.
__global__ void refineProbVolume(int winWidth, int winHeight, hipExtent volumeSize, int maxVolumeDim,
void *probVolume, unsigned char *dev_lassoBackgroundImg)
{
//thread(x, y) id.
int x = threadIdx.x + blockIdx.x * blockDim.x; //range: [0, winWidth - 1].
int y = threadIdx.y + blockIdx.y * blockDim.y; //range: [0, winHeight - 1].
if ((x < winWidth) && (y < winHeight))
{
if (dev_lassoBackgroundImg[x + y * winWidth] == 0) //pixel(x, y)=lasso pixel.
{
//for each lasso pixel(x, y):
//1. map pixel(x, y) to (u, v).
float u = (x / (float)winWidth) * 2.0f - 1.0f; //u range: [-1.0, 1.0].
float v = (y / (float)winHeight) * 2.0f - 1.0f; //v range: [-1.0, 1.0].
//2. calculate eye ray in world space.
Ray eyeRay;
//eyeRay origin: eyeRay.o = (0, 0, 4).
eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f)));
//eyeRay direction.
eyeRay.d = normalize(make_float3(u, v, -2.0f));
eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
const int maxSteps = 500; //max number of samples along a ray.
const float tstep = 0.01f; //sampling distance between 2 samples along a ray.
//both boxMin and boxMax ensure the final image displays in correct dimension along 3 directions.
const float3 boxMin = make_float3(-1.0f * volumeSize.width / maxVolumeDim,
-1.0f * volumeSize.height / maxVolumeDim,
-1.0f * volumeSize.depth / maxVolumeDim);
const float3 boxMax = make_float3(1.0f * volumeSize.width / maxVolumeDim,
1.0f * volumeSize.height / maxVolumeDim,
1.0f * volumeSize.depth / maxVolumeDim);
//3. find intersection with box.
float tnear, tfar;
int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar);
if (hit)
{
if (tnear < 0.0f) tnear = 0.0f; //clamp to near plane.
//march along ray from front to back, accumulating color.
float4 color_xy = bgColor;
float t = tnear;
float3 pos = eyeRay.o + eyeRay.d * tnear; //pos range: [-0.5, -1, -1] - [0.5, 1, 1].
printf("pos.x = %f, pos.y = %f, pos.z = %f.\n", pos.x, pos.y, pos.z);
float3 step = eyeRay.d * tstep;
for (int i = 0; i < maxSteps; i++)
{
//sampling point pos(x, y, z):
//(i)convert pos(x, y, z) (x/y/z range: [-1, 1])
//to pos(x, y, z) (x/y/z range: [0, volumeSize.width/height/depth]).
pos.x = pos.x * float(maxVolumeDim) / float(volumeSize.width) * (float(volumeSize.width) / 2) + float(volumeSize.width)/2,
pos.y = pos.y * float(maxVolumeDim) / float(volumeSize.height) * (float(volumeSize.height) / 2) + (float(volumeSize.height)/2),
pos.z = pos.z * float(maxVolumeDim) / float(volumeSize.depth) * (float(volumeSize.depth) / 2) + (float(volumeSize.depth) / 2);
if (pos.x == volumeSize.width)
pos.x = volumeSize.width - 1;
if (pos.y == volumeSize.height)
pos.y = volumeSize.height - 1;
if (pos.z == volumeSize.depth)
pos.z = volumeSize.depth - 1;
//(ii)set sampling point pos(x, y, z)'s probVolume = 0.
//((float*)probVolume)[int(pos.x) + int(pos.y) * volumeSize.width + pos.z * volumeSize.width * volumeSize.height] = 0;
t += tstep;
if (t > tfar) break;
pos += step;
}
}//end if intersection.
}
}//end if.
}
extern "C" void inParallelRefineProbVolume(int winWidth, int winHeight, dim3 gridSize, dim3 blockSize,
hipExtent volumeSize, int maxVolumeDim,
void *probVolume, unsigned char *dev_lassoBackgroundImg)
{
refineProbVolume << <gridSize, blockSize >> >(winWidth, winHeight, volumeSize, maxVolumeDim,
probVolume, dev_lassoBackgroundImg);
hipError_t cudaerr = hipDeviceSynchronize();
if (cudaerr != hipSuccess)
{
printf("refineProbVolume failed with error \"%s\".\n", hipGetErrorString(cudaerr));
}
}
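//note: hipDeviceSynchronize() above blocks until the kernel has finished, so it also
//surfaces asynchronous launch/execution errors that a plain hipGetLastError() issued
//immediately after the launch could miss.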
//add on 2020/4/6.
#endif
|
236ae535e99b53960d1188d71624a78acf8168f5.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
//#include <stdio.h>
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
//Simple 3D volume renderer.
#ifndef _VOLUMERENDER_KERNEL_CU_
#define _VOLUMERENDER_KERNEL_CU_
#include <math.h>
#include <helper_cuda.h>
#include <helper_math.h>
#include "v9volumeRender.h"
//add on 2019/4/19
#define MINLEN 0.0001
//add on 2019/4/19
//these pointers point to the .raw data on the gpu, copied from the cpu.
//cudaArray *d_aniMatrix = 0;
cudaArray *d_normInten = 0;
cudaArray *d_normGrad = 0;
cudaArray *d_probVolume = 0;
//cudaArray *d_colormap = 0;
//add on 2019/4/17.
cudaArray *d_normNormalX = 0;
cudaArray *d_normNormalY = 0;
cudaArray *d_normNormalZ = 0;
//add on 2019/4/17.
//NOTE:
//(1)changed 'cudaReadModeNormalizedFloat' to 'cudaReadModeElementType',
//so that when use tex3D() to extract texture value, it returns VolumeType data whose range is the same as .raw;
//(2)'tex3DRawData.filterMode = cudaFilterModeLinear' can only use for returned value being float-point, not for VolumeType;
//(3)'cudaReadModeNormalizedFloat' makes .raw values (in 3D texture) are normalized to [0.0, 1.0]. Refer to http://blog.csdn.net/yanghangjun/article/details/5587269.
texture<float, 3, cudaReadModeElementType> tex3D_normInten; //range: [0, 1].
texture<float, 3, cudaReadModeElementType> tex3D_normGrad; //range: [0, 1].
texture<float, 3, cudaReadModeElementType> tex3D_probVolume; //range: [0, n].
texture<float4, 1, cudaReadModeElementType> tex1D_colormap;
texture<int, 2, cudaReadModeElementType> tex2D_aniMatrix;
//add on 2019/4/17.
texture<float, 3, cudaReadModeElementType> tex3D_normNormalX; //range: [0, 1].
texture<float, 3, cudaReadModeElementType> tex3D_normNormalY; //range: [0, 1].
texture<float, 3, cudaReadModeElementType> tex3D_normNormalZ; //range: [0, 1].
//add on 2019/4/17.
//add on 2020/3/30/
__device__ float4 lassoColor = {1.0f, 0.0f, 1.0f, 1.0f}; //opaque pink.
__device__ float4 bgColor = {1.0f, 1.0f, 1.0f, 0.0f}; //transparent white.
__device__ float4 contextColor = { 0.0f, 0.0f, 1.0f, 0.0f}; //transparent blue.
__device__ float4 tarFeatColor = { 1.0f, 0.0f, 0.0f, 0.0f}; //transparent red.
//__device__ int2 initialSeedPos = {1, 1};
//add on 2020/3/30
typedef struct
{
float4 m[3];
} float3x4;
__constant__ float3x4 c_invViewMatrix; //inverse view matrix.
struct Ray
{
float3 o; //origin.
float3 d; //direction.
};
// intersect ray with a box.
// http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm
__device__ int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar)
{
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.0f) / r.d;
float3 tbot = invR * (boxmin - r.o);
float3 ttop = invR * (boxmax - r.o);
// re-order intersections to find smallest and largest on each axis
float3 tmin = fminf(ttop, tbot);
float3 tmax = fmaxf(ttop, tbot);
// find the largest tmin and the smallest tmax
float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
*tnear = largest_tmin;
*tfar = smallest_tmax;
return smallest_tmax > largest_tmin;
}
// transform vector by matrix (no translation)
__device__ float3 mul(const float3x4 &M, const float3 &v)
{
float3 r;
r.x = dot(v, make_float3(M.m[0]));
r.y = dot(v, make_float3(M.m[1]));
r.z = dot(v, make_float3(M.m[2]));
return r;
}
// transform vector by matrix with translation
__device__ float4 mul(const float3x4 &M, const float4 &v)
{
float4 r;
r.x = dot(v, M.m[0]);
r.y = dot(v, M.m[1]);
r.z = dot(v, M.m[2]);
r.w = 1.0f;
return r;
}
__device__ uint rgbaFloatToInt(float4 rgba)
{
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return (uint(rgba.w * 255) << 24) | (uint(rgba.z * 255) << 16) | (uint(rgba.y * 255) << 8) | uint(rgba.x * 255);
}
extern "C" void freeCudaBuffers()
{
checkCudaErrors(cudaFreeArray(d_normInten));
checkCudaErrors(cudaFreeArray(d_normGrad));
checkCudaErrors(cudaFreeArray(d_probVolume));
//checkCudaErrors(cudaFreeArray(d_colormap));
}
extern "C" void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix)
{
checkCudaErrors(cudaMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix));
}
extern "C" void initializeCuda(cudaExtent volumeSize,
void *normInten, void *normGrad, void *probVolume,
void *normNormalX, void *normNormalY, void *normNormalZ)
{
//define the channel descriptor.
cudaChannelFormatDesc float_ChannelDesc = cudaCreateChannelDesc<float>();
//cudaChannelFormatDesc float4_ChannelDesc = cudaCreateChannelDesc<float4>();
//define the cudaMemcpy3DParms copy parameters.
cudaMemcpy3DParms copyParams = { 0 };
copyParams.extent = volumeSize;
copyParams.kind = cudaMemcpyHostToDevice;
//1.1 create a 3D array on gpu, pointed by 'd_normInten'.
cudaMalloc3DArray(&d_normInten, &float_ChannelDesc, volumeSize);
//1.2 copy cpu .raw data (pointed by 'normalizedIntensity') to this gpu 3D array (pointed by 'd_normalizedIntensity').
copyParams.srcPtr = make_cudaPitchedPtr(normInten, volumeSize.width * sizeof(float), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_normInten;
cudaMemcpy3D(©Params);
//1.3 (1)set texture parameters for a gpu 3D texture;
//(2)bind this 3D texture with above 3D array, so that we can use tex3D(tex, x, y, z) to obtain .raw's (x, y, z) voxel.
tex3D_normInten.normalized = true; //access with normalized texture coordinates [0.0, 1.0].
tex3D_normInten.filterMode = cudaFilterModeLinear; //linear interpolation can only use with float-point.
tex3D_normInten.addressMode[0] = cudaAddressModeClamp; //clamp texture coordinates
tex3D_normInten.addressMode[1] = cudaAddressModeClamp;
//bind above 3D array to this gpu 3D texture.
cudaBindTextureToArray(tex3D_normInten, d_normInten, float_ChannelDesc);
//2.1 create a 3D array on gpu, pointed by 'd_normalizedGrad'.
cudaMalloc3DArray(&d_normGrad, &float_ChannelDesc, volumeSize);
//2.2 copy cpu .raw data (pointed by 'normalizedGrad') to this gpu 3D array (pointed by 'd_normalizedGrad').
//cudaMemcpy3DParms is CUDA 3D memory copying parameters.
copyParams.srcPtr = make_cudaPitchedPtr(normGrad, volumeSize.width * sizeof(float), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_normGrad;
cudaMemcpy3D(©Params);
//2.3 (1)set texture parameters for a gpu 3D texture;
//(2)bind this 3D texture with above 3D array, so that we can use tex3D(tex, x, y, z) to obtain .raw's (x, y, z) voxel.
tex3D_normGrad.normalized = true; //access with normalized texture coordinates [0.0,1.0].
tex3D_normGrad.filterMode = cudaFilterModeLinear; //linear interpolation can only use with float-point.
tex3D_normGrad.addressMode[0] = cudaAddressModeClamp; //clamp texture coordinates
tex3D_normGrad.addressMode[1] = cudaAddressModeClamp;
//bind above 3D array to this gpu 3D texture.
cudaBindTextureToArray(tex3D_normGrad, d_normGrad, float_ChannelDesc);
//3.1 create a 3D array on gpu, pointed by 'd_probVolume'.
cudaMalloc3DArray(&d_probVolume, &float_ChannelDesc, volumeSize);
//3.2 copy cpu .raw data (pointed by 'probVolume') to this gpu 3D array (pointed by 'd_probVolume').
//cudaMemcpy3DParms is CUDA 3D memory copying parameters.
copyParams.srcPtr = make_cudaPitchedPtr(probVolume, volumeSize.width * sizeof(float), volumeSize.width, volumeSize.height); //copyParams_resultOfSelected3DComponent.srcPtr = make_cudaPitchedPtr(resultOfSelected3DComponent, volumeSize.width * sizeof(unsigned char), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_probVolume;
cudaMemcpy3D(©Params);
//3.3 (1)set texture parameters for a gpu 3D texture;
//(2)bind this 3D texture with above 3D array, so that we can use tex3D(tex, x, y, z) to obtain .raw's (x, y, z) voxel.
tex3D_probVolume.normalized = true; //access with normalized texture coordinates [0.0, 1.0].
tex3D_probVolume.filterMode = cudaFilterModeLinear; //linear interpolation can only use with float-point. //tex3D_resultOfSelected3DComponent.filterMode = cudaFilterModePoint;
tex3D_probVolume.addressMode[0] = cudaAddressModeClamp; //clamp texture coordinates
tex3D_probVolume.addressMode[1] = cudaAddressModeClamp;
//bind above 3D array to this gpu 3D texture.
cudaBindTextureToArray(tex3D_probVolume, d_probVolume, float_ChannelDesc);
/*
//4.1 create a 1D array on gpu, pointed by 'd_colormap'.
cudaMallocArray(&d_colormap, &float4_ChannelDesc, numOfRows_colormap, 1);
//4.2 copy cpu .raw colormap (pointed by 'colormap') to this gpu 1D array.
cudaMemcpyToArray(d_colormap, 0, 0, colormap, sizeof(float4) * numOfRows_colormap, cudaMemcpyHostToDevice);
//4.3 (1)set texture parameters for a gpu 1D texture;
//(2)bind the 1D texture with above 1D colormap array, so that we can use tex1D(transferTex, x) to obtain the x-indexed RGBA color.
tex1D_colormap.normalized = true;
tex1D_colormap.filterMode = cudaFilterModeLinear;
tex1D_colormap.addressMode[0] = cudaAddressModeClamp;
//bind above 1D colormap array to this 1D texture.
cudaBindTextureToArray(tex1D_colormap, d_colormap, float4_ChannelDesc);
*/
//add on 2019/4/17.
//5.1 create a 3D array on gpu, pointed by 'd_normNormalX'.
cudaMalloc3DArray(&d_normNormalX, &float_ChannelDesc, volumeSize);
//5.2 copy cpu .raw data (pointed by 'normalizedGrad') to this gpu 3D array (pointed by 'd_normalizedGrad').
//cudaMemcpy3DParms is CUDA 3D memory copying parameters.
copyParams.srcPtr = make_cudaPitchedPtr(normNormalX, volumeSize.width * sizeof(float), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_normNormalX;
cudaMemcpy3D(©Params);
//5.3 (1)set texture parameters for a gpu 3D texture;
//(2)bind this 3D texture with above 3D array, so that we can use tex3D(tex, x, y, z) to obtain .raw's (x, y, z) voxel.
tex3D_normNormalX.normalized = true; //access with normalized texture coordinates [0.0,1.0].
tex3D_normNormalX.filterMode = cudaFilterModeLinear; //linear interpolation can only use with float-point.
tex3D_normNormalX.addressMode[0] = cudaAddressModeClamp; //clamp texture coordinates
tex3D_normNormalX.addressMode[1] = cudaAddressModeClamp;
//bind above 3D array to this gpu 3D texture.
cudaBindTextureToArray(tex3D_normNormalX, d_normNormalX, float_ChannelDesc);
//6.1 create a 3D array on gpu, pointed by 'd_normNormalY'.
cudaMalloc3DArray(&d_normNormalY, &float_ChannelDesc, volumeSize);
//6.2 copy cpu .raw data (pointed by 'normalizedGrad') to this gpu 3D array (pointed by 'd_normalizedGrad').
//cudaMemcpy3DParms is CUDA 3D memory copying parameters.
copyParams.srcPtr = make_cudaPitchedPtr(normNormalY, volumeSize.width * sizeof(float), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_normNormalY;
cudaMemcpy3D(©Params);
//6.3 (1)set texture parameters for a gpu 3D texture;
//(2)bind this 3D texture with above 3D array, so that we can use tex3D(tex, x, y, z) to obtain .raw's (x, y, z) voxel.
tex3D_normNormalY.normalized = true; //access with normalized texture coordinates [0.0,1.0].
tex3D_normNormalY.filterMode = cudaFilterModeLinear; // linear interpolation can only use with float-point.
tex3D_normNormalY.addressMode[0] = cudaAddressModeClamp; //clamp texture coordinates
tex3D_normNormalY.addressMode[1] = cudaAddressModeClamp;
//bind above 3D array to this gpu 3D texture.
cudaBindTextureToArray(tex3D_normNormalY, d_normNormalY, float_ChannelDesc);
//7.1 create a 3D array on gpu, pointed by 'd_normNormalZ'.
cudaMalloc3DArray(&d_normNormalZ, &float_ChannelDesc, volumeSize);
//7.2 copy cpu .raw data (pointed by 'normalizedGrad') to this gpu 3D array (pointed by 'd_normalizedGrad').
//cudaMemcpy3DParms is CUDA 3D memory copying parameters.
copyParams.srcPtr = make_cudaPitchedPtr(normNormalZ, volumeSize.width * sizeof(float), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_normNormalZ;
cudaMemcpy3D(©Params);
//7.3 (1)set texture parameters for a gpu 3D texture;
//(2)bind this 3D texture with above 3D array, so that we can use tex3D(tex, x, y, z) to obtain .raw's (x, y, z) voxel.
tex3D_normNormalZ.normalized = true; //access with normalized texture coordinates [0.0,1.0].
tex3D_normNormalZ.filterMode = cudaFilterModeLinear; // linear interpolation can only use with float-point.
tex3D_normNormalZ.addressMode[0] = cudaAddressModeClamp; //clamp texture coordinates
tex3D_normNormalZ.addressMode[1] = cudaAddressModeClamp;
//bind above 3D array to this gpu 3D texture.
cudaBindTextureToArray(tex3D_normNormalZ, d_normNormalZ, float_ChannelDesc);
//add on 2019/4/17.
}
//add on 2020/3/30.
__device__ void clamp(float4 *sampleCol)
{
if (sampleCol->x < 0.0f)
sampleCol->x = 0.0f;
else if (sampleCol->x > 1.0f)
sampleCol->x = 1.0f;
if (sampleCol->y < 0.0f)
sampleCol->y = 0.0f;
else if (sampleCol->y > 1.0f)
sampleCol->y = 1.0f;
if (sampleCol->z < 0.0f)
sampleCol->z = 0.0f;
else if (sampleCol->z > 1.0f)
sampleCol->z = 1.0f;
if (sampleCol->w < 0.0f)
sampleCol->w = 0.0f;
else if (sampleCol->w > 1.0f)
sampleCol->w = 1.0f;
}
//add on 2020/3/30.
//launch winWidth * winHeight threads.
//volume ray casting algorithm.
__global__ void d_render(int winWidth, int winHeight, cudaExtent volumeSize, int maxVolumeDim,
int totAniFrames, int *d_aniMatrix, int theta, float3 lightPos, bool contextOpen,
uint *d_output)
{
//get thread(x, y) id.
int x = threadIdx.x + blockIdx.x * blockDim.x; //range: [0, winWidth - 1].
int y = threadIdx.y + blockIdx.y * blockDim.y; //range: [0, winHeight - 1].
if ((x < winWidth) && (y < winHeight))
{
const int maxSteps = 500; //max number of samples along a ray.
const float tstep = 0.01f; //sampling distance between 2 samples along a ray.
const float opacityThreshold = 0.95f;
//both boxMin and boxMax ensure the final image displays in correct dimension along 3 directions.
const float3 boxMin = make_float3(-1.0f * volumeSize.width / maxVolumeDim,
-1.0f * volumeSize.height / maxVolumeDim,
-1.0f * volumeSize.depth / maxVolumeDim);
const float3 boxMax = make_float3(1.0f * volumeSize.width / maxVolumeDim,
1.0f * volumeSize.height / maxVolumeDim,
1.0f * volumeSize.depth / maxVolumeDim);
//for each pixel(x, y) on d_output:
//map pixel(x, y) on d_output to (u, v) in world space, each in [-1.0, 1.0].
float u = (x / (float)winWidth) * 2.0f - 1.0f; //u range: [-1.0, 1.0].
float v = (y / (float)winHeight) * 2.0f - 1.0f; //v range: [-1.0, 1.0].
//calculate eye ray in world space.
Ray eyeRay;
//compute eyeRay origin: eyeRay.o = (0, 0, 4).
eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f)));
//compute eyeRay direction.
eyeRay.d = normalize(make_float3(u, v, -2.0f));
eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
//find intersection with box.
float tnear, tfar;
int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar);
if (hit) //if eyeRay intersects the box.
{
if (tnear < 0.0f) tnear = 0.0f; //clamp to near plane.
//march along ray from front to back, accumulating color.
float4 color_xy = bgColor;
float t = tnear;
float3 pos = eyeRay.o + eyeRay.d * tnear; //pos range: [-1, -1, -1] - [1, 1, 1].
float3 step = eyeRay.d * tstep;
for (int i = 0; i < maxSteps; i++)
{
//(1)fetch the pos(x, y, z) sample point's normInten + normGrad + probVolume + numOfEnhancedFrames + x/y/z normals.
//sample_normInten range: [0, 1].
float sample_normInten = tex3D(tex3D_normInten,
pos.x * maxVolumeDim / volumeSize.width * 0.5f + 0.5f,
pos.y * maxVolumeDim / volumeSize.height * 0.5f + 0.5f,
pos.z * maxVolumeDim / volumeSize.depth * 0.5f + 0.5f);
//sample_normGrad range: [0, 1].
float sample_normGrad = tex3D(tex3D_normGrad,
pos.x * maxVolumeDim / volumeSize.width * 0.5f + 0.5f,
pos.y * maxVolumeDim / volumeSize.height * 0.5f + 0.5f,
pos.z * maxVolumeDim / volumeSize.depth * 0.5f + 0.5f);
//sample_probVolume range: [0, 1].
float sample_probVolume = tex3D(tex3D_probVolume,
pos.x * maxVolumeDim / volumeSize.width * 0.5f + 0.5f,
pos.y * maxVolumeDim / volumeSize.height * 0.5f + 0.5f,
pos.z * maxVolumeDim / volumeSize.depth * 0.5f + 0.5f);
//sample_numOfEnhancedFrames range: [0, 16].
//int sample_numOfEnhancedFrames = round(sample_probVolume * totAniFrames); //ceil(sample_probVolume * totAniFrames);
//sample_normNormal.x range: [0, 1];
//sample_normNormal.y range: [0, 1];
//sample_normNormal.z range : [0, 1].
float3 sample_normNormal;
sample_normNormal.x = tex3D(tex3D_normNormalX,
pos.x * maxVolumeDim / volumeSize.width * 0.5f + 0.5f,
pos.y * maxVolumeDim / volumeSize.height * 0.5f + 0.5f,
pos.z * maxVolumeDim / volumeSize.depth * 0.5f + 0.5f);
sample_normNormal.y = tex3D(tex3D_normNormalY,
pos.x * maxVolumeDim / volumeSize.width * 0.5f + 0.5f,
pos.y * maxVolumeDim / volumeSize.height * 0.5f + 0.5f,
pos.z * maxVolumeDim / volumeSize.depth * 0.5f + 0.5f);
sample_normNormal.z = tex3D(tex3D_normNormalZ,
pos.x * maxVolumeDim / volumeSize.width * 0.5f + 0.5f,
pos.y * maxVolumeDim / volumeSize.height * 0.5f + 0.5f,
pos.z * maxVolumeDim / volumeSize.depth * 0.5f + 0.5f);
//(2)compute the pos(x, y, z) sample point's RGBA color sampleCol, according to contextOpen (true or false).
//(2.1)compute the pos(x, y, z) sample point's RGBA sampleCol without shading.
float opacityScaleFactor = 1.5;
float enhFactor = 900;
float4 sampleCol = { 0.0f, 0.0f, 0.0f, 0.0f };
switch (contextOpen)
{
case true: //show both context + tarFeat.
if (sample_probVolume == 0) //this voxel is context.
{
sampleCol = contextColor;
sampleCol.w = sample_normInten * sample_normGrad * opacityScaleFactor;
}
else if (sample_probVolume > 0) //this voxel is the target feature.
{
sampleCol = tarFeatColor;
sampleCol.w = sample_normInten * sample_normGrad * opacityScaleFactor;
sampleCol.w *= (1.0f + log(sample_probVolume + 1) * enhFactor);
}
break;
case false: //show only tarFeat.
if (sample_probVolume > 0)
{
sampleCol = tarFeatColor;
sampleCol.w = sample_normInten * sample_normGrad * opacityScaleFactor;
sampleCol.w *= (1.0f + log(sample_probVolume + 1) * enhFactor);
}
break;
}
//(2.2)add shading to pos(x, y, z) sample point RGBA sampleCol.
//compute light direction for pos(x, y, z).
float3 lightDir;
lightDir.x = lightPos.x - pos.x;
lightDir.y = lightPos.y - pos.y;
lightDir.z = lightPos.z - pos.z;
//normalize lightDir.
float len_lightDir = sqrt(lightDir.x * lightDir.x + lightDir.y * lightDir.y + lightDir.z * lightDir.z);
if (len_lightDir < MINLEN)
{
lightDir.x = 0;
lightDir.y = 0;
lightDir.z = 0;
}
else
{
lightDir.x /= len_lightDir;
lightDir.y /= len_lightDir;
lightDir.z /= len_lightDir;
}
//compute diffuse lighting.
float diffuseFactor = 10;
float diffuse = sample_normNormal.x * lightDir.x + sample_normNormal.y * lightDir.y + sample_normNormal.z * lightDir.z;
//add diffuse lighting to sampleCol.
sampleCol.x += diffuse * diffuseFactor;
sampleCol.y += diffuse * diffuseFactor;
sampleCol.z += diffuse * diffuseFactor;
//(2.3)clamp sampleCol to be [0, 1].
clamp(&sampleCol);
/*
//(3)get the pos(x, y, z) sample point's enhanced RGB (optional) + opacity.
float enhFactor = 9000000; //30;
float4 sampleCol_enhanced = sampleCol_default;
//(3.1)method 1 (as described in the rule-enhanced paper): from the pos(x, y, z) sample point's probVolume, derive the sample point's enhanced opacity Oe(v).
sampleCol_enhanced.w = sampleCol_default.w * (1.0f + log(sample_probVolume + 1) * enhFactor);
//end method 1.
*/
/*
//(3.2)method 2 (our own variant; use either method 1 or method 2): from the pos(x, y, z) sample point's probVolume, derive the sample point's new RGB + enhanced opacity Oe(v).
//(3.2.1)get the pos(x, y, z) sample point's new RGB.
if (sample_probVolume > 0)
{
//specify to be red.
sampleCol_enhanced.x = 1.0f;
sampleCol_enhanced.y = 0.0f;
sampleCol_enhanced.z = 0.0f;
}
//(3.2.2)get the pos(x, y, z) sample point's enhanced opacity Oe(v).
sampleCol_enhanced.w = sampleCol_default.w * (1 + log(sample_probVolume + 1) * enhFactor);
//end method 2.
*/
/*
//(3.3)clamp pos(x, y, z) sample point enhanced opacity Oe(v) to be [0, 1].
if (sampleCol_enhanced.w < 0.0f)
sampleCol_enhanced.w = 0.0f;
else if (sampleCol_enhanced.w > 1.0f)
sampleCol_enhanced.w = 1.0f;
*/
/*
//add on 2019/4/13.
//(4)get the pos(x, y, z) sample point's removed opacity.
float4 sampleCol_removed = sampleCol_default;
//(4.1)decide sampleCol_removed.w from this sample point's sample_probVolume.
//the larger sample_probVolume is, the smaller sampleCol_removed.w becomes;
//the smaller sample_probVolume is, the larger sampleCol_removed.w becomes.
//(Note: (a)when remFactor = 0, sampleCol_removed.w = sampleCol_default.w (use remFactor = 0 when the target feature cannot be shown in the original data);
//(b)the larger remFactor is, the more contribution is removed).
float remFactor = 0; //90000000;
sampleCol_removed.w = sampleCol_default.w * (1.0f - log(sample_probVolume + 1) * remFactor);
//end method 2.
//(4.2)clamp sampleCol_removed.w to be [0, 1].
if (sampleCol_removed.w < 0.0f)
sampleCol_removed.w = 0.0f;
else if (sampleCol_removed.w > 1.0f)
sampleCol_removed.w = 1.0f;
//(5)from d_aniMatrix(theta - 1, sample_numOfEnhancedFrames), determine the sampleCol_thetaFrame color for this theta frame.
float4 sampleCol_thetaFrame;
int enhancedOrRemovedValue = d_aniMatrix[(theta - 1) * (totAniFrames + 1) + sample_numOfEnhancedFrames];
if (enhancedOrRemovedValue == 1)
{
//in this theta frame, sampleColor_thetaFrame = sampleColor_enhanced.
sampleCol_thetaFrame = sampleCol_enhanced;
}
else if (enhancedOrRemovedValue == 0)
{
//in this theta frame, sampleColor_thetaFrame = sampleColor_removed.
sampleCol_thetaFrame = sampleCol_removed;
}
//add on 2019/4/13.
*/
//(6)accumulate pos(x, y, z) sample point sampleCol to be color_xy.
//pre-multiply alpha.
sampleCol.x *= sampleCol.w;
sampleCol.y *= sampleCol.w;
sampleCol.z *= sampleCol.w;
//"over" operator for front-to-back blending.
//color_xy = color_xy + sampleCol * (1.0f - color_xy.w);
color_xy = sampleCol + color_xy * (1.0f - sampleCol.w); //refer to https://stackoverflow.com/questions/39555050/how-to-do-the-blending-in-volume-rendering-using-glsl
//exit early if opaque.
if (color_xy.w > opacityThreshold)
break;
t += tstep;
if (t > tfar) break;
pos += step;
}
//write each [x, y]'s output color to corresponding pixel location in pbo.
d_output[x + y * winWidth] = rgbaFloatToInt(color_xy);
//printf("col = %u.\n", d_output[x + y * winWidth]);
}//end if intersection.
//add on 2020/4/2.
else //if not intersection with data.
{
d_output[x + y * winWidth] = rgbaFloatToInt(bgColor);
//printf("white = %u.\n", rgbaFloatToInt(bgColor));
}
//add on 2020/4/2.
}//end if.
}
/*
//add on 2020.4.1.
__global__ void drawLasso(int winWidth, int winHeight, int windowTitleBarHeight, int ox, int oy,
uint *d_output)
{
//here is correct 4 corners of window.
//d_output[0 + 0 * winWidth] = rgbaFloatToInt({ 1.0, 0.0, 0.0, 1.0 }); //red.
// d_output[0 + 495 * winWidth] = rgbaFloatToInt({ 0.0, 1.0, 0.0, 1.0 }); //green.
// d_output[495 + 0 * winWidth] = rgbaFloatToInt({ 1.0, 0.0, 1.0, 1.0 }); //pink.
// d_output[495 + 495 * winWidth] = rgbaFloatToInt({ 1.0, 1.0, 0.0, 1.0 }); //yellow.
//draw lasso on d_output.
d_output[ox + (winHeight - windowTitleBarHeight - oy) * winWidth] = rgbaFloatToInt(lassoColor);
}
*/
/*
//launch winWidth * winHeight threads.
__global__ void refineProbVolume(int winWidth, int winHeight, unsigned char *dev_lassoLabelImg, unsigned char *dev_bgImg,
uint *d_output)
{
//get thread(x, y) id.
int x = threadIdx.x + blockIdx.x * blockDim.x; //range: [0, winWidth - 1].
int y = threadIdx.y + blockIdx.y * blockDim.y; //range: [0, winHeight - 1].
if ((x < winWidth) && (y < winHeight))
{
//1. copy d_output lasso edge to dev_lassoLabelImg (lasso edge = 1).
if (d_output[x + y * winWidth] == rgbaFloatToInt(lassoColor))
{
dev_lassoLabelImg[x + y * winWidth] = 1;
}
//2. use region-growing to fill the dev_lassoLabelImg background (background == 64),
//by using one thread.
if ((x == 0) && (y == 0))
{
backgroundGen(winWidth, winHeight, dev_lassoLabelImg, dev_bgImg, 64, 0, initialSeedPos);
}
//3. lasso pixels = the complement of the background.
}// end if.
}
//add on 2020.4.1.
*/
extern "C" void render_kernel(int winWidth, int winHeight, dim3 blockSize, dim3 gridSize, cudaExtent volumeSize, int maxVolumeDim,
int totAniFrames, int *d_aniMatrix, int theta,
float3 lightPos, bool contextOpen,
uint *d_output)
{
/*
if (buttonState == 1) //mouse left button is moving while pressed.
{
//use one thread to draw the lasso on d_output.
drawLasso << <1, 1 >> >(winWidth, winHeight, windowTitleBarHeight, ox, oy, d_output);
}
else if (buttonState == 2) //mouse left button is released.
{
refineProbVolume << <gridSize, blockSize >> >(winWidth, winHeight, dev_lassoLabelImg, dev_bgImg,
d_output);
printf("left released.\n");
}
else
{
*/
//render image (pointed by d_output).
d_render <<<gridSize, blockSize>>> (winWidth, winHeight, volumeSize, maxVolumeDim, totAniFrames, d_aniMatrix, theta, lightPos,
contextOpen, d_output);
//}
}
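//A minimal host-side sketch of how this wrapper might be driven (an assumption, not taken
//from this project; winWidth, winHeight and the device pointers come from the caller):
/*
dim3 blockSize(16, 16);
dim3 gridSize((winWidth + blockSize.x - 1) / blockSize.x,
(winHeight + blockSize.y - 1) / blockSize.y);
render_kernel(winWidth, winHeight, blockSize, gridSize, volumeSize, maxVolumeDim,
totAniFrames, d_aniMatrix, theta, lightPos, contextOpen, d_output);
*/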
/*
//add on 2020/4/6.
//fill background pixels (= 64) with a region-growing algorithm.
__global__ void backgroundGen(int winWidth, int winHeight, uint *d_output2,
unsigned char *dev_lassoBackgroundImg, int2 initialSeedPos, int grayValue_background, int increment)
{
dev_lassoBackgroundImg[initialSeedPos.x + initialSeedPos.y * winWidth] = grayValue_background;
//int initialSeedIntensity = d_output2[initialSeedPos.x + initialSeedPos.y * winWidth];
//int minValue_Seed = initialSeedIntensity - increment;
//int maxValue_Seed = initialSeedIntensity + increment;
int oldseeds = 1;
int newseeds = 0;
while (newseeds != oldseeds)
{
oldseeds = newseeds;
newseeds = 0;
for (int y = 1; y < (winHeight - 1); y++) //i = y;
for (int x = 1; x < (winWidth - 1); x++) //x = j.
{
if (dev_lassoBackgroundImg[x + y * winWidth] > 0) //indicate dev_lassoBackgroundImg(x, y) is a seed.
{
//(1)find this seed's upper neighbor.
int intensity = d_output2[x + (y - 1) * winWidth];
if (intensity != rgbaFloatToInt({ 1.0f, 0.0f, 1.0f, 1.0f })) //((intensity >= minValue_Seed) && (intensity <= maxValue_Seed))
{
newseeds = newseeds + 1;
dev_lassoBackgroundImg[x + (y - 1) * winWidth] = grayValue_background;
}
//(2)find this seed's lower neighbor.
intensity = d_output2[x + (y + 1) * winWidth];
if (intensity != rgbaFloatToInt({ 1.0f, 0.0f, 1.0f, 1.0f })) //((intensity >= minValue_Seed) && (intensity <= maxValue_Seed))
{
newseeds = newseeds + 1;
dev_lassoBackgroundImg[x + (y + 1) * winWidth] = grayValue_background;
}
//(3)find this seed's left neighbor.
intensity = d_output2[(x - 1) + y * winWidth];
if (intensity != rgbaFloatToInt({ 1.0f, 0.0f, 1.0f, 1.0f })) //((intensity >= minValue_Seed) && (intensity <= maxValue_Seed))
{
newseeds = newseeds + 1;
dev_lassoBackgroundImg[(x - 1) + y * winWidth] = grayValue_background;
}
//(4)find this seed's right neighbor.
intensity = d_output2[(x + 1) + y * winWidth];
if (intensity != rgbaFloatToInt({ 1.0f, 0.0f, 1.0f, 1.0f })) //((intensity >= minValue_Seed) && (intensity <= maxValue_Seed))
{
newseeds = newseeds + 1;
dev_lassoBackgroundImg[(x + 1) + y * winWidth] = grayValue_background;
}
} //end if.
} //end for.
} //end while.
}
*/
/*
extern "C" void lassoBackgroundGen(int winWidth, int winHeight, uint *d_output2,
unsigned char *dev_lassoEdgeImg, unsigned char *dev_lassoBackgroundImg,
int2 initialSeedPos, int grayValue_background, int increment)
{
backgroundGen << <1, 1 >> >(winWidth, winHeight, d_output2,
dev_lassoEdgeImg, dev_lassoBackgroundImg,
initialSeedPos, grayValue_background, increment);
cudaError_t cudaerr = cudaDeviceSynchronize();
if (cudaerr != cudaSuccess)
{
printf("lassoBackgroundGen launch failed with error \"%s\".\n", cudaGetErrorString(cudaerr));
}
else
{
printf("lassoBackgroundGen launch successfully.\n");
}
}
*/
/*
//launch winWidth * winHeight threads.
__global__ void refineProbVolume(int winWidth, int winHeight,
uint *d_output2,
unsigned char *dev_lassoEdgeImg, unsigned char *dev_lassoBackgroundImg,
int2 initialSeedPos, int grayValue_lassoBackground, int increment,
int cx, int cy)
{
//get this thread's (x, y) id.
int x = threadIdx.x + blockIdx.x * blockDim.x; //range: [0, winWidth - 1].
int y = threadIdx.y + blockIdx.y * blockDim.y; //range: [0, winHeight - 1].
if ((x < winWidth) && (y < winHeight))
{
//1. generate dev_lassoEdgeImg (lasso edge = 1), according to d_output2 lasso.
if (d_output2[x + y * winWidth] == rgbaFloatToInt({ 1.0f, 0.0f, 1.0f, 1.0f }))
{
printf("wtf.\n");
dev_lassoEdgeImg[x + y * winWidth] = 1;
}
else
{
//printf("%u.\n", d_output2[x + y * winWidth]);
}
//2. obtain dev_lassoBackgroundImg (lasso background = 64) with a region-growing algorithm.
if ((x == 0) && (y == 0))
{
printf("pink = %u.\n", rgbaFloatToInt({ 1.0f, 0.0f, 1.0f, 1.0f }));
//printf("white = %u.\n", rgbaFloatToInt(bgColor));
printf("col = %u.\n", d_output2[cx + (winHeight - cy - 9) * winWidth]);
}
} //end if.
}
extern "C" void getLassoPixels(int winWidth, int winHeight, dim3 gridSize, dim3 blockSize,
uint *d_output2,
unsigned char *dev_lassoEdgeImg, unsigned char *dev_lassoBackgroundImg,
int2 initialSeedPos, int grayValue_lassoBackground, int increment,
int cx, int cy)
{
//printf("gridSize: %d, %d, %d; blockSize: %d, %d, %d.\n", gridSize.x, gridSize.y, gridSize.z, blockSize.x, blockSize.y, blockSize.z);
refineProbVolume << <gridSize, blockSize>> >(winWidth, winHeight,
d_output2,
dev_lassoEdgeImg, dev_lassoBackgroundImg,
initialSeedPos, grayValue_lassoBackground, increment,
cx, cy);
cudaError_t cudaerr = cudaDeviceSynchronize();
if (cudaerr != cudaSuccess)
{
printf("refineProbVolume launch failed with error \"%s\".\n", cudaGetErrorString(cudaerr));
}
}
//launch winWidth * winHeight threads.
__global__ void lassoBackgroundGen(int winWidth, int winHeight, uint *d_output2)
{
//get this thread's (x, y) id.
int x = threadIdx.x + blockIdx.x * blockDim.x; //range: [0, winWidth - 1].
int y = threadIdx.y + blockIdx.y * blockDim.y; //range: [0, winHeight - 1].
if ((x < winWidth) && (y < winHeight))
{
if (d_output2[x + y * winWidth] == rgbaFloatToInt({ 1.0f, 0.0f, 1.0f, 1.0f }))
{
printf("yes.\n");
}
}//end if.
}
extern "C" void proc(int winWidth, int winHeight, dim3 gridSize, dim3 blockSize, uint *d_output2)
{
lassoBackgroundGen << <gridSize, blockSize>> >(winWidth, winHeight, d_output2);
}
*/
//launch winWidth * winHeight threads.
__global__ void refineProbVolume(int winWidth, int winHeight, cudaExtent volumeSize, int maxVolumeDim,
void *probVolume, unsigned char *dev_lassoBackgroundImg)
{
//get this thread's (x, y) id.
int x = threadIdx.x + blockIdx.x * blockDim.x; //range: [0, winWidth - 1].
int y = threadIdx.y + blockIdx.y * blockDim.y; //range: [0, winHeight - 1].
if ((x < winWidth) && (y < winHeight))
{
if (dev_lassoBackgroundImg[x + y * winWidth] == 0) //this pixel (x, y) is a lasso pixel.
{
//for a single lasso pixel (x, y):
//1. map pixel(x, y) to pixel(u, v).
float u = (x / (float)winWidth) * 2.0f - 1.0f; //u range: [-1.0, 1.0].
float v = (y / (float)winHeight) * 2.0f - 1.0f; //v range: [-1.0, 1.0].
//2. calculate eye ray in world space.
Ray eyeRay;
//compute eyeRay origin: eyeRay.o = (0, 0, 4).
eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f)));
//compute eyeRay direction.
eyeRay.d = normalize(make_float3(u, v, -2.0f));
eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
const int maxSteps = 500; //max number of samples along a ray.
const float tstep = 0.01f; //sampling distance between 2 samples along a ray.
//both boxMin and boxMax ensure the final image displays in correct dimension along 3 directions.
const float3 boxMin = make_float3(-1.0f * volumeSize.width / maxVolumeDim,
-1.0f * volumeSize.height / maxVolumeDim,
-1.0f * volumeSize.depth / maxVolumeDim);
const float3 boxMax = make_float3(1.0f * volumeSize.width / maxVolumeDim,
1.0f * volumeSize.height / maxVolumeDim,
1.0f * volumeSize.depth / maxVolumeDim);
//3. find intersection with box.
float tnear, tfar;
int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar);
if (hit)
{
if (tnear < 0.0f) tnear = 0.0f; //clamp to near plane.
//march along ray from front to back, accumulating color.
float4 color_xy = bgColor;
float t = tnear;
float3 pos = eyeRay.o + eyeRay.d * tnear; //pos range: [-0.5, -1, -1] - [0.5, 1, 1].
printf("pos.x = %f, pos.y = %f, pos.z = %f.\n", pos.x, pos.y, pos.z);
float3 step = eyeRay.d * tstep;
for (int i = 0; i < maxSteps; i++)
{
//given sampling point pos(x, y, z):
//(i)map pos(x, y, z) (x/y/z range: [-1, 1])
//to pos(x, y, z) (x/y/z range: [0, volumeSize.width/height/depth]).
pos.x = pos.x * float(maxVolumeDim) / float(volumeSize.width) * (float(volumeSize.width) / 2) + float(volumeSize.width) / 2;
pos.y = pos.y * float(maxVolumeDim) / float(volumeSize.height) * (float(volumeSize.height) / 2) + float(volumeSize.height) / 2;
pos.z = pos.z * float(maxVolumeDim) / float(volumeSize.depth) * (float(volumeSize.depth) / 2) + float(volumeSize.depth) / 2;
if (pos.x == volumeSize.width)
pos.x = volumeSize.width - 1;
if (pos.y == volumeSize.height)
pos.y = volumeSize.height - 1;
if (pos.z == volumeSize.depth)
pos.z = volumeSize.depth - 1;
//(ii)set sampling point pos(x, y, z)'s probVolume = 0.
//((float*)probVolume)[int(pos.x) + int(pos.y) * volumeSize.width + pos.z * volumeSize.width * volumeSize.height] = 0;
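//a sketch of the intended write, assuming a row-major voxel layout (x fastest) and that
//all three indices are truncated to integers before use:
//((float*)probVolume)[int(pos.x) + int(pos.y) * volumeSize.width + int(pos.z) * volumeSize.width * volumeSize.height] = 0.0f;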
t += tstep;
if (t > tfar) break;
pos += step;
}
}//end if intersection.
}
}//end if.
}
extern "C" void inParallelRefineProbVolume(int winWidth, int winHeight, dim3 gridSize, dim3 blockSize,
cudaExtent volumeSize, int maxVolumeDim,
void *probVolume, unsigned char *dev_lassoBackgroundImg)
{
refineProbVolume <<<gridSize, blockSize>>>(winWidth, winHeight, volumeSize, maxVolumeDim,
probVolume, dev_lassoBackgroundImg);
cudaError_t cudaerr = cudaDeviceSynchronize();
if (cudaerr != cudaSuccess)
{
printf("refineProbVolume failed with error \"%s\".\n", cudaGetErrorString(cudaerr));
}
}
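//A rough usage sketch (assumptions only: buffer names and sizes are illustrative, and the
//lasso background image is produced elsewhere with 0 marking pixels inside the lasso):
/*
unsigned char *dev_lassoBackgroundImg = nullptr;
cudaMalloc((void**)&dev_lassoBackgroundImg, winWidth * winHeight * sizeof(unsigned char));
//... fill dev_lassoBackgroundImg from the rasterized lasso ...
dim3 blockSize(16, 16);
dim3 gridSize((winWidth + blockSize.x - 1) / blockSize.x,
(winHeight + blockSize.y - 1) / blockSize.y);
inParallelRefineProbVolume(winWidth, winHeight, gridSize, blockSize,
volumeSize, maxVolumeDim, probVolume, dev_lassoBackgroundImg);
*/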
//add on 2020/4/6.
#endif
|
5b6d2999588236a1198844112f0a10f90bdcc4e7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Possible weight coefficients for tracking cost evaluation :
// Gaussian discretisation
/*
* 1 4 6 4 1
* 4 16 24 16 4
* 6 24 36 24 6
* 4 16 24 16 4
* 1 4 6 4 1
*/
// Compute spatial derivatives using Scharr operator - Naive implementation..
// Compute spatial derivatives using Scharr operator - Naive implementation..
// Compute spatial derivatives using Sobel operator - Naive implementation..
// Compute spatial derivatives using Sobel operator - Naive implementation..
// Low pass gaussian-like filtering before subsampling
// Low pass gaussian-like filtering before subsampling
/*
// Upsample a picture using the "magic" kernel
*/
__global__ void kernelSmoothX(float *in, int w, int h, float *out)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if(x >= w || y >= h)
return;
int idx = y*w;
int a = x-2;
int b = x-1;
int c = x;
int d = x+1;
int e = x+2;
if(a < 0) a = 0;
if(b < 0) b = 0;
if(d >= w) d = w-1;
if(e >= w) e = w-1;
out[y*w+x] = 0.0625f*in[idx+a] + 0.25f*in[idx+b] + 0.375f*in[idx+c] + 0.25f*in[idx+d] + 0.0625f*in[idx+e];
}
|
5b6d2999588236a1198844112f0a10f90bdcc4e7.cu
|
#include "includes.h"
// Possible weight coefficients for tracking cost evaluation :
// Gaussian discretisation
/*
* 1 4 6 4 1
* 4 16 24 16 4
* 6 24 36 24 6
* 4 16 24 16 4
* 1 4 6 4 1
*/
// Compute spatial derivatives using Scharr operator - Naive implementation..
// Compute spatial derivatives using Scharr operator - Naive implementation..
// Compute spatial derivatives using Sobel operator - Naive implementation..
// Compute spatial derivatives using Sobel operator - Naive implementation..
// Low pass gaussian-like filtering before subsampling
// Low pass gaussian-like filtering before subsampling
/*
// Upsample a picture using the "magic" kernel
*/
__global__ void kernelSmoothX(float *in, int w, int h, float *out)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if(x >= w || y >= h)
return;
int idx = y*w;
int a = x-2;
int b = x-1;
int c = x;
int d = x+1;
int e = x+2;
if(a < 0) a = 0;
if(b < 0) b = 0;
if(d >= w) d = w-1;
if(e >= w) e = w-1;
out[y*w+x] = 0.0625f*in[idx+a] + 0.25f*in[idx+b] + 0.375f*in[idx+c] + 0.25f*in[idx+d] + 0.0625f*in[idx+e];
}
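// A usage sketch (an assumption, not part of the original file: d_in and d_out are device
// buffers of w*h floats allocated and filled by the caller):
/*
dim3 block(16, 16);
dim3 grid((w + block.x - 1) / block.x, (h + block.y - 1) / block.y);
kernelSmoothX<<<grid, block>>>(d_in, w, h, d_out);
*/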
|
7a1789b47807366f064d62928a83bf143f164e6d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/resize_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_ResizeBlob(const int nthreads,const int num,const int channels, const Dtype* src, const int src_height, const int src_width,
Dtype* dst, const int dst_height, const int dst_width, const Dtype scale_h, const Dtype scale_w) {
CUDA_KERNEL_LOOP(index, nthreads) {
int i = index %( dst_height * dst_width);
int c = (index/(dst_height * dst_width))%channels;
int n = (index/(dst_height * dst_width))/channels;
int src_offset = (n * channels + c) * src_height * src_width;
int dst_offset = (n * channels + c) * dst_height * dst_width;
const Dtype* src_data = src+src_offset;
Dtype* dst_data = dst+dst_offset;
int dst_h = i /dst_width;
Dtype fh = dst_h * scale_h;
const int src_h = floor(fh);
fh -= src_h;
const Dtype w_h0 = std::abs(1.0f - fh);
const Dtype w_h1 = std::abs(fh);
const int dst_offset_1 = dst_h * dst_width;
const int src_offset_1 = src_h * src_width;
int dst_w = i %dst_width;
Dtype fw = dst_w * scale_w;
const int src_w = floor(fw);
fw -= src_w;
const Dtype w_w0 = std::abs(1.0f - fw);
const Dtype w_w1 = std::abs(fw);
const int dst_idx = dst_offset_1 + dst_w;
const int src_idx = src_offset_1 + src_w;
Dtype res = (w_h0 * w_w0 * src_data[src_idx]);
/*if (src_w + 1 < src_width)
res += (w_h0 * w_w1 * src_data[src_idx + 1]);
if (src_h + 1 < src_height)
res += (w_h1 * w_w0 * src_data[src_idx + src_width]);
if (src_w + 1 < src_width && src_h + 1 < src_height)
res += (w_h1 * w_w1 * src_data[src_idx + src_width + 1]);*/
if (src_w < src_width)
res += (w_h0 * w_w1 * src_data[src_idx]);
if (src_h < src_height)
res += (w_h1 * w_w0 * src_data[src_idx]);
if (src_w < src_width && src_h < src_height)
res += (w_h1 * w_w1 * src_data[src_idx]);
dst_data[dst_idx] = res;
}
}
template <typename Dtype>
void ResizeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
CHECK(bottom[0]->num() == top[0]->num())<<"bottom[0]->num() == top[0]->num()";
CHECK(bottom[0]->channels() == top[0]->channels())<< "bottom[0]->channels() == top[0]->channels()";
const int src_num = bottom[0]->num();
const int src_channels = bottom[0]->channels();
const int src_height = bottom[0]->height();
const int src_width = bottom[0]->width();
const int dst_channels = top[0]->channels();
const int dst_height = top[0]->height();
const int dst_width = top[0]->width();
const Dtype scale_w = src_width / (Dtype)dst_width;
const Dtype scale_h = src_height / (Dtype)dst_height;
int loop_n = dst_height * dst_width*dst_channels*src_num;
const Dtype* src_data = bottom[0]->gpu_data();
Dtype* dst_data = top[0]->mutable_gpu_data();
kernel_ResizeBlob<Dtype> << <CAFFE_GET_BLOCKS(loop_n), CAFFE_CUDA_NUM_THREADS >> >(
loop_n,src_num,src_channels,
src_data, src_height,src_width,
dst_data, dst_height, dst_width,
scale_h,scale_w);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void kernel_ResizeBackward(const int nthreads, const Dtype* top_diff, const int top_step,
Dtype* bottom_diff, const int bottom_step,
const Dtype* loc1, const Dtype* weight1, const Dtype* loc2, const Dtype* weight2,
const Dtype* loc3, const Dtype* weight3, const Dtype* loc4, const Dtype* weight4) {
CUDA_KERNEL_LOOP(index, nthreads) {
int bottom_diff_offset = bottom_step*index;
int top_diff_offset = top_step*index;
for (int idx = 0; idx < top_step; ++idx) {
bottom_diff[bottom_diff_offset + int(loc1[idx])] += top_diff[top_diff_offset + idx] * weight1[idx];
bottom_diff[bottom_diff_offset + int(loc2[idx])] += top_diff[top_diff_offset + idx] * weight2[idx];
bottom_diff[bottom_diff_offset + int(loc3[idx])] += top_diff[top_diff_offset + idx] * weight3[idx];
bottom_diff[bottom_diff_offset + int(loc4[idx])] += top_diff[top_diff_offset + idx] * weight4[idx];
}
}
}
template <typename Dtype>
__global__ void kernel_GetBiLinearResizeMatRules(const int nthreads, const int src_height, const int src_width,
const int dst_height, const int dst_width, const Dtype scale_h, const Dtype scale_w,
Dtype* loc1, Dtype* weight1, Dtype* loc2, Dtype* weight2,
Dtype* loc3, Dtype* weight3, Dtype* loc4, Dtype* weight4)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
int dst_h = index /dst_width;
Dtype fh = dst_h * scale_h;
const int src_h = floor(fh);
fh -= src_h;
const Dtype w_h0 = std::abs(1.0f - fh);
const Dtype w_h1 = std::abs(fh);
const int dst_offset_1 = dst_h * dst_width;
const int src_offset_1 = src_h * src_width;
int dst_w = index %dst_width;
Dtype fw = dst_w * scale_w;
const int src_w = floor(fw);
fw -= src_w;
const Dtype w_w0 = std::abs(1.0f - fw);
const Dtype w_w1 = std::abs(fw);
const int dst_idx = dst_offset_1 + dst_w;
// dst_data[dst_idx] = 0;
const int src_idx = src_offset_1 + src_w;
loc1[dst_idx] = src_idx;
weight1[dst_idx] = w_h0 * w_w0;
loc2[dst_idx] = 0;
weight2[dst_idx] = 0;
weight3[dst_idx] = 0;
loc3[dst_idx] = 0;
loc4[dst_idx] = 0;
weight4[dst_idx] = 0;
/* if (src_w + 1 < src_width)
{
loc2[dst_idx] = src_idx + 1;
weight2[dst_idx] = w_h0 * w_w1;
// dst_data[dst_idx] += (w_h0 * w_w1 * src_data[src_idx + 1]);
}
if (src_h + 1 < src_height)
{
// dst_data[dst_idx] += (w_h1 * w_w0 * src_data[src_idx + src_width]);
weight3[dst_idx] = w_h1 * w_w0;
loc3[dst_idx] = src_idx + src_width;
}
if (src_w + 1 < src_width && src_h + 1 < src_height)
{
loc4[dst_idx] = src_idx + src_width + 1;
weight4[dst_idx] = w_h1 * w_w1;
// dst_data[dst_idx] += (w_h1 * w_w1 * src_data[src_idx + src_width + 1]);
}*/
if (src_w < src_width)
{
loc2[dst_idx] = src_idx;
weight2[dst_idx] = w_h0 * w_w1;
}
if (src_h < src_height)
{
loc3[dst_idx] = src_idx;
weight3[dst_idx] = w_h1 * w_w0;
}
if (src_w < src_width && src_h < src_height)
{
loc4[dst_idx] = src_idx;
weight4[dst_idx] = w_h1 * w_w1;
}
}
}
template <typename Dtype>
void ResizeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* top_diff = top[0]->mutable_gpu_diff();
const Dtype* loc1 = this->locs_[0]->gpu_data();
const Dtype* weight1 = this->locs_[0]->gpu_diff();
const Dtype* loc2 = this->locs_[1]->gpu_data();
const Dtype* weight2 = this->locs_[1]->gpu_diff();
const Dtype* loc3 = this->locs_[2]->gpu_data();
const Dtype* weight3 = this->locs_[2]->gpu_diff();
const Dtype* loc4 = this->locs_[3]->gpu_data();
const Dtype* weight4 = this->locs_[3]->gpu_diff();
caffe::caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom_diff);
const Dtype scale_w = bottom[0]->width() / (Dtype)top[0]->width();
const Dtype scale_h = bottom[0]->height() / (Dtype)top[0]->height();
int loop_n_t = top[0]->height() * top[0]->width();
kernel_GetBiLinearResizeMatRules<Dtype> << <CAFFE_GET_BLOCKS(loop_n_t), CAFFE_CUDA_NUM_THREADS >> >(
loop_n_t, bottom[0]->height(), bottom[0]->width(),
top[0]->height(), top[0]->width(), scale_h, scale_w,
this->locs_[0]->mutable_gpu_data(), this->locs_[0]->mutable_gpu_diff(),
this->locs_[1]->mutable_gpu_data(), this->locs_[1]->mutable_gpu_diff(),
this->locs_[2]->mutable_gpu_data(), this->locs_[2]->mutable_gpu_diff(),
this->locs_[3]->mutable_gpu_data(), this->locs_[3]->mutable_gpu_diff());
const int top_step = top[0]->offset(0, 1);
const int bottom_step = bottom[0]->offset(0, 1);
int loop_n = this->out_num_ * this->out_channels_;
kernel_ResizeBackward<Dtype> << <CAFFE_GET_BLOCKS(loop_n), CAFFE_CUDA_NUM_THREADS >> >(
loop_n, top_diff, top_step,
bottom_diff, bottom_step,
loc1, weight1, loc2, weight2,
loc3, weight3, loc4, weight4);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ResizeLayer);
} // namespace caffe
|
7a1789b47807366f064d62928a83bf143f164e6d.cu
|
#include <vector>
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/resize_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_ResizeBlob(const int nthreads,const int num,const int channels, const Dtype* src, const int src_height, const int src_width,
Dtype* dst, const int dst_height, const int dst_width, const Dtype scale_h, const Dtype scale_w) {
CUDA_KERNEL_LOOP(index, nthreads) {
int i = index %( dst_height * dst_width);
int c = (index/(dst_height * dst_width))%channels;
int n = (index/(dst_height * dst_width))/channels;
int src_offset = (n * channels + c) * src_height * src_width;
int dst_offset = (n * channels + c) * dst_height * dst_width;
const Dtype* src_data = src+src_offset;
Dtype* dst_data = dst+dst_offset;
int dst_h = i /dst_width;
Dtype fh = dst_h * scale_h;
const int src_h = floor(fh);
fh -= src_h;
const Dtype w_h0 = std::abs(1.0f - fh);
const Dtype w_h1 = std::abs(fh);
const int dst_offset_1 = dst_h * dst_width;
const int src_offset_1 = src_h * src_width;
int dst_w = i %dst_width;
Dtype fw = dst_w * scale_w;
const int src_w = floor(fw);
fw -= src_w;
const Dtype w_w0 = std::abs(1.0f - fw);
const Dtype w_w1 = std::abs(fw);
const int dst_idx = dst_offset_1 + dst_w;
const int src_idx = src_offset_1 + src_w;
Dtype res = (w_h0 * w_w0 * src_data[src_idx]);
/*if (src_w + 1 < src_width)
res += (w_h0 * w_w1 * src_data[src_idx + 1]);
if (src_h + 1 < src_height)
res += (w_h1 * w_w0 * src_data[src_idx + src_width]);
if (src_w + 1 < src_width && src_h + 1 < src_height)
res += (w_h1 * w_w1 * src_data[src_idx + src_width + 1]);*/
if (src_w < src_width)
res += (w_h0 * w_w1 * src_data[src_idx]);
if (src_h < src_height)
res += (w_h1 * w_w0 * src_data[src_idx]);
if (src_w < src_width && src_h < src_height)
res += (w_h1 * w_w1 * src_data[src_idx]);
dst_data[dst_idx] = res;
}
}
template <typename Dtype>
void ResizeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
CHECK(bottom[0]->num() == top[0]->num())<<"bottom[0]->num() == top[0]->num()";
CHECK(bottom[0]->channels() == top[0]->channels())<< "bottom[0]->channels() == top[0]->channels()";
const int src_num = bottom[0]->num();
const int src_channels = bottom[0]->channels();
const int src_height = bottom[0]->height();
const int src_width = bottom[0]->width();
const int dst_channels = top[0]->channels();
const int dst_height = top[0]->height();
const int dst_width = top[0]->width();
const Dtype scale_w = src_width / (Dtype)dst_width;
const Dtype scale_h = src_height / (Dtype)dst_height;
int loop_n = dst_height * dst_width*dst_channels*src_num;
const Dtype* src_data = bottom[0]->gpu_data();
Dtype* dst_data = top[0]->mutable_gpu_data();
kernel_ResizeBlob<Dtype> << <CAFFE_GET_BLOCKS(loop_n), CAFFE_CUDA_NUM_THREADS >> >(
loop_n,src_num,src_channels,
src_data, src_height,src_width,
dst_data, dst_height, dst_width,
scale_h,scale_w);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void kernel_ResizeBackward(const int nthreads, const Dtype* top_diff, const int top_step,
Dtype* bottom_diff, const int bottom_step,
const Dtype* loc1, const Dtype* weight1, const Dtype* loc2, const Dtype* weight2,
const Dtype* loc3, const Dtype* weight3, const Dtype* loc4, const Dtype* weight4) {
CUDA_KERNEL_LOOP(index, nthreads) {
int bottom_diff_offset = bottom_step*index;
int top_diff_offset = top_step*index;
for (int idx = 0; idx < top_step; ++idx) {
bottom_diff[bottom_diff_offset + int(loc1[idx])] += top_diff[top_diff_offset + idx] * weight1[idx];
bottom_diff[bottom_diff_offset + int(loc2[idx])] += top_diff[top_diff_offset + idx] * weight2[idx];
bottom_diff[bottom_diff_offset + int(loc3[idx])] += top_diff[top_diff_offset + idx] * weight3[idx];
bottom_diff[bottom_diff_offset + int(loc4[idx])] += top_diff[top_diff_offset + idx] * weight4[idx];
}
}
}
template <typename Dtype>
__global__ void kernel_GetBiLinearResizeMatRules(const int nthreads, const int src_height, const int src_width,
const int dst_height, const int dst_width, const Dtype scale_h, const Dtype scale_w,
Dtype* loc1, Dtype* weight1, Dtype* loc2, Dtype* weight2,
Dtype* loc3, Dtype* weight3, Dtype* loc4, Dtype* weight4)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
int dst_h = index /dst_width;
Dtype fh = dst_h * scale_h;
const int src_h = floor(fh);
fh -= src_h;
const Dtype w_h0 = std::abs(1.0f - fh);
const Dtype w_h1 = std::abs(fh);
const int dst_offset_1 = dst_h * dst_width;
const int src_offset_1 = src_h * src_width;
int dst_w = index %dst_width;
Dtype fw = dst_w * scale_w;
const int src_w = floor(fw);
fw -= src_w;
const Dtype w_w0 = std::abs(1.0f - fw);
const Dtype w_w1 = std::abs(fw);
const int dst_idx = dst_offset_1 + dst_w;
// dst_data[dst_idx] = 0;
const int src_idx = src_offset_1 + src_w;
loc1[dst_idx] = src_idx;
weight1[dst_idx] = w_h0 * w_w0;
loc2[dst_idx] = 0;
weight2[dst_idx] = 0;
weight3[dst_idx] = 0;
loc3[dst_idx] = 0;
loc4[dst_idx] = 0;
weight4[dst_idx] = 0;
/* if (src_w + 1 < src_width)
{
loc2[dst_idx] = src_idx + 1;
weight2[dst_idx] = w_h0 * w_w1;
// dst_data[dst_idx] += (w_h0 * w_w1 * src_data[src_idx + 1]);
}
if (src_h + 1 < src_height)
{
// dst_data[dst_idx] += (w_h1 * w_w0 * src_data[src_idx + src_width]);
weight3[dst_idx] = w_h1 * w_w0;
loc3[dst_idx] = src_idx + src_width;
}
if (src_w + 1 < src_width && src_h + 1 < src_height)
{
loc4[dst_idx] = src_idx + src_width + 1;
weight4[dst_idx] = w_h1 * w_w1;
// dst_data[dst_idx] += (w_h1 * w_w1 * src_data[src_idx + src_width + 1]);
}*/
if (src_w < src_width)
{
loc2[dst_idx] = src_idx;
weight2[dst_idx] = w_h0 * w_w1;
}
if (src_h < src_height)
{
loc3[dst_idx] = src_idx;
weight3[dst_idx] = w_h1 * w_w0;
}
if (src_w < src_width && src_h < src_height)
{
loc4[dst_idx] = src_idx;
weight4[dst_idx] = w_h1 * w_w1;
}
}
}
template <typename Dtype>
void ResizeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* top_diff = top[0]->mutable_gpu_diff();
const Dtype* loc1 = this->locs_[0]->gpu_data();
const Dtype* weight1 = this->locs_[0]->gpu_diff();
const Dtype* loc2 = this->locs_[1]->gpu_data();
const Dtype* weight2 = this->locs_[1]->gpu_diff();
const Dtype* loc3 = this->locs_[2]->gpu_data();
const Dtype* weight3 = this->locs_[2]->gpu_diff();
const Dtype* loc4 = this->locs_[3]->gpu_data();
const Dtype* weight4 = this->locs_[3]->gpu_diff();
caffe::caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom_diff);
const Dtype scale_w = bottom[0]->width() / (Dtype)top[0]->width();
const Dtype scale_h = bottom[0]->height() / (Dtype)top[0]->height();
int loop_n_t = top[0]->height() * top[0]->width();
kernel_GetBiLinearResizeMatRules<Dtype> << <CAFFE_GET_BLOCKS(loop_n_t), CAFFE_CUDA_NUM_THREADS >> >(
loop_n_t, bottom[0]->height(), bottom[0]->width(),
top[0]->height(), top[0]->width(), scale_h, scale_w,
this->locs_[0]->mutable_gpu_data(), this->locs_[0]->mutable_gpu_diff(),
this->locs_[1]->mutable_gpu_data(), this->locs_[1]->mutable_gpu_diff(),
this->locs_[2]->mutable_gpu_data(), this->locs_[2]->mutable_gpu_diff(),
this->locs_[3]->mutable_gpu_data(), this->locs_[3]->mutable_gpu_diff());
const int top_step = top[0]->offset(0, 1);
const int bottom_step = bottom[0]->offset(0, 1);
int loop_n = this->out_num_ * this->out_channels_;
kernel_ResizeBackward<Dtype> << <CAFFE_GET_BLOCKS(loop_n), CAFFE_CUDA_NUM_THREADS >> >(
loop_n, top_diff, top_step,
bottom_diff, bottom_step,
loc1, weight1, loc2, weight2,
loc3, weight3, loc4, weight4);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ResizeLayer);
} // namespace caffe
|
e901b95cb8189ce0e4a297340e09531245c0bf02.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "ppm_lib.h"
#define divisionFactor 9
#define N 500*1000
static void HandleError( hipError_t err, const char *file, int line ) {
if (err != hipSuccess) {
printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line );
exit( EXIT_FAILURE ); }
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define CREATOR "PARALLELISME2OPENMP"
struct filterCoeff{
int l,c;
};
PPMImage *readPPM(const char *filename)
{
char buff[16];
PPMImage *img;
FILE *fp;
int c, rgb_comp_color;
fp = fopen(filename, "rb");
if (!fp) {
fprintf(stderr, "Unable to open file '%s'\n", filename);
exit(1);
}
if (!fgets(buff, sizeof(buff), fp)) {
perror(filename);
exit(1);
}
if (buff[0] != 'P' || buff[1] != '6') {
fprintf(stderr, "Invalid image format (must be 'P6')\n");
exit(1);
}
img = (PPMImage *)malloc(sizeof(PPMImage));
if (!img) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
c = getc(fp);
while (c == '#') {
while (getc(fp) != '\n') ;
c = getc(fp);
}
ungetc(c, fp);
if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
exit(1);
}
if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename);
exit(1);
}
if (rgb_comp_color!= RGB_COMPONENT_COLOR) {
fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
exit(1);
}
while (fgetc(fp) != '\n') ;
img->data = (PPMPixel*)malloc(img->x * img->y * sizeof(PPMPixel));
if (!img) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
if (fread(img->data, sizeof(PPMPixel)*img->x, img->y, fp) != img->y) {
fprintf(stderr, "Error loading image '%s'\n", filename);
exit(1);
}
fclose(fp);
return img;
}
void writePPM(const char *filename, PPMImage *img)
{
FILE *fp;
fp = fopen(filename, "wb");
if (!fp) {
fprintf(stderr, "Unable to open file '%s'\n", filename);
exit(1);
}
fprintf(fp, "P6\n");
fprintf(fp, "# Created by %s\n",CREATOR);
fprintf(fp, "%d %d\n",img->x,img->y);
fprintf(fp, "%d\n", RGB_COMPONENT_COLOR);
fwrite(img->data, 3 * img->x, img->y, fp);
fclose(fp);
}
// GPU version 1//
// one thread per pixel and one block per row; we increment//
__global__ void filterSofter(PPMPixel *img,int *filter ,PPMPixel *destination){
int finalRed =0;
int finalGreen =0;
int finalBlue =0;
int indFiltre = 0;
int tidX =threadIdx.x+ blockIdx.x*blockDim.x;
int l=tidX/500;
int c=tidX%500;
int ll;
int cc;
for(int i=-2;i<=2;i++){
for(int j=-2;j<=2;j++){
ll=l+i;
cc=c+j;
if(ll<0 ){
ll=-ll;
}else if(ll>1000){
ll=l-i;
}
if(cc<0 ){
cc=-cc;
} else if (cc>500){
cc=c-j;
}
finalRed += img[(ll)*500+(cc)].red * filter[indFiltre];
finalGreen += img[(ll)*500+(cc)].green * filter[indFiltre];
finalBlue += img[(ll)*500+(cc)].blue * filter[indFiltre];
indFiltre++;
}
}
destination[tidX].red = finalRed / divisionFactor;
destination[tidX].green = finalGreen / divisionFactor;
destination[tidX].blue = finalBlue / divisionFactor;
}
int main(){
PPMImage *image, *imageCopy;
image = readPPM("imageProject.ppm");
imageCopy = readPPM("imageProject.ppm");
int filter[25] = { 1, 2, 0, -2, -1,
4 , 8, 0 , -8 , -4,
6 , 12 , 0 , -12 , -6 ,
4, 8, 0 , -8, -4,
1, 2, 0, -2, -1 };
filterCoeff coeff[25] = {};
int k=0;
for(int i=-2;i<=2;i++)
for(int j=-2;j<=2;j++)
coeff[k++]={i,j};
PPMPixel *tempo;
PPMPixel *dev_image;
PPMPixel *dev_tempo;
PPMPixel *dev_imageCopy;
int *dev_filter;
filterCoeff *dev_coeff;
//double time;
//hipEvent_t start,stop;
HANDLE_ERROR( hipMalloc( (void**)&dev_image, image->x*image->y *3* sizeof(char) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_imageCopy, imageCopy->x*imageCopy->y*3 * sizeof(char) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_tempo, image->x*imageCopy->y*3 *25* sizeof(char) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_filter, 25 * sizeof(int) ));
HANDLE_ERROR( hipMalloc( (void**)&dev_coeff, 25* sizeof( filterCoeff) ));
/* copy 'a' and 'b' to the GPU */
HANDLE_ERROR( hipMemcpy( dev_image, image->data,image->x*image->y *3* sizeof(char),hipMemcpyHostToDevice));
HANDLE_ERROR( hipMemcpy( dev_imageCopy, imageCopy->data, imageCopy->x*imageCopy->y *3* sizeof(char),hipMemcpyHostToDevice));
HANDLE_ERROR( hipMemcpy( dev_filter, filter, 25 * sizeof(int),hipMemcpyHostToDevice));
HANDLE_ERROR( hipMemcpy( dev_coeff, coeff, 25 * sizeof( filterCoeff),hipMemcpyHostToDevice));
/*hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
*/
hipLaunchKernelGGL((
tempo), dim3(500),dim3(1000), 0, 0, dev_image,dev_tempo);
HANDLE_ERROR( hipMemcpy( tempo, dev_tempo, imageCopy->x*imageCopy->y * 3*25*sizeof(char), hipMemcpyDeviceToHost));
printf(">%s\n",hipGetErrorString (hipGetLastError ()));
/*
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
*/hipLaunchKernelGGL((
filterSofter), dim3(500),dim3(1000), 0, 0, dev_image,dev_filter,dev_imageCopy);
/* copy the array 'c' from the GPU back to the CPU */
HANDLE_ERROR( hipMemcpy( imageCopy->data, dev_imageCopy, imageCopy->x*imageCopy->y * 3*sizeof(char), hipMemcpyDeviceToHost));
//printf("Temps ncessaire : %3.1f ms\n", time);
writePPM("imageProjectResult.ppm",imageCopy);
/* free the memory allocated on the GPU */
hipFree( dev_image );
hipFree( dev_imageCopy );
hipFree( dev_filter );
return 0;
}
|
e901b95cb8189ce0e4a297340e09531245c0bf02.cu
|
#include <stdio.h>
#include <stdlib.h>
#include "ppm_lib.h"
#define divisionFactor 9
#define N 500*1000
static void HandleError( cudaError_t err, const char *file, int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
exit( EXIT_FAILURE ); }
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define CREATOR "PARALLELISME2OPENMP"
struct filterCoeff{
int l,c;
};
PPMImage *readPPM(const char *filename)
{
char buff[16];
PPMImage *img;
FILE *fp;
int c, rgb_comp_color;
fp = fopen(filename, "rb");
if (!fp) {
fprintf(stderr, "Unable to open file '%s'\n", filename);
exit(1);
}
if (!fgets(buff, sizeof(buff), fp)) {
perror(filename);
exit(1);
}
if (buff[0] != 'P' || buff[1] != '6') {
fprintf(stderr, "Invalid image format (must be 'P6')\n");
exit(1);
}
img = (PPMImage *)malloc(sizeof(PPMImage));
if (!img) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
c = getc(fp);
while (c == '#') {
while (getc(fp) != '\n') ;
c = getc(fp);
}
ungetc(c, fp);
if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
exit(1);
}
if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename);
exit(1);
}
if (rgb_comp_color!= RGB_COMPONENT_COLOR) {
fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
exit(1);
}
while (fgetc(fp) != '\n') ;
img->data = (PPMPixel*)malloc(img->x * img->y * sizeof(PPMPixel));
if (!img) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
if (fread(img->data, sizeof(PPMPixel)*img->x, img->y, fp) != img->y) {
fprintf(stderr, "Error loading image '%s'\n", filename);
exit(1);
}
fclose(fp);
return img;
}
void writePPM(const char *filename, PPMImage *img)
{
FILE *fp;
fp = fopen(filename, "wb");
if (!fp) {
fprintf(stderr, "Unable to open file '%s'\n", filename);
exit(1);
}
fprintf(fp, "P6\n");
fprintf(fp, "# Created by %s\n",CREATOR);
fprintf(fp, "%d %d\n",img->x,img->y);
fprintf(fp, "%d\n", RGB_COMPONENT_COLOR);
fwrite(img->data, 3 * img->x, img->y, fp);
fclose(fp);
}
// GPU version 1//
// one thread per pixel and one block per row; we increment//
__global__ void filterSofter(PPMPixel *img,int *filter ,PPMPixel *destination){
int finalRed =0;
int finalGreen =0;
int finalBlue =0;
int indFiltre = 0;
int tidX =threadIdx.x+ blockIdx.x*blockDim.x;
int l=tidX/500;
int c=tidX%500;
int ll;
int cc;
for(int i=-2;i<=2;i++){
for(int j=-2;j<=2;j++){
ll=l+i;
cc=c+j;
if(ll<0 ){
ll=-ll;
}else if(ll>1000){
ll=l-i;
}
if(cc<0 ){
cc=-cc;
} else if (cc>500){
cc=c-j;
}
finalRed += img[(ll)*500+(cc)].red * filter[indFiltre];
finalGreen += img[(ll)*500+(cc)].green * filter[indFiltre];
finalBlue += img[(ll)*500+(cc)].blue * filter[indFiltre];
indFiltre++;
}
}
destination[tidX].red = finalRed / divisionFactor;
destination[tidX].green = finalGreen / divisionFactor;
destination[tidX].blue = finalBlue / divisionFactor;
}
int main(){
PPMImage *image, *imageCopy;
image = readPPM("imageProject.ppm");
imageCopy = readPPM("imageProject.ppm");
int filter[25] = { 1, 2, 0, -2, -1,
4 , 8, 0 , -8 , -4,
6 , 12 , 0 , -12 , -6 ,
4, 8, 0 , -8, -4,
1, 2, 0, -2, -1 };
filterCoeff coeff[25] = {};
int k=0;
for(int i=-2;i<=2;i++)
for(int j=-2;j<=2;j++)
coeff[k++]={i,j};
PPMPixel *tempo;
PPMPixel *dev_image;
PPMPixel *dev_tempo;
PPMPixel *dev_imageCopy;
int *dev_filter;
filterCoeff *dev_coeff;
//double time;
//cudaEvent_t start,stop;
HANDLE_ERROR( cudaMalloc( (void**)&dev_image, image->x*image->y *3* sizeof(char) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_imageCopy, imageCopy->x*imageCopy->y*3 * sizeof(char) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_tempo, image->x*imageCopy->y*3 *25* sizeof(char) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_filter, 25 * sizeof(int) ));
HANDLE_ERROR( cudaMalloc( (void**)&dev_coeff, 25* sizeof( filterCoeff) ));
/* copy 'a' and 'b' to the GPU */
HANDLE_ERROR( cudaMemcpy( dev_image, image->data,image->x*image->y *3* sizeof(char),cudaMemcpyHostToDevice));
HANDLE_ERROR( cudaMemcpy( dev_imageCopy, imageCopy->data, imageCopy->x*imageCopy->y *3* sizeof(char),cudaMemcpyHostToDevice));
HANDLE_ERROR( cudaMemcpy( dev_filter, filter, 25 * sizeof(int),cudaMemcpyHostToDevice));
HANDLE_ERROR( cudaMemcpy( dev_coeff, coeff, 25 * sizeof( filterCoeff),cudaMemcpyHostToDevice));
/*cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
*/
tempo<<<500,1000>>>(dev_image,dev_tempo);
HANDLE_ERROR( cudaMemcpy( tempo, dev_tempo, imageCopy->x*imageCopy->y * 3*25*sizeof(char), cudaMemcpyDeviceToHost));
printf(">%s\n",cudaGetErrorString (cudaGetLastError ()));
/*
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
*/
filterSofter<<<500,1000>>>(dev_image,dev_filter,dev_imageCopy);
/* copy the array 'c' from the GPU back to the CPU */
HANDLE_ERROR( cudaMemcpy( imageCopy->data, dev_imageCopy, imageCopy->x*imageCopy->y * 3*sizeof(char), cudaMemcpyDeviceToHost));
//printf("Temps nécessaire : %3.1f ms\n", time);
writePPM("imageProjectResult.ppm",imageCopy);
/* free the memory allocated on the GPU */
cudaFree( dev_image );
cudaFree( dev_imageCopy );
cudaFree( dev_filter );
return 0;
}
|
b482ec464e4c33e8b7d5bf5920ccf901c0fd67b1.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cuda_utils.h"
#include "random/permute.h"
#include "random/rng.h"
#include "test_utils.h"
#include <algorithm>
#include <vector>
namespace MLCommon {
namespace Random {
template <typename T>
struct PermInputs {
int N, D;
bool needPerms, needShuffle, rowMajor;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const PermInputs<T> &dims) {
return os;
}
template <typename T>
class PermTest : public ::testing::TestWithParam<PermInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<PermInputs<T>>::GetParam();
// forcefully set needPerms, since we need it for unit-testing!
if(params.needShuffle) {
params.needPerms = true;
}
Random::Rng<T> r(params.seed);
int N = params.N;
int D = params.D;
int len = N * D;
if(params.needPerms)
allocate(outPerms, N);
else
outPerms = nullptr;
if(params.needShuffle) {
allocate(in, len);
allocate(out, len);
r.uniform(in, len, T(-1.0), T(1.0));
} else {
in = out = nullptr;
}
char *workspace = nullptr;
size_t workspaceSize;
permute(outPerms, out, in, D, N, params.rowMajor, workspace, workspaceSize);
allocate(workspace, workspaceSize);
permute(outPerms, out, in, D, N, params.rowMajor, workspace, workspaceSize);
CUDA_CHECK(hipFree(workspace));
}
void TearDown() override {
if(params.needPerms)
CUDA_CHECK(hipFree(outPerms));
if(params.needShuffle) {
CUDA_CHECK(hipFree(in));
CUDA_CHECK(hipFree(out));
}
}
protected:
PermInputs<T> params;
T *in, *out;
int *outPerms;
};
template <typename T, typename L>
::testing::AssertionResult devArrMatchRange(const T *actual, size_t size,
T start, L eq_compare,
bool doSort = true) {
std::vector<T> act_h(size);
updateHost<T>(&(act_h[0]), actual, size);
if(doSort)
std::sort(act_h.begin(), act_h.end());
for (size_t i(0); i < size; ++i) {
auto act = act_h[i];
auto expected = start + i;
if (!eq_compare(expected, act)) {
return ::testing::AssertionFailure()
<< "actual=" << act << " != expected=" << expected << " @" << i;
}
}
return ::testing::AssertionSuccess();
}
template <typename T, typename L>
::testing::AssertionResult devArrMatchShuffle(const int *perms, const T *out,
const T *in, int D, int N,
bool rowMajor, L eq_compare) {
std::vector<int> h_perms(N);
updateHost<int>(&(h_perms[0]), perms, N);
std::vector<T> h_out(N * D), h_in(N * D);
updateHost<T>(&(h_out[0]), out, N * D);
updateHost<T>(&(h_in[0]), in, N * D);
for (int i = 0; i < N; ++i) {
for (int j = 0; j < D; ++j) {
int outPos = rowMajor? i * D + j : j * N + i;
int inPos = rowMajor? h_perms[i] * D + j : j * N + h_perms[i];
auto act = h_out[outPos];
auto expected = h_in[inPos];
if (!eq_compare(expected, act)) {
return ::testing::AssertionFailure()
<< "actual=" << act << " != expected=" << expected << " @"
<< i << ", " << j;
}
}
}
return ::testing::AssertionSuccess();
}
const std::vector<PermInputs<float>> inputsf = {
// only generate permutations
{32, 8, true, false, true, 1234ULL},
{32, 8, true, false, true, 1234567890ULL},
{1024, 32, true, false, true, 1234ULL},
{1024, 32, true, false, true, 1234567890ULL},
{2*1024, 32, true, false, true, 1234ULL},
{2*1024, 32, true, false, true, 1234567890ULL},
{2*1024+500, 32, true, false, true, 1234ULL},
{2*1024+500, 32, true, false, true, 1234567890ULL},
{100000, 32, true, false, true, 1234ULL},
{100000, 32, true, false, true, 1234567890ULL},
// permute and shuffle the data
{32, 8, true, true, true, 1234ULL},
{32, 8, true, true, true, 1234567890ULL},
{1024, 32, true, true, true, 1234ULL},
{1024, 32, true, true, true, 1234567890ULL},
{2*1024, 32, true, true, true, 1234ULL},
{2*1024, 32, true, true, true, 1234567890ULL},
{2*1024+500, 32, true, true, true, 1234ULL},
{2*1024+500, 32, true, true, true, 1234567890ULL},
{100000, 32, true, true, true, 1234ULL},
{100000, 32, true, true, true, 1234567890ULL}};
typedef PermTest<float> PermTestF;
TEST_P(PermTestF, Result) {
if(params.needPerms) {
ASSERT_TRUE(devArrMatchRange(outPerms, params.N, 0, Compare<int>()));
}
if(params.needShuffle) {
ASSERT_TRUE(devArrMatchShuffle(outPerms, out, in, params.D, params.N,
params.rowMajor, Compare<float>()));
}
}
INSTANTIATE_TEST_CASE_P(PermTests, PermTestF, ::testing::ValuesIn(inputsf));
const std::vector<PermInputs<double>> inputsd = {
// only generate permutations
{32, 8, true, false, true, 1234ULL},
{32, 8, true, false, true, 1234567890ULL},
{1024, 32, true, false, true, 1234ULL},
{1024, 32, true, false, true, 1234567890ULL},
{2*1024, 32, true, false, true, 1234ULL},
{2*1024, 32, true, false, true, 1234567890ULL},
{2*1024+500, 32, true, false, true, 1234ULL},
{2*1024+500, 32, true, false, true, 1234567890ULL},
{100000, 32, true, false, true, 1234ULL},
{100000, 32, true, false, true, 1234567890ULL},
// permute and shuffle the data
{32, 8, true, true, true, 1234ULL},
{32, 8, true, true, true, 1234567890ULL},
{1024, 32, true, true, true, 1234ULL},
{1024, 32, true, true, true, 1234567890ULL},
{2*1024, 32, true, true, true, 1234ULL},
{2*1024, 32, true, true, true, 1234567890ULL},
{2*1024+500, 32, true, true, true, 1234ULL},
{2*1024+500, 32, true, true, true, 1234567890ULL},
{100000, 32, true, true, true, 1234ULL},
{100000, 32, true, true, true, 1234567890ULL}};
typedef PermTest<double> PermTestD;
TEST_P(PermTestD, Result) {
if(params.needPerms) {
ASSERT_TRUE(devArrMatchRange(outPerms, params.N, 0, Compare<int>()));
}
if(params.needShuffle) {
ASSERT_TRUE(devArrMatchShuffle(outPerms, out, in, params.D, params.N,
params.rowMajor, Compare<double>()));
}
}
INSTANTIATE_TEST_CASE_P(PermTests, PermTestD, ::testing::ValuesIn(inputsd));
} // end namespace Random
} // end namespace MLCommon
|
b482ec464e4c33e8b7d5bf5920ccf901c0fd67b1.cu
|
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cuda_utils.h"
#include "random/permute.h"
#include "random/rng.h"
#include "test_utils.h"
#include <algorithm>
#include <vector>
namespace MLCommon {
namespace Random {
template <typename T>
struct PermInputs {
int N, D;
bool needPerms, needShuffle, rowMajor;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const PermInputs<T> &dims) {
return os;
}
template <typename T>
class PermTest : public ::testing::TestWithParam<PermInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<PermInputs<T>>::GetParam();
// forcefully set needPerms, since we need it for unit-testing!
if(params.needShuffle) {
params.needPerms = true;
}
Random::Rng<T> r(params.seed);
int N = params.N;
int D = params.D;
int len = N * D;
if(params.needPerms)
allocate(outPerms, N);
else
outPerms = nullptr;
if(params.needShuffle) {
allocate(in, len);
allocate(out, len);
r.uniform(in, len, T(-1.0), T(1.0));
} else {
in = out = nullptr;
}
char *workspace = nullptr;
size_t workspaceSize;
permute(outPerms, out, in, D, N, params.rowMajor, workspace, workspaceSize);
allocate(workspace, workspaceSize);
permute(outPerms, out, in, D, N, params.rowMajor, workspace, workspaceSize);
CUDA_CHECK(cudaFree(workspace));
}
void TearDown() override {
if(params.needPerms)
CUDA_CHECK(cudaFree(outPerms));
if(params.needShuffle) {
CUDA_CHECK(cudaFree(in));
CUDA_CHECK(cudaFree(out));
}
}
protected:
PermInputs<T> params;
T *in, *out;
int *outPerms;
};
template <typename T, typename L>
::testing::AssertionResult devArrMatchRange(const T *actual, size_t size,
T start, L eq_compare,
bool doSort = true) {
std::vector<T> act_h(size);
updateHost<T>(&(act_h[0]), actual, size);
if(doSort)
std::sort(act_h.begin(), act_h.end());
for (size_t i(0); i < size; ++i) {
auto act = act_h[i];
auto expected = start + i;
if (!eq_compare(expected, act)) {
return ::testing::AssertionFailure()
<< "actual=" << act << " != expected=" << expected << " @" << i;
}
}
return ::testing::AssertionSuccess();
}
template <typename T, typename L>
::testing::AssertionResult devArrMatchShuffle(const int *perms, const T *out,
const T *in, int D, int N,
bool rowMajor, L eq_compare) {
std::vector<int> h_perms(N);
updateHost<int>(&(h_perms[0]), perms, N);
std::vector<T> h_out(N * D), h_in(N * D);
updateHost<T>(&(h_out[0]), out, N * D);
updateHost<T>(&(h_in[0]), in, N * D);
for (int i = 0; i < N; ++i) {
for (int j = 0; j < D; ++j) {
int outPos = rowMajor? i * D + j : j * N + i;
int inPos = rowMajor? h_perms[i] * D + j : j * N + h_perms[i];
auto act = h_out[outPos];
auto expected = h_in[inPos];
if (!eq_compare(expected, act)) {
return ::testing::AssertionFailure()
<< "actual=" << act << " != expected=" << expected << " @"
<< i << ", " << j;
}
}
}
return ::testing::AssertionSuccess();
}
const std::vector<PermInputs<float>> inputsf = {
// only generate permutations
{32, 8, true, false, true, 1234ULL},
{32, 8, true, false, true, 1234567890ULL},
{1024, 32, true, false, true, 1234ULL},
{1024, 32, true, false, true, 1234567890ULL},
{2*1024, 32, true, false, true, 1234ULL},
{2*1024, 32, true, false, true, 1234567890ULL},
{2*1024+500, 32, true, false, true, 1234ULL},
{2*1024+500, 32, true, false, true, 1234567890ULL},
{100000, 32, true, false, true, 1234ULL},
{100000, 32, true, false, true, 1234567890ULL},
// permute and shuffle the data
{32, 8, true, true, true, 1234ULL},
{32, 8, true, true, true, 1234567890ULL},
{1024, 32, true, true, true, 1234ULL},
{1024, 32, true, true, true, 1234567890ULL},
{2*1024, 32, true, true, true, 1234ULL},
{2*1024, 32, true, true, true, 1234567890ULL},
{2*1024+500, 32, true, true, true, 1234ULL},
{2*1024+500, 32, true, true, true, 1234567890ULL},
{100000, 32, true, true, true, 1234ULL},
{100000, 32, true, true, true, 1234567890ULL}};
typedef PermTest<float> PermTestF;
TEST_P(PermTestF, Result) {
if(params.needPerms) {
ASSERT_TRUE(devArrMatchRange(outPerms, params.N, 0, Compare<int>()));
}
if(params.needShuffle) {
ASSERT_TRUE(devArrMatchShuffle(outPerms, out, in, params.D, params.N,
params.rowMajor, Compare<float>()));
}
}
INSTANTIATE_TEST_CASE_P(PermTests, PermTestF, ::testing::ValuesIn(inputsf));
const std::vector<PermInputs<double>> inputsd = {
// only generate permutations
{32, 8, true, false, true, 1234ULL},
{32, 8, true, false, true, 1234567890ULL},
{1024, 32, true, false, true, 1234ULL},
{1024, 32, true, false, true, 1234567890ULL},
{2*1024, 32, true, false, true, 1234ULL},
{2*1024, 32, true, false, true, 1234567890ULL},
{2*1024+500, 32, true, false, true, 1234ULL},
{2*1024+500, 32, true, false, true, 1234567890ULL},
{100000, 32, true, false, true, 1234ULL},
{100000, 32, true, false, true, 1234567890ULL},
// permute and shuffle the data
{32, 8, true, true, true, 1234ULL},
{32, 8, true, true, true, 1234567890ULL},
{1024, 32, true, true, true, 1234ULL},
{1024, 32, true, true, true, 1234567890ULL},
{2*1024, 32, true, true, true, 1234ULL},
{2*1024, 32, true, true, true, 1234567890ULL},
{2*1024+500, 32, true, true, true, 1234ULL},
{2*1024+500, 32, true, true, true, 1234567890ULL},
{100000, 32, true, true, true, 1234ULL},
{100000, 32, true, true, true, 1234567890ULL}};
typedef PermTest<double> PermTestD;
TEST_P(PermTestD, Result) {
if(params.needPerms) {
ASSERT_TRUE(devArrMatchRange(outPerms, params.N, 0, Compare<int>()));
}
if(params.needShuffle) {
ASSERT_TRUE(devArrMatchShuffle(outPerms, out, in, params.D, params.N,
params.rowMajor, Compare<double>()));
}
}
INSTANTIATE_TEST_CASE_P(PermTests, PermTestD, ::testing::ValuesIn(inputsd));
} // end namespace Random
} // end namespace MLCommon
|
2bc8cfa92bafa898fa61755c06e374f8e2a65b2a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if USE_ROCM
#include "cuda/Common.h"
#include "activation/Softmax.h"
namespace nn {
namespace activation {
__global__ void SoftmaxForwardImpl(const int N, const int neurons, const float* X, float* Y) {
int n = blockIdx.x * blockDim.x + threadIdx.x;
if(n < N) {
int offset = n * neurons;
float max_element = X[offset];
for(int i = 1; i < neurons; ++i) {
if(X[offset + i] > max_element) {
max_element = X[offset + i];
}
}
float sum = 0;
for(int i = 0; i < neurons; ++i) {
Y[offset + i] = exp(X[offset + i] - max_element);
sum += Y[offset + i];
}
float factor = 1.0f / sum;
for(int i = 0; i < neurons; ++i) {
Y[offset + i] *= factor;
}
}
}
template <>
void Softmax<Cuda>::Forward(const Matrix<Cuda>& X) {
const auto& shape = X.GetShape();
if((this->_Y.GetShape().cols != shape.cols)) {
throw Exception("Softmax forward: wrong matrix shape");
}
const auto N = shape.rows;
this->_Y.Reshape(Shape{N, this->_out_neurons});
this->_dFdX.Reshape(Shape{N, this->_in_neurons});
dim3 block_size(kVectorBlockSize);
dim3 num_of_blocks((N + block_size.x - 1) / block_size.x);
hipLaunchKernelGGL(( SoftmaxForwardImpl), dim3(num_of_blocks), dim3(block_size), 0, 0,
N, this->_out_neurons, X.DeviceData(), this->_Y.DeviceData());
Exception::ThrowOnError("Softmax: cannot perform forward propagation");
}
template <>
void Softmax<Cuda>::Backprop(const Matrix<Cuda>& X, const Matrix<Cuda>& dFdY, const float /*learning_rate*/) {
const auto& shape = X.GetShape();
if((shape.cols != dFdY.GetShape().cols) || (shape.rows != dFdY.GetShape().rows) ||
(shape.cols != this->_Y.GetShape().cols) || (shape.rows > this->_Y.GetShape().rows)) {
throw Exception("Softmax backprop: wrong matrix shape");
}
const auto N = shape.Size();
hipMemcpy(this->_dFdX.DeviceData(), dFdY.DeviceData(), N * sizeof(float), hipMemcpyDeviceToDevice);
Exception::ThrowOnError("Softmax: cannot perform back propagation");
}
} //namespace activation
} //namespace nn
#endif //USE_ROCM
|
2bc8cfa92bafa898fa61755c06e374f8e2a65b2a.cu
|
#if USE_CUDA
#include "cuda/Common.h"
#include "activation/Softmax.h"
namespace nn {
namespace activation {
__global__ void SoftmaxForwardImpl(const int N, const int neurons, const float* X, float* Y) {
int n = blockIdx.x * blockDim.x + threadIdx.x;
if(n < N) {
int offset = n * neurons;
float max_element = X[offset];
for(int i = 1; i < neurons; ++i) {
if(X[offset + i] > max_element) {
max_element = X[offset + i];
}
}
float sum = 0;
for(int i = 0; i < neurons; ++i) {
Y[offset + i] = exp(X[offset + i] - max_element);
sum += Y[offset + i];
}
float factor = 1.0f / sum;
for(int i = 0; i < neurons; ++i) {
Y[offset + i] *= factor;
}
}
}
template <>
void Softmax<Cuda>::Forward(const Matrix<Cuda>& X) {
const auto& shape = X.GetShape();
if((this->_Y.GetShape().cols != shape.cols)) {
throw Exception("Softmax forward: wrong matrix shape");
}
const auto N = shape.rows;
this->_Y.Reshape(Shape{N, this->_out_neurons});
this->_dFdX.Reshape(Shape{N, this->_in_neurons});
dim3 block_size(kVectorBlockSize);
dim3 num_of_blocks((N + block_size.x - 1) / block_size.x);
SoftmaxForwardImpl<<<num_of_blocks, block_size>>>
(N, this->_out_neurons, X.DeviceData(), this->_Y.DeviceData());
Exception::ThrowOnError("Softmax: cannot perform forward propagation");
}
template <>
void Softmax<Cuda>::Backprop(const Matrix<Cuda>& X, const Matrix<Cuda>& dFdY, const float /*learning_rate*/) {
const auto& shape = X.GetShape();
if((shape.cols != dFdY.GetShape().cols) || (shape.rows != dFdY.GetShape().rows) ||
(shape.cols != this->_Y.GetShape().cols) || (shape.rows > this->_Y.GetShape().rows)) {
throw Exception("Softmax backprop: wrong matrix shape");
}
const auto N = shape.Size();
cudaMemcpy(this->_dFdX.DeviceData(), dFdY.DeviceData(), N * sizeof(float), cudaMemcpyDeviceToDevice);
Exception::ThrowOnError("Softmax: cannot perform back propagation");
}
} //namespace activation
} //namespace nn
#endif //USE_CUDA
|
17546264a0315e363bfc689d9160a99f04dbdf8c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "kernel_hip.cuh"
#define NUM_THREADS 10000
#define SIZE 10
#define BLOCK_WIDTH 100
__global__ void atomicAddGpu(int *d_a) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
tid = tid % SIZE;
atomicAdd(&d_a[tid], 1);
}
void cudaAtomicAdd() {
int h_a[SIZE];
const int ARRAY_BYTES = SIZE * sizeof(int);
int *d_a;
hipMalloc((void **)&d_a, ARRAY_BYTES);
hipMemset((void *)d_a, 0, ARRAY_BYTES);
hipLaunchKernelGGL(( atomicAddGpu), dim3(NUM_THREADS / BLOCK_WIDTH), dim3(BLOCK_WIDTH), 0, 0, d_a);
hipMemcpy(h_a, d_a, ARRAY_BYTES, hipMemcpyDeviceToHost);
hipFree(d_a);
std::cout << "Number of times a particular Array index has been incremented is " << std::endl;
for (int i = 0; i < SIZE; i++) {
std::cout << "index: " << i << " --> " << h_a[i] << " times" << std::endl;
}
}
|
17546264a0315e363bfc689d9160a99f04dbdf8c.cu
|
#include <iostream>
#include "kernel.cuh"
#define NUM_THREADS 10000
#define SIZE 10
#define BLOCK_WIDTH 100
__global__ void atomicAddGpu(int *d_a) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
tid = tid % SIZE;
atomicAdd(&d_a[tid], 1);
}
void cudaAtomicAdd() {
int h_a[SIZE];
const int ARRAY_BYTES = SIZE * sizeof(int);
int *d_a;
cudaMalloc((void **)&d_a, ARRAY_BYTES);
cudaMemset((void *)d_a, 0, ARRAY_BYTES);
atomicAddGpu<<<NUM_THREADS / BLOCK_WIDTH, BLOCK_WIDTH>>>(d_a);
cudaMemcpy(h_a, d_a, ARRAY_BYTES, cudaMemcpyDeviceToHost);
cudaFree(d_a);
std::cout << "Number of times a particular Array index has been incremented is " << std::endl;
for (int i = 0; i < SIZE; i++) {
std::cout << "index: " << i << " --> " << h_a[i] << " times" << std::endl;
}
}
|
d7a42c4b855382a341b2420adea4736934c490ca.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/embedding/mock_key_value_store.h"
#include "oneflow/core/device/cuda_util.h"
namespace oneflow {
namespace embedding {
namespace {
template<typename Key>
class IteratorImpl : public KVIterator {
public:
OF_DISALLOW_COPY_AND_MOVE(IteratorImpl);
IteratorImpl(HashMap<Key, std::string>* store, uint32_t key_size, uint32_t value_size,
uint32_t max_query_length, void* host_keys_buffer, void* host_values_buffer,
uint32_t* host_num_buffer)
: store_(store),
pos_(store->begin()),
key_size_(key_size),
value_size_(value_size),
max_query_length_(max_query_length),
host_keys_buffer_(host_keys_buffer),
host_values_buffer_(host_values_buffer),
host_num_buffer_(host_num_buffer) {}
~IteratorImpl() override = default;
void NextN(ep::Stream* stream, uint32_t n_request, uint32_t* n_result, void* keys,
void* values) override {
CHECK_LE(n_request, max_query_length_);
auto cuda_stream = stream->As<ep::CudaStream>();
CHECK_JUST(cuda_stream->Sync());
*host_num_buffer_ = 0;
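// Drain up to n_request entries from the in-memory map into the host staging buffers, then push
// them to the caller's device buffers asynchronously on the caller's CUDA stream below.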
while (*host_num_buffer_ < n_request && pos_ != store_->end()) {
reinterpret_cast<Key*>(host_keys_buffer_)[*host_num_buffer_] = pos_->first;
std::memcpy(reinterpret_cast<char*>(host_values_buffer_) + *host_num_buffer_ * value_size_,
pos_->second.data(), value_size_);
*host_num_buffer_ += 1;
++pos_;
}
OF_CUDA_CHECK(hipMemcpyAsync(n_result, host_num_buffer_, sizeof(uint32_t), hipMemcpyDefault,
cuda_stream->cuda_stream()));
const uint32_t num_keys = *host_num_buffer_;
if (num_keys != 0) {
OF_CUDA_CHECK(hipMemcpyAsync(keys, host_keys_buffer_, num_keys * key_size_,
hipMemcpyDefault, cuda_stream->cuda_stream()));
OF_CUDA_CHECK(hipMemcpyAsync(values, host_values_buffer_, num_keys * value_size_,
hipMemcpyDefault, cuda_stream->cuda_stream()));
}
}
void Reset() override { pos_ = store_->begin(); }
private:
HashMap<Key, std::string>* store_;
typename HashMap<Key, std::string>::iterator pos_;
uint32_t key_size_;
uint32_t value_size_;
uint32_t max_query_length_;
void* host_keys_buffer_;
void* host_values_buffer_;
uint32_t* host_num_buffer_;
};
template<typename Key>
class KeyValueStoreImpl : public KeyValueStore {
public:
OF_DISALLOW_COPY_AND_MOVE(KeyValueStoreImpl);
explicit KeyValueStoreImpl(const MockKeyValueStoreOptions& options)
: device_index_(-1), max_query_length_(0) {
OF_CUDA_CHECK(hipGetDevice(&device_index_));
key_size_ = options.key_size;
value_size_ = options.value_size;
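// max_query_length_ is still 0 at this point, so these pinned host allocations are effectively
// empty placeholders; ReserveQueryLength() re-allocates them with the real capacity later.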
OF_CUDA_CHECK(NumaAwareCudaMallocHost(
device_index_, reinterpret_cast<void**>(&host_query_keys_), key_size_ * max_query_length_));
OF_CUDA_CHECK(NumaAwareCudaMallocHost(device_index_,
reinterpret_cast<void**>(&host_query_values_),
value_size_ * max_query_length_));
OF_CUDA_CHECK(NumaAwareCudaMallocHost(device_index_, reinterpret_cast<void**>(&host_n_missing_),
sizeof(uint32_t)));
OF_CUDA_CHECK(NumaAwareCudaMallocHost(device_index_,
reinterpret_cast<void**>(&host_missing_indices_),
sizeof(uint32_t) * max_query_length_));
}
~KeyValueStoreImpl() {
CudaCurrentDeviceGuard guard(device_index_);
if (max_query_length_ != 0) {
OF_CUDA_CHECK(hipHostFree(host_query_keys_));
OF_CUDA_CHECK(hipHostFree(host_query_values_));
OF_CUDA_CHECK(hipHostFree(host_missing_indices_));
}
OF_CUDA_CHECK(hipHostFree(host_n_missing_));
}
uint32_t KeySize() const override { return key_size_; }
uint32_t ValueSize() const override { return value_size_; }
uint32_t MaxQueryLength() const override { return max_query_length_; }
void ReserveQueryLength(uint32_t query_length) override {
CudaCurrentDeviceGuard guard(device_index_);
if (query_length <= max_query_length_) { return; }
if (max_query_length_ != 0) {
OF_CUDA_CHECK(hipHostFree(host_query_keys_));
OF_CUDA_CHECK(hipHostFree(host_query_values_));
OF_CUDA_CHECK(hipHostFree(host_missing_indices_));
}
OF_CUDA_CHECK(NumaAwareCudaMallocHost(
device_index_, reinterpret_cast<void**>(&host_query_keys_), key_size_ * query_length));
OF_CUDA_CHECK(NumaAwareCudaMallocHost(
device_index_, reinterpret_cast<void**>(&host_query_values_), value_size_ * query_length));
OF_CUDA_CHECK(NumaAwareCudaMallocHost(device_index_,
reinterpret_cast<void**>(&host_missing_indices_),
sizeof(uint32_t) * query_length));
max_query_length_ = query_length;
}
using KeyValueStore::Get;
void Get(ep::Stream* stream, uint32_t num_keys, const void* keys, void* values,
uint32_t* n_missing, uint32_t* missing_indices) override;
void Put(ep::Stream* stream, uint32_t num_keys, const void* keys, const void* values) override;
bool SnapshotExists(const std::string& name) override;
void LoadSnapshot(const std::string& name) override;
void LoadSnapshot(const std::string& name,
const std::function<void(KVIterator* iter)>& Hook) override;
void SaveSnapshot(const std::string& name) override;
private:
int device_index_;
uint32_t max_query_length_;
uint32_t key_size_;
uint32_t value_size_;
Key* host_query_keys_{};
uint8_t* host_query_values_{};
uint32_t* host_n_missing_{};
uint32_t* host_missing_indices_{};
HashMap<Key, std::string> store_;
HashMap<std::string, HashMap<Key, std::string>> snapshots_;
std::mutex mutex_;
};
template<typename Key>
void KeyValueStoreImpl<Key>::Get(ep::Stream* stream, uint32_t num_keys, const void* keys,
void* values, uint32_t* n_missing, uint32_t* missing_indices) {
std::lock_guard<std::mutex> lock(mutex_);
auto cuda_stream = stream->As<ep::CudaStream>();
CHECK_LE(num_keys, max_query_length_);
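// Staging pattern: copy the device keys into pinned host memory, synchronize, do the lookup in
// the host-side hash map, then copy values and miss information back to the device asynchronously.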
if (num_keys == 0) {
OF_CUDA_CHECK(hipMemsetAsync(n_missing, 0, sizeof(uint32_t),
stream->As<ep::CudaStream>()->cuda_stream()));
return;
}
OF_CUDA_CHECK(hipMemcpyAsync(host_query_keys_, keys, key_size_ * num_keys, hipMemcpyDefault,
cuda_stream->cuda_stream()));
CHECK_JUST(cuda_stream->Sync());
*host_n_missing_ = 0;
for (uint32_t i = 0; i < num_keys; ++i) {
auto it = store_.find(host_query_keys_[i]);
if (it != store_.end()) {
std::memcpy(host_query_values_ + i * value_size_, it->second.data(), value_size_);
} else {
host_missing_indices_[*host_n_missing_] = i;
*host_n_missing_ += 1;
}
}
OF_CUDA_CHECK(hipMemcpyAsync(values, host_query_values_, num_keys * value_size_,
hipMemcpyDefault, cuda_stream->cuda_stream()));
OF_CUDA_CHECK(hipMemcpyAsync(n_missing, host_n_missing_, sizeof(uint32_t), hipMemcpyDefault,
cuda_stream->cuda_stream()));
OF_CUDA_CHECK(hipMemcpyAsync(missing_indices, host_missing_indices_,
(*host_n_missing_) * sizeof(uint32_t), hipMemcpyDefault,
cuda_stream->cuda_stream()));
}
template<typename Key>
void KeyValueStoreImpl<Key>::Put(ep::Stream* stream, uint32_t num_keys, const void* keys,
const void* values) {
std::lock_guard<std::mutex> lock(mutex_);
auto cuda_stream = stream->As<ep::CudaStream>();
CHECK_LE(num_keys, max_query_length_);
if (num_keys == 0) { return; }
OF_CUDA_CHECK(hipMemcpyAsync(host_query_keys_, keys, key_size_ * num_keys, hipMemcpyDefault,
cuda_stream->cuda_stream()));
OF_CUDA_CHECK(hipMemcpyAsync(host_query_values_, values, value_size_ * num_keys,
hipMemcpyDefault, cuda_stream->cuda_stream()));
CHECK_JUST(cuda_stream->Sync());
for (uint32_t i = 0; i < num_keys; ++i) {
store_[host_query_keys_[i]] = std::string(
reinterpret_cast<const char*>(host_query_values_) + i * value_size_, value_size_);
}
}
template<typename Key>
bool KeyValueStoreImpl<Key>::SnapshotExists(const std::string& name) {
return snapshots_.find(name) != snapshots_.end();
}
template<typename Key>
void KeyValueStoreImpl<Key>::LoadSnapshot(const std::string& name) {
CudaCurrentDeviceGuard guard(device_index_);
LoadSnapshot(name, nullptr);
}
template<typename Key>
void KeyValueStoreImpl<Key>::LoadSnapshot(const std::string& name,
const std::function<void(KVIterator* iter)>& Hook) {
CudaCurrentDeviceGuard guard(device_index_);
store_ = snapshots_[name];
if (Hook) {
IteratorImpl<Key> iterator(&store_, KeySize(), ValueSize(), max_query_length_, host_query_keys_,
host_query_values_, host_n_missing_);
Hook(&iterator);
}
}
template<typename Key>
void KeyValueStoreImpl<Key>::SaveSnapshot(const std::string& name) {
CudaCurrentDeviceGuard guard(device_index_);
snapshots_[name] = store_;
}
} // namespace
std::unique_ptr<KeyValueStore> NewMockKeyValueStore(const MockKeyValueStoreOptions& options) {
if (options.key_size == sizeof(uint64_t)) {
return std::unique_ptr<KeyValueStore>(new KeyValueStoreImpl<uint64_t>(options));
} else if (options.key_size == sizeof(uint32_t)) {
return std::unique_ptr<KeyValueStore>(new KeyValueStoreImpl<uint32_t>(options));
} else {
UNIMPLEMENTED();
return nullptr;
}
}
} // namespace embedding
} // namespace oneflow
|
d7a42c4b855382a341b2420adea4736934c490ca.cu
|
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/embedding/mock_key_value_store.h"
#include "oneflow/core/device/cuda_util.h"
namespace oneflow {
namespace embedding {
namespace {
template<typename Key>
class IteratorImpl : public KVIterator {
public:
OF_DISALLOW_COPY_AND_MOVE(IteratorImpl);
IteratorImpl(HashMap<Key, std::string>* store, uint32_t key_size, uint32_t value_size,
uint32_t max_query_length, void* host_keys_buffer, void* host_values_buffer,
uint32_t* host_num_buffer)
: store_(store),
pos_(store->begin()),
key_size_(key_size),
value_size_(value_size),
max_query_length_(max_query_length),
host_keys_buffer_(host_keys_buffer),
host_values_buffer_(host_values_buffer),
host_num_buffer_(host_num_buffer) {}
~IteratorImpl() override = default;
void NextN(ep::Stream* stream, uint32_t n_request, uint32_t* n_result, void* keys,
void* values) override {
CHECK_LE(n_request, max_query_length_);
auto cuda_stream = stream->As<ep::CudaStream>();
CHECK_JUST(cuda_stream->Sync());
*host_num_buffer_ = 0;
while (*host_num_buffer_ < n_request && pos_ != store_->end()) {
reinterpret_cast<Key*>(host_keys_buffer_)[*host_num_buffer_] = pos_->first;
std::memcpy(reinterpret_cast<char*>(host_values_buffer_) + *host_num_buffer_ * value_size_,
pos_->second.data(), value_size_);
*host_num_buffer_ += 1;
++pos_;
}
OF_CUDA_CHECK(cudaMemcpyAsync(n_result, host_num_buffer_, sizeof(uint32_t), cudaMemcpyDefault,
cuda_stream->cuda_stream()));
const uint32_t num_keys = *host_num_buffer_;
if (num_keys != 0) {
OF_CUDA_CHECK(cudaMemcpyAsync(keys, host_keys_buffer_, num_keys * key_size_,
cudaMemcpyDefault, cuda_stream->cuda_stream()));
OF_CUDA_CHECK(cudaMemcpyAsync(values, host_values_buffer_, num_keys * value_size_,
cudaMemcpyDefault, cuda_stream->cuda_stream()));
}
}
void Reset() override { pos_ = store_->begin(); }
private:
HashMap<Key, std::string>* store_;
typename HashMap<Key, std::string>::iterator pos_;
uint32_t key_size_;
uint32_t value_size_;
uint32_t max_query_length_;
void* host_keys_buffer_;
void* host_values_buffer_;
uint32_t* host_num_buffer_;
};
template<typename Key>
class KeyValueStoreImpl : public KeyValueStore {
public:
OF_DISALLOW_COPY_AND_MOVE(KeyValueStoreImpl);
explicit KeyValueStoreImpl(const MockKeyValueStoreOptions& options)
: device_index_(-1), max_query_length_(0) {
OF_CUDA_CHECK(cudaGetDevice(&device_index_));
key_size_ = options.key_size;
value_size_ = options.value_size;
OF_CUDA_CHECK(NumaAwareCudaMallocHost(
device_index_, reinterpret_cast<void**>(&host_query_keys_), key_size_ * max_query_length_));
OF_CUDA_CHECK(NumaAwareCudaMallocHost(device_index_,
reinterpret_cast<void**>(&host_query_values_),
value_size_ * max_query_length_));
OF_CUDA_CHECK(NumaAwareCudaMallocHost(device_index_, reinterpret_cast<void**>(&host_n_missing_),
sizeof(uint32_t)));
OF_CUDA_CHECK(NumaAwareCudaMallocHost(device_index_,
reinterpret_cast<void**>(&host_missing_indices_),
sizeof(uint32_t) * max_query_length_));
}
~KeyValueStoreImpl() {
CudaCurrentDeviceGuard guard(device_index_);
if (max_query_length_ != 0) {
OF_CUDA_CHECK(cudaFreeHost(host_query_keys_));
OF_CUDA_CHECK(cudaFreeHost(host_query_values_));
OF_CUDA_CHECK(cudaFreeHost(host_missing_indices_));
}
OF_CUDA_CHECK(cudaFreeHost(host_n_missing_));
}
uint32_t KeySize() const override { return key_size_; }
uint32_t ValueSize() const override { return value_size_; }
uint32_t MaxQueryLength() const override { return max_query_length_; }
void ReserveQueryLength(uint32_t query_length) override {
CudaCurrentDeviceGuard guard(device_index_);
if (query_length <= max_query_length_) { return; }
if (max_query_length_ != 0) {
OF_CUDA_CHECK(cudaFreeHost(host_query_keys_));
OF_CUDA_CHECK(cudaFreeHost(host_query_values_));
OF_CUDA_CHECK(cudaFreeHost(host_missing_indices_));
}
OF_CUDA_CHECK(NumaAwareCudaMallocHost(
device_index_, reinterpret_cast<void**>(&host_query_keys_), key_size_ * query_length));
OF_CUDA_CHECK(NumaAwareCudaMallocHost(
device_index_, reinterpret_cast<void**>(&host_query_values_), value_size_ * query_length));
OF_CUDA_CHECK(NumaAwareCudaMallocHost(device_index_,
reinterpret_cast<void**>(&host_missing_indices_),
sizeof(uint32_t) * query_length));
max_query_length_ = query_length;
}
using KeyValueStore::Get;
void Get(ep::Stream* stream, uint32_t num_keys, const void* keys, void* values,
uint32_t* n_missing, uint32_t* missing_indices) override;
void Put(ep::Stream* stream, uint32_t num_keys, const void* keys, const void* values) override;
bool SnapshotExists(const std::string& name) override;
void LoadSnapshot(const std::string& name) override;
void LoadSnapshot(const std::string& name,
const std::function<void(KVIterator* iter)>& Hook) override;
void SaveSnapshot(const std::string& name) override;
private:
int device_index_;
uint32_t max_query_length_;
uint32_t key_size_;
uint32_t value_size_;
Key* host_query_keys_{};
uint8_t* host_query_values_{};
uint32_t* host_n_missing_{};
uint32_t* host_missing_indices_{};
HashMap<Key, std::string> store_;
HashMap<std::string, HashMap<Key, std::string>> snapshots_;
std::mutex mutex_;
};
template<typename Key>
void KeyValueStoreImpl<Key>::Get(ep::Stream* stream, uint32_t num_keys, const void* keys,
void* values, uint32_t* n_missing, uint32_t* missing_indices) {
std::lock_guard<std::mutex> lock(mutex_);
auto cuda_stream = stream->As<ep::CudaStream>();
CHECK_LE(num_keys, max_query_length_);
if (num_keys == 0) {
OF_CUDA_CHECK(cudaMemsetAsync(n_missing, 0, sizeof(uint32_t),
stream->As<ep::CudaStream>()->cuda_stream()));
return;
}
OF_CUDA_CHECK(cudaMemcpyAsync(host_query_keys_, keys, key_size_ * num_keys, cudaMemcpyDefault,
cuda_stream->cuda_stream()));
CHECK_JUST(cuda_stream->Sync());
*host_n_missing_ = 0;
for (uint32_t i = 0; i < num_keys; ++i) {
auto it = store_.find(host_query_keys_[i]);
if (it != store_.end()) {
std::memcpy(host_query_values_ + i * value_size_, it->second.data(), value_size_);
} else {
host_missing_indices_[*host_n_missing_] = i;
*host_n_missing_ += 1;
}
}
OF_CUDA_CHECK(cudaMemcpyAsync(values, host_query_values_, num_keys * value_size_,
cudaMemcpyDefault, cuda_stream->cuda_stream()));
OF_CUDA_CHECK(cudaMemcpyAsync(n_missing, host_n_missing_, sizeof(uint32_t), cudaMemcpyDefault,
cuda_stream->cuda_stream()));
OF_CUDA_CHECK(cudaMemcpyAsync(missing_indices, host_missing_indices_,
(*host_n_missing_) * sizeof(uint32_t), cudaMemcpyDefault,
cuda_stream->cuda_stream()));
}
template<typename Key>
void KeyValueStoreImpl<Key>::Put(ep::Stream* stream, uint32_t num_keys, const void* keys,
const void* values) {
std::lock_guard<std::mutex> lock(mutex_);
auto cuda_stream = stream->As<ep::CudaStream>();
CHECK_LE(num_keys, max_query_length_);
if (num_keys == 0) { return; }
OF_CUDA_CHECK(cudaMemcpyAsync(host_query_keys_, keys, key_size_ * num_keys, cudaMemcpyDefault,
cuda_stream->cuda_stream()));
OF_CUDA_CHECK(cudaMemcpyAsync(host_query_values_, values, value_size_ * num_keys,
cudaMemcpyDefault, cuda_stream->cuda_stream()));
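// Synchronize so the pinned staging buffers are fully populated before the CPU reads them below.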
CHECK_JUST(cuda_stream->Sync());
for (uint32_t i = 0; i < num_keys; ++i) {
store_[host_query_keys_[i]] = std::string(
reinterpret_cast<const char*>(host_query_values_) + i * value_size_, value_size_);
}
}
template<typename Key>
bool KeyValueStoreImpl<Key>::SnapshotExists(const std::string& name) {
return snapshots_.find(name) != snapshots_.end();
}
template<typename Key>
void KeyValueStoreImpl<Key>::LoadSnapshot(const std::string& name) {
CudaCurrentDeviceGuard guard(device_index_);
LoadSnapshot(name, nullptr);
}
template<typename Key>
void KeyValueStoreImpl<Key>::LoadSnapshot(const std::string& name,
const std::function<void(KVIterator* iter)>& Hook) {
CudaCurrentDeviceGuard guard(device_index_);
store_ = snapshots_[name];
if (Hook) {
IteratorImpl<Key> iterator(&store_, KeySize(), ValueSize(), max_query_length_, host_query_keys_,
host_query_values_, host_n_missing_);
Hook(&iterator);
}
}
template<typename Key>
void KeyValueStoreImpl<Key>::SaveSnapshot(const std::string& name) {
CudaCurrentDeviceGuard guard(device_index_);
snapshots_[name] = store_;
}
} // namespace
std::unique_ptr<KeyValueStore> NewMockKeyValueStore(const MockKeyValueStoreOptions& options) {
if (options.key_size == sizeof(uint64_t)) {
return std::unique_ptr<KeyValueStore>(new KeyValueStoreImpl<uint64_t>(options));
} else if (options.key_size == sizeof(uint32_t)) {
return std::unique_ptr<KeyValueStore>(new KeyValueStoreImpl<uint32_t>(options));
} else {
UNIMPLEMENTED();
return nullptr;
}
}
} // namespace embedding
} // namespace oneflow
|
7e39ab90812c6ac486f46916a78230572035c311.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "RngBenchmarker.h"
#include "RngBenchmarker_gpu.h"
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include "RngTest.h"
namespace vecrng {
void RngBenchmarker::RunCuda()
{
int nDevice;
bool cudaEnabled = false;
hipGetDeviceCount(&nDevice);
if(nDevice > 0) {
hipDeviceReset();
cudaEnabled = true;
}
else {
printf("Waning: No Cuda Capable Device ...\n");
}
//cuda event timing
hipEvent_t start;
hipEvent_t stop;
hipEventCreate (&start);
hipEventCreate (&stop);
//set the default number of threads and thread blocks - should be settable
//theNThreads should be a power of 2 (due to reduction operations on GPU)
int theNBlocks = 64;
int theNThreads = 256;
//1. MRG32k3a:
vecRng::MRG32k3a<vecRng::ScalarBackend> *mrg32k2a = new vecRng::MRG32k3a<vecRng::ScalarBackend>();
vecRng::MRG32k3a<vecRng::ScalarBackend>::State_t* statesMRG32k3a_d = 0;
hipMalloc((void**)&statesMRG32k3a_d, theNBlocks*theNThreads*sizeof(vecRng::MRG32k3a<vecRng::ScalarBackend>::State_t));
mrg32k2a->Initialize(statesMRG32k3a_d, theNBlocks*theNThreads);
//2. Threefry:
vecRng::Threefry<vecRng::ScalarBackend> *threefry = new vecRng::Threefry<vecRng::ScalarBackend>();
vecRng::Threefry<vecRng::ScalarBackend>::State_t* statesThreefry_d = 0;
hipMalloc((void**)&statesThreefry_d, theNBlocks*theNThreads*sizeof(vecRng::Threefry<vecRng::ScalarBackend>::State_t));
threefry->Initialize(statesThreefry_d, theNBlocks*theNThreads);
//3. Philox:
vecRng::Philox<vecRng::ScalarBackend> *philox = new vecRng::Philox<vecRng::ScalarBackend>();
vecRng::Philox<vecRng::ScalarBackend>::State_t* statesPhilox_d = 0;
hipMalloc((void**)&statesPhilox_d, theNBlocks*theNThreads*sizeof(vecRng::Philox<vecRng::ScalarBackend>::State_t));
philox->Initialize(statesPhilox_d, theNBlocks*theNThreads);
//4. curandStateMRG32k3a
curandStateMRG32k3a* devStatesMRG32k3a = 0;
hipMalloc(&devStatesMRG32k3a,theNBlocks*theNThreads*sizeof(curandStateMRG32k3a));
curand_setup_gpu(devStatesMRG32k3a, time(NULL), theNBlocks, theNThreads);
//5. curandStatePhilox
hiprandStatePhilox4_32_10_t* devStatesPhilox = 0;
hipMalloc(&devStatesPhilox,theNBlocks*theNThreads*sizeof(hiprandStatePhilox4_32_10_t));
curand_setup_gpu(devStatesPhilox, time(NULL), theNBlocks, theNThreads);
//return values for verification
double *result_h;
double *result_c;
double *result_d;
result_h = (double*) calloc(theNBlocks, sizeof(double));
result_c = (double*) calloc(theNBlocks, sizeof(double));
hipMalloc((void**)&result_d,theNBlocks*sizeof(double));
float meanEventTime[kNumberRng +2];
float sigmaEventTime[kNumberRng +2];
double rngEvent[kNumberRng +2];
float *trialEventTime = new float [fRepetition];
for (int k = 0; k < kNumberRng + 2; ++k) {
meanEventTime[k] = 0.0;
sigmaEventTime[k] = 0.;
rngEvent[k] = 0.0;
float elapsedTotalTime = 0.;
for (unsigned r = 0; r < fRepetition; ++r) {
trialEventTime[r] = 0.0;
hipMemset(result_d,0,theNBlocks*sizeof(double));
if(cudaEnabled) {
hipEventRecord (start,0);
//call CUDA kernel
if(k == 0) {
CudaMRG32k3a(statesMRG32k3a_d, result_d, fNSample, theNBlocks, theNThreads);
}
if(k == 1) {
CudaThreefry(statesThreefry_d, result_d, fNSample, theNBlocks, theNThreads);
}
if(k == 2) {
CudaPhilox(statesPhilox_d, result_d, fNSample, theNBlocks, theNThreads);
}
if(k == 3) {
CurandMRG32k3a(devStatesMRG32k3a,result_d,fNSample,theNBlocks,theNThreads);
}
if(k == 4) {
CurandPhilox(devStatesPhilox,result_d,fNSample,theNBlocks,theNThreads);
}
hipEventRecord (stop,0);
hipEventSynchronize (stop);
hipEventElapsedTime (&trialEventTime[r],start,stop);
//copy the result for verification
hipMemcpy(result_h,result_d,theNBlocks*sizeof(double),hipMemcpyDeviceToHost);
for(int i = 0 ; i < theNBlocks ; ++i) {
rngEvent[k] += result_h[i];
}
elapsedTotalTime += trialEventTime[r]; //ms
}
}
meanEventTime[k] = elapsedTotalTime/fRepetition;
float variance = 0;
for (unsigned r = 0; r < fRepetition; ++r) {
float delta = (trialEventTime[r] - meanEventTime[k]);
variance += delta*delta;
}
sigmaEventTime[k] = sqrt(variance/fRepetition);
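// Note: this is the population standard deviation (divide by fRepetition, not fRepetition - 1).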
}
delete [] trialEventTime;
for (int k = 0; k < kNumberRng + 2; ++k) {
if(k < kNumberRng) {
printf(" %s CudaBackend Time = %6.4f +- %6.4f msec Sum = %g\n",
RngName[k], meanEventTime[k], sigmaEventTime[k], rngEvent[k]);
}
if(k== kNumberRng) {
printf(" %s Nvidia Time = %6.4f +- %6.4f msec Sum = %g\n",
"CurandMRG32k3a", meanEventTime[k], sigmaEventTime[k], rngEvent[k]);
}
if(k== kNumberRng+1) {
printf(" %s Nvidia Time = %6.4f +- %6.4f msec Sum = %g\n",
"CurandPhilox ", meanEventTime[k], sigmaEventTime[k], rngEvent[k]);
}
}
//clean up: destroy cuda events and free memory on device and host
hipEventDestroy(start);
hipEventDestroy(stop);
hipFree(statesMRG32k3a_d);
hipFree(statesThreefry_d);
hipFree(statesPhilox_d);
hipFree(devStatesMRG32k3a);
hipFree(devStatesPhilox);
hipFree(result_d);
free(result_h);
free(result_c);
delete mrg32k2a;
delete threefry;
delete philox;
}
} // end of vecrng namespace
|
7e39ab90812c6ac486f46916a78230572035c311.cu
|
#include "RngBenchmarker.h"
#include "RngBenchmarker_gpu.h"
#include <cuda.h>
#include <curand_kernel.h>
#include "RngTest.h"
namespace vecrng {
void RngBenchmarker::RunCuda()
{
int nDevice;
bool cudaEnabled = false;
cudaGetDeviceCount(&nDevice);
if(nDevice > 0) {
cudaDeviceReset();
cudaEnabled = true;
}
else {
printf("Waning: No Cuda Capable Device ...\n");
}
//cuda event timing
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate (&start);
cudaEventCreate (&stop);
//set the default number of threads and thread blocks - should be settable
//theNThreads should be a power of 2 (due to reduction operations on GPU)
int theNBlocks = 64;
int theNThreads = 256;
//1. MRG32k3a:
vecRng::MRG32k3a<vecRng::ScalarBackend> *mrg32k2a = new vecRng::MRG32k3a<vecRng::ScalarBackend>();
vecRng::MRG32k3a<vecRng::ScalarBackend>::State_t* statesMRG32k3a_d = 0;
cudaMalloc((void**)&statesMRG32k3a_d, theNBlocks*theNThreads*sizeof(vecRng::MRG32k3a<vecRng::ScalarBackend>::State_t));
mrg32k2a->Initialize(statesMRG32k3a_d, theNBlocks*theNThreads);
//2. Threefry:
vecRng::Threefry<vecRng::ScalarBackend> *threefry = new vecRng::Threefry<vecRng::ScalarBackend>();
vecRng::Threefry<vecRng::ScalarBackend>::State_t* statesThreefry_d = 0;
cudaMalloc((void**)&statesThreefry_d, theNBlocks*theNThreads*sizeof(vecRng::Threefry<vecRng::ScalarBackend>::State_t));
threefry->Initialize(statesThreefry_d, theNBlocks*theNThreads);
//3. Philox:
vecRng::Philox<vecRng::ScalarBackend> *philox = new vecRng::Philox<vecRng::ScalarBackend>();
vecRng::Philox<vecRng::ScalarBackend>::State_t* statesPhilox_d = 0;
cudaMalloc((void**)&statesPhilox_d, theNBlocks*theNThreads*sizeof(vecRng::Philox<vecRng::ScalarBackend>::State_t));
philox->Initialize(statesPhilox_d, theNBlocks*theNThreads);
//4. curandStateMRG32k3a
curandStateMRG32k3a* devStatesMRG32k3a = 0;
cudaMalloc(&devStatesMRG32k3a,theNBlocks*theNThreads*sizeof(curandStateMRG32k3a));
curand_setup_gpu(devStatesMRG32k3a, time(NULL), theNBlocks, theNThreads);
//5. curandStatePhilox
curandStatePhilox4_32_10_t* devStatesPhilox = 0;
cudaMalloc(&devStatesPhilox,theNBlocks*theNThreads*sizeof(curandStatePhilox4_32_10_t));
curand_setup_gpu(devStatesPhilox, time(NULL), theNBlocks, theNThreads);
//return values for verification
double *result_h;
double *result_c;
double *result_d;
result_h = (double*) calloc(theNBlocks, sizeof(double));
result_c = (double*) calloc(theNBlocks, sizeof(double));
cudaMalloc((void**)&result_d,theNBlocks*sizeof(double));
float meanEventTime[kNumberRng +2];
float sigmaEventTime[kNumberRng +2];
double rngEvent[kNumberRng +2];
float *trialEventTime = new float [fRepetition];
for (int k = 0; k < kNumberRng + 2; ++k) {
meanEventTime[k] = 0.0;
sigmaEventTime[k] = 0.;
rngEvent[k] = 0.0;
float elapsedTotalTime = 0.;
for (unsigned r = 0; r < fRepetition; ++r) {
trialEventTime[r] = 0.0;
cudaMemset(result_d,0,theNBlocks*sizeof(double));
if(cudaEnabled) {
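// Time each trial with CUDA events: record start, launch the kernel, record stop, then
// synchronize on stop before reading the elapsed time in milliseconds.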
cudaEventRecord (start,0);
//call CUDA kernel
if(k == 0) {
CudaMRG32k3a(statesMRG32k3a_d, result_d, fNSample, theNBlocks, theNThreads);
}
if(k == 1) {
CudaThreefry(statesThreefry_d, result_d, fNSample, theNBlocks, theNThreads);
}
if(k == 2) {
CudaPhilox(statesPhilox_d, result_d, fNSample, theNBlocks, theNThreads);
}
if(k == 3) {
CurandMRG32k3a(devStatesMRG32k3a,result_d,fNSample,theNBlocks,theNThreads);
}
if(k == 4) {
CurandPhilox(devStatesPhilox,result_d,fNSample,theNBlocks,theNThreads);
}
cudaEventRecord (stop,0);
cudaEventSynchronize (stop);
cudaEventElapsedTime (&trialEventTime[r],start,stop);
//copy the result for verification
cudaMemcpy(result_h,result_d,theNBlocks*sizeof(double),cudaMemcpyDeviceToHost);
for(int i = 0 ; i < theNBlocks ; ++i) {
rngEvent[k] += result_h[i];
}
elapsedTotalTime += trialEventTime[r]; //ms
}
}
meanEventTime[k] = elapsedTotalTime/fRepetition;
float variance = 0;
for (unsigned r = 0; r < fRepetition; ++r) {
float delta = (trialEventTime[r] - meanEventTime[k]);
variance += delta*delta;
}
sigmaEventTime[k] = sqrt(variance/fRepetition);
}
delete [] trialEventTime;
for (int k = 0; k < kNumberRng + 2; ++k) {
if(k < kNumberRng) {
printf(" %s CudaBackend Time = %6.4f +- %6.4f msec Sum = %g\n",
RngName[k], meanEventTime[k], sigmaEventTime[k], rngEvent[k]);
}
if(k== kNumberRng) {
printf(" %s Nvidia Time = %6.4f +- %6.4f msec Sum = %g\n",
"CurandMRG32k3a", meanEventTime[k], sigmaEventTime[k], rngEvent[k]);
}
if(k== kNumberRng+1) {
printf(" %s Nvidia Time = %6.4f +- %6.4f msec Sum = %g\n",
"CurandPhilox ", meanEventTime[k], sigmaEventTime[k], rngEvent[k]);
}
}
//clean up: destroy cuda events and free memory on device and host
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(statesMRG32k3a_d);
cudaFree(statesThreefry_d);
cudaFree(statesPhilox_d);
cudaFree(devStatesMRG32k3a);
cudaFree(devStatesPhilox);
cudaFree(result_d);
free(result_h);
free(result_c);
delete mrg32k2a;
delete threefry;
delete philox;
}
} // end of vecrng namespace
|