| hip_filename (string, lengths 5 to 84) | hip_content (string, lengths 79 to 9.69M) | cuda_filename (string, lengths 4 to 83) | cuda_content (string, lengths 19 to 9.69M) |
|---|---|---|---|
a7ffd2d7f1102079dd3f8fe57bcef0ef7f061b7b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaKernels.h"
#define NUM_THREADS 256
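// Note: unlike the C library's strcmp, these device helpers return true on a match:
// cuda_strcmp -> s1 equals s2, cuda_prefix -> s2 is a prefix of s1, cuda_substr -> s2 occurs inside s1.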
static __device__ bool cuda_strcmp(char *s1, char *s2) {
for ( ; *s1==*s2; ++s1, ++s2) {
if (*s1=='\0') return true;
}
return false;
}
static __device__ bool cuda_prefix(char *s1, char *s2) {
for ( ; *s1==*s2; ++s1, ++s2) {
if (*(s2+1)=='\0') return true;
}
return false;
}
static __device__ bool cuda_substr(char *s1, char *s2) {
int size1 = 0;
int size2 = 0;
while (s1[size1]!='\0') size1++;
while (s2[size2]!='\0') size2++;
if (size1==size2) return cuda_strcmp(s1, s2);
if (size1<size2) return false;
for (int i=0; i<size1-size2+1; i++) {
bool failed = false;
for (int j=0; j<size2; j++) {
if (s1[i+j]!=s2[j]) {
failed = true;
break;
}
}
if (! failed) return true;
}
return false;
}
static __global__ void cleanCounters(unsigned char *filtersCount, unsigned char *interfaces, const int numFilters, const int numInterfaces) {
int pos = blockIdx.x*blockDim.x+threadIdx.x;
// initialize interfaces and filtersCount
if (pos<numInterfaces) interfaces[pos] = 0;
while(pos<numFilters) {
filtersCount[pos] = 0;
pos = pos + gridDim.x*blockDim.x;
}
}
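// One thread per constraint of the current attribute: a thread returns early if its constraint
// is not satisfied by the attribute value, otherwise it increments the counter of the filter that
// owns the constraint; summarize() later flags an interface once all constraints of one of its
// filters have matched.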
static __global__ void evalConstraint(unsigned char *filtersCount, const FilterInfo *filterInfo, unsigned char *interfaces, const int numFilters, const int numInterfaces, int attributeIdx, CudaInputElem *constInput) {
int constraintsIndex = blockIdx.x*blockDim.x+threadIdx.x;
if (constraintsIndex>=constInput[attributeIdx].numConstraints) return;
CudaInputElem inputElem = constInput[attributeIdx];
CudaValue val = inputElem.value;
Op constrOp = inputElem.constrOp[constraintsIndex];
if (val.type==INT) {
IntCudaConstraint constrVal = ((IntCudaConstraint *)inputElem.constrVal)[constraintsIndex];
if ((constrOp==EQ && val.intVal!=constrVal.value) ||
(constrOp==LT && val.intVal>=constrVal.value) ||
(constrOp==GT && val.intVal<=constrVal.value) ||
(constrOp==DF && val.intVal==constrVal.value)) return;
} else {
StringCudaConstraint constrVal = ((StringCudaConstraint *)inputElem.constrVal)[constraintsIndex];
if ((constrOp==EQ && !cuda_strcmp(val.stringVal, constrVal.value)) ||
(constrOp==DF && cuda_strcmp(val.stringVal, constrVal.value)) ||
(constrOp==PF && !cuda_prefix(val.stringVal, constrVal.value)) ||
(constrOp==IN && !cuda_substr(val.stringVal, constrVal.value))) return;
}
int filterIndex = inputElem.filterIdx[constraintsIndex];
filtersCount[filterIndex]++;
}
static __global__ void summarize(unsigned char *filtersCount, const FilterInfo *filterInfo, unsigned char *interfaces, const int numFilters, const int numInterfaces) {
int pos = blockIdx.x*blockDim.x+threadIdx.x;
while(pos<numFilters) {
if (filtersCount[pos]==filterInfo[pos].numConstraints) {
interfaces[filterInfo[pos].interface] = 1;
}
pos = pos + gridDim.x*blockDim.x;
}
}
CudaKernels::CudaKernels() {
numInterfaces = 0;
numFilters = 0;
consolidated = false;
hostToDeviceCopyTime = 0;
execTime = 0;
deviceToHostCopyTime = 0;
}
CudaKernels::~CudaKernels() {
if (consolidated) {
for(int d=0;d<ngpus;d++){
hipSetDevice(d);
for (map<string_t, void *>::iterator it=nameDeviceConstrVal[d].begin(); it!=nameDeviceConstrVal[d].end(); ++it) {
void *constrPtr = it->second;
hipFree(constrPtr);
//cout<<constrPtr<<"\nCudaKernel Destructor\n"<<endl;
}
for (map<string_t, Op *>::iterator it=nameDeviceConstrOp[d].begin(); it!=nameDeviceConstrOp[d].end(); ++it) {
Op *constrPtr = it->second;
hipFree(constrPtr);
}
for (map<string_t, int *>::iterator it=nameDeviceFilterIdx[d].begin(); it!=nameDeviceFilterIdx[d].end(); ++it) {
int *filterIdxPtr = it->second;
hipFree(filterIdxPtr);
}
hipHostFree(hostInput[d]);
hipFree(currentFiltersCount[d]);
hipFree(filtersInfo[d]);
hipFree(interfacesDevice[d]);
free(interfacesHost[d]); // allocated with malloc in consolidate()
}
for (map<int, set<CudaFilter *> >::iterator it=hostFilters.begin(); it!=hostFilters.end(); ++it) {
for (set<CudaFilter *>::iterator it2=it->second.begin(); it2!=it->second.end(); ++it2) {
CudaFilter *filter = *it2;
delete filter;
}
}
}
}
void CudaKernels::ifConfig(int interfaceId, set<CudaFilter *> &filters) {
// record the set of filters associated to this interface
hostFilters.insert(make_pair(interfaceId, filters));
// update the numConstraints and nameType data structures (to be used at consolidate time)
for (set<CudaFilter *>::iterator it=filters.begin(); it!=filters.end(); ++it) {
CudaFilter *filter = *it;
for (int i=0; i<filter->numConstraints; i++) {
string_t nameStr = filter->constraints[i].name;
map<string_t, int>::iterator it=numConstraints.find(nameStr);
if (it==numConstraints.end()) {
numConstraints.insert(make_pair(nameStr, 1));
} else {
it->second++;
}
map<string_t, Type>::iterator it1=nameType.find(nameStr);
if (it1==nameType.end()) {
nameType.insert(make_pair(nameStr, filter->constraints[i].value.type));
}
}
numFilters++;
}
}
void all_host_allocation_filters()
{
}
void CudaKernels::consolidate() {
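/// Consolidation proceeds in three steps: (1) build per-attribute staging arrays on the host,
/// (2) allocate the per-device copies of those arrays on every GPU, and (3) asynchronously copy
/// the staged data to each device.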
/// host structures
map<string_t, int> currentNumConstraints;
map<string_t, void *> nameHostConstrVal;
map<string_t, Op *> nameHostConstrOp;
map<string_t, int *> nameHostFilterIdx;
for (map<string_t, int>::iterator it=numConstraints.begin(); it!=numConstraints.end(); ++it) {
string_t name = it->first;
int num = it->second;
void *hostConstrValPtr;
if(nameType[name]==INT) {
hostConstrValPtr = malloc(sizeof(IntCudaConstraint)*num);
} else {
hostConstrValPtr = malloc(sizeof(StringCudaConstraint)*num);
}
nameHostConstrVal.insert(make_pair(name, hostConstrValPtr));
Op* hostConstrOpPtr;
hostConstrOpPtr = (Op *)malloc(sizeof(Op)*num);
nameHostConstrOp.insert(make_pair(name, hostConstrOpPtr));
currentNumConstraints.insert(make_pair(name, 0));
int *hostFilterIdxPtr;
hostFilterIdxPtr = (int *)malloc(sizeof(int)*num);
nameHostFilterIdx.insert(make_pair(name, hostFilterIdxPtr));
}
/// initialize the nameHostConstrVal, nameHostConstrOp, nameHostFilterIdx, and hostFiltersInfo structures
///(to be copied into the corresponding structures in device later)
int filterId = 0;
FilterInfo *hostFiltersInfo = (FilterInfo *) malloc(sizeof(FilterInfo)*numFilters);
for (map<int, set<CudaFilter *> >::iterator it=hostFilters.begin(); it!=hostFilters.end(); ++it) {
int interfaceId = it->first;
for (set<CudaFilter *>::iterator it2=it->second.begin(); it2!=it->second.end(); ++it2) {
CudaFilter *filter = *it2;
for (int i=0; i<filter->numConstraints; i++) {
string_t name = filter->constraints[i].name;
int writingIndex = currentNumConstraints[name];
currentNumConstraints[name] = writingIndex+1;
Op *hostConstrOpPtr = nameHostConstrOp[name];
hostConstrOpPtr[writingIndex] = filter->constraints[i].op;
if(nameType[name]==INT) {
IntCudaConstraint *hostConstrValPtr = (IntCudaConstraint *)nameHostConstrVal[name];
hostConstrValPtr[writingIndex].value = filter->constraints[i].value.intVal;
} else {
StringCudaConstraint *hostConstrValPtr = (StringCudaConstraint *)nameHostConstrVal[name];
memcpy(hostConstrValPtr[writingIndex].value, filter->constraints[i].value.stringVal, STRING_VAL_LEN);
}
int *hostFilterIdxPtr = nameHostFilterIdx[name];
hostFilterIdxPtr[writingIndex] = filterId;
}
hostFiltersInfo[filterId].numConstraints = filter->numConstraints;
hostFiltersInfo[filterId].interface = interfaceId;
filterId++;
}
}
/// device functions copy
hipGetDeviceCount(&ngpus);
//.ngpus=1;
nameDeviceConstrVal.resize(ngpus);
nameDeviceConstrOp.resize(ngpus);
nameDeviceFilterIdx.resize(ngpus) ;
// cudaStreams = = (hipStream_t *)malloc(sizeof(hipStream_t) * ngpus);
hostInput = (CudaInputElem **)malloc(sizeof(CudaInputElem *)*ngpus);
interfacesHost = (unsigned char **)malloc(sizeof(unsigned char *)*ngpus);
interfacesDevice = (unsigned char **)malloc(sizeof(unsigned char *)*ngpus);
currentFiltersCount = (unsigned char **)malloc(sizeof(unsigned char *)*ngpus);
filtersInfo = (FilterInfo **)malloc(sizeof(FilterInfo *)*ngpus);
constInput = (CudaInputElem **)malloc(sizeof(CudaInputElem * )*ngpus);
cout<<"No. of Cuda Devices "<<ngpus<<endl;
/// multiple devices
int e = 0;
int allocSize = 0;
for(int device = 0 ; device < ngpus ; device++){
hipSetDevice(device);
//static __constant__ constInput[i]=[MAX_ATTR_NUM];
e+=hipMalloc((void**)&constInput[device] , (size_t)sizeof(CudaInputElem)*MAX_ATTR_NUM);
// hipStreamCreate(&cudaStreams[i]);///not needed
/// host input data structures... to be copied to Gpu
e += hipHostMalloc((void**) &hostInput[device], (size_t) sizeof(CudaInputElem)*MAX_ATTR_NUM);
///interface array on host like pinned memory
interfacesHost[device] = (unsigned char *) malloc( (size_t) sizeof(unsigned char)*numInterfaces);
// allocate memory on device and host
numInterfaces = hostFilters.size();
allocSize += sizeof(CudaInputElem)*MAX_ATTR_NUM; // allocated into constant memory (see static variable at the beginning of file)
e += hipMalloc((void**) &interfacesDevice[device], (size_t) sizeof(unsigned char)*numInterfaces);
allocSize += sizeof(unsigned char)*numInterfaces;
/// allocation for host and device data structures; the host staging buffers are copied to the device later
/// map stores pointers to addresses
for (map<string_t, int>::iterator it=numConstraints.begin(); it!=numConstraints.end(); ++it) {
string_t name = it->first;
int num = it->second;
void *constrValPtr;
if(nameType[name]==INT) {
e += hipMalloc((void**) &constrValPtr, (size_t) sizeof(IntCudaConstraint)*num);
allocSize += sizeof(IntCudaConstraint)*num;
} else {
e += hipMalloc((void**) &constrValPtr, (size_t) sizeof(StringCudaConstraint)*num);
allocSize += sizeof(StringCudaConstraint)*num;
}
nameDeviceConstrVal[device].insert(make_pair(name, constrValPtr));
Op *constrOpPtr;
e+= hipMalloc((void**) &constrOpPtr, (size_t) sizeof(Op)*num);
allocSize += sizeof(Op)*num;
nameDeviceConstrOp[device].insert(make_pair(name, constrOpPtr));
int *filterIdxPtr;
e+= hipMalloc((void**) &filterIdxPtr, (size_t) sizeof(int)*num);
allocSize += sizeof(int)*num;
nameDeviceFilterIdx[device].insert(make_pair(name, filterIdxPtr));
}
e += hipMalloc((void**) &currentFiltersCount[device], (size_t) sizeof(unsigned char)*numFilters);
allocSize += sizeof(unsigned char)*numFilters;
e += hipMalloc((void**) &filtersInfo[device], (size_t) sizeof(FilterInfo)*numFilters);
allocSize += sizeof(FilterInfo)*numFilters;
if (e>0) {
cerr << " Allocation error " << e << endl;
exit(1);
}
}
for(int device=0; device < ngpus ; device ++){
hipSetDevice(device);
// hipStreamCreate(&cudaStreams[i]);///not needed
/// initialize the device memory
void *host;
for (map<string_t, void *>::iterator it=nameHostConstrVal.begin(); it!=nameHostConstrVal.end(); ++it) {
string_t name = it->first;
host = it->second;
void *device_add = nameDeviceConstrVal[device][name];
int size = numConstraints[name];
if(nameType[name]==INT) {
e += hipMemcpyAsync(device_add, host, sizeof(IntCudaConstraint)*size, hipMemcpyHostToDevice);
} else {
e += hipMemcpyAsync(device_add, host, sizeof(StringCudaConstraint)*size, hipMemcpyHostToDevice);
}
//hipDeviceSynchronize();
//
}
//free(host);
Op *host1;
for (map<string_t, Op *>::iterator it=nameHostConstrOp.begin(); it!=nameHostConstrOp.end(); ++it) {
string_t name = it->first;
host1 = it->second;
Op *device_add = nameDeviceConstrOp[device][name];
int size = numConstraints[name];
e += hipMemcpyAsync(device_add, host1, sizeof(Op)*size, hipMemcpyHostToDevice);
//hipDeviceSynchronize();
}
//free(host1);
int *host2;
for (map<string_t, int *>::iterator it=nameHostFilterIdx.begin(); it!=nameHostFilterIdx.end(); ++it) {
string_t name = it->first;
host2 = it->second;
int *device_add = nameDeviceFilterIdx[device][name];
int size = numConstraints[name];
e += hipMemcpyAsync(device_add, host2, sizeof(int)*size, hipMemcpyHostToDevice);
//hipDeviceSynchronize();
}
// free(host2);
e += hipMemcpyAsync(filtersInfo[device], hostFiltersInfo, (size_t) sizeof(FilterInfo)*numFilters, hipMemcpyHostToDevice);
hipMemsetAsync(currentFiltersCount[device], 0, (size_t) sizeof(unsigned char)*numFilters);
hipMemsetAsync(interfacesDevice[device], 0, (size_t) sizeof(unsigned char)*numInterfaces);
hipDeviceSynchronize();
consolidated = true;
if (e>0) {
cerr << " Memcpy error " << e << " during consolidation " << endl;
exit(1);
}
// set up the runtime to optimize performance
//hipFuncSetCacheConfig(evalConstraint, hipFuncCachePreferL1);
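// PreferL1 trades shared-memory capacity for a larger L1 cache; these kernels use no shared
// memory, so the larger cache benefits their scattered global reads.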
hipDeviceSetCacheConfig(hipFuncCachePreferL1);
}
for(int device=0;device<ngpus;device++){
hipSetDevice(device);
hipDeviceSynchronize();
}
int totConstr=0;
for(map<string_t,int>::iterator it=numConstraints.begin(); it!=numConstraints.end(); ++it) {
totConstr+=it->second;
}
cout << endl << " ### " << totConstr << " constraints allocated ### " << endl;
cout << endl << " ### " << allocSize << " bytes allocated on device ### " << endl;
cout << endl << "#####################" << endl;
free(hostFiltersInfo);
}
int CudaKernels::getGpuCount(){
return ngpus;
}
void CudaKernels::getStats(double &hToD, double &exec, double &dToH) {
hToD = hostToDeviceCopyTime;
exec = execTime;
dToH = deviceToHostCopyTime;
}
#if STATS==1
void CudaKernels::processMessage(CudaOutbox *outbox,int dev_no) {
Timer t;
t.start();
dev_no%=ngpus;
hipSetDevice(dev_no); /// cuda set device
int maxConstr = copyMsgToDevice(outbox->message,dev_no);
//hipDeviceSynchronize(); // TODO: remove
hostToDeviceCopyTime += t.stop();
if (maxConstr>0) {
t.start();
for(int i=0; i<numValues; i++) {
hipLaunchKernelGGL(( evalConstraint), dim3(hostInput[dev_no][i].numConstraints/NUM_THREADS+1), dim3(NUM_THREADS), 0, 0, currentFiltersCount[dev_no], filtersInfo[dev_no], interfacesDevice[dev_no], numFilters, numInterfaces, i,constInput[dev_no]);
}
hipLaunchKernelGGL(( summarize), dim3(numFilters/2048), dim3(NUM_THREADS), 0, 0, currentFiltersCount[dev_no], filtersInfo[dev_no], interfacesDevice[dev_no], numFilters, numInterfaces);
// computeResults(maxConstr,dev_no);
//hipDeviceSynchronize(); // TODO: remove
execTime += t.stop();
//t.start();
//getMatchingInterfaces(outbox->outgoingInterfaces,dev_no);
//hipDeviceSynchronize(); // TODO: remove
//deviceToHostCopyTime += t.stop();
}
}
#elif STATS==0
void CudaKernels::processMessage(CudaOutbox *outbox) {
int maxConstr = copyMsgToDevice(outbox->message);
if (maxConstr>0) {
computeResults(maxConstr);
getMatchingInterfaces(outbox->outgoingInterfaces);
}
}
#endif
int CudaKernels::copyMsgToDevice(CudaMessage *message,int dev_no) {
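// For every attribute in the message, stage one CudaInputElem whose pointers reference the
// device-resident constraint arrays registered under that attribute name, then asynchronously
// copy the staging buffer into constInput on the selected GPU.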
int dest = 0;
int maxConstr = 0;
for (int i=0; i<message->numAttributes; i++) {
string_t name = message->attributes[i].name;
map<string_t, void *>::iterator it = nameDeviceConstrVal[dev_no].find(name);
if(it==nameDeviceConstrVal[dev_no].end()) {
cerr << "Name: ";
for(int i=0; i<name.length(); i++) cerr << name[i];
cerr << " not found during message processing" << endl;
exit(1);
}
hostInput[dev_no][dest].constrVal = it->second;
map<string_t, Op *>::iterator it1 = nameDeviceConstrOp[dev_no].find(name);
if(it1==nameDeviceConstrOp[dev_no].end()) {
cerr << "Name: ";
for(int i=0; i<name.length(); i++) cerr << name[i];
cerr << " not found during message processing" << endl;
exit(1);
}
hostInput[dev_no][dest].constrOp = it1->second;
map<string_t, int *>::iterator it2 = nameDeviceFilterIdx[dev_no].find(name);
if(it2==nameDeviceFilterIdx[dev_no].end()) {
cerr << "Name: ";
for(int i=0; i<name.length(); i++) cerr << name[i];
cerr << " not found during message processing" << endl;
exit(1);
}
hostInput[dev_no][dest].filterIdx = it2->second;
hostInput[dev_no][dest].numConstraints = numConstraints[name];
if (hostInput[dev_no][dest].numConstraints>maxConstr) maxConstr = hostInput[dev_no][dest].numConstraints;
hostInput[dev_no][dest].value = message->attributes[i].value;
dest++;
}
numValues = dest;
if (dest>0) {
int e = 0;
e += hipMemcpyAsync(constInput[dev_no], hostInput[dev_no], (size_t) sizeof(CudaInputElem)*numValues,hipMemcpyHostToDevice);
if (e>0) {
cerr << " Memcpy error " << e << " during message processing " << endl;
exit(1);
}
}
return maxConstr;
}
void CudaKernels::computeResults(int maxConstr,int dev_no) {
//int numBlocksX = 1+maxConstr/NUM_THREADS;
//dim3 numBlocks = dim3(numBlocksX);
for(int i=0; i<numValues; i++) {
hipLaunchKernelGGL(( evalConstraint), dim3(hostInput[dev_no][i].numConstraints/NUM_THREADS+1), dim3(NUM_THREADS), 0, 0, currentFiltersCount[dev_no], filtersInfo[dev_no], interfacesDevice[dev_no], numFilters, numInterfaces, i,constInput[dev_no]);
}
hipLaunchKernelGGL(( summarize), dim3(numFilters/2048), dim3(NUM_THREADS), 0, 0, currentFiltersCount[dev_no], filtersInfo[dev_no], interfacesDevice[dev_no], numFilters, numInterfaces);
}
void CudaKernels::getMatchingInterfaces(set<int> &results,int dev_no) {
hipSetDevice(dev_no);
Timer t;
t.start();
int e = hipMemcpyAsync(interfacesHost[dev_no], interfacesDevice[dev_no], (size_t) sizeof(unsigned char)*numInterfaces, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
deviceToHostCopyTime += t.stop();
hipMemsetAsync(currentFiltersCount[dev_no], 0, (size_t) sizeof(unsigned char)*numFilters);
hipMemsetAsync(interfacesDevice[dev_no], 0, (size_t) sizeof(unsigned char)*numInterfaces);
//hipDeviceReset();
//cleanCounters<<<numFilters/2048, NUM_THREADS>>>(currentFiltersCount, interfacesDevice, numFilters, numInterfaces);
if (e>0) {
cerr << " Memcpy error " << e << " while copying matching interfaces " << endl;
exit(1);
}
/// clear the previous set ;
results.clear();
for (int i=0; i<numInterfaces; i++) {
if (interfacesHost[dev_no][i]!=0) {
results.insert(i);
}
}
}
|
a7ffd2d7f1102079dd3f8fe57bcef0ef7f061b7b.cu
|
#include "CudaKernels.h"
#define NUM_THREADS 256
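// Note: unlike the C library's strcmp, these device helpers return true on a match:
// cuda_strcmp -> s1 equals s2, cuda_prefix -> s2 is a prefix of s1, cuda_substr -> s2 occurs inside s1.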
static __device__ bool cuda_strcmp(char *s1, char *s2) {
for ( ; *s1==*s2; ++s1, ++s2) {
if (*s1=='\0') return true;
}
return false;
}
static __device__ bool cuda_prefix(char *s1, char *s2) {
for ( ; *s1==*s2; ++s1, ++s2) {
if (*(s2+1)=='\0') return true;
}
return false;
}
static __device__ bool cuda_substr(char *s1, char *s2) {
int size1 = 0;
int size2 = 0;
while (s1[size1]!='\0') size1++;
while (s2[size2]!='\0') size2++;
if (size1==size2) return cuda_strcmp(s1, s2);
if (size1<size2) return false;
for (int i=0; i<size1-size2+1; i++) {
bool failed = false;
for (int j=0; j<size2; j++) {
if (s1[i+j]!=s2[j]) {
failed = true;
break;
}
}
if (! failed) return true;
}
return false;
}
static __global__ void cleanCounters(unsigned char *filtersCount, unsigned char *interfaces, const int numFilters, const int numInterfaces) {
int pos = blockIdx.x*blockDim.x+threadIdx.x;
// initialize interfaces and filtersCount
if (pos<numInterfaces) interfaces[pos] = 0;
while(pos<numFilters) {
filtersCount[pos] = 0;
pos = pos + gridDim.x*blockDim.x;
}
}
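// One thread per constraint of the current attribute: a thread returns early if its constraint
// is not satisfied by the attribute value, otherwise it increments the counter of the filter that
// owns the constraint; summarize() later flags an interface once all constraints of one of its
// filters have matched.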
static __global__ void evalConstraint(unsigned char *filtersCount, const FilterInfo *filterInfo, unsigned char *interfaces, const int numFilters, const int numInterfaces, int attributeIdx, CudaInputElem *constInput) {
int constraintsIndex = blockIdx.x*blockDim.x+threadIdx.x;
if (constraintsIndex>=constInput[attributeIdx].numConstraints) return;
CudaInputElem inputElem = constInput[attributeIdx];
CudaValue val = inputElem.value;
Op constrOp = inputElem.constrOp[constraintsIndex];
if (val.type==INT) {
IntCudaConstraint constrVal = ((IntCudaConstraint *)inputElem.constrVal)[constraintsIndex];
if ((constrOp==EQ && val.intVal!=constrVal.value) ||
(constrOp==LT && val.intVal>=constrVal.value) ||
(constrOp==GT && val.intVal<=constrVal.value) ||
(constrOp==DF && val.intVal==constrVal.value)) return;
} else {
StringCudaConstraint constrVal = ((StringCudaConstraint *)inputElem.constrVal)[constraintsIndex];
if ((constrOp==EQ && !cuda_strcmp(val.stringVal, constrVal.value)) ||
(constrOp==DF && cuda_strcmp(val.stringVal, constrVal.value)) ||
(constrOp==PF && !cuda_prefix(val.stringVal, constrVal.value)) ||
(constrOp==IN && !cuda_substr(val.stringVal, constrVal.value))) return;
}
int filterIndex = inputElem.filterIdx[constraintsIndex];
filtersCount[filterIndex]++;
}
static __global__ void summarize(unsigned char *filtersCount, const FilterInfo *filterInfo, unsigned char *interfaces, const int numFilters, const int numInterfaces) {
int pos = blockIdx.x*blockDim.x+threadIdx.x;
while(pos<numFilters) {
if (filtersCount[pos]==filterInfo[pos].numConstraints) {
interfaces[filterInfo[pos].interface] = 1;
}
pos = pos + gridDim.x*blockDim.x;
}
}
CudaKernels::CudaKernels() {
numInterfaces = 0;
numFilters = 0;
consolidated = false;
hostToDeviceCopyTime = 0;
execTime = 0;
deviceToHostCopyTime = 0;
}
CudaKernels::~CudaKernels() {
if (consolidated) {
for(int d=0;d<ngpus;d++){
cudaSetDevice(d);
for (map<string_t, void *>::iterator it=nameDeviceConstrVal[d].begin(); it!=nameDeviceConstrVal[d].end(); ++it) {
void *constrPtr = it->second;
cudaFree(constrPtr);
//cout<<constrPtr<<"\nCudaKernel Destructor\n"<<endl;
}
for (map<string_t, Op *>::iterator it=nameDeviceConstrOp[d].begin(); it!=nameDeviceConstrOp[d].end(); ++it) {
Op *constrPtr = it->second;
cudaFree(constrPtr);
}
for (map<string_t, int *>::iterator it=nameDeviceFilterIdx[d].begin(); it!=nameDeviceFilterIdx[d].end(); ++it) {
int *filterIdxPtr = it->second;
cudaFree(filterIdxPtr);
}
cudaFreeHost(hostInput[d]);
cudaFree(currentFiltersCount[d]);
cudaFree(filtersInfo[d]);
cudaFree(interfacesDevice[d]);
free(interfacesHost[d]); // allocated with malloc in consolidate()
}
for (map<int, set<CudaFilter *> >::iterator it=hostFilters.begin(); it!=hostFilters.end(); ++it) {
for (set<CudaFilter *>::iterator it2=it->second.begin(); it2!=it->second.end(); ++it2) {
CudaFilter *filter = *it2;
delete filter;
}
}
}
}
void CudaKernels::ifConfig(int interfaceId, set<CudaFilter *> &filters) {
// record the set of filters associated to this interface
hostFilters.insert(make_pair(interfaceId, filters));
// update the numConstraints and nameType data structures (to be used at consolidate time)
for (set<CudaFilter *>::iterator it=filters.begin(); it!=filters.end(); ++it) {
CudaFilter *filter = *it;
for (int i=0; i<filter->numConstraints; i++) {
string_t nameStr = filter->constraints[i].name;
map<string_t, int>::iterator it=numConstraints.find(nameStr);
if (it==numConstraints.end()) {
numConstraints.insert(make_pair(nameStr, 1));
} else {
it->second++;
}
map<string_t, Type>::iterator it1=nameType.find(nameStr);
if (it1==nameType.end()) {
nameType.insert(make_pair(nameStr, filter->constraints[i].value.type));
}
}
numFilters++;
}
}
void all_host_allocation_filters()
{
}
void CudaKernels::consolidate() {
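/// Consolidation proceeds in three steps: (1) build per-attribute staging arrays on the host,
/// (2) allocate the per-device copies of those arrays on every GPU, and (3) asynchronously copy
/// the staged data to each device.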
/// host structures
map<string_t, int> currentNumConstraints;
map<string_t, void *> nameHostConstrVal;
map<string_t, Op *> nameHostConstrOp;
map<string_t, int *> nameHostFilterIdx;
for (map<string_t, int>::iterator it=numConstraints.begin(); it!=numConstraints.end(); ++it) {
string_t name = it->first;
int num = it->second;
void *hostConstrValPtr;
if(nameType[name]==INT) {
hostConstrValPtr = malloc(sizeof(IntCudaConstraint)*num);
} else {
hostConstrValPtr = malloc(sizeof(StringCudaConstraint)*num);
}
nameHostConstrVal.insert(make_pair(name, hostConstrValPtr));
Op* hostConstrOpPtr;
hostConstrOpPtr = (Op *)malloc(sizeof(Op)*num);
nameHostConstrOp.insert(make_pair(name, hostConstrOpPtr));
currentNumConstraints.insert(make_pair(name, 0));
int *hostFilterIdxPtr;
hostFilterIdxPtr = (int *)malloc(sizeof(int)*num);
nameHostFilterIdx.insert(make_pair(name, hostFilterIdxPtr));
}
/// initialize the nameHostConstrVal, nameHostConstrOp, nameHostFilterIdx, and hostFiltersInfo structures
///(to be copied into the corresponding structures in device later)
int filterId = 0;
FilterInfo *hostFiltersInfo = (FilterInfo *) malloc(sizeof(FilterInfo)*numFilters);
for (map<int, set<CudaFilter *> >::iterator it=hostFilters.begin(); it!=hostFilters.end(); ++it) {
int interfaceId = it->first;
for (set<CudaFilter *>::iterator it2=it->second.begin(); it2!=it->second.end(); ++it2) {
CudaFilter *filter = *it2;
for (int i=0; i<filter->numConstraints; i++) {
string_t name = filter->constraints[i].name;
int writingIndex = currentNumConstraints[name];
currentNumConstraints[name] = writingIndex+1;
Op *hostConstrOpPtr = nameHostConstrOp[name];
hostConstrOpPtr[writingIndex] = filter->constraints[i].op;
if(nameType[name]==INT) {
IntCudaConstraint *hostConstrValPtr = (IntCudaConstraint *)nameHostConstrVal[name];
hostConstrValPtr[writingIndex].value = filter->constraints[i].value.intVal;
} else {
StringCudaConstraint *hostConstrValPtr = (StringCudaConstraint *)nameHostConstrVal[name];
memcpy(hostConstrValPtr[writingIndex].value, filter->constraints[i].value.stringVal, STRING_VAL_LEN);
}
int *hostFilterIdxPtr = nameHostFilterIdx[name];
hostFilterIdxPtr[writingIndex] = filterId;
}
hostFiltersInfo[filterId].numConstraints = filter->numConstraints;
hostFiltersInfo[filterId].interface = interfaceId;
filterId++;
}
}
/// device functions copy
cudaGetDeviceCount(&ngpus);
//.ngpus=1;
nameDeviceConstrVal.resize(ngpus);
nameDeviceConstrOp.resize(ngpus);
nameDeviceFilterIdx.resize(ngpus) ;
// cudaStreams = = (cudaStream_t *)malloc(sizeof(cudaStream_t) * ngpus);
hostInput = (CudaInputElem **)malloc(sizeof(CudaInputElem *)*ngpus);
interfacesHost = (unsigned char **)malloc(sizeof(unsigned char *)*ngpus);
interfacesDevice = (unsigned char **)malloc(sizeof(unsigned char *)*ngpus);
currentFiltersCount = (unsigned char **)malloc(sizeof(unsigned char *)*ngpus);
filtersInfo = (FilterInfo **)malloc(sizeof(FilterInfo *)*ngpus);
constInput = (CudaInputElem **)malloc(sizeof(CudaInputElem * )*ngpus);
cout<<"No. of Cuda Devices "<<ngpus<<endl;
/// multiple devices
int e = 0;
int allocSize = 0;
for(int device = 0 ; device < ngpus ; device++){
cudaSetDevice(device);
//static __constant__ constInput[i]=[MAX_ATTR_NUM];
e+=cudaMalloc((void**)&constInput[device] , (size_t)sizeof(CudaInputElem)*MAX_ATTR_NUM);
// cudaStreamCreate(&cudaStreams[i]);///not needed
/// host input data structures... to be copied to Gpu
e += cudaMallocHost((void**) &hostInput[device], (size_t) sizeof(CudaInputElem)*MAX_ATTR_NUM);
///interface array on host like pinned memory
interfacesHost[device] = (unsigned char *) malloc( (size_t) sizeof(unsigned char)*numInterfaces);
// allocate memory on device and host
numInterfaces = hostFilters.size();
allocSize += sizeof(CudaInputElem)*MAX_ATTR_NUM; // allocated into constant memory (see static variable at the beginning of file)
e += cudaMalloc((void**) &interfacesDevice[device], (size_t) sizeof(unsigned char)*numInterfaces);
allocSize += sizeof(unsigned char)*numInterfaces;
/// allocation for host and device data structures; the host staging buffers are copied to the device later
/// map stores pointers to addresses
for (map<string_t, int>::iterator it=numConstraints.begin(); it!=numConstraints.end(); ++it) {
string_t name = it->first;
int num = it->second;
void *constrValPtr;
if(nameType[name]==INT) {
e += cudaMalloc((void**) &constrValPtr, (size_t) sizeof(IntCudaConstraint)*num);
allocSize += sizeof(IntCudaConstraint)*num;
} else {
e += cudaMalloc((void**) &constrValPtr, (size_t) sizeof(StringCudaConstraint)*num);
allocSize += sizeof(StringCudaConstraint)*num;
}
nameDeviceConstrVal[device].insert(make_pair(name, constrValPtr));
Op *constrOpPtr;
e+= cudaMalloc((void**) &constrOpPtr, (size_t) sizeof(Op)*num);
allocSize += sizeof(Op)*num;
nameDeviceConstrOp[device].insert(make_pair(name, constrOpPtr));
int *filterIdxPtr;
e+= cudaMalloc((void**) &filterIdxPtr, (size_t) sizeof(int)*num);
allocSize += sizeof(int)*num;
nameDeviceFilterIdx[device].insert(make_pair(name, filterIdxPtr));
}
e += cudaMalloc((void**) &currentFiltersCount[device], (size_t) sizeof(unsigned char)*numFilters);
allocSize += sizeof(unsigned char)*numFilters;
e += cudaMalloc((void**) &filtersInfo[device], (size_t) sizeof(FilterInfo)*numFilters);
allocSize += sizeof(FilterInfo)*numFilters;
if (e>0) {
cerr << " Allocation error " << e << endl;
exit(1);
}
}
for(int device=0; device < ngpus ; device ++){
cudaSetDevice(device);
// cudaStreamCreate(&cudaStreams[i]);///not needed
/// initialize the device memory
void *host;
for (map<string_t, void *>::iterator it=nameHostConstrVal.begin(); it!=nameHostConstrVal.end(); ++it) {
string_t name = it->first;
host = it->second;
void *device_add = nameDeviceConstrVal[device][name];
int size = numConstraints[name];
if(nameType[name]==INT) {
e += cudaMemcpyAsync(device_add, host, sizeof(IntCudaConstraint)*size, cudaMemcpyHostToDevice);
} else {
e += cudaMemcpyAsync(device_add, host, sizeof(StringCudaConstraint)*size, cudaMemcpyHostToDevice);
}
//cudaDeviceSynchronize();
//
}
//free(host);
Op *host1;
for (map<string_t, Op *>::iterator it=nameHostConstrOp.begin(); it!=nameHostConstrOp.end(); ++it) {
string_t name = it->first;
host1 = it->second;
Op *device_add = nameDeviceConstrOp[device][name];
int size = numConstraints[name];
e += cudaMemcpyAsync(device_add, host1, sizeof(Op)*size, cudaMemcpyHostToDevice);
//cudaDeviceSynchronize();
}
//free(host1);
int *host2;
for (map<string_t, int *>::iterator it=nameHostFilterIdx.begin(); it!=nameHostFilterIdx.end(); ++it) {
string_t name = it->first;
host2 = it->second;
int *device_add = nameDeviceFilterIdx[device][name];
int size = numConstraints[name];
e += cudaMemcpyAsync(device_add, host2, sizeof(int)*size, cudaMemcpyHostToDevice);
//cudaDeviceSynchronize();
}
// free(host2);
e += cudaMemcpyAsync(filtersInfo[device], hostFiltersInfo, (size_t) sizeof(FilterInfo)*numFilters, cudaMemcpyHostToDevice);
cudaMemsetAsync(currentFiltersCount[device], 0, (size_t) sizeof(unsigned char)*numFilters);
cudaMemsetAsync(interfacesDevice[device], 0, (size_t) sizeof(unsigned char)*numInterfaces);
cudaDeviceSynchronize();
consolidated = true;
if (e>0) {
cerr << " Memcpy error " << e << " during consolidation " << endl;
exit(1);
}
// set up the runtime to optimize performance
//cudaFuncSetCacheConfig(evalConstraint, cudaFuncCachePreferL1);
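// PreferL1 trades shared-memory capacity for a larger L1 cache; these kernels use no shared
// memory, so the larger cache benefits their scattered global reads.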
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
}
for(int device=0;device<ngpus;device++){
cudaSetDevice(device);
cudaDeviceSynchronize();
}
int totConstr=0;
for(map<string_t,int>::iterator it=numConstraints.begin(); it!=numConstraints.end(); ++it) {
totConstr+=it->second;
}
cout << endl << " ### " << totConstr << " constraints allocated ### " << endl;
cout << endl << " ### " << allocSize << " bytes allocated on device ### " << endl;
cout << endl << "#####################" << endl;
free(hostFiltersInfo);
}
int CudaKernels::getGpuCount(){
return ngpus;
}
void CudaKernels::getStats(double &hToD, double &exec, double &dToH) {
hToD = hostToDeviceCopyTime;
exec = execTime;
dToH = deviceToHostCopyTime;
}
#if STATS==1
void CudaKernels::processMessage(CudaOutbox *outbox,int dev_no) {
Timer t;
t.start();
dev_no%=ngpus;
cudaSetDevice(dev_no); /// cuda set device
int maxConstr = copyMsgToDevice(outbox->message,dev_no);
//cudaDeviceSynchronize(); // TODO: remove
hostToDeviceCopyTime += t.stop();
if (maxConstr>0) {
t.start();
for(int i=0; i<numValues; i++) {
evalConstraint<<<hostInput[dev_no][i].numConstraints/NUM_THREADS+1, NUM_THREADS>>>(currentFiltersCount[dev_no], filtersInfo[dev_no], interfacesDevice[dev_no], numFilters, numInterfaces, i,constInput[dev_no]);
}
summarize<<<numFilters/2048, NUM_THREADS>>>(currentFiltersCount[dev_no], filtersInfo[dev_no], interfacesDevice[dev_no], numFilters, numInterfaces);
// computeResults(maxConstr,dev_no);
//cudaDeviceSynchronize(); // TODO: remove
execTime += t.stop();
//t.start();
//getMatchingInterfaces(outbox->outgoingInterfaces,dev_no);
//cudaDeviceSynchronize(); // TODO: remove
//deviceToHostCopyTime += t.stop();
}
}
#elif STATS==0
void CudaKernels::processMessage(CudaOutbox *outbox) {
int maxConstr = copyMsgToDevice(outbox->message);
if (maxConstr>0) {
computeResults(maxConstr);
getMatchingInterfaces(outbox->outgoingInterfaces);
}
}
#endif
int CudaKernels::copyMsgToDevice(CudaMessage *message,int dev_no) {
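// For every attribute in the message, stage one CudaInputElem whose pointers reference the
// device-resident constraint arrays registered under that attribute name, then asynchronously
// copy the staging buffer into constInput on the selected GPU.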
int dest = 0;
int maxConstr = 0;
for (int i=0; i<message->numAttributes; i++) {
string_t name = message->attributes[i].name;
map<string_t, void *>::iterator it = nameDeviceConstrVal[dev_no].find(name);
if(it==nameDeviceConstrVal[dev_no].end()) {
cerr << "Name: ";
for(int i=0; i<name.length(); i++) cerr << name[i];
cerr << " not found during message processing" << endl;
exit(1);
}
hostInput[dev_no][dest].constrVal = it->second;
map<string_t, Op *>::iterator it1 = nameDeviceConstrOp[dev_no].find(name);
if(it1==nameDeviceConstrOp[dev_no].end()) {
cerr << "Name: ";
for(int i=0; i<name.length(); i++) cerr << name[i];
cerr << " not found during message processing" << endl;
exit(1);
}
hostInput[dev_no][dest].constrOp = it1->second;
map<string_t, int *>::iterator it2 = nameDeviceFilterIdx[dev_no].find(name);
if(it2==nameDeviceFilterIdx[dev_no].end()) {
cerr << "Name: ";
for(int i=0; i<name.length(); i++) cerr << name[i];
cerr << " not found during message processing" << endl;
exit(1);
}
hostInput[dev_no][dest].filterIdx = it2->second;
hostInput[dev_no][dest].numConstraints = numConstraints[name];
if (hostInput[dev_no][dest].numConstraints>maxConstr) maxConstr = hostInput[dev_no][dest].numConstraints;
hostInput[dev_no][dest].value = message->attributes[i].value;
dest++;
}
numValues = dest;
if (dest>0) {
int e = 0;
e += cudaMemcpyAsync(constInput[dev_no], hostInput[dev_no], (size_t) sizeof(CudaInputElem)*numValues,cudaMemcpyHostToDevice);
if (e>0) {
cerr << " Memcpy error " << e << " during message processing " << endl;
exit(1);
}
}
return maxConstr;
}
void CudaKernels::computeResults(int maxConstr,int dev_no) {
//int numBlocksX = 1+maxConstr/NUM_THREADS;
//dim3 numBlocks = dim3(numBlocksX);
for(int i=0; i<numValues; i++) {
evalConstraint<<<hostInput[dev_no][i].numConstraints/NUM_THREADS+1, NUM_THREADS>>>(currentFiltersCount[dev_no], filtersInfo[dev_no], interfacesDevice[dev_no], numFilters, numInterfaces, i,constInput[dev_no]);
}
summarize<<<numFilters/2048, NUM_THREADS>>>(currentFiltersCount[dev_no], filtersInfo[dev_no], interfacesDevice[dev_no], numFilters, numInterfaces);
}
void CudaKernels::getMatchingInterfaces(set<int> &results,int dev_no) {
cudaSetDevice(dev_no);
Timer t;
t.start();
int e = cudaMemcpyAsync(interfacesHost[dev_no], interfacesDevice[dev_no], (size_t) sizeof(unsigned char)*numInterfaces, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
deviceToHostCopyTime += t.stop();
cudaMemsetAsync(currentFiltersCount[dev_no], 0, (size_t) sizeof(unsigned char)*numFilters);
cudaMemsetAsync(interfacesDevice[dev_no], 0, (size_t) sizeof(unsigned char)*numInterfaces);
//cudaDeviceReset();
//cleanCounters<<<numFilters/2048, NUM_THREADS>>>(currentFiltersCount, interfacesDevice, numFilters, numInterfaces);
if (e>0) {
cerr << " Memcpy error " << e << " while copying matching interfaces " << endl;
exit(1);
}
/// clear the previous set ;
results.clear();
for (int i=0; i<numInterfaces; i++) {
if (interfacesHost[dev_no][i]!=0) {
results.insert(i);
}
}
}
|
52423209c1869d000cbe2192669aaa0ceeb0b64d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2019, Aman Gupta, ENG EC 527, Prof. Martin Herbordt */
/******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include "matrix_hip.cuh"
#include "nn_layer.cuh"
#include "cuda_utils.cuh"
#include "../nn_param.cuh"
/******************************************************************************/
/* Neural Network layer initialization and deletion */
/******************************************************************************/
data_t rand_weight() {
return ((data_t)rand())/((data_t)RAND_MAX);
}
void weight_init(Matrix * W) {
int row, col;
for (row = 1; row <= W->rows; row++) {
for (col = 1; col <= W->cols; col++) {
ELEMENT(W, row, col) = rand_weight();
}
}
// copy host W to device
copy_matrix_H2D(W);
}
nnlayer * nnl_init(int l, int Wx, int Wy, char f) {
nnlayer * nnl = (nnlayer*)malloc(sizeof(nnlayer));
if (!nnl) { printf("Unabble to initialize nn layer\n"); return NULL; }
nnl->l = l;
nnl->A = matrix_init(BATCH_SIZE, Wx);
nnl->W = matrix_init(Wx, Wy);
nnl->b = matrix_init(Wy, 1);
nnl->Z = matrix_init(BATCH_SIZE, Wy);
nnl->dA = matrix_init(BATCH_SIZE, Wx);
nnl->dZ = matrix_init(BATCH_SIZE, Wy);
matrix_allocate(nnl->A);
matrix_allocate(nnl->W); weight_init(nnl->W); // initialize random weights
matrix_allocate(nnl->b);
matrix_allocate(nnl->Z);
matrix_allocate(nnl->dA);
matrix_allocate(nnl->dZ);
nnl->f = f;
return nnl;
}
int nnl_free(nnlayer * nnl) {
if (!nnl) { printf("Unabble to initialize nn layer\n"); return -1; }
int freea, freew, freeb, freez, freeda, freedz;
freea = freew = freeb = freez = freeda = freedz = -1;
if (nnl->A) freea = matrix_free(nnl->A);
if (nnl->W) freew = matrix_free(nnl->W);
if (nnl->b) freeb = matrix_free(nnl->b);
if (nnl->Z) freez = matrix_free(nnl->Z);
if (nnl->dA) freeda = matrix_free(nnl->dA);
if (nnl->dZ) freedz = matrix_free(nnl->dZ);
// printf("A: %d, W: %d, b: %d, Z: %d, dA: %d, dZ: %d\n",
// freea, freew, freeb, freez, freeda, freedz);
if (freea || freew || freeb || freez || freeda || freedz) return -1;
free(nnl);
return 0;
}
/******************************************************************************/
/* Linear Layers */
/******************************************************************************/
/* Forward Pass */
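// Naive dense forward pass: each thread accumulates one output element of Z as a dot product
// over k and adds the bias for its row; no shared-memory tiling is used.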
__global__
void FFNNFP_global(data_t *Z, data_t *W, data_t *A, data_t *b, int Wx, int Wy,
int Ax, int Ay) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int Zx = Ax;
int Zy = Wy;
data_t val = (data_t)0;
int k;
if (row < Zy && col < Zx) {
for (k = 0; k < Wx; k++) {
val += W[row*Wx+k] * A[k*Ax+col];
}
Z[row*Zx+col] = val + b[row];
}
}
/* Forward pass call from host */
Matrix * nnl_forward_pass_global(nnlayer * nnl, Matrix *A) {
assert(nnl->W->rows == A->cols); nnl->A = A;
// call forward pass kernel
dim3 block_W(BLOCK_SIZE_W, BLOCK_SIZE_W);
dim3 grid_W((nnl->Z->rows+block_W.x-1)/block_W.x,
(nnl->Z->cols+block_W.y-1)/block_W.y);
hipLaunchKernelGGL(( FFNNFP_global), dim3(grid_W), dim3(block_W), 0, 0, nnl->Z->data_d,
nnl->W->data_d,
nnl->A->data_d,
nnl->b->data_d,
nnl->W->rows, nnl->W->cols,
nnl->A->rows, nnl->A->cols);
return nnl->Z;
}
/* Back Propagation */
__global__
void FFNNBP_global(data_t *dA, data_t *W, data_t *dZ, int Wx, int Wy,
int dZx, int dZy) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// indexing in W transposed
int dAx = dZx;
int dAy = Wx;
data_t val = (data_t)0;
int k;
if (row < dAy && col < dAx) {
for (k = 0; k < Wy; k++) {
val += W[k*Wx+row] * dZ[k*dZx+col];
}
dA[row*dAx+col] = val;
}
}
__global__
void FFNNUW_global(data_t *W, data_t *dZ, data_t *A, int dZx, int dZy, int Ax,
int Ay, data_t lr) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// indexing in A transposed
int Wx = Ay;
int Wy = dZy;
data_t val = (data_t)0;
int k;
if (row < Wy && col < Wx) {
for (k = 0; k < dZx; k++) {
val += dZ[row*dZx+k] * A[col*Ax+k];
}
W[row*Wx+col] += lr*(val/Ax);
}
}
__global__
void FFNNUb_global(data_t *b, data_t *dZ, int dZx, int dZy, int bx, data_t lr) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dZx*dZy) {
int zx = i % dZx;
int zy = i / dZx;
// do an atomic add to avoid race conditions
// (because many threads might write to same memory location )
atomicAdd(&b[zy], lr*(dZ[zy*dZx+zx]/dZx));
}
}
/* backward pass call from host */
void nnl_back_propagation_global(nnlayer * nnl, Matrix *dZ, data_t lr) {
// to reduce number of memory references
int Ax = nnl->A->rows, Ay = nnl->A->cols;
int Wx = nnl->W->rows, Wy = nnl->W->cols;
int dZx = dZ->rows, dZy = dZ->cols;
// call forward pass kernel
// Compute back-propagation error using dZ
dim3 block_W(BLOCK_SIZE_W, BLOCK_SIZE_W);
dim3 grid_W((Ax+block_W.x-1)/block_W.x, (Ay+block_W.y-1)/block_W.y);
hipLaunchKernelGGL(( FFNNBP_global), dim3(grid_W), dim3(block_W), 0, 0, nnl->dA->data_d,
nnl->W->data_d,
dZ->data_d,
Wx, Wy,
dZx, dZy);
// update bias
dim3 block_b(BLOCK_SIZE_b);
dim3 num_blocks_b((dZy*dZx+block_b.x-1)/block_b.x);
hipLaunchKernelGGL(( FFNNUb_global), dim3(num_blocks_b), dim3(block_b), 0, 0, nnl->b->data_d,
dZ->data_d,
dZx, dZy,
nnl->b->rows,
lr);
// update Weights
hipLaunchKernelGGL(( FFNNUW_global), dim3(grid_W), dim3(block_W), 0, 0, nnl->W->data_d,
dZ->data_d,
nnl->A->data_d,
dZx, dZy,
Ax, Ay,
lr);
// return nnl->dA;
}
// /* Testing network and layer initializations */
// printf("On host\n");
// printf("Stats on host for layer %d, Activation: %c\n", nn->layer[0]->l, nn->layer[0]->f);
// print_matrix(nn->layer[0]->A);
// printf("\n");
// print_matrix(nn->layer[0]->W);
// printf("\n");
// print_matrix(nn->layer[0]->b);
// printf("\n");
// print_matrix(nn->layer[0]->Z);
// printf("\n");
// print_matrix(nn->layer[0]->dA);
// printf("\n");
// print_matrix(nn->layer[0]->dZ);
// printf("\n\n");
// printf("Stats on host for layer %d, Activation: %c\n", nn->layer[1]->l, nn->layer[1]->f);
// print_matrix(nn->layer[1]->A);
// printf("\n");
// print_matrix(nn->layer[1]->W);
// printf("\n");
// print_matrix(nn->layer[1]->b);
// printf("\n");
// print_matrix(nn->layer[1]->Z);
// printf("\n");
// print_matrix(nn->layer[1]->dA);
// printf("\n");
// print_matrix(nn->layer[1]->dZ);
// printf("\n\n");
// printf("On Device\n");
// printf("Stats on device for layer %d, Activation: %c\n", nn->layer[0]->l, nn->layer[0]->f);
// print_matrix_d(nn->layer[0]->A);
// printf("\n");
// print_matrix_d(nn->layer[0]->W);
// printf("\n");
// print_matrix_d(nn->layer[0]->b);
// printf("\n");
// print_matrix_d(nn->layer[0]->Z);
// printf("\n");
// print_matrix_d(nn->layer[0]->dA);
// printf("\n");
// print_matrix_d(nn->layer[0]->dZ);
// printf("\n\n");
// printf("Stats on device for layer %d, Activation: %c\n", nn->layer[1]->l, nn->layer[1]->f);
// print_matrix_d(nn->layer[1]->A);
// printf("\n");
// print_matrix_d(nn->layer[1]->W);
// printf("\n");
// print_matrix_d(nn->layer[1]->b);
// printf("\n");
// print_matrix_d(nn->layer[1]->Z);
// printf("\n");
// print_matrix_d(nn->layer[1]->dA);
// printf("\n");
// print_matrix_d(nn->layer[1]->dZ);
// printf("\n\n");
|
52423209c1869d000cbe2192669aaa0ceeb0b64d.cu
|
/* Copyright 2019, Aman Gupta, ENG EC 527, Prof. Martin Herbordt */
/******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include "matrix.cuh"
#include "nn_layer.cuh"
#include "cuda_utils.cuh"
#include "../nn_param.cuh"
/******************************************************************************/
/* Neural Network layer initialization and deletion */
/******************************************************************************/
data_t rand_weight() {
return ((data_t)rand())/((data_t)RAND_MAX);
}
void weight_init(Matrix * W) {
int row, col;
for (row = 1; row <= W->rows; row++) {
for (col = 1; col <= W->cols; col++) {
ELEMENT(W, row, col) = rand_weight();
}
}
// copy host W to device
copy_matrix_H2D(W);
}
nnlayer * nnl_init(int l, int Wx, int Wy, char f) {
nnlayer * nnl = (nnlayer*)malloc(sizeof(nnlayer));
if (!nnl) { printf("Unabble to initialize nn layer\n"); return NULL; }
nnl->l = l;
nnl->A = matrix_init(BATCH_SIZE, Wx);
nnl->W = matrix_init(Wx, Wy);
nnl->b = matrix_init(Wy, 1);
nnl->Z = matrix_init(BATCH_SIZE, Wy);
nnl->dA = matrix_init(BATCH_SIZE, Wx);
nnl->dZ = matrix_init(BATCH_SIZE, Wy);
matrix_allocate(nnl->A);
matrix_allocate(nnl->W); weight_init(nnl->W); // initialize random weights
matrix_allocate(nnl->b);
matrix_allocate(nnl->Z);
matrix_allocate(nnl->dA);
matrix_allocate(nnl->dZ);
nnl->f = f;
return nnl;
}
int nnl_free(nnlayer * nnl) {
if (!nnl) { printf("Unabble to initialize nn layer\n"); return -1; }
int freea, freew, freeb, freez, freeda, freedz;
freea = freew = freeb = freez = freeda = freedz = -1;
if (nnl->A) freea = matrix_free(nnl->A);
if (nnl->W) freew = matrix_free(nnl->W);
if (nnl->b) freeb = matrix_free(nnl->b);
if (nnl->Z) freez = matrix_free(nnl->Z);
if (nnl->dA) freeda = matrix_free(nnl->dA);
if (nnl->dZ) freedz = matrix_free(nnl->dZ);
// printf("A: %d, W: %d, b: %d, Z: %d, dA: %d, dZ: %d\n",
// freea, freew, freeb, freez, freeda, freedz);
if (freea || freew || freeb || freez || freeda || freedz) return -1;
free(nnl);
return 0;
}
/******************************************************************************/
/* Linear Layers */
/******************************************************************************/
/* Forward Pass */
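// Naive dense forward pass: each thread accumulates one output element of Z as a dot product
// over k and adds the bias for its row; no shared-memory tiling is used.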
__global__
void FFNNFP_global(data_t *Z, data_t *W, data_t *A, data_t *b, int Wx, int Wy,
int Ax, int Ay) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int Zx = Ax;
int Zy = Wy;
data_t val = (data_t)0;
int k;
if (row < Zy && col < Zx) {
for (k = 0; k < Wx; k++) {
val += W[row*Wx+k] * A[k*Ax+col];
}
Z[row*Zx+col] = val + b[row];
}
}
/* Forward pass call from host */
Matrix * nnl_forward_pass_global(nnlayer * nnl, Matrix *A) {
assert(nnl->W->rows == A->cols); nnl->A = A;
// call forward pass kernel
dim3 block_W(BLOCK_SIZE_W, BLOCK_SIZE_W);
dim3 grid_W((nnl->Z->rows+block_W.x-1)/block_W.x,
(nnl->Z->cols+block_W.y-1)/block_W.y);
FFNNFP_global<<<grid_W, block_W>>>(nnl->Z->data_d,
nnl->W->data_d,
nnl->A->data_d,
nnl->b->data_d,
nnl->W->rows, nnl->W->cols,
nnl->A->rows, nnl->A->cols);
return nnl->Z;
}
/* Back Propagation */
__global__
void FFNNBP_global(data_t *dA, data_t *W, data_t *dZ, int Wx, int Wy,
int dZx, int dZy) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// indexing in W transposed
int dAx = dZx;
int dAy = Wx;
data_t val = (data_t)0;
int k;
if (row < dAy && col < dAx) {
for (k = 0; k < Wy; k++) {
val += W[k*Wx+row] * dZ[k*dZx+col];
}
dA[row*dAx+col] = val;
}
}
__global__
void FFNNUW_global(data_t *W, data_t *dZ, data_t *A, int dZx, int dZy, int Ax,
int Ay, data_t lr) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// indexing in A transposed
int Wx = Ay;
int Wy = dZy;
data_t val = (data_t)0;
int k;
if (row < Wy && col < Wx) {
for (k = 0; k < dZx; k++) {
val += dZ[row*dZx+k] * A[col*Ax+k];
}
W[row*Wx+col] += lr*(val/Ax);
}
}
__global__
void FFNNUb_global(data_t *b, data_t *dZ, int dZx, int dZy, int bx, data_t lr) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dZx*dZy) {
int zx = i % dZx;
int zy = i / dZx;
// do an atomic add to avoid race conditions
// (because many threads might write to same memory location )
atomicAdd(&b[zy], lr*(dZ[zy*dZx+zx]/dZx));
}
}
/* backward pass call from host */
void nnl_back_propagation_global(nnlayer * nnl, Matrix *dZ, data_t lr) {
// to reduce number of memory references
int Ax = nnl->A->rows, Ay = nnl->A->cols;
int Wx = nnl->W->rows, Wy = nnl->W->cols;
int dZx = dZ->rows, dZy = dZ->cols;
// call forward pass kernel
// Compute back-propagation error using dZ
dim3 block_W(BLOCK_SIZE_W, BLOCK_SIZE_W);
dim3 grid_W((Ax+block_W.x-1)/block_W.x, (Ay+block_W.y-1)/block_W.y);
FFNNBP_global<<<grid_W, block_W>>>(nnl->dA->data_d,
nnl->W->data_d,
dZ->data_d,
Wx, Wy,
dZx, dZy);
// update bias
dim3 block_b(BLOCK_SIZE_b);
dim3 num_blocks_b((dZy*dZx+block_b.x-1)/block_b.x);
FFNNUb_global<<<num_blocks_b, block_b>>>(nnl->b->data_d,
dZ->data_d,
dZx, dZy,
nnl->b->rows,
lr);
// update Weights
FFNNUW_global<<<grid_W, block_W>>>(nnl->W->data_d,
dZ->data_d,
nnl->A->data_d,
dZx, dZy,
Ax, Ay,
lr);
// return nnl->dA;
}
// /* Testing network and layer initializations */
// printf("On host\n");
// printf("Stats on host for layer %d, Activation: %c\n", nn->layer[0]->l, nn->layer[0]->f);
// print_matrix(nn->layer[0]->A);
// printf("\n");
// print_matrix(nn->layer[0]->W);
// printf("\n");
// print_matrix(nn->layer[0]->b);
// printf("\n");
// print_matrix(nn->layer[0]->Z);
// printf("\n");
// print_matrix(nn->layer[0]->dA);
// printf("\n");
// print_matrix(nn->layer[0]->dZ);
// printf("\n\n");
// printf("Stats on host for layer %d, Activation: %c\n", nn->layer[1]->l, nn->layer[1]->f);
// print_matrix(nn->layer[1]->A);
// printf("\n");
// print_matrix(nn->layer[1]->W);
// printf("\n");
// print_matrix(nn->layer[1]->b);
// printf("\n");
// print_matrix(nn->layer[1]->Z);
// printf("\n");
// print_matrix(nn->layer[1]->dA);
// printf("\n");
// print_matrix(nn->layer[1]->dZ);
// printf("\n\n");
// printf("On Device\n");
// printf("Stats on device for layer %d, Activation: %c\n", nn->layer[0]->l, nn->layer[0]->f);
// print_matrix_d(nn->layer[0]->A);
// printf("\n");
// print_matrix_d(nn->layer[0]->W);
// printf("\n");
// print_matrix_d(nn->layer[0]->b);
// printf("\n");
// print_matrix_d(nn->layer[0]->Z);
// printf("\n");
// print_matrix_d(nn->layer[0]->dA);
// printf("\n");
// print_matrix_d(nn->layer[0]->dZ);
// printf("\n\n");
// printf("Stats on device for layer %d, Activation: %c\n", nn->layer[1]->l, nn->layer[1]->f);
// print_matrix_d(nn->layer[1]->A);
// printf("\n");
// print_matrix_d(nn->layer[1]->W);
// printf("\n");
// print_matrix_d(nn->layer[1]->b);
// printf("\n");
// print_matrix_d(nn->layer[1]->Z);
// printf("\n");
// print_matrix_d(nn->layer[1]->dA);
// printf("\n");
// print_matrix_d(nn->layer[1]->dZ);
// printf("\n\n");
|
ceffd75f32fc9667244709d49267aa7e1410a9da.hip
|
// !!! This is a file automatically generated by hipify!!!
//Includes for IntelliSense
#define _SIZE_T_DEFINED
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
#include <math.h>
#include "..\Activation\ActivationFunction.cu"
extern "C"
{
//
// Loss function for comparing images.
// See
// https://github.com/skaae/lasagne-draw/blob/master/deepmodels/layers/draw.py
// line 613 - 632
// The sigmoid function is applied to the output (to ensure range 0..1)
// then we take the cross entropy between target and output.
//
__global__ void ImageLossKernel(
float *canvasPtr,
float *targetPtr,
float *deltaPtr,
float *costPtr,
int thisLayerSize,
float imageLossLearningRate
)
{
extern __shared__ float loss[];
unsigned int blockSize = blockDim.x;
unsigned int tid = threadIdx.x;
unsigned int k = tid;
loss[tid] = 0;
while (k < thisLayerSize)
{
if (!isnan(targetPtr[k]))
{
float o = sigmoid(canvasPtr[k]);
float t = targetPtr[k];
// Use cross entropy for the loss
loss[tid] -= t * logf(o) + (1 - t) * logf(1 - o);
deltaPtr[k] += imageLossLearningRate * (o - t);
}
k += blockSize;
}
// reduction of loss to cost
if (blockSize >= 1024) { if (tid < 512) { loss[tid] += loss[tid + 512]; } __syncthreads(); }
if (blockSize >= 512) { if (tid < 256) { loss[tid] += loss[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { loss[tid] += loss[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { loss[tid] += loss[tid + 64]; } __syncthreads(); }
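// The final 32 threads reduce without __syncthreads(), relying on warp-synchronous execution
// (assumes a full warp runs in lockstep; newer architectures would need volatile or __syncwarp).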
if (tid < 32) {
if (blockSize >= 64) loss[tid] += loss[tid + 32];
if (blockSize >= 32) loss[tid] += loss[tid + 16];
if (blockSize >= 16) loss[tid] += loss[tid + 8];
if (blockSize >= 8) loss[tid] += loss[tid + 4];
if (blockSize >= 4) loss[tid] += loss[tid + 2];
if (blockSize >= 2) loss[tid] += loss[tid + 1];
}
// Add, not assign, because we may be using this loss measure in combination with another
if (tid == 0)
*costPtr += loss[0];
}
}
|
ceffd75f32fc9667244709d49267aa7e1410a9da.cu
|
//Includes for IntelliSense
#define _SIZE_T_DEFINED
#include <cuda.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
#include <math.h>
#include "..\Activation\ActivationFunction.cu"
extern "C"
{
//
// Loss function for comparing images.
// See
// https://github.com/skaae/lasagne-draw/blob/master/deepmodels/layers/draw.py
// line 613 - 632
// The sigmoid function is applied to the output (to ensure range 0..1)
// then we take the cross entropy between target and output.
//
__global__ void ImageLossKernel(
float *canvasPtr,
float *targetPtr,
float *deltaPtr,
float *costPtr,
int thisLayerSize,
float imageLossLearningRate
)
{
extern __shared__ float loss[];
unsigned int blockSize = blockDim.x;
unsigned int tid = threadIdx.x;
unsigned int k = tid;
loss[tid] = 0;
while (k < thisLayerSize)
{
if (!isnan(targetPtr[k]))
{
float o = sigmoid(canvasPtr[k]);
float t = targetPtr[k];
// Use cross entropy for the loss
loss[tid] -= t * logf(o) + (1 - t) * logf(1 - o);
deltaPtr[k] += imageLossLearningRate * (o - t);
}
k += blockSize;
}
// reduction of loss to cost
if (blockSize >= 1024) { if (tid < 512) { loss[tid] += loss[tid + 512]; } __syncthreads(); }
if (blockSize >= 512) { if (tid < 256) { loss[tid] += loss[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { loss[tid] += loss[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { loss[tid] += loss[tid + 64]; } __syncthreads(); }
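// The final 32 threads reduce without __syncthreads(), relying on warp-synchronous execution
// (assumes a full warp runs in lockstep; newer architectures would need volatile or __syncwarp).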
if (tid < 32) {
if (blockSize >= 64) loss[tid] += loss[tid + 32];
if (blockSize >= 32) loss[tid] += loss[tid + 16];
if (blockSize >= 16) loss[tid] += loss[tid + 8];
if (blockSize >= 8) loss[tid] += loss[tid + 4];
if (blockSize >= 4) loss[tid] += loss[tid + 2];
if (blockSize >= 2) loss[tid] += loss[tid + 1];
}
// Add, not assign, because we may be using this loss measure in combination with another
if (tid == 0)
*costPtr += loss[0];
}
}
|
9a10b82fe893661c24e8a4575cd3c8b356cfdd3e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "headers.h"
/**
* Host main routine
*/
int main(void)
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// Print the vector length to be used, and compute its size
int numElements = 5;
int numElements2d = 5;
int i, j;
//printf("Enter matrix size: ");
//scanf("%d", &numElements);
size_t size = numElements * sizeof(float);
size_t size2d = numElements2d * numElements2d * sizeof(float);
//printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A, *h_B, *h_M;
float *h_A2d, *h_B2d, *h_M2d;
//for (i= 0; i<numElements; i++)
h_A = (float *)malloc(size);
h_B = (float *)malloc(size);
h_M = (float *)malloc(5*sizeof(float));
h_A2d = (float *)malloc(size2d);
h_B2d = (float *)malloc(size2d);
h_M2d = (float *)malloc(9*sizeof(float));
// Verify that allocations succeeded
if (h_A == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
if (h_B == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
if (h_M == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
if (h_A2d == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
if (h_B2d == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
if (h_M2d == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
//Initialize vectors
for (i = 0; i < numElements; ++i)
{
h_A[i] = ((float)rand()/(float)RAND_MAX);
h_B[i] = 0;
printf("%f, ", h_A[i]);
}
printf("\n");
for (i = 0; i < 5; ++i)
{
h_M[i] = ((float)rand()/(float)RAND_MAX);
printf("%f, ", h_M[i]);
}
printf("\n\n");
//Initialize the host input vectors
for (i = 0; i < numElements; i++)
{
for (j = 0; j < numElements; j++)
{
h_A2d[i*numElements + j] = ((float)rand() / RAND_MAX);
h_B2d[i*numElements + j] = 0;
printf("%f ", h_A2d[i*numElements + j]); // Or *(*(arr+i)+j) = ++count
}
printf("\n");
}
printf("\n\n");
//Initialize the host input vectors
for (i = 0; i < 3; i++)
{
for (j = 0; j < 3; j++)
{
h_M2d[i*3 + j] = ((float)rand() / RAND_MAX);
printf("%f ", h_M2d[i*3 + j]); // Or *(*(arr+i)+j) = ++count
}
printf("\n");
}
printf("\n\n");
// Allocate the device input vector A
float *d_A = NULL, *d_B = NULL, *d_M = NULL;
err = hipMalloc((void **)&d_A, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc((void **)&d_B, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc((void **)&d_M, 5*sizeof(float));
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector M (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector A
float *d_A2d = NULL, *d_B2d = NULL, *d_M2d = NULL;
err = hipMalloc((void **)&d_A2d, size2d);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc((void **)&d_B2d, size2d);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc((void **)&d_M2d, 9*sizeof(float));
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector M (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_M, h_M, 5*sizeof(float), hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector M from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A2d, h_A2d, size2d, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_B2d, h_B2d, size2d, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_M2d, h_M2d, 9*sizeof(float), hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector M from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the 1D and 2D convolution CUDA kernels
int threadsPerBlock = numElements;
int blocksPerGrid = numElements;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( convolution1d), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_M, numElements);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch convolution1d kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
int threadsPerBlock2d = numElements2d;
int blocksPerGrid2d = numElements2d;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid2d, threadsPerBlock2d);
hipLaunchKernelGGL(( convolution2d), dim3(blocksPerGrid2d), dim3(threadsPerBlock2d), 0, 0, d_A2d, d_B2d, d_M2d, numElements2d);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch convolution2d kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_B, d_B, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_B2d, d_B2d, size2d, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
// for (int i = 0; i < numElements; ++i)
// {
// if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
// {
// fprintf(stderr, "Result verification failed at element %d!\n", i);
// exit(EXIT_FAILURE);
// }
// }
// for(i = 0; i<numElements; i++)
// {
// for (j = 0; j<numElements; j++)
// {
// printf("%f ", h_A[i*numElements + j]);
// }
// printf("\n");
// }
for (i = 0; i < numElements; ++i)
{
printf("%f, ", h_B[i]);
}
printf("\n");
for(i = 0; i<numElements; i++)
{
for (j = 0; j<numElements; j++)
{
printf("%f ", h_B2d[i*numElements + j]);
}
printf("\n");
}
printf("Test PASSED\n");
// Free device global memory
err = hipFree(d_A);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_B);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_A2d);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_B2d);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_M);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_M2d);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// err = hipFree(d_C);
// if (err != hipSuccess)
// {
// fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// Free host memory
free(h_A);
free(h_B);
free(h_A2d);
free(h_B2d);
free(h_M);
free(h_M2d);
// free(h_C);
// Reset the device and exit
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
err = hipDeviceReset();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
|
9a10b82fe893661c24e8a4575cd3c8b356cfdd3e.cu
|
#include "headers.h"
/**
* Host main routine
*/
int main(void)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// Print the vector length to be used, and compute its size
int numElements = 5;
int numElements2d = 5;
int i, j;
//printf("Enter matrix size: ");
//scanf("%d", &numElements);
size_t size = numElements * sizeof(float);
size_t size2d = numElements2d * numElements2d * sizeof(float);
//printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A, *h_B, *h_M;
float *h_A2d, *h_B2d, *h_M2d;
//for (i= 0; i<numElements; i++)
h_A = (float *)malloc(size);
h_B = (float *)malloc(size);
h_M = (float *)malloc(5*sizeof(float));
h_A2d = (float *)malloc(size2d);
h_B2d = (float *)malloc(size2d);
h_M2d = (float *)malloc(9*sizeof(float));
// Verify that allocations succeeded
if (h_A == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
if (h_B == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
if (h_M == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
if (h_A2d == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
if (h_B2d == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
if (h_M2d == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
//Initialize vectors
for (i = 0; i < numElements; ++i)
{
h_A[i] = ((float)rand()/(float)RAND_MAX);
h_B[i] = 0;
printf("%f, ", h_A[i]);
}
printf("\n");
for (i = 0; i < 5; ++i)
{
h_M[i] = ((float)rand()/(float)RAND_MAX);
printf("%f, ", h_M[i]);
}
printf("\n\n");
//Initialize the host input vectors
for (i = 0; i < numElements; i++)
{
for (j = 0; j < numElements; j++)
{
h_A2d[i*numElements + j] = ((float)rand() / RAND_MAX);
h_B2d[i*numElements + j] = 0;
printf("%f ", h_A2d[i*numElements + j]); // Or *(*(arr+i)+j) = ++count
}
printf("\n");
}
printf("\n\n");
//Initialize the host input vectors
for (i = 0; i < 3; i++)
{
for (j = 0; j < 3; j++)
{
h_M2d[i*3 + j] = ((float)rand() / RAND_MAX);
printf("%f ", h_M2d[i*3 + j]); // Or *(*(arr+i)+j) = ++count
}
printf("\n");
}
printf("\n\n");
// Allocate the device input vector A
float *d_A = NULL, *d_B = NULL, *d_M = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&d_B, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&d_M, 5*sizeof(float));
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector M (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector A
float *d_A2d = NULL, *d_B2d = NULL, *d_M2d = NULL;
err = cudaMalloc((void **)&d_A2d, size2d);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&d_B2d, size2d);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&d_M2d, 9*sizeof(float));
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector M (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_M, h_M, 5*sizeof(float), cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector M from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A2d, h_A2d, size2d, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_B2d, h_B2d, size2d, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_M2d, h_M2d, 9*sizeof(float), cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector M from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the 1D and 2D convolution CUDA kernels
int threadsPerBlock = numElements;
int blocksPerGrid = numElements;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
convolution1d<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_M, numElements);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch convolution1d kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
int threadsPerBlock2d = numElements2d;
int blocksPerGrid2d = numElements2d;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid2d, threadsPerBlock2d);
convolution2d<<<blocksPerGrid2d, threadsPerBlock2d>>>(d_A2d, d_B2d, d_M2d, numElements2d);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch convolution2d kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_B, d_B, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_B2d, d_B2d, size2d, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
// for (int i = 0; i < numElements; ++i)
// {
// if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
// {
// fprintf(stderr, "Result verification failed at element %d!\n", i);
// exit(EXIT_FAILURE);
// }
// }
// for(i = 0; i<numElements; i++)
// {
// for (j = 0; j<numElements; j++)
// {
// printf("%f ", h_A[i*numElements + j]);
// }
// printf("\n");
// }
for (i = 0; i < numElements; ++i)
{
printf("%f, ", h_B[i]);
}
printf("\n");
for(i = 0; i<numElements; i++)
{
for (j = 0; j<numElements; j++)
{
printf("%f ", h_B2d[i*numElements + j]);
}
printf("\n");
}
printf("Test PASSED\n");
// Free device global memory
err = cudaFree(d_A);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_B);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_A2d);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_B2d);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_M);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_M2d);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// err = cudaFree(d_C);
// if (err != cudaSuccess)
// {
// fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// Free host memory
free(h_A);
free(h_B);
free(h_A2d);
free(h_B2d);
free(h_M);
free(h_M2d);
// free(h_C);
// Reset the device and exit
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
|
af258fcaba3e6aa98b3fbd02ece9303102af353c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <windows.h>
#include <conio.h>
//#define _USE_MATH_DEFINES
#include "math.h"
#define M_PI 3.141592653589793f
#define IT 1
//#define N 1024
//#define N 2048
//#define N 4096
//#define N 8192
//#define N 16384
//#define N 32768
//#define N 65536
//#define N 131072
//#define N 262144
//#define N 524288
//#define N 1048576
//#define N 2097152
//#define N 4194304
//#define N 8388608
//#define N 16777216
#define N 33554432
//#define N 67108864
float data_real[N];
float data_imag[N];
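// One radix-2 decimation-in-frequency stage (apparently Stockham-style, out of place):
// each thread butterflies an element pair taken N/2 apart in the input buffers, applies
// the twiddle factor exp(-i*2*pi*subarray1*power/N) to the difference term, and writes
// both results to the output buffers; the host swaps input/output pointers between passes.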
__global__ void stfft(float* data_real_d_in,float* data_imag_d_in,float* data_real_d_out,float* data_imag_d_out,int p)
{
int subarray1,subarray2,m,thread_position,subarray_start,subarray2_start,tmp2,tmp3;
float tw_real;
float tw_imag;
int power;
float tmp;
float real,real2,imag,imag2;
int index=threadIdx.x+blockIdx.x*blockDim.x;
//power=__powf(2,p);
power = 1<<p;
subarray1=index>>p;
m=N>>(p+1);
subarray2=subarray1+m;//7
//thread_position=index%power;
thread_position=(index)&(power-1);
subarray_start=subarray1<<p;
subarray2_start=subarray2<<p;
tmp3=subarray_start+thread_position;
tmp2=subarray2_start+thread_position;
//issue request for real parts
real=data_real_d_in[tmp3];
real2=data_real_d_in[tmp2];//15
//compute twiddle factor
tmp=(index)&(m-1);//17
tmp=(2*M_PI*subarray1*power)/N;
//tw_real=cosf(tmp);
//tw_imag=-1*sinf(tmp);
sincosf(tmp,&tw_imag,&tw_real);
tw_imag=tw_imag*-1;
//issue request for imaginary parts
imag=data_imag_d_in[tmp3];
imag2=data_imag_d_in[tmp2];//19
//butterfly real parts
tmp=real+real2;
real2=real-real2;
real=tmp;
//write back real results of butterfly,only this part is written because we still need to twiddle the other
tmp2=subarray_start*2+thread_position;
data_real_d_out[tmp2]=real;//22
//butterfly imag part
tmp=imag+imag2;
imag2=imag-imag2;
imag=tmp;
//multiply by twiddle
tmp=real2;
real2=real2*tw_real-imag2*tw_imag;
data_real_d_out[tmp2+power]=real2;
imag2=tmp*tw_imag+imag2*tw_real;//10
//write back imag result of butterfly
data_imag_d_out[tmp2]=imag;
data_imag_d_out[tmp2+power]=imag2;//27
}
int main( int argc, char** argv)
{
for(int i=0;i<N;i++)
{
if(i<N/2)
{data_real[i]=1;
data_imag[i]=0;}
else{
data_real[i]=0;
data_imag[i]=0;
}
}
int passes=(int)(log((double)N)/log(2.0)+0.5); // round so floating-point error cannot drop a pass
int* cycles=(int*)malloc(N/2*sizeof(int));
int* cycles_d;
float* data_real_d;
float* data_imag_d;
float* data_real_d_out;
float* data_imag_d_out;
float* tmp;float* tmp2;
float* fft_time=(float*)calloc(IT,sizeof(float));
hipEvent_t start, stop; float time;
hipMalloc((void**)&cycles_d,N/2*sizeof(int));
hipMalloc((void**)&data_real_d,N*sizeof(float));
hipMalloc((void**)&data_imag_d,N*sizeof(float));
hipMalloc((void**)&data_real_d_out,N*sizeof(float));
hipMalloc((void**)&data_imag_d_out,N*sizeof(float));
dim3 dimBlock(512,1,1);
dim3 dimGrid(N/1024,1,1);
long int before = GetTickCount();
//hipFuncSetCacheConfig(stfft,hipFuncCachePreferShared);
//-----------------------
for(int j=0;j<IT;j++)
{
hipMemcpy(data_real_d,data_real,sizeof(float)*N,hipMemcpyHostToDevice);
hipMemcpy(data_imag_d,data_imag,sizeof(float)*N,hipMemcpyHostToDevice);
hipEventCreate(&stop);
hipEventCreate(&start);
hipEventRecord( start, 0 );
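// log2(N) passes: each pass reads data_*_d and writes data_*_d_out, and the pointers
// are swapped afterwards (ping-pong) so the next pass consumes the previous pass's output.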
for(int i=0;i<passes;i++)
{
hipLaunchKernelGGL(( stfft), dim3(dimGrid),dim3(dimBlock), 0, 0, data_real_d,data_imag_d,data_real_d_out,data_imag_d_out,i);
tmp=data_real_d;
tmp2=data_imag_d;
data_real_d=data_real_d_out;
data_real_d_out=tmp;
data_imag_d=data_imag_d_out;
data_imag_d_out=tmp2;
}
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
hipEventDestroy( start );
hipEventDestroy( stop );
printf("fft time=%f\n",time);
fft_time[j]=time;
tmp=data_real_d;
tmp2=data_imag_d;
data_real_d=data_real_d_out;
data_real_d_out=tmp;
data_imag_d=data_imag_d_out;
data_imag_d_out=tmp2;
}
tmp=data_real_d;
tmp2=data_imag_d;
data_real_d=data_real_d_out;
data_real_d_out=tmp;
data_imag_d=data_imag_d_out;
data_imag_d_out=tmp2;
long int after = GetTickCount();
const char* err=hipGetErrorString(hipGetLastError());
for(int i=0;i<40;i++)
{printf("%c",err[i]);}
printf("\n");
printf("%d ms\n",after-before);
hipMemcpy(data_real,data_real_d,sizeof(float)*N,hipMemcpyDeviceToHost);
hipMemcpy(data_imag,data_imag_d,sizeof(float)*N,hipMemcpyDeviceToHost);
hipMemcpy(cycles,cycles_d,sizeof(int)*N/2,hipMemcpyDeviceToHost);
hipFree(data_real_d);
hipFree(data_imag_d);
for(int i=N-16;i<N;i++)
{
printf("data[%d]=%f + %f i\n",i,data_real[i],data_imag[i]);
}
float average=0;
for(int i=0;i<IT;i++)
{
average+=fft_time[i];
}
average=average/IT;
float flops=(41*(N/2)*log2f(N))/(average*0.001);
printf("FLOPS=%f GFLOPS, AV Time=%f\n",flops*0.000000001,average);
/*
for(int i=0;i<128;i++)
{
printf("cycles[%d]=%d\n",i,cycles[i]);
}*/
//_getch();
}
|
af258fcaba3e6aa98b3fbd02ece9303102af353c.cu
|
#include <stdlib.h>
#include <stdio.h>
#include <windows.h>
#include <conio.h>
//#define _USE_MATH_DEFINES
#include "math.h"
#define M_PI 3.141592653589793f
#define IT 1
//#define N 1024
//#define N 2048
//#define N 4096
//#define N 8192
//#define N 16384
//#define N 32768
//#define N 65536
//#define N 131072
//#define N 262144
//#define N 524288
//#define N 1048576
//#define N 2097152
//#define N 4194304
//#define N 8388608
//#define N 16777216
#define N 33554432
//#define N 67108864
float data_real[N];
float data_imag[N];
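// One radix-2 decimation-in-frequency stage (apparently Stockham-style, out of place):
// each thread butterflies an element pair taken N/2 apart in the input buffers, applies
// the twiddle factor exp(-i*2*pi*subarray1*power/N) to the difference term, and writes
// both results to the output buffers; the host swaps input/output pointers between passes.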
__global__ void stfft(float* data_real_d_in,float* data_imag_d_in,float* data_real_d_out,float* data_imag_d_out,int p)
{
int subarray1,subarray2,m,thread_position,subarray_start,subarray2_start,tmp2,tmp3;
float tw_real;
float tw_imag;
int power;
float tmp;
float real,real2,imag,imag2;
int index=threadIdx.x+blockIdx.x*blockDim.x;
//power=__powf(2,p);
power = 1<<p;
subarray1=index>>p;
m=N>>(p+1);
subarray2=subarray1+m;//7
//thread_position=index%power;
thread_position=(index)&(power-1);
subarray_start=subarray1<<p;
subarray2_start=subarray2<<p;
tmp3=subarray_start+thread_position;
tmp2=subarray2_start+thread_position;
//issue request for real parts
real=data_real_d_in[tmp3];
real2=data_real_d_in[tmp2];//15
//compute twiddle factor
tmp=(index)&(m-1);//17
tmp=(2*M_PI*subarray1*power)/N;
//tw_real=cosf(tmp);
//tw_imag=-1*sinf(tmp);
sincosf(tmp,&tw_imag,&tw_real);
tw_imag=tw_imag*-1;
//issue request for imaginary parts
imag=data_imag_d_in[tmp3];
imag2=data_imag_d_in[tmp2];//19
//butterfly real parts
tmp=real+real2;
real2=real-real2;
real=tmp;
//write back real results of butterfly,only this part is written because we still need to twiddle the other
tmp2=subarray_start*2+thread_position;
data_real_d_out[tmp2]=real;//22
//butterfly imag part
tmp=imag+imag2;
imag2=imag-imag2;
imag=tmp;
//multiply by twiddle
tmp=real2;
real2=real2*tw_real-imag2*tw_imag;
data_real_d_out[tmp2+power]=real2;
imag2=tmp*tw_imag+imag2*tw_real;//10
//write back imag result of butterfly
data_imag_d_out[tmp2]=imag;
data_imag_d_out[tmp2+power]=imag2;//27
}
int main( int argc, char** argv)
{
for(int i=0;i<N;i++)
{
if(i<N/2)
{data_real[i]=1;
data_imag[i]=0;}
else{
data_real[i]=0;
data_imag[i]=0;
}
}
int passes=(int)(log((double)N)/log(2.0)+0.5); // round so floating-point error cannot drop a pass
int* cycles=(int*)malloc(N/2*sizeof(int));
int* cycles_d;
float* data_real_d;
float* data_imag_d;
float* data_real_d_out;
float* data_imag_d_out;
float* tmp;float* tmp2;
float* fft_time=(float*)calloc(IT,sizeof(float));
cudaEvent_t start, stop; float time;
cudaMalloc((void**)&cycles_d,N/2*sizeof(int));
cudaMalloc((void**)&data_real_d,N*sizeof(float));
cudaMalloc((void**)&data_imag_d,N*sizeof(float));
cudaMalloc((void**)&data_real_d_out,N*sizeof(float));
cudaMalloc((void**)&data_imag_d_out,N*sizeof(float));
dim3 dimBlock(512,1,1);
dim3 dimGrid(N/1024,1,1);
long int before = GetTickCount();
//cudaFuncSetCacheConfig(stfft,cudaFuncCachePreferShared);
//-----------------------
for(int j=0;j<IT;j++)
{
cudaMemcpy(data_real_d,data_real,sizeof(float)*N,cudaMemcpyHostToDevice);
cudaMemcpy(data_imag_d,data_imag,sizeof(float)*N,cudaMemcpyHostToDevice);
cudaEventCreate(&stop);
cudaEventCreate(&start);
cudaEventRecord( start, 0 );
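// log2(N) passes: each pass reads data_*_d and writes data_*_d_out, and the pointers
// are swapped afterwards (ping-pong) so the next pass consumes the previous pass's output.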
for(int i=0;i<passes;i++)
{
stfft<<<dimGrid,dimBlock>>>(data_real_d,data_imag_d,data_real_d_out,data_imag_d_out,i);
tmp=data_real_d;
tmp2=data_imag_d;
data_real_d=data_real_d_out;
data_real_d_out=tmp;
data_imag_d=data_imag_d_out;
data_imag_d_out=tmp2;
}
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
printf("fft time=%f\n",time);
fft_time[j]=time;
tmp=data_real_d;
tmp2=data_imag_d;
data_real_d=data_real_d_out;
data_real_d_out=tmp;
data_imag_d=data_imag_d_out;
data_imag_d_out=tmp2;
}
tmp=data_real_d;
tmp2=data_imag_d;
data_real_d=data_real_d_out;
data_real_d_out=tmp;
data_imag_d=data_imag_d_out;
data_imag_d_out=tmp2;
long int after = GetTickCount();
const char* err=cudaGetErrorString(cudaGetLastError());
for(int i=0;i<40;i++)
{printf("%c",err[i]);}
printf("\n");
printf("%d ms\n",after-before);
cudaMemcpy(data_real,data_real_d,sizeof(float)*N,cudaMemcpyDeviceToHost);
cudaMemcpy(data_imag,data_imag_d,sizeof(float)*N,cudaMemcpyDeviceToHost);
cudaMemcpy(cycles,cycles_d,sizeof(int)*N/2,cudaMemcpyDeviceToHost);
cudaFree(data_real_d);
cudaFree(data_imag_d);
for(int i=N-16;i<N;i++)
{
printf("data[%d]=%f + %f i\n",i,data_real[i],data_imag[i]);
}
float average=0;
for(int i=0;i<IT;i++)
{
average+=fft_time[i];
}
average=average/IT;
float flops=(41*(N/2)*log2f(N))/(average*0.001);
printf("FLOPS=%f GFLOPS, AV Time=%f\n",flops*0.000000001,average);
/*
for(int i=0;i<128;i++)
{
printf("cycles[%d]=%d\n",i,cycles[i]);
}*/
//_getch();
}
|
149441b703e217d986befec7f7392c24cdc55f75.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#define FILTER_SIZE 5
#define TILE_SIZE 12
#define BLOCK_SIZE (TILE_SIZE + FILTER_SIZE - 1)
#define RADIUS (FILTER_SIZE / 2)
__constant__ float M_c[FILTER_SIZE][FILTER_SIZE];
__global__ void convolution(Matrix N, Matrix P)
{
/********************************************************************
Determine input and output indexes of each thread
Load a tile of the input image to shared memory
Apply the filter on the input image tile
Write the compute values to the output image at the correct indexes
********************************************************************/
//INSERT KERNEL CODE HERE
__shared__ float N_s[16][16];
// determine row and column for each thread
int global_row;
int global_column;
int shared_row;
int shared_column;
global_row = blockIdx.y * TILE_SIZE + threadIdx.y;
global_column = blockIdx.x * TILE_SIZE + threadIdx.x;
shared_row = threadIdx.y + RADIUS;
shared_column = threadIdx.x + RADIUS;
// initial boundary checking
if (global_row < N.height && global_column < N.width) {
// each thread needs to copy its N element into the shared memory
N_s[shared_row][shared_column] = N.elements[global_row * N.width + global_column];
// the halo elements need to be copied into the shared memory using the edge threads
// if the halo element is outside the original N matrix, input 0.0
// if top rows in block
if (threadIdx.y < RADIUS) {
// if top rows in N
if (global_row < RADIUS) {
// zero out directly above by shifting row but not column
N_s[shared_row - RADIUS][shared_column] = 0.0;
// first two zero out the diagonal to the up and left because this is the top of N
if (shared_column - RADIUS == 0 || shared_column - RADIUS == 1) {
// shift up and left
N_s[shared_row - RADIUS][shared_column - RADIUS] = 0.0;
} // end diagonal if
} // end global radius check before else
// not top row in N
else {
// copy directly above by shifting row but not column
N_s[shared_row - RADIUS][shared_column] = N.elements[(global_row - RADIUS) * N.width + global_column];
// zero out for diagonal because this is the left most blocks of N
if (global_column - RADIUS < 0)
// shift up and left
N_s[shared_row - RADIUS][shared_column - RADIUS] = 0.0;
// otherwise, just copy in what is there in the diagonal of N by using the left two threads
else if (shared_column == RADIUS || shared_column == (RADIUS + 1)) {
N_s[shared_row - RADIUS][shared_column - RADIUS] = N.elements[(global_row - RADIUS) * N.width + global_column - RADIUS];
} // end diagonal if
} // final end of global radius check
} // end block top row check
// if the two leftmost columns in block
if (threadIdx.x < RADIUS) {
// if leftmost rows in N
if (global_column < RADIUS) {
// zero out directly left by shifting column but not row
N_s[shared_row][shared_column - RADIUS] = 0.0;
// last two zero out the diagonal by using the bottom threads
if (shared_row == (TILE_SIZE - 1)) {
// shift down and left
N_s[(TILE_SIZE - 1) + RADIUS][shared_column - RADIUS] = 0.0;
} // end diagonal if
if (shared_row == (TILE_SIZE - 2)) {
// shift down and left
N_s[(TILE_SIZE - 2) + RADIUS][shared_column - RADIUS] = 0.0;
} // end diagonal if
} // end leftmost global row check before else
else {
// copy directly to the left by shifting the column but not row
N_s[shared_row][shared_column - RADIUS] = N.elements[global_row * N.width + global_column - RADIUS];
// zero out for the diagonal because these are the bottom blocks of N
if (global_row == (TILE_SIZE - 1)) {
// shift down and left
N_s[(TILE_SIZE - 1) + RADIUS][shared_column - RADIUS] = 0.0;
}
if (global_row == (TILE_SIZE - 2)) {
// shift down and left
N_s[(TILE_SIZE - 2) + RADIUS][shared_column - RADIUS] = 0.0;
}
// otherwise copy what is there
else if (shared_row == (TILE_SIZE - 1)) {
// shift down and left
N_s[(TILE_SIZE - 1) + RADIUS][shared_column - RADIUS] = N.elements[(global_row + RADIUS) * N.width + global_column - RADIUS];
} // end diagonal if
else if (shared_row == (TILE_SIZE - 2)) {
// shift down and left
N_s[(TILE_SIZE - 2) + RADIUS][shared_column - RADIUS] = N.elements[(global_row + RADIUS) * N.width + global_column - RADIUS];
} // end diagonal if
} // final end of global leftmost check
} // end block left column check
// if the two rightmost columns in block
if (threadIdx.x == (TILE_SIZE - 1) || threadIdx.x == (TILE_SIZE - 2)) {
// if the two rightmost global columns
if (global_column == (N.width - 1) || global_column == (N.width - 2)) {
// zero out directly right by shifting column but not row
N_s[shared_row][shared_column + RADIUS] = 0.0;
// the last two zero out the diagonal by using the rightmost threads
if (shared_column == (TILE_SIZE - 1) || shared_column == (TILE_SIZE - 2)) {
// shift up and right
N_s[shared_row - RADIUS][shared_column + RADIUS] = 0.0;
} // end diagonal if
} // end global rightmost check
else {
// copy directly to the right by shifting the column but not row
N_s[shared_row][(TILE_SIZE - 1) + RADIUS] = N.elements[global_row * N.width + global_column + RADIUS];
// zero out for diagonal because these are the top most block of N
if (global_row - RADIUS < 0) {
// shift up and right
N_s[shared_row - RADIUS][shared_column + RADIUS] = 0.0;
} // end diagonal zeroing
//otherwise copy what is there
else if (shared_row < RADIUS) {
// shift up and right
N_s[shared_row - RADIUS][shared_column + RADIUS] = N.elements[(global_row - RADIUS) * N.width + global_column + RADIUS];
}
} // final end global rightmost check
} // end block right column check
// if the two bottom rows in block
if (threadIdx.y == (TILE_SIZE - 1) || threadIdx.y == (TILE_SIZE - 2)) {
// if the two bottom global rows
if (global_row == (N.height - 1) || global_row == (N.height - 2)) {
// zero out directly below by shifting row but not column
N_s[shared_row + RADIUS][shared_column] = 0.0;
// the last two zero out the diagonal by using the bottommost threads
if (shared_row == (TILE_SIZE -1) || shared_row == (TILE_SIZE - 2)) {
// shift down and right
N_s[shared_row + RADIUS][shared_column + RADIUS] = 0.0;
} // end diagonal if
} // end global bottom check
else {
// copy directly underneath by shifting row but not column
N_s[shared_row + RADIUS][shared_column] = N.elements[(global_row + RADIUS) * N.width + global_column];
// zero out for diagonal because this is the rightmost block of N
if (global_column == (TILE_SIZE - 1) || global_column == (TILE_SIZE - 2)) {
// shift down and right
N_s[shared_row + RADIUS][shared_column + RADIUS] = 0.0;
} // end diagonal zeroing
// otherwise copy what is there
else if (shared_column == (TILE_SIZE - 1) || shared_column == (TILE_SIZE - 2)) {
// shift down and right
N_s[shared_row + RADIUS][shared_column + RADIUS] = N.elements[(global_row + RADIUS) * N.width + global_column + RADIUS];
}
} // final end global bottom check
} // end block bottom row check
__syncthreads();
// the filter needs to be applied using a for loop from -2 to 2 and applied to row (i) and column (j) from N_s
// accumulating as you go
float Pvalue = 0.0;
int mi = 0;
int mj = 0;
// calculate the stuff
if (global_row < N.height && global_column < N.width) {
for (int i = -2; i < 3; i++) {
for (int j = -2; j < 3; j++) {
// can't use negative indices
mi = i + 2;
mj = j + 2;
Pvalue += N_s[shared_row + i][shared_column + j] * M_c[mi][mj];
}
}
P.elements[global_row * P.width + global_column] = Pvalue;
}
} // end global height and width if
} // end kernel
|
149441b703e217d986befec7f7392c24cdc55f75.cu
|
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#define FILTER_SIZE 5
#define TILE_SIZE 12
#define BLOCK_SIZE (TILE_SIZE + FILTER_SIZE - 1)
#define RADIUS (FILTER_SIZE / 2)
__constant__ float M_c[FILTER_SIZE][FILTER_SIZE];
__global__ void convolution(Matrix N, Matrix P)
{
/********************************************************************
Determine input and output indexes of each thread
Load a tile of the input image to shared memory
Apply the filter on the input image tile
Write the compute values to the output image at the correct indexes
********************************************************************/
//INSERT KERNEL CODE HERE
__shared__ float N_s[16][16];
// determine row and column for each thread
int global_row;
int global_column;
int shared_row;
int shared_column;
global_row = blockIdx.y * TILE_SIZE + threadIdx.y;
global_column = blockIdx.x * TILE_SIZE + threadIdx.x;
shared_row = threadIdx.y + RADIUS;
shared_column = threadIdx.x + RADIUS;
// initial boundary checking
if (global_row < N.height && global_column < N.width) {
// each thread needs to copy its N element into the shared memory
N_s[shared_row][shared_column] = N.elements[global_row * N.width + global_column];
// the halo elements need to be copied into the shared memory using the edge threads
// if the halo element is outside the original N matrix, input 0.0
// if top rows in block
if (threadIdx.y < RADIUS) {
// if top rows in N
if (global_row < RADIUS) {
// zero out directly above by shifting row but not column
N_s[shared_row - RADIUS][shared_column] = 0.0;
// first two zero out the diagonal to the up and left because this is the top of N
if (shared_column - RADIUS == 0 || shared_column - RADIUS == 1) {
// shift up and left
N_s[shared_row - RADIUS][shared_column - RADIUS] = 0.0;
} // end diagonal if
} // end global radius check before else
// not top row in N
else {
// copy directly above by shifting row but not column
N_s[shared_row - RADIUS][shared_column] = N.elements[(global_row - RADIUS) * N.width + global_column];
// zero out for diagonal because this is the left most blocks of N
if (global_column - RADIUS < 0)
// shift up and left
N_s[shared_row - RADIUS][shared_column - RADIUS] = 0.0;
// otherwise, just copy in what is there in the diagonal of N by using the left two threads
else if (shared_column == RADIUS || shared_column == (RADIUS + 1)) {
N_s[shared_row - RADIUS][shared_column - RADIUS] = N.elements[(global_row - RADIUS) * N.width + global_column - RADIUS];
} // end diagonal if
} // final end of global radius check
} // end block top row check
// if the two leftmost columns in block
if (threadIdx.x < RADIUS) {
// if leftmost rows in N
if (global_column < RADIUS) {
// zero out directly left by shifting column but not row
N_s[shared_row][shared_column - RADIUS] = 0.0;
// last two zero out the diagonal by using the bottom threads
if (shared_row == (TILE_SIZE - 1)) {
// shift down and left
N_s[(TILE_SIZE - 1) + RADIUS][shared_column - RADIUS] = 0.0;
} // end diagonal if
if (shared_row == (TILE_SIZE - 2)) {
// shift down and left
N_s[(TILE_SIZE - 2) + RADIUS][shared_column - RADIUS] = 0.0;
} // end diagonal if
} // end leftmost global row check before else
else {
// copy directly to the left by shifting the column but not row
N_s[shared_row][shared_column - RADIUS] = N.elements[global_row * N.width + global_column - RADIUS];
// zero out for the diagonal because these are the bottom blocks of N
if (global_row == (TILE_SIZE - 1)) {
// shift down and left
N_s[(TILE_SIZE - 1) + RADIUS][shared_column - RADIUS] = 0.0;
}
if (global_row == (TILE_SIZE - 2)) {
// shift down and left
N_s[(TILE_SIZE - 2) + RADIUS][shared_column - RADIUS] = 0.0;
}
// otherwise copy what is there
else if (shared_row == (TILE_SIZE - 1)) {
// shift down and left
N_s[(TILE_SIZE - 1) + RADIUS][shared_column - RADIUS] = N.elements[(global_row + RADIUS) * N.width + global_column - RADIUS];
} // end diagonal if
else if (shared_row == (TILE_SIZE - 2)) {
// shift down and left
N_s[(TILE_SIZE - 2) + RADIUS][shared_column - RADIUS] = N.elements[(global_row + RADIUS) * N.width + global_column - RADIUS];
} // end diagonal if
} // final end of global leftmost check
} // end block left column check
// if the two rightmost columns in block
if (threadIdx.x == (TILE_SIZE - 1) || threadIdx.x == (TILE_SIZE - 2)) {
// if the two rightmost global columns
if (global_column == (N.width - 1) || global_column == (N.width - 2)) {
// zero out directly right by shifting column but not row
N_s[shared_row][shared_column + RADIUS] = 0.0;
// the last two zero out the diagonal by using the rightmost threads
if (shared_column == (TILE_SIZE - 1) || shared_column == (TILE_SIZE - 2)) {
// shift up and right
N_s[shared_row - RADIUS][shared_column + RADIUS] = 0.0;
} // end diagonal if
} // end global rightmost check
else {
// copy directly to the right by shifting the column but not row
N_s[shared_row][(TILE_SIZE - 1) + RADIUS] = N.elements[global_row * N.width + global_column + RADIUS];
// zero out for diagonal because these are the top most block of N
if (global_row - RADIUS < 0) {
// shift up and right
N_s[shared_row - RADIUS][shared_column + RADIUS] = 0.0;
} // end diagonal zeroing
//otherwise copy what is there
else if (shared_row < RADIUS) {
// shift up and right
N_s[shared_row - RADIUS][shared_column + RADIUS] = N.elements[(global_row - RADIUS) * N.width + global_column + RADIUS];
}
} // final end global rightmost check
} // end block right column check
// if the two bottom rows in block
if (threadIdx.y == (TILE_SIZE - 1) || threadIdx.y == (TILE_SIZE - 2)) {
// if the two bottom global rows
if (global_row == (N.height - 1) || global_row == (N.height - 2)) {
// zero out directly below by shifting row but not column
N_s[shared_row + RADIUS][shared_column] = 0.0;
// the last two zero out the diagonal by using the bottommost threads
if (shared_row == (TILE_SIZE -1) || shared_row == (TILE_SIZE - 2)) {
// shift down and right
N_s[shared_row + RADIUS][shared_column + RADIUS] = 0.0;
} // end diagonal if
} // end global bottom check
else {
// copy directly underneath by shifting row but not column
N_s[shared_row + RADIUS][shared_column] = N.elements[(global_row + RADIUS) * N.width + global_column];
// zero out for diagonal because this is the rightmost block of N
if (global_column == (TILE_SIZE - 1) || global_column == (TILE_SIZE - 2)) {
// shift down and right
N_s[shared_row + RADIUS][shared_column + RADIUS] = 0.0;
} // end diagonal zeroing
// otherwise copy what is there
else if (shared_column == (TILE_SIZE - 1) || shared_column == (TILE_SIZE - 2)) {
// shift down and right
N_s[shared_row + RADIUS][shared_column + RADIUS] = N.elements[(global_row + RADIUS) * N.width + global_column + RADIUS];
}
} // final end global bottom check
} // end block bottom row check
__syncthreads();
// the filter needs to be applied using a for loop from -2 to 2 and applied to row (i) and column (j) from N_s
// accumulating as you go
float Pvalue = 0.0;
int mi = 0;
int mj = 0;
// calculate the stuff
if (global_row < N.height && global_column < N.width) {
for (int i = -2; i < 3; i++) {
for (int j = -2; j < 3; j++) {
// can't use negative indices
mi = i + 2;
mj = j + 2;
Pvalue += N_s[shared_row + i][shared_column + j] * M_c[mi][mj];
}
}
P.elements[global_row * P.width + global_column] = Pvalue;
}
} // end global height and width if
} // end kernel
|
add_torque.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "add_torque.h"
#include <mirheo/core/pvs/rigid_object_vector.h>
#include <mirheo/core/pvs/views/rov.h>
#include <mirheo/core/simulation.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/kernel_launch.h>
namespace mirheo
{
namespace AddTorqueKernels
{
__global__ void addTorque(ROVview view, real3 torque)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid >= view.nObjects) return;
view.motions[gid].torque += torque;
}
} // namespace AddTorqueKernels
AddTorquePlugin::AddTorquePlugin(const MirState *state, const std::string& name, const std::string& rovName, real3 torque) :
SimulationPlugin(state, name),
rovName_(rovName),
torque_(torque)
{}
void AddTorquePlugin::setup(Simulation *simulation, const MPI_Comm& comm, const MPI_Comm& interComm)
{
SimulationPlugin::setup(simulation, comm, interComm);
rov_ = dynamic_cast<RigidObjectVector*>( simulation->getOVbyNameOrDie(rovName_) );
if (rov_ == nullptr)
die("Need rigid object vector to add torque, plugin '%s', OV name '%s'",
getCName(), rovName_.c_str());
info("Objects '%s' will experience external torque [%f %f %f]",
rovName_.c_str(), torque_.x, torque_.y, torque_.z);
}
void AddTorquePlugin::beforeForces(hipStream_t stream)
{
ROVview view(rov_, rov_->local());
const int nthreads = 128;
SAFE_KERNEL_LAUNCH(
AddTorqueKernels::addTorque,
getNblocks(view.size, nthreads), nthreads, 0, stream,
view, torque_ );
}
} // namespace mirheo
|
add_torque.cu
|
#include "add_torque.h"
#include <mirheo/core/pvs/rigid_object_vector.h>
#include <mirheo/core/pvs/views/rov.h>
#include <mirheo/core/simulation.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/kernel_launch.h>
namespace mirheo
{
namespace AddTorqueKernels
{
__global__ void addTorque(ROVview view, real3 torque)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid >= view.nObjects) return;
view.motions[gid].torque += torque;
}
} // namespace AddTorqueKernels
AddTorquePlugin::AddTorquePlugin(const MirState *state, const std::string& name, const std::string& rovName, real3 torque) :
SimulationPlugin(state, name),
rovName_(rovName),
torque_(torque)
{}
void AddTorquePlugin::setup(Simulation *simulation, const MPI_Comm& comm, const MPI_Comm& interComm)
{
SimulationPlugin::setup(simulation, comm, interComm);
rov_ = dynamic_cast<RigidObjectVector*>( simulation->getOVbyNameOrDie(rovName_) );
if (rov_ == nullptr)
die("Need rigid object vector to add torque, plugin '%s', OV name '%s'",
getCName(), rovName_.c_str());
info("Objects '%s' will experience external torque [%f %f %f]",
rovName_.c_str(), torque_.x, torque_.y, torque_.z);
}
void AddTorquePlugin::beforeForces(cudaStream_t stream)
{
ROVview view(rov_, rov_->local());
const int nthreads = 128;
SAFE_KERNEL_LAUNCH(
AddTorqueKernels::addTorque,
getNblocks(view.size, nthreads), nthreads, 0, stream,
view, torque_ );
}
} // namespace mirheo
|
3056f445da8ce7d29cb7df0850805aa10432e04e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <algorithm>
#include <cmath>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/lstm_layer_ctpn.hpp"
namespace caffe {
template <typename Dtype>
__device__ Dtype sigmoid(const Dtype x) {
return Dtype(1) / (Dtype(1) + exp(-x));
}
template <typename Dtype>
__device__ Dtype tanh(const Dtype x) {
return Dtype(2) * sigmoid(Dtype(2) * x) - Dtype(1);
}
template <typename Dtype>
__global__ void ClipAdd(const int nthreads, const int dim, int t,
const Dtype* clip, const Dtype* add_vec, Dtype* data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / dim;
const Dtype clip_t = clip ? clip[n] : Dtype(t > 0);
data[index] += clip_t * add_vec[index];
}
}
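// Per-sample pre-gate layout is assumed to be [i | f | o | g], each of width H:
// the first 3H entries (i, f, o gates) get a sigmoid, the last H (g) a tanh.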
template <typename Dtype>
__global__ void ActivationForward(const int nthreads, const int H,
const Dtype* pre_gate, Dtype* gate) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int d = index % (4*H);
gate[index] = d < 3*H ? sigmoid(pre_gate[index]) : tanh(pre_gate[index]);
}
}
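// Standard LSTM cell update: c_t = clip * f_t * c_{t-1} + i_t * g_t and
// h_t = o_t * tanh(c_t); clip (or t > 0) gates whether state carries over.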
template <typename Dtype>
__global__ void LSTMForward(const int nthreads, const int H, const int t,
const Dtype* c_prev, const Dtype* gate, const Dtype* clip,
Dtype* c_t, Dtype* h_t) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / H;
const int d = index % H;
const Dtype* offset = gate + 4*H*n;
const Dtype i_t = offset[d];
const Dtype f_t = offset[H + d];
const Dtype o_t = offset[2*H + d];
const Dtype g_t = offset[3*H + d];
const Dtype c_t_1 = c_prev[index];
const Dtype clip_t = clip ? clip[n] : Dtype(t > 0);
c_t[index] = clip_t * f_t * c_t_1 + i_t * g_t;
h_t[index] = o_t * tanh(c_t[index]);
}
}
template <typename Dtype>
__global__ void LSTMBackward(const int nthreads, const int H, const int t,
const Dtype* c_prev, const Dtype* gate, const Dtype* c_t,
const Dtype* clip, Dtype* dc_t, const Dtype* dh_t,
Dtype* dc_prev, Dtype* gate_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / H;
const int d = index % H;
const Dtype* gate_t = gate + 4*H*n;
const Dtype i_t = gate_t[d];
const Dtype f_t = gate_t[H + d];
const Dtype o_t = gate_t[2*H + d];
const Dtype g_t = gate_t[3*H + d];
const Dtype c_t_1 = c_prev[index];
const Dtype c = c_t[index];
const Dtype tanh_c = tanh(c);
const Dtype clip_t = clip ? clip[n] : Dtype(t > 0);
Dtype* dc_t_1 = dc_prev + index;
Dtype* gate_diff_t = gate_diff + 4*H*n;
Dtype* di_t = gate_diff_t + d;
Dtype* df_t = gate_diff_t + H + d;
Dtype* do_t = gate_diff_t + 2*H + d;
Dtype* dg_t = gate_diff_t + 3*H + d;
// Output gate : tanh(c(t)) * h_diff(t)
*do_t = dh_t[index] * tanh_c;
// Cell state : o(t) * tanh'(c(t)) * h_diff(t) + f(t+1) * c_diff(t+1)
dc_t[index] += dh_t[index] * o_t * (Dtype(1) - tanh_c * tanh_c);
// c_diff(t-1) += f(t) * c_diff(t)
*dc_t_1 = clip_t * dc_t[index] * f_t;
// Forget gate : c(t-1) * c_diff(t)
*df_t = clip_t * dc_t[index] * c_t_1;
// Input gate : g(t) * c_diff(t)
*di_t = dc_t[index] * g_t;
// Input modulation gate : i(t) * c_diff(t)
*dg_t = dc_t[index] * i_t;
}
}
template <typename Dtype>
__global__ void ActivationBackward(const int nthreads, const int H,
const Dtype clip_threshold, const Dtype* gate, const Dtype* gate_diff,
Dtype* pre_gate_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int d = index % (4 * H);
const Dtype gate_val = gate[index];
if (d < 3 * H) {
pre_gate_diff[index] = gate_diff[index] * gate_val * (Dtype(1) - gate_val);
} else {
pre_gate_diff[index] = gate_diff[index] * (Dtype(1) - gate_val * gate_val);
}
if (clip_threshold > Dtype(0)) {
if (pre_gate_diff[index] < -clip_threshold) {
pre_gate_diff[index] = -clip_threshold;
}
else if (pre_gate_diff[index] > clip_threshold) {
pre_gate_diff[index] = clip_threshold;
}
}
}
}
template <typename Ftype, typename Btype>
void LstmLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
CHECK_EQ(top[0]->gpu_data<Ftype>(), top_.gpu_data());
Ftype* top_data = top_.mutable_gpu_data();
const Ftype* bottom_data = bottom[0]->gpu_data<Ftype>();
const Ftype* clip = NULL;
if (bottom.size() > 1) {
clip = bottom[1]->gpu_data<Ftype>();
CHECK_EQ(bottom[1]->num(), bottom[1]->count());
}
const Ftype* weight_i = this->blobs_[0]->template gpu_data<Ftype>();
const Ftype* weight_h = this->blobs_[1]->template gpu_data<Ftype>();
const Ftype* bias = this->blobs_[2]->template gpu_data<Ftype>();
Ftype* pre_gate_data = pre_gate_.mutable_gpu_data();
Ftype* gate_data = gate_.mutable_gpu_data();
Ftype* cell_data = cell_.mutable_gpu_data();
// Initialize previous state
if (clip) {
caffe_copy(c_0_.count(), c_T_.gpu_data(), c_0_.mutable_gpu_data());
caffe_copy(h_0_.count(), h_T_.gpu_data(), h_0_.mutable_gpu_data());
}
else {
caffe_gpu_set(c_0_.count(), Ftype(0.), c_0_.mutable_gpu_data());
caffe_gpu_set(h_0_.count(), Ftype(0.), h_0_.mutable_gpu_data());
}
// Compute input to hidden forward propagation
caffe_gpu_gemm(CblasNoTrans, CblasTrans, T_*N_, 4*H_, I_, Ftype(1.),
bottom_data, weight_i, Ftype(0.), pre_gate_data);
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, T_*N_, 4*H_, 1, Ftype(1.),
bias_multiplier_->gpu_data<Ftype>(), bias, Ftype(1.), pre_gate_data);
// Compute recurrent forward propagation
for (int t = 0; t < T_; ++t) {
Ftype* h_t = top_data + top_.offset(t);
Ftype* c_t = cell_data + cell_.offset(t);
Ftype* pre_gate_t = pre_gate_data + pre_gate_.offset(t);
Ftype* gate_t = gate_data + gate_.offset(t);
const Ftype* clip_t = clip ? clip + bottom[1]->offset(t) : NULL;
const Ftype* h_t_1 = t > 0 ? (h_t - top_.offset(1)) : h_0_.gpu_data();
const Ftype* c_t_1 = t > 0 ? (c_t - cell_.offset(1)) : c_0_.gpu_data();
caffe_gpu_gemm(CblasNoTrans, CblasTrans, N_, 4*H_, H_, Ftype(1.),
h_t_1, weight_h, Ftype(0.), h_to_gate_.mutable_gpu_data());
hipLaunchKernelGGL(( ClipAdd<Ftype>), dim3(CAFFE_GET_BLOCKS(4*N_*H_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
4*N_*H_, 4*H_, t, clip_t, h_to_gate_.gpu_data(), pre_gate_t);
CUDA_POST_KERNEL_CHECK;
hipLaunchKernelGGL(( ActivationForward<Ftype>), dim3(CAFFE_GET_BLOCKS(4*N_*H_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
4*N_*H_, H_, pre_gate_t, gate_t);
CUDA_POST_KERNEL_CHECK;
hipLaunchKernelGGL(( LSTMForward<Ftype>), dim3(CAFFE_GET_BLOCKS(N_*H_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N_*H_, H_, t, c_t_1, gate_t, clip_t, c_t, h_t);
CUDA_POST_KERNEL_CHECK;
}
// Preserve cell state and output value for truncated BPTT
caffe_copy(N_*H_, cell_data + cell_.offset(T_-1), c_T_.mutable_gpu_data());
caffe_copy(N_*H_, top_data + top_.offset(T_-1), h_T_.mutable_gpu_data());
}
INSTANTIATE_LAYER_GPU_FORWARD_ONLY_FB(LstmLayer);
} // namespace caffe
|
3056f445da8ce7d29cb7df0850805aa10432e04e.cu
|
#include <vector>
#include <algorithm>
#include <cmath>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/lstm_layer_ctpn.hpp"
namespace caffe {
template <typename Dtype>
__device__ Dtype sigmoid(const Dtype x) {
return Dtype(1) / (Dtype(1) + exp(-x));
}
template <typename Dtype>
__device__ Dtype tanh(const Dtype x) {
return Dtype(2) * sigmoid(Dtype(2) * x) - Dtype(1);
}
template <typename Dtype>
__global__ void ClipAdd(const int nthreads, const int dim, int t,
const Dtype* clip, const Dtype* add_vec, Dtype* data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / dim;
const Dtype clip_t = clip ? clip[n] : Dtype(t > 0);
data[index] += clip_t * add_vec[index];
}
}
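// Per-sample pre-gate layout is assumed to be [i | f | o | g], each of width H:
// the first 3H entries (i, f, o gates) get a sigmoid, the last H (g) a tanh.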
template <typename Dtype>
__global__ void ActivationForward(const int nthreads, const int H,
const Dtype* pre_gate, Dtype* gate) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int d = index % (4*H);
gate[index] = d < 3*H ? sigmoid(pre_gate[index]) : tanh(pre_gate[index]);
}
}
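// Standard LSTM cell update: c_t = clip * f_t * c_{t-1} + i_t * g_t and
// h_t = o_t * tanh(c_t); clip (or t > 0) gates whether state carries over.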
template <typename Dtype>
__global__ void LSTMForward(const int nthreads, const int H, const int t,
const Dtype* c_prev, const Dtype* gate, const Dtype* clip,
Dtype* c_t, Dtype* h_t) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / H;
const int d = index % H;
const Dtype* offset = gate + 4*H*n;
const Dtype i_t = offset[d];
const Dtype f_t = offset[H + d];
const Dtype o_t = offset[2*H + d];
const Dtype g_t = offset[3*H + d];
const Dtype c_t_1 = c_prev[index];
const Dtype clip_t = clip ? clip[n] : Dtype(t > 0);
c_t[index] = clip_t * f_t * c_t_1 + i_t * g_t;
h_t[index] = o_t * tanh(c_t[index]);
}
}
template <typename Dtype>
__global__ void LSTMBackward(const int nthreads, const int H, const int t,
const Dtype* c_prev, const Dtype* gate, const Dtype* c_t,
const Dtype* clip, Dtype* dc_t, const Dtype* dh_t,
Dtype* dc_prev, Dtype* gate_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / H;
const int d = index % H;
const Dtype* gate_t = gate + 4*H*n;
const Dtype i_t = gate_t[d];
const Dtype f_t = gate_t[H + d];
const Dtype o_t = gate_t[2*H + d];
const Dtype g_t = gate_t[3*H + d];
const Dtype c_t_1 = c_prev[index];
const Dtype c = c_t[index];
const Dtype tanh_c = tanh(c);
const Dtype clip_t = clip ? clip[n] : Dtype(t > 0);
Dtype* dc_t_1 = dc_prev + index;
Dtype* gate_diff_t = gate_diff + 4*H*n;
Dtype* di_t = gate_diff_t + d;
Dtype* df_t = gate_diff_t + H + d;
Dtype* do_t = gate_diff_t + 2*H + d;
Dtype* dg_t = gate_diff_t + 3*H + d;
// Output gate : tanh(c(t)) * h_diff(t)
*do_t = dh_t[index] * tanh_c;
// Cell state : o(t) * tanh'(c(t)) * h_diff(t) + f(t+1) * c_diff(t+1)
dc_t[index] += dh_t[index] * o_t * (Dtype(1) - tanh_c * tanh_c);
// c_diff(t-1) += f(t) * c_diff(t)
*dc_t_1 = clip_t * dc_t[index] * f_t;
// Forget gate : c(t-1) * c_diff(t)
*df_t = clip_t * dc_t[index] * c_t_1;
// Input gate : g(t) * c_diff(t)
*di_t = dc_t[index] * g_t;
// Input modulation gate : i(t) * c_diff(t)
*dg_t = dc_t[index] * i_t;
}
}
template <typename Dtype>
__global__ void ActivationBackward(const int nthreads, const int H,
const Dtype clip_threshold, const Dtype* gate, const Dtype* gate_diff,
Dtype* pre_gate_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int d = index % (4 * H);
const Dtype gate_val = gate[index];
if (d < 3 * H) {
pre_gate_diff[index] = gate_diff[index] * gate_val * (Dtype(1) - gate_val);
} else {
pre_gate_diff[index] = gate_diff[index] * (Dtype(1) - gate_val * gate_val);
}
if (clip_threshold > Dtype(0)) {
if (pre_gate_diff[index] < -clip_threshold) {
pre_gate_diff[index] = -clip_threshold;
}
else if (pre_gate_diff[index] > clip_threshold) {
pre_gate_diff[index] = clip_threshold;
}
}
}
}
template <typename Ftype, typename Btype>
void LstmLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
CHECK_EQ(top[0]->gpu_data<Ftype>(), top_.gpu_data());
Ftype* top_data = top_.mutable_gpu_data();
const Ftype* bottom_data = bottom[0]->gpu_data<Ftype>();
const Ftype* clip = NULL;
if (bottom.size() > 1) {
clip = bottom[1]->gpu_data<Ftype>();
CHECK_EQ(bottom[1]->num(), bottom[1]->count());
}
const Ftype* weight_i = this->blobs_[0]->template gpu_data<Ftype>();
const Ftype* weight_h = this->blobs_[1]->template gpu_data<Ftype>();
const Ftype* bias = this->blobs_[2]->template gpu_data<Ftype>();
Ftype* pre_gate_data = pre_gate_.mutable_gpu_data();
Ftype* gate_data = gate_.mutable_gpu_data();
Ftype* cell_data = cell_.mutable_gpu_data();
// Initialize previous state
if (clip) {
caffe_copy(c_0_.count(), c_T_.gpu_data(), c_0_.mutable_gpu_data());
caffe_copy(h_0_.count(), h_T_.gpu_data(), h_0_.mutable_gpu_data());
}
else {
caffe_gpu_set(c_0_.count(), Ftype(0.), c_0_.mutable_gpu_data());
caffe_gpu_set(h_0_.count(), Ftype(0.), h_0_.mutable_gpu_data());
}
// Compute input to hidden forward propagation
caffe_gpu_gemm(CblasNoTrans, CblasTrans, T_*N_, 4*H_, I_, Ftype(1.),
bottom_data, weight_i, Ftype(0.), pre_gate_data);
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, T_*N_, 4*H_, 1, Ftype(1.),
bias_multiplier_->gpu_data<Ftype>(), bias, Ftype(1.), pre_gate_data);
// Compute recurrent forward propagation
for (int t = 0; t < T_; ++t) {
Ftype* h_t = top_data + top_.offset(t);
Ftype* c_t = cell_data + cell_.offset(t);
Ftype* pre_gate_t = pre_gate_data + pre_gate_.offset(t);
Ftype* gate_t = gate_data + gate_.offset(t);
const Ftype* clip_t = clip ? clip + bottom[1]->offset(t) : NULL;
const Ftype* h_t_1 = t > 0 ? (h_t - top_.offset(1)) : h_0_.gpu_data();
const Ftype* c_t_1 = t > 0 ? (c_t - cell_.offset(1)) : c_0_.gpu_data();
caffe_gpu_gemm(CblasNoTrans, CblasTrans, N_, 4*H_, H_, Ftype(1.),
h_t_1, weight_h, Ftype(0.), h_to_gate_.mutable_gpu_data());
ClipAdd<Ftype><<<CAFFE_GET_BLOCKS(4*N_*H_), CAFFE_CUDA_NUM_THREADS>>>(
4*N_*H_, 4*H_, t, clip_t, h_to_gate_.gpu_data(), pre_gate_t);
CUDA_POST_KERNEL_CHECK;
ActivationForward<Ftype><<<CAFFE_GET_BLOCKS(4*N_*H_), CAFFE_CUDA_NUM_THREADS>>>(
4*N_*H_, H_, pre_gate_t, gate_t);
CUDA_POST_KERNEL_CHECK;
LSTMForward<Ftype><<<CAFFE_GET_BLOCKS(N_*H_), CAFFE_CUDA_NUM_THREADS>>>(
N_*H_, H_, t, c_t_1, gate_t, clip_t, c_t, h_t);
CUDA_POST_KERNEL_CHECK;
}
// Preserve cell state and output value for truncated BPTT
caffe_copy(N_*H_, cell_data + cell_.offset(T_-1), c_T_.mutable_gpu_data());
caffe_copy(N_*H_, top_data + top_.offset(T_-1), h_T_.mutable_gpu_data());
}
INSTANTIATE_LAYER_GPU_FORWARD_ONLY_FB(LstmLayer);
} // namespace caffe
|
b7970f3c876095218cce2a7654effa237507d92d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <ctime>
#include <iostream>
#include <fstream>
#include <math.h>
#include <sstream>
#include <stdlib.h>
#include <stdio.h>
#include <string>
#include <utility>
#include <time.h>
#include <vector>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
// SET NUMBER OF TESTS
#define NUM_TESTS 20
// CUDA CONFIG
//--------------------
#define NUM_THREADS 256 // this is also the number of tours (ie, population size)
#define BLOCKS 1024 // Number of sub populations
//--------------------
// POPULATION CONTROL
//-------------------
#define NUM_CITIES 52 // must be number of cities being read in file
#define MAX_COORD 250
#define POPULATION_SIZE NUM_THREADS // this should match #threads, at least in the beginning
#define NUM_POPULATIONS BLOCKS
#define NUM_EVOLUTIONS 50
#define MUTATION_RATE 0.05 // used to be 0.0015
#define ELITISM true
#define TOURNAMENT_SIZE 16
//--------------------
#include "headers/city.h"
#include "headers/tour.h"
#include "headers/population.h"
#include "headers/hostUtils.h"
#include "headers/gaUtils.h"
using namespace std;
__global__ void initCuRand(hiprandState_t *randState)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= NUM_THREADS*BLOCKS) return;
hiprand_init(1337, tid, 0, &randState[tid]);
}
__global__ void evaluatePopulations(population_t *populations, const float *costTable)
{
// Get thread (particle) ID
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= NUM_THREADS*BLOCKS) return;
evalTour(populations[blockIdx.x].tours[threadIdx.x], costTable);
}
// Maybe add cost table to reference during "tournamentSelection()"
__global__ void selection(population_t *populations, hiprandState_t *randState, tour_t *parents)
{
// Get thread (particle) ID
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= NUM_THREADS*BLOCKS) return;
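// With elitism enabled, every child in population 0 takes that population's fittest tour as its
// first parent; all other first parents come from tournament selection.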
if (ELITISM && blockIdx.x == 0)
parents[tid*2] = getFittestTour(populations[blockIdx.x].tours, POPULATION_SIZE);
else
parents[tid*2] = tournamentSelection(populations[blockIdx.x], randState, tid);
parents[tid*2+1] = tournamentSelection(populations[blockIdx.x], randState, tid);
}
__global__ void crossover(population_t *populations, tour_t *parents, hiprandState_t *randState, float *costTable, int index)
{
// Get thread (particle) ID
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= NUM_THREADS*BLOCKS) return;
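// Sequential constructive crossover, built one position per kernel launch: the host calls this
// kernel for index = 1..NUM_CITIES-1, and each call appends to the child the cheaper (by cost
// table) of the two parents' next valid cities.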
populations[blockIdx.x].tours[threadIdx.x].cities[0] = parents[2*tid].cities[0];
city_t c1 = getValidNextCity(parents[tid*2], populations[blockIdx.x].tours[threadIdx.x], populations[blockIdx.x].tours[threadIdx.x].cities[index-1], index);
city_t c2 = getValidNextCity(parents[tid*2+1], populations[blockIdx.x].tours[threadIdx.x], populations[blockIdx.x].tours[threadIdx.x].cities[index-1], index);
// compare the two cities from parents to the last city that was chosen in the child
city_t currentCity = populations[blockIdx.x].tours[threadIdx.x].cities[index-1];
if (costTable[c1.n*NUM_CITIES + currentCity.n] <= costTable[c2.n*NUM_CITIES + currentCity.n])
populations[blockIdx.x].tours[threadIdx.x].cities[index] = c1;
else
populations[blockIdx.x].tours[threadIdx.x].cities[index] = c2;
}
__global__ void mutate(population_t *populations, hiprandState_t *d_state)
{
// Get thread (particle) ID
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= NUM_THREADS*BLOCKS) return;
// pick random number between 0 and 1
hiprandState_t localState = d_state[tid];
// if random num is less than mutation_rate, perform mutation (swap two cities in tour)
if (hiprand_uniform(&localState) < MUTATION_RATE)
{
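// swap indices are drawn from [1, NUM_CITIES-1], so position 0 (the tour's starting city) is never moved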
int randNum1 = 1 + hiprand_uniform(&localState) * (NUM_CITIES - 1.0000001);
int randNum2 = 1 + hiprand_uniform(&localState) * (NUM_CITIES - 1.0000001);
city_t temp = populations[blockIdx.x].tours[threadIdx.x].cities[randNum1];
populations[blockIdx.x].tours[threadIdx.x].cities[randNum1] = populations[blockIdx.x].tours[threadIdx.x].cities[randNum2];
populations[blockIdx.x].tours[threadIdx.x].cities[randNum2] = temp;
d_state[tid] = localState;
}
}
__global__ void migrate(population_t *populations)
{
// Get thread (particle) ID
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= NUM_THREADS*BLOCKS || threadIdx.x != 0) return;
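// One thread per block copies its population's fittest tour over the least-fit tour of the next
// population (ring topology: the last population feeds back into population 0).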
int indexOfLeastFitInNeighbor;
if (blockIdx.x == BLOCKS-1)
{
indexOfLeastFitInNeighbor = getIndexOfLeastFit(populations[0]);
populations[0].tours[indexOfLeastFitInNeighbor] = getFittestTour(populations[blockIdx.x].tours, POPULATION_SIZE);
}
else
{
indexOfLeastFitInNeighbor = getIndexOfLeastFit(populations[blockIdx.x+1]);
populations[blockIdx.x+1].tours[indexOfLeastFitInNeighbor] = getFittestTour(populations[blockIdx.x].tours, POPULATION_SIZE);
}
}
int main(int argc, char **argv)
{
printf("THREADS: %d\n", NUM_THREADS);
printf("BLOCKS: %d\n", BLOCKS);
printf("TOURNAMENT_SIZE: %d\n", TOURNAMENT_SIZE);
printf("NUM_EVOLUTIONS: %d\n", NUM_EVOLUTIONS);
// Build city distances table
tour_t initialTour;
float costTable[NUM_CITIES*NUM_CITIES];
population_t populations[BLOCKS];
tour_t parents[NUM_POPULATIONS*POPULATION_SIZE*2];
// READS INITIAL TOUR FROM FILE
ifstream file("berlin52.txt");
readTourFromFile(initialTour, file);
// Build cost table to save time computing distance between cities
// - array lookups are cheaper than squaring, adding, and sqrting
buildCostTable(initialTour, costTable);
// ---------------------------
// GPU Mem allocation
// ---------------------------
population_t *d_populations;
hipMalloc((void **) &d_populations, BLOCKS * sizeof(population_t));
// array to store parents selected from tournament selection
tour_t *d_parents;
hipMalloc((void **) &d_parents, sizeof(tour_t) * BLOCKS * NUM_THREADS * 2);
// cost table for crossover function (SCX crossover)
float *d_costTable;
hipMalloc((void **) &d_costTable, sizeof(float) * NUM_CITIES * NUM_CITIES);
hipMemcpy(d_costTable, &costTable, sizeof(float) * NUM_CITIES * NUM_CITIES, hipMemcpyHostToDevice);
hiprandState_t *d_state;
hipMalloc((void**)&d_state, BLOCKS * NUM_THREADS * sizeof(hiprandState_t));
// collects run results
tour_t tours[NUM_TESTS];
// -----------------
// MAIN LOOP
// -----------------
for (int k = 0; k < NUM_TESTS; ++k)
{
// Initializes each of the BLOCKS populations with NUM_THREADS randomized individuals
// Done on CPU (host)
for (int i = 0; i < BLOCKS; ++i)
initializePop(populations[i], initialTour);
// copies data from host to device for evolution
hipMemcpy(d_populations, &populations, NUM_POPULATIONS * sizeof(population_t), hipMemcpyHostToDevice);
// ----------------------------------------------
// Times execution of evolve population on gpu
// -----------------------------------------------
float milliseconds = 0;
hipEvent_t start, stop;
hipEventCreate (&start);
hipEventCreate (&stop);
hipEventRecord (start);
// -----------
// MAIN LOOP
// -----------
// initialize random numbers array for tournament selection
hipLaunchKernelGGL(( initCuRand) , dim3(BLOCKS), dim3(NUM_THREADS) , 0, 0, d_state);
// figure out distance and fitness for each individual in population
hipLaunchKernelGGL(( evaluatePopulations) , dim3(BLOCKS), dim3(NUM_THREADS) , 0, 0, d_populations, d_costTable);
for (int i = 0; i < NUM_EVOLUTIONS; ++i)
{
hipLaunchKernelGGL(( selection) , dim3(BLOCKS), dim3(NUM_THREADS) , 0, 0, d_populations, d_state, d_parents);
// breed the population with tournament selection and SCX crossover
// perform computation parallelized, build children iteratively
for (int j = 1; j < NUM_CITIES; ++j)
hipLaunchKernelGGL(( crossover) , dim3(BLOCKS), dim3(NUM_THREADS) , 0, 0, d_populations, d_parents, d_state, d_costTable, j);
hipLaunchKernelGGL(( mutate) , dim3(BLOCKS), dim3(NUM_THREADS) , 0, 0, d_populations, d_state);
hipLaunchKernelGGL(( evaluatePopulations) , dim3(BLOCKS), dim3(NUM_THREADS) , 0, 0, d_populations, d_costTable);
// migrate the fittest tour to the neighboring population every 3 evolutions
if (i % 3 == 0)
hipLaunchKernelGGL(( migrate) , dim3(BLOCKS), dim3(NUM_THREADS) , 0, 0, d_populations);
}
// -----------------------------------
// END MAIN LOOP
// -----------------------------------
hipEventRecord (stop);
hipEventSynchronize (stop);
hipEventElapsedTime (&milliseconds, start, stop);
// copy evolved populations back from the device to the host
hipMemcpy(&populations, d_populations, NUM_POPULATIONS * sizeof(population_t), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
checkForError();
//printPopulation(initialPopulation);
tour_t bestIndivs[NUM_POPULATIONS];
for (int i = 0; i < NUM_POPULATIONS; ++i)
bestIndivs[i] = getFittestTour(populations[i].tours, NUM_THREADS);
tour_t fittest = getFittestTour(bestIndivs, NUM_POPULATIONS);
// ---------------------
// PRINT RUN RESULTS (elapsed seconds, best tour distance)
// ---------------------
printf("%f %f\n", milliseconds/1000, fittest.distance);
//printf("Program execution time: %f sec", timeInitGPUPop+timeInitHostPop+(milliseconds/1000)+evalPopTime);
tours[k] = fittest;
}
hipFree(d_populations);
hipFree(d_parents);
hipFree(d_costTable);
hipFree(d_state);
// tour_t mostFittest = getFittestTour(tours, NUM_TESTS);
// printf("\nThe fittest tour OVERALL has length %f\n\n", mostFittest.distance);
// printf("Winning Tour:\n");
// printTour(mostFittest);
return 0;
}
|
b7970f3c876095218cce2a7654effa237507d92d.cu
|
#include <algorithm>
#include <ctime>
#include <iostream>
#include <fstream>
#include <math.h>
#include <sstream>
#include <stdlib.h>
#include <stdio.h>
#include <string>
#include <utility>
#include <time.h>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
// SET NUMBER OF TESTS
#define NUM_TESTS 20
// CUDA CONFIG
//--------------------
#define NUM_THREADS 256 // this is also the number of tours (ie, population size)
#define BLOCKS 1024 // Number of sub populations
//--------------------
// POPULATION CONTROL
//-------------------
#define NUM_CITIES 52 // must be number of cities being read in file
#define MAX_COORD 250
#define POPULATION_SIZE NUM_THREADS // this should match #threads, at least in the beginning
#define NUM_POPULATIONS BLOCKS
#define NUM_EVOLUTIONS 50
#define MUTATION_RATE 0.05 // used to be 0.0015
#define ELITISM true
#define TOURNAMENT_SIZE 16
//--------------------
#include "headers/city.h"
#include "headers/tour.h"
#include "headers/population.h"
#include "headers/hostUtils.h"
#include "headers/gaUtils.h"
using namespace std;
__global__ void initCuRand(curandState *randState)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= NUM_THREADS*BLOCKS) return;
curand_init(1337, tid, 0, &randState[tid]);
}
__global__ void evaluatePopulations(population_t *populations, const float *costTable)
{
// Get thread (particle) ID
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= NUM_THREADS*BLOCKS) return;
evalTour(populations[blockIdx.x].tours[threadIdx.x], costTable);
}
// Maybe add cost table to reference during "tournamentSelection()"
__global__ void selection(population_t *populations, curandState *randState, tour_t *parents)
{
// Get thread (particle) ID
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= NUM_THREADS*BLOCKS) return;
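// With elitism enabled, every child in population 0 takes that population's fittest tour as its
// first parent; all other first parents come from tournament selection.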
if (ELITISM && blockIdx.x == 0)
parents[tid*2] = getFittestTour(populations[blockIdx.x].tours, POPULATION_SIZE);
else
parents[tid*2] = tournamentSelection(populations[blockIdx.x], randState, tid);
parents[tid*2+1] = tournamentSelection(populations[blockIdx.x], randState, tid);
}
__global__ void crossover(population_t *populations, tour_t *parents, curandState *randState, float *costTable, int index)
{
// Get thread (particle) ID
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= NUM_THREADS*BLOCKS) return;
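// Sequential constructive crossover, built one position per kernel launch: the host calls this
// kernel for index = 1..NUM_CITIES-1, and each call appends to the child the cheaper (by cost
// table) of the two parents' next valid cities.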
populations[blockIdx.x].tours[threadIdx.x].cities[0] = parents[2*tid].cities[0];
city_t c1 = getValidNextCity(parents[tid*2], populations[blockIdx.x].tours[threadIdx.x], populations[blockIdx.x].tours[threadIdx.x].cities[index-1], index);
city_t c2 = getValidNextCity(parents[tid*2+1], populations[blockIdx.x].tours[threadIdx.x], populations[blockIdx.x].tours[threadIdx.x].cities[index-1], index);
// compare the two cities from parents to the last city that was chosen in the child
city_t currentCity = populations[blockIdx.x].tours[threadIdx.x].cities[index-1];
if (costTable[c1.n*NUM_CITIES + currentCity.n] <= costTable[c2.n*NUM_CITIES + currentCity.n])
populations[blockIdx.x].tours[threadIdx.x].cities[index] = c1;
else
populations[blockIdx.x].tours[threadIdx.x].cities[index] = c2;
}
__global__ void mutate(population_t *populations, curandState *d_state)
{
// Get thread (particle) ID
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= NUM_THREADS*BLOCKS) return;
// pick random number between 0 and 1
curandState localState = d_state[tid];
// if random num is less than mutation_rate, perform mutation (swap two cities in tour)
if (curand_uniform(&localState) < MUTATION_RATE)
{
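// swap indices are drawn from [1, NUM_CITIES-1], so position 0 (the tour's starting city) is never moved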
int randNum1 = 1 + curand_uniform(&localState) * (NUM_CITIES - 1.0000001);
int randNum2 = 1 + curand_uniform(&localState) * (NUM_CITIES - 1.0000001);
city_t temp = populations[blockIdx.x].tours[threadIdx.x].cities[randNum1];
populations[blockIdx.x].tours[threadIdx.x].cities[randNum1] = populations[blockIdx.x].tours[threadIdx.x].cities[randNum2];
populations[blockIdx.x].tours[threadIdx.x].cities[randNum2] = temp;
d_state[tid] = localState;
}
}
__global__ void migrate(population_t *populations)
{
// Get thread (particle) ID
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= NUM_THREADS*BLOCKS || threadIdx.x != 0) return;
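// One thread per block copies its population's fittest tour over the least-fit tour of the next
// population (ring topology: the last population feeds back into population 0).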
int indexOfLeastFitInNeighbor;
if (blockIdx.x == BLOCKS-1)
{
indexOfLeastFitInNeighbor = getIndexOfLeastFit(populations[0]);
populations[0].tours[indexOfLeastFitInNeighbor] = getFittestTour(populations[blockIdx.x].tours, POPULATION_SIZE);
}
else
{
indexOfLeastFitInNeighbor = getIndexOfLeastFit(populations[blockIdx.x+1]);
populations[blockIdx.x+1].tours[indexOfLeastFitInNeighbor] = getFittestTour(populations[blockIdx.x].tours, POPULATION_SIZE);
}
}
int main(int argc, char **argv)
{
printf("THREADS: %d\n", NUM_THREADS);
printf("BLOCKS: %d\n", BLOCKS);
printf("TOURNAMENT_SIZE: %d\n", TOURNAMENT_SIZE);
printf("NUM_EVOLUTIONS: %d\n", NUM_EVOLUTIONS);
// Build city distances table
tour_t initialTour;
float costTable[NUM_CITIES*NUM_CITIES];
population_t populations[BLOCKS];
tour_t parents[NUM_POPULATIONS*POPULATION_SIZE*2];
// READS INITIAL TOUR FROM FILE
ifstream file("berlin52.txt");
readTourFromFile(initialTour, file);
// Build cost table to save time computing distance between cities
// - array lookups are cheaper than squaring, adding, and sqrting
buildCostTable(initialTour, costTable);
// ---------------------------
// GPU Mem allocation
// ---------------------------
population_t *d_populations;
cudaMalloc((void **) &d_populations, BLOCKS * sizeof(population_t));
// array to store parents selected from tournament selection
tour_t *d_parents;
cudaMalloc((void **) &d_parents, sizeof(tour_t) * BLOCKS * NUM_THREADS * 2);
// cost table for crossover function (SCX crossover)
float *d_costTable;
cudaMalloc((void **) &d_costTable, sizeof(float) * NUM_CITIES * NUM_CITIES);
cudaMemcpy(d_costTable, &costTable, sizeof(float) * NUM_CITIES * NUM_CITIES, cudaMemcpyHostToDevice);
curandState *d_state;
cudaMalloc((void**)&d_state, BLOCKS * NUM_THREADS * sizeof(curandState));
// collects run results
tour_t tours[NUM_TESTS];
// -----------------
// MAIN LOOP
// -----------------
for (int k = 0; k < NUM_TESTS; ++k)
{
// Initializes each of the BLOCKS populations with NUM_THREADS randomized individuals
// Done on CPU (host)
for (int i = 0; i < BLOCKS; ++i)
initializePop(populations[i], initialTour);
// copies data from host to device for evolution
cudaMemcpy(d_populations, &populations, NUM_POPULATIONS * sizeof(population_t), cudaMemcpyHostToDevice);
// ----------------------------------------------
// Times execution of evolve population on gpu
// -----------------------------------------------
float milliseconds = 0;
cudaEvent_t start, stop;
cudaEventCreate (&start);
cudaEventCreate (&stop);
cudaEventRecord (start);
// -----------
// MAIN LOOP
// -----------
// initialize random numbers array for tournament selection
initCuRand <<< BLOCKS, NUM_THREADS >>> (d_state);
// figure out distance and fitness for each individual in population
evaluatePopulations <<< BLOCKS, NUM_THREADS >>> (d_populations, d_costTable);
for (int i = 0; i < NUM_EVOLUTIONS; ++i)
{
selection <<< BLOCKS, NUM_THREADS >>> (d_populations, d_state, d_parents);
// breed the population with tournament selection and SCX crossover
// perform computation parallelized, build children iteratively
for (int j = 1; j < NUM_CITIES; ++j)
crossover <<< BLOCKS, NUM_THREADS >>> (d_populations, d_parents, d_state, d_costTable, j);
mutate <<< BLOCKS, NUM_THREADS >>> (d_populations, d_state);
evaluatePopulations <<< BLOCKS, NUM_THREADS >>> (d_populations, d_costTable);
// migrate the fittest tour to the neighboring population every 3 evolutions
if (i % 3 == 0)
migrate <<< BLOCKS, NUM_THREADS >>> (d_populations);
}
// -----------------------------------
// END MAIN LOOP
// -----------------------------------
cudaEventRecord (stop);
cudaEventSynchronize (stop);
cudaEventElapsedTime (&milliseconds, start, stop);
// copy evolved populations back from the device to the host
cudaMemcpy(&populations, d_populations, NUM_POPULATIONS * sizeof(population_t), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
checkForError();
//printPopulation(initialPopulation);
tour_t bestIndivs[NUM_POPULATIONS];
for (int i = 0; i < NUM_POPULATIONS; ++i)
bestIndivs[i] = getFittestTour(populations[i].tours, NUM_THREADS);
tour_t fittest = getFittestTour(bestIndivs, NUM_POPULATIONS);
// ---------------------
// PRINT RUN RESULTS (elapsed seconds, best tour distance)
// ---------------------
printf("%f %f\n", milliseconds/1000, fittest.distance);
//printf("Program execution time: %f sec", timeInitGPUPop+timeInitHostPop+(milliseconds/1000)+evalPopTime);
tours[k] = fittest;
}
cudaFree(d_populations);
cudaFree(d_parents);
cudaFree(d_costTable);
cudaFree(d_state);
// tour_t mostFittest = getFittestTour(tours, NUM_TESTS);
// printf("\nThe fittest tour OVERALL has length %f\n\n", mostFittest.distance);
// printf("Winning Tour:\n");
// printTour(mostFittest);
return 0;
}
|
ffec62d3429c58738465aa71c8758270165fb9b5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include "common/grid_sync.cuh"
#include "cuda_utils.cuh"
#include "test_utils.h"
namespace MLCommon {
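// Test kernel: the designated master thread writes 1 into its output slot, a grid-wide sync makes
// the write visible, then every participating thread reads it and atomically adds it back, so a
// working GridSync leaves (participating blocks * threads per block) + 1 in each slot.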
__global__ void gridSyncTestKernel(void* workspace, int* out, SyncType type) {
GridSync gs(workspace, type, true);
bool master;
int updatePosition;
if (type == ACROSS_ALL) {
master = threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0 &&
blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0;
updatePosition = 0;
} else {
master = threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0 &&
blockIdx.x == 0;
updatePosition = blockIdx.y + blockIdx.z * gridDim.y;
}
if (master) {
out[updatePosition] = 1;
__threadfence();
}
gs.sync();
int val = out[updatePosition];
// make sure everybody has read the updated value!
gs.sync();
atomicAdd(out + updatePosition, val);
}
struct GridSyncInputs {
dim3 gridDim, blockDim;
bool checkWorkspaceReuse;
SyncType type;
};
void gridSyncTest(int* out, int* out1, const GridSyncInputs& params) {
size_t workspaceSize =
GridSync::computeWorkspaceSize(params.gridDim, params.type, true);
char* workspace;
allocate(workspace, workspaceSize);
CUDA_CHECK(hipMemset(workspace, 0, workspaceSize));
hipLaunchKernelGGL(( gridSyncTestKernel), dim3(params.gridDim), dim3(params.blockDim), 0, 0, workspace, out,
params.type);
CUDA_CHECK(hipPeekAtLastError());
if (params.checkWorkspaceReuse) {
CUDA_CHECK(hipDeviceSynchronize());
hipLaunchKernelGGL(( gridSyncTestKernel), dim3(params.gridDim), dim3(params.blockDim), 0, 0, workspace, out1,
params.type);
CUDA_CHECK(hipPeekAtLastError());
}
CUDA_CHECK(hipFree(workspace));
}
::std::ostream& operator<<(::std::ostream& os, const GridSyncInputs& dims) {
return os;
}
class GridSyncTest : public ::testing::TestWithParam<GridSyncInputs> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<GridSyncInputs>::GetParam();
size_t len = computeOutLen();
allocate(out, len);
allocate(out1, len);
gridSyncTest(out, out1, params);
}
void TearDown() override {
CUDA_CHECK(hipFree(out));
CUDA_CHECK(hipFree(out1));
}
size_t computeOutLen() const {
size_t len;
if (params.type == ACROSS_ALL) {
len = 1;
} else {
len = params.gridDim.y * params.gridDim.z;
}
return len;
}
protected:
GridSyncInputs params;
int *out, *out1;
};
const std::vector<GridSyncInputs> inputs = {
{{2, 1, 1}, {32, 1, 1}, false, ACROSS_ALL},
{{2, 1, 1}, {32, 2, 1}, false, ACROSS_ALL},
{{2, 1, 1}, {32, 2, 4}, false, ACROSS_ALL},
{{2, 1, 1}, {32, 1, 1}, true, ACROSS_ALL},
{{2, 1, 1}, {32, 2, 1}, true, ACROSS_ALL},
{{2, 1, 1}, {32, 2, 4}, true, ACROSS_ALL},
{{2, 1, 1}, {32, 1, 1}, false, ACROSS_X},
{{2, 2, 1}, {32, 1, 1}, false, ACROSS_X},
{{2, 2, 2}, {32, 1, 1}, false, ACROSS_X},
{{2, 1, 1}, {32, 2, 1}, false, ACROSS_X},
{{2, 2, 1}, {32, 2, 1}, false, ACROSS_X},
{{2, 2, 2}, {32, 2, 1}, false, ACROSS_X},
{{2, 1, 1}, {32, 2, 4}, false, ACROSS_X},
{{2, 2, 1}, {32, 2, 4}, false, ACROSS_X},
{{2, 2, 2}, {32, 2, 4}, false, ACROSS_X},
{{32, 256, 1}, {1, 1, 1}, false, ACROSS_X},
{{2, 1, 1}, {32, 1, 1}, true, ACROSS_X},
{{2, 2, 1}, {32, 1, 1}, true, ACROSS_X},
{{2, 2, 2}, {32, 1, 1}, true, ACROSS_X},
{{2, 1, 1}, {32, 2, 1}, true, ACROSS_X},
{{2, 2, 1}, {32, 2, 1}, true, ACROSS_X},
{{2, 2, 2}, {32, 2, 1}, true, ACROSS_X},
{{2, 1, 1}, {32, 2, 4}, true, ACROSS_X},
{{2, 2, 1}, {32, 2, 4}, true, ACROSS_X},
{{2, 2, 2}, {32, 2, 4}, true, ACROSS_X},
{{32, 256, 1}, {1, 1, 1}, true, ACROSS_X}};
TEST_P(GridSyncTest, Result) {
size_t len = computeOutLen();
// number of blocks atomicAdd'ing the same location
int nblks = params.type == ACROSS_X
? params.gridDim.x
: params.gridDim.x * params.gridDim.y * params.gridDim.z;
int nthreads = params.blockDim.x * params.blockDim.y * params.blockDim.z;
int expected = (nblks * nthreads) + 1;
ASSERT_TRUE(devArrMatch(expected, out, len, Compare<int>()));
if (params.checkWorkspaceReuse) {
ASSERT_TRUE(devArrMatch(expected, out1, len, Compare<int>()));
}
}
INSTANTIATE_TEST_CASE_P(GridSyncTests, GridSyncTest,
::testing::ValuesIn(inputs));
} // end namespace MLCommon
|
ffec62d3429c58738465aa71c8758270165fb9b5.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include "common/grid_sync.cuh"
#include "cuda_utils.cuh"
#include "test_utils.h"
namespace MLCommon {
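// Test kernel: the designated master thread writes 1 into its output slot, a grid-wide sync makes
// the write visible, then every participating thread reads it and atomically adds it back, so a
// working GridSync leaves (participating blocks * threads per block) + 1 in each slot.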
__global__ void gridSyncTestKernel(void* workspace, int* out, SyncType type) {
GridSync gs(workspace, type, true);
bool master;
int updatePosition;
if (type == ACROSS_ALL) {
master = threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0 &&
blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0;
updatePosition = 0;
} else {
master = threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0 &&
blockIdx.x == 0;
updatePosition = blockIdx.y + blockIdx.z * gridDim.y;
}
if (master) {
out[updatePosition] = 1;
__threadfence();
}
gs.sync();
int val = out[updatePosition];
// make sure everybody has read the updated value!
gs.sync();
atomicAdd(out + updatePosition, val);
}
struct GridSyncInputs {
dim3 gridDim, blockDim;
bool checkWorkspaceReuse;
SyncType type;
};
void gridSyncTest(int* out, int* out1, const GridSyncInputs& params) {
size_t workspaceSize =
GridSync::computeWorkspaceSize(params.gridDim, params.type, true);
char* workspace;
allocate(workspace, workspaceSize);
CUDA_CHECK(cudaMemset(workspace, 0, workspaceSize));
gridSyncTestKernel<<<params.gridDim, params.blockDim>>>(workspace, out,
params.type);
CUDA_CHECK(cudaPeekAtLastError());
if (params.checkWorkspaceReuse) {
CUDA_CHECK(cudaDeviceSynchronize());
gridSyncTestKernel<<<params.gridDim, params.blockDim>>>(workspace, out1,
params.type);
CUDA_CHECK(cudaPeekAtLastError());
}
CUDA_CHECK(cudaFree(workspace));
}
::std::ostream& operator<<(::std::ostream& os, const GridSyncInputs& dims) {
return os;
}
class GridSyncTest : public ::testing::TestWithParam<GridSyncInputs> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<GridSyncInputs>::GetParam();
size_t len = computeOutLen();
allocate(out, len);
allocate(out1, len);
gridSyncTest(out, out1, params);
}
void TearDown() override {
CUDA_CHECK(cudaFree(out));
CUDA_CHECK(cudaFree(out1));
}
size_t computeOutLen() const {
size_t len;
if (params.type == ACROSS_ALL) {
len = 1;
} else {
len = params.gridDim.y * params.gridDim.z;
}
return len;
}
protected:
GridSyncInputs params;
int *out, *out1;
};
const std::vector<GridSyncInputs> inputs = {
{{2, 1, 1}, {32, 1, 1}, false, ACROSS_ALL},
{{2, 1, 1}, {32, 2, 1}, false, ACROSS_ALL},
{{2, 1, 1}, {32, 2, 4}, false, ACROSS_ALL},
{{2, 1, 1}, {32, 1, 1}, true, ACROSS_ALL},
{{2, 1, 1}, {32, 2, 1}, true, ACROSS_ALL},
{{2, 1, 1}, {32, 2, 4}, true, ACROSS_ALL},
{{2, 1, 1}, {32, 1, 1}, false, ACROSS_X},
{{2, 2, 1}, {32, 1, 1}, false, ACROSS_X},
{{2, 2, 2}, {32, 1, 1}, false, ACROSS_X},
{{2, 1, 1}, {32, 2, 1}, false, ACROSS_X},
{{2, 2, 1}, {32, 2, 1}, false, ACROSS_X},
{{2, 2, 2}, {32, 2, 1}, false, ACROSS_X},
{{2, 1, 1}, {32, 2, 4}, false, ACROSS_X},
{{2, 2, 1}, {32, 2, 4}, false, ACROSS_X},
{{2, 2, 2}, {32, 2, 4}, false, ACROSS_X},
{{32, 256, 1}, {1, 1, 1}, false, ACROSS_X},
{{2, 1, 1}, {32, 1, 1}, true, ACROSS_X},
{{2, 2, 1}, {32, 1, 1}, true, ACROSS_X},
{{2, 2, 2}, {32, 1, 1}, true, ACROSS_X},
{{2, 1, 1}, {32, 2, 1}, true, ACROSS_X},
{{2, 2, 1}, {32, 2, 1}, true, ACROSS_X},
{{2, 2, 2}, {32, 2, 1}, true, ACROSS_X},
{{2, 1, 1}, {32, 2, 4}, true, ACROSS_X},
{{2, 2, 1}, {32, 2, 4}, true, ACROSS_X},
{{2, 2, 2}, {32, 2, 4}, true, ACROSS_X},
{{32, 256, 1}, {1, 1, 1}, true, ACROSS_X}};
TEST_P(GridSyncTest, Result) {
size_t len = computeOutLen();
// number of blocks atomicAdd'ing the same location
int nblks = params.type == ACROSS_X
? params.gridDim.x
: params.gridDim.x * params.gridDim.y * params.gridDim.z;
int nthreads = params.blockDim.x * params.blockDim.y * params.blockDim.z;
int expected = (nblks * nthreads) + 1;
ASSERT_TRUE(devArrMatch(expected, out, len, Compare<int>()));
if (params.checkWorkspaceReuse) {
ASSERT_TRUE(devArrMatch(expected, out1, len, Compare<int>()));
}
}
INSTANTIATE_TEST_CASE_P(GridSyncTests, GridSyncTest,
::testing::ValuesIn(inputs));
} // end namespace MLCommon
|
0416b32b1adf05d133cc0fdb8399943ba97e8374.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Indice2D.h"
#include "cudaTools.h"
#include "Device.h"
#include "DamierHSBAFloatMath.h"
#include "IndiceTools_GPU.h"
#include "DomaineMath_GPU.h"
using namespace gpu;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void damierHSBAFloat(float4* ptrDevPixels, uint w, uint h, DomaineMath domaineMath, uint n, float t);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
__global__ void damierHSBAFloat(float4* ptrDevPixels, uint w, uint h, DomaineMath domaineMath, uint n, float t)
{
DamierHSBAFloatMath damierHSBAFloatMath = DamierHSBAFloatMath(n);
const int TID = Indice2D::tid();
const int NB_THREAD = Indice2D::nbThread();
const int WH = w * h;
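// Grid-stride loop: each thread colors pixels s, s + NB_THREAD, s + 2*NB_THREAD, ... up to WH.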
double x;
double y;
int i; // in [0,h[
int j; // in [0,w[
int s = TID;
while (s < WH)
{
IndiceTools::toIJ(s, w, &i, &j); // update (i, j)
// (i,j) domaine ecran
// (x,y) domaine math
domaineMath.toXY(i, j, &x, &y); // (i,j) -> (x,y)
damierHSBAFloatMath.colorXY(&ptrDevPixels[s], x, y, t); // update ptrDevPixels[s]
s += NB_THREAD;
}
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
0416b32b1adf05d133cc0fdb8399943ba97e8374.cu
|
#include "Indice2D.h"
#include "cudaTools.h"
#include "Device.h"
#include "DamierHSBAFloatMath.h"
#include "IndiceTools_GPU.h"
#include "DomaineMath_GPU.h"
using namespace gpu;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void damierHSBAFloat(float4* ptrDevPixels, uint w, uint h, DomaineMath domaineMath, uint n, float t);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
__global__ void damierHSBAFloat(float4* ptrDevPixels, uint w, uint h, DomaineMath domaineMath, uint n, float t)
{
DamierHSBAFloatMath damierHSBAFloatMath = DamierHSBAFloatMath(n);
const int TID = Indice2D::tid();
const int NB_THREAD = Indice2D::nbThread();
const int WH = w * h;
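// Grid-stride loop: each thread colors pixels s, s + NB_THREAD, s + 2*NB_THREAD, ... up to WH.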
double x;
double y;
int i; // in [0,h[
int j; // in [0,w[
int s = TID;
while (s < WH)
{
IndiceTools::toIJ(s, w, &i, &j); // update (i, j)
// (i,j) domaine ecran
// (x,y) domaine math
domaineMath.toXY(i, j, &x, &y); // (i,j) -> (x,y)
damierHSBAFloatMath.colorXY(&ptrDevPixels[s], x, y, t); // update ptrDevPixels[s]
s += NB_THREAD;
}
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
20cd99718f62c5dc986e53252b71e09cf6f2cb22.hip
|
// !!! This is a file automatically generated by hipify!!!
// Includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/types.h>
#include <unistd.h>
#include <errno.h>
#include <sys/time.h>
#include <gloop/statistics.h>
#include <gloop/initialize.cuh>
#include <hip/hip_runtime.h>
#include <hip/hip_vector_types.h>
// includes, kernels
#include <common.cu>
#include <mummergpu.h>
#include <mummergpu_kernel.cu>
int USE_PRINT_KERNEL = 1;
#define BREATHING_ROOM (16 * 1024 * 1024)
#define BASES_PER_TREE_PAGE 8388608
//#define BASES_PER_TREE_PAGE 7000000
#define BLOCKSIZE 256
unsigned int cuda_calls = 0;
void trap_dbg()
{
fprintf(stderr, "Trapped\n");
}
#define CUDA_SAFE_CALL( call) do { \
cuda_calls++; \
hipError_t err = call; \
if( hipSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %d (%s).\n", \
__FILE__, __LINE__, err, hipGetErrorString( err) ); \
trap_dbg(); \
exit(EXIT_FAILURE); \
} } while (0)
# define CU_SAFE_CALL_NO_SYNC( call ) do { \
hipError_t err = call; \
if( hipSuccess != err) { \
fprintf(stderr, "Cuda driver error %x in file '%s' in line %i.\n", \
err, __FILE__, __LINE__ ); \
exit(EXIT_FAILURE); \
} } while (0)
# define CUT_DEVICE_INIT_DRV(cuDevice) do { \
cuDevice = 0; \
int deviceCount = 0; \
hipError_t err = hipInit(0); \
if (hipSuccess == err) \
CU_SAFE_CALL_NO_SYNC(hipGetDeviceCount(&deviceCount)); \
if (deviceCount == 0) { \
fprintf(stderr, "There is no device.\n"); \
exit(EXIT_FAILURE); \
} \
int dev; \
for (dev = 0; dev < deviceCount; ++dev) { \
int major, minor; \
CU_SAFE_CALL_NO_SYNC(hipDeviceComputeCapability(&major, &minor, dev));\
if (major >= 1) \
break; \
} \
if (dev == deviceCount) { \
fprintf(stderr, "There is no device supporting CUDA.\n"); \
exit(EXIT_FAILURE); \
} \
else \
CU_SAFE_CALL_NO_SYNC(hipDeviceGet(&cuDevice, dev)); \
} while (0)
unsigned int num_bind_tex_calls = 0;
#define BIND_TEX(offset, tex, arr, desc, len) do { \
CUDA_SAFE_CALL(hipBindTexture(offset, tex, arr, desc, len)); \
++num_bind_tex_calls; \
} while(0)
#define BIND_TEX_ARRAY(tex, arr, desc) do { \
CUDA_SAFE_CALL(hipBindTextureToArray(tex, arr, desc)); \
++num_bind_tex_calls; \
} while(0)
#define CUDA_MALLOC(ptr, size) do { \
hipMalloc(ptr, size); \
++num_bind_tex_calls; \
} while(0)
#define CUDA_MALLOC_PITCH(ptr, out_pitch, rowsize, numrows) do { \
hipMallocPitch(ptr, out_pitch, rowsize, numrows); \
++num_bind_tex_calls; \
} while(0)
#define CUDA_MALLOC_ARRAY(ptr, desc, pitch, rows) do { \
hipMallocArray(ptr, desc, pitch, rows); \
++num_bind_tex_calls; \
} while(0)
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
extern "C"
void computeGold(MatchResults* results,
char* refstr,
char* queries,
int* queryAddrs,
int* queryLengths,
PixelOfNode* nodeTexture,
PixelOfChildren* childrenTexture,
int numQueries,
int mismatch_length,
int rc);
extern "C"
void getReferenceString(const char * filename, char** refstr, size_t* reflen);
extern "C"
void createTreeTexture(const char * filename,
PixelOfNode** nodeTexture,
PixelOfChildren** childrenTexture,
unsigned int* width,
unsigned int* node_height,
unsigned int* children_height,
AuxiliaryNodeData** aux_data,
int* num_match_coords,
int min_match_len,
Statistics* statistics,
const char * dotfilename,
const char * texfilename);
extern "C"
void getQueriesTexture(int qfile,
char** queryTexture,
size_t* queryLength,
int** queryAddrs,
char*** queryNames,
int** queryLengths,
unsigned int* numQueries,
unsigned int* num_match_coords,
unsigned int device_memory_avail,
int min_match_length,
bool rc);
extern "C"
int lookupNumLeaves(ReferencePage * page, TextureAddress addr);
void printAlignments(ReferencePage* page,
Alignment* alignments,
char* query,
int qrylen,
TextureAddress nodeid,
int qrypos,
int edge_depth,
int min_match,
bool rc,
bool forwardcoordinates);
int countLeafNodes(int nodeid);
extern "C"
void mapQueriesEndToEnd(MatchContext* ctx,
ReferencePage* page,
MatchInfo* h_matches,
unsigned int numMatches,
Alignment* h_alignments,
unsigned int numAligments);
char * createTimer()
{
unsigned int * ptr = (unsigned int *) malloc(sizeof(struct Timer_t));
memset(ptr, 0, sizeof(struct Timer_t));
return (char *) ptr;
}
void startTimer(char * ptr)
{
gettimeofday(&(((struct Timer_t *)ptr)->start_m), NULL);
}
void stopTimer(char * ptr)
{
gettimeofday(&(((struct Timer_t *)ptr)->end_m), NULL);
}
float getTimerValue(char * ptr)
{
Timer_t * timer = (Timer_t*) ptr;
if (timer == NULL)
{
fprintf(stderr, "Uninitialized timer!!!\n");
return 0.0;
}
if (timer->end_m.tv_sec == 0) { stopTimer(ptr); }
return (float) (1000.0 * (timer->end_m.tv_sec - timer->start_m.tv_sec)
+ (0.001 * (timer->end_m.tv_usec - timer->start_m.tv_usec)));
}
void deleteTimer(char * ptr)
{
free((Timer_t *)ptr);
}
extern "C"
int createReference(const char* fromFile, Reference* ref)
{
if (!fromFile || !ref)
return -1;
char * loadreftimer = createTimer();
startTimer(loadreftimer);
getReferenceString(fromFile, &(ref->str), &(ref->len));
stopTimer(loadreftimer);
ref->t_load_from_disk += getTimerValue(loadreftimer);
deleteTimer(loadreftimer);
return 0;
}
extern "C"
int destroyReference(Reference* ref)
{
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
free(ref->h_node_tex_array);
free(ref->h_children_tex_array);
free(ref->str);
#if REORDER_REF
free(ref->h_ref_array);
#endif
free(ref->aux_data);
#if TREE_ACCESS_HISTOGRAM
free(ref->h_node_hist);
free(ref->h_child_hist);
#endif
ref->str = NULL;
ref->len = 0;
return 0;
}
extern "C"
int createQuerySet(const char* fromFile, QuerySet* queries)
{
fprintf(stderr, "Opening %s...\n", fromFile);
int qfile = open(fromFile, O_RDONLY);
if (qfile == -1)
{
fprintf(stderr, "Can't open %s: %d\n", fromFile, errno);
exit (1);
}
queries->qfile = qfile;
return 0;
}
extern "C"
int destroyQuerySet(QuerySet* queries)
{
if (queries->qfile)
close(queries->qfile);
return 0;
}
extern "C"
void printStringForError(int err)
{
}
extern "C"
int createMatchContext(Reference* ref,
QuerySet* queries,
MatchResults* matches,
bool on_cpu,
int min_match_length,
char* stats_file,
bool reverse,
bool forwardreverse,
bool forwardcoordinates,
bool showQueryLength,
char* dotfilename,
char* texfilename,
MatchContext* ctx) {
{
gloop::Statistics::Scope<gloop::Statistics::Type::GPUInit> scope;
gloop::eagerlyInitializeContext();
}
{
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
ctx->queries = queries;
ctx->ref = ref;
ctx->full_ref = ref->str;
ctx->full_ref_len = ref->len;
ctx->on_cpu = on_cpu;
ctx->min_match_length = min_match_length;
ctx->stats_file = stats_file;
ctx->reverse = reverse;
ctx->forwardreverse = forwardreverse;
ctx->forwardcoordinates = forwardcoordinates;
ctx->show_query_length = showQueryLength;
ctx->dotfilename = dotfilename;
ctx->texfilename = texfilename;
}
return 0;
}
extern "C"
int destroyMatchContext(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
free(ctx->full_ref);
//destroyReference(ctx->ref);
destroyQuerySet(ctx->queries);
{
gloop::Statistics::Scope<gloop::Statistics::Type::GPUInit> scope;
gloop::eagerlyFinalizeContext();
}
return 0;
}
void buildReferenceTexture(Reference* ref,
char* full_ref,
size_t begin,
size_t end,
int min_match_len,
char* dotfilename,
char* texfilename,
Statistics* statistics)
{
fprintf(stderr, "Building reference texture...\n");
PixelOfNode* nodeTexture = NULL;
PixelOfChildren * childrenTexture = NULL;
unsigned int width = 0;
unsigned int node_height = 0;
unsigned int children_height = 0;
AuxiliaryNodeData* aux_data = NULL;
int num_nodes;
char * loadreftimer = createTimer();
startTimer(loadreftimer);
ref->len = end - begin + 3;
ref->str = (char*)malloc(ref->len);
ref->str[0] = 's';
strncpy(ref->str + 1, full_ref + begin, ref->len - 3);
strcpy(ref->str + ref->len - 2, "$");
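// Frame the slice as 's' + reference[begin..end) + "$" + NUL, i.e. with sentinel characters at
// both ends, before handing it to createTreeTexture below.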
stopTimer(loadreftimer);
statistics->t_ref_from_disk += getTimerValue(loadreftimer) + ref->t_load_from_disk;
deleteTimer(loadreftimer);
createTreeTexture(ref->str,
&nodeTexture,
&childrenTexture,
&width,
&node_height,
&children_height,
&aux_data,
&num_nodes,
min_match_len,
statistics,
dotfilename,
texfilename);
ref->h_node_tex_array = nodeTexture;
ref->h_children_tex_array = childrenTexture;
ref->tex_width = width;
ref->tex_node_height = node_height;
ref->tex_children_height = children_height;
#if TREE_ACCESS_HISTOGRAM
ref->h_node_hist = (int*)calloc(width * node_height, sizeof(int));
ref->h_child_hist = (int*)calloc(width * children_height, sizeof(int));
#endif
ref->aux_data = aux_data;
ref->num_nodes = num_nodes;
ref->bytes_on_board = (width * node_height * sizeof(PixelOfNode)) +
(width * children_height * sizeof(PixelOfChildren));
fprintf(stderr, "This tree will need %d bytes on the board\n", ref->bytes_on_board);
#if REORDER_REF
char * reordertimer = createTimer();
startTimer(reordertimer);
unsigned int refpitch = ref->pitch = 65536;
int numrows = ceil(ref->len / ((float)refpitch));
int blocksize = 4;
numrows += blocksize;
int refstrsize = numrows * refpitch;
ref->h_ref_array = (char *) malloc(refstrsize);
ref->bytes_on_board += refstrsize;
fprintf(stderr, "The refstr (reordered) requires %d bytes\n", refstrsize);
int z_max = numrows * refpitch;
for (int z = 0; z < z_max; z++) {
ref->h_ref_array[z] = 'Z';
}
int x, y;
int maxx = 0, maxy = 0;
size_t reflen = ref->len;
char* refstr = ref->str;
int block_dim = refpitch * blocksize;
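// Blocked reordering: within each tile of block_dim characters, consecutive reference positions
// are spread across `blocksize` adjacent rows of the same column, so nearby positions land in
// nearby texels of the 2-D reference texture.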
for (int i = 0; i < reflen; i++) {
int bigx = i % (block_dim); // ref string reorder
int bigy = i / (block_dim);
y = bigy * blocksize + bigx % blocksize;
x = bigx / blocksize;
// printf("%d: (%d,%d)=%c\n", i, x, y, refstr[i]);
assert(x < refpitch);
assert(y < numrows);
ref->h_ref_array[y*refpitch+x] = refstr[i];
if (x > maxx) {
maxx = x;
}
if (y > maxy) {
maxy = y;
}
}
if ((maxx >= refpitch) || (maxy >= numrows)) {
fprintf(stderr, "ERROR: maxx: %d refpitch: %d, maxy: %d numrows: %d\n",
maxx, refpitch, maxy, numrows);
exit(1);
}
stopTimer(reordertimer);
if (statistics)
statistics->t_reorder_ref_str += getTimerValue(reordertimer);
deleteTimer(reordertimer);
#else
fprintf(stderr, "The refstr requires %d bytes\n", ref->len);
ref->bytes_on_board += ref->len;
#endif
}
void boardMemory(size_t * free_mem, size_t * total_mem)
{
// The emulator doesn't allow calls to cuMemGetInfo
#ifdef __DEVICE_EMULATION__
*free_mem = 512*1024*1024;
*total_mem = 768*1024*1024;
#else
CU_SAFE_CALL_NO_SYNC(cuMemGetInfo(free_mem, total_mem));
#endif
}
void loadReferenceTexture(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
Reference* ref = ctx->ref;
int numrows = ceil(ref->len / ((float)ref->pitch));
int blocksize = 4;
numrows += blocksize;
hipChannelFormatDesc refTextureDesc =
hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindSigned);
if (!ctx->on_cpu) {
char * toboardtimer = createTimer();
startTimer(toboardtimer);
#if REFTEX
#if REORDER_REF
CUDA_MALLOC_ARRAY((hipArray**)(&ref->d_ref_array),
&refTextureDesc,
ref->pitch,
numrows);
CUDA_SAFE_CALL(hipMemcpyToArray( (hipArray*)(ref->d_ref_array),
0,
0,
ref->h_ref_array,
numrows*ref->pitch,
hipMemcpyHostToDevice));
reftex.addressMode[0] = hipAddressModeClamp;
reftex.addressMode[1] = hipAddressModeClamp;
reftex.filterMode = hipFilterModePoint;
reftex.normalized = false;
BIND_TEX_ARRAY(reftex, (hipArray*)ref->d_ref_array, refTextureDesc);
ctx->ref->bytes_on_board += numrows * ref->pitch;
#else
CUDA_MALLOC( (void**)(&ref->d_ref_array), ref->len);
CUDA_SAFE_CALL( hipMemcpy( (void*)(ref->d_ref_array),
ref->str,
ref->len,
hipMemcpyHostToDevice) );
reftex.addressMode[0] = hipAddressModeClamp;
reftex.filterMode = hipFilterModePoint;
reftex.normalized = false; // access with normalized texture coordinates
hipChannelFormatDesc refDesc =
hipCreateChannelDesc(8,0,0,0, hipChannelFormatKindUnsigned);
BIND_TEX(0, reftex, (void*)(ref->d_ref_array), refDesc, ref->len);
ctx->ref->bytes_on_board += ref->len;
#endif
#else
#if REORDER_REF
size_t refpitch;
CUDA_MALLOC_PITCH( (void**)(&ref->d_ref_array),
&refpitch,
ref->pitch * sizeof(char),
numrows);
CUDA_SAFE_CALL( hipMemcpy2D((ref->d_ref_array),
refpitch,
ref->h_ref_array,
ref->pitch ,
ref->pitch * sizeof(char),
numrows,
hipMemcpyHostToDevice));
ctx->ref->bytes_on_board += numrows * ref->pitch;
#else
CUDA_MALLOC( (void**)(&ref->d_ref_array), ref->len);
CUDA_SAFE_CALL( hipMemcpy( (void*)(ref->d_ref_array),
ref->str,
ref->len,
hipMemcpyHostToDevice) );
ctx->ref->bytes_on_board += ref->len;
#endif
#endif
stopTimer(toboardtimer);
ctx->statistics.t_ref_str_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
}
else {
ref->d_ref_array = NULL;
}
}
void unloadReferenceString(Reference* ref)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
#if REFTEX
CUDA_SAFE_CALL(hipUnbindTexture( reftex ) );
#endif
#if REORDER_REF && REFTEX
CUDA_SAFE_CALL(hipFreeArray((hipArray*)(ref->d_ref_array)));
#else
CUDA_SAFE_CALL(hipFree((ref->d_ref_array)));
#endif
ref->d_ref_array = NULL;
}
void unloadReferenceTree(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
Reference* ref = ctx->ref;
#if REORDER_TREE
// Unload nodetex
#if NODETEX
CUDA_SAFE_CALL(hipUnbindTexture( nodetex ) );
CUDA_SAFE_CALL(hipFreeArray((hipArray*)(ref->d_node_tex_array)));
#else
CUDA_SAFE_CALL(hipFree(ref->d_node_tex_array));
#endif
ref->d_node_tex_array = NULL;
// Unload childrentex
if (ref->d_children_tex_array)
{
#if CHILDTEX
CUDA_SAFE_CALL(hipUnbindTexture( childrentex ) );
CUDA_SAFE_CALL(hipFreeArray((hipArray*)(ref->d_children_tex_array)));
#else
CUDA_SAFE_CALL(hipFree(ref->d_children_tex_array));
#endif
}
ref->d_children_tex_array = NULL;
#else
#if NODETEX
CUDA_SAFE_CALL(hipUnbindTexture( nodetex ) );
#endif
CUDA_SAFE_CALL(hipFree(ref->d_node_tex_array));
ref->d_node_tex_array = NULL;
// Unload childrentex
if (ref->d_children_tex_array)
{
#if CHILDTEX
CUDA_SAFE_CALL(hipUnbindTexture( childrentex ) );
#endif
CUDA_SAFE_CALL(hipFree(ref->d_children_tex_array));
ref->d_children_tex_array = NULL;
}
#endif
#if TREE_ACCESS_HISTOGRAM
CUDA_SAFE_CALL(hipFree(ref->d_node_hist));
ref->d_node_hist = NULL;
CUDA_SAFE_CALL(hipFree(ref->d_child_hist));
ref->d_child_hist = NULL;
#endif
}
//loads a tree and text for [begin, end) in the reference
void loadReference(MatchContext* ctx) {
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
Reference* ref = ctx->ref;
ref->bytes_on_board = 0;
loadReferenceTexture(ctx);
if (!ctx->on_cpu) {
char * toboardtimer = createTimer();
startTimer(toboardtimer);
// node texels
ref->bytes_on_board += ref->tex_width * ref->tex_node_height * (sizeof(PixelOfNode));
// children texels
ref->bytes_on_board += ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren);
#if REORDER_TREE
#if NODETEX
hipChannelFormatDesc nodeTextureDesc =
hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned);
CUDA_MALLOC_ARRAY( (hipArray**)(&ref->d_node_tex_array),
&nodeTextureDesc,
ref->tex_width,
ref->tex_node_height );
CUDA_SAFE_CALL( hipMemcpyToArray( (hipArray*)(ref->d_node_tex_array),
0,
0,
ref->h_node_tex_array,
ref->tex_width * ref->tex_node_height * sizeof(PixelOfNode),
hipMemcpyHostToDevice));
nodetex.addressMode[0] = hipAddressModeClamp;
nodetex.addressMode[1] = hipAddressModeClamp;
nodetex.filterMode = hipFilterModePoint;
nodetex.normalized = false; // access with normalized texture coordinates
BIND_TEX_ARRAY(nodetex, (hipArray*)ref->d_node_tex_array,
nodeTextureDesc);
#else
size_t nodepitch;
CUDA_MALLOC_PITCH( (void**)(&ref->d_node_tex_array),
&nodepitch,
ref->tex_width * sizeof(PixelOfNode),
ref->tex_node_height );
CUDA_SAFE_CALL( hipMemcpy2D((ref->d_node_tex_array),
nodepitch,
ref->h_node_tex_array,
nodepitch,
ref->tex_width * sizeof(PixelOfNode),
ref->tex_node_height,
hipMemcpyHostToDevice));
#endif
if (ref->tex_children_height)
{
#if CHILDTEX
hipChannelFormatDesc childrenTextureDesc =
hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned);
CUDA_MALLOC_ARRAY( (hipArray**)(&ref->d_children_tex_array),
&childrenTextureDesc,
ref->tex_width,
ref->tex_children_height );
CUDA_SAFE_CALL( hipMemcpyToArray((hipArray*)(ref->d_children_tex_array),
0,
0,
ref->h_children_tex_array,
ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren),
hipMemcpyHostToDevice));
childrentex.addressMode[0] = hipAddressModeClamp;
childrentex.addressMode[1] = hipAddressModeClamp;
childrentex.filterMode = hipFilterModePoint;
childrentex.normalized = false; // access with normalized texture coordinates
BIND_TEX_ARRAY(childrentex, (hipArray*)(ref->d_children_tex_array),
childrenTextureDesc);
#else
size_t childpitch;
CUDA_MALLOC_PITCH( (void**)(&ref->d_children_tex_array),
&childpitch,
ref->tex_width * sizeof(PixelOfChildren),
ref->tex_children_height );
CUDA_SAFE_CALL( hipMemcpy2D((ref->d_children_tex_array),
childpitch,
ref->h_children_tex_array,
childpitch,
ref->tex_width * sizeof(PixelOfNode),
ref->tex_children_height,
hipMemcpyHostToDevice));
#endif
}
#if TREE_ACCESS_HISTOGRAM
// node hist
ref->bytes_on_board += ref->tex_width * ref->tex_node_height * sizeof(int);
CUDA_MALLOC( (void**)(&ref->d_node_hist),
ref->tex_width * ref->tex_node_height *sizeof(int));
CUDA_SAFE_CALL( hipMemset((ref->d_node_hist),0,
ref->tex_width * ref->tex_node_height * sizeof(int)));
if (ref->tex_children_height)
{
// children hist
ref->bytes_on_board += ref->tex_width * ref->tex_children_height * sizeof(int);
fprintf(stderr, "after child_hist ref->bytes_on_board:%ld\n", ref->bytes_on_board);
CUDA_MALLOC( (void**)(&ref->d_child_hist),
ref->tex_width * ref->tex_children_height *sizeof(int));
CUDA_SAFE_CALL( hipMemset((ref->d_child_hist),0,
ref->tex_width * ref->tex_children_height * sizeof(int)));
}
#endif
#else // NO TREE REORDERING
// Node tex, 1-dimensional
CUDA_MALLOC( (void**)(&ref->d_node_tex_array),
ref->tex_node_height * sizeof(PixelOfNode));
CUDA_SAFE_CALL( hipMemcpy( (ref->d_node_tex_array),
ref->h_node_tex_array,
ref->tex_node_height * sizeof(PixelOfNode),
hipMemcpyHostToDevice));
#if NODETEX
hipChannelFormatDesc nodeTextureDesc =
hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned);
nodetex.addressMode[0] = hipAddressModeClamp;
nodetex.filterMode = hipFilterModePoint;
nodetex.normalized = false; // access with normalized texture coordinates
BIND_TEX(0, nodetex, (void*)(ref->d_node_tex_array), nodeTextureDesc,
ref->tex_node_height* sizeof(PixelOfNode));
#endif
if (ref->tex_children_height)
{
// Child tex, 1-dimensional
CUDA_MALLOC( (void**)(&ref->d_children_tex_array),
ref->tex_children_height * sizeof(PixelOfChildren));
CUDA_SAFE_CALL( hipMemcpy( (ref->d_children_tex_array),
ref->h_children_tex_array,
ref->tex_children_height * sizeof(PixelOfChildren),
hipMemcpyHostToDevice));
#if CHILDTEX
hipChannelFormatDesc childTextureDesc =
hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned);
childrentex.addressMode[0] = hipAddressModeClamp;
childrentex.filterMode = hipFilterModePoint;
childrentex.normalized = false; // access with normalized texture coordinates
BIND_TEX(0, childrentex, (void*)(ref->d_children_tex_array),
childTextureDesc, ref->tex_children_height* sizeof(PixelOfChildren));
#endif
}
#if TREE_ACCESS_HISTOGRAM
ref->bytes_on_board += ref->tex_node_height * sizeof(int);
CUDA_MALLOC( (void**)(&ref->d_node_hist),
ref->tex_node_height *sizeof(int));
CUDA_SAFE_CALL( hipMemset((ref->d_node_hist),0,
ref->tex_node_height * sizeof(int)));
if (ref->tex_children_height)
{
ref->bytes_on_board += ref->tex_children_height * sizeof(int);
CUDA_MALLOC( (void**)(&ref->d_child_hist),
ref->tex_children_height *sizeof(int));
CUDA_SAFE_CALL( hipMemset((ref->d_child_hist),0,
ref->tex_children_height * sizeof(int)));
}
#endif
#endif
#if TWO_LEVEL_NODE_TREE
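// Stage the first NODE_THRESH nodes into the device-side node_tree_top symbol: a small
// top-of-tree cache so the hottest (near-root) nodes need not go through the node texture.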
PixelOfNode node_buf[NODE_THRESH];
memset(node_buf, 0, sizeof(node_buf));
for (unsigned int i = 0; (i < NODE_THRESH) && (i < ref->num_nodes); ++i)
{
TextureAddress myaddress(id2addr(i));
#if MERGETEX && REORDER_TREE
myaddress.x &= 0x7FF;
myaddress.x *= 2;
int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION;
node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[loc];
#elif REORDER_TREE
int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION;
node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[loc];
#elif MERGETEX
node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[myaddress.x*2];
#else
node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[myaddress.x];
#endif
}
CUDA_SAFE_CALL( hipMemcpyToSymbol(node_tree_top, node_buf, sizeof(node_buf)));
#endif
#if TWO_LEVEL_CHILD_TREE
PixelOfChildren child_buf[CHILD_THRESH];
memset(child_buf, 0, sizeof(child_buf));
for (unsigned int i = 0; (i < CHILD_THRESH) && (i < ref->num_nodes); ++i)
{
TextureAddress myaddress(id2addr(i));
#if MERGETEX && REORDER_TREE
myaddress.x &= 0x7FF;
myaddress.x *= 2;
int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION;
child_buf[i]= ((PixelOfChildren*)(ref->h_node_tex_array))[loc+1];
#elif REORDER_TREE
int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION;
child_buf[i]= ((PixelOfChildren*)(ref->h_children_tex_array))[loc];
#elif MERGETEX
child_buf[i]= ((PixelOfChildren*)(ref->h_node_tex_array))[myaddress.x*2+1];
#else
child_buf[i]= ((PixelOfChildren*)(ref->h_children_tex_array))[myaddress.x];
#endif
}
CUDA_SAFE_CALL( hipMemcpyToSymbol(child_tree_top, child_buf, sizeof(child_buf)));
#endif
stopTimer(toboardtimer);
ctx->statistics.t_tree_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
fprintf(stderr, "done\n");
}
else {
ref->d_node_tex_array = NULL;
ref->d_children_tex_array = NULL;
}
}
void dumpQueryBlockInfo(QuerySet* queries)
{
fprintf(stderr, "\tProcessing queries %s to %s\n",
queries->h_names[0],
queries->h_names[queries->count-1]);
}
void loadQueries(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
QuerySet* queries = ctx->queries;
queries->bytes_on_board = 0;
unsigned int numQueries = queries->count;
if (!ctx->on_cpu) {
fprintf(stderr, "Allocating device memory for queries... ");
char* toboardtimer = createTimer();
startTimer(toboardtimer);
dumpQueryBlockInfo(queries);
CUDA_MALLOC((void**) &queries->d_tex_array, queries->texlen);
queries->bytes_on_board += queries->texlen;
CUDA_SAFE_CALL( hipMemcpy((void*) queries->d_tex_array,
queries->h_tex_array + queries->h_addrs_tex_array[0],
queries->texlen,
hipMemcpyHostToDevice));
#if QRYTEX
qrytex.addressMode[0] = hipAddressModeClamp;
qrytex.filterMode = hipFilterModePoint;
qrytex.normalized = false; // access with normalized texture coordinates
hipChannelFormatDesc qryDesc =
hipCreateChannelDesc(8,0,0,0, hipChannelFormatKindUnsigned);
BIND_TEX(0, qrytex, (void*)(queries->d_tex_array), qryDesc,
queries->texlen);
#endif
CUDA_MALLOC((void**) &queries->d_addrs_tex_array,
numQueries * sizeof(int));
queries->bytes_on_board += numQueries * sizeof(int);
CUDA_SAFE_CALL( hipMemcpy((void*) queries->d_addrs_tex_array,
queries->h_addrs_tex_array,
numQueries * sizeof(int),
hipMemcpyHostToDevice));
CUDA_MALLOC((void**) &queries->d_lengths_array,
numQueries * sizeof(int));
queries->bytes_on_board += numQueries * sizeof(int);
CUDA_SAFE_CALL( hipMemcpy((void*) queries->d_lengths_array,
queries->h_lengths_array,
numQueries * sizeof(int),
hipMemcpyHostToDevice));
stopTimer(toboardtimer);
ctx->statistics.t_queries_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
fprintf(stderr, "\tallocated %ld bytes\n", queries->bytes_on_board);
}
else {
queries->d_addrs_tex_array = NULL;
queries->d_tex_array = NULL;
queries->d_lengths_array = NULL;
fprintf(stderr, " allocated %ld bytes\n", 2 * numQueries*sizeof(int) + queries->texlen);
}
}
void unloadQueries(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
QuerySet* queries = ctx->queries;
CUDA_SAFE_CALL(hipFree(queries->d_tex_array));
queries->d_tex_array = NULL;
CUDA_SAFE_CALL(hipFree(queries->d_addrs_tex_array));
queries->d_addrs_tex_array = NULL;
CUDA_SAFE_CALL(hipFree(queries->d_lengths_array));
queries->d_lengths_array = NULL;
queries->bytes_on_board = 0;
}
// Computes the location of the first MatchCoord for a given query. NOTE:
// Do NOT use this function if COALESCED_QUERIES == 1
inline int match_coord_addrs(int qryid, int qry_addrs, int match_length)
{
return qry_addrs - qryid * (match_length + 1);
}
// Construct the offset table for a set of queries. This table will be used
// by the printing functions, and if COALESCED_QUERIES == 1, by the matching
// kernel.
void buildCoordOffsetArray(MatchContext* ctx,
int** h_coord_offset_array,
unsigned int* num_coords)
{
int numCoords = 0;
int match_length = ctx->min_match_length;
int numQueries = ctx->queries->count;
int* lengths = ctx->queries->h_lengths_array;
int* coord_offsets = (int*)calloc(numQueries, sizeof(int));
#if COALESCED_QUERIES
for (unsigned int i = 0; i < numQueries; i += WARP_SIZE)
{
// Every query in this warp will need at least this many coords
int max_num_coords = 0;
for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j)
{
int num_coords = lengths[i + j] - match_length + 1;
if ( max_num_coords < num_coords)
max_num_coords = num_coords;
}
unsigned int block_size = max_num_coords * WARP_SIZE;
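// Each warp reserves a block of max_num_coords * WARP_SIZE result slots; the j-th query of the
// warp is assigned starting offset numCoords + j within that block.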
for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j)
{
ctx->results.h_coord_tex_array[i + j] = numCoords + j;
}
numCoords += block_size;
}
#else
for (unsigned int i = 0; i < numQueries; ++i)
{
int qryoffset = ctx->queries->h_addrs_tex_array[i];
coord_offsets[i] = match_coord_addrs(i, qryoffset, match_length);
}
if (numQueries > 0)
{
unsigned int last_qry = numQueries - 1;
unsigned int last_qry_len = lengths[last_qry] - match_length + 1;
numCoords = coord_offsets[last_qry] + last_qry_len;
fprintf(stderr, "Need %d match coords for this result array\n",
numCoords);
}
#endif
*num_coords = numCoords;
*h_coord_offset_array = coord_offsets;
}
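// Layout note (descriptive of the code above and of coordForQueryChar below):
// without COALESCED_QUERIES, query i owns the contiguous coord range
// [coord_offsets[i], coord_offsets[i] + lengths[i] - match_length + 1); with
// COALESCED_QUERIES, each warp of WARP_SIZE queries shares a block of
// max_num_coords * WARP_SIZE coords, and query j of the warp starts at the
// block base + j with its successive coords strided by WARP_SIZE.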
void loadResultBuffer(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
unsigned int numQueries = ctx->queries->count;
assert (numQueries);
char* offsettimer = createTimer();
startTimer(offsettimer);
buildCoordOffsetArray(ctx,
&(ctx->results.h_coord_tex_array),
&(ctx->results.numCoords));
stopTimer(offsettimer);
ctx->statistics.t_build_coord_offsets += getTimerValue(offsettimer);
deleteTimer(offsettimer);
unsigned int numCoords = ctx->results.numCoords;
fprintf(stderr, "Allocating result array for %d queries (%d bytes) ...",
numQueries, numCoords*sizeof(MatchCoord) );
size_t boardFreeMemory = 0;
size_t total_mem = 0;
boardMemory(&boardFreeMemory, &total_mem);
fprintf(stderr,"board free memory: %u total memory: %u\n",
boardFreeMemory, total_mem);
ctx->results.h_match_coords = (MatchCoord*) calloc( numCoords, sizeof(MatchCoord));
if (ctx->results.h_match_coords == NULL)
{
trap_dbg();
exit(EXIT_FAILURE);
}
if (!ctx->on_cpu) {
char* toboardtimer = createTimer();
startTimer(toboardtimer);
ctx->results.bytes_on_board = 0;
CUDA_MALLOC( (void**) &ctx->results.d_match_coords,
numCoords * sizeof(MatchCoord));
ctx->results.bytes_on_board += numCoords * sizeof(MatchCoord);
CUDA_SAFE_CALL( hipMemset( (void*)ctx->results.d_match_coords, 0,
numCoords * sizeof(MatchCoord)));
#if COALESCED_QUERIES
CUDA_MALLOC((void**) &ctx->results.d_coord_tex_array,
numQueries * sizeof(int));
ctx->results.bytes_on_board += numQueries * sizeof(int);
CUDA_SAFE_CALL( hipMemcpy((void*) ctx->results.d_coord_tex_array,
ctx->results.h_coord_tex_array,
numQueries * sizeof(int),
hipMemcpyHostToDevice));
#endif
stopTimer(toboardtimer);
ctx->statistics.t_match_coords_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
}
else {
ctx->results.d_match_coords = NULL;
}
fprintf(stderr, "done\n");
}
void unloadResultBuffer(MatchContext* ctx) {
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
CUDA_SAFE_CALL(hipFree(ctx->results.d_match_coords));
ctx->results.d_match_coords = NULL;
ctx->results.bytes_on_board = 0;
#if COALESCED_QUERIES
// Free the per-query coord offset table allocated in loadResultBuffer.
CUDA_SAFE_CALL(hipFree(ctx->results.d_coord_tex_array));
ctx->results.d_coord_tex_array = NULL;
#endif
}
void transferResultsFromDevice(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
if (!ctx->on_cpu)
{
char* fromboardtimer = createTimer();
startTimer(fromboardtimer);
CUDA_SAFE_CALL(hipMemcpy(ctx->results.h_match_coords,
ctx->results.d_match_coords,
ctx->results.numCoords * sizeof(MatchCoord),
hipMemcpyDeviceToHost) );
#if TREE_ACCESS_HISTOGRAM
CUDA_SAFE_CALL(hipMemcpy(ctx->ref->h_node_hist,
ctx->ref->d_node_hist,
ctx->ref->tex_node_height * ctx->ref->tex_width * sizeof(int),
hipMemcpyDeviceToHost) );
CUDA_SAFE_CALL(hipMemcpy(ctx->ref->h_child_hist,
ctx->ref->d_child_hist,
ctx->ref->tex_children_height * ctx->ref->tex_width * sizeof(int),
hipMemcpyDeviceToHost) );
if (ctx->statistics.node_hist_size < ctx->ref->tex_width * ctx->ref->tex_node_height)
{
int* temp = (int*)calloc(ctx->ref->tex_width * ctx->ref->tex_node_height, sizeof(int));
if (ctx->statistics.node_hist_size)
memcpy(temp, ctx->statistics.node_hist, ctx->statistics.node_hist_size * sizeof(int));
ctx->statistics.node_hist = temp;
ctx->statistics.node_hist_size = ctx->ref->tex_width * ctx->ref->tex_node_height;
}
if (ctx->statistics.child_hist_size < ctx->ref->tex_width * ctx->ref->tex_children_height)
{
int* temp = (int*)calloc(ctx->ref->tex_width * ctx->ref->tex_children_height, sizeof(int));
if (ctx->statistics.child_hist_size)
memcpy(temp, ctx->statistics.child_hist, ctx->statistics.child_hist_size * sizeof(int));
ctx->statistics.child_hist = temp;
ctx->statistics.child_hist_size = ctx->ref->tex_width * ctx->ref->tex_children_height;
}
for (unsigned int i = 0; i < ctx->statistics.node_hist_size; ++i)
{
ctx->statistics.node_hist[i] += ctx->ref->h_node_hist[i];
}
for (unsigned int i = 0; i < ctx->statistics.child_hist_size; ++i)
{
ctx->statistics.child_hist[i] += ctx->ref->h_child_hist[i];
}
#endif
stopTimer(fromboardtimer);
ctx->statistics.t_match_coords_from_board += getTimerValue(fromboardtimer);
deleteTimer(fromboardtimer);
}
}
int flushOutput();
int addToBuffer(char* string);
char numbuffer[32];
MatchCoord* coordForQueryChar(MatchContext* ctx,
unsigned int qryid,
unsigned int qrychar)
{
MatchResults* results = &(ctx->results);
MatchCoord* coords = results->h_match_coords;
#if COALESCED_QUERIES
return coords + results->h_coord_tex_array[qryid] + qrychar * WARP_SIZE;
#else
return coords + results->h_coord_tex_array[qryid] + qrychar;
#endif
}
void coordsToPrintBuffers(MatchContext* ctx,
ReferencePage* page,
MatchInfo** matches,
Alignment** alignments,
unsigned int mem_avail,
unsigned int* coord_idx,
unsigned int* match_idx,
unsigned int* align_idx,
unsigned int* nextqry,
unsigned int* nextqrychar)
{
unsigned int numQueries = ctx->queries->count;
int match_length = ctx->min_match_length;
unsigned int cidx = *coord_idx;
unsigned int midx = 0;
unsigned int numCoords = ctx->results.numCoords;
unsigned int numMatches = 0;
unsigned int numAlignments = 0;
int DEBUG = 0;
if (DEBUG && cidx == 0)
{
for (int j = 0; j < numCoords; ++j)
{
MatchCoord * coord = ctx->results.h_match_coords+j;
if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK))
{
//fprintf(stdout, "node: %d\n",
// coord->node);
fprintf(stdout, "node: %d leaves:%d\n",
coord->node.data, lookupNumLeaves(page, coord->node));
}
}
exit(0);
}
// How much can we fit into mem_avail?
for (int j = cidx; j < numCoords; ++j)
{
MatchCoord* coord = ctx->results.h_match_coords + j;
int queryAlignments = 0;
int queryMatches = 0;
if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK))
{
int numLeaves = lookupNumLeaves(page, coord->node);
queryAlignments += numLeaves;
queryMatches++;
}
int allMatches = numMatches + queryMatches;
int allAlignments = numAlignments + queryAlignments;
int neededSize = allMatches * sizeof(MatchInfo) + allAlignments * sizeof(Alignment);
if (neededSize > mem_avail || (allMatches/BLOCKSIZE) >= MAX_GRID_DIMENSION)
{
// adding this match won't fit on the board
break;
}
++cidx;
numMatches = allMatches;
numAlignments = allAlignments;
}
MatchInfo* M = (MatchInfo*)calloc(numMatches, sizeof(MatchInfo));
unsigned int alignmentOffset = 0;
int qry = *nextqry;
int qrychar = *nextqrychar;
bool set_full = false;
while (qry < numQueries)
{
// h_lengths_array doesn't count the 'q' at the beginning of each query
int qlen = ctx->queries->h_lengths_array[qry] + 1 - match_length;
while (qrychar < qlen)
{
if (midx >= numMatches)
{
set_full = true;
break;
}
MatchCoord* coord = coordForQueryChar(ctx, qry, qrychar);
if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK))
{
MatchInfo m;
m.resultsoffset = alignmentOffset;
m.qrystartpos = qrychar;
m.matchnode = coord->node;
m.edgematch = coord->edge_match_length;
m.numLeaves = lookupNumLeaves(page, m.matchnode);
m.queryid = qry;
alignmentOffset += m.numLeaves;
M[midx++] = m;
}
++qrychar;
}
if (set_full)
break;
++qry;
qrychar = 0;
}
*coord_idx = cidx;
*match_idx = midx;
*align_idx = alignmentOffset;
*matches = M;
*nextqry = qry;
*nextqrychar = qrychar;
fprintf(stderr, "Allocing %d bytes of host memory for %d alignments\n", alignmentOffset * sizeof(Alignment), numAlignments);
*alignments = (struct Alignment *) calloc(alignmentOffset, sizeof(Alignment));
//hipHostMalloc((void**)alignments, numAlignments * sizeof(Alignment));
}
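// Batching note: the coord/qry/qrychar cursors advance only as far as
// mem_avail and the MAX_GRID_DIMENSION cap allow, so getExactAlignments can
// call this function repeatedly and print the results one device-sized round
// at a time.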
void runPrintKernel(MatchContext* ctx,
ReferencePage* page,
MatchInfo* h_matches,
unsigned int numMatches,
Alignment* alignments,
unsigned int numAlignments)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
MatchInfo* d_matches;
size_t matchesSize = numMatches * sizeof(MatchInfo);
CUDA_MALLOC((void**) &d_matches, matchesSize);
struct Alignment * d_alignments;
size_t alignmentSize = numAlignments * sizeof(Alignment);
CUDA_MALLOC((void**) &d_alignments, alignmentSize);
CUDA_SAFE_CALL(hipMemset((void*) d_alignments, 0, alignmentSize));
char* atimer = createTimer();
startTimer(atimer);
// Copy matches to card
fprintf(stderr, "prepared %d matches %d alignments\n", numMatches, numAlignments);
fprintf(stderr, "Copying %d bytes to host memory for %d alignments\n", numAlignments * sizeof(Alignment), numAlignments);
int DEBUG = 0;
if (DEBUG)
{
for (int i = 0; i < numMatches; i++)
{
printf("m[%d]:\t%d\t%d\t%d\t%d\t%d\t%d\n",
i,
h_matches[i].resultsoffset,
h_matches[i].queryid,
h_matches[i].matchnode.data,
h_matches[i].numLeaves,
h_matches[i].edgematch,
h_matches[i].qrystartpos);
}
exit(0);
}
CUDA_SAFE_CALL(hipMemcpy(d_matches, h_matches, matchesSize, hipMemcpyHostToDevice));
stopTimer(atimer);
float mtime = getTimerValue(atimer);
// Launch the kernel
int blocksize = (numMatches > BLOCKSIZE) ? BLOCKSIZE : numMatches;
dim3 dimBlock(blocksize, 1, 1);
dim3 dimGrid(ceil(numMatches / (float)BLOCKSIZE), 1, 1);
fprintf(stderr, " Calling print kernel... ");
{
gloop::Statistics::Scope<gloop::Statistics::Type::Kernel> scope;
hipLaunchKernelGGL(( printKernel) , dim3(dimGrid), dim3(dimBlock), 0 , 0, d_matches,
numMatches,
d_alignments,
#if COALESCED_QUERIES
ctx->results.d_coord_tex_array,
#endif
#if !QRYTEX
#if COALESCED_QUERIES
(int*)
#endif
ctx->queries->d_tex_array,
#endif
#if !NODETEX
(_PixelOfNode*)ctx->ref->d_node_tex_array,
#endif
#if !CHILDTEX
(_PixelOfChildren*)ctx->ref->d_children_tex_array,
#endif
ctx->queries->d_addrs_tex_array,
ctx->queries->d_lengths_array,
page->begin,
page->end,
page->shadow_left,
page->shadow_right,
ctx->min_match_length
#if TREE_ACCESS_HISTOGRAM
, ctx->ref->d_node_hist,
ctx->ref->d_child_hist
#endif
);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if ( hipSuccess != err)
{
fprintf(stderr, "Kernel execution failed: %s.\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
startTimer(atimer);
// Copy the results back to the host
CUDA_SAFE_CALL(hipMemcpy((void*)alignments,
(void*)d_alignments,
alignmentSize,
hipMemcpyDeviceToHost));
hipDeviceSynchronize();
stopTimer(atimer);
float atime = getTimerValue(atimer);
fprintf(stderr, "memcpy time= %f\n", atime + mtime);
deleteTimer(atimer);
// Cleanup
CUDA_SAFE_CALL(hipFree(d_alignments));
CUDA_SAFE_CALL(hipFree(d_matches));
}
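// Grid sizing note: the print kernel runs one thread per MatchInfo in
// ceil(numMatches / BLOCKSIZE) blocks; coordsToPrintBuffers already limits a
// round to fewer than MAX_GRID_DIMENSION * BLOCKSIZE matches, so this always
// fits in a single launch.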
// TODO: need reverse-complement printing support
void runPrintOnCPU(MatchContext* ctx, ReferencePage* page,
MatchInfo* h_matches,
unsigned int numMatches,
Alignment* alignments,
unsigned int numAlignments)
{
unsigned int min_match_length = ctx->min_match_length;
int* addrs = ctx->queries->h_addrs_tex_array;
int* lengths = ctx->queries->h_lengths_array;
char* qrychars = ctx->queries->h_tex_array;
if (!numMatches)
return;
int qry = -1;
unsigned int qrylen;
for (int i = 0; i < numMatches; ++i)
{
MatchInfo& match = h_matches[i];
if (match.queryid != qry)
{
qry = match.queryid;
qrylen = lengths[qry];
}
if (!(match.edgematch & FRMASK))
{
printAlignments(page,
alignments + match.resultsoffset,
#if COALESCED_QUERIES
qrychars + sizeof(int) * addrs[qry],
#else
qrychars + addrs[qry],
#endif
qrylen,
match.matchnode,
match.qrystartpos,
match.edgematch,
min_match_length,
0,
ctx->forwardcoordinates);
}
}
}
int addMatchToBuffer(int left_in_ref, int qrypos, int matchlen);
void getExactAlignments(MatchContext * ctx, ReferencePage * page, bool on_cpu)
{
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
assert(!ctx->reverse && !ctx->forwardreverse);
size_t boardFreeMemory;
size_t total_mem;
if (!on_cpu)
{
boardMemory(&boardFreeMemory, &total_mem);
fprintf(stderr, "board free memory: %u total memory: %u\n",
boardFreeMemory, total_mem);
}
else
{
boardFreeMemory = 256 * 1024 * 1024;
total_mem = boardFreeMemory;
}
#ifdef __DEVICE_EMULATION__
boardFreeMemory = 512 * 1024 * 1024;
#endif
boardFreeMemory -= BREATHING_ROOM;
fprintf(stderr, "board free memory: %u\n", boardFreeMemory);
int rTotalMatches = 0;
int rTotalAlignments = 0;
int totalRounds = 0;
unsigned int last_coord = ctx->results.numCoords;
unsigned int next_coord = 0;
unsigned int nextqry = 0;
unsigned int nextqrychar = 0;
int lastqry = -1;
while (next_coord < last_coord)
{
// see how many queries will fit on the board
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
totalRounds++;
unsigned int numMatches = 0;
unsigned int numAlignments = 0;
MatchInfo* h_matches = NULL;
Alignment* h_alignments = NULL;
int coord_left = next_coord;
char* btimer = createTimer();
startTimer(btimer);
coordsToPrintBuffers(ctx, page, &h_matches, &h_alignments, boardFreeMemory,
&next_coord, &numMatches, &numAlignments, &nextqry, &nextqrychar);
stopTimer(btimer);
float btime = getTimerValue(btimer);
ctx->statistics.t_coords_to_buffers += btime;
fprintf(stderr, "buffer prep time= %f\n", btime);
deleteTimer(btimer);
fprintf(stderr, "Round %d: Printing results for match coords [%d-%d) of %d using %d matches and %d alignments\n",
totalRounds, coord_left, next_coord, last_coord, numMatches, numAlignments);
if (numMatches == 0)
continue;
char buf[256];
//assert(qryend > qrystart);
rTotalAlignments += numAlignments;
rTotalMatches += numMatches;
if (num_bind_tex_calls > 100)
{
hipDeviceReset();
num_bind_tex_calls = 0;
loadReference(ctx);
loadQueries(ctx);
}
{
char* ktimer = createTimer();
startTimer(ktimer);
if (on_cpu)
{
runPrintOnCPU(ctx, page, h_matches, numMatches,
h_alignments, numAlignments);
}
else
{
runPrintKernel(ctx, page, h_matches, numMatches,
h_alignments, numAlignments);
}
stopTimer(ktimer);
float ktime = getTimerValue(ktimer);
ctx->statistics.t_print_kernel += ktime;
fprintf(stderr, "print kernel time= %f\n", ktime);
deleteTimer(ktimer);
}
// char* stimer = createTimer();
// startTimer(stimer);
// mapQueriesEndToEnd(ctx,
// page,
// h_matches,
// numMatches,
// h_alignments,
// numAlignments);
//
// stopTimer(stimer);
//
// float stime = getTimerValue(stimer);
// fprintf(stderr, "postprocess time= %f\n", stime);
// deleteTimer(stimer);
//flushOutput();
//Process the alignments
{
gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope;
char* otimer = createTimer();
startTimer(otimer);
for (int m = 0; m < numMatches; m++)
{
int base = h_matches[m].resultsoffset;
for (int i = 0; i < h_matches[m].numLeaves; i++)
{
// See if there are any more left maximal alignments for this match
if (h_alignments[base+i].left_in_ref == 0)
{
break;
}
if (h_matches[m].queryid != lastqry)
{
lastqry = h_matches[m].queryid;
addToBuffer("> ");
addToBuffer(*(ctx->queries->h_names + lastqry));
addToBuffer("\n");
}
sprintf(buf, "%d\t%d\t%d\n",
h_alignments[base+i].left_in_ref,
h_matches[m].qrystartpos + 1,
h_alignments[base+i].matchlen);
addToBuffer(buf);
// addMatchToBuffer(h_alignments[base+i].left_in_ref,
// h_matches[m].qrystartpos + 1,
// h_alignments[base+i].matchlen);
}
}
flushOutput();
stopTimer(otimer);
ctx->statistics.t_results_to_disk += getTimerValue(otimer);
deleteTimer(otimer);
free(h_matches);
free(h_alignments);
//hipHostFree((void*)h_alignments);
}
}
free(ctx->results.h_coord_tex_array);
free(ctx->results.h_match_coords);
ctx->results.h_coord_tex_array = NULL;
ctx->results.h_match_coords = NULL;
fprintf(stderr, "Finished processing %d matches and %d potential alignments in %d rounds\n",
rTotalMatches, rTotalAlignments, totalRounds);
}
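// Round structure (descriptive): each iteration of the loop above packs as
// many match coords as fit in the remaining board memory, runs the print
// kernel (or the CPU fallback), then streams the resulting alignments into the
// output buffer before moving on to the next slice of coords.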
int getQueryBlock(MatchContext* ctx, size_t device_mem_avail)
{
gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope;
QuerySet* queries = ctx->queries;
char * queryTex = NULL;
int* queryAddrs = NULL;
int* queryLengths = NULL;
unsigned int numQueries;
unsigned int num_match_coords;
size_t queryLen;
char** names;
fprintf(stderr, "Loading query block... ");
char* queryreadtimer = createTimer();
startTimer(queryreadtimer);
getQueriesTexture(queries->qfile,
&queryTex,
&queryLen,
&queryAddrs,
&names,
&queryLengths,
&numQueries,
&num_match_coords,
device_mem_avail,
ctx->min_match_length,
ctx->reverse || ctx->forwardreverse);
stopTimer(queryreadtimer);
ctx->statistics.t_queries_from_disk += getTimerValue(queryreadtimer);
deleteTimer(queryreadtimer);
queries->h_tex_array = queryTex;
queries->count = numQueries;
queries->h_addrs_tex_array = queryAddrs;
queries->texlen = queryLen;
queries->h_names = names;
queries->h_lengths_array = queryLengths;
ctx->results.numCoords = num_match_coords;
fprintf(stderr, "done.\n");
return numQueries;
}
void destroyQueryBlock(QuerySet* queries)
{
free(queries->h_tex_array);
queries->h_tex_array = NULL;
for (int i = 0; i < queries->count; ++i)
free(queries->h_names[i]);
free(queries->h_names);
queries->count = 0;
queries->texlen = 0;
free(queries->h_addrs_tex_array);
queries->h_addrs_tex_array = NULL;
free(queries->h_lengths_array);
queries->h_lengths_array = NULL;
}
void resetStats(Statistics* stats)
{
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
stats->t_end_to_end = 0.0;
stats->t_match_kernel = 0.0;
stats->t_print_kernel = 0.0;
stats->t_queries_to_board = 0.0;
stats->t_match_coords_to_board = 0.0;
stats->t_match_coords_from_board = 0.0;
stats->t_tree_to_board = 0.0;
stats->t_ref_str_to_board = 0.0;
stats->t_queries_from_disk = 0.0;
stats->t_ref_from_disk = 0.0;
stats->t_results_to_disk = 0.0;
stats->t_tree_construction = 0.0;
stats->t_tree_reorder = 0.0;
stats->t_tree_flatten = 0.0;
stats->t_reorder_ref_str = 0.0;
stats->t_build_coord_offsets = 0.0;
stats->t_coords_to_buffers = 0.0;
stats->bp_avg_query_length = 0.0;
#if TREE_ACCESS_HISTOGRAM
if (stats->node_hist_size)
{
free(stats->node_hist);
stats->node_hist = NULL;
stats->node_hist_size = 0;
}
if (stats->child_hist_size)
{
free(stats->child_hist);
stats->child_hist = NULL;
stats->child_hist_size = 0;
}
#endif
}
void writeStatisticsFile(Statistics* stats,
char* stats_filename,
char* node_hist_filename = NULL,
char* child_hist_filename = NULL)
{
gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope;
if (stats_filename)
{
FILE* f = fopen(stats_filename, "w");
if (!f)
{
fprintf(stderr, "WARNING: could not open %s for writing\n", stats_filename);
}
else
{
fprintf(f, "Q");
fprintf(f, ",R");
fprintf(f, ",T");
fprintf(f, ",m");
fprintf(f, ",r");
fprintf(f, ",t");
fprintf(f, ",n");
fprintf(f, ",Total");
fprintf(f, ",Match kernel");
fprintf(f, ",Print Kernel");
fprintf(f, ",Queries to board");
fprintf(f, ",Match coords to board");
fprintf(f, ",Match coords from board");
fprintf(f, ",Tree to board");
fprintf(f, ",Ref str to board");
fprintf(f, ",Queries from disk");
fprintf(f, ",Ref from disk");
fprintf(f, ",Output to disk");
fprintf(f, ",Tree construction");
fprintf(f, ",Tree reorder");
fprintf(f, ",Tree flatten");
fprintf(f, ",Ref reorder");
fprintf(f, ",Build coord table");
fprintf(f, ",Coords to buffers");
fprintf(f, ",Avg qry length");
fprintf(f, "\n");
fprintf(f, "%d", QRYTEX);
fprintf(f, ",%d", REFTEX);
fprintf(f, ",%d", TREETEX);
fprintf(f, ",%d", MERGETEX);
fprintf(f, ",%d", REORDER_REF);
fprintf(f, ",%d", REORDER_TREE);
fprintf(f, ",%d", RENUMBER_TREE);
fprintf(f, ",%f", stats->t_end_to_end);
fprintf(f, ",%f", stats->t_match_kernel);
fprintf(f, ",%f", stats->t_print_kernel);
fprintf(f, ",%f", stats->t_queries_to_board);
fprintf(f, ",%f", stats->t_match_coords_to_board);
fprintf(f, ",%f", stats->t_match_coords_from_board);
fprintf(f, ",%f", stats->t_tree_to_board);
fprintf(f, ",%f", stats->t_ref_str_to_board);
fprintf(f, ",%f", stats->t_queries_from_disk);
fprintf(f, ",%f", stats->t_ref_from_disk);
fprintf(f, ",%f", stats->t_results_to_disk);
fprintf(f, ",%f", stats->t_tree_construction);
fprintf(f, ",%f", stats->t_tree_reorder);
fprintf(f, ",%f", stats->t_tree_flatten);
fprintf(f, ",%f", stats->t_reorder_ref_str);
fprintf(f, ",%f", stats->t_build_coord_offsets);
fprintf(f, ",%f", stats->t_coords_to_buffers);
fprintf(f, ",%f", stats->bp_avg_query_length);
fprintf(f,"\n");
fclose(f);
}
}
#if TREE_ACCESS_HISTOGRAM
if (node_hist_filename)
{
FILE* f = fopen(node_hist_filename, "w");
if (!f)
{
fprintf(stderr, "WARNING: could not open %s for writing\n", node_hist_filename);
}
else
{
for (unsigned int i = 0; i < stats->node_hist_size; ++i)
fprintf(f, "%u\t%d\n", i, stats->node_hist[i]);
fclose(f);
}
}
if (child_hist_filename)
{
FILE* f = fopen(child_hist_filename, "w");
if (!f)
{
fprintf(stderr, "WARNING: could not open %s for writing\n", child_hist_filename);
}
else
{
for (unsigned int i = 0; i < stats->child_hist_size; ++i)
fprintf(f, "%u\t%d\n", i, stats->child_hist[i]);
fclose(f);
}
}
float total_node_hits = 0;
float tree_top_node_hits = 0;
float total_child_hits = 0;
float tree_top_child_hits = 0;
for (unsigned int i = 0; i < stats->node_hist_size; ++i)
{
total_node_hits += stats->node_hist[i];
if (i < 256) { tree_top_node_hits += stats->node_hist[i]; }
}
for (unsigned int i = 0; i < stats->child_hist_size; ++i)
{
total_child_hits += stats->child_hist[i];
if (i < 256) { tree_top_child_hits += stats->child_hist[i]; }
}
fprintf(stderr, "Tree top node hits (%d/%d) = %f percent\n",(int)tree_top_node_hits, (int)total_node_hits, tree_top_node_hits /total_node_hits);
fprintf(stderr, "Tree top child hits (%d/%d) = %f percent\n",(int)tree_top_child_hits, (int)total_child_hits, tree_top_child_hits /total_child_hits);
#endif
}
void matchOnCPU(MatchContext* ctx, bool doRC)
{
//TODO: CPU matching is disabled.
if (doRC) {
// Match the reverse complement of the queries to the ref
computeGold(&ctx->results,
ctx->ref->str,
ctx->queries->h_tex_array,
ctx->queries->h_addrs_tex_array,
ctx->queries->h_lengths_array,
(PixelOfNode*)(ctx->ref->h_node_tex_array),
(PixelOfChildren*)(ctx->ref->h_children_tex_array),
ctx->queries->count,
ctx->min_match_length,
REVERSE);
}
else {
computeGold(&ctx->results,
ctx->ref->str,
ctx->queries->h_tex_array,
ctx->queries->h_addrs_tex_array,
ctx->queries->h_lengths_array,
(PixelOfNode*)(ctx->ref->h_node_tex_array),
(PixelOfChildren*)(ctx->ref->h_children_tex_array),
ctx->queries->count,
ctx->min_match_length,
FORWARD);
}
}
void matchOnGPU(MatchContext* ctx, bool doRC)
{
int numQueries = ctx->queries->count;
int blocksize = (numQueries > BLOCKSIZE) ? BLOCKSIZE : numQueries;
dim3 dimBlock(blocksize, 1, 1);
dim3 dimGrid(ceil(numQueries / (float)BLOCKSIZE), 1, 1);
// Match the reverse complement of the queries to the ref
if (doRC) {
//TODO: GPU RC is disabled
hipLaunchKernelGGL(( mummergpuRCKernel) , dim3(dimGrid), dim3(dimBlock), 0 , 0, ctx->results.d_match_coords,
ctx->queries->d_tex_array,
ctx->queries->d_addrs_tex_array,
ctx->queries->d_lengths_array,
numQueries,
ctx->min_match_length);
}
else {
hipLaunchKernelGGL(( mummergpuKernel) , dim3(dimGrid), dim3(dimBlock), 0 , 0, ctx->results.d_match_coords,
#if COALESCED_QUERIES
ctx->results.d_coord_tex_array,
#endif
#if !QRYTEX
#if COALESCED_QUERIES
(int*)
#endif
ctx->queries->d_tex_array,
#endif
#if !NODETEX
(_PixelOfNode*)(ctx->ref->d_node_tex_array),
#endif
#if !CHILDTEX
(_PixelOfChildren*)(ctx->ref->d_children_tex_array),
#endif
#if !REFTEX
(char*)ctx->ref->d_ref_array,
#endif
ctx->queries->d_addrs_tex_array,
ctx->queries->d_lengths_array,
numQueries,
ctx->min_match_length
#if TREE_ACCESS_HISTOGRAM
, ctx->ref->d_node_hist,
ctx->ref->d_child_hist
#endif
);
}
// check if kernel execution generated an error
hipError_t err = hipGetLastError();
if ( hipSuccess != err) {
fprintf(stderr, "Kernel execution failed: %s.\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void getMatchResults(MatchContext* ctx,
unsigned int page_num)
{
transferResultsFromDevice(ctx);
}
void matchQueryBlockToReferencePage(MatchContext* ctx,
ReferencePage* page,
bool reverse_complement)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Kernel> scope;
char* ktimer = createTimer();
fprintf(stderr, "Memory footprint is:\n\tqueries: %d\n\tref: %d\n\tresults: %d\n",
ctx->queries->bytes_on_board,
ctx->ref->bytes_on_board,
ctx->results.bytes_on_board);
startTimer(ktimer);
if (ctx->on_cpu)
{
matchOnCPU(ctx, reverse_complement);
}
else
{
matchOnGPU(ctx, reverse_complement);
hipDeviceSynchronize();
}
stopTimer(ktimer);
float ktime = getTimerValue(ktimer);
ctx->statistics.t_match_kernel += ktime;
fprintf(stderr, "match kernel time= %f\n", ktime);
deleteTimer(ktimer);
getMatchResults(ctx, page->id);
unloadResultBuffer(ctx);
}
int matchSubset(MatchContext* ctx,
ReferencePage* page)
{
loadQueries(ctx);
fprintf(stderr,
"Matching queries %s - %s against ref coords %d - %d\n",
ctx->queries->h_names[0],
ctx->queries->h_names[ctx->queries->count - 1],
page->begin,
page->end);
loadResultBuffer(ctx);
// TODO: re-enable RC support by calling this twice with the reverse/fwdreverse
// idiom.
matchQueryBlockToReferencePage(ctx, page, false);
if (USE_PRINT_KERNEL && !ctx->on_cpu)
{
getExactAlignments(ctx, page, false);
}
else
{
getExactAlignments(ctx, page, true);
}
flushOutput();
unloadQueries(ctx);
return 0;
}
int getFreeDeviceMemory(bool on_cpu)
{
size_t free_mem = 0;
size_t total_mem = 0;
// We have to 'prime' CUDA by making an allocation here. cuMemGetInfo
// will return zeroes until we do a malloc.
int * p = NULL;
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
CUDA_SAFE_CALL(hipMalloc((void**)&p, sizeof(int)));
CUDA_SAFE_CALL(hipFree(p));
}
if (!on_cpu) {
boardMemory(&free_mem, &total_mem);
fprintf(stderr, "board free memory: %u total memory: %u\n",
free_mem, total_mem);
}
else {
total_mem = free_mem = 804585472; // pretend we are on a 8800 GTX
}
return free_mem;
}
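// Note: the free-memory figure is truncated to an int on return, so it can
// overflow on boards with more than ~2 GB free; the callers treat it only as a
// rough allocation budget.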
int matchQueriesToReferencePage(MatchContext* ctx, ReferencePage* page)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
fprintf(stderr, "Beginning reference page %p\n", page);
int free_mem = getFreeDeviceMemory(ctx->on_cpu);
int available_mem = free_mem - page->ref.bytes_on_board - BREATHING_ROOM;
ctx->ref = &(page->ref);
loadReference(ctx);
while (getQueryBlock(ctx, available_mem)) {
matchSubset(ctx, page);
ctx->statistics.bp_avg_query_length =
ctx->queries->texlen / (float)(ctx->queries->count) - 2;
destroyQueryBlock(ctx->queries);
if (num_bind_tex_calls > 100)
{
hipDeviceReset();
num_bind_tex_calls = 0;
loadReference(ctx);
}
}
unloadReferenceString(ctx->ref);
unloadReferenceTree(ctx);
{
gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope;
lseek(ctx->queries->qfile, 0, SEEK_SET);
}
return 0;
}
void initReferencePages( MatchContext* ctx , int* num_pages, ReferencePage** pages_out) {
unsigned int bases_in_ref = ctx->full_ref_len - 3;
unsigned int page_size = BASES_PER_TREE_PAGE < bases_in_ref ?
BASES_PER_TREE_PAGE : bases_in_ref;
unsigned int num_reference_pages = ceil((bases_in_ref + 0.0) / page_size);
fprintf(stderr, "Stream will use %d pages for %d bases, page size = %d\n",
num_reference_pages, bases_in_ref, page_size);
unsigned int page_overlap = MAX_QUERY_LEN + 1;
ReferencePage* pages = (ReferencePage*) calloc(num_reference_pages,
sizeof(ReferencePage));
pages[0].begin = 1;
pages[0].end = pages[0].begin +
page_size +
ceil(page_overlap / 2.0) + 1; //the 1 is for the 's' at the beginning
pages[0].shadow_left = -1;
pages[0].id = 0;
for (int i = 1; i < num_reference_pages - 1; ++i) {
pages[i].begin = pages[i - 1].end - page_overlap;
pages[i].end = pages[i].begin + page_size + page_overlap;
pages[i - 1].shadow_right = pages[i].begin;
pages[i].shadow_left = pages[i-1].end;
pages[i].id = i;
}
if (num_reference_pages > 1) {
int last_page = num_reference_pages - 1;
pages[last_page].begin = pages[last_page - 1].end - page_overlap;
pages[last_page].end = ctx->full_ref_len - 1;
pages[last_page - 1].shadow_right = pages[last_page].begin;
pages[last_page].shadow_right = -1;
pages[last_page].shadow_left = pages[last_page - 1].end;
pages[last_page].id = last_page;
}
*pages_out = pages;
*num_pages = num_reference_pages;
}
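// Page layout note (derived from the arithmetic above): consecutive pages
// overlap by page_overlap = MAX_QUERY_LEN + 1 bases, since pages[i].begin =
// pages[i-1].end - page_overlap, so a match no longer than a query always
// falls entirely inside at least one page; shadow_left/shadow_right record
// where those overlaps start, presumably so the print path can avoid reporting
// the same match from two pages.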
int streamReferenceAgainstQueries(MatchContext* ctx) {
int num_reference_pages = 0;
ReferencePage* pages = NULL;
initReferencePages(ctx, &num_reference_pages, &pages);
buildReferenceTexture(&(pages[0].ref),
ctx->full_ref,
pages[0].begin,
pages[0].end,
ctx->min_match_length,
ctx->dotfilename,
ctx->texfilename,
&(ctx->statistics));
matchQueriesToReferencePage(ctx, &pages[0]);
destroyReference(&(pages[0].ref));
for (int i = 1; i < num_reference_pages - 1; ++i) {
buildReferenceTexture(&(pages[i].ref),
ctx->full_ref,
pages[i].begin,
pages[i].end,
ctx->min_match_length,
NULL,
NULL,
&(ctx->statistics));
matchQueriesToReferencePage(ctx, &pages[i]);
destroyReference(&(pages[i].ref));
}
if (num_reference_pages > 1) {
int last_page = num_reference_pages - 1;
buildReferenceTexture(&(pages[last_page].ref),
ctx->full_ref,
pages[last_page].begin,
pages[last_page].end,
ctx->min_match_length,
NULL,
NULL,
&(ctx->statistics));
matchQueriesToReferencePage(ctx, &pages[last_page]);
destroyReference(&(pages[last_page].ref));
}
free(pages);
return 0;
}
extern "C"
int matchQueries(MatchContext* ctx) {
assert(sizeof(struct PixelOfNode) == sizeof(uint4));
assert(sizeof(struct PixelOfChildren) == sizeof(uint4));
#if TREE_ACCESS_HISTOGRAM
ctx->statistics.node_hist_size = 0;
ctx->statistics.child_hist_size = 0;
#endif
resetStats(&(ctx->statistics));
char* ttimer = createTimer();
startTimer(ttimer);
int ret;
fprintf(stderr, "Streaming reference pages against all queries\n");
ret = streamReferenceAgainstQueries(ctx);
stopTimer(ttimer);
ctx->statistics.t_end_to_end += getTimerValue(ttimer);
deleteTimer(ttimer);
writeStatisticsFile(&(ctx->statistics), ctx->stats_file, "node_hist.out", "child_hist.out");
return ret;
}
|
20cd99718f62c5dc986e53252b71e09cf6f2cb22.cu
|
// Includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/types.h>
#include <unistd.h>
#include <errno.h>
#include <sys/time.h>
#include <gloop/statistics.h>
#include <gloop/initialize.cuh>
#include <cuda.h>
#include <vector_types.h>
// includes, kernels
#include <common.cu>
#include <mummergpu.h>
#include <mummergpu_kernel.cu>
int USE_PRINT_KERNEL = 1;
#define BREATHING_ROOM (16 * 1024 * 1024)
#define BASES_PER_TREE_PAGE 8388608
//#define BASES_PER_TREE_PAGE 7000000
#define BLOCKSIZE 256
unsigned int cuda_calls = 0;
void trap_dbg()
{
fprintf(stderr, "Trapped\n");
}
#define CUDA_SAFE_CALL( call) do { \
cuda_calls++; \
cudaError err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %d (%s).\n", \
__FILE__, __LINE__, err, cudaGetErrorString( err) ); \
trap_dbg(); \
exit(EXIT_FAILURE); \
} } while (0)
# define CU_SAFE_CALL_NO_SYNC( call ) do { \
CUresult err = call; \
if( CUDA_SUCCESS != err) { \
fprintf(stderr, "Cuda driver error %x in file '%s' in line %i.\n", \
err, __FILE__, __LINE__ ); \
exit(EXIT_FAILURE); \
} } while (0)
# define CUT_DEVICE_INIT_DRV(cuDevice) do { \
cuDevice = 0; \
int deviceCount = 0; \
CUresult err = cuInit(0); \
if (CUDA_SUCCESS == err) \
CU_SAFE_CALL_NO_SYNC(cuDeviceGetCount(&deviceCount)); \
if (deviceCount == 0) { \
fprintf(stderr, "There is no device.\n"); \
exit(EXIT_FAILURE); \
} \
int dev; \
for (dev = 0; dev < deviceCount; ++dev) { \
int major, minor; \
CU_SAFE_CALL_NO_SYNC(cuDeviceComputeCapability(&major, &minor, dev));\
if (major >= 1) \
break; \
} \
if (dev == deviceCount) { \
fprintf(stderr, "There is no device supporting CUDA.\n"); \
exit(EXIT_FAILURE); \
} \
else \
CU_SAFE_CALL_NO_SYNC(cuDeviceGet(&cuDevice, dev)); \
} while (0)
unsigned int num_bind_tex_calls = 0;
#define BIND_TEX(offset, tex, arr, desc, len) do { \
CUDA_SAFE_CALL(cudaBindTexture(offset, tex, arr, desc, len)); \
++num_bind_tex_calls; \
} while(0)
#define BIND_TEX_ARRAY(tex, arr, desc) do { \
CUDA_SAFE_CALL(cudaBindTextureToArray(tex, arr, desc)); \
++num_bind_tex_calls; \
} while(0)
#define CUDA_MALLOC(ptr, size) do { \
cudaMalloc(ptr, size); \
++num_bind_tex_calls; \
} while(0)
#define CUDA_MALLOC_PITCH(ptr, out_pitch, rowsize, numrows) do { \
cudaMallocPitch(ptr, out_pitch, rowsize, numrows); \
++num_bind_tex_calls; \
} while(0)
#define CUDA_MALLOC_ARRAY(ptr, desc, pitch, rows) do { \
cudaMallocArray(ptr, desc, pitch, rows); \
++num_bind_tex_calls; \
} while(0)
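// Note: every BIND_TEX*/CUDA_MALLOC* wrapper above increments
// num_bind_tex_calls; the matching and printing loops later reset the device
// and reload the reference and queries once the counter exceeds 100,
// presumably to work around resource exhaustion in the texture-binding path.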
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
extern "C"
void computeGold(MatchResults* results,
char* refstr,
char* queries,
int* queryAddrs,
int* queryLengths,
PixelOfNode* nodeTexture,
PixelOfChildren* childrenTexture,
int numQueries,
int mismatch_length,
int rc);
extern "C"
void getReferenceString(const char * filename, char** refstr, size_t* reflen);
extern "C"
void createTreeTexture(const char * filename,
PixelOfNode** nodeTexture,
PixelOfChildren** childrenTexture,
unsigned int* width,
unsigned int* node_height,
unsigned int* children_height,
AuxiliaryNodeData** aux_data,
int* num_match_coords,
int min_match_len,
Statistics* statistics,
const char * dotfilename,
const char * texfilename);
extern "C"
void getQueriesTexture(int qfile,
char** queryTexture,
size_t* queryLength,
int** queryAddrs,
char*** queryNames,
int** queryLengths,
unsigned int* numQueries,
unsigned int* num_match_coords,
unsigned int device_memory_avail,
int min_match_length,
bool rc);
extern "C"
int lookupNumLeaves(ReferencePage * page, TextureAddress addr);
void printAlignments(ReferencePage* page,
Alignment* alignments,
char* query,
int qrylen,
TextureAddress nodeid,
int qrypos,
int edge_depth,
int min_match,
bool rc,
bool forwardcoordinates);
int countLeafNodes(int nodeid);
extern "C"
void mapQueriesEndToEnd(MatchContext* ctx,
ReferencePage* page,
MatchInfo* h_matches,
unsigned int numMatches,
Alignment* h_alignments,
unsigned int numAligments);
char * createTimer()
{
unsigned int * ptr = (unsigned int *) malloc(sizeof(struct Timer_t));
memset(ptr, 0, sizeof(struct Timer_t));
return (char *) ptr;
}
void startTimer(char * ptr)
{
gettimeofday(&(((struct Timer_t *)ptr)->start_m), NULL);
}
void stopTimer(char * ptr)
{
gettimeofday(&(((struct Timer_t *)ptr)->end_m), NULL);
}
float getTimerValue(char * ptr)
{
Timer_t * timer = (Timer_t*) ptr;
if (timer == NULL)
{
fprintf(stderr, "Uninitialized timer!!!\n");
return 0.0;
}
if (timer->end_m.tv_sec == 0) { stopTimer(ptr); }
return (float) (1000.0 * (timer->end_m.tv_sec - timer->start_m.tv_sec)
+ (0.001 * (timer->end_m.tv_usec - timer->start_m.tv_usec)));
}
void deleteTimer(char * ptr)
{
free((Timer_t *)ptr);
}
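// Usage sketch for the timer helpers above (getTimerValue reports elapsed time
// in milliseconds and implicitly stops a timer that was never stopped):
//   char* t = createTimer();
//   startTimer(t);
//   /* ... timed work ... */
//   stopTimer(t);
//   float ms = getTimerValue(t);
//   deleteTimer(t);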
extern "C"
int createReference(const char* fromFile, Reference* ref)
{
if (!fromFile || !ref)
return -1;
char * loadreftimer = createTimer();
startTimer(loadreftimer);
getReferenceString(fromFile, &(ref->str), &(ref->len));
stopTimer(loadreftimer);
ref->t_load_from_disk += getTimerValue(loadreftimer);
deleteTimer(loadreftimer);
return 0;
}
extern "C"
int destroyReference(Reference* ref)
{
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
free(ref->h_node_tex_array);
free(ref->h_children_tex_array);
free(ref->str);
#if REORDER_REF
free(ref->h_ref_array);
#endif
free(ref->aux_data);
#if TREE_ACCESS_HISTOGRAM
free(ref->h_node_hist);
free(ref->h_child_hist);
#endif
ref->str = NULL;
ref->len = 0;
return 0;
}
extern "C"
int createQuerySet(const char* fromFile, QuerySet* queries)
{
fprintf(stderr, "Opening %s...\n", fromFile);
int qfile = open(fromFile, O_RDONLY);
if (qfile == -1)
{
fprintf(stderr, "Can't open %s: %d\n", fromFile, errno);
exit (1);
}
queries->qfile = qfile;
return 0;
}
extern "C"
int destroyQuerySet(QuerySet* queries)
{
if (queries->qfile)
close(queries->qfile);
return 0;
}
extern "C"
void printStringForError(int err)
{
}
extern "C"
int createMatchContext(Reference* ref,
QuerySet* queries,
MatchResults* matches,
bool on_cpu,
int min_match_length,
char* stats_file,
bool reverse,
bool forwardreverse,
bool forwardcoordinates,
bool showQueryLength,
char* dotfilename,
char* texfilename,
MatchContext* ctx) {
{
gloop::Statistics::Scope<gloop::Statistics::Type::GPUInit> scope;
gloop::eagerlyInitializeContext();
}
{
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
ctx->queries = queries;
ctx->ref = ref;
ctx->full_ref = ref->str;
ctx->full_ref_len = ref->len;
ctx->on_cpu = on_cpu;
ctx->min_match_length = min_match_length;
ctx->stats_file = stats_file;
ctx->reverse = reverse;
ctx->forwardreverse = forwardreverse;
ctx->forwardcoordinates = forwardcoordinates;
ctx->show_query_length = showQueryLength;
ctx->dotfilename = dotfilename;
ctx->texfilename = texfilename;
}
return 0;
}
extern "C"
int destroyMatchContext(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
free(ctx->full_ref);
//destroyReference(ctx->ref);
destroyQuerySet(ctx->queries);
{
gloop::Statistics::Scope<gloop::Statistics::Type::GPUInit> scope;
gloop::eagerlyFinalizeContext();
}
return 0;
}
void buildReferenceTexture(Reference* ref,
char* full_ref,
size_t begin,
size_t end,
int min_match_len,
char* dotfilename,
char* texfilename,
Statistics* statistics)
{
fprintf(stderr, "Building reference texture...\n");
PixelOfNode* nodeTexture = NULL;
PixelOfChildren * childrenTexture = NULL;
unsigned int width = 0;
unsigned int node_height = 0;
unsigned int children_height = 0;
AuxiliaryNodeData* aux_data = NULL;
int num_nodes;
char * loadreftimer = createTimer();
startTimer(loadreftimer);
ref->len = end - begin + 3;
ref->str = (char*)malloc(ref->len);
ref->str[0] = 's';
strncpy(ref->str + 1, full_ref + begin, ref->len - 3);
strcpy(ref->str + ref->len - 2, "$");
stopTimer(loadreftimer);
statistics->t_ref_from_disk += getTimerValue(loadreftimer) + ref->t_load_from_disk;
deleteTimer(loadreftimer);
createTreeTexture(ref->str,
&nodeTexture,
&childrenTexture,
&width,
&node_height,
&children_height,
&aux_data,
&num_nodes,
min_match_len,
statistics,
dotfilename,
texfilename);
ref->h_node_tex_array = nodeTexture;
ref->h_children_tex_array = childrenTexture;
ref->tex_width = width;
ref->tex_node_height = node_height;
ref->tex_children_height = children_height;
#if TREE_ACCESS_HISTOGRAM
ref->h_node_hist = (int*)calloc(width * node_height, sizeof(int));
ref->h_child_hist = (int*)calloc(width * children_height, sizeof(int));
#endif
ref->aux_data = aux_data;
ref->num_nodes = num_nodes;
ref->bytes_on_board = (width * node_height * sizeof(PixelOfNode)) +
(width * children_height * sizeof(PixelOfChildren));
fprintf(stderr, "This tree will need %d bytes on the board\n", ref->bytes_on_board);
#if REORDER_REF
char * reordertimer = createTimer();
startTimer(reordertimer);
unsigned int refpitch = ref->pitch = 65536;
int numrows = ceil(ref->len / ((float)refpitch));
int blocksize = 4;
numrows += blocksize;
int refstrsize = numrows * refpitch;
ref->h_ref_array = (char *) malloc(refstrsize);
ref->bytes_on_board += refstrsize;
fprintf(stderr, "The refstr (reordered) requires %d bytes\n", refstrsize);
int z_max = numrows * refpitch;
for (int z = 0; z < z_max; z++) {
ref->h_ref_array[z] = 'Z';
}
int x, y;
int maxx = 0, maxy = 0;
size_t reflen = ref->len;
char* refstr = ref->str;
int block_dim = refpitch * blocksize;
for (int i = 0; i < reflen; i++) {
int bigx = i % (block_dim); // ref string reorder
int bigy = i / (block_dim);
y = bigy * blocksize + bigx % blocksize;
x = bigx / blocksize;
// printf("%d: (%d,%d)=%c\n", i, x, y, refstr[i]);
assert(x < refpitch);
assert(y < numrows);
ref->h_ref_array[y*refpitch+x] = refstr[i];
if (x > maxx) {
maxx = x;
}
if (y > maxy) {
maxy = y;
}
}
if ((maxx >= refpitch) || (maxy >= numrows)) {
fprintf(stderr, "ERROR: maxx: %d refpitch: %d, maxy: %d numrows: %d\n",
maxx, refpitch, maxy, numrows);
exit(1);
}
stopTimer(reordertimer);
if (statistics)
statistics->t_reorder_ref_str += getTimerValue(reordertimer);
deleteTimer(reordertimer);
#else
fprintf(stderr, "The refstr requires %d bytes\n", ref->len);
ref->bytes_on_board += ref->len;
#endif
}
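// String layout note (derived from the code above): ref->str is built as
// 's' + full_ref[begin, end) + '$' + '\0', i.e. end - begin reference bases
// wrapped in sentinel characters, for ref->len = end - begin + 3 bytes in
// total; with REORDER_REF the string is additionally tiled into a
// refpitch x numrows array in groups of blocksize rows, presumably to improve
// 2D texture cache locality.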
void boardMemory(size_t * free_mem, size_t * total_mem)
{
// The emulator doesn't allow calls to cuMemGetInfo
#ifdef __DEVICE_EMULATION__
*free_mem = 512*1024*1024;
*total_mem = 768*1024*1024;
#else
CU_SAFE_CALL_NO_SYNC(cuMemGetInfo(free_mem, total_mem));
#endif
}
void loadReferenceTexture(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
Reference* ref = ctx->ref;
int numrows = ceil(ref->len / ((float)ref->pitch));
int blocksize = 4;
numrows += blocksize;
cudaChannelFormatDesc refTextureDesc =
cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindSigned);
if (!ctx->on_cpu) {
char * toboardtimer = createTimer();
startTimer(toboardtimer);
#if REFTEX
#if REORDER_REF
CUDA_MALLOC_ARRAY((cudaArray**)(&ref->d_ref_array),
&refTextureDesc,
ref->pitch,
numrows);
CUDA_SAFE_CALL(cudaMemcpyToArray( (cudaArray*)(ref->d_ref_array),
0,
0,
ref->h_ref_array,
numrows*ref->pitch,
cudaMemcpyHostToDevice));
reftex.addressMode[0] = cudaAddressModeClamp;
reftex.addressMode[1] = cudaAddressModeClamp;
reftex.filterMode = cudaFilterModePoint;
reftex.normalized = false;
BIND_TEX_ARRAY(reftex, (cudaArray*)ref->d_ref_array, refTextureDesc);
ctx->ref->bytes_on_board += numrows * ref->pitch;
#else
CUDA_MALLOC( (void**)(&ref->d_ref_array), ref->len);
CUDA_SAFE_CALL( cudaMemcpy( (void*)(ref->d_ref_array),
ref->str,
ref->len,
cudaMemcpyHostToDevice) );
reftex.addressMode[0] = cudaAddressModeClamp;
reftex.filterMode = cudaFilterModePoint;
reftex.normalized = false; // access with normalized texture coordinates
cudaChannelFormatDesc refDesc =
cudaCreateChannelDesc(8,0,0,0, cudaChannelFormatKindUnsigned);
BIND_TEX(0, reftex, (void*)(ref->d_ref_array), refDesc, ref->len);
ctx->ref->bytes_on_board += ref->len;
#endif
#else
#if REORDER_REF
size_t refpitch;
CUDA_MALLOC_PITCH( (void**)(&ref->d_ref_array),
&refpitch,
ref->pitch * sizeof(char),
numrows);
CUDA_SAFE_CALL( cudaMemcpy2D((ref->d_ref_array),
refpitch,
ref->h_ref_array,
ref->pitch ,
ref->pitch * sizeof(char),
numrows,
cudaMemcpyHostToDevice));
ctx->ref->bytes_on_board += numrows * ref->pitch;
#else
CUDA_MALLOC( (void**)(&ref->d_ref_array), ref->len);
CUDA_SAFE_CALL( cudaMemcpy( (void*)(ref->d_ref_array),
ref->str,
ref->len,
cudaMemcpyHostToDevice) );
ctx->ref->bytes_on_board += ref->len;
#endif
#endif
stopTimer(toboardtimer);
ctx->statistics.t_ref_str_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
}
else {
ref->d_ref_array = NULL;
}
}
void unloadReferenceString(Reference* ref)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
#if REFTEX
CUDA_SAFE_CALL(cudaUnbindTexture( reftex ) );
#endif
#if REORDER_REF && REFTEX
CUDA_SAFE_CALL(cudaFreeArray((cudaArray*)(ref->d_ref_array)));
#else
CUDA_SAFE_CALL(cudaFree((ref->d_ref_array)));
#endif
ref->d_ref_array = NULL;
}
void unloadReferenceTree(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
Reference* ref = ctx->ref;
#if REORDER_TREE
// Unload nodetex
#if NODETEX
CUDA_SAFE_CALL(cudaUnbindTexture( nodetex ) );
CUDA_SAFE_CALL(cudaFreeArray((cudaArray*)(ref->d_node_tex_array)));
#else
CUDA_SAFE_CALL(cudaFree(ref->d_node_tex_array));
#endif
ref->d_node_tex_array = NULL;
// Unload childrentex
if (ref->d_children_tex_array)
{
#if CHILDTEX
CUDA_SAFE_CALL(cudaUnbindTexture( childrentex ) );
CUDA_SAFE_CALL(cudaFreeArray((cudaArray*)(ref->d_children_tex_array)));
#else
CUDA_SAFE_CALL(cudaFree(ref->d_children_tex_array));
#endif
}
ref->d_children_tex_array = NULL;
#else
#if NODETEX
CUDA_SAFE_CALL(cudaUnbindTexture( nodetex ) );
#endif
CUDA_SAFE_CALL(cudaFree(ref->d_node_tex_array));
ref->d_node_tex_array = NULL;
// Unload childrentex
if (ref->d_children_tex_array)
{
#if CHILDTEX
CUDA_SAFE_CALL(cudaUnbindTexture( childrentex ) );
#endif
CUDA_SAFE_CALL(cudaFree(ref->d_children_tex_array));
ref->d_children_tex_array = NULL;
}
#endif
#if TREE_ACCESS_HISTOGRAM
CUDA_SAFE_CALL(cudaFree(ref->d_node_hist));
ref->d_node_hist = NULL;
CUDA_SAFE_CALL(cudaFree(ref->d_child_hist));
ref->d_child_hist = NULL;
#endif
}
//loads a tree and text for [begin, end) in the reference
void loadReference(MatchContext* ctx) {
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
Reference* ref = ctx->ref;
ref->bytes_on_board = 0;
loadReferenceTexture(ctx);
if (!ctx->on_cpu) {
char * toboardtimer = createTimer();
startTimer(toboardtimer);
// node texels
ref->bytes_on_board += ref->tex_width * ref->tex_node_height * (sizeof(PixelOfNode));
// children texels
ref->bytes_on_board += ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren);
#if REORDER_TREE
#if NODETEX
cudaChannelFormatDesc nodeTextureDesc =
cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned);
CUDA_MALLOC_ARRAY( (cudaArray**)(&ref->d_node_tex_array),
&nodeTextureDesc,
ref->tex_width,
ref->tex_node_height );
CUDA_SAFE_CALL( cudaMemcpyToArray( (cudaArray*)(ref->d_node_tex_array),
0,
0,
ref->h_node_tex_array,
ref->tex_width * ref->tex_node_height * sizeof(PixelOfNode),
cudaMemcpyHostToDevice));
nodetex.addressMode[0] = cudaAddressModeClamp;
nodetex.addressMode[1] = cudaAddressModeClamp;
nodetex.filterMode = cudaFilterModePoint;
nodetex.normalized = false; // access with normalized texture coordinates
BIND_TEX_ARRAY(nodetex, (cudaArray*)ref->d_node_tex_array,
nodeTextureDesc);
#else
size_t nodepitch;
CUDA_MALLOC_PITCH( (void**)(&ref->d_node_tex_array),
&nodepitch,
ref->tex_width * sizeof(PixelOfNode),
ref->tex_node_height );
CUDA_SAFE_CALL( cudaMemcpy2D((ref->d_node_tex_array),
nodepitch,
ref->h_node_tex_array,
nodepitch,
ref->tex_width * sizeof(PixelOfNode),
ref->tex_node_height,
cudaMemcpyHostToDevice));
#endif
if (ref->tex_children_height)
{
#if CHILDTEX
cudaChannelFormatDesc childrenTextureDesc =
cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned);
CUDA_MALLOC_ARRAY( (cudaArray**)(&ref->d_children_tex_array),
&childrenTextureDesc,
ref->tex_width,
ref->tex_children_height );
CUDA_SAFE_CALL( cudaMemcpyToArray((cudaArray*)(ref->d_children_tex_array),
0,
0,
ref->h_children_tex_array,
ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren),
cudaMemcpyHostToDevice));
childrentex.addressMode[0] = cudaAddressModeClamp;
childrentex.addressMode[1] = cudaAddressModeClamp;
childrentex.filterMode = cudaFilterModePoint;
childrentex.normalized = false; // access with normalized texture coordinates
BIND_TEX_ARRAY(childrentex, (cudaArray*)(ref->d_children_tex_array),
childrenTextureDesc);
#else
size_t childpitch;
CUDA_MALLOC_PITCH( (void**)(&ref->d_children_tex_array),
&childpitch,
ref->tex_width * sizeof(PixelOfChildren),
ref->tex_children_height );
CUDA_SAFE_CALL( cudaMemcpy2D((ref->d_children_tex_array),
childpitch,
ref->h_children_tex_array,
childpitch,
ref->tex_width * sizeof(PixelOfNode),
ref->tex_children_height,
cudaMemcpyHostToDevice));
#endif
}
#if TREE_ACCESS_HISTOGRAM
// node hist
ref->bytes_on_board += ref->tex_width * ref->tex_node_height * sizeof(int);
CUDA_MALLOC( (void**)(&ref->d_node_hist),
ref->tex_width * ref->tex_node_height *sizeof(int));
CUDA_SAFE_CALL( cudaMemset((ref->d_node_hist),0,
ref->tex_width * ref->tex_node_height * sizeof(int)));
if (ref->tex_children_height)
{
// children hist
ref->bytes_on_board += ref->tex_width * ref->tex_children_height * sizeof(int);
fprintf(stderr, "after child_hist ref->bytes_on_board:%ld\n", ref->bytes_on_board);
CUDA_MALLOC( (void**)(&ref->d_child_hist),
ref->tex_width * ref->tex_children_height *sizeof(int));
CUDA_SAFE_CALL( cudaMemset((ref->d_child_hist),0,
ref->tex_width * ref->tex_children_height * sizeof(int)));
}
#endif
#else // NO TREE REORDERING
// Node tex, 1-dimensional
CUDA_MALLOC( (void**)(&ref->d_node_tex_array),
ref->tex_node_height * sizeof(PixelOfNode));
CUDA_SAFE_CALL( cudaMemcpy( (ref->d_node_tex_array),
ref->h_node_tex_array,
ref->tex_node_height * sizeof(PixelOfNode),
cudaMemcpyHostToDevice));
#if NODETEX
cudaChannelFormatDesc nodeTextureDesc =
cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned);
nodetex.addressMode[0] = cudaAddressModeClamp;
nodetex.filterMode = cudaFilterModePoint;
nodetex.normalized = false; // access with normalized texture coordinates
BIND_TEX(0, nodetex, (void*)(ref->d_node_tex_array), nodeTextureDesc,
ref->tex_node_height* sizeof(PixelOfNode));
#endif
if (ref->tex_children_height)
{
// Child tex, 1-dimensional
CUDA_MALLOC( (void**)(&ref->d_children_tex_array),
ref->tex_children_height * sizeof(PixelOfChildren));
CUDA_SAFE_CALL( cudaMemcpy( (ref->d_children_tex_array),
ref->h_children_tex_array,
ref->tex_children_height * sizeof(PixelOfChildren),
cudaMemcpyHostToDevice));
#if CHILDTEX
cudaChannelFormatDesc childTextureDesc =
cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned);
childrentex.addressMode[0] = cudaAddressModeClamp;
childrentex.filterMode = cudaFilterModePoint;
childrentex.normalized = false; // access with normalized texture coordinates
BIND_TEX(0, childrentex, (void*)(ref->d_children_tex_array),
childTextureDesc, ref->tex_children_height* sizeof(PixelOfChildren));
#endif
}
#if TREE_ACCESS_HISTOGRAM
ref->bytes_on_board += ref->tex_node_height * sizeof(int);
CUDA_MALLOC( (void**)(&ref->d_node_hist),
ref->tex_node_height *sizeof(int));
CUDA_SAFE_CALL( cudaMemset((ref->d_node_hist),0,
ref->tex_node_height * sizeof(int)));
if (ref->tex_children_height)
{
ref->bytes_on_board += ref->tex_children_height * sizeof(int);
CUDA_MALLOC( (void**)(&ref->d_child_hist),
ref->tex_children_height *sizeof(int));
CUDA_SAFE_CALL( cudaMemset((ref->d_child_hist),0,
ref->tex_children_height * sizeof(int)));
}
#endif
#endif
#if TWO_LEVEL_NODE_TREE
PixelOfNode node_buf[NODE_THRESH];
memset(node_buf, 0, sizeof(node_buf));
for (unsigned int i = 0; (i < NODE_THRESH) && (i < ref->num_nodes); ++i)
{
TextureAddress myaddress(id2addr(i));
#if MERGETEX && REORDER_TREE
myaddress.x &= 0x7FF;
myaddress.x *= 2;
int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION;
node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[loc];
#elif REORDER_TREE
int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION;
node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[loc];
#elif MERGETEX
node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[myaddress.x*2];
#else
node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[myaddress.x];
#endif
}
CUDA_SAFE_CALL( cudaMemcpyToSymbol(node_tree_top, node_buf, sizeof(node_buf)));
#endif
#if TWO_LEVEL_CHILD_TREE
PixelOfChildren child_buf[CHILD_THRESH];
memset(child_buf, 0, sizeof(child_buf));
for (unsigned int i = 0; (i < CHILD_THRESH) && (i < ref->num_nodes); ++i)
{
TextureAddress myaddress(id2addr(i));
#if MERGETEX && REORDER_TREE
myaddress.x &= 0x7FF;
myaddress.x *= 2;
int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION;
child_buf[i]= ((PixelOfChildren*)(ref->h_node_tex_array))[loc+1];
#elif REORDER_TREE
int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION;
child_buf[i]= ((PixelOfChildren*)(ref->h_children_tex_array))[loc];
#elif MERGETEX
child_buf[i]= ((PixelOfChildren*)(ref->h_node_tex_array))[myaddress.x*2+1];
#else
child_buf[i]= ((PixelOfChildren*)(ref->h_children_tex_array))[myaddress.x];
#endif
}
CUDA_SAFE_CALL( cudaMemcpyToSymbol(child_tree_top, child_buf, sizeof(child_buf)));
#endif
stopTimer(toboardtimer);
ctx->statistics.t_tree_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
fprintf(stderr, "done\n");
}
else {
ref->d_node_tex_array = NULL;
ref->d_children_tex_array = NULL;
}
}
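// Note on the TWO_LEVEL_*_TREE blocks above: the first NODE_THRESH /
// CHILD_THRESH texels of the tree are also copied into the node_tree_top /
// child_tree_top symbols via cudaMemcpyToSymbol, presumably so lookups near
// the root of the suffix tree can be served from on-chip memory instead of the
// large textures; the indexing mirrors the MERGETEX / REORDER_TREE layouts
// used when the textures were built.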
void dumpQueryBlockInfo(QuerySet* queries)
{
fprintf(stderr, "\tProcessing queries %s to %s\n",
queries->h_names[0],
queries->h_names[queries->count-1]);
}
void loadQueries(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
QuerySet* queries = ctx->queries;
queries->bytes_on_board = 0;
unsigned int numQueries = queries->count;
if (!ctx->on_cpu) {
fprintf(stderr, "Allocating device memory for queries... ");
char* toboardtimer = createTimer();
startTimer(toboardtimer);
dumpQueryBlockInfo(queries);
CUDA_MALLOC((void**) &queries->d_tex_array, queries->texlen); \
queries->bytes_on_board += queries->texlen;
CUDA_SAFE_CALL( cudaMemcpy((void*) queries->d_tex_array,
queries->h_tex_array + queries->h_addrs_tex_array[0],
queries->texlen,
cudaMemcpyHostToDevice));
#if QRYTEX
qrytex.addressMode[0] = cudaAddressModeClamp;
qrytex.filterMode = cudaFilterModePoint;
qrytex.normalized = false; // access with normalized texture coordinates
cudaChannelFormatDesc qryDesc =
cudaCreateChannelDesc(8,0,0,0, cudaChannelFormatKindUnsigned);
BIND_TEX(0, qrytex, (void*)(queries->d_tex_array), qryDesc,
queries->texlen);
#endif
CUDA_MALLOC((void**) &queries->d_addrs_tex_array,
numQueries * sizeof(int));
queries->bytes_on_board += numQueries * sizeof(int);
CUDA_SAFE_CALL( cudaMemcpy((void*) queries->d_addrs_tex_array,
queries->h_addrs_tex_array,
numQueries * sizeof(int),
cudaMemcpyHostToDevice));
CUDA_MALLOC((void**) &queries->d_lengths_array,
numQueries * sizeof(int));
queries->bytes_on_board += numQueries * sizeof(int);
CUDA_SAFE_CALL( cudaMemcpy((void*) queries->d_lengths_array,
queries->h_lengths_array,
numQueries * sizeof(int),
cudaMemcpyHostToDevice));
stopTimer(toboardtimer);
ctx->statistics.t_queries_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
fprintf(stderr, "\tallocated %ld bytes\n", queries->bytes_on_board);
}
else {
queries->d_addrs_tex_array = NULL;
queries->d_tex_array = NULL;
queries->d_lengths_array = NULL;
fprintf(stderr, " allocated %ld bytes\n", 2 * numQueries*sizeof(int) + queries->texlen);
}
}
void unloadQueries(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
QuerySet* queries = ctx->queries;
CUDA_SAFE_CALL(cudaFree(queries->d_tex_array));
queries->d_tex_array = NULL;
CUDA_SAFE_CALL(cudaFree(queries->d_addrs_tex_array));
queries->d_addrs_tex_array = NULL;
CUDA_SAFE_CALL(cudaFree(queries->d_lengths_array));
queries->d_lengths_array = NULL;
queries->bytes_on_board = 0;
}
// Computes the location of the first MatchCoord for a given query. NOTE:
// Do NOT use this function if COALESCED_QUERIES == 1
inline int match_coord_addrs(int qryid, int qry_addrs, int match_length)
{
return qry_addrs - qryid * (match_length + 1);
}
// Construct the offset table for a set of queries. This table will be used
// by the printing functions, and if COALESCED_QUERIES == 1, by the matching
// kernel.
void buildCoordOffsetArray(MatchContext* ctx,
int** h_coord_offset_array,
unsigned int* num_coords)
{
int numCoords = 0;
int match_length = ctx->min_match_length;
int numQueries = ctx->queries->count;
int* lengths = ctx->queries->h_lengths_array;
int* coord_offsets = (int*)calloc(numQueries, sizeof(int));
#if COALESCED_QUERIES
for (unsigned int i = 0; i < numQueries; i += WARP_SIZE)
{
// Every query in this warp will need at least this many coords
int max_num_coords = 0;
for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j)
{
int num_coords = lengths[i + j] - match_length + 1;
if ( max_num_coords < num_coords)
max_num_coords = num_coords;
}
unsigned int block_size = max_num_coords * WARP_SIZE;
for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j)
{
coord_offsets[i + j] = numCoords + j;
}
numCoords += block_size;
}
#else
for (unsigned int i = 0; i < numQueries; ++i)
{
int qryoffset = ctx->queries->h_addrs_tex_array[i];
coord_offsets[i] = match_coord_addrs(i, qryoffset, match_length);
}
if (numQueries > 0)
{
unsigned int last_qry = numQueries - 1;
unsigned int last_qry_len = lengths[last_qry] - match_length + 1;
numCoords = coord_offsets[last_qry] + last_qry_len;
fprintf(stderr, "Need %d match coords for this result array\n",
numCoords);
}
#endif
*num_coords = numCoords;
*h_coord_offset_array = coord_offsets;
}
void loadResultBuffer(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
unsigned int numQueries = ctx->queries->count;
assert (numQueries);
char* offsettimer = createTimer();
startTimer(offsettimer);
buildCoordOffsetArray(ctx,
&(ctx->results.h_coord_tex_array),
&(ctx->results.numCoords));
stopTimer(offsettimer);
ctx->statistics.t_build_coord_offsets += getTimerValue(offsettimer);
deleteTimer(offsettimer);
unsigned int numCoords = ctx->results.numCoords;
fprintf(stderr, "Allocating result array for %d queries (%d bytes) ...",
numQueries, numCoords*sizeof(MatchCoord) );
size_t boardFreeMemory = 0;
size_t total_mem = 0;
boardMemory(&boardFreeMemory, &total_mem);
fprintf(stderr,"board free memory: %u total memory: %u\n",
boardFreeMemory, total_mem);
ctx->results.h_match_coords = (MatchCoord*) calloc( numCoords, sizeof(MatchCoord));
if (ctx->results.h_match_coords == NULL)
{
trap_dbg();
exit(EXIT_FAILURE);
}
if (!ctx->on_cpu) {
char* toboardtimer = createTimer();
startTimer(toboardtimer);
ctx->results.bytes_on_board = 0;
CUDA_MALLOC( (void**) &ctx->results.d_match_coords,
numCoords * sizeof(MatchCoord));
ctx->results.bytes_on_board += numCoords * sizeof(MatchCoord);
CUDA_SAFE_CALL( cudaMemset( (void*)ctx->results.d_match_coords, 0,
numCoords * sizeof(MatchCoord)));
#if COALESCED_QUERIES
CUDA_MALLOC((void**) &ctx->results.d_coord_tex_array,
numQueries * sizeof(int));
ctx->results.bytes_on_board += numQueries * sizeof(int);
CUDA_SAFE_CALL( cudaMemcpy((void*) ctx->results.d_coord_tex_array,
ctx->results.h_coord_tex_array,
numQueries * sizeof(int),
cudaMemcpyHostToDevice));
#endif
stopTimer(toboardtimer);
ctx->statistics.t_match_coords_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
}
else {
ctx->results.d_match_coords = NULL;
}
fprintf(stderr, "done\n");
}
void unloadResultBuffer(MatchContext* ctx) {
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
CUDA_SAFE_CALL(cudaFree(ctx->results.d_match_coords));
ctx->results.d_match_coords = NULL;
ctx->results.bytes_on_board = 0;
#if COALESCED_QUERIES
CUDA_SAFE_CALL(cudaFree(ctx->results.d_coord_tex_array));
#endif
}
void transferResultsFromDevice(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
if (!ctx->on_cpu)
{
char* fromboardtimer = createTimer();
startTimer(fromboardtimer);
CUDA_SAFE_CALL(cudaMemcpy(ctx->results.h_match_coords,
ctx->results.d_match_coords,
ctx->results.numCoords * sizeof(MatchCoord),
cudaMemcpyDeviceToHost) );
#if TREE_ACCESS_HISTOGRAM
CUDA_SAFE_CALL(cudaMemcpy(ctx->ref->h_node_hist,
ctx->ref->d_node_hist,
ctx->ref->tex_node_height * ctx->ref->tex_width * sizeof(int),
cudaMemcpyDeviceToHost) );
CUDA_SAFE_CALL(cudaMemcpy(ctx->ref->h_child_hist,
ctx->ref->d_child_hist,
ctx->ref->tex_children_height * ctx->ref->tex_width * sizeof(int),
cudaMemcpyDeviceToHost) );
if (ctx->statistics.node_hist_size < ctx->ref->tex_width * ctx->ref->tex_node_height)
{
int* temp = (int*)calloc(ctx->ref->tex_width * ctx->ref->tex_node_height, sizeof(int));
if (ctx->statistics.node_hist_size)
memcpy(temp, ctx->statistics.node_hist, ctx->statistics.node_hist_size * sizeof(int));
ctx->statistics.node_hist = temp;
ctx->statistics.node_hist_size = ctx->ref->tex_width * ctx->ref->tex_node_height;
}
if (ctx->statistics.child_hist_size < ctx->ref->tex_width * ctx->ref->tex_children_height)
{
int* temp = (int*)calloc(ctx->ref->tex_width * ctx->ref->tex_children_height, sizeof(int));
if (ctx->statistics.child_hist_size)
memcpy(temp, ctx->statistics.child_hist, ctx->statistics.child_hist_size * sizeof(int));
ctx->statistics.child_hist = temp;
ctx->statistics.child_hist_size = ctx->ref->tex_width * ctx->ref->tex_children_height;
}
for (unsigned int i = 0; i < ctx->statistics.node_hist_size; ++i)
{
ctx->statistics.node_hist[i] += ctx->ref->h_node_hist[i];
}
for (unsigned int i = 0; i < ctx->statistics.child_hist_size; ++i)
{
ctx->statistics.child_hist[i] += ctx->ref->h_child_hist[i];
}
#endif
stopTimer(fromboardtimer);
ctx->statistics.t_match_coords_from_board += getTimerValue(fromboardtimer);
deleteTimer(fromboardtimer);
}
}
int flushOutput();
int addToBuffer(char* string);
char numbuffer[32];
MatchCoord* coordForQueryChar(MatchContext* ctx,
unsigned int qryid,
unsigned int qrychar)
{
MatchResults* results = &(ctx->results);
MatchCoord* coords = results->h_match_coords;
#if COALESCED_QUERIES
return coords + results->h_coord_tex_array[qryid] + qrychar * WARP_SIZE;
#else
return coords + results->h_coord_tex_array[qryid] + qrychar;
#endif
}
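// Layout note (restating what the two branches above do): without
// COALESCED_QUERIES the coords of a query are contiguous, starting at
// h_coord_tex_array[qryid]; with COALESCED_QUERIES the queries of a warp share
// an interleaved block, so consecutive characters of the same query sit
// WARP_SIZE entries apart, matching the per-warp blocks laid out in
// buildCoordOffsetArray.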
void coordsToPrintBuffers(MatchContext* ctx,
ReferencePage* page,
MatchInfo** matches,
Alignment** alignments,
unsigned int mem_avail,
unsigned int* coord_idx,
unsigned int* match_idx,
unsigned int* align_idx,
unsigned int* nextqry,
unsigned int* nextqrychar)
{
unsigned int numQueries = ctx->queries->count;
int match_length = ctx->min_match_length;
unsigned int cidx = *coord_idx;
unsigned int midx = 0;
unsigned int numCoords = ctx->results.numCoords;
unsigned int numMatches = 0;
unsigned int numAlignments = 0;
int DEBUG = 0;
if (DEBUG && cidx == 0)
{
for (int j = 0; j < numCoords; ++j)
{
MatchCoord * coord = ctx->results.h_match_coords+j;
if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK))
{
//fprintf(stdout, "node: %d\n",
// coord->node);
fprintf(stdout, "node: %d leaves:%d\n",
coord->node.data, lookupNumLeaves(page, coord->node));
}
}
exit(0);
}
// How much can we fit into mem_avail?
for (int j = cidx; j < numCoords; ++j)
{
MatchCoord* coord = ctx->results.h_match_coords + j;
int queryAlignments = 0;
int queryMatches = 0;
if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK))
{
int numLeaves = lookupNumLeaves(page, coord->node);
queryAlignments += numLeaves;
queryMatches++;
}
int allMatches = numMatches + queryMatches;
int allAlignments = numAlignments + queryAlignments;
int neededSize = allMatches * sizeof(MatchInfo) + allAlignments * sizeof(Alignment);
if (neededSize > mem_avail || (allMatches/BLOCKSIZE) >= MAX_GRID_DIMENSION)
{
// adding this match won't fit on the board
break;
}
++cidx;
numMatches = allMatches;
numAlignments = allAlignments;
}
MatchInfo* M = (MatchInfo*)calloc(numMatches, sizeof(MatchInfo));
unsigned int alignmentOffset = 0;
int qry = *nextqry;
int qrychar = *nextqrychar;
bool set_full = false;
while (qry < numQueries)
{
// h_lengths_array doesn't count the 'q' at the beginning of each query
int qlen = ctx->queries->h_lengths_array[qry] + 1 - match_length;
while (qrychar < qlen)
{
if (midx >= numMatches)
{
set_full = true;
break;
}
MatchCoord* coord = coordForQueryChar(ctx, qry, qrychar);
if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK))
{
MatchInfo m;
m.resultsoffset = alignmentOffset;
m.qrystartpos = qrychar;
m.matchnode = coord->node;
m.edgematch = coord->edge_match_length;
m.numLeaves = lookupNumLeaves(page, m.matchnode);
m.queryid = qry;
alignmentOffset += m.numLeaves;
M[midx++] = m;
}
++qrychar;
}
if (set_full)
break;
++qry;
qrychar = 0;
}
*coord_idx = cidx;
*match_idx = midx;
*align_idx = alignmentOffset;
*matches = M;
*nextqry = qry;
*nextqrychar = qrychar;
fprintf(stderr, "Allocing %d bytes of host memory for %d alignments\n", alignmentOffset * sizeof(Alignment), numAlignments);
*alignments = (struct Alignment *) calloc(alignmentOffset, sizeof(Alignment));
//cudaMallocHost((void**)alignments, numAlignments * sizeof(Alignment));
}
void runPrintKernel(MatchContext* ctx,
ReferencePage* page,
MatchInfo* h_matches,
unsigned int numMatches,
Alignment* alignments,
unsigned int numAlignments)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
MatchInfo* d_matches;
size_t matchesSize = numMatches * sizeof(MatchInfo);
CUDA_MALLOC((void**) &d_matches, matchesSize);
struct Alignment * d_alignments;
size_t alignmentSize = numAlignments * sizeof(Alignment);
CUDA_MALLOC((void**) &d_alignments, alignmentSize);
CUDA_SAFE_CALL(cudaMemset((void*) d_alignments, 0, alignmentSize));
char* atimer = createTimer();
startTimer(atimer);
// Copy matches to card
fprintf(stderr, "prepared %d matches %d alignments\n", numMatches, numAlignments);
fprintf(stderr, "Copying %d bytes to host memory for %d alignments\n", numAlignments * sizeof(Alignment), numAlignments);
int DEBUG = 0;
if (DEBUG)
{
for (int i = 0; i < numMatches; i++)
{
printf("m[%d]:\t%d\t%d\t%d\t%d\t%d\t%d\n",
i,
h_matches[i].resultsoffset,
h_matches[i].queryid,
h_matches[i].matchnode.data,
h_matches[i].numLeaves,
h_matches[i].edgematch,
h_matches[i].qrystartpos);
}
exit(0);
}
CUDA_SAFE_CALL(cudaMemcpy(d_matches, h_matches, matchesSize, cudaMemcpyHostToDevice));
stopTimer(atimer);
float mtime = getTimerValue(atimer);
// Launch the kernel
int blocksize = (numMatches > BLOCKSIZE) ? BLOCKSIZE : numMatches;
dim3 dimBlock(blocksize, 1, 1);
dim3 dimGrid(ceil(numMatches / (float)BLOCKSIZE), 1, 1);
fprintf(stderr, " Calling print kernel... ");
{
gloop::Statistics::Scope<gloop::Statistics::Type::Kernel> scope;
printKernel <<< dimGrid, dimBlock, 0 >>> (d_matches,
numMatches,
d_alignments,
#if COALESCED_QUERIES
ctx->results.d_coord_tex_array,
#endif
#if !QRYTEX
#if COALESCED_QUERIES
(int*)
#endif
ctx->queries->d_tex_array,
#endif
#if !NODETEX
(_PixelOfNode*)ctx->ref->d_node_tex_array,
#endif
#if !CHILDTEX
(_PixelOfChildren*)ctx->ref->d_children_tex_array,
#endif
ctx->queries->d_addrs_tex_array,
ctx->queries->d_lengths_array,
page->begin,
page->end,
page->shadow_left,
page->shadow_right,
ctx->min_match_length
#if TREE_ACCESS_HISTOGRAM
, ctx->ref->d_node_hist,
ctx->ref->d_child_hist
#endif
);
cudaThreadSynchronize();
cudaError_t err = cudaGetLastError();
if ( cudaSuccess != err)
{
fprintf(stderr, "Kernel execution failed: %s.\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
startTimer(atimer);
// Copy the results back to the host
CUDA_SAFE_CALL(cudaMemcpy((void*)alignments,
(void*)d_alignments,
alignmentSize,
cudaMemcpyDeviceToHost));
cudaThreadSynchronize();
stopTimer(atimer);
float atime = getTimerValue(atimer);
fprintf(stderr, "memcpy time= %f\n", atime + mtime);
deleteTimer(atimer);
// Cleanup
CUDA_SAFE_CALL(cudaFree(d_alignments));
CUDA_SAFE_CALL(cudaFree(d_matches));
}
// TODO: need reverse-complement printing support
void runPrintOnCPU(MatchContext* ctx, ReferencePage* page,
MatchInfo* h_matches,
unsigned int numMatches,
Alignment* alignments,
unsigned int numAlignments)
{
unsigned int min_match_length = ctx->min_match_length;
int* addrs = ctx->queries->h_addrs_tex_array;
int* lengths = ctx->queries->h_lengths_array;
char* qrychars = ctx->queries->h_tex_array;
if (!numMatches)
return;
int qry = -1;
unsigned int qrylen;
for (int i = 0; i < numMatches; ++i)
{
MatchInfo& match = h_matches[i];
if (match.queryid != qry)
{
qry = match.queryid;
qrylen = lengths[qry];
}
if (!(match.edgematch & FRMASK))
{
printAlignments(page,
alignments + match.resultsoffset,
#if COALESCED_QUERIES
qrychars + sizeof(int) * addrs[qry],
#else
qrychars + addrs[qry],
#endif
qrylen,
match.matchnode,
match.qrystartpos,
match.edgematch,
min_match_length,
0,
ctx->forwardcoordinates);
}
}
}
int addMatchToBuffer(int left_in_ref, int qrypos, int matchlen);
void getExactAlignments(MatchContext * ctx, ReferencePage * page, bool on_cpu)
{
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
assert(!ctx->reverse && !ctx->forwardreverse);
size_t boardFreeMemory;
size_t total_mem;
if (!on_cpu)
{
boardMemory(&boardFreeMemory, &total_mem);
fprintf(stderr, "board free memory: %u total memory: %u\n",
boardFreeMemory, total_mem);
}
else
{
boardFreeMemory = 256 * 1024 * 1024;
total_mem = boardFreeMemory;
}
#ifdef __DEVICE_EMULATION__
boardFreeMemory = 512 * 1024 * 1024;
#endif
boardFreeMemory -= BREATHING_ROOM;
fprintf(stderr, "board free memory: %u\n", boardFreeMemory);
int rTotalMatches = 0;
int rTotalAlignments = 0;
int totalRounds = 0;
unsigned int last_coord = ctx->results.numCoords;
unsigned int next_coord = 0;
unsigned int nextqry = 0;
unsigned int nextqrychar = 0;
int lastqry = -1;
while (next_coord < last_coord)
{
// see how many queries will fit on the board
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
totalRounds++;
unsigned int numMatches = 0;
unsigned int numAlignments = 0;
MatchInfo* h_matches = NULL;
Alignment* h_alignments = NULL;
int coord_left = next_coord;
char* btimer = createTimer();
startTimer(btimer);
coordsToPrintBuffers(ctx, page, &h_matches, &h_alignments, boardFreeMemory,
&next_coord, &numMatches, &numAlignments, &nextqry, &nextqrychar);
stopTimer(btimer);
float btime = getTimerValue(btimer);
ctx->statistics.t_coords_to_buffers += btime;
fprintf(stderr, "buffer prep time= %f\n", btime);
deleteTimer(btimer);
fprintf(stderr, "Round %d: Printing results for match coords [%d-%d) of %d using %d matches and %d alignments\n",
totalRounds, coord_left, next_coord, last_coord, numMatches, numAlignments);
if (numMatches == 0)
continue;
char buf[256];
//assert(qryend > qrystart);
rTotalAlignments += numAlignments;
rTotalMatches += numMatches;
if (num_bind_tex_calls > 100)
{
cudaThreadExit();
num_bind_tex_calls = 0;
loadReference(ctx);
loadQueries(ctx);
}
{
char* ktimer = createTimer();
startTimer(ktimer);
if (on_cpu)
{
runPrintOnCPU(ctx, page, h_matches, numMatches,
h_alignments, numAlignments);
}
else
{
runPrintKernel(ctx, page, h_matches, numMatches,
h_alignments, numAlignments);
}
stopTimer(ktimer);
float ktime = getTimerValue(ktimer);
ctx->statistics.t_print_kernel += ktime;
fprintf(stderr, "print kernel time= %f\n", ktime);
deleteTimer(ktimer);
}
// char* stimer = createTimer();
// startTimer(stimer);
// mapQueriesEndToEnd(ctx,
// page,
// h_matches,
// numMatches,
// h_alignments,
// numAlignments);
//
// stopTimer(stimer);
//
// float stime = getTimerValue(stimer);
// fprintf(stderr, "postprocess time= %f\n", stime);
// deleteTimer(stimer);
//flushOutput();
//Process the alignments
{
gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope;
char* otimer = createTimer();
startTimer(otimer);
for (int m = 0; m < numMatches; m++)
{
int base = h_matches[m].resultsoffset;
for (int i = 0; i < h_matches[m].numLeaves; i++)
{
// See if there are any more left maximal alignments for this match
if (h_alignments[base+i].left_in_ref == 0)
{
break;
}
if (h_matches[m].queryid != lastqry)
{
lastqry = h_matches[m].queryid;
addToBuffer("> ");
addToBuffer(*(ctx->queries->h_names + lastqry));
addToBuffer("\n");
}
sprintf(buf, "%d\t%d\t%d\n",
h_alignments[base+i].left_in_ref,
h_matches[m].qrystartpos + 1,
h_alignments[base+i].matchlen);
addToBuffer(buf);
// addMatchToBuffer(h_alignments[base+i].left_in_ref,
// h_matches[m].qrystartpos + 1,
// h_alignments[base+i].matchlen);
}
}
flushOutput();
stopTimer(otimer);
ctx->statistics.t_results_to_disk += getTimerValue(otimer);
deleteTimer(otimer);
free(h_matches);
free(h_alignments);
//cudaFreeHost((void*)h_alignments);
}
}
free(ctx->results.h_coord_tex_array);
free(ctx->results.h_match_coords);
ctx->results.h_coord_tex_array = NULL;
ctx->results.h_match_coords = NULL;
fprintf(stderr, "Finished processing %d matches and %d potential alignments in %d rounds\n",
rTotalMatches, rTotalAlignments, totalRounds);
}
int getQueryBlock(MatchContext* ctx, size_t device_mem_avail)
{
gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope;
QuerySet* queries = ctx->queries;
char * queryTex = NULL;
int* queryAddrs = NULL;
int* queryLengths = NULL;
unsigned int numQueries;
unsigned int num_match_coords;
size_t queryLen;
char** names;
fprintf(stderr, "Loading query block... ");
char* queryreadtimer = createTimer();
startTimer(queryreadtimer);
getQueriesTexture(queries->qfile,
&queryTex,
&queryLen,
&queryAddrs,
&names,
&queryLengths,
&numQueries,
&num_match_coords,
device_mem_avail,
ctx->min_match_length,
ctx->reverse || ctx->forwardreverse);
stopTimer(queryreadtimer);
ctx->statistics.t_queries_from_disk += getTimerValue(queryreadtimer);
deleteTimer(queryreadtimer);
queries->h_tex_array = queryTex;
queries->count = numQueries;
queries->h_addrs_tex_array = queryAddrs;
queries->texlen = queryLen;
queries->h_names = names;
queries->h_lengths_array = queryLengths;
ctx->results.numCoords = num_match_coords;
fprintf(stderr, "done.\n");
return numQueries;
}
void destroyQueryBlock(QuerySet* queries)
{
free(queries->h_tex_array);
queries->h_tex_array = NULL;
for (int i = 0; i < queries->count; ++i)
free(queries->h_names[i]);
free(queries->h_names);
queries->count = 0;
queries->texlen = 0;
free(queries->h_addrs_tex_array);
queries->h_addrs_tex_array = NULL;
free(queries->h_lengths_array);
queries->h_lengths_array = NULL;
}
void resetStats(Statistics* stats)
{
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
stats->t_end_to_end = 0.0;
stats->t_match_kernel = 0.0;
stats->t_print_kernel = 0.0;
stats->t_queries_to_board = 0.0;
stats->t_match_coords_to_board = 0.0;
stats->t_match_coords_from_board = 0.0;
stats->t_tree_to_board = 0.0;
stats->t_ref_str_to_board = 0.0;
stats->t_queries_from_disk = 0.0;
stats->t_ref_from_disk = 0.0;
stats->t_results_to_disk = 0.0;
stats->t_tree_construction = 0.0;
stats->t_tree_reorder = 0.0;
stats->t_tree_flatten = 0.0;
stats->t_reorder_ref_str = 0.0;
stats->t_build_coord_offsets = 0.0;
stats->t_coords_to_buffers = 0.0;
stats->bp_avg_query_length = 0.0;
#if TREE_ACCESS_HISTOGRAM
if (stats->node_hist_size)
{
free(stats->node_hist);
stats->node_hist = NULL;
stats->node_hist_size = 0;
}
if (stats->child_hist_size)
{
free(stats->child_hist);
stats->child_hist = NULL;
stats->child_hist_size = 0;
}
#endif
}
void writeStatisticsFile(Statistics* stats,
char* stats_filename,
char* node_hist_filename = NULL,
char* child_hist_filename = NULL)
{
gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope;
if (stats_filename)
{
FILE* f = fopen(stats_filename, "w");
if (!f)
{
fprintf(stderr, "WARNING: could not open %s for writing\n", stats_filename);
}
else
{
fprintf(f, "Q");
fprintf(f, ",R");
fprintf(f, ",T");
fprintf(f, ",m");
fprintf(f, ",r");
fprintf(f, ",t");
fprintf(f, ",n");
fprintf(f, ",Total");
fprintf(f, ",Match kernel");
fprintf(f, ",Print Kernel");
fprintf(f, ",Queries to board");
fprintf(f, ",Match coords to board");
fprintf(f, ",Match coords from board");
fprintf(f, ",Tree to board");
fprintf(f, ",Ref str to board");
fprintf(f, ",Queries from disk");
fprintf(f, ",Ref from disk");
fprintf(f, ",Output to disk");
fprintf(f, ",Tree construction");
fprintf(f, ",Tree reorder");
fprintf(f, ",Tree flatten");
fprintf(f, ",Ref reorder");
fprintf(f, ",Build coord table");
fprintf(f, ",Coords to buffers");
fprintf(f, ",Avg qry length");
fprintf(f, "\n");
fprintf(f, "%d", QRYTEX);
fprintf(f, ",%d", REFTEX);
fprintf(f, ",%d", TREETEX);
fprintf(f, ",%d", MERGETEX);
fprintf(f, ",%d", REORDER_REF);
fprintf(f, ",%d", REORDER_TREE);
fprintf(f, ",%d", RENUMBER_TREE);
fprintf(f, ",%f", stats->t_end_to_end);
fprintf(f, ",%f", stats->t_match_kernel);
fprintf(f, ",%f", stats->t_print_kernel);
fprintf(f, ",%f", stats->t_queries_to_board);
fprintf(f, ",%f", stats->t_match_coords_to_board);
fprintf(f, ",%f", stats->t_match_coords_from_board);
fprintf(f, ",%f", stats->t_tree_to_board);
fprintf(f, ",%f", stats->t_ref_str_to_board);
fprintf(f, ",%f", stats->t_queries_from_disk);
fprintf(f, ",%f", stats->t_ref_from_disk);
fprintf(f, ",%f", stats->t_results_to_disk);
fprintf(f, ",%f", stats->t_tree_construction);
fprintf(f, ",%f", stats->t_tree_reorder);
fprintf(f, ",%f", stats->t_tree_flatten);
fprintf(f, ",%f", stats->t_reorder_ref_str);
fprintf(f, ",%f", stats->t_build_coord_offsets);
fprintf(f, ",%f", stats->t_coords_to_buffers);
fprintf(f, ",%f", stats->bp_avg_query_length);
fprintf(f,"\n");
fclose(f);
}
}
#if TREE_ACCESS_HISTOGRAM
if (node_hist_filename)
{
FILE* f = fopen(node_hist_filename, "w");
if (!f)
{
fprintf(stderr, "WARNING: could not open %s for writing\n", node_hist_filename);
}
else
{
for (unsigned int i = 0; i < stats->node_hist_size; ++i)
fprintf(f, "%u\t%d\n", i, stats->node_hist[i]);
fclose(f);
}
}
if (child_hist_filename)
{
FILE* f = fopen(child_hist_filename, "w");
if (!f)
{
fprintf(stderr, "WARNING: could not open %s for writing\n", child_hist_filename);
}
else
{
for (unsigned int i = 0; i < stats->child_hist_size; ++i)
fprintf(f, "%u\t%d\n", i, stats->child_hist[i]);
fclose(f);
}
}
float total_node_hits = 0;
float tree_top_node_hits = 0;
float total_child_hits = 0;
float tree_top_child_hits = 0;
for (unsigned int i = 0; i < stats->node_hist_size; ++i)
{
total_node_hits += stats->node_hist[i];
if (i < 256) { tree_top_node_hits += stats->node_hist[i]; }
}
for (unsigned int i = 0; i < stats->child_hist_size; ++i)
{
total_child_hits += stats->child_hist[i];
if (i < 256) { tree_top_child_hits += stats->child_hist[i]; }
}
fprintf(stderr, "Tree top node hits (%d/%d) = %f percent\n", (int)tree_top_node_hits, (int)total_node_hits, 100.0f * tree_top_node_hits / total_node_hits);
fprintf(stderr, "Tree top child hits (%d/%d) = %f percent\n", (int)tree_top_child_hits, (int)total_child_hits, 100.0f * tree_top_child_hits / total_child_hits);
#endif
}
void matchOnCPU(MatchContext* ctx, bool doRC)
{
//TODO: CPU matching is disabled.
if (doRC) {
// Match the reverse complement of the queries to the ref
computeGold(&ctx->results,
ctx->ref->str,
ctx->queries->h_tex_array,
ctx->queries->h_addrs_tex_array,
ctx->queries->h_lengths_array,
(PixelOfNode*)(ctx->ref->h_node_tex_array),
(PixelOfChildren*)(ctx->ref->h_children_tex_array),
ctx->queries->count,
ctx->min_match_length,
REVERSE);
}
else {
computeGold(&ctx->results,
ctx->ref->str,
ctx->queries->h_tex_array,
ctx->queries->h_addrs_tex_array,
ctx->queries->h_lengths_array,
(PixelOfNode*)(ctx->ref->h_node_tex_array),
(PixelOfChildren*)(ctx->ref->h_children_tex_array),
ctx->queries->count,
ctx->min_match_length,
FORWARD);
}
}
void matchOnGPU(MatchContext* ctx, bool doRC)
{
int numQueries = ctx->queries->count;
int blocksize = (numQueries > BLOCKSIZE) ? BLOCKSIZE : numQueries;
dim3 dimBlock(blocksize, 1, 1);
dim3 dimGrid(ceil(numQueries / (float)BLOCKSIZE), 1, 1);
// Match the reverse complement of the queries to the ref
if (doRC) {
//TODO: GPU RC is disabled
mummergpuRCKernel <<< dimGrid, dimBlock, 0 >>> (ctx->results.d_match_coords,
ctx->queries->d_tex_array,
ctx->queries->d_addrs_tex_array,
ctx->queries->d_lengths_array,
numQueries,
ctx->min_match_length);
}
else {
mummergpuKernel <<< dimGrid, dimBlock, 0 >>> (ctx->results.d_match_coords,
#if COALESCED_QUERIES
ctx->results.d_coord_tex_array,
#endif
#if !QRYTEX
#if COALESCED_QUERIES
(int*)
#endif
ctx->queries->d_tex_array,
#endif
#if !NODETEX
(_PixelOfNode*)(ctx->ref->d_node_tex_array),
#endif
#if !CHILDTEX
(_PixelOfChildren*)(ctx->ref->d_children_tex_array),
#endif
#if !REFTEX
(char*)ctx->ref->d_ref_array,
#endif
ctx->queries->d_addrs_tex_array,
ctx->queries->d_lengths_array,
numQueries,
ctx->min_match_length
#if TREE_ACCESS_HISTOGRAM
, ctx->ref->d_node_hist,
ctx->ref->d_child_hist
#endif
);
}
// check if kernel execution generated an error
cudaError_t err = cudaGetLastError();
if ( cudaSuccess != err) {
fprintf(stderr, "Kernel execution failed: %s.\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void getMatchResults(MatchContext* ctx,
unsigned int page_num)
{
transferResultsFromDevice(ctx);
}
void matchQueryBlockToReferencePage(MatchContext* ctx,
ReferencePage* page,
bool reverse_complement)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Kernel> scope;
char* ktimer = createTimer();
fprintf(stderr, "Memory footprint is:\n\tqueries: %d\n\tref: %d\n\tresults: %d\n",
ctx->queries->bytes_on_board,
ctx->ref->bytes_on_board,
ctx->results.bytes_on_board);
startTimer(ktimer);
if (ctx->on_cpu)
{
matchOnCPU(ctx, reverse_complement);
}
else
{
matchOnGPU(ctx, reverse_complement);
cudaThreadSynchronize();
}
stopTimer(ktimer);
float ktime = getTimerValue(ktimer);
ctx->statistics.t_match_kernel += ktime;
fprintf(stderr, "match kernel time= %f\n", ktime);
deleteTimer(ktimer);
getMatchResults(ctx, page->id);
unloadResultBuffer(ctx);
}
int matchSubset(MatchContext* ctx,
ReferencePage* page)
{
loadQueries(ctx);
fprintf(stderr,
"Matching queries %s - %s against ref coords %d - %d\n",
ctx->queries->h_names[0],
ctx->queries->h_names[ctx->queries->count - 1],
page->begin,
page->end);
loadResultBuffer(ctx);
// TODO: re-enable RC support by calling this twice with the reverse/fwdreverse
// idiom.
matchQueryBlockToReferencePage(ctx, page, false);
if (USE_PRINT_KERNEL && !ctx->on_cpu)
{
getExactAlignments(ctx, page, false);
}
else
{
getExactAlignments(ctx, page, true);
}
flushOutput();
unloadQueries(ctx);
return 0;
}
int getFreeDeviceMemory(bool on_cpu)
{
size_t free_mem = 0;
size_t total_mem = 0;
// We have to 'prime' CUDA by making an allocation here. cuMemGetInfo
// will return zeroes until we do a malloc.
int * p = NULL;
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
CUDA_SAFE_CALL(cudaMalloc((void**)&p, sizeof(int)));
CUDA_SAFE_CALL(cudaFree(p));
}
if (!on_cpu) {
boardMemory(&free_mem, &total_mem);
fprintf(stderr, "board free memory: %u total memory: %u\n",
free_mem, total_mem);
}
else {
total_mem = free_mem = 804585472; // pretend we are on an 8800 GTX
}
return free_mem;
}
int matchQueriesToReferencePage(MatchContext* ctx, ReferencePage* page)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
fprintf(stderr, "Beginning reference page %p\n", page);
int free_mem = getFreeDeviceMemory(ctx->on_cpu);
int available_mem = free_mem - page->ref.bytes_on_board - BREATHING_ROOM;
ctx->ref = &(page->ref);
loadReference(ctx);
while (getQueryBlock(ctx, available_mem)) {
matchSubset(ctx, page);
ctx->statistics.bp_avg_query_length =
ctx->queries->texlen / (float)(ctx->queries->count) - 2;
destroyQueryBlock(ctx->queries);
if (num_bind_tex_calls > 100)
{
cudaThreadExit();
num_bind_tex_calls = 0;
loadReference(ctx);
}
}
unloadReferenceString(ctx->ref);
unloadReferenceTree(ctx);
{
gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope;
lseek(ctx->queries->qfile, 0, SEEK_SET);
}
return 0;
}
void initReferencePages( MatchContext* ctx , int* num_pages, ReferencePage** pages_out) {
unsigned int bases_in_ref = ctx->full_ref_len - 3;
unsigned int page_size = BASES_PER_TREE_PAGE < bases_in_ref ?
BASES_PER_TREE_PAGE : bases_in_ref;
unsigned int num_reference_pages = ceil((bases_in_ref + 0.0) / page_size);
fprintf(stderr, "Stream will use %d pages for %d bases, page size = %d\n",
num_reference_pages, bases_in_ref, page_size);
unsigned int page_overlap = MAX_QUERY_LEN + 1;
ReferencePage* pages = (ReferencePage*) calloc(num_reference_pages,
sizeof(ReferencePage));
pages[0].begin = 1;
pages[0].end = pages[0].begin +
page_size +
ceil(page_overlap / 2.0) + 1; //the 1 is for the 's' at the beginning
pages[0].shadow_left = -1;
pages[0].id = 0;
for (int i = 1; i < num_reference_pages - 1; ++i) {
pages[i].begin = pages[i - 1].end - page_overlap;
pages[i].end = pages[i].begin + page_size + page_overlap;
pages[i - 1].shadow_right = pages[i].begin;
pages[i].shadow_left = pages[i-1].end;
pages[i].id = i;
}
if (num_reference_pages > 1) {
int last_page = num_reference_pages - 1;
pages[last_page].begin = pages[last_page - 1].end - page_overlap;
pages[last_page].end = ctx->full_ref_len - 1;
pages[last_page - 1].shadow_right = pages[last_page].begin;
pages[last_page].shadow_right = -1;
pages[last_page].shadow_left = pages[last_page - 1].end;
pages[last_page].id = last_page;
}
*pages_out = pages;
*num_pages = num_reference_pages;
}
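// Illustration with assumed numbers (BASES_PER_TREE_PAGE and MAX_QUERY_LEN are
// defined elsewhere): if page_size = 1000 and page_overlap = 101, page 0 gets
// begin = 1 and end = 1 + 1000 + ceil(101/2.0) + 1 = 1053, page 1 gets
// begin = 1053 - 101 = 952 and end = 952 + 1000 + 101 = 2053, and so on. Each
// page re-reads page_overlap bases of its neighbour so that matches spanning a
// page boundary are not missed.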
int streamReferenceAgainstQueries(MatchContext* ctx) {
int num_reference_pages = 0;
ReferencePage* pages = NULL;
initReferencePages(ctx, &num_reference_pages, &pages);
buildReferenceTexture(&(pages[0].ref),
ctx->full_ref,
pages[0].begin,
pages[0].end,
ctx->min_match_length,
ctx->dotfilename,
ctx->texfilename,
&(ctx->statistics));
matchQueriesToReferencePage(ctx, &pages[0]);
destroyReference(&(pages[0].ref));
for (int i = 1; i < num_reference_pages - 1; ++i) {
buildReferenceTexture(&(pages[i].ref),
ctx->full_ref,
pages[i].begin,
pages[i].end,
ctx->min_match_length,
NULL,
NULL,
&(ctx->statistics));
matchQueriesToReferencePage(ctx, &pages[i]);
destroyReference(&(pages[i].ref));
}
if (num_reference_pages > 1) {
int last_page = num_reference_pages - 1;
buildReferenceTexture(&(pages[last_page].ref),
ctx->full_ref,
pages[last_page].begin,
pages[last_page].end,
ctx->min_match_length,
NULL,
NULL,
&(ctx->statistics));
matchQueriesToReferencePage(ctx, &pages[last_page]);
destroyReference(&(pages[last_page].ref));
}
free(pages);
return 0;
}
extern "C"
int matchQueries(MatchContext* ctx) {
assert(sizeof(struct PixelOfNode) == sizeof(uint4));
assert(sizeof(struct PixelOfChildren) == sizeof(uint4));
#if TREE_ACCESS_HISTOGRAM
ctx->statistics.node_hist_size = 0;
ctx->statistics.child_hist_size = 0;
#endif
resetStats(&(ctx->statistics));
char* ttimer = createTimer();
startTimer(ttimer);
int ret;
fprintf(stderr, "Streaming reference pages against all queries\n");
ret = streamReferenceAgainstQueries(ctx);
stopTimer(ttimer);
ctx->statistics.t_end_to_end += getTimerValue(ttimer);
deleteTimer(ttimer);
writeStatisticsFile(&(ctx->statistics), ctx->stats_file, "node_hist.out", "child_hist.out");
return ret;
}
|
ac515f27b33c62cf75e7ee7f739221f1eac0ce4e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel_quicksort.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *values = NULL;
hipMalloc(&values, XSIZE*YSIZE*sizeof(int));
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(kernel_quicksort, dim3(gridBlock), dim3(threadBlock), 0, 0, values, n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(kernel_quicksort, dim3(gridBlock), dim3(threadBlock), 0, 0, values, n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(kernel_quicksort, dim3(gridBlock), dim3(threadBlock), 0, 0, values, n);
}
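// Note: kernel launches are asynchronous, so the start/end interval computed
// below mostly measures launch overhead; calling hipDeviceSynchronize() before
// reading the end timestamp would time kernel execution instead.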
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
ac515f27b33c62cf75e7ee7f739221f1eac0ce4e.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel_quicksort.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *values = NULL;
cudaMalloc(&values, XSIZE*YSIZE*sizeof(int));
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernel_quicksort<<<gridBlock,threadBlock>>>(values,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernel_quicksort<<<gridBlock,threadBlock>>>(values,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernel_quicksort<<<gridBlock,threadBlock>>>(values,n);
}
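// Note: kernel launches are asynchronous, so the start/end interval computed
// below mostly measures launch overhead; calling cudaDeviceSynchronize() before
// reading the end timestamp would time kernel execution instead.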
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
98c3f9d81b4fd47d826a4cd46a96f14c12460398.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <stdlib.h>
#include <time.h>
extern "C"
{
#include "libs/bitmap.h"
}
#define cudaErrorCheck(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %s %d\n", hipGetErrorName(code), hipGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
#define BLOCK_DIMENSION 16 // A thread block size of 16x16 (256 threads) is a common choice (from https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#thread-hierarchy)
// Convolutional Filter Examples, each with dimension 3,
// gaussian filter with dimension 5
int sobelYFilter[] = {-1, -2, -1,
0, 0, 0,
1, 2, 1};
int sobelXFilter[] = {-1, -0, 1,
-2, 0, 2,
-1, 0, 1};
int laplacian1Filter[] = {-1, -4, -1,
-4, 20, -4,
-1, -4, -1};
int laplacian2Filter[] = {0, 1, 0,
1, -4, 1,
0, 1, 0};
int laplacian3Filter[] = {-1, -1, -1,
-1, 8, -1,
-1, -1, -1};
int gaussianFilter[] = {1, 4, 6, 4, 1,
4, 16, 24, 16, 4,
6, 24, 36, 24, 6,
4, 16, 24, 16, 4,
1, 4, 6, 4, 1};
const char *filterNames[] = {"SobelY", "SobelX", "Laplacian 1", "Laplacian 2", "Laplacian 3", "Gaussian"};
int *const filters[] = {sobelYFilter, sobelXFilter, laplacian1Filter, laplacian2Filter, laplacian3Filter, gaussianFilter};
unsigned int const filterDims[] = {3, 3, 3, 3, 3, 5};
float const filterFactors[] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0 / 256.0};
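// The 1/256 factor normalizes the 5x5 Gaussian above: it is the outer product
// of the binomial row {1, 4, 6, 4, 1}, whose entries sum to 16, so the whole
// kernel sums to 16 * 16 = 256. The 3x3 filters are applied unnormalized.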
int const maxFilterIndex = sizeof(filterDims) / sizeof(unsigned int);
void cleanup(char **input, char **output)
{
if (*input)
free(*input);
if (*output)
free(*output);
}
void graceful_exit(char **input, char **output)
{
cleanup(input, output);
exit(0);
}
void error_exit(char **input, char **output)
{
cleanup(input, output);
exit(1);
}
// Helper function to swap bmpImageChannel pointers
void swapImageRawdata(pixel **one, pixel **two)
{
pixel *helper = *two;
*two = *one;
*one = helper;
}
void swapImage(bmpImage **one, bmpImage **two)
{
bmpImage *helper = *two;
*two = *one;
*one = helper;
}
// Apply convolutional filter on image data
void applyFilter(pixel *out, pixel *in, unsigned int width, unsigned int height, int *filter, unsigned int filterDim, float filterFactor)
{
unsigned int const filterCenter = (filterDim / 2);
for (unsigned int y = 0; y < height; y++)
{
for (unsigned int x = 0; x < width; x++)
{
int ar = 0, ag = 0, ab = 0;
for (unsigned int ky = 0; ky < filterDim; ky++)
{
int nky = filterDim - 1 - ky;
for (unsigned int kx = 0; kx < filterDim; kx++)
{
int nkx = filterDim - 1 - kx;
int yy = y + (ky - filterCenter);
int xx = x + (kx - filterCenter);
if (xx >= 0 && xx < (int)width && yy >= 0 && yy < (int)height)
{
ar += in[yy * width + xx].r * filter[nky * filterDim + nkx];
ag += in[yy * width + xx].g * filter[nky * filterDim + nkx];
ab += in[yy * width + xx].b * filter[nky * filterDim + nkx];
}
}
}
ar *= filterFactor;
ag *= filterFactor;
ab *= filterFactor;
ar = (ar < 0) ? 0 : ar;
ag = (ag < 0) ? 0 : ag;
ab = (ab < 0) ? 0 : ab;
out[y * width + x].r = (ar > 255) ? 255 : ar;
out[y * width + x].g = (ag > 255) ? 255 : ag;
out[y * width + x].b = (ab > 255) ? 255 : ab;
}
}
}
// Task 1-4
// Apply convolutional filter on image data
__global__ void applyFilter_CUDA_Kernel(pixel *out, pixel *in, unsigned int width, unsigned int height, int *filter, unsigned int filterDim, float filterFactor)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// Handle out of bounds
if (x >= width || y >= height)
{
return;
}
unsigned int const filterCenter = (filterDim / 2);
int ar = 0, ag = 0, ab = 0;
for (unsigned int ky = 0; ky < filterDim; ky++)
{
int nky = filterDim - 1 - ky;
for (unsigned int kx = 0; kx < filterDim; kx++)
{
int nkx = filterDim - 1 - kx;
int yy = y + (ky - filterCenter);
int xx = x + (kx - filterCenter);
if (xx >= 0 && xx < (int)width && yy >= 0 && yy < (int)height)
{
ar += in[yy * width + xx].r * filter[nky * filterDim + nkx];
ag += in[yy * width + xx].g * filter[nky * filterDim + nkx];
ab += in[yy * width + xx].b * filter[nky * filterDim + nkx];
}
}
}
ar *= filterFactor;
ag *= filterFactor;
ab *= filterFactor;
ar = (ar < 0) ? 0 : ar;
ag = (ag < 0) ? 0 : ag;
ab = (ab < 0) ? 0 : ab;
out[y * width + x].r = (ar > 255) ? 255 : ar;
out[y * width + x].g = (ag > 255) ? 255 : ag;
out[y * width + x].b = (ab > 255) ? 255 : ab;
}
// Task 5
// Apply convolutional filter on image data
/*__global__ void applyFilter_CUDA_Kernel(pixel *out, pixel *in, unsigned int width, unsigned int height, int *filter, unsigned int filterDim, float filterFactor)
{
// Now instead of using the filter directly from global memory, we want to copy the filter to shared memory.
// Dynamic shared memory because the filterDim is not known at compile time.
// This one holds all of the data
extern __shared__ int s[];
int *shared_filter = s; // Length of filterDim * filterDim
pixel *shared_pixels = (pixel *)&shared_filter[filterDim * filterDim]; // Length of BLOCK_DIMENSION * BLOCK_DIMENSION
for (int i = 0; i < filterDim * filterDim; i++)
{
shared_filter[i] = filter[i];
}
// Sync to make sure that all threads have completed the loads to shared memory
__syncthreads();
// Now we can use shared_filter!
// Because shared memory is only shared between blocks, it makes sense to make the shared memory array for
// the image as big as the block, since each thread in the block changes one pixel.
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// Handle out of bounds
if (x >= width || y >= height)
{
// __syncthreads(); // ? Needed? Think so, to avoid deadlock
return;
}
// Set the position in the block to the correct value
shared_pixels[threadIdx.y * BLOCK_DIMENSION + threadIdx.x] = in[y * width + x];
// Sync to make sure that all threads have completed the loads to shared memory
__syncthreads();
// Now we can use shared_pixels!
unsigned int const filterCenter = (filterDim / 2);
int ar = 0, ag = 0, ab = 0;
for (unsigned int ky = 0; ky < filterDim; ky++)
{
int nky = filterDim - 1 - ky;
for (unsigned int kx = 0; kx < filterDim; kx++)
{
int nkx = filterDim - 1 - kx;
int yy = y + (ky - filterCenter);
int xx = x + (kx - filterCenter);
// Now, since the edge threads need pixels outside the block's shared memory,
// we need to check its position.
if (xx >= 0 && xx < BLOCK_DIMENSION && yy >= 0 && yy < BLOCK_DIMENSION)
{
ar += shared_pixels[yy * BLOCK_DIMENSION + xx].r * shared_filter[nky * filterDim + nkx];
ag += shared_pixels[yy * BLOCK_DIMENSION + xx].g * shared_filter[nky * filterDim + nkx];
ab += shared_pixels[yy * BLOCK_DIMENSION + xx].b * shared_filter[nky * filterDim + nkx];
}
// Else if the normal code from task 1-4
else if (xx >= 0 && xx < (int)width && yy >= 0 && yy < (int)height)
{
ar += in[yy * width + xx].r * shared_filter[nky * filterDim + nkx];
ag += in[yy * width + xx].g * shared_filter[nky * filterDim + nkx];
ab += in[yy * width + xx].b * shared_filter[nky * filterDim + nkx];
}
}
}
ar *= filterFactor;
ag *= filterFactor;
ab *= filterFactor;
ar = (ar < 0) ? 0 : ar;
ag = (ag < 0) ? 0 : ag;
ab = (ab < 0) ? 0 : ab;
out[y * width + x].r = (ar > 255) ? 255 : ar;
out[y * width + x].g = (ag > 255) ? 255 : ag;
out[y * width + x].b = (ab > 255) ? 255 : ab;
}*/
void help(char const *exec, char const opt, char const *optarg)
{
FILE *out = stdout;
if (opt != 0)
{
out = stderr;
if (optarg)
{
fprintf(out, "Invalid parameter - %c %s\n", opt, optarg);
}
else
{
fprintf(out, "Invalid parameter - %c\n", opt);
}
}
fprintf(out, "%s [options] <input-bmp> <output-bmp>\n", exec);
fprintf(out, "\n");
fprintf(out, "Options:\n");
fprintf(out, " -k, --filter <filter> filter index (0<=x<=%u) (2)\n", maxFilterIndex - 1);
fprintf(out, " -i, --iterations <iterations> number of iterations (1)\n");
fprintf(out, "\n");
fprintf(out, "Example: %s before.bmp after.bmp -i 10000\n", exec);
}
int main(int argc, char **argv)
{
/*
Parameter parsing, don't change this!
*/
unsigned int iterations = 1;
char *output = NULL;
char *input = NULL;
unsigned int filterIndex = 2;
static struct option const long_options[] = {
{"help", no_argument, 0, 'h'},
{"filter", required_argument, 0, 'k'},
{"iterations", required_argument, 0, 'i'},
{0, 0, 0, 0}};
static char const *short_options = "hk:i:";
{
char *endptr;
int c;
int parse;
int option_index = 0;
while ((c = getopt_long(argc, argv, short_options, long_options, &option_index)) != -1)
{
switch (c)
{
case 'h':
help(argv[0], 0, NULL);
graceful_exit(&input, &output);
case 'k':
parse = strtol(optarg, &endptr, 10);
if (endptr == optarg || parse < 0 || parse >= maxFilterIndex)
{
help(argv[0], c, optarg);
error_exit(&input, &output);
}
filterIndex = (unsigned int)parse;
break;
case 'i':
iterations = strtol(optarg, &endptr, 10);
if (endptr == optarg)
{
help(argv[0], c, optarg);
error_exit(&input, &output);
}
break;
default:
abort();
}
}
}
if (argc <= (optind + 1))
{
help(argv[0], ' ', "Not enough arugments");
error_exit(&input, &output);
}
unsigned int arglen = strlen(argv[optind]);
input = (char *)calloc(arglen + 1, sizeof(char));
strncpy(input, argv[optind], arglen);
optind++;
arglen = strlen(argv[optind]);
output = (char *)calloc(arglen + 1, sizeof(char));
strncpy(output, argv[optind], arglen);
optind++;
/*
End of Parameter parsing!
*/
/*
Create the BMP image and load it from disk.
*/
bmpImage *image = newBmpImage(0, 0);
if (image == NULL)
{
fprintf(stderr, "Could not allocate new image!\n");
error_exit(&input, &output);
}
if (loadBmpImage(image, input) != 0)
{
fprintf(stderr, "Could not load bmp image '%s'!\n", input);
freeBmpImage(image);
error_exit(&input, &output);
}
printf("Apply filter '%s' on image with %u x %u pixels for %u iterations\n", filterNames[filterIndex], image->width, image->height, iterations);
// Time measurement init
// Inspired from https://developer.nvidia.com/blog/how-implement-performance-metrics-cuda-cc/
hipEvent_t start_time, end_time;
hipEventCreate(&start_time);
hipEventCreate(&end_time);
// Here we do the actual computation!
// image->data is a 2-dimensional array of pixel which is accessed row first ([y][x])
// image->rawdata is a 1-dimensional array of pixel containing the same data as image->data
// each pixel is a struct of 3 unsigned char for the red, blue and green colour channel
// bmpImage *processImage = newBmpImage(image->width, image->height);
int image_size = image->width * image->height * sizeof(pixel);
int filter_size = filterDims[filterIndex] * filterDims[filterIndex] * sizeof(int);
// We could also have made all filters __device__ available, but it is simpler to copy over only the one we need
pixel *d_image_rawdata, *d_process_image_rawdata;
int *d_filter;
hipMalloc((void **)&d_image_rawdata, image_size);
hipMalloc((void **)&d_process_image_rawdata, image_size);
hipMalloc((void **)&d_filter, filter_size);
hipMemcpy(d_image_rawdata, image->rawdata, image_size, hipMemcpyHostToDevice);
hipMemcpy(d_filter, filters[filterIndex], filter_size, hipMemcpyHostToDevice);
// Task 6
// From https://developer.nvidia.com/blog/cuda-pro-tip-occupancy-api-simplifies-launch-configuration/
int blockSizeInt; // The launch configurator returned block size
int minGridSizeInt; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
int gridSizeInt; // The actual grid size needed, based on input size
hipOccupancyMaxPotentialBlockSize(&minGridSizeInt, &blockSizeInt, applyFilter_CUDA_Kernel, 0, 0);
// Round up according to array size
gridSizeInt = (image->width * image->height + blockSizeInt - 1) / blockSizeInt;
if (blockSizeInt % 32 != 0)
{
printf("NOTE: blockSizeInt was not a multiple of 32: %f\n", float(blockSizeInt) / 32.0);
}
dim3 blockSize(blockSizeInt / 32, blockSizeInt / 32);
printf("BlockSizeInt %d\n", blockSizeInt);
printf("minGridSizeInt %d\n", minGridSizeInt);
printf("gridSizeInt %d\n", gridSizeInt);
// End Task 6
// We allocate one thread per pixel
// gridSize and blockSize inspired from Section 2.2. in the CUDA Programming Guide
// dim3 blockSize(BLOCK_DIMENSION, BLOCK_DIMENSION); // Threads per block
printf("The grid has thread blocks of dimension (%d width * %d height)\n", blockSize.x, blockSize.y);
// We may need to add 1 extra block to width or height if the image's dimensions are not evenly divided by the block's dimension
int extraWidth = 0;
int extraHeight = 0;
if (image->width % blockSize.x != 0)
{
extraWidth = 1;
}
if (image->height % blockSize.y != 0)
{
extraHeight = 1;
}
dim3 gridSize(image->width / blockSize.x + extraWidth, image->height / blockSize.y + extraHeight); // Number of blocks
printf("Launching a grid of dimension (%d width * %d height)\n", image->width / blockSize.x + extraWidth, image->height / blockSize.y + extraHeight);
// Start time measurement
hipEventRecord(start_time);
for (unsigned int i = 0; i < iterations; i++)
{
// Task 2-3
hipLaunchKernelGGL(( applyFilter_CUDA_Kernel), dim3(gridSize), dim3(blockSize), 0, 0,
// Task 5
// TODO Experiment with different bytes in shared memory. Share the border pixels so that we never have to access global memory for the outside bounds.
// int sharedMemoryUsedPerBlock = filterDims[filterIndex] * filterDims[filterIndex] * sizeof(int) + BLOCK_DIMENSION * BLOCK_DIMENSION * sizeof(pixel);
// applyFilter_CUDA_Kernel<<<gridSize, blockSize, sharedMemoryUsedPerBlock>>>(
d_process_image_rawdata, // Out
d_image_rawdata, // In
image->width,
image->height,
// filters[filterIndex],
d_filter,
filterDims[filterIndex],
filterFactors[filterIndex]);
// swapImage(&processImage, &image);
swapImageRawdata(&d_process_image_rawdata, &d_image_rawdata);
}
// End time measurement
hipEventRecord(end_time);
hipMemcpy(image->rawdata, d_image_rawdata, image_size, hipMemcpyDeviceToHost);
hipFree(d_image_rawdata);
hipFree(d_process_image_rawdata);
hipFree(d_filter);
// Blocks CPU execution until end_time is recorded
hipEventSynchronize(end_time);
float spentTime = 0.0;
hipEventElapsedTime(&spentTime, start_time, end_time);
printf("Time spent: %.3f seconds\n", spentTime / 1000);
hipEventDestroy(start_time);
hipEventDestroy(end_time);
// Task 6
hipDeviceSynchronize();
// calculate theoretical occupancy
int maxActiveBlocks;
hipOccupancyMaxActiveBlocksPerMultiprocessor(&maxActiveBlocks, applyFilter_CUDA_Kernel, blockSizeInt, 0);
int device;
hipDeviceProp_t props;
hipGetDevice(&device);
hipGetDeviceProperties(&props, device);
float occupancy = (maxActiveBlocks * blockSizeInt / props.warpSize) / (float)(props.maxThreadsPerMultiProcessor / props.warpSize);
printf("Launched blocks of size %d=>(%dx%d). Theoretical occupancy: %f\n", blockSizeInt, blockSize.x, blockSize.y, occupancy);
// End Task 6
// Check for error
hipError_t error = hipPeekAtLastError();
if (error)
{
fprintf(stderr, "A CUDA error has occurred while cracking: %s\n", hipGetErrorString(error));
}
//Write the image back to disk
if (saveBmpImage(image, output) != 0)
{
fprintf(stderr, "Could not save output to '%s'!\n", output);
freeBmpImage(image);
error_exit(&input, &output);
};
graceful_exit(&input, &output);
};
|
98c3f9d81b4fd47d826a4cd46a96f14c12460398.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <stdlib.h>
#include <time.h>
extern "C"
{
#include "libs/bitmap.h"
}
#define cudaErrorCheck(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %s %d\n", cudaGetErrorName(code), cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
#define BLOCK_DIMENSION 16 // A thread block size of 16x16 (256 threads) is a common choice (from https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#thread-hierarchy)
// Convolutional Filter Examples, each with dimension 3,
// gaussian filter with dimension 5
int sobelYFilter[] = {-1, -2, -1,
0, 0, 0,
1, 2, 1};
int sobelXFilter[] = {-1, -0, 1,
-2, 0, 2,
-1, 0, 1};
int laplacian1Filter[] = {-1, -4, -1,
-4, 20, -4,
-1, -4, -1};
int laplacian2Filter[] = {0, 1, 0,
1, -4, 1,
0, 1, 0};
int laplacian3Filter[] = {-1, -1, -1,
-1, 8, -1,
-1, -1, -1};
int gaussianFilter[] = {1, 4, 6, 4, 1,
4, 16, 24, 16, 4,
6, 24, 36, 24, 6,
4, 16, 24, 16, 4,
1, 4, 6, 4, 1};
const char *filterNames[] = {"SobelY", "SobelX", "Laplacian 1", "Laplacian 2", "Laplacian 3", "Gaussian"};
int *const filters[] = {sobelYFilter, sobelXFilter, laplacian1Filter, laplacian2Filter, laplacian3Filter, gaussianFilter};
unsigned int const filterDims[] = {3, 3, 3, 3, 3, 5};
float const filterFactors[] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0 / 256.0};
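// The 1/256 factor normalizes the 5x5 Gaussian above: it is the outer product
// of the binomial row {1, 4, 6, 4, 1}, whose entries sum to 16, so the whole
// kernel sums to 16 * 16 = 256. The 3x3 filters are applied unnormalized.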
int const maxFilterIndex = sizeof(filterDims) / sizeof(unsigned int);
void cleanup(char **input, char **output)
{
if (*input)
free(*input);
if (*output)
free(*output);
}
void graceful_exit(char **input, char **output)
{
cleanup(input, output);
exit(0);
}
void error_exit(char **input, char **output)
{
cleanup(input, output);
exit(1);
}
// Helper function to swap bmpImageChannel pointers
void swapImageRawdata(pixel **one, pixel **two)
{
pixel *helper = *two;
*two = *one;
*one = helper;
}
void swapImage(bmpImage **one, bmpImage **two)
{
bmpImage *helper = *two;
*two = *one;
*one = helper;
}
// Apply convolutional filter on image data
void applyFilter(pixel *out, pixel *in, unsigned int width, unsigned int height, int *filter, unsigned int filterDim, float filterFactor)
{
unsigned int const filterCenter = (filterDim / 2);
for (unsigned int y = 0; y < height; y++)
{
for (unsigned int x = 0; x < width; x++)
{
int ar = 0, ag = 0, ab = 0;
for (unsigned int ky = 0; ky < filterDim; ky++)
{
int nky = filterDim - 1 - ky;
for (unsigned int kx = 0; kx < filterDim; kx++)
{
int nkx = filterDim - 1 - kx;
int yy = y + (ky - filterCenter);
int xx = x + (kx - filterCenter);
if (xx >= 0 && xx < (int)width && yy >= 0 && yy < (int)height)
{
ar += in[yy * width + xx].r * filter[nky * filterDim + nkx];
ag += in[yy * width + xx].g * filter[nky * filterDim + nkx];
ab += in[yy * width + xx].b * filter[nky * filterDim + nkx];
}
}
}
ar *= filterFactor;
ag *= filterFactor;
ab *= filterFactor;
ar = (ar < 0) ? 0 : ar;
ag = (ag < 0) ? 0 : ag;
ab = (ab < 0) ? 0 : ab;
out[y * width + x].r = (ar > 255) ? 255 : ar;
out[y * width + x].g = (ag > 255) ? 255 : ag;
out[y * width + x].b = (ab > 255) ? 255 : ab;
}
}
}
// Task 1-4
// Apply convolutional filter on image data
__global__ void applyFilter_CUDA_Kernel(pixel *out, pixel *in, unsigned int width, unsigned int height, int *filter, unsigned int filterDim, float filterFactor)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// Handle out of bounds
if (x >= width || y >= height)
{
return;
}
unsigned int const filterCenter = (filterDim / 2);
int ar = 0, ag = 0, ab = 0;
for (unsigned int ky = 0; ky < filterDim; ky++)
{
int nky = filterDim - 1 - ky;
for (unsigned int kx = 0; kx < filterDim; kx++)
{
int nkx = filterDim - 1 - kx;
int yy = y + (ky - filterCenter);
int xx = x + (kx - filterCenter);
if (xx >= 0 && xx < (int)width && yy >= 0 && yy < (int)height)
{
ar += in[yy * width + xx].r * filter[nky * filterDim + nkx];
ag += in[yy * width + xx].g * filter[nky * filterDim + nkx];
ab += in[yy * width + xx].b * filter[nky * filterDim + nkx];
}
}
}
ar *= filterFactor;
ag *= filterFactor;
ab *= filterFactor;
ar = (ar < 0) ? 0 : ar;
ag = (ag < 0) ? 0 : ag;
ab = (ab < 0) ? 0 : ab;
out[y * width + x].r = (ar > 255) ? 255 : ar;
out[y * width + x].g = (ag > 255) ? 255 : ag;
out[y * width + x].b = (ab > 255) ? 255 : ab;
}
// Task 5
// Apply convolutional filter on image data
/*__global__ void applyFilter_CUDA_Kernel(pixel *out, pixel *in, unsigned int width, unsigned int height, int *filter, unsigned int filterDim, float filterFactor)
{
// Now instead of using the filter directly from global memory, we want to copy the filter to shared memory.
// Dynamic shared memory because the filterDim is not known at compile time.
// This one holds all of the data
extern __shared__ int s[];
int *shared_filter = s; // Length of filterDim * filterDim
pixel *shared_pixels = (pixel *)&shared_filter[filterDim * filterDim]; // Length of BLOCK_DIMENSION * BLOCK_DIMENSION
for (int i = 0; i < filterDim * filterDim; i++)
{
shared_filter[i] = filter[i];
}
// Sync to make sure that all threads have completed the loads to shared memory
__syncthreads();
// Now we can use shared_filter!
// Because shared memory is only shared between blocks, it makes sense to make the shared memory array for
// the image as big as the block, since each thread in the block changes one pixel.
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// Handle out of bounds
if (x >= width || y >= height)
{
// __syncthreads(); // ? Needed? Think so, to avoid deadlock
return;
}
// Set the position in the block to the correct value
shared_pixels[threadIdx.y * BLOCK_DIMENSION + threadIdx.x] = in[y * width + x];
// Sync to make sure that all threads have completed the loads to shared memory
__syncthreads();
// Now we can use shared_pixels!
unsigned int const filterCenter = (filterDim / 2);
int ar = 0, ag = 0, ab = 0;
for (unsigned int ky = 0; ky < filterDim; ky++)
{
int nky = filterDim - 1 - ky;
for (unsigned int kx = 0; kx < filterDim; kx++)
{
int nkx = filterDim - 1 - kx;
int yy = y + (ky - filterCenter);
int xx = x + (kx - filterCenter);
// Now, since the edge threads need pixels outside the block's shared memory,
// we need to check its position.
if (xx >= 0 && xx < BLOCK_DIMENSION && yy >= 0 && yy < BLOCK_DIMENSION)
{
ar += shared_pixels[yy * BLOCK_DIMENSION + xx].r * shared_filter[nky * filterDim + nkx];
ag += shared_pixels[yy * BLOCK_DIMENSION + xx].g * shared_filter[nky * filterDim + nkx];
ab += shared_pixels[yy * BLOCK_DIMENSION + xx].b * shared_filter[nky * filterDim + nkx];
}
// Else if the normal code from task 1-4
else if (xx >= 0 && xx < (int)width && yy >= 0 && yy < (int)height)
{
ar += in[yy * width + xx].r * shared_filter[nky * filterDim + nkx];
ag += in[yy * width + xx].g * shared_filter[nky * filterDim + nkx];
ab += in[yy * width + xx].b * shared_filter[nky * filterDim + nkx];
}
}
}
ar *= filterFactor;
ag *= filterFactor;
ab *= filterFactor;
ar = (ar < 0) ? 0 : ar;
ag = (ag < 0) ? 0 : ag;
ab = (ab < 0) ? 0 : ab;
out[y * width + x].r = (ar > 255) ? 255 : ar;
out[y * width + x].g = (ag > 255) ? 255 : ag;
out[y * width + x].b = (ab > 255) ? 255 : ab;
}*/
void help(char const *exec, char const opt, char const *optarg)
{
FILE *out = stdout;
if (opt != 0)
{
out = stderr;
if (optarg)
{
fprintf(out, "Invalid parameter - %c %s\n", opt, optarg);
}
else
{
fprintf(out, "Invalid parameter - %c\n", opt);
}
}
fprintf(out, "%s [options] <input-bmp> <output-bmp>\n", exec);
fprintf(out, "\n");
fprintf(out, "Options:\n");
fprintf(out, " -k, --filter <filter> filter index (0<=x<=%u) (2)\n", maxFilterIndex - 1);
fprintf(out, " -i, --iterations <iterations> number of iterations (1)\n");
fprintf(out, "\n");
fprintf(out, "Example: %s before.bmp after.bmp -i 10000\n", exec);
}
int main(int argc, char **argv)
{
/*
Parameter parsing, don't change this!
*/
unsigned int iterations = 1;
char *output = NULL;
char *input = NULL;
unsigned int filterIndex = 2;
static struct option const long_options[] = {
{"help", no_argument, 0, 'h'},
{"filter", required_argument, 0, 'k'},
{"iterations", required_argument, 0, 'i'},
{0, 0, 0, 0}};
static char const *short_options = "hk:i:";
{
char *endptr;
int c;
int parse;
int option_index = 0;
while ((c = getopt_long(argc, argv, short_options, long_options, &option_index)) != -1)
{
switch (c)
{
case 'h':
help(argv[0], 0, NULL);
graceful_exit(&input, &output);
case 'k':
parse = strtol(optarg, &endptr, 10);
if (endptr == optarg || parse < 0 || parse >= maxFilterIndex)
{
help(argv[0], c, optarg);
error_exit(&input, &output);
}
filterIndex = (unsigned int)parse;
break;
case 'i':
iterations = strtol(optarg, &endptr, 10);
if (endptr == optarg)
{
help(argv[0], c, optarg);
error_exit(&input, &output);
}
break;
default:
abort();
}
}
}
if (argc <= (optind + 1))
{
help(argv[0], ' ', "Not enough arugments");
error_exit(&input, &output);
}
unsigned int arglen = strlen(argv[optind]);
input = (char *)calloc(arglen + 1, sizeof(char));
strncpy(input, argv[optind], arglen);
optind++;
arglen = strlen(argv[optind]);
output = (char *)calloc(arglen + 1, sizeof(char));
strncpy(output, argv[optind], arglen);
optind++;
/*
End of Parameter parsing!
*/
/*
Create the BMP image and load it from disk.
*/
bmpImage *image = newBmpImage(0, 0);
if (image == NULL)
{
fprintf(stderr, "Could not allocate new image!\n");
error_exit(&input, &output);
}
if (loadBmpImage(image, input) != 0)
{
fprintf(stderr, "Could not load bmp image '%s'!\n", input);
freeBmpImage(image);
error_exit(&input, &output);
}
printf("Apply filter '%s' on image with %u x %u pixels for %u iterations\n", filterNames[filterIndex], image->width, image->height, iterations);
// Time measurement init
// Inspired from https://developer.nvidia.com/blog/how-implement-performance-metrics-cuda-cc/
cudaEvent_t start_time, end_time;
cudaEventCreate(&start_time);
cudaEventCreate(&end_time);
// Here we do the actual computation!
// image->data is a 2-dimensional array of pixel which is accessed row first ([y][x])
// image->rawdata is a 1-dimensional array of pixel containing the same data as image->data
// each pixel is a struct of 3 unsigned char for the red, blue and green colour channel
// bmpImage *processImage = newBmpImage(image->width, image->height);
int image_size = image->width * image->height * sizeof(pixel);
int filter_size = filterDims[filterIndex] * filterDims[filterIndex] * sizeof(int);
// We could also have made all filters __device__ available, but it is simpler to copy over only the one we need
pixel *d_image_rawdata, *d_process_image_rawdata;
int *d_filter;
cudaMalloc((void **)&d_image_rawdata, image_size);
cudaMalloc((void **)&d_process_image_rawdata, image_size);
cudaMalloc((void **)&d_filter, filter_size);
cudaMemcpy(d_image_rawdata, image->rawdata, image_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_filter, filters[filterIndex], filter_size, cudaMemcpyHostToDevice);
// Task 6
// From https://developer.nvidia.com/blog/cuda-pro-tip-occupancy-api-simplifies-launch-configuration/
int blockSizeInt; // The launch configurator returned block size
int minGridSizeInt; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
int gridSizeInt; // The actual grid size needed, based on input size
cudaOccupancyMaxPotentialBlockSize(&minGridSizeInt, &blockSizeInt, applyFilter_CUDA_Kernel, 0, 0);
// Round up according to array size
gridSizeInt = (image->width * image->height + blockSizeInt - 1) / blockSizeInt;
if (blockSizeInt % 32 != 0)
{
printf("NOTE: blockSizeInt was not a multiple of 32: %f\n", float(blockSizeInt) / 32.0);
}
dim3 blockSize(blockSizeInt / 32, blockSizeInt / 32);
printf("BlockSizeInt %d\n", blockSizeInt);
printf("minGridSizeInt %d\n", minGridSizeInt);
printf("gridSizeInt %d\n", gridSizeInt);
// End Task 6
// We allocate one thread per pixel
// gridSize and blockSize inspired from Section 2.2. in the CUDA Programming Guide
// dim3 blockSize(BLOCK_DIMENSION, BLOCK_DIMENSION); // Threads per block
printf("The grid has thread blocks of dimension (%d width * %d height)\n", blockSize.x, blockSize.y);
// We may need to add 1 extra block to width or height if the image's dimensions are not evenly divided by the block's dimension
int extraWidth = 0;
int extraHeight = 0;
if (image->width % blockSize.x != 0)
{
extraWidth = 1;
}
if (image->height % blockSize.y != 0)
{
extraHeight = 1;
}
dim3 gridSize(image->width / blockSize.x + extraWidth, image->height / blockSize.y + extraHeight); // Number of blocks
printf("Launching a grid of dimension (%d width * %d height)\n", image->width / blockSize.x + extraWidth, image->height / blockSize.y + extraHeight);
// Start time measurement
cudaEventRecord(start_time);
for (unsigned int i = 0; i < iterations; i++)
{
// Task 2-3
applyFilter_CUDA_Kernel<<<gridSize, blockSize>>>(
// Task 5
// TODO: Experiment with different amounts of shared memory. Share the border pixels so that we never have to access global memory for the neighbouring pixels outside the block.
// int sharedMemoryUsedPerBlock = filterDims[filterIndex] * filterDims[filterIndex] * sizeof(int) + BLOCK_DIMENSION * BLOCK_DIMENSION * sizeof(pixel);
// applyFilter_CUDA_Kernel<<<gridSize, blockSize, sharedMemoryUsedPerBlock>>>(
d_process_image_rawdata, // Out
d_image_rawdata, // In
image->width,
image->height,
// filters[filterIndex],
d_filter,
filterDims[filterIndex],
filterFactors[filterIndex]);
// swapImage(&processImage, &image);
swapImageRawdata(&d_process_image_rawdata, &d_image_rawdata);
}
// End time measurement
cudaEventRecord(end_time);
cudaMemcpy(image->rawdata, d_image_rawdata, image_size, cudaMemcpyDeviceToHost);
cudaFree(d_image_rawdata);
cudaFree(d_process_image_rawdata);
cudaFree(d_filter);
// Blocks CPU execution until end_time is recorded
cudaEventSynchronize(end_time);
float spentTime = 0.0;
cudaEventElapsedTime(&spentTime, start_time, end_time);
printf("Time spent: %.3f seconds\n", spentTime / 1000);
cudaEventDestroy(start_time);
cudaEventDestroy(end_time);
// Task 6
cudaDeviceSynchronize();
// calculate theoretical occupancy
int maxActiveBlocks;
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&maxActiveBlocks, applyFilter_CUDA_Kernel, blockSizeInt, 0);
int device;
cudaDeviceProp props;
cudaGetDevice(&device);
cudaGetDeviceProperties(&props, device);
float occupancy = (maxActiveBlocks * blockSizeInt / props.warpSize) / (float)(props.maxThreadsPerMultiProcessor / props.warpSize);
printf("Launched blocks of size %d=>(%dx%d). Theoretical occupancy: %f\n", blockSizeInt, blockSize.x, blockSize.y, occupancy);
// End Task 6
// Check for error
cudaError_t error = cudaPeekAtLastError();
if (error)
{
fprintf(stderr, "A CUDA error has occurred while cracking: %s\n", cudaGetErrorString(error));
}
//Write the image back to disk
if (saveBmpImage(image, output) != 0)
{
fprintf(stderr, "Could not save output to '%s'!\n", output);
freeBmpImage(image);
error_exit(&input, &output);
};
graceful_exit(&input, &output);
};
|
bb8b739336952fc4f65d408688354ba994a77af7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// kernel splitting a dense matrix (stored in the MAGMA format) into real and imaginary parts
__global__ void
zgedensereimsplit_kernel(
int num_rows,
int num_cols,
magma_index_t* rowidx,
magmaDoubleComplex * A,
magmaDoubleComplex * ReA,
magmaDoubleComplex * ImA )
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
ReA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_REAL( A[ j ] ), 0.0 );
ImA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_IMAG( A[ j ] ), 0.0 );
}
}
}
/**
Purpose
-------
This routine takes an input matrix A in DENSE format and located on the GPU
and splits it into two matrices ReA and ImA containing the real and the
imaginary contributions of A.
The output matrices are allocated within the routine.
Arguments
---------
@param[in]
A magma_z_matrix
input matrix A.
@param[out]
ReA magma_z_matrix*
output matrix containing real contributions.
@param[out]
ImA magma_z_matrix*
output matrix containing imaginary contributions.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
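/*
    Minimal usage sketch (illustrative only, not taken from the MAGMA test suite;
    assumes dA is a Magma_DENSE matrix already resident on the device and queue is
    a valid magma_queue_t):

        magma_z_matrix dReA, dImA;
        magma_zgedensereimsplit( dA, &dReA, &dImA, queue );
        // dReA now holds Re(A), dImA holds Im(A); both were allocated by the routine.
        magma_zmfree( &dReA, queue );
        magma_zmfree( &dImA, queue );
*/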
extern "C"
magma_int_t
magma_zgedensereimsplit(
magma_z_matrix A,
magma_z_matrix *ReA,
magma_z_matrix *ImA,
magma_queue_t queue )
{
magma_zmtransfer( A, ReA, Magma_DEV, Magma_DEV, queue );
magma_zmtransfer( A, ImA, Magma_DEV, Magma_DEV, queue );
int m = A.num_rows;
int n = A.num_cols;
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( zgedensereimsplit_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, A.row, A.dval, ReA->dval, ImA->dval );
return MAGMA_SUCCESS;
}
|
bb8b739336952fc4f65d408688354ba994a77af7.cu
|
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// kernel splitting a dense matrix (stored in the MAGMA format) into real and imaginary parts
__global__ void
zgedensereimsplit_kernel(
int num_rows,
int num_cols,
magma_index_t* rowidx,
magmaDoubleComplex * A,
magmaDoubleComplex * ReA,
magmaDoubleComplex * ImA )
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
ReA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_REAL( A[ j ] ), 0.0 );
ImA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_IMAG( A[ j ] ), 0.0 );
}
}
}
/**
Purpose
-------
This routine takes an input matrix A in DENSE format and located on the GPU
and splits it into two matrices ReA and ImA containing the real and the
imaginary contributions of A.
The output matrices are allocated within the routine.
Arguments
---------
@param[in]
A magma_z_matrix
input matrix A.
@param[out]
ReA magma_z_matrix*
output matrix containing real contributions.
@param[out]
ImA magma_z_matrix*
output matrix containing imaginary contributions.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C"
magma_int_t
magma_zgedensereimsplit(
magma_z_matrix A,
magma_z_matrix *ReA,
magma_z_matrix *ImA,
magma_queue_t queue )
{
magma_zmtransfer( A, ReA, Magma_DEV, Magma_DEV, queue );
magma_zmtransfer( A, ImA, Magma_DEV, Magma_DEV, queue );
int m = A.num_rows;
int n = A.num_cols;
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
zgedensereimsplit_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, A.row, A.dval, ReA->dval, ImA->dval );
return MAGMA_SUCCESS;
}
|
ff0bec869c900cb90541983e420df67e410391f0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <limits.h>
#include "timerc.h"
#define N 1024
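// The kernel below is a Boruvka-style parallel MST: in each round every remaining
// (super-)vertex picks its lightest incident edge, the chosen edges merge vertices
// into super-vertices, and the graph is contracted until a single tree is left.
// With a single block of N threads, each thread owns one (super-)vertex per round.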
__global__ void parallelMST(int *graph, int *new_graph, int *edges, int *new_edges, int *roots, int *serial, int *result, int *n) {
int tid = threadIdx.x;
int size = *n; // the size of the current (shrunk) graph
int original_size = size; // the size of the original graph
// Initialize the result to be a graph with all vertices but no edges
for (int i = 0; i < size; i++) {
result[tid * size + i] = INT_MAX;
}
while (size > 1) { // While there are more than one trees to be merged
// For each vertex, find the edge with minimum weight
if (tid < size) {
int dist = INT_MAX;
for (int i = 0; i < size; i++) {
if (graph[tid * size + i] < dist) { // if node i is closer to node tid than the previous candidates
dist = graph[tid * size + i]; // record the shortest distance seen from node tid
roots[tid] = i; // record i as node tid's new nearest neighbor
}
}
// Mark the edge we found
int a = edges[2*(tid * size + roots[tid])]; // get the first endpoint of chosen edge in the original graph
int b = edges[2*(tid * size + roots[tid]) + 1]; // get the second endpoint of chosen edge in the original graph
result[a * original_size + b] = dist; // mark (a,b)
result[b * original_size + a] = dist; // mark (b,a)
}
__syncthreads();
// Find the super-vertex for each tree
if (tid < size) {
// calculate each node's root in the shrunk tree
int root = roots[tid];
while (roots[roots[root]] != root) {
root = roots[root];
}
if (roots[root] < root) {
root = roots[root];
}
roots[tid] = root;
}
__syncthreads();
// Find the serial number of each grouped tree, i.e. 0, 1, 2, ...
serial[tid] = -1;
if (tid == 0) {
int count = 0;
for (int i = 0; i < size; i++) { // for each vertex
if (serial[roots[i]] == -1) { // if its root has not yet been assigned a serial ID
serial[roots[i]] = count; // then assign next serial number to it
count++;
}
}
*n = count; // update the size of the new graph to other threads
}
__syncthreads();
// For each vertex, change its root to be the serial number assigned
if (tid < size) {
roots[tid] = serial[roots[tid]];
}
__syncthreads();
int next_size = *n; // have each vertex agree on the new size
// Initialize the new weight matrix
if (tid < next_size) {
for (int i = 0; i < next_size; i++) {
new_graph[tid * next_size + i] = INT_MAX;
}
}
__syncthreads();
// Generate new weight matrix
if (tid < size) {
for (int i = 0; i < size; i++) { // for each node
if (tid != i && roots[tid] != roots[i]) { // if we do not have same root
if (graph[tid * size + i] < new_graph[roots[tid] * next_size + roots[i]]) {
// if our distance is less than the current distance between our roots,
// then update the new distance as our distance
new_graph[roots[tid] * next_size + roots[i]] = graph[tid * size + i];
new_graph[roots[i] * next_size + roots[tid]] = graph[tid * size + i];
// record the original endpoints of our edge
new_edges[2 * (roots[tid] * next_size + roots[i])] = edges[2 * (tid * size + i)];
new_edges[2 * (roots[tid] * next_size + roots[i]) + 1] = edges[2 * (tid * size + i) + 1];
new_edges[2 * (roots[i] * next_size + roots[tid])] = edges[2 * (tid * size + i)];
new_edges[2 * (roots[i] * next_size + roots[tid]) + 1] = edges[2 * (tid * size + i) + 1];
}
}
}
}
__syncthreads();
size = next_size; // update the new size
// update the graph and edge sets for next round
if (tid < size) {
for (int i = 0; i < size; i++) {
graph[tid * size + i] = new_graph[tid * size + i];
edges[2 * (tid * size + i)] = new_edges[2 * (tid * size + i)];
edges[2 * (tid * size + i) + 1] = new_edges[2 * (tid * size + i) + 1];
}
}
__syncthreads();
}
}
// returns the node with minimum edge
int minKey(int *key, int *mstSet, int size) {
int min = INT_MAX;
int minKey;
for (int i = 0; i < size; i++) {
if (mstSet[i] == 0 && key[i] < min) {
min = key[i];
minKey = i;
}
}
return minKey;
}
int *sequentialMST(int *graph, int size) {
int *mst = (int *) malloc(size * size * sizeof(int)); // To store final result MST
int *mstSet = (int *) malloc(size * sizeof(int)); // Set of vertices that have not yet been included in the MST
int *key = (int *) malloc(size * sizeof(int)); // Store the shortest edge for each vertex
int *parent = (int *) malloc(size * sizeof(int)); // To record parent for each vertex
// Initialization
for (int i = 0; i < size; i++) {
key[i] = INT_MAX;
mstSet[i] = 0;
for (int j = 0; j < size; j++) {
mst[i * size + j] = INT_MAX;
}
}
// First vertex is always picked first
key[0] = 0;
parent[0] = -1;
for (int i = 0; i < size; i++) {
int u = minKey(key, mstSet, size); // Find the vertex with minimum edge
mstSet[u] = 1; // Mark the vertex as found
// Include the vertex and weight into MST
if (u != 0) {
mst[u * size + parent[u]] = key[u];
mst[parent[u] * size + u] = key[u];
}
// Update minimum edge for each neighbor of the chosen vertex
for (int v = 0; v < size; v++) {
int weight = graph[u * size + v];
if (weight != INT_MAX && mstSet[v] == 0 && weight < key[v]) { // if vertex is not marked and needs an update
parent[v] = u;
key[v] = weight;
}
}
}
free(mstSet);
free(key);
free(parent);
return mst;
}
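// sequentialMST above is the classic O(V^2) Prim's algorithm on an adjacency matrix:
// repeatedly pick the cheapest edge leaving the tree (minKey), add that vertex, and
// relax the keys of its neighbours. It serves as the CPU baseline that the
// parallelMST kernel is timed against in main().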
int main() {
int *graph = (int *) malloc(N * N * sizeof(int));
int *edges = (int *) malloc(2 * N * N * sizeof(int));
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
graph[i * N + j] = INT_MAX;
}
}
// graph[1] = 7; graph[2] = 4; graph[5] = 3;
// graph[N] = 7; graph[N + 2] = 2;
// graph[2 * N] = 4; graph[2 * N + 1] = 2; graph[2 * N + 3] = 1; graph[2 * N + 4] = 5;
// graph[3 * N + 2] = 1; graph[3 * N + 4] = 6;
// graph[4 * N + 2] = 5; graph[4 * N + 3] = 6;
// graph[5 * N] = 3;
// edges[2*5] = 0; edges[2*5+1] = 5; edges[2*(5*N)] = 0; edges[2*(5*N)+1] = 5;
// edges[2*1] = 0; edges[2*1+1] = 1; edges[2*(1*N)] = 0; edges[2*(1*N)+1] = 1;
// edges[2*2] = 0; edges[2*2+1] = 2; edges[2*(2*N)] = 0; edges[2*(2*N)+1] = 2;
// edges[2*(1*N+2)] = 1; edges[2*(1*N+2)+1] = 2; edges[2*(2*N+1)] = 1; edges[2*(2*N+1)+1] = 2;
// edges[2*(2*N+3)] = 2; edges[2*(2*N+3)+1] = 3; edges[2*(3*N+2)] = 2; edges[2*(3*N+2)+1] = 3;
// edges[2*(2*N+4)] = 2; edges[2*(2*N+4)+1] = 4; edges[2*(4*N+2)] = 2; edges[2*(4*N+2)+1] = 4;
// edges[2*(3*N+4)] = 3; edges[2*(3*N+4)+1] = 4; edges[2*(4*N+3)] = 3; edges[2*(4*N+3)+1] = 4;
for (int i = 0; i < N; i++) {
for (int j = i+1; j < N; j++) {
int r = rand() % 100;
if (r % 2) {
graph[i * N + j] = r;
graph[j * N + i] = r;
edges[2*(i*N+j)] = i; edges[2*(i*N+j)+1] = j;
edges[2*(j*N+i)] = i; edges[2*(j*N+i)+1] = j;
}
}
}
// CPU Test
float ctime;
cstart();
int *mst = sequentialMST(graph, N);
cend(&ctime);
// for (int i = 0; i < N; i ++) {
// for (int j = 0; j < N; j++) {
// if (mst[i*N+j] == INT_MAX) {
// printf(" %3d ", 0);
// } else {
// printf(" %3d ",mst[i * N + j]);
// }
// }
// printf("\n");
// }
free(mst);
printf("\n");
printf("CPU time = %f\n", ctime);
int *new_graph = (int *) malloc(N * N * sizeof(int));
int *new_edges = (int *) malloc(2 * N * N * sizeof(int));
int *roots = (int *) malloc(N * sizeof(int));
int *serial = (int *) malloc(N * sizeof(int));
int *results = (int *) malloc(N * N * sizeof(int));
int n = N;
int *graph_dev, *new_graph_dev, *edges_dev, *new_edges_dev, *roots_dev, *serial_dev, *results_dev, *size;
hipMalloc((void **) &graph_dev, N * N * sizeof(int));
hipMalloc((void **) &new_graph_dev, N * N * sizeof(int));
hipMalloc((void **) &edges_dev, 2 * N * N * sizeof(int));
hipMalloc((void **) &new_edges_dev, 2* N * N * sizeof(int));
hipMalloc((void **) &roots_dev, N * sizeof(int));
hipMalloc((void **) &serial_dev, N * sizeof(int));
hipMalloc((void **) &results_dev, N * N * sizeof(int));
hipMalloc((void **) &size, sizeof(int));
float gtime_copy;
gstart();
hipMemcpy(graph_dev, graph, N * N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(new_graph_dev, new_graph, N * N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(edges_dev, edges, 2 * N * N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(new_edges_dev, new_edges, 2 * N * N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(roots_dev, roots, N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(serial_dev, serial, N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(results_dev, results, N * N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(size, &n, sizeof(int), hipMemcpyHostToDevice);
gend(&gtime_copy);
printf("Time to copy input = %f\n", gtime_copy);
float gtime;
gstart();
hipLaunchKernelGGL(( parallelMST), dim3(1), dim3(N), 0, 0, graph_dev, new_graph_dev, edges_dev, new_edges_dev, roots_dev, serial_dev, results_dev, size);
gend(&gtime);
printf("GPU time = %f\n", gtime);
float gtime_output;
gstart();
hipMemcpy(results, results_dev, N * N * sizeof(int), hipMemcpyDeviceToHost);
gend(&gtime_output);
printf("Time to copy output = %f\n", gtime_output);
// for (int i = 0; i < N; i ++) {
// for (int j = 0; j < N; j++) {
// if (results[i*N+j] == INT_MAX) {
// printf(" %3d ", 0);
// } else {
// printf(" %3d ",results[i * N + j]);
// }
// }
// printf("\n");
// }
hipFree(graph_dev); hipFree(new_graph_dev); hipFree(edges_dev); hipFree(new_edges_dev);
hipFree(roots_dev); hipFree(serial_dev); hipFree(results_dev); hipFree(size);
free(graph); free(new_graph); free(edges); free(new_edges); free(roots); free(serial); free(results);
}
|
ff0bec869c900cb90541983e420df67e410391f0.cu
|
#include <stdio.h>
#include <limits.h>
#include "timerc.h"
#define N 1024
__global__ void parallelMST(int *graph, int *new_graph, int *edges, int *new_edges, int *roots, int *serial, int *result, int *n) {
int tid = threadIdx.x;
int size = *n; // the size of the current (shrunk) graph
int original_size = size; // the size of the original graph
// Initialize the result to be a graph with all vertices but no edges
for (int i = 0; i < size; i++) {
result[tid * size + i] = INT_MAX;
}
while (size > 1) { // While there are more than one trees to be merged
// For each vertex, find the edge with minimum weight
if (tid < size) {
int dist = INT_MAX;
for (int i = 0; i < size; i++) {
if (graph[tid * size + i] < dist) { // if node i is closer to node tid than the previous candidates
dist = graph[tid * size + i]; // record the shortest distance seen from node tid
roots[tid] = i; // record i as node tid's new nearest neighbor
}
}
// Mark the edge we found
int a = edges[2*(tid * size + roots[tid])]; // get the first endpoint of chosen edge in the original graph
int b = edges[2*(tid * size + roots[tid]) + 1]; // get the second endpoint of chosen edge in the original graph
result[a * original_size + b] = dist; // mark (a,b)
result[b * original_size + a] = dist; // mark (b,a)
}
__syncthreads();
// Find the super-vertex for each tree
if (tid < size) {
// calculate each node's root in the shrunk tree
int root = roots[tid];
while (roots[roots[root]] != root) {
root = roots[root];
}
if (roots[root] < root) {
root = roots[root];
}
roots[tid] = root;
}
__syncthreads();
// Find the serial number of each grouped tree, i.e. 0, 1, 2, ...
serial[tid] = -1;
if (tid == 0) {
int count = 0;
for (int i = 0; i < size; i++) { // for each vertex
if (serial[roots[i]] == -1) { // if its root has not yet been assigned a serial ID
serial[roots[i]] = count; // then assign next serial number to it
count++;
}
}
*n = count; // update the size of the new graph to other threads
}
__syncthreads();
// For each vertex, change its root to be the serial number assigned
if (tid < size) {
roots[tid] = serial[roots[tid]];
}
__syncthreads();
int next_size = *n; // have each vertex agree on the new size
// Initialize the new weight matrix
if (tid < next_size) {
for (int i = 0; i < next_size; i++) {
new_graph[tid * next_size + i] = INT_MAX;
}
}
__syncthreads();
// Generate new weight matrix
if (tid < size) {
for (int i = 0; i < size; i++) { // for each node
if (tid != i && roots[tid] != roots[i]) { // if we do not have same root
if (graph[tid * size + i] < new_graph[roots[tid] * next_size + roots[i]]) {
// if our distance is less than the current distance between our roots,
// then update the new distance as our distance
new_graph[roots[tid] * next_size + roots[i]] = graph[tid * size + i];
new_graph[roots[i] * next_size + roots[tid]] = graph[tid * size + i];
// record the original endpoints of our edge
new_edges[2 * (roots[tid] * next_size + roots[i])] = edges[2 * (tid * size + i)];
new_edges[2 * (roots[tid] * next_size + roots[i]) + 1] = edges[2 * (tid * size + i) + 1];
new_edges[2 * (roots[i] * next_size + roots[tid])] = edges[2 * (tid * size + i)];
new_edges[2 * (roots[i] * next_size + roots[tid]) + 1] = edges[2 * (tid * size + i) + 1];
}
}
}
}
__syncthreads();
size = next_size; // update the new size
// update the graph and edge sets for next round
if (tid < size) {
for (int i = 0; i < size; i++) {
graph[tid * size + i] = new_graph[tid * size + i];
edges[2 * (tid * size + i)] = new_edges[2 * (tid * size + i)];
edges[2 * (tid * size + i) + 1] = new_edges[2 * (tid * size + i) + 1];
}
}
__syncthreads();
}
}
// returns the node with minimum edge
int minKey(int *key, int *mstSet, int size) {
int min = INT_MAX;
int minKey;
for (int i = 0; i < size; i++) {
if (mstSet[i] == 0 && key[i] < min) {
min = key[i];
minKey = i;
}
}
return minKey;
}
int *sequentialMST(int *graph, int size) {
int *mst = (int *) malloc(size * size * sizeof(int)); // To store final result MST
int *mstSet = (int *) malloc(size * sizeof(int)); // Set of vertices that have not yet been included in the MST
int *key = (int *) malloc(size * sizeof(int)); // Store the shortest edge for each vertex
int *parent = (int *) malloc(size * sizeof(int)); // To record parent for each vertex
// Initialization
for (int i = 0; i < size; i++) {
key[i] = INT_MAX;
mstSet[i] = 0;
for (int j = 0; j < size; j++) {
mst[i * size + j] = INT_MAX;
}
}
// First vertex is always picked first
key[0] = 0;
parent[0] = -1;
for (int i = 0; i < size; i++) {
int u = minKey(key, mstSet, size); // Find the vertex with minimum edge
mstSet[u] = 1; // Mark the vertex as found
// Include the vertex and weight into MST
if (u != 0) {
mst[u * size + parent[u]] = key[u];
mst[parent[u] * size + u] = key[u];
}
// Update minimum edge for each neighbor of the chosen vertex
for (int v = 0; v < size; v++) {
int weight = graph[u * size + v];
if (weight != INT_MAX && mstSet[v] == 0 && weight < key[v]) { // if vertex is not marked and needs an update
parent[v] = u;
key[v] = weight;
}
}
}
free(mstSet);
free(key);
free(parent);
return mst;
}
int main() {
int *graph = (int *) malloc(N * N * sizeof(int));
int *edges = (int *) malloc(2 * N * N * sizeof(int));
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
graph[i * N + j] = INT_MAX;
}
}
// graph[1] = 7; graph[2] = 4; graph[5] = 3;
// graph[N] = 7; graph[N + 2] = 2;
// graph[2 * N] = 4; graph[2 * N + 1] = 2; graph[2 * N + 3] = 1; graph[2 * N + 4] = 5;
// graph[3 * N + 2] = 1; graph[3 * N + 4] = 6;
// graph[4 * N + 2] = 5; graph[4 * N + 3] = 6;
// graph[5 * N] = 3;
// edges[2*5] = 0; edges[2*5+1] = 5; edges[2*(5*N)] = 0; edges[2*(5*N)+1] = 5;
// edges[2*1] = 0; edges[2*1+1] = 1; edges[2*(1*N)] = 0; edges[2*(1*N)+1] = 1;
// edges[2*2] = 0; edges[2*2+1] = 2; edges[2*(2*N)] = 0; edges[2*(2*N)+1] = 2;
// edges[2*(1*N+2)] = 1; edges[2*(1*N+2)+1] = 2; edges[2*(2*N+1)] = 1; edges[2*(2*N+1)+1] = 2;
// edges[2*(2*N+3)] = 2; edges[2*(2*N+3)+1] = 3; edges[2*(3*N+2)] = 2; edges[2*(3*N+2)+1] = 3;
// edges[2*(2*N+4)] = 2; edges[2*(2*N+4)+1] = 4; edges[2*(4*N+2)] = 2; edges[2*(4*N+2)+1] = 4;
// edges[2*(3*N+4)] = 3; edges[2*(3*N+4)+1] = 4; edges[2*(4*N+3)] = 3; edges[2*(4*N+3)+1] = 4;
for (int i = 0; i < N; i++) {
for (int j = i+1; j < N; j++) {
int r = rand() % 100;
if (r % 2) {
graph[i * N + j] = r;
graph[j * N + i] = r;
edges[2*(i*N+j)] = i; edges[2*(i*N+j)+1] = j;
edges[2*(j*N+i)] = i; edges[2*(j*N+i)+1] = j;
}
}
}
// CPU Test
float ctime;
cstart();
int *mst = sequentialMST(graph, N);
cend(&ctime);
// for (int i = 0; i < N; i ++) {
// for (int j = 0; j < N; j++) {
// if (mst[i*N+j] == INT_MAX) {
// printf(" %3d ", 0);
// } else {
// printf(" %3d ",mst[i * N + j]);
// }
// }
// printf("\n");
// }
free(mst);
printf("\n");
printf("CPU time = %f\n", ctime);
int *new_graph = (int *) malloc(N * N * sizeof(int));
int *new_edges = (int *) malloc(2 * N * N * sizeof(int));
int *roots = (int *) malloc(N * sizeof(int));
int *serial = (int *) malloc(N * sizeof(int));
int *results = (int *) malloc(N * N * sizeof(int));
int n = N;
int *graph_dev, *new_graph_dev, *edges_dev, *new_edges_dev, *roots_dev, *serial_dev, *results_dev, *size;
cudaMalloc((void **) &graph_dev, N * N * sizeof(int));
cudaMalloc((void **) &new_graph_dev, N * N * sizeof(int));
cudaMalloc((void **) &edges_dev, 2 * N * N * sizeof(int));
cudaMalloc((void **) &new_edges_dev, 2* N * N * sizeof(int));
cudaMalloc((void **) &roots_dev, N * sizeof(int));
cudaMalloc((void **) &serial_dev, N * sizeof(int));
cudaMalloc((void **) &results_dev, N * N * sizeof(int));
cudaMalloc((void **) &size, sizeof(int));
float gtime_copy;
gstart();
cudaMemcpy(graph_dev, graph, N * N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(new_graph_dev, new_graph, N * N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(edges_dev, edges, 2 * N * N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(new_edges_dev, new_edges, 2 * N * N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(roots_dev, roots, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(serial_dev, serial, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(results_dev, results, N * N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(size, &n, sizeof(int), cudaMemcpyHostToDevice);
gend(&gtime_copy);
printf("Time to copy input = %f\n", gtime_copy);
float gtime;
gstart();
parallelMST<<<1, N>>>(graph_dev, new_graph_dev, edges_dev, new_edges_dev, roots_dev, serial_dev, results_dev, size);
gend(&gtime);
printf("GPU time = %f\n", gtime);
float gtime_output;
gstart();
cudaMemcpy(results, results_dev, N * N * sizeof(int), cudaMemcpyDeviceToHost);
gend(&gtime_output);
printf("Time to copy output = %f\n", gtime_output);
// for (int i = 0; i < N; i ++) {
// for (int j = 0; j < N; j++) {
// if (results[i*N+j] == INT_MAX) {
// printf(" %3d ", 0);
// } else {
// printf(" %3d ",results[i * N + j]);
// }
// }
// printf("\n");
// }
cudaFree(graph_dev); cudaFree(new_graph_dev); cudaFree(edges_dev); cudaFree(new_edges_dev);
cudaFree(roots_dev); cudaFree(serial_dev); cudaFree(results_dev); cudaFree(size);
free(graph); free(new_graph); free(edges); free(new_edges); free(roots); free(serial); free(results);
}
|
d8f92ed91acd970564532a4253b737feb38ee483.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "IntDataPointIdentity.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
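// The benchmark below sweeps every (block shape, matrix size) pair: one initial
// launch followed by hipDeviceSynchronize(), 10 further warm-up launches, then 1000
// timed launches bracketed with std::chrono::steady_clock. Note that no device
// synchronization precedes the end timestamp, so the printed time largely reflects
// kernel launch/queueing overhead rather than pure kernel execution time.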
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int size = XSIZE*YSIZE;
const int *inputX = NULL;
hipMalloc(&inputX, XSIZE*YSIZE);
const int *inputY = NULL;
hipMalloc(&inputY, XSIZE*YSIZE);
int *outputX = NULL;
hipMalloc(&outputX, XSIZE*YSIZE);
int *outputY = NULL;
hipMalloc(&outputY, XSIZE*YSIZE);
int length = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(IntDataPointIdentity, dim3(gridBlock), dim3(threadBlock), 0, 0, size, inputX, inputY, outputX, outputY, length);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(IntDataPointIdentity, dim3(gridBlock), dim3(threadBlock), 0, 0, size, inputX, inputY, outputX, outputY, length);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(IntDataPointIdentity, dim3(gridBlock), dim3(threadBlock), 0, 0, size, inputX, inputY, outputX, outputY, length);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
d8f92ed91acd970564532a4253b737feb38ee483.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "IntDataPointIdentity.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int size = XSIZE*YSIZE;
const int *inputX = NULL;
cudaMalloc(&inputX, XSIZE*YSIZE);
const int *inputY = NULL;
cudaMalloc(&inputY, XSIZE*YSIZE);
int *outputX = NULL;
cudaMalloc(&outputX, XSIZE*YSIZE);
int *outputY = NULL;
cudaMalloc(&outputY, XSIZE*YSIZE);
int length = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
IntDataPointIdentity<<<gridBlock,threadBlock>>>(size,inputX,inputY,outputX,outputY,length);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
IntDataPointIdentity<<<gridBlock,threadBlock>>>(size,inputX,inputY,outputX,outputY,length);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
IntDataPointIdentity<<<gridBlock,threadBlock>>>(size,inputX,inputY,outputX,outputY,length);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
3315a4410aa8f39907fddf8aef42903e0b8a5aea.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <fstream>
#include <string>
#include <iostream>
#include "common/fmt.hpp"
#include "common/utils.hpp"
#include "hiprand/hiprand_kernel.h"
struct Vec {
double x, y, z; // position, also color (r,g,b)
__device__ __host__ Vec operator+(const Vec &b) const {
Vec v;
v.x = x+b.x;
v.y = y+b.y;
v.z = z+b.z;
return v;
}
__device__ __host__ Vec operator-(const Vec &b) const {
Vec v;
v.x = x - b.x;
v.y = y - b.y;
v.z = z - b.z;
return v;
}
__device__ __host__ Vec operator*(double b) const {
Vec v;
v.x = x * b;
v.y = y * b;
v.z = z * b;
return v;
}
__device__ __host__ Vec operator%(Vec&b){
Vec v;
v.x = y * b.z - z * b.y;
v.y = z * b.x - x * b.z;
v.z = x * b.y - y * b.x;
return v;
}
__device__ __host__ Vec mult(const Vec &b) const {
Vec v;
v.x = x * b.x;
v.y = y * b.y;
v.z = z * b.z;
return v;
}
__device__ __host__ Vec& norm() { return *this = *this * (1/sqrt(x*x+y*y+z*z)); }
__device__ __host__ double dot(const Vec &b) const { return x*b.x+y*b.y+z*b.z; } // cross:
};
struct Ray { Vec o, d; };
enum Refl_t { DIFF, SPEC, REFR }; // material types, used in radiance()
struct Sphere {
double rad; // radius
Vec p, e, c; // position, emission, color
Refl_t refl; // reflection type (DIFFuse, SPECular, REFRactive)
__device__ __host__ double intersect(const Ray &r) const { // returns distance, 0 if nohit
Vec op = p-r.o; // Solve t^2*d.d + 2*t*(o-p).d + (o-p).(o-p)-R^2 = 0
double t, eps=1e-4, b=op.dot(r.d), det=b*b-op.dot(op)+rad*rad;
if (det<0) return 0; else det=sqrt(det);
return (t=b-det)>eps ? t : ((t=b+det)>eps ? t : 0);
}
};
__device__ __host__ Vec new_vec(double x_=0, double y_=0, double z_=0) {
Vec v;
v.x = x_;
v.y = y_;
v.z = z_;
return v;
}
__device__ __host__ Ray new_ray(Vec o_, Vec d_) {
Ray r;
r.o = o_;
r.d = d_;
return r;
}
__device__ __host__ Sphere new_sphere(double rad_, Vec p_, Vec e_, Vec c_, Refl_t refl_) {
Sphere s;
s.rad = rad_;
s.p = p_;
s.e = e_;
s.c = c_;
s.refl = refl_;
return s;
}
// CUDA FUNCTIONS ===========================================================
#define MAX_SPHERES 9
#define BLOCK_SIZE 64
#define NUM_CURAND 196608
static __constant__ Sphere SPHERES[MAX_SPHERES];
__device__ __host__ inline double clamp(double x) {
return x<0 ? 0 : x>1 ? 1 : x;
}
int toInt(double x) {
return int(pow(clamp(x),1/2.2)*255+.5);
}
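// toInt applies gamma correction (gamma = 2.2) and maps the clamped radiance from
// [0,1] to an 8-bit channel value in [0,255], rounding to the nearest integer.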
__device__ bool intersect(const Ray &r, double &t, int &id, int num_spheres) {
double d;
double inf = t = 1e20;
for(int i = num_spheres; i--;)
if( (d = SPHERES[i].intersect(r)) && d<t ) {
t=d;
id=i;
}
return t < inf;
}
__device__ Vec linear_radiance(const Ray &r_, int depth_, int num_spheres, hiprandState_t *Xi){
double t; // distance to intersection
int id=0; // id of intersected object
Ray r=r_;
int depth=depth_;
// L0 = Le0 + f0*(L1)
// = Le0 + f0*(Le1 + f1*L2)
// = Le0 + f0*(Le1 + f1*(Le2 + f2*(L3))
// = Le0 + f0*(Le1 + f1*(Le2 + f2*(Le3 + f3*(L4)))
// = ...
// = Le0 + f0*Le1 + f0*f1*Le2 + f0*f1*f2*Le3 + f0*f1*f2*f3*Le4 + ...
//
// So:
// F = 1
// while (1){
// L += F*Lei
// F *= fi
// }
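// Russian roulette (the depth > 5 branch below) terminates paths probabilistically:
// a path survives with probability p (the largest colour component) and its
// throughput is re-weighted by 1/p, which keeps the estimator unbiased.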
Vec cl = new_vec(0,0,0); // accumulated color
Vec cf = new_vec(1,1,1); // accumulated reflectance
while (1){
if (!intersect(r, t, id, num_spheres)) return cl; // if miss, return black
const Sphere &obj = SPHERES[id]; // the hit object
Vec x=r.o+r.d*t, n=(x-obj.p).norm(), nl=n.dot(r.d)<0?n:n*-1, f=obj.c;
double p = f.x>f.y && f.x>f.z ? f.x : f.y>f.z ? f.y : f.z; // max refl
cl = cl + cf.mult(obj.e);
if (++depth>5) if (hiprand_uniform(Xi)<p) f=f*(1/p); else return cl; //R.R.
cf = cf.mult(f);
if (obj.refl == DIFF){ // Ideal DIFFUSE reflection
double r1=2*M_PI*hiprand_uniform(Xi), r2=hiprand_uniform(Xi), r2s=sqrt(r2);
Vec w=nl, u=((fabs(w.x)>.1? new_vec(0,1):new_vec(1))%w).norm(), v=w%u;
Vec d = (u*cos(r1)*r2s + v*sin(r1)*r2s + w*sqrt(1-r2)).norm();
//return obj.e + f.mult(radiance(Ray(x,d),depth,Xi));
r = new_ray(x,d);
continue;
} else if (obj.refl == SPEC){ // Ideal SPECULAR reflection
//return obj.e + f.mult(radiance(Ray(x,r.d-n*2*n.dot(r.d)),depth,Xi));
r = new_ray(x,r.d-n*2*n.dot(r.d));
continue;
}
Ray reflRay = new_ray(x, r.d-n*2*n.dot(r.d)); // Ideal dielectric REFRACTION
bool into = n.dot(nl)>0; // Ray from outside going in?
double nc=1, nt=1.5, nnt=into?nc/nt:nt/nc, ddn=r.d.dot(nl), cos2t;
if ((cos2t=1-nnt*nnt*(1-ddn*ddn))<0){ // Total internal reflection
//return obj.e + f.mult(radiance(reflRay,depth,Xi));
r = reflRay;
continue;
}
Vec tdir = (r.d*nnt - n*((into?1:-1)*(ddn*nnt+sqrt(cos2t)))).norm();
double a=nt-nc, b=nt+nc, R0=a*a/(b*b), c = 1-(into?-ddn:tdir.dot(n));
double Re=R0+(1-R0)*c*c*c*c*c,Tr=1-Re,P=.25+.5*Re,RP=Re/P,TP=Tr/(1-P);
if (hiprand_uniform(Xi)<P){
cf = cf*RP;
r = reflRay;
} else {
cf = cf*TP;
r = new_ray(x,tdir);
}
continue;
}
}
__global__ void calc_pixel(Vec *out, int samps, int offset,
int num_spheres,
int w, int h,
Ray cam, Vec cx, Vec cy,
hiprandState_t __restrict__ *states) {
// Calculates a single pixel of the final image and accumulates
// its colour directly into the output buffer "out".
int t = blockIdx.x * blockDim.x + threadIdx.x + offset;
hiprandState_t state = states[t - offset];
int y = (((h-1) * w) - t)/w;
int x = (((h-1) * w) - t)%w;
if (t < w*h) {
int i = (h-y-1) * w + x;
for (int sy = 0; sy < 2; sy++) { // 2x2 subpixel rows
for (int sx = 0; sx < 2; sx++) { // 2x2 subpixel cols
Vec r = new_vec();
for (int s = 0; s < samps; s++) {
double r1 = 2*hiprand_uniform(&state), dx=r1<1 ? sqrt(r1)-1: 1-sqrt(2-r1);
double r2 = 2*hiprand_uniform(&state), dy=r2<1 ? sqrt(r2)-1: 1-sqrt(2-r2);
Vec d = cx*( ( (sx+.5 + dx)/2 + x)/w - .5) +
cy*( ( (sy+.5 + dy)/2 + y)/h - .5) + cam.d;
d = d.norm();
Vec rad = linear_radiance(new_ray(cam.o+d*140,d),0, num_spheres, &state);
r = r + rad * (1./samps);
} // Camera rays are pushed ^^^^^ forward to start in interior
out[i] = out[i] + new_vec(clamp(r.x),clamp(r.y),clamp(r.z))*.25;
}
}
}
}
__global__ void init_curand(hiprandState_t __restrict__ *states) {
int t = blockIdx.x * blockDim.x + threadIdx.x;
hiprand_init(t, t, 0, &states[t]);
}
int main(int argc, char *argv[]) {
timer_start("Starting program."); //@@ start a timer
int w=1024, h=768; // image width and height in pixels
int samps = argc>=2 ? atoi(argv[1])/4 : 250; // # samples per subpixel
const int num_streams = 4;
dim3 grid(ceil((w*h)/(BLOCK_SIZE * num_streams)), 1, 1);
dim3 block(BLOCK_SIZE, 1, 1);
hiprandState_t *device_states;
hipMalloc((void **) &device_states, sizeof(hiprandState_t) * NUM_CURAND);
// DEVICE - Start initializing the curandstate objects in the background
hipLaunchKernelGGL(( init_curand), dim3(grid), dim3(block), 0, 0, device_states);
// HOST - Start initializing everything else
Sphere *spheres;// = (Sphere *)malloc(NUM_SPHERES * sizeof(Sphere));
hipHostMalloc((void**)&spheres, MAX_SPHERES * sizeof(Sphere), hipHostMallocDefault);
std::ifstream f_in(argv[2]);
int num_spheres = 0;
for (int i = 0; i < MAX_SPHERES; i++) {
std::string rad;
std::string px, py, pz;
std::string ex, ey, ez;
std::string cx, cy, cz;
std::string refl;
if (std::getline(f_in, rad, ',') &&
std::getline(f_in, px, ',') && std::getline(f_in, py, ',') && std::getline(f_in, pz, ',') &&
std::getline(f_in, ex, ',') && std::getline(f_in, ey, ',') && std::getline(f_in, ez, ',') &&
std::getline(f_in, cx, ',') && std::getline(f_in, cy, ',') && std::getline(f_in, cz, ',') &&
std::getline(f_in, refl)) {
if (refl.compare("DIFF") == 0) {
spheres[i] = new_sphere(std::stod(rad),
new_vec(std::stod(px), std::stod(py), std::stod(pz)),
new_vec(std::stod(ex), std::stod(ey), std::stod(ez)),
new_vec(std::stod(cx), std::stod(cy), std::stod(cz)),
DIFF);
} else if (refl.compare("SPEC") == 0) {
spheres[i] = new_sphere(std::stod(rad),
new_vec(std::stod(px), std::stod(py), std::stod(pz)),
new_vec(std::stod(ex), std::stod(ey), std::stod(ez)),
new_vec(std::stod(cx), std::stod(cy), std::stod(cz)),
SPEC);
} else if (refl.compare("REFR") == 0) {
spheres[i] = new_sphere(std::stod(rad),
new_vec(std::stod(px), std::stod(py), std::stod(pz)),
new_vec(std::stod(ex), std::stod(ey), std::stod(ez)),
new_vec(std::stod(cx), std::stod(cy), std::stod(cz)),
REFR);
}
num_spheres++;
} else {
spheres[i] = new_sphere(0, new_vec(), new_vec(), new_vec(), DIFF);
}
}
f_in.close();
hipStream_t stream[num_streams];
for (int i = 0; i < num_streams; ++i)
hipStreamCreate(&stream[i]);
Ray cam = new_ray(new_vec(50,52,295.6), new_vec(0,-0.042612,-1).norm()); // cam pos, dir
Vec cx = new_vec(w*.5135/h), cy = (cx%cam.d).norm()*.5135;
Vec *host_out = (Vec *)malloc(sizeof(Vec) * w * h);
Vec *device_out;
hipMalloc((void **) &device_out, sizeof(Vec) * w * h);
int num_elems_per_segment = w * h / num_streams;
int segment_size = sizeof(Vec) * w * h / num_streams;
FILE *f = fopen("image.ppm", "w"); // Write image to PPM file.
fprintf(f, "P3\n%d %d\n%d\n", w, h, 255);
// DEVICE - Synchronize with CUDA to finish curandStates initialization
hipDeviceSynchronize();
for (int i = 0, offset = 0; i < num_streams; i++, offset += num_elems_per_segment) {
hipMemcpyToSymbolAsync(SPHERES, spheres, MAX_SPHERES * sizeof(Sphere), 0, hipMemcpyHostToDevice, stream[i]);
hipLaunchKernelGGL(( calc_pixel), dim3(grid), dim3(block), 0, stream[i], device_out, samps, offset, num_spheres, w, h, cam, cx, cy, device_states);
}
for (int i = 0, offset = 0; i < num_streams; i++, offset += num_elems_per_segment) {
hipMemcpyAsync(&host_out[offset], &device_out[offset], segment_size, hipMemcpyDeviceToHost, stream[i]);
for (int j=0; j < num_elems_per_segment; j++)
fprintf(f,"%d %d %d ", toInt(host_out[j + offset].x), toInt(host_out[j + offset].y), toInt(host_out[j + offset].z));
}
fclose(f);
for (int i = 0; i < num_streams; ++i)
hipStreamDestroy(stream[i]);
hipFree(device_out);
hipFree(device_states);
free(host_out);
hipHostFree(spheres);
timer_stop();
return 0;
}
|
3315a4410aa8f39907fddf8aef42903e0b8a5aea.cu
|
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <fstream>
#include <string>
#include <iostream>
#include "common/fmt.hpp"
#include "common/utils.hpp"
#include "curand_kernel.h"
struct Vec {
double x, y, z; // position, also color (r,g,b)
__device__ __host__ Vec operator+(const Vec &b) const {
Vec v;
v.x = x+b.x;
v.y = y+b.y;
v.z = z+b.z;
return v;
}
__device__ __host__ Vec operator-(const Vec &b) const {
Vec v;
v.x = x - b.x;
v.y = y - b.y;
v.z = z - b.z;
return v;
}
__device__ __host__ Vec operator*(double b) const {
Vec v;
v.x = x * b;
v.y = y * b;
v.z = z * b;
return v;
}
__device__ __host__ Vec operator%(Vec&b){
Vec v;
v.x = y * b.z - z * b.y;
v.y = z * b.x - x * b.z;
v.z = x * b.y - y * b.x;
return v;
}
__device__ __host__ Vec mult(const Vec &b) const {
Vec v;
v.x = x * b.x;
v.y = y * b.y;
v.z = z * b.z;
return v;
}
__device__ __host__ Vec& norm() { return *this = *this * (1/sqrt(x*x+y*y+z*z)); }
__device__ __host__ double dot(const Vec &b) const { return x*b.x+y*b.y+z*b.z; } // cross:
};
struct Ray { Vec o, d; };
enum Refl_t { DIFF, SPEC, REFR }; // material types, used in radiance()
struct Sphere {
double rad; // radius
Vec p, e, c; // position, emission, color
Refl_t refl; // reflection type (DIFFuse, SPECular, REFRactive)
__device__ __host__ double intersect(const Ray &r) const { // returns distance, 0 if nohit
Vec op = p-r.o; // Solve t^2*d.d + 2*t*(o-p).d + (o-p).(o-p)-R^2 = 0
double t, eps=1e-4, b=op.dot(r.d), det=b*b-op.dot(op)+rad*rad;
if (det<0) return 0; else det=sqrt(det);
return (t=b-det)>eps ? t : ((t=b+det)>eps ? t : 0);
}
};
__device__ __host__ Vec new_vec(double x_=0, double y_=0, double z_=0) {
Vec v;
v.x = x_;
v.y = y_;
v.z = z_;
return v;
}
__device__ __host__ Ray new_ray(Vec o_, Vec d_) {
Ray r;
r.o = o_;
r.d = d_;
return r;
}
__device__ __host__ Sphere new_sphere(double rad_, Vec p_, Vec e_, Vec c_, Refl_t refl_) {
Sphere s;
s.rad = rad_;
s.p = p_;
s.e = e_;
s.c = c_;
s.refl = refl_;
return s;
}
// CUDA FUNCTIONS ===========================================================
#define MAX_SPHERES 9
#define BLOCK_SIZE 64
#define NUM_CURAND 196608
static __constant__ Sphere SPHERES[MAX_SPHERES];
__device__ __host__ inline double clamp(double x) {
return x<0 ? 0 : x>1 ? 1 : x;
}
int toInt(double x) {
return int(pow(clamp(x),1/2.2)*255+.5);
}
__device__ bool intersect(const Ray &r, double &t, int &id, int num_spheres) {
double d;
double inf = t = 1e20;
for(int i = num_spheres; i--;)
if( (d = SPHERES[i].intersect(r)) && d<t ) {
t=d;
id=i;
}
return t < inf;
}
__device__ Vec linear_radiance(const Ray &r_, int depth_, int num_spheres, curandState *Xi){
double t; // distance to intersection
int id=0; // id of intersected object
Ray r=r_;
int depth=depth_;
// L0 = Le0 + f0*(L1)
// = Le0 + f0*(Le1 + f1*L2)
// = Le0 + f0*(Le1 + f1*(Le2 + f2*(L3))
// = Le0 + f0*(Le1 + f1*(Le2 + f2*(Le3 + f3*(L4)))
// = ...
// = Le0 + f0*Le1 + f0*f1*Le2 + f0*f1*f2*Le3 + f0*f1*f2*f3*Le4 + ...
//
// So:
// F = 1
// while (1){
// L += F*Lei
// F *= fi
// }
Vec cl = new_vec(0,0,0); // accumulated color
Vec cf = new_vec(1,1,1); // accumulated reflectance
while (1){
if (!intersect(r, t, id, num_spheres)) return cl; // if miss, return black
const Sphere &obj = SPHERES[id]; // the hit object
Vec x=r.o+r.d*t, n=(x-obj.p).norm(), nl=n.dot(r.d)<0?n:n*-1, f=obj.c;
double p = f.x>f.y && f.x>f.z ? f.x : f.y>f.z ? f.y : f.z; // max refl
cl = cl + cf.mult(obj.e);
if (++depth>5) if (curand_uniform(Xi)<p) f=f*(1/p); else return cl; //R.R.
cf = cf.mult(f);
if (obj.refl == DIFF){ // Ideal DIFFUSE reflection
double r1=2*M_PI*curand_uniform(Xi), r2=curand_uniform(Xi), r2s=sqrt(r2);
Vec w=nl, u=((fabs(w.x)>.1? new_vec(0,1):new_vec(1))%w).norm(), v=w%u;
Vec d = (u*cos(r1)*r2s + v*sin(r1)*r2s + w*sqrt(1-r2)).norm();
//return obj.e + f.mult(radiance(Ray(x,d),depth,Xi));
r = new_ray(x,d);
continue;
} else if (obj.refl == SPEC){ // Ideal SPECULAR reflection
//return obj.e + f.mult(radiance(Ray(x,r.d-n*2*n.dot(r.d)),depth,Xi));
r = new_ray(x,r.d-n*2*n.dot(r.d));
continue;
}
Ray reflRay = new_ray(x, r.d-n*2*n.dot(r.d)); // Ideal dielectric REFRACTION
bool into = n.dot(nl)>0; // Ray from outside going in?
double nc=1, nt=1.5, nnt=into?nc/nt:nt/nc, ddn=r.d.dot(nl), cos2t;
if ((cos2t=1-nnt*nnt*(1-ddn*ddn))<0){ // Total internal reflection
//return obj.e + f.mult(radiance(reflRay,depth,Xi));
r = reflRay;
continue;
}
Vec tdir = (r.d*nnt - n*((into?1:-1)*(ddn*nnt+sqrt(cos2t)))).norm();
double a=nt-nc, b=nt+nc, R0=a*a/(b*b), c = 1-(into?-ddn:tdir.dot(n));
double Re=R0+(1-R0)*c*c*c*c*c,Tr=1-Re,P=.25+.5*Re,RP=Re/P,TP=Tr/(1-P);
if (curand_uniform(Xi)<P){
cf = cf*RP;
r = reflRay;
} else {
cf = cf*TP;
r = new_ray(x,tdir);
}
continue;
}
}
__global__ void calc_pixel(Vec *out, int samps, int offset,
int num_spheres,
int w, int h,
Ray cam, Vec cx, Vec cy,
curandState __restrict__ *states) {
// Calculates a single pixel of the final image and accumulates
// its colour directly into the output buffer "out".
int t = blockIdx.x * blockDim.x + threadIdx.x + offset;
curandState state = states[t - offset];
int y = (((h-1) * w) - t)/w;
int x = (((h-1) * w) - t)%w;
if (t < w*h) {
int i = (h-y-1) * w + x;
for (int sy = 0; sy < 2; sy++) { // 2x2 subpixel rows
for (int sx = 0; sx < 2; sx++) { // 2x2 subpixel cols
Vec r = new_vec();
for (int s = 0; s < samps; s++) {
double r1 = 2*curand_uniform(&state), dx=r1<1 ? sqrt(r1)-1: 1-sqrt(2-r1);
double r2 = 2*curand_uniform(&state), dy=r2<1 ? sqrt(r2)-1: 1-sqrt(2-r2);
Vec d = cx*( ( (sx+.5 + dx)/2 + x)/w - .5) +
cy*( ( (sy+.5 + dy)/2 + y)/h - .5) + cam.d;
d = d.norm();
Vec rad = linear_radiance(new_ray(cam.o+d*140,d),0, num_spheres, &state);
r = r + rad * (1./samps);
} // Camera rays are pushed ^^^^^ forward to start in interior
out[i] = out[i] + new_vec(clamp(r.x),clamp(r.y),clamp(r.z))*.25;
}
}
}
}
__global__ void init_curand(curandState __restrict__ *states) {
int t = blockIdx.x * blockDim.x + threadIdx.x;
curand_init(t, t, 0, &states[t]);
}
int main(int argc, char *argv[]) {
timer_start("Starting program."); //@@ start a timer
int w=1024, h=768; // image width and height in pixels
int samps = argc>=2 ? atoi(argv[1])/4 : 250; // # samples per subpixel
const int num_streams = 4;
dim3 grid(ceil((w*h)/(BLOCK_SIZE * num_streams)), 1, 1);
dim3 block(BLOCK_SIZE, 1, 1);
curandState *device_states;
cudaMalloc((void **) &device_states, sizeof(curandState) * NUM_CURAND);
// DEVICE - Start initializing the curandstate objects in the background
init_curand<<<grid, block>>>(device_states);
// HOST - Start initializing everything else
Sphere *spheres;// = (Sphere *)malloc(NUM_SPHERES * sizeof(Sphere));
cudaHostAlloc((void**)&spheres, MAX_SPHERES * sizeof(Sphere), cudaHostAllocDefault);
std::ifstream f_in(argv[2]);
int num_spheres = 0;
for (int i = 0; i < MAX_SPHERES; i++) {
std::string rad;
std::string px, py, pz;
std::string ex, ey, ez;
std::string cx, cy, cz;
std::string refl;
if (std::getline(f_in, rad, ',') &&
std::getline(f_in, px, ',') && std::getline(f_in, py, ',') && std::getline(f_in, pz, ',') &&
std::getline(f_in, ex, ',') && std::getline(f_in, ey, ',') && std::getline(f_in, ez, ',') &&
std::getline(f_in, cx, ',') && std::getline(f_in, cy, ',') && std::getline(f_in, cz, ',') &&
std::getline(f_in, refl)) {
if (refl.compare("DIFF") == 0) {
spheres[i] = new_sphere(std::stod(rad),
new_vec(std::stod(px), std::stod(py), std::stod(pz)),
new_vec(std::stod(ex), std::stod(ey), std::stod(ez)),
new_vec(std::stod(cx), std::stod(cy), std::stod(cz)),
DIFF);
} else if (refl.compare("SPEC") == 0) {
spheres[i] = new_sphere(std::stod(rad),
new_vec(std::stod(px), std::stod(py), std::stod(pz)),
new_vec(std::stod(ex), std::stod(ey), std::stod(ez)),
new_vec(std::stod(cx), std::stod(cy), std::stod(cz)),
SPEC);
} else if (refl.compare("REFR") == 0) {
spheres[i] = new_sphere(std::stod(rad),
new_vec(std::stod(px), std::stod(py), std::stod(pz)),
new_vec(std::stod(ex), std::stod(ey), std::stod(ez)),
new_vec(std::stod(cx), std::stod(cy), std::stod(cz)),
REFR);
}
num_spheres++;
} else {
spheres[i] = new_sphere(0, new_vec(), new_vec(), new_vec(), DIFF);
}
}
f_in.close();
cudaStream_t stream[num_streams];
for (int i = 0; i < num_streams; ++i)
cudaStreamCreate(&stream[i]);
Ray cam = new_ray(new_vec(50,52,295.6), new_vec(0,-0.042612,-1).norm()); // cam pos, dir
Vec cx = new_vec(w*.5135/h), cy = (cx%cam.d).norm()*.5135;
Vec *host_out = (Vec *)malloc(sizeof(Vec) * w * h);
Vec *device_out;
cudaMalloc((void **) &device_out, sizeof(Vec) * w * h);
int num_elems_per_segment = w * h / num_streams;
int segment_size = sizeof(Vec) * w * h / num_streams;
FILE *f = fopen("image.ppm", "w"); // Write image to PPM file.
fprintf(f, "P3\n%d %d\n%d\n", w, h, 255);
// DEVICE - Synchronize with CUDA to finish curandStates initialization
cudaDeviceSynchronize();
for (int i = 0, offset = 0; i < num_streams; i++, offset += num_elems_per_segment) {
cudaMemcpyToSymbolAsync(SPHERES, spheres, MAX_SPHERES * sizeof(Sphere), 0, cudaMemcpyHostToDevice, stream[i]);
calc_pixel<<<grid, block, 0, stream[i]>>>(device_out, samps, offset, num_spheres, w, h, cam, cx, cy, device_states);
}
for (int i = 0, offset = 0; i < num_streams; i++, offset += num_elems_per_segment) {
cudaMemcpyAsync(&host_out[offset], &device_out[offset], segment_size, cudaMemcpyDeviceToHost, stream[i]);
for (int j=0; j < num_elems_per_segment; j++)
fprintf(f,"%d %d %d ", toInt(host_out[j + offset].x), toInt(host_out[j + offset].y), toInt(host_out[j + offset].z));
}
fclose(f);
for (int i = 0; i < num_streams; ++i)
cudaStreamDestroy(stream[i]);
cudaFree(device_out);
cudaFree(device_states);
free(host_out);
cudaFreeHost(spheres);
timer_stop();
return 0;
}
|
8f51f2f9565bd2b86b5309d2f23d71a61a1e03ae.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
22/12/2019
hmhuan-1612858
nnkhai-1612909
*/
#include <stdio.h>
#include <stdint.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
hipGetErrorString(error)); \
exit(1); \
} \
}
struct GpuTimer
{
hipEvent_t start;
hipEvent_t stop;
GpuTimer()
{
hipEventCreate(&start);
hipEventCreate(&stop);
}
~GpuTimer()
{
hipEventDestroy(start);
hipEventDestroy(stop);
}
void Start()
{
hipEventRecord(start, 0);
hipEventSynchronize(start);
}
void Stop()
{
hipEventRecord(stop, 0);
}
float Elapsed()
{
float elapsed;
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
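// Minimal usage sketch for GpuTimer (illustrative only):
//
//     GpuTimer timer;
//     timer.Start();
//     // ... launch kernels / copy memory ...
//     timer.Stop();
//     printf("Elapsed: %.3f ms\n", timer.Elapsed());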
// Sequential radix sort
// Assume: nBits (k in slides) in {1, 2, 4, 8, 16}
void sortByHost(const uint32_t * in, int n,
uint32_t * out,
int nBits)
{
int nBins = 1 << nBits; // 2^nBits
int * hist = (int *)malloc(nBins * sizeof(int));
int * histScan = (int *)malloc(nBins * sizeof(int));
// In each counting sort, we sort data in "src" and write result to "dst"
// Then, we swap these 2 pointers and go to the next counting sort
// At first, we assign "src = in" and "dest = out"
// However, the data pointed by "in" is read-only
// --> we create a copy of this data and assign "src" to the address of this copy
uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
memcpy(src, in, n * sizeof(uint32_t));
uint32_t * originalSrc = src; // Use originalSrc to free memory later
uint32_t * dst = out;
// Loop from LSD (Least Significant Digit) to MSD (Most Significant Digit)
// (Each digit consists of nBits bits)
// In each loop, sort elements according to the current digit
// (using STABLE counting sort)
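// Worked example of one pass (illustrative, nBits = 2 so nBins = 4): for
// src = {7, 1, 6, 3} and bit = 0, the 2-bit digits are {3, 1, 2, 3}, so
// hist = {0, 1, 1, 2} and histScan = {0, 0, 1, 2}; the scatter then writes
// dst = {1, 6, 7, 3}, a stable sort on the lowest two bits, and the next
// pass sorts on bits 2..3.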
for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits)
{
// TODO: Compute "hist" of the current digit // histogram cua mang in xet tren digit hien tai
memset(hist, 0, nBins * sizeof(int));
for (int i = 0; i < n; i++)
{
int bin = (src[i] >> bit) & (nBins - 1);
hist[bin]++;
}
// TODO: Scan "hist" (exclusively) and save the result to "histScan"
histScan[0] = 0;
for (int i = 1; i < nBins; i++)
histScan[i] = histScan[i - 1] + hist[i - 1];
// TODO: From "histScan", scatter elements in "src" to correct locations in "dst"
for (int i = 0; i < n; i++)
{
int bin = (src[i] >> bit) & (nBins - 1);
dst[histScan[bin]] = src[i];
histScan[bin]++; // (elements that fall in the same bin are written next to each other)
}
// TODO: Swap "src" and "dst"
uint32_t * temp = src;
src = dst;
dst = temp;
}
// TODO: Copy result to "out"
memcpy(out, src, n * sizeof(uint32_t));
// Free memories
free(hist);
free(histScan);
free(originalSrc);
}
// histogram kernel
__global__ void computeHistKernel(uint32_t * in, int n, int * hist, int nBins, int bit)
{
// TODO
// Each block computes its local hist using atomic on SMEM
extern __shared__ int s_bin[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int delta = (nBins - 1) / blockDim.x + 1;
for (int i = 0; i < delta; i++)
{
int id = threadIdx.x + i * blockDim.x;
if (id < nBins)
s_bin[id] = 0;
}
__syncthreads();
if (i < n)
{
int bin = (in[i] >> bit) & (nBins - 1);
atomicAdd(&s_bin[bin], 1);
}
__syncthreads();
// Each block adds its local hist to global hist using atomic on GMEM
for (int i = 0; i < delta; i++)
{
int id = threadIdx.x + i * blockDim.x;
if (id < nBins)
atomicAdd(&hist[id], s_bin[id]);
}
}
// scan kernel
__global__ void scanBlkKernel(int * in, int n, int * out, int * blkSums)
{
// TODO
extern __shared__ int s_data[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i > 0 && i < n)
s_data[threadIdx.x] = in[i - 1];
else
s_data[threadIdx.x] = 0;
__syncthreads();
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
int val = 0;
if (threadIdx.x >= stride)
val = s_data[threadIdx.x - stride];
__syncthreads();
s_data[threadIdx.x] += val;
__syncthreads();
}
if (i < n)
out[i] = s_data[threadIdx.x];
if (threadIdx.x == 0 && blkSums != NULL)
blkSums[blockIdx.x] = s_data[blockDim.x - 1];
}
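// scanBlkKernel computes a per-block exclusive scan: each thread loads the element
// to its left (the shift by one turns the inclusive Hillis-Steele scan into an
// exclusive one) and the stride then doubles log2(blockDim.x) times. Small trace for
// one block of 4 threads on in = {3, 1, 2, 4}: s_data starts as {0, 3, 1, 2};
// after stride 1 it is {0, 3, 4, 3}; after stride 2 it is {0, 3, 4, 6}, which is
// the exclusive scan of the input.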
// TODO: You can define necessary functions here
__global__ void addBlkSums(int * in, int n, int* blkSums)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n && blockIdx.x > 0)
in[i] += blkSums[blockIdx.x - 1];
}
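// Together, scanBlkKernel and addBlkSums implement a two-level scan: each block
// scans its own chunk and stores its total in blkSums, the host (in sortByDevice)
// turns blkSums into running sums, and addBlkSums then adds blkSums[blockIdx.x - 1]
// to every element of block blockIdx.x, stitching the per-block scans into one
// global exclusive scan.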
// (Partially) Parallel radix sort: implement parallel histogram and parallel scan in counting sort
// Assume: nBits (k in slides) in {1, 2, 4, 8, 16}
// Why "int * blockSizes"?
// Because we may want different block sizes for different kernels:
// blockSizes[0] for the histogram kernel
// blockSizes[1] for the scan kernel
void sortByDevice(const uint32_t * in, int n,
uint32_t * out,
int nBits, int * blockSizes)
{
// TODO
int nBins = 1 << nBits; // 2^nBits
int * hist = (int *)malloc(nBins * sizeof(int));
int * histScan = (int *)malloc(nBins * sizeof(int));
uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
memcpy(src, in, n * sizeof(uint32_t));
uint32_t * originalSrc = src; // Use originalSrc to free memory later
uint32_t * dst = out;
dim3 blkSize1(blockSizes[0]); // block size for histogram kernel
dim3 blkSize2(blockSizes[1]); // block size for scan kernel
dim3 gridSize1((n - 1) / blkSize1.x + 1); // grid size for histogram kernel
dim3 gridSize2((nBins - 1)/ blkSize2.x + 1); // grid size for scan kernel
size_t smemSize = nBins * sizeof(int); // shared memory size for histogram kernel
int * d_hist, *d_histScan, * d_blkSums;
uint32_t *d_src;
int * blkSums;
blkSums = (int*)malloc(gridSize2.x * sizeof(int));
size_t sMemSize = blkSize2.x * sizeof(int); // shared memory size for scan kernel
CHECK(hipMalloc(&d_src, n * sizeof(uint32_t)));
CHECK(hipMalloc(&d_hist, nBins * sizeof(int)));
CHECK(hipMalloc(&d_histScan, nBins * sizeof(int)));
CHECK(hipMalloc(&d_blkSums, gridSize2.x * sizeof(int)));
CHECK(hipMemcpy(d_src, src, n * sizeof(uint32_t), hipMemcpyHostToDevice));
// Loop from LSD (Least Significant Digit) to MSD (Most Significant Digit)
// (Each digit consists of nBits bits)
// In each loop, sort elements according to the current digit
// (using STABLE counting sort)
for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits)
{
// TODO: compute hist by Device
CHECK(hipMemset(d_hist, 0, nBins * sizeof(int)));
hipLaunchKernelGGL(( computeHistKernel), dim3(gridSize1), dim3(blkSize1), smemSize, 0, d_src, n, d_hist, nBins, bit);
hipDeviceSynchronize();
CHECK(hipGetLastError());
CHECK(hipMemcpy(hist, d_hist, nBins * sizeof(int), hipMemcpyDeviceToHost));
// TODO: exclusive scan
hipLaunchKernelGGL(( scanBlkKernel), dim3(gridSize2), dim3(blkSize2), sMemSize, 0, d_hist, nBins, d_histScan, d_blkSums);
hipDeviceSynchronize();
CHECK(hipGetLastError());
//CHECK(hipMemcpy(histScan, d_histScan, nBins * sizeof(int), hipMemcpyDeviceToHost));
CHECK(hipMemcpy(blkSums, d_blkSums, gridSize2.x * sizeof(int), hipMemcpyDeviceToHost));
for (int i = 1; i < gridSize2.x; i++)
blkSums[i] += blkSums[i-1];
//for (int i = blkSize2.x; i < nBins; i++)
// histScan[i] += blkSums[(i - 1) / blkSize2.x];
CHECK(hipMemcpy(d_blkSums, blkSums, gridSize2.x * sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( addBlkSums), dim3(gridSize2), dim3(blkSize2), 0, 0, d_histScan, nBins, d_blkSums);
hipDeviceSynchronize();
CHECK(hipGetLastError());
CHECK(hipMemcpy(histScan, d_histScan, nBins * sizeof(int), hipMemcpyDeviceToHost));
// TODO: From "histScan", scatter elements in "src" to correct locations in "dst"
for (int i = 0; i < n; i++)
{
int bin = (src[i] >> bit) & (nBins - 1);
dst[histScan[bin]] = src[i];
histScan[bin]++;
}
// TODO: Swap "src" and "dst"
uint32_t * temp = src;
src = dst;
dst = temp;
}
CHECK(hipFree(d_src));
CHECK(hipFree(d_hist));
CHECK(hipFree(d_blkSums));
CHECK(hipFree(d_histScan));
// TODO: Copy result to "out"
memcpy(out, src, n * sizeof(uint32_t));
// Free memories
free(blkSums);
free(hist);
free(histScan);
free(originalSrc);
}
__global__ void scanBlkKernel_1(uint32_t *in, int n, int bit, int *out, int * blkSums)
{
// TODO: compute bits
extern __shared__ int s_data[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i > 0 && i < n)
{
s_data[threadIdx.x] = (in[i - 1] >> bit) & 1;
}
else
s_data[threadIdx.x] = 0;
__syncthreads();
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
int val = 0;
if (threadIdx.x >= stride)
val = s_data[threadIdx.x - stride];
__syncthreads();
s_data[threadIdx.x] += val;
__syncthreads();
}
if (i < n)
out[i] = s_data[threadIdx.x];
if (threadIdx.x == 0 && blkSums != NULL)
blkSums[blockIdx.x] = s_data[blockDim.x - 1];
}
__global__ void scatter(uint32_t * in, int bit, int *inScan, int n, uint32_t *out)
{
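// Split primitive for one bit: inScan[i] is the exclusive scan of the bit values, i.e. the number
// of 1-elements before position i. Elements with bit 0 are packed first, keeping their relative
// order (rank = i - inScan[i]); elements with bit 1 follow all the zeros (rank = nZeros + inScan[i]).
// Stability per bit is what makes the LSD radix sort correct.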
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
int nZeros = n - inScan[n - 1] - ((in[n - 1] >> bit) & 1);
int inBit = (in[i] >> bit) & 1;
int rank = 0;
if (inBit == 0)
rank = i - inScan[i];
else
rank = nZeros + inScan[i];
out[rank] = in[i];
}
}
void printArray(uint32_t * a, int n);
void sortByDevice_base03(const uint32_t * in, int n,
uint32_t * out, int * blockSizes)
{
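// Fully device-side 1-bit radix sort: for each of the 32 bits, scan the bit values on the GPU and
// scatter with the split kernel above; only the per-block sums round-trip to the host.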
uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
uint32_t * dst = (uint32_t *)malloc(n * sizeof(uint32_t));
memcpy(src, in, n * sizeof(uint32_t));
uint32_t * originalSrc = src; // Use originalSrc to free memory later
dim3 blkSize(blockSizes[0]); // block size for histogram kernel
dim3 gridSize((n - 1) / blkSize.x + 1); // grid size for histogram kernel
int *d_bitsScan, * d_bits, * d_blkSums;
uint32_t *d_src, *d_dst;
size_t sMemSize = blkSize.x * sizeof(int); // shared memory size for scan kernel
int * blkSums = (int *)malloc(gridSize.x * sizeof(int));
int * bitsScan = (int *)malloc(n * sizeof(int));
int * bits = (int *)malloc(n * sizeof(int));
CHECK(hipMalloc(&d_src, n * sizeof(uint32_t)));
CHECK(hipMalloc(&d_dst, n * sizeof(uint32_t)));
CHECK(hipMalloc(&d_bitsScan, n * sizeof(int)));
CHECK(hipMalloc(&d_bits, n * sizeof(int)));
CHECK(hipMalloc(&d_blkSums, gridSize.x * sizeof(int)));
CHECK(hipMemcpy(d_src, src, n * sizeof(uint32_t), hipMemcpyHostToDevice));
for (int bit = 0; bit < sizeof(uint32_t) * 8; bit++)
{
// TODO: compute bits [0 1 1 ...] and exclusive scan
hipLaunchKernelGGL(( scanBlkKernel_1), dim3(gridSize), dim3(blkSize), sMemSize, 0, d_src, n, bit, d_bitsScan, d_blkSums);
hipDeviceSynchronize();
CHECK(hipGetLastError());
CHECK(hipMemcpy(blkSums, d_blkSums, gridSize.x * sizeof(int), hipMemcpyDeviceToHost));
for (int i = 1; i < gridSize.x; i++)
blkSums[i] += blkSums[i-1];
CHECK(hipMemcpy(d_blkSums, blkSums, gridSize.x * sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( addBlkSums), dim3(gridSize), dim3(blkSize), 0, 0, d_bitsScan, n, d_blkSums);
hipDeviceSynchronize();
CHECK(hipGetLastError());
// TODO: scatter
hipLaunchKernelGGL(( scatter), dim3(gridSize), dim3(blkSize), 0, 0, d_src, bit, d_bitsScan, n, d_dst);
hipDeviceSynchronize();
CHECK(hipGetLastError());
// TODO: Swap "src" and "dst"
uint32_t * d_temp = d_src;
d_src = d_dst;
d_dst = d_temp;
}
CHECK(hipMemcpy(out, d_src, n * sizeof(uint32_t), hipMemcpyDeviceToHost));
//free Cuda
CHECK(hipFree(d_src));
CHECK(hipFree(d_dst));
CHECK(hipFree(d_bits));
CHECK(hipFree(d_bitsScan));
CHECK(hipFree(d_blkSums));
// Free memories
free(originalSrc);
free(dst);
free(blkSums);
free(bitsScan);
free(bits);
}
void sortByDevice_thrust(const uint32_t * in, int n, uint32_t * out)
{
// TODO
thrust::device_vector<uint32_t> dv_out(in, in + n);
thrust::sort(dv_out.begin(), dv_out.end());
thrust::copy(dv_out.begin(), dv_out.end(), out);
}
// Radix sort
float sort(const uint32_t * in, int n,
uint32_t * out,
int nBits,
int useDevice=0, int * blockSizes=NULL)
{
GpuTimer timer;
timer.Start();
if (useDevice == 0)
{
printf("\nRadix sort by host\n");
sortByHost(in, n, out, nBits);
}
else if (useDevice == 1)// use device
{
printf("\nRadix sort by device\n");
sortByDevice(in, n, out, nBits, blockSizes);
}
else if (useDevice == 2)
{
sortByDevice_base03(in, n, out, blockSizes);
}
else
{
printf("\nSort by thrust\n");
sortByDevice_thrust(in, n, out);
}
timer.Stop();
float time = timer.Elapsed();
if (useDevice != 2)
printf("Time: %.3f ms\n", time);
return time;
}
void printDeviceInfo()
{
hipDeviceProp_t devProv;
CHECK(hipGetDeviceProperties(&devProv, 0));
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor);
printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock);
printf("****************************\n");
}
void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n)
{
for (int i = 0; i < n; i++)
{
if (out[i] != correctOut[i])
{
printf("%d, %d != %d\n", i, out[i], correctOut[i]);
printf("INCORRECT :(\n");
return;
}
}
printf("CORRECT :)\n");
}
void printArray(uint32_t * a, int n)
{
for (int i = 0; i < n; i++)
printf("%i ", a[i]);
printf("\n");
}
int main(int argc, char ** argv)
{
// PRINT OUT DEVICE INFO
printDeviceInfo();
// SET UP INPUT SIZE
int n = (1 << 24) + 1;
printf("\nInput size: %d\n", n);
// ALLOCATE MEMORIES
size_t bytes = n * sizeof(uint32_t);
uint32_t * in = (uint32_t *)malloc(bytes);
uint32_t * out = (uint32_t *)malloc(bytes); // Device result
uint32_t * out_base03 = (uint32_t *)malloc(bytes); // Device result base03
uint32_t * out_thrust = (uint32_t *)malloc(bytes); // result by Thrust
uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result
// SET UP INPUT DATA
for (int i = 0; i < n; i++)
in[i] = rand();
// SET UP NBITS
int nBits = 8;
if (argc > 1)
nBits = atoi(argv[1]);
printf("\nNum bits per digit: %d\n", nBits);
// DETERMINE BLOCK SIZES
int blockSizes[2] = {512, 512}; // One for histogram, one for scan
if (argc == 4)
{
blockSizes[0] = atoi(argv[2]);
blockSizes[1] = atoi(argv[3]);
}
printf("\nHist block size: %d, scan block size: %d\n", blockSizes[0], blockSizes[1]);
// SORT BY HOST
sort(in, n, correctOut, nBits, 0);
// SORT BY DEVICE
sort(in, n, out, nBits, 1, blockSizes);
checkCorrectness(out, correctOut, n);
// SORT base 03
printf("\nRadix sort by device by base03\n");
float avg_time = 0;
int loop = 16;
for (int i = 0; i < loop; i++)
{
float time = sort(in, n, out_base03, 1, 2, blockSizes);
avg_time += time;
}
printf("Avg Time: %.3f ms\n", avg_time / loop);
checkCorrectness(out_base03, correctOut, n);
// SORT BY DEVICE by thrust
sort(in, n, out_thrust, nBits, 3, blockSizes);
checkCorrectness(out_thrust, correctOut, n);
// FREE MEMORIES
free(in);
free(out);
free(out_base03);
free(out_thrust);
free(correctOut);
return EXIT_SUCCESS;
}
|
8f51f2f9565bd2b86b5309d2f23d71a61a1e03ae.cu
|
/*
22/12/2019
hmhuan-1612858
nnkhai-1612909
*/
#include <stdio.h>
#include <stdint.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
struct GpuTimer
{
cudaEvent_t start;
cudaEvent_t stop;
GpuTimer()
{
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
~GpuTimer()
{
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
void Start()
{
cudaEventRecord(start, 0);
cudaEventSynchronize(start);
}
void Stop()
{
cudaEventRecord(stop, 0);
}
float Elapsed()
{
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
// Sequential radix sort
// Assume: nBits (k in slides) in {1, 2, 4, 8, 16}
void sortByHost(const uint32_t * in, int n,
uint32_t * out,
int nBits)
{
int nBins = 1 << nBits; // 2^nBits
int * hist = (int *)malloc(nBins * sizeof(int));
int * histScan = (int *)malloc(nBins * sizeof(int));
// In each counting sort, we sort data in "src" and write result to "dst"
// Then, we swap these 2 pointers and go to the next counting sort
// At first, we assign "src = in" and "dest = out"
// However, the data pointed by "in" is read-only
// --> we create a copy of this data and assign "src" to the address of this copy
uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
memcpy(src, in, n * sizeof(uint32_t));
uint32_t * originalSrc = src; // Use originalSrc to free memory later
uint32_t * dst = out;
// Loop from LSD (Least Significant Digit) to MSD (Most Significant Digit)
// (Each digit consists of nBits bits)
// In each loop, sort elements according to the current digit
// (using STABLE counting sort)
for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits)
{
// TODO: Compute "hist" of the current digit // histogram cua mang in xet tren digit hien tai
memset(hist, 0, nBins * sizeof(int));
for (int i = 0; i < n; i++)
{
int bin = (src[i] >> bit) & (nBins - 1);
hist[bin]++;
}
// TODO: Scan "hist" (exclusively) and save the result to "histScan"
histScan[0] = 0;
for (int i = 1; i < nBins; i++)
histScan[i] = histScan[i - 1] + hist[i - 1];
// TODO: From "histScan", scatter elements in "src" to correct locations in "dst"
for (int i = 0; i < n; i++)
{
int bin = (src[i] >> bit) & (nBins - 1);
dst[histScan[bin]] = src[i];
histScan[bin]++; // (elements in the same bin are written to adjacent positions)
}
// TODO: Swap "src" and "dst"
uint32_t * temp = src;
src = dst;
dst = temp;
}
// TODO: Copy result to "out"
memcpy(out, src, n * sizeof(uint32_t));
// Free memories
free(hist);
free(histScan);
free(originalSrc);
}
// histogram kernel
__global__ void computeHistKernel(uint32_t * in, int n, int * hist, int nBins, int bit)
{
// TODO
// Each block computes its local hist using atomic on SMEM
extern __shared__ int s_bin[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int delta = (nBins - 1) / blockDim.x + 1;
for (int i = 0; i < delta; i++)
{
int id = threadIdx.x + i * blockDim.x;
if (id < nBins)
s_bin[id] = 0;
}
__syncthreads();
if (i < n)
{
int bin = (in[i] >> bit) & (nBins - 1);
atomicAdd(&s_bin[bin], 1);
}
__syncthreads();
// Each block adds its local hist to global hist using atomic on GMEM
for (int i = 0; i < delta; i++)
{
int id = threadIdx.x + i * blockDim.x;
if (id < nBins)
atomicAdd(&hist[id], s_bin[id]);
}
}
// scan kernel
__global__ void scanBlkKernel(int * in, int n, int * out, int * blkSums)
{
// TODO
extern __shared__ int s_data[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i > 0 && i < n)
s_data[threadIdx.x] = in[i - 1];
else
s_data[threadIdx.x] = 0;
__syncthreads();
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
int val = 0;
if (threadIdx.x >= stride)
val = s_data[threadIdx.x - stride];
__syncthreads();
s_data[threadIdx.x] += val;
__syncthreads();
}
if (i < n)
out[i] = s_data[threadIdx.x];
if (threadIdx.x == 0 && blkSums != NULL)
blkSums[blockIdx.x] = s_data[blockDim.x - 1];
}
// TODO: You can define necessary functions here
__global__ void addBlkSums(int * in, int n, int* blkSums)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n && blockIdx.x > 0)
in[i] += blkSums[blockIdx.x - 1];
}
// (Partially) Parallel radix sort: implement parallel histogram and parallel scan in counting sort
// Assume: nBits (k in slides) in {1, 2, 4, 8, 16}
// Why "int * blockSizes"?
// Because we may want different block sizes for different kernels:
// blockSizes[0] for the histogram kernel
// blockSizes[1] for the scan kernel
void sortByDevice(const uint32_t * in, int n,
uint32_t * out,
int nBits, int * blockSizes)
{
// TODO
int nBins = 1 << nBits; // 2^nBits
int * hist = (int *)malloc(nBins * sizeof(int));
int * histScan = (int *)malloc(nBins * sizeof(int));
uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
memcpy(src, in, n * sizeof(uint32_t));
uint32_t * originalSrc = src; // Use originalSrc to free memory later
uint32_t * dst = out;
dim3 blkSize1(blockSizes[0]); // block size for histogram kernel
dim3 blkSize2(blockSizes[1]); // block size for scan kernel
dim3 gridSize1((n - 1) / blkSize1.x + 1); // grid size for histogram kernel
dim3 gridSize2((nBins - 1)/ blkSize2.x + 1); // grid size for scan kernel
size_t smemSize = nBins * sizeof(int); // shared memory size for histogram kernel
int * d_hist, *d_histScan, * d_blkSums;
uint32_t *d_src;
int * blkSums;
blkSums = (int*)malloc(gridSize2.x * sizeof(int));
size_t sMemSize = blkSize2.x * sizeof(int); // shared memory size for scan kernel
CHECK(cudaMalloc(&d_src, n * sizeof(uint32_t)));
CHECK(cudaMalloc(&d_hist, nBins * sizeof(int)));
CHECK(cudaMalloc(&d_histScan, nBins * sizeof(int)));
CHECK(cudaMalloc(&d_blkSums, gridSize2.x * sizeof(int)));
CHECK(cudaMemcpy(d_src, src, n * sizeof(uint32_t), cudaMemcpyHostToDevice));
// Loop from LSD (Least Significant Digit) to MSD (Most Significant Digit)
// (Each digit consists of nBits bits)
// In each loop, sort elements according to the current digit
// (using STABLE counting sort)
for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits)
{
// TODO: compute hist by Device
CHECK(cudaMemset(d_hist, 0, nBins * sizeof(int)));
computeHistKernel<<<gridSize1, blkSize1, smemSize>>>(d_src, n, d_hist, nBins, bit);
cudaDeviceSynchronize();
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(hist, d_hist, nBins * sizeof(int), cudaMemcpyDeviceToHost));
// TODO: exclusive scan
scanBlkKernel<<<gridSize2, blkSize2, sMemSize>>>(d_hist, nBins, d_histScan, d_blkSums);
cudaDeviceSynchronize();
CHECK(cudaGetLastError());
//CHECK(cudaMemcpy(histScan, d_histScan, nBins * sizeof(int), cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(blkSums, d_blkSums, gridSize2.x * sizeof(int), cudaMemcpyDeviceToHost));
for (int i = 1; i < gridSize2.x; i++)
blkSums[i] += blkSums[i-1];
//for (int i = blkSize2.x; i < nBins; i++)
// histScan[i] += blkSums[(i - 1) / blkSize2.x];
CHECK(cudaMemcpy(d_blkSums, blkSums, gridSize2.x * sizeof(int), cudaMemcpyHostToDevice));
addBlkSums<<<gridSize2, blkSize2>>>(d_histScan, nBins, d_blkSums);
cudaDeviceSynchronize();
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(histScan, d_histScan, nBins * sizeof(int), cudaMemcpyDeviceToHost));
// TODO: From "histScan", scatter elements in "src" to correct locations in "dst"
for (int i = 0; i < n; i++)
{
int bin = (src[i] >> bit) & (nBins - 1);
dst[histScan[bin]] = src[i];
histScan[bin]++;
}
// TODO: Swap "src" and "dst"
uint32_t * temp = src;
src = dst;
dst = temp;
}
CHECK(cudaFree(d_src));
CHECK(cudaFree(d_hist));
CHECK(cudaFree(d_blkSums));
CHECK(cudaFree(d_histScan));
// TODO: Copy result to "out"
memcpy(out, src, n * sizeof(uint32_t));
// Free memories
free(blkSums);
free(hist);
free(histScan);
free(originalSrc);
}
__global__ void scanBlkKernel_1(uint32_t *in, int n, int bit, int *out, int * blkSums)
{
// TODO: compute bits
extern __shared__ int s_data[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i > 0 && i < n)
{
s_data[threadIdx.x] = (in[i - 1] >> bit) & 1;
}
else
s_data[threadIdx.x] = 0;
__syncthreads();
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
int val = 0;
if (threadIdx.x >= stride)
val = s_data[threadIdx.x - stride];
__syncthreads();
s_data[threadIdx.x] += val;
__syncthreads();
}
if (i < n)
out[i] = s_data[threadIdx.x];
if (threadIdx.x == 0 && blkSums != NULL)
blkSums[blockIdx.x] = s_data[blockDim.x - 1];
}
__global__ void scatter(uint32_t * in, int bit, int *inScan, int n, uint32_t *out)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
int nZeros = n - inScan[n - 1] - ((in[n - 1] >> bit) & 1);
int inBit = (in[i] >> bit) & 1;
int rank = 0;
if (inBit == 0)
rank = i - inScan[i];
else
rank = nZeros + inScan[i];
out[rank] = in[i];
}
}
void printArray(uint32_t * a, int n);
void sortByDevice_base03(const uint32_t * in, int n,
uint32_t * out, int * blockSizes)
{
uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
uint32_t * dst = (uint32_t *)malloc(n * sizeof(uint32_t));
memcpy(src, in, n * sizeof(uint32_t));
uint32_t * originalSrc = src; // Use originalSrc to free memory later
dim3 blkSize(blockSizes[0]); // block size for histogram kernel
dim3 gridSize((n - 1) / blkSize.x + 1); // grid size for histogram kernel
int *d_bitsScan, * d_bits, * d_blkSums;
uint32_t *d_src, *d_dst;
size_t sMemSize = blkSize.x * sizeof(int); // shared memory size for scan kernel
int * blkSums = (int *)malloc(gridSize.x * sizeof(int));
int * bitsScan = (int *)malloc(n * sizeof(int));
int * bits = (int *)malloc(n * sizeof(int));
CHECK(cudaMalloc(&d_src, n * sizeof(uint32_t)));
CHECK(cudaMalloc(&d_dst, n * sizeof(uint32_t)));
CHECK(cudaMalloc(&d_bitsScan, n * sizeof(int)));
CHECK(cudaMalloc(&d_bits, n * sizeof(int)));
CHECK(cudaMalloc(&d_blkSums, gridSize.x * sizeof(int)));
CHECK(cudaMemcpy(d_src, src, n * sizeof(uint32_t), cudaMemcpyHostToDevice));
for (int bit = 0; bit < sizeof(uint32_t) * 8; bit++)
{
// TODO: compute bits [0 1 1 ...] and exclusive scan
scanBlkKernel_1<<<gridSize, blkSize, sMemSize>>>(d_src, n, bit, d_bitsScan, d_blkSums);
cudaDeviceSynchronize();
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(blkSums, d_blkSums, gridSize.x * sizeof(int), cudaMemcpyDeviceToHost));
for (int i = 1; i < gridSize.x; i++)
blkSums[i] += blkSums[i-1];
CHECK(cudaMemcpy(d_blkSums, blkSums, gridSize.x * sizeof(int), cudaMemcpyHostToDevice));
addBlkSums<<<gridSize, blkSize>>>(d_bitsScan, n, d_blkSums);
cudaDeviceSynchronize();
CHECK(cudaGetLastError());
// TODO: scatter
scatter<<<gridSize, blkSize>>>(d_src, bit, d_bitsScan, n, d_dst);
cudaDeviceSynchronize();
CHECK(cudaGetLastError());
// TODO: Swap "src" and "dst"
uint32_t * d_temp = d_src;
d_src = d_dst;
d_dst = d_temp;
}
CHECK(cudaMemcpy(out, d_src, n * sizeof(uint32_t), cudaMemcpyDeviceToHost));
//free Cuda
CHECK(cudaFree(d_src));
CHECK(cudaFree(d_dst));
CHECK(cudaFree(d_bits));
CHECK(cudaFree(d_bitsScan));
CHECK(cudaFree(d_blkSums));
// Free memories
free(originalSrc);
free(dst);
free(blkSums);
free(bitsScan);
free(bits);
}
void sortByDevice_thrust(const uint32_t * in, int n, uint32_t * out)
{
// TODO
thrust::device_vector<uint32_t> dv_out(in, in + n);
thrust::sort(dv_out.begin(), dv_out.end());
thrust::copy(dv_out.begin(), dv_out.end(), out);
}
// Radix sort
float sort(const uint32_t * in, int n,
uint32_t * out,
int nBits,
int useDevice=0, int * blockSizes=NULL)
{
GpuTimer timer;
timer.Start();
if (useDevice == 0)
{
printf("\nRadix sort by host\n");
sortByHost(in, n, out, nBits);
}
else if (useDevice == 1)// use device
{
printf("\nRadix sort by device\n");
sortByDevice(in, n, out, nBits, blockSizes);
}
else if (useDevice == 2)
{
sortByDevice_base03(in, n, out, blockSizes);
}
else
{
printf("\nSort by thrust\n");
sortByDevice_thrust(in, n, out);
}
timer.Stop();
float time = timer.Elapsed();
if (useDevice != 2)
printf("Time: %.3f ms\n", time);
return time;
}
void printDeviceInfo()
{
cudaDeviceProp devProv;
CHECK(cudaGetDeviceProperties(&devProv, 0));
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor);
printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock);
printf("****************************\n");
}
void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n)
{
for (int i = 0; i < n; i++)
{
if (out[i] != correctOut[i])
{
printf("%d, %d != %d\n", i, out[i], correctOut[i]);
printf("INCORRECT :(\n");
return;
}
}
printf("CORRECT :)\n");
}
void printArray(uint32_t * a, int n)
{
for (int i = 0; i < n; i++)
printf("%i ", a[i]);
printf("\n");
}
int main(int argc, char ** argv)
{
// PRINT OUT DEVICE INFO
printDeviceInfo();
// SET UP INPUT SIZE
int n = (1 << 24) + 1;
printf("\nInput size: %d\n", n);
// ALLOCATE MEMORIES
size_t bytes = n * sizeof(uint32_t);
uint32_t * in = (uint32_t *)malloc(bytes);
uint32_t * out = (uint32_t *)malloc(bytes); // Device result
uint32_t * out_base03 = (uint32_t *)malloc(bytes); // Device result base03
uint32_t * out_thrust = (uint32_t *)malloc(bytes); // result by Thrust
uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result
// SET UP INPUT DATA
for (int i = 0; i < n; i++)
in[i] = rand();
// SET UP NBITS
int nBits = 8;
if (argc > 1)
nBits = atoi(argv[1]);
printf("\nNum bits per digit: %d\n", nBits);
// DETERMINE BLOCK SIZES
int blockSizes[2] = {512, 512}; // One for histogram, one for scan
if (argc == 4)
{
blockSizes[0] = atoi(argv[2]);
blockSizes[1] = atoi(argv[3]);
}
printf("\nHist block size: %d, scan block size: %d\n", blockSizes[0], blockSizes[1]);
// SORT BY HOST
sort(in, n, correctOut, nBits, 0);
// SORT BY DEVICE
sort(in, n, out, nBits, 1, blockSizes);
checkCorrectness(out, correctOut, n);
// SORT base 03
printf("\nRadix sort by device by base03\n");
float avg_time = 0;
int loop = 16;
for (int i = 0; i < loop; i++)
{
float time = sort(in, n, out_base03, 1, 2, blockSizes);
avg_time += time;
}
printf("Avg Time: %.3f ms\n", avg_time / loop);
checkCorrectness(out_base03, correctOut, n);
// SORT BY DEVICE by thrust
sort(in, n, out_thrust, nBits, 3, blockSizes);
checkCorrectness(out_thrust, correctOut, n);
// FREE MEMORIES
free(in);
free(out);
free(out_base03);
free(out_thrust);
free(correctOut);
return EXIT_SUCCESS;
}
|
d4b17b1e9963dd7b01d870d3cac13ae7eca3652f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void HierarchicalSoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts, int label_num_, const Dtype* split_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
// const int s = index % spatial_dim;
const int channels = dim / spatial_dim;
counts[index] = 0;
loss[index] = 0; // initialize once, outside the loop, so every positive label's term accumulates
for (int i = 0; i < channels; i++) {
const int label_value = static_cast<int>(label[n * channels + i]);
if (label_value == 1) {
loss[index] -= log(max(prob_data[n * channels + i],
Dtype(FLT_MIN)));
counts[index] += 1;
}
}
}
}
template <typename Dtype>
void HierarchicalSoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
const Dtype* split_data = split_.gpu_data();
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( HierarchicalSoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts, label_num_, split_data);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
loss /= count;
} else {
loss /= outer_num_;
}
top[0]->mutable_cpu_data()[0] = loss;
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void HierarchicalSoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts, int label_num_, const Dtype* split_data) {
// const int channels = dim / spatial_dim;
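// bottom_diff arrives holding the softmax probabilities (copied in Backward_gpu). For each label
// group [start, end] taken from split_data: if the group contains a positive label, apply the
// softmax gradient p - y by subtracting 1 at that class and add the group size to counts;
// otherwise zero the whole group's gradient so it does not contribute.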
CUDA_KERNEL_LOOP(index, nthreads) {
// const int n = index / spatial_dim;
// const int s = index % spatial_dim;
counts[index] = 0;
for (int i = 0; i < label_num_; i++) {
int start = split_data[2 * i];
int end = split_data[2 * i + 1];
bool flag = false;
int idx = 0;
for (int k = start; k <= end; k++) {
if (label[index * dim + k] == 1) {
flag = true;
idx = k;
break;
}
}
if (flag) {
bottom_diff[index * dim + idx] -= 1;
counts[index] += end - start + 1;
} else {
for (int k = start; k <= end; k++) {
bottom_diff[index * dim + k] = 0;
}
}
}
}
}
template <typename Dtype>
void HierarchicalSoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_;
const Dtype* split_data = split_.gpu_data();
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( HierarchicalSoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts, label_num_, split_data);
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
const Dtype loss_weight = top[0]->cpu_diff()[0];
if (normalize_) {
caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff);
} else {
caffe_gpu_scal(prob_.count(), loss_weight / outer_num_, bottom_diff);
}
// LOG(INFO) << prob_data[0];
}
}
INSTANTIATE_LAYER_GPU_FUNCS(HierarchicalSoftmaxWithLossLayer);
} // namespace caffe
|
d4b17b1e9963dd7b01d870d3cac13ae7eca3652f.cu
|
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void HierarchicalSoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts, int label_num_, const Dtype* split_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
// const int s = index % spatial_dim;
const int channels = dim / spatial_dim;
counts[index] = 0;
loss[index] = 0; // initialize once, outside the loop, so every positive label's term accumulates
for (int i = 0; i < channels; i++) {
const int label_value = static_cast<int>(label[n * channels + i]);
if (label_value == 1) {
loss[index] -= log(max(prob_data[n * channels + i],
Dtype(FLT_MIN)));
counts[index] += 1;
}
}
}
}
template <typename Dtype>
void HierarchicalSoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
const Dtype* split_data = split_.gpu_data();
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
HierarchicalSoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts, label_num_, split_data);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
loss /= count;
} else {
loss /= outer_num_;
}
top[0]->mutable_cpu_data()[0] = loss;
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void HierarchicalSoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts, int label_num_, const Dtype* split_data) {
// const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
// const int n = index / spatial_dim;
// const int s = index % spatial_dim;
counts[index] = 0;
for (int i = 0; i < label_num_; i++) {
int start = split_data[2 * i];
int end = split_data[2 * i + 1];
bool flag = false;
int idx = 0;
for (int k = start; k <= end; k++) {
if (label[index * dim + k] == 1) {
flag = true;
idx = k;
break;
}
}
if (flag) {
bottom_diff[index * dim + idx] -= 1;
counts[index] += end - start + 1;
} else {
for (int k = start; k <= end; k++) {
bottom_diff[index * dim + k] = 0;
}
}
}
}
}
template <typename Dtype>
void HierarchicalSoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_;
const Dtype* split_data = split_.gpu_data();
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
HierarchicalSoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts, label_num_, split_data);
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
const Dtype loss_weight = top[0]->cpu_diff()[0];
if (normalize_) {
caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff);
} else {
caffe_gpu_scal(prob_.count(), loss_weight / outer_num_, bottom_diff);
}
// LOG(INFO) << prob_data[0];
}
}
INSTANTIATE_LAYER_GPU_FUNCS(HierarchicalSoftmaxWithLossLayer);
} // namespace caffe
|
650603f5496faf606fdfc1eb0c5364274a73f5b8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <THH/THHTensorMath.h>
#include <THH/THHGeneral.h>
#include <TH/THHalf.h>
#include <THH/THHTensorCopy.h>
#include <THH/THHApply.cuh>
#include <THH/THHNumerics.cuh>
#include <THH/THHTensorMathCompareT.cuh>
#include <THH/THHTensor.hpp>
template <typename T>
struct TensorAddConstantOp {
TensorAddConstantOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in + val;
}
__device__ __forceinline__ void operator()(T* v) {
*v += val;
}
const T val;
};
template <typename T>
struct TensorSubConstantOp {
TensorSubConstantOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in - val;
}
__device__ __forceinline__ void operator()(T* v) {
*v -= val;
}
const T val;
};
template <typename T>
struct TensorMulConstantOp {
TensorMulConstantOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in * val;
}
__device__ __forceinline__ void operator()(T* v) {
*v *= val;
}
const T val;
};
template <typename T>
struct TensorDivConstantOp {
TensorDivConstantOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in / val;
}
__device__ __forceinline__ void operator()(T* v) {
*v /= val;
}
const T val;
};
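// The float and double specializations below precompute the reciprocal in the constructor so each
// element is scaled with a multiply instead of a (slower) divide.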
template <>
struct TensorDivConstantOp<float> {
TensorDivConstantOp(float v) : val(1.f / v) {}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = *in * val;
}
__device__ __forceinline__ void operator()(float* v) {
*v *= val;
}
const float val;
};
template <>
struct TensorDivConstantOp<double> {
TensorDivConstantOp(double v) : val(1. / v) {}
__device__ __forceinline__ void operator()(double* out, double* in) {
*out = *in * val;
}
__device__ __forceinline__ void operator()(double* v) {
*v *= val;
}
const double val;
};
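// C++ integer % truncates toward zero; modulo_wrap reports when a nonzero remainder has the
// opposite sign from the divisor, so TensorRemainderOp can add the divisor back and emulate
// floored (Python-style) modulo. The unsigned overload can never wrap, hence always false.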
template<typename T>
static __device__ __forceinline__
typename std::enable_if<std::is_signed<T>::value, bool>::type
modulo_wrap(T a, T b) {
return (a != 0) && (a < 0) != (b < 0);
}
template<typename T>
static __device__ __forceinline__
typename std::enable_if<std::is_unsigned<T>::value, bool>::type
modulo_wrap(T a, T b) {
return false;
}
template <typename T>
struct TensorRemainderOp {
TensorRemainderOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in % val;
if (modulo_wrap<T>(*out, val)) {
*out += val;
}
}
__device__ __forceinline__ void operator()(T* v) {
*v = *v % val;
if (modulo_wrap<T>(*v, val)) {
*v += val;
}
}
const T val;
};
template <>
struct TensorRemainderOp<float> {
TensorRemainderOp(float v) : val(v) {}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = *in - val * floorf(*in / val);
}
__device__ __forceinline__ void operator()(float* v) {
*v = *v - val * floorf(*v / val);
}
const float val;
};
template <>
struct TensorRemainderOp<double> {
TensorRemainderOp(double v) : val(v) {}
__device__ __forceinline__ void operator()(double* out, double* in) {
*out = *in - val * floor(*in / val);
}
__device__ __forceinline__ void operator()(double* v) {
*v = *v - val * floor(*v / val);
}
const double val;
};
template <>
struct TensorRemainderOp<at::Half> {
TensorRemainderOp(at::Half v): val(v) {}
__device__ __forceinline__ void operator()(at::Half* out, at::Half* in) {
*out = *in - val * floorf(*in / val);
}
__device__ __forceinline__ void operator()(at::Half* v) {
*v = *v - val * floorf(*v / val);
}
const at::Half val;
};
template <typename T>
struct TensorFmodOp {
TensorFmodOp(T v) : val((float)v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = (T) fmodf((float) *in, val);
}
__device__ __forceinline__ void operator()(T* v) {
*v = (T) fmodf((float) *v, val);
}
const float val;
};
template <>
struct TensorFmodOp<double> {
TensorFmodOp(double v) : val(v) {}
__device__ __forceinline__ void operator()(double* out, double* in) {
*out = fmod(*in, val);
}
__device__ __forceinline__ void operator()(double* v) {
*v = fmod(*v, val);
}
const double val;
};
template <typename T, int Upper>
struct TensorTriOp {
TensorTriOp(T *start_, int64_t stride0_, int64_t stride1_, int64_t k_)
: start(start_), stride0(stride0_), stride1(stride1_), k(k_) {}
__device__ __forceinline__ int mask(T *out) {
ptrdiff_t n = out - start;
int64_t row, col;
if (stride0 > stride1)
{
row = (int64_t) (n / stride0);
col = (int64_t) ((n % stride0) / stride1);
}
else
{
row = (int64_t) ((n % stride1) / stride0);
col = (int64_t) (n / stride1);
}
return Upper ? (col - row >= k) : (col - row <= k);
}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = mask(out) ? *in : ScalarConvert<int, T>::to(0);
}
__device__ __forceinline__ void operator()(T* v) {
if (!mask(v))
*v = ScalarConvert<int, T>::to(0);
}
const T *start;
const int64_t stride0, stride1, k;
};
template <typename T>
struct TensorLShiftConstantOp {
TensorLShiftConstantOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in << val;
}
__device__ __forceinline__ void operator()(T* v) {
*v <<= val;
}
const T val;
};
template <typename T>
struct TensorRShiftConstantOp {
TensorRShiftConstantOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in >> val;
}
__device__ __forceinline__ void operator()(T* v) {
*v >>= val;
}
const T val;
};
template <typename T>
struct TensorBitOrConstantOp {
TensorBitOrConstantOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in | val;
}
__device__ __forceinline__ void operator()(T* v) {
*v |= val;
}
const T val;
};
#include <THH/generic/THHTensorMathPairwise.hip>
#include <THH/THHGenerateAllTypes.h>
#include <THH/generic/THHTensorMathPairwise.hip>
#include <THH/THHGenerateBoolType.h>
|
650603f5496faf606fdfc1eb0c5364274a73f5b8.cu
|
#include <THC/THCTensorMath.h>
#include <THC/THCGeneral.h>
#include <TH/THHalf.h>
#include <THC/THCTensorCopy.h>
#include <THC/THCApply.cuh>
#include <THC/THCNumerics.cuh>
#include <THC/THCTensorMathCompareT.cuh>
#include <THC/THCTensor.hpp>
template <typename T>
struct TensorAddConstantOp {
TensorAddConstantOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in + val;
}
__device__ __forceinline__ void operator()(T* v) {
*v += val;
}
const T val;
};
template <typename T>
struct TensorSubConstantOp {
TensorSubConstantOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in - val;
}
__device__ __forceinline__ void operator()(T* v) {
*v -= val;
}
const T val;
};
template <typename T>
struct TensorMulConstantOp {
TensorMulConstantOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in * val;
}
__device__ __forceinline__ void operator()(T* v) {
*v *= val;
}
const T val;
};
template <typename T>
struct TensorDivConstantOp {
TensorDivConstantOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in / val;
}
__device__ __forceinline__ void operator()(T* v) {
*v /= val;
}
const T val;
};
template <>
struct TensorDivConstantOp<float> {
TensorDivConstantOp(float v) : val(1.f / v) {}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = *in * val;
}
__device__ __forceinline__ void operator()(float* v) {
*v *= val;
}
const float val;
};
template <>
struct TensorDivConstantOp<double> {
TensorDivConstantOp(double v) : val(1. / v) {}
__device__ __forceinline__ void operator()(double* out, double* in) {
*out = *in * val;
}
__device__ __forceinline__ void operator()(double* v) {
*v *= val;
}
const double val;
};
template<typename T>
static __device__ __forceinline__
typename std::enable_if<std::is_signed<T>::value, bool>::type
modulo_wrap(T a, T b) {
return (a != 0) && (a < 0) != (b < 0);
}
template<typename T>
static __device__ __forceinline__
typename std::enable_if<std::is_unsigned<T>::value, bool>::type
modulo_wrap(T a, T b) {
return false;
}
template <typename T>
struct TensorRemainderOp {
TensorRemainderOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in % val;
if (modulo_wrap<T>(*out, val)) {
*out += val;
}
}
__device__ __forceinline__ void operator()(T* v) {
*v = *v % val;
if (modulo_wrap<T>(*v, val)) {
*v += val;
}
}
const T val;
};
template <>
struct TensorRemainderOp<float> {
TensorRemainderOp(float v) : val(v) {}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = *in - val * floorf(*in / val);
}
__device__ __forceinline__ void operator()(float* v) {
*v = *v - val * floorf(*v / val);
}
const float val;
};
template <>
struct TensorRemainderOp<double> {
TensorRemainderOp(double v) : val(v) {}
__device__ __forceinline__ void operator()(double* out, double* in) {
*out = *in - val * floor(*in / val);
}
__device__ __forceinline__ void operator()(double* v) {
*v = *v - val * floor(*v / val);
}
const double val;
};
template <>
struct TensorRemainderOp<at::Half> {
TensorRemainderOp(at::Half v): val(v) {}
__device__ __forceinline__ void operator()(at::Half* out, at::Half* in) {
*out = *in - val * floorf(*in / val);
}
__device__ __forceinline__ void operator()(at::Half* v) {
*v = *v - val * floorf(*v / val);
}
const at::Half val;
};
template <typename T>
struct TensorFmodOp {
TensorFmodOp(T v) : val((float)v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = (T) fmodf((float) *in, val);
}
__device__ __forceinline__ void operator()(T* v) {
*v = (T) fmodf((float) *v, val);
}
const float val;
};
template <>
struct TensorFmodOp<double> {
TensorFmodOp(double v) : val(v) {}
__device__ __forceinline__ void operator()(double* out, double* in) {
*out = fmod(*in, val);
}
__device__ __forceinline__ void operator()(double* v) {
*v = fmod(*v, val);
}
const double val;
};
template <typename T, int Upper>
struct TensorTriOp {
TensorTriOp(T *start_, int64_t stride0_, int64_t stride1_, int64_t k_)
: start(start_), stride0(stride0_), stride1(stride1_), k(k_) {}
__device__ __forceinline__ int mask(T *out) {
ptrdiff_t n = out - start;
int64_t row, col;
if (stride0 > stride1)
{
row = (int64_t) (n / stride0);
col = (int64_t) ((n % stride0) / stride1);
}
else
{
row = (int64_t) ((n % stride1) / stride0);
col = (int64_t) (n / stride1);
}
return Upper ? (col - row >= k) : (col - row <= k);
}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = mask(out) ? *in : ScalarConvert<int, T>::to(0);
}
__device__ __forceinline__ void operator()(T* v) {
if (!mask(v))
*v = ScalarConvert<int, T>::to(0);
}
const T *start;
const int64_t stride0, stride1, k;
};
template <typename T>
struct TensorLShiftConstantOp {
TensorLShiftConstantOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in << val;
}
__device__ __forceinline__ void operator()(T* v) {
*v <<= val;
}
const T val;
};
template <typename T>
struct TensorRShiftConstantOp {
TensorRShiftConstantOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in >> val;
}
__device__ __forceinline__ void operator()(T* v) {
*v >>= val;
}
const T val;
};
template <typename T>
struct TensorBitOrConstantOp {
TensorBitOrConstantOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in | val;
}
__device__ __forceinline__ void operator()(T* v) {
*v |= val;
}
const T val;
};
#include <THC/generic/THCTensorMathPairwise.cu>
#include <THC/THCGenerateAllTypes.h>
#include <THC/generic/THCTensorMathPairwise.cu>
#include <THC/THCGenerateBoolType.h>
|
191fe5dd39f1642975868aac7f3e74df5d4da195.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "ticketLock.h"
#define NBLOCKS_TRUE 512
#define NTHREADS_TRUE 512 * 2
__global__ void blockCounterUnlocked( int *nblocks ){
if(threadIdx.x == 0){
*nblocks = *nblocks + 1;
}
}
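// Baseline without a lock: thread 0 of every block does an unsynchronized read-modify-write on
// *nblocks, so the updates race and the reported count is usually well below NBLOCKS_TRUE.
// blockCounter1 serializes the update with the ticket lock and should count every block.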
__global__ void blockCounter1( Lock lock, int *nblocks ){
if(threadIdx.x == 0){
lock.lock();
*nblocks = *nblocks + 1;
lock.unlock();
}
}
// THIS KERNEL WILL CREATE A DIVERGENCE CONDITION
// AND STALL OUT. DON'T USE IT.
__global__ void blockCounter2( Lock lock, int *nblocks ){
lock.lock();
if(threadIdx.x == 0){
*nblocks = *nblocks + 1 ;
}
lock.unlock();
}
int main(){
int nblocks_host, *nblocks_dev;
Lock lock;
float elapsedTime;
hipEvent_t start, stop;
hipMalloc((void**) &nblocks_dev, sizeof(int));
//blockCounterUnlocked:
nblocks_host = 0;
hipMemcpy( nblocks_dev, &nblocks_host, sizeof(int), hipMemcpyHostToDevice );
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipLaunchKernelGGL(( blockCounterUnlocked), dim3(NBLOCKS_TRUE), dim3(NTHREADS_TRUE), 0, 0, nblocks_dev);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &elapsedTime, start, stop );
hipEventDestroy( start );
hipEventDestroy( stop );
hipMemcpy( &nblocks_host, nblocks_dev, sizeof(int), hipMemcpyDeviceToHost );
printf("blockCounterUnlocked <<< %d, %d >>> () counted %d blocks in %f ms.\n",
NBLOCKS_TRUE,
NTHREADS_TRUE,
nblocks_host,
elapsedTime);
//blockCounter1:
nblocks_host = 0;
hipMemcpy( nblocks_dev, &nblocks_host, sizeof(int), hipMemcpyHostToDevice );
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipLaunchKernelGGL(( blockCounter1), dim3(NBLOCKS_TRUE), dim3(NTHREADS_TRUE), 0, 0, lock, nblocks_dev);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &elapsedTime, start, stop );
hipEventDestroy( start );
hipEventDestroy( stop );
hipMemcpy( &nblocks_host, nblocks_dev, sizeof(int), hipMemcpyDeviceToHost );
printf("blockCounter1 <<< %d, %d >>> () counted %d blocks in %f ms.\n",
NBLOCKS_TRUE,
NTHREADS_TRUE,
nblocks_host,
elapsedTime);
hipFree(nblocks_dev);
}
|
191fe5dd39f1642975868aac7f3e74df5d4da195.cu
|
#include <stdio.h>
#include "ticketLock.h"
#define NBLOCKS_TRUE 512
#define NTHREADS_TRUE 512 * 2
__global__ void blockCounterUnlocked( int *nblocks ){
if(threadIdx.x == 0){
*nblocks = *nblocks + 1;
}
}
__global__ void blockCounter1( Lock lock, int *nblocks ){
if(threadIdx.x == 0){
lock.lock();
*nblocks = *nblocks + 1;
lock.unlock();
}
}
// THIS KERNEL WILL CREATE A DIVERGENCE CONDITION
// AND STALL OUT. DON'T USE IT.
__global__ void blockCounter2( Lock lock, int *nblocks ){
lock.lock();
if(threadIdx.x == 0){
*nblocks = *nblocks + 1 ;
}
lock.unlock();
}
int main(){
int nblocks_host, *nblocks_dev;
Lock lock;
float elapsedTime;
cudaEvent_t start, stop;
cudaMalloc((void**) &nblocks_dev, sizeof(int));
//blockCounterUnlocked:
nblocks_host = 0;
cudaMemcpy( nblocks_dev, &nblocks_host, sizeof(int), cudaMemcpyHostToDevice );
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
blockCounterUnlocked<<<NBLOCKS_TRUE, NTHREADS_TRUE>>>(nblocks_dev);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &elapsedTime, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaMemcpy( &nblocks_host, nblocks_dev, sizeof(int), cudaMemcpyDeviceToHost );
printf("blockCounterUnlocked <<< %d, %d >>> () counted %d blocks in %f ms.\n",
NBLOCKS_TRUE,
NTHREADS_TRUE,
nblocks_host,
elapsedTime);
//blockCounter1:
nblocks_host = 0;
cudaMemcpy( nblocks_dev, &nblocks_host, sizeof(int), cudaMemcpyHostToDevice );
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
blockCounter1<<<NBLOCKS_TRUE, NTHREADS_TRUE>>>(lock, nblocks_dev);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &elapsedTime, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaMemcpy( &nblocks_host, nblocks_dev, sizeof(int), cudaMemcpyDeviceToHost );
printf("blockCounter1 <<< %d, %d >>> () counted %d blocks in %f ms.\n",
NBLOCKS_TRUE,
NTHREADS_TRUE,
nblocks_host,
elapsedTime);
cudaFree(nblocks_dev);
}
|
dd498451e9288b2e90629fcd91d49b3464ad0881.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2014 Nervana Systems Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// nvcc -arch sm_50 -cubin sconv_fprop_s8_K64_N64.cu
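// Note: the kernel body below is only a placeholder that touches shared memory and writes a single
// output element, presumably enough to build and inspect the cubin; the convolution parameters in
// the signature are not actually used here.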
extern "C"
__global__ void __launch_bounds__(64) sconv_fprop_s8_K64_N64
(
unsigned int* param_Rand,
float* param_O,
const float* param_I,
const float* param_F,
float param_alpha,
int param_flags,
int param_N,
int param_K,
int param_D,
int param_H,
int param_W,
int param_WN,
int param_HWN,
int param_DHWN,
int param_C,
int param_CRST,
int param_RST,
int param_magic_RST,
int param_shift_RST,
int param_RS,
int param_magic_RS,
int param_shift_RS,
int param_S,
int param_magic_S,
int param_shift_S,
int param_pad_d,
int param_pad_h,
int param_pad_w,
int param_str_d,
int param_str_h,
int param_str_w,
int param_P,
int param_Q,
int param_PQ,
int param_QN,
int param_PQN,
int param_MPQN,
int param_magic_Q,
int param_shift_Q,
int param_magic_PQ,
int param_shift_PQ,
int param_part_P,
int param_part_Q,
int param_part_PQ
)
{
__shared__ float share[64*8*4 + 8];
int tid = threadIdx.x;
share[tid] = 1;
*param_O = share[63-tid];
}
|
dd498451e9288b2e90629fcd91d49b3464ad0881.cu
|
/*
* Copyright 2014 Nervana Systems Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// nvcc -arch sm_50 -cubin sconv_fprop_s8_K64_N64.cu
extern "C"
__global__ void __launch_bounds__(64) sconv_fprop_s8_K64_N64
(
unsigned int* param_Rand,
float* param_O,
const float* param_I,
const float* param_F,
float param_alpha,
int param_flags,
int param_N,
int param_K,
int param_D,
int param_H,
int param_W,
int param_WN,
int param_HWN,
int param_DHWN,
int param_C,
int param_CRST,
int param_RST,
int param_magic_RST,
int param_shift_RST,
int param_RS,
int param_magic_RS,
int param_shift_RS,
int param_S,
int param_magic_S,
int param_shift_S,
int param_pad_d,
int param_pad_h,
int param_pad_w,
int param_str_d,
int param_str_h,
int param_str_w,
int param_P,
int param_Q,
int param_PQ,
int param_QN,
int param_PQN,
int param_MPQN,
int param_magic_Q,
int param_shift_Q,
int param_magic_PQ,
int param_shift_PQ,
int param_part_P,
int param_part_Q,
int param_part_PQ
)
{
__shared__ float share[64*8*4 + 8];
int tid = threadIdx.x;
share[tid] = 1;
*param_O = share[63-tid];
}
|
91986af3d1774a976d80603da6f07b760a81d277.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define Width 31
#define TITE_WIDTH 16
__global__ void MatrixMulKernel (float* Md, float* Nd, float* Pd, int ncols) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
printf("Block ID X : %d and Block ID Y: %d\n", blockIdx.x,blockIdx.y);
float Pvalue = 0;
if(row < Width && col < Width){ // bounds check must use &&: extra threads in the last tile fall outside the matrix
for(int k=0;k<ncols;k++){
float Melement = Md[row*ncols+k];
float Nelement = Nd[k*ncols+col];
Pvalue += Melement * Nelement;
}
Pd[row*ncols+col] = Pvalue; // store inside the bounds check to avoid out-of-range writes
}
}
int main (int argc, char *argv[]){
int i,j;
int size = Width * Width * sizeof(float);
float M[Width][Width], N[Width][Width], P[Width][Width];
float* Md, *Nd, *Pd;
for(i=0;i<Width;i++){
for(j=0;j<Width;j++){
M[i][j] = 1;
N[i][j] = 2;
}
}
hipMalloc( (void**)&Md, size);
hipMalloc( (void**)&Nd, size);
hipMalloc( (void**)&Pd, size);
hipMemcpy( Md, M, size, hipMemcpyHostToDevice);
hipMemcpy( Nd, N, size, hipMemcpyHostToDevice);
dim3 dimBlock(TITE_WIDTH, TITE_WIDTH);
dim3 dimGrid((Width+TITE_WIDTH-1)/TITE_WIDTH,(Width+TITE_WIDTH-1)/TITE_WIDTH);
hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Md, Nd, Pd, Width);
hipMemcpy(P, Pd, size, hipMemcpyDeviceToHost);
hipFree(Md);
hipFree(Nd);
hipFree(Pd);
printf("\n================================\n");
for(i=0;i<Width;i++){
for(j=0;j<Width;j++){
printf("%.2f ", P[i][j]);
}
}
}
|
91986af3d1774a976d80603da6f07b760a81d277.cu
|
#include <stdio.h>
#define Width 31
#define TITE_WIDTH 16
__global__ void MatrixMulKernel (float* Md, float* Nd, float* Pd, int ncols) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
printf("Block ID X : %d and Block ID Y: %d\n", blockIdx.x,blockIdx.y);
float Pvalue = 0;
if(row < Width && col < Width){ // bounds check must use &&: extra threads in the last tile fall outside the matrix
for(int k=0;k<ncols;k++){
float Melement = Md[row*ncols+k];
float Nelement = Nd[k*ncols+col];
Pvalue += Melement * Nelement;
}
Pd[row*ncols+col] = Pvalue; // store inside the bounds check to avoid out-of-range writes
}
}
int main (int argc, char *argv[]){
int i,j;
int size = Width * Width * sizeof(float);
float M[Width][Width], N[Width][Width], P[Width][Width];
float* Md, *Nd, *Pd;
for(i=0;i<Width;i++){
for(j=0;j<Width;j++){
M[i][j] = 1;
N[i][j] = 2;
}
}
cudaMalloc( (void**)&Md, size);
cudaMalloc( (void**)&Nd, size);
cudaMalloc( (void**)&Pd, size);
cudaMemcpy( Md, M, size, cudaMemcpyHostToDevice);
cudaMemcpy( Nd, N, size, cudaMemcpyHostToDevice);
dim3 dimBlock(TITE_WIDTH, TITE_WIDTH);
dim3 dimGrid((Width+TITE_WIDTH-1)/TITE_WIDTH,(Width+TITE_WIDTH-1)/TITE_WIDTH);
MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, Width);
cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);
cudaFree(Md);
cudaFree(Nd);
cudaFree(Pd);
printf("\n================================\n");
for(i=0;i<Width;i++){
for(j=0;j<Width;j++){
printf("%.2f ", P[i][j]);
}
}
}
|
015891fc7b9bf26ee38b8473bcadf2c51da37304.hip
|
// !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2019-2022 by XGBoost Contributors
*/
#include <gtest/gtest.h>
#include <algorithm>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sequence.h>
#include "../../../../src/tree/gpu_hist/row_partitioner.cuh"
#include "../../helpers.h"
#include "xgboost/base.h"
#include "xgboost/generic_parameters.h"
#include "xgboost/task.h"
#include "xgboost/tree_model.h"
namespace xgboost {
namespace tree {
void TestSortPosition(const std::vector<int>& position_in, int left_idx,
int right_idx) {
dh::safe_cuda(hipSetDevice(0));
std::vector<int64_t> left_count = {
std::count(position_in.begin(), position_in.end(), left_idx)};
dh::caching_device_vector<int64_t> d_left_count = left_count;
dh::caching_device_vector<int> position = position_in;
dh::caching_device_vector<int> position_out(position.size());
dh::caching_device_vector<RowPartitioner::RowIndexT> ridx(position.size());
thrust::sequence(ridx.begin(), ridx.end());
dh::caching_device_vector<RowPartitioner::RowIndexT> ridx_out(ridx.size());
RowPartitioner rp(0,10);
rp.SortPosition(
common::Span<int>(position.data().get(), position.size()),
common::Span<int>(position_out.data().get(), position_out.size()),
common::Span<RowPartitioner::RowIndexT>(ridx.data().get(), ridx.size()),
common::Span<RowPartitioner::RowIndexT>(ridx_out.data().get(), ridx_out.size()), left_idx,
right_idx, d_left_count.data().get(), nullptr);
thrust::host_vector<int> position_result = position_out;
thrust::host_vector<int> ridx_result = ridx_out;
// Check position is sorted
EXPECT_TRUE(std::is_sorted(position_result.begin(), position_result.end()));
// Check row indices are sorted inside left and right segment
EXPECT_TRUE(
std::is_sorted(ridx_result.begin(), ridx_result.begin() + left_count[0]));
EXPECT_TRUE(
std::is_sorted(ridx_result.begin() + left_count[0], ridx_result.end()));
// Check key value pairs are the same
for (auto i = 0ull; i < ridx_result.size(); i++) {
EXPECT_EQ(position_result[i], position_in[ridx_result[i]]);
}
}
TEST(GpuHist, SortPosition) {
TestSortPosition({1, 2, 1, 2, 1}, 1, 2);
TestSortPosition({1, 1, 1, 1}, 1, 2);
TestSortPosition({2, 2, 2, 2}, 1, 2);
TestSortPosition({1, 2, 1, 2, 3}, 1, 2);
}
void TestUpdatePosition() {
const int kNumRows = 10;
RowPartitioner rp(0, kNumRows);
auto rows = rp.GetRowsHost(0);
EXPECT_EQ(rows.size(), kNumRows);
for (auto i = 0ull; i < kNumRows; i++) {
EXPECT_EQ(rows[i], i);
}
// Send the first five training instances to the right node
// and the second 5 to the left node
rp.UpdatePosition(0, 1, 2,
[=] __device__(RowPartitioner::RowIndexT ridx) {
if (ridx > 4) {
return 1;
}
else {
return 2;
}
});
rows = rp.GetRowsHost(1);
for (auto r : rows) {
EXPECT_GT(r, 4);
}
rows = rp.GetRowsHost(2);
for (auto r : rows) {
EXPECT_LT(r, 5);
}
// Split the left node again
rp.UpdatePosition(1, 3, 4, [=]__device__(RowPartitioner::RowIndexT ridx)
{
if (ridx < 7) {
return 3;
}
return 4;
});
EXPECT_EQ(rp.GetRows(3).size(), 2);
EXPECT_EQ(rp.GetRows(4).size(), 3);
// Check position is as expected
EXPECT_EQ(rp.GetPositionHost(), std::vector<bst_node_t>({3,3,4,4,4,2,2,2,2,2}));
}
TEST(RowPartitioner, Basic) { TestUpdatePosition(); }
void TestFinalise() {
const int kNumRows = 10;
ObjInfo task{ObjInfo::kRegression, false, false};
HostDeviceVector<bst_node_t> position;
Context ctx;
ctx.gpu_id = 0;
{
RowPartitioner rp(0, kNumRows);
rp.FinalisePosition(
&ctx, task, &position,
[=] __device__(RowPartitioner::RowIndexT ridx, int position) { return 7; },
[] XGBOOST_DEVICE(size_t idx) { return false; });
auto position = rp.GetPositionHost();
for (auto p : position) {
EXPECT_EQ(p, 7);
}
}
/**
* Test for sampling.
*/
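// Added note: rows with zero hessian are treated as sampled out and are expected to get a negative position below; the remaining rows keep their leaf id (1 or 2).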
dh::device_vector<float> hess(kNumRows);
for (size_t i = 0; i < hess.size(); ++i) {
// removed rows, 0, 3, 6, 9
if (i % 3 == 0) {
hess[i] = 0;
} else {
hess[i] = i;
}
}
auto d_hess = dh::ToSpan(hess);
RowPartitioner rp(0, kNumRows);
rp.FinalisePosition(
&ctx, task, &position,
[] __device__(RowPartitioner::RowIndexT ridx, bst_node_t position) {
return ridx % 2 == 0 ? 1 : 2;
},
[d_hess] __device__(size_t ridx) { return d_hess[ridx] - 0.f == 0.f; });
auto const& h_position = position.ConstHostVector();
for (size_t ridx = 0; ridx < h_position.size(); ++ridx) {
if (ridx % 3 == 0) {
ASSERT_LT(h_position[ridx], 0);
} else {
ASSERT_EQ(h_position[ridx], ridx % 2 == 0 ? 1 : 2);
}
}
}
TEST(RowPartitioner, Finalise) { TestFinalise(); }
void TestIncorrectRow() {
RowPartitioner rp(0, 1);
rp.UpdatePosition(0, 1, 2, [=]__device__ (RowPartitioner::RowIndexT ridx)
{
return 4; // This is not the left branch or the right branch
});
}
TEST(RowPartitionerDeathTest, IncorrectRow) {
ASSERT_DEATH({ TestIncorrectRow(); },".*");
}
} // namespace tree
} // namespace xgboost
|
015891fc7b9bf26ee38b8473bcadf2c51da37304.cu
|
/*!
* Copyright 2019-2022 by XGBoost Contributors
*/
#include <gtest/gtest.h>
#include <algorithm>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sequence.h>
#include "../../../../src/tree/gpu_hist/row_partitioner.cuh"
#include "../../helpers.h"
#include "xgboost/base.h"
#include "xgboost/generic_parameters.h"
#include "xgboost/task.h"
#include "xgboost/tree_model.h"
namespace xgboost {
namespace tree {
void TestSortPosition(const std::vector<int>& position_in, int left_idx,
int right_idx) {
dh::safe_cuda(cudaSetDevice(0));
std::vector<int64_t> left_count = {
std::count(position_in.begin(), position_in.end(), left_idx)};
dh::caching_device_vector<int64_t> d_left_count = left_count;
dh::caching_device_vector<int> position = position_in;
dh::caching_device_vector<int> position_out(position.size());
dh::caching_device_vector<RowPartitioner::RowIndexT> ridx(position.size());
thrust::sequence(ridx.begin(), ridx.end());
dh::caching_device_vector<RowPartitioner::RowIndexT> ridx_out(ridx.size());
RowPartitioner rp(0,10);
rp.SortPosition(
common::Span<int>(position.data().get(), position.size()),
common::Span<int>(position_out.data().get(), position_out.size()),
common::Span<RowPartitioner::RowIndexT>(ridx.data().get(), ridx.size()),
common::Span<RowPartitioner::RowIndexT>(ridx_out.data().get(), ridx_out.size()), left_idx,
right_idx, d_left_count.data().get(), nullptr);
thrust::host_vector<int> position_result = position_out;
thrust::host_vector<int> ridx_result = ridx_out;
// Check position is sorted
EXPECT_TRUE(std::is_sorted(position_result.begin(), position_result.end()));
// Check row indices are sorted inside left and right segment
EXPECT_TRUE(
std::is_sorted(ridx_result.begin(), ridx_result.begin() + left_count[0]));
EXPECT_TRUE(
std::is_sorted(ridx_result.begin() + left_count[0], ridx_result.end()));
// Check key value pairs are the same
for (auto i = 0ull; i < ridx_result.size(); i++) {
EXPECT_EQ(position_result[i], position_in[ridx_result[i]]);
}
}
TEST(GpuHist, SortPosition) {
TestSortPosition({1, 2, 1, 2, 1}, 1, 2);
TestSortPosition({1, 1, 1, 1}, 1, 2);
TestSortPosition({2, 2, 2, 2}, 1, 2);
TestSortPosition({1, 2, 1, 2, 3}, 1, 2);
}
void TestUpdatePosition() {
const int kNumRows = 10;
RowPartitioner rp(0, kNumRows);
auto rows = rp.GetRowsHost(0);
EXPECT_EQ(rows.size(), kNumRows);
for (auto i = 0ull; i < kNumRows; i++) {
EXPECT_EQ(rows[i], i);
}
// Send the first five training instances to the right node
// and the second 5 to the left node
rp.UpdatePosition(0, 1, 2,
[=] __device__(RowPartitioner::RowIndexT ridx) {
if (ridx > 4) {
return 1;
}
else {
return 2;
}
});
rows = rp.GetRowsHost(1);
for (auto r : rows) {
EXPECT_GT(r, 4);
}
rows = rp.GetRowsHost(2);
for (auto r : rows) {
EXPECT_LT(r, 5);
}
// Split the left node again
rp.UpdatePosition(1, 3, 4, [=]__device__(RowPartitioner::RowIndexT ridx)
{
if (ridx < 7) {
return 3;
}
return 4;
});
EXPECT_EQ(rp.GetRows(3).size(), 2);
EXPECT_EQ(rp.GetRows(4).size(), 3);
// Check position is as expected
EXPECT_EQ(rp.GetPositionHost(), std::vector<bst_node_t>({3,3,4,4,4,2,2,2,2,2}));
}
TEST(RowPartitioner, Basic) { TestUpdatePosition(); }
void TestFinalise() {
const int kNumRows = 10;
ObjInfo task{ObjInfo::kRegression, false, false};
HostDeviceVector<bst_node_t> position;
Context ctx;
ctx.gpu_id = 0;
{
RowPartitioner rp(0, kNumRows);
rp.FinalisePosition(
&ctx, task, &position,
[=] __device__(RowPartitioner::RowIndexT ridx, int position) { return 7; },
[] XGBOOST_DEVICE(size_t idx) { return false; });
auto position = rp.GetPositionHost();
for (auto p : position) {
EXPECT_EQ(p, 7);
}
}
/**
* Test for sampling.
*/
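// Added note: rows with zero hessian are treated as sampled out and are expected to get a negative position below; the remaining rows keep their leaf id (1 or 2).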
dh::device_vector<float> hess(kNumRows);
for (size_t i = 0; i < hess.size(); ++i) {
// removed rows, 0, 3, 6, 9
if (i % 3 == 0) {
hess[i] = 0;
} else {
hess[i] = i;
}
}
auto d_hess = dh::ToSpan(hess);
RowPartitioner rp(0, kNumRows);
rp.FinalisePosition(
&ctx, task, &position,
[] __device__(RowPartitioner::RowIndexT ridx, bst_node_t position) {
return ridx % 2 == 0 ? 1 : 2;
},
[d_hess] __device__(size_t ridx) { return d_hess[ridx] - 0.f == 0.f; });
auto const& h_position = position.ConstHostVector();
for (size_t ridx = 0; ridx < h_position.size(); ++ridx) {
if (ridx % 3 == 0) {
ASSERT_LT(h_position[ridx], 0);
} else {
ASSERT_EQ(h_position[ridx], ridx % 2 == 0 ? 1 : 2);
}
}
}
TEST(RowPartitioner, Finalise) { TestFinalise(); }
void TestIncorrectRow() {
RowPartitioner rp(0, 1);
rp.UpdatePosition(0, 1, 2, [=]__device__ (RowPartitioner::RowIndexT ridx)
{
return 4; // This is not the left branch or the right branch
});
}
TEST(RowPartitionerDeathTest, IncorrectRow) {
ASSERT_DEATH({ TestIncorrectRow(); },".*");
}
} // namespace tree
} // namespace xgboost
|
939862fe132a41b332428c699fd13ca917d0b169.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cassert>
#include <type_traits>
#include <hip/hip_runtime.h>
#include "DataFormats/HcalRecHit/interface/HBHERecHit.h"
#include "DataFormats/HcalRecHit/interface/HFRecHit.h"
#include "DataFormats/HcalRecHit/interface/HORecHit.h"
#include "DataFormats/HcalRecHit/interface/HFQIE10Info.h"
#include "DataFormats/HcalRecHit/interface/HBHEChannelInfo.h"
#include "HeterogeneousCore/CUDAUtilities/interface/requireCUDADevices.h"
template <typename T>
__global__ void kernel_test_hcal_rechits(T *other) {
T rh(HcalDetId(0), 10.0f, 10.0f);
other->setEnergy(rh.energy());
other->setTime(rh.time());
}
__global__ void kernel_test_hcal_hfqie10info() { HFQIE10Info info; }
__global__ void kernel_test_hcal_hbhechinfo(HBHEChannelInfo *other) {
HBHEChannelInfo info{true, true};
info.setChannelInfo(HcalDetId{0}, 10, 10, 10, 1, 2.0, 2.0, 2.0, false, false, false);
other->setChannelInfo(info.id(),
info.recoShape(),
info.nSamples(),
info.soi(),
info.capid(),
info.darkCurrent(),
info.fcByPE(),
info.lambda(),
info.hasLinkError(),
info.hasCapidError(),
info.isDropped());
}
void test_hcal_hfqie10info() {
auto check_error = [](auto code) {
if (code != hipSuccess) {
std::cout << hipGetErrorString(code) << std::endl;
assert(false);
}
};
hipLaunchKernelGGL(( kernel_test_hcal_hfqie10info), dim3(1), dim3(1), 0, 0, );
check_error(hipGetLastError());
}
template <typename T>
void test_hcal_rechits() {
auto check_error = [](auto code) {
if (code != hipSuccess) {
std::cout << hipGetErrorString(code) << std::endl;
assert(false);
}
};
T h_rh, h_rh_test{HcalDetId(0), 10.0f, 10.0f};
T *d_rh;
hipMalloc((void **)&d_rh, sizeof(T));
hipMemcpy(d_rh, &h_rh, sizeof(T), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_test_hcal_rechits<T>), dim3(1), dim3(1), 0, 0, d_rh);
hipDeviceSynchronize();
check_error(hipGetLastError());
hipMemcpy(&h_rh, d_rh, sizeof(T), hipMemcpyDeviceToHost);
std::cout << h_rh << std::endl;
std::cout << h_rh_test << std::endl;
assert(h_rh.energy() == h_rh_test.energy());
assert(h_rh.time() == h_rh_test.time());
std::cout << "all good in " << __FUNCTION__ << std::endl;
}
void test_hcal_hbhechinfo() {
auto check_error = [](auto code) {
if (code != hipSuccess) {
std::cout << hipGetErrorString(code) << std::endl;
assert(false);
}
};
HBHEChannelInfo h_info, h_info_test{true, true};
h_info_test.setChannelInfo(HcalDetId{0}, 10, 10, 10, 1, 2.0, 2.0, 2.0, false, false, false);
HBHEChannelInfo *d_info;
hipMalloc((void **)&d_info, sizeof(HBHEChannelInfo));
hipMemcpy(d_info, &h_info, sizeof(HBHEChannelInfo), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_test_hcal_hbhechinfo), dim3(1), dim3(1), 0, 0, d_info);
hipDeviceSynchronize();
check_error(hipGetLastError());
hipMemcpy(&h_info, d_info, sizeof(HBHEChannelInfo), hipMemcpyDeviceToHost);
assert(h_info.id() == h_info_test.id());
assert(h_info.recoShape() == h_info_test.recoShape());
assert(h_info.nSamples() == h_info_test.nSamples());
assert(h_info.soi() == h_info_test.soi());
assert(h_info.capid() == h_info_test.capid());
assert(h_info.darkCurrent() == h_info_test.darkCurrent());
assert(h_info.fcByPE() == h_info_test.fcByPE());
assert(h_info.lambda() == h_info_test.lambda());
assert(h_info.hasLinkError() == h_info_test.hasLinkError());
assert(h_info.hasCapidError() == h_info_test.hasCapidError());
std::cout << "all good in " << __FUNCTION__ << std::endl;
}
int main(int argc, char **argv) {
requireCUDADevices();
test_hcal_rechits<HBHERecHit>();
test_hcal_rechits<HFRecHit>();
test_hcal_rechits<HORecHit>();
test_hcal_hbhechinfo();
std::cout << "all good" << std::endl;
return 0;
}
|
939862fe132a41b332428c699fd13ca917d0b169.cu
|
#include <iostream>
#include <cassert>
#include <type_traits>
#include <cuda.h>
#include <cuda_runtime.h>
#include "DataFormats/HcalRecHit/interface/HBHERecHit.h"
#include "DataFormats/HcalRecHit/interface/HFRecHit.h"
#include "DataFormats/HcalRecHit/interface/HORecHit.h"
#include "DataFormats/HcalRecHit/interface/HFQIE10Info.h"
#include "DataFormats/HcalRecHit/interface/HBHEChannelInfo.h"
#include "HeterogeneousCore/CUDAUtilities/interface/requireCUDADevices.h"
template <typename T>
__global__ void kernel_test_hcal_rechits(T *other) {
T rh(HcalDetId(0), 10.0f, 10.0f);
other->setEnergy(rh.energy());
other->setTime(rh.time());
}
__global__ void kernel_test_hcal_hfqie10info() { HFQIE10Info info; }
__global__ void kernel_test_hcal_hbhechinfo(HBHEChannelInfo *other) {
HBHEChannelInfo info{true, true};
info.setChannelInfo(HcalDetId{0}, 10, 10, 10, 1, 2.0, 2.0, 2.0, false, false, false);
other->setChannelInfo(info.id(),
info.recoShape(),
info.nSamples(),
info.soi(),
info.capid(),
info.darkCurrent(),
info.fcByPE(),
info.lambda(),
info.hasLinkError(),
info.hasCapidError(),
info.isDropped());
}
void test_hcal_hfqie10info() {
auto check_error = [](auto code) {
if (code != cudaSuccess) {
std::cout << cudaGetErrorString(code) << std::endl;
assert(false);
}
};
kernel_test_hcal_hfqie10info<<<1, 1>>>();
check_error(cudaGetLastError());
}
template <typename T>
void test_hcal_rechits() {
auto check_error = [](auto code) {
if (code != cudaSuccess) {
std::cout << cudaGetErrorString(code) << std::endl;
assert(false);
}
};
T h_rh, h_rh_test{HcalDetId(0), 10.0f, 10.0f};
T *d_rh;
cudaMalloc((void **)&d_rh, sizeof(T));
cudaMemcpy(d_rh, &h_rh, sizeof(T), cudaMemcpyHostToDevice);
kernel_test_hcal_rechits<T><<<1, 1>>>(d_rh);
cudaDeviceSynchronize();
check_error(cudaGetLastError());
cudaMemcpy(&h_rh, d_rh, sizeof(T), cudaMemcpyDeviceToHost);
std::cout << h_rh << std::endl;
std::cout << h_rh_test << std::endl;
assert(h_rh.energy() == h_rh_test.energy());
assert(h_rh.time() == h_rh_test.time());
std::cout << "all good in " << __FUNCTION__ << std::endl;
}
void test_hcal_hbhechinfo() {
auto check_error = [](auto code) {
if (code != cudaSuccess) {
std::cout << cudaGetErrorString(code) << std::endl;
assert(false);
}
};
HBHEChannelInfo h_info, h_info_test{true, true};
h_info_test.setChannelInfo(HcalDetId{0}, 10, 10, 10, 1, 2.0, 2.0, 2.0, false, false, false);
HBHEChannelInfo *d_info;
cudaMalloc((void **)&d_info, sizeof(HBHEChannelInfo));
cudaMemcpy(d_info, &h_info, sizeof(HBHEChannelInfo), cudaMemcpyHostToDevice);
kernel_test_hcal_hbhechinfo<<<1, 1>>>(d_info);
cudaDeviceSynchronize();
check_error(cudaGetLastError());
cudaMemcpy(&h_info, d_info, sizeof(HBHEChannelInfo), cudaMemcpyDeviceToHost);
assert(h_info.id() == h_info_test.id());
assert(h_info.recoShape() == h_info_test.recoShape());
assert(h_info.nSamples() == h_info_test.nSamples());
assert(h_info.soi() == h_info_test.soi());
assert(h_info.capid() == h_info_test.capid());
assert(h_info.darkCurrent() == h_info_test.darkCurrent());
assert(h_info.fcByPE() == h_info_test.fcByPE());
assert(h_info.lambda() == h_info_test.lambda());
assert(h_info.hasLinkError() == h_info_test.hasLinkError());
assert(h_info.hasCapidError() == h_info_test.hasCapidError());
std::cout << "all good in " << __FUNCTION__ << std::endl;
}
int main(int argc, char **argv) {
requireCUDADevices();
test_hcal_rechits<HBHERecHit>();
test_hcal_rechits<HFRecHit>();
test_hcal_rechits<HORecHit>();
test_hcal_hbhechinfo();
std::cout << "all good" << std::endl;
return 0;
}
|
f7f93f4aa5580a91f4e9bc343ba36055173322ab.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@precisions normal s -> s
*/
#include "common_magma.h"
#define block_M 32
#define block_N 32
#define thread_x 32
#define thread_y 2
#define unroll_f 16
__device__ void saxpy(float a,float *b, float *c) {
#pragma unroll
for (int i = 0; i < unroll_f; i++)
c[i] += a * b[i];
}
__global__ void
Ssyr2k_v16_ts_even_generic(float *C, const float *A, const float *B,
int m, int in, int k,
int lda, int ldb, int ldc,
float alpha, float beta)
{
int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x ;
int iby = blockIdx.y ;
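/* Added note (not in the original MAGMA source): the index arithmetic below
   appears to fold the rectangular launch grid onto the 32x32 tiles of the
   lower triangle of C (the host wrapper assumes UPLO=='L', TRANS=='N').
   In the main loops each thread holds 4 elements of A in registers (Ap/Ab),
   the block stages a 4x32 panel of B in shared memory (Bb), and each thread
   accumulates 16 entries of C in registers (Cb) via saxpy. */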
iby = (iby+ibx+3 ) % gridDim.y ;
const int minor = iby&1 ;
const bool bottom = ibx >iby ;
ibx = ( bottom ) ? (ibx-1) : ( iby + gridDim.y );
iby = ( bottom ) ? iby : ( blockIdx.x +minor + gridDim.y );
if( iby > ibx ) iby = in ;
ibx =ibx * block_M;
iby =iby * block_N;
const float *A1 = A ;
const float *B1 = B ;
{
B+= iby+tx;
B+= __mul24( ty,ldb);
A+= ibx + tx ;
C += ibx +tx +__mul24( iby+ty* unroll_f,ldc);
float Ap[4];
Ap[0]=A[0] ;
Ap[1]=A[lda] ;
Ap[2]=A[2*lda] ;
Ap[3]=A[3*lda] ;
float b=B[0];
float b2=B[2*ldb];
const float *Bend = B + ldb*k ;
B+=4*ldb;
A+=4*lda;
__shared__ float Bb[4][block_N];
float Cb[unroll_f] = { 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0};
do {
float Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]};
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
Ap[0] = A[0];
Ap[1] = A[lda];
Ap[2] = A[2*lda];
Ap[3] = A[3*lda];
b=B[0];
b2=B[2*ldb];
saxpy(Ab[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ab[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ab[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ab[3], &Bb[3][ty*unroll_f], Cb);
A+=4*lda;
B+= 4*ldb;
__syncthreads();
} while (B < Bend);
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
saxpy(Ap[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ap[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ap[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ap[3], &Bb[3][ty*unroll_f], Cb);
__syncthreads();
// -- 2nd Half
B=A1;
A=B1;
int tlda = lda ; lda = ldb ; ldb = tlda ;
B+= iby+tx;
B+= __mul24( ty,ldb);
A+= ibx + tx ;
Ap[0]=A[0] ;
Ap[1]=A[lda] ;
Ap[2]=A[2*lda] ;
Ap[3]=A[3*lda] ;
b=B[0];
b2=B[2*ldb];
const float *Bend1 = B + ldb*k;
B+=4*ldb;
A+=4*lda;
do {
float Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]};
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
Ap[0] = A[0];
Ap[1] = A[lda];
Ap[2] = A[2*lda];
Ap[3] = A[3*lda];
b=B[0];
b2=B[2*ldb];
saxpy(Ab[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ab[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ab[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ab[3], &Bb[3][ty*unroll_f], Cb);
A+=4*lda;
B += 4*ldb;
__syncthreads();
} while (B < Bend1);
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
saxpy(Ap[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ap[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ap[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ap[3], &Bb[3][ty*unroll_f], Cb);
lda = 0 ;
if ( iby < ibx ){
tx = 15 ;
}
else{
if ( tx > 15 ) {
if ( ty == 0 ){
lda = 1 ;
tx=15;
}
else{
lda = 1 ;
tx-=16;
}
}
else{
if ( ty == 0 ) {
lda = 1 ;
}
else {
lda = 2 ;
tx = 32 ;
}
}
}
if( (ibx + threadIdx.x ) >= m )
tx = -1 ;
{
switch(tx){
case 0:
C[0] =alpha*Cb[0] + beta * C[0];C+=ldc ;
break;
case 1:
C[0] =alpha*Cb[0] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
break;
case 2:
C[0] =alpha*Cb[0] + beta * C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
break;
case 3:
C[0] =alpha*Cb[0] + beta * C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
break;
case 4:
C[0] =alpha*Cb[0] + beta * C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
break;
case 5:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
break;
case 6:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
break;
case 7:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
break;
case 8:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
break;
case 9:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
break;
case 10:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
break;
case 11:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
break;
case 12:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
break;
case 13:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[13] + beta * C[0];C+=ldc ;
break;
case 14:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[13] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[14] + beta * C[0];C+=ldc ;
break;
case 15:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[13] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[14] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[15] + beta * C[0];C+=ldc ;
break;
default:
break;
}
}
}
}
__global__ void
Ssyr2k_v16_ts_odd_generic(float *C, const float *A, const float *B,
int m, int in, int k,
int lda, int ldb, int ldc,
float alpha, float beta)
{
int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x ;
int iby = blockIdx.y ;
iby = (iby+ibx ) % gridDim.y ;
int minor = iby &1 ;
bool bottom = ibx >=iby ;
ibx = ( bottom ) ? ibx : ( iby + gridDim.y - 1 );
iby = ( bottom ) ? iby : ( blockIdx.x + minor + gridDim.y );
if( iby > ibx ) iby = in +1 ;
ibx =ibx * block_M;
iby =iby * block_N;
const float *A1 = A ;
const float *B1 = B ;
{
B+= iby+tx;
B+= __mul24( ty,ldb);
A += ibx + tx;
C += ibx +tx +__mul24( iby+ty* unroll_f,ldc);
float Ap[4]={A[0], A[lda], A[2*lda], A[3*lda]};
float b=B[0];
float b2=B[2*ldb];
const float *Bend = B + ldb*k;
B+=4*ldb;
A+=4*lda;
__shared__ float Bb[4][block_N];
float Cb[unroll_f] = { 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0};
do {
float Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]};
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
Ap[0] = A[0];
Ap[1] = A[lda];
Ap[2] = A[2*lda];
Ap[3] = A[3*lda];
b=B[0];
b2=B[2*ldb];
saxpy(Ab[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ab[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ab[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ab[3], &Bb[3][ty*unroll_f], Cb);
A+=4*lda;
B += 4*ldb;
__syncthreads();
} while (B < Bend);
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
saxpy(Ap[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ap[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ap[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ap[3], &Bb[3][ty*unroll_f], Cb);
__syncthreads();
B=A1;
A=B1;
int tlda = lda ; lda = ldb ; ldb = tlda ;
B+= iby+tx;
B+= __mul24( ty,ldb);
A += ibx + tx;
Ap[0]=A[0] ;
Ap[1]=A[lda] ;
Ap[2]=A[2*lda] ;
Ap[3]=A[3*lda] ;
b=B[0];
b2=B[2*ldb];
const float *Bend1 = B + ldb*k;
B+=4*ldb;
A+=4*lda;
do {
float Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]};
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
Ap[0] = A[0];
Ap[1] = A[lda];
Ap[2] = A[2*lda];
Ap[3] = A[3*lda];
b=B[0];
b2=B[2*ldb];
saxpy(Ab[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ab[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ab[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ab[3], &Bb[3][ty*unroll_f], Cb);
A+=4*lda;
B += 4*ldb;
__syncthreads();
} while (B < Bend1);
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
saxpy(Ap[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ap[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ap[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ap[3], &Bb[3][ty*unroll_f], Cb);
__syncthreads();
lda = 0 ;
if ( iby < ibx ){
tx = 15 ;
}
else{
if ( tx > 15 ) {
if ( ty == 0 ){
lda = 1 ;
tx=15;
}
else{
lda = 1 ;
tx-=16;
}
}
else{
if ( ty == 0 ) {
lda = 1 ;
}
else {
lda = 2 ;
tx = 32 ;
}
}
}
if( (ibx + threadIdx.x ) >= m )
tx = -1;
{
switch(tx){
case 0:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
break;
case 1:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
break;
case 2:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
break;
case 3:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
break;
case 4:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
break;
case 5:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
break;
case 6:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
break;
case 7:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
break;
case 8:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
break;
case 9:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
break;
case 10:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
break;
case 11:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
break;
case 12:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
break;
case 13:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[13] + beta * C[0];C+=ldc ;
break;
case 14:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[13] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[14] + beta * C[0];C+=ldc ;
break;
case 15:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[13] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[14] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[15] + beta * C[0];C+=ldc ;
break;
default:
break;
}
}
}
}
__global__ void
Ssyr2k_v16_ts_even_special(int flag ,
float *C, const float *A, const float *B,
int m, int in, int k,
int lda, int ldb, int ldc,
float alpha, float beta)
{
int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x ;
int iby = blockIdx.y ;
if ( flag ==1 )
iby = (iby+ibx ) % gridDim.y ;
const int minor = iby&1 ;
const bool bottom = ibx >iby ;
ibx = ( bottom ) ? (ibx-1) : ( iby + gridDim.y );
iby = ( bottom ) ? iby : ( blockIdx.x +minor + gridDim.y );
if( iby > ibx ) iby = in ;
ibx =ibx * block_M;
iby =iby * block_N;
const float *A1 = A ;
const float *B1 = B ;
{
B+= iby+tx;
B+= __mul24( ty,ldb);
A += ibx + tx;
C += ibx +tx +__mul24( iby+ty* unroll_f,ldc);
float Ap[4]={A[0], A[lda], A[2*lda], A[3*lda]};
float b=B[0];
float b2=B[2*ldb];
const float *Bend = B + ldb*k;
B+=4*ldb;
A+=4*lda;
__shared__ float Bb[4][block_N];
float Cb[unroll_f] = { 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0};
do {
float Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]};
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
Ap[0] = A[0];
Ap[1] = A[lda];
Ap[2] = A[2*lda];
Ap[3] = A[3*lda];
b=B[0];
b2=B[2*ldb];
saxpy(Ab[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ab[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ab[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ab[3], &Bb[3][ty*unroll_f], Cb);
A+=4*lda;
B += 4*ldb;
__syncthreads();
} while (B < Bend);
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
saxpy(Ap[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ap[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ap[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ap[3], &Bb[3][ty*unroll_f], Cb);
// -- 2nd Half
B=A1;
A=B1;
int tlda = lda ; lda = ldb ; ldb = tlda ;
B+= iby+tx;
B+= __mul24( ty,ldb);
A += ibx + tx;
Ap[0]=A[0] ;
Ap[1]=A[lda] ;
Ap[2]=A[2*lda] ;
Ap[3]=A[3*lda] ;
b=B[0];
b2=B[2*ldb];
const float *Bend1 = B + ldb*k;
B+=4*ldb;
A+=4*lda;
do {
float Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]};
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
Ap[0] = A[0];
Ap[1] = A[lda];
Ap[2] = A[2*lda];
Ap[3] = A[3*lda];
b=B[0];
b2=B[2*ldb];
saxpy(Ab[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ab[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ab[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ab[3], &Bb[3][ty*unroll_f], Cb);
A+=4*lda;
B += 4*ldb;
__syncthreads();
} while (B < Bend1);
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
saxpy(Ap[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ap[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ap[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ap[3], &Bb[3][ty*unroll_f], Cb);
lda = 0 ;
if ( iby < ibx ){
/* #pragma unroll 16
for (int i = 0; i < unroll_f; i++, C += ldc)
C[0] =alpha*Cb[i] + beta * C[0];
*/
tx = 15 ;
}
else{
if ( tx > 15 ) {
if ( ty == 0 ){
lda = 1 ;
tx=15;
}
else{
lda = 1 ;
tx-=16;
}
}
else{
if ( ty == 0 ) {
lda = 1 ;
}
else {
lda = 2 ;
tx = 32 ;
}
}
}
{
switch(tx){
case 0:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
break;
case 1:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
break;
case 2:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
break;
case 3:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
break;
case 4:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
break;
case 5:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
break;
case 6:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
break;
case 7:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
break;
case 8:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
break;
case 9:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
break;
case 10:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
break;
case 11:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
break;
case 12:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
break;
case 13:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[13] + beta * C[0];C+=ldc ;
break;
case 14:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[13] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[14] + beta * C[0];C+=ldc ;
break;
case 15:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[13] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[14] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[15] + beta * C[0];C+=ldc ;
break;
default:
break;
}
}
}
}
__global__ void
Ssyr2k_v16_ts_odd_special(int flag,
float *C, const float *A, const float *B,
int m, int in, int k,
int lda, int ldb, int ldc,
float alpha, float beta)
{
int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x ;
int iby = blockIdx.y ;
if ( flag ==1 )
iby = (iby+ibx ) % gridDim.y ;
int minor = iby &1 ;
bool bottom = ibx >=iby ;
ibx = ( bottom ) ? ibx : ( iby + gridDim.y - 1 );
iby = ( bottom ) ? iby : ( blockIdx.x + minor + gridDim.y );
if( iby > ibx ) iby = in +1 ;
ibx =ibx * block_M;
iby =iby * block_N;
const float *A1 = A ;
const float *B1 = B ;
if( iby > ibx) {
return ;
}
else{
B+= iby+tx;
B+= __mul24( ty,ldb);
A += ibx + tx;
C += ibx +tx +__mul24( iby+ty* unroll_f,ldc);
float Ap[4]={A[0], A[lda], A[2*lda], A[3*lda]};
float b=B[0];
float b2=B[2*ldb];
const float *Bend = B + ldb*k;
B+=4*ldb;
A+=4*lda;
__shared__ float Bb[4][block_N];
float Cb[unroll_f] = { 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0};
do {
float Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]};
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
Ap[0] = A[0];
Ap[1] = A[lda];
Ap[2] = A[2*lda];
Ap[3] = A[3*lda];
b=B[0];
b2=B[2*ldb];
saxpy(Ab[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ab[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ab[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ab[3], &Bb[3][ty*unroll_f], Cb);
A+=4*lda;
B += 4*ldb;
__syncthreads();
} while (B < Bend);
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
saxpy(Ap[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ap[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ap[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ap[3], &Bb[3][ty*unroll_f], Cb);
B=A1;
A=B1;
int tlda = lda ; lda = ldb ; ldb = tlda ;
B+= iby+tx;
B+= __mul24( ty,ldb);
A += ibx + tx;
Ap[0]=A[0] ;
Ap[1]=A[lda] ;
Ap[2]=A[2*lda] ;
Ap[3]=A[3*lda] ;
b=B[0];
b2=B[2*ldb];
const float *Bend1 = B + ldb*k;
B+=4*ldb;
A+=4*lda;
do {
float Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]};
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
Ap[0] = A[0];
Ap[1] = A[lda];
Ap[2] = A[2*lda];
Ap[3] = A[3*lda];
b=B[0];
b2=B[2*ldb];
saxpy(Ab[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ab[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ab[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ab[3], &Bb[3][ty*unroll_f], Cb);
A+=4*lda;
B += 4*ldb;
__syncthreads();
} while (B < Bend1);
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
saxpy(Ap[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ap[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ap[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ap[3], &Bb[3][ty*unroll_f], Cb);
lda = 0 ;
if ( iby < ibx ){
/* #pragma unroll 16
for (int i = 0; i < unroll_f; i++, C += ldc)
C[0] =alpha*Cb[i] + beta * C[0];
*/
tx = 15 ;
}
else{
if ( tx > 15 ) {
if ( ty == 0 ){
lda = 1 ;
tx=15;
}
else{
lda = 1 ;
tx-=16;
}
}
else{
if ( ty == 0 ) {
lda = 1 ;
}
else {
lda = 2 ;
tx = 32 ;
}
}
}
{
switch(tx){
case 0:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
break;
case 1:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
break;
case 2:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
break;
case 3:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
break;
case 4:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
break;
case 5:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
break;
case 6:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
break;
case 7:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
break;
case 8:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
break;
case 9:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
break;
case 10:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
break;
case 11:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
break;
case 12:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
break;
case 13:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[13] + beta * C[0];C+=ldc ;
break;
case 14:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[13] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[14] + beta * C[0];C+=ldc ;
break;
case 15:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[13] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[14] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[15] + beta * C[0];C+=ldc ;
break;
default:
break;
}
}
}
}
extern "C" void
magmablas_ssyr2k(char UPLO, char TRANS, magma_int_t m , magma_int_t k, float alpha,
const float *A, magma_int_t lda , const float *B, magma_int_t ldb,
float beta, float *C, magma_int_t ldc)
{
/* -- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
Purpose
=======
SSYR2K performs one of the symmetric rank 2k operations
C := alpha*A*B' + alpha*B*A' + beta*C,
or
C := alpha*A'*B + alpha*B'*A + beta*C,
where alpha and beta are scalars, C is an n by n symmetric matrix
and A and B are n by k matrices in the first case and k by n
matrices in the second case.
This implementation is for UPLO == 'L' and TRANS == 'N'.
Assumptions
===========
Both lda and ldb must be multiple of 32.
Parameter k must be divisible by 8 - note that this algorithm was developed
for the tridiagonal factorization and k in that case would be the blocking size.
We always request the blocking size to be divisible by at least 16.
This kernel goes to about 300 GFlop/s on the GTX280.
====================================================================== */
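/* Illustrative example (added, not part of the original source): with m = 96 and
   k = 64, which satisfy the assumptions above, in = m/block_M = 3 is odd, so the
   branch below launches Ssyr2k_v16_ts_odd_special on a dim3(3, 2) grid of 32x2
   thread blocks to update the lower triangle of C with
   C := alpha*A*B' + alpha*B*A' + beta*C. */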
int in = m / block_M;
int flag = 1 ;
if ( lda >=1024 && lda %256 == 0 )
flag = 1 ; // Kept to reorder the GPU's internal scheduling of thread blocks.
if( m % block_M == 0 ) {
if ( in&1 )
{
dim3 grid( in, (in/2+1));
dim3 threads( thread_x, thread_y );
hipLaunchKernelGGL(( Ssyr2k_v16_ts_odd_special), dim3(grid), dim3(threads), 0, magma_stream , flag,
C, A, B,
m, in/2, k,
lda, ldb, ldc,
alpha, beta);
}
else
{
dim3 grid( in+1, (in/2));
dim3 threads( thread_x, thread_y );
hipLaunchKernelGGL(( Ssyr2k_v16_ts_even_special), dim3(grid), dim3(threads), 0, magma_stream , flag,
C, A, B,
m, in/2, k,
lda, ldb, ldc,
alpha, beta);
}
}
else{
in+=1;
if( in&1 )
{
dim3 grid( in, (in/2+1));
dim3 threads( thread_x, thread_y );
hipLaunchKernelGGL(( Ssyr2k_v16_ts_odd_generic), dim3(grid), dim3(threads), 0, magma_stream , C, A, B,
m, in/2, k,
lda, ldb, ldc,
alpha, beta);
}
else
{
dim3 grid( in+1, (in/2));
dim3 threads( thread_x, thread_y );
hipLaunchKernelGGL(( Ssyr2k_v16_ts_even_generic), dim3(grid), dim3(threads), 0, magma_stream , C, A, B,
m, in/2, k,
lda, ldb, ldc,
alpha, beta);
}
}
}
|
f7f93f4aa5580a91f4e9bc343ba36055173322ab.cu
|
/*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@precisions normal s -> s
*/
#include "common_magma.h"
#define block_M 32
#define block_N 32
#define thread_x 32
#define thread_y 2
#define unroll_f 16
__device__ void saxpy(float a,float *b, float *c) {
#pragma unroll
for (int i = 0; i < unroll_f; i++)
c[i] += a * b[i];
}
__global__ void
Ssyr2k_v16_ts_even_generic(float *C, const float *A, const float *B,
int m, int in, int k,
int lda, int ldb, int ldc,
float alpha, float beta)
{
int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x ;
int iby = blockIdx.y ;
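/* Added note (not in the original MAGMA source): the index arithmetic below
   appears to fold the rectangular launch grid onto the 32x32 tiles of the
   lower triangle of C (the host wrapper assumes UPLO=='L', TRANS=='N').
   In the main loops each thread holds 4 elements of A in registers (Ap/Ab),
   the block stages a 4x32 panel of B in shared memory (Bb), and each thread
   accumulates 16 entries of C in registers (Cb) via saxpy. */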
iby = (iby+ibx+3 ) % gridDim.y ;
const int minor = iby&1 ;
const bool bottom = ibx >iby ;
ibx = ( bottom ) ? (ibx-1) : ( iby + gridDim.y );
iby = ( bottom ) ? iby : ( blockIdx.x +minor + gridDim.y );
if( iby > ibx ) iby = in ;
ibx =ibx * block_M;
iby =iby * block_N;
const float *A1 = A ;
const float *B1 = B ;
{
B+= iby+tx;
B+= __mul24( ty,ldb);
A+= ibx + tx ;
C += ibx +tx +__mul24( iby+ty* unroll_f,ldc);
float Ap[4];
Ap[0]=A[0] ;
Ap[1]=A[lda] ;
Ap[2]=A[2*lda] ;
Ap[3]=A[3*lda] ;
float b=B[0];
float b2=B[2*ldb];
const float *Bend = B + ldb*k ;
B+=4*ldb;
A+=4*lda;
__shared__ float Bb[4][block_N];
float Cb[unroll_f] = { 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0};
do {
float Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]};
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
Ap[0] = A[0];
Ap[1] = A[lda];
Ap[2] = A[2*lda];
Ap[3] = A[3*lda];
b=B[0];
b2=B[2*ldb];
saxpy(Ab[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ab[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ab[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ab[3], &Bb[3][ty*unroll_f], Cb);
A+=4*lda;
B+= 4*ldb;
__syncthreads();
} while (B < Bend);
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
saxpy(Ap[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ap[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ap[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ap[3], &Bb[3][ty*unroll_f], Cb);
__syncthreads();
// -- 2nd Half
B=A1;
A=B1;
int tlda = lda ; lda = ldb ; ldb = tlda ;
B+= iby+tx;
B+= __mul24( ty,ldb);
A+= ibx + tx ;
Ap[0]=A[0] ;
Ap[1]=A[lda] ;
Ap[2]=A[2*lda] ;
Ap[3]=A[3*lda] ;
b=B[0];
b2=B[2*ldb];
const float *Bend1 = B + ldb*k;
B+=4*ldb;
A+=4*lda;
do {
float Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]};
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
Ap[0] = A[0];
Ap[1] = A[lda];
Ap[2] = A[2*lda];
Ap[3] = A[3*lda];
b=B[0];
b2=B[2*ldb];
saxpy(Ab[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ab[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ab[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ab[3], &Bb[3][ty*unroll_f], Cb);
A+=4*lda;
B += 4*ldb;
__syncthreads();
} while (B < Bend1);
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
saxpy(Ap[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ap[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ap[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ap[3], &Bb[3][ty*unroll_f], Cb);
lda = 0 ;
if ( iby < ibx ){
tx = 15 ;
}
else{
if ( tx > 15 ) {
if ( ty == 0 ){
lda = 1 ;
tx=15;
}
else{
lda = 1 ;
tx-=16;
}
}
else{
if ( ty == 0 ) {
lda = 1 ;
}
else {
lda = 2 ;
tx = 32 ;
}
}
}
if( (ibx + threadIdx.x ) >= m )
tx = -1 ;
{
switch(tx){
case 0:
C[0] =alpha*Cb[0] + beta * C[0];C+=ldc ;
break;
case 1:
C[0] =alpha*Cb[0] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
break;
case 2:
C[0] =alpha*Cb[0] + beta * C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
break;
case 3:
C[0] =alpha*Cb[0] + beta * C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
break;
case 4:
C[0] =alpha*Cb[0] + beta * C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
break;
case 5:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
break;
case 6:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
break;
case 7:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
break;
case 8:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
break;
case 9:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
break;
case 10:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
break;
case 11:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
break;
case 12:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
break;
case 13:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[13] + beta * C[0];C+=ldc ;
break;
case 14:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[13] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[14] + beta * C[0];C+=ldc ;
break;
case 15:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[13] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[14] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[15] + beta * C[0];C+=ldc ;
break;
default:
break;
}
}
}
}
__global__ void
Ssyr2k_v16_ts_odd_generic(float *C, const float *A, const float *B,
int m, int in, int k,
int lda, int ldb, int ldc,
float alpha, float beta)
{
int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x ;
int iby = blockIdx.y ;
iby = (iby+ibx ) % gridDim.y ;
int minor = iby &1 ;
bool bottom = ibx >=iby ;
ibx = ( bottom ) ? ibx : ( iby + gridDim.y - 1 );
iby = ( bottom ) ? iby : ( blockIdx.x + minor + gridDim.y );
if( iby > ibx ) iby = in +1 ;
ibx =ibx * block_M;
iby =iby * block_N;
const float *A1 = A ;
const float *B1 = B ;
{
B+= iby+tx;
B+= __mul24( ty,ldb);
A += ibx + tx;
C += ibx +tx +__mul24( iby+ty* unroll_f,ldc);
float Ap[4]={A[0], A[lda], A[2*lda], A[3*lda]};
float b=B[0];
float b2=B[2*ldb];
const float *Bend = B + ldb*k;
B+=4*ldb;
A+=4*lda;
__shared__ float Bb[4][block_N];
float Cb[unroll_f] = { 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0};
do {
float Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]};
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
Ap[0] = A[0];
Ap[1] = A[lda];
Ap[2] = A[2*lda];
Ap[3] = A[3*lda];
b=B[0];
b2=B[2*ldb];
saxpy(Ab[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ab[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ab[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ab[3], &Bb[3][ty*unroll_f], Cb);
A+=4*lda;
B += 4*ldb;
__syncthreads();
} while (B < Bend);
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
saxpy(Ap[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ap[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ap[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ap[3], &Bb[3][ty*unroll_f], Cb);
__syncthreads();
B=A1;
A=B1;
int tlda = lda ; lda = ldb ; ldb = tlda ;
B+= iby+tx;
B+= __mul24( ty,ldb);
A += ibx + tx;
Ap[0]=A[0] ;
Ap[1]=A[lda] ;
Ap[2]=A[2*lda] ;
Ap[3]=A[3*lda] ;
b=B[0];
b2=B[2*ldb];
const float *Bend1 = B + ldb*k;
B+=4*ldb;
A+=4*lda;
do {
float Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]};
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
Ap[0] = A[0];
Ap[1] = A[lda];
Ap[2] = A[2*lda];
Ap[3] = A[3*lda];
b=B[0];
b2=B[2*ldb];
saxpy(Ab[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ab[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ab[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ab[3], &Bb[3][ty*unroll_f], Cb);
A+=4*lda;
B += 4*ldb;
__syncthreads();
} while (B < Bend1);
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
saxpy(Ap[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ap[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ap[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ap[3], &Bb[3][ty*unroll_f], Cb);
__syncthreads();
lda = 0 ;
if ( iby < ibx ){
tx = 15 ;
}
else{
if ( tx > 15 ) {
if ( ty == 0 ){
lda = 1 ;
tx=15;
}
else{
lda = 1 ;
tx-=16;
}
}
else{
if ( ty == 0 ) {
lda = 1 ;
}
else {
lda = 2 ;
tx = 32 ;
}
}
}
if( (ibx + threadIdx.x ) >= m )
tx = -1;
{
switch(tx){
case 0:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
break;
case 1:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
break;
case 2:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
break;
case 3:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
break;
case 4:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
break;
case 5:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
break;
case 6:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
break;
case 7:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
break;
case 8:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
break;
case 9:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
break;
case 10:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
break;
case 11:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
break;
case 12:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
break;
case 13:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[13] + beta * C[0];C+=ldc ;
break;
case 14:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[13] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[14] + beta * C[0];C+=ldc ;
break;
case 15:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[13] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[14] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[15] + beta * C[0];C+=ldc ;
break;
default:
break;
}
}
}
}
__global__ void
Ssyr2k_v16_ts_even_special(int flag ,
float *C, const float *A, const float *B,
int m, int in, int k,
int lda, int ldb, int ldc,
float alpha, float beta)
{
int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x ;
int iby = blockIdx.y ;
if ( flag ==1 )
iby = (iby+ibx ) % gridDim.y ;
const int minor = iby&1 ;
const bool bottom = ibx >iby ;
ibx = ( bottom ) ? (ibx-1) : ( iby + gridDim.y );
iby = ( bottom ) ? iby : ( blockIdx.x +minor + gridDim.y );
if( iby > ibx ) iby = in ;
ibx =ibx * block_M;
iby =iby * block_N;
const float *A1 = A ;
const float *B1 = B ;
{
B+= iby+tx;
B+= __mul24( ty,ldb);
A += ibx + tx;
C += ibx +tx +__mul24( iby+ty* unroll_f,ldc);
float Ap[4]={A[0], A[lda], A[2*lda], A[3*lda]};
float b=B[0];
float b2=B[2*ldb];
const float *Bend = B + ldb*k;
B+=4*ldb;
A+=4*lda;
__shared__ float Bb[4][block_N];
float Cb[unroll_f] = { 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0};
do {
float Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]};
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
Ap[0] = A[0];
Ap[1] = A[lda];
Ap[2] = A[2*lda];
Ap[3] = A[3*lda];
b=B[0];
b2=B[2*ldb];
saxpy(Ab[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ab[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ab[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ab[3], &Bb[3][ty*unroll_f], Cb);
A+=4*lda;
B += 4*ldb;
__syncthreads();
} while (B < Bend);
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
saxpy(Ap[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ap[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ap[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ap[3], &Bb[3][ty*unroll_f], Cb);
// -- 2nd Half
B=A1;
A=B1;
int tlda = lda ; lda = ldb ; ldb = tlda ;
B+= iby+tx;
B+= __mul24( ty,ldb);
A += ibx + tx;
Ap[0]=A[0] ;
Ap[1]=A[lda] ;
Ap[2]=A[2*lda] ;
Ap[3]=A[3*lda] ;
b=B[0];
b2=B[2*ldb];
const float *Bend1 = B + ldb*k;
B+=4*ldb;
A+=4*lda;
do {
float Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]};
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
Ap[0] = A[0];
Ap[1] = A[lda];
Ap[2] = A[2*lda];
Ap[3] = A[3*lda];
b=B[0];
b2=B[2*ldb];
saxpy(Ab[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ab[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ab[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ab[3], &Bb[3][ty*unroll_f], Cb);
A+=4*lda;
B += 4*ldb;
__syncthreads();
} while (B < Bend1);
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
saxpy(Ap[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ap[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ap[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ap[3], &Bb[3][ty*unroll_f], Cb);
lda = 0 ;
if ( iby < ibx ){
/* #pragma unroll 16
for (int i = 0; i < unroll_f; i++, C += ldc)
C[0] =alpha*Cb[i] + beta * C[0];
*/
tx = 15 ;
}
else{
if ( tx > 15 ) {
if ( ty == 0 ){
lda = 1 ;
tx=15;
}
else{
lda = 1 ;
tx-=16;
}
}
else{
if ( ty == 0 ) {
lda = 1 ;
}
else {
lda = 2 ;
tx = 32 ;
}
}
}
{
switch(tx){
case 0:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
break;
case 1:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
break;
case 2:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
break;
case 3:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
break;
case 4:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
break;
case 5:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
break;
case 6:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
break;
case 7:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
break;
case 8:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
break;
case 9:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
break;
case 10:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
break;
case 11:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
break;
case 12:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
break;
case 13:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[13] + beta * C[0];C+=ldc ;
break;
case 14:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[13] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[14] + beta * C[0];C+=ldc ;
break;
case 15:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[13] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[14] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[15] + beta * C[0];C+=ldc ;
break;
default:
break;
}
}
}
}
__global__ void
Ssyr2k_v16_ts_odd_special(int flag,
float *C, const float *A, const float *B,
int m, int in, int k,
int lda, int ldb, int ldc,
float alpha, float beta)
{
int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x ;
int iby = blockIdx.y ;
if ( flag ==1 )
iby = (iby+ibx ) % gridDim.y ;
int minor = iby &1 ;
bool bottom = ibx >=iby ;
ibx = ( bottom ) ? ibx : ( iby + gridDim.y - 1 );
iby = ( bottom ) ? iby : ( blockIdx.x + minor + gridDim.y );
if( iby > ibx ) iby = in +1 ;
ibx =ibx * block_M;
iby =iby * block_N;
const float *A1 = A ;
const float *B1 = B ;
if( iby > ibx) {
return ;
}
else{
B+= iby+tx;
B+= __mul24( ty,ldb);
A += ibx + tx;
C += ibx +tx +__mul24( iby+ty* unroll_f,ldc);
float Ap[4]={A[0], A[lda], A[2*lda], A[3*lda]};
float b=B[0];
float b2=B[2*ldb];
const float *Bend = B + ldb*k;
B+=4*ldb;
A+=4*lda;
__shared__ float Bb[4][block_N];
float Cb[unroll_f] = { 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0};
do {
float Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]};
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
Ap[0] = A[0];
Ap[1] = A[lda];
Ap[2] = A[2*lda];
Ap[3] = A[3*lda];
b=B[0];
b2=B[2*ldb];
saxpy(Ab[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ab[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ab[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ab[3], &Bb[3][ty*unroll_f], Cb);
A+=4*lda;
B += 4*ldb;
__syncthreads();
} while (B < Bend);
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
saxpy(Ap[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ap[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ap[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ap[3], &Bb[3][ty*unroll_f], Cb);
B=A1;
A=B1;
int tlda = lda ; lda = ldb ; ldb = tlda ;
B+= iby+tx;
B+= __mul24( ty,ldb);
A += ibx + tx;
Ap[0]=A[0] ;
Ap[1]=A[lda] ;
Ap[2]=A[2*lda] ;
Ap[3]=A[3*lda] ;
b=B[0];
b2=B[2*ldb];
const float *Bend1 = B + ldb*k;
B+=4*ldb;
A+=4*lda;
do {
float Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]};
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
Ap[0] = A[0];
Ap[1] = A[lda];
Ap[2] = A[2*lda];
Ap[3] = A[3*lda];
b=B[0];
b2=B[2*ldb];
saxpy(Ab[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ab[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ab[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ab[3], &Bb[3][ty*unroll_f], Cb);
A+=4*lda;
B += 4*ldb;
__syncthreads();
} while (B < Bend1);
Bb[ty][tx]=b;
Bb[ty+2][tx]=b2;
__syncthreads();
saxpy(Ap[0], &Bb[0][ty*unroll_f], Cb);
saxpy(Ap[1], &Bb[1][ty*unroll_f], Cb);
saxpy(Ap[2], &Bb[2][ty*unroll_f], Cb);
saxpy(Ap[3], &Bb[3][ty*unroll_f], Cb);
lda = 0 ;
if ( iby < ibx ){
/* #pragma unroll 16
for (int i = 0; i < unroll_f; i++, C += ldc)
C[0] =alpha*Cb[i] + beta * C[0];
*/
tx = 15 ;
}
else{
if ( tx > 15 ) {
if ( ty == 0 ){
lda = 1 ;
tx=15;
}
else{
lda = 1 ;
tx-=16;
}
}
else{
if ( ty == 0 ) {
lda = 1 ;
}
else {
lda = 2 ;
tx = 32 ;
}
}
}
{
switch(tx){
case 0:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
break;
case 1:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
break;
case 2:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
break;
case 3:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
break;
case 4:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
break;
case 5:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
break;
case 6:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
break;
case 7:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
break;
case 8:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
break;
case 9:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
break;
case 10:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
break;
case 11:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
break;
case 12:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
break;
case 13:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[13] + beta * C[0];C+=ldc ;
break;
case 14:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[13] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[14] + beta * C[0];C+=ldc ;
break;
case 15:
C[0] =alpha*Cb[0] + beta*C[0]; C+=ldc ;
C[0] =alpha*Cb[1] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[2] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[3] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[4] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[5] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[6] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[7] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[8] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[9] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[10] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[11] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[12] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[13] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[14] + beta * C[0];C+=ldc ;
C[0] =alpha*Cb[15] + beta * C[0];C+=ldc ;
break;
default:
break;
}
}
}
}
extern "C" void
magmablas_ssyr2k(char UPLO, char TRANS, magma_int_t m , magma_int_t k, float alpha,
const float *A, magma_int_t lda , const float *B, magma_int_t ldb,
float beta, float *C, magma_int_t ldc)
{
/* -- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
Purpose
=======
SSYR2K performs one of the symmetric rank 2k operations
C := alpha*A*B' + alpha*B*A' + beta*C,
or
C := alpha*A'*B + alpha*B'*A + beta*C,
where alpha and beta are scalars, C is an n by n symmetric matrix
and A and B are n by k matrices in the first case and k by n
matrices in the second case.
This implementation is for UPLO == 'L' and TRANS == 'N'.
Assumptions
===========
Both lda and ldb must be multiples of 32.
Parameter k must be divisible by 8 - note that this algorithm was developed
for the tridiagonal factorization and k in that case would be the blocking size.
We always request the blocking size to be divisible by at least 16.
This kernel goes to about 300 GFlop/s on the GTX280.
====================================================================== */
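/* Illustrative call (a hedged sketch, not part of the original source; the
   device pointers d_A, d_B, d_C and the sizes n, nb, ldda are assumptions):
       magmablas_ssyr2k('L', 'N', n, nb, 1.0f, d_A, ldda, d_B, ldda, 0.0f, d_C, ldda);
   with ldda a multiple of 32 and nb divisible by 8, matching the assumptions
   stated above. */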
int in = m / block_M;
int flag = 1 ;
if ( lda >=1024 && lda %256 == 0 )
flag = 1 ; // kept to reorder the GPU's internal scheduling of thread blocks.
if( m % block_M == 0 ) {
if ( in&1 )
{
dim3 grid( in, (in/2+1));
dim3 threads( thread_x, thread_y );
Ssyr2k_v16_ts_odd_special<<< grid, threads, 0, magma_stream >>>(flag,
C, A, B,
m, in/2, k,
lda, ldb, ldc,
alpha, beta);
}
else
{
dim3 grid( in+1, (in/2));
dim3 threads( thread_x, thread_y );
Ssyr2k_v16_ts_even_special<<< grid, threads, 0, magma_stream >>>(flag,
C, A, B,
m, in/2, k,
lda, ldb, ldc,
alpha, beta);
}
}
else{
in+=1;
if( in&1 )
{
dim3 grid( in, (in/2+1));
dim3 threads( thread_x, thread_y );
Ssyr2k_v16_ts_odd_generic<<< grid, threads, 0, magma_stream >>>(C, A, B,
m, in/2, k,
lda, ldb, ldc,
alpha, beta);
}
else
{
dim3 grid( in+1, (in/2));
dim3 threads( thread_x, thread_y );
Ssyr2k_v16_ts_even_generic<<< grid, threads, 0, magma_stream >>>(C, A, B,
m, in/2, k,
lda, ldb, ldc,
alpha, beta);
}
}
}
|
bc3be238d11b4d0c99e7556e64ea439d73ec5afd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda.h>
#include<cuda_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include <math.h> // needed for the function sqrtf()
#define TILE_SIZE 2 // NB // Block SIZE
#define chunk_size 2
//AUX FUNCTIONS
/*
* Function to perform Cholesky factorization for a tile
* input is the pointer to shared memory for a tile given by t_A
*/
__device__ void spotrf_tile(float* t_A)
{
int ty = threadIdx.y; // col
int tx = threadIdx.z; // row
for(int k{0};k<TILE_SIZE;k++){
// square root of diagonal elements
if(tx==0 && ty==0)
t_A[k*TILE_SIZE + k] = sqrtf(t_A[k*TILE_SIZE + k]);
__syncthreads();
// division step done in parallel
if(ty<=tx && tx<TILE_SIZE - 1 && ty<TILE_SIZE - 1 && ty == k)
{
t_A[(tx+1)*TILE_SIZE + k]/= t_A[k*TILE_SIZE + k];
}
__syncthreads();
if(ty<=tx && tx<TILE_SIZE - 1 && ty<TILE_SIZE - 1 && ty >= k)
{
t_A[(tx+1)*TILE_SIZE + (ty+1)]-= t_A[(tx+1)*TILE_SIZE + k]*t_A[(ty+1)*TILE_SIZE + k];
}
__syncthreads();
}
}
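/*
 * Worked example (illustrative, assuming TILE_SIZE == 2): for the SPD tile
 * t_A = [4 2; 2 5] stored row-major, spotrf_tile overwrites the lower triangle
 * in place with the Cholesky factor L = [2 0; 1 2] (so that A = L*L^T); the
 * strictly upper entries are left untouched and are later discarded by store_lower.
 */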
/*
* Function to perform triangular solve for a tile
* inputs are two shared memory pointers to tiles given by t_A1 and t_A2
* implementing triangular solve on tile t_A2 using t_A1
*/
__device__ void strsm_tile(float *t_A1, float *t_A2)
{
// t_A2 holds the current unknowns
int ty = threadIdx.y; // access column
int tx = threadIdx.z; // access row
for(int i{0};i<TILE_SIZE;i++){
if(ty==0){
t_A2[tx*TILE_SIZE + i]/= t_A1[i*TILE_SIZE + i]; // division step
}
__syncthreads();
if(ty>i && i<TILE_SIZE-1)
{
t_A2[tx*TILE_SIZE+ty]-= t_A2[tx*TILE_SIZE + i]*t_A1[ty*TILE_SIZE + i];
}
__syncthreads();
}
}
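/*
 * Illustrative note (a sketch of the intended effect): each row b of t_A2 is
 * overwritten with the solution x of x * t_A1^T = b by forward substitution,
 * i.e. t_A2 becomes t_A2 * t_A1^{-T}, which is the A21 := A21 * L11^{-T} panel
 * update used in the blocked Cholesky factorization.
 */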
/*
* Function to perform rank-k update
* half of the threads working
* inputs are pointers to the shared memory for two tiles given by rA1 and rA2
* implementing rank-k update of the tile rA2 using tile rA1
*/
__device__ void ssyrk_tile(float* rA1, float* rA2)
{
int row = threadIdx.z;
int column = threadIdx.y;
if(column <= row)
{
float updatedValue = rA2[row * TILE_SIZE + column];
for(int k=0; k<TILE_SIZE; k++)
{
updatedValue -= rA1[row * TILE_SIZE + k] * rA1[column * TILE_SIZE + k];
}
rA2[row * TILE_SIZE + column] = updatedValue;
}
}
/*
* Function to perform general matrix multiplication
* NOTE: the indexing given in the paper appears to be wrong; it should be rA2[k][n], since the tiles here are stored in row-major form
* inputs are pointers to the shared memory for three tiles given by rA1, rA2 and rA3
* implementing sgemm on tile rA3 using rA1 and rA2
*/
__device__ void sgemm_tile(const float* rA1, const float* rA2, float* rA3)
{
int row = threadIdx.z;
int column = threadIdx.y;
float updatedValue = rA3[row * TILE_SIZE + column];
for(int i=0; i<TILE_SIZE; i++)
{
updatedValue -= rA1[row * TILE_SIZE + i] * rA2[i * TILE_SIZE + column];
}
rA3[row * TILE_SIZE + column] = updatedValue;
}
/*
* Function to store full tile from shared memory back to global memory
* inputs are pointers to tile of shared memory and global memory given by s_mem and g_mem
* tile_y and tile_x are integers representing tile access numbers in y and x dimensions
*/
__device__ void store_full(float *g_mem, float *s_mem, int tile_y, int tile_x, int N, int M, int shared_size_single_matrix)
{
int tx = threadIdx.y; // local threadid in x
int ty = threadIdx.z; // local threadid in y
int row = tile_y * TILE_SIZE + ty; // access row
int column = tile_x * TILE_SIZE + tx; // access col
if(row < N && column < N)
{
int x = row*N + column;
int global_id = ((threadIdx.x / chunk_size) *chunk_size)*N*N + x*chunk_size + (threadIdx.x % chunk_size);
g_mem[global_id] = (tx < TILE_SIZE && ty < TILE_SIZE) ? s_mem[ty * TILE_SIZE + tx + shared_size_single_matrix*threadIdx.x] : 0;
}
__syncthreads();
}
/*
* Function to store lower triangular tile from shared memory to global memory
* inputs are pointers to tile of shared memory and global memory given by s_mem and g_mem
* tile_y and tile_x are integers representing tile access numbers in y and x dimensions and N is matrix size
*/
__device__ void store_lower(float *g_mem, float *s_mem, int tile_y, int tile_x, int N, int M, int shared_size_single_matrix)
{
int tx = threadIdx.y; // local threadid in x
int ty = threadIdx.z; // local threadid in y
int row = tile_y * TILE_SIZE + ty; // access row
int column = tile_x * TILE_SIZE + tx; // access col
if(row < N && column < N)
{
int x = row*N + column;
int global_id = ((threadIdx.x / chunk_size) *chunk_size)*N*N + x*chunk_size + (threadIdx.x % chunk_size);
g_mem[global_id] = (tx < TILE_SIZE && ty < TILE_SIZE && column <= row) ? s_mem[ty * TILE_SIZE + tx + shared_size_single_matrix*threadIdx.x] : 0;
}
__syncthreads();
}
/*
* Function to load a full tile from global memory to shared memory
* inputs are pointers to tile of shared memory and global memory given by s_mem and g_mem
* tile_y and tile_x are integers representing tile access numbers in y and x dimensions and N is matrix size
*/
__device__ void load_full(float *g_mem, float *s_mem, int tile_y, int tile_x, int N, int M, int shared_size_single_matrix)
{
int tx = threadIdx.x; // local threadid in x
int ty = threadIdx.y; // local threadid in y
int tz = threadIdx.z;
//printf("%d %d %d \n",tx,ty,tz);
int row = tile_y * TILE_SIZE + tz; // access row
int column = tile_x * TILE_SIZE + ty; // access col
if(ty < TILE_SIZE && tz < TILE_SIZE && tx<M)
{
int x = row*N + column;
int global_id = ((tx / chunk_size) *chunk_size)*N*N + x*chunk_size + (tx % chunk_size);
s_mem[tz * TILE_SIZE + ty + shared_size_single_matrix*tx] = (row < N && column < N) ? g_mem[global_id] : 0; // chunk-interleaved global memory access; M: number of matrices, N: dimension of each matrix
}
__syncthreads();
}
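/*
 * Layout example (illustrative, assuming chunk_size == 2): matrices within a
 * chunk are interleaved element by element in global memory, so for matrices
 * m0 and m1 of the first chunk
 *     g_mem = [ m0(0,0), m1(0,0), m0(0,1), m1(0,1), ... ]
 * and the next chunk starts at offset chunk_size*N*N, which is exactly what
 * the global_id expression above computes.
 */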
/*
* Function to store zero elements in the global memory tile given by g_mem
* tile_y and tile_x are integers representing tile access numbers in y and x dimensions and N is matrix size
*/
__device__ void store_zeros(float *g_mem, int tile_y, int tile_x, int N, int M)
{
int tx = threadIdx.y; // local threadid in x
int ty = threadIdx.z; // local threadid in y
int row = tile_y * TILE_SIZE + ty; // access row
int column = tile_x * TILE_SIZE + tx; // access col
if(row < N && column < N)
{
int x = row*N + column;
int global_id = ((threadIdx.x / chunk_size) *chunk_size)*N*N + x*chunk_size + (threadIdx.x % chunk_size);
g_mem[global_id] = 0;
}
__syncthreads();
}
/* LEFT LOOKING KERNEL FUNCTIONS */
__global__ void left_looking_kernel(float *g_in, int N, int M , int shared_size_single_matrix)
{
// (ceil(N / TILE_SIZE) + 2) * sizeof(TILE) amount of shared memory
extern __shared__ float s_current_panel[];
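/* Shared memory layout (per matrix, replicated every shared_size_single_matrix
   floats along threadIdx.x): tiles 0 .. no_of_tiles-1 hold the current panel,
   tile no_of_tiles holds the operand of the rank-k update, and tile
   no_of_tiles+1 holds the second operand of the sgemm update. */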
// Pointers for accessing shared memory locations
float *rA1 = NULL;
float *rA2 = NULL;
float *rA3 = NULL;
int tx = threadIdx.x;
// no of tiles in a column
int no_of_tiles = (N / TILE_SIZE) + (N % TILE_SIZE != 0); // ceil (N / TILE_SIZE)
// i: current panel
for(int i=0; i<no_of_tiles; i++)
{
// loading current panel in shared memory
for(int j=0; j<no_of_tiles; j++)
{
rA1 = &s_current_panel[j * TILE_SIZE * TILE_SIZE];
load_full(g_in, rA1, j, i, N, M, shared_size_single_matrix);
}
__syncthreads();
// UPDATE CURRENT PANEL using preceding panels
// j: preceding panel no.
for(int j=0; j<i; j++)
{
// Loading data for rank-k update in shared memory
rA1 = &s_current_panel[no_of_tiles * TILE_SIZE * TILE_SIZE];
load_full(g_in, rA1, i, j, N, M, shared_size_single_matrix);
__syncthreads();
// Rank-k update
rA1 = &s_current_panel[tx*shared_size_single_matrix +no_of_tiles * TILE_SIZE * TILE_SIZE];
rA2 = &s_current_panel[tx*shared_size_single_matrix +i * TILE_SIZE * TILE_SIZE];
ssyrk_tile(rA1, rA2);
__syncthreads();
// Applying SGEMM
for(int k=i+1; k<no_of_tiles; k++)
{
// Loading data for sgemm in shared memory
rA1 = &s_current_panel[(no_of_tiles + 1) * TILE_SIZE * TILE_SIZE];
load_full(g_in, rA1, k, j, N , M, shared_size_single_matrix);
__syncthreads();
// sgemm
rA1 = &s_current_panel[tx*shared_size_single_matrix +no_of_tiles * TILE_SIZE * TILE_SIZE];
rA2 = &s_current_panel[tx*shared_size_single_matrix +(no_of_tiles + 1) * TILE_SIZE * TILE_SIZE];
rA3 = &s_current_panel[tx*shared_size_single_matrix +k * TILE_SIZE * TILE_SIZE];
sgemm_tile(rA1, rA2, rA3);
__syncthreads();
}
}
// FACTORIZE CURRENT PANEL
// applying spotrf on the tile (i, i)
rA1 = &s_current_panel[tx*shared_size_single_matrix +i * TILE_SIZE * TILE_SIZE];
spotrf_tile(rA1);
__syncthreads();
// Applying TRSM
for(int k=i+1; k<no_of_tiles; k++)
{
// trsm
rA2 = &s_current_panel[tx*shared_size_single_matrix +k * TILE_SIZE * TILE_SIZE];
strsm_tile(rA1, rA2);
__syncthreads();
}
// STORING the current panel back in the global memory
for (int k=0; k<no_of_tiles; k++)
{
rA1 = &s_current_panel[k * TILE_SIZE * TILE_SIZE];
// store zeros for tiles above the tile (i, i)
if(k < i)
{
store_zeros(g_in, k, i, N, M);
}
else
{
// store lower for tile (i, i)
if(k == i)
{
store_lower(g_in, rA1, k, i, N, M, shared_size_single_matrix);
}
else // store full for tiles below the tile (i, i)
{
store_full(g_in, rA1, k, i, N, M, shared_size_single_matrix);
}
}
}
__syncthreads();
}
}
__global__ void left_looking_kernel_less_mem(float *g_in, int N, int M , int shared_size_single_matrix)
{
extern __shared__ float s_current_panel[];
// Pointers for accessing shared memory locations
float *rA1 = NULL;
float *rA2 = NULL;
float *rA3 = NULL;
// no of tiles in a column
int no_of_tiles = (N / TILE_SIZE) + (N % TILE_SIZE != 0); // ceil(N / TILE_SIZE)
int tx = threadIdx.x;
// i: current panel
for(int i=0; i<no_of_tiles; i++)
{
// loading tile(i, i)
rA1 = &s_current_panel[0];
load_full(g_in, rA1, i, i, N, M, shared_size_single_matrix);
for(int j=0; j<no_of_tiles; j++)
{
if(j >= i)
{
if(j == i) // representing the tile on which spotrf will be carried out
{
for(int k=0; k<i; k++) // k iterates over tiles left of (i,i) tile
{
rA2 = &s_current_panel[2 * TILE_SIZE * TILE_SIZE];
load_full(g_in, rA2, j, k, N , M, shared_size_single_matrix);
rA2 = &s_current_panel[tx*shared_size_single_matrix + 2 * TILE_SIZE * TILE_SIZE];
rA1 = &s_current_panel[tx*shared_size_single_matrix + 0];
ssyrk_tile(rA1, rA2); // rank-k update on rA1 using rA2
__syncthreads();
}
rA1 = &s_current_panel[tx*shared_size_single_matrix + 0];
spotrf_tile(rA1);
__syncthreads();
rA1 = &s_current_panel[0];
store_lower(g_in, rA1, i, i, N, M, shared_size_single_matrix); // storing the (i,i) tile back to global memory after calling spotrf
}
else
{
rA3 = &s_current_panel[1 * TILE_SIZE * TILE_SIZE];
load_full(g_in, rA3, j, i, N, M, shared_size_single_matrix);
for(int k=0; k<i; k++) // k iterates over the preceding panels, i.e. tiles left of the (j,i) tile
{
rA1 = &s_current_panel[2 * TILE_SIZE * TILE_SIZE];
load_full(g_in, rA1, i, k, N, M, shared_size_single_matrix);
rA2 = &s_current_panel[3 * TILE_SIZE * TILE_SIZE];
load_full(g_in, rA2, j, k, N, M, shared_size_single_matrix); // load tile (j,k) into the fourth tile slot (base pointer; load_full applies the per-matrix offset itself)
rA2 = &s_current_panel[tx*shared_size_single_matrix + 3 * TILE_SIZE * TILE_SIZE];
rA1 = &s_current_panel[tx*shared_size_single_matrix + 2 * TILE_SIZE * TILE_SIZE];
rA3 = &s_current_panel[tx*shared_size_single_matrix + 1 * TILE_SIZE * TILE_SIZE];
sgemm_tile(rA1, rA2, rA3); // sgemm on tile rA3 using tiles rA1 and rA2
__syncthreads();
}
rA1 = &s_current_panel[tx*shared_size_single_matrix + 0];
rA2 = &s_current_panel[tx*shared_size_single_matrix + 1 * TILE_SIZE * TILE_SIZE];
strsm_tile(rA1, rA2); // strsm on tile rA2 using tile rA1
__syncthreads();
rA2 = &s_current_panel[1 * TILE_SIZE * TILE_SIZE];
store_full(g_in, rA2, j, i, N, M, shared_size_single_matrix); // storing back to global memory
}
}
else
{
store_zeros(g_in, j, i, N, M); // stores zero in the tile given by pointer g_in
}
}
__syncthreads();
}
}
//MAIN PROGRAM
int main() {
// READ FROM THE INPUT FILE
FILE *fptr;
fptr = fopen("./input_2.txt", "r");
int num_of_matrices, dim_of_matrix;
fscanf(fptr, "%d", &num_of_matrices);
fscanf(fptr, "%d", &dim_of_matrix);
float read_element;
float* h_A = NULL;
int numElements = num_of_matrices * dim_of_matrix * dim_of_matrix;
size_t size = numElements * sizeof(float);
hipDeviceProp_t devp;
hipGetDeviceProperties(&devp, 0);
//Chunk size
// int chunk_size = 2;
h_A = (float *)malloc(size);
int global_id = 0;
for(int p = 0; p < (num_of_matrices/chunk_size); p++)
{
for (int matrix_index = 0; matrix_index < chunk_size; matrix_index++)
{
for (int row = 0; row < dim_of_matrix; row++)
{
for (int column = 0; column < dim_of_matrix; column++)
{
fscanf(fptr, "%f", &read_element);
int x = row*dim_of_matrix + column;
global_id = (p*chunk_size)*dim_of_matrix*dim_of_matrix + x*chunk_size + matrix_index;
h_A[global_id] = read_element;
// printf("At pos %d we get %0.2f\n", global_id, h_A[global_id]);
// printf("%0.2f \n ", h_A[global_id]);
}
}
}
}
printf("\nRead from the input file successfully!\n");
fclose(fptr);
printf("\nPrinting the host-side input array read from the input file:\n");
for (int i = 0; i < numElements; i++) {
printf("%f ", h_A[i]);
}
printf("\n\n");
// COPY TO DEVICE
hipError_t err = hipSuccess;
float *d_A = NULL;
err = hipMalloc((void **)&d_A, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
else {
printf("Copied the h_A to device side successfully!\n\n");
}
// LAUNCH KERNEL
// int threadsPerBlock = 256;
// int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
//printf("Right-Looking CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
//checkKernel <<<blocksPerGrid, threadsPerBlock>>> (d_A, numElements);
dim3 grid(1, 1, 1);
dim3 block(num_of_matrices, TILE_SIZE, TILE_SIZE);
// no of tiles in a column
int INPUT_SIZE = dim_of_matrix;
int no_of_tiles = (INPUT_SIZE / TILE_SIZE) + (INPUT_SIZE % TILE_SIZE != 0); // ceil of (INPUT_SIZE / TILE_SIZE)
if(TILE_SIZE == INPUT_SIZE)
{
// printf("The if statement works.\n");
hipLaunchKernelGGL(( left_looking_kernel), dim3(grid), dim3(block), num_of_matrices * 1 * TILE_SIZE * TILE_SIZE * sizeof(float), 0, d_A, dim_of_matrix, num_of_matrices ,1 * TILE_SIZE * TILE_SIZE);
}
else if((no_of_tiles + 2) * TILE_SIZE * TILE_SIZE * sizeof(float) < devp.sharedMemPerBlock)
{
//printf("The if statement works.\n");
hipLaunchKernelGGL(( left_looking_kernel_less_mem), dim3(grid), dim3(block), num_of_matrices * 4 * TILE_SIZE * TILE_SIZE * sizeof(float), 0, d_A, dim_of_matrix, num_of_matrices ,4 * TILE_SIZE * TILE_SIZE);
// left_looking_kernel<<<grid, block,num_of_matrices * (no_of_tiles + 2) * TILE_SIZE * TILE_SIZE * sizeof(float)>>>(d_A, dim_of_matrix, num_of_matrices ,(no_of_tiles + 2) * TILE_SIZE * TILE_SIZE);
}
else
{
hipLaunchKernelGGL(( left_looking_kernel_less_mem), dim3(grid), dim3(block), num_of_matrices * 4 * TILE_SIZE * TILE_SIZE * sizeof(float), 0, d_A, dim_of_matrix, num_of_matrices ,4 * TILE_SIZE * TILE_SIZE);
}
hipError_t cudaerr = hipDeviceSynchronize();
if (cudaerr != hipSuccess) {
printf("kernel launch failed with error \"%s\".\n",
hipGetErrorString(cudaerr));
}
// COPY BACK TO HOST, FREE CUDA MEM, HOST MEM, AND RESET CUDA
err = hipMemcpy(h_A, d_A, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
else {
printf("\nCopied d_A to host side successfully!\n");
}
printf("\nPrinting the output of cudememcopyDeviceToHost, i.e. the host-side array returned from device side:\n");
for (int i = 0; i < numElements; i++) {
printf("%f ", h_A[i]);
}
err = hipFree(d_A);
if(err != hipSuccess)
{
fprintf(stderr, "\nFailed to free device matrix M (error code %s)\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipDeviceReset();
if(err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the CUDA device (error code %s)\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
FILE *fptr1;
fptr1 = fopen("./output.txt", "w+");
float write_element;
fprintf(fptr1, "%d\n", num_of_matrices);
fprintf(fptr1, "%d\n", dim_of_matrix);
for(int p = 0; p < (num_of_matrices/chunk_size); p++)
{
for (int matrix_index = 0; matrix_index < chunk_size; matrix_index++)
{
for (int row = 0; row < dim_of_matrix; row++)
{
for (int column = 0; column < dim_of_matrix; column++)
{
// fscanf(fptr, "%f", &read_element);
int x = row*dim_of_matrix + column;
global_id = (p*chunk_size)*dim_of_matrix*dim_of_matrix + x*chunk_size + matrix_index;
write_element = h_A[global_id];
fprintf(fptr1, "%f ", write_element);
// printf("At pos %d we get %0.2f\n", global_id, h_A[global_id]);
// printf("%0.2f \n ", h_A[global_id]);
}
fprintf(fptr1, "\n");
}
}
}
printf("\nRead from the input file successfully!\n");
fclose(fptr1);
// free(h_A);
// printf("\n\nAll tasks completed successfully!\n\n");
return 0;
}
|
bc3be238d11b4d0c99e7556e64ea439d73ec5afd.cu
|
#include<cuda.h>
#include<cuda_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include <math.h> // needed for the function sqrtf()
#define TILE_SIZE 2 // NB // Block SIZE
#define chunk_size 2
//AUX FUNCTIONS
/*
* Function to perform Cholesky factorization for a tile
* input is the pointer to shared memory for a tile given by t_A
*/
__device__ void spotrf_tile(float* t_A)
{
int ty = threadIdx.y; // col
int tx = threadIdx.z; // row
for(int k{0};k<TILE_SIZE;k++){
// square root of diagonal elements
if(tx==0 && ty==0)
t_A[k*TILE_SIZE + k] = sqrtf(t_A[k*TILE_SIZE + k]);
__syncthreads();
// division step done in parallel
if(ty<=tx && tx<TILE_SIZE - 1 && ty<TILE_SIZE - 1 && ty == k)
{
t_A[(tx+1)*TILE_SIZE + k]/= t_A[k*TILE_SIZE + k];
}
__syncthreads();
if(ty<=tx && tx<TILE_SIZE - 1 && ty<TILE_SIZE - 1 && ty >= k)
{
t_A[(tx+1)*TILE_SIZE + (ty+1)]-= t_A[(tx+1)*TILE_SIZE + k]*t_A[(ty+1)*TILE_SIZE + k];
}
__syncthreads();
}
}
/*
* Function to perform triangular solve for a tile
* inputs are two shared memory pointers to tiles given by t_A1 and t_A2
* implementing triangular solve on tile t_A2 using t_A1
*/
__device__ void strsm_tile(float *t_A1, float *t_A2)
{
// t_A2 holds the current unknowns
int ty = threadIdx.y; // access column
int tx = threadIdx.z; // access row
for(int i{0};i<TILE_SIZE;i++){
if(ty==0){
t_A2[tx*TILE_SIZE + i]/= t_A1[i*TILE_SIZE + i]; // division step
}
__syncthreads();
if(ty>i && i<TILE_SIZE-1)
{
t_A2[tx*TILE_SIZE+ty]-= t_A2[tx*TILE_SIZE + i]*t_A1[ty*TILE_SIZE + i];
}
__syncthreads();
}
}
/*
* Function to perform rank-k update
* half of the threads working
* inputs are pointers to the shared memory for two tiles given by rA1 and rA2
* implementing rank-k update of the tile rA2 using tile rA1
*/
__device__ void ssyrk_tile(float* rA1, float* rA2)
{
int row = threadIdx.z;
int column = threadIdx.y;
if(column <= row)
{
float updatedValue = rA2[row * TILE_SIZE + column];
for(int k=0; k<TILE_SIZE; k++)
{
updatedValue -= rA1[row * TILE_SIZE + k] * rA1[column * TILE_SIZE + k];
}
rA2[row * TILE_SIZE + column] = updatedValue;
}
}
/*
* Function to perform general matrix multiplication
* NOTE: the indexing given in the paper appears to be wrong; it should be rA2[k][n], since the tiles here are stored in row-major form
* inputs are pointers to the shared memory for three tiles given by rA1, rA2 and rA3
* implementing sgemm on tile rA3 using rA1 and rA2
*/
__device__ void sgemm_tile(const float* rA1, const float* rA2, float* rA3)
{
int row = threadIdx.z;
int column = threadIdx.y;
float updatedValue = rA3[row * TILE_SIZE + column];
for(int i=0; i<TILE_SIZE; i++)
{
updatedValue -= rA1[row * TILE_SIZE + i] * rA2[i * TILE_SIZE + column];
}
rA3[row * TILE_SIZE + column] = updatedValue;
}
/*
* Function to store full tile from shared memory back to global memory
* inputs are pointers to tile of shared memory and global memory given by s_mem and g_mem
* tile_y and tile_x are integers representing tile access numbers in y and x dimensions
*/
__device__ void store_full(float *g_mem, float *s_mem, int tile_y, int tile_x, int N, int M, int shared_size_single_matrix)
{
int tx = threadIdx.y; // local threadid in x
int ty = threadIdx.z; // local threadid in y
int row = tile_y * TILE_SIZE + ty; // access row
int column = tile_x * TILE_SIZE + tx; // access col
if(row < N && column < N)
{
int x = row*N + column;
int global_id = ((threadIdx.x / chunk_size) *chunk_size)*N*N + x*chunk_size + (threadIdx.x % chunk_size);
g_mem[global_id] = (tx < TILE_SIZE && ty < TILE_SIZE) ? s_mem[ty * TILE_SIZE + tx + shared_size_single_matrix*threadIdx.x] : 0;
}
__syncthreads();
}
/*
* Function to store lower triangular tile from shared memory to global memory
* inputs are pointers to tile of shared memory and global memory given by s_mem and g_mem
* tile_y and tile_x are integers representing tile access numbers in y and x dimensions and N is matrix size
*/
__device__ void store_lower(float *g_mem, float *s_mem, int tile_y, int tile_x, int N, int M, int shared_size_single_matrix)
{
int tx = threadIdx.y; // local threadid in x
int ty = threadIdx.z; // local threadid in y
int row = tile_y * TILE_SIZE + ty; // access row
int column = tile_x * TILE_SIZE + tx; // access col
if(row < N && column < N)
{
int x = row*N + column;
int global_id = ((threadIdx.x / chunk_size) *chunk_size)*N*N + x*chunk_size + (threadIdx.x % chunk_size);
g_mem[global_id] = (tx < TILE_SIZE && ty < TILE_SIZE && column <= row) ? s_mem[ty * TILE_SIZE + tx + shared_size_single_matrix*threadIdx.x] : 0;
}
__syncthreads();
}
/*
* Function to load a full tile from global memory to shared memory
* inputs are pointers to tile of shared memory and global memory given by s_mem and g_mem
* tile_y and tile_x are integers representing tile access numbers in y and x dimensions and N is matrix size
*/
__device__ void load_full(float *g_mem, float *s_mem, int tile_y, int tile_x, int N, int M, int shared_size_single_matrix)
{
int tx = threadIdx.x; // local threadid in x
int ty = threadIdx.y; // local threadid in y
int tz = threadIdx.z;
//printf("%d %d %d \n",tx,ty,tz);
int row = tile_y * TILE_SIZE + tz; // access row
int column = tile_x * TILE_SIZE + ty; // access col
if(ty < TILE_SIZE && tz < TILE_SIZE && tx<M)
{
int x = row*N + column;
int global_id = ((tx / chunk_size) *chunk_size)*N*N + x*chunk_size + (tx % chunk_size);
s_mem[tz * TILE_SIZE + ty + shared_size_single_matrix*tx] = (row < N && column < N) ? g_mem[global_id] : 0; // chunk-interleaved global memory access; M: number of matrices, N: dimension of each matrix
}
__syncthreads();
}
/*
* Function to store zero elements in the global memory tile given by g_mem
* tile_y and tile_x are integers representing tile access numbers in y and x dimensions and N is matrix size
*/
__device__ void store_zeros(float *g_mem, int tile_y, int tile_x, int N, int M)
{
int tx = threadIdx.y; // local threadid in x
int ty = threadIdx.z; // local threadid in y
int row = tile_y * TILE_SIZE + ty; // access row
int column = tile_x * TILE_SIZE + tx; // access col
if(row < N && column < N)
{
int x = row*N + column;
int global_id = ((threadIdx.x / chunk_size) *chunk_size)*N*N + x*chunk_size + (threadIdx.x % chunk_size);
g_mem[global_id] = 0;
}
__syncthreads();
}
/* LEFT LOOKING KERNEL FUNCTIONS */
__global__ void left_looking_kernel(float *g_in, int N, int M , int shared_size_single_matrix)
{
// (ceil(N / TILE_SIZE) + 2) * sizeof(TILE) amount of shared memory
extern __shared__ float s_current_panel[];
// Pointers for accessing shared memory locations
float *rA1 = NULL;
float *rA2 = NULL;
float *rA3 = NULL;
int tx = threadIdx.x;
// no of tiles in a column
int no_of_tiles = (N / TILE_SIZE) + (N % TILE_SIZE != 0); // ceil (N / TILE_SIZE)
// i: current panel
for(int i=0; i<no_of_tiles; i++)
{
// loading current panel in shared memory
for(int j=0; j<no_of_tiles; j++)
{
rA1 = &s_current_panel[j * TILE_SIZE * TILE_SIZE];
load_full(g_in, rA1, j, i, N, M, shared_size_single_matrix);
}
__syncthreads();
// UPDATE CURRENT PANEL using preceding panels
// j: preceding panel no.
for(int j=0; j<i; j++)
{
// Loading data for rank-k update in shared memory
rA1 = &s_current_panel[no_of_tiles * TILE_SIZE * TILE_SIZE];
load_full(g_in, rA1, i, j, N, M, shared_size_single_matrix);
__syncthreads();
// Rank-k update
rA1 = &s_current_panel[tx*shared_size_single_matrix +no_of_tiles * TILE_SIZE * TILE_SIZE];
rA2 = &s_current_panel[tx*shared_size_single_matrix +i * TILE_SIZE * TILE_SIZE];
ssyrk_tile(rA1, rA2);
__syncthreads();
// Applying SGEMM
for(int k=i+1; k<no_of_tiles; k++)
{
// Loading data for sgemm in shared memory
rA1 = &s_current_panel[(no_of_tiles + 1) * TILE_SIZE * TILE_SIZE];
load_full(g_in, rA1, k, j, N , M, shared_size_single_matrix);
__syncthreads();
// sgemm
rA1 = &s_current_panel[tx*shared_size_single_matrix +no_of_tiles * TILE_SIZE * TILE_SIZE];
rA2 = &s_current_panel[tx*shared_size_single_matrix +(no_of_tiles + 1) * TILE_SIZE * TILE_SIZE];
rA3 = &s_current_panel[tx*shared_size_single_matrix +k * TILE_SIZE * TILE_SIZE];
sgemm_tile(rA1, rA2, rA3);
__syncthreads();
}
}
// FACTORIZE CURRENT PANEL
// applying spotrf on the tile (i, i)
rA1 = &s_current_panel[tx*shared_size_single_matrix +i * TILE_SIZE * TILE_SIZE];
spotrf_tile(rA1);
__syncthreads();
// Applying TRSM
for(int k=i+1; k<no_of_tiles; k++)
{
// trsm
rA2 = &s_current_panel[tx*shared_size_single_matrix +k * TILE_SIZE * TILE_SIZE];
strsm_tile(rA1, rA2);
__syncthreads();
}
// STORING the current panel back in the global memory
for (int k=0; k<no_of_tiles; k++)
{
rA1 = &s_current_panel[k * TILE_SIZE * TILE_SIZE];
// store zeros for tiles above the tile (i, i)
if(k < i)
{
store_zeros(g_in, k, i, N, M);
}
else
{
// store lower for tile (i, i)
if(k == i)
{
store_lower(g_in, rA1, k, i, N, M, shared_size_single_matrix);
}
else // store full for tiles below the tile (i, i)
{
store_full(g_in, rA1, k, i, N, M, shared_size_single_matrix);
}
}
}
__syncthreads();
}
}
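/*
 * Note (added for clarity): each matrix needs (no_of_tiles + 2) tiles of shared memory here,
 * the whole current panel plus two work tiles, so the matching launch would pass
 * num_of_matrices * (no_of_tiles + 2) * TILE_SIZE * TILE_SIZE * sizeof(float) bytes of dynamic
 * shared memory (see the commented-out launch in main).
 */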
__global__ void left_looking_kernel_less_mem(float *g_in, int N, int M , int shared_size_single_matrix)
{
extern __shared__ float s_current_panel[];
// Pointers for accessing shared memory locations
float *rA1 = NULL;
float *rA2 = NULL;
float *rA3 = NULL;
// no of tiles in a column
int no_of_tiles = (N / TILE_SIZE) + (N % TILE_SIZE != 0); // ceil(N / TILE_SIZE)
int tx = threadIdx.x;
// i: current panel
for(int i=0; i<no_of_tiles; i++)
{
// loading tile(i, i)
rA1 = &s_current_panel[0];
load_full(g_in, rA1, i, i, N, M, shared_size_single_matrix);
for(int j=0; j<no_of_tiles; j++)
{
if(j >= i)
{
if(j == i) // representing the tile on which spotrf will be carried out
{
for(int k=0; k<i; k++) // k iterates over tiles left of (i,i) tile
{
rA2 = &s_current_panel[2 * TILE_SIZE * TILE_SIZE];
load_full(g_in, rA2, j, k, N , M, shared_size_single_matrix);
rA2 = &s_current_panel[tx*shared_size_single_matrix + 2 * TILE_SIZE * TILE_SIZE];
rA1 = &s_current_panel[tx*shared_size_single_matrix + 0];
ssyrk_tile(rA1, rA2); // rank-k update on rA1 using rA2
__syncthreads();
}
rA1 = &s_current_panel[tx*shared_size_single_matrix + 0];
spotrf_tile(rA1);
__syncthreads();
rA1 = &s_current_panel[0];
store_lower(g_in, rA1, i, i, N, M, shared_size_single_matrix); // storing (i,i) tile back to global memory after calling spotrf
}
else
{
rA3 = &s_current_panel[1 * TILE_SIZE * TILE_SIZE];
load_full(g_in, rA3, j, i, N, M, shared_size_single_matrix);
for(int k=0; k<i; k++) // k iterates over the already factorized panels to the left of panel i
{
rA1 = &s_current_panel[2 * TILE_SIZE * TILE_SIZE];
load_full(g_in, rA1, i, k, N, M, shared_size_single_matrix);
rA2 = &s_current_panel[3 * TILE_SIZE * TILE_SIZE];
load_full(g_in, rA2, j, k, N, M, shared_size_single_matrix); // load tile (j, k) into its own buffer so tile (i, k) loaded above is not overwritten
rA2 = &s_current_panel[tx*shared_size_single_matrix + 3 * TILE_SIZE * TILE_SIZE];
rA1 = &s_current_panel[tx*shared_size_single_matrix + 2 * TILE_SIZE * TILE_SIZE];
rA3 = &s_current_panel[tx*shared_size_single_matrix + 1 * TILE_SIZE * TILE_SIZE];
sgemm_tile(rA1, rA2, rA3); // sgemm on tile rA3 using tiles rA1 and rA2
__syncthreads();
}
rA1 = &s_current_panel[tx*shared_size_single_matrix + 0];
rA2 = &s_current_panel[tx*shared_size_single_matrix + 1 * TILE_SIZE * TILE_SIZE];
strsm_tile(rA1, rA2); // strsm on tile rA2 using tile rA1
__syncthreads();
rA2 = &s_current_panel[1 * TILE_SIZE * TILE_SIZE];
store_full(g_in, rA2, j, i, N, M, shared_size_single_matrix); // storing back to global memory
}
}
else
{
store_zeros(g_in, j, i, N, M); // stores zero in the tile given by pointer g_in
}
}
__syncthreads();
}
}
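/*
 * Note (added for clarity): this variant only ever keeps 4 tiles per matrix in shared memory
 * (the diagonal tile, the tile being updated and two temporaries), which is why main launches
 * it with num_of_matrices * 4 * TILE_SIZE * TILE_SIZE * sizeof(float) bytes of dynamic shared memory.
 */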
//MAIN PROGRAM
int main() {
// READ FROM THE INPUT FILE
FILE *fptr;
fptr = fopen("./input_2.txt", "r");
int num_of_matrices, dim_of_matrix;
fscanf(fptr, "%d", &num_of_matrices);
fscanf(fptr, "%d", &dim_of_matrix);
float read_element;
float* h_A = NULL;
int numElements = num_of_matrices * dim_of_matrix * dim_of_matrix;
size_t size = numElements * sizeof(float);
cudaDeviceProp devp;
cudaGetDeviceProperties(&devp, 0);
//Chunk size
// int chunk_size = 2;
h_A = (float *)malloc(size);
int global_id = 0;
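/*
 * Expected layout of the input file: the first two integers are the number of matrices and the
 * matrix dimension, followed by the matrices one after another in row-major order. The loop
 * below scatters them into the chunk-interleaved buffer (it assumes num_of_matrices is a
 * multiple of chunk_size, since it iterates in whole chunks).
 */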
for(int p = 0; p < (num_of_matrices/chunk_size); p++)
{
for (int matrix_index = 0; matrix_index < chunk_size; matrix_index++)
{
for (int row = 0; row < dim_of_matrix; row++)
{
for (int column = 0; column < dim_of_matrix; column++)
{
fscanf(fptr, "%f", &read_element);
int x = row*dim_of_matrix + column;
global_id = (p*chunk_size)*dim_of_matrix*dim_of_matrix + x*chunk_size + matrix_index;
h_A[global_id] = read_element;
// printf("At pos %d we get %0.2f\n", global_id, h_A[global_id]);
// printf("%0.2f \n ", h_A[global_id]);
}
}
}
}
printf("\nRead from the input file successfully!\n");
fclose(fptr);
printf("\nPrinting the host-side input array read from the input file:\n");
for (int i = 0; i < numElements; i++) {
printf("%f ", h_A[i]);
}
printf("\n\n");
// COPY TO DEVICE
cudaError_t err = cudaSuccess;
float *d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
else {
printf("Copied the h_A to device side successfully!\n\n");
}
// LAUNCH KERNEL
// int threadsPerBlock = 256;
// int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
//printf("Right-Looking CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
//checkKernel <<<blocksPerGrid, threadsPerBlock>>> (d_A, numElements);
dim3 grid(1, 1, 1);
dim3 block(num_of_matrices, TILE_SIZE, TILE_SIZE);
// no of tiles in a column
int INPUT_SIZE = dim_of_matrix;
int no_of_tiles = (INPUT_SIZE / TILE_SIZE) + (INPUT_SIZE % TILE_SIZE != 0); // ceil of (INPUT_SIZE / TILE_SIZE)
if(TILE_SIZE == INPUT_SIZE)
{
// printf("The if statement works.\n");
left_looking_kernel<<<grid, block, num_of_matrices * 1 * TILE_SIZE * TILE_SIZE * sizeof(float)>>>(d_A, dim_of_matrix, num_of_matrices ,1 * TILE_SIZE * TILE_SIZE);
}
else if((no_of_tiles + 2) * TILE_SIZE * TILE_SIZE * sizeof(float) < devp.sharedMemPerBlock)
{
//printf("The if statement works.\n");
left_looking_kernel_less_mem<<<grid, block, num_of_matrices * 4 * TILE_SIZE * TILE_SIZE * sizeof(float)>>>(d_A, dim_of_matrix, num_of_matrices ,4 * TILE_SIZE * TILE_SIZE);
// left_looking_kernel<<<grid, block,num_of_matrices * (no_of_tiles + 2) * TILE_SIZE * TILE_SIZE * sizeof(float)>>>(d_A, dim_of_matrix, num_of_matrices ,(no_of_tiles + 2) * TILE_SIZE * TILE_SIZE);
}
else
{
left_looking_kernel_less_mem<<<grid, block, num_of_matrices * 4 * TILE_SIZE * TILE_SIZE * sizeof(float)>>>(d_A, dim_of_matrix, num_of_matrices ,4 * TILE_SIZE * TILE_SIZE);
}
cudaError_t cudaerr = cudaDeviceSynchronize();
if (cudaerr != cudaSuccess) {
printf("kernel launch failed with error \"%s\".\n",
cudaGetErrorString(cudaerr));
}
// COPY BACK TO HOST, FREE CUDA MEM, HOST MEM, AND RESET CUDA
err = cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
else {
printf("\nCopied d_A to host side successfully!\n");
}
printf("\nPrinting the output of cudaMemcpy DeviceToHost, i.e. the host-side array returned from the device:\n");
for (int i = 0; i < numElements; i++) {
printf("%f ", h_A[i]);
}
err = cudaFree(d_A);
if(err != cudaSuccess)
{
fprintf(stderr, "\nFailed to free device matrix M (error code %s)\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaDeviceReset();
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the CUDA device (error code %s)\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
FILE *fptr1;
fptr1 = fopen("./output.txt", "w+");
float write_element;
fprintf(fptr1, "%d\n", num_of_matrices);
fprintf(fptr1, "%d\n", dim_of_matrix);
for(int p = 0; p < (num_of_matrices/chunk_size); p++)
{
for (int matrix_index = 0; matrix_index < chunk_size; matrix_index++)
{
for (int row = 0; row < dim_of_matrix; row++)
{
for (int column = 0; column < dim_of_matrix; column++)
{
// fscanf(fptr, "%f", &read_element);
int x = row*dim_of_matrix + column;
global_id = (p*chunk_size)*dim_of_matrix*dim_of_matrix + x*chunk_size + matrix_index;
write_element = h_A[global_id];
fprintf(fptr1, "%f ", write_element);
// printf("At pos %d we get %0.2f\n", global_id, h_A[global_id]);
// printf("%0.2f \n ", h_A[global_id]);
}
fprintf(fptr1, "\n");
}
}
}
printf("\nWrote the results to the output file successfully!\n");
fclose(fptr1);
// free(h_A);
// printf("\n\nAll tasks completed successfully!\n\n");
return 0;
}
|
ca465f86c70edd0090d31efc2cc54ced4c524c3a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<time.h>
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cuda.h>
#include"CG.h"
/* *********************************************************** *
* Block-Jacobi Preconditioner
* y = M^{-1}.b
* *********************************************************** */
__global__ void gpu_bj_kernel(const struct N *n,const float* b,float* y) {
int ix, iy, iz;
// Grid spacings in all directions
float hx = 1./n->x;
float hy = 1./n->y;
float hz = 1./n->z;
float hx_inv2 = 1./(hx*hx);
float hy_inv2 = 1./(hy*hy);
float hz_inv2 = 1./(hz*hz);
//float *c,*d;
// Temporary arrays for Thomas algorithm
//hipMalloc((void**)&c,n->z*sizeof(float));
//hipMalloc((void**)&d,n->z*sizeof(float));
float c[1000],d[1000]; // fixed-size scratch for the Thomas algorithm: assumes n->z <= 1000
//float* c=malloc(n.z*sizeof(float));
//float* d=malloc(n.z*sizeof(float));
// Loop over number of relaxation steps
ix=blockIdx.x*BLOCK_SIZE+threadIdx.x;
iy=blockIdx.y*BLOCK_SIZE+threadIdx.y;
// for (ix = 0; ix<n->x; ix++) {
// for (iy = 0; iy<n->y; iy++) {
// Do a tridiagonal solve in the vertical direction
// STEP 1: Calculate modified coefficients
c[0] = (-omega2*lambda2*hz_inv2)/(delta+2.*omega2*(hx_inv2+hy_inv2+lambda2*hz_inv2));
d[0] = b[GLINIDX(n, ix,iy,0)]/(delta+2.*omega2*(hx_inv2+hy_inv2+lambda2*hz_inv2));
for (iz = 1; iz<n->z; iz++) {
c[iz] = (-omega2*lambda2*hz_inv2)/( (delta+2.*omega2*(hx_inv2+hy_inv2+lambda2*hz_inv2))- (-omega2*lambda2*hz_inv2) * c[iz-1]);
d[iz] = (b[GLINIDX(n, ix,iy,iz)]-(-omega2*lambda2*hz_inv2)*d[iz-1])/((delta+2.*omega2*(hx_inv2+hy_inv2+lambda2*hz_inv2))- (-omega2*lambda2*hz_inv2)*c[iz-1]);
}
// STEP 2: Back-substitution.
y[GLINIDX(n, ix,iy,n->z-1)] = d[n->z-1];
for (iz = n->z-2; iz>=0; iz--) {
y[GLINIDX(n, ix,iy,iz)]=d[iz] - c[iz]*y[GLINIDX(n, ix,iy,iz+1)];
}
// }
// }
}
int gpu_bj(const N *n, const REAL *dev_b, REAL *dev_y ){
N *dev_n;
hipMalloc((void**)&dev_n,sizeof(N));
hipMemcpy(dev_n,n,sizeof(N),hipMemcpyHostToDevice); // the kernel dereferences n on the device, so the struct must be copied over
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
dim3 dimGrid(n->x/BLOCK_SIZE,n->y/BLOCK_SIZE);
hipLaunchKernelGGL(( gpu_bj_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, dev_n,dev_b,dev_y);
hipFree(dev_n);
return(0);
}
|
ca465f86c70edd0090d31efc2cc54ced4c524c3a.cu
|
#include<time.h>
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cuda.h>
#include"CG.h"
/* *********************************************************** *
* Block-Jacobi Preconditioner
* y = M^{-1}.b
* *********************************************************** */
__global__ void gpu_bj_kernel(const struct N *n,const float* b,float* y) {
int ix, iy, iz;
// Grid spacings in all directions
float hx = 1./n->x;
float hy = 1./n->y;
float hz = 1./n->z;
float hx_inv2 = 1./(hx*hx);
float hy_inv2 = 1./(hy*hy);
float hz_inv2 = 1./(hz*hz);
//float *c,*d;
// Temporary arrays for Thomas algorithm
//cudaMalloc((void**)&c,n->z*sizeof(float));
//cudaMalloc((void**)&d,n->z*sizeof(float));
float c[1000],d[1000]; // fixed-size scratch for the Thomas algorithm: assumes n->z <= 1000
//float* c=malloc(n.z*sizeof(float));
//float* d=malloc(n.z*sizeof(float));
// Loop over number of relaxation steps
ix=blockIdx.x*BLOCK_SIZE+threadIdx.x;
iy=blockIdx.y*BLOCK_SIZE+threadIdx.y;
// for (ix = 0; ix<n->x; ix++) {
// for (iy = 0; iy<n->y; iy++) {
// Do a tridiagonal solve in the vertical direction
// STEP 1: Calculate modified coefficients
c[0] = (-omega2*lambda2*hz_inv2)/(delta+2.*omega2*(hx_inv2+hy_inv2+lambda2*hz_inv2));
d[0] = b[GLINIDX(n, ix,iy,0)]/(delta+2.*omega2*(hx_inv2+hy_inv2+lambda2*hz_inv2));
for (iz = 1; iz<n->z; iz++) {
c[iz] = (-omega2*lambda2*hz_inv2)/( (delta+2.*omega2*(hx_inv2+hy_inv2+lambda2*hz_inv2))- (-omega2*lambda2*hz_inv2) * c[iz-1]);
d[iz] = (b[GLINIDX(n, ix,iy,iz)]-(-omega2*lambda2*hz_inv2)*d[iz-1])/((delta+2.*omega2*(hx_inv2+hy_inv2+lambda2*hz_inv2))- (-omega2*lambda2*hz_inv2)*c[iz-1]);
}
// STEP 2: Back-substitution.
y[GLINIDX(n, ix,iy,n->z-1)] = d[n->z-1];
for (iz = n->z-2; iz>=0; iz--) {
y[GLINIDX(n, ix,iy,iz)]=d[iz] - c[iz]*y[GLINIDX(n, ix,iy,iz+1)];
}
// }
// }
}
int gpu_bj(const N *n, const REAL *dev_b, REAL *dev_y ){
N *dev_n;
cudaMalloc((void**)&dev_n,sizeof(N));
cudaMemcpy(dev_n,n,sizeof(N),cudaMemcpyHostToDevice); // the kernel dereferences n on the device, so the struct must be copied over
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
dim3 dimGrid(n->x/BLOCK_SIZE,n->y/BLOCK_SIZE);
gpu_bj_kernel<<<dimGrid,dimBlock>>>(dev_n,dev_b,dev_y);
cudaFree(dev_n);
return(0);
}
|
4db20b8d3d6ee403af80037a019c9613045077fa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernel_3(float *d_data_in, float *d_data_out, int data_size)
{
__shared__ float s_data[BLKSIZE];
int tid = threadIdx.x;
int index = tid + blockIdx.x*blockDim.x;
s_data[tid] = 0.0;
if (index < data_size){
s_data[tid] = d_data_in[index];
}
__syncthreads();
for (int s = blockDim.x/2; s >= 1; s = s >> 1){
if (tid<s){
s_data[tid] += s_data[tid + s];
}
__syncthreads();
}
if (tid == 0){
d_data_out[blockIdx.x] = s_data[tid];
}
}
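/*
 * Illustrative host-side sketch (assumption, not from the original file): kernel_3 does a
 * power-of-two tree reduction within each block, so BLKSIZE must be a power of two and the
 * output array needs one element per block; the partial sums can be reduced by a second pass,
 * e.g.
 *   int blocks = (data_size + BLKSIZE - 1) / BLKSIZE;
 *   hipLaunchKernelGGL(kernel_3, dim3(blocks), dim3(BLKSIZE), 0, 0, d_in, d_partial, data_size);
 *   hipLaunchKernelGGL(kernel_3, dim3(1), dim3(BLKSIZE), 0, 0, d_partial, d_out, blocks); // valid while blocks <= BLKSIZE
 */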
|
4db20b8d3d6ee403af80037a019c9613045077fa.cu
|
#include "includes.h"
__global__ void kernel_3(float *d_data_in, float *d_data_out, int data_size)
{
__shared__ float s_data[BLKSIZE];
int tid = threadIdx.x;
int index = tid + blockIdx.x*blockDim.x;
s_data[tid] = 0.0;
if (index < data_size){
s_data[tid] = d_data_in[index];
}
__syncthreads();
for (int s = blockDim.x/2; s >= 1; s = s >> 1){
if (tid<s){
s_data[tid] += s_data[tid + s];
}
__syncthreads();
}
if (tid == 0){
d_data_out[blockIdx.x] = s_data[tid];
}
}
|
262c7190f1e6a28739a77c403dd468ade98bc3c7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/count.h>
#include "../include/NVStrings.h"
#include "../include/NVCategory.h"
#include "../include/ipc_transfer.h"
//
// cd ../build
// nvcc -w -std=c++11 --expt-extended-lambda -gencode arch=compute_70,code=sm_70 ../tests/ipc_test.cu -L. -lNVStrings -lNVCategory -o ipc_test --linker-options -rpath,.:
//
int strings_test( std::string& mode )
{
NVStrings* strs = 0;
if( mode.compare("client")==0 )
{
nvstrings_ipc_transfer ipc;
FILE* fh = fopen("ipctx.bin","rb");
fread(&ipc,1,sizeof(ipc),fh);
fclose(fh);
printf("%p %ld %ld\n", ipc.base_address, ipc.count, ipc.size);
strs = NVStrings::create_from_ipc(ipc);
strs->print();
printf("%u strings in %ld bytes\n", strs->size(), strs->memsize() );
}
else
{
const char* hstrs[] = { "John Smith", "Joe Blow", "Jane Smith" };
strs = NVStrings::create_from_array(hstrs,3);
nvstrings_ipc_transfer ipc;
strs->create_ipc_transfer(ipc);
//printf("%p %ld %ld\n", ipc.base_address, ipc.count, ipc.size);
strs->print();
printf("%u strings in %ld bytes\n", strs->size(), strs->memsize() );
FILE* fh = fopen("ipctx.bin","wb");
fwrite((void*)&ipc,1,sizeof(ipc),fh);
fclose(fh);
printf("Server ready. Press enter to terminate.\n");
std::cin.ignore();
// just checking
strs->print();
}
NVStrings::destroy(strs);
return 0;
}
int category_test( std::string& mode )
{
NVCategory* cat = 0;
if( mode.compare("client")==0 )
{
nvcategory_ipc_transfer ipc;
FILE* fh = fopen("ipctx.bin","rb");
fread(&ipc,1,sizeof(ipc),fh);
fclose(fh);
cat = NVCategory::create_from_ipc(ipc);
//printf("%p %p:%u %p:%u %p:%ld\n", ipc.base_address, ipc.strs, ipc.keys, ipc.vals, ipc.count, ipc.mem, ipc.size);
NVStrings* strs = cat->get_keys();
strs->print();
NVStrings::destroy(strs);
}
else
{
const char* hstrs[] = { "John", "Jane", "John", "Jane", "Bob" };
NVStrings* strs = NVStrings::create_from_array(hstrs,5);
cat = NVCategory::create_from_strings(*strs);
nvcategory_ipc_transfer ipc;
cat->create_ipc_transfer(ipc);
//printf("%p %p:%u %p:%u %p:%ld\n", ipc.base_address, ipc.strs, ipc.keys, ipc.vals, ipc.count, ipc.mem, ipc.size);
NVStrings::destroy(strs);
strs = cat->get_keys();
strs->print();
NVStrings::destroy(strs);
FILE* fh = fopen("ipctx.bin","wb");
fwrite((void*)&ipc,1,sizeof(ipc),fh);
fclose(fh);
printf("Server ready. Press enter to terminate.\n");
std::cin.ignore();
}
NVCategory::destroy(cat);
return 0;
}
int main( int argc, const char** argv )
{
if( argc < 2 )
{
printf("require parameter: 'server' or 'client'\n");
return 0;
}
std::string mode = argv[1];
//strings_test(mode);
category_test(mode);
}
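//
// Typical usage (assumption based on the mode handling above): run `./ipc_test server` in one
// terminal to build the strings/category and write ipctx.bin, then `./ipc_test client` in a
// second terminal to attach to the same device memory through the IPC handle.
//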
|
262c7190f1e6a28739a77c403dd468ade98bc3c7.cu
|
#include <stdio.h>
#include <string>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/count.h>
#include "../include/NVStrings.h"
#include "../include/NVCategory.h"
#include "../include/ipc_transfer.h"
//
// cd ../build
// nvcc -w -std=c++11 --expt-extended-lambda -gencode arch=compute_70,code=sm_70 ../tests/ipc_test.cu -L. -lNVStrings -lNVCategory -o ipc_test --linker-options -rpath,.:
//
int strings_test( std::string& mode )
{
NVStrings* strs = 0;
if( mode.compare("client")==0 )
{
nvstrings_ipc_transfer ipc;
FILE* fh = fopen("ipctx.bin","rb");
fread(&ipc,1,sizeof(ipc),fh);
fclose(fh);
printf("%p %ld %ld\n", ipc.base_address, ipc.count, ipc.size);
strs = NVStrings::create_from_ipc(ipc);
strs->print();
printf("%u strings in %ld bytes\n", strs->size(), strs->memsize() );
}
else
{
const char* hstrs[] = { "John Smith", "Joe Blow", "Jane Smith" };
strs = NVStrings::create_from_array(hstrs,3);
nvstrings_ipc_transfer ipc;
strs->create_ipc_transfer(ipc);
//printf("%p %ld %ld\n", ipc.base_address, ipc.count, ipc.size);
strs->print();
printf("%u strings in %ld bytes\n", strs->size(), strs->memsize() );
FILE* fh = fopen("ipctx.bin","wb");
fwrite((void*)&ipc,1,sizeof(ipc),fh);
fclose(fh);
printf("Server ready. Press enter to terminate.\n");
std::cin.ignore();
// just checking
strs->print();
}
NVStrings::destroy(strs);
return 0;
}
int category_test( std::string& mode )
{
NVCategory* cat = 0;
if( mode.compare("client")==0 )
{
nvcategory_ipc_transfer ipc;
FILE* fh = fopen("ipctx.bin","rb");
fread(&ipc,1,sizeof(ipc),fh);
fclose(fh);
cat = NVCategory::create_from_ipc(ipc);
//printf("%p %p:%u %p:%u %p:%ld\n", ipc.base_address, ipc.strs, ipc.keys, ipc.vals, ipc.count, ipc.mem, ipc.size);
NVStrings* strs = cat->get_keys();
strs->print();
NVStrings::destroy(strs);
}
else
{
const char* hstrs[] = { "John", "Jane", "John", "Jane", "Bob" };
NVStrings* strs = NVStrings::create_from_array(hstrs,5);
cat = NVCategory::create_from_strings(*strs);
nvcategory_ipc_transfer ipc;
cat->create_ipc_transfer(ipc);
//printf("%p %p:%u %p:%u %p:%ld\n", ipc.base_address, ipc.strs, ipc.keys, ipc.vals, ipc.count, ipc.mem, ipc.size);
NVStrings::destroy(strs);
strs = cat->get_keys();
strs->print();
NVStrings::destroy(strs);
FILE* fh = fopen("ipctx.bin","wb");
fwrite((void*)&ipc,1,sizeof(ipc),fh);
fclose(fh);
printf("Server ready. Press enter to terminate.\n");
std::cin.ignore();
}
NVCategory::destroy(cat);
return 0;
}
int main( int argc, const char** argv )
{
if( argc < 2 )
{
printf("require parameter: 'server' or 'client'\n");
return 0;
}
std::string mode = argv[1];
//strings_test(mode);
category_test(mode);
}
|
c29f51e1c82752e116d3df1959cf237016ab7e57.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "custom_cuda_layers.h"
inline __device__ float gelu(const float x)
{
const float sqrt_param = 0.79788456080286535587989211986876f;
const float mul_param = 0.044715;
return x * 0.5f * (1.0f + tanhf(sqrt_param * (x + mul_param * x * x * x)));
}
inline __device__ float d_gelu(const float x)
{
const float sqrt_param = 0.79788456080286535587989211986876f;
const float mul_param = 0.044715;
float x2mul = x * x * mul_param;
float tan_h = tanhf(sqrt_param * (x + x * x2mul));
float dg1 = 0.5f * (1.0f + tan_h);
float dg2 = x * 0.5f * sqrt_param * (1 - tan_h * tan_h);
float dg3 = dg2 * 3 * x2mul;
return (dg1 + dg2 + dg3);
}
/*
Fused bias add with GELU
Loads a vector of 4 elements each iteration, for `iterations` iterations.
It was written with the intention of launching 256-thread threadblocks, so
to launch for bert-large we would set ITERATIONS to 4. This is currently done
automatically as a heuristic, setting the number of iterations as blocks of 1024.
For FP16, the values are loaded from memory as __half but converted to FP32
for the arithmetic itself, to prevent overflow in the intermediate hyperbolic
tangent, since there's no intrinsic that computes it directly.
*/
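/*
Worked example of the heuristic above (added for clarity): for bert-large the intermediate size
is 4096, so the launch helpers at the bottom of this file compute
    iterations = (4096 + 1023) / 1024 = 4
    threads    = 4096 / 4 / 4        = 256
i.e. 256-thread blocks in which each thread performs 4 vectorized loads of 4 elements per row.
*/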
__global__ void gelu_kernel(const float* input, float* vals, int intermediate_size)
{
int row = blockIdx.x;
int id = threadIdx.x;
int loop_stride = blockDim.x;
int iterations = intermediate_size / blockDim.x / 4;
int row_stride = intermediate_size / 4;
const float4* input_cast = reinterpret_cast<const float4*>(input);
float4* vals_cast = reinterpret_cast<float4*>(vals);
for (int i = 0; i < iterations; i++) {
if (i * loop_stride + id < row_stride) {
float4 data = input_cast[row * row_stride + i * loop_stride + id];
data.x = gelu(data.x);
data.y = gelu(data.y);
data.z = gelu(data.z);
data.w = gelu(data.w);
vals_cast[row * row_stride + i * loop_stride + id] = data;
}
}
}
__global__ void gelu_kernel(const __half* input, __half* vals, int intermediate_size)
{
#if __CUDA_ARCH__ >= 700
int row = blockIdx.x;
int id = threadIdx.x;
int loop_stride = blockDim.x;
int iterations = intermediate_size / blockDim.x / 4;
int row_stride = intermediate_size / 4;
const float2* input_cast = reinterpret_cast<const float2*>(input);
float2* vals_cast = reinterpret_cast<float2*>(vals);
for (int i = 0; i < iterations; i++) {
if (i * loop_stride + id < row_stride) {
float2 vals_vec = input_cast[row * row_stride + i * loop_stride + id];
__half2* vals_half = reinterpret_cast<__half2*>(&vals_vec);
float2 low_data = __half22float2(vals_half[0]);
float2 high_data = __half22float2(vals_half[1]);
low_data.x = gelu(low_data.x);
low_data.y = gelu(low_data.y);
high_data.x = gelu(high_data.x);
high_data.y = gelu(high_data.y);
vals_half[0] = __float22half2_rn(low_data);
vals_half[1] = __float22half2_rn(high_data);
vals_cast[row * row_stride + i * loop_stride + id] = vals_vec;
}
}
#endif
}
__global__ void fused_bias_gelu(const float* input,
const float* bias,
float* vals,
int intermediate_size)
{
int row = blockIdx.x;
int id = threadIdx.x;
int loop_stride = blockDim.x;
int iterations = intermediate_size / blockDim.x / 4;
int row_stride = intermediate_size / 4;
const float4* input_cast = reinterpret_cast<const float4*>(input);
float4* vals_cast = reinterpret_cast<float4*>(vals);
const float4* bias_cast = reinterpret_cast<const float4*>(bias);
for (int i = 0; i < iterations; i++) {
if (i * loop_stride + id < row_stride) {
float4 data = input_cast[row * row_stride + i * loop_stride + id];
float4 bias_data = bias_cast[i * loop_stride + id];
data.x += bias_data.x;
data.y += bias_data.y;
data.z += bias_data.z;
data.w += bias_data.w;
data.x = gelu(data.x);
data.y = gelu(data.y);
data.z = gelu(data.z);
data.w = gelu(data.w);
vals_cast[row * row_stride + i * loop_stride + id] = data;
}
}
}
__global__ void fused_bias_gelu(const __half* input,
const __half* bias,
__half* vals,
int intermediate_size)
{
#if __CUDA_ARCH__ >= 700
int row = blockIdx.x;
int id = threadIdx.x;
int loop_stride = blockDim.x;
int iterations = intermediate_size / blockDim.x / 4;
int row_stride = intermediate_size / 4;
const float2* input_cast = reinterpret_cast<const float2*>(input);
float2* vals_cast = reinterpret_cast<float2*>(vals);
const float2* bias_cast = reinterpret_cast<const float2*>(bias);
for (int i = 0; i < iterations; i++) {
if (i * loop_stride + id < row_stride) {
float2 vals_vec = input_cast[row * row_stride + i * loop_stride + id];
float2 bias_vec = bias_cast[i * loop_stride + id];
__half2* vals_half = reinterpret_cast<__half2*>(&vals_vec);
__half2* bias_half = reinterpret_cast<__half2*>(&bias_vec);
float2 low_data = __half22float2(vals_half[0]);
float2 high_data = __half22float2(vals_half[1]);
float2 low_bias = __half22float2(bias_half[0]);
float2 high_bias = __half22float2(bias_half[1]);
low_data.x += low_bias.x;
low_data.y += low_bias.y;
high_data.x += high_bias.x;
high_data.y += high_bias.y;
low_data.x = gelu(low_data.x);
low_data.y = gelu(low_data.y);
high_data.x = gelu(high_data.x);
high_data.y = gelu(high_data.y);
vals_half[0] = __float22half2_rn(low_data);
vals_half[1] = __float22half2_rn(high_data);
vals_cast[row * row_stride + i * loop_stride + id] = vals_vec;
}
}
#endif
}
__global__ void d_gelu_func(float* d_output,
const float* gelu_input,
const float* bias,
int intermediate_size)
{
int row = blockIdx.x;
int id = threadIdx.x;
int loop_stride = blockDim.x;
int iterations = intermediate_size / blockDim.x / 4;
int row_stride = intermediate_size / 4;
float4* d_output_cast = reinterpret_cast<float4*>(d_output);
const float4* gelu_input_cast = reinterpret_cast<const float4*>(gelu_input);
const float4* bias_cast = reinterpret_cast<const float4*>(bias);
for (int i = 0; i < iterations; i++) {
if (i * loop_stride + id < row_stride) {
float4 output_data = d_output_cast[row * row_stride + i * loop_stride + id];
float4 gelu_input_data = gelu_input_cast[row * row_stride + i * loop_stride + id];
float4 bias_data = bias_cast[i * loop_stride + id];
gelu_input_data.x += bias_data.x;
gelu_input_data.y += bias_data.y;
gelu_input_data.z += bias_data.z;
gelu_input_data.w += bias_data.w;
output_data.x *= d_gelu(gelu_input_data.x);
output_data.y *= d_gelu(gelu_input_data.y);
output_data.z *= d_gelu(gelu_input_data.z);
output_data.w *= d_gelu(gelu_input_data.w);
d_output_cast[row * row_stride + i * loop_stride + id] = output_data;
}
}
}
__global__ void d_gelu_func(__half* d_output,
const __half* gelu_input,
const __half* bias,
int intermediate_size)
{
#if __CUDA_ARCH__ >= 700
int row = blockIdx.x;
int id = threadIdx.x;
int loop_stride = blockDim.x;
int iterations = intermediate_size / blockDim.x / 4;
int row_stride = intermediate_size / 4;
float2* d_output_cast = reinterpret_cast<float2*>(d_output);
const float2* gelu_input_cast = reinterpret_cast<const float2*>(gelu_input);
const float2* bias_cast = reinterpret_cast<const float2*>(bias);
#pragma unroll
for (int i = 0; i < iterations; i++) {
if (i * loop_stride + id < row_stride) {
float2 output_data = d_output_cast[row * row_stride + i * loop_stride + id];
float2 gelu_input_data = gelu_input_cast[row * row_stride + i * loop_stride + id];
float2 bias_vec = bias_cast[i * loop_stride + id];
__half2* output_data_half = reinterpret_cast<__half2*>(&output_data);
__half2* gelu_input_data_half = reinterpret_cast<__half2*>(&gelu_input_data);
__half2* bias_half = reinterpret_cast<__half2*>(&bias_vec);
float2 output_half_0 = __half22float2(output_data_half[0]);
float2 output_half_1 = __half22float2(output_data_half[1]);
float2 gelu_input_half_0 = __half22float2(gelu_input_data_half[0]);
float2 gelu_input_half_1 = __half22float2(gelu_input_data_half[1]);
float2 bias_half_0 = __half22float2(bias_half[0]);
float2 bias_half_1 = __half22float2(bias_half[1]);
gelu_input_half_0.x += bias_half_0.x;
gelu_input_half_0.y += bias_half_0.y;
gelu_input_half_1.x += bias_half_1.x;
gelu_input_half_1.y += bias_half_1.y;
output_half_0.x *= d_gelu(gelu_input_half_0.x);
output_half_0.y *= d_gelu(gelu_input_half_0.y);
output_half_1.x *= d_gelu(gelu_input_half_1.x);
output_half_1.y *= d_gelu(gelu_input_half_1.y);
float2 result;
__half2* result_half2 = reinterpret_cast<__half2*>(&result);
result_half2[0] = __float22half2_rn(output_half_0);
result_half2[1] = __float22half2_rn(output_half_1);
d_output_cast[row * row_stride + i * loop_stride + id] = result;
}
}
#endif
}
template <typename T>
void launch_bias_gelu(const T* input,
const T* bias,
T* output,
int intermediate_size,
int batch_size,
int sequence_length,
hipStream_t stream)
{
int iterations = (intermediate_size + 1023) / 1024;
int threads = intermediate_size / iterations / 4;
dim3 block_dims(threads);
dim3 grid_dims(sequence_length * batch_size);
hipLaunchKernelGGL(( fused_bias_gelu), dim3(grid_dims), dim3(block_dims), 0, stream, input, bias, output, intermediate_size);
}
template <typename T>
void launch_gelu(const T* input,
T* output,
int intermediate_size,
int batch_size,
int sequence_length,
hipStream_t stream)
{
int iterations = (intermediate_size + 1023) / 1024;
int threads = intermediate_size / iterations / 4;
dim3 block_dims(threads);
dim3 grid_dims(sequence_length * batch_size);
hipLaunchKernelGGL(( gelu_kernel), dim3(grid_dims), dim3(block_dims), 0, stream, input, output, intermediate_size);
}
template void
launch_bias_gelu<float>(const float*, const float*, float*, int, int, int, hipStream_t);
template void
launch_bias_gelu<__half>(const __half*, const __half*, __half*, int, int, int, hipStream_t);
template void launch_gelu<float>(const float*, float*, int, int, int, hipStream_t);
template void launch_gelu<__half>(const __half*, __half*, int, int, int, hipStream_t);
template <typename T>
void launch_d_gelu(T* d_output,
const T* input,
const T* bias,
int intermediate_size,
int batch_size,
int sequence_length,
hipStream_t stream)
{
int iterations = (intermediate_size + 1023) / 1024;
int threads = intermediate_size / iterations / 4;
dim3 block_dims(threads);
dim3 grid_dims(sequence_length * batch_size);
hipLaunchKernelGGL(( d_gelu_func), dim3(grid_dims), dim3(block_dims), 0, stream, d_output, input, bias, intermediate_size);
}
template void launch_d_gelu<float>(float*, const float*, const float*, int, int, int, hipStream_t);
template void
launch_d_gelu<__half>(__half*, const __half*, const __half*, int, int, int, hipStream_t);
|
c29f51e1c82752e116d3df1959cf237016ab7e57.cu
|
#include "custom_cuda_layers.h"
inline __device__ float gelu(const float x)
{
const float sqrt_param = 0.79788456080286535587989211986876f;
const float mul_param = 0.044715;
return x * 0.5f * (1.0f + tanhf(sqrt_param * (x + mul_param * x * x * x)));
}
inline __device__ float d_gelu(const float x)
{
const float sqrt_param = 0.79788456080286535587989211986876f;
const float mul_param = 0.044715;
float x2mul = x * x * mul_param;
float tan_h = tanhf(sqrt_param * (x + x * x2mul));
float dg1 = 0.5f * (1.0f + tan_h);
float dg2 = x * 0.5f * sqrt_param * (1 - tan_h * tan_h);
float dg3 = dg2 * 3 * x2mul;
return (dg1 + dg2 + dg3);
}
/*
Fused bias add with GELU
Loads a vector of 4 elements each iteration, for `iterations` iterations.
It was written with the intention of launching 256-thread threadblocks, so
to launch for bert-large we would set ITERATIONS to 4. This is currently done
automatically as a heuristic, setting the number of iterations as blocks of 1024.
For FP16, the values are loaded from memory as __half but converted to FP32
for the arithmetic itself, to prevent overflow in the intermediate hyperbolic
tangent, since there's no intrinsic that computes it directly.
*/
__global__ void gelu_kernel(const float* input, float* vals, int intermediate_size)
{
int row = blockIdx.x;
int id = threadIdx.x;
int loop_stride = blockDim.x;
int iterations = intermediate_size / blockDim.x / 4;
int row_stride = intermediate_size / 4;
const float4* input_cast = reinterpret_cast<const float4*>(input);
float4* vals_cast = reinterpret_cast<float4*>(vals);
for (int i = 0; i < iterations; i++) {
if (i * loop_stride + id < row_stride) {
float4 data = input_cast[row * row_stride + i * loop_stride + id];
data.x = gelu(data.x);
data.y = gelu(data.y);
data.z = gelu(data.z);
data.w = gelu(data.w);
vals_cast[row * row_stride + i * loop_stride + id] = data;
}
}
}
__global__ void gelu_kernel(const __half* input, __half* vals, int intermediate_size)
{
#if __CUDA_ARCH__ >= 700
int row = blockIdx.x;
int id = threadIdx.x;
int loop_stride = blockDim.x;
int iterations = intermediate_size / blockDim.x / 4;
int row_stride = intermediate_size / 4;
const float2* input_cast = reinterpret_cast<const float2*>(input);
float2* vals_cast = reinterpret_cast<float2*>(vals);
for (int i = 0; i < iterations; i++) {
if (i * loop_stride + id < row_stride) {
float2 vals_vec = input_cast[row * row_stride + i * loop_stride + id];
__half2* vals_half = reinterpret_cast<__half2*>(&vals_vec);
float2 low_data = __half22float2(vals_half[0]);
float2 high_data = __half22float2(vals_half[1]);
low_data.x = gelu(low_data.x);
low_data.y = gelu(low_data.y);
high_data.x = gelu(high_data.x);
high_data.y = gelu(high_data.y);
vals_half[0] = __float22half2_rn(low_data);
vals_half[1] = __float22half2_rn(high_data);
vals_cast[row * row_stride + i * loop_stride + id] = vals_vec;
}
}
#endif
}
__global__ void fused_bias_gelu(const float* input,
const float* bias,
float* vals,
int intermediate_size)
{
int row = blockIdx.x;
int id = threadIdx.x;
int loop_stride = blockDim.x;
int iterations = intermediate_size / blockDim.x / 4;
int row_stride = intermediate_size / 4;
const float4* input_cast = reinterpret_cast<const float4*>(input);
float4* vals_cast = reinterpret_cast<float4*>(vals);
const float4* bias_cast = reinterpret_cast<const float4*>(bias);
for (int i = 0; i < iterations; i++) {
if (i * loop_stride + id < row_stride) {
float4 data = input_cast[row * row_stride + i * loop_stride + id];
float4 bias_data = bias_cast[i * loop_stride + id];
data.x += bias_data.x;
data.y += bias_data.y;
data.z += bias_data.z;
data.w += bias_data.w;
data.x = gelu(data.x);
data.y = gelu(data.y);
data.z = gelu(data.z);
data.w = gelu(data.w);
vals_cast[row * row_stride + i * loop_stride + id] = data;
}
}
}
__global__ void fused_bias_gelu(const __half* input,
const __half* bias,
__half* vals,
int intermediate_size)
{
#if __CUDA_ARCH__ >= 700
int row = blockIdx.x;
int id = threadIdx.x;
int loop_stride = blockDim.x;
int iterations = intermediate_size / blockDim.x / 4;
int row_stride = intermediate_size / 4;
const float2* input_cast = reinterpret_cast<const float2*>(input);
float2* vals_cast = reinterpret_cast<float2*>(vals);
const float2* bias_cast = reinterpret_cast<const float2*>(bias);
for (int i = 0; i < iterations; i++) {
if (i * loop_stride + id < row_stride) {
float2 vals_vec = input_cast[row * row_stride + i * loop_stride + id];
float2 bias_vec = bias_cast[i * loop_stride + id];
__half2* vals_half = reinterpret_cast<__half2*>(&vals_vec);
__half2* bias_half = reinterpret_cast<__half2*>(&bias_vec);
float2 low_data = __half22float2(vals_half[0]);
float2 high_data = __half22float2(vals_half[1]);
float2 low_bias = __half22float2(bias_half[0]);
float2 high_bias = __half22float2(bias_half[1]);
low_data.x += low_bias.x;
low_data.y += low_bias.y;
high_data.x += high_bias.x;
high_data.y += high_bias.y;
low_data.x = gelu(low_data.x);
low_data.y = gelu(low_data.y);
high_data.x = gelu(high_data.x);
high_data.y = gelu(high_data.y);
vals_half[0] = __float22half2_rn(low_data);
vals_half[1] = __float22half2_rn(high_data);
vals_cast[row * row_stride + i * loop_stride + id] = vals_vec;
}
}
#endif
}
__global__ void d_gelu_func(float* d_output,
const float* gelu_input,
const float* bias,
int intermediate_size)
{
int row = blockIdx.x;
int id = threadIdx.x;
int loop_stride = blockDim.x;
int iterations = intermediate_size / blockDim.x / 4;
int row_stride = intermediate_size / 4;
float4* d_output_cast = reinterpret_cast<float4*>(d_output);
const float4* gelu_input_cast = reinterpret_cast<const float4*>(gelu_input);
const float4* bias_cast = reinterpret_cast<const float4*>(bias);
for (int i = 0; i < iterations; i++) {
if (i * loop_stride + id < row_stride) {
float4 output_data = d_output_cast[row * row_stride + i * loop_stride + id];
float4 gelu_input_data = gelu_input_cast[row * row_stride + i * loop_stride + id];
float4 bias_data = bias_cast[i * loop_stride + id];
gelu_input_data.x += bias_data.x;
gelu_input_data.y += bias_data.y;
gelu_input_data.z += bias_data.z;
gelu_input_data.w += bias_data.w;
output_data.x *= d_gelu(gelu_input_data.x);
output_data.y *= d_gelu(gelu_input_data.y);
output_data.z *= d_gelu(gelu_input_data.z);
output_data.w *= d_gelu(gelu_input_data.w);
d_output_cast[row * row_stride + i * loop_stride + id] = output_data;
}
}
}
__global__ void d_gelu_func(__half* d_output,
const __half* gelu_input,
const __half* bias,
int intermediate_size)
{
#if __CUDA_ARCH__ >= 700
int row = blockIdx.x;
int id = threadIdx.x;
int loop_stride = blockDim.x;
int iterations = intermediate_size / blockDim.x / 4;
int row_stride = intermediate_size / 4;
float2* d_output_cast = reinterpret_cast<float2*>(d_output);
const float2* gelu_input_cast = reinterpret_cast<const float2*>(gelu_input);
const float2* bias_cast = reinterpret_cast<const float2*>(bias);
#pragma unroll
for (int i = 0; i < iterations; i++) {
if (i * loop_stride + id < row_stride) {
float2 output_data = d_output_cast[row * row_stride + i * loop_stride + id];
float2 gelu_input_data = gelu_input_cast[row * row_stride + i * loop_stride + id];
float2 bias_vec = bias_cast[i * loop_stride + id];
__half2* output_data_half = reinterpret_cast<__half2*>(&output_data);
__half2* gelu_input_data_half = reinterpret_cast<__half2*>(&gelu_input_data);
__half2* bias_half = reinterpret_cast<__half2*>(&bias_vec);
float2 output_half_0 = __half22float2(output_data_half[0]);
float2 output_half_1 = __half22float2(output_data_half[1]);
float2 gelu_input_half_0 = __half22float2(gelu_input_data_half[0]);
float2 gelu_input_half_1 = __half22float2(gelu_input_data_half[1]);
float2 bias_half_0 = __half22float2(bias_half[0]);
float2 bias_half_1 = __half22float2(bias_half[1]);
gelu_input_half_0.x += bias_half_0.x;
gelu_input_half_0.y += bias_half_0.y;
gelu_input_half_1.x += bias_half_1.x;
gelu_input_half_1.y += bias_half_1.y;
output_half_0.x *= d_gelu(gelu_input_half_0.x);
output_half_0.y *= d_gelu(gelu_input_half_0.y);
output_half_1.x *= d_gelu(gelu_input_half_1.x);
output_half_1.y *= d_gelu(gelu_input_half_1.y);
float2 result;
__half2* result_half2 = reinterpret_cast<__half2*>(&result);
result_half2[0] = __float22half2_rn(output_half_0);
result_half2[1] = __float22half2_rn(output_half_1);
d_output_cast[row * row_stride + i * loop_stride + id] = result;
}
}
#endif
}
template <typename T>
void launch_bias_gelu(const T* input,
const T* bias,
T* output,
int intermediate_size,
int batch_size,
int sequence_length,
cudaStream_t stream)
{
int iterations = (intermediate_size + 1023) / 1024;
int threads = intermediate_size / iterations / 4;
dim3 block_dims(threads);
dim3 grid_dims(sequence_length * batch_size);
fused_bias_gelu<<<grid_dims, block_dims, 0, stream>>>(input, bias, output, intermediate_size);
}
template <typename T>
void launch_gelu(const T* input,
T* output,
int intermediate_size,
int batch_size,
int sequence_length,
cudaStream_t stream)
{
int iterations = (intermediate_size + 1023) / 1024;
int threads = intermediate_size / iterations / 4;
dim3 block_dims(threads);
dim3 grid_dims(sequence_length * batch_size);
gelu_kernel<<<grid_dims, block_dims, 0, stream>>>(input, output, intermediate_size);
}
template void
launch_bias_gelu<float>(const float*, const float*, float*, int, int, int, cudaStream_t);
template void
launch_bias_gelu<__half>(const __half*, const __half*, __half*, int, int, int, cudaStream_t);
template void launch_gelu<float>(const float*, float*, int, int, int, cudaStream_t);
template void launch_gelu<__half>(const __half*, __half*, int, int, int, cudaStream_t);
template <typename T>
void launch_d_gelu(T* d_output,
const T* input,
const T* bias,
int intermediate_size,
int batch_size,
int sequence_length,
cudaStream_t stream)
{
int iterations = (intermediate_size + 1023) / 1024;
int threads = intermediate_size / iterations / 4;
dim3 block_dims(threads);
dim3 grid_dims(sequence_length * batch_size);
d_gelu_func<<<grid_dims, block_dims, 0, stream>>>(d_output, input, bias, intermediate_size);
}
template void launch_d_gelu<float>(float*, const float*, const float*, int, int, int, cudaStream_t);
template void
launch_d_gelu<__half>(__half*, const __half*, const __half*, int, int, int, cudaStream_t);
|
65c8e1c13438a04de7cf2fbfc2b457a8f64e0d33.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@author Mark Gates
@author Azzam Haidar
@generated from magmablas/zlacpy_sym_out.cu, normal z -> c, Tue Aug 30 09:38:30 2016
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/******************************************************************************/
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to claset, clacpy, clag2z, clag2z, cgeadd.
*/
static __device__
void clacpy_sym_out_full_device(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/******************************************************************************/
/*
Similar to clacpy_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to claset, clacpy, zlat2c, clat2z.
*/
static __device__
void clacpy_sym_out_lower_device(
int m, int n, magma_int_t *rows, magma_int_t *perm,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x; // row
int iby = blockIdx.y*BLK_Y; // col
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n);
for (int jj=0; jj < n; jj++) {
perm[rows[2*jj+1]] = rows[2*jj+1];
}
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m ) {
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int jj=0; jj < BLK_Y; ++jj ) {
int j = rows[2*(iby+jj)+1];
if (ind <= j)
dB[j + ind*ldda] = MAGMA_C_CONJ( dA[ind + (iby+jj)*lddb] );
else
dB[ind + j*ldda] = dA[ind + (iby+jj)*lddb];
}
}
else {
// either partial block-column or diagonal block
for( int jj=0; jj < BLK_Y && iby+jj < n; ++jj ) {
int j = rows[2*(iby+jj)+1];
if (ind <= j)
dB[j + ind*ldda] = MAGMA_C_CONJ( dA[ind + (iby+jj)*lddb] );
else
dB[ind + j*ldda] = dA[ind + (iby+jj)*lddb];
}
}
}
}
/******************************************************************************/
/*
Similar to clacpy_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to claset, clacpy, zlat2c, clat2z.
*/
static __device__
void clacpy_sym_out_upper_device(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
}
/******************************************************************************/
/*
kernel wrappers to call the device functions.
*/
__global__
void clacpy_sym_out_full_kernel(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
clacpy_sym_out_full_device(m, n, dA, ldda, dB, lddb);
}
__global__
void clacpy_sym_out_lower_kernel(
int m, int n, magma_int_t *rows, magma_int_t *perm,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
clacpy_sym_out_lower_device(m, n, rows, perm, dA, ldda, dB, lddb);
}
__global__
void clacpy_sym_out_upper_kernel(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
clacpy_sym_out_upper_device(m, n, dA, ldda, dB, lddb);
}
/***************************************************************************//**
Purpose
-------
CLACPY_Q copies all or part of a two-dimensional matrix dA to another
matrix dB.
This is the same as CLACPY, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
rows INTEGER array, on GPU, dimension (2*n)
On entry, it stores the new pivots such that rows[i]-th and rows[n+i]-th
rows are swapped.
@param[in,out]
perm INTEGER array, on GPU, dimension (m)
On entry, it stores the permutation array such that i-th row will be
the original perm[i]-th row after the pivots are applied.
On exit, it is restored to be identity permutation.
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, the matrix after the symmetric pivoting is applied.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
dB COMPLEX array, dimension (LDDB,N)
The M-by-N matrix dB.
On entry, dB stores the columns after row pivoting is applied.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lacpy
*******************************************************************************/
extern "C" void
magmablas_clacpy_sym_out_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magma_int_t *rows, magma_int_t *perm,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv(m, BLK_X), magma_ceildiv(n, BLK_Y) );
if ( uplo == MagmaLower ) {
hipLaunchKernelGGL(( clacpy_sym_out_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, rows, perm, dA, ldda, dB, lddb );
}
else if ( uplo == MagmaUpper ) {
hipLaunchKernelGGL(( clacpy_sym_out_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dB, lddb );
}
else {
hipLaunchKernelGGL(( clacpy_sym_out_full_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dB, lddb );
}
}
|
65c8e1c13438a04de7cf2fbfc2b457a8f64e0d33.cu
|
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@author Mark Gates
@author Azzam Haidar
@generated from magmablas/zlacpy_sym_out.cu, normal z -> c, Tue Aug 30 09:38:30 2016
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/******************************************************************************/
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to claset, clacpy, clag2z, clag2z, cgeadd.
*/
static __device__
void clacpy_sym_out_full_device(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/******************************************************************************/
/*
Similar to clacpy_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to claset, clacpy, zlat2c, clat2z.
*/
static __device__
void clacpy_sym_out_lower_device(
int m, int n, magma_int_t *rows, magma_int_t *perm,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x; // row
int iby = blockIdx.y*BLK_Y; // col
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n);
for (int jj=0; jj < n; jj++) {
perm[rows[2*jj+1]] = rows[2*jj+1];
}
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m ) {
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int jj=0; jj < BLK_Y; ++jj ) {
int j = rows[2*(iby+jj)+1];
if (ind <= j)
dB[j + ind*ldda] = MAGMA_C_CONJ( dA[ind + (iby+jj)*lddb] );
else
dB[ind + j*ldda] = dA[ind + (iby+jj)*lddb];
}
}
else {
// either partial block-column or diagonal block
for( int jj=0; jj < BLK_Y && iby+jj < n; ++jj ) {
int j = rows[2*(iby+jj)+1];
if (ind <= j)
dB[j + ind*ldda] = MAGMA_C_CONJ( dA[ind + (iby+jj)*lddb] );
else
dB[ind + j*ldda] = dA[ind + (iby+jj)*lddb];
}
}
}
}
/******************************************************************************/
/*
Similar to clacpy_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to claset, clacpy, zlat2c, clat2z.
*/
static __device__
void clacpy_sym_out_upper_device(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
}
/******************************************************************************/
/*
kernel wrappers to call the device functions.
*/
__global__
void clacpy_sym_out_full_kernel(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
clacpy_sym_out_full_device(m, n, dA, ldda, dB, lddb);
}
__global__
void clacpy_sym_out_lower_kernel(
int m, int n, magma_int_t *rows, magma_int_t *perm,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
clacpy_sym_out_lower_device(m, n, rows, perm, dA, ldda, dB, lddb);
}
__global__
void clacpy_sym_out_upper_kernel(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
clacpy_sym_out_upper_device(m, n, dA, ldda, dB, lddb);
}
/***************************************************************************//**
Purpose
-------
CLACPY_Q copies all or part of a two-dimensional matrix dA to another
matrix dB.
This is the same as CLACPY, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
rows INTEGER array, on GPU, dimension (2*n)
On entry, it stores the new pivots such that rows[i]-th and rows[n+i]-th
rows are swapped.
@param[in,out]
perm INTEGER array, on GPU, dimension (m)
On entry, it stores the permutation array such that i-th row will be
the original perm[i]-th row after the pivots are applied.
On exit, it is restored to be identity permutation.
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, the matrix after the symmetric pivoting is applied.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
dB COMPLEX array, dimension (LDDB,N)
The M-by-N matrix dB.
On entry, dB stores the columns after row pivoting is applied.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lacpy
*******************************************************************************/
extern "C" void
magmablas_clacpy_sym_out_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magma_int_t *rows, magma_int_t *perm,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv(m, BLK_X), magma_ceildiv(n, BLK_Y) );
if ( uplo == MagmaLower ) {
clacpy_sym_out_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, rows, perm, dA, ldda, dB, lddb );
}
else if ( uplo == MagmaUpper ) {
clacpy_sym_out_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dB, lddb );
}
else {
clacpy_sym_out_full_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dB, lddb );
}
}
|
a62aeadb720ce18b132f95fbe9ed9b5ed42c8615.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void binarize_kernel(float *x, int n, float *binary)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= n) return;
binary[i] = (x[i] > 0) ? 1 : -1;
}
|
a62aeadb720ce18b132f95fbe9ed9b5ed42c8615.cu
|
#include "includes.h"
__global__ void binarize_kernel(float *x, int n, float *binary)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= n) return;
binary[i] = (x[i] > 0) ? 1 : -1;
}
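/*
 * Illustrative launch sketch (assumption, not from the original file): the kernel flattens a 2D
 * grid, so any grid with gridDim.x * gridDim.y * blockDim.x >= n works, for example
 *   int threads = 512;
 *   int blocks = (n + threads - 1) / threads;
 *   dim3 grid(blocks <= 65535 ? blocks : 65535, (blocks + 65534) / 65535);
 *   binarize_kernel<<<grid, threads>>>(d_x, n, d_binary);
 */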
|
f47fef66806f41943741378f3b87f39e0671dac6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.m on 20-Dec-2011 15:33:39
//
// user function
__device__
#include "bres_calc.h"
// CUDA kernel function
__global__ void op_cuda_bres_calc(
double *ind_arg0, int *ind_arg0_maps,
double *ind_arg1, int *ind_arg1_maps,
double *ind_arg2, int *ind_arg2_maps,
double *ind_arg3, int *ind_arg3_maps,
short *arg0_maps,
short *arg1_maps,
short *arg2_maps,
short *arg3_maps,
short *arg4_maps,
int *arg5,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks) {
double arg4_l[4];
__shared__ int *ind_arg0_map, ind_arg0_size;
__shared__ int *ind_arg1_map, ind_arg1_size;
__shared__ int *ind_arg2_map, ind_arg2_size;
__shared__ int *ind_arg3_map, ind_arg3_size;
__shared__ double *ind_arg0_s;
__shared__ double *ind_arg1_s;
__shared__ double *ind_arg2_s;
__shared__ double *ind_arg3_s;
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) return;
if (threadIdx.x==0) {
// get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
ind_arg0_size = ind_arg_sizes[0+blockId*4];
ind_arg1_size = ind_arg_sizes[1+blockId*4];
ind_arg2_size = ind_arg_sizes[2+blockId*4];
ind_arg3_size = ind_arg_sizes[3+blockId*4];
ind_arg0_map = ind_arg0_maps + ind_arg_offs[0+blockId*4];
ind_arg1_map = ind_arg1_maps + ind_arg_offs[1+blockId*4];
ind_arg2_map = ind_arg2_maps + ind_arg_offs[2+blockId*4];
ind_arg3_map = ind_arg3_maps + ind_arg_offs[3+blockId*4];
// set shared memory pointers
int nbytes = 0;
ind_arg0_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg0_size*sizeof(double)*2);
ind_arg1_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg1_size*sizeof(double)*4);
ind_arg2_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg2_size*sizeof(double)*1);
ind_arg3_s = (double *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
// copy indirect datasets into shared memory or zero increment
for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x)
ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2];
for (int n=threadIdx.x; n<ind_arg1_size*4; n+=blockDim.x)
ind_arg1_s[n] = ind_arg1[n%4+ind_arg1_map[n/4]*4];
for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x)
ind_arg2_s[n] = ind_arg2[n%1+ind_arg2_map[n/1]*1];
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3_s[n] = ZERO_double;
__syncthreads();
// process set elements
for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) {
int col2 = -1;
if (n<nelem) {
// initialise local variables
for (int d=0; d<4; d++)
arg4_l[d] = ZERO_double;
// user-supplied kernel call
bres_calc( ind_arg0_s+arg0_maps[n+offset_b]*2,
ind_arg0_s+arg1_maps[n+offset_b]*2,
ind_arg1_s+arg2_maps[n+offset_b]*4,
ind_arg2_s+arg3_maps[n+offset_b]*1,
arg4_l,
arg5+(n+offset_b)*1 );
col2 = colors[n+offset_b];
}
// store local variables
int arg4_map;
if (col2>=0) {
arg4_map = arg4_maps[n+offset_b];
}
for (int col=0; col<ncolor; col++) {
if (col2==col) {
for (int d=0; d<4; d++)
ind_arg3_s[d+arg4_map*4] += arg4_l[d];
}
__syncthreads();
}
}
// apply pointered write/increment
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n];
}
// host stub function
void op_par_loop_bres_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5 ){
int nargs = 6;
op_arg args[6] = {arg0,arg1,arg2,arg3,arg4,arg5};
int ninds = 4;
int inds[6] = {0,0,1,2,3,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: bres_calc \n");
}
// get plan
#ifdef OP_PART_SIZE_3
int part_size = OP_PART_SIZE_3;
#else
int part_size = OP_part_size;
#endif
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timers_core(&cpu_t1, &wall_t1);
// execute plan
int block_offset = 0;
for (int col=0; col < Plan->ncolors; col++) {
#ifdef OP_BLOCK_SIZE_3
int nthread = OP_BLOCK_SIZE_3;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
int nshared = Plan->nshared;
hipLaunchKernelGGL(( op_cuda_bres_calc), dim3(nblocks),dim3(nthread),nshared, 0,
(double *)arg0.data_d, Plan->ind_maps[0],
(double *)arg2.data_d, Plan->ind_maps[1],
(double *)arg3.data_d, Plan->ind_maps[2],
(double *)arg4.data_d, Plan->ind_maps[3],
Plan->loc_maps[0],
Plan->loc_maps[1],
Plan->loc_maps[2],
Plan->loc_maps[3],
Plan->loc_maps[4],
(int *)arg5.data_d,
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col]);
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg("op_cuda_bres_calc execution failed\n");
block_offset += Plan->ncolblk[col];
}
// update kernel record
op_timers_core(&cpu_t2, &wall_t2);
op_timing_realloc(3);
OP_kernels[3].name = name;
OP_kernels[3].count += 1;
OP_kernels[3].time += wall_t2 - wall_t1;
OP_kernels[3].transfer += Plan->transfer;
OP_kernels[3].transfer2 += Plan->transfer2;
}
|
f47fef66806f41943741378f3b87f39e0671dac6.cu
|
//
// auto-generated by op2.m on 20-Dec-2011 15:33:39
//
// user function
__device__
#include "bres_calc.h"
// CUDA kernel function
__global__ void op_cuda_bres_calc(
double *ind_arg0, int *ind_arg0_maps,
double *ind_arg1, int *ind_arg1_maps,
double *ind_arg2, int *ind_arg2_maps,
double *ind_arg3, int *ind_arg3_maps,
short *arg0_maps,
short *arg1_maps,
short *arg2_maps,
short *arg3_maps,
short *arg4_maps,
int *arg5,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks) {
double arg4_l[4];
__shared__ int *ind_arg0_map, ind_arg0_size;
__shared__ int *ind_arg1_map, ind_arg1_size;
__shared__ int *ind_arg2_map, ind_arg2_size;
__shared__ int *ind_arg3_map, ind_arg3_size;
__shared__ double *ind_arg0_s;
__shared__ double *ind_arg1_s;
__shared__ double *ind_arg2_s;
__shared__ double *ind_arg3_s;
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) return;
if (threadIdx.x==0) {
// get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
ind_arg0_size = ind_arg_sizes[0+blockId*4];
ind_arg1_size = ind_arg_sizes[1+blockId*4];
ind_arg2_size = ind_arg_sizes[2+blockId*4];
ind_arg3_size = ind_arg_sizes[3+blockId*4];
ind_arg0_map = ind_arg0_maps + ind_arg_offs[0+blockId*4];
ind_arg1_map = ind_arg1_maps + ind_arg_offs[1+blockId*4];
ind_arg2_map = ind_arg2_maps + ind_arg_offs[2+blockId*4];
ind_arg3_map = ind_arg3_maps + ind_arg_offs[3+blockId*4];
// set shared memory pointers
int nbytes = 0;
ind_arg0_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg0_size*sizeof(double)*2);
ind_arg1_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg1_size*sizeof(double)*4);
ind_arg2_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg2_size*sizeof(double)*1);
ind_arg3_s = (double *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
// copy indirect datasets into shared memory or zero increment
for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x)
ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2];
for (int n=threadIdx.x; n<ind_arg1_size*4; n+=blockDim.x)
ind_arg1_s[n] = ind_arg1[n%4+ind_arg1_map[n/4]*4];
for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x)
ind_arg2_s[n] = ind_arg2[n%1+ind_arg2_map[n/1]*1];
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3_s[n] = ZERO_double;
__syncthreads();
// process set elements
for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) {
int col2 = -1;
if (n<nelem) {
// initialise local variables
for (int d=0; d<4; d++)
arg4_l[d] = ZERO_double;
// user-supplied kernel call
bres_calc( ind_arg0_s+arg0_maps[n+offset_b]*2,
ind_arg0_s+arg1_maps[n+offset_b]*2,
ind_arg1_s+arg2_maps[n+offset_b]*4,
ind_arg2_s+arg3_maps[n+offset_b]*1,
arg4_l,
arg5+(n+offset_b)*1 );
col2 = colors[n+offset_b];
}
// store local variables
int arg4_map;
if (col2>=0) {
arg4_map = arg4_maps[n+offset_b];
}
for (int col=0; col<ncolor; col++) {
if (col2==col) {
for (int d=0; d<4; d++)
ind_arg3_s[d+arg4_map*4] += arg4_l[d];
}
__syncthreads();
}
}
// apply pointered write/increment
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n];
}
// host stub function
void op_par_loop_bres_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5 ){
int nargs = 6;
op_arg args[6] = {arg0,arg1,arg2,arg3,arg4,arg5};
int ninds = 4;
int inds[6] = {0,0,1,2,3,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: bres_calc \n");
}
// get plan
#ifdef OP_PART_SIZE_3
int part_size = OP_PART_SIZE_3;
#else
int part_size = OP_part_size;
#endif
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timers_core(&cpu_t1, &wall_t1);
// execute plan
int block_offset = 0;
for (int col=0; col < Plan->ncolors; col++) {
#ifdef OP_BLOCK_SIZE_3
int nthread = OP_BLOCK_SIZE_3;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
int nshared = Plan->nshared;
op_cuda_bres_calc<<<nblocks,nthread,nshared>>>(
(double *)arg0.data_d, Plan->ind_maps[0],
(double *)arg2.data_d, Plan->ind_maps[1],
(double *)arg3.data_d, Plan->ind_maps[2],
(double *)arg4.data_d, Plan->ind_maps[3],
Plan->loc_maps[0],
Plan->loc_maps[1],
Plan->loc_maps[2],
Plan->loc_maps[3],
Plan->loc_maps[4],
(int *)arg5.data_d,
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col]);
cutilSafeCall(cudaThreadSynchronize());
cutilCheckMsg("op_cuda_bres_calc execution failed\n");
block_offset += Plan->ncolblk[col];
}
// update kernel record
op_timers_core(&cpu_t2, &wall_t2);
op_timing_realloc(3);
OP_kernels[3].name = name;
OP_kernels[3].count += 1;
OP_kernels[3].time += wall_t2 - wall_t1;
OP_kernels[3].transfer += Plan->transfer;
OP_kernels[3].transfer2 += Plan->transfer2;
}
|
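The per-block colour loop in op_cuda_bres_calc is what makes the unsynchronised "+=" into shared memory safe: elements of the same colour never share an indirectly addressed node, and the colours themselves are serialised with __syncthreads(). A stripped-down sketch of that pattern on plain arrays (illustrative names, single block, not OP2 code):
// Elements with the same colour touch distinct nodes, so only the colours need
// to be serialised; within a colour the '+=' writes cannot collide.
__global__ void coloured_scatter_sketch(const int *node_of_elem,
                                        const int *colour_of_elem,
                                        const float *contrib, float *node_val,
                                        int nelem, int ncolours)
{
    int e = threadIdx.x;                                   // one element per thread (assumes blockDim.x >= nelem)
    int my_colour = (e < nelem) ? colour_of_elem[e] : -1;  // -1 never matches a colour
    for (int c = 0; c < ncolours; c++) {
        if (my_colour == c)
            node_val[node_of_elem[e]] += contrib[e];
        __syncthreads();                                   // executed uniformly by every thread
    }
}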
a6af707a2a478003de19179dd2ac4fdf36ad23f1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: dnlebard
#include "HarmonicAngleForceGPU.cuh"
#include "hoomd/TextureTools.h"
#include <assert.h>
// SMALL a relatively small number
#define SMALL Scalar(0.001)
/*! \file HarmonicAngleForceGPU.cu
\brief Defines GPU kernel code for calculating the harmonic angle forces. Used by HarmonicAngleForceComputeGPU.
*/
//! Texture for reading angle parameters
scalar2_tex_t angle_params_tex;
//! Kernel for calculating harmonic angle forces on the GPU
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch Pitch of 2D virial array
\param N number of particles
\param d_pos device array of particle positions
\param d_params Parameters for the angle force
\param box Box dimensions for periodic boundary condition handling
\param alist Angle data to use in calculating the forces
\param pitch Pitch of 2D angles list
\param n_angles_list List of numbers of angles stored on the GPU
*/
extern "C" __global__ void gpu_compute_harmonic_angle_forces_kernel(Scalar4* d_force,
Scalar* d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
const Scalar2 *d_params,
BoxDim box,
const group_storage<3> *alist,
const unsigned int *apos_list,
const unsigned int pitch,
const unsigned int *n_angles_list)
{
// start by identifying which particle we are to handle
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
// load in the length of the list for this thread (MEM TRANSFER: 4 bytes)
int n_angles = n_angles_list[idx];
// read in the position of our b-particle from the a-b-c triplet. (MEM TRANSFER: 16 bytes)
Scalar4 idx_postype = d_pos[idx]; // we can be either a, b, or c in the a-b-c triplet
Scalar3 idx_pos = make_scalar3(idx_postype.x, idx_postype.y, idx_postype.z);
Scalar3 a_pos,b_pos,c_pos; // allocate space for the a,b, and c atom in the a-b-c triplet
// initialize the force to 0
Scalar4 force_idx = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
Scalar fab[3], fcb[3];
// initialize the virial to 0
Scalar virial[6];
for (int i = 0; i < 6; i++)
virial[i] = Scalar(0.0);
// loop over all angles
for (int angle_idx = 0; angle_idx < n_angles; angle_idx++)
{
group_storage<3> cur_angle = alist[pitch*angle_idx + idx];
int cur_angle_x_idx = cur_angle.idx[0];
int cur_angle_y_idx = cur_angle.idx[1];
int cur_angle_type = cur_angle.idx[2];
int cur_angle_abc = apos_list[pitch*angle_idx + idx];
// get the a-particle's position (MEM TRANSFER: 16 bytes)
Scalar4 x_postype = d_pos[cur_angle_x_idx];
Scalar3 x_pos = make_scalar3(x_postype.x, x_postype.y, x_postype.z);
// get the c-particle's position (MEM TRANSFER: 16 bytes)
Scalar4 y_postype = d_pos[cur_angle_y_idx];
Scalar3 y_pos = make_scalar3(y_postype.x, y_postype.y, y_postype.z);
if (cur_angle_abc == 0)
{
a_pos = idx_pos;
b_pos = x_pos;
c_pos = y_pos;
}
if (cur_angle_abc == 1)
{
b_pos = idx_pos;
a_pos = x_pos;
c_pos = y_pos;
}
if (cur_angle_abc == 2)
{
c_pos = idx_pos;
a_pos = x_pos;
b_pos = y_pos;
}
// calculate dr for a-b,c-b,and a-c
Scalar3 dab = a_pos - b_pos;
Scalar3 dcb = c_pos - b_pos;
Scalar3 dac = a_pos - c_pos;
// apply periodic boundary conditions
dab = box.minImage(dab);
dcb = box.minImage(dcb);
dac = box.minImage(dac);
// get the angle parameters (MEM TRANSFER: 8 bytes)
Scalar2 params = texFetchScalar2(d_params, angle_params_tex, cur_angle_type);
Scalar K = params.x;
Scalar t_0 = params.y;
Scalar rsqab = dot(dab, dab);
Scalar rab = sqrtf(rsqab);
Scalar rsqcb = dot(dcb, dcb);
Scalar rcb = sqrtf(rsqcb);
Scalar c_abbc = dot(dab, dcb);
c_abbc /= rab*rcb;
if (c_abbc > Scalar(1.0)) c_abbc = Scalar(1.0);
if (c_abbc < -Scalar(1.0)) c_abbc = -Scalar(1.0);
Scalar s_abbc = sqrtf(Scalar(1.0) - c_abbc*c_abbc);
if (s_abbc < SMALL) s_abbc = SMALL;
s_abbc = Scalar(1.0)/s_abbc;
// actually calculate the force
Scalar dth = fast::acos(c_abbc) - t_0;
Scalar tk = K*dth;
Scalar a = -Scalar(1.0) * tk * s_abbc;
Scalar a11 = a*c_abbc/rsqab;
Scalar a12 = -a / (rab*rcb);
Scalar a22 = a*c_abbc / rsqcb;
fab[0] = a11*dab.x + a12*dcb.x;
fab[1] = a11*dab.y + a12*dcb.y;
fab[2] = a11*dab.z + a12*dcb.z;
fcb[0] = a22*dcb.x + a12*dab.x;
fcb[1] = a22*dcb.y + a12*dab.y;
fcb[2] = a22*dcb.z + a12*dab.z;
// compute 1/3 of the energy, 1/3 for each atom in the angle
Scalar angle_eng = tk*dth*Scalar(Scalar(1.0)/Scalar(6.0));
// upper triangular version of virial tensor
Scalar angle_virial[6];
angle_virial[0] = Scalar(1./3.)*(dab.x*fab[0] + dcb.x*fcb[0]);
angle_virial[1] = Scalar(1./3.)*(dab.y*fab[0] + dcb.y*fcb[0]);
angle_virial[2] = Scalar(1./3.)*(dab.z*fab[0] + dcb.z*fcb[0]);
angle_virial[3] = Scalar(1./3.)*(dab.y*fab[1] + dcb.y*fcb[1]);
angle_virial[4] = Scalar(1./3.)*(dab.z*fab[1] + dcb.z*fcb[1]);
angle_virial[5] = Scalar(1./3.)*(dab.z*fab[2] + dcb.z*fcb[2]);
if (cur_angle_abc == 0)
{
force_idx.x += fab[0];
force_idx.y += fab[1];
force_idx.z += fab[2];
}
if (cur_angle_abc == 1)
{
force_idx.x -= fab[0] + fcb[0];
force_idx.y -= fab[1] + fcb[1];
force_idx.z -= fab[2] + fcb[2];
}
if (cur_angle_abc == 2)
{
force_idx.x += fcb[0];
force_idx.y += fcb[1];
force_idx.z += fcb[2];
}
force_idx.w += angle_eng;
for (int i = 0; i < 6; i++)
virial[i] += angle_virial[i];
}
// now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes)
d_force[idx] = force_idx;
for (int i = 0; i < 6; i++)
d_virial[i*virial_pitch+idx] = virial[i];
}
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch pitch of 2D virial array
\param N number of particles
\param d_pos device array of particle positions
\param box Box dimensions (in GPU format) to use for periodic boundary conditions
\param atable List of angles stored on the GPU
\param pitch Pitch of 2D angles list
\param n_angles_list List of numbers of angles stored on the GPU
\param d_params K and t_0 params packed as Scalar2 variables
\param n_angle_types Number of angle types in d_params
\param block_size Block size to use when performing calculations
\param compute_capability Device compute capability (200, 300, 350, ...)
\returns Any error code resulting from the kernel launch
\note Always returns hipSuccess in release builds to avoid the hipDeviceSynchronize()
\a d_params should include one Scalar2 element per angle type. The x component contains K the spring constant
and the y component contains t_0 the equilibrium angle.
*/
hipError_t gpu_compute_harmonic_angle_forces(Scalar4* d_force,
Scalar* d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
const BoxDim& box,
const group_storage<3> *atable,
const unsigned int *apos_list,
const unsigned int pitch,
const unsigned int *n_angles_list,
Scalar2 *d_params,
unsigned int n_angle_types,
int block_size,
const unsigned int compute_capability)
{
assert(d_params);
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void *)gpu_compute_harmonic_angle_forces_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
// setup the grid to run the kernel
dim3 grid( N / run_block_size + 1, 1, 1);
dim3 threads(run_block_size, 1, 1);
// bind the texture on pre sm 35 arches
if (compute_capability < 350)
{
hipError_t error = hipBindTexture(0, angle_params_tex, d_params, sizeof(Scalar2) * n_angle_types);
if (error != hipSuccess)
return error;
}
// run the kernel
hipLaunchKernelGGL(( gpu_compute_harmonic_angle_forces_kernel), dim3(grid), dim3(threads), 0, 0, d_force, d_virial, virial_pitch, N, d_pos, d_params, box,
atable, apos_list, pitch, n_angles_list);
return hipSuccess;
}
|
a6af707a2a478003de19179dd2ac4fdf36ad23f1.cu
|
// Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: dnlebard
#include "HarmonicAngleForceGPU.cuh"
#include "hoomd/TextureTools.h"
#include <assert.h>
// SMALL a relatively small number
#define SMALL Scalar(0.001)
/*! \file HarmonicAngleForceGPU.cu
\brief Defines GPU kernel code for calculating the harmonic angle forces. Used by HarmonicAngleForceComputeGPU.
*/
//! Texture for reading angle parameters
scalar2_tex_t angle_params_tex;
//! Kernel for calculating harmonic angle forces on the GPU
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch Pitch of 2D virial array
\param N number of particles
\param d_pos device array of particle positions
\param d_params Parameters for the angle force
\param box Box dimensions for periodic boundary condition handling
\param alist Angle data to use in calculating the forces
\param pitch Pitch of 2D angles list
\param n_angles_list List of numbers of angles stored on the GPU
*/
extern "C" __global__ void gpu_compute_harmonic_angle_forces_kernel(Scalar4* d_force,
Scalar* d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
const Scalar2 *d_params,
BoxDim box,
const group_storage<3> *alist,
const unsigned int *apos_list,
const unsigned int pitch,
const unsigned int *n_angles_list)
{
// start by identifying which particle we are to handle
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
// load in the length of the list for this thread (MEM TRANSFER: 4 bytes)
int n_angles = n_angles_list[idx];
// read in the position of our b-particle from the a-b-c triplet. (MEM TRANSFER: 16 bytes)
Scalar4 idx_postype = d_pos[idx]; // we can be either a, b, or c in the a-b-c triplet
Scalar3 idx_pos = make_scalar3(idx_postype.x, idx_postype.y, idx_postype.z);
Scalar3 a_pos,b_pos,c_pos; // allocate space for the a,b, and c atom in the a-b-c triplet
// initialize the force to 0
Scalar4 force_idx = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
Scalar fab[3], fcb[3];
// initialize the virial to 0
Scalar virial[6];
for (int i = 0; i < 6; i++)
virial[i] = Scalar(0.0);
// loop over all angles
for (int angle_idx = 0; angle_idx < n_angles; angle_idx++)
{
group_storage<3> cur_angle = alist[pitch*angle_idx + idx];
int cur_angle_x_idx = cur_angle.idx[0];
int cur_angle_y_idx = cur_angle.idx[1];
int cur_angle_type = cur_angle.idx[2];
int cur_angle_abc = apos_list[pitch*angle_idx + idx];
// get the a-particle's position (MEM TRANSFER: 16 bytes)
Scalar4 x_postype = d_pos[cur_angle_x_idx];
Scalar3 x_pos = make_scalar3(x_postype.x, x_postype.y, x_postype.z);
// get the c-particle's position (MEM TRANSFER: 16 bytes)
Scalar4 y_postype = d_pos[cur_angle_y_idx];
Scalar3 y_pos = make_scalar3(y_postype.x, y_postype.y, y_postype.z);
if (cur_angle_abc == 0)
{
a_pos = idx_pos;
b_pos = x_pos;
c_pos = y_pos;
}
if (cur_angle_abc == 1)
{
b_pos = idx_pos;
a_pos = x_pos;
c_pos = y_pos;
}
if (cur_angle_abc == 2)
{
c_pos = idx_pos;
a_pos = x_pos;
b_pos = y_pos;
}
// calculate dr for a-b,c-b,and a-c
Scalar3 dab = a_pos - b_pos;
Scalar3 dcb = c_pos - b_pos;
Scalar3 dac = a_pos - c_pos;
// apply periodic boundary conditions
dab = box.minImage(dab);
dcb = box.minImage(dcb);
dac = box.minImage(dac);
// get the angle parameters (MEM TRANSFER: 8 bytes)
Scalar2 params = texFetchScalar2(d_params, angle_params_tex, cur_angle_type);
Scalar K = params.x;
Scalar t_0 = params.y;
Scalar rsqab = dot(dab, dab);
Scalar rab = sqrtf(rsqab);
Scalar rsqcb = dot(dcb, dcb);
Scalar rcb = sqrtf(rsqcb);
Scalar c_abbc = dot(dab, dcb);
c_abbc /= rab*rcb;
if (c_abbc > Scalar(1.0)) c_abbc = Scalar(1.0);
if (c_abbc < -Scalar(1.0)) c_abbc = -Scalar(1.0);
Scalar s_abbc = sqrtf(Scalar(1.0) - c_abbc*c_abbc);
if (s_abbc < SMALL) s_abbc = SMALL;
s_abbc = Scalar(1.0)/s_abbc;
// actually calculate the force
Scalar dth = fast::acos(c_abbc) - t_0;
Scalar tk = K*dth;
Scalar a = -Scalar(1.0) * tk * s_abbc;
Scalar a11 = a*c_abbc/rsqab;
Scalar a12 = -a / (rab*rcb);
Scalar a22 = a*c_abbc / rsqcb;
fab[0] = a11*dab.x + a12*dcb.x;
fab[1] = a11*dab.y + a12*dcb.y;
fab[2] = a11*dab.z + a12*dcb.z;
fcb[0] = a22*dcb.x + a12*dab.x;
fcb[1] = a22*dcb.y + a12*dab.y;
fcb[2] = a22*dcb.z + a12*dab.z;
// compute 1/3 of the energy, 1/3 for each atom in the angle
Scalar angle_eng = tk*dth*Scalar(Scalar(1.0)/Scalar(6.0));
// upper triangular version of virial tensor
Scalar angle_virial[6];
angle_virial[0] = Scalar(1./3.)*(dab.x*fab[0] + dcb.x*fcb[0]);
angle_virial[1] = Scalar(1./3.)*(dab.y*fab[0] + dcb.y*fcb[0]);
angle_virial[2] = Scalar(1./3.)*(dab.z*fab[0] + dcb.z*fcb[0]);
angle_virial[3] = Scalar(1./3.)*(dab.y*fab[1] + dcb.y*fcb[1]);
angle_virial[4] = Scalar(1./3.)*(dab.z*fab[1] + dcb.z*fcb[1]);
angle_virial[5] = Scalar(1./3.)*(dab.z*fab[2] + dcb.z*fcb[2]);
if (cur_angle_abc == 0)
{
force_idx.x += fab[0];
force_idx.y += fab[1];
force_idx.z += fab[2];
}
if (cur_angle_abc == 1)
{
force_idx.x -= fab[0] + fcb[0];
force_idx.y -= fab[1] + fcb[1];
force_idx.z -= fab[2] + fcb[2];
}
if (cur_angle_abc == 2)
{
force_idx.x += fcb[0];
force_idx.y += fcb[1];
force_idx.z += fcb[2];
}
force_idx.w += angle_eng;
for (int i = 0; i < 6; i++)
virial[i] += angle_virial[i];
}
// now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes)
d_force[idx] = force_idx;
for (int i = 0; i < 6; i++)
d_virial[i*virial_pitch+idx] = virial[i];
}
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch pitch of 2D virial array
\param N number of particles
\param d_pos device array of particle positions
\param box Box dimensions (in GPU format) to use for periodic boundary conditions
\param atable List of angles stored on the GPU
\param pitch Pitch of 2D angles list
\param n_angles_list List of numbers of angles stored on the GPU
\param d_params K and t_0 params packed as Scalar2 variables
\param n_angle_types Number of angle types in d_params
\param block_size Block size to use when performing calculations
\param compute_capability Device compute capability (200, 300, 350, ...)
\returns Any error code resulting from the kernel launch
\note Always returns cudaSuccess in release builds to avoid the cudaThreadSynchronize()
\a d_params should include one Scalar2 element per angle type. The x component contains K the spring constant
and the y component contains t_0 the equilibrium angle.
*/
cudaError_t gpu_compute_harmonic_angle_forces(Scalar4* d_force,
Scalar* d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
const BoxDim& box,
const group_storage<3> *atable,
const unsigned int *apos_list,
const unsigned int pitch,
const unsigned int *n_angles_list,
Scalar2 *d_params,
unsigned int n_angle_types,
int block_size,
const unsigned int compute_capability)
{
assert(d_params);
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void *)gpu_compute_harmonic_angle_forces_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
// setup the grid to run the kernel
dim3 grid( N / run_block_size + 1, 1, 1);
dim3 threads(run_block_size, 1, 1);
// bind the texture on pre sm 35 arches
if (compute_capability < 350)
{
cudaError_t error = cudaBindTexture(0, angle_params_tex, d_params, sizeof(Scalar2) * n_angle_types);
if (error != cudaSuccess)
return error;
}
// run the kernel
gpu_compute_harmonic_angle_forces_kernel<<< grid, threads>>>(d_force, d_virial, virial_pitch, N, d_pos, d_params, box,
atable, apos_list, pitch, n_angles_list);
return cudaSuccess;
}
|
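The per-angle math in the kernel above reduces to the prefactor a = -K*(theta - t_0)/sin(theta), split into the a11/a12/a22 terms. A plain host-side reference of the same formulas, handy for spot-checking a single a-b-c triplet, is sketched below with ordinary doubles and illustrative names rather than HOOMD's Scalar types (and without periodic wrapping):
#include <cmath>
// Self-contained host-side reference for one a-b-c angle; mirrors the kernel's
// math but is not part of HOOMD-blue (names are illustrative).
struct Vec3d { double x, y, z; };
static Vec3d sub3(Vec3d a, Vec3d b) { return {a.x - b.x, a.y - b.y, a.z - b.z}; }
static double dot3(Vec3d a, Vec3d b) { return a.x*b.x + a.y*b.y + a.z*b.z; }
// Returns the angle energy K/2*(theta - t0)^2 and writes the force on atom a;
// the force on c follows by swapping dab/dcb, and b carries minus their sum.
double harmonic_angle_reference(Vec3d a, Vec3d b, Vec3d c,
                                double K, double t0, Vec3d *force_a)
{
    Vec3d dab = sub3(a, b), dcb = sub3(c, b);
    double rab = std::sqrt(dot3(dab, dab));
    double rcb = std::sqrt(dot3(dcb, dcb));
    double cth = dot3(dab, dcb) / (rab * rcb);
    if (cth >  1.0) cth =  1.0;
    if (cth < -1.0) cth = -1.0;
    double sth = std::sqrt(1.0 - cth*cth);
    if (sth < 0.001) sth = 0.001;                 // same SMALL guard as the kernel
    double dth  = std::acos(cth) - t0;
    double pref = -K * dth / sth;                 // 'a' in the kernel
    double a11  = pref * cth / (rab * rab);
    double a12  = -pref / (rab * rcb);
    force_a->x = a11*dab.x + a12*dcb.x;
    force_a->y = a11*dab.y + a12*dcb.y;
    force_a->z = a11*dab.z + a12*dcb.z;
    return 0.5 * K * dth * dth;
}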
8420860d5249903bc7fa23b3d85c221fd8578272.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/zcompact.cu normal z -> c, Tue Feb 9 16:05:45 2016
@author Stan Tomov
*/
#include "magmasparse_internal.h"
#define NB 64
/* =====================================================================
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread handles one row, iterating across all columns.
*/
__global__ void
ccompact_kernel(
int m, int n,
magmaFloatComplex *dA,
int ldda,
float *dnorms,
float tol,
magma_int_t *active,
magma_int_t *cBlock)
{
// dA is processed across row i (by the current thread)
int i = blockIdx.x*blockDim.x + threadIdx.x;
int cBlockSize = 0;
if ( i < m ) {
dA += i;
for(int j = 0; j<n; j++){
if (dnorms[j] > tol && active[j]){
dA[ldda*cBlockSize] = dA[ldda*j];
cBlockSize++;
}
else if (i==0)
active[j] = 0;
}
}
if (i==0)
*cBlock = cBlockSize;
}
__global__ void
ccompactactive_kernel(
int m,
int n,
magmaFloatComplex *dA,
int ldda,
magma_int_t *active)
{
// dA is processed across row i (by the current thread)
int i = blockIdx.x*blockDim.x + threadIdx.x;
int cBlockSize = 0;
if ( i < m ) {
dA += i;
for(int j = 0; j<n; j++){
if (active[j]){
dA[ldda*cBlockSize] = dA[ldda*j];
cBlockSize++;
}
}
}
}
/* ===================================================================== */
/**
Purpose
-------
ZCOMPACT takes a set of n vectors of size m (in dA) and their norms and
compacts them into the cBlock size<=n vectors that have norms > tol.
The active mask array has 1 or 0, showing if a vector remained or not
in the compacted resulting set of vectors.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in,out]
dA COMPLEX REAL array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
dnorms REAL array, dimension N
The norms of the N vectors in dA
@param[in]
tol DOUBLE PRECISION
The tolerance value used in the criteria to compact or not.
@param[in,out]
active INTEGER array, dimension N
A mask of 1s and 0s showing if a vector remains or has been removed
@param[in,out]
cBlock magmaInt_ptr
The number of vectors that remain in dA (i.e., with norms > tol).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_ccompact(
magma_int_t m,
magma_int_t n,
magmaFloatComplex_ptr dA,
magma_int_t ldda,
magmaFloat_ptr dnorms,
float tol,
magmaInt_ptr active,
magmaInt_ptr cBlock,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return info;
}
if ( m == 0 || n == 0 )
return info;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
hipLaunchKernelGGL(( ccompact_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, dA, ldda, dnorms, tol, active, active+n );
magma_igetvector( 1, active+n, 1, cBlock, 1, queue );
return info;
}
/* ===================================================================== */
/**
Purpose
-------
ZCOMPACTACTIVE takes a set of n vectors of size m (in dA) and an
array of 1s and 0s indicating which vectors to compact (for 1s) and
which to disregard (for 0s).
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in,out]
dA COMPLEX REAL array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
active INTEGER array, dimension N
A mask of 1s and 0s showing if a vector remains or has been removed
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_c
********************************************************************/
extern "C" magma_int_t
magma_ccompactActive(
magma_int_t m,
magma_int_t n,
magmaFloatComplex_ptr dA,
magma_int_t ldda,
magmaInt_ptr active,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return info;
}
if ( m == 0 || n == 0 )
return info;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
hipLaunchKernelGGL(( ccompactactive_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, dA, ldda, active);
return info;
}
/* ===================================================================== */
|
8420860d5249903bc7fa23b3d85c221fd8578272.cu
|
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/zcompact.cu normal z -> c, Tue Feb 9 16:05:45 2016
@author Stan Tomov
*/
#include "magmasparse_internal.h"
#define NB 64
/* =====================================================================
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread handles one row, iterating across all columns.
*/
__global__ void
ccompact_kernel(
int m, int n,
magmaFloatComplex *dA,
int ldda,
float *dnorms,
float tol,
magma_int_t *active,
magma_int_t *cBlock)
{
// dA is processed across row i (by the current thread)
int i = blockIdx.x*blockDim.x + threadIdx.x;
int cBlockSize = 0;
if ( i < m ) {
dA += i;
for(int j = 0; j<n; j++){
if (dnorms[j] > tol && active[j]){
dA[ldda*cBlockSize] = dA[ldda*j];
cBlockSize++;
}
else if (i==0)
active[j] = 0;
}
}
if (i==0)
*cBlock = cBlockSize;
}
__global__ void
ccompactactive_kernel(
int m,
int n,
magmaFloatComplex *dA,
int ldda,
magma_int_t *active)
{
// dA is processed across row i (by the current thread)
int i = blockIdx.x*blockDim.x + threadIdx.x;
int cBlockSize = 0;
if ( i < m ) {
dA += i;
for(int j = 0; j<n; j++){
if (active[j]){
dA[ldda*cBlockSize] = dA[ldda*j];
cBlockSize++;
}
}
}
}
/* ===================================================================== */
/**
Purpose
-------
ZCOMPACT takes a set of n vectors of size m (in dA) and their norms and
compacts them into the cBlock size<=n vectors that have norms > tol.
The active mask array has 1 or 0, showing if a vector remained or not
in the compacted resulting set of vectors.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in,out]
dA COMPLEX REAL array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
dnorms REAL array, dimension N
The norms of the N vectors in dA
@param[in]
tol DOUBLE PRECISION
The tolerance value used in the criteria to compact or not.
@param[in,out]
active INTEGER array, dimension N
A mask of 1s and 0s showing if a vector remains or has been removed
@param[in,out]
cBlock magmaInt_ptr
The number of vectors that remain in dA (i.e., with norms > tol).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_ccompact(
magma_int_t m,
magma_int_t n,
magmaFloatComplex_ptr dA,
magma_int_t ldda,
magmaFloat_ptr dnorms,
float tol,
magmaInt_ptr active,
magmaInt_ptr cBlock,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return info;
}
if ( m == 0 || n == 0 )
return info;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
ccompact_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(
m, n, dA, ldda, dnorms, tol, active, active+n );
magma_igetvector( 1, active+n, 1, cBlock, 1, queue );
return info;
}
/* ===================================================================== */
/**
Purpose
-------
ZCOMPACTACTIVE takes a set of n vectors of size m (in dA) and an
array of 1s and 0s indicating which vectors to compact (for 1s) and
which to disregard (for 0s).
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in,out]
dA COMPLEX REAL array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
active INTEGER array, dimension N
A mask of 1s and 0s showing if a vector remains or has been removed
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_c
********************************************************************/
extern "C" magma_int_t
magma_ccompactActive(
magma_int_t m,
magma_int_t n,
magmaFloatComplex_ptr dA,
magma_int_t ldda,
magmaInt_ptr active,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return info;
}
if ( m == 0 || n == 0 )
return info;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
ccompactactive_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(
m, n, dA, ldda, active);
return info;
}
/* ===================================================================== */
|
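The compaction scheme above is independent of MAGMA's types: every thread owns one row and shifts the surviving columns left in place, while global thread 0 maintains the mask and records the survivor count. A standalone sketch on plain column-major float data (illustrative names, not a MAGMA API):
#include <cuda_runtime.h>
// Keeps the columns whose norm exceeds tol, packing them to the left of A.
// keep[] is updated in place and *n_kept receives the number of survivors.
__global__ void compact_columns_sketch(int m, int n, float *A, int lda,
                                       const float *norms, float tol,
                                       int *keep, int *n_kept)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;   // one thread per row
    if (i >= m) return;
    int kept = 0;
    for (int j = 0; j < n; j++) {
        if (norms[j] > tol && keep[j]) {
            A[i + (size_t)lda * kept] = A[i + (size_t)lda * j];  // shift column left
            kept++;
        } else if (i == 0) {
            keep[j] = 0;                             // retire this column in the mask
        }
    }
    if (i == 0)
        *n_kept = kept;
}
Thread 0's mask update cannot change another thread's decision for the same column, because that decision has already failed the norm test; this is why the original kernel gets away without any synchronisation around the write to active[j].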
54f8d07ffb5fb1b09cd8d913020843230e5f8e61.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../common/book.h"
#include <iostream>
#include <chrono>
#include <sstream>
const long N = 400000;
const int THREADS = 256;
const int BLOCKS = ::ceil(double(N)/double(THREADS));
void dot(int* a, int* b, int* sum) {
int tid = 0;
*sum = 0;
while (tid < N) {
*sum += a[tid]*b[tid];
tid++;
}
}
__global__
void dot_gpu(int* a, int* b, int* c) {
__shared__ float cache[THREADS];
float tmp_sum = 0;
int tid = threadIdx.x + blockIdx.x*blockDim.x;
while (tid < N) {
tmp_sum += a[tid]*b[tid]; // accumulate over the grid-stride loop
tid += blockDim.x*gridDim.x;
}
int cacheIndex = threadIdx.x;
cache[cacheIndex] = tmp_sum;
// wait for all the threads
__syncthreads();
// Now use these threads to sum the elements in parallel
int i = THREADS/2;
while (i != 0) {
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
i = i/2;
__syncthreads();
}
if (cacheIndex == 0)
c[blockIdx.x] = cache[0];
}
typedef std::chrono::high_resolution_clock Time;
typedef std::chrono::milliseconds ms;
typedef std::chrono::duration<float> fsec;
void cpu_test() {
int* a = new int[N];
int* b = new int[N];
int* sum = new int;
for (int i = 0; i < N; i++) {
a[i] = -i;
b[i] = i*i;
}
printf("Starting CPU benchmark...\n");
auto t0 = Time::now();
dot(a,b,sum);
auto t1 = Time::now();
fsec fs = t1 - t0;
std::cout << "CPU took: " << fs.count() << " s\n";
delete[] a;
delete[] b;
delete sum;
}
void gpu_test() {
printf("Starting GPU benchmark...\n");
int* a = new int[N];
int* b = new int[N];
int* c = new int[BLOCKS];
for (int i = 0; i < N; i++) {
a[i] = -i;
b[i] = i*i;
}
auto t_init = Time::now();
int* dev_a, *dev_b, *dev_c;
HANDLE_ERROR(hipMalloc((void**)&dev_a, N*sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&dev_b, N*sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&dev_c, BLOCKS*sizeof(int)));
HANDLE_ERROR(hipMemcpy(dev_a, a, N*sizeof(int), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_b, b, N*sizeof(int), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_c, c, BLOCKS*sizeof(int), hipMemcpyHostToDevice));
// COMPUTE PARTIAL SUM
auto t0 = Time::now();
hipLaunchKernelGGL(( dot_gpu), dim3(BLOCKS),dim3(THREADS), 0, 0, dev_a, dev_b, dev_c);
// TRANSFER
auto t1 = Time::now();
HANDLE_ERROR(hipMemcpy(c, dev_c, BLOCKS*sizeof(int), hipMemcpyDeviceToHost)); // copy back every per-block partial sum
auto t2 = Time::now();
// FINAL SUM
int sum = 0;
for (int i = 0; i < BLOCKS; i++)
sum += c[i];
auto t3 = Time::now();
fsec fs_init = t0 - t_init;
fsec fs = t1 - t0;
fsec fs2 = t2 - t1;
fsec fs3 = t3 - t2;
printf("GPU took %f s (%f to load input onto GPU, %f to compute + %f to retrieve data from device + %f to finalize)", (fs + fs2 + fs3).count(), fs_init.count(), fs.count(), fs2.count(), fs3.count());
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
delete[] a;
delete[] b;
delete[] c;
}
int main(int argc, char** argv) {
std::cout << "----------- Dot product of vectors of size " << N << "-----------" << std::endl;
cpu_test();
printf("##################################\n\n");
int count;
HANDLE_ERROR(hipGetDeviceCount(&count));
for (int i = 0; i < count; i++) {
hipDeviceProp_t prop;
HANDLE_ERROR(hipGetDeviceProperties(&prop, i));
printf("Starting GPU benchmark on device %d with name: %s\n", i, prop.name);
hipSetDevice(i);
gpu_test();
printf("\n###############################\n");
}
return 0;
}
|
54f8d07ffb5fb1b09cd8d913020843230e5f8e61.cu
|
#include "../common/book.h"
#include <iostream>
#include <chrono>
#include <sstream>
const long N = 400000;
const int THREADS = 256;
const int BLOCKS = std::ceil(double(N)/double(THREADS));
void dot(int* a, int* b, int* sum) {
int tid = 0;
*sum = 0;
while (tid < N) {
*sum += a[tid]*b[tid];
tid++;
}
}
__global__
void dot_gpu(int* a, int* b, int* c) {
__shared__ float cache[THREADS];
float tmp_sum = 0;
int tid = threadIdx.x + blockIdx.x*blockDim.x;
while (tid < N) {
tmp_sum += a[tid]*b[tid]; // accumulate over the grid-stride loop
tid += blockDim.x*gridDim.x;
}
int cacheIndex = threadIdx.x;
cache[cacheIndex] = tmp_sum;
// wait for all the threads
__syncthreads();
// Now use these threads to sum the elements in parallel
int i = THREADS/2;
while (i != 0) {
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
i = i/2;
__syncthreads();
}
if (cacheIndex == 0)
c[blockIdx.x] = cache[0];
}
typedef std::chrono::high_resolution_clock Time;
typedef std::chrono::milliseconds ms;
typedef std::chrono::duration<float> fsec;
void cpu_test() {
int* a = new int[N];
int* b = new int[N];
int* sum = new int;
for (int i = 0; i < N; i++) {
a[i] = -i;
b[i] = i*i;
}
printf("Starting CPU benchmark...\n");
auto t0 = Time::now();
dot(a,b,sum);
auto t1 = Time::now();
fsec fs = t1 - t0;
std::cout << "CPU took: " << fs.count() << " s\n";
delete[] a;
delete[] b;
delete sum;
}
void gpu_test() {
printf("Starting GPU benchmark...\n");
int* a = new int[N];
int* b = new int[N];
int* c = new int[BLOCKS];
for (int i = 0; i < N; i++) {
a[i] = -i;
b[i] = i*i;
}
auto t_init = Time::now();
int* dev_a, *dev_b, *dev_c;
HANDLE_ERROR(cudaMalloc((void**)&dev_a, N*sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&dev_b, N*sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&dev_c, BLOCKS*sizeof(int)));
HANDLE_ERROR(cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_c, c, BLOCKS*sizeof(int), cudaMemcpyHostToDevice));
// COMPUTE PARTIAL SUM
auto t0 = Time::now();
dot_gpu<<<BLOCKS,THREADS>>>(dev_a, dev_b, dev_c);
// TRANSFER
auto t1 = Time::now();
HANDLE_ERROR(cudaMemcpy(c, dev_c, BLOCKS*sizeof(int), cudaMemcpyDeviceToHost)); // copy back every per-block partial sum
auto t2 = Time::now();
// FINAL SUM
int sum = 0;
for (int i = 0; i < BLOCKS; i++)
sum += c[i];
auto t3 = Time::now();
fsec fs_init = t0 - t_init;
fsec fs = t1 - t0;
fsec fs2 = t2 - t1;
fsec fs3 = t3 - t2;
printf("GPU took %f s (%f to load input onto GPU, %f to compute + %f to retrieve data from device + %f to finalize)", (fs + fs2 + fs3).count(), fs_init.count(), fs.count(), fs2.count(), fs3.count());
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
delete[] a;
delete[] b;
delete[] c;
}
int main(int argc, char** argv) {
std::cout << "----------- Dot product of vectors of size " << N << "-----------" << std::endl;
cpu_test();
printf("##################################\n\n");
int count;
HANDLE_ERROR(cudaGetDeviceCount(&count));
for (int i = 0; i < count; i++) {
cudaDeviceProp prop;
HANDLE_ERROR(cudaGetDeviceProperties(&prop, i));
printf("Starting GPU benchmark on device %d with name: %s\n", i, prop.name);
cudaSetDevice(i);
gpu_test();
printf("\n###############################\n");
}
return 0;
}
|
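One caveat in gpu_test above: the kernel launch is asynchronous, so taking t1 immediately after it measures only launch overhead, and the subsequent memcpy (which implicitly synchronizes) absorbs the real kernel time into the transfer measurement. A sketch of the corrected timing, using the same API as the .cu variant:
auto t0 = Time::now();
dot_gpu<<<BLOCKS, THREADS>>>(dev_a, dev_b, dev_c);
HANDLE_ERROR(cudaDeviceSynchronize());   // wait for the kernel before stopping the clock
auto t1 = Time::now();                   // t1 - t0 now covers kernel execution
HANDLE_ERROR(cudaMemcpy(c, dev_c, BLOCKS * sizeof(int), cudaMemcpyDeviceToHost));
auto t2 = Time::now();                   // t2 - t1 is the device-to-host copy alone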
ebb3ad506cb0391c95c012893acef936de0c8f5b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "CycleTimer.h"
#include "saxpy.h"
__global__ void
saxpy_kernel(int N, float alpha, float* x, float* y, float* result) {
// compute overall index from position of thread in current block,
// and given the block we are in
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N)
result[index] = alpha * x[index] + y[index];
}
static inline
int getBlocks(long working_set_size, int threadsPerBlock) {
// TODO: implement and use this interface if necessary
}
void
getArrays(int size, float **xarray, float **yarray, float **resultarray) {
// TODO: implement and use this interface if necessary
}
void
freeArrays(float *xarray, float *yarray, float *resultarray) {
// TODO: implement and use this interface if necessary
}
void
saxpyCuda(long total_elems, float alpha, float* xarray, float* yarray, float* resultarray, int partitions) {
const int threadsPerBlock = 512; // change this if necessary
float *device_x;
float *device_y;
float *device_result;
//
// TODO: allocate device memory buffers on the GPU using
// hipMalloc. The starter code issues warnings on build because
// these buffers are used in the call to saxpy_kernel below
// without being initialized.
//
// start timing after allocation of device memory.
double startTime = CycleTimer::currentSeconds();
for (int i=0; i<partitions; i++) {
//
// TODO: copy input arrays to the GPU using hipMemcpy
//
//
// TODO: insert time here to begin timing only the kernel
//
// compute number of blocks and threads per block
// run saxpy_kernel on the GPU
//
// TODO: insert timer here to time only the kernel. Since the
// kernel will run asynchronously with the calling CPU thread, you
// need to call hipDeviceSynchronize() before your timer to
// ensure the kernel running on the GPU has completed. (Otherwise
// you will incorrectly observe that almost no time elapses!)
//
// hipDeviceSynchronize();
hipError_t errCode = hipPeekAtLastError();
if (errCode != hipSuccess) {
fprintf(stderr, "WARNING: A CUDA error occurred: code=%d, %s\n", errCode, hipGetErrorString(errCode));
}
//
// TODO: copy result from GPU using hipMemcpy
//
}
// end timing after result has been copied back into host memory.
// The time elapsed between startTime and endTime is the total
// time to copy data to the GPU, run the kernel, and copy the
// result back to the CPU
double endTime = CycleTimer::currentSeconds();
double overallDuration = endTime - startTime;
//
// TODO free memory buffers on the GPU
//
}
void
printCudaInfo() {
// for fun, just print out some stats on the machine
int deviceCount = 0;
hipError_t err = hipGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
hipDeviceProp_t deviceProps;
hipGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
}
|
ebb3ad506cb0391c95c012893acef936de0c8f5b.cu
|
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "CycleTimer.h"
#include "saxpy.h"
__global__ void
saxpy_kernel(int N, float alpha, float* x, float* y, float* result) {
// compute overall index from position of thread in current block,
// and given the block we are in
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N)
result[index] = alpha * x[index] + y[index];
}
static inline
int getBlocks(long working_set_size, int threadsPerBlock) {
// TODO: implement and use this interface if necessary
}
void
getArrays(int size, float **xarray, float **yarray, float **resultarray) {
// TODO: implement and use this interface if necessary
}
void
freeArrays(float *xarray, float *yarray, float *resultarray) {
// TODO: implement and use this interface if necessary
}
void
saxpyCuda(long total_elems, float alpha, float* xarray, float* yarray, float* resultarray, int partitions) {
const int threadsPerBlock = 512; // change this if necessary
float *device_x;
float *device_y;
float *device_result;
//
// TODO: allocate device memory buffers on the GPU using
// cudaMalloc. The starter code issues warnings on build because
// these buffers are used in the call to saxpy_kernel below
// without being initialized.
//
// start timing after allocation of device memory.
double startTime = CycleTimer::currentSeconds();
for (int i=0; i<partitions; i++) {
//
// TODO: copy input arrays to the GPU using cudaMemcpy
//
//
// TODO: insert time here to begin timing only the kernel
//
// compute number of blocks and threads per block
// run saxpy_kernel on the GPU
//
// TODO: insert timer here to time only the kernel. Since the
// kernel will run asynchronously with the calling CPU thread, you
// need to call cudaDeviceSynchronize() before your timer to
// ensure the kernel running on the GPU has completed. (Otherwise
// you will incorrectly observe that almost no time elapses!)
//
// cudaDeviceSynchronize();
cudaError_t errCode = cudaPeekAtLastError();
if (errCode != cudaSuccess) {
fprintf(stderr, "WARNING: A CUDA error occurred: code=%d, %s\n", errCode, cudaGetErrorString(errCode));
}
//
// TODO: copy result from GPU using cudaMemcpy
//
}
// end timing after result has been copied back into host memory.
// The time elapsed between startTime and endTime is the total
// time to copy data to the GPU, run the kernel, and copy the
// result back to the CPU
double endTime = CycleTimer::currentSeconds();
double overallDuration = endTime - startTime;
//
// TODO free memory buffers on the GPU
//
}
void
printCudaInfo() {
// for fun, just print out some stats on the machine
int deviceCount = 0;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
cudaDeviceProp deviceProps;
cudaGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
}
|
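The file above is starter code with the device-management steps left as TODOs; one minimal way to fill them in is sketched below (CUDA API as in the .cu variant, equal-sized partitions assumed, timing hooks omitted; this is not the course's reference solution):
// Inside saxpyCuda, assuming total_elems is divisible by partitions.
long chunk = total_elems / partitions;
cudaMalloc((void **)&device_x,      chunk * sizeof(float));
cudaMalloc((void **)&device_y,      chunk * sizeof(float));
cudaMalloc((void **)&device_result, chunk * sizeof(float));
for (int i = 0; i < partitions; i++) {
    long off = i * chunk;
    cudaMemcpy(device_x, xarray + off, chunk * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(device_y, yarray + off, chunk * sizeof(float), cudaMemcpyHostToDevice);
    int blocks = (int)((chunk + threadsPerBlock - 1) / threadsPerBlock);
    saxpy_kernel<<<blocks, threadsPerBlock>>>((int)chunk, alpha, device_x, device_y, device_result);
    cudaDeviceSynchronize();             // kernel timers need this barrier before reading the clock
    cudaMemcpy(resultarray + off, device_result, chunk * sizeof(float), cudaMemcpyDeviceToHost);
}
cudaFree(device_x);
cudaFree(device_y);
cudaFree(device_result);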
e01eab0eaf1c0cd27f70a8ccc1cfa74bc546e977.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "convolutional_layer.h"
#include "deconvolutional_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "hip/hip_runtime.h"
}
extern "C" void forward_deconvolutional_layer_gpu(deconvolutional_layer layer, network_state state)
{
int i;
int out_h = deconvolutional_out_height(layer);
int out_w = deconvolutional_out_width(layer);
int size = out_h*out_w;
int m = layer.size*layer.size*layer.n;
int n = layer.h*layer.w;
int k = layer.c;
fill_ongpu(layer.outputs*layer.batch, 0, layer.output_gpu, 1);
for(i = 0; i < layer.batch; ++i){
float *a = layer.weights_gpu;
float *b = state.input + i*layer.c*layer.h*layer.w;
float *c = layer.col_image_gpu;
gemm_ongpu(1,0,m,n,k,1,a,m,b,n,0,c,n);
col2im_ongpu(c, layer.n, out_h, out_w, layer.size, layer.stride, 0, layer.output_gpu+i*layer.n*size);
}
add_bias_gpu(layer.output_gpu, layer.biases_gpu, layer.batch, layer.n, size);
activate_array(layer.output_gpu, layer.batch*layer.n*size, layer.activation);
}
extern "C" void backward_deconvolutional_layer_gpu(deconvolutional_layer layer, network_state state)
{
float alpha = 1./layer.batch;
int out_h = deconvolutional_out_height(layer);
int out_w = deconvolutional_out_width(layer);
int size = out_h*out_w;
int i;
gradient_array(layer.output_gpu, size*layer.n*layer.batch, layer.activation, layer.delta_gpu);
backward_bias(layer.bias_updates_gpu, layer.delta, layer.batch, layer.n, size);
if(state.delta) memset(state.delta, 0, layer.batch*layer.h*layer.w*layer.c*sizeof(float));
for(i = 0; i < layer.batch; ++i){
int m = layer.c;
int n = layer.size*layer.size*layer.n;
int k = layer.h*layer.w;
float *a = state.input + i*m*n;
float *b = layer.col_image_gpu;
float *c = layer.weight_updates_gpu;
im2col_ongpu(layer.delta_gpu + i*layer.n*size, layer.n, out_h, out_w,
layer.size, layer.stride, 0, b);
gemm_ongpu(0,1,m,n,k,alpha,a,k,b,k,1,c,n);
if(state.delta){
int m = layer.c;
int n = layer.h*layer.w;
int k = layer.size*layer.size*layer.n;
float *a = layer.weights_gpu;
float *b = layer.col_image_gpu;
float *c = state.delta + i*n*m;
gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
}
}
}
extern "C" void pull_deconvolutional_layer(deconvolutional_layer layer)
{
cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
}
extern "C" void push_deconvolutional_layer(deconvolutional_layer layer)
{
cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
}
extern "C" void update_deconvolutional_layer_gpu(deconvolutional_layer layer, float learning_rate, float momentum, float decay)
{
int size = layer.size*layer.size*layer.c*layer.n;
axpy_ongpu(layer.n, learning_rate, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1);
axpy_ongpu(size, -decay, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, learning_rate, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
scal_ongpu(size, momentum, layer.weight_updates_gpu, 1);
}
|
e01eab0eaf1c0cd27f70a8ccc1cfa74bc546e977.cu
|
#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "convolutional_layer.h"
#include "deconvolutional_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "cuda.h"
}
extern "C" void forward_deconvolutional_layer_gpu(deconvolutional_layer layer, network_state state)
{
int i;
int out_h = deconvolutional_out_height(layer);
int out_w = deconvolutional_out_width(layer);
int size = out_h*out_w;
int m = layer.size*layer.size*layer.n;
int n = layer.h*layer.w;
int k = layer.c;
fill_ongpu(layer.outputs*layer.batch, 0, layer.output_gpu, 1);
for(i = 0; i < layer.batch; ++i){
float *a = layer.weights_gpu;
float *b = state.input + i*layer.c*layer.h*layer.w;
float *c = layer.col_image_gpu;
gemm_ongpu(1,0,m,n,k,1,a,m,b,n,0,c,n);
col2im_ongpu(c, layer.n, out_h, out_w, layer.size, layer.stride, 0, layer.output_gpu+i*layer.n*size);
}
add_bias_gpu(layer.output_gpu, layer.biases_gpu, layer.batch, layer.n, size);
activate_array(layer.output_gpu, layer.batch*layer.n*size, layer.activation);
}
extern "C" void backward_deconvolutional_layer_gpu(deconvolutional_layer layer, network_state state)
{
float alpha = 1./layer.batch;
int out_h = deconvolutional_out_height(layer);
int out_w = deconvolutional_out_width(layer);
int size = out_h*out_w;
int i;
gradient_array(layer.output_gpu, size*layer.n*layer.batch, layer.activation, layer.delta_gpu);
backward_bias(layer.bias_updates_gpu, layer.delta, layer.batch, layer.n, size);
if(state.delta) memset(state.delta, 0, layer.batch*layer.h*layer.w*layer.c*sizeof(float));
for(i = 0; i < layer.batch; ++i){
int m = layer.c;
int n = layer.size*layer.size*layer.n;
int k = layer.h*layer.w;
float *a = state.input + i*m*n;
float *b = layer.col_image_gpu;
float *c = layer.weight_updates_gpu;
im2col_ongpu(layer.delta_gpu + i*layer.n*size, layer.n, out_h, out_w,
layer.size, layer.stride, 0, b);
gemm_ongpu(0,1,m,n,k,alpha,a,k,b,k,1,c,n);
if(state.delta){
int m = layer.c;
int n = layer.h*layer.w;
int k = layer.size*layer.size*layer.n;
float *a = layer.weights_gpu;
float *b = layer.col_image_gpu;
float *c = state.delta + i*n*m;
gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
}
}
}
extern "C" void pull_deconvolutional_layer(deconvolutional_layer layer)
{
cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
}
extern "C" void push_deconvolutional_layer(deconvolutional_layer layer)
{
cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
}
extern "C" void update_deconvolutional_layer_gpu(deconvolutional_layer layer, float learning_rate, float momentum, float decay)
{
int size = layer.size*layer.size*layer.c*layer.n;
axpy_ongpu(layer.n, learning_rate, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1);
axpy_ongpu(size, -decay, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, learning_rate, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
scal_ongpu(size, momentum, layer.weight_updates_gpu, 1);
}
|
ee8a11b49d2f218f2c9d0d34b95f0919cc4e7798.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <wb.h>
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
#define TILE_WIDTH 16
// Compute C = A * B
__global__ void matrixMultiplyShared(float *A, float *B, float *C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns) {
//@@ Insert code to implement tiled matrix multiplication here
//@@ You have to use shared memory to write this kernel
__shared__ float ds_M[TILE_WIDTH][TILE_WIDTH];
__shared__ float ds_N[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x, by = blockIdx.y,
tx = threadIdx.x, ty = threadIdx.y,
Row = by * TILE_WIDTH + ty,
Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
for (int m = 0; m < (numAColumns - 1) / TILE_WIDTH + 1; ++m) {
if (Row < numARows && m*TILE_WIDTH + tx < numAColumns)
ds_M[ty][tx] = A[Row*numAColumns + m*TILE_WIDTH + tx];
else
ds_M[ty][tx] = 0;
if (Col < numBColumns && m*TILE_WIDTH + ty < numBRows)
ds_N[ty][tx] = B[(m*TILE_WIDTH + ty)*numBColumns + Col];
else
ds_N[ty][tx] = 0;
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k)
Pvalue += ds_M[ty][k] * ds_N[k][tx];
__syncthreads();
}
if (Row < numCRows && Col < numCColumns)
C[Row*numCColumns + Col] = Pvalue;
}
int main(int argc, char **argv) {
wbArg_t args;
float *hostA; // The A matrix
float *hostB; // The B matrix
float *hostC; // The output C matrix
float *deviceA;
float *deviceB;
float *deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set
// this)
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows,
&numAColumns);
hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows,
&numBColumns);
//@@ Set numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
//@@ Allocate the hostC matrix
hostC = (float *)malloc(sizeof(float)* numCRows * numCColumns);
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
hipMalloc(&deviceA, sizeof(float)* numARows * numAColumns);
hipMalloc(&deviceB, sizeof(float)* numBRows * numBColumns);
hipMalloc(&deviceC, sizeof(float)* numCRows * numCColumns);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
hipMemcpy(deviceA, hostA, sizeof(float)* numARows * numAColumns, hipMemcpyHostToDevice);
hipMemcpy(deviceB, hostB, sizeof(float)* numBRows * numBColumns, hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 dimGrid((numCColumns - 1) / TILE_WIDTH + 1, (numCRows - 1) / TILE_WIDTH + 1, 1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
hipLaunchKernelGGL(( matrixMultiplyShared), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceA, deviceB, deviceC,
numARows, numAColumns,
numBRows, numBColumns,
numCRows, numCColumns);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
hipMemcpy(hostC, deviceC, sizeof(float)* numCRows * numCColumns, hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
hipFree(deviceA);
hipFree(deviceB);
hipFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
return 0;
}
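/*
 * The wbCheck macro defined at the top of this file is never actually used.
 * Minimal sketch of how it could guard the allocation and upload of one input
 * matrix (mirrors the calls in main; the helper name is illustrative and it is
 * not wired into the timing sections):
 */
static int allocAndUploadChecked(float **devPtr, const float *hostPtr, size_t bytes) {
  wbCheck(hipMalloc((void **)devPtr, bytes));
  wbCheck(hipMemcpy(*devPtr, hostPtr, bytes, hipMemcpyHostToDevice));
  return 0;
}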
|
ee8a11b49d2f218f2c9d0d34b95f0919cc4e7798.cu
|
#include <wb.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
#define TILE_WIDTH 16
// Compute C = A * B
__global__ void matrixMultiplyShared(float *A, float *B, float *C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns) {
//@@ Insert code to implement tiled matrix multiplication here
//@@ You have to use shared memory to write this kernel
__shared__ float ds_M[TILE_WIDTH][TILE_WIDTH];
__shared__ float ds_N[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x, by = blockIdx.y,
tx = threadIdx.x, ty = threadIdx.y,
Row = by * TILE_WIDTH + ty,
Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
for (int m = 0; m < (numAColumns - 1) / TILE_WIDTH + 1; ++m) {
if (Row < numARows && m*TILE_WIDTH + tx < numAColumns)
ds_M[ty][tx] = A[Row*numAColumns + m*TILE_WIDTH + tx];
else
ds_M[ty][tx] = 0;
if (Col < numBColumns && m*TILE_WIDTH + ty < numBRows)
ds_N[ty][tx] = B[(m*TILE_WIDTH + ty)*numBColumns + Col];
else
ds_N[ty][tx] = 0;
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k)
Pvalue += ds_M[ty][k] * ds_N[k][tx];
__syncthreads();
}
if (Row < numCRows && Col < numCColumns)
C[Row*numCColumns + Col] = Pvalue;
}
int main(int argc, char **argv) {
wbArg_t args;
float *hostA; // The A matrix
float *hostB; // The B matrix
float *hostC; // The output C matrix
float *deviceA;
float *deviceB;
float *deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set
// this)
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows,
&numAColumns);
hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows,
&numBColumns);
//@@ Set numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
//@@ Allocate the hostC matrix
hostC = (float *)malloc(sizeof(float)* numCRows * numCColumns);
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
cudaMalloc(&deviceA, sizeof(float)* numARows * numAColumns);
cudaMalloc(&deviceB, sizeof(float)* numBRows * numBColumns);
cudaMalloc(&deviceC, sizeof(float)* numCRows * numCColumns);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
cudaMemcpy(deviceA, hostA, sizeof(float)* numARows * numAColumns, cudaMemcpyHostToDevice);
cudaMemcpy(deviceB, hostB, sizeof(float)* numBRows * numBColumns, cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 dimGrid((numCColumns - 1) / TILE_WIDTH + 1, (numCRows - 1) / TILE_WIDTH + 1, 1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
matrixMultiplyShared<<<dimGrid, dimBlock>>>(deviceA, deviceB, deviceC,
numARows, numAColumns,
numBRows, numBColumns,
numCRows, numCColumns);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
cudaMemcpy(hostC, deviceC, sizeof(float)* numCRows * numCColumns, cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
return 0;
}
|
fae83f92de965f8b522c028c8e8ce4b08f5af28b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
3D IoU Calculation and Rotated NMS (modified from 2D NMS written by others)
Written by Shaoshuai Shi
All Rights Reserved 2018.
*/
#include <stdio.h>
#define THREADS_PER_BLOCK 16
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
#define DEBUG
const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8;
const float EPS = 1e-8;
struct Point {
float x, y;
__device__ Point() {}
__device__ Point(float _x, float _y){
x = _x, y = _y;
}
__device__ void set(float _x, float _y){
x = _x; y = _y;
}
__device__ Point operator +(const Point &b)const{
return Point(x + b.x, y + b.y);
}
__device__ Point operator -(const Point &b)const{
return Point(x - b.x, y - b.y);
}
};
__device__ inline float cross(const Point &a, const Point &b){
return a.x * b.y - a.y * b.x;
}
__device__ inline float cross(const Point &p1, const Point &p2, const Point &p0){
return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y);
}
__device__ int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2){
int ret = min(p1.x,p2.x) <= max(q1.x,q2.x) &&
min(q1.x,q2.x) <= max(p1.x,p2.x) &&
min(p1.y,p2.y) <= max(q1.y,q2.y) &&
min(q1.y,q2.y) <= max(p1.y,p2.y);
return ret;
}
__device__ inline int check_in_box2d(const float *box, const Point &p){
//params: box (5) [x1, y1, x2, y2, angle]
const float MARGIN = 1e-5;
float center_x = (box[0] + box[2]) / 2;
float center_y = (box[1] + box[3]) / 2;
float angle_cos = cos(box[4]), angle_sin = sin(box[4]); // rotate the point in the opposite direction of box
float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * angle_sin + center_x;
float rot_y = -(p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos + center_y;
//#ifdef DEBUG
// printf("box: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", box[0], box[1], box[2], box[3], box[4]);
// printf("center: (%.3f, %.3f), cossin(%.3f, %.3f), src(%.3f, %.3f), rot(%.3f, %.3f)\n", center_x, center_y,
// angle_cos, angle_sin, p.x, p.y, (p.x - center_x) * angle_cos + (p.y - center_y) * angle_sin + center_x, rot_y);
//#endif
return (rot_x > box[0] - MARGIN && rot_x < box[2] + MARGIN && rot_y > box[1] - MARGIN && rot_y < box[3] + MARGIN);
}
__device__ inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans){
// fast exclusion
if (check_rect_cross(p0, p1, q0, q1) == 0) return 0;
// check cross standing
float s1 = cross(q0, p1, p0);
float s2 = cross(p1, q1, p0);
float s3 = cross(p0, q1, q0);
float s4 = cross(q1, p1, q0);
if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0;
// calculate intersection of two lines
float s5 = cross(q1, p1, p0);
if(fabs(s5 - s1) > EPS){
ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1);
ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1);
}
else{
float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y;
float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y;
float D = a0 * b1 - a1 * b0;
ans.x = (b0 * c1 - b1 * c0) / D;
ans.y = (a1 * c0 - a0 * c1) / D;
}
return 1;
}
__device__ inline void rotate_around_center(const Point ¢er, const float angle_cos, const float angle_sin, Point &p){
float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * angle_sin + center.x;
float new_y = -(p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y;
p.set(new_x, new_y);
}
__device__ inline int point_cmp(const Point &a, const Point &b, const Point ¢er){
return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x);
}
__device__ inline float box_overlap(const float *box_a, const float *box_b){
// params: box_a (5) [x1, y1, x2, y2, angle]
// params: box_b (5) [x1, y1, x2, y2, angle]
float a_x1 = box_a[0], a_y1 = box_a[1], a_x2 = box_a[2], a_y2 = box_a[3], a_angle = -box_a[4];
float b_x1 = box_b[0], b_y1 = box_b[1], b_x2 = box_b[2], b_y2 = box_b[3], b_angle = -box_b[4];
Point center_a((a_x1 + a_x2) / 2, (a_y1 + a_y2) / 2);
Point center_b((b_x1 + b_x2) / 2, (b_y1 + b_y2) / 2);
//#ifdef DEBUG
// printf("a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", a_x1, a_y1, a_x2, a_y2, a_angle,
// b_x1, b_y1, b_x2, b_y2, b_angle);
// printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y, center_b.x, center_b.y);
//#endif
Point box_a_corners[5];
box_a_corners[0].set(a_x1, a_y1);
box_a_corners[1].set(a_x2, a_y1);
box_a_corners[2].set(a_x2, a_y2);
box_a_corners[3].set(a_x1, a_y2);
Point box_b_corners[5];
box_b_corners[0].set(b_x1, b_y1);
box_b_corners[1].set(b_x2, b_y1);
box_b_corners[2].set(b_x2, b_y2);
box_b_corners[3].set(b_x1, b_y2);
// get oriented corners
float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle);
float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle);
for (int k = 0; k < 4; k++){
//#ifdef DEBUG
// printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y);
//#endif
rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]);
rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]);
//#ifdef DEBUG
// printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y);
//#endif
}
box_a_corners[4] = box_a_corners[0];
box_b_corners[4] = box_b_corners[0];
// get intersection of lines
Point cross_points[16];
Point poly_center;
int cnt = 0, flag = 0;
poly_center.set(0, 0);
for (int i = 0; i < 4; i++){
for (int j = 0; j < 4; j++){
flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]);
if (flag){
poly_center = poly_center + cross_points[cnt];
cnt++;
}
}
}
// check corners
for (int k = 0; k < 4; k++){
if (check_in_box2d(box_a, box_b_corners[k])){
poly_center = poly_center + box_b_corners[k];
cross_points[cnt] = box_b_corners[k];
cnt++;
}
if (check_in_box2d(box_b, box_a_corners[k])){
poly_center = poly_center + box_a_corners[k];
cross_points[cnt] = box_a_corners[k];
cnt++;
}
}
poly_center.x /= cnt;
poly_center.y /= cnt;
// sort the points of polygon
Point temp;
for (int j = 0; j < cnt - 1; j++){
for (int i = 0; i < cnt - j - 1; i++){
if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)){
temp = cross_points[i];
cross_points[i] = cross_points[i + 1];
cross_points[i + 1] = temp;
}
}
}
//#ifdef DEBUG
// printf("cnt=%d\n", cnt);
// for (int i = 0; i < cnt; i++){
// printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x, cross_points[i].y);
// }
//#endif
// get the overlap areas
float area = 0;
for (int k = 0; k < cnt - 1; k++){
area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]);
}
return fabs(area) / 2.0;
}
// iou_bev => iou3d
__device__ inline float iou3d(const float *box_a, const float *box_b, const float* box_a_z, const float* box_b_z, bool ignore_height){
// params: box_a (5) [x1, y1, x2, y2, angle]
// params: box_b (5) [x1, y1, x2, y2, angle]
// params: box_a_z (2) [z1min, z1max]
// params: box_b_z (2) [z2min, z2max]
float sa = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1]);
float sb = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1]);
float s_overlap = box_overlap(box_a, box_b);
// compute the height
float intersection_height = fminf(box_a_z[1], box_b_z[1]) - fmaxf(box_a_z[0], box_b_z[0]);
//compute the volume
if (!ignore_height) {
float vol_a = sa * (box_a_z[1] - box_a_z[0]);
float vol_b = sb * (box_b_z[1] - box_b_z[0]);
float vol_overlap = s_overlap * intersection_height;
//#ifdef DEBUG
// printf("sa, sb, s_overlap, vol_a, vol_b, vol_overlap: (%.3f, %.3f, %.3f, %.3f, %.3f, %.3f)\n", sa, sb, s_overlap, vol_a, vol_b, vol_overlap);
//#endif
return vol_overlap / fmaxf(vol_a + vol_b - vol_overlap, EPS);
} else {
return s_overlap / fmaxf(sa + sb, EPS);
}
}
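// Note on the two return paths above: with ignore_height == false this is the
// standard 3D IoU, vol_overlap / (vol_a + vol_b - vol_overlap); with
// ignore_height == true the function returns s_overlap / (sa + sb), i.e. the
// BEV overlap divided by the sum of the two areas rather than by their union.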
__global__ void boxes_iou_3d_kernel(bool ignore_height, const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou){
    // params: num_a : number of boxes in boxes_a
    // params: boxes_a (M, 7), indexed below as [w, l, h, x, y, z, ry] (extents first, then center)
    // params: num_b : number of boxes in boxes_b
    // params: boxes_b (N, 7), indexed below as [w, l, h, x, y, z, ry] (extents first, then center)
const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
if (a_idx >= num_a || b_idx >= num_b){
return;
}
const float * cur_box_a = boxes_a + a_idx * 7;
const float * cur_box_b = boxes_b + b_idx * 7;
float box_a_tmp[5];
float box_b_tmp[5];
float box_a_z_tmp[2];
float box_b_z_tmp[2];
    // boxes indexed as [w, l, h, x, y, z, ry] (extents first, then center)
box_a_tmp[0] = cur_box_a[3] - cur_box_a[0] / 2; // x1,
box_a_tmp[1] = cur_box_a[4] - cur_box_a[1] / 2; // y1
box_a_tmp[2] = cur_box_a[3] + cur_box_a[0] / 2; // x2
box_a_tmp[3] = cur_box_a[4] + cur_box_a[1] / 2; // y2
box_a_tmp[4] = cur_box_a[6]; // ry
box_a_z_tmp[0] = cur_box_a[5] - cur_box_a[2] / 2; // z1min
box_a_z_tmp[1] = cur_box_a[5] + cur_box_a[2] / 2; // z1max
box_b_tmp[0] = cur_box_b[3] - cur_box_b[0] / 2; // x1,
box_b_tmp[1] = cur_box_b[4] - cur_box_b[1] / 2; // y1
box_b_tmp[2] = cur_box_b[3] + cur_box_b[0] / 2; // x2
box_b_tmp[3] = cur_box_b[4] + cur_box_b[1] / 2; // y2
box_b_tmp[4] = cur_box_b[6]; // ry
box_b_z_tmp[0] = cur_box_b[5] - cur_box_b[2] / 2; // z1min
box_b_z_tmp[1] = cur_box_b[5] + cur_box_b[2] / 2; // z1max
float cur_iou_3d = iou3d( &box_a_tmp[0], &box_b_tmp[0], &box_a_z_tmp[0], &box_b_z_tmp[0], ignore_height);
ans_iou[a_idx * num_b + b_idx] = cur_iou_3d;
}
__global__ void nms3d_kernel(const int boxes_num, const float nms_overlap_thresh,
const float *boxes, unsigned long long *mask){
    //params: boxes (N, 7), indexed below as [w, l, h, x, y, z, ry], z-up coordinate system
//params: mask (N, N/THREADS_PER_BLOCK_NMS)
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
__shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 7 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0];
block_boxes[threadIdx.x * 7 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1];
block_boxes[threadIdx.x * 7 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2];
block_boxes[threadIdx.x * 7 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3];
block_boxes[threadIdx.x * 7 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4];
block_boxes[threadIdx.x * 7 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5];
block_boxes[threadIdx.x * 7 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
const float *cur_box = boxes + cur_box_idx * 7;
float cur_box_tmp[5];
float cur_box_z_tmp[2];
cur_box_tmp[0] = cur_box[3] - cur_box[0] / 2; // x1,
cur_box_tmp[1] = cur_box[4] - cur_box[1] / 2; // y1
cur_box_tmp[2] = cur_box[3] + cur_box[0] / 2; // x2
cur_box_tmp[3] = cur_box[4] + cur_box[1] / 2; // y2
cur_box_tmp[4] = cur_box[6]; // ry
cur_box_z_tmp[0] = cur_box[5] - cur_box[2] / 2; // z1min
cur_box_z_tmp[1] = cur_box[5] + cur_box[2] / 2; // z1max
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
const float *block_box_ptr = block_boxes + i * 7;
float block_box_tmp[5];
float block_box_z_tmp[2];
block_box_tmp[0] = block_box_ptr[3] - block_box_ptr[0] / 2; // x1,
block_box_tmp[1] = block_box_ptr[4] - block_box_ptr[1] / 2; // y1
block_box_tmp[2] = block_box_ptr[3] + block_box_ptr[0] / 2; // x2
block_box_tmp[3] = block_box_ptr[4] + block_box_ptr[1] / 2; // y2
block_box_tmp[4] = block_box_ptr[6]; // ry
block_box_z_tmp[0] = block_box_ptr[5] - block_box_ptr[2] / 2; // z1min
block_box_z_tmp[1] = block_box_ptr[5] + block_box_ptr[2] / 2; // z1max
float cur_iou_3d = iou3d(&cur_box_tmp[0], &block_box_tmp[0], &cur_box_z_tmp[0], &block_box_z_tmp[0], false);
if (cur_iou_3d > nms_overlap_thresh){
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void boxesIouGPUKernelLauncher(bool ignore_height, const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou){
dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);  // 16 x 16 = 256 threads per block
hipLaunchKernelGGL(( boxes_iou_3d_kernel), dim3(blocks), dim3(threads), 0, 0, ignore_height, num_a, boxes_a, num_b, boxes_b, ans_iou);
}
void nms3dGPUKernelLauncher(const int boxes_num, const float nms_overlap_thresh, const float *boxes, unsigned long long * mask ){
dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS),
DIVUP(boxes_num, THREADS_PER_BLOCK_NMS));
dim3 threads(THREADS_PER_BLOCK_NMS);
hipLaunchKernelGGL(( nms3d_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes, mask);
}
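/*
 * Sketch of how the bitmask written by nms3d_kernel is typically consumed on
 * the host (kept as a comment since this file only ships the launchers): copy
 * the mask back, then greedily keep each box whose bit has not been set by an
 * earlier kept box. "mask_host" and "keep" are caller-side buffers, not part
 * of the API above.
 *
 *   const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
 *   std::vector<unsigned long long> remv(col_blocks, 0);
 *   int num_kept = 0;
 *   for (int i = 0; i < boxes_num; i++) {
 *       int nblock = i / THREADS_PER_BLOCK_NMS;
 *       int inblock = i % THREADS_PER_BLOCK_NMS;
 *       if (!(remv[nblock] & (1ULL << inblock))) {
 *           keep[num_kept++] = i;
 *           const unsigned long long *p = mask_host + i * col_blocks;
 *           for (int j = nblock; j < col_blocks; j++) remv[j] |= p[j];
 *       }
 *   }
 */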
|
fae83f92de965f8b522c028c8e8ce4b08f5af28b.cu
|
/*
3D IoU Calculation and Rotated NMS (modified from 2D NMS written by others)
Written by Shaoshuai Shi
All Rights Reserved 2018.
*/
#include <stdio.h>
#define THREADS_PER_BLOCK 16
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
#define DEBUG
const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8;
const float EPS = 1e-8;
struct Point {
float x, y;
__device__ Point() {}
__device__ Point(float _x, float _y){
x = _x, y = _y;
}
__device__ void set(float _x, float _y){
x = _x; y = _y;
}
__device__ Point operator +(const Point &b)const{
return Point(x + b.x, y + b.y);
}
__device__ Point operator -(const Point &b)const{
return Point(x - b.x, y - b.y);
}
};
__device__ inline float cross(const Point &a, const Point &b){
return a.x * b.y - a.y * b.x;
}
__device__ inline float cross(const Point &p1, const Point &p2, const Point &p0){
return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y);
}
__device__ int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2){
int ret = min(p1.x,p2.x) <= max(q1.x,q2.x) &&
min(q1.x,q2.x) <= max(p1.x,p2.x) &&
min(p1.y,p2.y) <= max(q1.y,q2.y) &&
min(q1.y,q2.y) <= max(p1.y,p2.y);
return ret;
}
__device__ inline int check_in_box2d(const float *box, const Point &p){
//params: box (5) [x1, y1, x2, y2, angle]
const float MARGIN = 1e-5;
float center_x = (box[0] + box[2]) / 2;
float center_y = (box[1] + box[3]) / 2;
float angle_cos = cos(box[4]), angle_sin = sin(box[4]); // rotate the point in the opposite direction of box
float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * angle_sin + center_x;
float rot_y = -(p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos + center_y;
//#ifdef DEBUG
// printf("box: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", box[0], box[1], box[2], box[3], box[4]);
// printf("center: (%.3f, %.3f), cossin(%.3f, %.3f), src(%.3f, %.3f), rot(%.3f, %.3f)\n", center_x, center_y,
// angle_cos, angle_sin, p.x, p.y, (p.x - center_x) * angle_cos + (p.y - center_y) * angle_sin + center_x, rot_y);
//#endif
return (rot_x > box[0] - MARGIN && rot_x < box[2] + MARGIN && rot_y > box[1] - MARGIN && rot_y < box[3] + MARGIN);
}
__device__ inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans){
// fast exclusion
if (check_rect_cross(p0, p1, q0, q1) == 0) return 0;
// check cross standing
float s1 = cross(q0, p1, p0);
float s2 = cross(p1, q1, p0);
float s3 = cross(p0, q1, q0);
float s4 = cross(q1, p1, q0);
if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0;
// calculate intersection of two lines
float s5 = cross(q1, p1, p0);
if(fabs(s5 - s1) > EPS){
ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1);
ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1);
}
else{
float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y;
float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y;
float D = a0 * b1 - a1 * b0;
ans.x = (b0 * c1 - b1 * c0) / D;
ans.y = (a1 * c0 - a0 * c1) / D;
}
return 1;
}
__device__ inline void rotate_around_center(const Point ¢er, const float angle_cos, const float angle_sin, Point &p){
float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * angle_sin + center.x;
float new_y = -(p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y;
p.set(new_x, new_y);
}
__device__ inline int point_cmp(const Point &a, const Point &b, const Point ¢er){
return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x);
}
__device__ inline float box_overlap(const float *box_a, const float *box_b){
// params: box_a (5) [x1, y1, x2, y2, angle]
// params: box_b (5) [x1, y1, x2, y2, angle]
float a_x1 = box_a[0], a_y1 = box_a[1], a_x2 = box_a[2], a_y2 = box_a[3], a_angle = -box_a[4];
float b_x1 = box_b[0], b_y1 = box_b[1], b_x2 = box_b[2], b_y2 = box_b[3], b_angle = -box_b[4];
Point center_a((a_x1 + a_x2) / 2, (a_y1 + a_y2) / 2);
Point center_b((b_x1 + b_x2) / 2, (b_y1 + b_y2) / 2);
//#ifdef DEBUG
// printf("a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", a_x1, a_y1, a_x2, a_y2, a_angle,
// b_x1, b_y1, b_x2, b_y2, b_angle);
// printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y, center_b.x, center_b.y);
//#endif
Point box_a_corners[5];
box_a_corners[0].set(a_x1, a_y1);
box_a_corners[1].set(a_x2, a_y1);
box_a_corners[2].set(a_x2, a_y2);
box_a_corners[3].set(a_x1, a_y2);
Point box_b_corners[5];
box_b_corners[0].set(b_x1, b_y1);
box_b_corners[1].set(b_x2, b_y1);
box_b_corners[2].set(b_x2, b_y2);
box_b_corners[3].set(b_x1, b_y2);
// get oriented corners
float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle);
float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle);
for (int k = 0; k < 4; k++){
//#ifdef DEBUG
// printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y);
//#endif
rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]);
rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]);
//#ifdef DEBUG
// printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y);
//#endif
}
box_a_corners[4] = box_a_corners[0];
box_b_corners[4] = box_b_corners[0];
// get intersection of lines
Point cross_points[16];
Point poly_center;
int cnt = 0, flag = 0;
poly_center.set(0, 0);
for (int i = 0; i < 4; i++){
for (int j = 0; j < 4; j++){
flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]);
if (flag){
poly_center = poly_center + cross_points[cnt];
cnt++;
}
}
}
// check corners
for (int k = 0; k < 4; k++){
if (check_in_box2d(box_a, box_b_corners[k])){
poly_center = poly_center + box_b_corners[k];
cross_points[cnt] = box_b_corners[k];
cnt++;
}
if (check_in_box2d(box_b, box_a_corners[k])){
poly_center = poly_center + box_a_corners[k];
cross_points[cnt] = box_a_corners[k];
cnt++;
}
}
poly_center.x /= cnt;
poly_center.y /= cnt;
// sort the points of polygon
Point temp;
for (int j = 0; j < cnt - 1; j++){
for (int i = 0; i < cnt - j - 1; i++){
if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)){
temp = cross_points[i];
cross_points[i] = cross_points[i + 1];
cross_points[i + 1] = temp;
}
}
}
//#ifdef DEBUG
// printf("cnt=%d\n", cnt);
// for (int i = 0; i < cnt; i++){
// printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x, cross_points[i].y);
// }
//#endif
// get the overlap areas
float area = 0;
for (int k = 0; k < cnt - 1; k++){
area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]);
}
return fabs(area) / 2.0;
}
// iou_bev => iou3d
__device__ inline float iou3d(const float *box_a, const float *box_b, const float* box_a_z, const float* box_b_z, bool ignore_height){
// params: box_a (5) [x1, y1, x2, y2, angle]
// params: box_b (5) [x1, y1, x2, y2, angle]
// params: box_a_z (2) [z1min, z1max]
// params: box_b_z (2) [z2min, z2max]
float sa = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1]);
float sb = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1]);
float s_overlap = box_overlap(box_a, box_b);
// compute the height
float intersection_height = fminf(box_a_z[1], box_b_z[1]) - fmaxf(box_a_z[0], box_b_z[0]);
//compute the volume
if (!ignore_height) {
float vol_a = sa * (box_a_z[1] - box_a_z[0]);
float vol_b = sb * (box_b_z[1] - box_b_z[0]);
float vol_overlap = s_overlap * intersection_height;
//#ifdef DEBUG
// printf("sa, sb, s_overlap, vol_a, vol_b, vol_overlap: (%.3f, %.3f, %.3f, %.3f, %.3f, %.3f)\n", sa, sb, s_overlap, vol_a, vol_b, vol_overlap);
//#endif
return vol_overlap / fmaxf(vol_a + vol_b - vol_overlap, EPS);
} else {
return s_overlap / fmaxf(sa + sb, EPS);
}
}
__global__ void boxes_iou_3d_kernel(bool ignore_height, const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou){
    // params: num_a : number of boxes in boxes_a
    // params: boxes_a (M, 7), indexed below as [w, l, h, x, y, z, ry] (extents first, then center)
    // params: num_b : number of boxes in boxes_b
    // params: boxes_b (N, 7), indexed below as [w, l, h, x, y, z, ry] (extents first, then center)
const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
if (a_idx >= num_a || b_idx >= num_b){
return;
}
const float * cur_box_a = boxes_a + a_idx * 7;
const float * cur_box_b = boxes_b + b_idx * 7;
float box_a_tmp[5];
float box_b_tmp[5];
float box_a_z_tmp[2];
float box_b_z_tmp[2];
    // boxes indexed as [w, l, h, x, y, z, ry] (extents first, then center)
box_a_tmp[0] = cur_box_a[3] - cur_box_a[0] / 2; // x1,
box_a_tmp[1] = cur_box_a[4] - cur_box_a[1] / 2; // y1
box_a_tmp[2] = cur_box_a[3] + cur_box_a[0] / 2; // x2
box_a_tmp[3] = cur_box_a[4] + cur_box_a[1] / 2; // y2
box_a_tmp[4] = cur_box_a[6]; // ry
box_a_z_tmp[0] = cur_box_a[5] - cur_box_a[2] / 2; // z1min
box_a_z_tmp[1] = cur_box_a[5] + cur_box_a[2] / 2; // z1max
box_b_tmp[0] = cur_box_b[3] - cur_box_b[0] / 2; // x1,
box_b_tmp[1] = cur_box_b[4] - cur_box_b[1] / 2; // y1
box_b_tmp[2] = cur_box_b[3] + cur_box_b[0] / 2; // x2
box_b_tmp[3] = cur_box_b[4] + cur_box_b[1] / 2; // y2
box_b_tmp[4] = cur_box_b[6]; // ry
box_b_z_tmp[0] = cur_box_b[5] - cur_box_b[2] / 2; // z1min
box_b_z_tmp[1] = cur_box_b[5] + cur_box_b[2] / 2; // z1max
float cur_iou_3d = iou3d( &box_a_tmp[0], &box_b_tmp[0], &box_a_z_tmp[0], &box_b_z_tmp[0], ignore_height);
ans_iou[a_idx * num_b + b_idx] = cur_iou_3d;
}
__global__ void nms3d_kernel(const int boxes_num, const float nms_overlap_thresh,
const float *boxes, unsigned long long *mask){
    //params: boxes (N, 7), indexed below as [w, l, h, x, y, z, ry], z-up coordinate system
//params: mask (N, N/THREADS_PER_BLOCK_NMS)
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
__shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 7 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0];
block_boxes[threadIdx.x * 7 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1];
block_boxes[threadIdx.x * 7 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2];
block_boxes[threadIdx.x * 7 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3];
block_boxes[threadIdx.x * 7 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4];
block_boxes[threadIdx.x * 7 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5];
block_boxes[threadIdx.x * 7 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
const float *cur_box = boxes + cur_box_idx * 7;
float cur_box_tmp[5];
float cur_box_z_tmp[2];
cur_box_tmp[0] = cur_box[3] - cur_box[0] / 2; // x1,
cur_box_tmp[1] = cur_box[4] - cur_box[1] / 2; // y1
cur_box_tmp[2] = cur_box[3] + cur_box[0] / 2; // x2
cur_box_tmp[3] = cur_box[4] + cur_box[1] / 2; // y2
cur_box_tmp[4] = cur_box[6]; // ry
cur_box_z_tmp[0] = cur_box[5] - cur_box[2] / 2; // z1min
cur_box_z_tmp[1] = cur_box[5] + cur_box[2] / 2; // z1max
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
const float *block_box_ptr = block_boxes + i * 7;
float block_box_tmp[5];
float block_box_z_tmp[2];
block_box_tmp[0] = block_box_ptr[3] - block_box_ptr[0] / 2; // x1,
block_box_tmp[1] = block_box_ptr[4] - block_box_ptr[1] / 2; // y1
block_box_tmp[2] = block_box_ptr[3] + block_box_ptr[0] / 2; // x2
block_box_tmp[3] = block_box_ptr[4] + block_box_ptr[1] / 2; // y2
block_box_tmp[4] = block_box_ptr[6]; // ry
block_box_z_tmp[0] = block_box_ptr[5] - block_box_ptr[2] / 2; // z1min
block_box_z_tmp[1] = block_box_ptr[5] + block_box_ptr[2] / 2; // z1max
float cur_iou_3d = iou3d(&cur_box_tmp[0], &block_box_tmp[0], &cur_box_z_tmp[0], &block_box_z_tmp[0], false);
if (cur_iou_3d > nms_overlap_thresh){
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void boxesIouGPUKernelLauncher(bool ignore_height, const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou){
dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);  // 16 x 16 = 256 threads per block
boxes_iou_3d_kernel<<<blocks, threads>>>(ignore_height, num_a, boxes_a, num_b, boxes_b, ans_iou);
}
void nms3dGPUKernelLauncher(const int boxes_num, const float nms_overlap_thresh, const float *boxes, unsigned long long * mask ){
dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS),
DIVUP(boxes_num, THREADS_PER_BLOCK_NMS));
dim3 threads(THREADS_PER_BLOCK_NMS);
nms3d_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes, mask);
}
|
d6bfb2b9c3c6faa05e5020ad40e6ed6175571798.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/DistributionTemplates.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <THH/THHGeneral.h>
#include <THH/THHDeviceUtils.cuh>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
namespace at { namespace native {
void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::random_from_to_kernel(iter, range, base, gen);
}
void random_full_64_bits_range_kernel(TensorIteratorBase& iter, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::random_full_64_bits_range_kernel(iter, gen);
}
void random_kernel(TensorIteratorBase& iter, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::random_kernel(iter, gen);
}
REGISTER_DISPATCH(random_from_to_stub, &random_from_to_kernel);
REGISTER_DISPATCH(random_stub, &random_kernel);
REGISTER_DISPATCH(random_full_64_bits_range_stub, &random_full_64_bits_range_kernel);
}} // namespace at::native
|
d6bfb2b9c3c6faa05e5020ad40e6ed6175571798.cu
|
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/DistributionTemplates.h>
#include <curand.h>
#include <curand_kernel.h>
#include <curand_philox4x32_x.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <THC/THCGeneral.h>
#include <THC/THCDeviceUtils.cuh>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
namespace at { namespace native {
void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::random_from_to_kernel(iter, range, base, gen);
}
void random_full_64_bits_range_kernel(TensorIteratorBase& iter, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::random_full_64_bits_range_kernel(iter, gen);
}
void random_kernel(TensorIteratorBase& iter, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::random_kernel(iter, gen);
}
REGISTER_DISPATCH(random_from_to_stub, &random_from_to_kernel);
REGISTER_DISPATCH(random_stub, &random_kernel);
REGISTER_DISPATCH(random_full_64_bits_range_stub, &random_full_64_bits_range_kernel);
}} // namespace at::native
|
96643f603ebfffc6cea10ee59d1040302787e6fe.hip
|
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Reduce.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/SharedReduceOps.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/ReduceOps.h>
namespace at::native {
template <typename scalar_t, typename out_t=scalar_t>
void std_var_kernel_impl(TensorIterator& iter, int32_t correction, bool take_sqrt) {
// reducing unrolling factor to 2 for welford kernel
// This is necessary to lower register usage that leads to register spills.
using accscalar_t = at::acc_type<scalar_t, true>;
using ops_t = WelfordOps<scalar_t, accscalar_t, int32_t, float, thrust::pair<out_t, out_t>>;
gpu_reduce_kernel<scalar_t, out_t, 2>(
iter, ops_t{correction, take_sqrt}, typename ops_t::acc_t{});
}
static void std_var_kernel_cuda(TensorIterator& iter, int64_t correction, bool take_sqrt) {
using limits = std::numeric_limits<int32_t>;
TORCH_CHECK(
correction < limits::max() && correction > limits::min(),
"The correction argument for std and var computation on CUDA must "
"fit within a 32-bit integer, but got ", correction);
const auto input_dtype = iter.input_dtype();
if (input_dtype == kHalf && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
std_var_kernel_impl<at::Half, float>(iter, correction, take_sqrt);
} else if (input_dtype == kBFloat16 && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
std_var_kernel_impl<at::BFloat16, float>(iter, correction, take_sqrt);
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
iter.dtype(), "std_cuda", [&]() {
std_var_kernel_impl<scalar_t>(iter, correction, take_sqrt);
});
}
}
template <typename scalar_t, typename acc_t=scalar_t, typename out_t=scalar_t>
void mean_kernel_impl(TensorIterator& iter) {
// returns acc_t for all non-complex dtypes and returns T for c10::complex<T>
using factor_t = typename c10::scalar_value_type<acc_t>::type;
factor_t factor = static_cast<factor_t>(iter.num_output_elements()) / iter.numel();
gpu_reduce_kernel<scalar_t, out_t>(iter, MeanOps<acc_t, factor_t> {factor});
}
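// Note: "factor" above equals num_output_elements / numel, i.e. the reciprocal
// of the number of input elements reduced into each output; MeanOps scales the
// accumulated sum by this factor to turn it into a mean.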
static void mean_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == kHalf) {
mean_kernel_impl<at::Half, float>(iter);
} else if (iter.dtype(1) == kHalf && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
mean_kernel_impl<at::Half, float, float>(iter);
} else if(iter.dtype() == kBFloat16) {
mean_kernel_impl<at::BFloat16, float>(iter);
} else if (iter.dtype(1) == kBFloat16 && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
mean_kernel_impl<at::BFloat16, float, float>(iter);
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "mean_cuda", [&]() {
mean_kernel_impl<scalar_t>(iter);
});
}
}
REGISTER_DISPATCH(std_var_stub, &std_var_kernel_cuda);
REGISTER_DISPATCH(mean_stub, &mean_kernel_cuda);
} // namespace at::native
|
96643f603ebfffc6cea10ee59d1040302787e6fe.cu
|
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Reduce.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/SharedReduceOps.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/ReduceOps.h>
namespace at::native {
template <typename scalar_t, typename out_t=scalar_t>
void std_var_kernel_impl(TensorIterator& iter, int32_t correction, bool take_sqrt) {
// reducing unrolling factor to 2 for welford kernel
// This is necessary to lower register usage that leads to register spills.
using accscalar_t = at::acc_type<scalar_t, true>;
using ops_t = WelfordOps<scalar_t, accscalar_t, int32_t, float, thrust::pair<out_t, out_t>>;
gpu_reduce_kernel<scalar_t, out_t, 2>(
iter, ops_t{correction, take_sqrt}, typename ops_t::acc_t{});
}
static void std_var_kernel_cuda(TensorIterator& iter, int64_t correction, bool take_sqrt) {
using limits = std::numeric_limits<int32_t>;
TORCH_CHECK(
correction < limits::max() && correction > limits::min(),
"The correction argument for std and var computation on CUDA must "
"fit within a 32-bit integer, but got ", correction);
const auto input_dtype = iter.input_dtype();
if (input_dtype == kHalf && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
std_var_kernel_impl<at::Half, float>(iter, correction, take_sqrt);
} else if (input_dtype == kBFloat16 && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
std_var_kernel_impl<at::BFloat16, float>(iter, correction, take_sqrt);
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
iter.dtype(), "std_cuda", [&]() {
std_var_kernel_impl<scalar_t>(iter, correction, take_sqrt);
});
}
}
template <typename scalar_t, typename acc_t=scalar_t, typename out_t=scalar_t>
void mean_kernel_impl(TensorIterator& iter) {
// returns acc_t for all non-complex dtypes and returns T for c10::complex<T>
using factor_t = typename c10::scalar_value_type<acc_t>::type;
factor_t factor = static_cast<factor_t>(iter.num_output_elements()) / iter.numel();
gpu_reduce_kernel<scalar_t, out_t>(iter, MeanOps<acc_t, factor_t> {factor});
}
static void mean_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == kHalf) {
mean_kernel_impl<at::Half, float>(iter);
} else if (iter.dtype(1) == kHalf && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
mean_kernel_impl<at::Half, float, float>(iter);
} else if(iter.dtype() == kBFloat16) {
mean_kernel_impl<at::BFloat16, float>(iter);
} else if (iter.dtype(1) == kBFloat16 && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
mean_kernel_impl<at::BFloat16, float, float>(iter);
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "mean_cuda", [&]() {
mean_kernel_impl<scalar_t>(iter);
});
}
}
REGISTER_DISPATCH(std_var_stub, &std_var_kernel_cuda);
REGISTER_DISPATCH(mean_stub, &mean_kernel_cuda);
} // namespace at::native
|
315574ec27c63f4046f756fddfb87d0177647203.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define PI 3.1415926536f
/*
* Paint a 2D texture with a moving red/green hatch pattern on a
* strobing blue background. Note that this kernel reads to and
* writes from the texture, hence why this texture was not mapped
* as WriteDiscard.
*/
__global__ void cuda_kernel_texture_2d(unsigned char* surface, int width, int height, size_t pitch, float t)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
float* pixel;
// in the case where, due to quantization into grids, we have
// more threads than pixels, skip the threads which don't
// correspond to valid pixels
if (x >= width || y >= height) return;
// get a pointer to the pixel at (x,y)
pixel = (float*)(surface + y * pitch) + 4 * x;
// populate it
float value_x = 0.5f + 0.5f * cos(t + 10.0f * ((2.0f * x) / width - 1.0f));
float value_y = 0.5f + 0.5f * cos(t + 10.0f * ((2.0f * y) / height - 1.0f));
pixel[0] = 0.5 * pixel[0] + 0.5 * pow(value_x, 3.0f); // red
pixel[1] = 0.5 * pixel[1] + 0.5 * pow(value_y, 3.0f); // green
pixel[2] = 0.5f + 0.5f * cos(t); // blue
pixel[3] = 1; // alpha
}
extern "C"
void cuda_texture_2d(void* surface, int width, int height, size_t pitch, float t)
{
hipError_t error = hipSuccess;
dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads
dim3 Dg = dim3((width + Db.x - 1) / Db.x, (height + Db.y - 1) / Db.y);
    hipLaunchKernelGGL(( cuda_kernel_texture_2d), dim3(Dg), dim3(Db), 0, 0, (unsigned char*)surface, width, height, pitch, t);
error = hipGetLastError();
if (error != hipSuccess)
{
printf("cuda_kernel_texture_2d() failed to launch error = %d\n", error);
}
}
/*
* Paint a 3D texture with a gradient in X (blue) and Z (green), and have every
* other Z slice have full red.
*/
__global__ void cuda_kernel_texture_3d(unsigned char* surface, int width, int height, int depth, size_t pitch, size_t pitchSlice, float t)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// in the case where, due to quantization into grids, we have
// more threads than pixels, skip the threads which don't
// correspond to valid pixels
if (x >= width || y >= height) return;
// walk across the Z slices of this texture. it should be noted that
// this is far from optimal data access.
for (int z = 0; z < depth; ++z)
{
// get a pointer to this pixel
unsigned char* pixel = surface + z * pitchSlice + y * pitch + 4 * x;
pixel[0] = (unsigned char)(255.f * (0.5f + 0.5f * cos(t + (x * x + y * y + z * z) * 0.0001f * 3.14f))); // red
pixel[1] = (unsigned char)(255.f * (0.5f + 0.5f * sin(t + (x * x + y * y + z * z) * 0.0001f * 3.14f))); // green
pixel[2] = (unsigned char)0; // blue
pixel[3] = 255; // alpha
}
}
extern "C"
void cuda_texture_3d(void* surface, int width, int height, int depth, size_t pitch, size_t pitchSlice, float t)
{
hipError_t error = hipSuccess;
dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads
dim3 Dg = dim3((width + Db.x - 1) / Db.x, (height + Db.y - 1) / Db.y);
    hipLaunchKernelGGL(( cuda_kernel_texture_3d), dim3(Dg), dim3(Db), 0, 0, (unsigned char*)surface, width, height, depth, pitch, pitchSlice, t);
error = hipGetLastError();
if (error != hipSuccess)
{
printf("cuda_kernel_texture_3d() failed to launch error = %d\n", error);
}
}
/*
* Paint a 2D surface with a moving bulls-eye pattern. The "face" parameter selects
* between 6 different colors to use. We will use a different color on each face of a
* cube map.
*/
__global__ void cuda_kernel_texture_cube(char* surface, int width, int height, size_t pitch, int face, float t)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned char* pixel;
// in the case where, due to quantization into grids, we have
// more threads than pixels, skip the threads which don't
// correspond to valid pixels
if (x >= width || y >= height) return;
// get a pointer to this pixel
pixel = (unsigned char*)(surface + y * pitch) + 4 * x;
// populate it
float theta_x = (2.0f * x) / width - 1.0f;
float theta_y = (2.0f * y) / height - 1.0f;
float theta = 2.0f * PI * sqrt(theta_x * theta_x + theta_y * theta_y);
unsigned char value = 255 * (0.6f + 0.4f * cos(theta + t));
pixel[3] = 255; // alpha
if (face % 2)
{
pixel[0] = // blue
pixel[1] = // green
pixel[2] = 0.5; // red
pixel[face / 2] = value;
}
else
{
pixel[0] = // blue
pixel[1] = // green
pixel[2] = value; // red
pixel[face / 2] = 0.5;
}
}
extern "C"
void cuda_texture_cube(void* surface, int width, int height, size_t pitch, int face, float t)
{
hipError_t error = hipSuccess;
dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads
dim3 Dg = dim3((width + Db.x - 1) / Db.x, (height + Db.y - 1) / Db.y);
    hipLaunchKernelGGL(( cuda_kernel_texture_cube), dim3(Dg), dim3(Db), 0, 0, (char*)surface, width, height, pitch, face, t);
error = hipGetLastError();
if (error != hipSuccess)
{
printf("cuda_kernel_texture_cube() failed to launch error = %d\n", error);
}
}
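/*
 * Minimal standalone sketch of driving cuda_texture_2d above without a mapped
 * D3D surface: allocate a pitched RGBA float buffer, clear it, and invoke the
 * launcher. The function name and the 256x256 size are illustrative only.
 */
extern "C"
void cuda_texture_2d_selftest(float t)
{
    void *surface = NULL;
    size_t pitch = 0;
    const int width = 256, height = 256;
    if (hipMallocPitch(&surface, &pitch, width * 4 * sizeof(float), height) != hipSuccess)
        return;
    hipMemset2D(surface, pitch, 0, width * 4 * sizeof(float), height); // start from a black image
    cuda_texture_2d(surface, width, height, pitch, t);
    hipFree(surface);
}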
|
315574ec27c63f4046f756fddfb87d0177647203.cu
|
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define PI 3.1415926536f
/*
* Paint a 2D texture with a moving red/green hatch pattern on a
* strobing blue background. Note that this kernel reads to and
* writes from the texture, hence why this texture was not mapped
* as WriteDiscard.
*/
__global__ void cuda_kernel_texture_2d(unsigned char* surface, int width, int height, size_t pitch, float t)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
float* pixel;
// in the case where, due to quantization into grids, we have
// more threads than pixels, skip the threads which don't
// correspond to valid pixels
if (x >= width || y >= height) return;
// get a pointer to the pixel at (x,y)
pixel = (float*)(surface + y * pitch) + 4 * x;
// populate it
float value_x = 0.5f + 0.5f * cos(t + 10.0f * ((2.0f * x) / width - 1.0f));
float value_y = 0.5f + 0.5f * cos(t + 10.0f * ((2.0f * y) / height - 1.0f));
pixel[0] = 0.5 * pixel[0] + 0.5 * pow(value_x, 3.0f); // red
pixel[1] = 0.5 * pixel[1] + 0.5 * pow(value_y, 3.0f); // green
pixel[2] = 0.5f + 0.5f * cos(t); // blue
pixel[3] = 1; // alpha
}
extern "C"
void cuda_texture_2d(void* surface, int width, int height, size_t pitch, float t)
{
cudaError_t error = cudaSuccess;
dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads
dim3 Dg = dim3((width + Db.x - 1) / Db.x, (height + Db.y - 1) / Db.y);
cuda_kernel_texture_2d << <Dg, Db >> > ((unsigned char*)surface, width, height, pitch, t);
error = cudaGetLastError();
if (error != cudaSuccess)
{
printf("cuda_kernel_texture_2d() failed to launch error = %d\n", error);
}
}
/*
* Paint a 3D texture with a gradient in X (blue) and Z (green), and have every
* other Z slice have full red.
*/
__global__ void cuda_kernel_texture_3d(unsigned char* surface, int width, int height, int depth, size_t pitch, size_t pitchSlice, float t)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// in the case where, due to quantization into grids, we have
// more threads than pixels, skip the threads which don't
// correspond to valid pixels
if (x >= width || y >= height) return;
// walk across the Z slices of this texture. it should be noted that
// this is far from optimal data access.
for (int z = 0; z < depth; ++z)
{
// get a pointer to this pixel
unsigned char* pixel = surface + z * pitchSlice + y * pitch + 4 * x;
pixel[0] = (unsigned char)(255.f * (0.5f + 0.5f * cos(t + (x * x + y * y + z * z) * 0.0001f * 3.14f))); // red
pixel[1] = (unsigned char)(255.f * (0.5f + 0.5f * sin(t + (x * x + y * y + z * z) * 0.0001f * 3.14f))); // green
pixel[2] = (unsigned char)0; // blue
pixel[3] = 255; // alpha
}
}
extern "C"
void cuda_texture_3d(void* surface, int width, int height, int depth, size_t pitch, size_t pitchSlice, float t)
{
cudaError_t error = cudaSuccess;
dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads
dim3 Dg = dim3((width + Db.x - 1) / Db.x, (height + Db.y - 1) / Db.y);
cuda_kernel_texture_3d << <Dg, Db >> > ((unsigned char*)surface, width, height, depth, pitch, pitchSlice, t);
error = cudaGetLastError();
if (error != cudaSuccess)
{
printf("cuda_kernel_texture_3d() failed to launch error = %d\n", error);
}
}
/*
* Paint a 2D surface with a moving bulls-eye pattern. The "face" parameter selects
* between 6 different colors to use. We will use a different color on each face of a
* cube map.
*/
__global__ void cuda_kernel_texture_cube(char* surface, int width, int height, size_t pitch, int face, float t)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned char* pixel;
// in the case where, due to quantization into grids, we have
// more threads than pixels, skip the threads which don't
// correspond to valid pixels
if (x >= width || y >= height) return;
// get a pointer to this pixel
pixel = (unsigned char*)(surface + y * pitch) + 4 * x;
// populate it
float theta_x = (2.0f * x) / width - 1.0f;
float theta_y = (2.0f * y) / height - 1.0f;
float theta = 2.0f * PI * sqrt(theta_x * theta_x + theta_y * theta_y);
unsigned char value = 255 * (0.6f + 0.4f * cos(theta + t));
pixel[3] = 255; // alpha
if (face % 2)
{
pixel[0] = // blue
pixel[1] = // green
pixel[2] = 0.5; // red
pixel[face / 2] = value;
}
else
{
pixel[0] = // blue
pixel[1] = // green
pixel[2] = value; // red
pixel[face / 2] = 0.5;
}
}
extern "C"
void cuda_texture_cube(void* surface, int width, int height, size_t pitch, int face, float t)
{
cudaError_t error = cudaSuccess;
dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads
dim3 Dg = dim3((width + Db.x - 1) / Db.x, (height + Db.y - 1) / Db.y);
cuda_kernel_texture_cube << <Dg, Db >> > ((char*)surface, width, height, pitch, face, t);
error = cudaGetLastError();
if (error != cudaSuccess)
{
printf("cuda_kernel_texture_cube() failed to launch error = %d\n", error);
}
}
|
4683a513ce36d09b9de77899f369cd9a91b4b451.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/adam_kernel.h"
#include <math.h> // for sqrt in CPU and CUDA
#include <vector>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/funcs/adam_functors.h"
#include "paddle/phi/kernels/funcs/for_range.h"
namespace phi {
template <typename T, typename MT>
__global__ void AdamKernelREG(MT beta1,
MT beta2,
MT epsilon,
MT beta1_pow_,
MT beta2_pow_,
const MT* moment1,
MT* moment1_out,
const MT* moment2,
MT* moment2_out,
const MT* lr_,
const T* grad,
const T* param,
T* param_out,
const MT* master_param,
MT* master_param_out,
int ndim) {
MT lr = *lr_;
MT beta1_pow = beta1_pow_;
MT beta2_pow = beta2_pow_;
int id = blockIdx.x * blockDim.x + threadIdx.x;
for (; id < ndim; id += gridDim.x * blockDim.x) {
MT p = master_param ? master_param[id] : static_cast<MT>(param[id]);
MT g = static_cast<MT>(grad[id]);
MT mom1 = static_cast<MT>(moment1[id]);
MT mom2 = static_cast<MT>(moment2[id]);
mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g;
mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g;
MT denom = (sqrt(mom2) / sqrt(static_cast<MT>(1.0) - beta2_pow)) + epsilon;
p += (mom1 / denom) * (-(lr / (static_cast<MT>(1.0) - beta1_pow)));
moment1_out[id] = mom1;
moment2_out[id] = mom2;
param_out[id] = static_cast<T>(p);
if (master_param_out) {
master_param_out[id] = p;
}
}
}
template <typename T, typename MT>
__global__ void AdamKernelMEM(MT beta1,
MT beta2,
MT epsilon,
const MT* beta1_pow_,
const MT* beta2_pow_,
const MT* moment1,
MT* moment1_out,
const MT* moment2,
MT* moment2_out,
const MT* lr_,
const T* grad,
const T* param,
T* param_out,
const MT* master_param,
MT* master_param_out,
int ndim) {
MT lr = *lr_;
MT beta1_pow = *beta1_pow_;
MT beta2_pow = *beta2_pow_;
int id = blockIdx.x * blockDim.x + threadIdx.x;
for (; id < ndim; id += gridDim.x * blockDim.x) {
MT p = master_param ? master_param[id] : static_cast<MT>(param[id]);
MT g = static_cast<MT>(grad[id]);
MT mom1 = static_cast<MT>(moment1[id]);
MT mom2 = static_cast<MT>(moment2[id]);
mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g;
mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g;
MT denom = (sqrt(mom2) / sqrt(static_cast<MT>(1.0) - beta2_pow)) + epsilon;
p += (mom1 / denom) * (-(lr / (static_cast<MT>(1.0) - beta1_pow)));
moment1_out[id] = mom1;
moment2_out[id] = mom2;
param_out[id] = static_cast<T>(p);
if (master_param_out) {
master_param_out[id] = p;
}
}
}
template <typename T>
__global__ void UpdateBetaPow(T beta1,
T beta2,
const T* beta1_pow_,
const T* beta2_pow_,
T* beta1_pow_out,
T* beta2_pow_out) {
*beta1_pow_out = beta1 * beta1_pow_[0];
*beta2_pow_out = beta2 * beta2_pow_[0];
}
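/*
 * For reference, AdamKernelREG / AdamKernelMEM above implement the standard
 * per-element Adam update with the bias corrections folded into lr and the
 * denominator:
 *   m_t = beta1 * m_{t-1} + (1 - beta1) * g
 *   v_t = beta2 * v_{t-1} + (1 - beta2) * g^2
 *   p_t = p_{t-1} - lr / (1 - beta1^t) * m_t / (sqrt(v_t / (1 - beta2^t)) + epsilon)
 * UpdateBetaPow advances beta1^t / beta2^t on the device when the pow tensors
 * live in GPU memory; otherwise AdamDenseKernel below updates them on the host.
 */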
template <typename T, typename Context>
void AdamDenseKernel(const Context& dev_ctx,
const DenseTensor& param,
const DenseTensor& grad,
const DenseTensor& learning_rate,
const DenseTensor& moment1,
const DenseTensor& moment2,
const DenseTensor& beta1_pow,
const DenseTensor& beta2_pow,
const paddle::optional<DenseTensor>& master_param,
const paddle::optional<DenseTensor>& skip_update,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
bool lazy_mode,
int64_t min_row_size_to_use_multithread,
bool multi_precision,
bool use_global_beta_pow,
DenseTensor* param_out,
DenseTensor* moment1_out,
DenseTensor* moment2_out,
DenseTensor* beta1_pow_out,
DenseTensor* beta2_pow_out,
DenseTensor* master_param_outs) {
using MPDType = typename phi::dtype::MPTypeTrait<T>::Type;
VLOG(4) << "use_global_beta_pow:" << use_global_beta_pow;
bool skip_update_ = false;
if (skip_update.is_initialized()) {
PADDLE_ENFORCE_EQ(
skip_update->numel(),
1,
errors::InvalidArgument("Input(SkipUpdate) size must be 1, but get %d",
skip_update->numel()));
std::vector<bool> skip_update_vec;
phi::TensorToVector(*skip_update, dev_ctx, &skip_update_vec);
skip_update_ = skip_update_vec[0];
}
// skip_update=true, just copy input to output, and TensorCopy will call
// mutable_data
if (skip_update_) {
VLOG(4) << "Adam skip update";
phi::Copy(dev_ctx, param, dev_ctx.GetPlace(), false, param_out);
phi::Copy(dev_ctx, moment1, dev_ctx.GetPlace(), false, moment1_out);
phi::Copy(dev_ctx, moment2, dev_ctx.GetPlace(), false, moment2_out);
if (!use_global_beta_pow) {
phi::Copy(dev_ctx, beta1_pow, beta1_pow.place(), false, beta1_pow_out);
phi::Copy(dev_ctx, beta2_pow, beta2_pow.place(), false, beta2_pow_out);
}
return;
}
MPDType beta1_ = beta1.to<MPDType>();
MPDType beta2_ = beta2.to<MPDType>();
MPDType epsilon_ = epsilon.to<MPDType>();
VLOG(3) << "beta1_pow.numel() : " << beta1_pow.numel()
<< "beta2_pow.numel() : " << beta2_pow.numel();
VLOG(3) << "param.numel(): " << param.numel();
PADDLE_ENFORCE_EQ(
beta1_pow_out->numel(),
1,
errors::InvalidArgument("beta1 pow output size should be 1, but received "
"value is:%d.",
beta1_pow_out->numel()));
PADDLE_ENFORCE_EQ(
beta2_pow_out->numel(),
1,
errors::InvalidArgument("beta2 pow output size should be 1, but received "
"value is:%d.",
beta2_pow_out->numel()));
const MPDType* master_in_data =
multi_precision ? master_param->data<MPDType>() : nullptr;
MPDType* master_out_data =
multi_precision ? dev_ctx.template Alloc<MPDType>(master_param_outs)
: nullptr;
// update param and moment
int threads = 512;
int blocks = (param.numel() + threads - 1) / threads;
if (beta1_pow.place() == CPUPlace() && beta2_pow.place() == CPUPlace()) {
// Compute with betapow in REG
hipLaunchKernelGGL(( AdamKernelREG<T, MPDType>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
beta1_,
beta2_,
epsilon_,
*beta1_pow.data<MPDType>(),
*beta2_pow.data<MPDType>(),
moment1.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment1_out),
moment2.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment2_out),
learning_rate.data<MPDType>(),
grad.data<T>(),
param.data<T>(),
dev_ctx.template Alloc<T>(param_out),
master_in_data,
master_out_data,
param.numel());
if (!use_global_beta_pow) {
// Cpu update
dev_ctx.template HostAlloc<MPDType>(beta1_pow_out)[0] =
beta1_ * beta1_pow.data<MPDType>()[0];
dev_ctx.template HostAlloc<MPDType>(beta2_pow_out)[0] =
beta2_ * beta2_pow.data<MPDType>()[0];
}
} else {
hipLaunchKernelGGL(( AdamKernelMEM<T, MPDType>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
beta1_,
beta2_,
epsilon_,
beta1_pow.data<MPDType>(),
beta2_pow.data<MPDType>(),
moment1.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment1_out),
moment2.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment2_out),
learning_rate.data<MPDType>(),
grad.data<T>(),
param.data<T>(),
dev_ctx.template Alloc<T>(param_out),
master_in_data,
master_out_data,
param.numel());
if (!use_global_beta_pow) {
// Update with gpu
hipLaunchKernelGGL(( UpdateBetaPow<MPDType>), dim3(1), dim3(1), 0, dev_ctx.stream(),
beta1_,
beta2_,
beta1_pow.data<MPDType>(),
beta2_pow.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(beta1_pow_out),
dev_ctx.template Alloc<MPDType>(beta2_pow_out));
}
}
}
template <typename T, typename Context>
void MergedAdamKernel(
const Context& dev_ctx,
const std::vector<const DenseTensor*>& param,
const std::vector<const DenseTensor*>& grad,
const std::vector<const DenseTensor*>& learning_rate,
const std::vector<const DenseTensor*>& moment1,
const std::vector<const DenseTensor*>& moment2,
const std::vector<const DenseTensor*>& beta1_pow,
const std::vector<const DenseTensor*>& beta2_pow,
const paddle::optional<std::vector<const DenseTensor*>>& master_param,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
bool multi_precision,
bool use_global_beta_pow,
std::vector<DenseTensor*> param_out,
std::vector<DenseTensor*> moment1_out,
std::vector<DenseTensor*> moment2_out,
std::vector<DenseTensor*> beta1_pow_out,
std::vector<DenseTensor*> beta2_pow_out,
std::vector<DenseTensor*> master_param_out) {
using MPDType = typename phi::dtype::MPTypeTrait<T>::Type;
VLOG(4) << "use_global_beta_pow:" << use_global_beta_pow;
MPDType beta1_ = beta1.to<MPDType>();
MPDType beta2_ = beta2.to<MPDType>();
MPDType epsilon_ = epsilon.to<MPDType>();
size_t param_num = param.size();
for (size_t idx = 0; idx < param_num; idx++) {
const MPDType* master_in_data =
multi_precision ? master_param.get()[idx]->data<MPDType>() : nullptr;
MPDType* master_out_data =
multi_precision ? dev_ctx.template Alloc<MPDType>(master_param_out[idx])
: nullptr;
// update param and moment
int threads = 512;
int blocks = (param[idx]->numel() + threads - 1) / threads;
if (beta1_pow[idx]->place() == CPUPlace() &&
beta2_pow[idx]->place() == CPUPlace()) {
// Compute with betapow in REG
hipLaunchKernelGGL(( AdamKernelREG<T, MPDType>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
beta1_,
beta2_,
epsilon_,
*beta1_pow[idx]->data<MPDType>(),
*beta2_pow[idx]->data<MPDType>(),
moment1[idx]->data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment1_out[idx]),
moment2[idx]->data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment2_out[idx]),
learning_rate[idx]->data<MPDType>(),
grad[idx]->data<T>(),
param[idx]->data<T>(),
dev_ctx.template Alloc<T>(param_out[idx]),
master_in_data,
master_out_data,
param[idx]->numel());
if (!use_global_beta_pow) {
// Cpu update
dev_ctx.template HostAlloc<MPDType>(beta1_pow_out[idx])[0] =
beta1_ * beta1_pow[idx]->data<MPDType>()[0];
dev_ctx.template HostAlloc<MPDType>(beta2_pow_out[idx])[0] =
beta2_ * beta2_pow[idx]->data<MPDType>()[0];
}
} else {
hipLaunchKernelGGL(( AdamKernelMEM<T, MPDType>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
beta1_,
beta2_,
epsilon_,
beta1_pow[idx]->data<MPDType>(),
beta2_pow[idx]->data<MPDType>(),
moment1[idx]->data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment1_out[idx]),
moment2[idx]->data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment2_out[idx]),
learning_rate[idx]->data<MPDType>(),
grad[idx]->data<T>(),
param[idx]->data<T>(),
dev_ctx.template Alloc<T>(param_out[idx]),
master_in_data,
master_out_data,
param[idx]->numel());
if (!use_global_beta_pow) {
// Update with gpu
hipLaunchKernelGGL(( UpdateBetaPow<MPDType>), dim3(1), dim3(1), 0, dev_ctx.stream(),
beta1_,
beta2_,
beta1_pow[idx]->data<MPDType>(),
beta2_pow[idx]->data<MPDType>(),
dev_ctx.template Alloc<MPDType>(beta1_pow_out[idx]),
dev_ctx.template Alloc<MPDType>(beta2_pow_out[idx]));
}
}
}
}
} // namespace phi
PD_REGISTER_KERNEL(adam,
GPU,
ALL_LAYOUT,
phi::AdamDenseKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {
// Skip beta1_pow, beta2_pow, skip_update data transform
kernel->InputAt(5).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(6).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(8).SetBackend(phi::Backend::ALL_BACKEND);
if (kernel_key.dtype() == phi::DataType::FLOAT16 ||
kernel_key.dtype() == phi::DataType::BFLOAT16) {
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(3).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(4).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(5).SetDataType(phi::DataType::FLOAT32);
}
kernel->OutputAt(3).SetBackend(phi::Backend::UNDEFINED);
kernel->OutputAt(4).SetBackend(phi::Backend::UNDEFINED);
}
PD_REGISTER_KERNEL(merged_adam,
GPU,
ALL_LAYOUT,
phi::MergedAdamKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {
// Skip beta1_pow, beta2_pow data transform
kernel->InputAt(5).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(6).SetBackend(phi::Backend::ALL_BACKEND);
if (kernel_key.dtype() == phi::DataType::FLOAT16 ||
kernel_key.dtype() == phi::DataType::BFLOAT16) {
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(3).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(4).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(5).SetDataType(phi::DataType::FLOAT32);
}
kernel->OutputAt(3).SetBackend(phi::Backend::UNDEFINED);
kernel->OutputAt(4).SetBackend(phi::Backend::UNDEFINED);
}
|
4683a513ce36d09b9de77899f369cd9a91b4b451.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/adam_kernel.h"
#include <math.h> // for sqrt in CPU and CUDA
#include <vector>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/funcs/adam_functors.h"
#include "paddle/phi/kernels/funcs/for_range.h"
namespace phi {
template <typename T, typename MT>
__global__ void AdamKernelREG(MT beta1,
MT beta2,
MT epsilon,
MT beta1_pow_,
MT beta2_pow_,
const MT* moment1,
MT* moment1_out,
const MT* moment2,
MT* moment2_out,
const MT* lr_,
const T* grad,
const T* param,
T* param_out,
const MT* master_param,
MT* master_param_out,
int ndim) {
MT lr = *lr_;
MT beta1_pow = beta1_pow_;
MT beta2_pow = beta2_pow_;
int id = blockIdx.x * blockDim.x + threadIdx.x;
for (; id < ndim; id += gridDim.x * blockDim.x) {
MT p = master_param ? master_param[id] : static_cast<MT>(param[id]);
MT g = static_cast<MT>(grad[id]);
MT mom1 = static_cast<MT>(moment1[id]);
MT mom2 = static_cast<MT>(moment2[id]);
mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g;
mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g;
MT denom = (sqrt(mom2) / sqrt(static_cast<MT>(1.0) - beta2_pow)) + epsilon;
p += (mom1 / denom) * (-(lr / (static_cast<MT>(1.0) - beta1_pow)));
moment1_out[id] = mom1;
moment2_out[id] = mom2;
param_out[id] = static_cast<T>(p);
if (master_param_out) {
master_param_out[id] = p;
}
}
}
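// Minimal host-side sketch of the per-element update performed by
// AdamKernelREG above, handy for checking the math in isolation. The helper
// name and its free-function form are assumptions for illustration only; the
// real kernels operate on whole tensors on the device.
template <typename MT>
inline MT AdamUpdateOneElementSketch(MT p,
                                     MT g,
                                     MT* mom1,
                                     MT* mom2,
                                     MT beta1,
                                     MT beta2,
                                     MT epsilon,
                                     MT beta1_pow,
                                     MT beta2_pow,
                                     MT lr) {
  *mom1 = beta1 * (*mom1) + (static_cast<MT>(1.0) - beta1) * g;
  *mom2 = beta2 * (*mom2) + (static_cast<MT>(1.0) - beta2) * g * g;
  MT denom = (sqrt(*mom2) / sqrt(static_cast<MT>(1.0) - beta2_pow)) + epsilon;
  // Bias-corrected step: lr is rescaled by 1 - beta1_pow, mom2 by 1 - beta2_pow.
  return p + (*mom1 / denom) * (-(lr / (static_cast<MT>(1.0) - beta1_pow)));
}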
template <typename T, typename MT>
__global__ void AdamKernelMEM(MT beta1,
MT beta2,
MT epsilon,
const MT* beta1_pow_,
const MT* beta2_pow_,
const MT* moment1,
MT* moment1_out,
const MT* moment2,
MT* moment2_out,
const MT* lr_,
const T* grad,
const T* param,
T* param_out,
const MT* master_param,
MT* master_param_out,
int ndim) {
MT lr = *lr_;
MT beta1_pow = *beta1_pow_;
MT beta2_pow = *beta2_pow_;
int id = blockIdx.x * blockDim.x + threadIdx.x;
for (; id < ndim; id += gridDim.x * blockDim.x) {
MT p = master_param ? master_param[id] : static_cast<MT>(param[id]);
MT g = static_cast<MT>(grad[id]);
MT mom1 = static_cast<MT>(moment1[id]);
MT mom2 = static_cast<MT>(moment2[id]);
mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g;
mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g;
MT denom = (sqrt(mom2) / sqrt(static_cast<MT>(1.0) - beta2_pow)) + epsilon;
p += (mom1 / denom) * (-(lr / (static_cast<MT>(1.0) - beta1_pow)));
moment1_out[id] = mom1;
moment2_out[id] = mom2;
param_out[id] = static_cast<T>(p);
if (master_param_out) {
master_param_out[id] = p;
}
}
}
template <typename T>
__global__ void UpdateBetaPow(T beta1,
T beta2,
const T* beta1_pow_,
const T* beta2_pow_,
T* beta1_pow_out,
T* beta2_pow_out) {
*beta1_pow_out = beta1 * beta1_pow_[0];
*beta2_pow_out = beta2 * beta2_pow_[0];
}
template <typename T, typename Context>
void AdamDenseKernel(const Context& dev_ctx,
const DenseTensor& param,
const DenseTensor& grad,
const DenseTensor& learning_rate,
const DenseTensor& moment1,
const DenseTensor& moment2,
const DenseTensor& beta1_pow,
const DenseTensor& beta2_pow,
const paddle::optional<DenseTensor>& master_param,
const paddle::optional<DenseTensor>& skip_update,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
bool lazy_mode,
int64_t min_row_size_to_use_multithread,
bool multi_precision,
bool use_global_beta_pow,
DenseTensor* param_out,
DenseTensor* moment1_out,
DenseTensor* moment2_out,
DenseTensor* beta1_pow_out,
DenseTensor* beta2_pow_out,
DenseTensor* master_param_outs) {
using MPDType = typename phi::dtype::MPTypeTrait<T>::Type;
VLOG(4) << "use_global_beta_pow:" << use_global_beta_pow;
bool skip_update_ = false;
if (skip_update.is_initialized()) {
PADDLE_ENFORCE_EQ(
skip_update->numel(),
1,
errors::InvalidArgument("Input(SkipUpdate) size must be 1, but get %d",
skip_update->numel()));
std::vector<bool> skip_update_vec;
phi::TensorToVector(*skip_update, dev_ctx, &skip_update_vec);
skip_update_ = skip_update_vec[0];
}
// skip_update=true, just copy input to output, and TensorCopy will call
// mutable_data
if (skip_update_) {
VLOG(4) << "Adam skip update";
phi::Copy(dev_ctx, param, dev_ctx.GetPlace(), false, param_out);
phi::Copy(dev_ctx, moment1, dev_ctx.GetPlace(), false, moment1_out);
phi::Copy(dev_ctx, moment2, dev_ctx.GetPlace(), false, moment2_out);
if (!use_global_beta_pow) {
phi::Copy(dev_ctx, beta1_pow, beta1_pow.place(), false, beta1_pow_out);
phi::Copy(dev_ctx, beta2_pow, beta2_pow.place(), false, beta2_pow_out);
}
return;
}
MPDType beta1_ = beta1.to<MPDType>();
MPDType beta2_ = beta2.to<MPDType>();
MPDType epsilon_ = epsilon.to<MPDType>();
VLOG(3) << "beta1_pow.numel() : " << beta1_pow.numel()
<< "beta2_pow.numel() : " << beta2_pow.numel();
VLOG(3) << "param.numel(): " << param.numel();
PADDLE_ENFORCE_EQ(
beta1_pow_out->numel(),
1,
errors::InvalidArgument("beta1 pow output size should be 1, but received "
"value is:%d.",
beta1_pow_out->numel()));
PADDLE_ENFORCE_EQ(
beta2_pow_out->numel(),
1,
errors::InvalidArgument("beta2 pow output size should be 1, but received "
"value is:%d.",
beta2_pow_out->numel()));
const MPDType* master_in_data =
multi_precision ? master_param->data<MPDType>() : nullptr;
MPDType* master_out_data =
multi_precision ? dev_ctx.template Alloc<MPDType>(master_param_outs)
: nullptr;
// update param and moment
int threads = 512;
int blocks = (param.numel() + threads - 1) / threads;
if (beta1_pow.place() == CPUPlace() && beta2_pow.place() == CPUPlace()) {
// Compute with betapow in REG
AdamKernelREG<T, MPDType><<<blocks, threads, 0, dev_ctx.stream()>>>(
beta1_,
beta2_,
epsilon_,
*beta1_pow.data<MPDType>(),
*beta2_pow.data<MPDType>(),
moment1.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment1_out),
moment2.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment2_out),
learning_rate.data<MPDType>(),
grad.data<T>(),
param.data<T>(),
dev_ctx.template Alloc<T>(param_out),
master_in_data,
master_out_data,
param.numel());
if (!use_global_beta_pow) {
// Cpu update
dev_ctx.template HostAlloc<MPDType>(beta1_pow_out)[0] =
beta1_ * beta1_pow.data<MPDType>()[0];
dev_ctx.template HostAlloc<MPDType>(beta2_pow_out)[0] =
beta2_ * beta2_pow.data<MPDType>()[0];
}
} else {
AdamKernelMEM<T, MPDType><<<blocks, threads, 0, dev_ctx.stream()>>>(
beta1_,
beta2_,
epsilon_,
beta1_pow.data<MPDType>(),
beta2_pow.data<MPDType>(),
moment1.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment1_out),
moment2.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment2_out),
learning_rate.data<MPDType>(),
grad.data<T>(),
param.data<T>(),
dev_ctx.template Alloc<T>(param_out),
master_in_data,
master_out_data,
param.numel());
if (!use_global_beta_pow) {
// Update with gpu
UpdateBetaPow<MPDType><<<1, 1, 0, dev_ctx.stream()>>>(
beta1_,
beta2_,
beta1_pow.data<MPDType>(),
beta2_pow.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(beta1_pow_out),
dev_ctx.template Alloc<MPDType>(beta2_pow_out));
}
}
}
template <typename T, typename Context>
void MergedAdamKernel(
const Context& dev_ctx,
const std::vector<const DenseTensor*>& param,
const std::vector<const DenseTensor*>& grad,
const std::vector<const DenseTensor*>& learning_rate,
const std::vector<const DenseTensor*>& moment1,
const std::vector<const DenseTensor*>& moment2,
const std::vector<const DenseTensor*>& beta1_pow,
const std::vector<const DenseTensor*>& beta2_pow,
const paddle::optional<std::vector<const DenseTensor*>>& master_param,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
bool multi_precision,
bool use_global_beta_pow,
std::vector<DenseTensor*> param_out,
std::vector<DenseTensor*> moment1_out,
std::vector<DenseTensor*> moment2_out,
std::vector<DenseTensor*> beta1_pow_out,
std::vector<DenseTensor*> beta2_pow_out,
std::vector<DenseTensor*> master_param_out) {
using MPDType = typename phi::dtype::MPTypeTrait<T>::Type;
VLOG(4) << "use_global_beta_pow:" << use_global_beta_pow;
MPDType beta1_ = beta1.to<MPDType>();
MPDType beta2_ = beta2.to<MPDType>();
MPDType epsilon_ = epsilon.to<MPDType>();
size_t param_num = param.size();
for (size_t idx = 0; idx < param_num; idx++) {
const MPDType* master_in_data =
multi_precision ? master_param.get()[idx]->data<MPDType>() : nullptr;
MPDType* master_out_data =
multi_precision ? dev_ctx.template Alloc<MPDType>(master_param_out[idx])
: nullptr;
// update param and moment
int threads = 512;
int blocks = (param[idx]->numel() + threads - 1) / threads;
if (beta1_pow[idx]->place() == CPUPlace() &&
beta2_pow[idx]->place() == CPUPlace()) {
// Compute with betapow in REG
AdamKernelREG<T, MPDType><<<blocks, threads, 0, dev_ctx.stream()>>>(
beta1_,
beta2_,
epsilon_,
*beta1_pow[idx]->data<MPDType>(),
*beta2_pow[idx]->data<MPDType>(),
moment1[idx]->data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment1_out[idx]),
moment2[idx]->data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment2_out[idx]),
learning_rate[idx]->data<MPDType>(),
grad[idx]->data<T>(),
param[idx]->data<T>(),
dev_ctx.template Alloc<T>(param_out[idx]),
master_in_data,
master_out_data,
param[idx]->numel());
if (!use_global_beta_pow) {
// Cpu update
dev_ctx.template HostAlloc<MPDType>(beta1_pow_out[idx])[0] =
beta1_ * beta1_pow[idx]->data<MPDType>()[0];
dev_ctx.template HostAlloc<MPDType>(beta2_pow_out[idx])[0] =
beta2_ * beta2_pow[idx]->data<MPDType>()[0];
}
} else {
AdamKernelMEM<T, MPDType><<<blocks, threads, 0, dev_ctx.stream()>>>(
beta1_,
beta2_,
epsilon_,
beta1_pow[idx]->data<MPDType>(),
beta2_pow[idx]->data<MPDType>(),
moment1[idx]->data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment1_out[idx]),
moment2[idx]->data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment2_out[idx]),
learning_rate[idx]->data<MPDType>(),
grad[idx]->data<T>(),
param[idx]->data<T>(),
dev_ctx.template Alloc<T>(param_out[idx]),
master_in_data,
master_out_data,
param[idx]->numel());
if (!use_global_beta_pow) {
// Update with gpu
UpdateBetaPow<MPDType><<<1, 1, 0, dev_ctx.stream()>>>(
beta1_,
beta2_,
beta1_pow[idx]->data<MPDType>(),
beta2_pow[idx]->data<MPDType>(),
dev_ctx.template Alloc<MPDType>(beta1_pow_out[idx]),
dev_ctx.template Alloc<MPDType>(beta2_pow_out[idx]));
}
}
}
}
} // namespace phi
PD_REGISTER_KERNEL(adam,
GPU,
ALL_LAYOUT,
phi::AdamDenseKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {
// Skip beta1_pow, beta2_pow, skip_update data transform
kernel->InputAt(5).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(6).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(8).SetBackend(phi::Backend::ALL_BACKEND);
if (kernel_key.dtype() == phi::DataType::FLOAT16 ||
kernel_key.dtype() == phi::DataType::BFLOAT16) {
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(3).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(4).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(5).SetDataType(phi::DataType::FLOAT32);
}
kernel->OutputAt(3).SetBackend(phi::Backend::UNDEFINED);
kernel->OutputAt(4).SetBackend(phi::Backend::UNDEFINED);
}
PD_REGISTER_KERNEL(merged_adam,
GPU,
ALL_LAYOUT,
phi::MergedAdamKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {
// Skip beta1_pow, beta2_pow data transform
kernel->InputAt(5).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(6).SetBackend(phi::Backend::ALL_BACKEND);
if (kernel_key.dtype() == phi::DataType::FLOAT16 ||
kernel_key.dtype() == phi::DataType::BFLOAT16) {
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(3).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(4).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(5).SetDataType(phi::DataType::FLOAT32);
}
kernel->OutputAt(3).SetBackend(phi::Backend::UNDEFINED);
kernel->OutputAt(4).SetBackend(phi::Backend::UNDEFINED);
}
|
d6a9773eb663ca0c5d4f695d13b4d0a7e1f7767a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#define FILENAME "./dblp-co-authors.txt"
#define NumAuthor 317080
#define DataLen 1049866
#define BlockSize 1024
#define GridSize (int(DataLen/BlockSize) + 1)
int dataset[DataLen * 2];// array to store the raw dataset
void dataset_read(int * dataset);
__global__ void dataset_parse(int * dataset, int * output);
int dataset_maxCoAuthor(int * output, int lenght);
void dataset_plot(int * output, int lenght, int max);
int main(int argc, char * argv[])
{
int output[NumAuthor] = { 0 };
int * cu_output;//array to store the co-authors number of each author
dataset_read(dataset);
// Set device that we will use for our cuda code
hipSetDevice(0);
// Time Variables
hipEvent_t start, stop;
hipEventCreate (&start);
hipEventCreate (&stop);
float time;
int * cu_dataset;
hipEventRecord(start,0);
hipMalloc((void**)&cu_output, NumAuthor * sizeof(int) );
hipMalloc((void**)&cu_dataset, DataLen * 2 * sizeof(int));
hipMemcpy(cu_dataset, dataset, DataLen * 2 * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(cu_output, output, NumAuthor * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( dataset_parse), dim3(GridSize), dim3(BlockSize), 0, 0, cu_dataset, cu_output);
hipDeviceSynchronize();
//hipEventSynchronize(stop);
//hipEventElapsedTime(&time, start, stop);
hipMemcpy(output, cu_output, NumAuthor * sizeof(int), hipMemcpyDeviceToHost);
hipEventRecord(stop,0);
hipEventSynchronize(stop); // wait for the stop event to complete before reading the elapsed time
hipEventElapsedTime(&time, start, stop);
int max = dataset_maxCoAuthor(output, NumAuthor);
printf("Time elapsed: %f\n", time);
dataset_plot(output, NumAuthor, max);
return 0;
}
void dataset_read( int * dataset)
{
FILE * datafile;
datafile = fopen( FILENAME, "r");
char line[255];
while (true)
{
fscanf(datafile, "%s", line);
if (atoi(line) == 1)
{
dataset[0] = 1;
break;
}
}
for(int i = 1; i < 2 * DataLen; i++){
fscanf(datafile, "%d", &dataset[i]);
}
fclose(datafile);
}
__global__ void dataset_parse(int * dataset, int * output)
{
int indx = threadIdx.x + blockIdx.x * blockDim.x;
if(indx < DataLen){
atomicAdd(&(output[dataset[2*indx]-1]), 1);
atomicAdd(&(output[dataset[2*indx+1]-1]), 1);
//if (dataset[2*indx]-1 >= 315280)
// printf("index: %6d author:%6d output:%6d\n", indx,dataset[2*indx]-1, output[dataset[2*indx]-1]);
//if (dataset[2*indx+1]-1 >= 315280)
// printf("index: %6d author:%6d output:%6d\n", indx,dataset[2*indx+ 1]-1, output[dataset[2*indx+1]-1]);
}
}
int dataset_maxCoAuthor(int * output, int lenght)
{
int max =0;
int max_num = 0;
int max_ind[1000] = { 0 };
//memset(max_ind, 0, 1000);
for(int i = 0; i < lenght; i++)
{
//printf("output:%d, %d", i, output[i]);
if(max < output[i])
{
// printf("Max right now:%d, %d\n", i, output[i]);
max = output[i];
max_num = 0;
memset(max_ind, 0, sizeof(max_ind)); // clear all 1000 ints, not just 1000 bytes
max_ind[max_num] = i;
}
else if(max == output[i])
{
max_num++;
max_ind[max_num] = i;
}
//else{
//printf("max is:%d, %d\n", max, max_ind[0]);
//}
}
printf("The list of authors with most co-authors:\n");
for(int i = 0; i <= max_num; i++)
{
printf("Author: %6d has %6d co-authors.\n", max_ind[i] + 1, output[max_ind[i]]);
}
return output[max_ind[0]];
}
void dataset_plot(int * output, int lenght, int max)
{
//int* numCoAuthorList;
int* numCoAuthorList = (int*)malloc(max * sizeof(int));
memset(numCoAuthorList, 0, max * sizeof(int)); // zero max ints, not max bytes
for(int i = 0; i < lenght; i++)
{
if(output[i] <= max)
{
numCoAuthorList[output[i] - 1]++;
}
else{
printf("\nError in Finding MAX!!!\n");
}
}
/*
int total = 0;
for(int i =0; i< max; i++)
{ total += numCoAuthorList[i];
printf("%6d\t",numCoAuthorList[i]);
}
printf("Total author:%d", total);
*/
FILE *fp;
fp = fopen("./output.txt", "wb");
fwrite(numCoAuthorList, sizeof(int), max, fp);
fclose(fp);
}
|
d6a9773eb663ca0c5d4f695d13b4d0a7e1f7767a.cu
|
#include <stdlib.h>
#include <stdio.h>
#define FILENAME "./dblp-co-authors.txt"
#define NumAuthor 317080
#define DataLen 1049866
#define BlockSize 1024
#define GridSize (int(DataLen/BlockSize) + 1)
int dataset[DataLen * 2];// array to store the raw dataset
void dataset_read(int * dataset);
__global__ void dataset_parse(int * dataset, int * output);
int dataset_maxCoAuthor(int * output, int lenght);
void dataset_plot(int * output, int lenght, int max);
int main(int argc, char * argv[])
{
int output[NumAuthor] = { 0 };
int * cu_output;//array to store the co-authors number of each author
dataset_read(dataset);
// Set device that we will use for our cuda code
cudaSetDevice(0);
// Time Variables
cudaEvent_t start, stop;
cudaEventCreate (&start);
cudaEventCreate (&stop);
float time;
int * cu_dataset;
cudaEventRecord(start,0);
cudaMalloc((void**)&cu_output, NumAuthor * sizeof(int) );
cudaMalloc((void**)&cu_dataset, DataLen * 2 * sizeof(int));
cudaMemcpy(cu_dataset, dataset, DataLen * 2 * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(cu_output, output, NumAuthor * sizeof(int), cudaMemcpyHostToDevice);
dataset_parse<<<GridSize, BlockSize>>>(cu_dataset, cu_output);
cudaDeviceSynchronize();
//cudaEventSynchronize(stop);
//cudaEventElapsedTime(&time, start, stop);
cudaMemcpy(output, cu_output, NumAuthor * sizeof(int), cudaMemcpyDeviceToHost);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop); // wait for the stop event to complete before reading the elapsed time
cudaEventElapsedTime(&time, start, stop);
int max = dataset_maxCoAuthor(output, NumAuthor);
printf("Time elapsed: %f\n", time);
dataset_plot(output, NumAuthor, max);
return 0;
}
void dataset_read( int * dataset)
{
FILE * datafile;
datafile = fopen( FILENAME, "r");
char line[255];
while (true)
{
fscanf(datafile, "%s", line);
if (atoi(line) == 1)
{
dataset[0] = 1;
break;
}
}
for(int i = 1; i < 2 * DataLen; i++){
fscanf(datafile, "%d", &dataset[i]);
}
fclose(datafile);
}
__global__ void dataset_parse(int * dataset, int * output)
{
int indx = threadIdx.x + blockIdx.x * blockDim.x;
if(indx < DataLen){
atomicAdd(&(output[dataset[2*indx]-1]), 1);
atomicAdd(&(output[dataset[2*indx+1]-1]), 1);
//if (dataset[2*indx]-1 >= 315280)
// printf("index: %6d author:%6d output:%6d\n", indx,dataset[2*indx]-1, output[dataset[2*indx]-1]);
//if (dataset[2*indx+1]-1 >= 315280)
// printf("index: %6d author:%6d output:%6d\n", indx,dataset[2*indx+ 1]-1, output[dataset[2*indx+1]-1]);
}
}
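/*
 * Host-side sketch of what dataset_parse computes (illustrative only, not
 * called anywhere in this program): every edge (a, b) adds one co-author to
 * each endpoint, so the kernel is a parallel degree count over the edge list.
 * The function name is an assumption for this example.
 */
void degree_count_host_sketch(const int* edges, int numEdges, int* degree)
{
	for (int e = 0; e < numEdges; e++)
	{
		degree[edges[2 * e] - 1]++;     // authors are 1-indexed in the input
		degree[edges[2 * e + 1] - 1]++;
	}
}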
int dataset_maxCoAuthor(int * output, int lenght)
{
int max =0;
int max_num = 0;
int max_ind[1000] = { 0 };
//memset(max_ind, 0, 1000);
for(int i = 0; i < lenght; i++)
{
//printf("output:%d, %d", i, output[i]);
if(max < output[i])
{
// printf("Max right now:%d, %d\n", i, output[i]);
max = output[i];
max_num = 0;
memset(max_ind, 0, sizeof(max_ind)); // clear all 1000 ints, not just 1000 bytes
max_ind[max_num] = i;
}
else if(max == output[i])
{
max_num++;
max_ind[max_num] = i;
}
//else{
//printf("max is:%d, %d\n", max, max_ind[0]);
//}
}
printf("The list of authors with most co-authors:\n");
for(int i = 0; i <= max_num; i++)
{
printf("Author: %6d has %6d co-authors.\n", max_ind[i] + 1, output[max_ind[i]]);
}
return output[max_ind[0]];
}
void dataset_plot(int * output, int lenght, int max)
{
//int* numCoAuthorList;
int* numCoAuthorList = (int*)malloc(max * sizeof(int));
memset(numCoAuthorList, 0, max * sizeof(int)); // zero max ints, not max bytes
for(int i = 0; i < lenght; i++)
{
if(output[i] <= max)
{
numCoAuthorList[output[i] - 1]++;
}
else{
printf("\nError in Finding MAX!!!\n");
}
}
/*
int total = 0;
for(int i =0; i< max; i++)
{ total += numCoAuthorList[i];
printf("%6d\t",numCoAuthorList[i]);
}
printf("Total author:%d", total);
*/
FILE *fp;
fp = fopen("./output.txt", "wb");
fwrite(numCoAuthorList, sizeof(int), max, fp);
fclose(fp);
}
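/*
 * Reader sketch (not called anywhere in this program): one way the raw binary
 * histogram written by dataset_plot could be loaded back and printed. The
 * function name and the assumption that the file holds `max` ints are
 * illustrative only.
 */
void dataset_plot_read_sketch(int max)
{
	FILE* fp = fopen("./output.txt", "rb");
	if (fp == NULL)
		return;
	int* hist = (int*)malloc(max * sizeof(int));
	if (hist != NULL)
	{
		size_t got = fread(hist, sizeof(int), max, fp);
		for (size_t i = 0; i < got; i++)
			printf("%zu co-authors: %d authors\n", i + 1, hist[i]);
		free(hist);
	}
	fclose(fp);
}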
|
374e44118f5e2030c1b7503db38d9da6a4d2196a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include "util.hpp"
// host implementation of dot product
double dot_host(const double *x, const double* y, int n) {
double sum = 0;
for(auto i=0; i<n; ++i) {
sum += x[i]*y[i];
}
return sum;
}
// TODO implement dot product kernel
template <int THREADS>
__global__
void dot_gpu_kernel(const double *x, const double* y, double *result, int n) {
__shared__ double res[THREADS];
int i = threadIdx.x;
int gid = threadIdx.x + blockDim.x * blockIdx.x;
res[i] = gid < n ? x[gid] * y[gid] : 0;
int width = THREADS/2;
    while (width >= 1) { // include the width == 1 step so res[1] is folded into res[0]
__syncthreads();
if(i < width)
res[i] += res[i + width];
width /= 2;
}
if (i == 0)
atomicAdd(result, res[0]);
}
double dot_gpu(const double *x, const double* y, int n) {
static double* result = malloc_managed<double>(1);
// TODO call dot product kernel
const int t = 64;
*result = 0;
hipLaunchKernelGGL(( dot_gpu_kernel<t>), dim3((n + t - 1) / t), dim3(t), 0, 0, x, y, result, n);
hipDeviceSynchronize();
return *result;
}
int main(int argc, char** argv) {
size_t pow = read_arg(argc, argv, 1, 4);
size_t n = (1 << pow);
auto size_in_bytes = n * sizeof(double);
std::cout << "dot product CUDA of length n = " << n
<< " : " << size_in_bytes*1e-9 << "MB\n";
auto x_h = malloc_host<double>(n, 2.);
auto y_h = malloc_host<double>(n);
for(auto i=0; i<n; ++i) {
y_h[i] = rand()%10;
}
auto x_d = malloc_device<double>(n);
auto y_d = malloc_device<double>(n);
// copy initial conditions to device
copy_to_device<double>(x_h, x_d, n);
copy_to_device<double>(y_h, y_d, n);
auto result = dot_gpu(x_d, y_d, n);
auto expected = dot_host(x_h, y_h, n);
printf("expected %f got %f\n", (float)expected, (float)result);
return 0;
}
|
374e44118f5e2030c1b7503db38d9da6a4d2196a.cu
|
#include <iostream>
#include <cuda.h>
#include "util.hpp"
// host implementation of dot product
double dot_host(const double *x, const double* y, int n) {
double sum = 0;
for(auto i=0; i<n; ++i) {
sum += x[i]*y[i];
}
return sum;
}
// TODO implement dot product kernel
template <int THREADS>
__global__
void dot_gpu_kernel(const double *x, const double* y, double *result, int n) {
__shared__ double res[THREADS];
int i = threadIdx.x;
int gid = threadIdx.x + blockDim.x * blockIdx.x;
res[i] = gid < n ? x[gid] * y[gid] : 0;
int width = THREADS/2;
    while (width >= 1) { // include the width == 1 step so res[1] is folded into res[0]
__syncthreads();
if(i < width)
res[i] += res[i + width];
width /= 2;
}
if (i == 0)
atomicAdd(result, res[0]);
}
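// Alternative sketch, not used by dot_gpu below: the same partial-sum step can
// be written with warp shuffles instead of shared memory. This assumes CUDA 9+
// (__shfl_down_sync) and a warp size of 32; it reduces one warp's values into
// lane 0, which would still need a per-block combine plus the atomicAdd.
__inline__ __device__ double warp_reduce_sum_sketch(double val) {
    for (int offset = 16; offset > 0; offset /= 2) {
        val += __shfl_down_sync(0xffffffffu, val, offset);
    }
    return val;
}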
double dot_gpu(const double *x, const double* y, int n) {
static double* result = malloc_managed<double>(1);
// TODO call dot product kernel
const int t = 64;
*result = 0;
dot_gpu_kernel<t><<<(n + t - 1) / t, t>>>(x, y, result, n);
cudaDeviceSynchronize();
return *result;
}
int main(int argc, char** argv) {
size_t pow = read_arg(argc, argv, 1, 4);
size_t n = (1 << pow);
auto size_in_bytes = n * sizeof(double);
std::cout << "dot product CUDA of length n = " << n
<< " : " << size_in_bytes*1e-9 << "MB\n";
auto x_h = malloc_host<double>(n, 2.);
auto y_h = malloc_host<double>(n);
for(auto i=0; i<n; ++i) {
y_h[i] = rand()%10;
}
auto x_d = malloc_device<double>(n);
auto y_d = malloc_device<double>(n);
// copy initial conditions to device
copy_to_device<double>(x_h, x_d, n);
copy_to_device<double>(y_h, y_d, n);
auto result = dot_gpu(x_d, y_d, n);
auto expected = dot_host(x_h, y_h, n);
printf("expected %f got %f\n", (float)expected, (float)result);
return 0;
}
|
5c0ae06b7e2b2f24acb273c17e447983c6c88cbb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
__global__ void matMul(int *matrixA, int *matrixB, int *matrixC, int matSize)
{
int threadCol = blockIdx.x * blockDim.x + threadIdx.x;
int threadRow = blockIdx.y * blockDim.y + threadIdx.y;
int k, sum = 0;
    if(threadCol < matSize && threadRow < matSize)
    {
        for(k = 0 ; k < matSize ; k++)
            sum += matrixA[threadRow*matSize+k]*matrixB[k*matSize+threadCol];
        //keep the store inside the bounds check so out-of-range threads do not write past matrixC
        matrixC[threadRow*matSize+threadCol] = sum;
    }
}
void printMatrix(int *matrix, int size, char * matrixName)
{
if(size > 10)
return;
int i = 0;
printf("Printing Matrix: %s\n", matrixName);
for( ; i < size * size ; i ++)
{
if(i % size == 0)
printf("\n");
printf("%-3d ", matrix[i]);
}
printf("\n\n");
}
void checkError(hipError_t error, char * function)
{
if(error != hipSuccess)
{
printf("\"%s\" has a problem with error code %d and desc: %s\n", function, error, hipGetErrorString(error));
exit(-1);
}
}
bool checkIfMatricesEqual(int * mat1, int * mat2, int matSize)
{
int i = 0;
for( ; i < matSize * matSize; i++)
if(mat1[i] != mat2[i]){
printf("values different for i: %d\n", i);
printf("mat1[i] = %d, mat2[i] = %d\n", mat1[i], mat2[i]);
return false;
}
return true;
}
void readValue(int *value, char * msg, int lowerBound, int upperBound)
{
while(true)
{
printf("%s(%d-%d): ", msg, lowerBound, upperBound);
scanf("%d", value);
if(*value <= upperBound && *value >= lowerBound)
return;
}
}
int main()
{
//have variables for threads per block, number of blocks.
int threadsPerBlock = 0, blocksInGrid = 0;
//create cuda event variables
hipEvent_t hostStart, hostStop, deviceStart, deviceStop;
float timeDifferenceOnHost, timeDifferenceOnDevice;
//program variables
int matrixSize = 0;
size_t size; //variable to have the size of arrays on device
int *matA, *matB, *matC, *matCFromGPU; //matrices for host
int *gpuMatA, *gpuMatB, *gpuMatC; //matrices for Device
//initialize cuda timing variables
hipEventCreate(&hostStart);
hipEventCreate(&hostStop);
hipEventCreate(&deviceStart);
hipEventCreate(&deviceStop);
printf("Enter the size of the matrix: ");
scanf("%d", &matrixSize);
//calculate the size required on GPU
size = matrixSize * matrixSize * sizeof(int);
matA = (int *)malloc(matrixSize * sizeof(int) * matrixSize);
matB = (int *)malloc(matrixSize * sizeof(int) * matrixSize);
matC = (int *)malloc(matrixSize * sizeof(int) * matrixSize);
for(int i = 0 ; i < matrixSize * matrixSize; i ++)
matA[i] = matB[i] = (i*2)%10;
printMatrix(matA, matrixSize, "Matrix A");
printMatrix(matB, matrixSize, "Matrix B");
printf("Multiplying matrices on CPU...\n");
hipEventRecord(hostStart, 0);
//matrix multiplication code goes here.
for(int i = 0 ; i < matrixSize ; i ++)
{
for(int j = 0 ; j < matrixSize ; j ++)
{
int sum = 0;
for(int k = 0 ; k < matrixSize ; k ++)
{
//printf("A index: %d, B index: %d\n", i*matrixSize + k, k*matrixSize + j);
sum += matA[i*matrixSize+k]*matB[k*matrixSize+j];
}
//printf("C index: %d\n", i*matrixSize+j);
matC[i*matrixSize+j] = sum;
}
}
	hipEventRecord(hostStop, 0);
	hipEventSynchronize(hostStop); //make sure the event has completed before querying the elapsed time
	hipEventElapsedTime(&timeDifferenceOnHost, hostStart, hostStop);
printf("Matrix addition over. Time taken on CPU: %5.5f\n", timeDifferenceOnHost);
printMatrix(matC, matrixSize, "Summation Matrix");
//allocate memory on GPU
checkError(hipMalloc((void**)&gpuMatA, size), "Malloc for Matrix A");
checkError(hipMalloc((void**)&gpuMatB, size), "Malloc for Matrix B");
checkError(hipMalloc((void**)&gpuMatC, size), "Malloc for Matrix C");
//copy the matrix A and matrix B
checkError(hipMemcpy(gpuMatA, matA, size, hipMemcpyHostToDevice), "Matrix A Copy");
checkError(hipMemcpy(gpuMatB, matB, size, hipMemcpyHostToDevice), "Matrix B Copy");
bool done = false;
while(!done)
{
matCFromGPU = (int *)malloc(matrixSize * sizeof(int) * matrixSize);
//create a proper grid block using dim3
readValue(&threadsPerBlock, "Enter no. of threads per block(input of 'P' will construct PxP threads in block)", 4, 32);
readValue(&blocksInGrid, "Enter no. of blocks in grid(input of 'P' will construct PxP blocks)", (matrixSize + threadsPerBlock -1)/threadsPerBlock, 65535);
printf("Threads Per block: %d, Blocks in grid: %d\n", threadsPerBlock, blocksInGrid);
printf("Multiplying matrices on GPU..\n");
dim3 blocks(threadsPerBlock, threadsPerBlock);
dim3 grid(blocksInGrid, blocksInGrid); //(matrixSize + threadsPerBlock - 1/blocks.x), (matrixSize + blocks.y - 1/blocks.y));
//call the kernels to execute
hipEventRecord(deviceStart, 0);
printf("Total linear threads: %d\n", blocksInGrid*threadsPerBlock);
hipLaunchKernelGGL(( matMul), dim3(grid), dim3(blocks), 0, 0, gpuMatA, gpuMatB, gpuMatC, matrixSize);
hipEventRecord(deviceStop, 0);
hipEventSynchronize(deviceStop);
hipEventElapsedTime(&timeDifferenceOnDevice, deviceStart, deviceStop);
//copy the result back into host memory
checkError(hipMemcpy(matCFromGPU, gpuMatC, size, hipMemcpyDeviceToHost), "Matrix C Copy from device to Host");
if(checkIfMatricesEqual(matC, matCFromGPU, matrixSize))
printf("Kernels correct!\n");
else
printf("Kernel logic wrong!\n");
printf("Finished addition on GPU. Time taken: %5.5f\n", timeDifferenceOnDevice);
printf("Speedup: %5.5f\n", (float)timeDifferenceOnHost/timeDifferenceOnDevice);
printMatrix(matCFromGPU, matrixSize, "Summation Matrix from GPU");
char c = 'n';
printf("Again?(y/n): ");
while(true)
{
c = getchar();
if(c == 'y' || c == 'n')
break;
}
if(c == 'n')
break;
free(matCFromGPU);
}
free(matA);
free(matB);
free(matC);
hipEventDestroy(deviceStart);
hipEventDestroy(deviceStop);
hipEventDestroy(hostStart);
hipEventDestroy(hostStop);
return 0;
}
|
5c0ae06b7e2b2f24acb273c17e447983c6c88cbb.cu
|
#include<stdio.h>
#include<stdlib.h>
__global__ void matMul(int *matrixA, int *matrixB, int *matrixC, int matSize)
{
int threadCol = blockIdx.x * blockDim.x + threadIdx.x;
int threadRow = blockIdx.y * blockDim.y + threadIdx.y;
int k, sum = 0;
    if(threadCol < matSize && threadRow < matSize)
    {
        for(k = 0 ; k < matSize ; k++)
            sum += matrixA[threadRow*matSize+k]*matrixB[k*matSize+threadCol];
        //keep the store inside the bounds check so out-of-range threads do not write past matrixC
        matrixC[threadRow*matSize+threadCol] = sum;
    }
}
void printMatrix(int *matrix, int size, char * matrixName)
{
if(size > 10)
return;
int i = 0;
printf("Printing Matrix: %s\n", matrixName);
for( ; i < size * size ; i ++)
{
if(i % size == 0)
printf("\n");
printf("%-3d ", matrix[i]);
}
printf("\n\n");
}
void checkError(cudaError_t error, char * function)
{
if(error != cudaSuccess)
{
printf("\"%s\" has a problem with error code %d and desc: %s\n", function, error, cudaGetErrorString(error));
exit(-1);
}
}
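//Sketch only (not wired into main below): a common pattern to surface kernel
//launch and execution errors right after a launch. The function name is an
//assumption for this example.
void checkKernelErrorsSketch()
{
	cudaError_t launchErr = cudaGetLastError();      //catches bad launch configurations
	cudaError_t execErr = cudaDeviceSynchronize();   //catches errors raised while the kernel runs
	if(launchErr != cudaSuccess)
		printf("launch failed: %s\n", cudaGetErrorString(launchErr));
	if(execErr != cudaSuccess)
		printf("execution failed: %s\n", cudaGetErrorString(execErr));
}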
bool checkIfMatricesEqual(int * mat1, int * mat2, int matSize)
{
int i = 0;
for( ; i < matSize * matSize; i++)
if(mat1[i] != mat2[i]){
printf("values different for i: %d\n", i);
printf("mat1[i] = %d, mat2[i] = %d\n", mat1[i], mat2[i]);
return false;
}
return true;
}
void readValue(int *value, char * msg, int lowerBound, int upperBound)
{
while(true)
{
printf("%s(%d-%d): ", msg, lowerBound, upperBound);
scanf("%d", value);
if(*value <= upperBound && *value >= lowerBound)
return;
}
}
int main()
{
//have variables for threads per block, number of blocks.
int threadsPerBlock = 0, blocksInGrid = 0;
//create cuda event variables
cudaEvent_t hostStart, hostStop, deviceStart, deviceStop;
float timeDifferenceOnHost, timeDifferenceOnDevice;
//program variables
int matrixSize = 0;
size_t size; //variable to have the size of arrays on device
int *matA, *matB, *matC, *matCFromGPU; //matrices for host
int *gpuMatA, *gpuMatB, *gpuMatC; //matrices for Device
//initialize cuda timing variables
cudaEventCreate(&hostStart);
cudaEventCreate(&hostStop);
cudaEventCreate(&deviceStart);
cudaEventCreate(&deviceStop);
printf("Enter the size of the matrix: ");
scanf("%d", &matrixSize);
//calculate the size required on GPU
size = matrixSize * matrixSize * sizeof(int);
matA = (int *)malloc(matrixSize * sizeof(int) * matrixSize);
matB = (int *)malloc(matrixSize * sizeof(int) * matrixSize);
matC = (int *)malloc(matrixSize * sizeof(int) * matrixSize);
for(int i = 0 ; i < matrixSize * matrixSize; i ++)
matA[i] = matB[i] = (i*2)%10;
printMatrix(matA, matrixSize, "Matrix A");
printMatrix(matB, matrixSize, "Matrix B");
printf("Multiplying matrices on CPU...\n");
cudaEventRecord(hostStart, 0);
//matrix multiplication code goes here.
for(int i = 0 ; i < matrixSize ; i ++)
{
for(int j = 0 ; j < matrixSize ; j ++)
{
int sum = 0;
for(int k = 0 ; k < matrixSize ; k ++)
{
//printf("A index: %d, B index: %d\n", i*matrixSize + k, k*matrixSize + j);
sum += matA[i*matrixSize+k]*matB[k*matrixSize+j];
}
//printf("C index: %d\n", i*matrixSize+j);
matC[i*matrixSize+j] = sum;
}
}
	cudaEventRecord(hostStop, 0);
	cudaEventSynchronize(hostStop); //make sure the event has completed before querying the elapsed time
	cudaEventElapsedTime(&timeDifferenceOnHost, hostStart, hostStop);
printf("Matrix addition over. Time taken on CPU: %5.5f\n", timeDifferenceOnHost);
printMatrix(matC, matrixSize, "Summation Matrix");
//allocate memory on GPU
checkError(cudaMalloc((void**)&gpuMatA, size), "Malloc for Matrix A");
checkError(cudaMalloc((void**)&gpuMatB, size), "Malloc for Matrix B");
checkError(cudaMalloc((void**)&gpuMatC, size), "Malloc for Matrix C");
//copy the matrix A and matrix B
checkError(cudaMemcpy(gpuMatA, matA, size, cudaMemcpyHostToDevice), "Matrix A Copy");
checkError(cudaMemcpy(gpuMatB, matB, size, cudaMemcpyHostToDevice), "Matrix B Copy");
bool done = false;
while(!done)
{
matCFromGPU = (int *)malloc(matrixSize * sizeof(int) * matrixSize);
//create a proper grid block using dim3
readValue(&threadsPerBlock, "Enter no. of threads per block(input of 'P' will construct PxP threads in block)", 4, 32);
readValue(&blocksInGrid, "Enter no. of blocks in grid(input of 'P' will construct PxP blocks)", (matrixSize + threadsPerBlock -1)/threadsPerBlock, 65535);
printf("Threads Per block: %d, Blocks in grid: %d\n", threadsPerBlock, blocksInGrid);
printf("Multiplying matrices on GPU..\n");
dim3 blocks(threadsPerBlock, threadsPerBlock);
dim3 grid(blocksInGrid, blocksInGrid); //(matrixSize + threadsPerBlock - 1/blocks.x), (matrixSize + blocks.y - 1/blocks.y));
//call the kernels to execute
cudaEventRecord(deviceStart, 0);
printf("Total linear threads: %d\n", blocksInGrid*threadsPerBlock);
matMul<<<grid, blocks>>>(gpuMatA, gpuMatB, gpuMatC, matrixSize);
cudaEventRecord(deviceStop, 0);
cudaEventSynchronize(deviceStop);
cudaEventElapsedTime(&timeDifferenceOnDevice, deviceStart, deviceStop);
//copy the result back into host memory
checkError(cudaMemcpy(matCFromGPU, gpuMatC, size, cudaMemcpyDeviceToHost), "Matrix C Copy from device to Host");
if(checkIfMatricesEqual(matC, matCFromGPU, matrixSize))
printf("Kernels correct!\n");
else
printf("Kernel logic wrong!\n");
printf("Finished addition on GPU. Time taken: %5.5f\n", timeDifferenceOnDevice);
printf("Speedup: %5.5f\n", (float)timeDifferenceOnHost/timeDifferenceOnDevice);
printMatrix(matCFromGPU, matrixSize, "Summation Matrix from GPU");
char c = 'n';
printf("Again?(y/n): ");
while(true)
{
c = getchar();
if(c == 'y' || c == 'n')
break;
}
if(c == 'n')
break;
free(matCFromGPU);
}
free(matA);
free(matB);
free(matC);
cudaEventDestroy(deviceStart);
cudaEventDestroy(deviceStop);
cudaEventDestroy(hostStart);
cudaEventDestroy(hostStop);
return 0;
}
|
2213117c5d1b4b5a36574ffc90095173ecc94adc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <wb.h>
#define TILE_WIDTH 16 //do not change this value
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
// Compute C = A * B
__global__ void matrixMultiplyShared(float *A, float *B, float *C,
int numARows, int numAColumns,
int numBColumns) {
//@@ Insert code to implement tiled matrix multiplication here
//@@ You have to use shared memory to write this kernel
__shared__ float ds_A[TILE_WIDTH][TILE_WIDTH];
__shared__ float ds_B[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int Row = by * blockDim.y + ty;
int Col = bx * blockDim.x + tx;
float pValue = 0;
for (int p = 0; p < (numAColumns - 1) / TILE_WIDTH + 1; ++p) {
if ((Row < numARows) && ((p * TILE_WIDTH + tx) < numAColumns)) {
ds_A[ty][tx] = A[Row * numAColumns + p * TILE_WIDTH + tx];
}
else {
ds_A[ty][tx] = 0.0;
}
if (((p * TILE_WIDTH + ty) < numAColumns) && (Col < numBColumns)) {
ds_B[ty][tx] = B[(p * TILE_WIDTH + ty) * numBColumns + Col];
}
else {
ds_B[ty][tx] = 0.0;
}
__syncthreads();
if (Row < numARows && Col < numBColumns) {
for (int i = 0; i < TILE_WIDTH; ++i) {
pValue += ds_A[ty][i] * ds_B[i][tx];
}
}
__syncthreads();
}
if (Row < numARows && Col < numBColumns) {
C[Row * numBColumns + Col] = pValue;
}
}
int main(int argc, char **argv) {
wbArg_t args;
float *hostA; // The A matrix
float *hostB; // The B matrix
float *hostC; // The output C matrix
float *deviceA;
float *deviceB;
float *deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set
// this)
hostC = NULL;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows,
&numAColumns);
hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows,
&numBColumns);
//@@ Set numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
//@@ Allocate the hostC matrix
hostC = (float*)malloc(numCRows * numCColumns * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
hipError_t err = hipMalloc((void**)&deviceA, numARows * numAColumns * sizeof(float));
if (err == hipSuccess) {
err = hipMalloc((void**)&deviceB, numBRows * numBColumns * sizeof(float));
}
if (err == hipSuccess) {
err = hipMalloc((void**)&deviceC, numCRows * numCColumns * sizeof(float));
}
if (err != hipSuccess) {
printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
  err = hipMemcpy(deviceA, hostA, numARows * numAColumns * sizeof(float), hipMemcpyHostToDevice);
  if (err == hipSuccess) {
    err = hipMemcpy(deviceB, hostB, numBRows * numBColumns * sizeof(float), hipMemcpyHostToDevice);
  }
if (err != hipSuccess) {
printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 DimBlock(TILE_WIDTH, TILE_WIDTH, 1);
dim3 DimGrid((numCColumns + DimBlock.x) / DimBlock.x, (numCRows + DimBlock.y) / DimBlock.y, 1);
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
hipLaunchKernelGGL(( matrixMultiplyShared), dim3(DimGrid), dim3(DimBlock), 0, 0, deviceA, deviceB, deviceC, numARows, numAColumns, numBColumns);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
hipMemcpy(hostC, deviceC, numCRows * numCColumns * sizeof(float), hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
hipFree(deviceA);
hipFree(deviceB);
hipFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
return 0;
}
|
2213117c5d1b4b5a36574ffc90095173ecc94adc.cu
|
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <wb.h>
#define TILE_WIDTH 16 //do not change this value
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
// Compute C = A * B
__global__ void matrixMultiplyShared(float *A, float *B, float *C,
int numARows, int numAColumns,
int numBColumns) {
//@@ Insert code to implement tiled matrix multiplication here
//@@ You have to use shared memory to write this kernel
__shared__ float ds_A[TILE_WIDTH][TILE_WIDTH];
__shared__ float ds_B[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int Row = by * blockDim.y + ty;
int Col = bx * blockDim.x + tx;
float pValue = 0;
for (int p = 0; p < (numAColumns - 1) / TILE_WIDTH + 1; ++p) {
if ((Row < numARows) && ((p * TILE_WIDTH + tx) < numAColumns)) {
ds_A[ty][tx] = A[Row * numAColumns + p * TILE_WIDTH + tx];
}
else {
ds_A[ty][tx] = 0.0;
}
if (((p * TILE_WIDTH + ty) < numAColumns) && (Col < numBColumns)) {
ds_B[ty][tx] = B[(p * TILE_WIDTH + ty) * numBColumns + Col];
}
else {
ds_B[ty][tx] = 0.0;
}
__syncthreads();
if (Row < numARows && Col < numBColumns) {
for (int i = 0; i < TILE_WIDTH; ++i) {
pValue += ds_A[ty][i] * ds_B[i][tx];
}
}
__syncthreads();
}
if (Row < numARows && Col < numBColumns) {
C[Row * numBColumns + Col] = pValue;
}
}
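// Host reference sketch (not called by main below): a plain triple loop for
// C = A * B, convenient for spot-checking the tiled kernel on small inputs.
// The function name is an assumption for this example.
void matrixMultiplyHostSketch(const float *A, const float *B, float *C,
                              int numARows, int numAColumns, int numBColumns) {
  for (int i = 0; i < numARows; ++i) {
    for (int j = 0; j < numBColumns; ++j) {
      float sum = 0.0f;
      for (int k = 0; k < numAColumns; ++k) {
        sum += A[i * numAColumns + k] * B[k * numBColumns + j];
      }
      C[i * numBColumns + j] = sum;
    }
  }
}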
int main(int argc, char **argv) {
wbArg_t args;
float *hostA; // The A matrix
float *hostB; // The B matrix
float *hostC; // The output C matrix
float *deviceA;
float *deviceB;
float *deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set
// this)
hostC = NULL;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows,
&numAColumns);
hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows,
&numBColumns);
//@@ Set numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
//@@ Allocate the hostC matrix
hostC = (float*)malloc(numCRows * numCColumns * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
cudaError_t err = cudaMalloc((void**)&deviceA, numARows * numAColumns * sizeof(float));
if (err == cudaSuccess) {
err = cudaMalloc((void**)&deviceB, numBRows * numBColumns * sizeof(float));
}
if (err == cudaSuccess) {
err = cudaMalloc((void**)&deviceC, numCRows * numCColumns * sizeof(float));
}
if (err != cudaSuccess) {
printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
  err = cudaMemcpy(deviceA, hostA, numARows * numAColumns * sizeof(float), cudaMemcpyHostToDevice);
  if (err == cudaSuccess) {
    err = cudaMemcpy(deviceB, hostB, numBRows * numBColumns * sizeof(float), cudaMemcpyHostToDevice);
  }
if (err != cudaSuccess) {
printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 DimBlock(TILE_WIDTH, TILE_WIDTH, 1);
dim3 DimGrid((numCColumns + DimBlock.x) / DimBlock.x, (numCRows + DimBlock.y) / DimBlock.y, 1);
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
matrixMultiplyShared<<<DimGrid, DimBlock>>>(deviceA, deviceB, deviceC, numARows, numAColumns, numBColumns);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
cudaMemcpy(hostC, deviceC, numCRows * numCColumns * sizeof(float), cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
return 0;
}
|
5ccfae989c9a85d21b6a0fc476ad42ce5c505a3e.hip
|
// !!! This is a file automatically generated by hipify!!!
//#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <algorithm>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include"cuda.h"
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
#include <vector>
using std::cout;
using std::generate;
using std::vector;
#include <chrono>
using namespace std;
using namespace std::chrono;
#define Tile_size 16
#define TILE_SIZE 16
//Function To handle any errors occurred in the function calls
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C
int numCColumns; // number of columns in the matrix C
__global__ void matrixMultiply(float* A, float* B, float* C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
int Row = blockIdx.y * blockDim.y + threadIdx.y;
int Col = blockIdx.x * blockDim.x + threadIdx.x;
if (numAColumns != numBRows) return;
if ((Row < numARows) && (Col < numBColumns)) {
float Cvalue = 0;
for (int k = 0; k < numAColumns; ++k)
Cvalue += A[Row * numAColumns + k] * B[k * numBColumns + Col];
C[Row * numCColumns + Col] = Cvalue;
}
}
// Compute C = A * B
//*************************************************************
//Kernel for shared memory/ Tiled execution
__global__ void matrixMultiplyShared(float* A, float* B, float* C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
__shared__ float sA[Tile_size][Tile_size]; // Tile size to store elements in shared memory
__shared__ float sB[Tile_size][Tile_size];
int Row = blockDim.y * blockIdx.y + threadIdx.y; //To generate ids of threads.
int Col = blockDim.x * blockIdx.x + threadIdx.x;
float Cvalue = 0.0;
sA[threadIdx.y][threadIdx.x] = 0.0;
sB[threadIdx.y][threadIdx.x] = 0.0;
for (int k = 0; k < (((numAColumns - 1) / Tile_size) + 1); k++)
{
if ((Row < numARows) && (threadIdx.x + (k * Tile_size)) < numAColumns)//Copy Data to Tile from Matrix (Global Memory to Shared Memory)
{
sA[threadIdx.y][threadIdx.x] = A[(Row * numAColumns) + threadIdx.x + (k * Tile_size)];
}
else
{
sA[threadIdx.y][threadIdx.x] = 0.0; //printf(" SA ! %d, %d ", Row, threadIdx.x + (k * Tile_size) );
}
if (Col < numBColumns && (threadIdx.y + k * Tile_size) < numBRows)//Copy Data to Tile from Matrix (Global Memory to Shared Memory)
{
sB[threadIdx.y][threadIdx.x] = B[(threadIdx.y + k * Tile_size) * numBColumns + Col];
}
else
{
sB[threadIdx.y][threadIdx.x] = 0.0; //printf(" SB ! %d, %d ", Col, (threadIdx.y + k * Tile_size));
}
__syncthreads();
        for (int j = 0; j < Tile_size; ++j)//Multiplying Elements present in tile
        {
            Cvalue += sA[threadIdx.y][j] * sB[j][threadIdx.x];
        }
        __syncthreads();//Wait until every thread has finished with this tile before it is overwritten in the next iteration
    }
if (Row < numCRows && Col < numCColumns)//Saving Final result into Matrix C
{
C[Row * numCColumns + Col] = Cvalue;
}
}
//*************************************************************
//*************************************************************
void Print_Mat(int Row, int Col, float* Mat)//Function To print the Matrix
{
for (int i = 0; i < Row * Col; i++)
{
float temp = * (Mat + i);
int temp2 = (int)temp;
printf("%d ", temp2);
if (((i+1) % Col) == 0 && i>2)
{
printf("\n");
}
}
}//Function close
//*************************************************************
//Normal CPU Matrix Multiplication
void matMultiplyOnHost(float* A, float* B, float* C, int numARows,
int numAColumns, int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
for (int i = 0; i < numARows; i++)
{
for (int j = 0; j < numBColumns; j++)
{
C[i * numCColumns + j] = 0.0;
for (int k = 0; k < numBRows; k++)
{
C[i * numCColumns + j] += A[i * numAColumns + k] * B[k * numBColumns + j];
}
}
}
return;
}
void test();
__global__ void gpu_matrix_mult(float* a, float* b, float* c, int m, int n, int k);
__global__ void shared_matrix_mult(float* A, float* B, float* C, int m, int n, int k);
//*************************************************************
int main(int argc, char** argv) {
cout << "\n===========================test============================\n";
test();
cout << "\n===========================matrixMul============================\n";
float* hostA; // The A matrix
float* hostB; // The B matrix
float* hostC; // The output C matrix
float* hostComputedC;
float* deviceA;
float* deviceB;
float* deviceC;
// count the execution time
float shared_gpu_time_ms, gpu_elapsed_time_ms, cpu_elapsed_time_ms;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
int rowDimA, colDimA, colDimB;
//testGetOpt(argc, argv, rowDimA, colDimA, colDimB);
rowDimA = 512; colDimA = 1024; colDimB = 10;
numARows = 512; numAColumns = 1024; numBRows = 1024; numBColumns = 640;
printf("Zehui Xie\n rowDimA: %d colDimA: %d colDimB: %d\n", numARows, numAColumns, numBColumns);
// MxN = MxK * KxN
int M = numARows; int K = numAColumns; int N = numBColumns;
numCRows = M; numCColumns = N;
hostA = (float*)malloc(sizeof(float) * numARows * numAColumns);
hostB = (float*)malloc(sizeof(float) * numBRows * numBColumns);
for (int i = 0; i < numARows * numAColumns; i++)//Matrix Initialization
{
hostA[i] = 1.0;
}
for (int i = 0; i < numBRows * numBColumns; i++)
{
hostB[i] = 1.0;
}
//printf("\nMatrix A Values:\n");
//Print_Mat(numARows, numAColumns, hostA);//Function Call
//printf("\n\nMatrix B Values:\n");
//Print_Mat(numBRows, numBColumns, hostB);//Function Call
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
hostC = (float*)malloc(sizeof(float) * numCRows * numCColumns);
hostComputedC = (float*)malloc(sizeof(float) * numCRows * numCColumns);
// Allocating GPU memory
(hipMalloc((void**)&deviceA, sizeof(float) * numARows * numAColumns));
(hipMalloc((void**)&deviceB, sizeof(float) * numBRows * numBColumns));
(hipMalloc((void**)&deviceC, sizeof(float) * numCRows * numCColumns));
// Copy memory to the GPU
(hipMemcpy(deviceA, hostA, sizeof(float) * numARows * numAColumns, hipMemcpyHostToDevice));
(hipMemcpy(deviceB, hostB, sizeof(float) * numBRows * numBColumns, hipMemcpyHostToDevice));
// Initialize the grid and block dimensions
dim3 dimGrid((numCColumns / Tile_size) + 1, (numCRows / Tile_size) + 1, 1);//Number of Blocks required
dim3 dimBlock(Tile_size, Tile_size, 1);//Number of threads in each block
// start to count execution time of GPU without using Shared Memory version
hipEventRecord(start, 0);
//@@ Launch the GPU Kernel here
for (int i = 0; i < 10; i++)
{
matrixMultiply << <dimGrid, dimBlock >> > (deviceA, deviceB, deviceC,
numARows, numAColumns,
numBRows, numBColumns,
numCRows, numCColumns);
}
hipDeviceSynchronize();//To synchronize the device
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// compute time elapse on GPU computing
hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
gpu_elapsed_time_ms = gpu_elapsed_time_ms / 10;
printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on GPU without shared memory: %f ms.\n\n", numARows, numAColumns, numBRows, numBColumns, gpu_elapsed_time_ms);
hipError_t err1 = hipPeekAtLastError();//To capture last error in function call
// Copy the results in GPU memory back to the CPU
(hipMemcpy(hostC, deviceC, sizeof(float) * numCRows * numCColumns, hipMemcpyDeviceToHost));
//printf("\nMatrix C From Device\n");
//Print_Mat(numCRows, numCColumns, hostC);//Function Call
matMultiplyOnHost(hostA, hostB, hostComputedC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
//printf("\nMatrix C From Host\n");
//Print_Mat(numCRows, numCColumns, hostComputedC);//Function Call
for (int i = 0; i < numCColumns * numCRows; i++)//Compare both the result matrices 1. MatrixMultiplyonHost 2. MatrixMultiplyonDevice
{
if (hostComputedC[i] != hostC[i])
{
printf("Mismatch at Row = %d Col = %d hostComputed[] = %f --device[] %f\n", i / numCColumns, i % numCColumns, hostComputedC[i], hostC[i]);
return 0;
}
}
printf("res correct!");
double flopsPerMatrixMul = 2.0 * static_cast<double>(numARows) *
static_cast<double>(numAColumns) *
static_cast<double>(numBColumns);
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
(gpu_elapsed_time_ms / 1000.0f);
printf(
"\nPerformance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f \n" ,
gigaFlops,
gpu_elapsed_time_ms ,
flopsPerMatrixMul);
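// For instance, with the sizes used here flopsPerMatrixMul = 2 * 512 * 1024 * 640
// = 671,088,640 FLOPs, so a kernel time of 1.0 ms would correspond to roughly
// 0.671e9 / 1.0e-3 = 671 GFLOP/s in the formula above.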
cout << "\n===========================matrixMul_SharedMemory============================\n";
// start to count execution time of GPU with Shared Memory version
hipEventRecord(start, 0);
for (int i = 0; i < 10; i++)
{
matrixMultiplyShared << <dimGrid, dimBlock >> > (deviceA, deviceB, deviceC,
numARows, numAColumns,
numBRows, numBColumns,
numCRows, numCColumns);
}
hipDeviceSynchronize();//To synchronize the device
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// compute time elapse on GPU computing
hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
gpu_elapsed_time_ms = gpu_elapsed_time_ms / 10;
printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on GPU with shared memory: %f ms.\n\n", numARows, numAColumns, numBRows, numBColumns, gpu_elapsed_time_ms);
err1 = hipPeekAtLastError();//To capture last error in function call
// Copy the results in GPU memory back to the CPU
(hipMemcpy(hostC, deviceC, sizeof(float) * numCRows * numCColumns, hipMemcpyDeviceToHost));
matMultiplyOnHost(hostA, hostB, hostComputedC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
for (int i = 0; i < numCColumns * numCRows; i++)//Compare both the result matrices 1. MatrixMultiplyonHost 2. MatrixMultiplyonDevice
{
if (hostComputedC[i] != hostC[i])
{
printf("Mismatch at Row = %d Col = %d hostComputed[] = %f --device[] %f\n", i / numCColumns, i % numCColumns, hostComputedC[i], hostC[i]);
return 0;
}
}
printf("res correct!");
gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
(gpu_elapsed_time_ms / 1000.0f);
printf(
"\nPerformance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f \n",
gigaFlops,
gpu_elapsed_time_ms,
flopsPerMatrixMul);
///////////end
//exit(0);
// Free the GPU memory
(hipFree(deviceA));
(hipFree(deviceB));
(hipFree(deviceC));
//Free the Pointer Memory
free(hostA);
free(hostB);
free(hostC);
//free(hostComputedC);
//exit(0);
return 0;
}
void test() {
float* hostA; // The A matrix
float* hostB; // The B matrix
float* hostC; // The output C matrix
float* hostComputedC;
float* deviceA;
float* deviceB;
float* deviceC;
auto start = high_resolution_clock::now();
auto stop = high_resolution_clock::now();
auto duration = duration_cast<nanoseconds>(stop - start);
int rowDimA, colDimA, colDimB;
//testGetOpt(argc, argv, rowDimA, colDimA, colDimB);
rowDimA = 10; colDimA = 20; colDimB = 15;
numARows = 5; numAColumns = 10; numBRows = 10; numBColumns = 8;
printf("Zehui Xie\n rowDimA: %d colDimA: %d colDimB: %d\n", numARows, numAColumns, numBColumns);
// MxN = MxK * KxN
int M = numARows; int K = numAColumns; int N = numBColumns;
numCRows = M; numCColumns = N;
hostA = (float*)malloc(sizeof(float) * numARows * numAColumns);
hostB = (float*)malloc(sizeof(float) * numBRows * numBColumns);
for (int i = 0; i < numARows * numAColumns; i++)//Matrix Initialization
{
hostA[i] = 1.0;
}
for (int i = 0; i < numBRows * numBColumns; i++)
{
hostB[i] = 1.0;
}
printf("\nMatrix A Values:\n");
Print_Mat(numARows, numAColumns, hostA);//Function Call
printf("\n\nMatrix B Values:\n");
Print_Mat(numBRows, numBColumns, hostB);//Function Call
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
hostC = (float*)malloc(sizeof(float) * numCRows * numCColumns);
hostComputedC = (float*)malloc(sizeof(float) * numCRows * numCColumns);
// Allocating GPU memory
(hipMalloc((void**)&deviceA, sizeof(float) * numARows * numAColumns));
(hipMalloc((void**)&deviceB, sizeof(float) * numBRows * numBColumns));
(hipMalloc((void**)&deviceC, sizeof(float) * numCRows * numCColumns));
// Copy memory to the GPU
(hipMemcpy(deviceA, hostA, sizeof(float) * numARows * numAColumns, hipMemcpyHostToDevice));
(hipMemcpy(deviceB, hostB, sizeof(float) * numBRows * numBColumns, hipMemcpyHostToDevice));
// Initialize the grid and block dimensions
dim3 dimGrid((numCColumns / Tile_size) + 1, (numCRows / Tile_size) + 1, 1);//Number of Blocks required
dim3 dimBlock(Tile_size, Tile_size, 1);//Number of threads in each block
//@@ Launch the GPU Kernel here
matrixMultiplyShared << <dimGrid, dimBlock >> > (deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
hipError_t err1 = hipPeekAtLastError();//To capture last error in function call
hipDeviceSynchronize();//To synchronize the device
// Copy the results in GPU memory back to the CPU
(hipMemcpy(hostC, deviceC, sizeof(float) * numCRows * numCColumns, hipMemcpyDeviceToHost));
printf("\nMatrix C From Device\n");
Print_Mat(numCRows, numCColumns, hostC);//Function Call
matMultiplyOnHost(hostA, hostB, hostComputedC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
printf("\nMatrix C From Host\n");
Print_Mat(numCRows, numCColumns, hostComputedC);//Function Call
for (int i = 0; i < numCColumns * numCRows; i++)//Compare both the result matrices 1. MatrixMultiplyonHost 2. MatrixMultiplyonDevice
{
if (hostComputedC[i] != hostC[i])
{
printf("Mismatch at Row = %d Col = %d hostComputed[] = %f --device[] %f\n", i / numCColumns, i % numCColumns, hostComputedC[i], hostC[i]);
break;
}
}
// Free the GPU memory
(hipFree(deviceA));
(hipFree(deviceB));
(hipFree(deviceC));
//Free the Pointer Memory
free(hostA);
free(hostB);
free(hostC);
free(hostComputedC);
}
|
5ccfae989c9a85d21b6a0fc476ad42ce5c505a3e.cu
|
//#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <algorithm>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include"cuda.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include <vector>
using std::cout;
using std::generate;
using std::vector;
#include <chrono>
using std::cout;
using std::generate;
using std::vector;
using namespace std;
using namespace std::chrono;
#define Tile_size 16
#define TILE_SIZE 16
//Function to handle any errors that occur in the function calls
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C
int numCColumns; // number of columns in the matrix C
__global__ void matrixMultiply(float* A, float* B, float* C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
int Row = blockIdx.y * blockDim.y + threadIdx.y;
int Col = blockIdx.x * blockDim.x + threadIdx.x;
if (numAColumns != numBRows) return;
if ((Row < numARows) && (Col < numBColumns)) {
float Cvalue = 0;
for (int k = 0; k < numAColumns; ++k)
Cvalue += A[Row * numAColumns + k] * B[k * numBColumns + Col];
C[Row * numCColumns + Col] = Cvalue;
}
}
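// In this naive kernel each thread streams numAColumns elements of A and numBRows elements
// of B directly from global memory (2 * 1024 loads per output element for the 512x1024x640
// case below); the tiled kernel that follows reuses every loaded element Tile_size times
// through shared memory, cutting global-memory traffic by roughly a factor of 16.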
// Compute C = A * B
//*************************************************************
//Kernel for shared memory/ Tiled execution
__global__ void matrixMultiplyShared(float* A, float* B, float* C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
__shared__ float sA[Tile_size][Tile_size]; // Tile size to store elements in shared memory
__shared__ float sB[Tile_size][Tile_size];
int Row = blockDim.y * blockIdx.y + threadIdx.y; //To generate ids of threads.
int Col = blockDim.x * blockIdx.x + threadIdx.x;
float Cvalue = 0.0;
sA[threadIdx.y][threadIdx.x] = 0.0;
sB[threadIdx.y][threadIdx.x] = 0.0;
for (int k = 0; k < (((numAColumns - 1) / Tile_size) + 1); k++)
{
if ((Row < numARows) && (threadIdx.x + (k * Tile_size)) < numAColumns)//Copy Data to Tile from Matrix (Global Memory to Shared Memory)
{
sA[threadIdx.y][threadIdx.x] = A[(Row * numAColumns) + threadIdx.x + (k * Tile_size)];
}
else
{
sA[threadIdx.y][threadIdx.x] = 0.0; //printf(" SA ! %d, %d ", Row, threadIdx.x + (k * Tile_size) );
}
if (Col < numBColumns && (threadIdx.y + k * Tile_size) < numBRows)//Copy Data to Tile from Matrix (Global Memory to Shared Memory)
{
sB[threadIdx.y][threadIdx.x] = B[(threadIdx.y + k * Tile_size) * numBColumns + Col];
}
else
{
sB[threadIdx.y][threadIdx.x] = 0.0; //printf(" SB ! %d, %d ", Col, (threadIdx.y + k * Tile_size));
}
__syncthreads();
for (int j = 0; j < Tile_size; ++j)//Multiplying Elements present in tile
{
Cvalue += sA[threadIdx.y][j] * sB[j][threadIdx.x];
}
__syncthreads();//Make sure every thread is done with the current tiles before they are overwritten in the next iteration
}
if (Row < numCRows && Col < numCColumns)//Saving Final result into Matrix C
{
C[Row * numCColumns + Col] = Cvalue;
}
}
//*************************************************************
//*************************************************************
void Print_Mat(int Row, int Col, float* Mat)//Function To print the Matrix
{
for (int i = 0; i < Row * Col; i++)
{
float temp = * (Mat + i);
int temp2 = (int)temp;
printf("%d ", temp2);
if (((i+1) % Col) == 0 && i>2)
{
printf("\n");
}
}
}//Function close
//*************************************************************
//Normal CPU Matrix Multiplication
void matMultiplyOnHost(float* A, float* B, float* C, int numARows,
int numAColumns, int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
for (int i = 0; i < numARows; i++)
{
for (int j = 0; j < numBColumns; j++)
{
C[i * numCColumns + j] = 0.0;
for (int k = 0; k < numBRows; k++)
{
C[i * numCColumns + j] += A[i * numAColumns + k] * B[k * numBColumns + j];
}
}
}
return;
}
void test();
__global__ void gpu_matrix_mult(float* a, float* b, float* c, int m, int n, int k);
__global__ void shared_matrix_mult(float* A, float* B, float* C, int m, int n, int k);
//*************************************************************
int main(int argc, char** argv) {
cout << "\n===========================test============================\n";
test();
cout << "\n===========================matrixMul============================\n";
float* hostA; // The A matrix
float* hostB; // The B matrix
float* hostC; // The output C matrix
float* hostComputedC;
float* deviceA;
float* deviceB;
float* deviceC;
// count the execution time
float shared_gpu_time_ms, gpu_elapsed_time_ms, cpu_elapsed_time_ms;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int rowDimA, colDimA, colDimB;
//testGetOpt(argc, argv, rowDimA, colDimA, colDimB);
rowDimA = 512; colDimA = 1024; colDimB = 10;
numARows = 512; numAColumns = 1024; numBRows = 1024; numBColumns = 640;
printf("Zehui Xie\n rowDimA: %d colDimA: %d colDimB: %d\n", numARows, numAColumns, numBColumns);
// MxN = MxK * KxN
int M = numARows; int K = numAColumns; int N = numBColumns;
numCRows = M; numCColumns = N;
hostA = (float*)malloc(sizeof(float) * numARows * numAColumns);
hostB = (float*)malloc(sizeof(float) * numBRows * numBColumns);
for (int i = 0; i < numARows * numAColumns; i++)//Matrix Initialization
{
hostA[i] = 1.0;
}
for (int i = 0; i < numBRows * numBColumns; i++)
{
hostB[i] = 1.0;
}
//printf("\nMatrix A Values:\n");
//Print_Mat(numARows, numAColumns, hostA);//Function Call
//printf("\n\nMatrix B Values:\n");
//Print_Mat(numBRows, numBColumns, hostB);//Function Call
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
hostC = (float*)malloc(sizeof(float) * numCRows * numCColumns);
hostComputedC = (float*)malloc(sizeof(float) * numCRows * numCColumns);
// Allocating GPU memory
(cudaMalloc((void**)&deviceA, sizeof(float) * numARows * numAColumns));
(cudaMalloc((void**)&deviceB, sizeof(float) * numBRows * numBColumns));
(cudaMalloc((void**)&deviceC, sizeof(float) * numCRows * numCColumns));
// Copy memory to the GPU
(cudaMemcpy(deviceA, hostA, sizeof(float) * numARows * numAColumns, cudaMemcpyHostToDevice));
(cudaMemcpy(deviceB, hostB, sizeof(float) * numBRows * numBColumns, cudaMemcpyHostToDevice));
// Initialize the grid and block dimensions
dim3 dimGrid((numCColumns / Tile_size) + 1, (numCRows / Tile_size) + 1, 1);//Number of Blocks required
dim3 dimBlock(Tile_size, Tile_size, 1);//Number of threads in each block
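// Note on sizing: (numCColumns / Tile_size) + 1 launches a spare block column whenever the
// dimension is an exact multiple of the tile (640 / 16 = 40, so 41 columns; 512 / 16 = 32,
// so 33 rows). The bounds checks inside the kernels make the extra blocks harmless; a ceil
// division such as (numCColumns + Tile_size - 1) / Tile_size would avoid them.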
// start to count execution time of GPU without using Shared Memory version
cudaEventRecord(start, 0);
//@@ Launch the GPU Kernel here
for (int i = 0; i < 10; i++)
{
matrixMultiply << <dimGrid, dimBlock >> > (deviceA, deviceB, deviceC,
numARows, numAColumns,
numBRows, numBColumns,
numCRows, numCColumns);
}
cudaDeviceSynchronize();//To synchronize the device
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// compute time elapse on GPU computing
cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
gpu_elapsed_time_ms = gpu_elapsed_time_ms / 10;
printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on GPU without shared memory: %f ms.\n\n", numARows, numAColumns, numBRows, numBColumns, gpu_elapsed_time_ms);
cudaError_t err1 = cudaPeekAtLastError();//To capture last error in function call
// Copy the results in GPU memory back to the CPU
(cudaMemcpy(hostC, deviceC, sizeof(float) * numCRows * numCColumns, cudaMemcpyDeviceToHost));
//printf("\nMatrix C From Device\n");
//Print_Mat(numCRows, numCColumns, hostC);//Function Call
matMultiplyOnHost(hostA, hostB, hostComputedC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
//printf("\nMatrix C From Host\n");
//Print_Mat(numCRows, numCColumns, hostComputedC);//Function Call
for (int i = 0; i < numCColumns * numCRows; i++)//Compare both the result matrices 1. MatrixMultiplyonHost 2. MatrixMultiplyonDevice
{
if (hostComputedC[i] != hostC[i])
{
printf("Mismatch at Row = %d Col = %d hostComputed[] = %f --device[] %f\n", i / numCColumns, i % numCColumns, hostComputedC[i], hostC[i]);
return 0;
}
}
printf("res correct!");
double flopsPerMatrixMul = 2.0 * static_cast<double>(numARows) *
static_cast<double>(numAColumns) *
static_cast<double>(numBColumns);
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
(gpu_elapsed_time_ms / 1000.0f);
printf(
"\nPerformance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f \n" ,
gigaFlops,
gpu_elapsed_time_ms ,
flopsPerMatrixMul);
cout << "\n===========================matrixMul_SharedMemory============================\n";
// start to count execution time of GPU with Shared Memory version
cudaEventRecord(start, 0);
for (int i = 0; i < 10; i++)
{
matrixMultiplyShared << <dimGrid, dimBlock >> > (deviceA, deviceB, deviceC,
numARows, numAColumns,
numBRows, numBColumns,
numCRows, numCColumns);
}
cudaDeviceSynchronize();//To synchronize the device
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// compute time elapse on GPU computing
cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
gpu_elapsed_time_ms = gpu_elapsed_time_ms / 10;
printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on GPU with shared memory: %f ms.\n\n", numARows, numAColumns, numBRows, numBColumns, gpu_elapsed_time_ms);
err1 = cudaPeekAtLastError();//To capture last error in function call
// Copy the results in GPU memory back to the CPU
(cudaMemcpy(hostC, deviceC, sizeof(float) * numCRows * numCColumns, cudaMemcpyDeviceToHost));
matMultiplyOnHost(hostA, hostB, hostComputedC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
for (int i = 0; i < numCColumns * numCRows; i++)//Compare both the result matrices 1. MatrixMultiplyonHost 2. MatrixMultiplyonDevice
{
if (hostComputedC[i] != hostC[i])
{
printf("Mismatch at Row = %d Col = %d hostComputed[] = %f --device[] %f\n", i / numCColumns, i % numCColumns, hostComputedC[i], hostC[i]);
return 0;
}
}
printf("res correct!");
gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
(gpu_elapsed_time_ms / 1000.0f);
printf(
"\nPerformance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f \n",
gigaFlops,
gpu_elapsed_time_ms,
flopsPerMatrixMul);
///////////end
//exit(0);
// Free the GPU memory
(cudaFree(deviceA));
(cudaFree(deviceB));
(cudaFree(deviceC));
//Free the Pointer Memory
free(hostA);
free(hostB);
free(hostC);
//free(hostComputedC);
//exit(0);
return 0;
}
void test() {
float* hostA; // The A matrix
float* hostB; // The B matrix
float* hostC; // The output C matrix
float* hostComputedC;
float* deviceA;
float* deviceB;
float* deviceC;
auto start = high_resolution_clock::now();
auto stop = high_resolution_clock::now();
auto duration = duration_cast<nanoseconds>(stop - start);
int rowDimA, colDimA, colDimB;
//testGetOpt(argc, argv, rowDimA, colDimA, colDimB);
rowDimA = 10; colDimA = 20; colDimB = 15;
numARows = 5; numAColumns = 10; numBRows = 10; numBColumns = 8;
printf("Zehui Xie\n rowDimA: %d colDimA: %d colDimB: %d\n", numARows, numAColumns, numBColumns);
// MxN = MxK * KxN
int M = numARows; int K = numAColumns; int N = numBColumns;
numCRows = M; numCColumns = N;
hostA = (float*)malloc(sizeof(float) * numARows * numAColumns);
hostB = (float*)malloc(sizeof(float) * numBRows * numBColumns);
for (int i = 0; i < numARows * numAColumns; i++)//Matrix Initialization
{
hostA[i] = 1.0;
}
for (int i = 0; i < numBRows * numBColumns; i++)
{
hostB[i] = 1.0;
}
printf("\nMatrix A Values:\n");
Print_Mat(numARows, numAColumns, hostA);//Function Call
printf("\n\nMatrix B Values:\n");
Print_Mat(numBRows, numBColumns, hostB);//Function Call
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
hostC = (float*)malloc(sizeof(float) * numCRows * numCColumns);
hostComputedC = (float*)malloc(sizeof(float) * numCRows * numCColumns);
// Allocating GPU memory
(cudaMalloc((void**)&deviceA, sizeof(float) * numARows * numAColumns));
(cudaMalloc((void**)&deviceB, sizeof(float) * numBRows * numBColumns));
(cudaMalloc((void**)&deviceC, sizeof(float) * numCRows * numCColumns));
// Copy memory to the GPU
(cudaMemcpy(deviceA, hostA, sizeof(float) * numARows * numAColumns, cudaMemcpyHostToDevice));
(cudaMemcpy(deviceB, hostB, sizeof(float) * numBRows * numBColumns, cudaMemcpyHostToDevice));
// Initialize the grid and block dimensions
dim3 dimGrid((numCColumns / Tile_size) + 1, (numCRows / Tile_size) + 1, 1);//Number of Blocks required
dim3 dimBlock(Tile_size, Tile_size, 1);//Number of threads in each block
//@@ Launch the GPU Kernel here
matrixMultiplyShared << <dimGrid, dimBlock >> > (deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
cudaError_t err1 = cudaPeekAtLastError();//To capture last error in function call
cudaDeviceSynchronize();//To synchronize the device
// Copy the results in GPU memory back to the CPU
(cudaMemcpy(hostC, deviceC, sizeof(float) * numCRows * numCColumns, cudaMemcpyDeviceToHost));
printf("\nMatrix C From Device\n");
Print_Mat(numCRows, numCColumns, hostC);//Function Call
matMultiplyOnHost(hostA, hostB, hostComputedC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
printf("\nMatrix C From Host\n");
Print_Mat(numCRows, numCColumns, hostComputedC);//Function Call
for (int i = 0; i < numCColumns * numCRows; i++)//Compare both the result matrices 1. MatrixMultiplyonHost 2. MatrixMultiplyonDevice
{
if (hostComputedC[i] != hostC[i])
{
printf("Mismatch at Row = %d Col = %d hostComputed[] = %f --device[] %f\n", i / numCColumns, i % numCColumns, hostComputedC[i], hostC[i]);
break;
}
}
// Free the GPU memory
(cudaFree(deviceA));
(cudaFree(deviceB));
(cudaFree(deviceC));
//Free the Pointer Memory
free(hostA);
free(hostB);
free(hostC);
free(hostComputedC);
}
|
6e7712149df7d4163ae809e65a11be58464c3732.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (email: [email protected]) 2018-04-26
* $Update by: Lin Ye (email: [email protected]) 2019-07-01 float16 added
*/
#include "LogSoftmax.h"
#include "LogSoftmax.cuh"
#include "Loss.cuh"
#include "../core/arithmetic/MultiplyDim.h"
#include "../core/reduce/ReduceSum.cuh"
#include "../core/reduce/ReduceMax.cuh"
#include "../core/shape/IsSameShaped.h"
#include "../XDevice.h"
#include <device_launch_parameters.h>
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_ROCM
/*
log scale softmax y = log(e^x / \sum_{i} e^{x_i}) (Cuda version)
>> x - input vector
>> y - result
>> leadDim - leading dimension (along which we perform reduction)
*/
void _CudaLogSoftmax(const XTensor * x, XTensor * y, int leadDim)
{
ShowNTErrors("You should call LogSoftmax instead!");
}
/*
log softmax forward computation (Cuda kernel)
for each column j, let y_{i,j} and x_{i,j} are the output
and state value for the i-th element of column j. We have
y_{i,j} = log(e^x_{i,j} / \sum_{i} e^{x_{i,j})
>> x - input tensor (in matrix)
>> max - the max value for each column j
>> sum - \sum_{i} e^{x_{i,j}) for each column j
>> y - output tensor (in matrix)
>> rowNum - row number of the matrix
>> colNum - column number of the matrix
*/
template <class T ,TENSOR_DATA_TYPE dataType>
__global__
void KernelLogSoftmaxComputeByRow(T * x, T * max, T * sum, T * y, int rowNum, int colNum)
{
__shared__ T inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ T inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK];
int i = blockDim.y * blockIdx.y + threadIdx.y;
int j = blockDim.x * blockIdx.x + threadIdx.x;
/* we keep the sum and max number in the shared memory for each column */
if (threadIdx.y == 0) {
inputSum[threadIdx.x] = sum[j];
inputMax[threadIdx.x] = max[j];
}
/* synchronize to make sure the values of max and sum are loaded */
__syncthreads();
/* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */
if (i < rowNum && j < colNum) {
int key = i * colNum + j;
if (dataType == DEFAULT_DTYPE) {
DTYPE r = log((DTYPE)exp((DTYPE)(x[key] - inputMax[threadIdx.x])) / (DTYPE)inputSum[threadIdx.x]);
if (isnan(r))
r = LOGPROB_MIN;
if (isinf(r))
r = LOGPROB_MIN;
y[key] = MAX(r, LOGPROB_MIN);
}
else if (dataType == X_FLOAT16) {
#if __CUDA_ARCH__ >= 600
half r = hlog((half)hexp(x[key] - inputMax[threadIdx.x]) / (half)inputSum[threadIdx.x]);
y[key] = r;
#endif
}
}
}
/*
log softmax forward computation (Cuda kernel)
for each row i, let y_{i,j} and x_{i,j} are the output
and state value for the j-th element of row i. We have
y_{i,j} = log(e^x_{i,j} / \sum_{j} e^{x_{i,j})
>> x - input tensor (in matrix)
>> max - the max value for each row i
>> sum - \sum_{j} e^{x_{i,j}) for each row i
>> y - output tensor (in matrix)
>> rowNum - row number of the matrix
>> colNum - column number of the matrix
*/
template <class T ,TENSOR_DATA_TYPE dataType>
__global__
void KernelLogSoftmaxComputeByCol(T * x, T * max, T * sum, T * y, int rowNum, int colNum)
{
__shared__ T inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ T inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK];
int i = blockDim.y * blockIdx.y + threadIdx.y;
int j = blockDim.x * blockIdx.x + threadIdx.x;
/* we keep the sum and max number in the shared memory for each row */
if (threadIdx.x == 0) {
inputSum[threadIdx.y] = sum[i];
inputMax[threadIdx.y] = max[i];
}
/* synchronize to make sure the values of max and sum are loaded */
__syncthreads();
/* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */
if (i < rowNum && j < colNum) {
int key = i * colNum + j;
if (dataType == DEFAULT_DTYPE) {
DTYPE r = log((DTYPE)exp((DTYPE)(x[key] - inputMax[threadIdx.y])) / (DTYPE)inputSum[threadIdx.y]);
/*if (r < LOGPROB_MIN)
{
printf("min %e %e, %e %e, %e %e\n", r, x[key] - inputMax[threadIdx.y], x[key], inputMax[threadIdx.y], exp(x[key] - inputMax[threadIdx.y]), inputSum[threadIdx.y]);
}*/
if (isnan(r))
r = LOGPROB_MIN;
if (isinf(r))
r = LOGPROB_MIN;
y[key] = MAX(r, LOGPROB_MIN);
}
else if (dataType == X_FLOAT16) {
#if __CUDA_ARCH__ >= 600
half r = hlog((half)hexp(x[key] - inputMax[threadIdx.y]) / (half)inputSum[threadIdx.y]);
y[key] = r;
#endif
}
}
}
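/* Numeric illustration of the formula above: for a row x = [1, 2, 3] we get max = 3 and
sum = e^-2 + e^-1 + e^0, which is about 1.5032, so y is about [-2.408, -1.408, -0.408];
exp(y) = [0.090, 0.245, 0.665] sums to 1, i.e. a proper probability distribution. */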
/*
log scale softmax y = log(e^x / \sum_{i} e^{x_i}) (Cuda version)
>> x - input vector
>> y - result
>> leadDim - leading dimension (along which we perform reduction)
>> sum - \sum_{i} e^{x_i}
>> max - \max_{i} e^{x_i}
*/
void _CudaLogSoftmaxSumMax(XTensor * x, XTensor * y, int leadDim, XTensor * sum, XTensor * max)
{
CheckNTErrors((x->devID >= 0), "Forward computation of log softmax must be run on GPUs.");
CheckNTErrors((x->devID == y->devID), "Input tensors must be on the same GPU.");
CheckNTErrors((x->order == y->order), "Input tensors must be of the same size.");
CheckNTErrors((x->order == 2), "Input tensors must be of order 2.");
int devIDBackup;
ProtectCudaDev(x->devID, devIDBackup);
if (x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE) {
int gridSize[3], blockSize[3];
int n = x->dimSize[0];
int m = x->dimSize[1];
/* allocate the buffer */
DTYPE * maxData = (DTYPE*)max->data;
DTYPE * sumData = (DTYPE*)sum->data;
if (leadDim == 0) {
GDevs.GetCudaThread2D(x->devID, n, m, MAX_INT, gridSize, blockSize);
/* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */
hipLaunchKernelGGL(( KernelLogSoftmaxComputeByRow<DTYPE, DEFAULT_DTYPE>) , dim3(dim3(gridSize[1], gridSize[0])), dim3(dim3(blockSize[1], blockSize[0])), 0, 0,
(DTYPE*)x->data, maxData, sumData, (DTYPE*)y->data, n, m);
}
else {
GDevs.GetCudaThread2D(x->devID, m, n, MAX_INT, gridSize, blockSize);
/* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */
hipLaunchKernelGGL(( KernelLogSoftmaxComputeByCol<DTYPE, DEFAULT_DTYPE>) , dim3(dim3(gridSize[0], gridSize[1])), dim3(dim3(blockSize[0], blockSize[1])), 0, 0,
(DTYPE*)x->data, maxData, sumData, (DTYPE*)y->data, n, m);
}
}
else if (x->dataType == X_FLOAT16 && y->dataType == X_FLOAT16) {
#ifdef HALF_PRECISION
int gridSize[3], blockSize[3];
int n = x->dimSize[0];
int m = x->dimSize[1];
/* allocate the buffer */
__half * maxData = (half*)max->data;
__half * sumData = (half*)sum->data;
if (leadDim == 0) {
GDevs.GetCudaThread2D(x->devID, n, m, MAX_INT, gridSize, blockSize);
/* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */
hipLaunchKernelGGL(( KernelLogSoftmaxComputeByRow<half, X_FLOAT16>) , dim3(dim3(gridSize[1], gridSize[0])), dim3(dim3(blockSize[1], blockSize[0])), 0, 0,
(half*)x->data, maxData, sumData, (half *)y->data, n, m);
}
else {
GDevs.GetCudaThread2D(x->devID, m, n, MAX_INT, gridSize, blockSize);
/* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */
hipLaunchKernelGGL(( KernelLogSoftmaxComputeByCol<half, X_FLOAT16>) , dim3(dim3(gridSize[0], gridSize[1])), dim3(dim3(blockSize[0], blockSize[1])), 0, 0,
(half*)x->data, maxData, sumData, (half*)y->data, n, m);
}
#else
ShowNTErrors("Recompile the code with HALF_PRECISION!");
#endif
}
else {
ShowNTErrors("TODO!");
}
BacktoCudaDev(x->devID, devIDBackup);
}
/*
set dE/dx = exp(y)
>> dedy - dE/dy
>> dedx - dE/dx
>> y - output of the function
>> size - size of output
>> lossName - name of the loss function
*/
__global__
void KernelExpLoss(DTYPE * dedy, DTYPE * dedx, DTYPE * y, int size, LOSS_FUNCTION_NAME lossName)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
/* dE/dx_j = exp(y_j) */
if (lossName == CROSSENTROPY)
dedx[i] = exp(y[i]);
/* dE/dx_j = exp(y_j) */
else if (lossName == SQUAREDERROR)
dedx[i] = exp(y[i]);
else if (lossName == ONEHOTERROR)
dedx[i] = 0;
else
dedx[i] = 0;
}
}
/*
backward computation for log softmax
dE/dx = dE/dy * dy/dx
>> dedy - dE/dy
>> dedx - dE/dx
>> gold - gold standard to measure error (or loss)
>> y - output of the function
>> x - input of the function
>> size - size of input/output
>> lossName - name of the loss function
*/
__global__
void KernelLogSoftmaxBackwardDEDS(DTYPE * dedy, DTYPE * dedx, DTYPE * gold, DTYPE * y, DTYPE * x,
int size, LOSS_FUNCTION_NAME lossName)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
DTYPE r = 0;
/* dE/ds_j = exp(y_j) */
if (lossName == CROSSENTROPY)
r = -gold[i] + exp(y[i]);
/* dE/ds_j = exp(y_j) */
else if (lossName == SQUAREDERROR)
r = -gold[i] + exp(y[i]);
else if (lossName == ONEHOTERROR) {
if (gold[i] == 1.0F)
r = -gold[i] + exp(y[i]);
else
r = 0;
}
else {
r = dedy[i];
}
if (isnan(r))
r = 0;
if (isinf(r))
r = 0;
dedx[i] = r;
}
}
/*
backward computation for log softmax (sparse matrices) for each column
dE/dx_j += -gold_j
(for dE/dx = dE/dy * dy/dx)
>> dedy - dE/dy
>> dedx - dE/dx
>> gold - gold standard to measure error (or loss)
>> y - output of the function
>> x - input of the function
>> rowNum - row number of the matrix
>> colNum - column number of the matrix
>> gNonZeroNum - number of non-zero entries in the sparse gold tensor
>> lossName - name of the loss function
*/
__global__
void KernelLogSoftmaxBackwardDEDSSparseByRow(DTYPE * dedy, DTYPE * dedx, void * gold, DTYPE * y, DTYPE * x,
int rowNum, int colNum, int gNonZeroNum, LOSS_FUNCTION_NAME lossName)
{
int tupleSize = sizeof(int) + sizeof(DTYPE);
int k = blockDim.x * blockIdx.x + threadIdx.x;
if (k < gNonZeroNum) {
/* load the sub-block of the sparse matrix b */
int key = *(int*)((char*)gold + tupleSize * k);
int ni = key / colNum;
int mi = key % colNum;
DTYPE value = *(DTYPE*)((char*)gold + tupleSize * k + sizeof(int));
if (lossName == CROSSENTROPY)
dedx[colNum * ni + mi] += -value;
else if (lossName == SQUAREDERROR)
dedx[colNum * ni + mi] += -value;
else if (lossName == ONEHOTERROR) {
int offset = colNum * ni + mi;
if (value == 1.0F)
dedx[offset] += (-value + exp(y[offset]));
//dedx[offset] += -value * 0.005;
}
}
}
/*
backward computation for dense matrics with default data type
dE/dx = dE/dy * dy/dx
log softmax: y_i = log(e^{x_i} / \sum_{k} e^{x_k})
dy_i/dx_j
= d{log(e^{x_i} / \sum_{k} e^{x_k})}/dx_j
= d{log(e^{x_i})}/dx_j - d{log(\sum_{k} e^{x_k})}/dx_j
= \delta(i,j) - e^{x_j}/\sum_{k} e^{x_k})
= \delta(i,j) - exp(y_j)
where \delta(i,j) = 1 if i = j, and \delta(i,j) = 0 otherwise
if loss E is defined as cross entropy, i.e., E = -\sum_{k} (gold_k * y_k), we have
dE/dy_i = -gold_i
(where {gold_k} is the gold standard distribution)
then
dE/dx_j
= \sum_{i} {dE/dy_i * dy_i/dx_j}
= \sum_{i} {-gold_i * (\delta(i,j) - exp(y_j))}
= \sum_{i} {-gold_i * \delta{i,j)} + \sum_{i} {gold_i * exp(y_j)}
= -gold_i * \delta(i,j) + \sum_{i} {gold_i * exp(y_j)}
= -gold_j + exp(y_j)
Note: gold_i is a distribution, i.e., \sum_{i} gold_i = 1
if gold is with a one-hot representation (gold_i = 1 for only one dimension),
we can reformulize it as dE/dx_j = -\delta(i,j) + exp(y_j)
There are two ways to implement this process.
Method 1. we compute dE/dy and dy/dx resepectively, and then reach dE/dx by dE/dx = dE/dy * dy/dx
(or more precisely dE/dx_j = \sum_{i} {dE/dy_i * dy_i/dx_j})
Method 2. we compute dE/dx (or dE/dx_j) in a single step, rather than resorting to the
sub-models dE/dy and dy/dx. We can do this by using dE/dx_j = -gold_j + exp(y_j)
Here we choose Method 2, i.e., we straightforwardly compute dE/dx_j by
dE/dx_j = -gold_j + exp(y_j)
(or dE/dx_j = -\delta(i,j) + exp(y_j) for a Maximum A Posteriori Estimation (MAP))
Method 1 is also fine but is more time consuming due to the summation over dimensions.
Note that this method is not good for the standard version softmax when working with
the cross entropy loss. Because it is numerical unstable. When we use a usual method to
define softmax, we have softmax: y_i = log(e^{x_i} / \sum_{k} e^{x_k}). It is trivial to
know that dy_i/dx_j = y_i * \delta(i,j) - y_i * y_j. As y_i and y_j could be a small number,
y_i * y_i would result in a much smaller one with a risk of lossing precision. This is even
worse we multiply dy_i/dx_j with dE/dy_i. So it is in general to use log softmax instead for
better numerical stability.
>> gold - gold standard to measure error (or loss)
>> y - output of the function
>> x - input of the function
>> dedy - dE/dy
>> deds - dE/dx
>> lossName - type of loss function, e.g., cross entropy
>> leadDim - leading dimension (along which we perform reduction)
*/
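/* Numeric check of dE/dx_j = -gold_j + exp(y_j): if exp(y) = [0.090, 0.245, 0.665]
(a log-softmax output mapped back to probabilities) and gold is the one-hot vector
[0, 0, 1], then dE/dx = [0.090, 0.245, -0.335]; the components sum to zero, which is the
expected property of the cross-entropy/log-softmax gradient. */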
void _CudaLogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
XTensor * dedy, XTensor * dedx,
XTensor * padding, int leadDim,
LOSS_FUNCTION_NAME lossName)
{
leadDim = leadDim < 0 ? y->order - 1 : leadDim;
CheckNTErrors((x->devID >= 0), "Backward computation of log softmax must be run on GPUs.");
CheckNTErrors((x->devID == y->devID && gold->devID == y->devID),
"Tensors used in log softmax are not on the same GPU.");
CheckNTErrors((gold != NULL), "No x gold standard is found!");
int dimensionSize = y->dimSize[leadDim];
int stride = 1;
int blockSize = 1;
int blockNum = 1;
for (int i = leadDim + 1; i < y->order; i++)
stride *= y->dimSize[i];
blockSize = stride * dimensionSize;
blockNum = y->unitNum / blockSize;
int devIDBackup;
ProtectCudaDev(x->devID, devIDBackup);
if (x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE) {
CheckNTErrors((lossName == CROSSENTROPY || lossName == SQUAREDERROR || lossName == NOLOSS),
"Unknown loss function.");
int cudaGridSize[3], cudaBlockSize[3];
if (lossName == CROSSENTROPY || lossName == SQUAREDERROR) {
if (gold->isSparse) {
CheckNTErrors((gold->order == 2), "TODO!")
CheckNTErrors((leadDim == 0), "TODO!");
GDevs.GetCudaThread(x->devID, x->unitNum, cudaGridSize, cudaBlockSize);
/* dE/ds_j = exp(y_j) */
hipLaunchKernelGGL(( KernelExpLoss) , dim3(dim3(cudaGridSize[0])), dim3(dim3(cudaBlockSize[0])) , 0, 0,
NULL,
(DTYPE*)dedx->data,
(DTYPE*)y->data,
dimensionSize * stride,
lossName);
GDevs.GetCudaThread(x->devID, gold->unitNumNonZero, cudaGridSize, cudaBlockSize);
/* dE/ds_j += -gold_j */
hipLaunchKernelGGL(( KernelLogSoftmaxBackwardDEDSSparseByRow) , dim3(dim3(cudaGridSize[0])), dim3(dim3(cudaBlockSize[0])) , 0, 0,
NULL,
(DTYPE*)dedx->data,
(char*)gold->data + sizeof(int),
(DTYPE*)y->data,
(DTYPE*)x->data,
dedx->dimSize[0], dedx->dimSize[1], gold->unitNumNonZero, lossName);
}
else {
CheckNTErrors((_IsSameShaped(gold, y)), "The tensors must be of the same size!");
for (int k = 0; k < blockNum; k++) {
GDevs.GetCudaThread(x->devID, blockSize, cudaGridSize, cudaBlockSize);
/* dE/ds_j = -gold_j + exp(y_j) */
hipLaunchKernelGGL(( KernelLogSoftmaxBackwardDEDS) , dim3(dim3(cudaGridSize[0])), dim3(dim3(cudaBlockSize[0])) , 0, 0,
NULL,
(DTYPE*)dedx->data + k * blockSize,
(DTYPE*)gold->data + k * blockSize,
(DTYPE*)y->data + k * blockSize,
(DTYPE*)x->data + k * blockSize,
dimensionSize * stride, lossName);
}
}
if(padding != NULL) {
int n = leadDim;
int paddingOrder = padding->order;
int * paddingDims = new int[paddingOrder];
memcpy(paddingDims, padding->dimSize, padding->order * sizeof(int));
padding->Reshape(padding->unitNum);
int order = dedx->order;
int * dims = new int[order];
memcpy(dims, dedx->dimSize, dedx->order * sizeof(int));
dedx->Reshape(dedx->unitNum/dedx->GetDim(n), dedx->GetDim(n));
_MultiplyDimMe(dedx, padding, 0);
padding->Reshape(paddingOrder, paddingDims);
dedx->Reshape(order, dims);
delete[] paddingDims;
delete[] dims;
}
}
else {
ShowNTErrors("TODO!");
}
}
else{
ShowNTErrors("TODO!");
}
BacktoCudaDev(x->devID, devIDBackup);
}
#endif
} // namespace nts(NiuTrans.Tensor)
|
6e7712149df7d4163ae809e65a11be58464c3732.cu
|
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (email: [email protected]) 2018-04-26
* $Update by: Lin Ye (email: [email protected]) 2019-07-01 float16 added
*/
#include "LogSoftmax.h"
#include "LogSoftmax.cuh"
#include "Loss.cuh"
#include "../core/arithmetic/MultiplyDim.h"
#include "../core/reduce/ReduceSum.cuh"
#include "../core/reduce/ReduceMax.cuh"
#include "../core/shape/IsSameShaped.h"
#include "../XDevice.h"
#include <device_launch_parameters.h>
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
log scale softmax y = log(e^x / \sum_{i} e^{x_i}) (Cuda version)
>> x - input vector
>> y - result
>> leadDim - leading dimension (along which we perform reduction)
*/
void _CudaLogSoftmax(const XTensor * x, XTensor * y, int leadDim)
{
ShowNTErrors("You should call LogSoftmax instead!");
}
/*
log softmax forward computation (Cuda kernel)
for each column j, let y_{i,j} and x_{i,j} are the output
and state value for the i-th element of column j. We have
y_{i,j} = log(e^x_{i,j} / \sum_{i} e^{x_{i,j})
>> x - input tensor (in matrix)
>> max - the max value for each column j
>> sum - \sum_{i} e^{x_{i,j}) for each column j
>> y - output tensor (in matrix)
>> rowNum - row number of the matrix
>> colNum - column number of the matrix
*/
template <class T ,TENSOR_DATA_TYPE dataType>
__global__
void KernelLogSoftmaxComputeByRow(T * x, T * max, T * sum, T * y, int rowNum, int colNum)
{
__shared__ T inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ T inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK];
int i = blockDim.y * blockIdx.y + threadIdx.y;
int j = blockDim.x * blockIdx.x + threadIdx.x;
/* we keep the sum and max number in the shared memory for each column */
if (threadIdx.y == 0) {
inputSum[threadIdx.x] = sum[j];
inputMax[threadIdx.x] = max[j];
}
/* synchronize to make sure the values of max and sum are loaded */
__syncthreads();
/* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */
if (i < rowNum && j < colNum) {
int key = i * colNum + j;
if (dataType == DEFAULT_DTYPE) {
DTYPE r = log((DTYPE)exp((DTYPE)(x[key] - inputMax[threadIdx.x])) / (DTYPE)inputSum[threadIdx.x]);
if (isnan(r))
r = LOGPROB_MIN;
if (isinf(r))
r = LOGPROB_MIN;
y[key] = MAX(r, LOGPROB_MIN);
}
else if (dataType == X_FLOAT16) {
#if __CUDA_ARCH__ >= 600
half r = hlog((half)hexp(x[key] - inputMax[threadIdx.x]) / (half)inputSum[threadIdx.x]);
y[key] = r;
#endif
}
}
}
/*
log softmax forward computation (Cuda kernel)
for each row i, let y_{i,j} and x_{i,j} are the output
and state value for the j-th element of row i. We have
y_{i,j} = log(e^x_{i,j} / \sum_{j} e^{x_{i,j})
>> x - input tensor (in matrix)
>> max - the max value for each row i
>> sum - \sum_{j} e^{x_{i,j}) for each row i
>> y - output tensor (in matrix)
>> rowNum - row number of the matrix
>> colNum - column number of the matrix
*/
template <class T ,TENSOR_DATA_TYPE dataType>
__global__
void KernelLogSoftmaxComputeByCol(T * x, T * max, T * sum, T * y, int rowNum, int colNum)
{
__shared__ T inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ T inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK];
int i = blockDim.y * blockIdx.y + threadIdx.y;
int j = blockDim.x * blockIdx.x + threadIdx.x;
/* we keep the sum and max number in the shared memory for each row */
if (threadIdx.x == 0) {
inputSum[threadIdx.y] = sum[i];
inputMax[threadIdx.y] = max[i];
}
/* synchronize to make sure the values of max and sum are loaded */
__syncthreads();
/* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */
if (i < rowNum && j < colNum) {
int key = i * colNum + j;
if (dataType == DEFAULT_DTYPE) {
DTYPE r = log((DTYPE)exp((DTYPE)(x[key] - inputMax[threadIdx.y])) / (DTYPE)inputSum[threadIdx.y]);
/*if (r < LOGPROB_MIN)
{
printf("min %e %e, %e %e, %e %e\n", r, x[key] - inputMax[threadIdx.y], x[key], inputMax[threadIdx.y], exp(x[key] - inputMax[threadIdx.y]), inputSum[threadIdx.y]);
}*/
if (isnan(r))
r = LOGPROB_MIN;
if (isinf(r))
r = LOGPROB_MIN;
y[key] = MAX(r, LOGPROB_MIN);
}
else if (dataType == X_FLOAT16) {
#if __CUDA_ARCH__ >= 600
half r = hlog((half)hexp(x[key] - inputMax[threadIdx.y]) / (half)inputSum[threadIdx.y]);
y[key] = r;
#endif
}
}
}
/*
log scale softmax y = log(e^x / \sum_{i} e^{x_i}) (Cuda version)
>> x - input vector
>> y - result
>> leadDim - leading dimension (along which we perform reduction)
>> sum - \sum_{i} e^{x_i}
>> max - \max_{i} e^{x_i}
*/
void _CudaLogSoftmaxSumMax(XTensor * x, XTensor * y, int leadDim, XTensor * sum, XTensor * max)
{
CheckNTErrors((x->devID >= 0), "Forward computation of log softmax must be run on GPUs.");
CheckNTErrors((x->devID == y->devID), "Input tensors must be on the same GPU.");
CheckNTErrors((x->order == y->order), "Input tensors must be of the same size.");
CheckNTErrors((x->order == 2), "Input tensors must be of order 2.");
int devIDBackup;
ProtectCudaDev(x->devID, devIDBackup);
if (x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE) {
int gridSize[3], blockSize[3];
int n = x->dimSize[0];
int m = x->dimSize[1];
/* allocate the buffer */
DTYPE * maxData = (DTYPE*)max->data;
DTYPE * sumData = (DTYPE*)sum->data;
if (leadDim == 0) {
GDevs.GetCudaThread2D(x->devID, n, m, MAX_INT, gridSize, blockSize);
/* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */
KernelLogSoftmaxComputeByRow<DTYPE, DEFAULT_DTYPE> <<<dim3(gridSize[1], gridSize[0]), dim3(blockSize[1], blockSize[0])>>>
((DTYPE*)x->data, maxData, sumData, (DTYPE*)y->data, n, m);
}
else {
GDevs.GetCudaThread2D(x->devID, m, n, MAX_INT, gridSize, blockSize);
/* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */
KernelLogSoftmaxComputeByCol<DTYPE, DEFAULT_DTYPE> <<<dim3(gridSize[0], gridSize[1]), dim3(blockSize[0], blockSize[1])>>>
((DTYPE*)x->data, maxData, sumData, (DTYPE*)y->data, n, m);
}
}
else if (x->dataType == X_FLOAT16 && y->dataType == X_FLOAT16) {
#ifdef HALF_PRECISION
int gridSize[3], blockSize[3];
int n = x->dimSize[0];
int m = x->dimSize[1];
/* allocate the buffer */
__half * maxData = (half*)max->data;
__half * sumData = (half*)sum->data;
if (leadDim == 0) {
GDevs.GetCudaThread2D(x->devID, n, m, MAX_INT, gridSize, blockSize);
/* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */
KernelLogSoftmaxComputeByRow<half, X_FLOAT16> <<<dim3(gridSize[1], gridSize[0]), dim3(blockSize[1], blockSize[0])>>>
((half*)x->data, maxData, sumData, (half *)y->data, n, m);
}
else {
GDevs.GetCudaThread2D(x->devID, m, n, MAX_INT, gridSize, blockSize);
/* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */
KernelLogSoftmaxComputeByCol<half, X_FLOAT16> <<<dim3(gridSize[0], gridSize[1]), dim3(blockSize[0], blockSize[1])>>>
((half*)x->data, maxData, sumData, (half*)y->data, n, m);
}
#else
ShowNTErrors("Recompile the code with HALF_PRECISION!");
#endif
}
else {
ShowNTErrors("TODO!");
}
BacktoCudaDev(x->devID, devIDBackup);
}
/*
set dE/dx = exp(y)
>> dedy - dE/dy
>> dedx - dE/dx
>> y - output of the function
>> size - size of output
>> lossName - name of the loss function
*/
__global__
void KernelExpLoss(DTYPE * dedy, DTYPE * dedx, DTYPE * y, int size, LOSS_FUNCTION_NAME lossName)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
/* dE/dx_j = exp(y_j) */
if (lossName == CROSSENTROPY)
dedx[i] = exp(y[i]);
/* dE/dx_j = exp(y_j) */
else if (lossName == SQUAREDERROR)
dedx[i] = exp(y[i]);
else if (lossName == ONEHOTERROR)
dedx[i] = 0;
else
dedx[i] = 0;
}
}
/*
backward computation for log softmax
dE/dx = dE/dy * dy/dx
>> dedy - dE/dy
>> dedx - dE/dx
>> gold - gold standard to measure error (or loss)
>> y - output of the function
>> x - input of the function
>> size - size of input/output
>> lossName - name of the loss function
*/
__global__
void KernelLogSoftmaxBackwardDEDS(DTYPE * dedy, DTYPE * dedx, DTYPE * gold, DTYPE * y, DTYPE * x,
int size, LOSS_FUNCTION_NAME lossName)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
DTYPE r = 0;
/* dE/ds_j = exp(y_j) */
if (lossName == CROSSENTROPY)
r = -gold[i] + exp(y[i]);
/* dE/ds_j = exp(y_j) */
else if (lossName == SQUAREDERROR)
r = -gold[i] + exp(y[i]);
else if (lossName == ONEHOTERROR) {
if (gold[i] == 1.0F)
r = -gold[i] + exp(y[i]);
else
r = 0;
}
else {
r = dedy[i];
}
if (isnan(r))
r = 0;
if (isinf(r))
r = 0;
dedx[i] = r;
}
}
/*
backward computation for log softmax (sparse matrices) for each column
dE/dx_j += -gold_j
(for dE/dx = dE/dy * dy/dx)
>> dedy - dE/dy
>> dedx - dE/dx
>> gold - gold standard to measure error (or loss)
>> y - output of the function
>> x - input of the function
>> rowNum - row number of the matrix
>> colNum - column number of the matrix
>> gNonZeroNum - number of non-zero entries in the sparse gold tensor
>> lossName - name of the loss function
*/
__global__
void KernelLogSoftmaxBackwardDEDSSparseByRow(DTYPE * dedy, DTYPE * dedx, void * gold, DTYPE * y, DTYPE * x,
int rowNum, int colNum, int gNonZeroNum, LOSS_FUNCTION_NAME lossName)
{
int tupleSize = sizeof(int) + sizeof(DTYPE);
int k = blockDim.x * blockIdx.x + threadIdx.x;
if (k < gNonZeroNum) {
/* load the sub-block of the sparse matrix b */
int key = *(int*)((char*)gold + tupleSize * k);
int ni = key / colNum;
int mi = key % colNum;
DTYPE value = *(DTYPE*)((char*)gold + tupleSize * k + sizeof(int));
if (lossName == CROSSENTROPY)
dedx[colNum * ni + mi] += -value;
else if (lossName == SQUAREDERROR)
dedx[colNum * ni + mi] += -value;
else if (lossName == ONEHOTERROR) {
int offset = colNum * ni + mi;
if (value == 1.0F)
dedx[offset] += (-value + exp(y[offset]));
//dedx[offset] += -value * 0.005;
}
}
}
/*
backward computation for dense matrics with default data type
dE/dx = dE/dy * dy/dx
log softmax: y_i = log(e^{x_i} / \sum_{k} e^{x_k})
dy_i/dx_j
= d{log(e^{x_i} / \sum_{k} e^{x_k})}/dx_j
= d{log(e^{x_i})}/dx_j - d{log(\sum_{k} e^{x_k})}/dx_j
= \delta(i,j) - e^{x_j}/\sum_{k} e^{x_k})
= \delta(i,j) - exp(y_j)
where \delta(i,j) = 1 if i = j, and \delta(i,j) = 0 otherwise
if loss E is defined as cross entropy, i.e., E = -\sum_{k} (gold_k * y_k), we have
dE/dy_i = -gold_i
(where {gold_k} is the gold standard distribution)
then
dE/dx_j
= \sum_{i} {dE/dy_i * dy_i/dx_j}
= \sum_{i} {-gold_i * (\delta(i,j) - exp(y_j))}
= \sum_{i} {-gold_i * \delta{i,j)} + \sum_{i} {gold_i * exp(y_j)}
= -gold_i * \delta(i,j) + \sum_{i} {gold_i * exp(y_j)}
= -gold_j + exp(y_j)
Note: gold_i is a distribution, i.e., \sum_{i} gold_i = 1
if gold is with a one-hot representation (gold_i = 1 for only one dimension),
we can reformulize it as dE/dx_j = -\delta(i,j) + exp(y_j)
There are two ways to implement this process.
Method 1. we compute dE/dy and dy/dx resepectively, and then reach dE/dx by dE/dx = dE/dy * dy/dx
(or more precisely dE/dx_j = \sum_{i} {dE/dy_i * dy_i/dx_j})
Method 2. we compute dE/dx (or dE/dx_j) in a single step, rather than resorting to the
sub-models dE/dy and dy/dx. We can do this by using dE/dx_j = -gold_j + exp(y_j)
Here we choose Method 2, i.e., we straightforwardly compute dE/dx_j by
dE/dx_j = -gold_j + exp(y_j)
(or dE/dx_j = -\delta(i,j) + exp(y_j) for a Maximum A Posteriori Estimation (MAP))
Method 1 is also fine but is more time consuming due to the summation over dimensions.
Note that this method is not good for the standard version softmax when working with
the cross entropy loss. Because it is numerical unstable. When we use a usual method to
define softmax, we have softmax: y_i = log(e^{x_i} / \sum_{k} e^{x_k}). It is trivial to
know that dy_i/dx_j = y_i * \delta(i,j) - y_i * y_j. As y_i and y_j could be a small number,
y_i * y_i would result in a much smaller one with a risk of lossing precision. This is even
worse we multiply dy_i/dx_j with dE/dy_i. So it is in general to use log softmax instead for
better numerical stability.
>> gold - gold standard to measure error (or loss)
>> y - output of the function
>> x - input of the function
>> dedy - dE/dy
>> deds - dE/dx
>> lossName - type of loss function, e.g., cross entropy
>> leadDim - leading dimension (along which we perform reduction)
*/
void _CudaLogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
XTensor * dedy, XTensor * dedx,
XTensor * padding, int leadDim,
LOSS_FUNCTION_NAME lossName)
{
leadDim = leadDim < 0 ? y->order - 1 : leadDim;
CheckNTErrors((x->devID >= 0), "Backward computation of log softmax must be run on GPUs.");
CheckNTErrors((x->devID == y->devID && gold->devID == y->devID),
"Tensors used in log softmax are not on the same GPU.");
CheckNTErrors((gold != NULL), "No x gold standard is found!");
int dimensionSize = y->dimSize[leadDim];
int stride = 1;
int blockSize = 1;
int blockNum = 1;
for (int i = leadDim + 1; i < y->order; i++)
stride *= y->dimSize[i];
blockSize = stride * dimensionSize;
blockNum = y->unitNum / blockSize;
int devIDBackup;
ProtectCudaDev(x->devID, devIDBackup);
if (x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE) {
CheckNTErrors((lossName == CROSSENTROPY || lossName == SQUAREDERROR || lossName == NOLOSS),
"Unknown loss function.");
int cudaGridSize[3], cudaBlockSize[3];
if (lossName == CROSSENTROPY || lossName == SQUAREDERROR) {
if (gold->isSparse) {
CheckNTErrors((gold->order == 2), "TODO!")
CheckNTErrors((leadDim == 0), "TODO!");
GDevs.GetCudaThread(x->devID, x->unitNum, cudaGridSize, cudaBlockSize);
/* dE/ds_j = exp(y_j) */
KernelExpLoss <<<dim3(cudaGridSize[0]), dim3(cudaBlockSize[0]) >>>
(NULL,
(DTYPE*)dedx->data,
(DTYPE*)y->data,
dimensionSize * stride,
lossName);
GDevs.GetCudaThread(x->devID, gold->unitNumNonZero, cudaGridSize, cudaBlockSize);
/* dE/ds_j += -gold_j */
KernelLogSoftmaxBackwardDEDSSparseByRow <<<dim3(cudaGridSize[0]), dim3(cudaBlockSize[0]) >>>
(NULL,
(DTYPE*)dedx->data,
(char*)gold->data + sizeof(int),
(DTYPE*)y->data,
(DTYPE*)x->data,
dedx->dimSize[0], dedx->dimSize[1], gold->unitNumNonZero, lossName);
}
else {
CheckNTErrors((_IsSameShaped(gold, y)), "The tensors must be of the same size!");
for (int k = 0; k < blockNum; k++) {
GDevs.GetCudaThread(x->devID, blockSize, cudaGridSize, cudaBlockSize);
/* dE/ds_j = -gold_j + exp(y_j) */
KernelLogSoftmaxBackwardDEDS <<<dim3(cudaGridSize[0]), dim3(cudaBlockSize[0]) >>>
(NULL,
(DTYPE*)dedx->data + k * blockSize,
(DTYPE*)gold->data + k * blockSize,
(DTYPE*)y->data + k * blockSize,
(DTYPE*)x->data + k * blockSize,
dimensionSize * stride, lossName);
}
}
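/* The padding branch below flattens the mask and views dedx as a 2-D matrix whose columns
are the lead dimension, then effectively scales each row by the corresponding mask value via
_MultiplyDimMe(dedx, padding, 0), so gradients at padded positions are zeroed; both tensors
are reshaped back to their original layouts afterwards. */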
if(padding != NULL) {
int n = leadDim;
int paddingOrder = padding->order;
int * paddingDims = new int[paddingOrder];
memcpy(paddingDims, padding->dimSize, padding->order * sizeof(int));
padding->Reshape(padding->unitNum);
int order = dedx->order;
int * dims = new int[order];
memcpy(dims, dedx->dimSize, dedx->order * sizeof(int));
dedx->Reshape(dedx->unitNum/dedx->GetDim(n), dedx->GetDim(n));
_MultiplyDimMe(dedx, padding, 0);
padding->Reshape(paddingOrder, paddingDims);
dedx->Reshape(order, dims);
delete[] paddingDims;
delete[] dims;
}
}
else {
ShowNTErrors("TODO!");
}
}
else{
ShowNTErrors("TODO!");
}
BacktoCudaDev(x->devID, devIDBackup);
}
#endif
} // namespace nts(NiuTrans.Tensor)
|
be5d97008f7dc5c81c75b6078100c956806a2eda.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#include "glob.hpp"
#include "GpuMat.hpp"
#include <hip/hip_vector_types.h>
using namespace cudev;
#define USE_MSDN
//void RGB_to_YV12(const GpuMat & src, GpuMat& dst);
namespace{
__device__ __forceinline__ void rgb_to_y(const unsigned char b, const unsigned char g, const unsigned char r, unsigned char & y){
#ifndef USE_MSDN
y = static_cast<unsigned char>(((int)(30 * r) + (int)(59 * g)+(int)(11 *b))/100);
#else
y = static_cast<unsigned char>((((int)(66 * r) + (int)(129 * g) + (int)( 25 * b) + 128) >> 8) + 16);
#endif
}
__device__ __forceinline__ void rgb_to_yuv(const unsigned char b, const unsigned char g, const unsigned char r, unsigned char & y, unsigned char & u, unsigned char & v){
rgb_to_y(b, g, r, y);
#ifndef USE_MSDN
u = static_cast<unsigned char>(((int)(-17 * r) - (int)(33 * g) + (int)(50 * b) + 12800) / 100);
v = static_cast<unsigned char>(((int)(50 * r) - (int)(42 * g) - (int)(8 * b) + 12800) / 100);
#else
u = static_cast<unsigned char>((((int)(-38 * r) - (int)(74 * g) + (int)(112 * b) + 128)>>8)+128);
v = static_cast<unsigned char>((((int)(112 * r) - (int)(94 * g) - (int)(19 * b) + 128)>>8)+ 128);
#endif
}
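// Example with the MSDN-style coefficients above: a mid-gray pixel r = g = b = 128 gives
// Y = (((66 + 129 + 25) * 128 + 128) >> 8) + 16 = 126 and U = V = 128, i.e. neutral chroma,
// which is the expected result for a pure gray input.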
__global__ void Gray_to_YV12(const GlobPtrSz<unsigned char> src, GlobPtr<unsigned char > dst){
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
if(x + 1 >= src.cols || y + 1 >= src.rows)
return;
// get pointers to the data
const size_t planeSize = src.rows * dst.step;
GlobPtr<unsigned char> y_plane = globPtr(dst.data, dst.step);
GlobPtr<unsigned char> u_plane = globPtr(y_plane.data + planeSize, dst.step / 2);
GlobPtr<unsigned char> v_plane = globPtr(u_plane.data + (planeSize / 4), dst.step / 2);
unsigned char pix;
unsigned char y_val, u_val, v_val;
pix = src(y, x);
rgb_to_y(pix, pix, pix, y_val);
y_plane(y, x) = y_val;
pix = src(y, x + 1);
rgb_to_y(pix, pix, pix, y_val);
y_plane(y, x + 1) = y_val;
pix = src(y + 1, x);
rgb_to_y(pix, pix, pix, y_val);
y_plane(y + 1, x) = y_val;
pix = src(y + 1, x + 1);
rgb_to_yuv(pix, pix, pix, y_val, u_val, v_val);
y_plane(y + 1, x + 1) = y_val;
u_plane(y / 2, x / 2) = u_val;
v_plane(y / 2, x / 2) = v_val;
}
#if 1
template <typename T>
__global__ void RGB_to_YV12(const GlobPtrSz<T> src, GlobPtr<uchar> dst)
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
if (x + 1 >= src.cols || y + 1 >= src.rows)
return;
// get pointers to the data
const size_t planeSize = src.rows * dst.step;
#ifdef USE_UV_PLANE
GlobPtr<unsigned char> y_plane = globPtr(dst.data, dst.step);
// the u,v plane is not right, because there is padding
GlobPtr<unsigned char> u_plane = globPtr(y_plane.data + planeSize, dst.step / 2);
GlobPtr<unsigned char> v_plane = globPtr(u_plane.data + (planeSize / 4), dst.step / 2);
#else
unsigned char *y_plane = dst.data;
unsigned char *u_plane = y_plane + planeSize;
unsigned char *v_plane = u_plane + (planeSize>>2);
int uvOff = 0;
#endif
// not right here, because the CUDA pitch is not equal to the surface pitch
T pix;
unsigned char y_val, u_val, v_val;
#ifdef BGR
pix = src(y, x);
rgb_to_y(pix.z, pix.y, pix.x, y_val);
y_plane(y, x) = y_val;
pix = src(y, x + 1);
rgb_to_y(pix.z, pix.y, pix.x, y_val);
y_plane(y, x + 1) = y_val;
pix = src(y + 1, x);
rgb_to_y(pix.z, pix.y, pix.x, y_val);
y_plane(y + 1, x) = y_val;
pix = src(y + 1, x + 1);
rgb_to_yuv(pix.z, pix.y, pix.x, y_val, u_val, v_val);
#else
pix = src(y, x);
rgb_to_y(pix.x, pix.y, pix.z, y_val);
#ifdef USE_UV_PLANE
y_plane(y, x) = y_val;
#else
y_plane[ y * dst.step + x ] = y_val;
#endif
pix = src(y, x + 1);
rgb_to_y(pix.x, pix.y, pix.z, y_val);
#ifdef USE_UV_PLANE
y_plane(y, x + 1) = y_val;
#else
y_plane[ y * dst.step + x + 1] = y_val;
#endif
pix = src(y + 1, x);
rgb_to_y(pix.x, pix.y, pix.z, y_val);
#ifdef USE_UV_PLANE
y_plane(y + 1, x) = y_val;
#else
y_plane[ (y + 1) * dst.step + x ]= y_val;
#endif
pix = src(y + 1, x + 1);
rgb_to_yuv(pix.x, pix.y, pix.z, y_val, u_val, v_val);
#endif
#ifdef USE_UV_PLANE
y_plane(y + 1, x + 1) = y_val;
u_plane(y / 2, x / 2) = u_val;
v_plane(y / 2, x / 2) = v_val;
#else
y_plane[( y + 1) * dst.step + x + 1]= y_val;
// here, consider the padding
uvOff = y / 4 * dst.step + ((y/2)%2)* src.cols / 2 + x /2;
u_plane[uvOff] = u_val;
v_plane[uvOff] = v_val;
#endif
}
template <typename T>
__global__ void RGB_to_NV12(const GlobPtrSz<T> src, GlobPtr<uchar> dst)
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
if (x + 1 >= src.cols || y + 1 >= src.rows)
return;
// get pointers to the data
const size_t planeSize = src.rows * dst.step;
#ifdef USE_UV_PLANE
GlobPtr<unsigned char> y_plane = globPtr(dst.data, dst.step);
// the u,v plane is not right, because there is padding
GlobPtr<unsigned char> u_plane = globPtr(y_plane.data + planeSize, dst.step / 2);
GlobPtr<unsigned char> v_plane = globPtr(u_plane.data + (planeSize / 4), dst.step / 2);
#else
unsigned char *y_plane = dst.data;
unsigned char *u_plane = y_plane + planeSize;
int uvOff = 0;
#endif
// not right here, because the CUDA pitch is not equal to the surface pitch
T pix;
unsigned char y_val, u_val, v_val;
#ifdef BGR
pix = src(y, x);
rgb_to_y(pix.z, pix.y, pix.x, y_val);
y_plane(y, x) = y_val;
pix = src(y, x + 1);
rgb_to_y(pix.z, pix.y, pix.x, y_val);
y_plane(y, x + 1) = y_val;
pix = src(y + 1, x);
rgb_to_y(pix.z, pix.y, pix.x, y_val);
y_plane(y + 1, x) = y_val;
pix = src(y + 1, x + 1);
rgb_to_yuv(pix.z, pix.y, pix.x, y_val, u_val, v_val);
#else
pix = src(y, x);
rgb_to_y(pix.x, pix.y, pix.z, y_val);
#ifdef USE_UV_PLANE
y_plane(y, x) = y_val;
#else
y_plane[ y * dst.step + x ] = y_val;
#endif
pix = src(y, x + 1);
rgb_to_y(pix.x, pix.y, pix.z, y_val);
#ifdef USE_UV_PLANE
y_plane(y, x + 1) = y_val;
#else
y_plane[ y * dst.step + x + 1] = y_val;
#endif
pix = src(y + 1, x);
rgb_to_y(pix.x, pix.y, pix.z, y_val);
#ifdef USE_UV_PLANE
y_plane(y + 1, x) = y_val;
#else
y_plane[ (y + 1) * dst.step + x ]= y_val;
#endif
pix = src(y + 1, x + 1);
rgb_to_yuv(pix.x, pix.y, pix.z, y_val, u_val, v_val);
#endif
#ifdef USE_UV_PLANE
y_plane(y + 1, x + 1) = y_val;
u_plane(y / 2, x / 2) = u_val;
v_plane(y / 2, x / 2) = v_val;
#else
y_plane[( y + 1) * dst.step + x + 1]= y_val;
uvOff = y / 2 * dst.step + x/ 2 * 2;
u_plane[uvOff] = u_val;
u_plane[uvOff + 1] = v_val;
// here, consider the padding
//uvOff = y / 4 * dst.step + ((y/2)%2)* src.cols / 2 + x /2;
//u_plane[uvOff] = u_val;
//v_plane[uvOff] = v_val;
#endif
}
#else
// use the element size to handle the case where the CUDA pitch is not equal to the surface pitch
template <typename T>
__global__ void RGB_to_YV12(const GlobPtrSz<T> src, GlobPtr<uchar> dst)
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
if (x + 1 >= src.cols || y + 1 >= src.rows)
return;
// get pointers to the data
const size_t planeSize = src.rows * dst.step;
GlobPtr<unsigned char> y_plane = globPtr(dst.data, dst.step);
GlobPtr<unsigned char> u_plane = globPtr(y_plane.data + planeSize, dst.step / 2);
GlobPtr<unsigned char> v_plane = globPtr(u_plane.data + (planeSize / 4), dst.step / 2);
// not right here, because the CUDA pitch is not equal to the surface pitch
T pix;
unsigned char y_val, u_val, v_val;
pix = src(y, x);
rgb_to_y(pix.z, pix.y, pix.x, y_val);
y_plane(y, x) = y_val;
pix = src(y, x + 1);
rgb_to_y(pix.z, pix.y, pix.x, y_val);
y_plane(y, x + 1) = y_val;
pix = src(y + 1, x);
rgb_to_y(pix.z, pix.y, pix.x, y_val);
y_plane(y + 1, x) = y_val;
pix = src(y + 1, x + 1);
rgb_to_yuv(pix.z, pix.y, pix.x, y_val, u_val, v_val);
y_plane(y + 1, x + 1) = y_val;
u_plane(y / 2, x / 2) = u_val;
v_plane(y / 2, x / 2) = v_val;
}
#endif
}
__global__ void RGB_to_YV12_2(const unsigned char * pARGB, unsigned char * pYV, int srcPitch, int dstPitch, int width, int height){
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
// if (x + 1 >= src.cols || y + 1 >= src.rows)
//return;
const int planeSize = height * dstPitch;
unsigned char * y_plane = pYV;
unsigned char * u_plane = y_plane + planeSize;
unsigned char * v_plane = u_plane + (planeSize >> 2);
unsigned char y_val, u_val, v_val;
unsigned char r, g, b;
int rgbaSize = 4;
int uv_off = 0;
// the (x, y)
r = pARGB[ y * srcPitch + x * rgbaSize + 0];
g = pARGB[ y * srcPitch + x * rgbaSize + 1];
b = pARGB[ y * srcPitch + x * rgbaSize + 2];
rgb_to_y(b, g, r, y_val);
y_plane[y * dstPitch + x] = y_val;
// the (x + 1, y)
r = pARGB[ y * srcPitch + (x + 1) * rgbaSize + 0];
g = pARGB[ y * srcPitch + (x + 1) * rgbaSize + 1];
b = pARGB[ y * srcPitch + (x + 1) * rgbaSize + 2];
rgb_to_y(b, g, r, y_val);
y_plane[y * dstPitch + x + 1] = y_val;
// the (x , y + 1)
r = pARGB[ (y+1) * srcPitch + x * rgbaSize + 0];
g = pARGB[ (y+1) * srcPitch + x * rgbaSize + 1];
b = pARGB[ (y+1) * srcPitch + x * rgbaSize + 2];
rgb_to_y(b, g, r, y_val);
y_plane[ (y+1) * dstPitch + x] = y_val;
// the (x +1, y + 1)
r = pARGB[ (y+1) * srcPitch + (x+1) * rgbaSize + 0];
g = pARGB[ (y+1) * srcPitch + (x+1) * rgbaSize + 1];
b = pARGB[ (y+1) * srcPitch + (x+1) * rgbaSize + 2];
rgb_to_yuv(b, g, r, y_val, u_val, v_val);
y_plane[ (y + 1) * dstPitch + x + 1] = y_val;
uv_off = (y / 4) * dstPitch + (( y / 2) % 2) * width /2 + x /2;
u_plane[ uv_off ] = u_val;
v_plane[ uv_off ] = v_val;
}
extern "C"
void RGB_to_YV12(const GpuMat& src, GpuMat& dst)
{
const dim3 block(32, 8);
const dim3 grid(divUp(src.cols, block.x * 2), divUp(src.rows, block.y * 2));
switch (src.channels())
{
case 1:
hipLaunchKernelGGL(( Gray_to_YV12), dim3(grid), dim3(block), 0, 0, globPtr<uchar>(src), globPtr<uchar>(dst));
break;
case 3:
hipLaunchKernelGGL(( RGB_to_YV12), dim3(grid), dim3(block), 0, 0, globPtr<uchar3>(src), globPtr<uchar>(dst));
break;
case 4:
hipLaunchKernelGGL(( RGB_to_YV12), dim3(grid), dim3(block), 0, 0, globPtr<uchar4>(src), globPtr<uchar>(dst));
break;
}
hipGetLastError() ;
hipDeviceSynchronize() ;
}
// use the plane pointers to perform the RGB to YV12 conversion
extern "C"
void RGB_to_YV12_plane(const GpuMat& src, GpuMat& dst){
const dim3 block(32, 8);
const dim3 grid(divUp(src.cols, block.x * 2), divUp(src.rows, block.y * 2));
switch(src.channels()){
case 4:
//RGB_to_YV12_plane<<<grid, block>>>(globPtr<uchar4>(src), globPtr<uchar>(dst));
break;
}
hipGetLastError();
hipDeviceSynchronize();
}
extern "C"
void RGBA_to_NV12(const GpuMat & src, GpuMat & dst){
const dim3 block(32, 8);
const dim3 grid(divUp(src.cols, block.x * 2), divUp(src.rows, block.y * 2));
switch(src.channels()){
case 4:
hipLaunchKernelGGL(( RGB_to_NV12), dim3(grid), dim3(block), 0, 0, globPtr<uchar4>(src), globPtr<uchar>(dst));
break;
}
hipGetLastError();
hipDeviceSynchronize();
}
|
be5d97008f7dc5c81c75b6078100c956806a2eda.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include "glob.hpp"
#include "GpuMat.hpp"
#include <vector_types.h>
using namespace cudev;
#define USE_MSDN
//void RGB_to_YV12(const GpuMat & src, GpuMat& dst);
namespace{
__device__ __forceinline__ void rgb_to_y(const unsigned char b, const unsigned char g, const unsigned char r, unsigned char & y){
#ifndef USE_MSDN
y = static_cast<unsigned char>(((int)(30 * r) + (int)(59 * g)+(int)(11 *b))/100);
#else
y = static_cast<unsigned char>((((int)(66 * r) + (int)(129 * g) + (int)( 25 * b) + 128) >> 8) + 16);
#endif
}
__device__ __forceinline__ void rgb_to_yuv(const unsigned char b, const unsigned char g, const unsigned char r, unsigned char & y, unsigned char & u, unsigned char & v){
rgb_to_y(b, g, r, y);
#ifndef USE_MSDN
u = static_cast<unsigned char>(((int)(-17 * r) - (int)(33 * g) + (int)(50 * b) + 12800) / 100);
v = static_cast<unsigned char>(((int)(50 * r) - (int)(42 * g) - (int)(8 * b) + 12800) / 100);
#else
u = static_cast<unsigned char>((((int)(-38 * r) - (int)(74 * g) + (int)(112 * b) + 128)>>8)+128);
v = static_cast<unsigned char>((((int)(112 * r) - (int)(94 * g) - (int)(19 * b) + 128)>>8)+ 128);
#endif
}
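// A minimal host-side sanity sketch of the MSDN-style formulas above (the
// helper name is ours and it is not part of the encoder path): for a white
// pixel r = g = b = 255 the luma evaluates to 235, the nominal limited-range
// BT.601 white level, and the chroma terms land at the neutral 128 level to
// within one LSB of rounding.
static inline void rgb_to_yuv_reference(unsigned char b, unsigned char g, unsigned char r, unsigned char & y, unsigned char & u, unsigned char & v){
y = static_cast<unsigned char>((((int)(66 * r) + (int)(129 * g) + (int)(25 * b) + 128) >> 8) + 16);
u = static_cast<unsigned char>((((int)(-38 * r) - (int)(74 * g) + (int)(112 * b) + 128) >> 8) + 128);
v = static_cast<unsigned char>((((int)(112 * r) - (int)(94 * g) - (int)(19 * b) + 128) >> 8) + 128);
}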
__global__ void Gray_to_YV12(const GlobPtrSz<unsigned char> src, GlobPtr<unsigned char > dst){
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
if(x + 1 >= src.cols || y + 1 >= src.rows)
return;
// get pointers to the data
const size_t planeSize = src.rows * dst.step;
GlobPtr<unsigned char> y_plane = globPtr(dst.data, dst.step);
GlobPtr<unsigned char> u_plane = globPtr(y_plane.data + planeSize, dst.step / 2);
GlobPtr<unsigned char> v_plane = globPtr(u_plane.data + (planeSize / 4), dst.step / 2);
unsigned char pix;
unsigned char y_val, u_val, v_val;
pix = src(y, x);
rgb_to_y(pix, pix, pix, y_val);
y_plane(y, x) = y_val;
pix = src(y, x + 1);
rgb_to_y(pix, pix, pix, y_val);
y_plane(y, x + 1) = y_val;
pix = src(y + 1, x);
rgb_to_y(pix, pix, pix, y_val);
y_plane(y + 1, x) = y_val;
pix = src(y + 1, x + 1);
rgb_to_yuv(pix, pix, pix, y_val, u_val, v_val);
y_plane(y + 1, x + 1) = y_val;
u_plane(y / 2, x / 2) = u_val;
v_plane(y / 2, x / 2) = v_val;
}
#if 1
template <typename T>
__global__ void RGB_to_YV12(const GlobPtrSz<T> src, GlobPtr<uchar> dst)
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
if (x + 1 >= src.cols || y + 1 >= src.rows)
return;
// get pointers to the data
const size_t planeSize = src.rows * dst.step;
#ifdef USE_UV_PLANE
GlobPtr<unsigned char> y_plane = globPtr(dst.data, dst.step);
// the u,v plane is not right, because there is padding
GlobPtr<unsigned char> u_plane = globPtr(y_plane.data + planeSize, dst.step / 2);
GlobPtr<unsigned char> v_plane = globPtr(u_plane.data + (planeSize / 4), dst.step / 2);
#else
unsigned char *y_plane = dst.data;
unsigned char *u_plane = y_plane + planeSize;
unsigned char *v_plane = u_plane + (planeSize>>2);
int uvOff = 0;
#endif
// not right here, because the CUDA pitch is not equal to the surface pitch
T pix;
unsigned char y_val, u_val, v_val;
#ifdef BGR
pix = src(y, x);
rgb_to_y(pix.z, pix.y, pix.x, y_val);
y_plane(y, x) = y_val;
pix = src(y, x + 1);
rgb_to_y(pix.z, pix.y, pix.x, y_val);
y_plane(y, x + 1) = y_val;
pix = src(y + 1, x);
rgb_to_y(pix.z, pix.y, pix.x, y_val);
y_plane(y + 1, x) = y_val;
pix = src(y + 1, x + 1);
rgb_to_yuv(pix.z, pix.y, pix.x, y_val, u_val, v_val);
#else
pix = src(y, x);
rgb_to_y(pix.x, pix.y, pix.z, y_val);
#ifdef USE_UV_PLANE
y_plane(y, x) = y_val;
#else
y_plane[ y * dst.step + x ] = y_val;
#endif
pix = src(y, x + 1);
rgb_to_y(pix.x, pix.y, pix.z, y_val);
#ifdef USE_UV_PLANE
y_plane(y, x + 1) = y_val;
#else
y_plane[ y * dst.step + x + 1] = y_val;
#endif
pix = src(y + 1, x);
rgb_to_y(pix.x, pix.y, pix.z, y_val);
#ifdef USE_UV_PLANE
y_plane(y + 1, x) = y_val;
#else
y_plane[ (y + 1) * dst.step + x ]= y_val;
#endif
pix = src(y + 1, x + 1);
rgb_to_yuv(pix.x, pix.y, pix.z, y_val, u_val, v_val);
#endif
#ifdef USE_UV_PLANE
y_plane(y + 1, x + 1) = y_val;
u_plane(y / 2, x / 2) = u_val;
v_plane(y / 2, x / 2) = v_val;
#else
y_plane[( y + 1) * dst.step + x + 1]= y_val;
// here, consider the padding
uvOff = y / 4 * dst.step + ((y/2)%2)* src.cols / 2 + x /2;
u_plane[uvOff] = u_val;
v_plane[uvOff] = v_val;
#endif
}
template <typename T>
__global__ void RGB_to_NV12(const GlobPtrSz<T> src, GlobPtr<uchar> dst)
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
if (x + 1 >= src.cols || y + 1 >= src.rows)
return;
// get pointers to the data
const size_t planeSize = src.rows * dst.step;
#ifdef USE_UV_PLANE
GlobPtr<unsigned char> y_plane = globPtr(dst.data, dst.step);
// the u,v plane is not right, because there is padding
GlobPtr<unsigned char> u_plane = globPtr(y_plane.data + planeSize, dst.step / 2);
GlobPtr<unsigned char> v_plane = globPtr(u_plane.data + (planeSize / 4), dst.step / 2);
#else
unsigned char *y_plane = dst.data;
unsigned char *u_plane = y_plane + planeSize;
int uvOff = 0;
#endif
// not right here, because the CUDA pitch is not equal to the surface pitch
T pix;
unsigned char y_val, u_val, v_val;
#ifdef BGR
pix = src(y, x);
rgb_to_y(pix.z, pix.y, pix.x, y_val);
y_plane(y, x) = y_val;
pix = src(y, x + 1);
rgb_to_y(pix.z, pix.y, pix.x, y_val);
y_plane(y, x + 1) = y_val;
pix = src(y + 1, x);
rgb_to_y(pix.z, pix.y, pix.x, y_val);
y_plane(y + 1, x) = y_val;
pix = src(y + 1, x + 1);
rgb_to_yuv(pix.z, pix.y, pix.x, y_val, u_val, v_val);
#else
pix = src(y, x);
rgb_to_y(pix.x, pix.y, pix.z, y_val);
#ifdef USE_UV_PLANE
y_plane(y, x) = y_val;
#else
y_plane[ y * dst.step + x ] = y_val;
#endif
pix = src(y, x + 1);
rgb_to_y(pix.x, pix.y, pix.z, y_val);
#ifdef USE_UV_PLANE
y_plane(y, x + 1) = y_val;
#else
y_plane[ y * dst.step + x + 1] = y_val;
#endif
pix = src(y + 1, x);
rgb_to_y(pix.x, pix.y, pix.z, y_val);
#ifdef USE_UV_PLANE
y_plane(y + 1, x) = y_val;
#else
y_plane[ (y + 1) * dst.step + x ]= y_val;
#endif
pix = src(y + 1, x + 1);
rgb_to_yuv(pix.x, pix.y, pix.z, y_val, u_val, v_val);
#endif
#ifdef USE_UV_PLANE
y_plane(y + 1, x + 1) = y_val;
u_plane(y / 2, x / 2) = u_val;
v_plane(y / 2, x / 2) = v_val;
#else
y_plane[( y + 1) * dst.step + x + 1]= y_val;
uvOff = y / 2 * dst.step + x/ 2 * 2;
u_plane[uvOff] = u_val;
u_plane[uvOff + 1] = v_val;
// here, consider the padding
//uvOff = y / 4 * dst.step + ((y/2)%2)* src.cols / 2 + x /2;
//u_plane[uvOff] = u_val;
//v_plane[uvOff] = v_val;
#endif
}
#else
// use the element size to handle the case where the CUDA pitch is not equal to the surface pitch
template <typename T>
__global__ void RGB_to_YV12(const GlobPtrSz<T> src, GlobPtr<uchar> dst)
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
if (x + 1 >= src.cols || y + 1 >= src.rows)
return;
// get pointers to the data
const size_t planeSize = src.rows * dst.step;
GlobPtr<unsigned char> y_plane = globPtr(dst.data, dst.step);
GlobPtr<unsigned char> u_plane = globPtr(y_plane.data + planeSize, dst.step / 2);
GlobPtr<unsigned char> v_plane = globPtr(u_plane.data + (planeSize / 4), dst.step / 2);
// not right here, because the CUDA pitch is not equal to the surface pitch
T pix;
unsigned char y_val, u_val, v_val;
pix = src(y, x);
rgb_to_y(pix.z, pix.y, pix.x, y_val);
y_plane(y, x) = y_val;
pix = src(y, x + 1);
rgb_to_y(pix.z, pix.y, pix.x, y_val);
y_plane(y, x + 1) = y_val;
pix = src(y + 1, x);
rgb_to_y(pix.z, pix.y, pix.x, y_val);
y_plane(y + 1, x) = y_val;
pix = src(y + 1, x + 1);
rgb_to_yuv(pix.z, pix.y, pix.x, y_val, u_val, v_val);
y_plane(y + 1, x + 1) = y_val;
u_plane(y / 2, x / 2) = u_val;
v_plane(y / 2, x / 2) = v_val;
}
#endif
}
__global__ void RGB_to_YV12_2(const unsigned char * pARGB, unsigned char * pYV, int srcPitch, int dstPitch, int width, int height){
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
// if (x + 1 >= src.cols || y + 1 >= src.rows)
//return;
const int planeSize = height * dstPitch;
unsigned char * y_plane = pYV;
unsigned char * u_plane = y_plane + planeSize;
unsigned char * v_plane = u_plane + (planeSize >> 2);
unsigned char y_val, u_val, v_val;
unsigned char r, g, b;
int rgbaSize = 4;
int uv_off = 0;
// the (x, y)
r = pARGB[ y * srcPitch + x * rgbaSize + 0];
g = pARGB[ y * srcPitch + x * rgbaSize + 1];
b = pARGB[ y * srcPitch + x * rgbaSize + 2];
rgb_to_y(b, g, r, y_val);
y_plane[y * dstPitch + x] = y_val;
// the (x + 1, y)
r = pARGB[ y * srcPitch + (x + 1) * rgbaSize + 0];
g = pARGB[ y * srcPitch + (x + 1) * rgbaSize + 1];
b = pARGB[ y * srcPitch + (x + 1) * rgbaSize + 2];
rgb_to_y(b, g, r, y_val);
y_plane[y * dstPitch + x + 1] = y_val;
// the (x , y + 1)
r = pARGB[ (y+1) * srcPitch + x * rgbaSize + 0];
g = pARGB[ (y+1) * srcPitch + x * rgbaSize + 1];
b = pARGB[ (y+1) * srcPitch + x * rgbaSize + 2];
rgb_to_y(b, g, r, y_val);
y_plane[ (y+1) * dstPitch + x] = y_val;
// the (x +1, y + 1)
r = pARGB[ (y+1) * srcPitch + (x+1) * rgbaSize + 0];
g = pARGB[ (y+1) * srcPitch + (x+1) * rgbaSize + 1];
b = pARGB[ (y+1) * srcPitch + (x+1) * rgbaSize + 2];
rgb_to_yuv(b, g, r, y_val, u_val, v_val);
y_plane[ (y + 1) * dstPitch + x + 1] = y_val;
uv_off = (y / 4) * dstPitch + (( y / 2) % 2) * width /2 + x /2;
u_plane[ uv_off ] = u_val;
v_plane[ uv_off ] = v_val;
}
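// Minimal launch sketch for RGB_to_YV12_2 (the wrapper name and buffer names
// are ours): it mirrors the block/grid shape used by the wrappers below, with
// each thread covering a 2x2 pixel quad. Because the kernel's bounds check is
// commented out, the sketch assumes width is a multiple of 64 and height a
// multiple of 16 so the grid covers the image exactly.
static void launch_RGB_to_YV12_2(const unsigned char * d_argb, unsigned char * d_yv12, int srcPitch, int dstPitch, int width, int height){
const dim3 block(32, 8);
const dim3 grid(divUp(width, block.x * 2), divUp(height, block.y * 2));
RGB_to_YV12_2<<<grid, block>>>(d_argb, d_yv12, srcPitch, dstPitch, width, height);
cudaGetLastError();
cudaDeviceSynchronize();
}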
extern "C"
void RGB_to_YV12(const GpuMat& src, GpuMat& dst)
{
const dim3 block(32, 8);
const dim3 grid(divUp(src.cols, block.x * 2), divUp(src.rows, block.y * 2));
switch (src.channels())
{
case 1:
Gray_to_YV12<<<grid, block>>>(globPtr<uchar>(src), globPtr<uchar>(dst));
break;
case 3:
RGB_to_YV12<<<grid, block>>>(globPtr<uchar3>(src), globPtr<uchar>(dst));
break;
case 4:
RGB_to_YV12<<<grid, block>>>(globPtr<uchar4>(src), globPtr<uchar>(dst));
break;
}
cudaGetLastError() ;
cudaDeviceSynchronize() ;
}
// use the plane pointers to perform the RGB to YV12 conversion
extern "C"
void RGB_to_YV12_plane(const GpuMat& src, GpuMat& dst){
const dim3 block(32, 8);
const dim3 grid(divUp(src.cols, block.x * 2), divUp(src.rows, block.y * 2));
switch(src.channels()){
case 4:
//RGB_to_YV12_plane<<<grid, block>>>(globPtr<uchar4>(src), globPtr<uchar>(dst));
break;
}
cudaGetLastError();
cudaDeviceSynchronize();
}
extern "C"
void RGBA_to_NV12(const GpuMat & src, GpuMat & dst){
const dim3 block(32, 8);
const dim3 grid(divUp(src.cols, block.x * 2), divUp(src.rows, block.y * 2));
switch(src.channels()){
case 4:
RGB_to_NV12<<<grid, block>>>(globPtr<uchar4>(src), globPtr<uchar>(dst));
break;
}
cudaGetLastError();
cudaDeviceSynchronize();
}
|
0a5c393412bb108dc60d9d82ac00c89c9f57e9bb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "squareFunc.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int *d_in = NULL;
hipMalloc(&d_in, XSIZE*YSIZE);
unsigned int *d_out = NULL;
hipMalloc(&d_out, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(squareFunc, dim3(gridBlock), dim3(threadBlock), 0, 0, d_in, d_out);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(squareFunc, dim3(gridBlock), dim3(threadBlock), 0, 0, d_in, d_out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(squareFunc, dim3(gridBlock), dim3(threadBlock), 0, 0, d_in, d_out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
0a5c393412bb108dc60d9d82ac00c89c9f57e9bb.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "squareFunc.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int *d_in = NULL;
cudaMalloc(&d_in, XSIZE*YSIZE);
unsigned int *d_out = NULL;
cudaMalloc(&d_out, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
squareFunc<<<gridBlock,threadBlock>>>(d_in,d_out);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
squareFunc<<<gridBlock,threadBlock>>>(d_in,d_out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
squareFunc<<<gridBlock,threadBlock>>>(d_in,d_out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
c4b39b9bd1bcca0a9f664c5f427e2d1a4743b239.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
__global__ void mul64(double* A, double* B, int size)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int idx = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x;
if (idx >= size) {
return;
}
A[idx] = A[idx] * B[idx];
}
#ifdef __cplusplus
}
#endif
|
c4b39b9bd1bcca0a9f664c5f427e2d1a4743b239.cu
|
#ifdef __cplusplus
extern "C" {
#endif
__global__ void mul64(double* A, double* B, int size)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int idx = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x;
if (idx >= size) {
return;
}
A[idx] = A[idx] * B[idx];
}
#ifdef __cplusplus
}
#endif
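// Minimal usage sketch for mul64 (hypothetical host wrapper): performs an
// in-place element-wise multiply A[i] *= B[i] over `size` doubles already
// resident on the device, using a flat 1-D launch; the kernel's generic 3-D
// index flattening reduces to blockIdx.x * blockDim.x + threadIdx.x here.
static void mul64_launch(double * d_A, double * d_B, int size){
const int threads = 256;
const int blocks = (size + threads - 1) / threads;
mul64<<<blocks, threads>>>(d_A, d_B, size);
cudaDeviceSynchronize();
}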
|
926c6ed96befc0bf86883ddd48be278b2690671e.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
1-bit BMMA code.
Runs at 500TOPS for matrix size of 4096x4096x8192.
Borrows largely from CUDA-SDK.
By Boyuan
*/
#include <assert.h>
#include <hip/hip_runtime.h>
#include <mma.h>
#include <stdio.h>
#include <helper_cuda.h>
#include <helper_functions.h>
// GPU configuration.
#define WARP_SIZE 32
// MMA matrix tile dimensions.
#define M 8
#define N 8
#define K 128
#define C_LAYOUT wmma::mem_row_major
// Implementation constants.
#define WARPS_PER_BLOCK 8
#define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK)
#define CHUNK_K 1
#define BLOCK_ROW_WARPS 2
#define BLOCK_COL_WARPS 4
#define WARP_ROW_TILES 4
#define WARP_COL_TILES 2
#define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS)
#define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS)
#define GLOBAL_MEM_STRIDE N_GLOBAL
#define SHMEM_STRIDE (N * BLOCK_ROW_TILES)
#define SHMEM_OFFSET (N * WARP_ROW_TILES)
// The macro below is used to shift rows of the A matrix and columns of the B
// matrix in shared memory to minimize possible bank conflicts. Before
// performing the nvcuda::wmma::mma_sync operation, the warp must load the
// matrix data using the nvcuda::wmma::load_matrix_sync operation. Although the
// memory access pattern is not specified for that function, each lane in the
// warp can read one or multiple matrix elements from different matrix rows or
// columns. For shared memory, such access can result in bank conflicts if
// different rows / columns of the matrix map to the same bank. By shifting each
// row and column by a few bytes, we make sure that they map to different banks,
// thus reducing the number of possible bank conflicts. The number of 32
// one-byte "uint8_t" elements is chosen as the minimum possible shift because
// we must keep each row and column 256-bit aligned, as required by
// nvcuda::wmma::load_matrix_sync.
#define SKEW 0 // Updated for int4
#define checkKernelErrors(expr) \
do { \
expr; \
\
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \
hipGetErrorString(__err)); \
abort(); \
} \
} while (0)
using namespace nvcuda;
using namespace nvcuda::wmma::experimental;
__global__ void apmm_w1a8(const int4 *W, const int4 *X, int *D, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int wb, int xb) {
// GEMM configuration.
int K_TILES = K_GLOBAL / 128;
int W_bit_offset = M_GLOBAL*K_GLOBAL/128;
int X_bit_offset = N_GLOBAL*K_GLOBAL/128;
int ROW_BIT = K_GLOBAL/128;
extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here.
// Warp and lane identification.
const unsigned int warpId = threadIdx.x / WARP_SIZE;
const unsigned int laneId = threadIdx.x % WARP_SIZE;
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int i=0; i<M_GLOBAL; i++) {
// for(int j=0; j<K_GLOBAL/32; j++) {
// printf("W[%d][%d]: %x\n", i, j, *((int*)W+i*K_GLOBAL/32+j));
// }
// }
// }
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int b=0; b<xb; b++) {
// for(int i=0; i<N_GLOBAL; i++) {
// for(int j=0; j<K_GLOBAL/32; j++) {
// printf("bit: %d, X[%d][%d]: %x\n", b, i, j, *((int*)X+b*X_bit_offset + i*K_GLOBAL/32+j));
// }
// }
// }
// }
for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
const unsigned int block_tile_i = block_pos / (N_GLOBAL/8) * 64;
const unsigned int block_tile_j = block_pos % (N_GLOBAL/8) * 8;
// Stop when there are no more D matrix tiles to compute in this CTA.
if (block_tile_i >= M_GLOBAL) {
break;
}
typedef union {
int4 vec;
int a[4];
} U4;
wmma::fragment<wmma::accumulator, M, N, K, int> c[WARP_COL_TILES]
[WARP_ROW_TILES];
for(int i=0; i < WARP_COL_TILES; i++)
for(int j = 0; j < WARP_ROW_TILES; j++)
wmma::fill_fragment(c[i][j], 0);
// Select what warp copies what matrix to shared memory.
// Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix.
const int4 *warp_ptr;
if (warpId < 4) {
warp_ptr = &W[block_tile_i * ROW_BIT] + warpId * 16 * ROW_BIT;
} else {
warp_ptr = &X[block_tile_j * ROW_BIT + (warpId-4)*2*X_bit_offset];
}
// Go through the global K dimension by a fixed step at a time.
#pragma unroll
for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) {
// Offset in shared memory from which the B matrix is stored.
const size_t shmem_idx_b_off = 64; // TODO: This BLOCK_COL_TILES may be selected to improve performance. Maybe moved outside the for loop.
// Copy slices of the A and B matrices to shared memory.
// The first half of the warps in the CTA copy the A matrix, the rest copy
// the B matrix.
int *shmem_ptr = (int*)shmem + warpId*16*4*(CHUNK_K+SKEW) + (laneId/4)*4*(CHUNK_K+SKEW) + laneId%4;
// First half of the warp copies the first row / column of the matrix,
// the second half of the warp copies the next.
// int4 *lane_ptr = (int4 *)(warp_ptr + tile_k * (K/128) +
// (laneId / CHUNK_COPY_LINE_LANES) * (K_GLOBAL/128)) +
// (laneId % CHUNK_COPY_LINE_LANES); // (K/128), since K=128 in bit. int4 is 128 bit.
int *lane_ptr = (int*)warp_ptr + laneId/4*ROW_BIT*4 + laneId%4 + tile_k*4;
*shmem_ptr = *lane_ptr;
shmem_ptr += 8*4*(CHUNK_K+SKEW);
if (warpId < 4) {
lane_ptr += 8*ROW_BIT*4;
} else {
lane_ptr += X_bit_offset*4;
}
*shmem_ptr = *lane_ptr;
// U4 tmp_probe;
// tmp_probe.vec = *lane_ptr;
// printf("tmp_probe.a[0]: %d, tmp_probe.a[1]: %d, tmp_probe.a[2]: %d, tmp_probe.a[3]: %d\n", tmp_probe.a[0], tmp_probe.a[1], tmp_probe.a[2], tmp_probe.a[3]);
__syncthreads();
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int i=0; i<128; i++) {
// printf("Load from GL. i: %d, val: %x %x %x %x \n", i, *((int*)&shmem[i][0]+0), *((int*)&shmem[i][0]+1), *((int*)&shmem[i][0]+2), *((int*)&shmem[i][0]+3));
// }
// }
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int k_step = 0; k_step < CHUNK_K; k_step++) {
wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES];
wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES];
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M);
const int4 *tile_ptr = &shmem[shmem_idx_a][k_step];
wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128);
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int t=0; t<a[i].num_elements; t++) {
// printf("a[%d].x[%d]: %x\n", i, t, a[i].x[t]);
// }
// printf("shmem_idx_a: %d, k_step: %d\n", shmem_idx_a, k_step);
// }
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be
// reused against the other A matrix fragments.
size_t shmem_idx_b = shmem_idx_b_off +
(WARP_ROW_TILES * N) * (warpId % 2) +
(j * N);
const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)];
wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128);
}
// printf("ckpt4\n");
wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND);
}
}
}
__syncthreads();
}
// This pointer is used to access the C and D matrix tiles this warp computes.
int *shmem_warp_tile_ptr = (int*)&shmem[0][0] +
(warpId / 2) * 64 * 16 +
(warpId % 2) * 32; // Will be used only when writing back D. May be moved outside the for loop. TODO.
// Store the D fragments to shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
int *tile_ptr = shmem_warp_tile_ptr + i * 64 * 8 + j * 8;
wmma::store_matrix_sync(tile_ptr, c[i][j], 64, C_LAYOUT);
}
}
__syncthreads();
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int i=62; i<64; i++) {
// for(int j=0; j<64; j++) {
// printf("i: %d, j: %d, val: %d\n", i, j, *((int*)&shmem[0][0]+i*64+j));
// }
// }
// }
// This pointer is used to stream the C and D matrices block-wide tile to and from shared memory.
// int *shmem_warp_stream_ptr = (int*)&shmem[0][0] + warpId * SHMEM_STRIDE * M; // Will be used only when writing back D. Maybe moved outside the for loop. TODO.
size_t idx = threadIdx.x/8 * 64 + threadIdx.x%8;
int *shmem_warp_stream_ptr = (int*)&shmem[0][0]+idx;
int val = 0;
int multiplier = 1;
#pragma unroll
for(int j=0; j<8; j++) {
int tmp = *(shmem_warp_stream_ptr+8*j);
val += (multiplier*tmp);
multiplier *= 2;
}
// This warp's pointer to the C matrix data to copy memory from to shared memory.
// TODO: May be moved outside the for loop.
size_t gmem_idx = block_tile_i*N_GLOBAL + block_tile_j + (threadIdx.x/8)*N_GLOBAL + threadIdx.x%8;
// printf("block_tile_i: %d, block_tile_j: %d, warpId: %d, laneId: %d, gmem_idx: %d\n", block_tile_i, block_tile_j, warpId, laneId, gmem_idx);
D[gmem_idx] = val;
idx += 32*64;
shmem_warp_stream_ptr = (int*)&shmem[0][0]+idx;
val = 0;
multiplier = 1;
#pragma unroll
for(int j=0; j<8; j++) {
int tmp = *(shmem_warp_stream_ptr+8*j);
val += (multiplier*tmp);
multiplier *= 2;
}
// This warp's pointer to the C matrix data to copy memory from to shared memory.
// TODO: May be moved outside the for loop.
gmem_idx += 32*N_GLOBAL;
// printf("block_tile_i: %d, block_tile_j: %d, warpId: %d, laneId: %d, gmem_idx: %d\n", block_tile_i, block_tile_j, warpId, laneId, gmem_idx);
D[gmem_idx] = val;
__syncthreads();
}
}
void init_matrices(int4 *W, int4 *X, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, int X_BIT){
int *W_int = (int*) W;
int *X_int = (int*) X;
for(int b=0; b<W_BIT; b++) {
for(int i = 0; i < M_GLOBAL; i++) {
for(int j = 0; j < K_GLOBAL/32; j++) {
// W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = 0xFFFFFFFF;
// W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = i;
W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = rand();
}
}
}
for(int b = 0; b<X_BIT; b++) {
for(int i = 0; i < N_GLOBAL; i++) {
for(int j = 0; j < K_GLOBAL/32; j++) {
// X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = 0xFFFFFFFF;
// X_int[i*K_GLOBAL/32+j] = i*M_GLOBAL + j;
X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = rand();
}
}
}
}
int popcnt(int i) {
// Java: use int, and use >>> instead of >>
// C or C++: use int
i = i - ((i >> 1) & 0x55555555);
i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
}
int int_pow(int base, int exp)
{
int result = 1;
while (exp)
{
if (exp % 2)
result *= base;
exp /= 2;
base *= base;
}
return result;
}
void compute_ref(int4 *W, int4 *X, int *ref_C, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, int X_BIT) {
int *W_int = (int*) W;
int *X_int = (int*) X;
for (int m = 0; m < M_GLOBAL; m++) {
for (int n = 0; n < N_GLOBAL; n++) {
int tmp = 0;
for(int xb=0; xb<X_BIT; xb++) {
int X_Multiplier = int_pow(2,xb);
for(int wb=0; wb<W_BIT; wb++) {
int W_Multiplier = int_pow(2,wb);
for(int k_tile=0; k_tile<K_GLOBAL/32; k_tile++) {
int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile];
int x_int = X_int[xb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile];
for(int k=0; k<32; k++) {
int mask = 1;
int x_val = ((mask << k) & x_int) >> k;
int w_val = ((mask << k) & w_int) >> k;
tmp += X_Multiplier * W_Multiplier * x_val * w_val;
}
}
}
}
ref_C[m*N_GLOBAL+n]= tmp;
}
}
}
void compute_ref_pack(int4 *W, int4 *X, int *ref_C, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int X_BIT, int W_BIT, int OUT_BIT) {
// Assumes K_GLOBAL and N_GLOBAL are multiples of 32.
int *W_int = (int*) W;
int *X_int = (int*) X;
int C_ref_before_decompose[M_GLOBAL*N_GLOBAL];
for (int m = 0; m < M_GLOBAL; m++) {
for (int n = 0; n < N_GLOBAL; n++) {
int tmp = 0;
for(int xb=0; xb<X_BIT; xb++) {
int X_Multiplier = int_pow(2,xb);
for(int wb=0; wb<W_BIT; wb++) {
int W_Multiplier = int_pow(2,wb);
for(int k_tile=0; k_tile<K_GLOBAL/32; k_tile++) {
int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile];
int x_int = X_int[xb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile];
for(int k=0; k<32; k++) {
int mask = 1;
int x_val = ((mask << k) & x_int) >> k;
int w_val = ((mask << k) & w_int) >> k;
tmp += X_Multiplier * W_Multiplier * x_val * w_val;
}
}
}
}
C_ref_before_decompose[m*K_GLOBAL+n]= tmp;
}
}
for(int m=0; m<M_GLOBAL; m++) {
for(int n_tile=0; n_tile<N_GLOBAL/32; n_tile++) {
int val[OUT_BIT];
for(int b=0; b<OUT_BIT; b++) val[b] = 0;
for(int n=0; n<32; n++) {
int tmp = C_ref_before_decompose[m*K_GLOBAL+n_tile*32+n];
tmp = (tmp - 128); // Can be modified for other quantized parameters.
for(int b=0; b<OUT_BIT; b++) {
int mask = 1;
val[b] = val[b] << 1;
val[b] = val[b] | ((mask<<b) & tmp);
}
}
for(int b=0; b<OUT_BIT; b++) {
ref_C[b*M_GLOBAL*N_GLOBAL/32+m*N_GLOBAL/32+n_tile/32] = val[b];
}
}
}
}
void validate_results(int *C, int* ref_C, int M_, int N_) {
// Assumes K_GLOBAL and N_GLOBAL are multiples of 32.
printf("Checking computed result for correctness: ");
bool correct = true;
double eps = 1.e-6; // machine zero
for(int i = 0; i < M_; i++) {
for(int j = 0; j < N_; j++) {
int idx = i*N_+j;
double dst = fabs(C[idx] - ref_C[idx]);
if (dst > eps) {
// printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps);
printf("i: %d, j: %d, C: %d, ref_C: %d\n", i, j, C[idx], ref_C[idx]);
// printf("non equal\n");
correct = false;
}
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
}
void validate_results_pack(int *C, int* ref_C, int M_, int N_, int OUT_BIT) {
// Assumes K_GLOBAL and N_GLOBAL are multiples of 32.
printf("Checking computed result with pack for correctness: ");
bool correct = true;
double eps = 1.e-6; // machine zero
for(int m = 0; m < M_; m++) {
for(int n_tile = 0; n_tile < N_/32; n_tile++) {
for(int b=0; b<OUT_BIT; b++) {
int idx = b*M_*N_/32 + m*N_/32+n_tile;
double dst = fabs(C[idx] - ref_C[idx]);
if (dst > eps) {
// printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps);
printf("m: %d, n_tile: %d, b: %d, C: %d, ref_C: %d\n", m, n_tile, b, C[idx], ref_C[idx]);
// printf("non equal\n");
correct = false;
}
}
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
}
#define verify_output
int main(int argc, char **argv) {
int dev = findCudaDevice(argc, (const char **)argv);
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
int X_BIT = 8;
int W_BIT = 1;
int M_GLOBAL = 64;
for (int N_GLOBAL=128; N_GLOBAL<=1024; N_GLOBAL += 128 ) {
int K_GLOBAL = N_GLOBAL;
int4 *X = NULL;
int4 *W = NULL;
int *Output = NULL;
checkCudaErrors(
hipMalloc(reinterpret_cast<void **>(&W), sizeof(int4) * M_GLOBAL * (K_GLOBAL/128)* W_BIT));
checkCudaErrors(
hipMalloc(reinterpret_cast<void **>(&X), sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT));
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&Output), sizeof(int) * M_GLOBAL * N_GLOBAL));
#ifdef verify_output
int4 *W_h = NULL;
int4 *X_h = NULL;
int *Output_h = NULL;
W_h = (int4 *)malloc(sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * W_BIT);
X_h = (int4 *)malloc(sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT);
Output_h = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL);
printf("Preparing validation data for GPU...\n");
init_matrices(W_h, X_h, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT);
checkCudaErrors(hipMemcpy(W, W_h, sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * W_BIT, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(X, X_h, sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT, hipMemcpyHostToDevice));
#endif
int SHMEM_SZ = 65536;
checkCudaErrors(hipFuncSetAttribute(
apmm_w1a8, hipFuncAttributeMaxDynamicSharedMemorySize,
SHMEM_SZ));
// Run ours NUM_PROFILES times and record time.
float bmma_ms_avg = 0.0f;
int NUM_PROFILES = 200;
for(int iter=0; iter<NUM_PROFILES; ++iter){
float bmma_ms = 0.0f;
hipEvent_t bmma_start;
hipEvent_t bmma_end;
hipEventCreate(&bmma_start);
hipEventCreate(&bmma_end);
hipEventRecord(bmma_start);
checkKernelErrors(
(hipLaunchKernelGGL(apmm_w1a8, dim3(deviceProp.multiProcessorCount), dim3(THREADS_PER_BLOCK),
SHMEM_SZ, 0, W, X, Output, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT)));
hipEventRecord(bmma_end);
hipEventSynchronize(bmma_end);
hipEventElapsedTime(&bmma_ms, bmma_start, bmma_end);
hipEventDestroy(bmma_start);
hipEventDestroy(bmma_end);
bmma_ms_avg += bmma_ms;
}
bmma_ms_avg = bmma_ms_avg/(float)NUM_PROFILES;
printf("V77, 64x64. M_GLOBAL: %d, N_GLOBAL: %d, K_GLOBAL: %d, X_BIT: %d, W_BIT: %d\n", M_GLOBAL, N_GLOBAL, K_GLOBAL, X_BIT, W_BIT);
printf("Time: %f ms\n", bmma_ms_avg);
printf("TOPS: %.2f\n", (((double)(M_GLOBAL) * N_GLOBAL * K_GLOBAL * 2)/(bmma_ms_avg/1000.)) / 1e12);
#ifdef verify_output
printf("Validating results...\n");
checkCudaErrors(hipMemcpy(Output_h, Output, sizeof(int) * M_GLOBAL * N_GLOBAL, hipMemcpyDeviceToHost));
int *Output_ref = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL);
/* Compute reference matrix on CPU */
compute_ref(W_h, X_h, Output_ref, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT);
/* validation results */
validate_results(Output_h, Output_ref, M_GLOBAL, N_GLOBAL);
free(W_h);
free(X_h);
free(Output_h);
free(Output_ref);
#endif
checkCudaErrors(hipFree(reinterpret_cast<void *>(W)));
checkCudaErrors(hipFree(reinterpret_cast<void *>(X)));
checkCudaErrors(hipFree(reinterpret_cast<void *>(Output)));
}
return EXIT_SUCCESS;
}
|
926c6ed96befc0bf86883ddd48be278b2690671e.cu
|
/*
1-bit BMMA code.
Runs at 500TOPS for matrix size of 4096x4096x8192.
Borrows largely from CUDA-SDK.
By Boyuan
*/
#include <assert.h>
#include <cuda.h>
#include <mma.h>
#include <stdio.h>
#include <helper_cuda.h>
#include <helper_functions.h>
// GPU configuration.
#define WARP_SIZE 32
// MMA matrix tile dimensions.
#define M 8
#define N 8
#define K 128
#define C_LAYOUT wmma::mem_row_major
// Implementation constants.
#define WARPS_PER_BLOCK 8
#define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK)
#define CHUNK_K 1
#define BLOCK_ROW_WARPS 2
#define BLOCK_COL_WARPS 4
#define WARP_ROW_TILES 4
#define WARP_COL_TILES 2
#define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS)
#define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS)
#define GLOBAL_MEM_STRIDE N_GLOBAL
#define SHMEM_STRIDE (N * BLOCK_ROW_TILES)
#define SHMEM_OFFSET (N * WARP_ROW_TILES)
// The macro below is used to shift rows of the A matrix and columns of the B
// matrix in shared memory to minimize possible bank conflicts. Before
// performing the nvcuda::wmma::mma_sync operation, the warp must load the
// matrix data using the nvcuda::wmma::load_matrix_sync operation. Although the
// memory access pattern is not specified for that function, each lane in the
// warp can read one or multiple matrix elements from different matrix rows or
// columns. For shared memory, such access can result in bank conflicts if
// different rows / columns of the matrix map to the same bank. By shifting each
// row and column by a few bytes, we make sure that they map to different banks,
// thus reducing the number of possible bank conflicts. The number of 32
// one-byte "uint8_t" elements is chosen as the minimum possible shift because
// we must keep each row and column 256-bit aligned, as required by
// nvcuda::wmma::load_matrix_sync.
#define SKEW 0 // Updated for int4
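// Sketch of the skew idea described above (illustrative helper only; with
// SKEW == 0, as in this int4 variant, rows are packed tightly): a non-zero
// SKEW pads each shared-memory row by SKEW int4 elements so that consecutive
// rows start in different banks.
__device__ __forceinline__ const int4 *skewed_row(const int4 *tile, int row) {
return tile + row * (CHUNK_K + SKEW); // row stride in int4 elements, including the skew padding
}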
#define checkKernelErrors(expr) \
do { \
expr; \
\
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \
cudaGetErrorString(__err)); \
abort(); \
} \
} while (0)
using namespace nvcuda;
using namespace nvcuda::wmma::experimental;
__global__ void apmm_w1a8(const int4 *W, const int4 *X, int *D, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int wb, int xb) {
// GEMM configuration.
int K_TILES = K_GLOBAL / 128;
int W_bit_offset = M_GLOBAL*K_GLOBAL/128;
int X_bit_offset = N_GLOBAL*K_GLOBAL/128;
int ROW_BIT = K_GLOBAL/128;
extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here.
// Warp and lane identification.
const unsigned int warpId = threadIdx.x / WARP_SIZE;
const unsigned int laneId = threadIdx.x % WARP_SIZE;
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int i=0; i<M_GLOBAL; i++) {
// for(int j=0; j<K_GLOBAL/32; j++) {
// printf("W[%d][%d]: %x\n", i, j, *((int*)W+i*K_GLOBAL/32+j));
// }
// }
// }
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int b=0; b<xb; b++) {
// for(int i=0; i<N_GLOBAL; i++) {
// for(int j=0; j<K_GLOBAL/32; j++) {
// printf("bit: %d, X[%d][%d]: %x\n", b, i, j, *((int*)X+b*X_bit_offset + i*K_GLOBAL/32+j));
// }
// }
// }
// }
for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
const unsigned int block_tile_i = block_pos / (N_GLOBAL/8) * 64;
const unsigned int block_tile_j = block_pos % (N_GLOBAL/8) * 8;
// Stop when there are no more D matrix tiles to compute in this CTA.
if (block_tile_i >= M_GLOBAL) {
break;
}
typedef union {
int4 vec;
int a[4];
} U4;
wmma::fragment<wmma::accumulator, M, N, K, int> c[WARP_COL_TILES]
[WARP_ROW_TILES];
for(int i=0; i < WARP_COL_TILES; i++)
for(int j = 0; j < WARP_ROW_TILES; j++)
wmma::fill_fragment(c[i][j], 0);
// Select what warp copies what matrix to shared memory.
// Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix.
const int4 *warp_ptr;
if (warpId < 4) {
warp_ptr = &W[block_tile_i * ROW_BIT] + warpId * 16 * ROW_BIT;
} else {
warp_ptr = &X[block_tile_j * ROW_BIT + (warpId-4)*2*X_bit_offset];
}
// Go through the global K dimension by a fixed step at a time.
#pragma unroll
for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) {
// Offset in shared memory from which the B matrix is stored.
const size_t shmem_idx_b_off = 64; // TODO: This BLOCK_COL_TILES may be selected to improve performance. Maybe moved outside the for loop.
// Copy slices of the A and B matrices to shared memory.
// The first half of the warps in the CTA copy the A matrix, the rest copy
// the B matrix.
int *shmem_ptr = (int*)shmem + warpId*16*4*(CHUNK_K+SKEW) + (laneId/4)*4*(CHUNK_K+SKEW) + laneId%4;
// First half of the warp copies the first row / column of the matrix,
// the second half of the warp copies the next.
// int4 *lane_ptr = (int4 *)(warp_ptr + tile_k * (K/128) +
// (laneId / CHUNK_COPY_LINE_LANES) * (K_GLOBAL/128)) +
// (laneId % CHUNK_COPY_LINE_LANES); // (K/128), since K=128 in bit. int4 is 128 bit.
int *lane_ptr = (int*)warp_ptr + laneId/4*ROW_BIT*4 + laneId%4 + tile_k*4;
*shmem_ptr = *lane_ptr;
shmem_ptr += 8*4*(CHUNK_K+SKEW);
if (warpId < 4) {
lane_ptr += 8*ROW_BIT*4;
} else {
lane_ptr += X_bit_offset*4;
}
*shmem_ptr = *lane_ptr;
// U4 tmp_probe;
// tmp_probe.vec = *lane_ptr;
// printf("tmp_probe.a[0]: %d, tmp_probe.a[1]: %d, tmp_probe.a[2]: %d, tmp_probe.a[3]: %d\n", tmp_probe.a[0], tmp_probe.a[1], tmp_probe.a[2], tmp_probe.a[3]);
__syncthreads();
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int i=0; i<128; i++) {
// printf("Load from GL. i: %d, val: %x %x %x %x \n", i, *((int*)&shmem[i][0]+0), *((int*)&shmem[i][0]+1), *((int*)&shmem[i][0]+2), *((int*)&shmem[i][0]+3));
// }
// }
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int k_step = 0; k_step < CHUNK_K; k_step++) {
wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES];
wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES];
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M);
const int4 *tile_ptr = &shmem[shmem_idx_a][k_step];
wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128);
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int t=0; t<a[i].num_elements; t++) {
// printf("a[%d].x[%d]: %x\n", i, t, a[i].x[t]);
// }
// printf("shmem_idx_a: %d, k_step: %d\n", shmem_idx_a, k_step);
// }
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be
// reused against the other A matrix fragments.
size_t shmem_idx_b = shmem_idx_b_off +
(WARP_ROW_TILES * N) * (warpId % 2) +
(j * N);
const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)];
wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128);
}
// printf("ckpt4\n");
wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND);
}
}
}
__syncthreads();
}
// This pointer is used to access the C and D matrix tiles this warp computes.
int *shmem_warp_tile_ptr = (int*)&shmem[0][0] +
(warpId / 2) * 64 * 16 +
(warpId % 2) * 32; // Will be used only when writing back D. May be moved outside the for loop. TODO.
// Store the D fragments to shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
int *tile_ptr = shmem_warp_tile_ptr + i * 64 * 8 + j * 8;
wmma::store_matrix_sync(tile_ptr, c[i][j], 64, C_LAYOUT);
}
}
__syncthreads();
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int i=62; i<64; i++) {
// for(int j=0; j<64; j++) {
// printf("i: %d, j: %d, val: %d\n", i, j, *((int*)&shmem[0][0]+i*64+j));
// }
// }
// }
// This pointer is used to stream the C and D matrices block-wide tile to and from shared memory.
// int *shmem_warp_stream_ptr = (int*)&shmem[0][0] + warpId * SHMEM_STRIDE * M; // Will be used only when writing back D. Maybe moved outside the for loop. TODO.
size_t idx = threadIdx.x/8 * 64 + threadIdx.x%8;
int *shmem_warp_stream_ptr = (int*)&shmem[0][0]+idx;
int val = 0;
int multiplier = 1;
#pragma unroll
for(int j=0; j<8; j++) {
int tmp = *(shmem_warp_stream_ptr+8*j);
val += (multiplier*tmp);
multiplier *= 2;
}
// This warp's pointer to the C matrix data to copy memory from to shared memory.
// TODO: May be moved outside the for loop.
size_t gmem_idx = block_tile_i*N_GLOBAL + block_tile_j + (threadIdx.x/8)*N_GLOBAL + threadIdx.x%8;
// printf("block_tile_i: %d, block_tile_j: %d, warpId: %d, laneId: %d, gmem_idx: %d\n", block_tile_i, block_tile_j, warpId, laneId, gmem_idx);
D[gmem_idx] = val;
idx += 32*64;
shmem_warp_stream_ptr = (int*)&shmem[0][0]+idx;
val = 0;
multiplier = 1;
#pragma unroll
for(int j=0; j<8; j++) {
int tmp = *(shmem_warp_stream_ptr+8*j);
val += (multiplier*tmp);
multiplier *= 2;
}
// This warp's pointer to the C matrix data to copy memory from to shared memory.
// TODO: May be moved outside the for loop.
gmem_idx += 32*N_GLOBAL;
// printf("block_tile_i: %d, block_tile_j: %d, warpId: %d, laneId: %d, gmem_idx: %d\n", block_tile_i, block_tile_j, warpId, laneId, gmem_idx);
D[gmem_idx] = val;
__syncthreads();
}
}
void init_matrices(int4 *W, int4 *X, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, int X_BIT){
int *W_int = (int*) W;
int *X_int = (int*) X;
for(int b=0; b<W_BIT; b++) {
for(int i = 0; i < M_GLOBAL; i++) {
for(int j = 0; j < K_GLOBAL/32; j++) {
// W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = 0xFFFFFFFF;
// W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = i;
W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = rand();
}
}
}
for(int b = 0; b<X_BIT; b++) {
for(int i = 0; i < N_GLOBAL; i++) {
for(int j = 0; j < K_GLOBAL/32; j++) {
// X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = 0xFFFFFFFF;
// X_int[i*K_GLOBAL/32+j] = i*M_GLOBAL + j;
X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = rand();
}
}
}
}
int popcnt(int i) {
// Java: use int, and use >>> instead of >>
// C or C++: use int
i = i - ((i >> 1) & 0x55555555);
i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
}
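// Sketch relating popcnt to the 1-bit GEMM above (hypothetical helper): the
// dot product of two 32-element {0,1} vectors packed into ints is the
// population count of their bitwise AND, which is exactly what bmma_sync with
// bmmaBitOpAND accumulates per 128-bit fragment on the GPU side.
static inline int dot_1bit_32(int w, int x) {
return popcnt(w & x); // count positions where both operands have a 1 bit
}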
int int_pow(int base, int exp)
{
int result = 1;
while (exp)
{
if (exp % 2)
result *= base;
exp /= 2;
base *= base;
}
return result;
}
void compute_ref(int4 *W, int4 *X, int *ref_C, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, int X_BIT) {
int *W_int = (int*) W;
int *X_int = (int*) X;
for (int m = 0; m < M_GLOBAL; m++) {
for (int n = 0; n < N_GLOBAL; n++) {
int tmp = 0;
for(int xb=0; xb<X_BIT; xb++) {
int X_Multiplier = int_pow(2,xb);
for(int wb=0; wb<W_BIT; wb++) {
int W_Multiplier = int_pow(2,wb);
for(int k_tile=0; k_tile<K_GLOBAL/32; k_tile++) {
int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile];
int x_int = X_int[xb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile];
for(int k=0; k<32; k++) {
int mask = 1;
int x_val = ((mask << k) & x_int) >> k;
int w_val = ((mask << k) & w_int) >> k;
tmp += X_Multiplier * W_Multiplier * x_val * w_val;
}
}
}
}
ref_C[m*N_GLOBAL+n]= tmp;
}
}
}
void compute_ref_pack(int4 *W, int4 *X, int *ref_C, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int X_BIT, int W_BIT, int OUT_BIT) {
// Assumes K_GLOBAL and N_GLOBAL are multiples of 32.
int *W_int = (int*) W;
int *X_int = (int*) X;
int C_ref_before_decompose[M_GLOBAL*N_GLOBAL];
for (int m = 0; m < M_GLOBAL; m++) {
for (int n = 0; n < N_GLOBAL; n++) {
int tmp = 0;
for(int xb=0; xb<X_BIT; xb++) {
int X_Multiplier = int_pow(2,xb);
for(int wb=0; wb<W_BIT; wb++) {
int W_Multiplier = int_pow(2,wb);
for(int k_tile=0; k_tile<K_GLOBAL/32; k_tile++) {
int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile];
int x_int = X_int[xb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile];
for(int k=0; k<32; k++) {
int mask = 1;
int x_val = ((mask << k) & x_int) >> k;
int w_val = ((mask << k) & w_int) >> k;
tmp += X_Multiplier * W_Multiplier * x_val * w_val;
}
}
}
}
C_ref_before_decompose[m*K_GLOBAL+n]= tmp;
}
}
for(int m=0; m<M_GLOBAL; m++) {
for(int n_tile=0; n_tile<N_GLOBAL/32; n_tile++) {
int val[OUT_BIT];
for(int b=0; b<OUT_BIT; b++) val[b] = 0;
for(int n=0; n<32; n++) {
int tmp = C_ref_before_decompose[m*K_GLOBAL+n_tile*32+n];
tmp = (tmp - 128); // Can be modified for other quantized parameters.
for(int b=0; b<OUT_BIT; b++) {
int mask = 1;
val[b] = val[b] << 1;
val[b] = val[b] | ((mask<<b) & tmp);
}
}
for(int b=0; b<OUT_BIT; b++) {
ref_C[b*M_GLOBAL*N_GLOBAL/32+m*N_GLOBAL/32+n_tile/32] = val[b];
}
}
}
}
void validate_results(int *C, int* ref_C, int M_, int N_) {
// Assumes K_GLOBAL and N_GLOBAL are multiples of 32.
printf("Checking computed result for correctness: ");
bool correct = true;
double eps = 1.e-6; // machine zero
for(int i = 0; i < M_; i++) {
for(int j = 0; j < N_; j++) {
int idx = i*N_+j;
double dst = fabs(C[idx] - ref_C[idx]);
if (dst > eps) {
// printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps);
printf("i: %d, j: %d, C: %d, ref_C: %d\n", i, j, C[idx], ref_C[idx]);
// printf("non equal\n");
correct = false;
}
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
}
void validate_results_pack(int *C, int* ref_C, int M_, int N_, int OUT_BIT) {
// Assumes K_GLOBAL and N_GLOBAL are multiples of 32.
printf("Checking computed result with pack for correctness: ");
bool correct = true;
double eps = 1.e-6; // machine zero
for(int m = 0; m < M_; m++) {
for(int n_tile = 0; n_tile < N_/32; n_tile++) {
for(int b=0; b<OUT_BIT; b++) {
int idx = b*M_*N_/32 + m*N_/32+n_tile;
double dst = fabs(C[idx] - ref_C[idx]);
if (dst > eps) {
// printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps);
printf("m: %d, n_tile: %d, b: %d, C: %d, ref_C: %d\n", m, n_tile, b, C[idx], ref_C[idx]);
// printf("non equal\n");
correct = false;
}
}
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
}
#define verify_output
int main(int argc, char **argv) {
int dev = findCudaDevice(argc, (const char **)argv);
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev));
int X_BIT = 8;
int W_BIT = 1;
int M_GLOBAL = 64;
for (int N_GLOBAL=128; N_GLOBAL<=1024; N_GLOBAL += 128 ) {
int K_GLOBAL = N_GLOBAL;
int4 *X = NULL;
int4 *W = NULL;
int *Output = NULL;
checkCudaErrors(
cudaMalloc(reinterpret_cast<void **>(&W), sizeof(int4) * M_GLOBAL * (K_GLOBAL/128)* W_BIT));
checkCudaErrors(
cudaMalloc(reinterpret_cast<void **>(&X), sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT));
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&Output), sizeof(int) * M_GLOBAL * N_GLOBAL));
#ifdef verify_output
int4 *W_h = NULL;
int4 *X_h = NULL;
int *Output_h = NULL;
W_h = (int4 *)malloc(sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * W_BIT);
X_h = (int4 *)malloc(sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT);
Output_h = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL);
printf("Preparing validation data for GPU...\n");
init_matrices(W_h, X_h, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT);
checkCudaErrors(cudaMemcpy(W, W_h, sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * W_BIT, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(X, X_h, sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT, cudaMemcpyHostToDevice));
#endif
int SHMEM_SZ = 65536;
checkCudaErrors(cudaFuncSetAttribute(
apmm_w1a8, cudaFuncAttributeMaxDynamicSharedMemorySize,
SHMEM_SZ));
// Run ours NUM_PROFILES times and record time.
float bmma_ms_avg = 0.0f;
int NUM_PROFILES = 200;
for(int iter=0; iter<NUM_PROFILES; ++iter){
float bmma_ms = 0.0f;
cudaEvent_t bmma_start;
cudaEvent_t bmma_end;
cudaEventCreate(&bmma_start);
cudaEventCreate(&bmma_end);
cudaEventRecord(bmma_start);
checkKernelErrors(
(apmm_w1a8<<<deviceProp.multiProcessorCount, THREADS_PER_BLOCK,
SHMEM_SZ>>>(W, X, Output, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT)));
cudaEventRecord(bmma_end);
cudaEventSynchronize(bmma_end);
cudaEventElapsedTime(&bmma_ms, bmma_start, bmma_end);
cudaEventDestroy(bmma_start);
cudaEventDestroy(bmma_end);
bmma_ms_avg += bmma_ms;
}
bmma_ms_avg = bmma_ms_avg/(float)NUM_PROFILES;
printf("V77, 64x64. M_GLOBAL: %d, N_GLOBAL: %d, K_GLOBAL: %d, X_BIT: %d, W_BIT: %d\n", M_GLOBAL, N_GLOBAL, K_GLOBAL, X_BIT, W_BIT);
printf("Time: %f ms\n", bmma_ms_avg);
printf("TOPS: %.2f\n", (((double)(M_GLOBAL) * N_GLOBAL * K_GLOBAL * 2)/(bmma_ms_avg/1000.)) / 1e12);
#ifdef verify_output
printf("Validating results...\n");
checkCudaErrors(cudaMemcpy(Output_h, Output, sizeof(int) * M_GLOBAL * N_GLOBAL, cudaMemcpyDeviceToHost));
int *Output_ref = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL);
/* Compute reference matrix on CPU */
compute_ref(W_h, X_h, Output_ref, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT);
/* Validate results against the CPU reference */
validate_results(Output_h, Output_ref, M_GLOBAL, N_GLOBAL);
free(W_h);
free(X_h);
free(Output_h);
free(Output_ref);
#endif
checkCudaErrors(cudaFree(reinterpret_cast<void *>(W)));
checkCudaErrors(cudaFree(reinterpret_cast<void *>(X)));
checkCudaErrors(cudaFree(reinterpret_cast<void *>(Output)));
}
return EXIT_SUCCESS;
}
|
f101e25071cc052ac5ac560ff263316d9a553555.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdlib.h>
#include <set>
#include <fstream>
#include <stdio.h>
#include <malloc.h>
#include <time.h>
#include <math.h>
#include <random>
#include <chrono>
#include <ratio>
#include <thread>
#include <mutex>
//#define MODULUS_PRIME 1073741827// 30 bit prime
#define MODULUS_PRIME 536870909 //29 bit prime
//#define MODULUS_PRIME 9973 //14 bit prime
//#define MODULUS_PRIME 11 //4 bit prime
void runKernel(unsigned *queryVector,int p, unsigned *queryVector_d, int NDVSM, int THREADSPERBLOCKDVSM, int lengthOfResultVectorReduced,int *columnPtr_d,int *rowIndex_d,unsigned *valueArray_d,unsigned long long int *resultSparseVectorValueArray_d, unsigned long long int *resultSparseVectorValueArrayDD);
static const int numberOfThreads = 500;
std::mutex mtxKernel;
void generateQVector(unsigned *queryVector, int p){
int i;
for(i=0;i<p;i++){
queryVector[i] = rand() % MODULUS_PRIME + 1;
}
}
void printMatrix(int **a,int r, int c) {
int i=0,j=0;
for(;i<r;i++){
for(j=0;j<c;j++){
printf("%d ",a[i][j]);
}
printf("\n");
}
printf("\n");
}
void printVector(unsigned *a,int c) {
int j=0;
for(j=0;j<c;j++){
printf("%d ",a[j]);
}
printf("\n");
}
void printVector2(unsigned long long int *a,int c) {
int j=0;
for(j=0;j<c;j++){
printf("%d ",a[j]);
}
printf("\n");
}
int checkIfEqual(unsigned long long int *resultSparseVectorValueArray, unsigned long long int *resultSparseVectorValueArrayDD, int length) {
int i;
for(i=0;i<length;i++){
if(resultSparseVectorValueArray[i]!=resultSparseVectorValueArrayDD[i]) {
return i;
}
}
return 1;
}
int checkIfEqual2(unsigned *resultSparseVectorValueArray, unsigned *resultSparseVectorValueArrayDD, int length) {
int i;
for(i=0;i<length;i++){
if(resultSparseVectorValueArray[i]!=resultSparseVectorValueArrayDD[i]) {
return i;
}
}
return 1;
}
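// Sparse matrix / dense vector product over Z_p: one thread per compressed
// (non-empty) column. Each thread accumulates valueArray[j]*queryVector[rowIndex[j]]
// over the column's nonzeros, using columnPtr for the per-column ranges, and reduces
// the sum modulo MODULUS_PRIME.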
__global__ void dvsmMultKernel(int lengthOfResultVectorReduced, int *columnPtr,int *rowIndex,unsigned *valueArray,
unsigned *queryVector,unsigned long long int *resultSparseVectorValueArray){
int col = blockDim.x * blockIdx.x + threadIdx.x;
if(col<lengthOfResultVectorReduced){
unsigned long long int temp = 0;
int j;
for(j=columnPtr[col];j<columnPtr[col+1];j++) {
temp += valueArray[j]*queryVector[rowIndex[j]];
}
resultSparseVectorValueArray[col]= temp % MODULUS_PRIME; //mul_m(1,temp,MODULUS_PRIME,INVK);//
}
}
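// For each (p, u) pair: builds a random p x r sparse matrix with exactly u nonzeros
// per row, compresses it column-wise (empty columns are skipped), uploads it to the
// device once, then times numberOfThreads back-to-back query-vector multiplications
// and logs the measurements to testDataStatNOPsE.txt.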
int main()
{
std::ofstream myfile;
myfile.open ("testDataStatNOPsE.txt");
srand (time(NULL));
const long max_u = 16, r = 1L << 18;
for (long p = 2; p <= r; p <<= 1)
{
for (long u = 1; u <= max_u; ++u)
{
//top:
std::cout << "************************************************************************\n";
std::cout << "p: " << p << "; r: " << r << "; u: " << u << "\n";
myfile << "************************************************************************\n";
myfile << "p: " << p << "; r: " << r << "; u: " << u << "\n";
// long k = 0;
std::vector<std::set<long>> cols;
std::vector<std::set<long>*> cols2;
cols.resize(r);
for (auto it = begin(cols); it != end(cols); ++it) cols2.push_back(&(*it));
for (long i = 1; i <= p; ++i)
{
for (long j = 1; j <= u; )
{
long c = rand() % cols2.size();
if (cols2[c]->size() < u && cols2[c]->insert(i).second)
{
j++;
}
else
{
long a = rand() % r;
if (cols[a].size() > 0 && cols[a].find(i) == end(cols[a]))
{
auto elt = begin(cols[a]);
std::advance(elt, rand() % cols[a].size());
long tmp = *elt;
if (cols2[c]->find(tmp) == end(*(cols2[c])))
{
cols[a].erase(elt);
cols[a].insert(i);
cols2[c]->insert(tmp);
j++;
}
}
}
if (cols2[c]->size() == u) cols2.erase(begin(cols2) + c);
}
}
int numberOfNonZeroElements = p*u;
int lengthOfColumnPtr = r+1;
unsigned *valueArray = (unsigned*)malloc(sizeof(unsigned)*numberOfNonZeroElements);
int *rowIndex = (int*)malloc(sizeof(int)*numberOfNonZeroElements);
int *columnPtr = (int*)malloc(sizeof(int)*(lengthOfColumnPtr));
//std::cout << "\nval (" << numberOfNonZeroElements << "): ";
for (long i = 0; i < p * u; i++) {
valueArray[i] = (rand() % MODULUS_PRIME);
//std::cout << valueArray[i] << ",";
}
//std::cout << "\n\nRow: ";
int t=0;
int sum=0;
columnPtr[0] = 0;
int lengthOfCPReduced = 0;
for (int i = 0; i < r; ++i)
{
for (auto it = begin(cols[i]); it != end(cols[i]); ++it)
{
rowIndex[t++] = (*it)-1;
//std::cout << rowIndex[t-1] << ",";
}
if (cols[i].size())
{
columnPtr[lengthOfCPReduced+1]=columnPtr[lengthOfCPReduced]+cols[i].size();
lengthOfCPReduced++;
}
sum+=cols[i].size();
}
//std::cout << "\n\nCol (" << cols.size() <<"): ";
/*
* CUDA started
*
**/
int lengthOfResultVectorReduced = lengthOfCPReduced-1;
int THREADSPERBLOCKDVSM = lengthOfResultVectorReduced < 1024 ? lengthOfResultVectorReduced : 1024;
int NDVSM = (lengthOfResultVectorReduced+THREADSPERBLOCKDVSM-1) / THREADSPERBLOCKDVSM;
unsigned long long int *resultSparseVectorValueArrayDD = (unsigned long long int *)malloc(sizeof(unsigned long long int)*lengthOfResultVectorReduced*numberOfThreads);
unsigned long long int *resultSparseVectorValueArray_d;
unsigned *queryVector = (unsigned*)malloc(sizeof(unsigned)*p*numberOfThreads);
int *rowIndex_d, *columnPtr_d;
unsigned *valueArray_d, *queryVector_d;
hipMalloc((void**)&valueArray_d,(numberOfNonZeroElements*sizeof(unsigned)));
hipMalloc((void**)&rowIndex_d,(numberOfNonZeroElements*sizeof(int)));
hipMalloc((void**)&columnPtr_d,(lengthOfCPReduced)*sizeof(int));
hipMalloc((void**)&queryVector_d,numberOfThreads*p*sizeof(unsigned));
hipMalloc((void**)&resultSparseVectorValueArray_d,(numberOfThreads*lengthOfResultVectorReduced*sizeof(unsigned long long int)));
hipMemcpy( valueArray_d, valueArray, numberOfNonZeroElements*sizeof(unsigned), hipMemcpyHostToDevice );
hipMemcpy( rowIndex_d, rowIndex, numberOfNonZeroElements*sizeof(int), hipMemcpyHostToDevice );
hipMemcpy( columnPtr_d, columnPtr, lengthOfCPReduced*sizeof(int), hipMemcpyHostToDevice );
unsigned long numberOfOps;
generateQVector(queryVector,p*numberOfThreads);
std::thread thrds[numberOfThreads];
std::chrono::duration<int,std::nano> timeSpend;
std::chrono::nanoseconds zeroSec{0};
timeSpend = zeroSec;
//std::chrono::nanoseconds nsInOneSec{1000000000};
std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
for(numberOfOps=0; numberOfOps < numberOfThreads; numberOfOps++){
runKernel(queryVector+numberOfOps,p,queryVector_d+numberOfOps,NDVSM,THREADSPERBLOCKDVSM,lengthOfResultVectorReduced,columnPtr_d,rowIndex_d,valueArray_d,resultSparseVectorValueArray_d+numberOfOps,resultSparseVectorValueArrayDD+numberOfOps);
}
std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();
std::chrono::duration<int,std::nano> time_span = std::chrono::duration_cast<std::chrono::duration<int,std::nano>>(t2 - t1);
timeSpend = timeSpend + time_span ;
std::cout << "Number of "<< numberOfThreads <<" operations at GPU takes: "<< (double)timeSpend.count()/1000 << "ms\n";
myfile << "Number of "<< numberOfThreads <<" operations at GPU takes: "<< (double)timeSpend.count()/1000 << "ms\n";
hipFree( valueArray_d );
hipFree( rowIndex_d );
hipFree( columnPtr_d );
hipFree( queryVector_d );
hipFree( resultSparseVectorValueArray_d );
free(queryVector);
free(resultSparseVectorValueArrayDD);
free(valueArray);
free(rowIndex);
free(columnPtr);
/*
* CUDA finished
*
**/ }
}
myfile.close();
}
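// Host wrapper for a single query: copies the query vector to the device, launches
// dvsmMultKernel, and copies the reduced result vector back to the host.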
void runKernel(unsigned *queryVector,int p, unsigned *queryVector_d, int NDVSM, int THREADSPERBLOCKDVSM, int lengthOfResultVectorReduced,int *columnPtr_d,int *rowIndex_d,unsigned *valueArray_d,unsigned long long int *resultSparseVectorValueArray_d, unsigned long long int *resultSparseVectorValueArrayDD) {
hipMemcpy( queryVector_d, queryVector, p*sizeof(unsigned), hipMemcpyHostToDevice );
// mtxKernel.lock();
hipLaunchKernelGGL(( dvsmMultKernel), dim3(NDVSM), dim3(THREADSPERBLOCKDVSM), 0, 0, lengthOfResultVectorReduced,columnPtr_d, rowIndex_d, valueArray_d, queryVector_d, resultSparseVectorValueArray_d);
// mtxKernel.unlock();
hipMemcpy( resultSparseVectorValueArrayDD, resultSparseVectorValueArray_d, (lengthOfResultVectorReduced*sizeof(unsigned long long int)), hipMemcpyDeviceToHost );
}
|
f101e25071cc052ac5ac560ff263316d9a553555.cu
|
#include <iostream>
#include <stdlib.h>
#include <set>
#include <fstream>
#include <stdio.h>
#include <malloc.h>
#include <time.h>
#include <math.h>
#include <random>
#include <chrono>
#include <ratio>
#include <thread>
#include <mutex>
//#define MODULUS_PRIME 1073741827// 30 bit prime
#define MODULUS_PRIME 536870909 //29 bit prime
//#define MODULUS_PRIME 9973 //14 bit prime
//#define MODULUS_PRIME 11 //4 bit prime
void runKernel(unsigned *queryVector,int p, unsigned *queryVector_d, int NDVSM, int THREADSPERBLOCKDVSM, int lengthOfResultVectorReduced,int *columnPtr_d,int *rowIndex_d,unsigned *valueArray_d,unsigned long long int *resultSparseVectorValueArray_d, unsigned long long int *resultSparseVectorValueArrayDD);
static const int numberOfThreads = 500;
std::mutex mtxKernel;
void generateQVector(unsigned *queryVector, int p){
int i;
for(i=0;i<p;i++){
queryVector[i] = rand() % MODULUS_PRIME + 1;
}
}
void printMatrix(int **a,int r, int c) {
int i=0,j=0;
for(;i<r;i++){
for(j=0;j<c;j++){
printf("%d ",a[i][j]);
}
printf("\n");
}
printf("\n");
}
void printVector(unsigned *a,int c) {
int j=0;
for(j=0;j<c;j++){
printf("%d ",a[j]);
}
printf("\n");
}
void printVector2(unsigned long long int *a,int c) {
int j=0;
for(j=0;j<c;j++){
printf("%d ",a[j]);
}
printf("\n");
}
int checkIfEqual(unsigned long long int *resultSparseVectorValueArray, unsigned long long int *resultSparseVectorValueArrayDD, int length) {
int i;
for(i=0;i<length;i++){
if(resultSparseVectorValueArray[i]!=resultSparseVectorValueArrayDD[i]) {
return i;
}
}
return 1;
}
int checkIfEqual2(unsigned *resultSparseVectorValueArray, unsigned *resultSparseVectorValueArrayDD, int length) {
int i;
for(i=0;i<length;i++){
if(resultSparseVectorValueArray[i]!=resultSparseVectorValueArrayDD[i]) {
return i;
}
}
return 1;
}
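// Sparse matrix / dense vector product over Z_p: one thread per compressed
// (non-empty) column. Each thread accumulates valueArray[j]*queryVector[rowIndex[j]]
// over the column's nonzeros, using columnPtr for the per-column ranges, and reduces
// the sum modulo MODULUS_PRIME.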
__global__ void dvsmMultKernel(int lengthOfResultVectorReduced, int *columnPtr,int *rowIndex,unsigned *valueArray,
unsigned *queryVector,unsigned long long int *resultSparseVectorValueArray){
int col = blockDim.x * blockIdx.x + threadIdx.x;
if(col<lengthOfResultVectorReduced){
unsigned long long int temp = 0;
int j;
for(j=columnPtr[col];j<columnPtr[col+1];j++) {
temp += valueArray[j]*queryVector[rowIndex[j]];
}
resultSparseVectorValueArray[col]= temp % MODULUS_PRIME; //mul_m(1,temp,MODULUS_PRIME,INVK);//
}
}
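// For each (p, u) pair: builds a random p x r sparse matrix with exactly u nonzeros
// per row, compresses it column-wise (empty columns are skipped), uploads it to the
// device once, then times numberOfThreads back-to-back query-vector multiplications
// and logs the measurements to testDataStatNOPsE.txt.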
int main()
{
std::ofstream myfile;
myfile.open ("testDataStatNOPsE.txt");
srand (time(NULL));
const long max_u = 16, r = 1L << 18;
for (long p = 2; p <= r; p <<= 1)
{
for (long u = 1; u <= max_u; ++u)
{
//top:
std::cout << "************************************************************************\n";
std::cout << "p: " << p << "; r: " << r << "; u: " << u << "\n";
myfile << "************************************************************************\n";
myfile << "p: " << p << "; r: " << r << "; u: " << u << "\n";
// long k = 0;
std::vector<std::set<long>> cols;
std::vector<std::set<long>*> cols2;
cols.resize(r);
for (auto it = begin(cols); it != end(cols); ++it) cols2.push_back(&(*it));
for (long i = 1; i <= p; ++i)
{
for (long j = 1; j <= u; )
{
long c = rand() % cols2.size();
if (cols2[c]->size() < u && cols2[c]->insert(i).second)
{
j++;
}
else
{
long a = rand() % r;
if (cols[a].size() > 0 && cols[a].find(i) == end(cols[a]))
{
auto elt = begin(cols[a]);
std::advance(elt, rand() % cols[a].size());
long tmp = *elt;
if (cols2[c]->find(tmp) == end(*(cols2[c])))
{
cols[a].erase(elt);
cols[a].insert(i);
cols2[c]->insert(tmp);
j++;
}
}
}
if (cols2[c]->size() == u) cols2.erase(begin(cols2) + c);
}
}
int numberOfNonZeroElements = p*u;
int lengthOfColumnPtr = r+1;
unsigned *valueArray = (unsigned*)malloc(sizeof(unsigned)*numberOfNonZeroElements);
int *rowIndex = (int*)malloc(sizeof(int)*numberOfNonZeroElements);
int *columnPtr = (int*)malloc(sizeof(int)*(lengthOfColumnPtr));
//std::cout << "\nval (" << numberOfNonZeroElements << "): ";
for (long i = 0; i < p * u; i++) {
valueArray[i] = (rand() % MODULUS_PRIME);
//std::cout << valueArray[i] << ",";
}
//std::cout << "\n\nRow: ";
int t=0;
int sum=0;
columnPtr[0] = 0;
int lengthOfCPReduced = 0;
for (int i = 0; i < r; ++i)
{
for (auto it = begin(cols[i]); it != end(cols[i]); ++it)
{
rowIndex[t++] = (*it)-1;
//std::cout << rowIndex[t-1] << ",";
}
if (cols[i].size())
{
columnPtr[lengthOfCPReduced+1]=columnPtr[lengthOfCPReduced]+cols[i].size();
lengthOfCPReduced++;
}
sum+=cols[i].size();
}
//std::cout << "\n\nCol (" << cols.size() <<"): ";
/*
* CUDA started
*
**/
int lengthOfResultVectorReduced = lengthOfCPReduced-1;
int THREADSPERBLOCKDVSM = lengthOfResultVectorReduced < 1024 ? lengthOfResultVectorReduced : 1024;
int NDVSM = (lengthOfResultVectorReduced+THREADSPERBLOCKDVSM-1) / THREADSPERBLOCKDVSM;
unsigned long long int *resultSparseVectorValueArrayDD = (unsigned long long int *)malloc(sizeof(unsigned long long int)*lengthOfResultVectorReduced*numberOfThreads);
unsigned long long int *resultSparseVectorValueArray_d;
unsigned *queryVector = (unsigned*)malloc(sizeof(unsigned)*p*numberOfThreads);
int *rowIndex_d, *columnPtr_d;
unsigned *valueArray_d, *queryVector_d;
cudaMalloc((void**)&valueArray_d,(numberOfNonZeroElements*sizeof(unsigned)));
cudaMalloc((void**)&rowIndex_d,(numberOfNonZeroElements*sizeof(int)));
cudaMalloc((void**)&columnPtr_d,(lengthOfCPReduced)*sizeof(int));
cudaMalloc((void**)&queryVector_d,numberOfThreads*p*sizeof(unsigned));
cudaMalloc((void**)&resultSparseVectorValueArray_d,(numberOfThreads*lengthOfResultVectorReduced*sizeof(unsigned long long int)));
cudaMemcpy( valueArray_d, valueArray, numberOfNonZeroElements*sizeof(unsigned), cudaMemcpyHostToDevice );
cudaMemcpy( rowIndex_d, rowIndex, numberOfNonZeroElements*sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy( columnPtr_d, columnPtr, lengthOfCPReduced*sizeof(int), cudaMemcpyHostToDevice );
unsigned long numberOfOps;
generateQVector(queryVector,p*numberOfThreads);
std::thread thrds[numberOfThreads];
std::chrono::duration<int,std::nano> timeSpend;
std::chrono::nanoseconds zeroSec{0};
timeSpend = zeroSec;
//std::chrono::nanoseconds nsInOneSec{1000000000};
std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
for(numberOfOps=0; numberOfOps < numberOfThreads; numberOfOps++){
runKernel(queryVector+numberOfOps,p,queryVector_d+numberOfOps,NDVSM,THREADSPERBLOCKDVSM,lengthOfResultVectorReduced,columnPtr_d,rowIndex_d,valueArray_d,resultSparseVectorValueArray_d+numberOfOps,resultSparseVectorValueArrayDD+numberOfOps);
}
std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();
std::chrono::duration<int,std::nano> time_span = std::chrono::duration_cast<std::chrono::duration<int,std::nano>>(t2 - t1);
timeSpend = timeSpend + time_span ;
std::cout << "Number of "<< numberOfThreads <<" operations at GPU takes: "<< (double)timeSpend.count()/1000 << "ms\n";
myfile << "Number of "<< numberOfThreads <<" operations at GPU takes: "<< (double)timeSpend.count()/1000 << "ms\n";
cudaFree( valueArray_d );
cudaFree( rowIndex_d );
cudaFree( columnPtr_d );
cudaFree( queryVector_d );
cudaFree( resultSparseVectorValueArray_d );
free(queryVector);
free(resultSparseVectorValueArrayDD);
free(valueArray);
free(rowIndex);
free(columnPtr);
/*
* CUDA finished
*
**/ }
}
myfile.close();
}
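// Host wrapper for a single query: copies the query vector to the device, launches
// dvsmMultKernel, and copies the reduced result vector back to the host.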
void runKernel(unsigned *queryVector,int p, unsigned *queryVector_d, int NDVSM, int THREADSPERBLOCKDVSM, int lengthOfResultVectorReduced,int *columnPtr_d,int *rowIndex_d,unsigned *valueArray_d,unsigned long long int *resultSparseVectorValueArray_d, unsigned long long int *resultSparseVectorValueArrayDD) {
cudaMemcpy( queryVector_d, queryVector, p*sizeof(unsigned), cudaMemcpyHostToDevice );
// mtxKernel.lock();
dvsmMultKernel<<< NDVSM, THREADSPERBLOCKDVSM>>>(lengthOfResultVectorReduced,columnPtr_d, rowIndex_d, valueArray_d, queryVector_d, resultSparseVectorValueArray_d);
// mtxKernel.unlock();
cudaMemcpy( resultSparseVectorValueArrayDD, resultSparseVectorValueArray_d, (lengthOfResultVectorReduced*sizeof(unsigned long long int)), cudaMemcpyDeviceToHost );
}
|
a169985224628bcdc13acc7dcb8c79a7f659ebfa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
#define BLOCK_SIZE 8
#define wbCheck(stmt) do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while(0)
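// Integer ceiling division: smallest number of size-b blocks that covers a elements.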
int ceil(int a, int b){
return int((a + b - 1) / b);
}
// Compute C = A * B
__global__ void matrixMultiply(float * A, float * B, float * C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns) {
//@@ Insert code to implement matrix multiplication here
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
float PValue = 0;
if(row < numCColumns && col < numCColumns){
for(int j = 0; j < numAColumns; j ++){
PValue += A[row * numAColumns + j] * B[j * numBColumns + col];
}
C[row * numCColumns + col] = PValue;
}
}
int main(int argc, char ** argv) {
wbArg_t args;
float * hostA; // The A matrix
float * hostB; // The B matrix
float * hostC; // The output C matrix
float * deviceA;
float * deviceB;
float * deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set this)
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostA = (float *) wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns);
hostB = (float *) wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns);
//@@ Set numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
//@@ Allocate the hostC matrix
hostC = (float *) malloc(sizeof(float) * numCRows * numCColumns);
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbLog(TRACE, "The dimensions of C are ", numCRows, " x ", numCColumns);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
hipMalloc((void**) &deviceA, sizeof(float) * numAColumns * numARows);
hipMalloc((void**) &deviceB, sizeof(float) * numBColumns * numBRows);
hipMalloc((void**) &deviceC, sizeof(float) * numCColumns * numCRows);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
hipMemcpy(deviceA, hostA, sizeof(float) * numAColumns * numARows, hipMemcpyHostToDevice);
hipMemcpy(deviceB, hostB, sizeof(float) * numBColumns * numBRows, hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 DimGrid(ceil(numCRows, BLOCK_SIZE), ceil(numCColumns, BLOCK_SIZE), 1);
dim3 DimBlock(BLOCK_SIZE, BLOCK_SIZE,1);
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
hipLaunchKernelGGL(( matrixMultiply), dim3(DimGrid), dim3(DimBlock), 0, 0, deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
hipMemcpy(hostC, deviceC, sizeof(float) * numCColumns * numCRows, hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
hipFree(deviceA);
hipFree(deviceB);
hipFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
float res = 0;
for(int index = 0; index < numAColumns; index ++){
res += hostA[index] * hostB[index * numBColumns];
}
printf("res is \t %f\n", res);
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
return 0;
}
|
a169985224628bcdc13acc7dcb8c79a7f659ebfa.cu
|
#include <wb.h>
#define BLOCK_SIZE 8
#define wbCheck(stmt) do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while(0)
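// Integer ceiling division: smallest number of size-b blocks that covers a elements.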
int ceil(int a, int b){
return int((a + b - 1) / b);
}
// Compute C = A * B
__global__ void matrixMultiply(float * A, float * B, float * C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns) {
//@@ Insert code to implement matrix multiplication here
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
float PValue = 0;
if(row < numCColumns && col < numCColumns){
for(int j = 0; j < numAColumns; j ++){
PValue += A[row * numAColumns + j] * B[j * numBColumns + col];
}
C[row * numCColumns + col] = PValue;
}
}
int main(int argc, char ** argv) {
wbArg_t args;
float * hostA; // The A matrix
float * hostB; // The B matrix
float * hostC; // The output C matrix
float * deviceA;
float * deviceB;
float * deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set this)
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostA = (float *) wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns);
hostB = (float *) wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns);
//@@ Set numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
//@@ Allocate the hostC matrix
hostC = (float *) malloc(sizeof(float) * numCRows * numCColumns);
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbLog(TRACE, "The dimensions of C are ", numCRows, " x ", numCColumns);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
cudaMalloc((void**) &deviceA, sizeof(float) * numAColumns * numARows);
cudaMalloc((void**) &deviceB, sizeof(float) * numBColumns * numBRows);
cudaMalloc((void**) &deviceC, sizeof(float) * numCColumns * numCRows);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
cudaMemcpy(deviceA, hostA, sizeof(float) * numAColumns * numARows, cudaMemcpyHostToDevice);
cudaMemcpy(deviceB, hostB, sizeof(float) * numBColumns * numBRows, cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 DimGrid(ceil(numCRows, BLOCK_SIZE), ceil(numCColumns, BLOCK_SIZE), 1);
dim3 DimBlock(BLOCK_SIZE, BLOCK_SIZE,1);
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
matrixMultiply<<<DimGrid, DimBlock>>>(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
cudaMemcpy(hostC, deviceC, sizeof(float) * numCColumns * numCRows, cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
float res = 0;
for(int index = 0; index < numAColumns; index ++){
res += hostA[index] * hostB[index * numBColumns];
}
printf("res is \t %f\n", res);
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
return 0;
}
|
f686a4b88f01a44ee580329f03822e6833c1df35.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ParticlesActions.h"
__global__ void set_pr_kernel(int offset, Particle3D* particles, double *dp)
{
unsigned int a = offset + (blockDim.x * blockIdx.x + threadIdx.x);
particles[a].set_pr(dp[a]);
}
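// Gravity step: subtracts g*dt from the y-velocity of fluid particles; boundary
// (non-fluid) particles are left untouched.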
__global__ void external_force_kernel(int offset, Particle3D* particles, double *dt, double g)
{
unsigned int a = offset + (blockDim.x * blockIdx.x + threadIdx.x);
if (particles[a].is_fluid()){
particles[a].set_v(particles[a].get_v().x, particles[a].get_v().y - g*dt[0], (particles[a].get_v().z));
}
}
__global__ void mov_part_kernel(int offset, Particle3D* particles, double *dt)
{
unsigned int a = offset + (blockDim.x * blockIdx.x + threadIdx.x);
if ((particles)[a].is_fluid()){
(particles)[a].set_po((particles)[a].get_po().x + (particles)[a].get_v().x*dt[0], (particles)[a].get_po().y + (particles)[a].get_v().y*dt[0], (particles)[a].get_po().z + (particles)[a].get_v().z*dt[0]);
}
}
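// Collision handling: each thread scans particle i's neighbour list (nei_1D, row
// stride nump, entry 0 holds the neighbour count) and, for pairs with j > i whose
// squared distance is below `radius`, removes the approaching component of the
// relative momentum (scaled by 1.2), correcting velocities and positions of the
// fluid particles on both sides.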
__global__ void collision_kernel(int offset, Particle3D* particles, double *dt, int *nei_1D, double radius, double Density, int nump)
{
unsigned int i = offset + (blockDim.x * blockIdx.x + threadIdx.x);
double dx = 0.0, dy = 0.0, dz = 0.0, dist = 0.0;
double vg[3], vr[3], vbas, vm[3];
double m1 = 0.0, m2 = 0.0;
Point3D Part_j, Part_i, Velocity_j, Velocity_i;
int j = 0;
m1 = Density;
for (int a = 1; a <= nei_1D[(i*nump) + 0]; a++)
{
j = nei_1D[(i*nump) + a];
if (j <= i)
continue;
Part_j = (particles)[j].get_po();
Part_i = (particles)[i].get_po();
dx = Part_j.x - Part_i.x;
dy = Part_j.y - Part_i.y;
dz = Part_j.z - Part_i.z;
dist = dx*dx + dy*dy + dz*dz;
if (dist<radius)
{
Velocity_j = (particles)[j].get_v();
Velocity_i = (particles)[i].get_v();
dist = sqrt(dist);
m2 = Density;
vg[0] = (m1*Velocity_i.x + m2*Velocity_j.x) / (m1 + m2);
vg[1] = (m1*Velocity_i.y + m2*Velocity_j.y) / (m1 + m2);
vg[2] = (m1*Velocity_i.z + m2*Velocity_j.z) / (m1 + m2);
vr[0] = m1*(Velocity_i.x - vg[0]);
vr[1] = m1*(Velocity_i.y - vg[1]);
vr[2] = m1*(Velocity_i.z - vg[2]);
vbas = (vr[0] * dx + vr[1] * dy + vr[2] * dz) / dist;
if (vbas<0.0)continue;
vm[0] = (1.2)*vbas*dx / dist;
vm[1] = (1.2)*vbas*dy / dist;
vm[2] = (1.2)*vbas*dz / dist;
if ((particles)[i].is_fluid())
{
(particles)[i].set_v(Velocity_i.x - vm[0] / m1, Velocity_i.y - vm[1] / m1, Velocity_i.z - vm[2] / m1);
(particles)[i].set_po(Part_i.x - dt[0] * vm[0] / m1, Part_i.y - dt[0] * vm[1] / m1, Part_i.z - dt[0] * vm[2] / m1);
}
if ((particles)[j].is_fluid())
{
(particles)[j].set_v(Velocity_j.x + vm[0] / m2, Velocity_j.y + vm[1] / m2, Velocity_j.z + vm[2] / m2);
(particles)[j].set_po(Part_j.x + dt[0] * vm[0] / m2, Part_j.y + dt[0] * vm[1] / m2, Part_j.z + dt[0] * vm[2] / m2);
}
}
}
}
//void ParticlesActions::external_force_cu(Particle2D* particles, double dt, double g, int nump){
//
// Particle2D *particles_d = NULL;
// hipError_t err = hipSuccess;
// err = hipMalloc((void **)&particles_d, nump*sizeof(Particle2D));
//
// if (err != hipSuccess)
// {
// fprintf(stderr, "Failed to allocate device particles (error code %s)!\n", hipGetErrorString(err));
// exit(EXIT_FAILURE);
// }
//
// int division = nump / 1024;
// int mod = nump - (division * 1024);
// err = hipMemcpy(particles_d, particles, nump*sizeof(Particle2D), hipMemcpyHostToDevice);
//
// if (err != hipSuccess)
// {
// fprintf(stderr, "Failed to copy particles from host to device (error code %s)!\n", hipGetErrorString(err));
// exit(EXIT_FAILURE);
// }
//
// external_force_kernel << <division, 1024 >> >(0, particles_d, dt, g);
// external_force_kernel << <1, mod >> >(division * 1024, particles_d, dt, g);
//
// err = hipGetLastError();
// if (err != hipSuccess)
// {
// fprintf(stderr, "Failed to launch external force kernel (error code %s)!\n", hipGetErrorString(err));
// system("pause");
// exit(EXIT_FAILURE);
// }
//
// err = hipMemcpy(particles, particles_d, nump*sizeof(Particle2D), hipMemcpyDeviceToHost);
// if (err != hipSuccess)
// {
// fprintf(stderr, "Failed to copy particles from device to host (error code %s)!\n", hipGetErrorString(err));
// exit(EXIT_FAILURE);
// }
//
// err = hipFree(particles_d);
//
// if (err != hipSuccess)
// {
// fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
// exit(EXIT_FAILURE);
// }
//
// err = hipDeviceReset();
//
// if (err != hipSuccess)
// {
// fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
// exit(EXIT_FAILURE);
// }
//
// //return particles;
//}
|
f686a4b88f01a44ee580329f03822e6833c1df35.cu
|
#include "ParticlesActions.h"
__global__ void set_pr_kernel(int offset, Particle3D* particles, double *dp)
{
unsigned int a = offset + (blockDim.x * blockIdx.x + threadIdx.x);
particles[a].set_pr(dp[a]);
}
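// Gravity step: subtracts g*dt from the y-velocity of fluid particles; boundary
// (non-fluid) particles are left untouched.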
__global__ void external_force_kernel(int offset, Particle3D* particles, double *dt, double g)
{
unsigned int a = offset + (blockDim.x * blockIdx.x + threadIdx.x);
if (particles[a].is_fluid()){
particles[a].set_v(particles[a].get_v().x, particles[a].get_v().y - g*dt[0], (particles[a].get_v().z));
}
}
__global__ void mov_part_kernel(int offset, Particle3D* particles, double *dt)
{
unsigned int a = offset + (blockDim.x * blockIdx.x + threadIdx.x);
if ((particles)[a].is_fluid()){
(particles)[a].set_po((particles)[a].get_po().x + (particles)[a].get_v().x*dt[0], (particles)[a].get_po().y + (particles)[a].get_v().y*dt[0], (particles)[a].get_po().z + (particles)[a].get_v().z*dt[0]);
}
}
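// Collision handling: each thread scans particle i's neighbour list (nei_1D, row
// stride nump, entry 0 holds the neighbour count) and, for pairs with j > i whose
// squared distance is below `radius`, removes the approaching component of the
// relative momentum (scaled by 1.2), correcting velocities and positions of the
// fluid particles on both sides.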
__global__ void collision_kernel(int offset, Particle3D* particles, double *dt, int *nei_1D, double radius, double Density, int nump)
{
unsigned int i = offset + (blockDim.x * blockIdx.x + threadIdx.x);
double dx = 0.0, dy = 0.0, dz = 0.0, dist = 0.0;
double vg[3], vr[3], vbas, vm[3];
double m1 = 0.0, m2 = 0.0;
Point3D Part_j, Part_i, Velocity_j, Velocity_i;
int j = 0;
m1 = Density;
for (int a = 1; a <= nei_1D[(i*nump) + 0]; a++)
{
j = nei_1D[(i*nump) + a];
if (j <= i)
continue;
Part_j = (particles)[j].get_po();
Part_i = (particles)[i].get_po();
dx = Part_j.x - Part_i.x;
dy = Part_j.y - Part_i.y;
dz = Part_j.z - Part_i.z;
dist = dx*dx + dy*dy + dz*dz;
if (dist<radius)
{
Velocity_j = (particles)[j].get_v();
Velocity_i = (particles)[i].get_v();
dist = sqrt(dist);
m2 = Density;
vg[0] = (m1*Velocity_i.x + m2*Velocity_j.x) / (m1 + m2);
vg[1] = (m1*Velocity_i.y + m2*Velocity_j.y) / (m1 + m2);
vg[2] = (m1*Velocity_i.z + m2*Velocity_j.z) / (m1 + m2);
vr[0] = m1*(Velocity_i.x - vg[0]);
vr[1] = m1*(Velocity_i.y - vg[1]);
vr[2] = m1*(Velocity_i.z - vg[2]);
vbas = (vr[0] * dx + vr[1] * dy + vr[2] * dz) / dist;
if (vbas<0.0)continue;
vm[0] = (1.2)*vbas*dx / dist;
vm[1] = (1.2)*vbas*dy / dist;
vm[2] = (1.2)*vbas*dz / dist;
if ((particles)[i].is_fluid())
{
(particles)[i].set_v(Velocity_i.x - vm[0] / m1, Velocity_i.y - vm[1] / m1, Velocity_i.z - vm[2] / m1);
(particles)[i].set_po(Part_i.x - dt[0] * vm[0] / m1, Part_i.y - dt[0] * vm[1] / m1, Part_i.z - dt[0] * vm[2] / m1);
}
if ((particles)[j].is_fluid())
{
(particles)[j].set_v(Velocity_j.x + vm[0] / m2, Velocity_j.y + vm[1] / m2, Velocity_j.z + vm[2] / m2);
(particles)[j].set_po(Part_j.x + dt[0] * vm[0] / m2, Part_j.y + dt[0] * vm[1] / m2, Part_j.z + dt[0] * vm[2] / m2);
}
}
}
}
//void ParticlesActions::external_force_cu(Particle2D* particles, double dt, double g, int nump){
//
// Particle2D *particles_d = NULL;
// cudaError_t err = cudaSuccess;
// err = cudaMalloc((void **)&particles_d, nump*sizeof(Particle2D));
//
// if (err != cudaSuccess)
// {
// fprintf(stderr, "Failed to allocate device particles (error code %s)!\n", cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
//
// int division = nump / 1024;
// int mod = nump - (division * 1024);
// err = cudaMemcpy(particles_d, particles, nump*sizeof(Particle2D), cudaMemcpyHostToDevice);
//
// if (err != cudaSuccess)
// {
// fprintf(stderr, "Failed to copy particles from host to device (error code %s)!\n", cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
//
// external_force_kernel << <division, 1024 >> >(0, particles_d, dt, g);
// external_force_kernel << <1, mod >> >(division * 1024, particles_d, dt, g);
//
// err = cudaGetLastError();
// if (err != cudaSuccess)
// {
// fprintf(stderr, "Failed to launch external force kernel (error code %s)!\n", cudaGetErrorString(err));
// system("pause");
// exit(EXIT_FAILURE);
// }
//
// err = cudaMemcpy(particles, particles_d, nump*sizeof(Particle2D), cudaMemcpyDeviceToHost);
// if (err != cudaSuccess)
// {
// fprintf(stderr, "Failed to copy particles from device to host (error code %s)!\n", cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
//
// err = cudaFree(particles_d);
//
// if (err != cudaSuccess)
// {
// fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
//
// err = cudaDeviceReset();
//
// if (err != cudaSuccess)
// {
// fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
//
// //return particles;
//}
|
a3987b5fea69a825409c6555c5feb7dade1ce142.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <algorithm>
#include <cmath>
#include <random>
#include <vector>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/cuml.hpp>
#include <raft/cudart_utils.h>
#include <test_utils.h>
#include <linalg/batched/matrix.cuh>
#include <raft/linalg/add.cuh>
#include "../linalg_naive.h"
#include "../test_utils.h"
namespace MLCommon {
namespace LinAlg {
namespace Batched {
enum MatrixOperation {
AB_op, // Matrix-matrix product (with GEMM)
AZT_op, // Matrix-vector product (with GEMM)
ZA_op, // Vector-matrix product (with GEMM)
ApB_op, // Addition
AmB_op, // Subtraction
AkB_op, // Kronecker product
AsolveZ_op, // Linear equation solver Ax=b
LaggedZ_op, // Lag matrix
CopyA2D_op, // 2D copy
DiffA_op, // Vector first difference
Hessenberg_op, // Hessenberg decomposition A=UHU'
Schur_op, // Schur decomposition A=USU'
Lyapunov_op, // Lyapunov equation solver AXA'-X+B=0
};
template <typename T>
struct MatrixInputs {
MatrixOperation operation;
int batch_size;
int m; // Usually the dimensions of A and/or Z
int n;
int p; // Usually the dimensions of B or other parameters
int q;
int s; // Additional parameters for operations that need more than 4
int t;
T tolerance;
};
template <typename T>
class MatrixTest : public ::testing::TestWithParam<MatrixInputs<T>> {
protected:
void SetUp() override {
using std::vector;
params = ::testing::TestWithParam<MatrixInputs<T>>::GetParam();
// Find out whether A, B and Z will be used (depending on the operation)
bool use_A = (params.operation != LaggedZ_op);
bool use_B = (params.operation == AB_op) || (params.operation == ApB_op) ||
(params.operation == AmB_op) || (params.operation == AkB_op) ||
(params.operation == Lyapunov_op);
bool use_Z = (params.operation == AZT_op) || (params.operation == ZA_op) ||
(params.operation == AsolveZ_op) ||
(params.operation == LaggedZ_op);
bool Z_col = (params.operation == AsolveZ_op);
int r = params.operation == AZT_op ? params.n : params.m;
// Check if the dimensions are valid and compute the output dimensions
int m_r, n_r;
switch (params.operation) {
case AB_op:
ASSERT_TRUE(params.n == params.p);
m_r = params.m;
n_r = params.q;
break;
case ApB_op:
case AmB_op:
ASSERT_TRUE(params.m == params.p && params.n == params.q);
m_r = params.m;
n_r = params.n;
break;
case AkB_op:
m_r = params.m * params.p;
n_r = params.n * params.q;
break;
case AZT_op:
m_r = params.m;
n_r = 1;
break;
case ZA_op:
m_r = 1;
n_r = params.n;
break;
case AsolveZ_op:
ASSERT_TRUE(params.n == params.m);
// For this test we multiply A by the solution and check against Z
m_r = params.m;
n_r = 1;
break;
case LaggedZ_op:
// For this operation params.n holds the number of lags
m_r = params.m - params.n;
n_r = params.n;
break;
case CopyA2D_op:
// For this operation p and q are the dimensions of the copy window
m_r = params.p;
n_r = params.q;
break;
case DiffA_op:
// Note: A can represent either a row or column vector
ASSERT_TRUE(params.m == 1 || params.n == 1);
m_r = ::max(1, params.m - 1);
n_r = ::max(1, params.n - 1);
break;
case Hessenberg_op:
case Schur_op:
case Lyapunov_op:
ASSERT_TRUE(params.m == params.n && params.m == params.p &&
params.m == params.q);
m_r = params.m;
n_r = params.m;
break;
}
// Create test matrices and vector
std::vector<T> A;
std::vector<T> B;
std::vector<T> Z;
if (use_A) A.resize(params.batch_size * params.m * params.n);
if (use_B) B.resize(params.batch_size * params.p * params.q);
if (use_Z) Z.resize(params.batch_size * r);
// Generate random data
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<T> udis(-1.0, 3.0);
for (int i = 0; i < A.size(); i++) A[i] = udis(gen);
for (int i = 0; i < B.size(); i++) B[i] = udis(gen);
for (int i = 0; i < Z.size(); i++) Z[i] = udis(gen);
// Create handles, stream, allocator
CUBLAS_CHECK(hipblasCreate(&handle));
CUDA_CHECK(hipStreamCreate(&stream));
auto allocator = std::make_shared<raft::mr::device::default_allocator>();
// Create batched matrices
Matrix<T> AbM(params.m, params.n, params.batch_size, handle, allocator,
stream);
Matrix<T> BbM(params.p, params.q, params.batch_size, handle, allocator,
stream);
Matrix<T> ZbM(Z_col ? r : 1, Z_col ? 1 : r, params.batch_size, handle,
allocator, stream);
// Copy the data to the device
if (use_A) raft::update_device(AbM.raw_data(), A.data(), A.size(), stream);
if (use_B) raft::update_device(BbM.raw_data(), B.data(), B.size(), stream);
if (use_Z) raft::update_device(ZbM.raw_data(), Z.data(), Z.size(), stream);
// Create fake batched matrices to be overwritten by results
res_bM = new Matrix<T>(1, 1, 1, handle, allocator, stream);
// Compute the tested results
switch (params.operation) {
case AB_op:
*res_bM = AbM * BbM;
break;
case ApB_op:
*res_bM = AbM + BbM;
break;
case AmB_op:
*res_bM = AbM - BbM;
break;
case AkB_op:
*res_bM = b_kron(AbM, BbM);
break;
case AZT_op:
*res_bM = b_gemm(AbM, ZbM, false, true);
break;
case ZA_op:
*res_bM = ZbM * AbM;
break;
case AsolveZ_op:
// A * A\Z -> should be Z
*res_bM = AbM * b_solve(AbM, ZbM);
break;
case LaggedZ_op:
*res_bM = b_lagged_mat(ZbM, params.n);
break;
case CopyA2D_op:
*res_bM = b_2dcopy(AbM, params.s, params.t, params.p, params.q);
break;
case DiffA_op:
*res_bM = AbM.difference();
break;
case Hessenberg_op: {
constexpr T zero_tolerance =
std::is_same<T, double>::value ? 1e-7 : 1e-3f;
int n = params.m;
Matrix<T> HbM(n, n, params.batch_size, handle, allocator, stream);
Matrix<T> UbM(n, n, params.batch_size, handle, allocator, stream);
b_hessenberg(AbM, UbM, HbM);
// Check that H is in Hessenberg form
std::vector<T> H = std::vector<T>(n * n * params.batch_size);
raft::update_host(H.data(), HbM.raw_data(), H.size(), stream);
CUDA_CHECK(hipStreamSynchronize(stream));
for (int ib = 0; ib < params.batch_size; ib++) {
for (int j = 0; j < n - 2; j++) {
for (int i = j + 2; i < n; i++) {
ASSERT_TRUE(raft::abs(H[n * n * ib + n * j + i]) <
zero_tolerance);
}
}
}
// Check that U is unitary (UU'=I)
std::vector<T> UUt = std::vector<T>(n * n * params.batch_size);
raft::update_host(UUt.data(), b_gemm(UbM, UbM, false, true).raw_data(),
UUt.size(), stream);
CUDA_CHECK(hipStreamSynchronize(stream));
for (int ib = 0; ib < params.batch_size; ib++) {
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
ASSERT_TRUE(raft::abs(UUt[n * n * ib + n * j + i] -
(i == j ? (T)1 : (T)0)) < zero_tolerance);
}
}
}
// Write UHU' in the result (will be compared against A)
*res_bM = UbM * b_gemm(HbM, UbM, false, true);
break;
}
case Schur_op: {
constexpr T zero_tolerance =
std::is_same<T, double>::value ? 1e-7 : 1e-3f;
int n = params.m;
Matrix<T> SbM(n, n, params.batch_size, handle, allocator, stream);
Matrix<T> UbM(n, n, params.batch_size, handle, allocator, stream);
b_schur(AbM, UbM, SbM);
// Check that S is in Schur form
std::vector<T> S = std::vector<T>(n * n * params.batch_size);
raft::update_host(S.data(), SbM.raw_data(), S.size(), stream);
CUDA_CHECK(hipStreamSynchronize(stream));
for (int ib = 0; ib < params.batch_size; ib++) {
for (int j = 0; j < n - 2; j++) {
for (int i = j + 2; i < n; i++) {
ASSERT_TRUE(raft::abs(S[n * n * ib + n * j + i]) <
zero_tolerance);
}
}
}
for (int ib = 0; ib < params.batch_size; ib++) {
for (int k = 0; k < n - 3; k++) {
ASSERT_FALSE(
raft::abs(S[n * n * ib + n * k + k + 1]) > zero_tolerance &&
raft::abs(S[n * n * ib + n * (k + 1) + k + 2]) > zero_tolerance &&
raft::abs(S[n * n * ib + n * (k + 2) + k + 3]) > zero_tolerance);
}
}
// Check that U is unitary (UU'=I)
std::vector<T> UUt = std::vector<T>(n * n * params.batch_size);
raft::update_host(UUt.data(), b_gemm(UbM, UbM, false, true).raw_data(),
UUt.size(), stream);
CUDA_CHECK(hipStreamSynchronize(stream));
for (int ib = 0; ib < params.batch_size; ib++) {
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
ASSERT_TRUE(raft::abs(UUt[n * n * ib + n * j + i] -
(i == j ? (T)1 : (T)0)) < zero_tolerance);
}
}
}
// Write USU' in the result (will be compared against A)
*res_bM = UbM * b_gemm(SbM, UbM, false, true);
break;
}
case Lyapunov_op: {
Matrix<T> XbM = b_lyapunov(AbM, BbM);
// Write AXA'-X in the result (will be compared against -B)
*res_bM = AbM * b_gemm(XbM, AbM, false, true) - XbM;
break;
}
}
// Compute the expected results
res_h.resize(params.batch_size * m_r * n_r);
switch (params.operation) {
case AB_op:
for (int bid = 0; bid < params.batch_size; bid++) {
Naive::matMul(res_h.data() + bid * m_r * n_r,
A.data() + bid * params.m * params.n,
B.data() + bid * params.p * params.q, params.m,
params.n, params.q);
}
break;
case ApB_op:
Naive::add(res_h.data(), A.data(), B.data(), A.size());
break;
case AmB_op:
Naive::add(res_h.data(), A.data(), B.data(), A.size(), T(-1.0));
break;
case AkB_op:
for (int bid = 0; bid < params.batch_size; bid++) {
Naive::kronecker(res_h.data() + bid * m_r * n_r,
A.data() + bid * params.m * params.n,
B.data() + bid * params.p * params.q, params.m,
params.n, params.p, params.q);
}
break;
case AZT_op:
for (int bid = 0; bid < params.batch_size; bid++) {
Naive::matMul(res_h.data() + bid * m_r * n_r,
A.data() + bid * params.m * params.n,
Z.data() + bid * r, params.m, params.n, 1);
}
break;
case ZA_op:
for (int bid = 0; bid < params.batch_size; bid++) {
Naive::matMul(res_h.data() + bid * m_r * n_r, Z.data() + bid * r,
A.data() + bid * params.m * params.n, 1, params.m,
params.n);
}
break;
case AsolveZ_op:
// Simply copy Z in the result
memcpy(res_h.data(), Z.data(), r * params.batch_size * sizeof(T));
break;
case LaggedZ_op:
for (int bid = 0; bid < params.batch_size; bid++) {
Naive::laggedMat(res_h.data() + bid * m_r * n_r,
Z.data() + bid * params.m, params.m, params.n);
}
break;
case CopyA2D_op:
for (int bid = 0; bid < params.batch_size; bid++) {
Naive::copy2D(res_h.data() + bid * m_r * n_r,
A.data() + bid * params.m * params.n, params.s,
params.t, params.m, m_r, n_r);
}
break;
case DiffA_op: {
int len = params.m * params.n;
for (int bid = 0; bid < params.batch_size; bid++) {
Naive::diff(res_h.data() + bid * (len - 1), A.data() + bid * len,
len);
}
break;
}
case Hessenberg_op:
case Schur_op:
// Simply copy A (will be compared against UHU')
memcpy(res_h.data(), A.data(),
params.m * params.m * params.batch_size * sizeof(T));
break;
case Lyapunov_op:
// Simply copy -B (will be compared against AXA'-X)
for (int i = 0; i < params.m * params.m * params.batch_size; i++) {
res_h[i] = -B[i];
}
break;
}
CUDA_CHECK(hipStreamSynchronize(stream));
}
void TearDown() override {
delete res_bM;
CUBLAS_CHECK(hipblasDestroy(handle));
CUDA_CHECK(hipStreamDestroy(stream));
}
protected:
MatrixInputs<T> params;
Matrix<T> *res_bM;
std::vector<T> res_h;
hipblasHandle_t handle;
hipStream_t stream;
};
// Test parameters (op, batch_size, m, n, p, q, s, t, tolerance)
const std::vector<MatrixInputs<double>> inputsd = {
{AB_op, 7, 15, 37, 37, 11, 0, 0, 1e-6},
{AZT_op, 5, 33, 65, 1, 1, 0, 0, 1e-6},
{ZA_op, 8, 12, 41, 1, 1, 0, 0, 1e-6},
{ApB_op, 4, 16, 48, 16, 48, 0, 0, 1e-6},
{AmB_op, 17, 9, 3, 9, 3, 0, 0, 1e-6},
{AkB_op, 5, 3, 13, 31, 8, 0, 0, 1e-6},
{AkB_op, 3, 7, 12, 31, 15, 0, 0, 1e-6},
{AkB_op, 2, 11, 2, 8, 46, 0, 0, 1e-6},
{AsolveZ_op, 6, 17, 17, 1, 1, 0, 0, 1e-6},
{LaggedZ_op, 5, 31, 9, 1, 1, 0, 0, 1e-6},
{LaggedZ_op, 7, 129, 3, 1, 1, 0, 0, 1e-6},
{CopyA2D_op, 11, 31, 63, 17, 14, 5, 9, 1e-6},
{CopyA2D_op, 4, 33, 7, 30, 4, 3, 0, 1e-6},
{DiffA_op, 5, 11, 1, 1, 1, 0, 0, 1e-6},
{DiffA_op, 15, 1, 37, 1, 1, 0, 0, 1e-6},
{Hessenberg_op, 10, 15, 15, 15, 15, 0, 0, 1e-6},
{Hessenberg_op, 30, 61, 61, 61, 61, 0, 0, 1e-6},
// {Schur_op, 7, 12, 12, 12, 12, 0, 0, 1e-3},
// {Schur_op, 17, 77, 77, 77, 77, 0, 0, 1e-3},
// {Lyapunov_op, 5, 14, 14, 14, 14, 0, 0, 1e-2},
// {Lyapunov_op, 13, 100, 100, 100, 100, 0, 0, 1e-2}
};
// Note: Schur and Lyapunov tests have had stability issues on CI so
// they are disabled temporarily. See issue:
// https://github.com/rapidsai/cuml/issues/1949
// Test parameters (op, batch_size, m, n, p, q, s, t, tolerance)
const std::vector<MatrixInputs<float>> inputsf = {
{AB_op, 7, 15, 37, 37, 11, 0, 0, 1e-2},
{AZT_op, 5, 33, 65, 1, 1, 0, 0, 1e-2},
{ZA_op, 8, 12, 41, 1, 1, 0, 0, 1e-2},
{ApB_op, 4, 16, 48, 16, 48, 0, 0, 1e-2},
{AmB_op, 17, 9, 3, 9, 3, 0, 0, 1e-2},
{AkB_op, 5, 3, 13, 31, 8, 0, 0, 1e-2},
{AkB_op, 3, 7, 12, 31, 15, 0, 0, 1e-2},
{AkB_op, 2, 11, 2, 8, 46, 0, 0, 1e-2},
{AsolveZ_op, 6, 17, 17, 1, 1, 0, 0, 1e-2},
{LaggedZ_op, 5, 31, 9, 1, 1, 0, 0, 1e-5},
{LaggedZ_op, 7, 129, 3, 1, 1, 0, 0, 1e-5},
{CopyA2D_op, 11, 31, 63, 17, 14, 5, 9, 1e-5},
{CopyA2D_op, 4, 33, 7, 30, 4, 3, 0, 1e-5},
{DiffA_op, 5, 11, 1, 1, 1, 0, 0, 1e-2},
{DiffA_op, 15, 1, 37, 1, 1, 0, 0, 1e-2},
{Hessenberg_op, 10, 15, 15, 15, 15, 0, 0, 1e-2},
{Hessenberg_op, 30, 61, 61, 61, 61, 0, 0, 1e-2},
// {Schur_op, 7, 12, 12, 12, 12, 0, 0, 1e-2},
// {Schur_op, 17, 77, 77, 77, 77, 0, 0, 1e-2},
// {Lyapunov_op, 5, 14, 14, 14, 14, 0, 0, 1e-2},
// {Lyapunov_op, 13, 100, 100, 100, 100, 0, 0, 1e-2}
};
// Note: Schur and Lyapunov operations don't give good precision for
// single-precision floating-point numbers yet...
using BatchedMatrixTestD = MatrixTest<double>;
using BatchedMatrixTestF = MatrixTest<float>;
TEST_P(BatchedMatrixTestD, Result) {
ASSERT_TRUE(raft::devArrMatchHost(
res_h.data(), res_bM->raw_data(), res_h.size(),
raft::CompareApprox<double>(params.tolerance), stream));
}
TEST_P(BatchedMatrixTestF, Result) {
ASSERT_TRUE(raft::devArrMatchHost(
res_h.data(), res_bM->raw_data(), res_h.size(),
raft::CompareApprox<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(BatchedMatrixTests, BatchedMatrixTestD,
::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(BatchedMatrixTests, BatchedMatrixTestF,
::testing::ValuesIn(inputsf));
} // namespace Batched
} // namespace LinAlg
} // namespace MLCommon
|
a3987b5fea69a825409c6555c5feb7dade1ce142.cu
|
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <algorithm>
#include <cmath>
#include <random>
#include <vector>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/cuml.hpp>
#include <raft/cudart_utils.h>
#include <test_utils.h>
#include <linalg/batched/matrix.cuh>
#include <raft/linalg/add.cuh>
#include "../linalg_naive.h"
#include "../test_utils.h"
namespace MLCommon {
namespace LinAlg {
namespace Batched {
enum MatrixOperation {
AB_op, // Matrix-matrix product (with GEMM)
AZT_op, // Matrix-vector product (with GEMM)
ZA_op, // Vector-matrix product (with GEMM)
ApB_op, // Addition
AmB_op, // Subtraction
AkB_op, // Kronecker product
AsolveZ_op, // Linear equation solver Ax=b
LaggedZ_op, // Lag matrix
CopyA2D_op, // 2D copy
DiffA_op, // Vector first difference
Hessenberg_op, // Hessenberg decomposition A=UHU'
Schur_op, // Schur decomposition A=USU'
Lyapunov_op, // Lyapunov equation solver AXA'-X+B=0
};
template <typename T>
struct MatrixInputs {
MatrixOperation operation;
int batch_size;
int m; // Usually the dimensions of A and/or Z
int n;
int p; // Usually the dimensions of B or other parameters
int q;
int s; // Additional parameters for operations that need more than 4
int t;
T tolerance;
};
template <typename T>
class MatrixTest : public ::testing::TestWithParam<MatrixInputs<T>> {
protected:
void SetUp() override {
using std::vector;
params = ::testing::TestWithParam<MatrixInputs<T>>::GetParam();
// Find out whether A, B and Z will be used (depending on the operation)
bool use_A = (params.operation != LaggedZ_op);
bool use_B = (params.operation == AB_op) || (params.operation == ApB_op) ||
(params.operation == AmB_op) || (params.operation == AkB_op) ||
(params.operation == Lyapunov_op);
bool use_Z = (params.operation == AZT_op) || (params.operation == ZA_op) ||
(params.operation == AsolveZ_op) ||
(params.operation == LaggedZ_op);
bool Z_col = (params.operation == AsolveZ_op);
int r = params.operation == AZT_op ? params.n : params.m;
// Check if the dimensions are valid and compute the output dimensions
int m_r, n_r;
switch (params.operation) {
case AB_op:
ASSERT_TRUE(params.n == params.p);
m_r = params.m;
n_r = params.q;
break;
case ApB_op:
case AmB_op:
ASSERT_TRUE(params.m == params.p && params.n == params.q);
m_r = params.m;
n_r = params.n;
break;
case AkB_op:
m_r = params.m * params.p;
n_r = params.n * params.q;
break;
case AZT_op:
m_r = params.m;
n_r = 1;
break;
case ZA_op:
m_r = 1;
n_r = params.n;
break;
case AsolveZ_op:
ASSERT_TRUE(params.n == params.m);
// For this test we multiply A by the solution and check against Z
m_r = params.m;
n_r = 1;
break;
case LaggedZ_op:
// For this operation params.n holds the number of lags
m_r = params.m - params.n;
n_r = params.n;
break;
case CopyA2D_op:
// For this operation p and q are the dimensions of the copy window
m_r = params.p;
n_r = params.q;
break;
case DiffA_op:
// Note: A can represent either a row or column vector
ASSERT_TRUE(params.m == 1 || params.n == 1);
m_r = std::max(1, params.m - 1);
n_r = std::max(1, params.n - 1);
break;
case Hessenberg_op:
case Schur_op:
case Lyapunov_op:
ASSERT_TRUE(params.m == params.n && params.m == params.p &&
params.m == params.q);
m_r = params.m;
n_r = params.m;
break;
}
// Create test matrices and vector
std::vector<T> A;
std::vector<T> B;
std::vector<T> Z;
if (use_A) A.resize(params.batch_size * params.m * params.n);
if (use_B) B.resize(params.batch_size * params.p * params.q);
if (use_Z) Z.resize(params.batch_size * r);
// Generate random data
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<T> udis(-1.0, 3.0);
for (int i = 0; i < A.size(); i++) A[i] = udis(gen);
for (int i = 0; i < B.size(); i++) B[i] = udis(gen);
for (int i = 0; i < Z.size(); i++) Z[i] = udis(gen);
// Create handles, stream, allocator
CUBLAS_CHECK(cublasCreate(&handle));
CUDA_CHECK(cudaStreamCreate(&stream));
auto allocator = std::make_shared<raft::mr::device::default_allocator>();
// Create batched matrices
Matrix<T> AbM(params.m, params.n, params.batch_size, handle, allocator,
stream);
Matrix<T> BbM(params.p, params.q, params.batch_size, handle, allocator,
stream);
Matrix<T> ZbM(Z_col ? r : 1, Z_col ? 1 : r, params.batch_size, handle,
allocator, stream);
// Copy the data to the device
if (use_A) raft::update_device(AbM.raw_data(), A.data(), A.size(), stream);
if (use_B) raft::update_device(BbM.raw_data(), B.data(), B.size(), stream);
if (use_Z) raft::update_device(ZbM.raw_data(), Z.data(), Z.size(), stream);
// Create fake batched matrices to be overwritten by results
res_bM = new Matrix<T>(1, 1, 1, handle, allocator, stream);
// Compute the tested results
switch (params.operation) {
case AB_op:
*res_bM = AbM * BbM;
break;
case ApB_op:
*res_bM = AbM + BbM;
break;
case AmB_op:
*res_bM = AbM - BbM;
break;
case AkB_op:
*res_bM = b_kron(AbM, BbM);
break;
case AZT_op:
*res_bM = b_gemm(AbM, ZbM, false, true);
break;
case ZA_op:
*res_bM = ZbM * AbM;
break;
case AsolveZ_op:
// A * A\Z -> should be Z
*res_bM = AbM * b_solve(AbM, ZbM);
break;
case LaggedZ_op:
*res_bM = b_lagged_mat(ZbM, params.n);
break;
case CopyA2D_op:
*res_bM = b_2dcopy(AbM, params.s, params.t, params.p, params.q);
break;
case DiffA_op:
*res_bM = AbM.difference();
break;
case Hessenberg_op: {
constexpr T zero_tolerance =
std::is_same<T, double>::value ? 1e-7 : 1e-3f;
int n = params.m;
Matrix<T> HbM(n, n, params.batch_size, handle, allocator, stream);
Matrix<T> UbM(n, n, params.batch_size, handle, allocator, stream);
b_hessenberg(AbM, UbM, HbM);
// Check that H is in Hessenberg form
std::vector<T> H = std::vector<T>(n * n * params.batch_size);
raft::update_host(H.data(), HbM.raw_data(), H.size(), stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
for (int ib = 0; ib < params.batch_size; ib++) {
for (int j = 0; j < n - 2; j++) {
for (int i = j + 2; i < n; i++) {
ASSERT_TRUE(raft::abs(H[n * n * ib + n * j + i]) <
zero_tolerance);
}
}
}
// Check that U is unitary (UU'=I)
std::vector<T> UUt = std::vector<T>(n * n * params.batch_size);
raft::update_host(UUt.data(), b_gemm(UbM, UbM, false, true).raw_data(),
UUt.size(), stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
for (int ib = 0; ib < params.batch_size; ib++) {
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
ASSERT_TRUE(raft::abs(UUt[n * n * ib + n * j + i] -
(i == j ? (T)1 : (T)0)) < zero_tolerance);
}
}
}
// Write UHU' in the result (will be compared against A)
*res_bM = UbM * b_gemm(HbM, UbM, false, true);
break;
}
case Schur_op: {
constexpr T zero_tolerance =
std::is_same<T, double>::value ? 1e-7 : 1e-3f;
int n = params.m;
Matrix<T> SbM(n, n, params.batch_size, handle, allocator, stream);
Matrix<T> UbM(n, n, params.batch_size, handle, allocator, stream);
b_schur(AbM, UbM, SbM);
// Check that S is in Schur form
std::vector<T> S = std::vector<T>(n * n * params.batch_size);
raft::update_host(S.data(), SbM.raw_data(), S.size(), stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
for (int ib = 0; ib < params.batch_size; ib++) {
for (int j = 0; j < n - 2; j++) {
for (int i = j + 2; i < n; i++) {
ASSERT_TRUE(raft::abs(S[n * n * ib + n * j + i]) <
zero_tolerance);
}
}
}
for (int ib = 0; ib < params.batch_size; ib++) {
for (int k = 0; k < n - 3; k++) {
ASSERT_FALSE(
raft::abs(S[n * n * ib + n * k + k + 1]) > zero_tolerance &&
raft::abs(S[n * n * ib + n * (k + 1) + k + 2]) > zero_tolerance &&
raft::abs(S[n * n * ib + n * (k + 2) + k + 3]) > zero_tolerance);
}
}
// Check that U is unitary (UU'=I)
std::vector<T> UUt = std::vector<T>(n * n * params.batch_size);
raft::update_host(UUt.data(), b_gemm(UbM, UbM, false, true).raw_data(),
UUt.size(), stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
for (int ib = 0; ib < params.batch_size; ib++) {
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
ASSERT_TRUE(raft::abs(UUt[n * n * ib + n * j + i] -
(i == j ? (T)1 : (T)0)) < zero_tolerance);
}
}
}
// Write USU' in the result (will be compared against A)
*res_bM = UbM * b_gemm(SbM, UbM, false, true);
break;
}
case Lyapunov_op: {
Matrix<T> XbM = b_lyapunov(AbM, BbM);
// Write AXA'-X in the result (will be compared against -B)
*res_bM = AbM * b_gemm(XbM, AbM, false, true) - XbM;
break;
}
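      // For reference, b_lyapunov is presumably solving the discrete-time Lyapunov
      // equation A * X * A' - X + B = 0 (an assumption consistent with the comment
      // above), so the A * X * A' - X written to the result should reproduce -B.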
}
// Compute the expected results
res_h.resize(params.batch_size * m_r * n_r);
switch (params.operation) {
case AB_op:
for (int bid = 0; bid < params.batch_size; bid++) {
Naive::matMul(res_h.data() + bid * m_r * n_r,
A.data() + bid * params.m * params.n,
B.data() + bid * params.p * params.q, params.m,
params.n, params.q);
}
break;
case ApB_op:
Naive::add(res_h.data(), A.data(), B.data(), A.size());
break;
case AmB_op:
Naive::add(res_h.data(), A.data(), B.data(), A.size(), T(-1.0));
break;
case AkB_op:
for (int bid = 0; bid < params.batch_size; bid++) {
Naive::kronecker(res_h.data() + bid * m_r * n_r,
A.data() + bid * params.m * params.n,
B.data() + bid * params.p * params.q, params.m,
params.n, params.p, params.q);
}
break;
case AZT_op:
for (int bid = 0; bid < params.batch_size; bid++) {
Naive::matMul(res_h.data() + bid * m_r * n_r,
A.data() + bid * params.m * params.n,
Z.data() + bid * r, params.m, params.n, 1);
}
break;
case ZA_op:
for (int bid = 0; bid < params.batch_size; bid++) {
Naive::matMul(res_h.data() + bid * m_r * n_r, Z.data() + bid * r,
A.data() + bid * params.m * params.n, 1, params.m,
params.n);
}
break;
case AsolveZ_op:
// Simply copy Z in the result
memcpy(res_h.data(), Z.data(), r * params.batch_size * sizeof(T));
break;
case LaggedZ_op:
for (int bid = 0; bid < params.batch_size; bid++) {
Naive::laggedMat(res_h.data() + bid * m_r * n_r,
Z.data() + bid * params.m, params.m, params.n);
}
break;
case CopyA2D_op:
for (int bid = 0; bid < params.batch_size; bid++) {
Naive::copy2D(res_h.data() + bid * m_r * n_r,
A.data() + bid * params.m * params.n, params.s,
params.t, params.m, m_r, n_r);
}
break;
case DiffA_op: {
int len = params.m * params.n;
for (int bid = 0; bid < params.batch_size; bid++) {
Naive::diff(res_h.data() + bid * (len - 1), A.data() + bid * len,
len);
}
break;
}
case Hessenberg_op:
case Schur_op:
// Simply copy A (will be compared against UHU')
memcpy(res_h.data(), A.data(),
params.m * params.m * params.batch_size * sizeof(T));
break;
case Lyapunov_op:
// Simply copy -B (will be compared against AXA'-X)
for (int i = 0; i < params.m * params.m * params.batch_size; i++) {
res_h[i] = -B[i];
}
break;
}
CUDA_CHECK(cudaStreamSynchronize(stream));
}
void TearDown() override {
delete res_bM;
CUBLAS_CHECK(cublasDestroy(handle));
CUDA_CHECK(cudaStreamDestroy(stream));
}
protected:
MatrixInputs<T> params;
Matrix<T> *res_bM;
std::vector<T> res_h;
cublasHandle_t handle;
cudaStream_t stream;
};
// Test parameters (op, batch_size, m, n, p, q, s, t, tolerance)
const std::vector<MatrixInputs<double>> inputsd = {
{AB_op, 7, 15, 37, 37, 11, 0, 0, 1e-6},
{AZT_op, 5, 33, 65, 1, 1, 0, 0, 1e-6},
{ZA_op, 8, 12, 41, 1, 1, 0, 0, 1e-6},
{ApB_op, 4, 16, 48, 16, 48, 0, 0, 1e-6},
{AmB_op, 17, 9, 3, 9, 3, 0, 0, 1e-6},
{AkB_op, 5, 3, 13, 31, 8, 0, 0, 1e-6},
{AkB_op, 3, 7, 12, 31, 15, 0, 0, 1e-6},
{AkB_op, 2, 11, 2, 8, 46, 0, 0, 1e-6},
{AsolveZ_op, 6, 17, 17, 1, 1, 0, 0, 1e-6},
{LaggedZ_op, 5, 31, 9, 1, 1, 0, 0, 1e-6},
{LaggedZ_op, 7, 129, 3, 1, 1, 0, 0, 1e-6},
{CopyA2D_op, 11, 31, 63, 17, 14, 5, 9, 1e-6},
{CopyA2D_op, 4, 33, 7, 30, 4, 3, 0, 1e-6},
{DiffA_op, 5, 11, 1, 1, 1, 0, 0, 1e-6},
{DiffA_op, 15, 1, 37, 1, 1, 0, 0, 1e-6},
{Hessenberg_op, 10, 15, 15, 15, 15, 0, 0, 1e-6},
{Hessenberg_op, 30, 61, 61, 61, 61, 0, 0, 1e-6},
// {Schur_op, 7, 12, 12, 12, 12, 0, 0, 1e-3},
// {Schur_op, 17, 77, 77, 77, 77, 0, 0, 1e-3},
// {Lyapunov_op, 5, 14, 14, 14, 14, 0, 0, 1e-2},
// {Lyapunov_op, 13, 100, 100, 100, 100, 0, 0, 1e-2}
};
// Note: Schur and Lyapunov tests have had stability issues on CI so
// they are disabled temporarily. See issue:
// https://github.com/rapidsai/cuml/issues/1949
// Test parameters (op, batch_size, m, n, p, q, s, t, tolerance)
const std::vector<MatrixInputs<float>> inputsf = {
{AB_op, 7, 15, 37, 37, 11, 0, 0, 1e-2},
{AZT_op, 5, 33, 65, 1, 1, 0, 0, 1e-2},
{ZA_op, 8, 12, 41, 1, 1, 0, 0, 1e-2},
{ApB_op, 4, 16, 48, 16, 48, 0, 0, 1e-2},
{AmB_op, 17, 9, 3, 9, 3, 0, 0, 1e-2},
{AkB_op, 5, 3, 13, 31, 8, 0, 0, 1e-2},
{AkB_op, 3, 7, 12, 31, 15, 0, 0, 1e-2},
{AkB_op, 2, 11, 2, 8, 46, 0, 0, 1e-2},
{AsolveZ_op, 6, 17, 17, 1, 1, 0, 0, 1e-2},
{LaggedZ_op, 5, 31, 9, 1, 1, 0, 0, 1e-5},
{LaggedZ_op, 7, 129, 3, 1, 1, 0, 0, 1e-5},
{CopyA2D_op, 11, 31, 63, 17, 14, 5, 9, 1e-5},
{CopyA2D_op, 4, 33, 7, 30, 4, 3, 0, 1e-5},
{DiffA_op, 5, 11, 1, 1, 1, 0, 0, 1e-2},
{DiffA_op, 15, 1, 37, 1, 1, 0, 0, 1e-2},
{Hessenberg_op, 10, 15, 15, 15, 15, 0, 0, 1e-2},
{Hessenberg_op, 30, 61, 61, 61, 61, 0, 0, 1e-2},
// {Schur_op, 7, 12, 12, 12, 12, 0, 0, 1e-2},
// {Schur_op, 17, 77, 77, 77, 77, 0, 0, 1e-2},
// {Lyapunov_op, 5, 14, 14, 14, 14, 0, 0, 1e-2},
// {Lyapunov_op, 13, 100, 100, 100, 100, 0, 0, 1e-2}
};
// Note: Schur and Lyapunov operations don't give good precision for
// single-precision floating-point numbers yet...
using BatchedMatrixTestD = MatrixTest<double>;
using BatchedMatrixTestF = MatrixTest<float>;
TEST_P(BatchedMatrixTestD, Result) {
ASSERT_TRUE(raft::devArrMatchHost(
res_h.data(), res_bM->raw_data(), res_h.size(),
raft::CompareApprox<double>(params.tolerance), stream));
}
TEST_P(BatchedMatrixTestF, Result) {
ASSERT_TRUE(raft::devArrMatchHost(
res_h.data(), res_bM->raw_data(), res_h.size(),
raft::CompareApprox<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(BatchedMatrixTests, BatchedMatrixTestD,
::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(BatchedMatrixTests, BatchedMatrixTestF,
::testing::ValuesIn(inputsf));
} // namespace Batched
} // namespace LinAlg
} // namespace MLCommon
|
57ec8624409f94decbea506bb2d5ecddec26f95a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
__global__ void staticReverse(int *d, int n)
{
  // reverse d in place using a statically sized shared-memory buffer (assumes blockDim.x == n, n <= 64)
  __shared__ int s[64];
  int t = threadIdx.x;
  s[t] = d[t];
  __syncthreads();
  d[t] = s[n - t - 1];
}
__global__ void dynamicReverse(int *d, int n)
{
  // reverse d in place using dynamically sized shared memory
  // (size in bytes is supplied as the third launch parameter)
  extern __shared__ int s[];
  int t = threadIdx.x;
  s[t] = d[t];
  __syncthreads();
  d[t] = s[n - t - 1];
}
int main(void)
{
const int n = 64; // FIX ME TO max possible size
  int a[n], r[n], d[n]; // FIX ME TO dynamic arrays if necessary
for (int i = 0; i < n; i++) {
a[i] = i;
r[i] = n-i-1;
d[i] = 0;
}
int *d_d;
hipMalloc(&d_d, n * sizeof(int));
// run version with static shared memory
hipMemcpy(d_d, a, n*sizeof(int), hipMemcpyHostToDevice);
  hipLaunchKernelGGL(( staticReverse), dim3(1), dim3(n), 0, 0, d_d, n); // one block of n threads
hipMemcpy(d, d_d, n*sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
    if (d[i] != r[i]) printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);
// run dynamic shared memory version
hipMemcpy(d_d, a, n*sizeof(int), hipMemcpyHostToDevice);
  hipLaunchKernelGGL(( dynamicReverse), dim3(1), dim3(n), n*sizeof(int), 0, d_d, n); // n ints of dynamic shared memory
hipMemcpy(d, d_d, n * sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
    if (d[i] != r[i]) printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);
}
|
57ec8624409f94decbea506bb2d5ecddec26f95a.cu
|
#include <stdio.h>
#include <stdlib.h>
__global__ void staticReverse(int *d, int n)
{
  // reverse d in place using a statically sized shared-memory buffer (assumes blockDim.x == n, n <= 64)
  __shared__ int s[64];
  int t = threadIdx.x;
  s[t] = d[t];
  __syncthreads();
  d[t] = s[n - t - 1];
}
__global__ void dynamicReverse(int *d, int n)
{
  // reverse d in place using dynamically sized shared memory
  // (size in bytes is supplied as the third launch parameter)
  extern __shared__ int s[];
  int t = threadIdx.x;
  s[t] = d[t];
  __syncthreads();
  d[t] = s[n - t - 1];
}
int main(void)
{
const int n = 64; // FIX ME TO max possible size
  int a[n], r[n], d[n]; // FIX ME TO dynamic arrays if necessary
for (int i = 0; i < n; i++) {
a[i] = i;
r[i] = n-i-1;
d[i] = 0;
}
int *d_d;
cudaMalloc(&d_d, n * sizeof(int));
// run version with static shared memory
cudaMemcpy(d_d, a, n*sizeof(int), cudaMemcpyHostToDevice);
  staticReverse<<<1, n>>>(d_d, n); // one block of n threads
cudaMemcpy(d, d_d, n*sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
    if (d[i] != r[i]) printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);
// run dynamic shared memory version
cudaMemcpy(d_d, a, n*sizeof(int), cudaMemcpyHostToDevice);
  dynamicReverse<<<1, n, n*sizeof(int)>>>(d_d, n); // n ints of dynamic shared memory
cudaMemcpy(d, d_d, n * sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
    if (d[i] != r[i]) printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);
}
|
2199bf2da01419ab691e69bae88592122cb3545e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
./cucorrelation -i ../result/smallspec -o ../result/smallcc -c 3600 -x -m 0.5 -d 2
*/
#include "globalvar.cuh"
#include "ini.cu"
#include "file.cu"
#include "cc.hip"
#include "gpumanage.cu"
int main(int argc,char **argv)
{
s=time(NULL);
ini(argc,argv);
////////////////////////////////
specfilenum=0;
prescan(specpath,1);
speclist=(specinf*)malloc(specfilenum*sizeof(specinf));
specfilenum=0;
prescan(specpath,2);
printf("specfilenum=%d\n",specfilenum);
////////////////////////////////
	for (int i=0;i<specfilenum;i++)//sort
for (int j=i+1;j<specfilenum;j++)
if (speccp(speclist[i],speclist[j])==1)
{
specinf t=speclist[i];
speclist[i]=speclist[j];
speclist[j]=t;
}
chimax=1;
chinum=1;
	for (int i=1,j=1;i<=specfilenum;i++)//count the largest cluster size and the number of clusters
if (i!=specfilenum)
if (speccp(speclist[i],speclist[i-1])==0)
j++;
else
{
if (chimax<j)
chimax=j;
j=1;
chinum++;
}
else
if (chimax<j)
chimax=j;
chin=(intx2*)malloc(chinum*soi*2);
chin[0].x=0;
	for (int i=1,j=0;i<specfilenum;i++)//compute the start and end positions of each cluster
if (speccp(speclist[i],speclist[i-1])!=0)
{
chin[j].y=i-1;
j++;
chin[j].x=i;
}
chin[chinum-1].y=specfilenum-1;
printf("chinum=%d, chimax=%d\n",chinum,chimax);
////////////////////////////////
specmax=0;
segmax=0;
for (int i=0;i<specfilenum;i++)
{
if (specmax<speclist[i].head.spec)
specmax=speclist[i].head.spec;
if (segmax<speclist[i].head.seg)
segmax=speclist[i].head.seg;
}
printf("segmax=%d, specmax=%d\n",segmax,specmax);
////////////////////////////////
a=seta();
printf("a=%d\n",a);
c1=(hipfftComplex*)malloc(chimax*segmax*specmax*soc);
r1=(hipfftReal*)malloc(a*specmax*sor*2);
mlist=(intx2*)malloc(a*soi*2);
hipMalloc((void**)&d_c1,chimax*segmax*specmax*soc);
hipMalloc((void**)&d_c2,a*segmax*(specmax+1)*soc);
hipMalloc((void**)&d_r1,a*specmax*sor*2);
hipMalloc((void**)&d_speclist,chimax*sospi);
hipMalloc((void**)&d_mlist,a*soi*2);
////////////////cc////////////////
for (int chii=0;chii<chinum;chii++)
{
int mx=1;
int my=0;
printf("chii=%d\n",chii);
////////////////////////////////
int spec=speclist[chin[chii].x].head.spec;
float dt=speclist[chin[chii].x].head.dt;
int cchalfn=cchalf/dt;
int chincur=chin[chii].y-chin[chii].x+1;
for (int filei_a=chin[chii].x;filei_a<=chin[chii].y;filei_a++)
{
int filei_r=filei_a-chin[chii].x;
FILE *specfile=fopen(speclist[filei_a].name,"rb");
SPECHEAD trashhead;
fread(&trashhead,sosph,1,specfile);
for (int segi=0;segi<speclist[filei_a].head.seg;segi++)
fread(c1+segmax*spec*filei_r+segi*spec,spec*soc,1,specfile);
fclose(specfile);
}
hipMemcpy(d_c1,c1,chincur*segmax*spec*soc,HD);
hipMemcpy(d_speclist,speclist+chin[chii].x,chincur*sospi,HD);
for (int wini=0;wini<(chincur*(chincur-1)/2+a-1)/a;wini++)
{
////////////////////////////////
int wins=wini*a;
int wint=min(wins+a-1,chincur*(chincur-1)/2);
int winw=wint-wins+1;
for (int mi=0;mi<winw;mi++)
{
mlist[mi].x=mx;
mlist[mi].y=my;
if (my==mx-1)
{
my=0;
mx++;
}
else
my++;
}
printf("\twini=%d\n",wini);
hipMemcpy(d_mlist,mlist,a*soi*2,HD);
////////////////////////////////
blocknum=(winw*segmax*spec+maxth-1)/maxth;
hipLaunchKernelGGL(( mul), dim3(blocknum),dim3(maxth), 0, 0, d_c1,d_c2,d_mlist,winw,segmax,spec);
cuds;
////////////////////////////////
for (int gap=1;gap<segmax;gap*=2)
{
int gapnum=(segmax+gap-1)/(2*gap);
blocknum=(winw*gapnum*spec+maxth-1)/maxth;
hipLaunchKernelGGL(( add), dim3(blocknum),dim3(maxth), 0, 0, d_c2,d_speclist,d_mlist,winw,segmax,spec,gap,gapnum);
cuds;
}
printf("\t\tadd complete\n");
for (int conj=0;conj<1;conj++)
{
////////////////////////////////
////////////////////////////////
blocknum=(winw+maxth-1)/maxth;
hipLaunchKernelGGL(( shift), dim3(blocknum),dim3(maxth), 0, 0, d_c2,winw,segmax,spec);
cuds;
printf("\t\tshift complete\n");
////////////////fft////////////////
len[0]=spec*2;
inembed[0]=spec*2;
inembed[1]=winw;
onembed[0]=spec+1;
onembed[1]=winw;
hipfftPlanMany(&plan,1,len,onembed,1,segmax*(spec+1),inembed,1,spec*2,HIPFFT_C2R,winw);
hipfftExecC2R(plan,d_c2,d_r1);
hipfftDestroy(plan);
printf("\t\tfft complete\n");
////////////////////////////////
blocknum=(winw*spec*2+maxth-1)/maxth;
hipLaunchKernelGGL(( div), dim3(blocknum),dim3(maxth), 0, 0, d_r1,d_speclist,d_mlist,winw,spec);
cuds;
printf("\t\tdiv complete\n");
////////////////////////////////
hipMemcpy(r1,d_r1,winw*spec*2*sor,DH);
for (int mi=0;mi<winw;mi++)
{
int srci=chin[chii].x+mlist[mi].x;
int stai=chin[chii].x+mlist[mi].y;
for (int k=strlen(speclist[srci].name)-1;k>=0;k--)
if (speclist[srci].name[k]=='/')
{
strcpy(tpath,speclist[srci].name+k+1);
break;
}
tpath[strlen(tpath)-strlen(spectail)]='\0';
strcat(sacpath,tpath);
strcat(sacpath,"+");
for (int k=strlen(speclist[stai].name)-1;k>=0;k--)
if (speclist[stai].name[k]=='/')
{
strcpy(tpath,speclist[stai].name+k+1);
break;
}
tpath[strlen(tpath)-strlen(spectail)]='\0';
strcat(sacpath,tpath);
strcat(sacpath,sactail);
FILE *sacfile=fopen(sacpath,"wb");
if (sacfile==NULL)
printf("open failed, sac=%s\n",sacpath);
else
{
fwrite(r1+mi*spec*2+spec-cchalfn,(2*cchalfn+1)*sor,1,sacfile);
fclose(sacfile);
}
if (debug>=1)
{
hipfftComplex* swap=(hipfftComplex*)malloc(spec*soc);
float* finalcc=(float*)malloc((2*cchalfn+1)*sor);
float* ingcc=(float*)malloc((2*cchalfn+1)*sor);
memset(finalcc,0,(2*cchalfn+1)*sor);
for (int step=0;step<min(speclist[srci].head.seg,speclist[stai].head.seg);step++)
{
cc(c1+mlist[mi].x*segmax*spec+step*spec,c1+mlist[mi].y*segmax*spec+step*spec,swap,spec,dt,ingcc,cchalfn,step);
for (int p=0;p<2*cchalfn+1;p++)
finalcc[p]+=ingcc[p];
}
float diff=0;
float avg=0;
for (int p=0;p<2*cchalfn+1;p++)
{
finalcc[p]/=min(speclist[srci].head.seg,speclist[stai].head.seg);
diff=max(diff,abs(finalcc[p]-r1[mi*spec*2+spec-cchalfn+p]));
avg+=abs(r1[mi*spec*2+spec-cchalfn+p]);
}
if (debug>=2)
{
printf("multi thread:\n");
for (int p=0;p<2*cchalfn+1;p++)
printf("%f ",r1[mi*spec*2+spec-cchalfn+p]);
printf("\nsingle thread:\n");
for (int p=0;p<2*cchalfn+1;p++)
printf("%f ",finalcc[p]);
getchar();
}
avg/=(2*cchalfn+1);
printf("src=%d, \tsta=%d, \t%s: diff=%f, avg=%f\n",srci,stai,sacpath,diff,avg);
if (diff>=0.00001)
getchar();
free(swap);
free(finalcc);
free(ingcc);
}
sacpath[sacl]='\0';
}
}
}
}
t=time(NULL);
printf("total time=%ds\n",(int)(t-s));
}
|
2199bf2da01419ab691e69bae88592122cb3545e.cu
|
/*
./cucorrelation -i ../result/smallspec -o ../result/smallcc -c 3600 -x -m 0.5 -d 2
*/
#include "globalvar.cuh"
#include "ini.cu"
#include "file.cu"
#include "cc.cu"
#include "gpumanage.cu"
int main(int argc,char **argv)
{
s=time(NULL);
ini(argc,argv);
	////////////////read in file names and headers////////////////
specfilenum=0;
prescan(specpath,1);
speclist=(specinf*)malloc(specfilenum*sizeof(specinf));
specfilenum=0;
prescan(specpath,2);
printf("specfilenum=%d\n",specfilenum);
	////////////////group files into clusters////////////////
	for (int i=0;i<specfilenum;i++)//sort
for (int j=i+1;j<specfilenum;j++)
if (speccp(speclist[i],speclist[j])==1)
{
specinf t=speclist[i];
speclist[i]=speclist[j];
speclist[j]=t;
}
chimax=1;
chinum=1;
	for (int i=1,j=1;i<=specfilenum;i++)//count the largest cluster size and the number of clusters
if (i!=specfilenum)
if (speccp(speclist[i],speclist[i-1])==0)
j++;
else
{
if (chimax<j)
chimax=j;
j=1;
chinum++;
}
else
if (chimax<j)
chimax=j;
chin=(intx2*)malloc(chinum*soi*2);
chin[0].x=0;
	for (int i=1,j=0;i<specfilenum;i++)//compute the start and end positions of each cluster
if (speccp(speclist[i],speclist[i-1])!=0)
{
chin[j].y=i-1;
j++;
chin[j].x=i;
}
chin[chinum-1].y=specfilenum-1;
printf("chinum=%d, chimax=%d\n",chinum,chimax);
	////////////////determine the size of a single file////////////////
specmax=0;
segmax=0;
for (int i=0;i<specfilenum;i++)
{
if (specmax<speclist[i].head.spec)
specmax=speclist[i].head.spec;
if (segmax<speclist[i].head.seg)
segmax=speclist[i].head.seg;
}
printf("segmax=%d, specmax=%d\n",segmax,specmax);
	////////////////determine the window size and allocate////////////////
a=seta();
printf("a=%d\n",a);
c1=(cufftComplex*)malloc(chimax*segmax*specmax*soc);
r1=(cufftReal*)malloc(a*specmax*sor*2);
mlist=(intx2*)malloc(a*soi*2);
cudaMalloc((void**)&d_c1,chimax*segmax*specmax*soc);
cudaMalloc((void**)&d_c2,a*segmax*(specmax+1)*soc);
cudaMalloc((void**)&d_r1,a*specmax*sor*2);
cudaMalloc((void**)&d_speclist,chimax*sospi);
cudaMalloc((void**)&d_mlist,a*soi*2);
////////////////cc////////////////
for (int chii=0;chii<chinum;chii++)
{
int mx=1;
int my=0;
printf("chii=%d\n",chii);
		////////////////read in////////////////
int spec=speclist[chin[chii].x].head.spec;
float dt=speclist[chin[chii].x].head.dt;
int cchalfn=cchalf/dt;
int chincur=chin[chii].y-chin[chii].x+1;
for (int filei_a=chin[chii].x;filei_a<=chin[chii].y;filei_a++)
{
int filei_r=filei_a-chin[chii].x;
FILE *specfile=fopen(speclist[filei_a].name,"rb");
SPECHEAD trashhead;
fread(&trashhead,sosph,1,specfile);
for (int segi=0;segi<speclist[filei_a].head.seg;segi++)
fread(c1+segmax*spec*filei_r+segi*spec,spec*soc,1,specfile);
fclose(specfile);
}
cudaMemcpy(d_c1,c1,chincur*segmax*spec*soc,HD);
cudaMemcpy(d_speclist,speclist+chin[chii].x,chincur*sospi,HD);
for (int wini=0;wini<(chincur*(chincur-1)/2+a-1)/a;wini++)
{
			////////////////assign tasks////////////////
int wins=wini*a;
int wint=min(wins+a-1,chincur*(chincur-1)/2);
int winw=wint-wins+1;
for (int mi=0;mi<winw;mi++)
{
mlist[mi].x=mx;
mlist[mi].y=my;
if (my==mx-1)
{
my=0;
mx++;
}
else
my++;
}
printf("\twini=%d\n",wini);
cudaMemcpy(d_mlist,mlist,a*soi*2,HD);
			////////////////multiply////////////////
blocknum=(winw*segmax*spec+maxth-1)/maxth;
mul<<<blocknum,maxth>>>(d_c1,d_c2,d_mlist,winw,segmax,spec);
cuds;
			////////////////add////////////////
for (int gap=1;gap<segmax;gap*=2)
{
int gapnum=(segmax+gap-1)/(2*gap);
blocknum=(winw*gapnum*spec+maxth-1)/maxth;
add<<<blocknum,maxth>>>(d_c2,d_speclist,d_mlist,winw,segmax,spec,gap,gapnum);
cuds;
}
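			// Illustration of the gap-doubling pattern above (an inferred reading; add() itself
			// lives in cc.cu): with segmax = 8 the passes combine segment pairs
			//   gap = 1: (0,1) (2,3) (4,5) (6,7)
			//   gap = 2: (0,2) (4,6)
			//   gap = 4: (0,4)
			// i.e. a pairwise tree reduction that presumably leaves the per-pair sum in segment 0.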
printf("\t\tadd complete\n");
for (int conj=0;conj<1;conj++)
{
				////////////////conjugate////////////////
				////////////////switch complex storage format////////////////
blocknum=(winw+maxth-1)/maxth;
shift<<<blocknum,maxth>>>(d_c2,winw,segmax,spec);
cuds;
printf("\t\tshift complete\n");
////////////////fft////////////////
len[0]=spec*2;
inembed[0]=spec*2;
inembed[1]=winw;
onembed[0]=spec+1;
onembed[1]=winw;
cufftPlanMany(&plan,1,len,onembed,1,segmax*(spec+1),inembed,1,spec*2,CUFFT_C2R,winw);
cufftExecC2R(plan,d_c2,d_r1);
cufftDestroy(plan);
printf("\t\tfft complete\n");
				////////////////divide////////////////
blocknum=(winw*spec*2+maxth-1)/maxth;
div<<<blocknum,maxth>>>(d_r1,d_speclist,d_mlist,winw,spec);
cuds;
printf("\t\tdiv complete\n");
				////////////////output////////////////
cudaMemcpy(r1,d_r1,winw*spec*2*sor,DH);
for (int mi=0;mi<winw;mi++)
{
int srci=chin[chii].x+mlist[mi].x;
int stai=chin[chii].x+mlist[mi].y;
for (int k=strlen(speclist[srci].name)-1;k>=0;k--)
if (speclist[srci].name[k]=='/')
{
strcpy(tpath,speclist[srci].name+k+1);
break;
}
tpath[strlen(tpath)-strlen(spectail)]='\0';
strcat(sacpath,tpath);
strcat(sacpath,"+");
for (int k=strlen(speclist[stai].name)-1;k>=0;k--)
if (speclist[stai].name[k]=='/')
{
strcpy(tpath,speclist[stai].name+k+1);
break;
}
tpath[strlen(tpath)-strlen(spectail)]='\0';
strcat(sacpath,tpath);
strcat(sacpath,sactail);
FILE *sacfile=fopen(sacpath,"wb");
if (sacfile==NULL)
printf("open failed, sac=%s\n",sacpath);
else
{
fwrite(r1+mi*spec*2+spec-cchalfn,(2*cchalfn+1)*sor,1,sacfile);
fclose(sacfile);
}
if (debug>=1)
{
cufftComplex* swap=(cufftComplex*)malloc(spec*soc);
float* finalcc=(float*)malloc((2*cchalfn+1)*sor);
float* ingcc=(float*)malloc((2*cchalfn+1)*sor);
memset(finalcc,0,(2*cchalfn+1)*sor);
for (int step=0;step<min(speclist[srci].head.seg,speclist[stai].head.seg);step++)
{
cc(c1+mlist[mi].x*segmax*spec+step*spec,c1+mlist[mi].y*segmax*spec+step*spec,swap,spec,dt,ingcc,cchalfn,step);
for (int p=0;p<2*cchalfn+1;p++)
finalcc[p]+=ingcc[p];
}
float diff=0;
float avg=0;
for (int p=0;p<2*cchalfn+1;p++)
{
finalcc[p]/=min(speclist[srci].head.seg,speclist[stai].head.seg);
diff=max(diff,abs(finalcc[p]-r1[mi*spec*2+spec-cchalfn+p]));
avg+=abs(r1[mi*spec*2+spec-cchalfn+p]);
}
if (debug>=2)
{
printf("multi thread:\n");
for (int p=0;p<2*cchalfn+1;p++)
printf("%f ",r1[mi*spec*2+spec-cchalfn+p]);
printf("\nsingle thread:\n");
for (int p=0;p<2*cchalfn+1;p++)
printf("%f ",finalcc[p]);
getchar();
}
avg/=(2*cchalfn+1);
printf("src=%d, \tsta=%d, \t%s: diff=%f, avg=%f\n",srci,stai,sacpath,diff,avg);
if (diff>=0.00001)
getchar();
free(swap);
free(finalcc);
free(ingcc);
}
sacpath[sacl]='\0';
}
}
}
}
t=time(NULL);
printf("total time=%ds\n",(int)(t-s));
}
|
dff5aad839752247fc0d668fa31d189882fff31d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2017, September 11 - October 9
// ###
#include "helper.h"
#include <iostream>
#include <math.h>
using namespace std;
// uncomment to use the camera
//#define CAMERA
__global__ void gradient(float* cuda_imgIn,float* cuda_v1,float* cuda_v2,int w,int h,int nc){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = threadIdx.z + blockIdx.z*blockDim.z;
if (t_numx + 1 < w && t_numy < h && t_numz < nc){
cuda_v1[t_numx + w*t_numy + w*h*t_numz] = cuda_imgIn[t_numx + 1 + w*t_numy + w*h*t_numz] - cuda_imgIn[t_numx + w*t_numy + w*h*t_numz];
}
if (t_numx < w && t_numy + 1< h && t_numz < nc){
cuda_v2[t_numx + w*t_numy + w*h*t_numz] = cuda_imgIn[t_numx + w*(t_numy+1) + w*h*t_numz] - cuda_imgIn[t_numx + w*t_numy + w*h*t_numz];
}
}
__global__ void calculate_g_and_multiply_simple(float* cuda_v1, float* cuda_v2, int w, int h, int nc){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = 0;
float g = 1;
if (t_numx < w && t_numy < h && t_numz < nc){
for (int k=0 ; k<nc ; k++){
cuda_v1[t_numx + w*t_numy + w*h*k] = g*cuda_v1[t_numx + w*t_numy + w*h*k];
cuda_v2[t_numx + w*t_numy + w*h*k] = g*cuda_v2[t_numx + w*t_numy + w*h*k];
}
}
}
__global__ void calculate_g_and_multiply_max(float* cuda_v1, float* cuda_v2, int w, int h, int nc, float epsilon){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = 0;
float s=0;
float g = 0;
if (t_numx < w && t_numy < h && t_numz < nc){
for (int k=0 ; k<nc ; k++){
s += pow(cuda_v1[t_numx + w*t_numy + w*h*k] , 2) + pow(cuda_v2[t_numx + w*t_numy + w*h*k],2);
}
s = sqrt(s);
g = 1.f/max(epsilon, s);
for (int k=0 ; k<nc ; k++){
cuda_v1[t_numx + w*t_numy + w*h*k] = g*cuda_v1[t_numx + w*t_numy + w*h*k];
cuda_v2[t_numx + w*t_numy + w*h*k] = g*cuda_v2[t_numx + w*t_numy + w*h*k];
}
}
}
__global__ void calculate_g_and_multiply_exp(float* cuda_v1, float* cuda_v2, int w, int h, int nc, float epsilon){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = 0;
float s=0;
float g = 0;
if (t_numx < w && t_numy < h && t_numz < nc){
for (int k=0 ; k<nc ; k++){
s += pow(cuda_v1[t_numx + w*t_numy + w*h*k] , 2) + pow(cuda_v2[t_numx + w*t_numy + w*h*k],2);
}
s = sqrt(s);
g = exp(-pow(s,2)/epsilon)/epsilon;
for (int k=0 ; k<nc ; k++){
cuda_v1[t_numx + w*t_numy + w*h*k] = g*cuda_v1[t_numx + w*t_numy + w*h*k];
cuda_v2[t_numx + w*t_numy + w*h*k] = g*cuda_v2[t_numx + w*t_numy + w*h*k];
}
}
}
__global__ void divergence_update(float* cuda_imgIn,float* cuda_div , float* cuda_v1, float* cuda_v2, int w, int h, int nc, float tau ){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = threadIdx.z + blockIdx.z*blockDim.z;
int index = t_numx + w*t_numy + w*h*t_numz;
if (t_numx > 0 && t_numx < w && t_numy < h && t_numz < nc){
cuda_div[index] += cuda_v1[index] - cuda_v1[index -1];
}
if (t_numy > 0 && t_numx < w && t_numy < h && t_numz < nc){
cuda_div[index] += cuda_v2[index] - cuda_v2[index - w];
}
if (t_numx < w && t_numy < h && t_numz < nc){
cuda_imgIn[index] += (float)tau*cuda_div[index];
}
}
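// Putting the kernels together (a reading inferred from the code above, not from the
// course handout): each outer iteration performs the explicit diffusion update
//   u <- u + tau * div( g(s) * grad(u) ),  with s^2 = sum over channels of |grad(u)|^2,
// using forward differences for the gradient and backward differences for the
// divergence. The three g variants match the kernels above: g = 1,
// g = 1 / max(epsilon, s), and g = exp(-s^2 / epsilon) / epsilon.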
int main(int argc, char **argv)
{
hipDeviceSynchronize(); CUDA_CHECK;
#ifdef CAMERA
#else
// input image
string image = "";
int iterations = 0;
float tau = 0.0;
float epsilon = 0.0;
int g_type = 0;
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
bool ret2 = getParam("iter", iterations, argc, argv);
if (!ret2) {cerr << "ERROR: Num of iterations not specified" << endl; return 1;}
bool ret3 = getParam("tau", tau, argc, argv);
if (!ret3) {cerr << "ERROR: no tau specified" << endl; return 1;}
bool ret4 = getParam("epsilon", epsilon, argc, argv);
if (!ret4) {cerr << "ERROR: no epsilon specified" << endl; return 1;}
bool ret5 = getParam("g_type", g_type, argc, argv);
if (!ret5) {cerr << "ERROR: no gradient calculation type specified" << endl; return 1;}
    if (argc <= 4) { cout << "Usage: " << argv[0] << " -i <image> -iter <iterations> -tau <tau> -epsilon <epsilon> -g_type <g_type> [-repeats <repeats>] [-gray]" << endl; return 1; }
#endif
// number of computation repetitions to get a better run time measurement
int repeats = 1;
getParam("repeats", repeats, argc, argv);
cout << "repeats: " << repeats << endl;
    // load the input image as grayscale if "-gray" is specified
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
// ### Define your own parameters here as needed
// Init camera / Load input image
#ifdef CAMERA
// Init camera
cv::VideoCapture camera(0);
if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; }
int camW = 640;
int camH = 480;
camera.set(CV_CAP_PROP_FRAME_WIDTH,camW);
camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH);
// read in first frame to get the dimensions
cv::Mat mIn;
camera >> mIn;
#else
// Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
// check
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
#endif
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
cout << "image: " << w << " x " << h << endl;
cv::Mat mOut(h,w,mIn.type());
// allocate raw input image array
float *imgIn = new float[(size_t)w*h*nc];
// allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying)
float *imgOut = new float[(size_t)w*h*mOut.channels()];
// For camera mode: Make a loop to read in camera frames
#ifdef CAMERA
while (cv::waitKey(30) < 0)
{
// Get camera image
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
#endif
convert_mat_to_layered (imgIn, mIn);
//GPU version
int array_size = w*h*nc;
//int iterations = 100;
//float tau = 0.2;
//float epsilon = 0.1;
float* cuda_imgIn;
float* cuda_v1;
float* cuda_v2;
hipMalloc((void**) &cuda_imgIn , array_size*sizeof(float));
hipMalloc((void**) &cuda_v1, array_size*sizeof(float));
hipMalloc((void**) &cuda_v2, array_size*sizeof(float));
hipMemcpy(cuda_imgIn, imgIn , array_size*sizeof(float) , hipMemcpyHostToDevice);
float *cuda_div;
hipMalloc((void**) &cuda_div , array_size*sizeof(float));
dim3 block = dim3(32,32,1);
int grid_x = ((w + block.x - 1)/block.x);
int grid_y = ((h + block.y - 1)/block.y);
int grid_z = ((nc + block.z - 1)/block.z);
dim3 grid = dim3(grid_x, grid_y, grid_z );
for (int iter=0; iter<iterations ; iter++){
hipMemset(cuda_v1, 0 , array_size*sizeof(float));
hipMemset(cuda_v2, 0 , array_size*sizeof(float));
hipLaunchKernelGGL(( gradient) , dim3(grid), dim3(block), 0, 0, cuda_imgIn, cuda_v1, cuda_v2, w, h, nc );
if (g_type == 0)
hipLaunchKernelGGL(( calculate_g_and_multiply_simple) , dim3(grid), dim3(block), 0, 0, cuda_v1, cuda_v2, w, h, nc);
else if (g_type == 1)
hipLaunchKernelGGL(( calculate_g_and_multiply_max) , dim3(grid), dim3(block), 0, 0, cuda_v1, cuda_v2, w, h, nc, epsilon);
else if (g_type ==2)
hipLaunchKernelGGL(( calculate_g_and_multiply_exp) , dim3(grid), dim3(block), 0, 0, cuda_v1, cuda_v2, w, h, nc, epsilon);
hipMemset(cuda_div, 0 , array_size*sizeof(float));
hipLaunchKernelGGL(( divergence_update) , dim3(grid), dim3(block), 0, 0, cuda_imgIn, cuda_div , cuda_v1, cuda_v2, w, h, nc, tau );
}
hipMemcpy(imgOut, cuda_imgIn , array_size*sizeof(float) , hipMemcpyDeviceToHost);
// GPU version end
// show input image
showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100)
// show output image: first convert to interleaved opencv format from the layered raw array
convert_layered_to_mat(mOut, imgOut);
showImage("Output", mOut, 100+w+40, 100);
// ### Display your own output images here as needed
#ifdef CAMERA
// end of camera loop
}
#else
// wait for key inputs
cv::waitKey(0);
#endif
// save input and result
cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255]
cv::imwrite("image_result.png",mOut*255.f);
// free allocated arrays
delete[] imgIn;
delete[] imgOut;
hipFree(cuda_v1);
hipFree(cuda_v2);
hipFree(cuda_div);
hipFree(cuda_imgIn);
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
|
dff5aad839752247fc0d668fa31d189882fff31d.cu
|
// ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2017, September 11 - October 9
// ###
#include "helper.h"
#include <iostream>
#include <math.h>
using namespace std;
// uncomment to use the camera
//#define CAMERA
__global__ void gradient(float* cuda_imgIn,float* cuda_v1,float* cuda_v2,int w,int h,int nc){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = threadIdx.z + blockIdx.z*blockDim.z;
if (t_numx + 1 < w && t_numy < h && t_numz < nc){
cuda_v1[t_numx + w*t_numy + w*h*t_numz] = cuda_imgIn[t_numx + 1 + w*t_numy + w*h*t_numz] - cuda_imgIn[t_numx + w*t_numy + w*h*t_numz];
}
if (t_numx < w && t_numy + 1< h && t_numz < nc){
cuda_v2[t_numx + w*t_numy + w*h*t_numz] = cuda_imgIn[t_numx + w*(t_numy+1) + w*h*t_numz] - cuda_imgIn[t_numx + w*t_numy + w*h*t_numz];
}
}
__global__ void calculate_g_and_multiply_simple(float* cuda_v1, float* cuda_v2, int w, int h, int nc){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = 0;
float g = 1;
if (t_numx < w && t_numy < h && t_numz < nc){
for (int k=0 ; k<nc ; k++){
cuda_v1[t_numx + w*t_numy + w*h*k] = g*cuda_v1[t_numx + w*t_numy + w*h*k];
cuda_v2[t_numx + w*t_numy + w*h*k] = g*cuda_v2[t_numx + w*t_numy + w*h*k];
}
}
}
__global__ void calculate_g_and_multiply_max(float* cuda_v1, float* cuda_v2, int w, int h, int nc, float epsilon){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = 0;
float s=0;
float g = 0;
if (t_numx < w && t_numy < h && t_numz < nc){
for (int k=0 ; k<nc ; k++){
s += pow(cuda_v1[t_numx + w*t_numy + w*h*k] , 2) + pow(cuda_v2[t_numx + w*t_numy + w*h*k],2);
}
s = sqrt(s);
g = 1.f/max(epsilon, s);
for (int k=0 ; k<nc ; k++){
cuda_v1[t_numx + w*t_numy + w*h*k] = g*cuda_v1[t_numx + w*t_numy + w*h*k];
cuda_v2[t_numx + w*t_numy + w*h*k] = g*cuda_v2[t_numx + w*t_numy + w*h*k];
}
}
}
__global__ void calculate_g_and_multiply_exp(float* cuda_v1, float* cuda_v2, int w, int h, int nc, float epsilon){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = 0;
float s=0;
float g = 0;
if (t_numx < w && t_numy < h && t_numz < nc){
for (int k=0 ; k<nc ; k++){
s += pow(cuda_v1[t_numx + w*t_numy + w*h*k] , 2) + pow(cuda_v2[t_numx + w*t_numy + w*h*k],2);
}
s = sqrt(s);
g = exp(-pow(s,2)/epsilon)/epsilon;
for (int k=0 ; k<nc ; k++){
cuda_v1[t_numx + w*t_numy + w*h*k] = g*cuda_v1[t_numx + w*t_numy + w*h*k];
cuda_v2[t_numx + w*t_numy + w*h*k] = g*cuda_v2[t_numx + w*t_numy + w*h*k];
}
}
}
__global__ void divergence_update(float* cuda_imgIn,float* cuda_div , float* cuda_v1, float* cuda_v2, int w, int h, int nc, float tau ){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = threadIdx.z + blockIdx.z*blockDim.z;
int index = t_numx + w*t_numy + w*h*t_numz;
if (t_numx > 0 && t_numx < w && t_numy < h && t_numz < nc){
cuda_div[index] += cuda_v1[index] - cuda_v1[index -1];
}
if (t_numy > 0 && t_numx < w && t_numy < h && t_numz < nc){
cuda_div[index] += cuda_v2[index] - cuda_v2[index - w];
}
if (t_numx < w && t_numy < h && t_numz < nc){
cuda_imgIn[index] += (float)tau*cuda_div[index];
}
}
int main(int argc, char **argv)
{
cudaDeviceSynchronize(); CUDA_CHECK;
#ifdef CAMERA
#else
// input image
string image = "";
int iterations = 0;
float tau = 0.0;
float epsilon = 0.0;
int g_type = 0;
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
bool ret2 = getParam("iter", iterations, argc, argv);
if (!ret2) {cerr << "ERROR: Num of iterations not specified" << endl; return 1;}
bool ret3 = getParam("tau", tau, argc, argv);
if (!ret3) {cerr << "ERROR: no tau specified" << endl; return 1;}
bool ret4 = getParam("epsilon", epsilon, argc, argv);
if (!ret4) {cerr << "ERROR: no epsilon specified" << endl; return 1;}
bool ret5 = getParam("g_type", g_type, argc, argv);
if (!ret5) {cerr << "ERROR: no gradient calculation type specified" << endl; return 1;}
    if (argc <= 4) { cout << "Usage: " << argv[0] << " -i <image> -iter <iterations> -tau <tau> -epsilon <epsilon> -g_type <g_type> [-repeats <repeats>] [-gray]" << endl; return 1; }
#endif
// number of computation repetitions to get a better run time measurement
int repeats = 1;
getParam("repeats", repeats, argc, argv);
cout << "repeats: " << repeats << endl;
    // load the input image as grayscale if "-gray" is specified
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
// ### Define your own parameters here as needed
// Init camera / Load input image
#ifdef CAMERA
// Init camera
cv::VideoCapture camera(0);
if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; }
int camW = 640;
int camH = 480;
camera.set(CV_CAP_PROP_FRAME_WIDTH,camW);
camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH);
// read in first frame to get the dimensions
cv::Mat mIn;
camera >> mIn;
#else
// Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
// check
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
#endif
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
cout << "image: " << w << " x " << h << endl;
cv::Mat mOut(h,w,mIn.type());
// allocate raw input image array
float *imgIn = new float[(size_t)w*h*nc];
// allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying)
float *imgOut = new float[(size_t)w*h*mOut.channels()];
// For camera mode: Make a loop to read in camera frames
#ifdef CAMERA
while (cv::waitKey(30) < 0)
{
// Get camera image
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
#endif
convert_mat_to_layered (imgIn, mIn);
//GPU version
int array_size = w*h*nc;
//int iterations = 100;
//float tau = 0.2;
//float epsilon = 0.1;
float* cuda_imgIn;
float* cuda_v1;
float* cuda_v2;
cudaMalloc((void**) &cuda_imgIn , array_size*sizeof(float));
cudaMalloc((void**) &cuda_v1, array_size*sizeof(float));
cudaMalloc((void**) &cuda_v2, array_size*sizeof(float));
cudaMemcpy(cuda_imgIn, imgIn , array_size*sizeof(float) , cudaMemcpyHostToDevice);
float *cuda_div;
cudaMalloc((void**) &cuda_div , array_size*sizeof(float));
dim3 block = dim3(32,32,1);
int grid_x = ((w + block.x - 1)/block.x);
int grid_y = ((h + block.y - 1)/block.y);
int grid_z = ((nc + block.z - 1)/block.z);
dim3 grid = dim3(grid_x, grid_y, grid_z );
for (int iter=0; iter<iterations ; iter++){
cudaMemset(cuda_v1, 0 , array_size*sizeof(float));
cudaMemset(cuda_v2, 0 , array_size*sizeof(float));
gradient <<<grid, block>>>(cuda_imgIn, cuda_v1, cuda_v2, w, h, nc );
if (g_type == 0)
calculate_g_and_multiply_simple <<< grid, block>>>(cuda_v1, cuda_v2, w, h, nc);
else if (g_type == 1)
calculate_g_and_multiply_max <<< grid, block>>>(cuda_v1, cuda_v2, w, h, nc, epsilon);
else if (g_type ==2)
calculate_g_and_multiply_exp <<< grid, block>>>(cuda_v1, cuda_v2, w, h, nc, epsilon);
cudaMemset(cuda_div, 0 , array_size*sizeof(float));
divergence_update <<<grid, block>>> (cuda_imgIn, cuda_div , cuda_v1, cuda_v2, w, h, nc, tau );
}
cudaMemcpy(imgOut, cuda_imgIn , array_size*sizeof(float) , cudaMemcpyDeviceToHost);
// GPU version end
// show input image
showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100)
// show output image: first convert to interleaved opencv format from the layered raw array
convert_layered_to_mat(mOut, imgOut);
showImage("Output", mOut, 100+w+40, 100);
// ### Display your own output images here as needed
#ifdef CAMERA
// end of camera loop
}
#else
// wait for key inputs
cv::waitKey(0);
#endif
// save input and result
cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255]
cv::imwrite("image_result.png",mOut*255.f);
// free allocated arrays
delete[] imgIn;
delete[] imgOut;
cudaFree(cuda_v1);
cudaFree(cuda_v2);
cudaFree(cuda_div);
cudaFree(cuda_imgIn);
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
|
11cd98a98b34cf3746208e486b4246f99521fbc7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "cp.h"
#include <hip/hip_runtime.h>
#include "cudacheck.h"
static inline int divup(int a, int b) { return (a + b - 1)/b; }
static inline int roundup(int a, int b) { return divup(a, b) * b;}
__global__ void normalizekernel(int ny, int nx, float* data, float* ntdata){
int y = blockIdx.x;
if(y>=ny) return;
// mean
float s=0.0;
for(int x=0; x<nx; ++x){
float v = data[x+y*nx];
s += v;
}
float m = s / (float) nx;
// rootsquaresum
float rs = 0.0;
for(int x=0; x<nx; ++x){
float v = data[x+y*nx];
rs += ((v-m)*(v-m));
}
float r = sqrt(rs);
// store
for(int x=0; x<nx; ++x){
float v = ( (data[x+y*nx]) - m ) / r;
ntdata[y+x*ny] = v;
}
}
__global__ void matmulkernel(int ny, int nx, float* ntdata, float* r){
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if(i>=ny || j>=ny) return;
float s = 0.0;
if(i<=j){
for(int k=0; k<nx; ++k){
float x = ntdata[ny*k+j];
float y = ntdata[ny*k+i];
s += (x * y);
}
}
r[j+i*ny] = s;
}
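// For reference, the math implemented by the two kernels above (inferred from the
// code, not from cp.h): row y of the input is centered and scaled so that
//   nt(x, y) = (data(x, y) - mean_y) / sqrt( sum_x (data(x, y) - mean_y)^2 ),
// and for i <= j the output entry is the dot product
//   result(i, j) = sum_k nt(k, i) * nt(k, j),
// which is the Pearson correlation coefficient between input rows i and j.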
void correlate(int ny, int nx, const float* data, float* result) {
int tmpsize = ny*nx*sizeof(float);
int ressize = ny*ny*sizeof(float);
// Allocate GPU memory for input data shape: (ny, nx) size: ny*nx
float* dGPU = NULL;
CHECK(hipMalloc((void**)&dGPU, tmpsize));
// Allocate GPU memory for normalized & transposed data shape: (nx, ny) size: ny*nx
float* ntGPU = NULL;
CHECK(hipMalloc((void**)&ntGPU, tmpsize));
// Allocate GPU memory for result data shape: (ny, ny) size: ny*ny
float* rGPU = NULL;
CHECK(hipMalloc((void**)&rGPU, ressize));
// Copy input data to GPU
CHECK(hipMemcpy(dGPU, data, tmpsize, hipMemcpyHostToDevice));
int nBlocks = roundup(ny, 64);
// Run normalization & transpose kernel
{
hipLaunchKernelGGL(( normalizekernel), dim3(nBlocks), dim3(1), 0, 0, ny, nx, dGPU, ntGPU);
CHECK(hipGetLastError());
}
// Run kernel (matmul)
{
dim3 dimBlock(16, 16);
dim3 dimGrid(divup(ny, dimBlock.x), divup(ny, dimBlock.y));
hipLaunchKernelGGL(( matmulkernel), dim3(dimGrid), dim3(dimBlock), 0, 0, ny, nx, ntGPU, rGPU);
CHECK(hipGetLastError());
}
// Copy data back to CPU & release memory
CHECK(hipMemcpy(result, rGPU, ressize, hipMemcpyDeviceToHost));
// Free
CHECK(hipFree(dGPU)); CHECK(hipFree(ntGPU)); CHECK(hipFree(rGPU));
}
|
11cd98a98b34cf3746208e486b4246f99521fbc7.cu
|
#include "cp.h"
#include <cuda_runtime.h>
#include "cudacheck.h"
static inline int divup(int a, int b) { return (a + b - 1)/b; }
static inline int roundup(int a, int b) { return divup(a, b) * b;}
__global__ void normalizekernel(int ny, int nx, float* data, float* ntdata){
int y = blockIdx.x;
if(y>=ny) return;
// mean
float s=0.0;
for(int x=0; x<nx; ++x){
float v = data[x+y*nx];
s += v;
}
float m = s / (float) nx;
// rootsquaresum
float rs = 0.0;
for(int x=0; x<nx; ++x){
float v = data[x+y*nx];
rs += ((v-m)*(v-m));
}
float r = sqrt(rs);
// store
for(int x=0; x<nx; ++x){
float v = ( (data[x+y*nx]) - m ) / r;
ntdata[y+x*ny] = v;
}
}
__global__ void matmulkernel(int ny, int nx, float* ntdata, float* r){
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if(i>=ny || j>=ny) return;
float s = 0.0;
if(i<=j){
for(int k=0; k<nx; ++k){
float x = ntdata[ny*k+j];
float y = ntdata[ny*k+i];
s += (x * y);
}
}
r[j+i*ny] = s;
}
void correlate(int ny, int nx, const float* data, float* result) {
int tmpsize = ny*nx*sizeof(float);
int ressize = ny*ny*sizeof(float);
// Allocate GPU memory for input data shape: (ny, nx) size: ny*nx
float* dGPU = NULL;
CHECK(cudaMalloc((void**)&dGPU, tmpsize));
// Allocate GPU memory for normalized & transposed data shape: (nx, ny) size: ny*nx
float* ntGPU = NULL;
CHECK(cudaMalloc((void**)&ntGPU, tmpsize));
// Allocate GPU memory for result data shape: (ny, ny) size: ny*ny
float* rGPU = NULL;
CHECK(cudaMalloc((void**)&rGPU, ressize));
// Copy input data to GPU
CHECK(cudaMemcpy(dGPU, data, tmpsize, cudaMemcpyHostToDevice));
int nBlocks = roundup(ny, 64);
// Run normalization & transpose kernel
{
normalizekernel<<<nBlocks, 1>>>(ny, nx, dGPU, ntGPU);
CHECK(cudaGetLastError());
}
// Run kernel (matmul)
{
dim3 dimBlock(16, 16);
dim3 dimGrid(divup(ny, dimBlock.x), divup(ny, dimBlock.y));
matmulkernel<<<dimGrid, dimBlock>>>(ny, nx, ntGPU, rGPU);
CHECK(cudaGetLastError());
}
// Copy data back to CPU & release memory
CHECK(cudaMemcpy(result, rGPU, ressize, cudaMemcpyDeviceToHost));
// Free
CHECK(cudaFree(dGPU)); CHECK(cudaFree(ntGPU)); CHECK(cudaFree(rGPU));
}
|
42acdfefc15cd60baf83ed09aefe878e4e4dc7b8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include "timer.h"
#include "cuda_utils.h"
typedef float dtype;
#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the smallest power of 2 that is greater than or equal to x */
unsigned int nextPow2( unsigned int x ) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
/* find out # of threads and # thread blocks for a particular kernel */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
if (whichKernel < 3)
{
/* 1 thread per element */
threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
blocks = (n + threads - 1) / threads;
}
else
{
/* 1 thread per 2 elements */
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
}
/* limit the total number of threads */
if (whichKernel == 5)
blocks = MIN(maxBlocks, blocks);
}
/* special type of reduction to account for floating point error
* Look up Kahan summation
*/
dtype reduce_cpu(dtype *data, int n) {
dtype sum = data[0];
dtype c = (dtype)0.0;
for (int i = 1; i < n; i++)
{
dtype y = data[i] - c;
dtype t = sum + y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
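/* A small worked instance of the compensation above (illustrative only): with
 * sum = 1e8f and y = 1.0f, t = sum + y rounds back to 1e8f, so c = (t - sum) - y
 * evaluates to -1.0f; on the next iteration y = data[i] - c adds the lost 1.0f back. */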
__global__ void
kernel1(dtype *input, dtype *output, unsigned int n)
{
__shared__ dtype scratch[MAX_THREADS];
unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
unsigned int i = bid * blockDim.x + threadIdx.x;
if(i < n) {
scratch[threadIdx.x] = input[i];
} else {
scratch[threadIdx.x] = 0;
}
__syncthreads ();
for(unsigned int s = 1; s < blockDim.x; s <<= 1) {
if ( (threadIdx.x * s * 2) < blockDim.x )
scratch[threadIdx.x * s * 2] += scratch[(threadIdx.x * s * 2) + s];
__syncthreads ();
}
if(threadIdx.x == 0) {
output[bid] = scratch[0];
}
}
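/* Trace of the strided indexing above (illustrative sketch, assuming blockDim.x = 8):
 *   s = 1: threads 0-3 update scratch[0]+=scratch[1], [2]+=[3], [4]+=[5], [6]+=[7]
 *   s = 2: threads 0-1 update scratch[0]+=scratch[2], [4]+=[6]
 *   s = 4: thread 0 updates scratch[0]+=scratch[4]
 * leaving the block's partial sum in scratch[0], which thread 0 writes to output[bid]. */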
int
main(int argc, char** argv)
{
int i;
/* data structure */
dtype *h_idata, h_odata, h_cpu;
dtype *d_idata, *d_odata;
/* timer */
struct stopwatch_t* timer = NULL;
long double t_kernel_1, t_cpu;
/* which kernel are we running */
int whichKernel;
/* number of threads and thread blocks */
int threads, blocks;
int N;
if(argc > 1) {
N = atoi (argv[1]);
printf("N: %d\n", N);
} else {
N = N_;
printf("N: %d\n", N);
}
/* naive kernel */
whichKernel = 1;
getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
/* initialize timer */
stopwatch_init ();
timer = stopwatch_create ();
/* allocate memory */
h_idata = (dtype*) malloc (N * sizeof (dtype));
CUDA_CHECK_ERROR (hipMalloc (&d_idata, N * sizeof (dtype)));
CUDA_CHECK_ERROR (hipMalloc (&d_odata, blocks * sizeof (dtype)));
/* Initialize array */
srand48(time(NULL));
for(i = 0; i < N; i++) {
h_idata[i] = drand48() / 100000;
}
CUDA_CHECK_ERROR (hipMemcpy (d_idata, h_idata, N * sizeof (dtype),
hipMemcpyHostToDevice));
/* ================================================== */
/* GPU kernel */
dim3 gb(16, ((blocks + 16 - 1) / 16), 1);
dim3 tb(threads, 1, 1);
/* warm up */
hipLaunchKernelGGL(( kernel1) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
hipDeviceSynchronize ();
stopwatch_start (timer);
/* execute kernel */
hipLaunchKernelGGL(( kernel1) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
int s = blocks;
while(s > 1) {
threads = 0;
blocks = 0;
getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
dim3 gb(16, (blocks + 16 - 1) / 16, 1);
dim3 tb(threads, 1, 1);
hipLaunchKernelGGL(( kernel1) , dim3(gb), dim3(tb), 0, 0, d_odata, d_odata, s);
s = (s + threads - 1) / threads;
}
hipDeviceSynchronize ();
t_kernel_1 = stopwatch_stop (timer);
fprintf (stdout, "Time to execute strided index GPU reduction kernel: %Lg secs\n", t_kernel_1);
double bw = (N * sizeof(dtype)) / (t_kernel_1 * 1e9);
fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);
/* copy result back from GPU */
CUDA_CHECK_ERROR (hipMemcpy (&h_odata, d_odata, sizeof (dtype),
hipMemcpyDeviceToHost));
/* ================================================== */
/* ================================================== */
/* CPU kernel */
stopwatch_start (timer);
h_cpu = reduce_cpu (h_idata, N);
t_cpu = stopwatch_stop (timer);
fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n",
t_cpu);
/* ================================================== */
if(abs (h_odata - h_cpu) > 1e-5) {
fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
} else {
printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
}
return 0;
}
|
42acdfefc15cd60baf83ed09aefe878e4e4dc7b8.cu
|
#include <stdlib.h>
#include <stdio.h>
#include "timer.h"
#include "cuda_utils.h"
typedef float dtype;
#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the smallest power of 2 that is greater than or equal to x */
unsigned int nextPow2( unsigned int x ) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
/* find out # of threads and # thread blocks for a particular kernel */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
if (whichKernel < 3)
{
/* 1 thread per element */
threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
blocks = (n + threads - 1) / threads;
}
else
{
/* 1 thread per 2 elements */
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
}
/* limit the total number of threads */
if (whichKernel == 5)
blocks = MIN(maxBlocks, blocks);
}
/* special type of reduction to account for floating point error
* Look up Kahan summation
*/
dtype reduce_cpu(dtype *data, int n) {
dtype sum = data[0];
dtype c = (dtype)0.0;
for (int i = 1; i < n; i++)
{
dtype y = data[i] - c;
dtype t = sum + y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
__global__ void
kernel1(dtype *input, dtype *output, unsigned int n)
{
__shared__ dtype scratch[MAX_THREADS];
unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
unsigned int i = bid * blockDim.x + threadIdx.x;
if(i < n) {
scratch[threadIdx.x] = input[i];
} else {
scratch[threadIdx.x] = 0;
}
__syncthreads ();
for(unsigned int s = 1; s < blockDim.x; s <<= 1) {
if ( (threadIdx.x * s * 2) < blockDim.x )
scratch[threadIdx.x * s * 2] += scratch[(threadIdx.x * s * 2) + s];
__syncthreads ();
}
if(threadIdx.x == 0) {
output[bid] = scratch[0];
}
}
int
main(int argc, char** argv)
{
int i;
/* data structure */
dtype *h_idata, h_odata, h_cpu;
dtype *d_idata, *d_odata;
/* timer */
struct stopwatch_t* timer = NULL;
long double t_kernel_1, t_cpu;
/* which kernel are we running */
int whichKernel;
/* number of threads and thread blocks */
int threads, blocks;
int N;
if(argc > 1) {
N = atoi (argv[1]);
printf("N: %d\n", N);
} else {
N = N_;
printf("N: %d\n", N);
}
/* naive kernel */
whichKernel = 1;
getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
/* initialize timer */
stopwatch_init ();
timer = stopwatch_create ();
/* allocate memory */
h_idata = (dtype*) malloc (N * sizeof (dtype));
CUDA_CHECK_ERROR (cudaMalloc (&d_idata, N * sizeof (dtype)));
CUDA_CHECK_ERROR (cudaMalloc (&d_odata, blocks * sizeof (dtype)));
/* Initialize array */
srand48(time(NULL));
for(i = 0; i < N; i++) {
h_idata[i] = drand48() / 100000;
}
CUDA_CHECK_ERROR (cudaMemcpy (d_idata, h_idata, N * sizeof (dtype),
cudaMemcpyHostToDevice));
/* ================================================== */
/* GPU kernel */
dim3 gb(16, ((blocks + 16 - 1) / 16), 1);
dim3 tb(threads, 1, 1);
/* warm up */
kernel1 <<<gb, tb>>> (d_idata, d_odata, N);
cudaThreadSynchronize ();
stopwatch_start (timer);
/* execute kernel */
kernel1 <<<gb, tb>>> (d_idata, d_odata, N);
int s = blocks;
while(s > 1) {
threads = 0;
blocks = 0;
getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
dim3 gb(16, (blocks + 16 - 1) / 16, 1);
dim3 tb(threads, 1, 1);
kernel1 <<<gb, tb>>> (d_odata, d_odata, s);
s = (s + threads - 1) / threads;
}
cudaThreadSynchronize ();
t_kernel_1 = stopwatch_stop (timer);
fprintf (stdout, "Time to execute strided index GPU reduction kernel: %Lg secs\n", t_kernel_1);
double bw = (N * sizeof(dtype)) / (t_kernel_1 * 1e9);
fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);
/* copy result back from GPU */
CUDA_CHECK_ERROR (cudaMemcpy (&h_odata, d_odata, sizeof (dtype),
cudaMemcpyDeviceToHost));
/* ================================================== */
/* ================================================== */
/* CPU kernel */
stopwatch_start (timer);
h_cpu = reduce_cpu (h_idata, N);
t_cpu = stopwatch_stop (timer);
fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n",
t_cpu);
/* ================================================== */
if(abs (h_odata - h_cpu) > 1e-5) {
fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
} else {
printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
}
return 0;
}
|
cb7c593bfa5539dd79844e7ba033b2b590457b5f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void calculate_tensors(double* SR, const double* fields, const double* norms, const int num_modes, const int Nx) {
unsigned int full_thread_idx = threadIdx.x + blockIdx.x*blockDim.x;
// Calculate the index
unsigned int nmp4 = num_modes*num_modes*num_modes*num_modes;
unsigned int Nxnm = Nx*num_modes;
if (full_thread_idx >= nmp4) {
return;
}
// Turn linear index into components
unsigned int midx1 = full_thread_idx % num_modes;
unsigned int midx2 = (full_thread_idx/num_modes) % num_modes;
unsigned int midx3 = (full_thread_idx/num_modes/num_modes) % num_modes;
unsigned int midx4 = (full_thread_idx/num_modes/num_modes/num_modes);
// Compute the sum
for (int i = 0; i < Nx; i++) {
for (int j = 0; j < Nx; j++) {
SR[full_thread_idx] += fields[midx1+i*num_modes+j*Nxnm]*fields[midx2+i*num_modes+j*Nxnm]*fields[midx3+i*num_modes+j*Nxnm]*fields[midx4+i*num_modes+j*Nxnm];
}
}
// Normalize
SR[full_thread_idx] /= norms[midx1]*norms[midx2]*norms[midx3]*norms[midx4];
}
|
cb7c593bfa5539dd79844e7ba033b2b590457b5f.cu
|
__global__ void calculate_tensors(double* SR, const double* fields, const double* norms, const int num_modes, const int Nx) {
unsigned int full_thread_idx = threadIdx.x + blockIdx.x*blockDim.x;
// Calculate the index
unsigned int nmp4 = num_modes*num_modes*num_modes*num_modes;
unsigned int Nxnm = Nx*num_modes;
if (full_thread_idx >= nmp4) {
return;
}
// Turn linear index into components
unsigned int midx1 = full_thread_idx % num_modes;
unsigned int midx2 = (full_thread_idx/num_modes) % num_modes;
unsigned int midx3 = (full_thread_idx/num_modes/num_modes) % num_modes;
unsigned int midx4 = (full_thread_idx/num_modes/num_modes/num_modes);
// Compute the sum
for (int i = 0; i < Nx; i++) {
for (int j = 0; j < Nx; j++) {
SR[full_thread_idx] += fields[midx1+i*num_modes+j*Nxnm]*fields[midx2+i*num_modes+j*Nxnm]*fields[midx3+i*num_modes+j*Nxnm]*fields[midx4+i*num_modes+j*Nxnm];
}
}
// Normalize
SR[full_thread_idx] /= norms[midx1]*norms[midx2]*norms[midx3]*norms[midx4];
}
|
097a5d95e609309400775f5d70e8546643795f89.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <malloc.h>
#include <stdint.h>
#include <stdio.h>
#ifdef USE_OPENMP
#include <omp.h>
#endif
// Original idea from Martin Roberts ...
// http://extremelearning.com.au/isotropic-blue-noise-point-sets/
// Reference Java implementation from Tommy Ettinger ...
// https://github.com/tommyettinger/sarong/blob/master/src/test/java/sarong/PermutationEtc.java
// Reference C/C++/ISPC implementation from Max Tarpini ...
// https://github.com/RomboDev/Miscellaneous/tree/master/MRbluenoisepointsets
using uint = unsigned int;
inline __device__ uint WangHash(uint seed)
{
seed = (seed ^ 61) ^ (seed >> 16);
seed *= 9;
seed = seed ^ (seed >> 4);
seed *= 0x27d4eb2d;
seed = seed ^ (seed >> 15);
return seed;
}
// https://github.com/tommyettinger/sarong/blob/master/src/test/java/sarong/PermutationEtc.java
inline __device__ int NextIntBounded(int bound, uint64_t & stateA, uint64_t & stateB)
{
const uint64_t s = (stateA += 0xC6BC279692B5C323ULL);
const uint64_t z = ((s < 0x800000006F17146DULL) ? stateB : (stateB += 0x9479D2858AF899E6)) * (s ^ s >> 31);
return (int)(bound * ((z ^ z >> 25) & 0xFFFFFFFFULL) >> 32);
}
template <typename T>
inline __device__ void Swap(T & lhs, T & rhs)
{
const T tmp = lhs;
lhs = rhs;
rhs = tmp;
}
inline __device__ uint LaneId()
{
uint ret;
asm volatile("mov.u32 %0, %laneid;" : "=r"(ret));
return ret;
}
// Set the k-th bit of x to 1
inline __device__ void SetBit(uint & x, uint k)
{
#if 0
x |= (1 << k);
#else
asm volatile("bfi.b32 %0, 0x1, %1, %2, 0x1;" : "=r"(x) : "r"(x), "r"(k));
#endif
}
// Extracts k-th bit from x
inline __device__ uint ReadBit(uint x, uint k)
{
#if 0
return (x >> k) & 0x1;
#else
uint ret;
asm volatile("bfe.u32 %0, %1, %2, 0x1;" : "=r"(ret) : "r"(x), "r"(k));
return ret;
#endif
}
// The array of bit submasks is manually indexed to avoid spilling to local memory (i.e. per-thread global memory)
// because we cannot dynamically index registers in CUDA
// (cf slide 5 of https://developer.download.nvidia.com/CUDA/training/register_spilling.pdf)
template <uint N>
inline __device__ void Bitmask_SetBit(uint (&bitSubmasks)[N], uint k)
{
static_assert(N >= 1 && N <= 4, "N must be in [1, 4]");
if(N == 1) // [0, 32)
{
SetBit(bitSubmasks[0], k);
}
else if(N == 2) // [0, 64)
{
const uint i = k < 32 ? k : k - 32;
k < 32 ? SetBit(bitSubmasks[0], i) : SetBit(bitSubmasks[1], i);
}
else if(N == 3) // [0, 96)
{
const uint i = k < 32 ? k : (k < 64 ? k - 32 : k - 64);
k < 32 ? SetBit(bitSubmasks[0], i) : (k < 64 ? SetBit(bitSubmasks[1], i) : SetBit(bitSubmasks[2], i));
}
else if(N == 4) // [0, 128)
{
const uint i = k < 64 ? (k < 32 ? k : k - 32) : (k < 96 ? k - 64 : k - 96);
k < 64 ? (k < 32 ? SetBit(bitSubmasks[0], i) : SetBit(bitSubmasks[1], i)) : (k < 96 ? SetBit(bitSubmasks[2], i) : SetBit(bitSubmasks[3], i));
}
}
template <uint N>
inline __device__ uint Bitmask_ReadBit(uint const (&bitSubmasks)[N], uint k)
{
static_assert(N >= 1 && N <= 4, "N must be in [1, 4]");
if(N == 1) // [0, 32)
{
return ReadBit(bitSubmasks[0], k);
}
else if(N == 2) // [0, 64)
{
const uint i = k < 32 ? k : k - 32;
return k < 32 ? ReadBit(bitSubmasks[0], i) : ReadBit(bitSubmasks[1], i);
}
else if(N == 3) // [0, 96)
{
const uint i = k < 32 ? k : (k < 64 ? k - 32 : k - 64);
return k < 32 ? ReadBit(bitSubmasks[0], i) : (k < 64 ? ReadBit(bitSubmasks[1], i) : ReadBit(bitSubmasks[2], i));
}
else if(N == 4) // [0, 128)
{
const uint i = k < 64 ? (k < 32 ? k : k - 32) : (k < 96 ? k - 64 : k - 96);
return k < 64 ? (k < 32 ? ReadBit(bitSubmasks[0], i) : ReadBit(bitSubmasks[1], i)) : (k < 96 ? ReadBit(bitSubmasks[2], i) : ReadBit(bitSubmasks[3], i));
}
return 0;
}
template <uint NumSubBitmasks>
__global__ void BalancedPermutationsImpl(int * balancedPermutations, int * atomicCounter, const uint numThreadsPerGroup, const uint permLen, const uint numPerms)
{
// The items and the deltas share the same shared memory array
// When a new item is stored, the delta that occupied its slot is moved to the slot of the delta consumed during that iteration
// 1st iteration: | delta[0] | ... | delta[permLen-1]
// 2nd iteration: | items[1] | ... | delta[permLen-1] or delta[0]
// ...
// permLen-th iteration: | items[1] | items[2] | ... | items[permLen-1] | unused
// Note: item 0 is stored outside of shared memory array (in register)
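// Illustrative trace of the shared layout for one thread (permLen = 4, thread stride omitted):
//   before iteration 0: | d0 | d1 | d2 | d3 |
//   if iteration 0 consumes the delta in slot 2: | item | d1 | d0 | d3 |
//   (slot 0 now holds the newly found item and the displaced d0 is parked in slot 2,
//    so later iterations, which scan deltas from slot i onward, never read a stale slot)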
extern __shared__ char SharedMem[];
char * items = &SharedMem[0];
char * delta = &SharedMem[0]; // Permutation length up to 126 (because deltas are stored in char type)
const uint laneId = LaneId();
const uint gtid = blockIdx.x * blockDim.x + threadIdx.x;
uint64_t stateA = WangHash(gtid);
uint64_t stateB = WangHash(gtid + blockDim.x * gridDim.x);
const uint halfPermLen = permLen >> 1;
while(true)
{
uint numBalancedPermsFound = 0;
if(laneId == 0) numBalancedPermsFound = *atomicCounter; // Only lane 0 reads the atomic counter...
numBalancedPermsFound = __shfl_sync(0xFFFFFFFF, numBalancedPermsFound, 0); // ...that is then broadcasted to the other lanes
if(numBalancedPermsFound >= numPerms) return;
// Generate a random permutation of deltas
for(int i = 0, smi = threadIdx.x; i < halfPermLen; ++i, smi += numThreadsPerGroup)
{
delta[smi] = i + 1;
delta[smi + numThreadsPerGroup * halfPermLen] = ~i;
}
// Shuffle delta array in-place
for (int i = permLen, smi = threadIdx.x + numThreadsPerGroup * (permLen-1); i > 1; --i, smi -= numThreadsPerGroup)
{
Swap(delta[smi], delta[threadIdx.x + numThreadsPerGroup * NextIntBounded(i, stateA, stateB)]);
}
// Try to generate a balanced permutation from the random deltas
const uint item0 = NextIntBounded(permLen, stateA, stateB) + 1;
uint usedItemMasks[NumSubBitmasks] = { 0 };
Bitmask_SetBit(usedItemMasks, item0 - 1);
uint currItem = item0;
bool bFoundBalancedPerm = true;
for(int i = 0, smi = threadIdx.x; i < permLen-1; ++i, smi += numThreadsPerGroup)
{
bool bFoundIthItem = false;
for(int j = i, smj = smi; j < permLen; ++j, smj += numThreadsPerGroup) // Starts from i because the items and deltas share the same array
{
const int t = currItem + delta[smj];
if(t > 0 && t <= permLen)
{
const uint usedItemIdx = t - 1;
const bool bUsedItem = Bitmask_ReadBit(usedItemMasks, usedItemIdx);
if(!bUsedItem)
{
// The items and deltas share the same array so before updating the i-th item
// we need to move the delta value that stands there
delta[smj] = delta[smi];
items[smi] = t;
currItem = t;
Bitmask_SetBit(usedItemMasks, usedItemIdx);
bFoundIthItem = true;
break;
}
}
}
if(!bFoundIthItem)
{
bFoundBalancedPerm = false;
break;
}
}
// Write the found balanced permutation to global memory (if the limit has not been reached)
if(bFoundBalancedPerm)
{
// TODO: check that we didn't already write the permutation using a bloom filter
const uint m = __activemask();
const uint laneIdOfFirstActiveLane = __ffs(m) - 1;
const uint numActiveLanes = __popc(m);
// Only first active lane increments the atomic counter...
if(laneId == laneIdOfFirstActiveLane)
numBalancedPermsFound = atomicAdd(atomicCounter, numActiveLanes);
// ...and then broadcast the counter value to the other lanes
numBalancedPermsFound = __shfl_sync(m, numBalancedPermsFound, laneIdOfFirstActiveLane);
const uint laneLTMask = (1 << laneId) - 1;
const uint idxAmongActiveLanes = __popc(m & laneLTMask);
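// Each active lane thus gets a unique output slot: the warp performs a single atomicAdd
// and every lane offsets into the reserved range by its rank among the active lanes.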
const uint balancedPermIdx = numBalancedPermsFound + idxAmongActiveLanes;
if(balancedPermIdx >= numPerms) return; // >= so that index numPerms (one past the end of the output buffer) is rejected
// Write permutation to output
int * output = &balancedPermutations[balancedPermIdx * permLen];
output[0] = item0;
for(int k = 1, smk = threadIdx.x; k < permLen; ++k, smk += numThreadsPerGroup)
{
output[k] = items[smk];
}
}
}
}
void BalancedPermutations(uint numGroups, uint numThreadsPerGroup, size_t sharedMemByteSize, int * balancedPermutations, int * atomicCounter, uint permLen, uint numPerms)
{
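// permLen "used item" flags need ceil(permLen / 32) 32-bit words, hence the template
// dispatch on the number of sub-bitmasks; 126 is the hard cap because the shared-memory
// deltas are stored as char.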
if(permLen <= 32)
hipLaunchKernelGGL(( BalancedPermutationsImpl<1>), dim3(numGroups), dim3(numThreadsPerGroup), sharedMemByteSize, 0, balancedPermutations, atomicCounter, numThreadsPerGroup, permLen, numPerms);
else if(permLen <= 64)
hipLaunchKernelGGL(( BalancedPermutationsImpl<2>), dim3(numGroups), dim3(numThreadsPerGroup), sharedMemByteSize, 0, balancedPermutations, atomicCounter, numThreadsPerGroup, permLen, numPerms);
else if(permLen <= 96)
hipLaunchKernelGGL(( BalancedPermutationsImpl<3>), dim3(numGroups), dim3(numThreadsPerGroup), sharedMemByteSize, 0, balancedPermutations, atomicCounter, numThreadsPerGroup, permLen, numPerms);
else if(permLen <= 126)
hipLaunchKernelGGL(( BalancedPermutationsImpl<4>), dim3(numGroups), dim3(numThreadsPerGroup), sharedMemByteSize, 0, balancedPermutations, atomicCounter, numThreadsPerGroup, permLen, numPerms);
else
printf("Permutation length above 126 are not supported!\n");
}
void SanityChecks(int * permutations, int permLen, int numPerms)
{
const int k = permLen / 2;
const int numDeltas = 2 * k + 1;
// Check that we have valid permutations
#ifdef USE_OPENMP
int * mcounts = reinterpret_cast<int*>(alloca(numDeltas * sizeof(int) * omp_get_max_threads()));
#pragma omp parallel for
for(int i = 0; i < numPerms; ++i)
{
int * counts = mcounts + omp_get_thread_num() * numDeltas;
#else
int * counts = reinterpret_cast<int*>(alloca(numDeltas * sizeof(int)));
for(int i = 0; i < numPerms; ++i)
{
#endif
memset(counts, 0, numDeltas * sizeof(int));
int * perm = &permutations[i * permLen];
for(int j = 0; j < permLen; ++j)
{
if(perm[j] < 1 || perm[j] > permLen) // perm[j] is in [1, permLen]
{
printf("Invalid value!!!\n");
break;
}
else
{
if(++counts[perm[j]-1] > 1)
{
printf("Invalid permutation!!!\n");
break;
}
}
}
}
// Check that we have balanced permutations
#pragma omp parallel for
for(int i = 0; i < numPerms; ++i)
{
#ifdef USE_OPENMP
int * counts = mcounts + omp_get_thread_num() * numDeltas;
#endif
memset(counts, 0, numDeltas * sizeof(int));
int * perm = &permutations[i * permLen];
for(int j = 1; j <= permLen; ++j)
{
int d = j != permLen ? (perm[j] - perm[j-1]) : (perm[0] - perm[permLen-1]);
if(d < -k || d > k)
{
printf("Unbalanced permutation: delta too big!!!\n");
break;
}
else
{
if(++counts[d + k] > 1)
{
printf("Unbalanced permutation: non-unique delta!!!\n");
break;
}
}
}
}
}
int main(int argc, char const * argv[])
{
uint PermLen = 32;
uint NumPerms = 10;
bool bParsingSuccess = true;
bool bPrintPerms = true;
bool bPrintTimings = (argc == 1);
bool bPerformSanityChecks = (argc == 1);
for(int i = 1; i < argc && argv[i][0] == '-'; ++i)
{
switch(argv[i][1])
{
case 'l': { ++i; if(i >= argc) { bParsingSuccess = false; break; } PermLen = atoi(argv[i]); break; }
case 'n': { ++i; if(i >= argc) { bParsingSuccess = false; break; } NumPerms = atoi(argv[i]); break; }
case 't': { bPrintTimings = true; break; }
case 'c': { bPerformSanityChecks = true; break; }
case 's': { bPrintPerms = false; break; }
default: bParsingSuccess = false; break;
}
}
if(!bParsingSuccess)
{
fprintf(stderr, "Failed to parse command line arguments\n");
fprintf(stderr, "Usage: %s -l <permutation length> -n <number of permutations to generate> [-t] [-c]\n", argv[0]);
exit(EXIT_FAILURE);
}
if(PermLen & 1 || PermLen > 126)
{
fprintf(stderr, "Permutation length must be even and at most equal to 126.\n");
exit(EXIT_FAILURE);
}
// Select "best" device
int numDevices;
hipGetDeviceCount(&numDevices);
int deviceId = 0;
int maxNumMultiprocessors = 0;
for(int i = 0; i < numDevices; ++i)
{
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
if(prop.multiProcessorCount > maxNumMultiprocessors)
{
maxNumMultiprocessors = prop.multiProcessorCount;
deviceId = i;
}
}
hipSetDevice(deviceId);
const int numSMs = maxNumMultiprocessors;
const int numGroupsPerSM = 8;
const int numGroups = numSMs * numGroupsPerSM;
const int numThreadsPerGroup = 128;
int * atomicCounter;
hipMalloc(&atomicCounter, sizeof(int));
hipMemset(atomicCounter, 0, sizeof(int));
const size_t balancedPermutationsByteSize = NumPerms * PermLen * sizeof(int);
int * dBalancedPermutations;
hipMalloc(&dBalancedPermutations, balancedPermutationsByteSize);
const size_t sharedMemByteSize = numThreadsPerGroup * PermLen * sizeof(char);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
BalancedPermutations(numGroups, numThreadsPerGroup, sharedMemByteSize, dBalancedPermutations, atomicCounter, PermLen, NumPerms);
hipEventRecord(stop);
hipEventSynchronize(stop);
float elapsedTime_ms = 0;
hipEventElapsedTime(&elapsedTime_ms, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
if(bPrintTimings)
printf("Generated %d balanced permutations of length %d in %.3fms (%.3f permutations/s)\n\n", NumPerms, PermLen, elapsedTime_ms, (NumPerms * 1000.f / elapsedTime_ms));
int * hBalancedPermutations = reinterpret_cast<int*>(malloc(balancedPermutationsByteSize));
hipMemcpy(hBalancedPermutations, dBalancedPermutations, balancedPermutationsByteSize, hipMemcpyDeviceToHost);
if(bPerformSanityChecks)
SanityChecks(hBalancedPermutations, PermLen, NumPerms);
if(bPrintPerms)
{
for(uint i = 0; i < NumPerms; ++i)
{
int * items = &hBalancedPermutations[i * PermLen];
printf("%2d", items[0]);
for(uint j = 1; j < PermLen; ++j)
{
printf(", %2d", items[j]);
}
if(i < NumPerms-1) printf("\n");
}
}
free(hBalancedPermutations);
hipFree(dBalancedPermutations);
hipFree(atomicCounter);
return 0;
}
|
097a5d95e609309400775f5d70e8546643795f89.cu
|
#include <cuda.h>
#include <malloc.h>
#include <stdint.h>
#include <stdio.h>
#ifdef USE_OPENMP
#include <omp.h>
#endif
// Original idea from Martin Roberts ...
// http://extremelearning.com.au/isotropic-blue-noise-point-sets/
// Reference Java implementation from Tommy Ettinger ...
// https://github.com/tommyettinger/sarong/blob/master/src/test/java/sarong/PermutationEtc.java
// Reference C/C++/ISPC implementation from Max Tarpini ...
// https://github.com/RomboDev/Miscellaneous/tree/master/MRbluenoisepointsets
using uint = unsigned int;
inline __device__ uint WangHash(uint seed)
{
seed = (seed ^ 61) ^ (seed >> 16);
seed *= 9;
seed = seed ^ (seed >> 4);
seed *= 0x27d4eb2d;
seed = seed ^ (seed >> 15);
return seed;
}
// https://github.com/tommyettinger/sarong/blob/master/src/test/java/sarong/PermutationEtc.java
inline __device__ int NextIntBounded(int bound, uint64_t & stateA, uint64_t & stateB)
{
const uint64_t s = (stateA += 0xC6BC279692B5C323ULL);
const uint64_t z = ((s < 0x800000006F17146DULL) ? stateB : (stateB += 0x9479D2858AF899E6)) * (s ^ s >> 31);
return (int)(bound * ((z ^ z >> 25) & 0xFFFFFFFFULL) >> 32);
}
template <typename T>
inline __device__ void Swap(T & lhs, T & rhs)
{
const T tmp = lhs;
lhs = rhs;
rhs = tmp;
}
inline __device__ uint LaneId()
{
uint ret;
asm volatile("mov.u32 %0, %laneid;" : "=r"(ret));
return ret;
}
// Set the k-th bit of x to 1
inline __device__ void SetBit(uint & x, uint k)
{
#if 0
x |= (1 << k);
#else
asm volatile("bfi.b32 %0, 0x1, %1, %2, 0x1;" : "=r"(x) : "r"(x), "r"(k));
#endif
}
// Extracts k-th bit from x
inline __device__ uint ReadBit(uint x, uint k)
{
#if 0
return (x >> k) & 0x1;
#else
uint ret;
asm volatile("bfe.u32 %0, %1, %2, 0x1;" : "=r"(ret) : "r"(x), "r"(k));
return ret;
#endif
}
// The array of bit submasks is manually indexed to avoid spilling to local memory (i.e. per-thread global memory)
// because we cannot dynamically index registers in CUDA
// (cf slide 5 of https://developer.download.nvidia.com/CUDA/training/register_spilling.pdf)
template <uint N>
inline __device__ void Bitmask_SetBit(uint (&bitSubmasks)[N], uint k)
{
static_assert(N >= 1 && N <= 4, "N must be in [1, 4]");
if(N == 1) // [0, 32)
{
SetBit(bitSubmasks[0], k);
}
else if(N == 2) // [0, 64)
{
const uint i = k < 32 ? k : k - 32;
k < 32 ? SetBit(bitSubmasks[0], i) : SetBit(bitSubmasks[1], i);
}
else if(N == 3) // [0, 96)
{
const uint i = k < 32 ? k : (k < 64 ? k - 32 : k - 64);
k < 32 ? SetBit(bitSubmasks[0], i) : (k < 64 ? SetBit(bitSubmasks[1], i) : SetBit(bitSubmasks[2], i));
}
else if(N == 4) // [0, 128)
{
const uint i = k < 64 ? (k < 32 ? k : k - 32) : (k < 96 ? k - 64 : k - 96);
k < 64 ? (k < 32 ? SetBit(bitSubmasks[0], i) : SetBit(bitSubmasks[1], i)) : (k < 96 ? SetBit(bitSubmasks[2], i) : SetBit(bitSubmasks[3], i));
}
}
template <uint N>
inline __device__ uint Bitmask_ReadBit(uint const (&bitSubmasks)[N], uint k)
{
static_assert(N >= 1 && N <= 4, "N must be in [1, 4]");
if(N == 1) // [0, 32)
{
return ReadBit(bitSubmasks[0], k);
}
else if(N == 2) // [0, 64)
{
const uint i = k < 32 ? k : k - 32;
return k < 32 ? ReadBit(bitSubmasks[0], i) : ReadBit(bitSubmasks[1], i);
}
else if(N == 3) // [0, 96)
{
const uint i = k < 32 ? k : (k < 64 ? k - 32 : k - 64);
return k < 32 ? ReadBit(bitSubmasks[0], i) : (k < 64 ? ReadBit(bitSubmasks[1], i) : ReadBit(bitSubmasks[2], i));
}
else if(N == 4) // [0, 128)
{
const uint i = k < 64 ? (k < 32 ? k : k - 32) : (k < 96 ? k - 64 : k - 96);
return k < 64 ? (k < 32 ? ReadBit(bitSubmasks[0], i) : ReadBit(bitSubmasks[1], i)) : (k < 96 ? ReadBit(bitSubmasks[2], i) : ReadBit(bitSubmasks[3], i));
}
return 0;
}
template <uint NumSubBitmasks>
__global__ void BalancedPermutationsImpl(int * balancedPermutations, int * atomicCounter, const uint numThreadsPerGroup, const uint permLen, const uint numPerms)
{
// The items and the deltas share the same shared memory array
// When a new item is stored, the delta that occupied its slot is moved to the slot of the delta consumed during that iteration
// 1st iteration: | delta[0] | ... | delta[permLen-1]
// 2nd iteration: | items[1] | ... | delta[permLen-1] or delta[0]
// ...
// permLen-th iteration: | items[1] | items[2] | ... | items[permLen-1] | unused
// Note: item 0 is stored outside of shared memory array (in register)
extern __shared__ char SharedMem[];
char * items = &SharedMem[0];
char * delta = &SharedMem[0]; // Permutation length up to 126 (because deltas are stored in char type)
const uint laneId = LaneId();
const uint gtid = blockIdx.x * blockDim.x + threadIdx.x;
uint64_t stateA = WangHash(gtid);
uint64_t stateB = WangHash(gtid + blockDim.x * gridDim.x);
const uint halfPermLen = permLen >> 1;
while(true)
{
uint numBalancedPermsFound = 0;
if(laneId == 0) numBalancedPermsFound = *atomicCounter; // Only lane 0 reads the atomic counter...
numBalancedPermsFound = __shfl_sync(0xFFFFFFFF, numBalancedPermsFound, 0); // ...that is then broadcasted to the other lanes
if(numBalancedPermsFound >= numPerms) return;
// Generate a random permutation of deltas
for(int i = 0, smi = threadIdx.x; i < halfPermLen; ++i, smi += numThreadsPerGroup)
{
delta[smi] = i + 1;
delta[smi + numThreadsPerGroup * halfPermLen] = ~i;
}
// Shuffle delta array in-place
for (int i = permLen, smi = threadIdx.x + numThreadsPerGroup * (permLen-1); i > 1; --i, smi -= numThreadsPerGroup)
{
Swap(delta[smi], delta[threadIdx.x + numThreadsPerGroup * NextIntBounded(i, stateA, stateB)]);
}
// Try to generate a balanced permutation from the random deltas
const uint item0 = NextIntBounded(permLen, stateA, stateB) + 1;
uint usedItemMasks[NumSubBitmasks] = { 0 };
Bitmask_SetBit(usedItemMasks, item0 - 1);
uint currItem = item0;
bool bFoundBalancedPerm = true;
for(int i = 0, smi = threadIdx.x; i < permLen-1; ++i, smi += numThreadsPerGroup)
{
bool bFoundIthItem = false;
for(int j = i, smj = smi; j < permLen; ++j, smj += numThreadsPerGroup) // Starts from i because the items and deltas share the same array
{
const int t = currItem + delta[smj];
if(t > 0 && t <= permLen)
{
const uint usedItemIdx = t - 1;
const bool bUsedItem = Bitmask_ReadBit(usedItemMasks, usedItemIdx);
if(!bUsedItem)
{
// The items and deltas share the same array so before updating the i-th item
// we need to move the delta value that stands there
delta[smj] = delta[smi];
items[smi] = t;
currItem = t;
Bitmask_SetBit(usedItemMasks, usedItemIdx);
bFoundIthItem = true;
break;
}
}
}
if(!bFoundIthItem)
{
bFoundBalancedPerm = false;
break;
}
}
// Write the found balanced permutation to global memory (if the limit has not been reached)
if(bFoundBalancedPerm)
{
// TODO: check that we didn't already write the permutation using a bloom filter
const uint m = __activemask();
const uint laneIdOfFirstActiveLane = __ffs(m) - 1;
const uint numActiveLanes = __popc(m);
// Only first active lane increments the atomic counter...
if(laneId == laneIdOfFirstActiveLane)
numBalancedPermsFound = atomicAdd(atomicCounter, numActiveLanes);
// ...and then broadcast the counter value to the other lanes
numBalancedPermsFound = __shfl_sync(m, numBalancedPermsFound, laneIdOfFirstActiveLane);
const uint laneLTMask = (1 << laneId) - 1;
const uint idxAmongActiveLanes = __popc(m & laneLTMask);
const uint balancedPermIdx = numBalancedPermsFound + idxAmongActiveLanes;
if(balancedPermIdx >= numPerms) return; // >= so that index numPerms (one past the end of the output buffer) is rejected
// Write permutation to output
int * output = &balancedPermutations[balancedPermIdx * permLen];
output[0] = item0;
for(int k = 1, smk = threadIdx.x; k < permLen; ++k, smk += numThreadsPerGroup)
{
output[k] = items[smk];
}
}
}
}
void BalancedPermutations(uint numGroups, uint numThreadsPerGroup, size_t sharedMemByteSize, int * balancedPermutations, int * atomicCounter, uint permLen, uint numPerms)
{
if(permLen <= 32)
BalancedPermutationsImpl<1><<<numGroups, numThreadsPerGroup, sharedMemByteSize>>>(balancedPermutations, atomicCounter, numThreadsPerGroup, permLen, numPerms);
else if(permLen <= 64)
BalancedPermutationsImpl<2><<<numGroups, numThreadsPerGroup, sharedMemByteSize>>>(balancedPermutations, atomicCounter, numThreadsPerGroup, permLen, numPerms);
else if(permLen <= 96)
BalancedPermutationsImpl<3><<<numGroups, numThreadsPerGroup, sharedMemByteSize>>>(balancedPermutations, atomicCounter, numThreadsPerGroup, permLen, numPerms);
else if(permLen <= 126)
BalancedPermutationsImpl<4><<<numGroups, numThreadsPerGroup, sharedMemByteSize>>>(balancedPermutations, atomicCounter, numThreadsPerGroup, permLen, numPerms);
else
printf("Permutation length above 126 are not supported!\n");
}
void SanityChecks(int * permutations, int permLen, int numPerms)
{
const int k = permLen / 2;
const int numDeltas = 2 * k + 1;
// Check that we have valid permutations
#ifdef USE_OPENMP
int * mcounts = reinterpret_cast<int*>(alloca(numDeltas * sizeof(int) * omp_get_max_threads()));
#pragma omp parallel for
for(int i = 0; i < numPerms; ++i)
{
int * counts = mcounts + omp_get_thread_num() * numDeltas;
#else
int * counts = reinterpret_cast<int*>(alloca(numDeltas * sizeof(int)));
for(int i = 0; i < numPerms; ++i)
{
#endif
memset(counts, 0, numDeltas * sizeof(int));
int * perm = &permutations[i * permLen];
for(int j = 0; j < permLen; ++j)
{
if(perm[j] < 1 || perm[j] > permLen) // perm[j] is in [1, permLen]
{
printf("Invalid value!!!\n");
break;
}
else
{
if(++counts[perm[j]-1] > 1)
{
printf("Invalid permutation!!!\n");
break;
}
}
}
}
// Check that we have balanced permutations
#pragma omp parallel for
for(int i = 0; i < numPerms; ++i)
{
#ifdef USE_OPENMP
int * counts = mcounts + omp_get_thread_num() * numDeltas;
#endif
memset(counts, 0, numDeltas * sizeof(int));
int * perm = &permutations[i * permLen];
for(int j = 1; j <= permLen; ++j)
{
int d = j != permLen ? (perm[j] - perm[j-1]) : (perm[0] - perm[permLen-1]);
if(d < -k || d > k)
{
printf("Unbalanced permutation: delta too big!!!\n");
break;
}
else
{
if(++counts[d + k] > 1)
{
printf("Unbalanced permutation: non-unique delta!!!\n");
break;
}
}
}
}
}
int main(int argc, char const * argv[])
{
uint PermLen = 32;
uint NumPerms = 10;
bool bParsingSuccess = true;
bool bPrintPerms = true;
bool bPrintTimings = (argc == 1);
bool bPerformSanityChecks = (argc == 1);
for(int i = 1; i < argc && argv[i][0] == '-'; ++i)
{
switch(argv[i][1])
{
case 'l': { ++i; if(i >= argc) { bParsingSuccess = false; break; } PermLen = atoi(argv[i]); break; }
case 'n': { ++i; if(i >= argc) { bParsingSuccess = false; break; } NumPerms = atoi(argv[i]); break; }
case 't': { bPrintTimings = true; break; }
case 'c': { bPerformSanityChecks = true; break; }
case 's': { bPrintPerms = false; break; }
default: bParsingSuccess = false; break;
}
}
if(!bParsingSuccess)
{
fprintf(stderr, "Failed to parse command line arguments\n");
fprintf(stderr, "Usage: %s -l <permutation length> -n <number of permutations to generate> [-t] [-c]\n", argv[0]);
exit(EXIT_FAILURE);
}
if(PermLen & 1 || PermLen > 126)
{
fprintf(stderr, "Permutation length must be even and at most equal to 126.\n");
exit(EXIT_FAILURE);
}
// Select "best" device
int numDevices;
cudaGetDeviceCount(&numDevices);
int deviceId = 0;
int maxNumMultiprocessors = 0;
for(int i = 0; i < numDevices; ++i)
{
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
if(prop.multiProcessorCount > maxNumMultiprocessors)
{
maxNumMultiprocessors = prop.multiProcessorCount;
deviceId = i;
}
}
cudaSetDevice(deviceId);
const int numSMs = maxNumMultiprocessors;
const int numGroupsPerSM = 8;
const int numGroups = numSMs * numGroupsPerSM;
const int numThreadsPerGroup = 128;
int * atomicCounter;
cudaMalloc(&atomicCounter, sizeof(int));
cudaMemset(atomicCounter, 0, sizeof(int));
const size_t balancedPermutationsByteSize = NumPerms * PermLen * sizeof(int);
int * dBalancedPermutations;
cudaMalloc(&dBalancedPermutations, balancedPermutationsByteSize);
const size_t sharedMemByteSize = numThreadsPerGroup * PermLen * sizeof(char);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
BalancedPermutations(numGroups, numThreadsPerGroup, sharedMemByteSize, dBalancedPermutations, atomicCounter, PermLen, NumPerms);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float elapsedTime_ms = 0;
cudaEventElapsedTime(&elapsedTime_ms, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
if(bPrintTimings)
printf("Generated %d balanced permutations of length %d in %.3fms (%.3f permutations/s)\n\n", NumPerms, PermLen, elapsedTime_ms, (NumPerms * 1000.f / elapsedTime_ms));
int * hBalancedPermutations = reinterpret_cast<int*>(malloc(balancedPermutationsByteSize));
cudaMemcpy(hBalancedPermutations, dBalancedPermutations, balancedPermutationsByteSize, cudaMemcpyDeviceToHost);
if(bPerformSanityChecks)
SanityChecks(hBalancedPermutations, PermLen, NumPerms);
if(bPrintPerms)
{
for(uint i = 0; i < NumPerms; ++i)
{
int * items = &hBalancedPermutations[i * PermLen];
printf("%2d", items[0]);
for(uint j = 1; j < PermLen; ++j)
{
printf(", %2d", items[j]);
}
if(i < NumPerms-1) printf("\n");
}
}
free(hBalancedPermutations);
cudaFree(dBalancedPermutations);
cudaFree(atomicCounter);
return 0;
}
|
c8143d384c3932bffae307ff3edadce6f6550946.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "gtest/gtest.h"
#include "ATen/ATen.h"
#include "ATen/hip/HIPContext.h"
#include "ATen/hip/NumericLimits.cuh"
#include "hip/hip_runtime.h"
#include "hip/hip_fp16.h"
#include "hip/hip_runtime.h"
#include <assert.h>
using namespace at;
__device__ void test(){
// test half construction and implicit conversions in device
assert(Half(3) == Half(3.0f));
assert(static_cast<Half>(3.0f) == Half(3.0f));
// there is no float <=> __half implicit conversion
assert(static_cast<Half>(3.0f) == 3.0f);
__half a = __float2half(3.0f);
__half b = __float2half(2.0f);
__half c = a - Half(b);
assert(static_cast<Half>(c) == Half(1.0));
// asserting that the functions used on
// half types give almost equivalent results when using
// functions on double.
// The purpose of these asserts is to test the device-side
// half API for the common mathematical functions.
// Note: When calling std math functions from device, don't
// use the std namespace, but just "::" so that the function
// gets resolved from nvcc math_functions.hpp
float threshold = 0.00001;
assert(::abs(::lgamma(Half(10.0)) - ::lgamma(10.0f)) <= threshold);
assert(::abs(::exp(Half(1.0)) - ::exp(1.0f)) <= threshold);
assert(::abs(::log(Half(1.0)) - ::log(1.0f)) <= threshold);
assert(::abs(::log10(Half(1000.0)) - ::log10(1000.0f)) <= threshold);
assert(::abs(::log1p(Half(0.0)) - ::log1p(0.0f)) <= threshold);
assert(::abs(::log2(Half(1000.0)) - ::log2(1000.0f)) <= threshold);
assert(::abs(::expm1(Half(1.0)) - ::expm1(1.0f)) <= threshold);
assert(::abs(::cos(Half(0.0)) - ::cos(0.0f)) <= threshold);
assert(::abs(::sin(Half(0.0)) - ::sin(0.0f)) <= threshold);
assert(::abs(::sqrt(Half(100.0)) - ::sqrt(100.0f)) <= threshold);
assert(::abs(::ceil(Half(2.4)) - ::ceil(2.4f)) <= threshold);
assert(::abs(::floor(Half(2.7)) - ::floor(2.7f)) <= threshold);
assert(::abs(::trunc(Half(2.7)) - ::trunc(2.7f)) <= threshold);
assert(::abs(::acos(Half(-1.0)) - ::acos(-1.0f)) <= threshold);
assert(::abs(::cosh(Half(1.0)) - ::cosh(1.0f)) <= threshold);
assert(::abs(::acosh(Half(1.0)) - ::acosh(1.0f)) <= threshold);
assert(::abs(::asin(Half(1.0)) - ::asin(1.0f)) <= threshold);
assert(::abs(::sinh(Half(1.0)) - ::sinh(1.0f)) <= threshold);
assert(::abs(::asinh(Half(1.0)) - ::asinh(1.0f)) <= threshold);
assert(::abs(::tan(Half(0.0)) - ::tan(0.0f)) <= threshold);
assert(::abs(::atan(Half(1.0)) - ::atan(1.0f)) <= threshold);
assert(::abs(::tanh(Half(1.0)) - ::tanh(1.0f)) <= threshold);
assert(::abs(::erf(Half(10.0)) - ::erf(10.0f)) <= threshold);
assert(::abs(::erfc(Half(10.0)) - ::erfc(10.0f)) <= threshold);
assert(::abs(::abs(Half(-3.0)) - ::abs(-3.0f)) <= threshold);
assert(::abs(::round(Half(2.3)) - ::round(2.3f)) <= threshold);
assert(::abs(::pow(Half(2.0), Half(10.0)) - ::pow(2.0f, 10.0f)) <= threshold);
assert(
::abs(::atan2(Half(7.0), Half(0.0)) - ::atan2(7.0f, 0.0f)) <= threshold);
// note: can't use namespace on isnan and isinf in device code
#ifdef _MSC_VER
// Windows requires this explicit conversion. The reason is unclear;
// see the related issue with clang: https://reviews.llvm.org/D37906
assert(::abs(::isnan((float)Half(0.0)) - ::isnan(0.0f)) <= threshold);
assert(::abs(::isinf((float)Half(0.0)) - ::isinf(0.0f)) <= threshold);
#else
assert(::abs(::isnan(Half(0.0)) - ::isnan(0.0f)) <= threshold);
assert(::abs(::isinf(Half(0.0)) - ::isinf(0.0f)) <= threshold);
#endif
}
__global__ void kernel(){
test();
}
void launch_function(){
hipLaunchKernelGGL(( kernel), dim3(1), dim3(1), 0, 0, );
}
// half common math functions tests in device
TEST(HalfCuda, HalfCuda) {
if (!at::cuda::is_available()) return;
launch_function();
hipError_t err = hipDeviceSynchronize();
bool isEQ = err == hipSuccess;
ASSERT_TRUE(isEQ);
}
|
c8143d384c3932bffae307ff3edadce6f6550946.cu
|
#include "gtest/gtest.h"
#include "ATen/ATen.h"
#include "ATen/cuda/CUDAContext.h"
#include "ATen/cuda/NumericLimits.cuh"
#include "cuda.h"
#include "cuda_fp16.h"
#include "cuda_runtime.h"
#include <assert.h>
using namespace at;
__device__ void test(){
// test half construction and implicit conversions in device
assert(Half(3) == Half(3.0f));
assert(static_cast<Half>(3.0f) == Half(3.0f));
// there is no float <=> __half implicit conversion
assert(static_cast<Half>(3.0f) == 3.0f);
__half a = __float2half(3.0f);
__half b = __float2half(2.0f);
__half c = a - Half(b);
assert(static_cast<Half>(c) == Half(1.0));
// asserting that the functions used on
// half types give almost equivalent results when using
// functions on double.
// The purpose of these asserts is to test the device-side
// half API for the common mathematical functions.
// Note: When calling std math functions from device, don't
// use the std namespace, but just "::" so that the function
// gets resolved from nvcc math_functions.hpp
float threshold = 0.00001;
assert(::abs(::lgamma(Half(10.0)) - ::lgamma(10.0f)) <= threshold);
assert(::abs(::exp(Half(1.0)) - ::exp(1.0f)) <= threshold);
assert(::abs(::log(Half(1.0)) - ::log(1.0f)) <= threshold);
assert(::abs(::log10(Half(1000.0)) - ::log10(1000.0f)) <= threshold);
assert(::abs(::log1p(Half(0.0)) - ::log1p(0.0f)) <= threshold);
assert(::abs(::log2(Half(1000.0)) - ::log2(1000.0f)) <= threshold);
assert(::abs(::expm1(Half(1.0)) - ::expm1(1.0f)) <= threshold);
assert(::abs(::cos(Half(0.0)) - ::cos(0.0f)) <= threshold);
assert(::abs(::sin(Half(0.0)) - ::sin(0.0f)) <= threshold);
assert(::abs(::sqrt(Half(100.0)) - ::sqrt(100.0f)) <= threshold);
assert(::abs(::ceil(Half(2.4)) - ::ceil(2.4f)) <= threshold);
assert(::abs(::floor(Half(2.7)) - ::floor(2.7f)) <= threshold);
assert(::abs(::trunc(Half(2.7)) - ::trunc(2.7f)) <= threshold);
assert(::abs(::acos(Half(-1.0)) - ::acos(-1.0f)) <= threshold);
assert(::abs(::cosh(Half(1.0)) - ::cosh(1.0f)) <= threshold);
assert(::abs(::acosh(Half(1.0)) - ::acosh(1.0f)) <= threshold);
assert(::abs(::asin(Half(1.0)) - ::asin(1.0f)) <= threshold);
assert(::abs(::sinh(Half(1.0)) - ::sinh(1.0f)) <= threshold);
assert(::abs(::asinh(Half(1.0)) - ::asinh(1.0f)) <= threshold);
assert(::abs(::tan(Half(0.0)) - ::tan(0.0f)) <= threshold);
assert(::abs(::atan(Half(1.0)) - ::atan(1.0f)) <= threshold);
assert(::abs(::tanh(Half(1.0)) - ::tanh(1.0f)) <= threshold);
assert(::abs(::erf(Half(10.0)) - ::erf(10.0f)) <= threshold);
assert(::abs(::erfc(Half(10.0)) - ::erfc(10.0f)) <= threshold);
assert(::abs(::abs(Half(-3.0)) - ::abs(-3.0f)) <= threshold);
assert(::abs(::round(Half(2.3)) - ::round(2.3f)) <= threshold);
assert(::abs(::pow(Half(2.0), Half(10.0)) - ::pow(2.0f, 10.0f)) <= threshold);
assert(
::abs(::atan2(Half(7.0), Half(0.0)) - ::atan2(7.0f, 0.0f)) <= threshold);
// note: can't use namespace on isnan and isinf in device code
#ifdef _MSC_VER
// Windows requires this explicit conversion. The reason is unclear;
// see the related issue with clang: https://reviews.llvm.org/D37906
assert(::abs(::isnan((float)Half(0.0)) - ::isnan(0.0f)) <= threshold);
assert(::abs(::isinf((float)Half(0.0)) - ::isinf(0.0f)) <= threshold);
#else
assert(::abs(::isnan(Half(0.0)) - ::isnan(0.0f)) <= threshold);
assert(::abs(::isinf(Half(0.0)) - ::isinf(0.0f)) <= threshold);
#endif
}
__global__ void kernel(){
test();
}
void launch_function(){
kernel<<<1, 1>>>();
}
// half common math functions tests in device
TEST(HalfCuda, HalfCuda) {
if (!at::cuda::is_available()) return;
launch_function();
cudaError_t err = cudaDeviceSynchronize();
bool isEQ = err == cudaSuccess;
ASSERT_TRUE(isEQ);
}
|
da24cb52b7908021c037a0c773f1908c721a8dbd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"Accuracy.cuh"
int CAccuracyGPU::setup(std::vector<Blob<precision>*>& inputs,std::vector<Blob<precision>*>& outputs)
{
outputs[0]->create(1,1,1,1);
m_corrects.create(inputs[0]->num,1,1,1);
return 0;
}
//block<<<1>>>
//thread<<<batch>>>
__global__ void getCorrects(precision* corrects,precision* inputData,precision* labels,int dataChannel)
{
int tid=threadIdx.x;
precision max=0.0;
int maxIndex=0;
for(int i=0;i<dataChannel;i++)
{
int index=tid*dataChannel+i;
if(max<inputData[index])
{
max=inputData[index];
maxIndex=i;
}
}
if(maxIndex==(int)labels[tid])
corrects[tid]=1;
else
corrects[tid]=0;
}
//block<<<1>>>
//thread<<<min(512,batch)>>>
__global__ void getCorrectSum(precision* sum,precision* correct,int batch)
{
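// Each thread accumulates a strided partial sum over the batch into shared memory;
// the block then combines the partials with an offset-halving tree reduction (which
// assumes blockDim.x is a power of two) and thread 0 adds the block total into sum[0].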
extern __shared__ precision shared[];
int tid=threadIdx.x;
shared[tid]=0;
for(int id = tid; id < batch; id += blockDim.x)
shared[tid]+=correct[id];
__syncthreads();
int offset = blockDim.x / 2;
while(offset > 0) {
if(tid < offset) {
shared[tid] += shared[tid + offset];
}
offset >>= 1;
__syncthreads();
}
if(tid == 0)
sum[0] += shared[0];
}
precision CAccuracyGPU::feedforward(std::vector<Blob<precision>*>& bottoms,std::vector<Blob<precision>*>& tops)
{
hipError_t cudaStat=hipSuccess;
hipLaunchKernelGGL(( getCorrects), dim3(1),dim3(bottoms[1]->num), 0, 0, m_corrects.gpuData,
bottoms[0]->gpuData,
bottoms[1]->gpuData,
bottoms[0]->dataChannel
);
cudaStat=hipDeviceSynchronize();
CUDA_ERROR(cudaStat);
int threadNum=512;// must be a power of two because the summation is a tree reduction
hipLaunchKernelGGL(( getCorrectSum), dim3(1),dim3(threadNum),sizeof(precision)*threadNum, 0, tops[0]->gpuData,
m_corrects.gpuData,
bottoms[1]->num
);
cudaStat=hipDeviceSynchronize();
CUDA_ERROR(cudaStat);
return NET_SUCCESS;
}
|
da24cb52b7908021c037a0c773f1908c721a8dbd.cu
|
#include"Accuracy.cuh"
int CAccuracyGPU::setup(std::vector<Blob<precision>*>& inputs,std::vector<Blob<precision>*>& outputs)
{
outputs[0]->create(1,1,1,1);
m_corrects.create(inputs[0]->num,1,1,1);
return 0;
}
//block<<<1>>>
//thread<<<batch>>>
__global__ void getCorrects(precision* corrects,precision* inputData,precision* labels,int dataChannel)
{
int tid=threadIdx.x;
precision max=0.0;
int maxIndex=0;
for(int i=0;i<dataChannel;i++)
{
int index=tid*dataChannel+i;
if(max<inputData[index])
{
max=inputData[index];
maxIndex=i;
}
}
if(maxIndex==(int)labels[tid])
corrects[tid]=1;
else
corrects[tid]=0;
}
//block<<<1>>>
//thread<<<min(512,batch)>>>
__global__ void getCorrectSum(precision* sum,precision* correct,int batch)
{
extern __shared__ precision shared[];
int tid=threadIdx.x;
shared[tid]=0;
for(int id = tid; id < batch; id += blockDim.x)
shared[tid]+=correct[id];
__syncthreads();
int offset = blockDim.x / 2;
while(offset > 0) {
if(tid < offset) {
shared[tid] += shared[tid + offset];
}
offset >>= 1;
__syncthreads();
}
if(tid == 0)
sum[0] += shared[0];
}
precision CAccuracyGPU::feedforward(std::vector<Blob<precision>*>& bottoms,std::vector<Blob<precision>*>& tops)
{
cudaError_t cudaStat=cudaSuccess;
getCorrects<<<1,bottoms[1]->num>>>(m_corrects.gpuData,
bottoms[0]->gpuData,
bottoms[1]->gpuData,
bottoms[0]->dataChannel
);
cudaStat=cudaDeviceSynchronize();
CUDA_ERROR(cudaStat);
int threadNum=512;// must be a power of two because the summation is a tree reduction
getCorrectSum<<<1,threadNum,sizeof(precision)*threadNum>>>(tops[0]->gpuData,
m_corrects.gpuData,
bottoms[1]->num
);
cudaStat=cudaDeviceSynchronize();
CUDA_ERROR(cudaStat);
return NET_SUCCESS;
}
|
50bd65bd3b0bb2db2ed8035b2a899c02eed123af.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void _copy_mat(float *m, float* target, int len){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < len){
target[tid] = m[tid];
}
}
|
50bd65bd3b0bb2db2ed8035b2a899c02eed123af.cu
|
#include "includes.h"
__global__ void _copy_mat(float *m, float* target, int len){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < len){
target[tid] = m[tid];
}
}
|
8a5d01136c107c93d7723497a876ea70e54e9dc2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "al2o3_platform/platform.h"
#include "al2o3_memory/memory.h"
#include "accel_cuda.hpp"
#include <hip/hip_runtime.h>
inline int _ConvertSMVer2Cores(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
// the # of cores per SM)
typedef struct {
int SM; // 0xMm (hexidecimal notation), M = SM Major version,
// and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] = {
{0x30, 192},
{0x32, 192},
{0x35, 192},
{0x37, 192},
{0x50, 128},
{0x52, 128},
{0x53, 128},
{0x60, 64},
{0x61, 128},
{0x62, 128},
{0x70, 64},
{0x72, 64},
{0x75, 64},
{-1, -1}};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1) {
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) {
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
// If we don't find the value, we default to using the previous one
// so the code still runs properly
LOGINFO("MapSMtoCores for SM %d.%d is undefined. Default to use %d Cores/SM\n",
major, minor, nGpuArchCoresPerSM[index - 1].Cores);
return nGpuArchCoresPerSM[index - 1].Cores;
}
inline const char *_ConvertSMVer2ArchName(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
// the GPU Arch name)
typedef struct {
int SM; // 0xMm (hexidecimal notation), M = SM Major version,
// and m = SM minor version
const char *name;
} sSMtoArchName;
sSMtoArchName nGpuArchNameSM[] = {
{0x30, "Kepler"},
{0x32, "Kepler"},
{0x35, "Kepler"},
{0x37, "Kepler"},
{0x50, "Maxwell"},
{0x52, "Maxwell"},
{0x53, "Maxwell"},
{0x60, "Pascal"},
{0x61, "Pascal"},
{0x62, "Pascal"},
{0x70, "Volta"},
{0x72, "Xavier"},
{0x75, "Turing"},
{-1, "Graphics Device"}};
int index = 0;
while (nGpuArchNameSM[index].SM != -1) {
if (nGpuArchNameSM[index].SM == ((major << 4) + minor)) {
return nGpuArchNameSM[index].name;
}
index++;
}
// If we don't find the value, we default to using the previous one
// so the code still runs properly
LOGINFO("MapSMtoArchName for SM %d.%d is undefined. Default to use %s\n",
major,
minor,
nGpuArchNameSM[index - 1].name);
return nGpuArchNameSM[index - 1].name;
}
template<typename T>
void check(T result, char const *const func, const char *const file,
int const line) {
if (result) {
LOGERROR("CUDA error at %s:%d code=%d(%s) \"%s\" \n", file, line,
static_cast<unsigned int>(result), hipGetErrorName(result), func);
}
}
// This will output the proper CUDA error strings in the event
// that a CUDA host call returns an error
#define checkCudaErrors(val) check((val), #val, __FILE__, __LINE__)
struct Cuda {
int deviceIndex;
};
Cuda *AccelCUDA_Create() {
int deviceCount;
int pickedDeviceIndex = -1;
int pickedTotalCores = -1;
hipDeviceProp_t pickedDevice{};
checkCudaErrors(hipGetDeviceCount(&deviceCount));
LOGINFO("--- CUDA Devices ---");
for (int i = 0u; i < deviceCount; ++i) {
hipDeviceProp_t deviceProp;
int computeMode = -1;
checkCudaErrors(hipDeviceGetAttribute(&computeMode, hipDeviceAttributeComputeMode, i));
if(computeMode == hipComputeModeProhibited) continue;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, i));
int const coresPerSM = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
int const totalCores = coresPerSM * deviceProp.multiProcessorCount;
int const computePerf = totalCores * (deviceProp.clockRate/1024);
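// Rough estimate used only to rank devices: clockRate is in kHz, so clockRate/1024 is
// ~MHz; the "2 *" in the GFLOPs print below presumably counts an FMA as two FLOPs, and
// the final /1024 scales the result into ~GFLOPs.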
LOGINFO("%d: %s %s (%d.%d)", i,
deviceProp.name, _ConvertSMVer2ArchName(deviceProp.major, deviceProp.minor), deviceProp.major, deviceProp.minor);
LOGINFO("%d: SMs %d, Cores %d, Total Cores %d Clock %d ~GFLOPs %f", i,
deviceProp.multiProcessorCount, coresPerSM, totalCores, deviceProp.clockRate/1024, ((float)2 * computePerf)/1024.0f);
// for now just pick the biggest new enough device
if (totalCores > pickedTotalCores) {
memcpy(&pickedDevice, &deviceProp, sizeof(hipDeviceProp_t));
pickedDeviceIndex = i;
pickedTotalCores = totalCores;
}
}
LOGINFO("---");
if (pickedDeviceIndex == -1) {
return nullptr;
}
checkCudaErrors(hipSetDevice(pickedDeviceIndex));
Cuda* cuda = (Cuda*)MEMORY_CALLOC(1, sizeof(Cuda));
if(!cuda) return nullptr;
cuda->deviceIndex = pickedDeviceIndex;
return cuda;
}
void AccelCUDA_Destroy(Cuda *cuda) {
if(!cuda) return;
MEMORY_FREE(cuda);
}
|
8a5d01136c107c93d7723497a876ea70e54e9dc2.cu
|
#include "al2o3_platform/platform.h"
#include "al2o3_memory/memory.h"
#include "accel_cuda.hpp"
#include <cuda.h>
inline int _ConvertSMVer2Cores(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
// the # of cores per SM)
typedef struct {
int SM; // 0xMm (hexidecimal notation), M = SM Major version,
// and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] = {
{0x30, 192},
{0x32, 192},
{0x35, 192},
{0x37, 192},
{0x50, 128},
{0x52, 128},
{0x53, 128},
{0x60, 64},
{0x61, 128},
{0x62, 128},
{0x70, 64},
{0x72, 64},
{0x75, 64},
{-1, -1}};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1) {
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) {
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
// If we don't find the value, we default to using the previous one
// so the code still runs properly
LOGINFO("MapSMtoCores for SM %d.%d is undefined. Default to use %d Cores/SM\n",
major, minor, nGpuArchCoresPerSM[index - 1].Cores);
return nGpuArchCoresPerSM[index - 1].Cores;
}
inline const char *_ConvertSMVer2ArchName(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
// the GPU Arch name)
typedef struct {
int SM; // 0xMm (hexidecimal notation), M = SM Major version,
// and m = SM minor version
const char *name;
} sSMtoArchName;
sSMtoArchName nGpuArchNameSM[] = {
{0x30, "Kepler"},
{0x32, "Kepler"},
{0x35, "Kepler"},
{0x37, "Kepler"},
{0x50, "Maxwell"},
{0x52, "Maxwell"},
{0x53, "Maxwell"},
{0x60, "Pascal"},
{0x61, "Pascal"},
{0x62, "Pascal"},
{0x70, "Volta"},
{0x72, "Xavier"},
{0x75, "Turing"},
{-1, "Graphics Device"}};
int index = 0;
while (nGpuArchNameSM[index].SM != -1) {
if (nGpuArchNameSM[index].SM == ((major << 4) + minor)) {
return nGpuArchNameSM[index].name;
}
index++;
}
// If we don't find the value, we default to using the previous one
// so the code still runs properly
LOGINFO("MapSMtoArchName for SM %d.%d is undefined. Default to use %s\n",
major,
minor,
nGpuArchNameSM[index - 1].name);
return nGpuArchNameSM[index - 1].name;
}
template<typename T>
void check(T result, char const *const func, const char *const file,
int const line) {
if (result) {
LOGERROR("CUDA error at %s:%d code=%d(%s) \"%s\" \n", file, line,
static_cast<unsigned int>(result), cudaGetErrorName(result), func);
}
}
// This will output the proper CUDA error strings in the event
// that a CUDA host call returns an error
#define checkCudaErrors(val) check((val), #val, __FILE__, __LINE__)
struct Cuda {
int deviceIndex;
};
Cuda *AccelCUDA_Create() {
int deviceCount;
int pickedDeviceIndex = -1;
int pickedTotalCores = -1;
cudaDeviceProp pickedDevice{};
checkCudaErrors(cudaGetDeviceCount(&deviceCount));
LOGINFO("--- CUDA Devices ---");
for (int i = 0u; i < deviceCount; ++i) {
cudaDeviceProp deviceProp;
int computeMode = -1;
checkCudaErrors(cudaDeviceGetAttribute(&computeMode, cudaDevAttrComputeMode, i));
if(computeMode == cudaComputeModeProhibited) continue;
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, i));
int const coresPerSM = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
int const totalCores = coresPerSM * deviceProp.multiProcessorCount;
int const computePerf = totalCores * (deviceProp.clockRate/1024);
LOGINFO("%d: %s %s (%d.%d)", i,
deviceProp.name, _ConvertSMVer2ArchName(deviceProp.major, deviceProp.minor), deviceProp.major, deviceProp.minor);
LOGINFO("%d: SMs %d, Cores %d, Total Cores %d Clock %d ~GFLOPs %f", i,
deviceProp.multiProcessorCount, coresPerSM, totalCores, deviceProp.clockRate/1024, ((float)2 * computePerf)/1024.0f);
// for now just pick the biggest new enough device
if (totalCores > pickedTotalCores) {
memcpy(&pickedDevice, &deviceProp, sizeof(cudaDeviceProp));
pickedDeviceIndex = i;
pickedTotalCores = totalCores;
}
}
LOGINFO("---");
if (pickedDeviceIndex == -1) {
return nullptr;
}
checkCudaErrors(cudaSetDevice(pickedDeviceIndex));
Cuda* cuda = (Cuda*)MEMORY_CALLOC(1, sizeof(Cuda));
if(!cuda) return nullptr;
cuda->deviceIndex = pickedDeviceIndex;
return cuda;
}
void AccelCUDA_Destroy(Cuda *cuda) {
if(!cuda) return;
MEMORY_FREE(cuda);
}
|
f7c6fd08133c3b3c433e87cb0abd030abb5402d5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/mask_kernel.h"
#include "paddle/phi/backends/gpu/gpu_info.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/funcs/aligned_vector.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/sparse/flatten_indices.cu.h"
namespace phi {
namespace sparse {
template <typename T, typename IntT>
__global__ void MaskKernel(const T* x_ptr,
const IntT* indices_ptr,
const int64_t* sparse_offsets,
const int64_t non_zero_num,
const int cols,
const int sparse_dim,
T* out_values_ptr) {
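  // Each thread handles one (non-zero row, column) pair: the mask's sparse indices are
  // flattened through sparse_offsets into a row of x viewed as a [rows x cols] matrix,
  // and that element of x is gathered into the dense values of the output.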
CUDA_KERNEL_LOOP_TYPE(i, non_zero_num * cols, int64_t) {
int64_t out_i = i / cols;
int64_t col_i = i - out_i * cols;
int64_t index = 0;
for (int j = 0; j < sparse_dim; j++) {
index += indices_ptr[j * non_zero_num + out_i] * sparse_offsets[j];
}
out_values_ptr[out_i * cols + col_i] = x_ptr[index * cols + col_i];
}
}
template <typename T, typename IntT>
void SparseMaskGPUKernel(const GPUContext& dev_ctx,
const DenseTensor& x,
const SparseCooTensor& mask,
SparseCooTensor* out) {
const DDim& dims = x.dims();
PADDLE_ENFORCE_EQ(
x.dims(),
mask.dims(),
phi::errors::InvalidArgument("the input x and mask must have the shape"));
const DenseTensor& indices = mask.indices();
const DenseTensor& values = mask.values();
const int sparse_dim = mask.sparse_dim();
DenseTensor sparse_offsets = phi::Empty<GPUContext>(
dev_ctx,
DenseTensorMeta(DataType::INT64, {sparse_dim}, DataLayout::NCHW));
std::vector<int64_t> h_sparse_offsets(sparse_dim);
phi::funcs::sparse::CalcOffsetsPerDim(
dims, sparse_dim, h_sparse_offsets.data());
phi::backends::gpu::GpuMemcpyAsync(sparse_offsets.data<int64_t>(),
&h_sparse_offsets[0],
sizeof(int64_t) * sparse_dim,
gpuMemcpyHostToDevice,
dev_ctx.stream());
DenseTensor out_indices = phi::EmptyLike<T>(dev_ctx, indices);
DenseTensor out_values = phi::EmptyLike<T>(dev_ctx, values);
phi::Copy(dev_ctx, indices, dev_ctx.GetPlace(), false, &out_indices);
const IntT* indices_ptr = indices.data<IntT>();
T* out_values_ptr = out_values.data<T>();
const T* x_ptr = x.data<T>();
const int64_t non_zero_num = mask.nnz();
auto dims_2d = flatten_to_2d(dims, sparse_dim);
const int cols = dims_2d[1];
auto config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, non_zero_num * cols, 1);
hipLaunchKernelGGL(( MaskKernel<T, IntT>)
, dim3(config.block_per_grid), dim3(config.thread_per_block), 0, dev_ctx.stream(),
x_ptr,
indices_ptr,
sparse_offsets.data<int64_t>(),
non_zero_num,
cols,
sparse_dim,
out_values_ptr);
out->SetMember(out_indices, out_values, dims, true);
}
/**
* @brief Filter the DenseTensor x by the
* mask.indices() and output a SparseCooTensor
* x and mask must have the same shape.
**/
template <typename T, typename Context>
void SparseMaskKernel(const Context& dev_ctx,
const DenseTensor& x,
const SparseCooTensor& mask,
SparseCooTensor* out) {
PD_VISIT_BASE_INTEGRAL_TYPES(
mask.indices().dtype(), "SparseMaskGPUKernel", ([&] {
SparseMaskGPUKernel<T, data_t>(dev_ctx, x, mask, out);
}));
}
template <typename IntT>
__global__ void MaskTable(const IntT* x_indexs, const int n, int* table) {
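  // The caller zero-fills table, so 0 means "this flattened index is absent from x";
  // position 0 is therefore encoded as -1 here and decoded back to 0 in MaskCopy.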
CUDA_KERNEL_LOOP_TYPE(i, n, int64_t) {
int index = x_indexs[i];
table[index] = i == 0 ? -1 : i;
}
}
template <typename T, typename IntT, int VecSize>
__global__ void MaskCopy(const IntT* mask_indexs,
const int* table,
const int n,
const int stride,
const T* x_values,
T* out_values) {
using LoadT = phi::AlignedVector<T, VecSize>;
using StoreT = phi::AlignedVector<T, VecSize>;
CUDA_KERNEL_LOOP_TYPE(i, n, int64_t) {
int j = table[mask_indexs[i]];
if (j != 0) {
if (j == -1) j = 0;
for (int k = 0; k < stride; k += VecSize) {
LoadT vec_x;
phi::Load<T, VecSize>(x_values + j * stride + k, &vec_x);
phi::Store<T, VecSize>(vec_x, out_values + i * stride + k);
}
}
}
}
template <typename T, typename IntT>
void SparseMaskHelperGPUKernel(const GPUContext& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& mask_indices,
DenseTensor* out) {
PADDLE_ENFORCE_EQ(
mask_indices.dims().size(),
2,
phi::errors::InvalidArgument("the mask_indices must be 2-D tensor"));
const int32_t sparse_dim = x.sparse_dim();
auto indices_dtype = paddle::experimental::CppTypeToDataType<IntT>::Type();
std::vector<IntT> sparse_offsets(sparse_dim);
DenseTensorMeta x_indexs_meta(indices_dtype, {x.nnz()}, DataLayout::NCHW);
DenseTensorMeta mask_indexs_meta(
indices_dtype, {mask_indices.dims()[1]}, DataLayout::NCHW);
DenseTensorMeta sparse_offset_meta(
indices_dtype, {sparse_dim}, DataLayout::NCHW);
DenseTensor x_indexs =
phi::Empty<GPUContext>(dev_ctx, std::move(x_indexs_meta));
DenseTensor mask_indexs =
phi::Empty<GPUContext>(dev_ctx, std::move(mask_indexs_meta));
DenseTensor bound_out =
phi::Empty<GPUContext>(dev_ctx, std::move(mask_indexs_meta));
DenseTensor d_sparse_offsets =
phi::Empty<GPUContext>(dev_ctx, std::move(sparse_offset_meta));
IntT* x_indexs_ptr = x_indexs.data<IntT>();
IntT* mask_indexs_ptr = mask_indexs.data<IntT>();
IntT* bound_out_ptr = bound_out.data<IntT>();
// 1. calc the offsets of per dim
phi::funcs::sparse::CalcOffsetsPerDim(
x.dims(), sparse_dim, sparse_offsets.data());
// 2. copy sparse_offsets to device
phi::backends::gpu::GpuMemcpyAsync(d_sparse_offsets.data<IntT>(),
sparse_offsets.data(),
sizeof(IntT) * sparse_dim,
gpuMemcpyHostToDevice,
dev_ctx.stream());
// 3. flatten x indices and mask indices
auto config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, x_indexs.numel(), 1);
hipLaunchKernelGGL(( phi::funcs::sparse::FlattenIndicesKernel), dim3(config.block_per_grid),
dim3(config.thread_per_block),
0,
dev_ctx.stream(),
x.indices().data<IntT>(),
d_sparse_offsets.data<IntT>(),
x_indexs.numel(),
sparse_dim,
x_indexs_ptr);
config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, mask_indexs.numel(), 1);
hipLaunchKernelGGL(( phi::funcs::sparse::FlattenIndicesKernel), dim3(config.block_per_grid),
dim3(config.thread_per_block),
0,
dev_ctx.stream(),
mask_indices.data<IntT>(),
d_sparse_offsets.data<IntT>(),
mask_indexs.numel(),
sparse_dim,
mask_indexs_ptr);
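  // 4. build a lookup table over the flattened index space: table[flat index] = position
  //    of that entry in x.values() (with the 0 / -1 encoding described in MaskTable)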
int table_size = 1;
auto x_dims = x.dims();
for (int i = 0; i < x_dims.size() - 1; i++) {
table_size *= x_dims[i];
}
DenseTensor table = phi::Empty<int>(dev_ctx, {table_size});
phi::backends::gpu::GpuMemsetAsync(
table.data<int>(), 0, table_size * sizeof(int), dev_ctx.stream());
const int64_t stride =
x.dims().size() == sparse_dim ? 1 : x.values().dims()[1];
*out = phi::EmptyLike<T>(dev_ctx, x.values());
phi::funcs::SetConstant<GPUContext, T> set_zero;
set_zero(dev_ctx, out, static_cast<T>(0));
T* out_ptr = out->data<T>();
config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, x_indexs.numel(), 1);
hipLaunchKernelGGL(( MaskTable), dim3(config.block_per_grid),
dim3(config.thread_per_block),
0,
dev_ctx.stream(),
x_indexs_ptr, x_indexs.numel(), table.data<int>());
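  // 5. gather the selected rows of x.values() into out, using 16-byte vectorized
  //    loads/stores when the row stride is a multiple of the vector width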
config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, mask_indexs.numel(), 1);
const int VecBytes = 16;
const int VecSize = VecBytes / sizeof(T);
if (stride % VecSize == 0) {
hipLaunchKernelGGL(( MaskCopy<T, IntT, VecSize>), dim3(config.block_per_grid),
dim3(config.thread_per_block),
0,
dev_ctx.stream(), mask_indexs_ptr,
table.data<int>(),
mask_indexs.numel(),
stride,
x.values().data<T>(),
out_ptr);
} else {
hipLaunchKernelGGL(( MaskCopy<T, IntT, 1>), dim3(config.block_per_grid),
dim3(config.thread_per_block),
0,
dev_ctx.stream(), mask_indexs_ptr,
table.data<int>(),
mask_indexs.numel(),
stride,
x.values().data<T>(),
out_ptr);
}
}
template <typename T, typename Context>
void SparseMaskHelperKernel(const Context& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& mask_indices,
DenseTensor* out) {
PD_VISIT_BASE_INTEGRAL_TYPES(
x.indices().dtype(), "SparseMaskHelperGPUKernel", ([&] {
SparseMaskHelperGPUKernel<T, data_t>(dev_ctx, x, mask_indices, out);
}));
}
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(mask,
GPU,
ALL_LAYOUT,
phi::sparse::SparseMaskKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {
kernel->InputAt(1).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
PD_REGISTER_KERNEL(mask_helper,
GPU,
ALL_LAYOUT,
phi::sparse::SparseMaskHelperKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int16_t,
int,
int64_t) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
|
f7c6fd08133c3b3c433e87cb0abd030abb5402d5.cu
|
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/mask_kernel.h"
#include "paddle/phi/backends/gpu/gpu_info.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/funcs/aligned_vector.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/sparse/flatten_indices.cu.h"
namespace phi {
namespace sparse {
template <typename T, typename IntT>
__global__ void MaskKernel(const T* x_ptr,
const IntT* indices_ptr,
const int64_t* sparse_offsets,
const int64_t non_zero_num,
const int cols,
const int sparse_dim,
T* out_values_ptr) {
CUDA_KERNEL_LOOP_TYPE(i, non_zero_num * cols, int64_t) {
int64_t out_i = i / cols;
int64_t col_i = i - out_i * cols;
int64_t index = 0;
for (int j = 0; j < sparse_dim; j++) {
index += indices_ptr[j * non_zero_num + out_i] * sparse_offsets[j];
}
out_values_ptr[out_i * cols + col_i] = x_ptr[index * cols + col_i];
}
}
template <typename T, typename IntT>
void SparseMaskGPUKernel(const GPUContext& dev_ctx,
const DenseTensor& x,
const SparseCooTensor& mask,
SparseCooTensor* out) {
const DDim& dims = x.dims();
PADDLE_ENFORCE_EQ(
x.dims(),
mask.dims(),
phi::errors::InvalidArgument("the input x and mask must have the shape"));
const DenseTensor& indices = mask.indices();
const DenseTensor& values = mask.values();
const int sparse_dim = mask.sparse_dim();
DenseTensor sparse_offsets = phi::Empty<GPUContext>(
dev_ctx,
DenseTensorMeta(DataType::INT64, {sparse_dim}, DataLayout::NCHW));
std::vector<int64_t> h_sparse_offsets(sparse_dim);
phi::funcs::sparse::CalcOffsetsPerDim(
dims, sparse_dim, h_sparse_offsets.data());
phi::backends::gpu::GpuMemcpyAsync(sparse_offsets.data<int64_t>(),
&h_sparse_offsets[0],
sizeof(int64_t) * sparse_dim,
gpuMemcpyHostToDevice,
dev_ctx.stream());
DenseTensor out_indices = phi::EmptyLike<T>(dev_ctx, indices);
DenseTensor out_values = phi::EmptyLike<T>(dev_ctx, values);
phi::Copy(dev_ctx, indices, dev_ctx.GetPlace(), false, &out_indices);
const IntT* indices_ptr = indices.data<IntT>();
T* out_values_ptr = out_values.data<T>();
const T* x_ptr = x.data<T>();
const int64_t non_zero_num = mask.nnz();
auto dims_2d = flatten_to_2d(dims, sparse_dim);
const int cols = dims_2d[1];
auto config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, non_zero_num * cols, 1);
MaskKernel<T, IntT>
<<<config.block_per_grid, config.thread_per_block, 0, dev_ctx.stream()>>>(
x_ptr,
indices_ptr,
sparse_offsets.data<int64_t>(),
non_zero_num,
cols,
sparse_dim,
out_values_ptr);
out->SetMember(out_indices, out_values, dims, true);
}
/**
* @brief Filter the DenseTensor x by the
* mask.indices() and output a SparseCooTensor
* x and mask must have the same shape.
**/
template <typename T, typename Context>
void SparseMaskKernel(const Context& dev_ctx,
const DenseTensor& x,
const SparseCooTensor& mask,
SparseCooTensor* out) {
PD_VISIT_BASE_INTEGRAL_TYPES(
mask.indices().dtype(), "SparseMaskGPUKernel", ([&] {
SparseMaskGPUKernel<T, data_t>(dev_ctx, x, mask, out);
}));
}
template <typename IntT>
__global__ void MaskTable(const IntT* x_indexs, const int n, int* table) {
CUDA_KERNEL_LOOP_TYPE(i, n, int64_t) {
int index = x_indexs[i];
table[index] = i == 0 ? -1 : i;
}
}
template <typename T, typename IntT, int VecSize>
__global__ void MaskCopy(const IntT* mask_indexs,
const int* table,
const int n,
const int stride,
const T* x_values,
T* out_values) {
using LoadT = phi::AlignedVector<T, VecSize>;
using StoreT = phi::AlignedVector<T, VecSize>;
CUDA_KERNEL_LOOP_TYPE(i, n, int64_t) {
int j = table[mask_indexs[i]];
if (j != 0) {
if (j == -1) j = 0;
for (int k = 0; k < stride; k += VecSize) {
LoadT vec_x;
phi::Load<T, VecSize>(x_values + j * stride + k, &vec_x);
phi::Store<T, VecSize>(vec_x, out_values + i * stride + k);
}
}
}
}
template <typename T, typename IntT>
void SparseMaskHelperGPUKernel(const GPUContext& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& mask_indices,
DenseTensor* out) {
PADDLE_ENFORCE_EQ(
mask_indices.dims().size(),
2,
phi::errors::InvalidArgument("the mask_indices must be 2-D tensor"));
const int32_t sparse_dim = x.sparse_dim();
auto indices_dtype = paddle::experimental::CppTypeToDataType<IntT>::Type();
std::vector<IntT> sparse_offsets(sparse_dim);
DenseTensorMeta x_indexs_meta(indices_dtype, {x.nnz()}, DataLayout::NCHW);
DenseTensorMeta mask_indexs_meta(
indices_dtype, {mask_indices.dims()[1]}, DataLayout::NCHW);
DenseTensorMeta sparse_offset_meta(
indices_dtype, {sparse_dim}, DataLayout::NCHW);
DenseTensor x_indexs =
phi::Empty<GPUContext>(dev_ctx, std::move(x_indexs_meta));
DenseTensor mask_indexs =
phi::Empty<GPUContext>(dev_ctx, std::move(mask_indexs_meta));
DenseTensor bound_out =
phi::Empty<GPUContext>(dev_ctx, std::move(mask_indexs_meta));
DenseTensor d_sparse_offsets =
phi::Empty<GPUContext>(dev_ctx, std::move(sparse_offset_meta));
IntT* x_indexs_ptr = x_indexs.data<IntT>();
IntT* mask_indexs_ptr = mask_indexs.data<IntT>();
IntT* bound_out_ptr = bound_out.data<IntT>();
// 1. calc the offsets of per dim
phi::funcs::sparse::CalcOffsetsPerDim(
x.dims(), sparse_dim, sparse_offsets.data());
// 2. copy sparse_offsets to device
phi::backends::gpu::GpuMemcpyAsync(d_sparse_offsets.data<IntT>(),
sparse_offsets.data(),
sizeof(IntT) * sparse_dim,
gpuMemcpyHostToDevice,
dev_ctx.stream());
// 3. flatten x indices and mask indices
auto config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, x_indexs.numel(), 1);
phi::funcs::sparse::FlattenIndicesKernel<<<config.block_per_grid,
config.thread_per_block,
0,
dev_ctx.stream()>>>(
x.indices().data<IntT>(),
d_sparse_offsets.data<IntT>(),
x_indexs.numel(),
sparse_dim,
x_indexs_ptr);
config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, mask_indexs.numel(), 1);
phi::funcs::sparse::FlattenIndicesKernel<<<config.block_per_grid,
config.thread_per_block,
0,
dev_ctx.stream()>>>(
mask_indices.data<IntT>(),
d_sparse_offsets.data<IntT>(),
mask_indexs.numel(),
sparse_dim,
mask_indexs_ptr);
int table_size = 1;
auto x_dims = x.dims();
for (int i = 0; i < x_dims.size() - 1; i++) {
table_size *= x_dims[i];
}
DenseTensor table = phi::Empty<int>(dev_ctx, {table_size});
phi::backends::gpu::GpuMemsetAsync(
table.data<int>(), 0, table_size * sizeof(int), dev_ctx.stream());
const int64_t stride =
x.dims().size() == sparse_dim ? 1 : x.values().dims()[1];
*out = phi::EmptyLike<T>(dev_ctx, x.values());
phi::funcs::SetConstant<GPUContext, T> set_zero;
set_zero(dev_ctx, out, static_cast<T>(0));
T* out_ptr = out->data<T>();
config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, x_indexs.numel(), 1);
MaskTable<<<config.block_per_grid,
config.thread_per_block,
0,
dev_ctx.stream()>>>(
x_indexs_ptr, x_indexs.numel(), table.data<int>());
config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, mask_indexs.numel(), 1);
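// Copy 16 bytes at a time when the row stride allows it, otherwise fall back
// to element-wise copies.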
const int VecBytes = 16;
const int VecSize = VecBytes / sizeof(T);
if (stride % VecSize == 0) {
MaskCopy<T, IntT, VecSize><<<config.block_per_grid,
config.thread_per_block,
0,
dev_ctx.stream()>>>(mask_indexs_ptr,
table.data<int>(),
mask_indexs.numel(),
stride,
x.values().data<T>(),
out_ptr);
} else {
MaskCopy<T, IntT, 1><<<config.block_per_grid,
config.thread_per_block,
0,
dev_ctx.stream()>>>(mask_indexs_ptr,
table.data<int>(),
mask_indexs.numel(),
stride,
x.values().data<T>(),
out_ptr);
}
}
template <typename T, typename Context>
void SparseMaskHelperKernel(const Context& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& mask_indices,
DenseTensor* out) {
PD_VISIT_BASE_INTEGRAL_TYPES(
x.indices().dtype(), "SparseMaskHelperGPUKernel", ([&] {
SparseMaskHelperGPUKernel<T, data_t>(dev_ctx, x, mask_indices, out);
}));
}
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(mask,
GPU,
ALL_LAYOUT,
phi::sparse::SparseMaskKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {
kernel->InputAt(1).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
PD_REGISTER_KERNEL(mask_helper,
GPU,
ALL_LAYOUT,
phi::sparse::SparseMaskHelperKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int16_t,
int,
int64_t) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
|
81262f9b8046303527d21c71de527e1562db230a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#define BLOCK_SIZE 256
using namespace std;
__global__ void vectorInc(int* input, int* output, int numElements) {
int t = blockDim.x * blockIdx.x + threadIdx.x;
// bounds
if (t < numElements)
output[t] = input[t] + 1;
}
// entry point
int main() {
const int numElements = 10;
int host_input[numElements] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
int* device_input;
int* device_output;
int* host_output;
// copy memory and allocate for output
host_output = (int*) malloc(sizeof(int) * numElements);
hipMalloc((void**) &device_input, sizeof(int) * numElements);
hipMalloc((void**) &device_output, sizeof(int) * numElements);
hipMemcpy(device_input, host_input, sizeof(int) * numElements, hipMemcpyHostToDevice);
// init kernel
dim3 blockDim(BLOCK_SIZE);
dim3 gridDim(ceil(1.0 * numElements / BLOCK_SIZE));
hipLaunchKernelGGL(( vectorInc), dim3(gridDim), dim3(blockDim), 0, 0, device_input, device_output, numElements);
// wait for device to finish
hipDeviceSynchronize();
// copy answer back to host
hipMemcpy(host_output, device_output, sizeof(int) * numElements, hipMemcpyDeviceToHost);
// print the result
for (int i = 0; i < numElements; i++) {
cout << host_output[i] << " ";
}
cout << endl;
// free memory
hipFree(device_output);
hipFree(device_input);
free(host_output);
return 0;
}
|
81262f9b8046303527d21c71de527e1562db230a.cu
|
#include <cuda_runtime.h>
#include <iostream>
#define BLOCK_SIZE 256
using namespace std;
__global__ void vectorInc(int* input, int* output, int numElements) {
int t = blockDim.x * blockIdx.x + threadIdx.x;
// bounds
if (t < numElements)
output[t] = input[t] + 1;
}
// entry point
int main() {
const int numElements = 10;
int host_input[numElements] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
int* device_input;
int* device_output;
int* host_output;
// copy memory and allocate for output
host_output = (int*) malloc(sizeof(int) * numElements);
cudaMalloc((void**) &device_input, sizeof(int) * numElements);
cudaMalloc((void**) &device_output, sizeof(int) * numElements);
cudaMemcpy(device_input, host_input, sizeof(int) * numElements, cudaMemcpyHostToDevice);
// init kernel
dim3 blockDim(BLOCK_SIZE);
dim3 gridDim(ceil(1.0 * numElements / BLOCK_SIZE));
vectorInc<<<gridDim, blockDim>>>(device_input, device_output, numElements);
// wait for device to finish
cudaDeviceSynchronize();
// copy answer back to host
cudaMemcpy(host_output, device_output, sizeof(int) * numElements, cudaMemcpyDeviceToHost);
// print the result
for (int i = 0; i < numElements; i++) {
cout << host_output[i] << " ";
}
cout << endl;
// free memory
cudaFree(device_output);
cudaFree(device_input);
free(host_output);
return 0;
}
|
43c8264b65144413983092ff64e7fcf1331694ab.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void count_characters(int *buffer, int *freq, long file_size, int base) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int total_threads = gridDim.x * blockDim.x;
long i;
for (i=index; i<file_size; i+=total_threads)
atomicAdd(&(freq[buffer[i] - base]), 1);
}
|
43c8264b65144413983092ff64e7fcf1331694ab.cu
|
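// Grid-stride histogram: each thread walks the buffer with a stride equal to
// the total thread count and atomically increments the bin for its character.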
__global__ void count_characters(int *buffer, int *freq, long file_size, int base) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int total_threads = gridDim.x * blockDim.x;
long i;
for (i=index; i<file_size; i+=total_threads)
atomicAdd(&(freq[buffer[i] - base]), 1);
}
|
ba3889185071f09999cb577abfc27ef460806433.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include "myProto.h"
#define THREADS_PER_BLOCKS 512
__global__ void AlignmentScore(char *seq1, char *seq2, int seq1_size,
int seq2_size, float w[4], int max_ms, int max_offset, double *results,
int num_of_threads);
// Host functions
void checkErr(hipError_t err, const char *err_message, char *var_name) {
if (err != hipSuccess) {
fprintf(stderr, "%s - %s\n, var-> %s", err_message, hipGetErrorString(err), var_name);
exit(EXIT_FAILURE);
}
}
void cudaMallocDoubleArr(double** d_arr,size_t arr_size, hipError_t err, char* var_name,const char* malloc_err_message){
err = hipMalloc((void**)d_arr, arr_size);
checkErr(err, malloc_err_message, var_name);
}
void cudaMallocFloatArr(float** d_w,size_t w_size, hipError_t err, char* var_name,const char* malloc_err_message){
err = hipMalloc((void**)d_w, w_size);
checkErr(err, malloc_err_message, var_name);
}
void cudaMallocString(char** d_seq,size_t seq_size, hipError_t err, char* var_name,const char* malloc_err_message){
err = hipMalloc((void**)d_seq, seq_size);
checkErr(err, malloc_err_message, var_name);
}
void cudaMemcpyHostToDeviceFloat(float* d_w, float* w, size_t w_size, hipError_t err,const char* copy_err_message,char* var_name){
err = hipMemcpy(d_w, w, w_size, hipMemcpyHostToDevice);
checkErr(err, copy_err_message, var_name);
}
void cudaMemcpyHostToDeviceString(char* d_seq, char* seq, size_t seq_size, hipError_t err,const char* copy_err_message,char* var_name){
err = hipMemcpy(d_seq, seq, seq_size, hipMemcpyHostToDevice);
checkErr(err, copy_err_message, var_name);
}
// Call the GPU and get GPU results
int computeOnGPU(char *seq1, char *seq2, float w[4], int *bestMS, int *best_offset) {
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
char var_name[30];
// Define results arrays
double *scores_arr;
// Define parameters size
size_t seq1_size = strlen(seq1);
size_t seq2_size = strlen(seq2);
int max_offset = (int) seq1_size - (int) seq2_size - 1;
int max_ms = (int) seq2_size;
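// One GPU thread per (ms, offset) combination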
int num_of_threads = (int) max_ms * (max_offset + 1);
size_t scores_size = num_of_threads * sizeof(double);
size_t w_size = 4 * sizeof(float);
// Allocate results arrays in host
scores_arr = (double*) malloc(scores_size);
// Allocate memory on GPU to copy the data from the host
const char *malloc_err_message = "Failed to allocate device memory";
char *d_seq1;
char *d_seq2;
double *d_scores_arr;
float *d_w;
strcpy(var_name, "d_w");
cudaMallocFloatArr(&d_w, w_size, err, var_name, malloc_err_message);
strcpy(var_name, "d_scores_arr");
cudaMallocDoubleArr(&d_scores_arr, scores_size, err, var_name, malloc_err_message);
strcpy(var_name, "d_seq1");
cudaMallocString(&d_seq1, seq1_size * sizeof(char), err, var_name, malloc_err_message);
strcpy(var_name, "d_seq2");
cudaMallocString(&d_seq2, seq2_size * sizeof(char), err, var_name, malloc_err_message);
// Copy data from host to the GPU memory
const char *copy_err_message = "Failed to copy data from host to device";
strcpy(var_name, "d_w");
cudaMemcpyHostToDeviceFloat(d_w, w, w_size, err, copy_err_message, var_name);
strcpy(var_name, "d_seq1");
cudaMemcpyHostToDeviceString(d_seq1, seq1, seq1_size, err, copy_err_message, var_name);
strcpy(var_name, "d_seq2");
cudaMemcpyHostToDeviceString(d_seq2, seq2, seq2_size, err, copy_err_message, var_name);
// Launch the Kernel
int num_of_blocks_per_grid = num_of_threads / THREADS_PER_BLOCKS;
// Check if we need to add more block
if (num_of_threads % THREADS_PER_BLOCKS || !num_of_threads) {
num_of_blocks_per_grid++;
}
hipLaunchKernelGGL(( AlignmentScore), dim3(num_of_blocks_per_grid), dim3(THREADS_PER_BLOCKS), 0, 0, d_seq1, d_seq2, seq1_size, seq2_size, d_w, max_ms, max_offset, d_scores_arr, num_of_threads);
err = hipGetLastError();
strcpy(var_name, "No var");
checkErr(err, "Failed to launch vectorAdd kernel", var_name);
// Copy the result from GPU to the host memory.
const char *copy_res_err_message = "Failed to copy data from device to host";
strcpy(var_name, "scores_arr");
err = hipMemcpy(scores_arr, d_scores_arr, scores_size,
hipMemcpyDeviceToHost);
checkErr(err, copy_res_err_message, var_name);
// Initial bests
*bestMS = 1;
*best_offset = 0;
double best_score = scores_arr[0];
// Compute best score ms and offset
for (int x = 0; x < num_of_threads; x++) {
if (scores_arr[x] > best_score) {
best_score = scores_arr[x];
*bestMS = x % max_ms + 1;
*best_offset = x / max_ms;
}
}
//printf("bestMS = %d, best_offset = %d, best_score=%lf\n",*bestMS, *best_offset, best_score);
free(scores_arr);
// Free allocated memory on GPU
const char *free_err_message = "Failed to free device data";
strcpy(var_name, "d_scores_arr");
err = hipFree(d_scores_arr);
checkErr(err, free_err_message, var_name);
strcpy(var_name, "d_seq1");
err = hipFree(d_seq1);
checkErr(err, free_err_message, var_name);
strcpy(var_name, "d_seq2");
err = hipFree(d_seq2);
checkErr(err, free_err_message, var_name);
return 0;
}
__device__ void mystrlen(int *len, const char *str) {
// Calculate length of a string
(*len) = 0;
while (*str) {
(*len)++;
str++;
}
}
__device__ void checkspecGroup(const char **group_to_check, int size, char c1, char c2, int *is_cons) {
// Get group of strings and check if 2 characters are in the same string
int i, j, k, str_len;
for (i = 0; i < size; i++) {
mystrlen(&str_len, group_to_check[i]);
for (j = 0; j < str_len; j++) {
if (c1 == group_to_check[i][j]) {
for (k = 0; k < str_len; k++) {
if (c2 == group_to_check[i][k]) {
*(is_cons) = 1;
return;
}
}
}
}
}
}
__device__ void checkConserative(int similarityes[4], char c1, char c2) {
if (c1 == c2) {
similarityes[0]++;
} else {
// If c1 != c2, check whether they are in the same Conservative Group and update similarityes[1] if so
const char *CONSERVATIVE_GROUPS[9] = { "NDEQ", "MILV", "FYW", "NEQK",
"QHRK", "HY", "STA", "NHQK", "MILF" };
const int CONSERVATIVE_GROUPS_SIZE = 9;
int is_cons = 0;
checkspecGroup(CONSERVATIVE_GROUPS, CONSERVATIVE_GROUPS_SIZE, c1, c2,
&is_cons);
if (is_cons) {
similarityes[1]++;
}
if (!is_cons) {
// If c1 and c2 are not in the same Conservative Group, check whether they are in the same Semi Conservative Group
// and update similarityes[2] if so
const char *SEMI_CONSERVATIVE_GROUPS[11] = { "SAG", "SGND",
"NEQHRK", "ATV", "STPA", "NDEQHK", "HFY", "CSA", "STNK",
"SNDEQK", "FVLIM" };
const int SEMI_CONSERVATIVE_GROUPS_SIZE = 11;
checkspecGroup(SEMI_CONSERVATIVE_GROUPS,
SEMI_CONSERVATIVE_GROUPS_SIZE, c1, c2, &is_cons);
if (is_cons) {
similarityes[2]++;
}
// If they are not in the same group and not equal, update similarityes[3]
if (!is_cons)
similarityes[3]++;
}
}
}
__device__ void calcSimilarity(char *seq1, char *seq2, int len, int similarityes[4], int ms, int offset) {
len++;// add 1 to len for the ms
int i = 0;
// Check whether chars at the same location (according to offset and ms location) are equal, conservative or semi-conservative
// Check chars till ms location
while (i < ms) {
checkConserative(similarityes, seq1[i + offset], seq2[i]);
i++;
}
// The ms location counts as not equal, not conservative and not semi-conservative
similarityes[3]++;
i++;
// Check chars from ms location to the end
while (i < len) {
checkConserative(similarityes, seq1[i + offset], seq2[i - 1]);
i++;
}
}
__device__ void alignmentScoreFunc(double *results, int similarityes[4], float w[4]) {
*results = (double) (w[0] * similarityes[0] - w[1] * similarityes[1]
- w[2] * similarityes[2] - w[3] * similarityes[3]);
}
__global__ void AlignmentScore(char *seq1, char *seq2, int seq1_size,
int seq2_size, float w[4], int max_ms, int max_offset, double *results,
int num_of_threads) {
int new_id = threadIdx.x + (blockDim.x * blockIdx.x);
if (new_id < num_of_threads) {
int temp_len;
// Make sure seq1 and seq2 stay as they were in the file
mystrlen(&temp_len, seq2);
if (temp_len > seq2_size)
seq2[seq2_size] = '\0';
mystrlen(&temp_len, seq1);
if (temp_len > seq1_size)
seq1[seq1_size] = '\0';
// Compute the ms and offset handled by this thread
int my_ms = new_id % max_ms + 1;
int my_offset = new_id / max_ms;
// Initialize similarityes arr - holds the count of each char type in the similarity string
int similarityes[4] = { 0 };
// Update similarityes arr with the count of each char type in the similarity string
calcSimilarity(seq1, seq2, seq2_size, similarityes, my_ms, my_offset);
// Compute alignmentScoreFunc
alignmentScoreFunc(&results[new_id], similarityes, w);
}
}
|
ba3889185071f09999cb577abfc27ef460806433.cu
|
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include "myProto.h"
#define THREADS_PER_BLOCKS 512
__global__ void AlignmentScore(char *seq1, char *seq2, int seq1_size,
int seq2_size, float w[4], int max_ms, int max_offset, double *results,
int num_of_threads);
// Host functions
void checkErr(cudaError_t err, const char *err_message, char *var_name) {
if (err != cudaSuccess) {
fprintf(stderr, "%s - %s\n, var-> %s", err_message, cudaGetErrorString(err), var_name);
exit(EXIT_FAILURE);
}
}
void cudaMallocDoubleArr(double** d_arr,size_t arr_size, cudaError_t err, char* var_name,const char* malloc_err_message){
err = cudaMalloc((void**)d_arr, arr_size);
checkErr(err, malloc_err_message, var_name);
}
void cudaMallocFloatArr(float** d_w,size_t w_size, cudaError_t err, char* var_name,const char* malloc_err_message){
err = cudaMalloc((void**)d_w, w_size);
checkErr(err, malloc_err_message, var_name);
}
void cudaMallocString(char** d_seq,size_t seq_size, cudaError_t err, char* var_name,const char* malloc_err_message){
err = cudaMalloc((void**)d_seq, seq_size);
checkErr(err, malloc_err_message, var_name);
}
void cudaMemcpyHostToDeviceFloat(float* d_w, float* w, size_t w_size, cudaError_t err,const char* copy_err_message,char* var_name){
err = cudaMemcpy(d_w, w, w_size, cudaMemcpyHostToDevice);
checkErr(err, copy_err_message, var_name);
}
void cudaMemcpyHostToDeviceString(char* d_seq, char* seq, size_t seq_size, cudaError_t err,const char* copy_err_message,char* var_name){
err = cudaMemcpy(d_seq, seq, seq_size, cudaMemcpyHostToDevice);
checkErr(err, copy_err_message, var_name);
}
// Call the GPU and get GPU results
int computeOnGPU(char *seq1, char *seq2, float w[4], int *bestMS, int *best_offset) {
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
char var_name[30];
// Define results arrays
double *scores_arr;
// Define parameters size
size_t seq1_size = strlen(seq1);
size_t seq2_size = strlen(seq2);
int max_offset = (int) seq1_size - (int) seq2_size - 1;
int max_ms = (int) seq2_size;
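// One GPU thread per (ms, offset) combination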
int num_of_threads = (int) max_ms * (max_offset + 1);
size_t scores_size = num_of_threads * sizeof(double);
size_t w_size = 4 * sizeof(float);
// Allocate results arrays in host
scores_arr = (double*) malloc(scores_size);
// Allocate memory on GPU to copy the data from the host
const char *malloc_err_message = "Failed to allocate device memory";
char *d_seq1;
char *d_seq2;
double *d_scores_arr;
float *d_w;
strcpy(var_name, "d_w");
cudaMallocFloatArr(&d_w, w_size, err, var_name, malloc_err_message);
strcpy(var_name, "d_scores_arr");
cudaMallocDoubleArr(&d_scores_arr, scores_size, err, var_name, malloc_err_message);
strcpy(var_name, "d_seq1");
cudaMallocString(&d_seq1, seq1_size * sizeof(char), err, var_name, malloc_err_message);
strcpy(var_name, "d_seq2");
cudaMallocString(&d_seq2, seq2_size * sizeof(char), err, var_name, malloc_err_message);
// Copy data from host to the GPU memory
const char *copy_err_message = "Failed to copy data from host to device";
strcpy(var_name, "d_w");
cudaMemcpyHostToDeviceFloat(d_w, w, w_size, err, copy_err_message, var_name);
strcpy(var_name, "d_seq1");
cudaMemcpyHostToDeviceString(d_seq1, seq1, seq1_size, err, copy_err_message, var_name);
strcpy(var_name, "d_seq2");
cudaMemcpyHostToDeviceString(d_seq2, seq2, seq2_size, err, copy_err_message, var_name);
// Launch the Kernel
int num_of_blocks_per_grid = num_of_threads / THREADS_PER_BLOCKS;
// Check if we need to add more block
if (num_of_threads % THREADS_PER_BLOCKS || !num_of_threads) {
num_of_blocks_per_grid++;
}
AlignmentScore<<<num_of_blocks_per_grid, THREADS_PER_BLOCKS>>>(d_seq1, d_seq2, seq1_size, seq2_size, d_w, max_ms, max_offset, d_scores_arr, num_of_threads);
err = cudaGetLastError();
strcpy(var_name, "No var");
checkErr(err, "Failed to launch vectorAdd kernel", var_name);
// Copy the result from GPU to the host memory.
const char *copy_res_err_message = "Failed to copy data from device to host";
strcpy(var_name, "scores_arr");
err = cudaMemcpy(scores_arr, d_scores_arr, scores_size,
cudaMemcpyDeviceToHost);
checkErr(err, copy_res_err_message, var_name);
// Initial bests
*bestMS = 1;
*best_offset = 0;
double best_score = scores_arr[0];
// Compute best score ms and offset
for (int x = 0; x < num_of_threads; x++) {
if (scores_arr[x] > best_score) {
best_score = scores_arr[x];
*bestMS = x % max_ms + 1;
*best_offset = x / max_ms;
}
}
//printf("bestMS = %d, best_offset = %d, best_score=%lf\n",*bestMS, *best_offset, best_score);
free(scores_arr);
// Free allocated memory on GPU
const char *free_err_message = "Failed to free device data";
strcpy(var_name, "d_scores_arr");
err = cudaFree(d_scores_arr);
checkErr(err, free_err_message, var_name);
strcpy(var_name, "d_seq1");
err = cudaFree(d_seq1);
checkErr(err, free_err_message, var_name);
strcpy(var_name, "d_seq2");
err = cudaFree(d_seq2);
checkErr(err, free_err_message, var_name);
return 0;
}
__device__ void mystrlen(int *len, const char *str) {
// Calculate length of a string
(*len) = 0;
while (*str) {
(*len)++;
str++;
}
}
__device__ void checkspecGroup(const char **group_to_check, int size, char c1, char c2, int *is_cons) {
// Get group of strings and check if 2 characters are in the same string
int i, j, k, str_len;
for (i = 0; i < size; i++) {
mystrlen(&str_len, group_to_check[i]);
for (j = 0; j < str_len; j++) {
if (c1 == group_to_check[i][j]) {
for (k = 0; k < str_len; k++) {
if (c2 == group_to_check[i][k]) {
*(is_cons) = 1;
return;
}
}
}
}
}
}
__device__ void checkConserative(int similarityes[4], char c1, char c2) {
if (c1 == c2) {
similarityes[0]++;
} else {
// If c1 != c2, check whether they are in the same Conservative Group and update similarityes[1] if so
const char *CONSERVATIVE_GROUPS[9] = { "NDEQ", "MILV", "FYW", "NEQK",
"QHRK", "HY", "STA", "NHQK", "MILF" };
const int CONSERVATIVE_GROUPS_SIZE = 9;
int is_cons = 0;
checkspecGroup(CONSERVATIVE_GROUPS, CONSERVATIVE_GROUPS_SIZE, c1, c2,
&is_cons);
if (is_cons) {
similarityes[1]++;
}
if (!is_cons) {
// If c1 and c2 are not in the same Conservative Group, check whether they are in the same Semi Conservative Group
// and update similarityes[2] if so
const char *SEMI_CONSERVATIVE_GROUPS[11] = { "SAG", "SGND",
"NEQHRK", "ATV", "STPA", "NDEQHK", "HFY", "CSA", "STNK",
"SNDEQK", "FVLIM" };
const int SEMI_CONSERVATIVE_GROUPS_SIZE = 11;
checkspecGroup(SEMI_CONSERVATIVE_GROUPS,
SEMI_CONSERVATIVE_GROUPS_SIZE, c1, c2, &is_cons);
if (is_cons) {
similarityes[2]++;
}
// If they are not in the same group and not equal, update similarityes[3]
if (!is_cons)
similarityes[3]++;
}
}
}
__device__ void calcSimilarity(char *seq1, char *seq2, int len, int similarityes[4], int ms, int offset) {
len++;// add 1 to len for the ms
int i = 0;
// Check whether chars at the same location (according to offset and ms location) are equal, conservative or semi-conservative
// Check chars till ms location
while (i < ms) {
checkConserative(similarityes, seq1[i + offset], seq2[i]);
i++;
}
// The ms location counts as not equal, not conservative and not semi-conservative
similarityes[3]++;
i++;
// Check chars from ms location to the end
while (i < len) {
checkConserative(similarityes, seq1[i + offset], seq2[i - 1]);
i++;
}
}
__device__ void alignmentScoreFunc(double *results, int similarityes[4], float w[4]) {
*results = (double) (w[0] * similarityes[0] - w[1] * similarityes[1]
- w[2] * similarityes[2] - w[3] * similarityes[3]);
}
__global__ void AlignmentScore(char *seq1, char *seq2, int seq1_size,
int seq2_size, float w[4], int max_ms, int max_offset, double *results,
int num_of_threads) {
int new_id = threadIdx.x + (blockDim.x * blockIdx.x);
if (new_id < num_of_threads) {
int temp_len;
// Make sure seq1 and seq2 stay as they were in the file
mystrlen(&temp_len, seq2);
if (temp_len > seq2_size)
seq2[seq2_size] = '\0';
mystrlen(&temp_len, seq1);
if (temp_len > seq1_size)
seq1[seq1_size] = '\0';
// Compute the ms and offset handled by this thread
int my_ms = new_id % max_ms + 1;
int my_offset = new_id / max_ms;
// Initialize similarityes arr - holds the count of each char type in the similarity string
int similarityes[4] = { 0 };
// Update similarityes arr with the count of each char type in the similarity string
calcSimilarity(seq1, seq2, seq2_size, similarityes, my_ms, my_offset);
// Compute alignmentScoreFunc
alignmentScoreFunc(&results[new_id], similarityes, w);
}
}
|
bb3970c2438e75ced5ee441f53f603bb36ba66fa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "valid_convolution.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_kernel = NULL;
hipMalloc(&d_kernel, XSIZE*YSIZE*sizeof(float));
int k_size = XSIZE*YSIZE;
float *d_matrix = NULL;
hipMalloc(&d_matrix, XSIZE*YSIZE*sizeof(float));
int size_x = XSIZE*YSIZE;
int size_y = XSIZE*YSIZE;
float *d_conv = NULL;
hipMalloc(&d_conv, XSIZE*YSIZE*sizeof(float));
int max_row = 1;
int max_col = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(valid_convolution, dim3(gridBlock), dim3(threadBlock), 0, 0, d_kernel, k_size, d_matrix, size_x, size_y, d_conv, max_row, max_col);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(valid_convolution, dim3(gridBlock), dim3(threadBlock), 0, 0, d_kernel, k_size, d_matrix, size_x, size_y, d_conv, max_row, max_col);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(valid_convolution, dim3(gridBlock), dim3(threadBlock), 0, 0, d_kernel, k_size, d_matrix, size_x, size_y, d_conv, max_row, max_col);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
bb3970c2438e75ced5ee441f53f603bb36ba66fa.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "valid_convolution.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_kernel = NULL;
cudaMalloc(&d_kernel, XSIZE*YSIZE*sizeof(float));
int k_size = XSIZE*YSIZE;
float *d_matrix = NULL;
cudaMalloc(&d_matrix, XSIZE*YSIZE*sizeof(float));
int size_x = XSIZE*YSIZE;
int size_y = XSIZE*YSIZE;
float *d_conv = NULL;
cudaMalloc(&d_conv, XSIZE*YSIZE*sizeof(float));
int max_row = 1;
int max_col = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
valid_convolution<<<gridBlock,threadBlock>>>(d_kernel,k_size,d_matrix,size_x,size_y,d_conv,max_row,max_col);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
valid_convolution<<<gridBlock,threadBlock>>>(d_kernel,k_size,d_matrix,size_x,size_y,d_conv,max_row,max_col);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
valid_convolution<<<gridBlock,threadBlock>>>(d_kernel,k_size,d_matrix,size_x,size_y,d_conv,max_row,max_col);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
d31c8b8fa19a08eba002d83a5cdd8203823d0cab.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <cutil.h>
#include <sys/time.h>
#include "../lib/bed.h"
#include "../lib/set_intersect.h"
#include "radixsort.h"
//#include "gpu.hpp"
#include "random.hpp"
#include "../lib/timer.h"
#include "set_intersect_cuda.h"
int main(int argc, char *argv[]) {
if (argc < 6) {
fprintf(stderr, "usage: %s <u> <a> <b> "
"<inter N> <sum N> <device>\n"
"e.g., order U.bed A.bed B.bed 1 1024 0\n",
argv[0]);
return 1;
}
int chrom_num = 24;
CUDA_SAFE_CALL( hipSetDevice( atoi(argv[6] ) ) );
/***********************REPLACE WITH INPUT FILE************************/
char *chrom_names[] = {
"chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8",
"chr9", "chr10", "chr11", "chr12", "chr13", "chr14", "chr15", "chr16",
"chr17", "chr18", "chr19", "chr20", "chr21", "chr22", "chrX", "chrY"
};
/**********************************************************************/
struct chr_list *U, *A, *B;
char *U_file = argv[1], *A_file = argv[2], *B_file = argv[3];
int inter_threads = atoi(argv[4]);
int sum_threads = atoi(argv[5]);
if ( ( chr_list_from_bed_file(&U, chrom_names, chrom_num, U_file) == 1) ||
( chr_list_from_bed_file(&A, chrom_names, chrom_num, A_file) == 1) ||
( chr_list_from_bed_file(&B, chrom_names, chrom_num, B_file) == 1) ) {
fprintf(stderr, "Error parsing bed files.\n");
return 1;
}
unsigned int max = add_offsets(U, chrom_num);
trim(U, A, chrom_num);
trim(U, B, chrom_num);
int A_size, B_size, U_size;
struct bed_line *U_array, *A_array, *B_array;
U_size = chr_array_from_list(U, &U_array, chrom_num);
A_size = chr_array_from_list(A, &A_array, chrom_num);
B_size = chr_array_from_list(B, &B_array, chrom_num);
unsigned int *A_start_h =
(unsigned int *) malloc( (A_size) * sizeof(unsigned int));
unsigned int *A_len_h =
(unsigned int *) malloc( (A_size) * sizeof(unsigned int));
unsigned int *B_start_h =
(unsigned int *) malloc( (B_size) * sizeof(unsigned int));
unsigned int *B_end_h =
(unsigned int *) malloc( (B_size) * sizeof(unsigned int));
/*
* In CUDA we can sort key value pairs,
* the key can be the offset, and the value can be the length
*/
set_start_len( U_array, U_size,
A_array, A_start_h, A_len_h, A_size );
set_start_end( U_array, U_size,
B_array, B_start_h, B_end_h, B_size );
// Move A and B to the device
unsigned int *A_start_d, *A_len_d, *B_start_d, *B_end_d;
hipMalloc((void **)&A_start_d, (A_size)*sizeof(unsigned int));
hipMalloc((void **)&A_len_d, (A_size)*sizeof(unsigned int));
hipMalloc((void **)&B_start_d, (B_size)*sizeof(unsigned int));
hipMalloc((void **)&B_end_d, (B_size)*sizeof(unsigned int));
start();
hipMemcpy(A_start_d, A_start_h, (A_size) * sizeof(unsigned int),
hipMemcpyHostToDevice);
hipMemcpy(A_len_d, A_len_h, (A_size) * sizeof(unsigned int),
hipMemcpyHostToDevice);
hipMemcpy(B_start_d, B_start_h, (B_size) * sizeof(unsigned int),
hipMemcpyHostToDevice);
hipMemcpy(B_end_d, B_end_h, (B_size) * sizeof(unsigned int),
hipMemcpyHostToDevice);
stop();
// R will hold the results of the intersection, for each interval A[i],
// R[i] will be the number of intervals in B that A[i] intersects,
unsigned int *R_d;
hipMalloc((void **)&R_d, (A_size)*sizeof(unsigned int));
unsigned long memup_time = report();
int block_size = 256;
dim3 dimBlock(block_size);
// *_key_d holds the start position, and *_val_d holds the length,
// the end position is *_key_d + *_val_d
//
// Each thread will search |reps| items in A, we will keep the blocksize
// fixed at 256, but we will need to adjust the grid size
int grid_size = ( A_size + block_size - 1) / (block_size * 1);
dim3 dimGridSearch( grid_size );
hipError_t err;
// Sort A
/*
nvRadixSort::RadixSort radixsortA(A_size, false);
radixsortA.sort((unsigned int*)A_start_d, (unsigned int*)A_len_d,
A_size, 32);
*/
// Sort B by start
nvRadixSort::RadixSort radixsortB_start(B_size, true);
radixsortB_start.sort((unsigned int*)B_start_d, 0, B_size, 32);
// Sort B by end
nvRadixSort::RadixSort radixsortB_end(B_size, true);
radixsortB_end.sort((unsigned int*)B_end_d, 0, B_size, 32);
hipDeviceSynchronize();
stop();
unsigned long sort_time = report();
err = hipGetLastError();
if(err != hipSuccess)
fprintf(stderr, "Sort: %s.\n", hipGetErrorString( err) );
start();
hipLaunchKernelGGL(( count_bsearch_cuda) , dim3(dimGridSearch), dim3(dimBlock) , 0, 0,
A_start_d, A_len_d, A_size,
B_start_d, B_end_d, B_size,
R_d,
1);
hipDeviceSynchronize();
stop();
unsigned long search_time = report();
start();
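// Sum the per-interval counts in R_d (the total is read back from R_d[0] below)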
parallel_sum(R_d, block_size, A_size, sum_threads);
stop();
unsigned long sum_time = report();
unsigned int O;
start();
hipMemcpy(&O, R_d, 1 * sizeof(unsigned int), hipMemcpyDeviceToHost);
stop();
unsigned long memdown_time = report();
unsigned long total = memup_time + sort_time +
search_time + sum_time + memdown_time;
fprintf(stderr,"O:%d\n", O);
printf("%d,%d,%d\tT:%ld\t"
"up:%ld,%G\t"
"sort:%ld,%G\t"
"search:%ld,%G\t"
"sum:%ld,%G\t"
"down:%ld,%G\n",
A_size,
B_size,
A_size + B_size,
total,
memup_time, (double)memup_time / (double)total,
sort_time, (double)sort_time / (double)total,
search_time, (double)search_time / (double)total,
sum_time, (double)sum_time / (double)total,
memdown_time, (double)memdown_time / (double)total
);
hipFree(A_start_d);
hipFree(A_len_d);
hipFree(B_start_d);
hipFree(B_end_d);
hipFree(R_d);
return 0;
}
|
d31c8b8fa19a08eba002d83a5cdd8203823d0cab.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cutil.h>
#include <sys/time.h>
#include "../lib/bed.h"
#include "../lib/set_intersect.h"
#include "radixsort.h"
//#include "gpu.hpp"
#include "random.hpp"
#include "../lib/timer.h"
#include "set_intersect_cuda.h"
int main(int argc, char *argv[]) {
if (argc < 6) {
fprintf(stderr, "usage: %s <u> <a> <b> "
"<inter N> <sum N> <device>\n"
"e.g., order U.bed A.bed B.bed 1 1024 0\n",
argv[0]);
return 1;
}
int chrom_num = 24;
CUDA_SAFE_CALL( cudaSetDevice( atoi(argv[6] ) ) );
/***********************REPLACE WITH INPUT FILE************************/
char *chrom_names[] = {
"chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8",
"chr9", "chr10", "chr11", "chr12", "chr13", "chr14", "chr15", "chr16",
"chr17", "chr18", "chr19", "chr20", "chr21", "chr22", "chrX", "chrY"
};
/**********************************************************************/
struct chr_list *U, *A, *B;
char *U_file = argv[1], *A_file = argv[2], *B_file = argv[3];
int inter_threads = atoi(argv[4]);
int sum_threads = atoi(argv[5]);
if ( ( chr_list_from_bed_file(&U, chrom_names, chrom_num, U_file) == 1) ||
( chr_list_from_bed_file(&A, chrom_names, chrom_num, A_file) == 1) ||
( chr_list_from_bed_file(&B, chrom_names, chrom_num, B_file) == 1) ) {
fprintf(stderr, "Error parsing bed files.\n");
return 1;
}
unsigned int max = add_offsets(U, chrom_num);
trim(U, A, chrom_num);
trim(U, B, chrom_num);
int A_size, B_size, U_size;
struct bed_line *U_array, *A_array, *B_array;
U_size = chr_array_from_list(U, &U_array, chrom_num);
A_size = chr_array_from_list(A, &A_array, chrom_num);
B_size = chr_array_from_list(B, &B_array, chrom_num);
unsigned int *A_start_h =
(unsigned int *) malloc( (A_size) * sizeof(unsigned int));
unsigned int *A_len_h =
(unsigned int *) malloc( (A_size) * sizeof(unsigned int));
unsigned int *B_start_h =
(unsigned int *) malloc( (B_size) * sizeof(unsigned int));
unsigned int *B_end_h =
(unsigned int *) malloc( (B_size) * sizeof(unsigned int));
/*
* In CUDA we can sort key value pairs,
* the key can be the offset, and the value can be the length
*/
set_start_len( U_array, U_size,
A_array, A_start_h, A_len_h, A_size );
set_start_end( U_array, U_size,
B_array, B_start_h, B_end_h, B_size );
// Move A and B to the device
unsigned int *A_start_d, *A_len_d, *B_start_d, *B_end_d;
cudaMalloc((void **)&A_start_d, (A_size)*sizeof(unsigned int));
cudaMalloc((void **)&A_len_d, (A_size)*sizeof(unsigned int));
cudaMalloc((void **)&B_start_d, (B_size)*sizeof(unsigned int));
cudaMalloc((void **)&B_end_d, (B_size)*sizeof(unsigned int));
start();
cudaMemcpy(A_start_d, A_start_h, (A_size) * sizeof(unsigned int),
cudaMemcpyHostToDevice);
cudaMemcpy(A_len_d, A_len_h, (A_size) * sizeof(unsigned int),
cudaMemcpyHostToDevice);
cudaMemcpy(B_start_d, B_start_h, (B_size) * sizeof(unsigned int),
cudaMemcpyHostToDevice);
cudaMemcpy(B_end_d, B_end_h, (B_size) * sizeof(unsigned int),
cudaMemcpyHostToDevice);
stop();
// R will hold the results of the intersection, for each interval A[i],
// R[i] will be the number of intervals in B that A[i] intersects,
unsigned int *R_d;
cudaMalloc((void **)&R_d, (A_size)*sizeof(unsigned int));
unsigned long memup_time = report();
int block_size = 256;
dim3 dimBlock(block_size);
// *_key_d holds the start position, and *_val_d holds the length,
// the end position is *_key_d + *_val_d
//
// Each thread will search |reps| items in A, we will keep the blocksize
// fixed at 256, but we will need to adjust the grid size
int grid_size = ( A_size + block_size - 1) / (block_size * 1);
dim3 dimGridSearch( grid_size );
cudaError_t err;
// Sort A
/*
nvRadixSort::RadixSort radixsortA(A_size, false);
radixsortA.sort((unsigned int*)A_start_d, (unsigned int*)A_len_d,
A_size, 32);
*/
// Sort B by start
nvRadixSort::RadixSort radixsortB_start(B_size, true);
radixsortB_start.sort((unsigned int*)B_start_d, 0, B_size, 32);
// Sort B by end
nvRadixSort::RadixSort radixsortB_end(B_size, true);
radixsortB_end.sort((unsigned int*)B_end_d, 0, B_size, 32);
cudaThreadSynchronize();
stop();
unsigned long sort_time = report();
err = cudaGetLastError();
if(err != cudaSuccess)
fprintf(stderr, "Sort: %s.\n", cudaGetErrorString( err) );
start();
count_bsearch_cuda <<<dimGridSearch, dimBlock >>> (
A_start_d, A_len_d, A_size,
B_start_d, B_end_d, B_size,
R_d,
1);
cudaThreadSynchronize();
stop();
unsigned long search_time = report();
start();
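// Sum the per-interval counts in R_d (the total is read back from R_d[0] below)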
parallel_sum(R_d, block_size, A_size, sum_threads);
stop();
unsigned long sum_time = report();
unsigned int O;
start();
cudaMemcpy(&O, R_d, 1 * sizeof(unsigned int), cudaMemcpyDeviceToHost);
stop();
unsigned long memdown_time = report();
unsigned long total = memup_time + sort_time +
search_time + sum_time + memdown_time;
fprintf(stderr,"O:%d\n", O);
printf("%d,%d,%d\tT:%ld\t"
"up:%ld,%G\t"
"sort:%ld,%G\t"
"search:%ld,%G\t"
"sum:%ld,%G\t"
"down:%ld,%G\n",
A_size,
B_size,
A_size + B_size,
total,
memup_time, (double)memup_time / (double)total,
sort_time, (double)sort_time / (double)total,
search_time, (double)search_time / (double)total,
sum_time, (double)sum_time / (double)total,
memdown_time, (double)memdown_time / (double)total
);
cudaFree(A_start_d);
cudaFree(A_len_d);
cudaFree(B_start_d);
cudaFree(B_end_d);
cudaFree(R_d);
return 0;
}
|
dd76e01ac6928849f0f257745971d81ed9c64704.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void removeRuntyPartsKernel(int size, int *partition, int *removeStencil, int *subtractions)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < size)
{
int currentNode = partition[idx];
if(removeStencil[currentNode] == 1)
partition[idx] = -1;
else
partition[idx] -= subtractions[currentNode];
}
}
|
dd76e01ac6928849f0f257745971d81ed9c64704.cu
|
#include "includes.h"
__global__ void removeRuntyPartsKernel(int size, int *partition, int *removeStencil, int *subtractions)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < size)
{
int currentNode = partition[idx];
if(removeStencil[currentNode] == 1)
partition[idx] = -1;
else
partition[idx] -= subtractions[currentNode];
}
}
|
4bee5d5efed453b0620f516df59fed81990089c5.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/triton_kernel.h"
#include "core/framework/tunable.h"
#include <fstream>
#include <thread>
#ifdef USE_TRITON_KERNEL
#include <dlfcn.h>
#include "triton_kernel_infos.h"
#endif
#define ORT_TRITON_CHECK(expr, msg) \
do { \
auto status = expr; \
const char* error_str; \
if (status != hipSuccess) { \
auto get_status_err_str = hipGetErrorString(status, &error_str); \
ORT_UNUSED_PARAMETER(get_status_err_str); \
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, msg, " ", error_str); \
} \
} while (0)
#define ORT_TRITON_THROW(expr, msg) \
do { \
auto status = expr; \
const char* error_str; \
if (status != hipSuccess) { \
auto get_status_err_str = hipGetErrorString(status, &error_str); \
ORT_UNUSED_PARAMETER(get_status_err_str); \
ORT_THROW(msg, error_str); \
} \
} while (0)
namespace onnxruntime {
namespace cuda {
namespace {
// A vector of kernel metadata
static std::vector<TritonKernelMetaData> ort_triton_kernel_metadata;
// Store group_name -> [kernel metadata id vector]
static std::unordered_map<std::string, std::vector<int>> ort_triton_kernel_group_map;
#ifdef USE_TRITON_KERNEL
// Store func_name -> kernel metadata id
static std::unordered_map<std::string, int> ort_triton_kernel_map;
const int GPU_WARP_SIZE = 32;
constexpr int kMaxThreadsPerBlock = 1024;
// Currently the max shared memory per block is hardcoded to 64KB.
constexpr int kMaxSharedMemoryPerBlock = 64 * 1024;
Status GetSymbolFromLibrary(const std::string& symbol_name, void** symbol) {
dlerror(); // Clear any old error str
// Use RTLD_DEFAULT for search current lib.so.
// Value of RTLD_DEFAULT differs across posix platforms (-2 on macos, 0 on linux).
void* handle = RTLD_DEFAULT;
*symbol = dlsym(handle, symbol_name.c_str());
char* error_str = dlerror();
if (error_str) {
Status status = ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT,
"Failed to get symbol " + symbol_name + " with error: " + error_str);
return status;
}
// It's possible to get a NULL symbol in our case when Schemas are not custom.
return Status::OK();
}
#endif
/*
* Try to load CUDA kernels that are compiled by Triton.
* They are in hsaco/cubin format, and should use hipModuleLoad to load these kernels.
*/
void TryToLoadKernel() {
auto status = Status::OK();
#ifdef USE_TRITON_KERNEL
// get all kernel symbols from the current lib.so
size_t size = sizeof(kernel_infos) / sizeof(kernel_infos[0]);
for (int i = 0; i < size; ++i) {
auto k_i = kernel_infos[i];
void* buff;
ORT_THROW_IF_ERROR(GetSymbolFromLibrary(k_i.name_start, &buff));
// try to load module and get function
hipModule_t module;
ORT_TRITON_THROW(hipModuleLoadData(&module, buff), "Loading module data failed.");
hipFunction_t function;
ORT_TRITON_THROW(hipModuleGetFunction(&function, module, k_i.func_name), "Getting function from module failed.");
// setup kernel metadata
TritonKernelMetaData metadata;
metadata.num_warps = k_i.num_warps;
metadata.shared_mem_size = k_i.shared;
metadata.func = function;
std::string fname = k_i.name; // name is not same as func_name
metadata.name = fname;
std::string group_name = k_i.group_name;
// pass constants
for (auto& kv : k_i.constants) {
metadata.constants[kv.first] = kv.second;
}
auto idx = ort_triton_kernel_metadata.size();
ort_triton_kernel_metadata.push_back(metadata);
ort_triton_kernel_map[fname] = idx;
ort_triton_kernel_group_map[group_name].push_back(idx);
LOGS_DEFAULT(VERBOSE) << "Loaded ort triton kernel: " << fname << " idx: " << idx;
}
#endif
ORT_THROW_IF_ERROR(status);
}
static std::once_flag load_ort_triton_kernel_flag;
} // namespace
void LoadOrtTritonKernel() {
// load kernel should be called only once
std::call_once(load_ort_triton_kernel_flag, TryToLoadKernel);
}
Status LaunchTritonKernel(hipStream_t stream, std::string fname,
int grid0, int grid1, int grid2, void* args, size_t args_size) {
#ifdef USE_TRITON_KERNEL
if (ort_triton_kernel_map.count(fname) == 0) {
// Return unsupported status if function name not found in registry.
// This error status will be used by TunableOp
std::ostringstream message_stream;
message_stream << "Can't find ort triton kernel name: " << fname;
std::string message = message_stream.str();
TUNABLE_OP_RETURN_UNSUPPORTED_ARGUMENT_IF(true, message);
}
auto idx = ort_triton_kernel_map[fname];
return LaunchTritonKernel(stream, idx, grid0, grid1, grid2, args, args_size);
#else
return Status::OK();
#endif
}
Status LaunchTritonKernel(hipStream_t stream, size_t idx,
int grid0, int grid1, int grid2, void* args, size_t args_size) {
#ifdef USE_TRITON_KERNEL
if (idx >= ort_triton_kernel_metadata.size()) {
// Return unsupported status when idx exceeds the size of ort_triton_kernel_metadata.
// This error status will be used by TunableOp
std::ostringstream message_stream;
message_stream << "Can't find ort triton kernel idx: " << idx;
std::string message = message_stream.str();
TUNABLE_OP_RETURN_UNSUPPORTED_ARGUMENT_IF(true, message);
}
auto metadata = ort_triton_kernel_metadata[idx];
int threads_per_block = GPU_WARP_SIZE * metadata.num_warps;
TUNABLE_OP_RETURN_UNSUPPORTED_ARGUMENT_IF(
threads_per_block > kMaxThreadsPerBlock,
"The threads_per_block (", threads_per_block, ") exceeds the max allowed value (", kMaxThreadsPerBlock, ").");
TUNABLE_OP_RETURN_UNSUPPORTED_ARGUMENT_IF(
metadata.shared_mem_size > kMaxSharedMemoryPerBlock,
"The shared_mem_size (", metadata.shared_mem_size, ") exceeds the max allowed value (",
kMaxSharedMemoryPerBlock, " bytes).");
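// Triton kernels take their arguments as one packed buffer, passed through the
// extra-options array instead of individual kernel parameters.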
void* config[] = {HIP_LAUNCH_PARAM_BUFFER_POINTER, args, HIP_LAUNCH_PARAM_BUFFER_SIZE, &args_size,
HIP_LAUNCH_PARAM_END};
ORT_TRITON_CHECK(hipModuleLaunchKernel(metadata.func,
grid0, grid1, grid2,
threads_per_block, 1, 1,
metadata.shared_mem_size,
stream,
nullptr,
(void**)&config),
"Launching kernel failed.");
#endif
return Status::OK();
}
const TritonKernelMetaData* GetOrtTritonKernelMetadata(size_t idx) {
if (idx >= ort_triton_kernel_metadata.size()) {
return nullptr;
}
return &ort_triton_kernel_metadata[idx];
}
const std::vector<int>* GetOrtTritonKernelByGroup(std::string group_name) {
if (ort_triton_kernel_group_map.count(group_name) == 0) {
return nullptr;
}
return &ort_triton_kernel_group_map.at(group_name);
}
} // namespace cuda
} // namespace onnxruntime
|
4bee5d5efed453b0620f516df59fed81990089c5.cu
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/triton_kernel.h"
#include "core/framework/tunable.h"
#include <fstream>
#include <thread>
#ifdef USE_TRITON_KERNEL
#include <dlfcn.h>
#include "triton_kernel_infos.h"
#endif
#define ORT_TRITON_CHECK(expr, msg) \
do { \
auto status = expr; \
const char* error_str; \
if (status != CUDA_SUCCESS) { \
auto get_status_err_str = cuGetErrorString(status, &error_str); \
ORT_UNUSED_PARAMETER(get_status_err_str); \
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, msg, " ", error_str); \
} \
} while (0)
#define ORT_TRITON_THROW(expr, msg) \
do { \
auto status = expr; \
const char* error_str; \
if (status != CUDA_SUCCESS) { \
auto get_status_err_str = cuGetErrorString(status, &error_str); \
ORT_UNUSED_PARAMETER(get_status_err_str); \
ORT_THROW(msg, error_str); \
} \
} while (0)
namespace onnxruntime {
namespace cuda {
namespace {
// A vector of kernel metadata
static std::vector<TritonKernelMetaData> ort_triton_kernel_metadata;
// Store group_name -> [kernel metadata id vector]
static std::unordered_map<std::string, std::vector<int>> ort_triton_kernel_group_map;
#ifdef USE_TRITON_KERNEL
// Store func_name -> kernel metadata id
static std::unordered_map<std::string, int> ort_triton_kernel_map;
const int GPU_WARP_SIZE = 32;
constexpr int kMaxThreadsPerBlock = 1024;
// Currently the max shared memory per block is hardcoded to 64KB.
constexpr int kMaxSharedMemoryPerBlock = 64 * 1024;
Status GetSymbolFromLibrary(const std::string& symbol_name, void** symbol) {
dlerror(); // Clear any old error str
// Use RTLD_DEFAULT for search current lib.so.
// Value of RTLD_DEFAULT differs across posix platforms (-2 on macos, 0 on linux).
void* handle = RTLD_DEFAULT;
*symbol = dlsym(handle, symbol_name.c_str());
char* error_str = dlerror();
if (error_str) {
Status status = ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT,
"Failed to get symbol " + symbol_name + " with error: " + error_str);
return status;
}
// It's possible to get a NULL symbol in our case when Schemas are not custom.
return Status::OK();
}
#endif
/*
* Try to load CUDA kernels that are compiled by Triton.
* They are in hsaco/cubin format, and should use cuModuleLoad to load these kernels.
*/
void TryToLoadKernel() {
auto status = Status::OK();
#ifdef USE_TRITON_KERNEL
// get all kernel symbols from the current lib.so
size_t size = sizeof(kernel_infos) / sizeof(kernel_infos[0]);
for (int i = 0; i < size; ++i) {
auto k_i = kernel_infos[i];
void* buff;
ORT_THROW_IF_ERROR(GetSymbolFromLibrary(k_i.name_start, &buff));
// try to load module and get function
CUmodule module;
ORT_TRITON_THROW(cuModuleLoadData(&module, buff), "Loading module data failed.");
CUfunction function;
ORT_TRITON_THROW(cuModuleGetFunction(&function, module, k_i.func_name), "Getting function from module failed.");
// setup kernel metadata
TritonKernelMetaData metadata;
metadata.num_warps = k_i.num_warps;
metadata.shared_mem_size = k_i.shared;
metadata.func = function;
std::string fname = k_i.name; // name is not same as func_name
metadata.name = fname;
std::string group_name = k_i.group_name;
// pass constants
for (auto& kv : k_i.constants) {
metadata.constants[kv.first] = kv.second;
}
auto idx = ort_triton_kernel_metadata.size();
ort_triton_kernel_metadata.push_back(metadata);
ort_triton_kernel_map[fname] = idx;
ort_triton_kernel_group_map[group_name].push_back(idx);
LOGS_DEFAULT(VERBOSE) << "Loaded ort triton kernel: " << fname << " idx: " << idx;
}
#endif
ORT_THROW_IF_ERROR(status);
}
static std::once_flag load_ort_triton_kernel_flag;
} // namespace
void LoadOrtTritonKernel() {
// load kernel should be called only once
std::call_once(load_ort_triton_kernel_flag, TryToLoadKernel);
}
Status LaunchTritonKernel(cudaStream_t stream, std::string fname,
int grid0, int grid1, int grid2, void* args, size_t args_size) {
#ifdef USE_TRITON_KERNEL
if (ort_triton_kernel_map.count(fname) == 0) {
// Return unsupported status if function name not found in registry.
// This error status will be used by TunableOp
std::ostringstream message_stream;
message_stream << "Can't find ort triton kernel name: " << fname;
std::string message = message_stream.str();
TUNABLE_OP_RETURN_UNSUPPORTED_ARGUMENT_IF(true, message);
}
auto idx = ort_triton_kernel_map[fname];
return LaunchTritonKernel(stream, idx, grid0, grid1, grid2, args, args_size);
#else
return Status::OK();
#endif
}
Status LaunchTritonKernel(cudaStream_t stream, size_t idx,
int grid0, int grid1, int grid2, void* args, size_t args_size) {
#ifdef USE_TRITON_KERNEL
if (idx >= ort_triton_kernel_metadata.size()) {
// Return unsupported status when idx exceeds the size of ort_triton_kernel_metadata.
// This error status will be used by TunableOp
std::ostringstream message_stream;
message_stream << "Can't find ort triton kernel idx: " << idx;
std::string message = message_stream.str();
TUNABLE_OP_RETURN_UNSUPPORTED_ARGUMENT_IF(true, message);
}
auto metadata = ort_triton_kernel_metadata[idx];
int threads_per_block = GPU_WARP_SIZE * metadata.num_warps;
TUNABLE_OP_RETURN_UNSUPPORTED_ARGUMENT_IF(
threads_per_block > kMaxThreadsPerBlock,
"The threads_per_block (", threads_per_block, ") exceeds the max allowed value (", kMaxThreadsPerBlock, ").");
TUNABLE_OP_RETURN_UNSUPPORTED_ARGUMENT_IF(
metadata.shared_mem_size > kMaxSharedMemoryPerBlock,
"The shared_mem_size (", metadata.shared_mem_size, ") exceeds the max allowed value (",
kMaxSharedMemoryPerBlock, " bytes).");
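// Triton kernels take their arguments as one packed buffer, passed through the
// extra-options array instead of individual kernel parameters.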
void* config[] = {CU_LAUNCH_PARAM_BUFFER_POINTER, args, CU_LAUNCH_PARAM_BUFFER_SIZE, &args_size,
CU_LAUNCH_PARAM_END};
ORT_TRITON_CHECK(cuLaunchKernel(metadata.func,
grid0, grid1, grid2,
threads_per_block, 1, 1,
metadata.shared_mem_size,
stream,
nullptr,
(void**)&config),
"Launching kernel failed.");
#endif
return Status::OK();
}
const TritonKernelMetaData* GetOrtTritonKernelMetadata(size_t idx) {
if (idx >= ort_triton_kernel_metadata.size()) {
return nullptr;
}
return &ort_triton_kernel_metadata[idx];
}
const std::vector<int>* GetOrtTritonKernelByGroup(std::string group_name) {
if (ort_triton_kernel_group_map.count(group_name) == 0) {
return nullptr;
}
return &ort_triton_kernel_group_map.at(group_name);
}
} // namespace cuda
} // namespace onnxruntime
|
1ac115bda32d2e1d6ed97c3c9f8cb0791eed79fb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "AddVector.h"
#include <assert.h>
#include <Device.h>
#include <DeviceWrapperTemplate.h>
#include <iostream>
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n);
extern __global__ void addVector11(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Constructor *|
\*-------------------------------------*/
AddVector::AddVector(const Grid& grid, float* ptrV1, float* ptrV2, float* ptrW, int n) :
ptrV1(ptrV1), ptrV2(ptrV2), ptrW(ptrW), n(n)
{
this->sizeVector = n * sizeof(float); // bytes
// MM
{
// MM (malloc Device)
{
Device::malloc(&ptrDevV1, sizeVector);
// TODO ptrV2
Device::malloc(&ptrDevV2, sizeVector);
// TODO ptrW
Device::malloc(&ptrDevW, sizeVector);
}
// MM (copy Host->Device)
{
Device::memcpyHToD(ptrDevV1, ptrV1, sizeVector);
// TODO ptrV2
Device::memcpyHToD(ptrDevV2, ptrV2, sizeVector);
}
}
// Grid
{
this->dg = grid.dg;
this->db = grid.db;
}
}
AddVector::~AddVector(void)
{
//MM (device free)
{
Device::free(ptrDevV1);
// TODO ptrV2
Device::free(ptrDevV2);
// TODO ptrW
Device::free(ptrDevW);
}
}
/*--------------------------------------*\
|* Method *|
\*-------------------------------------*/
void AddVector::run()
{
//v1 interleaved pattern
//addVector<<<dg,db>>>(ptrDevV1, ptrDevV2, ptrDevW, n);// asynchronous
//v2 pattern 1<-->1
{
//assert(db.x*db.y*db.z*dg.x*dg.y*dg.z==n);
hipLaunchKernelGGL(( addVector11), dim3(dg),dim3(db), 0, 0, ptrDevV1, ptrDevV2, ptrDevW, n); // asynchronous
}
//Device::synchronize(); // Temp/debug, only needed for printf on the GPU, otherwise not necessary
// MM (Device -> Host)
{
// TODO
Device::memcpyDToH(ptrW, ptrDevW, sizeVector);
}
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
1ac115bda32d2e1d6ed97c3c9f8cb0791eed79fb.cu
|
#include "AddVector.h"
#include <assert.h>
#include <Device.h>
#include <DeviceWrapperTemplate.h>
#include <iostream>
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n);
extern __global__ void addVector11(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Constructor *|
\*-------------------------------------*/
AddVector::AddVector(const Grid& grid, float* ptrV1, float* ptrV2, float* ptrW, int n) :
ptrV1(ptrV1), ptrV2(ptrV2), ptrW(ptrW), n(n)
{
this->sizeVector = n * sizeof(float); // bytes
// MM
{
// MM (malloc Device)
{
Device::malloc(&ptrDevV1, sizeVector);
// TODO ptrV2
Device::malloc(&ptrDevV2, sizeVector);
// TODO ptrW
Device::malloc(&ptrDevW, sizeVector);
}
// MM (copy Host->Device)
{
Device::memcpyHToD(ptrDevV1, ptrV1, sizeVector);
// TODO ptrV2
Device::memcpyHToD(ptrDevV2, ptrV2, sizeVector);
}
}
// Grid
{
this->dg = grid.dg;
this->db = grid.db;
}
}
AddVector::~AddVector(void)
{
//MM (device free)
{
Device::free(ptrDevV1);
// TODO ptrV2
Device::free(ptrDevV2);
// TODO ptrW
Device::free(ptrDevW);
}
}
/*--------------------------------------*\
|* Method *|
\*-------------------------------------*/
void AddVector::run()
{
//v1 interleaved (grid-stride) pattern
//addVector<<<dg,db>>>(ptrDevV1, ptrDevV2, ptrDevW, n);// asynchronous
//v2 pattern 1<-->1
{
//assert(db.x*db.y*db.z*dg.x*dg.y*dg.z==n);
addVector11<<<dg,db>>>(ptrDevV1, ptrDevV2, ptrDevW, n); // asynchronous
}
//Device::synchronize(); // Temporary, debug only (needed for printf on the GPU); otherwise not necessary
// MM (Device -> Host)
{
// TODO
Device::memcpyDToH(ptrW, ptrDevW, sizeVector);
}
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
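Both copies above only declare addVector and addVector11 as extern; the kernel bodies live in a separate source file that is not part of this pair. As a rough, hedged sketch of what the two launch patterns named in AddVector::run() usually look like (v1 interleaved/grid-stride, v2 one thread per element), assuming plain element-wise addition:
// Illustrative sketch only; the real kernels are defined elsewhere.
// v1: interleaved (grid-stride) pattern - any grid size covers all n elements.
__global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n)
{
    const int NB_THREAD = gridDim.x * blockDim.x;
    const int TID = blockIdx.x * blockDim.x + threadIdx.x;
    int s = TID;
    while (s < n)
    {
        ptrDevW[s] = ptrDevV1[s] + ptrDevV2[s];
        s += NB_THREAD;
    }
}
// v2: 1<-->1 pattern - one thread per element (see the commented assert in run()).
__global__ void addVector11(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n)
{
    const int TID = blockIdx.x * blockDim.x + threadIdx.x;
    if (TID < n)
    {
        ptrDevW[TID] = ptrDevV1[TID] + ptrDevV2[TID];
    }
}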
8fe54cb79371f48ec7caddfc1df851631b21a59a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <valarray>
#include <assert.h>
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "kernel_hip.cuh"
#define TILE_WIDTH 16
#define TILE_HEIGHT 16
// Transfers a greyscale image from the GPU back to the CPU
void device_to_img_grey(Rgb *device_img, cv::Mat& img)
{
int width = img.rows;
int height = img.cols;
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++)
img.at<uchar>(j, i) = device_img[j + i * width].r;
}
// Transfers the image from GPU to CPU
void device_to_img(Rgb *device_img, cv::Mat& img)
{
int width = img.rows;
int height = img.cols;
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++)
{
img.at<cv::Vec3b>(j, i)[0] = device_img[j + i * width].r;
img.at<cv::Vec3b>(j, i)[1] = device_img[j + i * width].g;
img.at<cv::Vec3b>(j, i)[2] = device_img[j + i * width].b;
}
}
// Pushes a greyscale image from the CPU to the GPU
double *img_to_device_grey(cv::Mat img)
{
double *device_img;
int width = img.rows;
int height = img.cols;
hipMallocManaged(&device_img, width * height * sizeof (double));
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++)
device_img[j + i * width] = img.at<uchar>(j, i);
return device_img;
}
// Pushes an image from the CPU to the GPU
Rgb *img_to_device(cv::Mat img)
{
Rgb *device_img;
int width = img.rows;
int height = img.cols;
hipMallocManaged(&device_img, width * height * sizeof (Rgb));
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++)
device_img[j + i * width] = Rgb(img.at<cv::Vec3b>(j, i));
return device_img;
}
// Allocates an empty greyscale image on the GPU
double *empty_img_device_grey(cv::Mat img)
{
double *device_img;
int width = img.rows;
int height = img.cols;
hipMallocManaged(&device_img, width * height * sizeof (double));
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++)
device_img[j + i * width] = 0.0;
return device_img;
}
// Allocates an empty image on the GPU
Rgb *empty_img_device(cv::Mat img)
{
Rgb *device_img;
int width = img.rows;
int height = img.cols;
hipMallocManaged(&device_img, width * height * sizeof (Rgb));
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++)
device_img[j + i * width] = Rgb(0.0, 0.0, 0.0);
return device_img;
}
// Implementation of the convolution algorithm with a shared memory optimization
void kernel_shared_conv_host(Rgb* device_img, Rgb* img, int width, int height, int r)
{
int strel_size = 2 * r + r % 2;
if (strel_size <= 0 or strel_size > 16)
{
std::cout << "\nerror: <Strel_size> parameter must be between 1 and 16 due to shared memory constaint.\n" << std::endl;
assert(strel_size > 0 and strel_size < 16);
return;
}
// Creation of the gpu unit grid
int block_w = TILE_WIDTH + 2 * r;
dim3 blockSize = dim3(block_w, block_w);
int bx = (width / TILE_WIDTH - 1) + blockSize.x;
int by = (height / TILE_HEIGHT - 1) + blockSize.y;
dim3 gridSize = dim3(bx, by);
// Call the kernel shared_conv
hipLaunchKernelGGL(( kernel_shared_conv), dim3(gridSize), dim3(blockSize), block_w * block_w * sizeof (Rgb), 0, device_img, img, width, height, strel_size);
}
// Implementation of the convolution algorithm
void kernel_conv_host(Rgb* device_img, Rgb* img, int width, int height, int conv_size)
{
if (conv_size <= 0)
{
std::cout << "\nerror: <Conv_size> parameter must be strictly greater than 0.\n" << std::endl;
assert(conv_size > 0);
return;
}
// Creation of the gpu unit grid
dim3 blockSize = dim3(TILE_WIDTH, TILE_WIDTH);
int bx = (width + blockSize.x - 1) / blockSize.x;
int by = (height + blockSize.y - 1) / blockSize.y;
dim3 gridSize = dim3(bx, by);
// Calls the kernel conv
hipLaunchKernelGGL(( kernel_conv), dim3(gridSize), dim3(blockSize), 0, 0, device_img, img, width, height, conv_size);
}
void kernel_pixelize_host(Rgb* device_img, Rgb* img, int width, int height, int pix_size)
{
if (pix_size <= 1 or pix_size > 32)
{
std::cout << "\nerror: <Pix_size> parameter must be between 2 and 32 included.\n" << std::endl;
assert(pix_size > 1 and pix_size < 33);
return;
}
// Creation of the gpu unit grid
dim3 blockSize = dim3(pix_size, pix_size);
int bx = (width + blockSize.x - 1) / blockSize.x;
int by = (height + blockSize.y - 1) / blockSize.y;
dim3 gridSize = dim3(bx, by);
// Call to the pixelize kernel
hipLaunchKernelGGL(( kernel_pixelize), dim3(gridSize), dim3(blockSize), pix_size * pix_size * sizeof (Rgb), 0, device_img, img, width, height, pix_size);
}
// Implementation of the K-Nearest Neighbors algorithm for de-noising an image
void kernel_knn_host(Rgb* device_img, Rgb* img, int width, int height, int conv_size, double h_param)
{
// Creation of the gpu unit grid
dim3 blockSize = dim3(TILE_WIDTH, TILE_WIDTH);
int bx = (width + blockSize.x - 1) / blockSize.x;
int by = (height + blockSize.y - 1) / blockSize.y;
dim3 gridSize = dim3(bx, by);
// Call to the knn kernel
hipLaunchKernelGGL(( knn), dim3(gridSize), dim3(blockSize), 0, 0, device_img, img, width, height, conv_size, h_param);
}
// Implementation of the K-Nearest Neighbors algorithm for de-noising an image
// with a shared memory optimization
void kernel_shared_knn_host(Rgb* device_img, Rgb* img, int width, int height, int r, double h_param)
{
int strel_size = 2 * r + r % 2;
if (strel_size <= 0 or strel_size > 16)
{
std::cout << "\nerror: <Strel_size> parameter must be between 1 and 16 due to shared memory constaint.\n" << std::endl;
assert(strel_size > 0 and strel_size < 16);
return;
}
// Creation of the gpu unit grid
int block_w = TILE_WIDTH + 2 * r;
dim3 blockSize = dim3(block_w, block_w);
int bx = (width / TILE_WIDTH - 1) + blockSize.x;
int by = (height / TILE_HEIGHT - 1) + blockSize.y;
dim3 gridSize = dim3(bx, by);
// Call to the shared knn kernel
hipLaunchKernelGGL(( shared_knn), dim3(gridSize), dim3(blockSize), block_w * block_w * sizeof (Rgb), 0, device_img, img, width, height, strel_size, h_param);
}
// Implementation of the Non-Local Means algorithm for de-noising an image
void kernel_nlm_host(Rgb* device_img, Rgb* img, int width, int height, int conv_size, int block_radius, double h_param)
{
// Creation of the gpu unit grid
dim3 blockSize = dim3(TILE_WIDTH, TILE_WIDTH);
int bx = (width + blockSize.x - 1) / blockSize.x;
int by = (height + blockSize.y - 1) / blockSize.y;
dim3 gridSize = dim3(bx, by);
// Call to the nlm kernel
hipLaunchKernelGGL(( nlm), dim3(gridSize), dim3(blockSize), 0, 0, device_img, img, width, height, conv_size, block_radius, h_param);
}
// Implementation of the Canny Edge detection algorithm
void kernel_edge_detect(Rgb* device_img, double* img, int width, int height, int conv_size, double otsu_threshold)
{
// Creation of the gpu unit grid
dim3 blockSize = dim3(TILE_WIDTH, TILE_WIDTH);
int bx = (width + blockSize.x - 1) / blockSize.x;
int by = (height + blockSize.y - 1) / blockSize.y;
dim3 gridSize = dim3(bx, by);
// Preprocessing
// Apply a convolution on the image using the Sobel kernel
hipLaunchKernelGGL(( sobel_conv), dim3(gridSize), dim3(blockSize), 0, 0, device_img, img, width, height, conv_size);
hipDeviceSynchronize();
hipLaunchKernelGGL(( non_max_suppr), dim3(gridSize), dim3(blockSize), 0, 0, device_img, img, width, height, otsu_threshold);
hipDeviceSynchronize();
// Run the hysteresis algorithm; iterate until the image is unchanged
int *changed_device;
int changed_host = 0; // copy the device flag by value instead of through an uninitialized pointer
hipMallocManaged(&changed_device, 1 * sizeof (int));
hipLaunchKernelGGL(( hysterysis), dim3(gridSize), dim3(blockSize), 0, 0, device_img, changed_device, width, height, otsu_threshold * 0.5);
hipDeviceSynchronize();
hipMemcpy(&changed_host, changed_device, sizeof (int), hipMemcpyDeviceToHost);
while (changed_host)
{
hipLaunchKernelGGL(( hysterysis), dim3(gridSize), dim3(blockSize), 0, 0, device_img, changed_device, width, height, otsu_threshold * 0.5);
hipDeviceSynchronize();
hipMemcpy(&changed_host, changed_device, sizeof (int), hipMemcpyDeviceToHost);
}
hipFree(changed_device);
}
|
8fe54cb79371f48ec7caddfc1df851631b21a59a.cu
|
#include <stdio.h>
#include <iostream>
#include <valarray>
#include <assert.h>
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "kernel.cuh"
#define TILE_WIDTH 16
#define TILE_HEIGHT 16
// Transfers a greyscale image from the GPU back to the CPU
void device_to_img_grey(Rgb *device_img, cv::Mat& img)
{
int width = img.rows;
int height = img.cols;
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++)
img.at<uchar>(j, i) = device_img[j + i * width].r;
}
// Transfers the image from GPU to CPU
void device_to_img(Rgb *device_img, cv::Mat& img)
{
int width = img.rows;
int height = img.cols;
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++)
{
img.at<cv::Vec3b>(j, i)[0] = device_img[j + i * width].r;
img.at<cv::Vec3b>(j, i)[1] = device_img[j + i * width].g;
img.at<cv::Vec3b>(j, i)[2] = device_img[j + i * width].b;
}
}
// Pushes a greyscale image from the CPU to the GPU
double *img_to_device_grey(cv::Mat img)
{
double *device_img;
int width = img.rows;
int height = img.cols;
cudaMallocManaged(&device_img, width * height * sizeof (double));
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++)
device_img[j + i * width] = img.at<uchar>(j, i);
return device_img;
}
// Pushes an image from the CPU to the GPU
Rgb *img_to_device(cv::Mat img)
{
Rgb *device_img;
int width = img.rows;
int height = img.cols;
cudaMallocManaged(&device_img, width * height * sizeof (Rgb));
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++)
device_img[j + i * width] = Rgb(img.at<cv::Vec3b>(j, i));
return device_img;
}
// Allocates an empty greyscale image on the GPU
double *empty_img_device_grey(cv::Mat img)
{
double *device_img;
int width = img.rows;
int height = img.cols;
cudaMallocManaged(&device_img, width * height * sizeof (double));
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++)
device_img[j + i * width] = 0.0;
return device_img;
}
// Allocates an empty image on the GPU
Rgb *empty_img_device(cv::Mat img)
{
Rgb *device_img;
int width = img.rows;
int height = img.cols;
cudaMallocManaged(&device_img, width * height * sizeof (Rgb));
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++)
device_img[j + i * width] = Rgb(0.0, 0.0, 0.0);
return device_img;
}
// Implementation of the convolution algorithm with a shared memory optimization
void kernel_shared_conv_host(Rgb* device_img, Rgb* img, int width, int height, int r)
{
int strel_size = 2 * r + r % 2;
if (strel_size <= 0 or strel_size > 16)
{
std::cout << "\nerror: <Strel_size> parameter must be between 1 and 16 due to shared memory constaint.\n" << std::endl;
assert(strel_size > 0 and strel_size < 16);
return;
}
// Creation of the gpu unit grid
int block_w = TILE_WIDTH + 2 * r;
dim3 blockSize = dim3(block_w, block_w);
int bx = (width / TILE_WIDTH - 1) + blockSize.x;
int by = (height / TILE_HEIGHT - 1) + blockSize.y;
dim3 gridSize = dim3(bx, by);
// Call the kernel shared_conv
kernel_shared_conv<<<gridSize, blockSize, block_w * block_w * sizeof (Rgb)>>>(device_img, img, width, height, strel_size);
}
// Implementation of the convolution algorithm
void kernel_conv_host(Rgb* device_img, Rgb* img, int width, int height, int conv_size)
{
if (conv_size <= 0)
{
std::cout << "\nerror: <Conv_size> parameter must be strictly greater than 0.\n" << std::endl;
assert(conv_size > 0);
return;
}
// Creation of the gpu unit grid
dim3 blockSize = dim3(TILE_WIDTH, TILE_WIDTH);
int bx = (width + blockSize.x - 1) / blockSize.x;
int by = (height + blockSize.y - 1) / blockSize.y;
dim3 gridSize = dim3(bx, by);
// Calls the kernel conv
kernel_conv<<<gridSize, blockSize>>>(device_img, img, width, height, conv_size);
}
void kernel_pixelize_host(Rgb* device_img, Rgb* img, int width, int height, int pix_size)
{
if (pix_size <= 1 or pix_size > 32)
{
std::cout << "\nerror: <Pix_size> parameter must be between 2 and 32 included.\n" << std::endl;
assert(pix_size > 1 and pix_size < 33);
return;
}
// Creation of the gpu unit grid
dim3 blockSize = dim3(pix_size, pix_size);
int bx = (width + blockSize.x - 1) / blockSize.x;
int by = (height + blockSize.y - 1) / blockSize.y;
dim3 gridSize = dim3(bx, by);
// Call to the pixelize kernel
kernel_pixelize<<<gridSize, blockSize, pix_size * pix_size * sizeof (Rgb)>>>(device_img, img, width, height, pix_size);
}
// Implementation of the K-Nearest Neighbors algorithm for de-noising an image
void kernel_knn_host(Rgb* device_img, Rgb* img, int width, int height, int conv_size, double h_param)
{
// Creation of the gpu unit grid
dim3 blockSize = dim3(TILE_WIDTH, TILE_WIDTH);
int bx = (width + blockSize.x - 1) / blockSize.x;
int by = (height + blockSize.y - 1) / blockSize.y;
dim3 gridSize = dim3(bx, by);
// Call to the knn kernel
knn<<<gridSize, blockSize>>>(device_img, img, width, height, conv_size, h_param);
}
// Implementation of the K-Nearest Neighbors algorithm for de-noising an image
// with a shared memory optimization
void kernel_shared_knn_host(Rgb* device_img, Rgb* img, int width, int height, int r, double h_param)
{
int strel_size = 2 * r + r % 2;
if (strel_size <= 0 or strel_size > 16)
{
std::cout << "\nerror: <Strel_size> parameter must be between 1 and 16 due to shared memory constaint.\n" << std::endl;
assert(strel_size > 0 and strel_size < 16);
return;
}
// Creation of the gpu unit grid
int block_w = TILE_WIDTH + 2 * r;
dim3 blockSize = dim3(block_w, block_w);
int bx = (width / TILE_WIDTH - 1) + blockSize.x;
int by = (height / TILE_HEIGHT - 1) + blockSize.y;
dim3 gridSize = dim3(bx, by);
// Call to the shared knn kernel
shared_knn<<<gridSize, blockSize, block_w * block_w * sizeof (Rgb)>>>(device_img, img, width, height, strel_size, h_param);
}
// Implementation of the Non-Local Means algorithm for de-noising an image
void kernel_nlm_host(Rgb* device_img, Rgb* img, int width, int height, int conv_size, int block_radius, double h_param)
{
// Creation of the gpu unit grid
dim3 blockSize = dim3(TILE_WIDTH, TILE_WIDTH);
int bx = (width + blockSize.x - 1) / blockSize.x;
int by = (height + blockSize.y - 1) / blockSize.y;
dim3 gridSize = dim3(bx, by);
// Call to the nlm kernel
nlm<<<gridSize, blockSize>>>(device_img, img, width, height, conv_size, block_radius, h_param);
}
// Implementation of the Canny Edge detection algorithm
void kernel_edge_detect(Rgb* device_img, double* img, int width, int height, int conv_size, double otsu_threshold)
{
// Creation of the gpu unit grid
dim3 blockSize = dim3(TILE_WIDTH, TILE_WIDTH);
int bx = (width + blockSize.x - 1) / blockSize.x;
int by = (height + blockSize.y - 1) / blockSize.y;
dim3 gridSize = dim3(bx, by);
// Preprocessing
// Apply a convolution on the image using the Sobel kernel
sobel_conv<<<gridSize, blockSize>>>(device_img, img, width, height, conv_size);
cudaDeviceSynchronize();
non_max_suppr<<<gridSize, blockSize>>>(device_img, img, width, height, otsu_threshold);
cudaDeviceSynchronize();
// Run the hysteresis algorithm; iterate until the image is unchanged
int *changed_device;
int changed_host = 0; // copy the device flag by value instead of through an uninitialized pointer
cudaMallocManaged(&changed_device, 1 * sizeof (int));
hysterysis<<<gridSize, blockSize>>>(device_img, changed_device, width, height, otsu_threshold * 0.5);
cudaDeviceSynchronize();
cudaMemcpy(&changed_host, changed_device, sizeof (int), cudaMemcpyDeviceToHost);
while (changed_host)
{
hysterysis<<<gridSize, blockSize>>>(device_img, changed_device, width, height, otsu_threshold * 0.5);
cudaDeviceSynchronize();
cudaMemcpy(&changed_host, changed_device, sizeof (int), cudaMemcpyDeviceToHost);
}
cudaFree(changed_device);
}
|
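The helpers in this pair rely on an Rgb type and on kernels (kernel_conv, kernel_shared_conv, kernel_pixelize, knn, shared_knn, nlm, sobel_conv, non_max_suppr, hysterysis) declared in kernel.cuh / kernel_hip.cuh, none of which are shown. The sketch below is only a guess at the minimum those headers provide: the Rgb members and constructors are inferred from how the host helpers use them, and the box-average body of kernel_conv is an assumption, not the project's actual filter.
// Hedged sketch of what kernel.cuh might declare (not part of the original files).
#include "opencv2/core/core.hpp"
struct Rgb
{
    double r, g, b;
    __host__ __device__ Rgb() : r(0.0), g(0.0), b(0.0) {}
    __host__ __device__ Rgb(double r_, double g_, double b_) : r(r_), g(g_), b(b_) {}
    __host__ Rgb(const cv::Vec3b& v) : r(v[0]), g(v[1]), b(v[2]) {}
};
// Hypothetical naive version of kernel_conv matching the signature launched by
// kernel_conv_host: each pixel becomes the average of a (2*conv_size+1)^2
// window, clamped at the image borders.
__global__ void kernel_conv(Rgb* device_img, Rgb* img, int width, int height, int conv_size)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height)
        return;
    double r = 0.0, g = 0.0, b = 0.0;
    int count = 0;
    for (int dy = -conv_size; dy <= conv_size; dy++)
    {
        for (int dx = -conv_size; dx <= conv_size; dx++)
        {
            int xx = x + dx;
            int yy = y + dy;
            if (xx < 0 || xx >= width || yy < 0 || yy >= height)
                continue;
            r += img[xx + yy * width].r;
            g += img[xx + yy * width].g;
            b += img[xx + yy * width].b;
            count++;
        }
    }
    device_img[x + y * width] = Rgb(r / count, g / count, b / count);
}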
58e568fe6febc8e1f2230ab77af83c459869806f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__
void scaleit_kernel(double *a,int n, int scaleBy)
{
/* Determine my index */
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
a[i] = a[i] * (double)scaleBy;
}
}
extern "C" {
int scaleit_launcher(int scaleBy)
{
double *h_a, *d_a;
int i,n=16384;
dim3 block, grid;
/* Allocate Host Pointer */
h_a = (double*)malloc(n*sizeof(double));
for(i=0; i<n; i++)
{
h_a[i] = i+1;
}
/* Allocate Device Pointer */
if ( hipMalloc((void**)&d_a, n*sizeof(double)) != hipSuccess )
{
fprintf(stderr,"Failed to malloc!\n");
exit(1);
}
/* Decompose Problem */
block = dim3(1024, 1, 1);
grid = dim3(n/block.x, 1, 1);
/* Copy from Host to Device */
hipMemcpy(d_a, h_a, n*sizeof(double),hipMemcpyHostToDevice);
/* Launch Compute Kernel */
hipLaunchKernelGGL(( scaleit_kernel), dim3(grid),dim3(block), 0, 0, d_a,n,scaleBy);
/* Copy from Device to Host */
hipMemcpy(h_a, d_a, n*sizeof(double),hipMemcpyDeviceToHost);
for(i=0;i<n;i++)
{
if(h_a[i] != ((double)scaleBy * (i+1)))
{
fprintf(stderr, "Error! %d: %lf\n",i,h_a[i]);
return 1;
}
}
fprintf(stdout, "Correct!\n");
/* Free Device Pointer */
hipFree(d_a);
/* Free Host Pointer */
free(h_a);
return 0;
}
}
|
58e568fe6febc8e1f2230ab77af83c459869806f.cu
|
#include <cuda.h>
#include <stdio.h>
__global__
void scaleit_kernel(double *a,int n, int scaleBy)
{
/* Determine my index */
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
a[i] = a[i] * (double)scaleBy;
}
}
extern "C" {
int scaleit_launcher(int scaleBy)
{
double *h_a, *d_a;
int i,n=16384;
dim3 block, grid;
/* Allocate Host Pointer */
h_a = (double*)malloc(n*sizeof(double));
for(i=0; i<n; i++)
{
h_a[i] = i+1;
}
/* Allocate Device Pointer */
if ( cudaMalloc((void**)&d_a, n*sizeof(double)) != cudaSuccess )
{
fprintf(stderr,"Failed to malloc!\n");
exit(1);
}
/* Decompose Problem */
block = dim3(1024, 1, 1);
grid = dim3(n/block.x, 1, 1);
/* Copy from Host to Device */
cudaMemcpy(d_a, h_a, n*sizeof(double),cudaMemcpyHostToDevice);
/* Launch Compute Kernel */
scaleit_kernel<<<grid,block>>>(d_a,n,scaleBy);
/* Copy from Device to Host */
cudaMemcpy(h_a, d_a, n*sizeof(double),cudaMemcpyDeviceToHost);
for(i=0;i<n;i++)
{
if(h_a[i] != ((double)scaleBy * (i+1)))
{
fprintf(stderr, "Error! %d: %lf\n",i,h_a[i]);
return 1;
}
}
fprintf(stdout, "Correct!\n");
/* Free Device Pointer */
cudaFree(d_a);
/* Free Host Pointer */
free(h_a);
return 0;
}
}
|
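Because scaleit_launcher is exported with C linkage, it can be driven from a separate C or C++ translation unit without any CUDA/HIP headers. A minimal hypothetical driver, assuming it is linked against the object file built from the code above:
// Hypothetical driver for scaleit_launcher (not part of the original files).
#include <cstdio>
extern "C" int scaleit_launcher(int scaleBy);
int main()
{
    // Scales the 16384-element vector by 4; the launcher returns 0 on success, 1 on mismatch.
    int rc = scaleit_launcher(4);
    std::printf("scaleit_launcher returned %d\n", rc);
    return rc;
}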
fd6e93f1987e9756866c83cdaa92f1de4acdb96b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "debug_hip.cuh"
void _cudaErrorCheck(hipError_t e, const char* file, int line){
if(e != hipSuccess){
printf("Failed to run statement (%s:%d): %s \n",
file, line, hipGetErrorString(e));
exit(1);
}
}
|
fd6e93f1987e9756866c83cdaa92f1de4acdb96b.cu
|
#include <stdlib.h>
#include <stdio.h>
#include "cuda.h"
#include "debug.cuh"
void _cudaErrorCheck(cudaError_t e, const char* file, int line){
if(e != cudaSuccess){
printf("Failed to run statement (%s:%d): %s \n",
file, line, cudaGetErrorString(e));
exit(1);
}
}
|
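_cudaErrorCheck takes the file name and line number explicitly, which suggests that debug.cuh (and debug_hip.cuh) wrap it in a macro that fills those in at the call site. The header is not shown, so the following is only a guess at such a wrapper, written for the CUDA variant; the HIP variant would take a hipError_t instead:
// Hypothetical convenience macro assumed to live in debug.cuh (not shown above).
#define cudaErrorCheck(stmt) _cudaErrorCheck((stmt), __FILE__, __LINE__)
// Example use: aborts with file/line information if the allocation fails.
// float *d_buf;
// cudaErrorCheck(cudaMalloc(&d_buf, 1024 * sizeof(float)));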
9da30ced64bddf484de4ba0d23da18ecba05310a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
unsigned int nextPow2(unsigned int x)
{
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
__host__ __device__ bool nan_check(float a) {
// check to see whether float a is a nan
if (a != a || (abs(a) > 1.0e13)) {
return true;
} else {
return false;
}
}
__host__ __device__ float zbrent(fptr func, const float x1, const float x2,
const float tol,
float D, float Sx, float Sy, float Sz, float tau, float gamma,
float * gamma_up) {
/**
Using Brent's method, return the root of a function or functor func known
to lie between x1 and x2. The root will be regined until its accuracy is
tol.
Parameters
----------
func : fptr
function pointer to shallow water or compressible flux function.
x1, x2 : const float
limits of root
tol : const float
tolerance to which root shall be calculated to
D, Sx, Sy, Sz, tau : float
conserved variables
gamma : float
adiabatic index
gamma_up : float *
spatial metric
*/
const int ITMAX = 300;
float a = x1, b = x2;
float c, d=0.0, e=0.0;
float fa = func(a, D, Sx, Sy, Sz, tau, gamma, gamma_up);
float fb = func(b, D, Sx, Sy, Sz, tau, gamma, gamma_up);
float fc=0.0, fs, s;
if (fa * fb >= 0.0) {
//cout << "Root must be bracketed in zbrent.\n";
//printf("Root must be bracketed in zbrent.\n");
return x2;
}
if (abs(fa) < abs(fb)) {
// swap a, b
d = a;
a = b;
b = d;
d = fa;
fa = fb;
fb = d;
}
c = a;
fc = fa;
bool mflag = true;
for (int i = 0; i < ITMAX; i++) {
if (fa != fc && fb != fc) {
s = a*fb*fc / ((fa-fb) * (fa-fc)) + b*fa*fc / ((fb-fa)*(fb-fc)) +
c*fa*fb / ((fc-fa)*(fc-fb));
} else {
s = b - fb * (b-a) / (fb-fa);
}
// list of conditions
bool con1 = false;
if (0.25*(3.0 * a + b) < b) {
if (s < 0.25*(3.0 * a + b) || s > b) {
con1 = true;
}
} else if (s < b || s > 0.25*(3.0 * a + b)) {
con1 = true;
}
bool con2 = false;
if (mflag && abs(s-b) >= 0.5*abs(b-c)) {
con2 = true;
}
bool con3 = false;
if (!(mflag) && abs(s-b) >= 0.5 * abs(c-d)) {
con3 = true;
}
bool con4 = false;
if (mflag && abs(b-c) < tol) {
con4 = true;
}
bool con5 = false;
if (!(mflag) && abs(c-d) < tol) {
con5 = true;
}
if (con1 || con2 || con3 || con4 || con5) {
s = 0.5 * (a+b);
mflag = true;
} else {
mflag = false;
}
fs = func(s, D, Sx, Sy, Sz, tau, gamma, gamma_up);
d = c;
c = b;
fc = fb;
if (fa * fs < 0.0) {
b = s;
fb = fs;
} else {
a = s;
fa = fs;
}
if (abs(fa) < abs(fb)) {
e = a;
a = b;
b = e;
e = fa;
fa = fb;
fb = e;
}
// test for convergence
if (fb == 0.0 || fs == 0.0 || abs(b-a) < tol) {
return b;
}
}
//printf("Maximum number of iterations exceeded in zbrent.\n");
return x1;
}
void check_mpi_error(int mpi_err) {
/**
Checks to see if the integer returned by an mpi function, mpi_err, is an MPI error. If so, it prints out some useful stuff to screen.
*/
int errclass, resultlen;
char err_buffer[MPI_MAX_ERROR_STRING];
if (mpi_err != MPI_SUCCESS) {
MPI_Error_class(mpi_err, &errclass);
if (errclass == MPI_ERR_RANK) {
fprintf(stderr,"%s","Invalid rank used in MPI send call\n");
MPI_Error_string(mpi_err,err_buffer,&resultlen);
fprintf(stderr,"%s",err_buffer);
MPI_Finalize();
} else {
fprintf(stderr, "%s","Other MPI error\n");
MPI_Error_string(mpi_err,err_buffer,&resultlen);
fprintf(stderr,"%s",err_buffer);
MPI_Finalize();
}
}
}
__host__ __device__ float W_swe(float * q, float * gamma_up) {
/**
calculate Lorentz factor for conserved swe state vector
*/
return sqrt((q[1]*q[1] * gamma_up[0] +
2.0 * q[1] * q[2] * gamma_up[1] +
q[2] * q[2] * gamma_up[4]) / (q[0]*q[0]) + 1.0);
}
__host__ __device__ float phi(float r) {
/**
calculate superbee slope limiter Phi(r)
*/
float ph = 0.0;
if (r >= 1.0) {
ph = min(float(2.0), min(r, float(2.0 / (1.0 + r))));
} else if (r >= 0.5) {
ph = 1.0;
} else if (r > 0.0) {
ph = 2.0 * r;
}
return ph;
}
__device__ float find_height(float ph) {
/**
Finds r given Phi.
*/
const float M = 1.0; // set this for convenience
return 2.0 * M / (1.0 - exp(-2.0 * ph));
}
__device__ float find_pot(float r) {
/**
Finds Phi given r.
*/
const float M = 1.0; // set this for convenience
return -0.5 * log(1.0 - 2.0 * M / r);
}
__device__ float rhoh_from_p(float p, float rho, float gamma) {
/**
calculate rhoh using p for gamma law equation of state
*/
return rho + gamma * p / (gamma - 1.0);
}
__device__ float p_from_rhoh(float rhoh, float rho, float gamma) {
/**
calculate p using rhoh for gamma law equation of state
*/
return (rhoh - rho) * (gamma - 1.0) / gamma;
}
__device__ __host__ float p_from_rho_eps(float rho, float eps, float gamma) {
/**
calculate p using rho and epsilon for gamma law equation of state
*/
return (gamma - 1.0) * rho * eps;
}
__device__ __host__ float phi_from_p(float p, float rho, float gamma, float A) {
/**
Calculate the metric potential Phi given p for gamma law equation of
state
Parameters
----------
p, rho : float
pressure and density
gamma : float
adiabatic index
A : float
constant used in Phi to p conversion
*/
return (gamma - 1.0) / gamma *
log((rho + gamma * p / (gamma - 1.0)) / A);
}
__device__ __host__ float f_of_p(float p, float D, float Sx, float Sy,
float Sz, float tau, float gamma,
float * gamma_up) {
/**
Function of p whose root is to be found when doing conserved to
primitive variable conversion
Parameters
----------
p : float
pressure
D, Sx, Sy, Sz, tau :float
components of conserved state vector
gamma : float
adiabatic index
gamma_up : float *
spatial metric
*/
float sq = sqrt(pow(tau + p + D, 2) -
Sx*Sx*gamma_up[0] - 2.0*Sx*Sy*gamma_up[1] - 2.0*Sx*Sz*gamma_up[2] -
Sy*Sy*gamma_up[4] - 2.0*Sy*Sz*gamma_up[5] - Sz*Sz*gamma_up[8]);
//if (nan_check(sq)) cout << "sq is nan :(\n";
float rho = D * sq / (tau + p + D);
float eps = (sq - p * (tau + p + D) / sq - D) / D;
return (gamma - 1.0) * rho * eps - p;
}
__device__ float h_dot(float phi, float old_phi, float dt) {
/**
Calculates the time derivative of the height given the shallow water
variable phi at current time and previous timestep
NOTE: this is an upwinded approximation of hdot - there may be a better
way to do this which will more accurately give hdot at current time.
Parameters
----------
phi : float
Phi at current timestep
old_phi : float
Phi at previous timestep
dt : float
timestep
*/
float h = find_height(phi);
//float old_h = find_height(old_phi);
return -2.0 * h * (phi - old_phi) / (dt * (exp(2.0 * phi) - 1.0));
}
__device__ float calc_Q_swe(float rho, float p, float gamma, float Y, float Cv) {
/**
Calculate the heating rate per unit mass from the shallow water variables
*/
float T = p / ((gamma - 1.0) * rho * Cv);
float A = 1.0e8; // constant of proportionality
float X_dot = A*rho*rho*Y*Y*Y / (T*T*T) * exp(-44.0 / T);
if (nan_check(X_dot)) {
X_dot = 0.0;
}
return X_dot;
}
void calc_Q(float * rho, float * q_cons, int nx, int ny, int nz,
float gamma, float * gamma_up, float * Q, float Cv) {
/**
Calculate the heating rate per unit mass
*/
// hack: need to actually interpolate rho here rather than just assume it's constant
float * q_prim = new float[nx*ny*nz*6];
cons_to_prim_comp(q_cons, q_prim, nx, ny, nz, gamma, gamma_up);
for (int i = 0; i < nx*ny*nz; i++) {
float eps = q_prim[i*6+4];
float Y = q_prim[i*6+5];
float T = eps / Cv;
float A = 1.0e8; // constant of proportionality
Q[i] = A*rho[0]*rho[0]*Y*Y*Y / (T*T*T) * exp(-44.0 / T);
//cout << "eps = " << eps << " Y = " << Y << " H = " << Q[i] << '\n';
}
delete[] q_prim;
}
__device__ void calc_As(float * rhos, float * phis, float * A,
int nlayers, float gamma, float surface_phi, float surface_rho) {
/**
Calculates the As used to calculate the pressure given Phi, given
the pressure at the sea floor
Parameters
----------
rhos : float array
densities of layers
phis : float array
Vector of Phi for different layers
A : float array
vector of As for layers
nlayers : int
number of layers
gamma : float
adiabatic index
surface_phi : float
Phi at surface
surface_rho : float
density at surface
*/
// define A at sea surface using condition that p = 0
float A_surface = surface_rho * exp(-gamma * surface_phi / (gamma-1.0));
A[0] = A_surface + exp(-gamma * phis[0] / (gamma-1.0)) * (rhos[0] - surface_rho);
for (int n = 0; n < (nlayers-1); n++) {
A[n+1] = A[n] +
exp(-gamma * phis[n+1] / (gamma - 1.0)) * (rhos[n+1] - rhos[n]);
}
}
__device__ void cons_to_prim_comp_d(float * q_cons, float * q_prim,
float gamma, float * gamma_up) {
/**
Convert compressible conserved variables to primitive variables
Parameters
----------
q_cons : float *
state vector of conserved variables
q_prim : float *
state vector of primitive variables
gamma : float
adiabatic index
gamma_up : float *
spatial metric
*/
const float TOL = 1.0e-5;
float D = q_cons[0];
float Sx = q_cons[1];
float Sy = q_cons[2];
float Sz = q_cons[3];
float tau = q_cons[4];
float DX = q_cons[5];
// S^2
float Ssq = Sx*Sx*gamma_up[0] + 2.0*Sx*Sy*gamma_up[1] +
2.0*Sx*Sz*gamma_up[2] + Sy*Sy*gamma_up[4] + 2.0*Sy*Sz*gamma_up[5] +
Sz*Sz*gamma_up[8];
float pmin = (1.0 - Ssq) * (1.0 - Ssq) * tau * (gamma - 1.0);
float pmax = (gamma - 1.0) * (tau + D) / (2.0 - gamma);
if (pmin < 0.0) {
pmin = 0.0;//1.0e-9;
}
if (pmax < 0.0 || pmax < pmin) {
pmax = 1.0;
}
// check sign change
if (f_of_p(pmin, D, Sx, Sy, Sz, tau, gamma, gamma_up) *
f_of_p(pmax, D, Sx, Sy, Sz, tau, gamma, gamma_up) > 0.0) {
pmin = 0.0;
}
if (f_of_p(pmin, D, Sx, Sy, Sz, tau, gamma, gamma_up) *
f_of_p(pmax, D, Sx, Sy, Sz, tau, gamma, gamma_up) > 0.0) {
pmax *= 10.0;
}
float p = zbrent((fptr)f_of_p, pmin, pmax, TOL, D, Sx, Sy, Sz,
tau, gamma, gamma_up);
if (nan_check(p) || p < 0.0 || p > 1.0e9){
p = abs((gamma - 1.0) * (tau + D) / (2.0 - gamma)) > 1.0 ? 1.0 :
abs((gamma - 1.0) * (tau + D) / (2.0 - gamma));
}
float sq = sqrt(pow(tau + p + D, 2) - Ssq);
if (nan_check(sq)) {
//printf("\n\n\n sq is nan!!!! %f, %f, %f, %f, %f, %f\n\n\n", pow(tau + p + D, 2), p, Ssq, Sx, Sy, Sz);
sq = tau + p + D;
}
float eps = (sq - p * (tau + p + D)/sq - D) / D;
float h = 1.0 + gamma * eps;
float W = sqrt(1.0 + Ssq / (D*D*h*h));
float X = DX / D;
q_prim[0] = D * sq / (tau + p + D);//D / W;
q_prim[1] = Sx / (W*W * h * q_prim[0]);
q_prim[2] = Sy / (W*W * h * q_prim[0]);
q_prim[3] = Sz / (W*W * h * q_prim[0]);
q_prim[4] = eps;
q_prim[5] = X;
}
void cons_to_prim_comp(float * q_cons, float * q_prim, int nxf, int nyf,
int nz,
float gamma, float * gamma_up) {
/**
Convert compressible conserved variables to primitive variables
Parameters
----------
q_cons : float *
grid of conserved variables
q_prim : float *
grid where the primitive variables shall be stored
nxf, nyf, nz : int
grid dimensions
gamma : float
adiabatic index
gamma_up : float *
contravariant spatial metric
*/
const float TOL = 1.e-5;
for (int i = 0; i < nxf*nyf*nz; i++) {
float D = q_cons[i*6];
float Sx = q_cons[i*6+1];
float Sy = q_cons[i*6+2];
float Sz = q_cons[i*6+3];
float tau = q_cons[i*6+4];
float DX = q_cons[i*6+5];
// S^2
float Ssq = Sx*Sx*gamma_up[0] + 2.0*Sx*Sy*gamma_up[1] +
2.0*Sx*Sz*gamma_up[2] + Sy*Sy*gamma_up[4] + 2.0*Sy*Sz*gamma_up[5] +
Sz*Sz*gamma_up[8];
float pmin = (1.0 - Ssq) * (1.0 - Ssq) * tau * (gamma - 1.0);
float pmax = (gamma - 1.0) * (tau + D) / (2.0 - gamma);
if (pmin < 0.0) {
pmin = 0.0;//1.0e-9;
}
if (pmax < 0.0 || pmax < pmin) {
pmax = 1.0;
}
// check sign change
if (f_of_p(pmin, D, Sx, Sy, Sz, tau, gamma, gamma_up) *
f_of_p(pmax, D, Sx, Sy, Sz, tau, gamma, gamma_up) > 0.0) {
pmin = 0.0;
}
float p;
try {
p = zbrent((fptr)f_of_p, pmin, pmax, TOL, D, Sx, Sy, Sz,
tau, gamma, gamma_up);
} catch (char const*){
p = abs((gamma - 1.0) * (tau + D) / (2.0 - gamma)) > 1.0 ? 1.0 :
abs((gamma - 1.0) * (tau + D) / (2.0 - gamma));
}
float sq = sqrt(pow(tau + p + D, 2) - Ssq);
float eps = (sq - p * (tau + p + D)/sq - D) / D;
float h = 1.0 + gamma * eps;
float W = sqrt(1.0 + Ssq / (D*D*h*h));
float X = DX / D;
q_prim[i*6] = D * sq / (tau + p + D);//D / W;
q_prim[i*6+1] = Sx / (W*W * h * q_prim[i*6]);
q_prim[i*6+2] = Sy / (W*W * h * q_prim[i*6]);
q_prim[i*6+3] = Sz / (W*W * h * q_prim[i*6]);
q_prim[i*6+4] = eps;
q_prim[i*6+5] = X;
}
}
__device__ void shallow_water_fluxes(float * q, float * f, int dir,
float * gamma_up, float alpha, float * beta,
float gamma) {
/**
Calculate the flux vector of the shallow water equations
Parameters
----------
q : float *
state vector
f : float *
grid where fluxes shall be stored
dir : int
0 if calculating flux in x-direction, 1 if in y-direction
gamma_up : float *
spatial metric
alpha : float
lapse function
beta : float *
shift vector
gamma : float
adiabatic index
*/
if (nan_check(q[0])) q[0] = 1.0;
if (nan_check(q[1])) q[1] = 0.0;
if (nan_check(q[2])) q[2] = 0.0;
if (nan_check(q[3])) q[3] = 0.0;
float W = W_swe(q, gamma_up);
if (nan_check(W)) {
printf("W is nan! q0, q1, q2: %f, %f, %f\n", q[0], q[1], q[2]);
W = 1.0;
}
float u = q[1] / (q[0] * W);
float v = q[2] / (q[0] * W);
if (dir == 0) {
float qx = u * gamma_up[0] + v * gamma_up[1] -
beta[0] / alpha;
f[0] = q[0] * qx;
f[1] = q[1] * qx + 0.5 * q[0] * q[0] / (W * W);
f[2] = q[2] * qx;
f[3] = q[3] * qx;
} else {
float qy = v * gamma_up[4] + u * gamma_up[1] -
beta[1] / alpha;
f[0] = q[0] * qy;
f[1] = q[1] * qy;
f[2] = q[2] * qy + 0.5 * q[0] * q[0] / (W * W);
f[3] = q[3] * qy;
}
}
__device__ void compressible_fluxes(float * q, float * f, int dir,
float * gamma_up, float alpha, float * beta,
float gamma) {
/**
Calculate the flux vector of the compressible GR hydrodynamics equations
Parameters
----------
q : float *
state vector
f : float *
grid where fluxes shall be stored
dir : int
0 if calculating flux in x-direction, 1 if in y-direction,
2 if in z-direction
gamma_up : float *
spatial metric
alpha : float
lapse function
beta : float *
shift vector
gamma : float
adiabatic index
*/
// this is worked out on the fine grid
float * q_prim;
q_prim = (float *)malloc(6 * sizeof(float));
cons_to_prim_comp_d(q, q_prim, gamma, gamma_up);
float p = p_from_rho_eps(q_prim[0], q_prim[4], gamma);
float u = q_prim[1];
float v = q_prim[2];
float w = q_prim[3];
//printf("p: %f, D: %f, rho: %f, u: %f, v: %f, w: %f, tau: %f, eps: %f\n", p, q[0], q_prim[0], u, v, w, q[4], q_prim[4]);
if (dir == 0) {
float qx = u * gamma_up[0] + v * gamma_up[1] + w * gamma_up[2] - beta[0] / alpha;
f[0] = q[0] * qx;
f[1] = q[1] * qx + p;
f[2] = q[2] * qx;
f[3] = q[3] * qx;
f[4] = q[4] * qx + p * u;
f[5] = q[5] * qx;
} else if (dir == 1){
float qy = v * gamma_up[4] + u * gamma_up[1] + w * gamma_up[5] - beta[1] / alpha;
f[0] = q[0] * qy;
f[1] = q[1] * qy;
f[2] = q[2] * qy + p;
f[3] = q[3] * qy;
f[4] = q[4] * qy + p * v;
f[5] = q[5] * qy;
} else {
float qz = w * gamma_up[8] + u * gamma_up[2] + v * gamma_up[5] - beta[2] / alpha;
f[0] = q[0] * qz;
f[1] = q[1] * qz;
f[2] = q[2] * qz;
f[3] = q[3] * qz + p;
f[4] = q[4] * qz + p * w;
f[5] = q[5] * qz;
}
//printf("f(tau): %f\n", f[4]);
free(q_prim);
}
void p_from_swe(float * q, float * p, int nx, int ny, int nz,
float * gamma_up, float rho, float gamma, float A) {
/**
Calculate p using SWE conserved variables
Parameters
----------
q : float *
state vector
p : float *
grid where pressure shall be stored
nx, ny, nz : int
grid dimensions
gamma_up : float *
spatial metric
rho : float
density
gamma : float
adiabatic index
A : float
variable required in p(Phi) calculation
*/
for (int i = 0; i < nx*ny*nz; i++) {
float W = W_swe(q, gamma_up);
float ph = q[i*4] / W;
p[i] = (gamma - 1.0) * (A * exp(gamma * ph /
(gamma - 1.0)) - rho) / gamma;
}
}
__device__ float p_from_swe(float * q, float * gamma_up, float rho,
float gamma, float W, float A) {
/**
Calculates p and returns using SWE conserved variables
Parameters
----------
q : float *
state vector
gamma_up : float *
spatial metric
rho : float
density
gamma : float
adiabatic index
W : float
Lorentz factor
A : float
variable required in p(Phi) calculation
*/
float ph = q[0] / W;
return (gamma - 1.0) * (A * exp(gamma * ph /
(gamma - 1.0)) - rho) / gamma;
}
__global__ void compressible_from_swe(float * q, float * q_comp,
int nx, int ny, int nz,
float * gamma_up, float * rho, float gamma,
int kx_offset, int ky_offset, float dt,
float * old_phi) {
/**
Calculates the compressible state vector from the SWE variables.
Parameters
----------
q : float *
grid of SWE state vector
q_comp : float *
grid where compressible state vector to be stored
nx, ny, nz : int
grid dimensions
gamma_up : float *
spatial metric
rho, gamma : float
density and adiabatic index
kx_offset, ky_offset : int
kernel offsets in the x and y directions
dt : float
timestep
old_phi : float *
Phi at previous timestep
*/
int x = kx_offset + blockIdx.x * blockDim.x + threadIdx.x;
int y = ky_offset + blockIdx.y * blockDim.y + threadIdx.y;
int z = threadIdx.z;
int offset = (z * ny + y) * nx + x;
if ((x < nx) && (y < ny) && (z < nz)) {
//printf("(%d, %d, %d): %f, %f, %f\n", x, y, z, q[offset*4], q[offset*4+1], q[offset*4+2]);
float * q_swe;
q_swe = (float *)malloc(4 * sizeof(float));
for (int i = 0; i < 4; i++) {
q_swe[i] = q[offset * 4 + i];
}
// calculate hdot = w (?)
float hdot = h_dot(q[offset*4], old_phi[offset], dt);
//printf("hdot(%d, %d, %d): %f, \n", x, y, z, hdot);
float W = sqrt((q[offset*4+1] * q[offset*4+1] * gamma_up[0] +
2.0 * q[offset*4+1] * q[offset*4+2] * gamma_up[1] +
q[offset*4+2] * q[offset*4+2] * gamma_up[4]) /
(q[offset*4] * q[offset*4]) +
2.0 * hdot * (q[offset*4+1] * gamma_up[2] +
q[offset*4+2] * gamma_up[5]) / q[offset*4] +
hdot * hdot * gamma_up[8] + 1.0);
//printf("%d\n", gamma_up[8]);
//printf("W(%d, %d, %d): %f, \n", x, y, z, W);
// TODO: this is really inefficient as redoing the same calculation
// on different layers
float * A, * phis;
A = (float *)malloc(nz * sizeof(float));
phis = (float *)malloc(nz * sizeof(float));
for (int i = 0; i < nz; i++) {
phis[i] = q[((i * ny + y) * nx + x) * 4];
}
calc_As(rho, phis, A, nz, gamma, phis[0], rho[0]);
float p = p_from_swe(q_swe, gamma_up, rho[z], gamma, W, A[z]);
float rhoh = rhoh_from_p(p, rho[z], gamma);
free(phis);
free(A);
q_comp[offset*6] = rho[z] * W;
q_comp[offset*6+1] = rhoh * W * q[offset*4+1] / q[offset*4];
q_comp[offset*6+2] = rhoh * W * q[offset*4+2] / q[offset*4];
q_comp[offset*6+3] = rho[z] * W * hdot;
q_comp[offset*6+4] = rhoh*W*W - p - rho[z] * W;
q_comp[offset*6+5] = rho[z] * W * q[offset*4+3] / q[offset*4];
//printf("s2c (%d, %d, %d): %f, %f\n", x, y, z, q_comp[offset*6+4], p);
// NOTE: hack?
if (q_comp[offset*6+4] < 0.0) {
//printf("tau < 0, p: %f, tau: %f\n", p, q_comp[offset*6+4]);
q_comp[offset*6+4] = 0.0;
}
// cannot have X < 0.0
if (q_comp[offset*6+5] < 0.0) {
q_comp[offset*6+5] = 0.0;
}
free(q_swe);
}
}
__device__ float slope_limit(float layer_frac, float left, float middle, float right, float aleft, float amiddle, float aright) {
/**
Calculates the slope-limited vertical gradient at layer_frac between middle and amiddle.
Left, middle and right are from row n, aleft, amiddle and aright are from row above it (n-1)
*/
float S_upwind = (layer_frac * (right - middle) +
(1.0 - layer_frac) * (aright - amiddle));
float S_downwind = (layer_frac * (middle - left)
+ (1.0 - layer_frac) * (amiddle - aleft));
float S = 0.5 * (S_upwind + S_downwind);
float r = 1.0e6;
if (abs(S_downwind) > 1.0e-10) {
r = S_upwind / S_downwind;
}
return S * phi(r);
}
__global__ void swe_from_compressible(float * q, float * q_swe,
int nx, int ny,
int nxf, int nyf, int nz,
float * gamma_up, float * rho,
float gamma,
int kx_offset, int ky_offset,
float * qc,
int * matching_indices) {
/**
Calculates the SWE state vector from the compressible variables.
Parameters
----------
q : float *
grid of compressible state vector
q_swe : float *
grid where SWE state vector to be stored
nxf, nyf, nz : int
grid dimensions
gamma_up : float *
spatial metric
rho, gamma : float
density and adiabatic index
kx_offset, ky_offset : int
kernel offsets in the x and y directions
qc : float *
coarse grid
matching_indices : int *
indices of fine grid wrt coarse grid
*/
int x = kx_offset + blockIdx.x * blockDim.x + threadIdx.x;
int y = ky_offset + blockIdx.y * blockDim.y + threadIdx.y;
int z = threadIdx.z;
int offset = (z * nyf + y) * nxf + x;
/*if (x == 0 && y == 0 && z == 0) {
for (int j = 0; j < 40; j++) {
for (int i = 0; i < 40; i++) {
printf("%d, ", q[(j*nxf+i)*6]);
}
}
printf("\n\n");
}*/
float W, u, v, w, p, X;
float * q_prim, * q_con;
q_con = (float *)malloc(6 * sizeof(float));
q_prim = (float *)malloc(6 * sizeof(float));
if ((x < nxf) && (y < nyf) && (z < nz)) {
for (int i = 0; i < 6; i++) {
q_con[i] = q[offset*6 + i];
}
// find primitive variables
cons_to_prim_comp_d(q_con, q_prim, gamma, gamma_up);
u = q_prim[1];
v = q_prim[2];
w = q_prim[3];
X = q_prim[5];
W = 1.0 / sqrt(1.0 -
u*u*gamma_up[0] - 2.0 * u*v * gamma_up[1] -
2.0 * u*w * gamma_up[2] - v*v*gamma_up[4] -
2.0 * v*w*gamma_up[5] - w*w*gamma_up[8]);
//rho = q_prim[0];
// calculate SWE conserved variables on fine grid.
p = p_from_rho_eps(q_prim[0], q_prim[4], gamma);
// save to q_swe
q_swe[offset*4] = p;
//printf("x: (%d, %d, %d), U: (%f, %f, %f), v: (%f,%f,%f), W: %f, p: %f\n", x, y, z, q_con[1], q[offset*6+2], q[offset*6+3], u, v, w, W, p);
}
__syncthreads();
float ph;
if ((x < nxf) && (y < nyf) && (z < nz)) {
float * A, * phis, *rhos;
A = (float *)malloc(nz * sizeof(float));
phis = (float *)malloc(nz * sizeof(float));
rhos = (float *)malloc(nz * sizeof(float));
for (int i = 0; i < nz; i++) {
phis[i] = q_swe[((i * nyf + y) * nxf + x)*4];
if (sizeof(rho) > nz) {
// rho varies with position
rhos[i] = rho[(i * nyf + y) * nxf + x];
} else {
// HACK: rho is only nlayers long - need to find a way to define on fine grid too
rhos[i] = rho[0];
}
}
int c_x = round(x*0.5) + matching_indices[0];
int c_y = round(y*0.5) + matching_indices[2];
float interp_q_comp = qc[(c_y * nx + c_x) * 4];
float Sx = slope_limit(1.0, qc[(c_y * nx + c_x-1) * 4], qc[(c_y * nx + c_x) * 4], qc[(c_y * nx + c_x+1) * 4], 0.0, 0.0, 0.0);
float Sy = slope_limit(1.0, qc[((c_y-1) * nx + c_x) * 4], qc[(c_y * nx + c_x) * 4], qc[((c_y+1) * nx + c_x) * 4], 0.0, 0.0, 0.0);
float phi_surface = interp_q_comp;
if (x % 2 == 1) {
phi_surface += 0.25 * Sx;
} else {
phi_surface -= 0.25 * Sx;
}
if (y % 2 == 1) {
phi_surface += 0.25 * Sy;
} else {
phi_surface -= 0.25 * Sy;
}
// TODO; this will not work as this function uses fact p = 0 on
// surface layer, which is not true for compressible code
calc_As(rhos, phis, A, nz, gamma, phi_surface, rho[0]);
// NOTE: hack to get this to not nan
if (nan_check(A[z]) || A[z] < 0.0) A[z] = 1.0;
ph = phi_from_p(p, q_prim[0], gamma, A[z]);
free(phis);
free(A);
free(rhos);
//printf("W: %f, ph: %f, tau: %f, eps: %f, A[z]: %f, p: %f, rho: %f\n", W, ph, q_con[4], q_prim[4], A[z], p, q_prim[0]);
}
__syncthreads();
if ((x < nxf) && (y < nyf) && (z < nz)) {
q_swe[offset*4] = ph * W;
q_swe[offset*4+1] = ph * W * W * u;
q_swe[offset*4+2] = ph * W * W * v;
q_swe[offset*4+3] = ph * W * X;
//printf("(x,y,z): %d, %d, %d Phi, Sx, Sy: %f, %f, %f\n", x,y,z,q_swe[offset*4], q_swe[offset*4+1], q_swe[offset*4+2]);
}
free(q_con);
free(q_prim);
}
// device-side function pointers to __device__ functions
__device__ flux_func_ptr d_compressible_fluxes = compressible_fluxes;
__device__ flux_func_ptr d_shallow_water_fluxes = shallow_water_fluxes;
|
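Most of the routines in this file are device-side, but cons_to_prim_comp is a plain host function and can be exercised directly. Below is a small hypothetical check on a single cell with a flat spatial metric (gamma_up is read as a row-major 3x3, so the identity is {1,0,0, 0,1,0, 0,0,1}); the chosen conserved values are arbitrary and only meant to show the calling convention:
// Hypothetical host-side check of cons_to_prim_comp (not part of the original file).
#include <cstdio>
void example_cons_to_prim()
{
    float gamma_up[9] = {1.0f, 0.0f, 0.0f,
                         0.0f, 1.0f, 0.0f,
                         0.0f, 0.0f, 1.0f};   // flat spatial metric
    // One cell, 6 conserved variables: D, Sx, Sy, Sz, tau, DX (values are illustrative).
    float q_cons[6] = {1.0f, 0.0f, 0.0f, 0.0f, 0.5f, 0.0f};
    float q_prim[6] = {0.0f};
    cons_to_prim_comp(q_cons, q_prim, 1, 1, 1, 5.0f / 3.0f, gamma_up);
    // q_prim now holds rho, vx, vy, vz, eps, X for the cell.
    std::printf("rho = %f, eps = %f\n", q_prim[0], q_prim[4]);
}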
9da30ced64bddf484de4ba0d23da18ecba05310a.cu
|
unsigned int nextPow2(unsigned int x)
{
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
__host__ __device__ bool nan_check(float a) {
// check to see whether float a is a nan
if (a != a || (abs(a) > 1.0e13)) {
return true;
} else {
return false;
}
}
__host__ __device__ float zbrent(fptr func, const float x1, const float x2,
const float tol,
float D, float Sx, float Sy, float Sz, float tau, float gamma,
float * gamma_up) {
/**
Using Brent's method, return the root of a function or functor func known
to lie between x1 and x2. The root will be refined until its accuracy is
tol.
Parameters
----------
func : fptr
function pointer to shallow water or compressible flux function.
x1, x2 : const float
limits of root
tol : const float
tolerance to which root shall be calculated to
D, Sx, Sy, Sz, tau : float
conserved variables
gamma : float
adiabatic index
gamma_up : float *
spatial metric
*/
const int ITMAX = 300;
float a = x1, b = x2;
float c, d=0.0, e=0.0;
float fa = func(a, D, Sx, Sy, Sz, tau, gamma, gamma_up);
float fb = func(b, D, Sx, Sy, Sz, tau, gamma, gamma_up);
float fc=0.0, fs, s;
if (fa * fb >= 0.0) {
//cout << "Root must be bracketed in zbrent.\n";
//printf("Root must be bracketed in zbrent.\n");
return x2;
}
if (abs(fa) < abs(fb)) {
// swap a, b
d = a;
a = b;
b = d;
d = fa;
fa = fb;
fb = d;
}
c = a;
fc = fa;
bool mflag = true;
for (int i = 0; i < ITMAX; i++) {
if (fa != fc && fb != fc) {
s = a*fb*fc / ((fa-fb) * (fa-fc)) + b*fa*fc / ((fb-fa)*(fb-fc)) +
c*fa*fb / ((fc-fa)*(fc-fb));
} else {
s = b - fb * (b-a) / (fb-fa);
}
// list of conditions
bool con1 = false;
if (0.25*(3.0 * a + b) < b) {
if (s < 0.25*(3.0 * a + b) || s > b) {
con1 = true;
}
} else if (s < b || s > 0.25*(3.0 * a + b)) {
con1 = true;
}
bool con2 = false;
if (mflag && abs(s-b) >= 0.5*abs(b-c)) {
con2 = true;
}
bool con3 = false;
if (!(mflag) && abs(s-b) >= 0.5 * abs(c-d)) {
con3 = true;
}
bool con4 = false;
if (mflag && abs(b-c) < tol) {
con4 = true;
}
bool con5 = false;
if (!(mflag) && abs(c-d) < tol) {
con5 = true;
}
if (con1 || con2 || con3 || con4 || con5) {
s = 0.5 * (a+b);
mflag = true;
} else {
mflag = false;
}
fs = func(s, D, Sx, Sy, Sz, tau, gamma, gamma_up);
d = c;
c = b;
fc = fb;
if (fa * fs < 0.0) {
b = s;
fb = fs;
} else {
a = s;
fa = fs;
}
if (abs(fa) < abs(fb)) {
e = a;
a = b;
b = e;
e = fa;
fa = fb;
fb = e;
}
// test for convergence
if (fb == 0.0 || fs == 0.0 || abs(b-a) < tol) {
return b;
}
}
//printf("Maximum number of iterations exceeded in zbrent.\n");
return x1;
}
void check_mpi_error(int mpi_err) {
/**
Checks to see if the integer returned by an mpi function, mpi_err, is an MPI error. If so, it prints out some useful stuff to screen.
*/
int errclass, resultlen;
char err_buffer[MPI_MAX_ERROR_STRING];
if (mpi_err != MPI_SUCCESS) {
MPI_Error_class(mpi_err, &errclass);
if (errclass == MPI_ERR_RANK) {
fprintf(stderr,"%s","Invalid rank used in MPI send call\n");
MPI_Error_string(mpi_err,err_buffer,&resultlen);
fprintf(stderr,"%s",err_buffer);
MPI_Finalize();
} else {
fprintf(stderr, "%s","Other MPI error\n");
MPI_Error_string(mpi_err,err_buffer,&resultlen);
fprintf(stderr,"%s",err_buffer);
MPI_Finalize();
}
}
}
__host__ __device__ float W_swe(float * q, float * gamma_up) {
/**
calculate Lorentz factor for conserved swe state vector
*/
return sqrt((q[1]*q[1] * gamma_up[0] +
2.0 * q[1] * q[2] * gamma_up[1] +
q[2] * q[2] * gamma_up[4]) / (q[0]*q[0]) + 1.0);
}
__host__ __device__ float phi(float r) {
/**
calculate superbee slope limiter Phi(r)
*/
float ph = 0.0;
if (r >= 1.0) {
ph = min(float(2.0), min(r, float(2.0 / (1.0 + r))));
} else if (r >= 0.5) {
ph = 1.0;
} else if (r > 0.0) {
ph = 2.0 * r;
}
return ph;
}
__device__ float find_height(float ph) {
/**
Finds r given Phi.
*/
const float M = 1.0; // set this for convenience
return 2.0 * M / (1.0 - exp(-2.0 * ph));
}
__device__ float find_pot(float r) {
/**
Finds Phi given r.
*/
const float M = 1.0; // set this for convenience
return -0.5 * log(1.0 - 2.0 * M / r);
}
__device__ float rhoh_from_p(float p, float rho, float gamma) {
/**
calculate rhoh using p for gamma law equation of state
*/
return rho + gamma * p / (gamma - 1.0);
}
__device__ float p_from_rhoh(float rhoh, float rho, float gamma) {
/**
calculate p using rhoh for gamma law equation of state
*/
return (rhoh - rho) * (gamma - 1.0) / gamma;
}
__device__ __host__ float p_from_rho_eps(float rho, float eps, float gamma) {
/**
calculate p using rho and epsilon for gamma law equation of state
*/
return (gamma - 1.0) * rho * eps;
}
__device__ __host__ float phi_from_p(float p, float rho, float gamma, float A) {
/**
Calculate the metric potential Phi given p for gamma law equation of
state
Parameters
----------
p, rho : float
pressure and density
gamma : float
adiabatic index
A : float
constant used in Phi to p conversion
*/
return (gamma - 1.0) / gamma *
log((rho + gamma * p / (gamma - 1.0)) / A);
}
__device__ __host__ float f_of_p(float p, float D, float Sx, float Sy,
float Sz, float tau, float gamma,
float * gamma_up) {
/**
Function of p whose root is to be found when doing conserved to
primitive variable conversion
Parameters
----------
p : float
pressure
D, Sx, Sy, Sz, tau :float
components of conserved state vector
gamma : float
adiabatic index
gamma_up : float *
spatial metric
*/
float sq = sqrt(pow(tau + p + D, 2) -
Sx*Sx*gamma_up[0] - 2.0*Sx*Sy*gamma_up[1] - 2.0*Sx*Sz*gamma_up[2] -
Sy*Sy*gamma_up[4] - 2.0*Sy*Sz*gamma_up[5] - Sz*Sz*gamma_up[8]);
//if (nan_check(sq)) cout << "sq is nan :(\n";
float rho = D * sq / (tau + p + D);
float eps = (sq - p * (tau + p + D) / sq - D) / D;
return (gamma - 1.0) * rho * eps - p;
}
__device__ float h_dot(float phi, float old_phi, float dt) {
/**
Calculates the time derivative of the height given the shallow water
variable phi at current time and previous timestep
NOTE: this is an upwinded approximation of hdot - there may be a better
way to do this which will more accurately give hdot at current time.
Parameters
----------
phi : float
Phi at current timestep
old_phi : float
Phi at previous timestep
dt : float
timestep
*/
float h = find_height(phi);
//float old_h = find_height(old_phi);
return -2.0 * h * (phi - old_phi) / (dt * (exp(2.0 * phi) - 1.0));
}
__device__ float calc_Q_swe(float rho, float p, float gamma, float Y, float Cv) {
/**
Calculate the heating rate per unit mass from the shallow water variables
*/
float T = p / ((gamma - 1.0) * rho * Cv);
float A = 1.0e8; // constant of proportionality
float X_dot = A*rho*rho*Y*Y*Y / (T*T*T) * exp(-44.0 / T);
if (nan_check(X_dot)) {
X_dot = 0.0;
}
return X_dot;
}
void calc_Q(float * rho, float * q_cons, int nx, int ny, int nz,
float gamma, float * gamma_up, float * Q, float Cv) {
/**
Calculate the heating rate per unit mass
*/
// hack: need to actually interpolate rho here rather than just assume it's constant
float * q_prim = new float[nx*ny*nz*6];
cons_to_prim_comp(q_cons, q_prim, nx, ny, nz, gamma, gamma_up);
for (int i = 0; i < nx*ny*nz; i++) {
float eps = q_prim[i*6+4];
float Y = q_prim[i*6+5];
float T = eps / Cv;
float A = 1.0e8; // constant of proportionality
Q[i] = A*rho[0]*rho[0]*Y*Y*Y / (T*T*T) * exp(-44.0 / T);
//cout << "eps = " << eps << " Y = " << Y << " H = " << Q[i] << '\n';
}
delete[] q_prim;
}
__device__ void calc_As(float * rhos, float * phis, float * A,
int nlayers, float gamma, float surface_phi, float surface_rho) {
/**
Calculates the As used to calculate the pressure given Phi, given
the pressure at the sea floor
Parameters
----------
rhos : float array
densities of layers
phis : float array
Vector of Phi for different layers
A : float array
vector of As for layers
nlayers : int
number of layers
gamma : float
adiabatic index
surface_phi : float
Phi at surface
surface_rho : float
density at surface
*/
// define A at sea surface using condition that p = 0
float A_surface = surface_rho * exp(-gamma * surface_phi / (gamma-1.0));
A[0] = A_surface + exp(-gamma * phis[0] / (gamma-1.0)) * (rhos[0] - surface_rho);
for (int n = 0; n < (nlayers-1); n++) {
A[n+1] = A[n] +
exp(-gamma * phis[n+1] / (gamma - 1.0)) * (rhos[n+1] - rhos[n]);
}
}
__device__ void cons_to_prim_comp_d(float * q_cons, float * q_prim,
float gamma, float * gamma_up) {
/**
Convert compressible conserved variables to primitive variables
Parameters
----------
q_cons : float *
state vector of conserved variables
q_prim : float *
state vector of primitive variables
gamma : float
adiabatic index
gamma_up : float *
spatial metric
*/
const float TOL = 1.0e-5;
float D = q_cons[0];
float Sx = q_cons[1];
float Sy = q_cons[2];
float Sz = q_cons[3];
float tau = q_cons[4];
float DX = q_cons[5];
// S^2
float Ssq = Sx*Sx*gamma_up[0] + 2.0*Sx*Sy*gamma_up[1] +
2.0*Sx*Sz*gamma_up[2] + Sy*Sy*gamma_up[4] + 2.0*Sy*Sz*gamma_up[5] +
Sz*Sz*gamma_up[8];
float pmin = (1.0 - Ssq) * (1.0 - Ssq) * tau * (gamma - 1.0);
float pmax = (gamma - 1.0) * (tau + D) / (2.0 - gamma);
if (pmin < 0.0) {
pmin = 0.0;//1.0e-9;
}
if (pmax < 0.0 || pmax < pmin) {
pmax = 1.0;
}
// check sign change
if (f_of_p(pmin, D, Sx, Sy, Sz, tau, gamma, gamma_up) *
f_of_p(pmax, D, Sx, Sy, Sz, tau, gamma, gamma_up) > 0.0) {
pmin = 0.0;
}
if (f_of_p(pmin, D, Sx, Sy, Sz, tau, gamma, gamma_up) *
f_of_p(pmax, D, Sx, Sy, Sz, tau, gamma, gamma_up) > 0.0) {
pmax *= 10.0;
}
float p = zbrent((fptr)f_of_p, pmin, pmax, TOL, D, Sx, Sy, Sz,
tau, gamma, gamma_up);
if (nan_check(p) || p < 0.0 || p > 1.0e9){
p = abs((gamma - 1.0) * (tau + D) / (2.0 - gamma)) > 1.0 ? 1.0 :
abs((gamma - 1.0) * (tau + D) / (2.0 - gamma));
}
float sq = sqrt(pow(tau + p + D, 2) - Ssq);
if (nan_check(sq)) {
//printf("\n\n\n sq is nan!!!! %f, %f, %f, %f, %f, %f\n\n\n", pow(tau + p + D, 2), p, Ssq, Sx, Sy, Sz);
sq = tau + p + D;
}
float eps = (sq - p * (tau + p + D)/sq - D) / D;
float h = 1.0 + gamma * eps;
float W = sqrt(1.0 + Ssq / (D*D*h*h));
float X = DX / D;
q_prim[0] = D * sq / (tau + p + D);//D / W;
q_prim[1] = Sx / (W*W * h * q_prim[0]);
q_prim[2] = Sy / (W*W * h * q_prim[0]);
q_prim[3] = Sz / (W*W * h * q_prim[0]);
q_prim[4] = eps;
q_prim[5] = X;
}
void cons_to_prim_comp(float * q_cons, float * q_prim, int nxf, int nyf,
int nz,
float gamma, float * gamma_up) {
/**
Convert compressible conserved variables to primitive variables
Parameters
----------
q_cons : float *
grid of conserved variables
q_prim : float *
grid where the primitive variables shall be stored
nxf, nyf, nz : int
grid dimensions
gamma : float
adiabatic index
gamma_up : float *
contravariant spatial metric
*/
const float TOL = 1.e-5;
for (int i = 0; i < nxf*nyf*nz; i++) {
float D = q_cons[i*6];
float Sx = q_cons[i*6+1];
float Sy = q_cons[i*6+2];
float Sz = q_cons[i*6+3];
float tau = q_cons[i*6+4];
float DX = q_cons[i*6+5];
// S^2
float Ssq = Sx*Sx*gamma_up[0] + 2.0*Sx*Sy*gamma_up[1] +
2.0*Sx*Sz*gamma_up[2] + Sy*Sy*gamma_up[4] + 2.0*Sy*Sz*gamma_up[5] +
Sz*Sz*gamma_up[8];
float pmin = (1.0 - Ssq) * (1.0 - Ssq) * tau * (gamma - 1.0);
float pmax = (gamma - 1.0) * (tau + D) / (2.0 - gamma);
if (pmin < 0.0) {
pmin = 0.0;//1.0e-9;
}
if (pmax < 0.0 || pmax < pmin) {
pmax = 1.0;
}
// check sign change
if (f_of_p(pmin, D, Sx, Sy, Sz, tau, gamma, gamma_up) *
f_of_p(pmax, D, Sx, Sy, Sz, tau, gamma, gamma_up) > 0.0) {
pmin = 0.0;
}
float p;
try {
p = zbrent((fptr)f_of_p, pmin, pmax, TOL, D, Sx, Sy, Sz,
tau, gamma, gamma_up);
} catch (char const*){
p = abs((gamma - 1.0) * (tau + D) / (2.0 - gamma)) > 1.0 ? 1.0 :
abs((gamma - 1.0) * (tau + D) / (2.0 - gamma));
}
float sq = sqrt(pow(tau + p + D, 2) - Ssq);
float eps = (sq - p * (tau + p + D)/sq - D) / D;
float h = 1.0 + gamma * eps;
float W = sqrt(1.0 + Ssq / (D*D*h*h));
float X = DX / D;
q_prim[i*6] = D * sq / (tau + p + D);//D / W;
q_prim[i*6+1] = Sx / (W*W * h * q_prim[i*6]);
q_prim[i*6+2] = Sy / (W*W * h * q_prim[i*6]);
q_prim[i*6+3] = Sz / (W*W * h * q_prim[i*6]);
q_prim[i*6+4] = eps;
q_prim[i*6+5] = X;
}
}
__device__ void shallow_water_fluxes(float * q, float * f, int dir,
float * gamma_up, float alpha, float * beta,
float gamma) {
/**
Calculate the flux vector of the shallow water equations
Parameters
----------
q : float *
state vector
f : float *
grid where fluxes shall be stored
dir : int
0 if calculating flux in x-direction, 1 if in y-direction
gamma_up : float *
spatial metric
alpha : float
lapse function
beta : float *
shift vector
gamma : float
adiabatic index
*/
if (nan_check(q[0])) q[0] = 1.0;
if (nan_check(q[1])) q[1] = 0.0;
if (nan_check(q[2])) q[2] = 0.0;
if (nan_check(q[3])) q[3] = 0.0;
float W = W_swe(q, gamma_up);
if (nan_check(W)) {
printf("W is nan! q0, q1, q2: %f, %f, %f\n", q[0], q[1], q[2]);
W = 1.0;
}
float u = q[1] / (q[0] * W);
float v = q[2] / (q[0] * W);
if (dir == 0) {
float qx = u * gamma_up[0] + v * gamma_up[1] -
beta[0] / alpha;
f[0] = q[0] * qx;
f[1] = q[1] * qx + 0.5 * q[0] * q[0] / (W * W);
f[2] = q[2] * qx;
f[3] = q[3] * qx;
} else {
float qy = v * gamma_up[4] + u * gamma_up[1] -
beta[1] / alpha;
f[0] = q[0] * qy;
f[1] = q[1] * qy;
f[2] = q[2] * qy + 0.5 * q[0] * q[0] / (W * W);
f[3] = q[3] * qy;
}
}
__device__ void compressible_fluxes(float * q, float * f, int dir,
float * gamma_up, float alpha, float * beta,
float gamma) {
/**
Calculate the flux vector of the compressible GR hydrodynamics equations
Parameters
----------
q : float *
state vector
f : float *
grid where fluxes shall be stored
dir : int
0 if calculating flux in x-direction, 1 if in y-direction,
2 if in z-direction
gamma_up : float *
spatial metric
alpha : float
lapse function
beta : float *
shift vector
gamma : float
adiabatic index
*/
// this is worked out on the fine grid
float * q_prim;
q_prim = (float *)malloc(6 * sizeof(float));
cons_to_prim_comp_d(q, q_prim, gamma, gamma_up);
float p = p_from_rho_eps(q_prim[0], q_prim[4], gamma);
float u = q_prim[1];
float v = q_prim[2];
float w = q_prim[3];
//printf("p: %f, D: %f, rho: %f, u: %f, v: %f, w: %f, tau: %f, eps: %f\n", p, q[0], q_prim[0], u, v, w, q[4], q_prim[4]);
if (dir == 0) {
float qx = u * gamma_up[0] + v * gamma_up[1] + w * gamma_up[2] - beta[0] / alpha;
f[0] = q[0] * qx;
f[1] = q[1] * qx + p;
f[2] = q[2] * qx;
f[3] = q[3] * qx;
f[4] = q[4] * qx + p * u;
f[5] = q[5] * qx;
} else if (dir == 1){
float qy = v * gamma_up[4] + u * gamma_up[1] + w * gamma_up[5] - beta[1] / alpha;
f[0] = q[0] * qy;
f[1] = q[1] * qy;
f[2] = q[2] * qy + p;
f[3] = q[3] * qy;
f[4] = q[4] * qy + p * v;
f[5] = q[5] * qy;
} else {
float qz = w * gamma_up[8] + u * gamma_up[2] + v * gamma_up[5] - beta[2] / alpha;
f[0] = q[0] * qz;
f[1] = q[1] * qz;
f[2] = q[2] * qz;
f[3] = q[3] * qz + p;
f[4] = q[4] * qz + p * w;
f[5] = q[5] * qz;
}
//printf("f(tau): %f\n", f[4]);
free(q_prim);
}
void p_from_swe(float * q, float * p, int nx, int ny, int nz,
float * gamma_up, float rho, float gamma, float A) {
/**
Calculate p using SWE conserved variables
Parameters
----------
q : float *
state vector
p : float *
grid where pressure shall be stored
nx, ny, nz : int
grid dimensions
gamma_up : float *
spatial metric
rho : float
density
gamma : float
adiabatic index
A : float
variable required in p(Phi) calculation
*/
for (int i = 0; i < nx*ny*nz; i++) {
        float W = W_swe(&q[i*4], gamma_up); // Lorentz factor of cell i
float ph = q[i*4] / W;
p[i] = (gamma - 1.0) * (A * exp(gamma * ph /
(gamma - 1.0)) - rho) / gamma;
}
}
__device__ float p_from_swe(float * q, float * gamma_up, float rho,
float gamma, float W, float A) {
/**
    Calculates and returns p using the SWE conserved variables
Parameters
----------
q : float *
state vector
gamma_up : float *
spatial metric
rho : float
density
gamma : float
adiabatic index
W : float
Lorentz factor
A : float
variable required in p(Phi) calculation
*/
float ph = q[0] / W;
return (gamma - 1.0) * (A * exp(gamma * ph /
(gamma - 1.0)) - rho) / gamma;
}
__global__ void compressible_from_swe(float * q, float * q_comp,
int nx, int ny, int nz,
float * gamma_up, float * rho, float gamma,
int kx_offset, int ky_offset, float dt,
float * old_phi) {
/**
Calculates the compressible state vector from the SWE variables.
Parameters
----------
q : float *
grid of SWE state vector
q_comp : float *
        grid where the compressible state vector is to be stored
nx, ny, nz : int
grid dimensions
gamma_up : float *
spatial metric
rho, gamma : float
density and adiabatic index
kx_offset, ky_offset : int
kernel offsets in the x and y directions
dt : float
timestep
old_phi : float *
Phi at previous timestep
*/
int x = kx_offset + blockIdx.x * blockDim.x + threadIdx.x;
int y = ky_offset + blockIdx.y * blockDim.y + threadIdx.y;
int z = threadIdx.z;
int offset = (z * ny + y) * nx + x;
if ((x < nx) && (y < ny) && (z < nz)) {
//printf("(%d, %d, %d): %f, %f, %f\n", x, y, z, q[offset*4], q[offset*4+1], q[offset*4+2]);
float * q_swe;
q_swe = (float *)malloc(4 * sizeof(float));
for (int i = 0; i < 4; i++) {
q_swe[i] = q[offset * 4 + i];
}
// calculate hdot = w (?)
float hdot = h_dot(q[offset*4], old_phi[offset], dt);
//printf("hdot(%d, %d, %d): %f, \n", x, y, z, hdot);
float W = sqrt((q[offset*4+1] * q[offset*4+1] * gamma_up[0] +
2.0 * q[offset*4+1] * q[offset*4+2] * gamma_up[1] +
q[offset*4+2] * q[offset*4+2] * gamma_up[4]) /
(q[offset*4] * q[offset*4]) +
2.0 * hdot * (q[offset*4+1] * gamma_up[2] +
q[offset*4+2] * gamma_up[5]) / q[offset*4] +
hdot * hdot * gamma_up[8] + 1.0);
//printf("%d\n", gamma_up[8]);
//printf("W(%d, %d, %d): %f, \n", x, y, z, W);
        // TODO: this is really inefficient as it redoes the same calculation
        // on different layers
float * A, * phis;
A = (float *)malloc(nz * sizeof(float));
phis = (float *)malloc(nz * sizeof(float));
for (int i = 0; i < nz; i++) {
phis[i] = q[((i * ny + y) * nx + x) * 4];
}
calc_As(rho, phis, A, nz, gamma, phis[0], rho[0]);
float p = p_from_swe(q_swe, gamma_up, rho[z], gamma, W, A[z]);
float rhoh = rhoh_from_p(p, rho[z], gamma);
free(phis);
free(A);
q_comp[offset*6] = rho[z] * W;
q_comp[offset*6+1] = rhoh * W * q[offset*4+1] / q[offset*4];
q_comp[offset*6+2] = rhoh * W * q[offset*4+2] / q[offset*4];
q_comp[offset*6+3] = rho[z] * W * hdot;
q_comp[offset*6+4] = rhoh*W*W - p - rho[z] * W;
q_comp[offset*6+5] = rho[z] * W * q[offset*4+3] / q[offset*4];
//printf("s2c (%d, %d, %d): %f, %f\n", x, y, z, q_comp[offset*6+4], p);
// NOTE: hack?
if (q_comp[offset*6+4] < 0.0) {
//printf("tau < 0, p: %f, tau: %f\n", p, q_comp[offset*6+4]);
q_comp[offset*6+4] = 0.0;
}
// cannot have X < 0.0
if (q_comp[offset*6+5] < 0.0) {
q_comp[offset*6+5] = 0.0;
}
free(q_swe);
}
}
__device__ float slope_limit(float layer_frac, float left, float middle, float right, float aleft, float amiddle, float aright) {
/**
    Calculates the slope-limited vertical gradient at layer_frac between middle and amiddle.
    Left, middle and right are from row n; aleft, amiddle and aright are from the row above it (n-1).
*/
float S_upwind = (layer_frac * (right - middle) +
(1.0 - layer_frac) * (aright - amiddle));
float S_downwind = (layer_frac * (middle - left)
+ (1.0 - layer_frac) * (amiddle - aleft));
float S = 0.5 * (S_upwind + S_downwind);
float r = 1.0e6;
if (abs(S_downwind) > 1.0e-10) {
r = S_upwind / S_downwind;
}
return S * phi(r);
}
__global__ void swe_from_compressible(float * q, float * q_swe,
int nx, int ny,
int nxf, int nyf, int nz,
float * gamma_up, float * rho,
float gamma,
int kx_offset, int ky_offset,
float * qc,
int * matching_indices) {
/**
Calculates the SWE state vector from the compressible variables.
Parameters
----------
q : float *
grid of compressible state vector
q_swe : float *
        grid where the SWE state vector is to be stored
    nx, ny : int
        coarse grid dimensions
    nxf, nyf, nz : int
        fine grid dimensions
gamma_up : float *
spatial metric
rho, gamma : float
density and adiabatic index
kx_offset, ky_offset : int
kernel offsets in the x and y directions
qc : float *
coarse grid
matching_indices : int *
indices of fine grid wrt coarse grid
*/
int x = kx_offset + blockIdx.x * blockDim.x + threadIdx.x;
int y = ky_offset + blockIdx.y * blockDim.y + threadIdx.y;
int z = threadIdx.z;
int offset = (z * nyf + y) * nxf + x;
/*if (x == 0 && y == 0 && z == 0) {
for (int j = 0; j < 40; j++) {
for (int i = 0; i < 40; i++) {
printf("%d, ", q[(j*nxf+i)*6]);
}
}
printf("\n\n");
}*/
float W, u, v, w, p, X;
float * q_prim, * q_con;
q_con = (float *)malloc(6 * sizeof(float));
q_prim = (float *)malloc(6 * sizeof(float));
if ((x < nxf) && (y < nyf) && (z < nz)) {
for (int i = 0; i < 6; i++) {
q_con[i] = q[offset*6 + i];
}
// find primitive variables
cons_to_prim_comp_d(q_con, q_prim, gamma, gamma_up);
u = q_prim[1];
v = q_prim[2];
w = q_prim[3];
X = q_prim[5];
W = 1.0 / sqrt(1.0 -
u*u*gamma_up[0] - 2.0 * u*v * gamma_up[1] -
2.0 * u*w * gamma_up[2] - v*v*gamma_up[4] -
2.0 * v*w*gamma_up[5] - w*w*gamma_up[8]);
//rho = q_prim[0];
// calculate SWE conserved variables on fine grid.
p = p_from_rho_eps(q_prim[0], q_prim[4], gamma);
// save to q_swe
q_swe[offset*4] = p;
//printf("x: (%d, %d, %d), U: (%f, %f, %f), v: (%f,%f,%f), W: %f, p: %f\n", x, y, z, q_con[1], q[offset*6+2], q[offset*6+3], u, v, w, W, p);
}
__syncthreads();
float ph;
if ((x < nxf) && (y < nyf) && (z < nz)) {
float * A, * phis, *rhos;
A = (float *)malloc(nz * sizeof(float));
phis = (float *)malloc(nz * sizeof(float));
rhos = (float *)malloc(nz * sizeof(float));
for (int i = 0; i < nz; i++) {
phis[i] = q_swe[((i * nyf + y) * nxf + x)*4];
            if (sizeof(rho) > nz) {
                // NOTE: sizeof(rho) is the size of the pointer (not of the array),
                // so this branch is taken whenever nz is small
                // rho varies with position
rhos[i] = rho[(i * nyf + y) * nxf + x];
} else {
// HACK: rho is only nlayers long - need to find a way to define on fine grid too
rhos[i] = rho[0];
}
}
int c_x = round(x*0.5) + matching_indices[0];
int c_y = round(y*0.5) + matching_indices[2];
float interp_q_comp = qc[(c_y * nx + c_x) * 4];
float Sx = slope_limit(1.0, qc[(c_y * nx + c_x-1) * 4], qc[(c_y * nx + c_x) * 4], qc[(c_y * nx + c_x+1) * 4], 0.0, 0.0, 0.0);
float Sy = slope_limit(1.0, qc[((c_y-1) * nx + c_x) * 4], qc[(c_y * nx + c_x) * 4], qc[((c_y+1) * nx + c_x) * 4], 0.0, 0.0, 0.0);
float phi_surface = interp_q_comp;
if (x % 2 == 1) {
phi_surface += 0.25 * Sx;
} else {
phi_surface -= 0.25 * Sx;
}
if (y % 2 == 1) {
phi_surface += 0.25 * Sy;
} else {
phi_surface -= 0.25 * Sy;
}
        // TODO: this will not work, as this function uses the fact that p = 0 on
        // the surface layer, which is not true for the compressible code
calc_As(rhos, phis, A, nz, gamma, phi_surface, rho[0]);
        // NOTE: hack to stop this going NaN
if (nan_check(A[z]) || A[z] < 0.0) A[z] = 1.0;
ph = phi_from_p(p, q_prim[0], gamma, A[z]);
free(phis);
free(A);
free(rhos);
//printf("W: %f, ph: %f, tau: %f, eps: %f, A[z]: %f, p: %f, rho: %f\n", W, ph, q_con[4], q_prim[4], A[z], p, q_prim[0]);
}
__syncthreads();
if ((x < nxf) && (y < nyf) && (z < nz)) {
q_swe[offset*4] = ph * W;
q_swe[offset*4+1] = ph * W * W * u;
q_swe[offset*4+2] = ph * W * W * v;
q_swe[offset*4+3] = ph * W * X;
//printf("(x,y,z): %d, %d, %d Phi, Sx, Sy: %f, %f, %f\n", x,y,z,q_swe[offset*4], q_swe[offset*4+1], q_swe[offset*4+2]);
}
free(q_con);
free(q_prim);
}
// device-side function pointers to __device__ functions
__device__ flux_func_ptr d_compressible_fluxes = compressible_fluxes;
__device__ flux_func_ptr d_shallow_water_fluxes = shallow_water_fluxes;
|
2fda957aaf14f30b85c29441ced7e792be47001e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from dznrm2.cu normal z -> s, Sat Nov 15 19:53:59 2014
*/
#include "common_magma.h"
#include "commonblas_s.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
#define PRECISION_s
//==============================================================================
__global__ void
magmablas_snrm2_kernel( int m, float *dA, int ldda, float *dxnorm )
{
const int tx = threadIdx.x;
float *dx = dA + blockIdx.x * ldda;
__shared__ float sum[ BLOCK_SIZE ];
float re, lsum;
// get norm of dx
lsum = 0;
for( int j = tx; j < m; j += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_S_REAL( dx[j] );
float im = MAGMA_S_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[tx] = lsum;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
if (tx==0)
dxnorm[blockIdx.x] = sqrt(sum[0]);
}
//==============================================================================
__global__ void
magmablas_snrm2_check_kernel( int m, float *dA, int ldda, float *dxnorm,
float *lsticc )
{
const int tx = threadIdx.x;
float *dx = dA + blockIdx.x * ldda;
__shared__ float sum[ BLOCK_SIZE ];
float re, lsum;
// get norm of dx only if lsticc[blockIdx+1] != 0
if ( lsticc[blockIdx.x + 1] == 0 )
return;
lsum = 0;
for( int j = tx; j < m; j += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_S_REAL( dx[j] );
float im = MAGMA_S_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[tx] = lsum;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
if (tx==0)
dxnorm[blockIdx.x] = sqrt(sum[0]);
}
extern "C" void
magmablas_snrm2_check(
magma_int_t m, magma_int_t n,
magmaFloat_ptr dA, magma_int_t ldda,
magmaFloat_ptr dxnorm,
magmaFloat_ptr dlsticc)
{
dim3 blocks( n );
dim3 threads( BLOCK_SIZE );
hipLaunchKernelGGL(( magmablas_snrm2_check_kernel), dim3(blocks), dim3(threads) , 0, 0, m, dA, ldda, dxnorm, dlsticc );
}
//==============================================================================
__global__ void
magmablas_snrm2_smkernel( int m, int n, float *dA, int ldda,
float *dxnorm )
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
__shared__ float sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1];
float re, lsum;
for( int k = ty; k < n; k += BLOCK_SIZEy ) {
float *dx = dA + k * ldda;
// get norm of dx
lsum = 0;
for( int j = tx; j < m; j += BLOCK_SIZEx ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_S_REAL( dx[j] );
float im = MAGMA_S_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[tx][ty] = lsum;
magma_sum_reduce_2d< BLOCK_SIZEx, BLOCK_SIZEy+1 >( tx, ty, sum );
if (tx == 0)
dxnorm[k] = sqrt(sum[0][ty]);
__syncthreads();
}
}
//==============================================================================
/*
Compute the snrm2 of each column of m-by-n matrix dA.
The resulting norms are written in the dxnorm array.
This routine uses only one SM (block).
*/
extern "C" void
magmablas_snrm2_sm(
magma_int_t m, magma_int_t n,
magmaFloat_ptr dA, magma_int_t ldda,
float *dxnorm)
{
dim3 blocks( 1 );
dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy );
hipLaunchKernelGGL(( magmablas_snrm2_smkernel), dim3(blocks), dim3(threads), 0, magma_stream , m, n, dA, ldda, dxnorm );
}
//==============================================================================
extern "C"
__global__ void
magma_snrm2_adjust_kernel(float *xnorm, float *c)
{
const int tx = threadIdx.x;
__shared__ float sum[ BLOCK_SIZE ];
float temp;
temp = MAGMA_S_ABS( c[tx] ) / xnorm[0];
sum[tx] = -temp * temp;
magma_sum_reduce_n( blockDim.x, tx, sum );
__syncthreads();
if (tx == 0)
xnorm[0] = xnorm[0] * sqrt(1+sum[0]);
}
/*
Adjust the norm of c to give the norm of c[k+1:], assuming that
c was changed with orthogonal transformations.
*/
extern "C" void
magmablas_snrm2_adjust(magma_int_t k, magmaFloat_ptr dxnorm, magmaFloat_ptr dc)
{
hipLaunchKernelGGL(( magma_snrm2_adjust_kernel), dim3(1), dim3(k), 0, magma_stream , dxnorm, dc);
}
//==============================================================================
#define BS 256
__global__ void
magma_snrm2_row_check_adjust_kernel(
int n, float tol, float *xnorm, float *xnorm2,
float *C, int ldc, float *lsticc)
{
const int tx = threadIdx.x + blockIdx.x*BS;
lsticc[tx+1] = 0;
if (tx < n) {
float temp = MAGMA_S_ABS( C[tx*ldc] ) / xnorm[tx];
temp = max( 0.0, ((1.0 + temp) * (1.0 - temp)) );
float temp2 = xnorm[tx] / xnorm2[tx];
temp2 = temp * (temp2 * temp2);
if (temp2 <= tol) {
lsticc[tx+1] = 1;
} else {
xnorm[tx] *= sqrt(temp);
}
}
if (tx == 0)
lsticc[0] = 0;
magma_sum_reduce_n( blockDim.x, tx, lsticc );
}
/*
Adjust the norm of C[,1:k] to give the norm of C[k+1:,1:k], assuming that
C was changed with orthogonal transformations.
    It also does checks for QP3
*/
extern "C" void
magmablas_snrm2_row_check_adjust(
magma_int_t k, float tol,
magmaFloat_ptr dxnorm,
magmaFloat_ptr dxnorm2,
magmaFloat_ptr dC, magma_int_t lddc,
magmaFloat_ptr dlsticc)
{
int nblocks = (k+BS-1)/BS;
hipLaunchKernelGGL(( magma_snrm2_row_check_adjust_kernel), dim3(nblocks), dim3(BS) , 0, 0, k, tol, dxnorm, dxnorm2, dC, lddc, dlsticc);
}
//==============================================================================
/*
Compute the snrm2 of each column of m-by-n matrix dA.
The resulting norms are written in the dxnorm array.
The computation can be done using n blocks (default) or on one SM (commented).
*/
extern "C" void
magmablas_snrm2_cols(
magma_int_t m, magma_int_t n,
magmaFloat_ptr dA, magma_int_t ldda,
magmaFloat_ptr dxnorm)
{
dim3 blocks( n );
dim3 threads( BLOCK_SIZE );
hipLaunchKernelGGL(( magmablas_snrm2_kernel), dim3(blocks), dim3(threads), 0, magma_stream , m, dA, ldda, dxnorm );
// The following would do the computation on one SM
// magmablas_snrm2_sm(m, n, dA, ldda, dxnorm);
}
//==============================================================================
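// Editor's illustrative sketch (not part of the original MAGMA source): a
// minimal host-side use of magmablas_snrm2_cols. It assumes dA is an m-by-n
// matrix already resident on the device with leading dimension ldda, and that
// magmaFloat_ptr is a plain device float pointer.
extern "C" void
example_snrm2_cols_usage(
    magma_int_t m, magma_int_t n,
    magmaFloat_ptr dA, magma_int_t ldda,
    float *hxnorm)   // host array of n floats that receives the column norms
{
    float *dxnorm;
    hipMalloc((void**)&dxnorm, n * sizeof(float));
    magmablas_snrm2_cols(m, n, dA, ldda, dxnorm);    // one thread block per column
    hipMemcpy(hxnorm, dxnorm, n * sizeof(float), hipMemcpyDeviceToHost);
    hipFree(dxnorm);
}
//==============================================================================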
|
2fda957aaf14f30b85c29441ced7e792be47001e.cu
|
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from dznrm2.cu normal z -> s, Sat Nov 15 19:53:59 2014
*/
#include "common_magma.h"
#include "commonblas_s.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
#define PRECISION_s
//==============================================================================
__global__ void
magmablas_snrm2_kernel( int m, float *dA, int ldda, float *dxnorm )
{
const int tx = threadIdx.x;
float *dx = dA + blockIdx.x * ldda;
__shared__ float sum[ BLOCK_SIZE ];
float re, lsum;
// get norm of dx
lsum = 0;
for( int j = tx; j < m; j += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_S_REAL( dx[j] );
float im = MAGMA_S_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[tx] = lsum;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
if (tx==0)
dxnorm[blockIdx.x] = sqrt(sum[0]);
}
//==============================================================================
__global__ void
magmablas_snrm2_check_kernel( int m, float *dA, int ldda, float *dxnorm,
float *lsticc )
{
const int tx = threadIdx.x;
float *dx = dA + blockIdx.x * ldda;
__shared__ float sum[ BLOCK_SIZE ];
float re, lsum;
// get norm of dx only if lsticc[blockIdx+1] != 0
if ( lsticc[blockIdx.x + 1] == 0 )
return;
lsum = 0;
for( int j = tx; j < m; j += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_S_REAL( dx[j] );
float im = MAGMA_S_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[tx] = lsum;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
if (tx==0)
dxnorm[blockIdx.x] = sqrt(sum[0]);
}
extern "C" void
magmablas_snrm2_check(
magma_int_t m, magma_int_t n,
magmaFloat_ptr dA, magma_int_t ldda,
magmaFloat_ptr dxnorm,
magmaFloat_ptr dlsticc)
{
dim3 blocks( n );
dim3 threads( BLOCK_SIZE );
magmablas_snrm2_check_kernel<<< blocks, threads >>>( m, dA, ldda, dxnorm, dlsticc );
}
//==============================================================================
__global__ void
magmablas_snrm2_smkernel( int m, int n, float *dA, int ldda,
float *dxnorm )
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
__shared__ float sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1];
float re, lsum;
for( int k = ty; k < n; k += BLOCK_SIZEy ) {
float *dx = dA + k * ldda;
// get norm of dx
lsum = 0;
for( int j = tx; j < m; j += BLOCK_SIZEx ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_S_REAL( dx[j] );
float im = MAGMA_S_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[tx][ty] = lsum;
magma_sum_reduce_2d< BLOCK_SIZEx, BLOCK_SIZEy+1 >( tx, ty, sum );
if (tx == 0)
dxnorm[k] = sqrt(sum[0][ty]);
__syncthreads();
}
}
//==============================================================================
/*
Compute the snrm2 of each column of m-by-n matrix dA.
The resulting norms are written in the dxnorm array.
This routine uses only one SM (block).
*/
extern "C" void
magmablas_snrm2_sm(
magma_int_t m, magma_int_t n,
magmaFloat_ptr dA, magma_int_t ldda,
float *dxnorm)
{
dim3 blocks( 1 );
dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy );
magmablas_snrm2_smkernel<<< blocks, threads, 0, magma_stream >>>( m, n, dA, ldda, dxnorm );
}
//==============================================================================
extern "C"
__global__ void
magma_snrm2_adjust_kernel(float *xnorm, float *c)
{
const int tx = threadIdx.x;
__shared__ float sum[ BLOCK_SIZE ];
float temp;
temp = MAGMA_S_ABS( c[tx] ) / xnorm[0];
sum[tx] = -temp * temp;
magma_sum_reduce_n( blockDim.x, tx, sum );
__syncthreads();
if (tx == 0)
xnorm[0] = xnorm[0] * sqrt(1+sum[0]);
}
/*
Adjust the norm of c to give the norm of c[k+1:], assuming that
c was changed with orthogonal transformations.
*/
extern "C" void
magmablas_snrm2_adjust(magma_int_t k, magmaFloat_ptr dxnorm, magmaFloat_ptr dc)
{
magma_snrm2_adjust_kernel<<< 1, k, 0, magma_stream >>> (dxnorm, dc);
}
//==============================================================================
#define BS 256
__global__ void
magma_snrm2_row_check_adjust_kernel(
int n, float tol, float *xnorm, float *xnorm2,
float *C, int ldc, float *lsticc)
{
const int tx = threadIdx.x + blockIdx.x*BS;
lsticc[tx+1] = 0;
if (tx < n) {
float temp = MAGMA_S_ABS( C[tx*ldc] ) / xnorm[tx];
temp = max( 0.0, ((1.0 + temp) * (1.0 - temp)) );
float temp2 = xnorm[tx] / xnorm2[tx];
temp2 = temp * (temp2 * temp2);
if (temp2 <= tol) {
lsticc[tx+1] = 1;
} else {
xnorm[tx] *= sqrt(temp);
}
}
if (tx == 0)
lsticc[0] = 0;
magma_sum_reduce_n( blockDim.x, tx, lsticc );
}
/*
Adjust the norm of C[,1:k] to give the norm of C[k+1:,1:k], assuming that
C was changed with orthogonal transformations.
    It also does checks for QP3
*/
extern "C" void
magmablas_snrm2_row_check_adjust(
magma_int_t k, float tol,
magmaFloat_ptr dxnorm,
magmaFloat_ptr dxnorm2,
magmaFloat_ptr dC, magma_int_t lddc,
magmaFloat_ptr dlsticc)
{
int nblocks = (k+BS-1)/BS;
magma_snrm2_row_check_adjust_kernel<<< nblocks, BS >>> (k, tol, dxnorm, dxnorm2, dC, lddc, dlsticc);
}
//==============================================================================
/*
Compute the snrm2 of each column of m-by-n matrix dA.
The resulting norms are written in the dxnorm array.
The computation can be done using n blocks (default) or on one SM (commented).
*/
extern "C" void
magmablas_snrm2_cols(
magma_int_t m, magma_int_t n,
magmaFloat_ptr dA, magma_int_t ldda,
magmaFloat_ptr dxnorm)
{
dim3 blocks( n );
dim3 threads( BLOCK_SIZE );
magmablas_snrm2_kernel<<< blocks, threads, 0, magma_stream >>>( m, dA, ldda, dxnorm );
// The following would do the computation on one SM
// magmablas_snrm2_sm(m, n, dA, ldda, dxnorm);
}
//==============================================================================
|
f62ebbeaa0cffa410935d4695c839126d625acca.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "stdio.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
// Defining number of elements in Array
#define N 50000
__global__ void gpuAdd(int *d_a, int *d_b, int *d_c) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
d_c[tid] = d_a[tid] + d_b[tid];
tid += blockDim.x * gridDim.x;
}
}
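// Editor's illustrative sketch (not part of the original file): the asynchronous
// calls in main() below return error codes that are easy to miss; a minimal
// checking macro, assuming nothing beyond the HIP runtime, could look like this.
#define HIP_CHECK(call)                                                     \
  do {                                                                      \
    hipError_t err_ = (call);                                               \
    if (err_ != hipSuccess)                                                 \
      printf("HIP error %s at %s:%d\n", hipGetErrorString(err_), __FILE__,  \
             __LINE__);                                                     \
  } while (0)
// Example: HIP_CHECK(hipMemcpyAsync(d_a0, h_a, N * sizeof(int),
//                                   hipMemcpyHostToDevice, stream0));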
int main(void) {
int *h_a, *h_b, *h_c;
int *d_a0, *d_b0, *d_c0;
int *d_a1, *d_b1, *d_c1;
hipStream_t stream0, stream1;
hipStreamCreate(&stream0);
hipStreamCreate(&stream1);
hipEvent_t e_start, e_stop;
hipEventCreate(&e_start);
hipEventCreate(&e_stop);
hipEventRecord(e_start, 0);
hipHostMalloc((void **)&h_a, 2 * N * sizeof(int), hipHostMallocDefault);
hipHostMalloc((void **)&h_b, 2 * N * sizeof(int), hipHostMallocDefault);
hipHostMalloc((void **)&h_c, 2 * N * sizeof(int), hipHostMallocDefault);
hipMalloc((void **)&d_a0, N * sizeof(int));
hipMalloc((void **)&d_b0, N * sizeof(int));
hipMalloc((void **)&d_c0, N * sizeof(int));
hipMalloc((void **)&d_a1, N * sizeof(int));
hipMalloc((void **)&d_b1, N * sizeof(int));
hipMalloc((void **)&d_c1, N * sizeof(int));
for (int i = 0; i < N * 2; i++) {
h_a[i] = 2 * i * i;
h_b[i] = i;
}
hipMemcpyAsync(d_a0, h_a, N * sizeof(int), hipMemcpyHostToDevice, stream0);
hipMemcpyAsync(d_a1, h_a + N, N * sizeof(int), hipMemcpyHostToDevice,
stream1);
hipMemcpyAsync(d_b0, h_b, N * sizeof(int), hipMemcpyHostToDevice, stream0);
hipMemcpyAsync(d_b1, h_b + N, N * sizeof(int), hipMemcpyHostToDevice,
stream1);
hipLaunchKernelGGL(( gpuAdd), dim3(512), dim3(512), 0, stream0, d_a0, d_b0, d_c0);
hipLaunchKernelGGL(( gpuAdd), dim3(512), dim3(512), 0, stream1, d_a1, d_b1, d_c1);
hipMemcpyAsync(h_c, d_c0, N * sizeof(int), hipMemcpyDeviceToHost, stream0);
  hipMemcpyAsync(h_c + N, d_c1, N * sizeof(int), hipMemcpyDeviceToHost,
                 stream1); // copy back on the stream that produced d_c1
hipDeviceSynchronize();
hipStreamSynchronize(stream0);
hipStreamSynchronize(stream1);
hipEventRecord(e_stop, 0);
hipEventSynchronize(e_stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, e_start, e_stop);
printf("Time to add %d numbers: %3.1f ms\n", 2 * N, elapsedTime);
int Correct = 1;
printf("Vector addition on GPU \n");
// Printing result on console
for (int i = 0; i < 2 * N; i++) {
if ((h_a[i] + h_b[i] != h_c[i])) {
Correct = 0;
}
}
if (Correct == 1) {
printf("GPU has computed Sum Correctly\n");
} else {
printf("There is an Error in GPU Computation\n");
}
// Free up memory
hipFree(d_a0);
hipFree(d_b0);
hipFree(d_c0);
  hipFree(d_a1);
  hipFree(d_b1);
  hipFree(d_c1);
hipHostFree(h_a);
hipHostFree(h_b);
hipHostFree(h_c);
return 0;
}
|
f62ebbeaa0cffa410935d4695c839126d625acca.cu
|
#include "stdio.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
// Defining number of elements in Array
#define N 50000
__global__ void gpuAdd(int *d_a, int *d_b, int *d_c) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
d_c[tid] = d_a[tid] + d_b[tid];
tid += blockDim.x * gridDim.x;
}
}
int main(void) {
int *h_a, *h_b, *h_c;
int *d_a0, *d_b0, *d_c0;
int *d_a1, *d_b1, *d_c1;
cudaStream_t stream0, stream1;
cudaStreamCreate(&stream0);
cudaStreamCreate(&stream1);
cudaEvent_t e_start, e_stop;
cudaEventCreate(&e_start);
cudaEventCreate(&e_stop);
cudaEventRecord(e_start, 0);
cudaHostAlloc((void **)&h_a, 2 * N * sizeof(int), cudaHostAllocDefault);
cudaHostAlloc((void **)&h_b, 2 * N * sizeof(int), cudaHostAllocDefault);
cudaHostAlloc((void **)&h_c, 2 * N * sizeof(int), cudaHostAllocDefault);
cudaMalloc((void **)&d_a0, N * sizeof(int));
cudaMalloc((void **)&d_b0, N * sizeof(int));
cudaMalloc((void **)&d_c0, N * sizeof(int));
cudaMalloc((void **)&d_a1, N * sizeof(int));
cudaMalloc((void **)&d_b1, N * sizeof(int));
cudaMalloc((void **)&d_c1, N * sizeof(int));
for (int i = 0; i < N * 2; i++) {
h_a[i] = 2 * i * i;
h_b[i] = i;
}
cudaMemcpyAsync(d_a0, h_a, N * sizeof(int), cudaMemcpyHostToDevice, stream0);
cudaMemcpyAsync(d_a1, h_a + N, N * sizeof(int), cudaMemcpyHostToDevice,
stream1);
cudaMemcpyAsync(d_b0, h_b, N * sizeof(int), cudaMemcpyHostToDevice, stream0);
cudaMemcpyAsync(d_b1, h_b + N, N * sizeof(int), cudaMemcpyHostToDevice,
stream1);
gpuAdd<<<512, 512, 0, stream0>>>(d_a0, d_b0, d_c0);
gpuAdd<<<512, 512, 0, stream1>>>(d_a1, d_b1, d_c1);
cudaMemcpyAsync(h_c, d_c0, N * sizeof(int), cudaMemcpyDeviceToHost, stream0);
  cudaMemcpyAsync(h_c + N, d_c1, N * sizeof(int), cudaMemcpyDeviceToHost,
                  stream1); // copy back on the stream that produced d_c1
cudaDeviceSynchronize();
cudaStreamSynchronize(stream0);
cudaStreamSynchronize(stream1);
cudaEventRecord(e_stop, 0);
cudaEventSynchronize(e_stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, e_start, e_stop);
printf("Time to add %d numbers: %3.1f ms\n", 2 * N, elapsedTime);
int Correct = 1;
printf("Vector addition on GPU \n");
// Printing result on console
for (int i = 0; i < 2 * N; i++) {
if ((h_a[i] + h_b[i] != h_c[i])) {
Correct = 0;
}
}
if (Correct == 1) {
printf("GPU has computed Sum Correctly\n");
} else {
printf("There is an Error in GPU Computation\n");
}
// Free up memory
cudaFree(d_a0);
cudaFree(d_b0);
cudaFree(d_c0);
  cudaFree(d_a1);
  cudaFree(d_b1);
  cudaFree(d_c1);
cudaFreeHost(h_a);
cudaFreeHost(h_b);
cudaFreeHost(h_c);
return 0;
}
|
590a93520eabf12d18be1c68aa65ff9c5dab2041.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hip/device_functions.h>
#include "skelft.h"
#include <cstdio>
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
using namespace std;
// Parameters for CUDA kernel executions; more or less optimized for a 1024x1024 image.
#define BLOCKX 16
#define BLOCKY 16
#define BLOCKSIZE 64
#define TILE_DIM 32
#define BLOCK_ROWS 16
/****** Global Variables *******/
const int NB = 7; // Nr buffers we use and store in the entire framework
short2 **pbaTextures; // Work buffers used to compute and store resident images
// 0: work buffer
// 1: FT
// 2: thresholded DT or exact floating-point DT
// 3: thresholded skeleton
// 4: topology analysis
// 5: work buffer for topology
// 6: skeleton FT
//
float* pbaTexSiteParam; // Stores boundary parameterization
float2* pbaTexAdvectSites;
float* pbaTexDensityData; // The density map of the sites
float2* pbaTexGradientData; // The gradient of the density map
int pbaTexSize; // Texture size (squared) actually used in all computations
int floodBand = 4, // Various FT computation parameters; defaults are good for a 1024x1024 image.
maurerBand = 4,
colorBand = 4;
texture<short2> pbaTexColor; // 2D textures (bound to various buffers defined above as needed)
texture<short2> pbaTexColor2;
texture<short2> pbaTexLinks;
texture<float> pbaTexParam; // 1D site parameterization texture (bound to pbaTexSiteParam)
texture<unsigned char>
pbaTexGray; // 2D texture of unsigned char values, e.g. the binary skeleton
texture<float2> pbaTexAdvect;
texture<float,hipTextureType2D,hipReadModeElementType> pbaTexDensity;
texture<float2,hipTextureType2D,hipReadModeElementType> pbaTexGradient;
texture<float,hipTextureType2D,hipReadModeElementType> pbaTexDT;
__device__ bool fill_gc; //Indicates if a fill-sweep did fill anything or not
#if __CUDA_ARCH__ < 110 // We cannot use atomic intrinsics on SM10 or below. Thus, we define these as nop.
#define atomicInc(a,b) 0 // The default will be that some code e.g. endpoint detection will thus not do anything.
#endif
/********* Kernels ********/
#include "skelftkernel.h"
// Initialize necessary memory (CPU/GPU sides)
// - textureSize: The max size of any image we will process until re-initialization
void skelft2DInitialization(int maxTexSize)
{
int pbaMemSize = maxTexSize * maxTexSize * sizeof(short2); // A buffer has 2 shorts / pixel
pbaTextures = (short2**) malloc(NB * sizeof(short2*)); // We will use NB buffers
for(int i=0;i<NB;++i)
hipMalloc((void **) &pbaTextures[i], pbaMemSize); // Allocate work buffer 'i' (on GPU)
hipMalloc((void**) &pbaTexSiteParam, maxTexSize * maxTexSize * sizeof(float)); // Sites texture (for FT)
hipMalloc((void**) &pbaTexAdvectSites, maxTexSize * maxTexSize * sizeof(float2)); // Sites 2D-coords
hipMalloc((void**) &pbaTexDensityData, maxTexSize * maxTexSize * sizeof(float));
hipMalloc((void**) &pbaTexGradientData, maxTexSize * maxTexSize * sizeof(float2));
}
// Deallocate all allocated memory
void skelft2DDeinitialization()
{
for(int i=0;i<NB;++i) hipFree(pbaTextures[i]);
hipFree(pbaTexSiteParam);
hipFree(pbaTexAdvectSites);
hipFree(pbaTexDensityData);
hipFree(pbaTexGradientData);
free(pbaTextures);
}
// Initialize the Voronoi textures from the sites' encoding texture (parameterization)
// REMARK: we interpret 'inputVoro' as a 2D texture, as it's much easier/faster like this
__global__ void kernelSiteParamInit(short2* inputVoro, int size)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx<size && ty<size) // Careful not to go outside the image..
{
int i = TOID(tx,ty,size);
// The sites-param has non-zero (parameter) values precisely on boundary points
float param = tex1Dfetch(pbaTexParam,i);
short2& v = inputVoro[i];
// Non-boundary points are marked as 0 in the parameterization. Here we will compute the FT.
v.x = v.y = MARKER;
// These are points which define the 'sites' to compute the FT/skeleton (thus, have FT==identity)
// We could use an if-then-else here, but it's faster with an if-then
if (param>0)
{
v.x = tx;
v.y = ty;
}
}
}
void skelft2DInitializeInput(float* sites, int size) // Copy input sites from CPU to GPU; Also set up site param initialization in pbaTextures[0]
{
pbaTexSize = size; // Size of the actual texture being used in this run; can be smaller than the max-tex-size
// which was used in skelft2DInitialization()
hipMemcpy(pbaTexSiteParam, sites, pbaTexSize * pbaTexSize * sizeof(float), hipMemcpyHostToDevice);
// Pass sites parameterization to CUDA. Must be done before calling the initialization
// kernel, since we use the sites-param as a texture in that kernel
hipBindTexture(0, pbaTexParam, pbaTexSiteParam); // Bind the sites-param as a 1D texture so we can quickly index it next
dim3 block = dim3(BLOCKX,BLOCKY);
dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y);
hipLaunchKernelGGL(( kernelSiteParamInit), dim3(grid),dim3(block), 0, 0, pbaTextures[0],pbaTexSize); // Do the site param initialization. This sets up pbaTextures[0]
hipUnbindTexture(pbaTexParam);
}
// In-place transpose a squared texture.
// Block orders are modified to optimize memory access.
// Point coordinates are also swapped.
void pba2DTranspose(short2 *texture)
{
dim3 block(TILE_DIM, BLOCK_ROWS);
dim3 grid(pbaTexSize / TILE_DIM, pbaTexSize / TILE_DIM);
hipBindTexture(0, pbaTexColor, texture);
hipLaunchKernelGGL(( kernelTranspose), dim3(grid), dim3(block) , 0, 0, texture, pbaTexSize);
hipUnbindTexture(pbaTexColor);
}
// Phase 1 of PBA. m1 must divides texture size
void pba2DPhase1(int m1, short xm, short ym, short xM, short yM)
{
dim3 block = dim3(BLOCKSIZE);
dim3 grid = dim3(pbaTexSize / block.x, m1);
// Flood vertically in their own bands
hipBindTexture(0, pbaTexColor, pbaTextures[0]);
hipLaunchKernelGGL(( kernelFloodDown), dim3(grid), dim3(block) , 0, 0, pbaTextures[1], pbaTexSize, pbaTexSize / m1);
hipUnbindTexture(pbaTexColor);
hipBindTexture(0, pbaTexColor, pbaTextures[1]);
hipLaunchKernelGGL(( kernelFloodUp), dim3(grid), dim3(block) , 0, 0, pbaTextures[1], pbaTexSize, pbaTexSize / m1);
// Passing information between bands
grid = dim3(pbaTexSize / block.x, m1);
hipLaunchKernelGGL(( kernelPropagateInterband), dim3(grid), dim3(block) , 0, 0, pbaTextures[0], pbaTexSize, pbaTexSize / m1);
hipBindTexture(0, pbaTexLinks, pbaTextures[0]);
hipLaunchKernelGGL(( kernelUpdateVertical), dim3(grid), dim3(block) , 0, 0, pbaTextures[1], pbaTexSize, m1, pbaTexSize / m1);
hipUnbindTexture(pbaTexLinks);
hipUnbindTexture(pbaTexColor);
}
// Phase 2 of PBA. m2 must divides texture size
void pba2DPhase2(int m2)
{
// Compute proximate points locally in each band
dim3 block = dim3(BLOCKSIZE);
dim3 grid = dim3(pbaTexSize / block.x, m2);
hipBindTexture(0, pbaTexColor, pbaTextures[1]);
hipLaunchKernelGGL(( kernelProximatePoints), dim3(grid), dim3(block) , 0, 0, pbaTextures[0], pbaTexSize, pbaTexSize / m2);
hipBindTexture(0, pbaTexLinks, pbaTextures[0]);
hipLaunchKernelGGL(( kernelCreateForwardPointers), dim3(grid), dim3(block) , 0, 0, pbaTextures[0], pbaTexSize, pbaTexSize / m2);
    // Repeatedly merging two bands into one
for (int noBand = m2; noBand > 1; noBand /= 2) {
grid = dim3(pbaTexSize / block.x, noBand / 2);
hipLaunchKernelGGL(( kernelMergeBands), dim3(grid), dim3(block) , 0, 0, pbaTextures[0], pbaTexSize, pbaTexSize / noBand);
}
// Replace the forward link with the X coordinate of the seed to remove
// the need of looking at the other texture. We need it for coloring.
grid = dim3(pbaTexSize / block.x, pbaTexSize);
hipLaunchKernelGGL(( kernelDoubleToSingleList), dim3(grid), dim3(block) , 0, 0, pbaTextures[0], pbaTexSize);
hipUnbindTexture(pbaTexLinks);
hipUnbindTexture(pbaTexColor);
}
// Phase 3 of PBA. m3 must divides texture size
void pba2DPhase3(int m3)
{
dim3 block = dim3(BLOCKSIZE / m3, m3);
dim3 grid = dim3(pbaTexSize / block.x);
hipBindTexture(0, pbaTexColor, pbaTextures[0]);
hipLaunchKernelGGL(( kernelColor), dim3(grid), dim3(block) , 0, 0, pbaTextures[1], pbaTexSize);
hipUnbindTexture(pbaTexColor);
}
void skel2DFTCompute(short xm, short ym, short xM, short yM, int floodBand, int maurerBand, int colorBand)
{
pba2DPhase1(floodBand,xm,ym,xM,yM); //Vertical sweep
pba2DTranspose(pbaTextures[1]); //
pba2DPhase2(maurerBand); //Horizontal coloring
pba2DPhase3(colorBand); //Row coloring
pba2DTranspose(pbaTextures[1]);
}
__global__ void kernelThresholdDT(unsigned char* output, int size, float threshold2, short xm, short ym, short xM, short yM)
//Input: pbaTexColor: closest-site-ids per pixel, i.e. FT
//Output: output: thresholded DT
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx>xm && ty>ym && tx<xM && ty<yM) //careful not to index outside the image..
{
int id = TOID(tx, ty, size);
short2 voroid = tex1Dfetch(pbaTexColor,id); //get the closest-site to tx,ty into voroid.x,.y
float d2 = (tx-voroid.x)*(tx-voroid.x)+(ty-voroid.y)*(ty-voroid.y);
output[id] = (d2<=threshold2); //threshold DT into binary image
}
}
__global__ void kernelDT(float* output, int size, short xm, short ym, short xM, short yM)
//Input: pbaTexColor: closest-site-ids per pixel, i.e. FT
//Output: output: DT
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx>xm && ty>ym && tx<xM && ty<yM) //careful not to index outside the image..
{
int id = TOID(tx, ty, size);
short2 voroid = tex1Dfetch(pbaTexColor,id); //get the closest-site to tx,ty into voroid.x,.y
float d2 = (tx-voroid.x)*(tx-voroid.x)+(ty-voroid.y)*(ty-voroid.y);
output[id] = sqrtf(d2); //save the Euclidean DT
}
}
__global__ void kernelSkel(unsigned char* output, short xm, short ym,
short xM, short yM, short size, float threshold, float length)
//Input: pbaTexColor: closest-site-ids per pixel
// pbaTexParam: labels for sites (only valid at site locations)
{ //Output: output: binary thresholded skeleton
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx>xm && ty>ym && tx<xM && ty<yM)
{
int id = TOID(tx, ty, size);
int Id = id;
short2 voroid = tex1Dfetch(pbaTexColor,id); //get the closest-site to tx,ty into voroid.x,.y
int id2 = TOID(voroid.x,voroid.y,size); //convert the site's coord to an index into pbaTexParam[], the site-label-texture
float imp = tex1Dfetch(pbaTexParam,id2); //get the site's label
++id; //TOID(tx+1,ty,size)
voroid = tex1Dfetch(pbaTexColor,id); //
id2 = TOID(voroid.x,voroid.y,size); //
float imp_r = tex1Dfetch(pbaTexParam,id2); //
id += size-1; //TOID(tx,ty+1,size)
voroid = tex1Dfetch(pbaTexColor,id); //
id2 = TOID(voroid.x,voroid.y,size); //
float imp_u = tex1Dfetch(pbaTexParam,id2); //
float imp_dx = fabsf(imp_r-imp);
float imp_dy = fabsf(imp_u-imp);
float Imp = max(imp_dx,imp_dy);
Imp = min(Imp,fabsf(length-Imp));
if (Imp>=threshold) output[Id] = 1; //By filling only 1-values, we reduce memory access somehow (writing to output[] is expensive)
}
    //WARNING: this kernel may sometimes create 2-pixel-thick branches. Study the original AFMM code to see if this is correct.
}
#define X 1
__constant__ const //REMARK: put following constants (for kernelTopology) in CUDA constant-memory, as this gives a huge speed difference
unsigned char topo_patterns[][9] = { {0,0,0, //These are the 3x3 templates that we use to detect skeleton endpoints
0,X,0, //(with four 90-degree rotations for each)
0,X,0},
{0,0,0,
0,X,0,
0,0,X},
{0,0,0,
0,X,0,
0,X,X},
{0,0,0,
0,X,0,
X,X,0}
};
#define topo_NPATTERNS 4 //Number of patterns we try to match (for kernelTopology)
//REMARK: #define faster than __constant__
__constant__ const unsigned char topo_rot[][9] = { {0,1,2,3,4,5,6,7,8}, {2,5,8,1,4,7,0,3,6}, {8,7,6,5,4,3,2,1,0}, {6,3,0,7,4,1,8,5,2} };
//These encode the four 90-degree rotations of the patterns (for kernelTopology);
__device__ unsigned int topo_gc = 0;
__device__ unsigned int topo_gc_last = 0;
__global__ void kernelTopology(unsigned char* output, short2* output_set, short xm, short ym, short xM, short yM, short size, int maxpts)
{
const int tx = blockIdx.x * blockDim.x + threadIdx.x;
const int ty = blockIdx.y * blockDim.y + threadIdx.y;
unsigned char t[9];
if (tx>xm && ty>ym && tx<xM-1 && ty<yM-1) //careful not to index outside the image; take into account the template size too
{
int id = TOID(tx, ty, size);
unsigned char p = tex1Dfetch(pbaTexGray,id); //get the skeleton pixel at tx,ty
if (p) //if the pixel isn't skeleton, nothing to do
{
unsigned char idx=0;
for(int j=ty-1;j<=ty+1;++j) //read the template into t[] for easier use
{
int id = TOID(tx-1, j, size);
for(int i=0;i<=2;++i,++id,++idx)
t[idx] = tex1Dfetch(pbaTexGray,id); //get the 3x3 template centered at the skel point tx,ty
}
for(unsigned char r=0;r<4;++r) //try to match all rotations of a pattern:
{
const unsigned char* rr = topo_rot[r];
for(unsigned char p=0;p<topo_NPATTERNS;++p) //try to match all patterns:
{
const unsigned char* pat = topo_patterns[p];
unsigned char j = (p==0)? 0 : 7; //Speedup: for all patterns except 1st, check only last 3 entries, the first 6 are identical for all patterns
for(;j<9;++j) //try to match rotated pattern vs actual pattern
if (pat[j]!=t[rr[j]]) break; //this rotation failed
if (j<6) break; //Speedup: if we have a mismatch on the 1st 6 pattern entries, then none of the patterns can match
// since all templates have the same first 6 entries.
if (j==9) //this rotation succeeded: mark the pixel as a topology event and we're done
{
int crt_gc = atomicInc(&topo_gc,maxpts); //REMARK: this serializes (compacts) all detected endpoints in one array.
output_set[crt_gc] = make_short2(tx,ty); //To do this, we use an atomic read-increment-return on a global counter,
//which is guaranteed to give all threads unique consecutive indexes in the array.
output[id] = 1; //Also create the topology image
return;
}
}
}
}
}
else //Last thread: add zero-marker to the output point-set, so the reader knows how many points are really in there
if (tx==xM-1 && ty==yM-1) //Also reset the global vector counter topo_gc, for the next parallel-run of this function
{ topo_gc_last = topo_gc; topo_gc = 0; } //We do this in the last thread so that no one modifies topo_gc from now on.
//REMARK: this seems to be the only way I can read a __device__ variable back to the CPU
}
void skelft2DParams(int floodBand_, int maurerBand_, int colorBand_) //Set up some params of the FT algorithm
{
floodBand = floodBand_;
maurerBand = maurerBand_;
colorBand = colorBand_;
}
// Compute 2D FT / Voronoi diagram of a set of sites
// siteParam: Site parameterization. 0 = non-site points; >0 = site parameter value.
// output: FT. The (x,y) at (i,j) are the coords of the closest site to (i,j)
// size: Texture size (pow 2)
void skelft2DFT(short* output, float* siteParam, short xm, short ym, short xM, short yM, int size)
{
skelft2DInitializeInput(siteParam,size); // Initialization of already-allocated data structures
skel2DFTCompute(xm, ym, xM, yM, floodBand, maurerBand, colorBand); // Compute FT
// Copy FT to CPU, if required
if (output) hipMemcpy(output, pbaTextures[1], size*size*sizeof(short2), hipMemcpyDeviceToHost);
}
void skelft2DDT(float* outputDT, short xm, short ym, short xM, short yM) //Compute exact DT from resident FT (in pbaTextures[1])
{
dim3 block = dim3(BLOCKX,BLOCKY);
dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y);
hipBindTexture(0, pbaTexColor, pbaTextures[1]); //Used to read the FT from
xm = ym = 0; xM = yM = pbaTexSize-1;
hipLaunchKernelGGL(( kernelDT) , dim3(grid), dim3(block) , 0, 0, (float*)pbaTextures[2], pbaTexSize, xm-1, ym-1, xM+1, yM+1);
hipUnbindTexture(pbaTexColor);
//Copy DT to CPU
if (outputDT) hipMemcpy(outputDT, pbaTextures[2], pbaTexSize * pbaTexSize * sizeof(float), hipMemcpyDeviceToHost);
}
void skelft2DDT(short* outputDT, float threshold, //Compute (thresholded) DT (into pbaTextures[2]) from resident FT (in pbaTextures[1])
short xm, short ym, short xM, short yM)
{
dim3 block = dim3(BLOCKX,BLOCKY);
dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y);
hipBindTexture(0, pbaTexColor, pbaTextures[1]); //Used to read the FT from
if (threshold>=0)
{
xm -= (short)threshold; if (xm<0) xm=0;
ym -= (short)threshold; if (ym<0) ym=0;
xM += (short)threshold; if (xM>pbaTexSize-1) xM=pbaTexSize-1;
yM += (short)threshold; if (yM>pbaTexSize-1) yM=pbaTexSize-1;
hipLaunchKernelGGL(( kernelThresholdDT), dim3(grid), dim3(block) , 0, 0, (unsigned char*)pbaTextures[2], pbaTexSize, threshold*threshold, xm-1, ym-1, xM+1, yM+1);
hipUnbindTexture(pbaTexColor);
//Copy thresholded image to CPU
if (outputDT) hipMemcpy(outputDT, (unsigned char*)pbaTextures[2], pbaTexSize * pbaTexSize * sizeof(unsigned char), hipMemcpyDeviceToHost);
}
}
void skelft2DSkeleton(unsigned char* outputSkel, float length, float threshold, //Compute thresholded skeleton (into pbaTextures[3]) from resident FT (in pbaTextures[1])
short xm,short ym,short xM,short yM)
{ //length: boundary length
dim3 block = dim3(BLOCKX,BLOCKY); //threshold: skeleton importance min-value (below this, we ignore branches)
dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y);
hipBindTexture(0, pbaTexColor, pbaTextures[1]); //Used to read the resident FT
hipBindTexture(0, pbaTexParam, pbaTexSiteParam); //Used to read the resident boundary parameterization
hipMemset(pbaTextures[3],0,sizeof(unsigned char)*pbaTexSize*pbaTexSize); //Faster to zero result and then fill only 1-values (see kernel)
hipLaunchKernelGGL(( kernelSkel), dim3(grid), dim3(block) , 0, 0, (unsigned char*)pbaTextures[3], xm, ym, xM-1, yM-1, pbaTexSize, threshold, length);
hipUnbindTexture(pbaTexColor);
hipUnbindTexture(pbaTexParam);
//Copy skeleton to CPU
if (outputSkel) hipMemcpy(outputSkel, pbaTextures[3], pbaTexSize * pbaTexSize * sizeof(unsigned char), hipMemcpyDeviceToHost);
}
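// Editor's illustrative sketch (not part of the original source): a minimal
// end-to-end use of the entry points above. siteParam must hold size*size
// floats that are >0 exactly on boundary pixels; the importance threshold is an
// arbitrary example value.
static void example_skeleton_pipeline(float* siteParam, int size, float boundaryLength)
{
    skelft2DInitialization(size);                          // allocate all work buffers
    skelft2DFT(0, siteParam, 0, 0, size-1, size-1, size);  // FT stays resident on the GPU
    unsigned char* skel = new unsigned char[size*size];
    skelft2DSkeleton(skel, boundaryLength, 10.0f,          // threshold: example value
                     0, 0, size-1, size-1);
    // ... use the binary skeleton in skel[] ...
    delete[] skel;
    skelft2DDeinitialization();
}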
void skelft2DTopology(unsigned char* outputTopo, int* npts, short* outputPoints, //Compute topology-points of the resident skeleton (in pbaTextures[3])
short xm,short ym,short xM,short yM)
{
int maxpts = (npts)? *npts : pbaTexSize*pbaTexSize; //This is the max # topo-points we are going to return in outputPoints[]
dim3 block = dim3(BLOCKX,BLOCKY);
dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y);
hipBindTexture(0, pbaTexGray, pbaTextures[3]); //Used to read the resident skeleton
hipMemset(pbaTextures[4],0,sizeof(unsigned char)*pbaTexSize*pbaTexSize); //Faster to zero result and then fill only 1-values (see kernel)
unsigned int zero = 0;
hipMemcpyToSymbol(topo_gc,&zero,sizeof(unsigned int),0,hipMemcpyHostToDevice); //Set topo_gc to 0
hipLaunchKernelGGL(( kernelTopology), dim3(grid), dim3(block) , 0, 0, (unsigned char*)pbaTextures[4], pbaTextures[5], xm, ym, xM, yM, pbaTexSize, maxpts+1);
hipUnbindTexture(pbaTexGray);
if (outputPoints && maxpts) //If output-point vector desired, copy the end-points, put in pbaTexture[5] as a vector of short2's,
{ //into caller space. We copy only 'maxpts' elements, as the user instructed us.
unsigned int num_pts;
hipMemcpyFromSymbol(&num_pts,topo_gc_last,sizeof(unsigned int),0,hipMemcpyDeviceToHost); //Get #topo-points we have detected from the device-var from CUDA
if (npts && num_pts) //Copy the topo-points to caller
hipMemcpy(outputPoints,pbaTextures[5],num_pts*sizeof(short2),hipMemcpyDeviceToHost);
if (npts) *npts = num_pts; //Return #detected topo-points to caller
}
if (outputTopo) //If topology image desired, copy it into user space
hipMemcpy(outputTopo,pbaTextures[4],pbaTexSize*pbaTexSize*sizeof(unsigned char), hipMemcpyDeviceToHost);
}
__global__ void kernelSiteFromSkeleton(short2* outputSites, int size) //Initialize the Voronoi textures from the sites' encoding texture (parameterization)
{ //REMARK: we interpret 'inputVoro' as a 2D texture, as it's much easier/faster like this
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx<size && ty<size) //Careful not to go outside the image..
{
int i = TOID(tx,ty,size);
unsigned char param = tex1Dfetch(pbaTexGray,i); //The sites-param has non-zero (parameter) values precisely on non-boundary points
short2& v = outputSites[i];
v.x = v.y = MARKER; //Non-boundary points are marked as 0 in the parameterization. Here we will compute the FT.
if (param) //These are points which define the 'sites' to compute the FT/skeleton (thus, have FT==identity)
{ //We could use an if-then-else here, but it's faster with an if-then
v.x = tx; v.y = ty;
}
}
}
__global__ void kernelSkelInterpolate(float* output, int size)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx<size && ty<size) //Careful not to go outside the image..
{
int id = TOID(tx, ty, size);
short2 vid = tex1Dfetch(pbaTexColor,id);
float T = sqrtf((tx-vid.x)*(tx-vid.x)+(ty-vid.y)*(ty-vid.y));
short2 vid2 = tex1Dfetch(pbaTexColor2,id);
float D = sqrtf((tx-vid2.x)*(tx-vid2.x)+(ty-vid2.y)*(ty-vid2.y));
float B = ((D)? min(T/2/D,0.5f):0.5) + 0.5*((T)? max(1-D/T,0.0f):0);
output[id] = pow(B,0.2f);
}
}
void skel2DSkeletonDT(float* outputSkelDT,short xm,short ym,short xM,short yM)
{
dim3 block = dim3(BLOCKX,BLOCKY);
dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y);
hipBindTexture(0,pbaTexGray,pbaTextures[3]); //Used to read the resident binary skeleton
hipLaunchKernelGGL(( kernelSiteFromSkeleton), dim3(grid),dim3(block), 0, 0, pbaTextures[0],pbaTexSize); //1. Init pbaTextures[0] with sites on skeleton i.e. from pbaTexGray
hipUnbindTexture(pbaTexGray);
//Must first save pbaTextures[1] since we may need it later..
hipMemcpy(pbaTextures[5],pbaTextures[1],pbaTexSize*pbaTexSize*sizeof(short2),hipMemcpyDeviceToDevice);
skel2DFTCompute(xm, ym, xM, yM, floodBand, maurerBand, colorBand); //2. Compute FT of the skeleton into pbaTextures[6]
hipMemcpy(pbaTextures[6],pbaTextures[1],pbaTexSize*pbaTexSize*sizeof(short2),hipMemcpyDeviceToDevice);
hipMemcpy(pbaTextures[1],pbaTextures[5],pbaTexSize*pbaTexSize*sizeof(short2),hipMemcpyDeviceToDevice);
//Compute interpolation
hipBindTexture(0,pbaTexColor,pbaTextures[1]); // FT of boundary
hipBindTexture(0,pbaTexColor2,pbaTextures[6]); // FT of skeleton
hipLaunchKernelGGL(( kernelSkelInterpolate), dim3(grid),dim3(block), 0, 0, (float*)pbaTextures[0],pbaTexSize);
hipUnbindTexture(pbaTexColor);
hipUnbindTexture(pbaTexColor2);
if (outputSkelDT) hipMemcpy(outputSkelDT, pbaTextures[0], pbaTexSize * pbaTexSize * sizeof(float), hipMemcpyDeviceToHost);
}
__global__ void kernelFill(unsigned char* output, int size, unsigned char bg, unsigned char fg, short xm, short ym, short xM, short yM, bool ne)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx>xm && ty>ym && tx<xM && ty<yM) //careful not to index outside the image..
{
int id0 = TOID(tx, ty, size);
unsigned char val = tex1Dfetch(pbaTexGray,id0); //
if (val==fg) //do we have a filled pixel? Then fill all to left/top/up/bottom of it which is background
{
bool fill = false;
int id = id0;
if (ne) //fill in north-east direction:
{
for(short x=tx+1;x<xM;++x) //REMARK: here and below, the interesting thing is that it's faster, by about 10-15%, to fill a whole
            {                           // scanline rather than only until the current block's borders (+1). The reason is that filling a whole
// scanline decreases the total #sweeps, which seems to be the limiting speed factor
if (tex1Dfetch(pbaTexGray,++id)!=bg) break;
output[id] = fg; fill = true;
}
id = id0;
for(short y=ty-1;y>ym;--y)
{
if (tex1Dfetch(pbaTexGray,id-=size)!=bg) break;
output[id] = fg; fill = true;
}
}
else //fill in south-west direction:
{
for(short x=tx-1;x>xm;--x)
{
if (tex1Dfetch(pbaTexGray,--id)!=bg) break;
output[id] = fg; fill = true;
}
id = id0;
for(short y=ty+1;y<yM;++y)
{
if (tex1Dfetch(pbaTexGray,id+=size)!=bg) break;
output[id] = fg; fill = true;
}
}
if (fill) fill_gc = true; //if we filled anything, inform caller; we 'gather' this info from a local var into the
//global var here, since it's faster than writing the global var in the for loops
}
}
}
__global__ void kernelFillHoles(unsigned char* output, int size, unsigned char bg, unsigned char fg, unsigned char fill_fg)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx>=0 && ty>=0 && tx<size && ty<size) //careful not to index outside the image..
{
int id = TOID(tx, ty, size);
unsigned char val = tex1Dfetch(pbaTexGray,id); //
if (val==fill_fg)
output[id] = bg;
else if (val==bg)
output[id] = fg;
}
}
int skelft2DFill(unsigned char* outputFill, short sx, short sy, short xm, short ym, short xM, short yM, unsigned char fill_value)
{
dim3 block = dim3(BLOCKX,BLOCKY);
dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y);
unsigned char background;
int id = sy * pbaTexSize + sx;
hipMemcpy(&background,(unsigned char*)pbaTextures[2]+id,sizeof(unsigned char),hipMemcpyDeviceToHost); //See which is the value we have to fill from (sx,sy)
hipMemset(((unsigned char*)pbaTextures[2])+id,fill_value,sizeof(unsigned char)); //Fill the seed (x,y) on the GPU
hipBindTexture(0, pbaTexGray, pbaTextures[2]); //Used to read the thresholded DT
int iter=0;
bool xy = true; //Direction of filling for current sweep: either north-east or south-west
//This kind of balances the memory-accesses nicely over kernel calls
for(;;++iter,xy=!xy) //Keep filling a sweep at a time until we have no background pixels anymore
{
bool filled = false; //Initialize flag: we didn't fill anything in this sweep
hipMemcpyToSymbol(fill_gc,&filled,sizeof(bool),0,hipMemcpyHostToDevice); //Pass flag to CUDA
hipLaunchKernelGGL(( kernelFill), dim3(grid), dim3(block), 0, 0, (unsigned char*)pbaTextures[2],pbaTexSize,background,fill_value,xm,ym,xM,yM,xy);
//One fill sweep
hipMemcpyFromSymbol(&filled,fill_gc,sizeof(bool),0,hipMemcpyDeviceToHost); //See if we filled anything in this sweep
if (!filled) break; //Nothing filled? Then we're done, the image didn't change
}
hipUnbindTexture(pbaTexGray);
if (outputFill) hipMemcpy(outputFill, (unsigned char*)pbaTextures[2], pbaTexSize * pbaTexSize * sizeof(unsigned char), hipMemcpyDeviceToHost);
return iter; //Return #iterations done for the fill - useful as a performance measure for caller
}
int skelft2DFillHoles(unsigned char* outputFill, short sx, short sy, unsigned char foreground)
{
unsigned char background;
unsigned char fill_value = 128;
int id = sy * pbaTexSize + sx;
hipMemcpy(&background,(unsigned char*)pbaTextures[2]+id,sizeof(unsigned char),hipMemcpyDeviceToHost); //See which is the value at (sx,sy)
int iter = skelft2DFill(0,sx,sy,0,0,pbaTexSize,pbaTexSize,fill_value); //First, fill the background surrounding the image with some special value
dim3 block = dim3(BLOCKX,BLOCKY);
dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y);
hipBindTexture(0, pbaTexGray, pbaTextures[2]); //Used to read the thresholded DT
hipLaunchKernelGGL(( kernelFillHoles), dim3(grid), dim3(block), 0, 0, (unsigned char*)pbaTextures[2],pbaTexSize,background,foreground,fill_value);
hipUnbindTexture(pbaTexGray);
if (outputFill) hipMemcpy(outputFill, (unsigned char*)pbaTextures[2], pbaTexSize * pbaTexSize * sizeof(unsigned char), hipMemcpyDeviceToHost);
return iter;
}
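// Editor's illustrative sketch (not part of the original source): thresholding
// the resident DT and then closing interior holes. It assumes the FT has
// already been computed with skelft2DFT(); the 5-pixel radius and the
// foreground value 1 are example choices.
static void example_threshold_and_fill(unsigned char* outputFill, int size)
{
    skelft2DDT((short*)0, 5.0f, 0, 0, size-1, size-1);  // thresholded DT into pbaTextures[2]
    skelft2DFillHoles(outputFill, 0, 0, 1);             // seed the flood fill at border pixel (0,0)
}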
//-------- Advection-related code ----------------------------------------------------------------------
void advect2DSetSites(float* sites, int nsites)
{
hipMemcpy(pbaTexAdvectSites, sites, nsites * sizeof(float2), hipMemcpyHostToDevice); //Copy side-coords from CPU->GPU
}
//#define KERNEL(radius,dist) expf(-10*dist/radius)
#define KERNEL(radius,dist) ((radius-dist)/radius)
__global__ void kernelSplat(float* output, int size, int numpts, float d)
//REMARK: This could be done tens of times faster using OpenGL (draw point-sprites with a 2D distance-kernel texture)
{
int offs = blockIdx.x * blockDim.x + threadIdx.x;
if (offs < numpts) //careful not to index outside the site-vector..
{
float2 p = tex1Dfetch(pbaTexAdvect,offs); //splat the site whose coords are at location offs in pbaTexAdvect[]
float d2 = d*d;
        short tx = p.x, ty = p.y;    //REMARK: if I make these float, then something gets screwed up - why?
short jmin = ty-d;
short jmax = ty+d;
for(short j=jmin;j<=jmax;++j) //bounding-box of the splat at 'skel'
{
float dy = (j-ty)*(j-ty); //precalc this for speed
short imin = tx-d;
short imax = tx+d;
int offs = __mul24(int(j),size);
for(short i=imin;i<=imax;++i) //REMARK: this could be made ~1.5x faster by computing the x-scanline intersection (min,max) with the d2-circle..
{
float r2 = dy+(i-tx)*(i-tx);
if (r2<=d2) //check we're really inside the splat
{
int id = int(i)+offs;
float val = KERNEL(d2,r2);
output[id] += val; //Accumulate density (WARNING: hope this is thread-safe!!)
//If not, I need atomicAdd() on floats which requires CUDA 2.0
}
}
}
}
}
inline __device__ float2 computeGradient(const float2& p) //Compute -gradient of density at p
{
float v_d = tex2D(pbaTexDensity,p.x,p.y-1);
float v_l = tex2D(pbaTexDensity,p.x-1,p.y);
float v_r = tex2D(pbaTexDensity,p.x+1,p.y);
float v_t = tex2D(pbaTexDensity,p.x,p.y+1);
return make_float2((v_l-v_r)/2,(v_d-v_t)/2);
}
inline __device__ float2 computeGradientExplicit(float* density, const float2& p, int size) //Compute gradient of density at p
{
int x = p.x, y = p.y;
float v_d = density[TOID(x,y-1,size)];
float v_l = density[TOID(x-1,y,size)];
float v_r = density[TOID(x+1,y,size)];
float v_t = density[TOID(x,y+1,size)];
return make_float2((v_r-v_l)/2,(v_t-v_d)/2);
}
__global__ void kernelGradient(float2* grad, int size)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
const float eps = 0.0001;
if (tx<size && ty<size) //Careful not to go outside the image..
{
float2 p = make_float2(tx,ty);
float2 g = computeGradient(p);
float gn = sqrtf(g.x*g.x+g.y*g.y); //robustly normalize the gradient
g.x /= gn+eps; g.y /= gn+eps;
grad[TOID(tx,ty,size)] = g;
}
}
void advect2DSplat(float* output, int num_pts, float radius)
{
dim3 block = dim3(BLOCKX*BLOCKY); //Prepare the splatting kernel: this operates on a vector of 2D sites
int numpts_b = (num_pts/block.x+1)*block.x; //Find higher multiple of blocksize than # sites
dim3 grid = dim3(numpts_b/block.x);
hipMemset(pbaTexDensityData,0,sizeof(float)*pbaTexSize*pbaTexSize); //Zero the density texture
hipBindTexture(0,pbaTexAdvect,pbaTexAdvectSites); //Bind sites to a texture
hipLaunchKernelGGL(( kernelSplat), dim3(grid), dim3(block) , 0, 0, pbaTexDensityData, pbaTexSize, num_pts, radius);
//Splat kernel
hipDeviceSynchronize();
hipUnbindTexture(pbaTexAdvect); //Done with the sites
//Copy splat-map to CPU (if desired)
if (output) hipMemcpy(output, pbaTexDensityData, pbaTexSize * pbaTexSize * sizeof(float), hipMemcpyDeviceToHost);
}
void advect2DGetSites(float* output, int num_pts) //Get sites CUDA->CPU
{
hipMemcpy(output,pbaTexAdvectSites,num_pts*sizeof(float2),hipMemcpyDeviceToHost);
}
void advect2DGradient() //Compute and save gradient of density map
{
dim3 block = dim3(BLOCKX,BLOCKY);
dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y);
hipMemset(pbaTexGradientData,0,sizeof(float2)*pbaTexSize*pbaTexSize); //Zero the gradient texture
pbaTexDensity.filterMode = hipFilterModeLinear; //The floating-point density map
pbaTexDensity.normalized = false;
hipChannelFormatDesc channelDesc1 = hipCreateChannelDesc(32,0,0,0,hipChannelFormatKindFloat);
hipBindTexture2D(0,pbaTexDensity,pbaTexDensityData,channelDesc1,pbaTexSize,pbaTexSize,4*pbaTexSize);
hipLaunchKernelGGL(( kernelGradient), dim3(grid), dim3(block) , 0, 0, pbaTexGradientData, pbaTexSize); //Gradient kernel
hipUnbindTexture(pbaTexDensity);
}
|
590a93520eabf12d18be1c68aa65ff9c5dab2041.cu
|
#include <device_functions.h>
#include "skelft.h"
#include <cstdio>
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
using namespace std;
// Parameters for CUDA kernel executions; more or less optimized for a 1024x1024 image.
#define BLOCKX 16
#define BLOCKY 16
#define BLOCKSIZE 64
#define TILE_DIM 32
#define BLOCK_ROWS 16
/****** Global Variables *******/
const int NB = 7; // Nr buffers we use and store in the entire framework
short2 **pbaTextures; // Work buffers used to compute and store resident images
// 0: work buffer
// 1: FT
// 2: thresholded DT or exact floating-point DT
// 3: thresholded skeleton
// 4: topology analysis
// 5: work buffer for topology
// 6: skeleton FT
//
float* pbaTexSiteParam; // Stores boundary parameterization
float2* pbaTexAdvectSites;
float* pbaTexDensityData; // The density map of the sites
float2* pbaTexGradientData; // The gradient of the density map
int pbaTexSize; // Texture size (squared) actually used in all computations
int floodBand = 4, // Various FT computation parameters; defaults are good for a 1024x1024 image.
maurerBand = 4,
colorBand = 4;
texture<short2> pbaTexColor; // 2D textures (bound to various buffers defined above as needed)
texture<short2> pbaTexColor2;
texture<short2> pbaTexLinks;
texture<float> pbaTexParam; // 1D site parameterization texture (bound to pbaTexSiteParam)
texture<unsigned char>
pbaTexGray; // 2D texture of unsigned char values, e.g. the binary skeleton
texture<float2> pbaTexAdvect;
texture<float,cudaTextureType2D,cudaReadModeElementType> pbaTexDensity;
texture<float2,cudaTextureType2D,cudaReadModeElementType> pbaTexGradient;
texture<float,cudaTextureType2D,cudaReadModeElementType> pbaTexDT;
__device__ bool fill_gc; //Indicates if a fill-sweep did fill anything or not
#if __CUDA_ARCH__ < 110 // We cannot use atomic intrinsics on SM 1.0 or below, so we define them as no-ops.
#define atomicInc(a,b) 0 // As a consequence, code that relies on them (e.g. endpoint detection) will do nothing on such devices.
#endif
/********* Kernels ********/
#include "skelftkernel.h"
// Initialize necessary memory (CPU/GPU sides)
// - textureSize: The max size of any image we will process until re-initialization
void skelft2DInitialization(int maxTexSize)
{
int pbaMemSize = maxTexSize * maxTexSize * sizeof(short2); // A buffer has 2 shorts / pixel
pbaTextures = (short2**) malloc(NB * sizeof(short2*)); // We will use NB buffers
for(int i=0;i<NB;++i)
cudaMalloc((void **) &pbaTextures[i], pbaMemSize); // Allocate work buffer 'i' (on GPU)
cudaMalloc((void**) &pbaTexSiteParam, maxTexSize * maxTexSize * sizeof(float)); // Sites texture (for FT)
cudaMalloc((void**) &pbaTexAdvectSites, maxTexSize * maxTexSize * sizeof(float2)); // Sites 2D-coords
cudaMalloc((void**) &pbaTexDensityData, maxTexSize * maxTexSize * sizeof(float));
cudaMalloc((void**) &pbaTexGradientData, maxTexSize * maxTexSize * sizeof(float2));
}
// Deallocate all allocated memory
void skelft2DDeinitialization()
{
for(int i=0;i<NB;++i) cudaFree(pbaTextures[i]);
cudaFree(pbaTexSiteParam);
cudaFree(pbaTexAdvectSites);
cudaFree(pbaTexDensityData);
cudaFree(pbaTexGradientData);
free(pbaTextures);
}
// Initialize the Voronoi textures from the sites' encoding texture (parameterization)
// REMARK: we interpret 'inputVoro' as a 2D texture, as it's much easier/faster like this
__global__ void kernelSiteParamInit(short2* inputVoro, int size)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx<size && ty<size) // Careful not to go outside the image..
{
int i = TOID(tx,ty,size);
// The sites-param has non-zero (parameter) values precisely on boundary points
float param = tex1Dfetch(pbaTexParam,i);
short2& v = inputVoro[i];
// Non-boundary points are marked as 0 in the parameterization. Here we will compute the FT.
v.x = v.y = MARKER;
// These are points which define the 'sites' to compute the FT/skeleton (thus, have FT==identity)
// We could use an if-then-else here, but it's faster with an if-then
if (param>0)
{
v.x = tx;
v.y = ty;
}
}
}
void skelft2DInitializeInput(float* sites, int size) // Copy input sites from CPU to GPU; Also set up site param initialization in pbaTextures[0]
{
pbaTexSize = size; // Size of the actual texture being used in this run; can be smaller than the max-tex-size
// which was used in skelft2DInitialization()
cudaMemcpy(pbaTexSiteParam, sites, pbaTexSize * pbaTexSize * sizeof(float), cudaMemcpyHostToDevice);
// Pass sites parameterization to CUDA. Must be done before calling the initialization
// kernel, since we use the sites-param as a texture in that kernel
cudaBindTexture(0, pbaTexParam, pbaTexSiteParam); // Bind the sites-param as a 1D texture so we can quickly index it next
dim3 block = dim3(BLOCKX,BLOCKY);
dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y);
kernelSiteParamInit<<<grid,block>>>(pbaTextures[0],pbaTexSize); // Do the site param initialization. This sets up pbaTextures[0]
cudaUnbindTexture(pbaTexParam);
}
// In-place transpose a squared texture.
// Block orders are modified to optimize memory access.
// Point coordinates are also swapped.
void pba2DTranspose(short2 *texture)
{
dim3 block(TILE_DIM, BLOCK_ROWS);
dim3 grid(pbaTexSize / TILE_DIM, pbaTexSize / TILE_DIM);
cudaBindTexture(0, pbaTexColor, texture);
kernelTranspose<<< grid, block >>>(texture, pbaTexSize);
cudaUnbindTexture(pbaTexColor);
}
// Phase 1 of PBA. m1 must divide the texture size
void pba2DPhase1(int m1, short xm, short ym, short xM, short yM)
{
dim3 block = dim3(BLOCKSIZE);
dim3 grid = dim3(pbaTexSize / block.x, m1);
// Flood vertically in their own bands
cudaBindTexture(0, pbaTexColor, pbaTextures[0]);
kernelFloodDown<<< grid, block >>>(pbaTextures[1], pbaTexSize, pbaTexSize / m1);
cudaUnbindTexture(pbaTexColor);
cudaBindTexture(0, pbaTexColor, pbaTextures[1]);
kernelFloodUp<<< grid, block >>>(pbaTextures[1], pbaTexSize, pbaTexSize / m1);
// Passing information between bands
grid = dim3(pbaTexSize / block.x, m1);
kernelPropagateInterband<<< grid, block >>>(pbaTextures[0], pbaTexSize, pbaTexSize / m1);
cudaBindTexture(0, pbaTexLinks, pbaTextures[0]);
kernelUpdateVertical<<< grid, block >>>(pbaTextures[1], pbaTexSize, m1, pbaTexSize / m1);
cudaUnbindTexture(pbaTexLinks);
cudaUnbindTexture(pbaTexColor);
}
// Phase 2 of PBA. m2 must divide the texture size
void pba2DPhase2(int m2)
{
// Compute proximate points locally in each band
dim3 block = dim3(BLOCKSIZE);
dim3 grid = dim3(pbaTexSize / block.x, m2);
cudaBindTexture(0, pbaTexColor, pbaTextures[1]);
kernelProximatePoints<<< grid, block >>>(pbaTextures[0], pbaTexSize, pbaTexSize / m2);
cudaBindTexture(0, pbaTexLinks, pbaTextures[0]);
kernelCreateForwardPointers<<< grid, block >>>(pbaTextures[0], pbaTexSize, pbaTexSize / m2);
// Repeatedly merge two bands into one
for (int noBand = m2; noBand > 1; noBand /= 2) {
grid = dim3(pbaTexSize / block.x, noBand / 2);
kernelMergeBands<<< grid, block >>>(pbaTextures[0], pbaTexSize, pbaTexSize / noBand);
}
// Replace the forward link with the X coordinate of the seed to remove
// the need to look at the other texture. We need it for coloring.
grid = dim3(pbaTexSize / block.x, pbaTexSize);
kernelDoubleToSingleList<<< grid, block >>>(pbaTextures[0], pbaTexSize);
cudaUnbindTexture(pbaTexLinks);
cudaUnbindTexture(pbaTexColor);
}
// Phase 3 of PBA. m3 must divide the texture size
void pba2DPhase3(int m3)
{
dim3 block = dim3(BLOCKSIZE / m3, m3);
dim3 grid = dim3(pbaTexSize / block.x);
cudaBindTexture(0, pbaTexColor, pbaTextures[0]);
kernelColor<<< grid, block >>>(pbaTextures[1], pbaTexSize);
cudaUnbindTexture(pbaTexColor);
}
void skel2DFTCompute(short xm, short ym, short xM, short yM, int floodBand, int maurerBand, int colorBand)
{
pba2DPhase1(floodBand,xm,ym,xM,yM); //Vertical sweep
pba2DTranspose(pbaTextures[1]); //
pba2DPhase2(maurerBand); //Horizontal coloring
pba2DPhase3(colorBand); //Row coloring
pba2DTranspose(pbaTextures[1]);
}
__global__ void kernelThresholdDT(unsigned char* output, int size, float threshold2, short xm, short ym, short xM, short yM)
//Input: pbaTexColor: closest-site-ids per pixel, i.e. FT
//Output: output: thresholded DT
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx>xm && ty>ym && tx<xM && ty<yM) //careful not to index outside the image..
{
int id = TOID(tx, ty, size);
short2 voroid = tex1Dfetch(pbaTexColor,id); //get the closest-site to tx,ty into voroid.x,.y
float d2 = (tx-voroid.x)*(tx-voroid.x)+(ty-voroid.y)*(ty-voroid.y);
output[id] = (d2<=threshold2); //threshold DT into binary image
}
}
__global__ void kernelDT(float* output, int size, short xm, short ym, short xM, short yM)
//Input: pbaTexColor: closest-site-ids per pixel, i.e. FT
//Output: output: DT
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx>xm && ty>ym && tx<xM && ty<yM) //careful not to index outside the image..
{
int id = TOID(tx, ty, size);
short2 voroid = tex1Dfetch(pbaTexColor,id); //get the closest-site to tx,ty into voroid.x,.y
float d2 = (tx-voroid.x)*(tx-voroid.x)+(ty-voroid.y)*(ty-voroid.y);
output[id] = sqrtf(d2); //save the Euclidean DT
}
}
__global__ void kernelSkel(unsigned char* output, short xm, short ym,
short xM, short yM, short size, float threshold, float length)
//Input: pbaTexColor: closest-site-ids per pixel
// pbaTexParam: labels for sites (only valid at site locations)
{ //Output: output: binary thresholded skeleton
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx>xm && ty>ym && tx<xM && ty<yM)
{
int id = TOID(tx, ty, size);
int Id = id;
short2 voroid = tex1Dfetch(pbaTexColor,id); //get the closest-site to tx,ty into voroid.x,.y
int id2 = TOID(voroid.x,voroid.y,size); //convert the site's coord to an index into pbaTexParam[], the site-label-texture
float imp = tex1Dfetch(pbaTexParam,id2); //get the site's label
++id; //TOID(tx+1,ty,size)
voroid = tex1Dfetch(pbaTexColor,id); //
id2 = TOID(voroid.x,voroid.y,size); //
float imp_r = tex1Dfetch(pbaTexParam,id2); //
id += size-1; //TOID(tx,ty+1,size)
voroid = tex1Dfetch(pbaTexColor,id); //
id2 = TOID(voroid.x,voroid.y,size); //
float imp_u = tex1Dfetch(pbaTexParam,id2); //
float imp_dx = fabsf(imp_r-imp);
float imp_dy = fabsf(imp_u-imp);
float Imp = max(imp_dx,imp_dy);
Imp = min(Imp,fabsf(length-Imp));
if (Imp>=threshold) output[Id] = 1; //By filling only 1-values, we reduce memory access somehow (writing to output[] is expensive)
}
//WARNING: this kernel may sometimes create 2-pixel-thick branches.. Study the AFMM original code to see if this is correct.
}
#define X 1
__constant__ const //REMARK: put following constants (for kernelTopology) in CUDA constant-memory, as this gives a huge speed difference
unsigned char topo_patterns[][9] = { {0,0,0, //These are the 3x3 templates that we use to detect skeleton endpoints
0,X,0, //(with four 90-degree rotations for each)
0,X,0},
{0,0,0,
0,X,0,
0,0,X},
{0,0,0,
0,X,0,
0,X,X},
{0,0,0,
0,X,0,
X,X,0}
};
#define topo_NPATTERNS 4 //Number of patterns we try to match (for kernelTopology)
//REMARK: #define faster than __constant__
__constant__ const unsigned char topo_rot[][9] = { {0,1,2,3,4,5,6,7,8}, {2,5,8,1,4,7,0,3,6}, {8,7,6,5,4,3,2,1,0}, {6,3,0,7,4,1,8,5,2} };
//These encode the four 90-degree rotations of the patterns (for kernelTopology);
__device__ unsigned int topo_gc = 0;
__device__ unsigned int topo_gc_last = 0;
__global__ void kernelTopology(unsigned char* output, short2* output_set, short xm, short ym, short xM, short yM, short size, int maxpts)
{
const int tx = blockIdx.x * blockDim.x + threadIdx.x;
const int ty = blockIdx.y * blockDim.y + threadIdx.y;
unsigned char t[9];
if (tx>xm && ty>ym && tx<xM-1 && ty<yM-1) //careful not to index outside the image; take into account the template size too
{
int id = TOID(tx, ty, size);
unsigned char p = tex1Dfetch(pbaTexGray,id); //get the skeleton pixel at tx,ty
if (p) //if the pixel isn't skeleton, nothing to do
{
unsigned char idx=0;
for(int j=ty-1;j<=ty+1;++j) //read the template into t[] for easier use
{
int id = TOID(tx-1, j, size);
for(int i=0;i<=2;++i,++id,++idx)
t[idx] = tex1Dfetch(pbaTexGray,id); //get the 3x3 template centered at the skel point tx,ty
}
for(unsigned char r=0;r<4;++r) //try to match all rotations of a pattern:
{
const unsigned char* rr = topo_rot[r];
for(unsigned char p=0;p<topo_NPATTERNS;++p) //try to match all patterns:
{
const unsigned char* pat = topo_patterns[p];
unsigned char j = (p==0)? 0 : 7; //Speedup: for all patterns except 1st, check only last 3 entries, the first 6 are identical for all patterns
for(;j<9;++j) //try to match rotated pattern vs actual pattern
if (pat[j]!=t[rr[j]]) break; //this rotation failed
if (j<6) break; //Speedup: if we have a mismatch on the 1st 6 pattern entries, then none of the patterns can match
// since all templates have the same first 6 entries.
if (j==9) //this rotation succeeded: mark the pixel as a topology event and we're done
{
int crt_gc = atomicInc(&topo_gc,maxpts); //REMARK: this serializes (compacts) all detected endpoints in one array.
output_set[crt_gc] = make_short2(tx,ty); //To do this, we use an atomic read-increment-return on a global counter,
//which is guaranteed to give all threads unique consecutive indexes in the array.
output[id] = 1; //Also create the topology image
return;
}
}
}
}
}
else //Last thread: save the point counter into topo_gc_last, so the host can read how many points were detected
if (tx==xM-1 && ty==yM-1) //Also reset the global counter topo_gc, for the next parallel run of this function
{ topo_gc_last = topo_gc; topo_gc = 0; } //We do this in the last thread so that no one modifies topo_gc from now on.
//REMARK: this seems to be the only way I can read a __device__ variable back to the CPU
}
void skelft2DParams(int floodBand_, int maurerBand_, int colorBand_) //Set up some params of the FT algorithm
{
floodBand = floodBand_;
maurerBand = maurerBand_;
colorBand = colorBand_;
}
// Compute 2D FT / Voronoi diagram of a set of sites
// siteParam: Site parameterization. 0 = non-site points; >0 = site parameter value.
// output: FT. The (x,y) at (i,j) are the coords of the closest site to (i,j)
// size: Texture size (pow 2)
void skelft2DFT(short* output, float* siteParam, short xm, short ym, short xM, short yM, int size)
{
skelft2DInitializeInput(siteParam,size); // Initialization of already-allocated data structures
skel2DFTCompute(xm, ym, xM, yM, floodBand, maurerBand, colorBand); // Compute FT
// Copy FT to CPU, if required
if (output) cudaMemcpy(output, pbaTextures[1], size*size*sizeof(short2), cudaMemcpyDeviceToHost);
}
void skelft2DDT(float* outputDT, short xm, short ym, short xM, short yM) //Compute exact DT from resident FT (in pbaTextures[1])
{
dim3 block = dim3(BLOCKX,BLOCKY);
dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y);
cudaBindTexture(0, pbaTexColor, pbaTextures[1]); //Used to read the FT from
xm = ym = 0; xM = yM = pbaTexSize-1;
kernelDT <<< grid, block >>>((float*)pbaTextures[2], pbaTexSize, xm-1, ym-1, xM+1, yM+1);
cudaUnbindTexture(pbaTexColor);
//Copy DT to CPU
if (outputDT) cudaMemcpy(outputDT, pbaTextures[2], pbaTexSize * pbaTexSize * sizeof(float), cudaMemcpyDeviceToHost);
}
void skelft2DDT(short* outputDT, float threshold, //Compute (thresholded) DT (into pbaTextures[2]) from resident FT (in pbaTextures[1])
short xm, short ym, short xM, short yM)
{
dim3 block = dim3(BLOCKX,BLOCKY);
dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y);
cudaBindTexture(0, pbaTexColor, pbaTextures[1]); //Used to read the FT from
if (threshold>=0)
{
xm -= (short)threshold; if (xm<0) xm=0;
ym -= (short)threshold; if (ym<0) ym=0;
xM += (short)threshold; if (xM>pbaTexSize-1) xM=pbaTexSize-1;
yM += (short)threshold; if (yM>pbaTexSize-1) yM=pbaTexSize-1;
kernelThresholdDT<<< grid, block >>>((unsigned char*)pbaTextures[2], pbaTexSize, threshold*threshold, xm-1, ym-1, xM+1, yM+1);
cudaUnbindTexture(pbaTexColor);
//Copy thresholded image to CPU
if (outputDT) cudaMemcpy(outputDT, (unsigned char*)pbaTextures[2], pbaTexSize * pbaTexSize * sizeof(unsigned char), cudaMemcpyDeviceToHost);
}
}
void skelft2DSkeleton(unsigned char* outputSkel, float length, float threshold, //Compute thresholded skeleton (into pbaTextures[3]) from resident FT (in pbaTextures[1])
short xm,short ym,short xM,short yM)
{ //length: boundary length
dim3 block = dim3(BLOCKX,BLOCKY); //threshold: skeleton importance min-value (below this, we ignore branches)
dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y);
cudaBindTexture(0, pbaTexColor, pbaTextures[1]); //Used to read the resident FT
cudaBindTexture(0, pbaTexParam, pbaTexSiteParam); //Used to read the resident boundary parameterization
cudaMemset(pbaTextures[3],0,sizeof(unsigned char)*pbaTexSize*pbaTexSize); //Faster to zero result and then fill only 1-values (see kernel)
kernelSkel<<< grid, block >>>((unsigned char*)pbaTextures[3], xm, ym, xM-1, yM-1, pbaTexSize, threshold, length);
cudaUnbindTexture(pbaTexColor);
cudaUnbindTexture(pbaTexParam);
//Copy skeleton to CPU
if (outputSkel) cudaMemcpy(outputSkel, pbaTextures[3], pbaTexSize * pbaTexSize * sizeof(unsigned char), cudaMemcpyDeviceToHost);
}
void skelft2DTopology(unsigned char* outputTopo, int* npts, short* outputPoints, //Compute topology-points of the resident skeleton (in pbaTextures[3])
short xm,short ym,short xM,short yM)
{
int maxpts = (npts)? *npts : pbaTexSize*pbaTexSize; //This is the max # topo-points we are going to return in outputPoints[]
dim3 block = dim3(BLOCKX,BLOCKY);
dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y);
cudaBindTexture(0, pbaTexGray, pbaTextures[3]); //Used to read the resident skeleton
cudaMemset(pbaTextures[4],0,sizeof(unsigned char)*pbaTexSize*pbaTexSize); //Faster to zero result and then fill only 1-values (see kernel)
unsigned int zero = 0;
cudaMemcpyToSymbol(topo_gc,&zero,sizeof(unsigned int),0,cudaMemcpyHostToDevice); //Set topo_gc to 0
kernelTopology<<< grid, block >>>((unsigned char*)pbaTextures[4], pbaTextures[5], xm, ym, xM, yM, pbaTexSize, maxpts+1);
cudaUnbindTexture(pbaTexGray);
if (outputPoints && maxpts) //If output-point vector desired, copy the end-points, put in pbaTexture[5] as a vector of short2's,
{ //into caller space. We copy only 'maxpts' elements, as the user instructed us.
unsigned int num_pts;
cudaMemcpyFromSymbol(&num_pts,topo_gc_last,sizeof(unsigned int),0,cudaMemcpyDeviceToHost); //Get #topo-points we have detected from the device-var from CUDA
if (npts && num_pts) //Copy the topo-points to caller
cudaMemcpy(outputPoints,pbaTextures[5],num_pts*sizeof(short2),cudaMemcpyDeviceToHost);
if (npts) *npts = num_pts; //Return #detected topo-points to caller
}
if (outputTopo) //If topology image desired, copy it into user space
cudaMemcpy(outputTopo,pbaTextures[4],pbaTexSize*pbaTexSize*sizeof(unsigned char), cudaMemcpyDeviceToHost);
}
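//---------------------------------------------------------------------------------------------------------
//Illustrative sketch (not part of the original file): a plausible call sequence for extracting the skeleton
//and its endpoints once an FT is resident, i.e. after skelft2DFT() has run. 'size' must equal the texture
//size used there; the boundary length (1200) and importance threshold (20) are made-up example values.
static void exampleSkeletonAndTopology(int size)
{
    unsigned char* skel = new unsigned char[size*size];
    skelft2DSkeleton(skel, 1200.0f, 20.0f, 0, 0, size-1, size-1);   //Thresholded skeleton into pbaTextures[3]
    int npts = 1000;                                                //Accept at most 1000 endpoints back
    short* endpoints = new short[2*npts];                           //Packed as (x,y) pairs of shorts
    skelft2DTopology(0, &npts, endpoints, 0, 0, size-1, size-1);    //Endpoint detection on the resident skeleton
    //Here npts holds the number of detected endpoints; endpoints[2*i],endpoints[2*i+1] are their coordinates
    delete[] endpoints;
    delete[] skel;
}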
__global__ void kernelSiteFromSkeleton(short2* outputSites, int size) //Initialize the Voronoi texture from the resident binary skeleton (read via pbaTexGray)
{ //REMARK: we interpret 'outputSites' as a 2D texture, as it's much easier/faster like this
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx<size && ty<size) //Careful not to go outside the image..
{
int i = TOID(tx,ty,size);
unsigned char param = tex1Dfetch(pbaTexGray,i); //The skeleton image is non-zero precisely on skeleton points
short2& v = outputSites[i];
v.x = v.y = MARKER; //Non-skeleton points are marked with MARKER. Here we will compute the FT of the skeleton.
if (param) //These are points which define the 'sites' to compute the FT/skeleton (thus, have FT==identity)
{ //We could use an if-then-else here, but it's faster with an if-then
v.x = tx; v.y = ty;
}
}
}
__global__ void kernelSkelInterpolate(float* output, int size)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx<size && ty<size) //Careful not to go outside the image..
{
int id = TOID(tx, ty, size);
short2 vid = tex1Dfetch(pbaTexColor,id);
float T = sqrtf((tx-vid.x)*(tx-vid.x)+(ty-vid.y)*(ty-vid.y));
short2 vid2 = tex1Dfetch(pbaTexColor2,id);
float D = sqrtf((tx-vid2.x)*(tx-vid2.x)+(ty-vid2.y)*(ty-vid2.y));
float B = ((D)? min(T/2/D,0.5f):0.5) + 0.5*((T)? max(1-D/T,0.0f):0);
output[id] = pow(B,0.2f);
}
}
void skel2DSkeletonDT(float* outputSkelDT,short xm,short ym,short xM,short yM)
{
dim3 block = dim3(BLOCKX,BLOCKY);
dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y);
cudaBindTexture(0,pbaTexGray,pbaTextures[3]); //Used to read the resident binary skeleton
kernelSiteFromSkeleton<<<grid,block>>>(pbaTextures[0],pbaTexSize); //1. Init pbaTextures[0] with sites on skeleton i.e. from pbaTexGray
cudaUnbindTexture(pbaTexGray);
//Must first save pbaTextures[1] since we may need it later..
cudaMemcpy(pbaTextures[5],pbaTextures[1],pbaTexSize*pbaTexSize*sizeof(short2),cudaMemcpyDeviceToDevice);
skel2DFTCompute(xm, ym, xM, yM, floodBand, maurerBand, colorBand); //2. Compute FT of the skeleton into pbaTextures[6]
cudaMemcpy(pbaTextures[6],pbaTextures[1],pbaTexSize*pbaTexSize*sizeof(short2),cudaMemcpyDeviceToDevice);
cudaMemcpy(pbaTextures[1],pbaTextures[5],pbaTexSize*pbaTexSize*sizeof(short2),cudaMemcpyDeviceToDevice);
//Compute interpolation
cudaBindTexture(0,pbaTexColor,pbaTextures[1]); // FT of boundary
cudaBindTexture(0,pbaTexColor2,pbaTextures[6]); // FT of skeleton
kernelSkelInterpolate<<<grid,block>>>((float*)pbaTextures[0],pbaTexSize);
cudaUnbindTexture(pbaTexColor);
cudaUnbindTexture(pbaTexColor2);
if (outputSkelDT) cudaMemcpy(outputSkelDT, pbaTextures[0], pbaTexSize * pbaTexSize * sizeof(float), cudaMemcpyDeviceToHost);
}
__global__ void kernelFill(unsigned char* output, int size, unsigned char bg, unsigned char fg, short xm, short ym, short xM, short yM, bool ne)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx>xm && ty>ym && tx<xM && ty<yM) //careful not to index outside the image..
{
int id0 = TOID(tx, ty, size);
unsigned char val = tex1Dfetch(pbaTexGray,id0); //
if (val==fg) //do we have a filled pixel? Then fill all to left/top/up/bottom of it which is background
{
bool fill = false;
int id = id0;
if (ne) //fill in north-east direction:
{
for(short x=tx+1;x<xM;++x) //REMARK: here and below, the interesting thing is that it's faster, by about 10-15%, to fill a whole
{ // scanline rather than only until the current block's borders (+1). The reason is that filling a whole
// scanline decreases the total #sweeps, which seems to be the limiting speed factor
if (tex1Dfetch(pbaTexGray,++id)!=bg) break;
output[id] = fg; fill = true;
}
id = id0;
for(short y=ty-1;y>ym;--y)
{
if (tex1Dfetch(pbaTexGray,id-=size)!=bg) break;
output[id] = fg; fill = true;
}
}
else //fill in south-west direction:
{
for(short x=tx-1;x>xm;--x)
{
if (tex1Dfetch(pbaTexGray,--id)!=bg) break;
output[id] = fg; fill = true;
}
id = id0;
for(short y=ty+1;y<yM;++y)
{
if (tex1Dfetch(pbaTexGray,id+=size)!=bg) break;
output[id] = fg; fill = true;
}
}
if (fill) fill_gc = true; //if we filled anything, inform caller; we 'gather' this info from a local var into the
//global var here, since it's faster than writing the global var in the for loops
}
}
}
__global__ void kernelFillHoles(unsigned char* output, int size, unsigned char bg, unsigned char fg, unsigned char fill_fg)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx>=0 && ty>=0 && tx<size && ty<size) //careful not to index outside the image..
{
int id = TOID(tx, ty, size);
unsigned char val = tex1Dfetch(pbaTexGray,id); //
if (val==fill_fg)
output[id] = bg;
else if (val==bg)
output[id] = fg;
}
}
int skelft2DFill(unsigned char* outputFill, short sx, short sy, short xm, short ym, short xM, short yM, unsigned char fill_value)
{
dim3 block = dim3(BLOCKX,BLOCKY);
dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y);
unsigned char background;
int id = sy * pbaTexSize + sx;
cudaMemcpy(&background,(unsigned char*)pbaTextures[2]+id,sizeof(unsigned char),cudaMemcpyDeviceToHost); //See which is the value we have to fill from (sx,sy)
cudaMemset(((unsigned char*)pbaTextures[2])+id,fill_value,sizeof(unsigned char)); //Fill the seed (x,y) on the GPU
cudaBindTexture(0, pbaTexGray, pbaTextures[2]); //Used to read the thresholded DT
int iter=0;
bool xy = true; //Direction of filling for current sweep: either north-east or south-west
//This kind of balances the memory-accesses nicely over kernel calls
for(;;++iter,xy=!xy) //Keep filling a sweep at a time until we have no background pixels anymore
{
bool filled = false; //Initialize flag: we didn't fill anything in this sweep
cudaMemcpyToSymbol(fill_gc,&filled,sizeof(bool),0,cudaMemcpyHostToDevice); //Pass flag to CUDA
kernelFill<<<grid, block>>>((unsigned char*)pbaTextures[2],pbaTexSize,background,fill_value,xm,ym,xM,yM,xy);
//One fill sweep
cudaMemcpyFromSymbol(&filled,fill_gc,sizeof(bool),0,cudaMemcpyDeviceToHost); //See if we filled anything in this sweep
if (!filled) break; //Nothing filled? Then we're done, the image didn't change
}
cudaUnbindTexture(pbaTexGray);
if (outputFill) cudaMemcpy(outputFill, (unsigned char*)pbaTextures[2], pbaTexSize * pbaTexSize * sizeof(unsigned char), cudaMemcpyDeviceToHost);
return iter; //Return #iterations done for the fill - useful as a performance measure for caller
}
int skelft2DFillHoles(unsigned char* outputFill, short sx, short sy, unsigned char foreground)
{
unsigned char background;
unsigned char fill_value = 128;
int id = sy * pbaTexSize + sx;
cudaMemcpy(&background,(unsigned char*)pbaTextures[2]+id,sizeof(unsigned char),cudaMemcpyDeviceToHost); //See which is the value at (sx,sy)
int iter = skelft2DFill(0,sx,sy,0,0,pbaTexSize,pbaTexSize,fill_value); //First, fill the background surrounding the image with some special value
dim3 block = dim3(BLOCKX,BLOCKY);
dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y);
cudaBindTexture(0, pbaTexGray, pbaTextures[2]); //Used to read the thresholded DT
kernelFillHoles<<<grid, block>>>((unsigned char*)pbaTextures[2],pbaTexSize,background,foreground,fill_value);
cudaUnbindTexture(pbaTexGray);
if (outputFill) cudaMemcpy(outputFill, (unsigned char*)pbaTextures[2], pbaTexSize * pbaTexSize * sizeof(unsigned char), cudaMemcpyDeviceToHost);
return iter;
}
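//---------------------------------------------------------------------------------------------------------
//Illustrative sketch (not part of the original file): plausible use of the hole-filling pass on the binary
//thresholded-DT image resident in pbaTextures[2]. It assumes the seed (0,0) lies in the outer background,
//that 1 is the foreground value written by kernelThresholdDT, and that 'size' equals pbaTexSize.
static void exampleFillHoles(int size)
{
    unsigned char* filled = new unsigned char[size*size];
    skelft2DFillHoles(filled, 0, 0, 1);   //Flood the outer background from (0,0), then close interior holes
    //'filled' now holds the thresholded DT with all interior holes set to foreground (1)
    delete[] filled;
}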
//-------- Advection-related code ----------------------------------------------------------------------
void advect2DSetSites(float* sites, int nsites)
{
cudaMemcpy(pbaTexAdvectSites, sites, nsites * sizeof(float2), cudaMemcpyHostToDevice); //Copy site coords from CPU->GPU
}
//#define KERNEL(radius,dist) expf(-10*dist/radius)
#define KERNEL(radius,dist) ((radius-dist)/radius)
__global__ void kernelSplat(float* output, int size, int numpts, float d)
//REMARK: This could be done tens of times faster using OpenGL (draw point-sprites with a 2D distance-kernel texture)
{
int offs = blockIdx.x * blockDim.x + threadIdx.x;
if (offs < numpts) //careful not to index outside the site-vector..
{
float2 p = tex1Dfetch(pbaTexAdvect,offs); //splat the site whose coords are at location offs in pbaTexAdvect[]
float d2 = d*d;
short tx = p.x, ty = p.y; //REMARK: if I make these float, then something gets screwed up - why?
short jmin = ty-d;
short jmax = ty+d;
for(short j=jmin;j<=jmax;++j) //bounding-box of the splat centered at the site
{
float dy = (j-ty)*(j-ty); //precalc this for speed
short imin = tx-d;
short imax = tx+d;
int offs = __mul24(int(j),size);
for(short i=imin;i<=imax;++i) //REMARK: this could be made ~1.5x faster by computing the x-scanline intersection (min,max) with the d2-circle..
{
float r2 = dy+(i-tx)*(i-tx);
if (r2<=d2) //check we're really inside the splat
{
int id = int(i)+offs;
float val = KERNEL(d2,r2);
output[id] += val; //Accumulate density (WARNING: concurrent += from overlapping splats is a data race)
//Exact accumulation would need atomicAdd() on floats, available from compute capability 2.0
}
}
}
}
}
inline __device__ float2 computeGradient(const float2& p) //Compute -gradient of density at p
{
float v_d = tex2D(pbaTexDensity,p.x,p.y-1);
float v_l = tex2D(pbaTexDensity,p.x-1,p.y);
float v_r = tex2D(pbaTexDensity,p.x+1,p.y);
float v_t = tex2D(pbaTexDensity,p.x,p.y+1);
return make_float2((v_l-v_r)/2,(v_d-v_t)/2);
}
inline __device__ float2 computeGradientExplicit(float* density, const float2& p, int size) //Compute gradient of density at p
{
int x = p.x, y = p.y;
float v_d = density[TOID(x,y-1,size)];
float v_l = density[TOID(x-1,y,size)];
float v_r = density[TOID(x+1,y,size)];
float v_t = density[TOID(x,y+1,size)];
return make_float2((v_r-v_l)/2,(v_t-v_d)/2);
}
__global__ void kernelGradient(float2* grad, int size)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
const float eps = 0.0001;
if (tx<size && ty<size) //Careful not to go outside the image..
{
float2 p = make_float2(tx,ty);
float2 g = computeGradient(p);
float gn = sqrtf(g.x*g.x+g.y*g.y); //robustly normalize the gradient
g.x /= gn+eps; g.y /= gn+eps;
grad[TOID(tx,ty,size)] = g;
}
}
void advect2DSplat(float* output, int num_pts, float radius)
{
dim3 block = dim3(BLOCKX*BLOCKY); //Prepare the splatting kernel: this operates on a vector of 2D sites
int numpts_b = (num_pts/block.x+1)*block.x; //Find higher multiple of blocksize than # sites
dim3 grid = dim3(numpts_b/block.x);
cudaMemset(pbaTexDensityData,0,sizeof(float)*pbaTexSize*pbaTexSize); //Zero the density texture
cudaBindTexture(0,pbaTexAdvect,pbaTexAdvectSites); //Bind sites to a texture
kernelSplat<<< grid, block >>>(pbaTexDensityData, pbaTexSize, num_pts, radius);
//Splat kernel
cudaDeviceSynchronize();
cudaUnbindTexture(pbaTexAdvect); //Done with the sites
//Copy splat-map to CPU (if desired)
if (output) cudaMemcpy(output, pbaTexDensityData, pbaTexSize * pbaTexSize * sizeof(float), cudaMemcpyDeviceToHost);
}
void advect2DGetSites(float* output, int num_pts) //Get sites CUDA->CPU
{
cudaMemcpy(output,pbaTexAdvectSites,num_pts*sizeof(float2),cudaMemcpyDeviceToHost);
}
void advect2DGradient() //Compute and save gradient of density map
{
dim3 block = dim3(BLOCKX,BLOCKY);
dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y);
cudaMemset(pbaTexGradientData,0,sizeof(float2)*pbaTexSize*pbaTexSize); //Zero the gradient texture
pbaTexDensity.filterMode = cudaFilterModeLinear; //The floating-point density map
pbaTexDensity.normalized = false;
cudaChannelFormatDesc channelDesc1 = cudaCreateChannelDesc(32,0,0,0,cudaChannelFormatKindFloat);
cudaBindTexture2D(0,pbaTexDensity,pbaTexDensityData,channelDesc1,pbaTexSize,pbaTexSize,4*pbaTexSize);
kernelGradient<<< grid, block >>>(pbaTexGradientData, pbaTexSize); //Gradient kernel
cudaUnbindTexture(pbaTexDensity);
}
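//---------------------------------------------------------------------------------------------------------
//Illustrative sketch (not part of the original file): one plausible way to drive the advection helpers above.
//It assumes the framework was initialized and pbaTexSize was already set (e.g. by a skelft2DFT() call), and
//that 'size' equals pbaTexSize; the three sites and the splat radius (30) are made-up example values.
static void exampleSplatDensity(int size)
{
    const int nsites = 3;
    float sites[2*nsites] = { 100.0f,100.0f, 200.0f,150.0f, 300.0f,300.0f };   //(x,y) pairs
    advect2DSetSites(sites, nsites);                     //Upload site coordinates to the GPU
    float* density = new float[size*size];
    advect2DSplat(density, nsites, 30.0f);               //Accumulate one radial kernel per site into the density map
    advect2DGradient();                                  //Normalized density gradient, kept GPU-resident
    delete[] density;
}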
|
a8d12437bc26c462381ee78f39087b2aab8bafa6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "cuda_func_opt.hpp"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <Eigen/LU>
#include <Eigen/Core>
#include <Eigen/Dense>
#include <iostream>
int ei_test_init_cuda()
{
int device = 0;
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
std::cout << "CUDA device info:\n";
std::cout << " name: " << deviceProp.name << "\n";
std::cout << " capability: " << deviceProp.major << "." << deviceProp.minor << "\n";
std::cout << " multiProcessorCount: " << deviceProp.multiProcessorCount << "\n";
std::cout << " maxThreadsPerMultiProcessor: " << deviceProp.maxThreadsPerMultiProcessor << "\n";
std::cout << " warpSize: " << deviceProp.warpSize << "\n";
std::cout << " regsPerBlock: " << deviceProp.regsPerBlock << "\n";
std::cout << " concurrentKernels: " << deviceProp.concurrentKernels << "\n";
std::cout << " clockRate: " << deviceProp.clockRate << "\n";
std::cout << " canMapHostMemory: " << deviceProp.canMapHostMemory << "\n";
std::cout << " computeMode: " << deviceProp.computeMode << "\n";
return 1;
}
static void HandleError( hipError_t err, const char *file, int line )
{
// CUDA error handling from the "CUDA by example" book
if (err != hipSuccess)
{
printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
__global__ void cu_dot(Eigen::Vector3d *v1, Eigen::Vector3d *v2, double *out, size_t N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < N)
{
out[idx] = v1[idx].dot(v2[idx]);
}
return;
}
// The host wrapper that launches the actual kernel
double dot_cuda(const std::vector<Eigen::Vector3d> & v1, const std::vector<Eigen::Vector3d> & v2)
{
int n = v1.size();
double *ret = new double[n];
// Allocate device arrays
Eigen::Vector3d *dev_v1, *dev_v2;
HANDLE_ERROR(hipMalloc((void **)&dev_v1, sizeof(Eigen::Vector3d)*n));
HANDLE_ERROR(hipMalloc((void **)&dev_v2, sizeof(Eigen::Vector3d)*n));
double* dev_ret;
HANDLE_ERROR(hipMalloc((void **)&dev_ret, sizeof(double)*n));
// Copy to device
HANDLE_ERROR(hipMemcpy(dev_v1, v1.data(), sizeof(Eigen::Vector3d)*n, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_v2, v2.data(), sizeof(Eigen::Vector3d)*n, hipMemcpyHostToDevice));
// Dot product
hipLaunchKernelGGL(( cu_dot), dim3((n+1023)/1024), dim3(1024), 0, 0, dev_v1, dev_v2, dev_ret, n);
// Copy to host
HANDLE_ERROR(hipMemcpy(ret, dev_ret, sizeof(double)*n, hipMemcpyDeviceToHost));
// Reduction of the array on the host
double sum = 0.0;
for (int i=0; i<n; ++i)
{
sum += ret[i];
}
// Free the temporary device and host buffers
HANDLE_ERROR(hipFree(dev_v1));
HANDLE_ERROR(hipFree(dev_v2));
HANDLE_ERROR(hipFree(dev_ret));
delete[] ret;
// Return
return sum;
}
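//---------------------------------------------------------------------------------------------------------
//Illustrative usage sketch (not part of the original file): shows how the dot_cuda() wrapper above could be
//called from host code. The vector contents are made-up example values; the expected result is
//1*4 + 2*5 + 3*6 = 32 for the first pair plus 0 for the second, i.e. 32.
//(std::vector is assumed to be available through cuda_func_opt.hpp, which declares dot_cuda.)
static bool example_dot_cuda()
{
    std::vector<Eigen::Vector3d> a, b;
    a.push_back(Eigen::Vector3d(1.0, 2.0, 3.0));
    a.push_back(Eigen::Vector3d(0.0, 0.0, 0.0));
    b.push_back(Eigen::Vector3d(4.0, 5.0, 6.0));
    b.push_back(Eigen::Vector3d(7.0, 8.0, 9.0));
    double sum = dot_cuda(a, b);   // Sum of the per-element dot products, computed on the GPU
    std::cout << "dot_cuda sum = " << sum << " (expected 32)" << std::endl;
    return sum == 32.0;
}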
|
a8d12437bc26c462381ee78f39087b2aab8bafa6.cu
|
#include "cuda_func_opt.hpp"
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <Eigen/LU>
#include <Eigen/Core>
#include <Eigen/Dense>
#include <iostream>
int ei_test_init_cuda()
{
int device = 0;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device);
std::cout << "CUDA device info:\n";
std::cout << " name: " << deviceProp.name << "\n";
std::cout << " capability: " << deviceProp.major << "." << deviceProp.minor << "\n";
std::cout << " multiProcessorCount: " << deviceProp.multiProcessorCount << "\n";
std::cout << " maxThreadsPerMultiProcessor: " << deviceProp.maxThreadsPerMultiProcessor << "\n";
std::cout << " warpSize: " << deviceProp.warpSize << "\n";
std::cout << " regsPerBlock: " << deviceProp.regsPerBlock << "\n";
std::cout << " concurrentKernels: " << deviceProp.concurrentKernels << "\n";
std::cout << " clockRate: " << deviceProp.clockRate << "\n";
std::cout << " canMapHostMemory: " << deviceProp.canMapHostMemory << "\n";
std::cout << " computeMode: " << deviceProp.computeMode << "\n";
return 1;
}
static void HandleError( cudaError_t err, const char *file, int line )
{
// CUDA error handling from the "CUDA by example" book
if (err != cudaSuccess)
{
printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
__global__ void cu_dot(Eigen::Vector3d *v1, Eigen::Vector3d *v2, double *out, size_t N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < N)
{
out[idx] = v1[idx].dot(v2[idx]);
}
return;
}
// The host wrapper that launches the actual kernel
double dot_cuda(const std::vector<Eigen::Vector3d> & v1, const std::vector<Eigen::Vector3d> & v2)
{
int n = v1.size();
double *ret = new double[n];
// Allocate device arrays
Eigen::Vector3d *dev_v1, *dev_v2;
HANDLE_ERROR(cudaMalloc((void **)&dev_v1, sizeof(Eigen::Vector3d)*n));
HANDLE_ERROR(cudaMalloc((void **)&dev_v2, sizeof(Eigen::Vector3d)*n));
double* dev_ret;
HANDLE_ERROR(cudaMalloc((void **)&dev_ret, sizeof(double)*n));
// Copy to device
HANDLE_ERROR(cudaMemcpy(dev_v1, v1.data(), sizeof(Eigen::Vector3d)*n, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_v2, v2.data(), sizeof(Eigen::Vector3d)*n, cudaMemcpyHostToDevice));
// Dot product
cu_dot<<<(n+1023)/1024, 1024>>>(dev_v1, dev_v2, dev_ret, n);
// Copy to host
HANDLE_ERROR(cudaMemcpy(ret, dev_ret, sizeof(double)*n, cudaMemcpyDeviceToHost));
// Reduction of the array on the host
double sum = 0.0;
for (int i=0; i<n; ++i)
{
sum += ret[i];
}
// Free the temporary device and host buffers
HANDLE_ERROR(cudaFree(dev_v1));
HANDLE_ERROR(cudaFree(dev_v2));
HANDLE_ERROR(cudaFree(dev_ret));
delete[] ret;
// Return
return sum;
}
|
c9c7ec102b6e09bed0c560971b9343d738280598.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>
#include "CycleTimer.h"
#define THREADS_PER_BLOCK 256
#define DEBUG
#ifdef DEBUG
#define cudaCheckError(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr, "CUDA Error: %s at %s:%d\n",
hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#else
#define cudaCheckError(ans) ans
#endif
// helper function to round an integer up to the next power of 2
static inline int nextPow2(int n) {
n--;
n |= n >> 1;
n |= n >> 2;
n |= n >> 4;
n |= n >> 8;
n |= n >> 16;
n++;
return n;
}
__global__ void
upsweep_kernel(int N, int stride, int* input) {
// compute overall thread index from position of thread in current
// block, and given the block we are in (in this example only a 1D
// calculation is needed so the code only looks at the .x terms of
// blockDim and threadIdx.
int index = (blockIdx.x * blockDim.x + threadIdx.x) * stride * 2;
if (index + stride * 2 - 1 < N) {
input[index + stride * 2 - 1] += input[index + stride - 1];
}
if (stride == N/2 && index == 0) {
input[N - 1] = 0;
}
}
__global__ void
downsweep_kernel(int N, int stride, int* input) {
// compute overall thread index from position of thread in current
// block, and given the block we are in (in this example only a 1D
// calculation is needed so the code only looks at the .x terms of
// blockDim and threadIdx.
int index = (blockIdx.x * blockDim.x + threadIdx.x) * stride * 2;
if (index + stride * 2 - 1 < N) {
int t = input[index + stride - 1];
input[index + stride - 1] = input[index + stride * 2 - 1];
input[index + stride * 2 - 1] += t;
}
}
__global__ void
copy_kernel(int N, int* input, int* result) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N) {
result[index] = input[index];
} else {
result[index] = 0;
}
}
// exclusive_scan --
//
// Implementation of an exclusive scan on global memory array `input`,
// with results placed in global memory `result`.
//
// N is the logical size of the input and output arrays, however
// students can assume that both the start and result arrays were
// allocated with next power-of-two sizes as described by the comments
// in cudaScan(). This is helpful, since your parallel segmented scan
// will likely write to memory locations beyond N, but of course not
// greater than N rounded up to the next power of 2.
//
// Also, as per the comments in cudaScan(), you can implement an
// "in-place" scan, since the timing harness makes a copy of input and
// places it in result
void exclusive_scan(int* input, int N, int* result)
{
// CS149 TODO:
//
// Implement your exclusive scan implementation here. Keep in
// mind that although the arguments to this function are device
// allocated arrays, this is a function that is running in a thread
// on the CPU. Your implementation will need to make multiple calls
// to CUDA kernel functions (that you must write) to implement the
// scan.
int rounded_length = nextPow2(N);
int blocks = (rounded_length + THREADS_PER_BLOCK - 1)/ THREADS_PER_BLOCK;
hipLaunchKernelGGL(( copy_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, N, input, result);
cudaCheckError(hipDeviceSynchronize());
// printf("N = %d rounded = %d\n", N, rounded_length);
for (int i = 1; i <= rounded_length / 2; i*=2) {
int n_threads = rounded_length / (2 * i);
if (n_threads > THREADS_PER_BLOCK) {
int blocks = (n_threads + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(( upsweep_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, rounded_length, i, result);
} else {
hipLaunchKernelGGL(( upsweep_kernel), dim3(1), dim3(n_threads), 0, 0, rounded_length, i, result);
}
cudaCheckError(hipDeviceSynchronize());
}
for (int i = rounded_length / 2; i >= 1; i/=2) {
int n_threads = rounded_length / (2 * i);
if (n_threads > THREADS_PER_BLOCK) {
int blocks = (n_threads + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
// printf("i = %d n_threads = %d, blocks = %d\n", i, n_threads, blocks);
hipLaunchKernelGGL(( downsweep_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, rounded_length, i, result);
} else {
hipLaunchKernelGGL(( downsweep_kernel), dim3(1), dim3(n_threads), 0, 0, rounded_length, i, result);
}
cudaCheckError(hipDeviceSynchronize());
}
}
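//Illustrative sketch (not part of the original assignment code): runs exclusive_scan() above on a small
//hard-coded array so the up-sweep/down-sweep result is easy to check by hand. For input {1,2,3,4,5,6,7,8}
//the exclusive prefix sum is {0,1,3,6,10,15,21,28}; the length is already a power of two, as the scan assumes.
static void example_exclusive_scan()
{
    const int N = 8;
    int host_in[N] = {1, 2, 3, 4, 5, 6, 7, 8};
    int host_out[N];
    int *dev_in, *dev_out;
    cudaCheckError(hipMalloc((void**)&dev_in, N * sizeof(int)));
    cudaCheckError(hipMalloc((void**)&dev_out, N * sizeof(int)));
    cudaCheckError(hipMemcpy(dev_in, host_in, N * sizeof(int), hipMemcpyHostToDevice));
    exclusive_scan(dev_in, N, dev_out);    //Copies the input into dev_out, then scans it in place
    cudaCheckError(hipMemcpy(host_out, dev_out, N * sizeof(int), hipMemcpyDeviceToHost));
    for (int i = 0; i < N; i++)
        printf("%d ", host_out[i]);        //Expected output: 0 1 3 6 10 15 21 28
    printf("\n");
    hipFree(dev_in);
    hipFree(dev_out);
}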
//
// cudaScan --
//
// This function is a timing wrapper around the student's
// implementation of segmented scan - it copies the input to the GPU
// and times the invocation of the exclusive_scan() function
// above. Students should not modify it.
double cudaScan(int* inarray, int* end, int* resultarray)
{
int* device_result;
int* device_input;
int N = end - inarray;
// This code rounds the arrays provided to exclusive_scan up
// to a power of 2, but elements after the end of the original
// input are left uninitialized and not checked for correctness.
//
// Student implementations of exclusive_scan may assume an array's
// allocated length is a power of 2 for simplicity. This will
// result in extra work on non-power-of-2 inputs, but it's worth
// the simplicity of a power of two only solution.
int rounded_length = nextPow2(end - inarray);
hipMalloc((void **)&device_result, sizeof(int) * rounded_length);
hipMalloc((void **)&device_input, sizeof(int) * rounded_length);
// For convenience, both the input and output vectors on the
// device are initialized to the input values. This means that
// students are free to implement an in-place scan on the result
// vector if desired. If you do this, you will need to keep this
// in mind when calling exclusive_scan from find_repeats.
hipMemcpy(device_input, inarray, (end - inarray) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(device_result, inarray, (end - inarray) * sizeof(int), hipMemcpyHostToDevice);
double startTime = CycleTimer::currentSeconds();
exclusive_scan(device_input, N, device_result);
// Wait for completion
hipDeviceSynchronize();
double endTime = CycleTimer::currentSeconds();
hipMemcpy(resultarray, device_result, (end - inarray) * sizeof(int), hipMemcpyDeviceToHost);
double overallDuration = endTime - startTime;
return overallDuration;
}
// cudaScanThrust --
//
// Wrapper around the Thrust library's exclusive scan function
// As above in cudaScan(), this function copies the input to the GPU
// and times only the execution of the scan itself.
//
// Students are not expected to produce implementations that achieve
// performance that is competitive with the Thrust version, but it is fun to try.
double cudaScanThrust(int* inarray, int* end, int* resultarray) {
int length = end - inarray;
thrust::device_ptr<int> d_input = thrust::device_malloc<int>(length);
thrust::device_ptr<int> d_output = thrust::device_malloc<int>(length);
hipMemcpy(d_input.get(), inarray, length * sizeof(int), hipMemcpyHostToDevice);
double startTime = CycleTimer::currentSeconds();
thrust::exclusive_scan(d_input, d_input + length, d_output);
hipDeviceSynchronize();
double endTime = CycleTimer::currentSeconds();
hipMemcpy(resultarray, d_output.get(), length * sizeof(int), hipMemcpyDeviceToHost);
thrust::device_free(d_input);
thrust::device_free(d_output);
double overallDuration = endTime - startTime;
return overallDuration;
}
__global__ void
check_neighbor_kernel(int length, int* input, int* output) {
// compute overall thread index from position of thread in current
// block, and given the block we are in (in this example only a 1D
// calculation is needed so the code only looks at the .x terms of
// blockDim and threadIdx.
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index + 1 < length && input[index] == input[index + 1]) {
output[index] = 1;
} else {
output[index] = 0;
}
}
__device__ int device_num_pairs;
__global__ void
get_index_kernel(int length, int* prefix_sum, int* output) {
// compute overall thread index from position of thread in current
// block, and given the block we are in (in this example only a 1D
// calculation is needed so the code only looks at the .x terms of
// blockDim and threadIdx.
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (output[index] == 1) {
output[prefix_sum[index]] = index;
} else if (index == length - 1) {
device_num_pairs = prefix_sum[index];
}
}
// find_repeats --
//
// Given an array of integers `device_input`, returns an array of all
// indices `i` for which `device_input[i] == device_input[i+1]`.
//
// Returns the total number of pairs found
int find_repeats(int* device_input, int length, int* device_output) {
// CS149 TODO:
//
// Implement this function. You will probably want to
// make use of one or more calls to exclusive_scan(), as well as
// additional CUDA kernel launches.
//
// Note: As in the scan code, the calling code ensures that
// allocated arrays are a power of 2 in size, so you can use your
// exclusive_scan function with them. However, your implementation
// must ensure that the results of find_repeats are correct given
// the actual array length.
int blocks = (length + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
int* tmp;
// int num_pairs[1];
// int* device_num_pairs;
int rounded_length = nextPow2(length);
hipMalloc((void **)&tmp, rounded_length * sizeof(int));
// hipMalloc((void **)&device_num_pairs, sizeof(int));
// hipMemcpy(device_num_pairs, num_pairs, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( check_neighbor_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, length, device_input, device_output);
cudaCheckError(hipDeviceSynchronize());
exclusive_scan(device_output, length, tmp);
hipLaunchKernelGGL(( get_index_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, length, tmp, device_output);
int num_pairs;
hipMemcpyFromSymbol(&num_pairs, device_num_pairs, sizeof(num_pairs), 0, hipMemcpyDeviceToHost);
// hipMemcpy(num_pairs, device_num_pairs, sizeof(int), hipMemcpyDeviceToHost);
cudaCheckError(hipDeviceSynchronize());
hipFree(tmp);
return num_pairs;
}
//
// cudaFindRepeats --
//
// Timing wrapper around find_repeats. You should not modify this function.
double cudaFindRepeats(int *input, int length, int *output, int *output_length) {
int *device_input;
int *device_output;
int rounded_length = nextPow2(length);
hipMalloc((void **)&device_input, rounded_length * sizeof(int));
hipMalloc((void **)&device_output, rounded_length * sizeof(int));
hipMemcpy(device_input, input, length * sizeof(int), hipMemcpyHostToDevice);
hipDeviceSynchronize();
double startTime = CycleTimer::currentSeconds();
int result = find_repeats(device_input, length, device_output);
hipDeviceSynchronize();
double endTime = CycleTimer::currentSeconds();
// set output count and results array
*output_length = result;
hipMemcpy(output, device_output, length * sizeof(int), hipMemcpyDeviceToHost);
hipFree(device_input);
hipFree(device_output);
double duration = endTime - startTime;
return duration;
}
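//Illustrative sketch (not part of the original assignment code): calls the cudaFindRepeats() wrapper above
//on a tiny hard-coded array. For input {1,1,2,3,3,3,4} the repeated-neighbor indices are {0,3,4}, so the
//expected count is 3.
static void example_find_repeats()
{
    int input[7] = {1, 1, 2, 3, 3, 3, 4};
    int output[7];
    int count = 0;
    double t = cudaFindRepeats(input, 7, output, &count);
    printf("find_repeats found %d pairs in %.3f ms:", count, t * 1000.0);   //Expected count: 3
    for (int i = 0; i < count; i++)
        printf(" %d", output[i]);                                           //Expected indices: 0 3 4
    printf("\n");
}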
void printCudaInfo()
{
int deviceCount = 0;
hipError_t err = hipGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++)
{
hipDeviceProp_t deviceProps;
hipGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
}
|
c9c7ec102b6e09bed0c560971b9343d738280598.cu
|
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>
#include "CycleTimer.h"
#define THREADS_PER_BLOCK 256
#define DEBUG
#ifdef DEBUG
#define cudaCheckError(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "CUDA Error: %s at %s:%d\n",
cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#else
#define cudaCheckError(ans) ans
#endif
// helper function to round an integer up to the next power of 2
static inline int nextPow2(int n) {
n--;
n |= n >> 1;
n |= n >> 2;
n |= n >> 4;
n |= n >> 8;
n |= n >> 16;
n++;
return n;
}
__global__ void
upsweep_kernel(int N, int stride, int* input) {
// compute overall thread index from position of thread in current
// block, and given the block we are in (in this example only a 1D
// calculation is needed so the code only looks at the .x terms of
// blockDim and threadIdx.
int index = (blockIdx.x * blockDim.x + threadIdx.x) * stride * 2;
if (index + stride * 2 - 1 < N) {
input[index + stride * 2 - 1] += input[index + stride - 1];
}
if (stride == N/2 && index == 0) {
input[N - 1] = 0;
}
}
__global__ void
downsweep_kernel(int N, int stride, int* input) {
// compute overall thread index from position of thread in current
// block, and given the block we are in (in this example only a 1D
// calculation is needed so the code only looks at the .x terms of
// blockDim and threadIdx.
int index = (blockIdx.x * blockDim.x + threadIdx.x) * stride * 2;
if (index + stride * 2 - 1 < N) {
int t = input[index + stride - 1];
input[index + stride - 1] = input[index + stride * 2 - 1];
input[index + stride * 2 - 1] += t;
}
}
__global__ void
copy_kernel(int N, int* input, int* result) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N) {
result[index] = input[index];
} else {
result[index] = 0;
}
}
// exclusive_scan --
//
// Implementation of an exclusive scan on global memory array `input`,
// with results placed in global memory `result`.
//
// N is the logical size of the input and output arrays, however
// students can assume that both the start and result arrays were
// allocated with next power-of-two sizes as described by the comments
// in cudaScan(). This is helpful, since your parallel segmented scan
// will likely write to memory locations beyond N, but of course not
// greater than N rounded up to the next power of 2.
//
// Also, as per the comments in cudaScan(), you can implement an
// "in-place" scan, since the timing harness makes a copy of input and
// places it in result
void exclusive_scan(int* input, int N, int* result)
{
// CS149 TODO:
//
// Implement your exclusive scan implementation here. Keep in
// mind that although the arguments to this function are device
// allocated arrays, this is a function that is running in a thread
// on the CPU. Your implementation will need to make multiple calls
// to CUDA kernel functions (that you must write) to implement the
// scan.
int rounded_length = nextPow2(N);
int blocks = (rounded_length + THREADS_PER_BLOCK - 1)/ THREADS_PER_BLOCK;
copy_kernel<<<blocks, THREADS_PER_BLOCK>>>(N, input, result);
cudaCheckError(cudaDeviceSynchronize());
// printf("N = %d rounded = %d\n", N, rounded_length);
for (int i = 1; i <= rounded_length / 2; i*=2) {
int n_threads = rounded_length / (2 * i);
if (n_threads > THREADS_PER_BLOCK) {
int blocks = (n_threads + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
upsweep_kernel<<<blocks, THREADS_PER_BLOCK>>>(rounded_length, i, result);
} else {
upsweep_kernel<<<1, n_threads>>>(rounded_length, i, result);
}
cudaCheckError(cudaDeviceSynchronize());
}
for (int i = rounded_length / 2; i >= 1; i/=2) {
int n_threads = rounded_length / (2 * i);
if (n_threads > THREADS_PER_BLOCK) {
int blocks = (n_threads + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
// printf("i = %d n_threads = %d, blocks = %d\n", i, n_threads, blocks);
downsweep_kernel<<<blocks, THREADS_PER_BLOCK>>>(rounded_length, i, result);
} else {
downsweep_kernel<<<1, n_threads>>>(rounded_length, i, result);
}
cudaCheckError(cudaDeviceSynchronize());
}
}
//
// cudaScan --
//
// This function is a timing wrapper around the student's
// implementation of segmented scan - it copies the input to the GPU
// and times the invocation of the exclusive_scan() function
// above. Students should not modify it.
double cudaScan(int* inarray, int* end, int* resultarray)
{
int* device_result;
int* device_input;
int N = end - inarray;
// This code rounds the arrays provided to exclusive_scan up
// to a power of 2, but elements after the end of the original
// input are left uninitialized and not checked for correctness.
//
// Student implementations of exclusive_scan may assume an array's
// allocated length is a power of 2 for simplicity. This will
// result in extra work on non-power-of-2 inputs, but it's worth
// the simplicity of a power of two only solution.
int rounded_length = nextPow2(end - inarray);
cudaMalloc((void **)&device_result, sizeof(int) * rounded_length);
cudaMalloc((void **)&device_input, sizeof(int) * rounded_length);
// For convenience, both the input and output vectors on the
// device are initialized to the input values. This means that
// students are free to implement an in-place scan on the result
// vector if desired. If you do this, you will need to keep this
// in mind when calling exclusive_scan from find_repeats.
cudaMemcpy(device_input, inarray, (end - inarray) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(device_result, inarray, (end - inarray) * sizeof(int), cudaMemcpyHostToDevice);
double startTime = CycleTimer::currentSeconds();
exclusive_scan(device_input, N, device_result);
// Wait for completion
cudaDeviceSynchronize();
double endTime = CycleTimer::currentSeconds();
cudaMemcpy(resultarray, device_result, (end - inarray) * sizeof(int), cudaMemcpyDeviceToHost);
double overallDuration = endTime - startTime;
return overallDuration;
}
// cudaScanThrust --
//
// Wrapper around the Thrust library's exclusive scan function
// As above in cudaScan(), this function copies the input to the GPU
// and times only the execution of the scan itself.
//
// Students are not expected to produce implementations that achieve
// performance that is competitive with the Thrust version, but it is fun to try.
double cudaScanThrust(int* inarray, int* end, int* resultarray) {
int length = end - inarray;
thrust::device_ptr<int> d_input = thrust::device_malloc<int>(length);
thrust::device_ptr<int> d_output = thrust::device_malloc<int>(length);
cudaMemcpy(d_input.get(), inarray, length * sizeof(int), cudaMemcpyHostToDevice);
double startTime = CycleTimer::currentSeconds();
thrust::exclusive_scan(d_input, d_input + length, d_output);
cudaDeviceSynchronize();
double endTime = CycleTimer::currentSeconds();
cudaMemcpy(resultarray, d_output.get(), length * sizeof(int), cudaMemcpyDeviceToHost);
thrust::device_free(d_input);
thrust::device_free(d_output);
double overallDuration = endTime - startTime;
return overallDuration;
}
__global__ void
check_neighbor_kernel(int length, int* input, int* output) {
// compute overall thread index from position of thread in current
// block, and given the block we are in (in this example only a 1D
// calculation is needed so the code only looks at the .x terms of
// blockDim and threadIdx.
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index + 1 < length && input[index] == input[index + 1]) {
output[index] = 1;
} else {
output[index] = 0;
}
}
__device__ int device_num_pairs;
__global__ void
get_index_kernel(int length, int* prefix_sum, int* output) {
// compute overall thread index from position of thread in current
// block, and given the block we are in (in this example only a 1D
// calculation is needed so the code only looks at the .x terms of
// blockDim and threadIdx.
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (output[index] == 1) {
output[prefix_sum[index]] = index;
} else if (index == length - 1) {
device_num_pairs = prefix_sum[index];
}
}
// find_repeats --
//
// Given an array of integers `device_input`, returns an array of all
// indices `i` for which `device_input[i] == device_input[i+1]`.
//
// Returns the total number of pairs found
int find_repeats(int* device_input, int length, int* device_output) {
// CS149 TODO:
//
// Implement this function. You will probably want to
// make use of one or more calls to exclusive_scan(), as well as
// additional CUDA kernel launches.
//
// Note: As in the scan code, the calling code ensures that
// allocated arrays are a power of 2 in size, so you can use your
// exclusive_scan function with them. However, your implementation
// must ensure that the results of find_repeats are correct given
// the actual array length.
int blocks = (length + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
int* tmp;
// int num_pairs[1];
// int* device_num_pairs;
int rounded_length = nextPow2(length);
cudaMalloc((void **)&tmp, rounded_length * sizeof(int));
// cudaMalloc((void **)&device_num_pairs, sizeof(int));
// cudaMemcpy(device_num_pairs, num_pairs, sizeof(int), cudaMemcpyHostToDevice);
check_neighbor_kernel<<<blocks, THREADS_PER_BLOCK>>>(length, device_input, device_output);
cudaCheckError(cudaDeviceSynchronize());
exclusive_scan(device_output, length, tmp);
get_index_kernel<<<blocks, THREADS_PER_BLOCK>>>(length, tmp, device_output);
int num_pairs;
cudaMemcpyFromSymbol(&num_pairs, device_num_pairs, sizeof(num_pairs), 0, cudaMemcpyDeviceToHost);
// cudaMemcpy(num_pairs, device_num_pairs, sizeof(int), cudaMemcpyDeviceToHost);
cudaCheckError(cudaDeviceSynchronize());
cudaFree(tmp);
return num_pairs;
}
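// Worked example (added for illustration): for device_input = {1, 1, 2, 2, 2, 3}
// check_neighbor_kernel writes the flags      {1, 0, 1, 1, 0, 0},
// exclusive_scan turns them into              {0, 1, 1, 2, 3, 3},
// and get_index_kernel scatters the flagged indices to the front of
// device_output, yielding {0, 2, 3, ...} with num_pairs == 3.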
//
// cudaFindRepeats --
//
// Timing wrapper around find_repeats. You should not modify this function.
double cudaFindRepeats(int *input, int length, int *output, int *output_length) {
int *device_input;
int *device_output;
int rounded_length = nextPow2(length);
cudaMalloc((void **)&device_input, rounded_length * sizeof(int));
cudaMalloc((void **)&device_output, rounded_length * sizeof(int));
cudaMemcpy(device_input, input, length * sizeof(int), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
double startTime = CycleTimer::currentSeconds();
int result = find_repeats(device_input, length, device_output);
cudaDeviceSynchronize();
double endTime = CycleTimer::currentSeconds();
// set output count and results array
*output_length = result;
cudaMemcpy(output, device_output, length * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(device_input);
cudaFree(device_output);
double duration = endTime - startTime;
return duration;
}
void printCudaInfo()
{
int deviceCount = 0;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++)
{
cudaDeviceProp deviceProps;
cudaGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
}
|
3b6bdc82a2383a1a2fb184f9f2f8b96e1b0e8876.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* MIT License
*
* Copyright (c) 2021 CSCS, ETH Zurich
* 2021 University of Basel
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*! @file
* @brief Cornerstone octree GPU testing
*
* @author Sebastian Keller <[email protected]>
*
*/
#include "gtest/gtest.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include "coord_samples/random.hpp"
#include "cstone/tree/octree.cuh"
#include "cstone/tree/octree_util.hpp"
using namespace cstone;
//! @brief direct node count test
TEST(OctreeGpu, computeNodeCountsKernel)
{
using I = unsigned;
// 4096 codes
thrust::host_vector<I> h_codes = makeNLevelGrid<I>(4);
thrust::device_vector<I> d_codes = h_codes;
// regular level-3 cornerstone tree with 512 leaves
thrust::host_vector<I> h_cstree = makeUniformNLevelTree<I>(8*8*8, 1);
// subdivide the first level-3 node
for (int octant = 1; octant < 8; ++octant)
h_cstree.push_back(octant*nodeRange<I>(4));
std::sort(begin(h_cstree), end(h_cstree));
// create + upload tree to the device
thrust::device_vector<I> d_cstree = h_cstree;
thrust::device_vector<unsigned> d_counts(nNodes(d_cstree));
constexpr unsigned nThreads = 512;
hipLaunchKernelGGL(( computeNodeCountsKernel), dim3(iceil(nNodes(d_cstree), nThreads)), dim3(nThreads), 0, 0,
thrust::raw_pointer_cast(d_cstree.data()), thrust::raw_pointer_cast(d_counts.data()), nNodes(d_cstree),
thrust::raw_pointer_cast(d_codes.data()), thrust::raw_pointer_cast(d_codes.data() + d_codes.size()),
std::numeric_limits<unsigned>::max());
// download counts from device
thrust::host_vector<unsigned> h_counts = d_counts;
thrust::host_vector<unsigned> refCounts(nNodes(d_cstree), 8);
// the first 8 nodes are level-4, node count is 1, the other nodes are level-3 with node counts of 8
for (int nodeIdx = 0; nodeIdx < 8; ++nodeIdx)
refCounts[nodeIdx] = 1;
EXPECT_EQ(h_counts, refCounts);
}
//! @brief counts only tree nodes that cover the supplied particle codes
TEST(OctreeGpu, computeNodeCountsGpu)
{
using I = unsigned;
// regular level-3 cornerstone tree with 512 leaves
thrust::host_vector<I> h_cstree = makeUniformNLevelTree<I>(8*8*8, 1);
// subdivide the first level-3 node
for (int octant = 1; octant < 8; ++octant)
h_cstree.push_back(octant*nodeRange<I>(4));
std::sort(begin(h_cstree), end(h_cstree));
// create + upload tree to the device
thrust::device_vector<I> d_cstree = h_cstree;
thrust::host_vector<I> h_codes;
for (int nodeIdx = 1; nodeIdx < nNodes(h_cstree)-1; ++nodeIdx)
{
// put 2 particles in each tree node, except the first and last node
h_codes.push_back(h_cstree[nodeIdx]);
h_codes.push_back(h_cstree[nodeIdx]+1);
}
// upload particle codes to device
thrust::device_vector<I> d_codes = h_codes;
thrust::device_vector<unsigned> d_counts(nNodes(d_cstree), 1);
// findPopulatedNodes check
{
TreeNodeIndex popNodes[2];
hipLaunchKernelGGL(( findPopulatedNodes), dim3(1),dim3(1), 0, 0, thrust::raw_pointer_cast(d_cstree.data()), nNodes(d_cstree),
thrust::raw_pointer_cast(d_codes.data()),
thrust::raw_pointer_cast(d_codes.data() + d_codes.size()));
hipMemcpyFromSymbol(popNodes, populatedNodes, 2 * sizeof(TreeNodeIndex));
// first and last nodes have no particles
EXPECT_EQ(popNodes[0], 1);
EXPECT_EQ(popNodes[1], nNodes(d_cstree) - 1);
}
computeNodeCountsGpu(thrust::raw_pointer_cast(d_cstree.data()), thrust::raw_pointer_cast(d_counts.data()),
nNodes(d_cstree),
thrust::raw_pointer_cast(d_codes.data()),
thrust::raw_pointer_cast(d_codes.data() + d_codes.size()),
std::numeric_limits<unsigned>::max());
// download counts from device
thrust::host_vector<unsigned> h_counts = d_counts;
thrust::host_vector<unsigned> refCounts(nNodes(d_cstree), 2);
// first and last nodes are empty
refCounts[0] = 0;
*refCounts.rbegin() = 0;
EXPECT_EQ(h_counts, refCounts);
}
TEST(OctreeGpu, rebalanceDecision)
{
using I = unsigned;
// regular level-3 cornerstone tree with 512 leaves
thrust::host_vector<I> h_cstree = makeUniformNLevelTree<I>(8 * 8 * 8, 1);
// create + upload tree to the device
thrust::device_vector<I> d_cstree = h_cstree;
thrust::device_vector<unsigned> d_counts(8 * 8 * 8, 1);
// set first 8 nodes to empty
for (int i = 0; i < 8; ++i) { d_counts[i] = 0; }
d_counts[9] = 2;
unsigned bucketSize = 1;
thrust::device_vector<TreeNodeIndex> d_nodeOps(d_counts.size());
constexpr unsigned nThreads = 512;
hipLaunchKernelGGL(( rebalanceDecisionKernel), dim3(iceil(d_counts.size(), nThreads)), dim3(nThreads), 0, 0,
thrust::raw_pointer_cast(d_cstree.data()),
thrust::raw_pointer_cast(d_counts.data()),
nNodes(d_cstree),
bucketSize,
thrust::raw_pointer_cast(d_nodeOps.data()));
// download result from device
thrust::host_vector<TreeNodeIndex> h_nodeOps = d_nodeOps;
thrust::host_vector<TreeNodeIndex> reference(d_counts.size(), 1);
for (int i = 1; i < 8; ++i) { reference[i] = 0; } // merge
reference[9] = 8; // split into 8 children
int changeCounter = 0;
hipMemcpyFromSymbol(&changeCounter, rebalanceChangeCounter, sizeof(int));
EXPECT_EQ(h_nodeOps, reference);
EXPECT_NE(0, changeCounter);
}
TEST(OctreeGpu, rebalanceTree)
{
using CodeType = unsigned;
constexpr int bucketSize = 8;
thrust::device_vector<CodeType> tree = OctreeMaker<CodeType>{}.divide().divide(7).makeTree();
thrust::device_vector<CodeType> tmpTree;
thrust::device_vector<TreeNodeIndex> workArray;
// nodes {7,i} will need to be fused
thrust::device_vector<unsigned> counts(nNodes(tree), 1);
// node {1} will need to be split
counts[1] = bucketSize + 1;
bool converged = rebalanceTreeGpu(tree, thrust::raw_pointer_cast(counts.data()), bucketSize, tmpTree, workArray);
// download tree from device
thrust::host_vector<CodeType> h_tree = tree;
thrust::host_vector<CodeType> reference = OctreeMaker<CodeType>{}.divide().divide(1).makeTree();
EXPECT_EQ(h_tree, reference);
EXPECT_FALSE(converged);
}
/*! @brief fixture for octree tests based on random particle distributions
*
* @tparam KeyType 32- or 64-bit unsigned integer
*
* These tests are already integration tests strictly speaking. They can be seen
 * as the second line of defense in case the unit tests above (with minimal and explicit reference data)
* fail to catch an error.
*/
template<class KeyType>
class OctreeFixtureGpu
{
public:
OctreeFixtureGpu(unsigned numParticles, unsigned bucketSize)
{
d_codes = makeRandomGaussianKeys<KeyType>(numParticles);
d_tree = std::vector<KeyType>{0, nodeRange<KeyType>(0)};
d_counts = std::vector<unsigned>{numParticles};
thrust::device_vector<KeyType> tmpTree;
thrust::device_vector<TreeNodeIndex> workArray;
while(!updateOctreeGpu(thrust::raw_pointer_cast(d_codes.data()),
thrust::raw_pointer_cast(d_codes.data() + d_codes.size()),
bucketSize, d_tree, d_counts, tmpTree, workArray));
}
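    // Note (added for clarity): the constructor above starts from the
    // root-only tree {0, nodeRange<KeyType>(0)} and presumably calls
    // updateOctreeGpu() until it returns true, i.e. until a pass performs
    // no further splits or fusions and the tree has converged.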
thrust::device_vector<KeyType> d_tree;
thrust::device_vector<KeyType> d_codes;
thrust::device_vector<unsigned> d_counts;
};
//! @brief build tree from random particles and compare against CPU
TEST(OctreeGpu, computeOctreeRandom)
{
using Integer = unsigned;
int nParticles = 100000;
int bucketSize = 64;
// compute octree starting from default uniform octree
auto particleKeys = makeRandomGaussianKeys<Integer>(nParticles);
auto [treeCpu, countsCpu] = computeOctree(particleKeys.data(), particleKeys.data() + nParticles, bucketSize);
OctreeFixtureGpu<Integer> fixt(nParticles, bucketSize);
// download tree from device
thrust::host_vector<Integer> h_tree = fixt.d_tree;
thrust::host_vector<Integer> refTreeCpu = treeCpu;
thrust::host_vector<Integer> h_counts = fixt.d_counts;
thrust::host_vector<Integer> refCountsCpu = countsCpu;
EXPECT_EQ(h_tree, refTreeCpu);
EXPECT_EQ(h_counts, refCountsCpu);
}
/*! @brief simulation of distributed tree
*
* In distributed octrees, the executing rank only has a part of the particle SFC codes, such that
* many nodes in the tree are empty. Here this is simulated by removing a large connected part of the particle codes
* and recomputing the node counts based on this subset of particle codes. The non-zero node counts should stay the same.
*/
TEST(OctreeGpu, distributedMockUp)
{
using CodeType = unsigned;
int nParticles = 100000;
int bucketSize = 64;
OctreeFixtureGpu<CodeType> fixt(nParticles, bucketSize);
thrust::device_vector<CodeType> d_counts_orig = fixt.d_counts;
// omit first and last tenth of nodes
TreeNodeIndex Nodes = nNodes(fixt.d_tree);
TreeNodeIndex firstNode = Nodes / 10;
TreeNodeIndex lastNode = Nodes - Nodes / 10;
// determine the part of the tree that will be empty
thrust::host_vector<CodeType> h_codes = fixt.d_codes;
unsigned firstParticleIdx = stl::lower_bound(h_codes.begin(), h_codes.end(), fixt.d_tree[firstNode]) - h_codes.begin();
unsigned lastParticleIdx = stl::lower_bound(h_codes.begin(), h_codes.end(), fixt.d_tree[lastNode]) - h_codes.begin();
std::cout << firstNode << " " << lastNode << std::endl;
std::cout << firstParticleIdx << " " << lastParticleIdx << std::endl;
bool useCountsAsGuess = true;
computeNodeCountsGpu(thrust::raw_pointer_cast(fixt.d_tree.data()), thrust::raw_pointer_cast(fixt.d_counts.data()),
nNodes(fixt.d_tree),
thrust::raw_pointer_cast(fixt.d_codes.data() + firstParticleIdx),
thrust::raw_pointer_cast(fixt.d_codes.data() + lastParticleIdx),
std::numeric_limits<unsigned>::max(),
useCountsAsGuess);
thrust::device_vector<CodeType> d_counts_ref = d_counts_orig;
thrust::fill(d_counts_ref.begin(), d_counts_ref.begin() + firstNode, 0);
thrust::fill(d_counts_ref.begin() + lastNode, d_counts_ref.end(), 0);
EXPECT_EQ(fixt.d_counts, d_counts_ref);
}
|
3b6bdc82a2383a1a2fb184f9f2f8b96e1b0e8876.cu
|
/*
* MIT License
*
* Copyright (c) 2021 CSCS, ETH Zurich
* 2021 University of Basel
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*! @file
* @brief Cornerstone octree GPU testing
*
* @author Sebastian Keller <[email protected]>
*
*/
#include "gtest/gtest.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include "coord_samples/random.hpp"
#include "cstone/tree/octree.cuh"
#include "cstone/tree/octree_util.hpp"
using namespace cstone;
//! @brief direct node count test
TEST(OctreeGpu, computeNodeCountsKernel)
{
using I = unsigned;
// 4096 codes
thrust::host_vector<I> h_codes = makeNLevelGrid<I>(4);
thrust::device_vector<I> d_codes = h_codes;
// regular level-3 cornerstone tree with 512 leaves
thrust::host_vector<I> h_cstree = makeUniformNLevelTree<I>(8*8*8, 1);
// subdivide the first level-3 node
for (int octant = 1; octant < 8; ++octant)
h_cstree.push_back(octant*nodeRange<I>(4));
std::sort(begin(h_cstree), end(h_cstree));
// create + upload tree to the device
thrust::device_vector<I> d_cstree = h_cstree;
thrust::device_vector<unsigned> d_counts(nNodes(d_cstree));
constexpr unsigned nThreads = 512;
computeNodeCountsKernel<<<iceil(nNodes(d_cstree), nThreads), nThreads>>>(
thrust::raw_pointer_cast(d_cstree.data()), thrust::raw_pointer_cast(d_counts.data()), nNodes(d_cstree),
thrust::raw_pointer_cast(d_codes.data()), thrust::raw_pointer_cast(d_codes.data() + d_codes.size()),
std::numeric_limits<unsigned>::max());
// download counts from device
thrust::host_vector<unsigned> h_counts = d_counts;
thrust::host_vector<unsigned> refCounts(nNodes(d_cstree), 8);
// the first 8 nodes are level-4, node count is 1, the other nodes are level-3 with node counts of 8
for (int nodeIdx = 0; nodeIdx < 8; ++nodeIdx)
refCounts[nodeIdx] = 1;
EXPECT_EQ(h_counts, refCounts);
}
//! @brief counts only tree nodes that cover the supplied particle codes
TEST(OctreeGpu, computeNodeCountsGpu)
{
using I = unsigned;
// regular level-3 cornerstone tree with 512 leaves
thrust::host_vector<I> h_cstree = makeUniformNLevelTree<I>(8*8*8, 1);
// subdivide the first level-3 node
for (int octant = 1; octant < 8; ++octant)
h_cstree.push_back(octant*nodeRange<I>(4));
std::sort(begin(h_cstree), end(h_cstree));
// create + upload tree to the device
thrust::device_vector<I> d_cstree = h_cstree;
thrust::host_vector<I> h_codes;
for (int nodeIdx = 1; nodeIdx < nNodes(h_cstree)-1; ++nodeIdx)
{
// put 2 particles in each tree node, except the first and last node
h_codes.push_back(h_cstree[nodeIdx]);
h_codes.push_back(h_cstree[nodeIdx]+1);
}
// upload particle codes to device
thrust::device_vector<I> d_codes = h_codes;
thrust::device_vector<unsigned> d_counts(nNodes(d_cstree), 1);
// findPopulatedNodes check
{
TreeNodeIndex popNodes[2];
findPopulatedNodes<<<1,1>>>(thrust::raw_pointer_cast(d_cstree.data()), nNodes(d_cstree),
thrust::raw_pointer_cast(d_codes.data()),
thrust::raw_pointer_cast(d_codes.data() + d_codes.size()));
cudaMemcpyFromSymbol(popNodes, populatedNodes, 2 * sizeof(TreeNodeIndex));
// first and last nodes have no particles
EXPECT_EQ(popNodes[0], 1);
EXPECT_EQ(popNodes[1], nNodes(d_cstree) - 1);
}
computeNodeCountsGpu(thrust::raw_pointer_cast(d_cstree.data()), thrust::raw_pointer_cast(d_counts.data()),
nNodes(d_cstree),
thrust::raw_pointer_cast(d_codes.data()),
thrust::raw_pointer_cast(d_codes.data() + d_codes.size()),
std::numeric_limits<unsigned>::max());
// download counts from device
thrust::host_vector<unsigned> h_counts = d_counts;
thrust::host_vector<unsigned> refCounts(nNodes(d_cstree), 2);
// first and last nodes are empty
refCounts[0] = 0;
*refCounts.rbegin() = 0;
EXPECT_EQ(h_counts, refCounts);
}
TEST(OctreeGpu, rebalanceDecision)
{
using I = unsigned;
// regular level-3 cornerstone tree with 512 leaves
thrust::host_vector<I> h_cstree = makeUniformNLevelTree<I>(8 * 8 * 8, 1);
// create + upload tree to the device
thrust::device_vector<I> d_cstree = h_cstree;
thrust::device_vector<unsigned> d_counts(8 * 8 * 8, 1);
// set first 8 nodes to empty
for (int i = 0; i < 8; ++i) { d_counts[i] = 0; }
d_counts[9] = 2;
unsigned bucketSize = 1;
thrust::device_vector<TreeNodeIndex> d_nodeOps(d_counts.size());
constexpr unsigned nThreads = 512;
rebalanceDecisionKernel<<<iceil(d_counts.size(), nThreads), nThreads>>>(
thrust::raw_pointer_cast(d_cstree.data()),
thrust::raw_pointer_cast(d_counts.data()),
nNodes(d_cstree),
bucketSize,
thrust::raw_pointer_cast(d_nodeOps.data()));
// download result from device
thrust::host_vector<TreeNodeIndex> h_nodeOps = d_nodeOps;
thrust::host_vector<TreeNodeIndex> reference(d_counts.size(), 1);
for (int i = 1; i < 8; ++i) { reference[i] = 0; } // merge
reference[9] = 8; // split into 8 children
int changeCounter = 0;
cudaMemcpyFromSymbol(&changeCounter, rebalanceChangeCounter, sizeof(int));
EXPECT_EQ(h_nodeOps, reference);
EXPECT_NE(0, changeCounter);
}
TEST(OctreeGpu, rebalanceTree)
{
using CodeType = unsigned;
constexpr int bucketSize = 8;
thrust::device_vector<CodeType> tree = OctreeMaker<CodeType>{}.divide().divide(7).makeTree();
thrust::device_vector<CodeType> tmpTree;
thrust::device_vector<TreeNodeIndex> workArray;
// nodes {7,i} will need to be fused
thrust::device_vector<unsigned> counts(nNodes(tree), 1);
// node {1} will need to be split
counts[1] = bucketSize + 1;
bool converged = rebalanceTreeGpu(tree, thrust::raw_pointer_cast(counts.data()), bucketSize, tmpTree, workArray);
// download tree from device
thrust::host_vector<CodeType> h_tree = tree;
thrust::host_vector<CodeType> reference = OctreeMaker<CodeType>{}.divide().divide(1).makeTree();
EXPECT_EQ(h_tree, reference);
EXPECT_FALSE(converged);
}
/*! @brief fixture for octree tests based on random particle distributions
*
* @tparam KeyType 32- or 64-bit unsigned integer
*
* These tests are already integration tests strictly speaking. They can be seen
 * as the second line of defense in case the unit tests above (with minimal and explicit reference data)
* fail to catch an error.
*/
template<class KeyType>
class OctreeFixtureGpu
{
public:
OctreeFixtureGpu(unsigned numParticles, unsigned bucketSize)
{
d_codes = makeRandomGaussianKeys<KeyType>(numParticles);
d_tree = std::vector<KeyType>{0, nodeRange<KeyType>(0)};
d_counts = std::vector<unsigned>{numParticles};
thrust::device_vector<KeyType> tmpTree;
thrust::device_vector<TreeNodeIndex> workArray;
while(!updateOctreeGpu(thrust::raw_pointer_cast(d_codes.data()),
thrust::raw_pointer_cast(d_codes.data() + d_codes.size()),
bucketSize, d_tree, d_counts, tmpTree, workArray));
}
thrust::device_vector<KeyType> d_tree;
thrust::device_vector<KeyType> d_codes;
thrust::device_vector<unsigned> d_counts;
};
//! @brief build tree from random particles and compare against CPU
TEST(OctreeGpu, computeOctreeRandom)
{
using Integer = unsigned;
int nParticles = 100000;
int bucketSize = 64;
// compute octree starting from default uniform octree
auto particleKeys = makeRandomGaussianKeys<Integer>(nParticles);
auto [treeCpu, countsCpu] = computeOctree(particleKeys.data(), particleKeys.data() + nParticles, bucketSize);
OctreeFixtureGpu<Integer> fixt(nParticles, bucketSize);
// download tree from device
thrust::host_vector<Integer> h_tree = fixt.d_tree;
thrust::host_vector<Integer> refTreeCpu = treeCpu;
thrust::host_vector<Integer> h_counts = fixt.d_counts;
thrust::host_vector<Integer> refCountsCpu = countsCpu;
EXPECT_EQ(h_tree, refTreeCpu);
EXPECT_EQ(h_counts, refCountsCpu);
}
/*! @brief simulation of distributed tree
*
* In distributed octrees, the executing rank only has a part of the particle SFC codes, such that
* many nodes in the tree are empty. Here this is simulated by removing a large connected part of the particle codes
* and recomputing the node counts based on this subset of particle codes. The non-zero node counts should stay the same.
*/
TEST(OctreeGpu, distributedMockUp)
{
using CodeType = unsigned;
int nParticles = 100000;
int bucketSize = 64;
OctreeFixtureGpu<CodeType> fixt(nParticles, bucketSize);
thrust::device_vector<CodeType> d_counts_orig = fixt.d_counts;
// omit first and last tenth of nodes
TreeNodeIndex Nodes = nNodes(fixt.d_tree);
TreeNodeIndex firstNode = Nodes / 10;
TreeNodeIndex lastNode = Nodes - Nodes / 10;
// determine the part of the tree that will be empty
thrust::host_vector<CodeType> h_codes = fixt.d_codes;
unsigned firstParticleIdx = stl::lower_bound(h_codes.begin(), h_codes.end(), fixt.d_tree[firstNode]) - h_codes.begin();
unsigned lastParticleIdx = stl::lower_bound(h_codes.begin(), h_codes.end(), fixt.d_tree[lastNode]) - h_codes.begin();
std::cout << firstNode << " " << lastNode << std::endl;
std::cout << firstParticleIdx << " " << lastParticleIdx << std::endl;
bool useCountsAsGuess = true;
computeNodeCountsGpu(thrust::raw_pointer_cast(fixt.d_tree.data()), thrust::raw_pointer_cast(fixt.d_counts.data()),
nNodes(fixt.d_tree),
thrust::raw_pointer_cast(fixt.d_codes.data() + firstParticleIdx),
thrust::raw_pointer_cast(fixt.d_codes.data() + lastParticleIdx),
std::numeric_limits<unsigned>::max(),
useCountsAsGuess);
thrust::device_vector<CodeType> d_counts_ref = d_counts_orig;
thrust::fill(d_counts_ref.begin(), d_counts_ref.begin() + firstNode, 0);
thrust::fill(d_counts_ref.begin() + lastNode, d_counts_ref.end(), 0);
EXPECT_EQ(fixt.d_counts, d_counts_ref);
}
|
d5d9e14d1d1503b89a1c73b72141199e5a302fa7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdint.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
#include <algorithm>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
hipGetErrorString(error)); \
exit(1); \
} \
}
struct GpuTimer
{
hipEvent_t start;
hipEvent_t stop;
GpuTimer()
{
hipEventCreate(&start);
hipEventCreate(&stop);
}
~GpuTimer()
{
hipEventDestroy(start);
hipEventDestroy(stop);
}
void Start()
{
hipEventRecord(start, 0);
hipEventSynchronize(start);
}
void Stop()
{
hipEventRecord(stop, 0);
}
float Elapsed()
{
float elapsed;
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
// Thrust Radix Sort
void sortByThrust(const uint32_t * in, int n,
uint32_t * out)
{
thrust::device_vector<uint32_t> dv_out(in, in + n);
thrust::sort(dv_out.begin(), dv_out.end());
thrust::copy(dv_out.begin(), dv_out.end(), out);
}
__global__ void computeLocalHist(uint32_t * in, int n, uint32_t * scan, int nBins, int bit)
{
extern __shared__ int s_hist[];
int i=blockDim.x*blockIdx.x+threadIdx.x;
for(int stride=0;stride<nBins;stride+=blockDim.x)
if(threadIdx.x+stride<nBins)
s_hist[threadIdx.x+stride]=0;
__syncthreads();
if(i<n)
{
int bin=(in[i]>>bit)&(nBins-1);// extract nBits to determine which bin this element belongs to
atomicAdd(&s_hist[bin], 1);
}
__syncthreads();// make sure every element handled by this block has been counted into s_hist
for(int stride=0;stride<nBins;stride+=blockDim.x)
if(threadIdx.x+stride<nBins)
scan[(threadIdx.x+stride)*gridDim.x+blockIdx.x]=s_hist[threadIdx.x+stride];
// hist[nBins*blockIdx.x+threadIdx.x+stride]=s_hist[threadIdx.x+stride];
}
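// Layout note (added for illustration; an inferred reading of the kernel
// above, not the original author's comment): with nBins = 4 and gridDim.x = 3,
// the per-block histograms land in `scan` bin-major, i.e.
//   scan = [bin0(blk0), bin0(blk1), bin0(blk2),
//           bin1(blk0), bin1(blk1), bin1(blk2), ...].
// Exclusive-scanning this flattened array therefore gives, for each
// (bin, block) pair, how many elements precede that block's elements of that
// bin in the final sorted order.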
__global__ void scanBlkKernel(uint32_t * in, int n, uint32_t * out, uint32_t * blkSums)
{
extern __shared__ uint32_t value[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
value[threadIdx.x] = in[i];
}
for (unsigned int stride = 1; stride <= threadIdx.x; stride *= 2) {
__syncthreads();
int tmp;
if (threadIdx.x < n - stride)
tmp = value[threadIdx.x-stride];
else
tmp = 0;
__syncthreads();
value[threadIdx.x] += tmp;
}
blkSums[blockIdx.x] = value[blockDim.x - 1];
__syncthreads();
if (i<n) {
if(threadIdx.x==0)
{
out[i]=0;
}
else
out[i]=value[threadIdx.x-1];
}
}
__global__ void addSumScan(uint32_t * out, int n, uint32_t * blkSums)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n && blockIdx.x > 0)
{
out[i] = out[i] + blkSums[blockIdx.x - 1];
}
}
__global__ void radixSort1bit(uint32_t * in, int n, uint32_t * out,int nBits, int bit,int nBins, uint32_t* starts)
{ int i = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ uint32_t value[];
__shared__ uint32_t start[256];
for(int indexbit=0;indexbit<nBits;indexbit++)
{
if (i < n)
{
value[threadIdx.x] = ((((in[i] >> bit) & (nBins - 1)) >> indexbit) & 1);
}
__syncthreads();
for(int stride=1;stride<blockDim.x;stride*=2)
{
int temp=0;
if(threadIdx.x>=stride)
{
temp=value[threadIdx.x-stride];// read the element located stride positions back
}
__syncthreads();// make sure the value located stride positions back has been loaded into a register
if(threadIdx.x>=stride )
{
value[threadIdx.x]+=temp;
}
__syncthreads();// make sure all the additions have finished
}
int nZeros=0;
if(blockIdx.x*blockDim.x+blockDim.x<=n)
nZeros = blockDim.x - value[blockDim.x-2] -((((in[blockIdx.x*blockDim.x+blockDim.x-1] >> bit) & (nBins - 1)) >> indexbit) & 1);
else
{
if(n%blockDim.x>=2)
nZeros = n%blockDim.x - value[n%blockDim.x-2] - ((((in[n-1] >> bit) & (nBins - 1)) >> indexbit) & 1);
else
nZeros = n%blockDim.x - ((((in[n-1] >> bit) & (nBins - 1)) >> indexbit) & 1);
}
if (i<n)
{
if(threadIdx.x==0)
{
if (((((in[i] >> bit) & (nBins - 1)) >> indexbit) & 1)==0)
{
out[i]=in[i];
}
else
out[nZeros+blockIdx.x*blockDim.x]=in[i];
}
else
{
if(((((in[i] >> bit) & (nBins - 1)) >> indexbit) & 1)==0)
{
out[i-value[threadIdx.x-1]]=in[i];
}
else
{
out[nZeros+value[threadIdx.x-1]+blockIdx.x*blockDim.x]=in[i];
}
}
}
__syncthreads();
uint32_t *tmp=in;
in=out;
out=tmp;
}
if (i<n)
{
if(threadIdx.x==0)
{
start[((in[i] >> bit) & (nBins - 1))]=threadIdx.x;
}
else
{
if(((in[i] >> bit) & (nBins - 1))!=((in[i-1] >> bit) & (nBins - 1)))
{
start[((in[i] >> bit) & (nBins - 1))]=threadIdx.x;
starts[blockIdx.x*nBins+((in[i] >> bit) & (nBins - 1))]=start[((in[i] >> bit) & (nBins - 1))];
}
}
}
}
__global__ void scatter(uint32_t * in, int n, uint32_t * out,int nBits, int bit,int nBins, uint32_t* start, uint32_t* histScan)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
int bin = (in[i] >> bit) & (nBins - 1);
int rank=histScan[bin*gridDim.x+blockIdx.x]+threadIdx.x-start[nBins*blockIdx.x+bin];
out[rank]=in[i];
}
}
void sortByDevice(const uint32_t * in, int n,
uint32_t * out,
int nBits, int blockSizes)
{
int nBins = 1 << nBits; // number of bins
int m = (n - 1) / blockSizes + 1;// gridSize
dim3 blockSize(blockSizes);
dim3 blockSizeScan(blockSizes);
dim3 gridSize((n - 1) / blockSize.x + 1);
dim3 gridSizeScan((nBins*m - 1) / blockSizeScan.x + 1);
// allocations
// scan
uint32_t *d_scan, *d_blkSums, *d_histScan, *d_blkOuts, *d_starts;
uint32_t *histScan = (uint32_t *)malloc(m*nBins * sizeof(uint32_t));
uint32_t *blkSums = (uint32_t *)malloc(m*nBins*sizeof(uint32_t));
uint32_t* starts1D=(uint32_t *) malloc(m*nBins*sizeof(uint32_t));
CHECK(hipMalloc(&d_scan, nBins*m * sizeof(uint32_t)));
CHECK(hipMalloc(&d_blkSums,gridSizeScan.x*sizeof(uint32_t)));
CHECK(hipMalloc(&d_blkOuts,m*nBins*sizeof(uint32_t)));
CHECK(hipMalloc(&d_starts,m*nBins*sizeof(uint32_t)));
CHECK(hipMalloc(&d_histScan,m*nBins*sizeof(uint32_t)));
// starting indices
uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
uint32_t * d_in,*d_out, *d_tmp;
CHECK(hipMalloc(&d_in,n * sizeof(uint32_t)));
CHECK(hipMalloc(&d_out,n * sizeof(uint32_t)));
CHECK(hipMalloc(&d_tmp,n * sizeof(uint32_t)));
memcpy(src, in, n * sizeof(uint32_t));
uint32_t * originalSrc = src; // Use originalSrc to free memory later
size_t bytes = gridSizeScan.x * sizeof(uint32_t);
uint32_t * in_tmp = (uint32_t *)malloc(bytes);
uint32_t * out_tmp = (uint32_t*)malloc(bytes);
CHECK(hipMemcpy(d_in, src, n * sizeof(uint32_t), hipMemcpyHostToDevice));
GpuTimer timerTmp1,timerTmp2,timerTmp3,timerTmp4,timerTmp5;
float time1,time2,time3,time4,time5;
time1=time2=time3=time4=time5=0;
for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits)
{
// Compute the local histograms into d_scan
timerTmp1.Start();
hipLaunchKernelGGL(( computeLocalHist), dim3(gridSize), dim3(blockSize), blockSizes*sizeof(uint32_t), 0, d_in, n, d_scan, nBins,bit);
timerTmp1.Stop();
time1 = time1 + timerTmp1.Elapsed();
timerTmp2.Start();
// Compute the exclusive scan into d_histScan
hipLaunchKernelGGL(( scanBlkKernel), dim3(gridSizeScan),dim3(blockSizeScan),blockSizes*sizeof(uint32_t), 0, d_scan,m*nBins,d_histScan,d_blkSums);
CHECK(hipMemcpy(in_tmp, d_blkSums, gridSizeScan.x * sizeof(uint32_t), hipMemcpyDeviceToHost));
out_tmp[0] = in_tmp[0];
for (int i = 1; i < gridSizeScan.x; i++)
{
out_tmp[i] = out_tmp[i - 1] + in_tmp[i];
}
CHECK(hipMemcpy(d_blkOuts, out_tmp, gridSizeScan.x * sizeof(uint32_t), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( addSumScan), dim3(gridSizeScan),dim3(blockSizeScan), 0, 0, d_histScan, n, d_blkOuts);
hipDeviceSynchronize();
CHECK(hipGetLastError());
timerTmp2.Stop();
time2 = time2 + timerTmp2.Elapsed();
timerTmp3.Start();
// Radix Sort 1 bit
hipLaunchKernelGGL(( radixSort1bit), dim3(gridSize),dim3(blockSize),blockSizes*sizeof(uint32_t), 0, d_in,n,d_out,nBits,bit,nBins, d_starts);
timerTmp3.Stop();
time3 = time3 + timerTmp3.Elapsed();
timerTmp5.Start();
// Scatter
hipLaunchKernelGGL(( scatter), dim3(gridSize),dim3(blockSize),blockSizes*sizeof(uint32_t), 0, d_in,n,d_out,nBits,bit,nBins,d_starts,d_histScan);
timerTmp5.Stop();
time5 = time5 + timerTmp5.Elapsed();
d_tmp = d_in;
d_in = d_out;
d_out = d_tmp;
}
printf("Time (local hist): %.3f ms\n", time1);
printf("Time (exclusive scan): %.3f ms\n", time2);
printf("Time (local sort) + Time (start value): %.3f ms\n", time3);
printf("Time (scatter): %.3f ms\n", time5);
CHECK(hipMemcpy(src, d_in, n * sizeof(uint32_t), hipMemcpyDeviceToHost));
memcpy(out, src, n * sizeof(uint32_t));
// Free memories
hipFree(d_scan);
hipFree(d_blkSums);
hipFree(d_histScan);
hipFree(d_in);
hipFree(d_out);
free(originalSrc);
}
// Radix sort
void sort(const uint32_t * in, int n,
uint32_t * out,
int nBits,
bool useDevice=false, int blockSizes=512)
{
GpuTimer timer;
timer.Start();
if (useDevice == false)
{
printf("\nRadix sort by Thrust\n");
sortByThrust(in, n, out);
}
else // use device
{
printf("\nRadix sort by device\n");
sortByDevice(in, n, out, nBits, blockSizes);
}
timer.Stop();
printf("Time: %.3f ms\n", timer.Elapsed());
}
void printDeviceInfo()
{
hipDeviceProp_t devProv;
CHECK(hipGetDeviceProperties(&devProv, 0));
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor);
printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock);
printf("****************************\n");
}
void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n)
{
for (int i = 0; i < n; i++)
{
if (out[i] != correctOut[i])
{
printf("INCORRECT :(\n");
// printf("%i %i",out[i+100] , correctOut[i+100]);
return;
}
}
printf("CORRECT :)\n");
}
void printArray(uint32_t * a, int n)
{
for (int i = 0; i < 100; i++)
printf("%i ", a[i]);
printf("\n");
}
int main(int argc, char ** argv)
{
// PRINT OUT DEVICE INFO
printDeviceInfo();
// SET UP INPUT SIZE
int n = (1 << 24) + 1;
// n = 1000000;
printf("\nInput size: %d\n", n);
// ALLOCATE MEMORIES
size_t bytes = n * sizeof(uint32_t);
uint32_t * in = (uint32_t *)malloc(bytes);
uint32_t * out = (uint32_t *)malloc(bytes); // Device result
uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result
// SET UP INPUT DATA
for (int i = 0; i < n; i++)
in[i] = rand();
//printArray(in, n);
// SET UP NBITS
int nBits = 4; // Default
if (argc > 1)
nBits = atoi(argv[1]);
printf("\nNum bits per digit: %d\n", nBits);
// DETERMINE BLOCK SIZES
int blockSizes=512; // single block size used for all kernels
if (argc == 3)
{
blockSizes = atoi(argv[2]);
}
printf("\nblock size: %d", blockSizes);
// SORT BY HOST
sort(in, n, correctOut, nBits);
// printArray(correctOut, n);
// SORT BY DEVICE
sort(in, n, out, nBits, true, blockSizes);
// printArray(out,n);
checkCorrectness(out, correctOut, n);
// FREE MEMORIES
free(in);
free(out);
free(correctOut);
return EXIT_SUCCESS;
}
|
d5d9e14d1d1503b89a1c73b72141199e5a302fa7.cu
|
#include <stdio.h>
#include <stdint.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
#include <algorithm>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
struct GpuTimer
{
cudaEvent_t start;
cudaEvent_t stop;
GpuTimer()
{
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
~GpuTimer()
{
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
void Start()
{
cudaEventRecord(start, 0);
cudaEventSynchronize(start);
}
void Stop()
{
cudaEventRecord(stop, 0);
}
float Elapsed()
{
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
// Thrust Radix Sort
void sortByThrust(const uint32_t * in, int n,
uint32_t * out)
{
thrust::device_vector<uint32_t> dv_out(in, in + n);
thrust::sort(dv_out.begin(), dv_out.end());
thrust::copy(dv_out.begin(), dv_out.end(), out);
}
__global__ void computeLocalHist(uint32_t * in, int n, uint32_t * scan, int nBins, int bit)
{
extern __shared__ int s_hist[];
int i=blockDim.x*blockIdx.x+threadIdx.x;
for(int stride=0;stride<nBins;stride+=blockDim.x)
if(threadIdx.x+stride<nBins)
s_hist[threadIdx.x+stride]=0;
__syncthreads();
if(i<n)
{
int bin=(in[i]>>bit)&(nBins-1);// extract nBits to determine which bin this element belongs to
atomicAdd(&s_hist[bin], 1);
}
__syncthreads();// make sure every element handled by this block has been counted into s_hist
for(int stride=0;stride<nBins;stride+=blockDim.x)
if(threadIdx.x+stride<nBins)
scan[(threadIdx.x+stride)*gridDim.x+blockIdx.x]=s_hist[threadIdx.x+stride];
// hist[nBins*blockIdx.x+threadIdx.x+stride]=s_hist[threadIdx.x+stride];
}
__global__ void scanBlkKernel(uint32_t * in, int n, uint32_t * out, uint32_t * blkSums)
{
extern __shared__ uint32_t value[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
value[threadIdx.x] = in[i];
}
for (unsigned int stride = 1; stride <= threadIdx.x; stride *= 2) {
__syncthreads();
int tmp;
if (threadIdx.x < n - stride)
tmp = value[threadIdx.x-stride];
else
tmp = 0;
__syncthreads();
value[threadIdx.x] += tmp;
}
blkSums[blockIdx.x] = value[blockDim.x - 1];
__syncthreads();
if (i<n) {
if(threadIdx.x==0)
{
out[i]=0;
}
else
out[i]=value[threadIdx.x-1];
}
}
__global__ void addSumScan(uint32_t * out, int n, uint32_t * blkSums)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n && blockIdx.x > 0)
{
out[i] = out[i] + blkSums[blockIdx.x - 1];
}
}
__global__ void radixSort1bit(uint32_t * in, int n, uint32_t * out,int nBits, int bit,int nBins, uint32_t* starts)
{ int i = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ uint32_t value[];
__shared__ uint32_t start[256];
for(int indexbit=0;indexbit<nBits;indexbit++)
{
if (i < n)
{
value[threadIdx.x] = ((((in[i] >> bit) & (nBins - 1)) >> indexbit) & 1);
}
__syncthreads();
for(int stride=1;stride<blockDim.x;stride*=2)
{
int temp=0;
if(threadIdx.x>=stride)
{
temp=value[threadIdx.x-stride];// read the element located stride positions back
}
__syncthreads();// make sure the value located stride positions back has been loaded into a register
if(threadIdx.x>=stride )
{
value[threadIdx.x]+=temp;
}
__syncthreads();// make sure all the additions have finished
}
int nZeros=0;
if(blockIdx.x*blockDim.x+blockDim.x<=n)
nZeros = blockDim.x - value[blockDim.x-2] -((((in[blockIdx.x*blockDim.x+blockDim.x-1] >> bit) & (nBins - 1)) >> indexbit) & 1);
else
{
if(n%blockDim.x>=2)
nZeros = n%blockDim.x - value[n%blockDim.x-2] - ((((in[n-1] >> bit) & (nBins - 1)) >> indexbit) & 1);
else
nZeros = n%blockDim.x - ((((in[n-1] >> bit) & (nBins - 1)) >> indexbit) & 1);
}
if (i<n)
{
if(threadIdx.x==0)
{
if (((((in[i] >> bit) & (nBins - 1)) >> indexbit) & 1)==0)
{
out[i]=in[i];
}
else
out[nZeros+blockIdx.x*blockDim.x]=in[i];
}
else
{
if(((((in[i] >> bit) & (nBins - 1)) >> indexbit) & 1)==0)
{
out[i-value[threadIdx.x-1]]=in[i];
}
else
{
out[nZeros+value[threadIdx.x-1]+blockIdx.x*blockDim.x]=in[i];
}
}
}
__syncthreads();
uint32_t *tmp=in;
in=out;
out=tmp;
}
if (i<n)
{
if(threadIdx.x==0)
{
start[((in[i] >> bit) & (nBins - 1))]=threadIdx.x;
}
else
{
if(((in[i] >> bit) & (nBins - 1))!=((in[i-1] >> bit) & (nBins - 1)))
{
start[((in[i] >> bit) & (nBins - 1))]=threadIdx.x;
starts[blockIdx.x*nBins+((in[i] >> bit) & (nBins - 1))]=start[((in[i] >> bit) & (nBins - 1))];
}
}
}
}
__global__ void scatter(uint32_t * in, int n, uint32_t * out,int nBits, int bit,int nBins, uint32_t* start, uint32_t* histScan)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
int bin = (in[i] >> bit) & (nBins - 1);
int rank=histScan[bin*gridDim.x+blockIdx.x]+threadIdx.x-start[nBins*blockIdx.x+bin];
out[rank]=in[i];
}
}
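// Worked example (added for illustration, with made-up numbers): say nBins = 4,
// bit = 0, and block 2 holds locally sorted values {..., 8, 13, 13, ...} with
// the two 13s (bin 1) at local positions 5 and 6. If start[nBins*2 + 1] == 5
// and histScan[1*gridDim.x + 2] == 42, the two threads compute
// rank = 42 + 5 - 5 = 42 and rank = 42 + 6 - 5 = 43, so the block's bin-1
// elements are written contiguously starting at global position 42.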
void sortByDevice(const uint32_t * in, int n,
uint32_t * out,
int nBits, int blockSizes)
{
int nBins = 1 << nBits; // number of bins
int m = (n - 1) / blockSizes + 1;// gridSize
dim3 blockSize(blockSizes);
dim3 blockSizeScan(blockSizes);
dim3 gridSize((n - 1) / blockSize.x + 1);
dim3 gridSizeScan((nBins*m - 1) / blockSizeScan.x + 1);
// allocations
// scan
uint32_t *d_scan, *d_blkSums, *d_histScan, *d_blkOuts, *d_starts;
uint32_t *histScan = (uint32_t *)malloc(m*nBins * sizeof(uint32_t));
uint32_t *blkSums = (uint32_t *)malloc(m*nBins*sizeof(uint32_t));
uint32_t* starts1D=(uint32_t *) malloc(m*nBins*sizeof(uint32_t));
CHECK(cudaMalloc(&d_scan, nBins*m * sizeof(uint32_t)));
CHECK(cudaMalloc(&d_blkSums,gridSizeScan.x*sizeof(uint32_t)));
CHECK(cudaMalloc(&d_blkOuts,m*nBins*sizeof(uint32_t)));
CHECK(cudaMalloc(&d_starts,m*nBins*sizeof(uint32_t)));
CHECK(cudaMalloc(&d_histScan,m*nBins*sizeof(uint32_t)));
// starting indices
uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
uint32_t * d_in,*d_out, *d_tmp;
CHECK(cudaMalloc(&d_in,n * sizeof(uint32_t)));
CHECK(cudaMalloc(&d_out,n * sizeof(uint32_t)));
CHECK(cudaMalloc(&d_tmp,n * sizeof(uint32_t)));
memcpy(src, in, n * sizeof(uint32_t));
uint32_t * originalSrc = src; // Use originalSrc to free memory later
size_t bytes = gridSizeScan.x * sizeof(uint32_t);
uint32_t * in_tmp = (uint32_t *)malloc(bytes);
uint32_t * out_tmp = (uint32_t*)malloc(bytes);
CHECK(cudaMemcpy(d_in, src, n * sizeof(uint32_t), cudaMemcpyHostToDevice));
GpuTimer timerTmp1,timerTmp2,timerTmp3,timerTmp4,timerTmp5;
float time1,time2,time3,time4,time5;
time1=time2=time3=time4=time5=0;
for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits)
{
// Compute the local histograms into d_scan
timerTmp1.Start();
computeLocalHist<<<gridSize, blockSize, blockSizes*sizeof(uint32_t)>>>(d_in, n, d_scan, nBins,bit);
timerTmp1.Stop();
time1 = time1 + timerTmp1.Elapsed();
timerTmp2.Start();
// Compute the exclusive scan into d_histScan
scanBlkKernel<<<gridSizeScan,blockSizeScan,blockSizes*sizeof(uint32_t)>>>(d_scan,m*nBins,d_histScan,d_blkSums);
CHECK(cudaMemcpy(in_tmp, d_blkSums, gridSizeScan.x * sizeof(uint32_t), cudaMemcpyDeviceToHost));
out_tmp[0] = in_tmp[0];
for (int i = 1; i < gridSizeScan.x; i++)
{
out_tmp[i] = out_tmp[i - 1] + in_tmp[i];
}
CHECK(cudaMemcpy(d_blkOuts, out_tmp, gridSizeScan.x * sizeof(uint32_t), cudaMemcpyHostToDevice));
addSumScan<<<gridSizeScan,blockSizeScan>>>(d_histScan, n, d_blkOuts);
cudaDeviceSynchronize();
CHECK(cudaGetLastError());
timerTmp2.Stop();
time2 = time2 + timerTmp2.Elapsed();
timerTmp3.Start();
// Radix Sort 1 bit
radixSort1bit<<<gridSize,blockSize,blockSizes*sizeof(uint32_t)>>>(d_in,n,d_out,nBits,bit,nBins, d_starts);
timerTmp3.Stop();
time3 = time3 + timerTmp3.Elapsed();
timerTmp5.Start();
// Scatter
scatter<<<gridSize,blockSize,blockSizes*sizeof(uint32_t)>>>(d_in,n,d_out,nBits,bit,nBins,d_starts,d_histScan);
timerTmp5.Stop();
time5 = time5 + timerTmp5.Elapsed();
d_tmp = d_in;
d_in = d_out;
d_out = d_tmp;
}
printf("Time (local hist): %.3f ms\n", time1);
printf("Time (exclusive scan): %.3f ms\n", time2);
printf("Time (local sort) + Time (start value): %.3f ms\n", time3);
printf("Time (scatter): %.3f ms\n", time5);
CHECK(cudaMemcpy(src, d_in, n * sizeof(uint32_t), cudaMemcpyDeviceToHost));
memcpy(out, src, n * sizeof(uint32_t));
// Free memories
cudaFree(d_scan);
cudaFree(d_blkSums);
cudaFree(d_histScan);
cudaFree(d_in);
cudaFree(d_out);
free(originalSrc);
}
// Radix sort
void sort(const uint32_t * in, int n,
uint32_t * out,
int nBits,
bool useDevice=false, int blockSizes=512)
{
GpuTimer timer;
timer.Start();
if (useDevice == false)
{
printf("\nRadix sort by Thrust\n");
sortByThrust(in, n, out);
}
else // use device
{
printf("\nRadix sort by device\n");
sortByDevice(in, n, out, nBits, blockSizes);
}
timer.Stop();
printf("Time: %.3f ms\n", timer.Elapsed());
}
void printDeviceInfo()
{
cudaDeviceProp devProv;
CHECK(cudaGetDeviceProperties(&devProv, 0));
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor);
printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock);
printf("****************************\n");
}
void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n)
{
for (int i = 0; i < n; i++)
{
if (out[i] != correctOut[i])
{
printf("INCORRECT :(\n");
// printf("%i %i",out[i+100] , correctOut[i+100]);
return;
}
}
printf("CORRECT :)\n");
}
void printArray(uint32_t * a, int n)
{
for (int i = 0; i < 100; i++)
printf("%i ", a[i]);
printf("\n");
}
int main(int argc, char ** argv)
{
// PRINT OUT DEVICE INFO
printDeviceInfo();
// SET UP INPUT SIZE
int n = (1 << 24) + 1;
// n = 1000000;
printf("\nInput size: %d\n", n);
// ALLOCATE MEMORIES
size_t bytes = n * sizeof(uint32_t);
uint32_t * in = (uint32_t *)malloc(bytes);
uint32_t * out = (uint32_t *)malloc(bytes); // Device result
uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result
// SET UP INPUT DATA
for (int i = 0; i < n; i++)
in[i] = rand();
//printArray(in, n);
// SET UP NBITS
int nBits = 4; // Default
if (argc > 1)
nBits = atoi(argv[1]);
printf("\nNum bits per digit: %d\n", nBits);
// DETERMINE BLOCK SIZES
int blockSizes=512; // single block size used for all kernels
if (argc == 3)
{
blockSizes = atoi(argv[2]);
}
printf("\nblock size: %d", blockSizes);
// SORT BY HOST
sort(in, n, correctOut, nBits);
// printArray(correctOut, n);
// SORT BY DEVICE
sort(in, n, out, nBits, true, blockSizes);
// printArray(out,n);
checkCorrectness(out, correctOut, n);
// FREE MEMORIES
free(in);
free(out);
free(correctOut);
return EXIT_SUCCESS;
}
|
0abb7c2765b9c5edaddf237837421c5d720805fe.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "weak_cc_hip.cuh"
#include "scc_matrix.cuh"
#include <thrust/sequence.h>
#include "utilities/graph_utils.cuh"
#include "utilities/error_utils.h"
#include <cugraph.h>
#include <iostream>
#include <type_traits>
#include <cstdint>
#include "topology/topology.cuh"
namespace cugraph {
namespace detail {
//#define _DEBUG_WEAK_CC
//
/**
* @brief Compute connected components.
* The weak version (for undirected graphs, only) was imported from cuML.
* This implementation comes from [1] and solves component labeling problem in
* parallel on CSR-indexes based upon the vertex degree and adjacency graph.
*
* [1] Hawick, K.A et al, 2010. "Parallel graph component labelling with GPUs and CUDA"
*
* The strong version (for directed or undirected graphs) is based on:
* [2] Gilbert, J. et al, 2011. "Graph Algorithms in the Language of Linear Algebra"
*
* C = I | A | A^2 |...| A^k
* where matrix multiplication is via semi-ring:
* (combine, reduce) == (&, |) (bitwise ops)
* Then: X = C & transpose(C); and finally, apply get_labels(X);
*
*
* @tparam IndexT the numeric type of non-floating point elements
* @tparam TPB_X the threads to use per block when configuring the kernel
* @param graph input graph; assumed undirected for weakly CC [in]
* @param table of 2 gdf_columns: output labels and vertex indices [out]
* @param connectivity_type CUGRAPH_WEAK or CUGRAPH_STRONG [in]
* @param stream the cuda stream [in]
*/
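// Hand-worked illustration (added for clarity; not part of the original
// documentation): for the directed 3-cycle 0->1->2->0,
//   A reaches {0->1, 1->2, 2->0} and A^2 reaches {0->2, 1->0, 2->1},
// so C = I | A | A^2 is the all-ones matrix, C & transpose(C) is likewise
// all ones, and get_labels(X) assigns all three vertices to one component.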
template<typename IndexT,
int TPB_X = 32>
std::enable_if_t<std::is_signed<IndexT>::value>
connected_components_impl(Graph *graph,
cudf::table *table,
cugraph_cc_t connectivity_type,
hipStream_t stream)
{
using ByteT = unsigned char;//minimum addressable unit
static auto row_offsets_ = [](const Graph* G){
return static_cast<const IndexT*>(G->adjList->offsets->data);
};
static auto col_indices_ = [](const Graph* G){
return static_cast<const IndexT*>(G->adjList->indices->data);
};
static auto nrows_ = [](const Graph* G){
return G->adjList->offsets->size - 1;
};
static auto nnz_ = [](const Graph* G){
return G->adjList->indices->size;
};
gdf_column* labels = table->get_column(0);
gdf_column* verts = table->get_column(1);
CUGRAPH_EXPECTS(graph != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(graph->adjList != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(row_offsets_(graph) != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(col_indices_(graph) != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(labels->data != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(verts->data != nullptr, "Invalid API parameter");
auto type_id = graph->adjList->offsets->dtype;
CUGRAPH_EXPECTS( type_id == GDF_INT32 || type_id == GDF_INT64, "Unsupported data type");
CUGRAPH_EXPECTS( type_id == graph->adjList->indices->dtype, "Unsupported data type");
//TODO: relax this requirement:
//
CUGRAPH_EXPECTS( type_id == labels->dtype, "Unsupported data type");
IndexT* p_d_labels = static_cast<IndexT*>(labels->data);
IndexT* p_d_verts = static_cast<IndexT*>(verts->data);
const IndexT* p_d_row_offsets = row_offsets_(graph);
const IndexT* p_d_col_ind = col_indices_(graph);
IndexT nnz = nnz_(graph);
IndexT nrows = nrows_(graph);//static_cast<IndexT>(graph->adjList->offsets->size) - 1;
if( connectivity_type == CUGRAPH_WEAK )
{
// using VectorT = thrust::device_vector<IndexT>;
// VectorT d_ro(p_d_row_offsets, p_d_row_offsets + nrows + 1);
// VectorT d_ci(p_d_col_ind, p_d_col_ind + nnz);
#ifdef _DEBUG_WEAK_CC
IndexT last_elem{0};
hipMemcpy((void*)(&last_elem), p_d_row_offsets+nrows, sizeof(IndexT), hipMemcpyDeviceToHost);
std::cout<<"############## "
<<"nrows = "<<nrows
<<"; nnz = "<<nnz
<<"; nnz_ro = "<<last_elem
<<"; p_d_labels valid: "<<(p_d_labels != nullptr)
<<"; p_d_row_offsets valid: "<<(p_d_row_offsets != nullptr)
<<"; p_d_col_ind valid: " << (p_d_col_ind != nullptr)
<<"\n";
std::cout<<"############## d_ro:\n";
print_v(d_ro, std::cout);
std::cout<<"############## d_ci:\n";
print_v(d_ci, std::cout);
#endif
//check if graph is undirected; return w/ error, if not?
//Yes, for now; in the future we may remove this constraint;
//
bool is_symmetric = cugraph::detail::check_symmetry(nrows, p_d_row_offsets, nnz, p_d_col_ind);
#ifdef _DEBUG_WEAK_CC
std::cout<<"############## "
<<"; adj. matrix symmetric? " << is_symmetric
<<"\n";
#endif
CUGRAPH_EXPECTS( is_symmetric, "Invalid API parameter");
MLCommon::Sparse::weak_cc_entry<IndexT, TPB_X>(p_d_labels,
p_d_row_offsets,
p_d_col_ind,
nnz,
nrows,
stream);
}
else
{
//device memory requirements: 2n^2 + 2n x sizeof(IndexT) + 1 (for flag)
//( n = |V|)
//
size_t n2 = 2*nrows;
n2 = n2*(nrows*sizeof(ByteT) + sizeof(IndexT)) + 1;
int device;
hipDeviceProp_t prop;
hipGetDevice(&device);
hipGetDeviceProperties(&prop, device);
if( n2 > prop.totalGlobalMem )
{
CUGRAPH_FAIL("ERROR: Insufficient device memory for SCC");
}
SCC_Data<ByteT, IndexT> sccd(nrows, p_d_row_offsets, p_d_col_ind);
sccd.run_scc(p_d_labels);
}
//fill the vertex indices column:
//
thrust::sequence(thrust::device, p_d_verts, p_d_verts + nrows);
}
/**
* @brief Compute connected components.
* The weak version (for undirected graphs, only) was imported from cuML.
* This implementation comes from [1] and solves component labeling problem in
* parallel on CSR-indexes based upon the vertex degree and adjacency graph.
*
* [1] Hawick, K.A et al, 2010. "Parallel graph component labelling with GPUs and CUDA"
*
* The strong version (for directed or undirected graphs) is based on:
* [2] Gilbert, J. et al, 2011. "Graph Algorithms in the Language of Linear Algebra"
*
* C = I | A | A^2 |...| A^k
* where matrix multiplication is via semi-ring:
* (combine, reduce) == (&, |) (bitwise ops)
* Then: X = C & transpose(C); and finally, apply get_labels(X);
*
*
* @param graph input graph; assumed undirected for weakly CC [in]
* @param connectivity_type CUGRAPH_WEAK or CUGRAPH_STRONG [in]
* @param table of 2 gdf_columns: output labels and vertex indices [out]
*/
}
void connected_components(Graph *graph,
cugraph_cc_t connectivity_type,
cudf::table *table)
{
hipStream_t stream{nullptr};
CUGRAPH_EXPECTS(table != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(table->num_columns() > 1, "Invalid API parameter");
gdf_column* labels = table->get_column(0);
gdf_column* verts = table->get_column(1);
CUGRAPH_EXPECTS(labels != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(verts != nullptr, "Invalid API parameter");
auto dtype = labels->dtype;
CUGRAPH_EXPECTS( dtype == verts->dtype, "Invalid API parameter");
switch( dtype )//currently graph's row offsets, col_indices and labels are same type; that may change in the future
{
case GDF_INT32:
return detail::connected_components_impl<int32_t>(graph, table, connectivity_type, stream);
// case GDF_INT64:
//return gdf_connected_components_impl<int64_t>(graph, labels, connectivity_type, stream);
// PROBLEM: relies on atomicMin(), which won't work w/ int64_t
// should work with `unsigned long long` but using signed `Type`'s
//(initialized to `-1`)
default:
break;//warning eater
}
CUGRAPH_FAIL("Unsupported data type");
}
} //namespace
|
0abb7c2765b9c5edaddf237837421c5d720805fe.cu
|
#include "weak_cc.cuh"
#include "scc_matrix.cuh"
#include <thrust/sequence.h>
#include "utilities/graph_utils.cuh"
#include "utilities/error_utils.h"
#include <cugraph.h>
#include <iostream>
#include <type_traits>
#include <cstdint>
#include "topology/topology.cuh"
namespace cugraph {
namespace detail {
//#define _DEBUG_WEAK_CC
//
/**
* @brief Compute connected components.
* The weak version (for undirected graphs, only) was imported from cuML.
* This implementation comes from [1] and solves component labeling problem in
* parallel on CSR-indexes based upon the vertex degree and adjacency graph.
*
* [1] Hawick, K.A et al, 2010. "Parallel graph component labelling with GPUs and CUDA"
*
* The strong version (for directed or undirected graphs) is based on:
* [2] Gilbert, J. et al, 2011. "Graph Algorithms in the Language of Linear Algebra"
*
* C = I | A | A^2 |...| A^k
* where matrix multiplication is via semi-ring:
* (combine, reduce) == (&, |) (bitwise ops)
* Then: X = C & transpose(C); and finally, apply get_labels(X);
*
*
* @tparam IndexT the numeric type of non-floating point elements
* @tparam TPB_X the threads to use per block when configuring the kernel
* @param graph input graph; assumed undirected for weakly CC [in]
* @param table of 2 gdf_columns: output labels and vertex indices [out]
* @param connectivity_type CUGRAPH_WEAK or CUGRAPH_STRONG [in]
* @param stream the cuda stream [in]
*/
template<typename IndexT,
int TPB_X = 32>
std::enable_if_t<std::is_signed<IndexT>::value>
connected_components_impl(Graph *graph,
cudf::table *table,
cugraph_cc_t connectivity_type,
cudaStream_t stream)
{
using ByteT = unsigned char;//minimum addressable unit
static auto row_offsets_ = [](const Graph* G){
return static_cast<const IndexT*>(G->adjList->offsets->data);
};
static auto col_indices_ = [](const Graph* G){
return static_cast<const IndexT*>(G->adjList->indices->data);
};
static auto nrows_ = [](const Graph* G){
return G->adjList->offsets->size - 1;
};
static auto nnz_ = [](const Graph* G){
return G->adjList->indices->size;
};
gdf_column* labels = table->get_column(0);
gdf_column* verts = table->get_column(1);
CUGRAPH_EXPECTS(graph != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(graph->adjList != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(row_offsets_(graph) != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(col_indices_(graph) != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(labels->data != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(verts->data != nullptr, "Invalid API parameter");
auto type_id = graph->adjList->offsets->dtype;
CUGRAPH_EXPECTS( type_id == GDF_INT32 || type_id == GDF_INT64, "Unsupported data type");
CUGRAPH_EXPECTS( type_id == graph->adjList->indices->dtype, "Unsupported data type");
//TODO: relax this requirement:
//
CUGRAPH_EXPECTS( type_id == labels->dtype, "Unsupported data type");
IndexT* p_d_labels = static_cast<IndexT*>(labels->data);
IndexT* p_d_verts = static_cast<IndexT*>(verts->data);
const IndexT* p_d_row_offsets = row_offsets_(graph);
const IndexT* p_d_col_ind = col_indices_(graph);
IndexT nnz = nnz_(graph);
IndexT nrows = nrows_(graph);//static_cast<IndexT>(graph->adjList->offsets->size) - 1;
if( connectivity_type == CUGRAPH_WEAK )
{
// using VectorT = thrust::device_vector<IndexT>;
// VectorT d_ro(p_d_row_offsets, p_d_row_offsets + nrows + 1);
// VectorT d_ci(p_d_col_ind, p_d_col_ind + nnz);
#ifdef _DEBUG_WEAK_CC
IndexT last_elem{0};
cudaMemcpy((void*)(&last_elem), p_d_row_offsets+nrows, sizeof(IndexT), cudaMemcpyDeviceToHost);
std::cout<<"############## "
<<"nrows = "<<nrows
<<"; nnz = "<<nnz
<<"; nnz_ro = "<<last_elem
<<"; p_d_labels valid: "<<(p_d_labels != nullptr)
<<"; p_d_row_offsets valid: "<<(p_d_row_offsets != nullptr)
<<"; p_d_col_ind valid: " << (p_d_col_ind != nullptr)
<<"\n";
std::cout<<"############## d_ro:\n";
print_v(d_ro, std::cout);
std::cout<<"############## d_ci:\n";
print_v(d_ci, std::cout);
#endif
//check if graph is undirected; return w/ error, if not?
//Yes, for now; in the future we may remove this constraint;
//
bool is_symmetric = cugraph::detail::check_symmetry(nrows, p_d_row_offsets, nnz, p_d_col_ind);
#ifdef _DEBUG_WEAK_CC
std::cout<<"############## "
<<"; adj. matrix symmetric? " << is_symmetric
<<"\n";
#endif
CUGRAPH_EXPECTS( is_symmetric, "Invalid API parameter");
MLCommon::Sparse::weak_cc_entry<IndexT, TPB_X>(p_d_labels,
p_d_row_offsets,
p_d_col_ind,
nnz,
nrows,
stream);
}
else
{
//device memory requirements: 2n^2 + 2n x sizeof(IndexT) + 1 (for flag)
//( n = |V|)
//
size_t n2 = 2*nrows;
n2 = n2*(nrows*sizeof(ByteT) + sizeof(IndexT)) + 1;
int device;
cudaDeviceProp prop;
cudaGetDevice(&device);
cudaGetDeviceProperties(&prop, device);
if( n2 > prop.totalGlobalMem )
{
CUGRAPH_FAIL("ERROR: Insufficient device memory for SCC");
}
SCC_Data<ByteT, IndexT> sccd(nrows, p_d_row_offsets, p_d_col_ind);
sccd.run_scc(p_d_labels);
}
//fill the vertex indices column:
//
thrust::sequence(thrust::device, p_d_verts, p_d_verts + nrows);
}
/**
* @brief Compute connected components.
* The weak version (for undirected graphs, only) was imported from cuML.
* This implementation comes from [1] and solves component labeling problem in
* parallel on CSR-indexes based upon the vertex degree and adjacency graph.
*
* [1] Hawick, K.A et al, 2010. "Parallel graph component labelling with GPUs and CUDA"
*
* The strong version (for directed or undirected graphs) is based on:
* [2] Gilbert, J. et al, 2011. "Graph Algorithms in the Language of Linear Algebra"
*
* C = I | A | A^2 |...| A^k
* where matrix multiplication is via semi-ring:
* (combine, reduce) == (&, |) (bitwise ops)
* Then: X = C & transpose(C); and finally, apply get_labels(X);
*
*
* @param graph input graph; assumed undirected for weakly CC [in]
* @param connectivity_type CUGRAPH_WEAK or CUGRAPH_STRONG [in]
* @param table of 2 gdf_columns: output labels and vertex indices [out]
*/
}
void connected_components(Graph *graph,
cugraph_cc_t connectivity_type,
cudf::table *table)
{
cudaStream_t stream{nullptr};
CUGRAPH_EXPECTS(table != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(table->num_columns() > 1, "Invalid API parameter");
gdf_column* labels = table->get_column(0);
gdf_column* verts = table->get_column(1);
CUGRAPH_EXPECTS(labels != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(verts != nullptr, "Invalid API parameter");
auto dtype = labels->dtype;
CUGRAPH_EXPECTS( dtype == verts->dtype, "Invalid API parameter");
switch( dtype )//currently graph's row offsets, col_indices and labels are same type; that may change in the future
{
case GDF_INT32:
return detail::connected_components_impl<int32_t>(graph, table, connectivity_type, stream);
// case GDF_INT64:
//return gdf_connected_components_impl<int64_t>(graph, labels, connectivity_type, stream);
// PROBLEM: relies on atomicMin(), which won't work w/ int64_t
// should work with `unsigned long long` but using signed `Type`'s
//(initialized to `-1`)
default:
break;//warning eater
}
CUGRAPH_FAIL("Unsupported data type");
}
} //namespace
|
c0bda63f18ac956d0bcdc9779c18a1deca90f744.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cudaWorker.h>
#include <math.h>
#include <processor.h>
#include <iostream>
#include <string>
#include <vector>
__global__ void calculate(int row, char* data, int queryLength, char* query,
int* result) {
int move[8] = {0, 1, 1, 0, 1, 1, -1, -1};
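  // move packs (row delta, col delta) pairs for the four scan directions used below:
  // dir 0 = right (0,1), dir 1 = down (1,0), dir 2 = down-right (1,1), dir 3 = up-left (-1,-1)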
int col = row;
int threadCount = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int segment = row / threadCount;
int i = segment * tid;
int j = 0;
int iMax = segment * (tid + 1);
if (tid == (threadCount - 1)) {
iMax = row;
}
bool found = false;
while (!found && i < iMax) {
j = 0;
while (!found && j < col) {
// If first letter matched
if (query[0] == data[i * row + j]) {
// For all direction
for (int dir = 0; dir < 4; dir++) {
direction currDir = static_cast<direction>(dir);
int k = 0;
while (!found && k < queryLength) {
int tempRow = i + move[currDir * 2] * k;
int tempCol = j + move[currDir * 2 + 1] * k;
if (tempRow >= row || tempCol >= col || tempRow < 0 ||
tempCol < 0) {
break;
}
if (data[tempRow * row + tempCol] != query[k]) {
break;
}
if (k == queryLength - 1) {
found = true;
result[0] = true;
result[1] = i + 1;
result[2] = j + 1;
result[3] = currDir;
}
k++;
}
}
}
j++;
}
i++;
}
}
void docuda(char* cpuData, int row, std::string cpuQuery, int threadCount,
int* cpuResult) {
char* serialData;
char* query;
int* result;
hipMallocManaged(&serialData, row * row * sizeof(char));
  hipMallocManaged(&result, 4 * sizeof(int));
  // zero the result buffer so the "not found" case reads back well-defined values
  for (int i = 0; i < 4; i++) {
    result[i] = 0;
  }
hipMallocManaged(&query, cpuQuery.size() * sizeof(char));
for (int i = 0; i < cpuQuery.size(); i++) {
query[i] = cpuQuery[i];
}
for (int i = 0; i < row * row; i++) {
serialData[i] = cpuData[i];
}
hipDeviceProp_t props;
hipGetDeviceProperties(&props, 0);
std::cout << "Device info" << std::endl;
std::cout << props.name << ": " << props.major << "." << props.minor
<< std::endl;
std::cout << " Warp size : " << props.warpSize << std::endl;
std::cout << " Threads per block : " << props.maxThreadsPerBlock
<< std::endl;
std::cout << " SM (Processor) count : " << props.multiProcessorCount
<< std::endl;
int threadPerBlock = 128;
std::cout << "Thread per block (recommended 128/256): ";
std::cin >> threadPerBlock;
int blockCount = threadCount / threadPerBlock;
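  // note: integer division; if threadCount < threadPerBlock this yields 0 blocks and the
  // kernel launch does no work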
std::cout << "Using " << blockCount << " blocks" << std::endl;
hipLaunchKernelGGL(( calculate), dim3(blockCount), dim3(threadPerBlock), 0, 0, row, serialData, cpuQuery.size(),
query, result);
hipDeviceSynchronize();
for (int i = 0; i < 4; i++) {
// std::cout << result[i] << std::endl;
cpuResult[i] = result[i];
}
hipFree(serialData);
hipFree(query);
hipFree(result);
}
|
c0bda63f18ac956d0bcdc9779c18a1deca90f744.cu
|
#include <cudaWorker.h>
#include <math.h>
#include <processor.h>
#include <iostream>
#include <string>
#include <vector>
__global__ void calculate(int row, char* data, int queryLength, char* query,
int* result) {
int move[8] = {0, 1, 1, 0, 1, 1, -1, -1};
int col = row;
int threadCount = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int segment = row / threadCount;
int i = segment * tid;
int j = 0;
int iMax = segment * (tid + 1);
if (tid == (threadCount - 1)) {
iMax = row;
}
bool found = false;
while (!found && i < iMax) {
j = 0;
while (!found && j < col) {
// If first letter matched
if (query[0] == data[i * row + j]) {
// For all direction
for (int dir = 0; dir < 4; dir++) {
direction currDir = static_cast<direction>(dir);
int k = 0;
while (!found && k < queryLength) {
int tempRow = i + move[currDir * 2] * k;
int tempCol = j + move[currDir * 2 + 1] * k;
if (tempRow >= row || tempCol >= col || tempRow < 0 ||
tempCol < 0) {
break;
}
if (data[tempRow * row + tempCol] != query[k]) {
break;
}
if (k == queryLength - 1) {
found = true;
result[0] = true;
result[1] = i + 1;
result[2] = j + 1;
result[3] = currDir;
}
k++;
}
}
}
j++;
}
i++;
}
}
void docuda(char* cpuData, int row, std::string cpuQuery, int threadCount,
int* cpuResult) {
char* serialData;
char* query;
int* result;
cudaMallocManaged(&serialData, row * row * sizeof(char));
  cudaMallocManaged(&result, 4 * sizeof(int));
  // zero the result buffer so the "not found" case reads back well-defined values
  for (int i = 0; i < 4; i++) {
    result[i] = 0;
  }
cudaMallocManaged(&query, cpuQuery.size() * sizeof(char));
for (int i = 0; i < cpuQuery.size(); i++) {
query[i] = cpuQuery[i];
}
for (int i = 0; i < row * row; i++) {
serialData[i] = cpuData[i];
}
cudaDeviceProp props;
cudaGetDeviceProperties(&props, 0);
std::cout << "Device info" << std::endl;
std::cout << props.name << ": " << props.major << "." << props.minor
<< std::endl;
std::cout << " Warp size : " << props.warpSize << std::endl;
std::cout << " Threads per block : " << props.maxThreadsPerBlock
<< std::endl;
std::cout << " SM (Processor) count : " << props.multiProcessorCount
<< std::endl;
int threadPerBlock = 128;
std::cout << "Thread per block (recommended 128/256): ";
std::cin >> threadPerBlock;
int blockCount = threadCount / threadPerBlock;
std::cout << "Using " << blockCount << " blocks" << std::endl;
calculate<<<blockCount, threadPerBlock>>>(row, serialData, cpuQuery.size(),
query, result);
cudaDeviceSynchronize();
for (int i = 0; i < 4; i++) {
// std::cout << result[i] << std::endl;
cpuResult[i] = result[i];
}
cudaFree(serialData);
cudaFree(query);
cudaFree(result);
}
|
636dcd1f8a78064ae89efdf5b8c129108c9655f3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
#include "../include/ContAcq-IntClk.h"
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
float Value1;
float Value2;
float Value3;
float Value;
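 // Note: Value2 is read before it is first assigned; presumably acceptable here, since this
 // power microbenchmark only needs to generate arithmetic load, not meaningful results.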
float I1=A[i];
float I2=B[i];
// Excessive Addition access
if(((i%32)<=7) ){
for(unsigned k=0; k<ITERATIONS;k++) {
Value1=I1+I2;
Value3=I1-I2;
Value1+=Value2;
Value1+=Value2;
Value2=Value3-Value1;
Value1=Value2+Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value+Value2;
}
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
printf("after\n");
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
 hipLaunchKernelGGL((PowerKernal2), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
		data[i] = rand() / (float)RAND_MAX;   // cast avoids integer division always yielding 0
}
}
|
636dcd1f8a78064ae89efdf5b8c129108c9655f3.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
#include "../include/ContAcq-IntClk.h"
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
float Value1;
float Value2;
float Value3;
float Value;
float I1=A[i];
float I2=B[i];
// Excessive Addition access
if(((i%32)<=7) ){
for(unsigned k=0; k<ITERATIONS;k++) {
Value1=I1+I2;
Value3=I1-I2;
Value1+=Value2;
Value1+=Value2;
Value2=Value3-Value1;
Value1=Value2+Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value+Value2;
}
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
printf("after\n");
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
		data[i] = rand() / (float)RAND_MAX;   // cast avoids integer division always yielding 0
}
}
|
cd86590311cb37e2e5075de2be5e74f2b68325b1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#include "fmha.h"
#include "fmha_block_fprop_kernel_1xN.h"
template<typename Kernel_traits, bool Is_dropout, bool Is_causal, bool Return_softmax>
__global__ void fmha_block_fprop_fp16_sm80_loop_kernel(FMHA_fprop_params params) {
fmha::device_block_1xN_loop<Kernel_traits, Is_dropout, Is_causal, Return_softmax>(params);
}
template<typename Kernel_traits>
void run_fmha_block_fp16_sm80_loop_(Launch_params<FMHA_fprop_params> &launch_params,
const bool configure) {
bool is_causal = launch_params.params.is_causal;
// TD [2022-04-27]: This case work is pretty ugly, maybe there's a better way?
auto kernel = launch_params.is_dropout
? (is_causal
? (launch_params.return_softmax ? &fmha_block_fprop_fp16_sm80_loop_kernel<Kernel_traits, true, true, true> : &fmha_block_fprop_fp16_sm80_loop_kernel<Kernel_traits, true, true, false>)
: (launch_params.return_softmax ? &fmha_block_fprop_fp16_sm80_loop_kernel<Kernel_traits, true, false, true> : &fmha_block_fprop_fp16_sm80_loop_kernel<Kernel_traits, true, false, false>))
: (is_causal
? (launch_params.return_softmax ? &fmha_block_fprop_fp16_sm80_loop_kernel<Kernel_traits, false, true, true> : &fmha_block_fprop_fp16_sm80_loop_kernel<Kernel_traits, false, true, false>)
: (launch_params.return_softmax ? &fmha_block_fprop_fp16_sm80_loop_kernel<Kernel_traits, false, false, true> : &fmha_block_fprop_fp16_sm80_loop_kernel<Kernel_traits, false, false, false>));
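    // Editor's note: a possible (untested) alternative to the ternary cascade above is to index
    // a small table of kernel pointers by the three flags; sketch only, same kernel names as above.
#if 0
    using KernelFn = void (*)(FMHA_fprop_params);
    const KernelFn kernels[2][2][2] = {
        {{&fmha_block_fprop_fp16_sm80_loop_kernel<Kernel_traits, false, false, false>,
          &fmha_block_fprop_fp16_sm80_loop_kernel<Kernel_traits, false, false, true>},
         {&fmha_block_fprop_fp16_sm80_loop_kernel<Kernel_traits, false, true, false>,
          &fmha_block_fprop_fp16_sm80_loop_kernel<Kernel_traits, false, true, true>}},
        {{&fmha_block_fprop_fp16_sm80_loop_kernel<Kernel_traits, true, false, false>,
          &fmha_block_fprop_fp16_sm80_loop_kernel<Kernel_traits, true, false, true>},
         {&fmha_block_fprop_fp16_sm80_loop_kernel<Kernel_traits, true, true, false>,
          &fmha_block_fprop_fp16_sm80_loop_kernel<Kernel_traits, true, true, true>}}};
    auto kernel_alt = kernels[launch_params.is_dropout][is_causal][launch_params.return_softmax];
#endif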
constexpr int blocksize_c = Kernel_traits::Cta_tile_p::N;
const int loop_steps = (launch_params.params.seqlen_k + blocksize_c - 1) / blocksize_c;
constexpr int smem_size_softmax_lse = Kernel_traits::Smem_dp_sum::BYTES_PER_TILE;
// Don't need smem_size_softmax_lse if we're not looping
const int smem_size = fmha::get_dynamic_smem_size<Kernel_traits>()
+ (loop_steps > 1 ? smem_size_softmax_lse : 0);
if( smem_size >= 48 * 1024 ) {
FMHA_CHECK_CUDA(hipFuncSetAttribute(kernel, hipFuncAttributeMaxDynamicSharedMemorySize, smem_size));
}
if (configure) {
using Mma_tile_p = fmha::Hmma_tile<typename Kernel_traits::Cta_tile_p>;
constexpr int M = Kernel_traits::Cta_tile_p::M;
size_t STEPS = (launch_params.params.seqlen_q + M - 1) / M;
constexpr size_t MMAS_M = Mma_tile_p::MMAS_M;
constexpr size_t MMAS_N = Mma_tile_p::MMAS_N;
size_t elts_per_head = STEPS * MMAS_M * MMAS_N * 8 * loop_steps;
launch_params.elts_per_thread = elts_per_head;
return;
}
dim3 grid(launch_params.params.b, launch_params.params.h);
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(Kernel_traits::THREADS), smem_size, launch_params.stream,
launch_params.params);
FMHA_CHECK_CUDA(hipPeekAtLastError());
}
void run_fmha_block_fp16_sm80(Launch_params<FMHA_fprop_params> &launch_params,
const bool configure) {
if (launch_params.params.d == 16) {
using Kernel_traits = FMHA_kernel_traits<256, 16, 16, 1, 4, 0x08u>;
run_fmha_block_fp16_sm80_loop_<Kernel_traits>(launch_params, configure);
} else if (launch_params.params.d == 32) {
using Kernel_traits = FMHA_kernel_traits<256, 32, 16, 1, 4, 0x08u>;
run_fmha_block_fp16_sm80_loop_<Kernel_traits>(launch_params, configure);
} else if (launch_params.params.d == 64) {
using Kernel_traits = FMHA_kernel_traits<256, 64, 16, 1, 4, 0x08u>;
run_fmha_block_fp16_sm80_loop_<Kernel_traits>(launch_params, configure);
}
}
|
cd86590311cb37e2e5075de2be5e74f2b68325b1.cu
|
/******************************************************************************
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#include "fmha.h"
#include "fmha_block_fprop_kernel_1xN.h"
template<typename Kernel_traits, bool Is_dropout, bool Is_causal, bool Return_softmax>
__global__ void fmha_block_fprop_fp16_sm80_loop_kernel(FMHA_fprop_params params) {
fmha::device_block_1xN_loop<Kernel_traits, Is_dropout, Is_causal, Return_softmax>(params);
}
template<typename Kernel_traits>
void run_fmha_block_fp16_sm80_loop_(Launch_params<FMHA_fprop_params> &launch_params,
const bool configure) {
bool is_causal = launch_params.params.is_causal;
// TD [2022-04-27]: This case work is pretty ugly, maybe there's a better way?
auto kernel = launch_params.is_dropout
? (is_causal
? (launch_params.return_softmax ? &fmha_block_fprop_fp16_sm80_loop_kernel<Kernel_traits, true, true, true> : &fmha_block_fprop_fp16_sm80_loop_kernel<Kernel_traits, true, true, false>)
: (launch_params.return_softmax ? &fmha_block_fprop_fp16_sm80_loop_kernel<Kernel_traits, true, false, true> : &fmha_block_fprop_fp16_sm80_loop_kernel<Kernel_traits, true, false, false>))
: (is_causal
? (launch_params.return_softmax ? &fmha_block_fprop_fp16_sm80_loop_kernel<Kernel_traits, false, true, true> : &fmha_block_fprop_fp16_sm80_loop_kernel<Kernel_traits, false, true, false>)
: (launch_params.return_softmax ? &fmha_block_fprop_fp16_sm80_loop_kernel<Kernel_traits, false, false, true> : &fmha_block_fprop_fp16_sm80_loop_kernel<Kernel_traits, false, false, false>));
constexpr int blocksize_c = Kernel_traits::Cta_tile_p::N;
const int loop_steps = (launch_params.params.seqlen_k + blocksize_c - 1) / blocksize_c;
constexpr int smem_size_softmax_lse = Kernel_traits::Smem_dp_sum::BYTES_PER_TILE;
// Don't need smem_size_softmax_lse if we're not looping
const int smem_size = fmha::get_dynamic_smem_size<Kernel_traits>()
+ (loop_steps > 1 ? smem_size_softmax_lse : 0);
if( smem_size >= 48 * 1024 ) {
FMHA_CHECK_CUDA(cudaFuncSetAttribute(kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size));
}
if (configure) {
using Mma_tile_p = fmha::Hmma_tile<typename Kernel_traits::Cta_tile_p>;
constexpr int M = Kernel_traits::Cta_tile_p::M;
size_t STEPS = (launch_params.params.seqlen_q + M - 1) / M;
constexpr size_t MMAS_M = Mma_tile_p::MMAS_M;
constexpr size_t MMAS_N = Mma_tile_p::MMAS_N;
size_t elts_per_head = STEPS * MMAS_M * MMAS_N * 8 * loop_steps;
launch_params.elts_per_thread = elts_per_head;
return;
}
dim3 grid(launch_params.params.b, launch_params.params.h);
kernel<<<grid, Kernel_traits::THREADS, smem_size, launch_params.stream>>>(
launch_params.params);
FMHA_CHECK_CUDA(cudaPeekAtLastError());
}
void run_fmha_block_fp16_sm80(Launch_params<FMHA_fprop_params> &launch_params,
const bool configure) {
if (launch_params.params.d == 16) {
using Kernel_traits = FMHA_kernel_traits<256, 16, 16, 1, 4, 0x08u>;
run_fmha_block_fp16_sm80_loop_<Kernel_traits>(launch_params, configure);
} else if (launch_params.params.d == 32) {
using Kernel_traits = FMHA_kernel_traits<256, 32, 16, 1, 4, 0x08u>;
run_fmha_block_fp16_sm80_loop_<Kernel_traits>(launch_params, configure);
} else if (launch_params.params.d == 64) {
using Kernel_traits = FMHA_kernel_traits<256, 64, 16, 1, 4, 0x08u>;
run_fmha_block_fp16_sm80_loop_<Kernel_traits>(launch_params, configure);
}
}
|
31c31fad5523e38287e00eb8777abbcd000da20d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common.hpp>
#include <layers/slice_layer.hpp>
#include <utils.cuh>
#include <utils.hpp>
#ifndef NDEBUG
#include <iostream>
#endif
namespace HugeCTR {
namespace {
template <size_t length, typename T>
__device__ int array_length(T (&)[length]) {
return length;
}
template <typename T, typename... Args>
__global__ void slice_kernel(bool forward, T* in, const int h, const int in_w, const int virt_w,
const Args... args) {
const typename SliceLayer<T>::OutParam out_params[] = {args...};
const int n_outs = array_length(out_params);
for (int row = blockIdx.x; row < h; row += gridDim.x) {
for (int k = 0; k < n_outs; k++) {
int st = out_params[k].st;
int ed = out_params[k].ed;
int out_w = ed - st;
for (int out_col = threadIdx.x; out_col < out_w; out_col += blockDim.x) {
int in_col = out_col + st;
int in_idx = row * in_w + in_col;
int out_idx = row * out_w + out_col;
T* out = out_params[k].out;
if (forward) {
out[out_idx] = in[in_idx];
} else {
in[in_idx] += out[out_idx];
}
}
__syncthreads();
}
}
}
} // anonymous namespace
template <typename T>
SliceLayer<T>::SliceLayer(const Tensor2<T>& in_tensor, Tensors2<T>& out_tensors,
const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blobs_buff,
std::vector<std::pair<int, int>>& ranges,
const std::shared_ptr<GPUResource>& gpu_resource)
: Layer(gpu_resource), virt_w_(0) {
try {
if (ranges.empty()) {
CK_THROW_(Error_t::WrongInput, "Empty slice ranges is not allowed");
}
if (!out_tensors.empty()) {
CK_THROW_(Error_t::WrongInput, "output tensor vector must be empty");
}
auto in_dims = in_tensor.get_dimensions();
if (in_dims.size() != 2) {
CK_THROW_(Error_t::WrongInput, "Only 2D tensors can be concatenated");
}
size_t height = in_dims[0];
int in_w = in_dims[1];
int prev_min = -1;
int prev_max = 0;
for (auto& range : ranges) {
int cur_min = range.first;
int cur_max = range.second;
if (cur_min >= cur_max) {
CK_THROW_(Error_t::WrongInput, "Reverse range is not allowed");
}
if (cur_min < 0 || cur_max < 0) {
CK_THROW_(Error_t::WrongInput, "Negative ranges cannot be allowed");
}
if (!(prev_min <= cur_min && prev_max <= cur_max)) {
        CK_THROW_(Error_t::WrongInput, "A range cannot be out of order nor included in another");
}
if (cur_min >= in_w || cur_max > in_w) {
CK_THROW_(Error_t::WrongInput, "Ranges cannot be bigger than the input width");
}
size_t out_w = cur_max - cur_min;
std::vector<size_t> out_dims = {height, out_w};
{
Tensor2<T> tensor;
blobs_buff->reserve(out_dims, &tensor);
out_tensors.push_back(tensor);
}
sts_.push_back(cur_min);
virt_w_ += out_w;
prev_min = cur_min;
prev_max = cur_max;
}
in_tensors_.push_back(in_tensor);
for (auto& out_tensor : out_tensors) {
out_tensors_.push_back(out_tensor);
}
} catch (const std::runtime_error& rt_err) {
std::cerr << rt_err.what() << std::endl;
throw;
}
}
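// Example (editor's illustration): with an input tensor of shape (batch, 30) and
// ranges = {{0, 10}, {10, 30}}, the layer produces two output tensors of shapes
// (batch, 10) and (batch, 20); sts_ records the start column of each slice.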
template <typename T>
void SliceLayer<T>::fprop(bool is_train) {
prop_common(true, is_train, get_gpu().get_stream());
}
template <typename T>
void SliceLayer<T>::bprop() {
prop_common(false, true, get_gpu().get_stream());
}
template <typename T>
void SliceLayer<T>::prop_common(bool forward, bool is_train, hipStream_t stream) {
CudaDeviceContext context(get_device_id());
int n_out_tensors = out_tensors_.size();
if (n_out_tensors == 2) {
std::vector<OutParam> out_params = set_out_params(2);
kernel_launch(forward, is_train, stream, out_params[0], out_params[1]);
} else if (n_out_tensors == 3) {
std::vector<OutParam> out_params = set_out_params(3);
kernel_launch(forward, is_train, stream, out_params[0], out_params[1], out_params[2]);
} else if (n_out_tensors == 4) {
std::vector<OutParam> out_params = set_out_params(4);
kernel_launch(forward, is_train, stream, out_params[0], out_params[1], out_params[2],
out_params[3]);
} else if (n_out_tensors == 5) {
std::vector<OutParam> out_params = set_out_params(5);
kernel_launch(forward, is_train, stream, out_params[0], out_params[1], out_params[2],
out_params[3], out_params[4]);
} else {
CK_THROW_(Error_t::UnSupportedFormat, "Slicing into > 5 layers is not supported");
}
#ifndef NDEBUG
hipDeviceSynchronize();
CK_CUDA_THROW_(hipGetLastError());
#endif
}
template <typename T>
std::vector<typename SliceLayer<T>::OutParam> SliceLayer<T>::set_out_params(int n) {
std::vector<OutParam> out_params;
for (int i = 0; i < n; i++) {
Tensor2<T>& out_tensor = out_tensors_[i];
T* out = out_tensor.get_ptr();
int st = sts_[i];
int w = out_tensor.get_dimensions()[1];
out_params.push_back({out, st, st + w});
}
return out_params;
}
template <typename T>
template <typename... Args>
void SliceLayer<T>::kernel_launch(bool forward, bool is_train, hipStream_t stream, Args&... args) {
int block_size = 512;
int n_blocks = get_gpu().get_sm_count() * 4;
Tensor2<T>& in_tensor = get_in_tensors(is_train)[0];
T* in = in_tensor.get_ptr();
int h = in_tensor.get_dimensions()[0];
int in_w = in_tensor.get_dimensions()[1];
if (!forward) {
hipLaunchKernelGGL(( initialize_array), dim3(n_blocks), dim3(block_size), 0, stream, in, h * in_w, T(0));
}
hipLaunchKernelGGL(( slice_kernel), dim3(n_blocks), dim3(block_size), 0, stream, forward, in, h, in_w, virt_w_, args...);
}
template class SliceLayer<float>;
template class SliceLayer<__half>;
} // namespace HugeCTR
|
31c31fad5523e38287e00eb8777abbcd000da20d.cu
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common.hpp>
#include <layers/slice_layer.hpp>
#include <utils.cuh>
#include <utils.hpp>
#ifndef NDEBUG
#include <iostream>
#endif
namespace HugeCTR {
namespace {
template <size_t length, typename T>
__device__ int array_length(T (&)[length]) {
return length;
}
template <typename T, typename... Args>
__global__ void slice_kernel(bool forward, T* in, const int h, const int in_w, const int virt_w,
const Args... args) {
const typename SliceLayer<T>::OutParam out_params[] = {args...};
const int n_outs = array_length(out_params);
for (int row = blockIdx.x; row < h; row += gridDim.x) {
for (int k = 0; k < n_outs; k++) {
int st = out_params[k].st;
int ed = out_params[k].ed;
int out_w = ed - st;
for (int out_col = threadIdx.x; out_col < out_w; out_col += blockDim.x) {
int in_col = out_col + st;
int in_idx = row * in_w + in_col;
int out_idx = row * out_w + out_col;
T* out = out_params[k].out;
if (forward) {
out[out_idx] = in[in_idx];
} else {
in[in_idx] += out[out_idx];
}
}
__syncthreads();
}
}
}
} // anonymous namespace
template <typename T>
SliceLayer<T>::SliceLayer(const Tensor2<T>& in_tensor, Tensors2<T>& out_tensors,
const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blobs_buff,
std::vector<std::pair<int, int>>& ranges,
const std::shared_ptr<GPUResource>& gpu_resource)
: Layer(gpu_resource), virt_w_(0) {
try {
if (ranges.empty()) {
CK_THROW_(Error_t::WrongInput, "Empty slice ranges is not allowed");
}
if (!out_tensors.empty()) {
CK_THROW_(Error_t::WrongInput, "output tensor vector must be empty");
}
auto in_dims = in_tensor.get_dimensions();
if (in_dims.size() != 2) {
CK_THROW_(Error_t::WrongInput, "Only 2D tensors can be concatenated");
}
size_t height = in_dims[0];
int in_w = in_dims[1];
int prev_min = -1;
int prev_max = 0;
for (auto& range : ranges) {
int cur_min = range.first;
int cur_max = range.second;
if (cur_min >= cur_max) {
CK_THROW_(Error_t::WrongInput, "Reverse range is not allowed");
}
if (cur_min < 0 || cur_max < 0) {
CK_THROW_(Error_t::WrongInput, "Negative ranges cannot be allowed");
}
if (!(prev_min <= cur_min && prev_max <= cur_max)) {
        CK_THROW_(Error_t::WrongInput, "A range cannot be out of order nor included in another");
}
if (cur_min >= in_w || cur_max > in_w) {
CK_THROW_(Error_t::WrongInput, "Ranges cannot be bigger than the input width");
}
size_t out_w = cur_max - cur_min;
std::vector<size_t> out_dims = {height, out_w};
{
Tensor2<T> tensor;
blobs_buff->reserve(out_dims, &tensor);
out_tensors.push_back(tensor);
}
sts_.push_back(cur_min);
virt_w_ += out_w;
prev_min = cur_min;
prev_max = cur_max;
}
in_tensors_.push_back(in_tensor);
for (auto& out_tensor : out_tensors) {
out_tensors_.push_back(out_tensor);
}
} catch (const std::runtime_error& rt_err) {
std::cerr << rt_err.what() << std::endl;
throw;
}
}
template <typename T>
void SliceLayer<T>::fprop(bool is_train) {
prop_common(true, is_train, get_gpu().get_stream());
}
template <typename T>
void SliceLayer<T>::bprop() {
prop_common(false, true, get_gpu().get_stream());
}
template <typename T>
void SliceLayer<T>::prop_common(bool forward, bool is_train, cudaStream_t stream) {
CudaDeviceContext context(get_device_id());
int n_out_tensors = out_tensors_.size();
if (n_out_tensors == 2) {
std::vector<OutParam> out_params = set_out_params(2);
kernel_launch(forward, is_train, stream, out_params[0], out_params[1]);
} else if (n_out_tensors == 3) {
std::vector<OutParam> out_params = set_out_params(3);
kernel_launch(forward, is_train, stream, out_params[0], out_params[1], out_params[2]);
} else if (n_out_tensors == 4) {
std::vector<OutParam> out_params = set_out_params(4);
kernel_launch(forward, is_train, stream, out_params[0], out_params[1], out_params[2],
out_params[3]);
} else if (n_out_tensors == 5) {
std::vector<OutParam> out_params = set_out_params(5);
kernel_launch(forward, is_train, stream, out_params[0], out_params[1], out_params[2],
out_params[3], out_params[4]);
} else {
CK_THROW_(Error_t::UnSupportedFormat, "Slicing into > 5 layers is not supported");
}
#ifndef NDEBUG
cudaDeviceSynchronize();
CK_CUDA_THROW_(cudaGetLastError());
#endif
}
template <typename T>
std::vector<typename SliceLayer<T>::OutParam> SliceLayer<T>::set_out_params(int n) {
std::vector<OutParam> out_params;
for (int i = 0; i < n; i++) {
Tensor2<T>& out_tensor = out_tensors_[i];
T* out = out_tensor.get_ptr();
int st = sts_[i];
int w = out_tensor.get_dimensions()[1];
out_params.push_back({out, st, st + w});
}
return out_params;
}
template <typename T>
template <typename... Args>
void SliceLayer<T>::kernel_launch(bool forward, bool is_train, cudaStream_t stream, Args&... args) {
int block_size = 512;
int n_blocks = get_gpu().get_sm_count() * 4;
Tensor2<T>& in_tensor = get_in_tensors(is_train)[0];
T* in = in_tensor.get_ptr();
int h = in_tensor.get_dimensions()[0];
int in_w = in_tensor.get_dimensions()[1];
if (!forward) {
initialize_array<<<n_blocks, block_size, 0, stream>>>(in, h * in_w, T(0));
}
slice_kernel<<<n_blocks, block_size, 0, stream>>>(forward, in, h, in_w, virt_w_, args...);
}
template class SliceLayer<float>;
template class SliceLayer<__half>;
} // namespace HugeCTR
|
31cb283b61eb12efd6ab78313786dcf376e6da26.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <mpi.h>
#include <cstdio>
__global__ void GPU_Kernel(int *send) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
send[i] = 1;
}
#define N (2048*2048)
#define M 512
int main(int argc, char **argv) {
int mpisize, mpirank;
int size = N * sizeof(int);
int *send = (int *)malloc(size);
int *recv = (int *)malloc(size);
int *d_send, *d_recv;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &mpisize);
MPI_Comm_rank(MPI_COMM_WORLD, &mpirank);
hipSetDevice(mpirank % mpisize);
hipMalloc((void **) &d_send, size);
hipMalloc((void **) &d_recv, size);
hipMemcpy(d_send, send, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( GPU_Kernel), dim3(N/M),dim3(M), 0, 0, d_send);
hipMemcpy(send, d_send, size, hipMemcpyDeviceToHost);
int sendrank = (mpirank + 1) % mpisize;
int recvrank = (mpirank - 1 + mpisize) % mpisize;
MPI_Request reqs[2];
MPI_Status stats[2];
MPI_Isend(send, N, MPI_INT, sendrank, 0, MPI_COMM_WORLD, &reqs[0]);
MPI_Irecv(recv, N, MPI_INT, recvrank, 0, MPI_COMM_WORLD, &reqs[1]);
MPI_Waitall(2, reqs, stats);
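  // GPU_Kernel set every element of send to 1, so after the ring exchange each rank
  // expects sum == N below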
int sum = 0;
for (int i=0; i<N; i++)
sum += recv[i];
for (int irank=0; irank<mpisize; irank++) {
MPI_Barrier(MPI_COMM_WORLD);
if (mpirank == irank) {
printf("rank%d: sum=%d, N=%d\n", mpirank, sum, N);
}
}
free(send); free(recv);
hipFree(d_send); hipFree(d_recv);
MPI_Finalize();
}
|
31cb283b61eb12efd6ab78313786dcf376e6da26.cu
|
#include <mpi.h>
#include <cstdio>
__global__ void GPU_Kernel(int *send) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
send[i] = 1;
}
#define N (2048*2048)
#define M 512
int main(int argc, char **argv) {
int mpisize, mpirank;
int size = N * sizeof(int);
int *send = (int *)malloc(size);
int *recv = (int *)malloc(size);
int *d_send, *d_recv;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &mpisize);
MPI_Comm_rank(MPI_COMM_WORLD, &mpirank);
cudaSetDevice(mpirank % mpisize);
cudaMalloc((void **) &d_send, size);
cudaMalloc((void **) &d_recv, size);
cudaMemcpy(d_send, send, size, cudaMemcpyHostToDevice);
GPU_Kernel<<<N/M,M>>>(d_send);
cudaMemcpy(send, d_send, size, cudaMemcpyDeviceToHost);
int sendrank = (mpirank + 1) % mpisize;
int recvrank = (mpirank - 1 + mpisize) % mpisize;
MPI_Request reqs[2];
MPI_Status stats[2];
MPI_Isend(send, N, MPI_INT, sendrank, 0, MPI_COMM_WORLD, &reqs[0]);
MPI_Irecv(recv, N, MPI_INT, recvrank, 0, MPI_COMM_WORLD, &reqs[1]);
MPI_Waitall(2, reqs, stats);
int sum = 0;
for (int i=0; i<N; i++)
sum += recv[i];
for (int irank=0; irank<mpisize; irank++) {
MPI_Barrier(MPI_COMM_WORLD);
if (mpirank == irank) {
printf("rank%d: sum=%d, N=%d\n", mpirank, sum, N);
}
}
free(send); free(recv);
cudaFree(d_send); cudaFree(d_recv);
MPI_Finalize();
}
|
c884b76ab0efc4f1aedab4f32925da749340850f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* inference-101
*/
#include "cudaYUV.h"
inline __device__ void rgb_to_y(const uint8_t r, const uint8_t g, const uint8_t b, uint8_t& y)
{
y = static_cast<uint8_t>(((int)(30 * r) + (int)(59 * g) + (int)(11 * b)) / 100);
}
inline __device__ void rgb_to_yuv(const uint8_t r, const uint8_t g, const uint8_t b, uint8_t& y, uint8_t& u, uint8_t& v)
{
rgb_to_y(r, g, b, y);
u = static_cast<uint8_t>(((int)(-17 * r) - (int)(33 * g) + (int)(50 * b) + 12800) / 100);
v = static_cast<uint8_t>(((int)(50 * r) - (int)(42 * g) - (int)(8 * b) + 12800) / 100);
}
template <typename T, bool formatYV12>
__global__ void RGB_to_YV12( T* src, int srcAlignedWidth, uint8_t* dst, int dstPitch, int width, int height )
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
const int x1 = x + 1;
const int y1 = y + 1;
if( x1 >= width || y1 >= height )
return;
const int planeSize = height * dstPitch;
uint8_t* y_plane = dst;
uint8_t* u_plane;
uint8_t* v_plane;
if( formatYV12 )
{
u_plane = y_plane + planeSize;
v_plane = u_plane + (planeSize / 4); // size of U & V planes is 25% of Y plane
}
else
{
v_plane = y_plane + planeSize; // in I420, order of U & V planes is reversed
u_plane = v_plane + (planeSize / 4);
}
T px;
uint8_t y_val, u_val, v_val;
px = src[y * srcAlignedWidth + x];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y * dstPitch + x] = y_val;
px = src[y * srcAlignedWidth + x1];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y * dstPitch + x1] = y_val;
px = src[y1 * srcAlignedWidth + x];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y1 * dstPitch + x] = y_val;
px = src[y1 * srcAlignedWidth + x1];
rgb_to_yuv(px.x, px.y, px.z, y_val, u_val, v_val);
y_plane[y1 * dstPitch + x1] = y_val;
const int uvPitch = dstPitch / 2;
const int uvIndex = (y / 2) * uvPitch + (x / 2);
u_plane[uvIndex] = u_val;
v_plane[uvIndex] = v_val;
}
template<typename T, bool formatYV12>
hipError_t launch420( T* input, size_t inputPitch, uint8_t* output, size_t outputPitch, size_t width, size_t height)
{
if( !input || !inputPitch || !output || !outputPitch || !width || !height )
return hipErrorInvalidValue;
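	// each thread of RGB_to_YV12 writes a 2x2 pixel block, hence the *2 factors in the grid size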
const dim3 block(32, 8);
const dim3 grid(iDivUp(width, block.x * 2), iDivUp(height, block.y * 2));
const int inputAlignedWidth = inputPitch / sizeof(T);
hipLaunchKernelGGL(( RGB_to_YV12<T, formatYV12>), dim3(grid), dim3(block), 0, 0, input, inputAlignedWidth, output, outputPitch, width, height);
return CUDA(hipGetLastError());
}
// cudaRGBAToYV12
hipError_t cudaRGBAToYV12( uchar4* input, size_t inputPitch, uint8_t* output, size_t outputPitch, size_t width, size_t height )
{
return launch420<uchar4,false>( input, inputPitch, output, outputPitch, width, height );
}
// cudaRGBAToYV12
hipError_t cudaRGBAToYV12( uchar4* input, uint8_t* output, size_t width, size_t height )
{
return cudaRGBAToYV12( input, width * sizeof(uchar4), output, width * sizeof(uint8_t), width, height );
}
// cudaRGBAToI420
hipError_t cudaRGBAToI420( uchar4* input, size_t inputPitch, uint8_t* output, size_t outputPitch, size_t width, size_t height )
{
return launch420<uchar4,true>( input, inputPitch, output, outputPitch, width, height );
}
// cudaRGBAToI420
hipError_t cudaRGBAToI420( uchar4* input, uint8_t* output, size_t width, size_t height )
{
return cudaRGBAToI420( input, width * sizeof(uchar4), output, width * sizeof(uint8_t), width, height );
}
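// Editor's note: a minimal usage sketch (assumptions: a valid device/context and an image of
// `width` x `height` pixels); the YV12 buffer needs width*height*3/2 bytes, i.e. the Y plane
// plus the quarter-size U and V planes laid out by RGB_to_YV12 above.
#if 0
static hipError_t exampleConvert( size_t width, size_t height )
{
	uchar4*  d_rgba = NULL;
	uint8_t* d_yv12 = NULL;
	hipMalloc((void**)&d_rgba, width * height * sizeof(uchar4));
	hipMalloc((void**)&d_yv12, width * height * 3 / 2);
	// ... fill d_rgba with image data ...
	const hipError_t res = cudaRGBAToYV12(d_rgba, d_yv12, width, height);
	hipFree(d_rgba);
	hipFree(d_yv12);
	return res;
}
#endif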
#if 0
__global__ void Gray_to_YV12(const GlobPtrSz<uint8_t> src, GlobPtr<uint8_t> dst)
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
if (x + 1 >= src.cols || y + 1 >= src.rows)
return;
// get pointers to the data
const size_t planeSize = src.rows * dst.step;
GlobPtr<uint8_t> y_plane = globPtr(dst.data, dst.step);
GlobPtr<uint8_t> u_plane = globPtr(y_plane.data + planeSize, dst.step / 2);
GlobPtr<uint8_t> v_plane = globPtr(u_plane.data + (planeSize / 4), dst.step / 2);
uint8_t pix;
uint8_t y_val, u_val, v_val;
pix = src(y, x);
rgb_to_y(pix, pix, pix, y_val);
y_plane(y, x) = y_val;
pix = src(y, x + 1);
rgb_to_y(pix, pix, pix, y_val);
y_plane(y, x + 1) = y_val;
pix = src(y + 1, x);
rgb_to_y(pix, pix, pix, y_val);
y_plane(y + 1, x) = y_val;
pix = src(y + 1, x + 1);
rgb_to_yuv(pix, pix, pix, y_val, u_val, v_val);
y_plane(y + 1, x + 1) = y_val;
u_plane(y / 2, x / 2) = u_val;
v_plane(y / 2, x / 2) = v_val;
}
#endif
|
c884b76ab0efc4f1aedab4f32925da749340850f.cu
|
/*
* inference-101
*/
#include "cudaYUV.h"
inline __device__ void rgb_to_y(const uint8_t r, const uint8_t g, const uint8_t b, uint8_t& y)
{
y = static_cast<uint8_t>(((int)(30 * r) + (int)(59 * g) + (int)(11 * b)) / 100);
}
inline __device__ void rgb_to_yuv(const uint8_t r, const uint8_t g, const uint8_t b, uint8_t& y, uint8_t& u, uint8_t& v)
{
rgb_to_y(r, g, b, y);
u = static_cast<uint8_t>(((int)(-17 * r) - (int)(33 * g) + (int)(50 * b) + 12800) / 100);
v = static_cast<uint8_t>(((int)(50 * r) - (int)(42 * g) - (int)(8 * b) + 12800) / 100);
}
template <typename T, bool formatYV12>
__global__ void RGB_to_YV12( T* src, int srcAlignedWidth, uint8_t* dst, int dstPitch, int width, int height )
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
const int x1 = x + 1;
const int y1 = y + 1;
if( x1 >= width || y1 >= height )
return;
const int planeSize = height * dstPitch;
uint8_t* y_plane = dst;
uint8_t* u_plane;
uint8_t* v_plane;
if( formatYV12 )
{
u_plane = y_plane + planeSize;
v_plane = u_plane + (planeSize / 4); // size of U & V planes is 25% of Y plane
}
else
{
v_plane = y_plane + planeSize; // in I420, order of U & V planes is reversed
u_plane = v_plane + (planeSize / 4);
}
T px;
uint8_t y_val, u_val, v_val;
px = src[y * srcAlignedWidth + x];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y * dstPitch + x] = y_val;
px = src[y * srcAlignedWidth + x1];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y * dstPitch + x1] = y_val;
px = src[y1 * srcAlignedWidth + x];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y1 * dstPitch + x] = y_val;
px = src[y1 * srcAlignedWidth + x1];
rgb_to_yuv(px.x, px.y, px.z, y_val, u_val, v_val);
y_plane[y1 * dstPitch + x1] = y_val;
const int uvPitch = dstPitch / 2;
const int uvIndex = (y / 2) * uvPitch + (x / 2);
u_plane[uvIndex] = u_val;
v_plane[uvIndex] = v_val;
}
template<typename T, bool formatYV12>
cudaError_t launch420( T* input, size_t inputPitch, uint8_t* output, size_t outputPitch, size_t width, size_t height)
{
if( !input || !inputPitch || !output || !outputPitch || !width || !height )
return cudaErrorInvalidValue;
const dim3 block(32, 8);
const dim3 grid(iDivUp(width, block.x * 2), iDivUp(height, block.y * 2));
const int inputAlignedWidth = inputPitch / sizeof(T);
RGB_to_YV12<T, formatYV12><<<grid, block>>>(input, inputAlignedWidth, output, outputPitch, width, height);
return CUDA(cudaGetLastError());
}
// cudaRGBAToYV12
cudaError_t cudaRGBAToYV12( uchar4* input, size_t inputPitch, uint8_t* output, size_t outputPitch, size_t width, size_t height )
{
return launch420<uchar4,false>( input, inputPitch, output, outputPitch, width, height );
}
// cudaRGBAToYV12
cudaError_t cudaRGBAToYV12( uchar4* input, uint8_t* output, size_t width, size_t height )
{
return cudaRGBAToYV12( input, width * sizeof(uchar4), output, width * sizeof(uint8_t), width, height );
}
// cudaRGBAToI420
cudaError_t cudaRGBAToI420( uchar4* input, size_t inputPitch, uint8_t* output, size_t outputPitch, size_t width, size_t height )
{
return launch420<uchar4,true>( input, inputPitch, output, outputPitch, width, height );
}
// cudaRGBAToI420
cudaError_t cudaRGBAToI420( uchar4* input, uint8_t* output, size_t width, size_t height )
{
return cudaRGBAToI420( input, width * sizeof(uchar4), output, width * sizeof(uint8_t), width, height );
}
#if 0
__global__ void Gray_to_YV12(const GlobPtrSz<uint8_t> src, GlobPtr<uint8_t> dst)
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
if (x + 1 >= src.cols || y + 1 >= src.rows)
return;
// get pointers to the data
const size_t planeSize = src.rows * dst.step;
GlobPtr<uint8_t> y_plane = globPtr(dst.data, dst.step);
GlobPtr<uint8_t> u_plane = globPtr(y_plane.data + planeSize, dst.step / 2);
GlobPtr<uint8_t> v_plane = globPtr(u_plane.data + (planeSize / 4), dst.step / 2);
uint8_t pix;
uint8_t y_val, u_val, v_val;
pix = src(y, x);
rgb_to_y(pix, pix, pix, y_val);
y_plane(y, x) = y_val;
pix = src(y, x + 1);
rgb_to_y(pix, pix, pix, y_val);
y_plane(y, x + 1) = y_val;
pix = src(y + 1, x);
rgb_to_y(pix, pix, pix, y_val);
y_plane(y + 1, x) = y_val;
pix = src(y + 1, x + 1);
rgb_to_yuv(pix, pix, pix, y_val, u_val, v_val);
y_plane(y + 1, x + 1) = y_val;
u_plane(y / 2, x / 2) = u_val;
v_plane(y / 2, x / 2) = v_val;
}
#endif
|
5ce808e0157a7f4bdc70f2d67b2a284ff1024f14.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: Lin Yang, Alex Travesset
// Previous Maintainer: Morozov
#include "EAMForceGPU_hip.cuh"
#include "hoomd/TextureTools.h"
#include <assert.h>
/*! \file EAMForceGPU.cu
\brief Defines GPU kernel code for calculating the EAM forces. Used by EAMForceComputeGPU.
*/
//! Texture for reading particle positions
scalar4_tex_t pdata_pos_tex;
//! Texture for reading the neighbor list
texture<unsigned int, 1, hipReadModeElementType> nlist_tex;
//! Texture for reading potential
scalar4_tex_t tex_F;
scalar4_tex_t tex_rho;
scalar4_tex_t tex_rphi;
scalar4_tex_t tex_dF;
scalar4_tex_t tex_drho;
scalar4_tex_t tex_drphi;
//! Texture for dF/dP
scalar_tex_t tex_dFdP;
//! Storage space for EAM parameters on the GPU
__constant__ EAMTexInterData eam_data_ti;
//! Kernel for computing EAM forces on the GPU
template<unsigned char use_gmem_nlist>
__global__ void gpu_kernel_1(Scalar4 *d_force, Scalar *d_virial, const unsigned int virial_pitch, const unsigned int N,
const Scalar4 *d_pos, BoxDim box, const unsigned int *d_n_neigh, const unsigned int *d_nlist,
const unsigned int *d_head_list, const Scalar4 *d_F, const Scalar4 *d_rho, const Scalar4 *d_rphi,
const Scalar4 *d_dF, const Scalar4 *d_drho, const Scalar4 *d_drphi, Scalar *d_dFdP)
{
// start by identifying which particle we are to handle
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
// load in the length of the list
int n_neigh = d_n_neigh[idx];
const unsigned int head_idx = d_head_list[idx];
// read in the position of our particle.
Scalar4 postype = texFetchScalar4(d_pos, pdata_pos_tex, idx);
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
// index and remainder
Scalar position;// look up position, scalar
unsigned int int_position;// look up index for position, integer
unsigned int idxs;// look up index in F, rho, rphi array, considering shift, integer
Scalar remainder;// look up remainder in array, integer
Scalar4 v, dv;// value, d(value)
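    // each Scalar4 read from the tables packs cubic-interpolation coefficients:
    // value = w + z*t + y*t^2 + x*t^3 and derivative = z + y*t + x*t^2, with t = remainder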
// initialize the force to 0
Scalar4 force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
// prefetch neighbor index
int cur_neigh = 0;
int next_neigh(0);
if (use_gmem_nlist)
{
next_neigh = d_nlist[head_idx];
}
else
{
next_neigh = texFetchUint(d_nlist, nlist_tex, head_idx);
}
int typei = __scalar_as_int(postype.w);
// loop over neighbors
Scalar atomElectronDensity = Scalar(0.0);
int ntypes = eam_data_ti.ntypes;
int nrho = eam_data_ti.nrho;
int nr = eam_data_ti.nr;
Scalar rdrho = eam_data_ti.rdrho;
Scalar rdr = eam_data_ti.rdr;
Scalar r_cutsq = eam_data_ti.r_cutsq;
for (int neigh_idx = 0; neigh_idx < n_neigh; neigh_idx++)
{
// read the current neighbor index
// prefetch the next value and set the current one
cur_neigh = next_neigh;
if (use_gmem_nlist)
{
next_neigh = d_nlist[head_idx + neigh_idx + 1];
}
else
{
next_neigh = texFetchUint(d_nlist, nlist_tex, head_idx + neigh_idx + 1);
}
// get the neighbor's position
Scalar4 neigh_postype = texFetchScalar4(d_pos, pdata_pos_tex, cur_neigh);
Scalar3 neigh_pos = make_scalar3(neigh_postype.x, neigh_postype.y, neigh_postype.z);
// calculate dr (with periodic boundary conditions)
Scalar3 dx = pos - neigh_pos;
int typej = __scalar_as_int(neigh_postype.w);
// apply periodic boundary conditions
dx = box.minImage(dx);
// calculate r squared
        Scalar rsq = dot(dx, dx);
if (rsq < r_cutsq)
{
// calculate position r for rho(r)
position = sqrtf(rsq) * rdr;
int_position = (unsigned int) position;
int_position = min(int_position, nr - 1);
remainder = position - int_position;
// calculate P = sum{rho}
idxs = int_position + nr * (typej * ntypes + typei);
v = texFetchScalar4(d_rho, tex_rho, idxs);
atomElectronDensity += v.w + v.z * remainder + v.y * remainder * remainder
+ v.x * remainder * remainder * remainder;
}
}
// calculate position rho for F(rho)
position = atomElectronDensity * rdrho;
int_position = (unsigned int) position;
int_position = min(int_position, nrho - 1);
remainder = position - int_position;
idxs = int_position + typei * nrho;
dv = texFetchScalar4(d_dF, tex_dF, idxs);
v = texFetchScalar4(d_F, tex_F, idxs);
// compute dF / dP
d_dFdP[idx] = dv.z + dv.y * remainder + dv.x * remainder * remainder;
// compute embedded energy F(P), sum up each particle
force.w += v.w + v.z * remainder + v.y * remainder * remainder + v.x * remainder * remainder * remainder;
// update the d_force
d_force[idx] = force;
}
//! Second stage kernel for computing EAM forces on the GPU
template<unsigned char use_gmem_nlist>
__global__ void gpu_kernel_2(Scalar4 *d_force, Scalar *d_virial, const unsigned int virial_pitch, const unsigned int N,
const Scalar4 *d_pos, BoxDim box, const unsigned int *d_n_neigh, const unsigned int *d_nlist,
const unsigned int *d_head_list, const Scalar4 *d_F, const Scalar4 *d_rho, const Scalar4 *d_rphi,
const Scalar4 *d_dF, const Scalar4 *d_drho, const Scalar4 *d_drphi, Scalar *d_dFdP)
{
// start by identifying which particle we are to handle
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
// load in the length of the list
int n_neigh = d_n_neigh[idx];
const unsigned int head_idx = d_head_list[idx];
// read in the position of our particle. Texture reads of Scalar4's are faster than global reads
Scalar4 postype = texFetchScalar4(d_pos, pdata_pos_tex, idx);
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
int typei = __scalar_as_int(postype.w);
// index and remainder
Scalar position;// look up position, scalar
unsigned int int_position;// look up index for position, integer
unsigned int idxs;// look up index in F, rho, rphi array, considering shift, integer
    Scalar remainder;// fractional remainder within the table interval, scalar
Scalar4 v, dv;// value, d(value)
// prefetch neighbor index
int cur_neigh = 0;
int next_neigh(0);
if (use_gmem_nlist)
{
next_neigh = d_nlist[head_idx];
}
else
{
next_neigh = texFetchUint(d_nlist, nlist_tex, head_idx);
}
    // initialize the force to 0; the embedding energy from gpu_kernel_1 is re-read into force.w below
    Scalar4 force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
Scalar fxi = Scalar(0.0);
Scalar fyi = Scalar(0.0);
Scalar fzi = Scalar(0.0);
Scalar m_pe = Scalar(0.0);
Scalar pairForce = Scalar(0.0);
Scalar virial[6];
for (int i = 0; i < 6; i++)
virial[i] = Scalar(0.0);
force.w = d_force[idx].w;
int ntypes = eam_data_ti.ntypes;
int nr = eam_data_ti.nr;
Scalar rdr = eam_data_ti.rdr;
Scalar r_cutsq = eam_data_ti.r_cutsq;
Scalar d_dFdPidx = texFetchScalar(d_dFdP, tex_dFdP, idx);
for (int neigh_idx = 0; neigh_idx < n_neigh; neigh_idx++)
{
cur_neigh = next_neigh;
if (use_gmem_nlist)
{
next_neigh = d_nlist[head_idx + neigh_idx + 1];
}
else
{
next_neigh = texFetchUint(d_nlist, nlist_tex, head_idx + neigh_idx + 1);
}
// get the neighbor's position
Scalar4 neigh_postype = texFetchScalar4(d_pos, pdata_pos_tex, cur_neigh);
Scalar3 neigh_pos = make_scalar3(neigh_postype.x, neigh_postype.y, neigh_postype.z);
// calculate dr (with periodic boundary conditions)
Scalar3 dx = pos - neigh_pos;
int typej = __scalar_as_int(neigh_postype.w);
// apply periodic boundary conditions
dx = box.minImage(dx);
// calculate r squared
Scalar rsq = dot(dx, dx);
if (rsq > r_cutsq)
continue;
// calculate position r for phi(r)
Scalar inverseR = rsqrtf(rsq);
Scalar r = Scalar(1.0) / inverseR;
position = r * rdr;
int_position = (unsigned int) position;
int_position = min(int_position, nr - 1);
remainder = position - int_position;
// calculate the shift position for type ij
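        // (r*phi is tabulated once per unordered type pair: ntypes*(ntypes+1)/2 blocks of length nr, packed in triangular order)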
int shift =
(typei >= typej) ?
(int) (0.5 * (2 * ntypes - typej - 1) * typej + typei) * nr :
(int) (0.5 * (2 * ntypes - typei - 1) * typei + typej) * nr;
idxs = int_position + shift;
v = texFetchScalar4(d_rphi, tex_rphi, idxs);
dv = texFetchScalar4(d_drphi, tex_drphi, idxs);
// aspair_potential = r * phi
Scalar aspair_potential = v.w + v.z * remainder + v.y * remainder * remainder
+ v.x * remainder * remainder * remainder;
// derivative_pair_potential = phi + r * dphi / dr
Scalar derivative_pair_potential = dv.z + dv.y * remainder + dv.x * remainder * remainder;
// pair_eng = phi
Scalar pair_eng = aspair_potential * inverseR;
// derivativePhi = (phi + r * dphi/dr - phi) * 1/r = dphi / dr
Scalar derivativePhi = (derivative_pair_potential - pair_eng) * inverseR;
// derivativeRhoI = drho / dr of i
idxs = int_position + typei * ntypes * nr + typej * nr;
dv = texFetchScalar4(d_drho, tex_drho, idxs);
Scalar derivativeRhoI = dv.z + dv.y * remainder + dv.x * remainder * remainder;
// derivativeRhoJ = drho / dr of j
idxs = int_position + typej * ntypes * nr + typei * nr;
dv = texFetchScalar4(d_drho, tex_drho, idxs);
Scalar derivativeRhoJ = dv.z + dv.y * remainder + dv.x * remainder * remainder;
        // fullDerivativePhi = dF/dP (of i) * drho_j/dr + dF/dP (of j) * drho_i/dr + dphi/dr
Scalar d_dFdPcur = texFetchScalar(d_dFdP, tex_dFdP, cur_neigh);
Scalar fullDerivativePhi = d_dFdPidx * derivativeRhoJ + d_dFdPcur * derivativeRhoI + derivativePhi;
// compute forces
pairForce = -fullDerivativePhi * inverseR;
// avoid double counting
Scalar pairForceover2 = Scalar(0.5) * pairForce;
virial[0] += dx.x * dx.x * pairForceover2;
virial[1] += dx.x * dx.y * pairForceover2;
virial[2] += dx.x * dx.z * pairForceover2;
virial[3] += dx.y * dx.y * pairForceover2;
virial[4] += dx.y * dx.z * pairForceover2;
virial[5] += dx.z * dx.z * pairForceover2;
fxi += dx.x * pairForce;
fyi += dx.y * pairForce;
fzi += dx.z * pairForce;
m_pe += pair_eng * Scalar(0.5);
}
// now that the force calculation is complete, write out the result
force.x = fxi;
force.y = fyi;
force.z = fzi;
force.w += m_pe;
d_force[idx] = force;
for (int i = 0; i < 6; i++)
d_virial[i * virial_pitch + idx] = virial[i];
}
//! compute forces on GPU
hipError_t gpu_compute_eam_tex_inter_forces(Scalar4 *d_force, Scalar *d_virial, const unsigned int virial_pitch,
const unsigned int N, const Scalar4 *d_pos, const BoxDim &box, const unsigned int *d_n_neigh,
const unsigned int *d_nlist, const unsigned int *d_head_list, const unsigned int size_nlist,
const EAMTexInterData &eam_data, Scalar *d_dFdP, const Scalar4 *d_F, const Scalar4 *d_rho,
const Scalar4 *d_rphi, const Scalar4 *d_dF, const Scalar4 *d_drho, const Scalar4 *d_drphi,
const unsigned int compute_capability, const unsigned int max_tex1d_width)
{
hipError_t error;
// bind the texture
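    // textures are only bound for devices below compute capability 3.5; on newer devices the texFetch* helpers are expected to read directly from global memory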
if (compute_capability < 350 && size_nlist <= max_tex1d_width)
{
nlist_tex.normalized = false;
nlist_tex.filterMode = hipFilterModePoint;
error = hipBindTexture(0, nlist_tex, d_nlist, sizeof(unsigned int) * size_nlist);
if (error != hipSuccess)
return error;
}
if (compute_capability < 350)
{
tex_F.normalized = false;
tex_F.filterMode = hipFilterModePoint;
error = hipBindTexture(0, tex_F, d_F, sizeof(Scalar4) * eam_data.nrho * eam_data.ntypes);
if (error != hipSuccess)
return error;
tex_dF.normalized = false;
tex_dF.filterMode = hipFilterModePoint;
error = hipBindTexture(0, tex_dF, d_dF, sizeof(Scalar4) * eam_data.nrho * eam_data.ntypes);
if (error != hipSuccess)
return error;
tex_rho.normalized = false;
tex_rho.filterMode = hipFilterModePoint;
error = hipBindTexture(0, tex_rho, d_rho, sizeof(Scalar4) * eam_data.nrho * eam_data.ntypes * eam_data.ntypes);
if (error != hipSuccess)
return error;
tex_drho.normalized = false;
tex_drho.filterMode = hipFilterModePoint;
error = hipBindTexture(0, tex_drho, d_drho,
sizeof(Scalar4) * eam_data.nrho * eam_data.ntypes * eam_data.ntypes);
if (error != hipSuccess)
return error;
tex_rphi.normalized = false;
tex_rphi.filterMode = hipFilterModePoint;
error = hipBindTexture(0, tex_rphi, d_rphi,
sizeof(Scalar4) * (int) (0.5 * eam_data.nr * (eam_data.ntypes + 1) * eam_data.ntypes));
if (error != hipSuccess)
return error;
tex_drphi.normalized = false;
tex_drphi.filterMode = hipFilterModePoint;
error = hipBindTexture(0, tex_drphi, d_drphi,
sizeof(Scalar4) * (int) (0.5 * eam_data.nr * (eam_data.ntypes + 1) * eam_data.ntypes));
if (error != hipSuccess)
return error;
}
pdata_pos_tex.normalized = false;
pdata_pos_tex.filterMode = hipFilterModePoint;
error = hipBindTexture(0, pdata_pos_tex, d_pos, sizeof(Scalar4) * N);
if (error != hipSuccess)
return error;
tex_dFdP.normalized = false;
tex_dFdP.filterMode = hipFilterModePoint;
error = hipBindTexture(0, tex_dFdP, d_dFdP, sizeof(Scalar) * N);
if (error != hipSuccess)
return error;
    // copy the potential parameters to constant memory, then run the kernels
    hipMemcpyToSymbol(HIP_SYMBOL(eam_data_ti), &eam_data, sizeof(EAMTexInterData));
if (compute_capability < 350 && size_nlist > max_tex1d_width)
{
static unsigned int max_block_size_1 = UINT_MAX;
static unsigned int max_block_size_2 = UINT_MAX;
hipFuncAttributes attr1;
hipFuncGetAttributes(&attr1, gpu_kernel_1<1>);
hipFuncAttributes attr2;
hipFuncGetAttributes(&attr2, gpu_kernel_2<1>);
max_block_size_1 = attr1.maxThreadsPerBlock;
max_block_size_2 = attr2.maxThreadsPerBlock;
unsigned int run_block_size_1 = min(eam_data.block_size, max_block_size_1);
unsigned int run_block_size_2 = min(eam_data.block_size, max_block_size_2);
// setup the grid to run the kernel
dim3 grid_1((int) ceil((double) N / (double) run_block_size_1), 1, 1);
dim3 threads_1(run_block_size_1, 1, 1);
dim3 grid_2((int) ceil((double) N / (double) run_block_size_2), 1, 1);
dim3 threads_2(run_block_size_2, 1, 1);
hipLaunchKernelGGL(( gpu_kernel_1<1>) , dim3(grid_1), dim3(threads_1), 0, 0, d_force, d_virial, virial_pitch, N, d_pos, box, d_n_neigh, d_nlist,
d_head_list, d_F, d_rho, d_rphi, d_dF, d_drho, d_drphi, d_dFdP);
hipLaunchKernelGGL(( gpu_kernel_2<1>) , dim3(grid_2), dim3(threads_2), 0, 0, d_force, d_virial, virial_pitch, N, d_pos, box, d_n_neigh, d_nlist,
d_head_list, d_F, d_rho, d_rphi, d_dF, d_drho, d_drphi, d_dFdP);
}
else
{
static unsigned int max_block_size_1 = UINT_MAX;
static unsigned int max_block_size_2 = UINT_MAX;
hipFuncAttributes attr1;
hipFuncGetAttributes(&attr1, gpu_kernel_1<0>);
hipFuncAttributes attr2;
hipFuncGetAttributes(&attr2, gpu_kernel_2<0>);
max_block_size_1 = attr1.maxThreadsPerBlock;
max_block_size_2 = attr2.maxThreadsPerBlock;
unsigned int run_block_size_1 = min(eam_data.block_size, max_block_size_1);
unsigned int run_block_size_2 = min(eam_data.block_size, max_block_size_2);
// setup the grid to run the kernel
dim3 grid_1((int) ceil((double) N / (double) run_block_size_1), 1, 1);
dim3 threads_1(run_block_size_1, 1, 1);
dim3 grid_2((int) ceil((double) N / (double) run_block_size_2), 1, 1);
dim3 threads_2(run_block_size_2, 1, 1);
hipLaunchKernelGGL(( gpu_kernel_1<0>) , dim3(grid_1), dim3(threads_1), 0, 0, d_force, d_virial, virial_pitch, N, d_pos, box, d_n_neigh, d_nlist,
d_head_list, d_F, d_rho, d_rphi, d_dF, d_drho, d_drphi, d_dFdP);
hipLaunchKernelGGL(( gpu_kernel_2<0>) , dim3(grid_2), dim3(threads_2), 0, 0, d_force, d_virial, virial_pitch, N, d_pos, box, d_n_neigh, d_nlist,
d_head_list, d_F, d_rho, d_rphi, d_dF, d_drho, d_drphi, d_dFdP);
}
return hipSuccess;
}
|
5ce808e0157a7f4bdc70f2d67b2a284ff1024f14.cu
|
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: Lin Yang, Alex Travesset
// Previous Maintainer: Morozov
#include "EAMForceGPU.cuh"
#include "hoomd/TextureTools.h"
#include <assert.h>
/*! \file EAMForceGPU.cu
\brief Defines GPU kernel code for calculating the EAM forces. Used by EAMForceComputeGPU.
*/
//! Texture for reading particle positions
scalar4_tex_t pdata_pos_tex;
//! Texture for reading the neighbor list
texture<unsigned int, 1, cudaReadModeElementType> nlist_tex;
//! Texture for reading potential
scalar4_tex_t tex_F;
scalar4_tex_t tex_rho;
scalar4_tex_t tex_rphi;
scalar4_tex_t tex_dF;
scalar4_tex_t tex_drho;
scalar4_tex_t tex_drphi;
//! Texture for dF/dP
scalar_tex_t tex_dFdP;
//! Storage space for EAM parameters on the GPU
__constant__ EAMTexInterData eam_data_ti;
//! Kernel for computing EAM forces on the GPU
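//! \tparam use_gmem_nlist When non-zero, the neighbor list is read from global memory instead of through the texture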
template<unsigned char use_gmem_nlist>
__global__ void gpu_kernel_1(Scalar4 *d_force, Scalar *d_virial, const unsigned int virial_pitch, const unsigned int N,
const Scalar4 *d_pos, BoxDim box, const unsigned int *d_n_neigh, const unsigned int *d_nlist,
const unsigned int *d_head_list, const Scalar4 *d_F, const Scalar4 *d_rho, const Scalar4 *d_rphi,
const Scalar4 *d_dF, const Scalar4 *d_drho, const Scalar4 *d_drphi, Scalar *d_dFdP)
{
// start by identifying which particle we are to handle
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
// load in the length of the list
int n_neigh = d_n_neigh[idx];
const unsigned int head_idx = d_head_list[idx];
// read in the position of our particle.
Scalar4 postype = texFetchScalar4(d_pos, pdata_pos_tex, idx);
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
// index and remainder
Scalar position;// look up position, scalar
unsigned int int_position;// look up index for position, integer
unsigned int idxs;// look up index in F, rho, rphi array, considering shift, integer
    Scalar remainder;// fractional remainder within the table interval, scalar
Scalar4 v, dv;// value, d(value)
// initialize the force to 0
Scalar4 force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
// prefetch neighbor index
int cur_neigh = 0;
int next_neigh(0);
if (use_gmem_nlist)
{
next_neigh = d_nlist[head_idx];
}
else
{
next_neigh = texFetchUint(d_nlist, nlist_tex, head_idx);
}
int typei = __scalar_as_int(postype.w);
// loop over neighbors
Scalar atomElectronDensity = Scalar(0.0);
int ntypes = eam_data_ti.ntypes;
int nrho = eam_data_ti.nrho;
int nr = eam_data_ti.nr;
Scalar rdrho = eam_data_ti.rdrho;
Scalar rdr = eam_data_ti.rdr;
Scalar r_cutsq = eam_data_ti.r_cutsq;
for (int neigh_idx = 0; neigh_idx < n_neigh; neigh_idx++)
{
// read the current neighbor index
// prefetch the next value and set the current one
cur_neigh = next_neigh;
if (use_gmem_nlist)
{
next_neigh = d_nlist[head_idx + neigh_idx + 1];
}
else
{
next_neigh = texFetchUint(d_nlist, nlist_tex, head_idx + neigh_idx + 1);
}
// get the neighbor's position
Scalar4 neigh_postype = texFetchScalar4(d_pos, pdata_pos_tex, cur_neigh);
Scalar3 neigh_pos = make_scalar3(neigh_postype.x, neigh_postype.y, neigh_postype.z);
// calculate dr (with periodic boundary conditions)
Scalar3 dx = pos - neigh_pos;
int typej = __scalar_as_int(neigh_postype.w);
// apply periodic boundary conditions
dx = box.minImage(dx);
// calculate r squared
Scalar rsq = dot(dx, dx);
if (rsq < r_cutsq)
{
// calculate position r for rho(r)
position = sqrtf(rsq) * rdr;
int_position = (unsigned int) position;
int_position = min(int_position, nr - 1);
remainder = position - int_position;
// calculate P = sum{rho}
idxs = int_position + nr * (typej * ntypes + typei);
v = texFetchScalar4(d_rho, tex_rho, idxs);
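            // each Scalar4 packs cubic interpolation coefficients: value = w + z*t + y*t^2 + x*t^3 with t = remainder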
atomElectronDensity += v.w + v.z * remainder + v.y * remainder * remainder
+ v.x * remainder * remainder * remainder;
}
}
// calculate position rho for F(rho)
position = atomElectronDensity * rdrho;
int_position = (unsigned int) position;
int_position = min(int_position, nrho - 1);
remainder = position - int_position;
idxs = int_position + typei * nrho;
dv = texFetchScalar4(d_dF, tex_dF, idxs);
v = texFetchScalar4(d_F, tex_F, idxs);
// compute dF / dP
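    // (stored per particle so the second-stage kernel can reuse it when accumulating pair forces)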
d_dFdP[idx] = dv.z + dv.y * remainder + dv.x * remainder * remainder;
// compute embedded energy F(P), sum up each particle
force.w += v.w + v.z * remainder + v.y * remainder * remainder + v.x * remainder * remainder * remainder;
// update the d_force
d_force[idx] = force;
}
//! Second stage kernel for computing EAM forces on the GPU
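//! \tparam use_gmem_nlist When non-zero, the neighbor list is read from global memory instead of through the texture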
template<unsigned char use_gmem_nlist>
__global__ void gpu_kernel_2(Scalar4 *d_force, Scalar *d_virial, const unsigned int virial_pitch, const unsigned int N,
const Scalar4 *d_pos, BoxDim box, const unsigned int *d_n_neigh, const unsigned int *d_nlist,
const unsigned int *d_head_list, const Scalar4 *d_F, const Scalar4 *d_rho, const Scalar4 *d_rphi,
const Scalar4 *d_dF, const Scalar4 *d_drho, const Scalar4 *d_drphi, Scalar *d_dFdP)
{
// start by identifying which particle we are to handle
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
// load in the length of the list
int n_neigh = d_n_neigh[idx];
const unsigned int head_idx = d_head_list[idx];
// read in the position of our particle. Texture reads of Scalar4's are faster than global reads
Scalar4 postype = texFetchScalar4(d_pos, pdata_pos_tex, idx);
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
int typei = __scalar_as_int(postype.w);
// index and remainder
Scalar position;// look up position, scalar
unsigned int int_position;// look up index for position, integer
unsigned int idxs;// look up index in F, rho, rphi array, considering shift, integer
    Scalar remainder;// fractional remainder within the table interval, scalar
Scalar4 v, dv;// value, d(value)
// prefetch neighbor index
int cur_neigh = 0;
int next_neigh(0);
if (use_gmem_nlist)
{
next_neigh = d_nlist[head_idx];
}
else
{
next_neigh = texFetchUint(d_nlist, nlist_tex, head_idx);
}
    // initialize the force to 0; the embedding energy from gpu_kernel_1 is re-read into force.w below
    Scalar4 force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
Scalar fxi = Scalar(0.0);
Scalar fyi = Scalar(0.0);
Scalar fzi = Scalar(0.0);
Scalar m_pe = Scalar(0.0);
Scalar pairForce = Scalar(0.0);
Scalar virial[6];
for (int i = 0; i < 6; i++)
virial[i] = Scalar(0.0);
force.w = d_force[idx].w;
int ntypes = eam_data_ti.ntypes;
int nr = eam_data_ti.nr;
Scalar rdr = eam_data_ti.rdr;
Scalar r_cutsq = eam_data_ti.r_cutsq;
Scalar d_dFdPidx = texFetchScalar(d_dFdP, tex_dFdP, idx);
for (int neigh_idx = 0; neigh_idx < n_neigh; neigh_idx++)
{
cur_neigh = next_neigh;
if (use_gmem_nlist)
{
next_neigh = d_nlist[head_idx + neigh_idx + 1];
}
else
{
next_neigh = texFetchUint(d_nlist, nlist_tex, head_idx + neigh_idx + 1);
}
// get the neighbor's position
Scalar4 neigh_postype = texFetchScalar4(d_pos, pdata_pos_tex, cur_neigh);
Scalar3 neigh_pos = make_scalar3(neigh_postype.x, neigh_postype.y, neigh_postype.z);
// calculate dr (with periodic boundary conditions)
Scalar3 dx = pos - neigh_pos;
int typej = __scalar_as_int(neigh_postype.w);
// apply periodic boundary conditions
dx = box.minImage(dx);
// calculate r squared
Scalar rsq = dot(dx, dx);
if (rsq > r_cutsq)
continue;
// calculate position r for phi(r)
Scalar inverseR = rsqrtf(rsq);
Scalar r = Scalar(1.0) / inverseR;
position = r * rdr;
int_position = (unsigned int) position;
int_position = min(int_position, nr - 1);
remainder = position - int_position;
// calculate the shift position for type ij
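        // (r*phi is tabulated once per unordered type pair: ntypes*(ntypes+1)/2 blocks of length nr, packed in triangular order)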
int shift =
(typei >= typej) ?
(int) (0.5 * (2 * ntypes - typej - 1) * typej + typei) * nr :
(int) (0.5 * (2 * ntypes - typei - 1) * typei + typej) * nr;
idxs = int_position + shift;
v = texFetchScalar4(d_rphi, tex_rphi, idxs);
dv = texFetchScalar4(d_drphi, tex_drphi, idxs);
// aspair_potential = r * phi
Scalar aspair_potential = v.w + v.z * remainder + v.y * remainder * remainder
+ v.x * remainder * remainder * remainder;
// derivative_pair_potential = phi + r * dphi / dr
Scalar derivative_pair_potential = dv.z + dv.y * remainder + dv.x * remainder * remainder;
// pair_eng = phi
Scalar pair_eng = aspair_potential * inverseR;
// derivativePhi = (phi + r * dphi/dr - phi) * 1/r = dphi / dr
Scalar derivativePhi = (derivative_pair_potential - pair_eng) * inverseR;
// derivativeRhoI = drho / dr of i
idxs = int_position + typei * ntypes * nr + typej * nr;
dv = texFetchScalar4(d_drho, tex_drho, idxs);
Scalar derivativeRhoI = dv.z + dv.y * remainder + dv.x * remainder * remainder;
// derivativeRhoJ = drho / dr of j
idxs = int_position + typej * ntypes * nr + typei * nr;
dv = texFetchScalar4(d_drho, tex_drho, idxs);
Scalar derivativeRhoJ = dv.z + dv.y * remainder + dv.x * remainder * remainder;
        // fullDerivativePhi = dF/dP (of i) * drho_j/dr + dF/dP (of j) * drho_i/dr + dphi/dr
Scalar d_dFdPcur = texFetchScalar(d_dFdP, tex_dFdP, cur_neigh);
Scalar fullDerivativePhi = d_dFdPidx * derivativeRhoJ + d_dFdPcur * derivativeRhoI + derivativePhi;
// compute forces
pairForce = -fullDerivativePhi * inverseR;
// avoid double counting
Scalar pairForceover2 = Scalar(0.5) * pairForce;
virial[0] += dx.x * dx.x * pairForceover2;
virial[1] += dx.x * dx.y * pairForceover2;
virial[2] += dx.x * dx.z * pairForceover2;
virial[3] += dx.y * dx.y * pairForceover2;
virial[4] += dx.y * dx.z * pairForceover2;
virial[5] += dx.z * dx.z * pairForceover2;
fxi += dx.x * pairForce;
fyi += dx.y * pairForce;
fzi += dx.z * pairForce;
m_pe += pair_eng * Scalar(0.5);
}
// now that the force calculation is complete, write out the result
force.x = fxi;
force.y = fyi;
force.z = fzi;
force.w += m_pe;
d_force[idx] = force;
for (int i = 0; i < 6; i++)
d_virial[i * virial_pitch + idx] = virial[i];
}
//! compute forces on GPU
cudaError_t gpu_compute_eam_tex_inter_forces(Scalar4 *d_force, Scalar *d_virial, const unsigned int virial_pitch,
const unsigned int N, const Scalar4 *d_pos, const BoxDim &box, const unsigned int *d_n_neigh,
const unsigned int *d_nlist, const unsigned int *d_head_list, const unsigned int size_nlist,
const EAMTexInterData &eam_data, Scalar *d_dFdP, const Scalar4 *d_F, const Scalar4 *d_rho,
const Scalar4 *d_rphi, const Scalar4 *d_dF, const Scalar4 *d_drho, const Scalar4 *d_drphi,
const unsigned int compute_capability, const unsigned int max_tex1d_width)
{
cudaError_t error;
// bind the texture
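    // textures are only bound for devices below compute capability 3.5; on newer devices the texFetch* helpers are expected to read directly from global memory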
if (compute_capability < 350 && size_nlist <= max_tex1d_width)
{
nlist_tex.normalized = false;
nlist_tex.filterMode = cudaFilterModePoint;
error = cudaBindTexture(0, nlist_tex, d_nlist, sizeof(unsigned int) * size_nlist);
if (error != cudaSuccess)
return error;
}
if (compute_capability < 350)
{
tex_F.normalized = false;
tex_F.filterMode = cudaFilterModePoint;
error = cudaBindTexture(0, tex_F, d_F, sizeof(Scalar4) * eam_data.nrho * eam_data.ntypes);
if (error != cudaSuccess)
return error;
tex_dF.normalized = false;
tex_dF.filterMode = cudaFilterModePoint;
error = cudaBindTexture(0, tex_dF, d_dF, sizeof(Scalar4) * eam_data.nrho * eam_data.ntypes);
if (error != cudaSuccess)
return error;
tex_rho.normalized = false;
tex_rho.filterMode = cudaFilterModePoint;
error = cudaBindTexture(0, tex_rho, d_rho, sizeof(Scalar4) * eam_data.nrho * eam_data.ntypes * eam_data.ntypes);
if (error != cudaSuccess)
return error;
tex_drho.normalized = false;
tex_drho.filterMode = cudaFilterModePoint;
error = cudaBindTexture(0, tex_drho, d_drho,
sizeof(Scalar4) * eam_data.nrho * eam_data.ntypes * eam_data.ntypes);
if (error != cudaSuccess)
return error;
tex_rphi.normalized = false;
tex_rphi.filterMode = cudaFilterModePoint;
error = cudaBindTexture(0, tex_rphi, d_rphi,
sizeof(Scalar4) * (int) (0.5 * eam_data.nr * (eam_data.ntypes + 1) * eam_data.ntypes));
if (error != cudaSuccess)
return error;
tex_drphi.normalized = false;
tex_drphi.filterMode = cudaFilterModePoint;
error = cudaBindTexture(0, tex_drphi, d_drphi,
sizeof(Scalar4) * (int) (0.5 * eam_data.nr * (eam_data.ntypes + 1) * eam_data.ntypes));
if (error != cudaSuccess)
return error;
}
pdata_pos_tex.normalized = false;
pdata_pos_tex.filterMode = cudaFilterModePoint;
error = cudaBindTexture(0, pdata_pos_tex, d_pos, sizeof(Scalar4) * N);
if (error != cudaSuccess)
return error;
tex_dFdP.normalized = false;
tex_dFdP.filterMode = cudaFilterModePoint;
error = cudaBindTexture(0, tex_dFdP, d_dFdP, sizeof(Scalar) * N);
if (error != cudaSuccess)
return error;
    // copy the potential parameters to constant memory, then run the kernels
cudaMemcpyToSymbol(eam_data_ti, &eam_data, sizeof(EAMTexInterData));
if (compute_capability < 350 && size_nlist > max_tex1d_width)
{
static unsigned int max_block_size_1 = UINT_MAX;
static unsigned int max_block_size_2 = UINT_MAX;
cudaFuncAttributes attr1;
cudaFuncGetAttributes(&attr1, gpu_kernel_1<1>);
cudaFuncAttributes attr2;
cudaFuncGetAttributes(&attr2, gpu_kernel_2<1>);
max_block_size_1 = attr1.maxThreadsPerBlock;
max_block_size_2 = attr2.maxThreadsPerBlock;
unsigned int run_block_size_1 = min(eam_data.block_size, max_block_size_1);
unsigned int run_block_size_2 = min(eam_data.block_size, max_block_size_2);
// setup the grid to run the kernel
dim3 grid_1((int) ceil((double) N / (double) run_block_size_1), 1, 1);
dim3 threads_1(run_block_size_1, 1, 1);
dim3 grid_2((int) ceil((double) N / (double) run_block_size_2), 1, 1);
dim3 threads_2(run_block_size_2, 1, 1);
gpu_kernel_1<1> <<<grid_1, threads_1>>>(d_force, d_virial, virial_pitch, N, d_pos, box, d_n_neigh, d_nlist,
d_head_list, d_F, d_rho, d_rphi, d_dF, d_drho, d_drphi, d_dFdP);
gpu_kernel_2<1> <<<grid_2, threads_2>>>(d_force, d_virial, virial_pitch, N, d_pos, box, d_n_neigh, d_nlist,
d_head_list, d_F, d_rho, d_rphi, d_dF, d_drho, d_drphi, d_dFdP);
}
else
{
static unsigned int max_block_size_1 = UINT_MAX;
static unsigned int max_block_size_2 = UINT_MAX;
cudaFuncAttributes attr1;
cudaFuncGetAttributes(&attr1, gpu_kernel_1<0>);
cudaFuncAttributes attr2;
cudaFuncGetAttributes(&attr2, gpu_kernel_2<0>);
max_block_size_1 = attr1.maxThreadsPerBlock;
max_block_size_2 = attr2.maxThreadsPerBlock;
unsigned int run_block_size_1 = min(eam_data.block_size, max_block_size_1);
unsigned int run_block_size_2 = min(eam_data.block_size, max_block_size_2);
// setup the grid to run the kernel
dim3 grid_1((int) ceil((double) N / (double) run_block_size_1), 1, 1);
dim3 threads_1(run_block_size_1, 1, 1);
dim3 grid_2((int) ceil((double) N / (double) run_block_size_2), 1, 1);
dim3 threads_2(run_block_size_2, 1, 1);
gpu_kernel_1<0> <<<grid_1, threads_1>>>(d_force, d_virial, virial_pitch, N, d_pos, box, d_n_neigh, d_nlist,
d_head_list, d_F, d_rho, d_rphi, d_dF, d_drho, d_drphi, d_dFdP);
gpu_kernel_2<0> <<<grid_2, threads_2>>>(d_force, d_virial, virial_pitch, N, d_pos, box, d_n_neigh, d_nlist,
d_head_list, d_F, d_rho, d_rphi, d_dF, d_drho, d_drphi, d_dFdP);
}
return cudaSuccess;
}
|
1e08497e00467850d6d6ac8ee1d4347d38ec1c25.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <cfloat>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
#define spmv_NBLOCKS 12*8*21 //22
#define spmv_BLOCK_SIZE 256
#define WARP_SIZE 32
texture<float,1,hipReadModeElementType> tex_vec;
static const double MAX_RELATIVE_ERROR = .02;
static const int PAD_FACTOR = 16;
void fill(float *A, const int n, const float maxi)
{
for (int j = 0; j < n; j++)
{
A[j] = ((float) maxi * (rand() / (RAND_MAX + 1.0f)));
}
}
void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim)
{
int nnzAssigned = 0;
// Figure out the probability that a nonzero should be assigned to a given
// spot in the matrix
double prob = (double)n / ((double)dim * (double)dim);
// Seed random number generator
srand48(2013);
// Randomly decide whether entry i,j gets a value, but ensure n values
// are assigned
bool fillRemaining = false;
for (int i = 0; i < dim; i++)
{
rowDelimiters[i] = nnzAssigned;
for (int j = 0; j < dim; j++)
{
int numEntriesLeft = (dim * dim) - ((i * dim) + j);
int needToAssign = n - nnzAssigned;
if (numEntriesLeft <= needToAssign) {
fillRemaining = true;
}
if ((nnzAssigned < n && drand48() <= prob) || fillRemaining)
{
// Assign (i,j) a value
cols[nnzAssigned] = j;
nnzAssigned++;
}
}
}
// Observe the convention to put the number of non zeroes at the end of the
// row delimiters array
rowDelimiters[dim] = n;
assert(nnzAssigned == n);
}
void convertToPadded(float *A, int *cols, int dim, int *rowDelimiters,
float **newA_ptr, int **newcols_ptr, int *newIndices,
int *newSize)
{
// determine total padded size and new row indices
int paddedSize = 0;
int rowSize;
for (int i=0; i<dim; i++)
{
newIndices[i] = paddedSize;
rowSize = rowDelimiters[i+1] - rowDelimiters[i];
if (rowSize % PAD_FACTOR != 0)
{
rowSize += PAD_FACTOR - rowSize % PAD_FACTOR;
}
paddedSize += rowSize;
}
*newSize = paddedSize;
newIndices[dim] = paddedSize;
hipHostMalloc(newA_ptr, paddedSize * sizeof(float));
hipHostMalloc(newcols_ptr, paddedSize * sizeof(int));
float *newA = *newA_ptr;
int *newcols = *newcols_ptr;
memset(newA, 0, paddedSize * sizeof(float));
// fill newA and newcols
for (int i=0; i<dim; i++)
{
for (int j=rowDelimiters[i], k=newIndices[i]; j<rowDelimiters[i+1];
j++, k++)
{
newA[k] = A[j];
newcols[k] = cols[j];
}
}
}
void spmvCpu(const float *val, const int *cols, const int *rowDelimiters,
const float *vec, int dim, float *out)
{
for (int i=0; i<dim; i++)
{
float t = 0;
for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++)
{
int col = cols[j];
t += val[j] * vec[col];//tex1Dfetch(tex_vec,col);
}
out[i] = t;
}
}
void spmv_verifyResults(const float *cpuResults, const float *gpuResults,
const int size)
{
bool passed = true;
for (int i = 0; i < size; i++)
{
if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i]
> MAX_RELATIVE_ERROR)
{
cout << "Failed! Mismatch at i: "<< i << " ref: " << cpuResults[i] <<
" dev: " << gpuResults[i] << endl;
return;
}
}
cout << "spmv passed" << endl;
}
__global__ void
spmv_kernel(const float* val,
const int* __restrict__ cols,
const int * rowDelimiters,
const float * vec,
const int dim, float * out)
{
// Thread ID in block
int t = threadIdx.x;
// Thread ID within warp
int id = t & (WARP_SIZE-1);
int warpsPerBlock = blockDim.x / WARP_SIZE;
// One row per warp
int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE);
__shared__ int rowDeli[spmv_BLOCK_SIZE/WARP_SIZE+1];
__shared__ volatile float partialSums[spmv_BLOCK_SIZE];
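    // note: rowDeli is filled below but never read afterwards, so this shared-memory prefetch is effectively dead code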
if (threadIdx.x<spmv_BLOCK_SIZE/WARP_SIZE+1)
rowDeli[threadIdx.x]=rowDelimiters[myRow+threadIdx.x];
__syncthreads();
if (myRow < dim)
{
int warpStart = rowDelimiters[myRow];
int warpEnd = rowDelimiters[myRow+1];
float mySum = 0;
for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE)
{
int col = cols[j];
mySum += val[j] * tex1Dfetch(tex_vec,col);//vec[col];
}
partialSums[t] = mySum;
// Reduce partial sums
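        // warp-synchronous tree reduction over volatile shared memory (assumes lock-step warp execution, pre-Volta style)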
if (id < 16) partialSums[t] += partialSums[t+16];
if (id < 8) partialSums[t] += partialSums[t+ 8];
if (id < 4) partialSums[t] += partialSums[t+ 4];
if (id < 2) partialSums[t] += partialSums[t+ 2];
if (id < 1) partialSums[t] += partialSums[t+ 1];
// Write result
if (id == 0)
{
out[myRow] = partialSums[t];
}
}
}
int main(int argc, char **argv) {
hipSetDevice(2);
srand(2013);
float *h_spmv_val, *h_spmv_valPad;
int *h_spmv_cols, *h_spmv_colsPad;
int *h_rowDelimiters, *h_rowDelimitersPad;
float *h_spmv_vec, *h_spmv_out, *spmv_refOut;
int spmv_nItems, nItemsPadded, spmv_numRows;
spmv_numRows = spmv_NBLOCKS * (spmv_BLOCK_SIZE/WARP_SIZE);
  spmv_nItems = spmv_numRows * spmv_numRows / 50; // about 2% of entries will be non-zero
float maxval = 200.0;
hipHostMalloc(&h_spmv_val, spmv_nItems * sizeof(float));
hipHostMalloc(&h_spmv_cols, spmv_nItems * sizeof(int));
hipHostMalloc(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_val, spmv_nItems, maxval);
initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows);
// Set up remaining host data
int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR);
hipHostMalloc(&h_spmv_vec, spmv_numRows * sizeof(float)) ;
spmv_refOut = new float[spmv_numRows];
hipHostMalloc(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_vec, spmv_numRows, maxval);
hipHostMalloc(&h_spmv_out, paddedSize * sizeof(float));
convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad,
&h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded);
// Compute reference solution
spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut);
float *d_spmv_val, *d_spmv_vec, *d_spmv_out;
int *d_spmv_cols, *d_rowDelimiters;
// Allocate device memory
hipMalloc(&d_spmv_val, spmv_nItems * sizeof(float));
hipMalloc(&d_spmv_cols, spmv_nItems * sizeof(int));
hipMalloc(&d_spmv_vec, spmv_numRows * sizeof(float));
hipMalloc(&d_spmv_out, spmv_numRows * sizeof(float));
hipMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int));
// Transfer data to device
hipMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), hipMemcpyHostToDevice);
hipBindTexture(0,tex_vec,d_spmv_vec,spmv_numRows * sizeof(float));
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_stop);
float kernel_time = 0.0f;
hipEventRecord(kernel_start, 0);
// Setup thread configuration
int spmv_grid = (int) ceil(spmv_numRows / (float)(spmv_BLOCK_SIZE / WARP_SIZE));
hipLaunchKernelGGL(( spmv_kernel) , dim3(spmv_grid), dim3(spmv_BLOCK_SIZE), 0, 0,
d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out);
hipDeviceSynchronize();
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time << endl;
hipMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), hipMemcpyDeviceToHost);
spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows);
return 0;
}
|
1e08497e00467850d6d6ac8ee1d4347d38ec1c25.cu
|
#include <cassert>
#include <cfloat>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
#define spmv_NBLOCKS 12*8*21 //22
#define spmv_BLOCK_SIZE 256
#define WARP_SIZE 32
texture<float,1,cudaReadModeElementType> tex_vec;
static const double MAX_RELATIVE_ERROR = .02;
static const int PAD_FACTOR = 16;
void fill(float *A, const int n, const float maxi)
{
for (int j = 0; j < n; j++)
{
A[j] = ((float) maxi * (rand() / (RAND_MAX + 1.0f)));
}
}
void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim)
{
int nnzAssigned = 0;
// Figure out the probability that a nonzero should be assigned to a given
// spot in the matrix
double prob = (double)n / ((double)dim * (double)dim);
// Seed random number generator
srand48(2013);
// Randomly decide whether entry i,j gets a value, but ensure n values
// are assigned
bool fillRemaining = false;
for (int i = 0; i < dim; i++)
{
rowDelimiters[i] = nnzAssigned;
for (int j = 0; j < dim; j++)
{
int numEntriesLeft = (dim * dim) - ((i * dim) + j);
int needToAssign = n - nnzAssigned;
if (numEntriesLeft <= needToAssign) {
fillRemaining = true;
}
if ((nnzAssigned < n && drand48() <= prob) || fillRemaining)
{
// Assign (i,j) a value
cols[nnzAssigned] = j;
nnzAssigned++;
}
}
}
// Observe the convention to put the number of non zeroes at the end of the
// row delimiters array
rowDelimiters[dim] = n;
assert(nnzAssigned == n);
}
void convertToPadded(float *A, int *cols, int dim, int *rowDelimiters,
float **newA_ptr, int **newcols_ptr, int *newIndices,
int *newSize)
{
// determine total padded size and new row indices
int paddedSize = 0;
int rowSize;
for (int i=0; i<dim; i++)
{
newIndices[i] = paddedSize;
rowSize = rowDelimiters[i+1] - rowDelimiters[i];
if (rowSize % PAD_FACTOR != 0)
{
rowSize += PAD_FACTOR - rowSize % PAD_FACTOR;
}
paddedSize += rowSize;
}
*newSize = paddedSize;
newIndices[dim] = paddedSize;
cudaMallocHost(newA_ptr, paddedSize * sizeof(float));
cudaMallocHost(newcols_ptr, paddedSize * sizeof(int));
float *newA = *newA_ptr;
int *newcols = *newcols_ptr;
memset(newA, 0, paddedSize * sizeof(float));
// fill newA and newcols
for (int i=0; i<dim; i++)
{
for (int j=rowDelimiters[i], k=newIndices[i]; j<rowDelimiters[i+1];
j++, k++)
{
newA[k] = A[j];
newcols[k] = cols[j];
}
}
}
void spmvCpu(const float *val, const int *cols, const int *rowDelimiters,
const float *vec, int dim, float *out)
{
for (int i=0; i<dim; i++)
{
float t = 0;
for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++)
{
int col = cols[j];
t += val[j] * vec[col];//tex1Dfetch(tex_vec,col);
}
out[i] = t;
}
}
void spmv_verifyResults(const float *cpuResults, const float *gpuResults,
const int size)
{
bool passed = true;
for (int i = 0; i < size; i++)
{
if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i]
> MAX_RELATIVE_ERROR)
{
cout << "Failed! Mismatch at i: "<< i << " ref: " << cpuResults[i] <<
" dev: " << gpuResults[i] << endl;
return;
}
}
cout << "spmv passed" << endl;
}
__global__ void
spmv_kernel(const float* val,
const int* __restrict__ cols,
const int * rowDelimiters,
const float * vec,
const int dim, float * out)
{
// Thread ID in block
int t = threadIdx.x;
// Thread ID within warp
int id = t & (WARP_SIZE-1);
int warpsPerBlock = blockDim.x / WARP_SIZE;
// One row per warp
int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE);
__shared__ int rowDeli[spmv_BLOCK_SIZE/WARP_SIZE+1];
__shared__ volatile float partialSums[spmv_BLOCK_SIZE];
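    // note: rowDeli is filled below but never read afterwards, so this shared-memory prefetch is effectively dead code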
if (threadIdx.x<spmv_BLOCK_SIZE/WARP_SIZE+1)
rowDeli[threadIdx.x]=rowDelimiters[myRow+threadIdx.x];
__syncthreads();
if (myRow < dim)
{
int warpStart = rowDelimiters[myRow];
int warpEnd = rowDelimiters[myRow+1];
float mySum = 0;
for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE)
{
int col = cols[j];
mySum += val[j] * tex1Dfetch(tex_vec,col);//vec[col];
}
partialSums[t] = mySum;
// Reduce partial sums
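        // warp-synchronous tree reduction over volatile shared memory (assumes lock-step warp execution, pre-Volta style)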
if (id < 16) partialSums[t] += partialSums[t+16];
if (id < 8) partialSums[t] += partialSums[t+ 8];
if (id < 4) partialSums[t] += partialSums[t+ 4];
if (id < 2) partialSums[t] += partialSums[t+ 2];
if (id < 1) partialSums[t] += partialSums[t+ 1];
// Write result
if (id == 0)
{
out[myRow] = partialSums[t];
}
}
}
int main(int argc, char **argv) {
cudaSetDevice(2);
srand(2013);
float *h_spmv_val, *h_spmv_valPad;
int *h_spmv_cols, *h_spmv_colsPad;
int *h_rowDelimiters, *h_rowDelimitersPad;
float *h_spmv_vec, *h_spmv_out, *spmv_refOut;
int spmv_nItems, nItemsPadded, spmv_numRows;
spmv_numRows = spmv_NBLOCKS * (spmv_BLOCK_SIZE/WARP_SIZE);
  spmv_nItems = spmv_numRows * spmv_numRows / 50; // about 2% of entries will be non-zero
float maxval = 200.0;
cudaMallocHost(&h_spmv_val, spmv_nItems * sizeof(float));
cudaMallocHost(&h_spmv_cols, spmv_nItems * sizeof(int));
cudaMallocHost(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_val, spmv_nItems, maxval);
initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows);
// Set up remaining host data
int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR);
cudaMallocHost(&h_spmv_vec, spmv_numRows * sizeof(float)) ;
spmv_refOut = new float[spmv_numRows];
cudaMallocHost(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_vec, spmv_numRows, maxval);
cudaMallocHost(&h_spmv_out, paddedSize * sizeof(float));
convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad,
&h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded);
// Compute reference solution
spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut);
float *d_spmv_val, *d_spmv_vec, *d_spmv_out;
int *d_spmv_cols, *d_rowDelimiters;
// Allocate device memory
cudaMalloc(&d_spmv_val, spmv_nItems * sizeof(float));
cudaMalloc(&d_spmv_cols, spmv_nItems * sizeof(int));
cudaMalloc(&d_spmv_vec, spmv_numRows * sizeof(float));
cudaMalloc(&d_spmv_out, spmv_numRows * sizeof(float));
cudaMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int));
// Transfer data to device
cudaMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), cudaMemcpyHostToDevice);
cudaBindTexture(0,tex_vec,d_spmv_vec,spmv_numRows * sizeof(float));
cudaEvent_t kernel_start, kernel_stop;
cudaEventCreate(&kernel_start);
cudaEventCreate(&kernel_stop);
float kernel_time = 0.0f;
cudaEventRecord(kernel_start, 0);
// Setup thread configuration
int spmv_grid = (int) ceil(spmv_numRows / (float)(spmv_BLOCK_SIZE / WARP_SIZE));
spmv_kernel <<<spmv_grid, spmv_BLOCK_SIZE>>>
(d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out);
cudaDeviceSynchronize();
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time << endl;
cudaMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), cudaMemcpyDeviceToHost);
spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows);
return 0;
}
|