hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
76cd4bab4fb0ce91f25218c82adc67adc209b5cc.hip | // !!! This is a file automatically generated by hipify!!!
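// The original CUDA source in the cuda_content column of this row shows what hipify rewrote
// here: cudaMalloc/cudaMemcpy/cudaFree become hipMalloc/hipMemcpy/hipFree, cudaError_t and
// cudaSuccess become their hip equivalents, and the fGrid<<<grid, block>>>(...) launch becomes
// hipLaunchKernelGGL((fGrid), dim3(grid), dim3(block), sharedMemBytes, stream, ...) with 0, 0
// passed for the shared-memory and stream arguments.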
#include <hip/hip_runtime.h>
#include <iostream>
using namespace std;
#include "bulk.cuh"
#include "fThread.cuh"
#include "macros.h"
__global__ void fGrid(float* fIn_d, float *fOut_d, float t, float dt, int ntot){
fThread fi = fThread(fIn_d, fOut_d, t, dt);
//fi.print();
fi.setntot(ntot);
fi.nextTime();
fi.update();
}
bulk::bulk(float* f0_h, float t0, float dt, int ntot){
_ntot = ntot; _t = t0; _dt = dt;
_nbytes = _ntot*sizeof(float); // _fIn_d/_fOut_d hold floats, not ints
//cout << _ntot << ", " <<_nbytes << endl;
CUDA_STATUS(hipMalloc((void**) &_fIn_d, _nbytes));
CUDA_STATUS(hipMalloc((void**) &_fOut_d, _nbytes));
CUDA_STATUS(hipMemcpy(_fIn_d, f0_h, _nbytes, hipMemcpyHostToDevice));
}
bulk::~bulk(){
hipFree((void*) _fIn_d);
hipFree((void*) _fOut_d);
}
void bulk::nextTime(){
int nt = 32*4;
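// One thread per element: ceil(_ntot / nt) blocks of nt = 128 threads each.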
hipLaunchKernelGGL(( fGrid), dim3((_ntot+nt-1)/nt), dim3(nt), 0, 0, _fIn_d, _fOut_d, _t, _dt, _ntot);
hipError_t err = hipGetLastError();
if (err != hipSuccess){
printf("Kernel call in bulk::nextTime:\n");
fprintf( stderr , "Error %s at line %d in file %s \n", hipGetErrorString(err), __LINE__, __FILE__);
exit(1);
}
CUDA_STATUS(hipDeviceSynchronize());
_t += _dt;
}
void bulk::output(float* f_h){
CUDA_STATUS(hipMemcpy(f_h, _fOut_d, _nbytes, hipMemcpyDeviceToHost));
}
| 76cd4bab4fb0ce91f25218c82adc67adc209b5cc.cu | #include <cuda.h>
#include <iostream>
using namespace std;
#include "bulk.cuh"
#include "fThread.cuh"
#include "macros.h"
__global__ void fGrid(float* fIn_d, float *fOut_d, float t, float dt, int ntot){
fThread fi = fThread(fIn_d, fOut_d, t, dt);
//fi.print();
fi.setntot(ntot);
fi.nextTime();
fi.update();
}
bulk::bulk(float* f0_h, float t0, float dt, int ntot){
_ntot = ntot; _t = t0; _dt = dt;
_nbytes = _ntot*sizeof(float); // _fIn_d/_fOut_d hold floats, not ints
//cout << _ntot << ", " <<_nbytes << endl;
CUDA_STATUS(cudaMalloc((void**) &_fIn_d, _nbytes));
CUDA_STATUS(cudaMalloc((void**) &_fOut_d, _nbytes));
CUDA_STATUS(cudaMemcpy(_fIn_d, f0_h, _nbytes, cudaMemcpyHostToDevice));
}
bulk::~bulk(){
cudaFree((void*) _fIn_d);
cudaFree((void*) _fOut_d);
}
void bulk::nextTime(){
int nt = 32*4;
fGrid<<<(_ntot+nt-1)/nt, nt>>>(_fIn_d, _fOut_d, _t, _dt, _ntot);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess){
printf("Kernel call in bulk::nextTime:\n");
fprintf( stderr , "Error %s at line %d in file %s \n", cudaGetErrorString(err), __LINE__, __FILE__);
exit(1);
}
CUDA_STATUS(cudaDeviceSynchronize());
_t += _dt;
}
void bulk::output(float* f_h){
CUDA_STATUS(cudaMemcpy(f_h, _fOut_d, _nbytes, cudaMemcpyDeviceToHost));
}
|
eeb5243e57c7e0e41a886d52e8eed6c20a3b25ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "kernel.h"
#include "kernels.h"
#include "hip/hip_runtime.h"
#include "corecrt_math.h"
#include <stdexcept>
#include <string>
#include "utils.h"
#include "test.h"
// To let __syncthreads() pass the syntax check
#ifndef __HIPCC__
#define __HIPCC__
#endif // !__HIPCC__
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
#include <cstdio>
namespace cudaKernel {
__device__ float interpolationValue(
float l, float r, size_t lOffset, size_t totalNum) {
return l + static_cast<float>(lOffset) * (r - l) / static_cast<float>(totalNum);
}
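// interpolationMatrix: in-place expansion used when the whole result fits in a single block
// (see the host wrapper below); thread (threadIdx.x, threadIdx.y) = (group, output position)
// either copies an original sample or inserts one of the nInterpolation linearly interpolated
// values between consecutive samples of its group.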
__global__ void interpolationMatrix(
float* array, size_t size, size_t nInterpolation) {
float temp;
size_t idx_x = threadIdx.x;
size_t idx_y = threadIdx.y;
float* basePtr = array + idx_x * size;
size_t idx = idx_y / (nInterpolation + 1);
size_t lOffset = idx_y % (nInterpolation + 1);
if (lOffset == 0) {
temp = basePtr[idx];
} else {
temp = interpolationValue(
basePtr[idx], basePtr[idx + 1], lOffset, nInterpolation + 1);
}
deviceDebugPrint("%llu, %llu, %llu %llu, %f\n",
idx_x, idx_y, idx_x * size, idx, temp);
__syncthreads();
array[idx_x * blockDim.y + idx_y] = temp;
}
__global__ void interpolationMatrixOut(float* arrayOut,
const float* arrayIn, size_t size, size_t nInterpolation) {
size_t bid = blockIdx.x;
size_t tid = threadIdx.x;
const float* basePtr = arrayIn + bid * size;
size_t idx = tid / (nInterpolation + 1);
size_t lOffset = tid % (nInterpolation + 1);
if (lOffset == 0) {
arrayOut[bid * blockDim.x + tid] = basePtr[idx];
}
else {
arrayOut[bid * blockDim.x + tid] = interpolationValue(
basePtr[idx], basePtr[idx + 1], lOffset, nInterpolation + 1);
}
}
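// Host wrapper: when nGroups * (size + nInterpolation*(size-1)) fits within kMmaxBlockDim, the
// in-place single-block kernel above is used; otherwise the input is copied to a temporary
// device buffer and interpolationMatrixOut runs with one block per group. The << <1, dimBlock >> >
// spelling below was left untouched by hipify; hipcc still accepts the triple-chevron launch
// syntax, so both launch forms in this file are valid.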
void interpolation(
float* dArray, size_t nGroups, size_t size, size_t nInterpolation) {
if (nGroups * (size + nInterpolation * (size - 1)) < kMmaxBlockDim) {
dim3 dimBlock(nGroups, size + nInterpolation * (size - 1));
interpolationMatrix << <1, dimBlock >> > (
dArray, size, nInterpolation);
CUDACHECK(hipGetLastError());
}
else {
if (size + nInterpolation * (size - 1) > kMmaxBlockDim) {
throw std::runtime_error("Max result length allowed is "
+ std::to_string(kMmaxBlockDim) + "!");
}
float* tempArray;
cudaMallocAndCopy(tempArray, dArray, nGroups * size);
size_t nBlockDim = size + nInterpolation * (size - 1);
hipLaunchKernelGGL(( interpolationMatrixOut) , dim3(nGroups), dim3(nBlockDim) , 0, 0,
dArray, tempArray, size, nInterpolation);
CUDACHECK(hipGetLastError());
cudaFreeAll(tempArray);
}
}
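// Usage sketch (hypothetical sizes; dIn must be a device buffer already sized for the expanded
// result, i.e. nGroups * (size + nInterpolation*(size-1)) floats):
//   interpolation(dIn, /*nGroups=*/4, /*size=*/10, /*nInterpolation=*/3);
//   // each group now holds 10 + 3*9 = 37 samples, expanded in place.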
__global__ void interpolationOffsets(size_t* dGroupOffsets,
const size_t* dGroupOffsetsBrfore, size_t nInterpolation) {
size_t tid = threadIdx.x;
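// A group of length L grows to L + nInterpolation*(L-1) samples, so in cumulative-offset form
// offset[k+1] becomes offset[k+1]*(nInterpolation+1) - (k+1)*nInterpolation: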
dGroupOffsets[tid + 1] = dGroupOffsets[tid + 1] * (nInterpolation + 1) -
(tid + 1) * nInterpolation;
}
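// interpolationPoints: one block per group; expands the packed xyz points, RGB colors and
// per-point sizes from the pre-interpolation offsets (dGroupOffsetsBrfore) into the rescaled
// offsets, copying original samples and linearly interpolating the inserted ones.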
__global__ void interpolationPoints(float* points, float* colors, float* sizes,
const float* pointsIn, const float* colorsIn, const float* sizesIn,
const size_t* dGroupOffsetsBrfore, const size_t* dGroupOffsets,
size_t nInterpolation) {
size_t bidx = blockIdx.x;
size_t tidx = threadIdx.x;
const float* pPointsIn = pointsIn + 3 * dGroupOffsetsBrfore[bidx];
const float* pColorsIn = colorsIn + 3 * dGroupOffsetsBrfore[bidx];
const float* pSizesIn = sizesIn + dGroupOffsetsBrfore[bidx];
__syncthreads();
if (tidx < dGroupOffsets[bidx + 1] - dGroupOffsets[bidx]) {
float* pPointsOut = points + 3 * dGroupOffsets[bidx];
float* pColorsOut = colors + 3 * dGroupOffsets[bidx];
float* pSizezOut = sizes + dGroupOffsets[bidx];
size_t idx = tidx / (nInterpolation + 1);
size_t lOffset = tidx % (nInterpolation + 1);
if (lOffset == 0) {
pPointsOut[tidx * 3] = pPointsIn[3 * idx];
pPointsOut[tidx * 3 + 1] = pPointsIn[3 * idx + 1];
pPointsOut[tidx * 3 + 2] = pPointsIn[3 * idx + 2];
pColorsOut[tidx * 3] = pColorsIn[3 * idx];
pColorsOut[tidx * 3 + 1] = pColorsIn[3 * idx + 1];
pColorsOut[tidx * 3 + 2] = pColorsIn[3 * idx + 2];
pSizezOut[tidx] = pSizesIn[idx];
}
else {
pPointsOut[tidx * 3] = interpolationValue(
pPointsIn[3 * idx],
pPointsIn[3 * idx + 3], lOffset, nInterpolation + 1);
pPointsOut[tidx * 3 + 1] = interpolationValue(
pPointsIn[3 * idx + 1],
pPointsIn[3 * idx + 4], lOffset, nInterpolation + 1);
pPointsOut[tidx * 3 + 2] = interpolationValue(
pPointsIn[3 * idx + 2],
pPointsIn[3 * idx + 5], lOffset, nInterpolation + 1);
pColorsOut[tidx * 3] = interpolationValue(
pColorsIn[3 * idx],
pColorsIn[3 * idx + 3], lOffset, nInterpolation + 1);
pColorsOut[tidx * 3 + 1] = interpolationValue(
pColorsIn[3 * idx + 1],
pColorsIn[3 * idx + 4], lOffset, nInterpolation + 1);
pColorsOut[tidx * 3 + 2] = interpolationValue(
pColorsIn[3 * idx + 2],
pColorsIn[3 * idx + 5], lOffset, nInterpolation + 1);
pSizezOut[tidx] = interpolationValue(
pSizesIn[idx],
pSizesIn[idx + 1], lOffset, nInterpolation + 1);
}
}
}
void interpolation(
float* dPoints, float* dColors, float* dSizes, size_t* dGroupOffsets,
size_t nGroups, size_t maxSize, size_t nInterpolation) {
float *dPointsTemp, *dColorsTemp, *dSizesTemp;
size_t *dGroupOffsetsTemp;
cudaMallocAndCopy(dPointsTemp, dPoints, 3 * nGroups * maxSize);
cudaMallocAndCopy(dColorsTemp, dColors, 3 * nGroups * maxSize);
cudaMallocAndCopy(dSizesTemp, dSizes, nGroups * maxSize);
cudaMallocAndCopy(dGroupOffsetsTemp, dGroupOffsets, nGroups + 1);
CUDACHECK(hipDeviceSynchronize());
/*printSplitLine("points");
show(dPointsTemp, dGroupOffsets, nGroups, 3);
printSplitLine("colors");
show(dColorsTemp, dGroupOffsets, nGroups, 3);
printSplitLine("sizes");
show(dSizesTemp, dGroupOffsets, nGroups, 1);
printSplitLine("end");
printf("%llu %llu %llu\n",
nGroups, maxSize, maxSize + nInterpolation * (maxSize - 1));*/
interpolationOffsets << <1, nGroups >> > (
dGroupOffsets, dGroupOffsetsTemp, nInterpolation);
hipLaunchKernelGGL(( interpolationPoints) , dim3(nGroups), dim3(maxSize * (nInterpolation + 1)), 0, 0,
dPoints, dColors, dSizes, dPointsTemp, dColorsTemp,
dSizesTemp, dGroupOffsetsTemp, dGroupOffsets, nInterpolation);
CUDACHECK(hipGetLastError());
cudaFreeAll(dPointsTemp, dColorsTemp, dSizesTemp, dGroupOffsetsTemp);
CUDACHECK(hipDeviceSynchronize());
}
} | eeb5243e57c7e0e41a886d52e8eed6c20a3b25ea.cu | #include "kernel.h"
#include "kernels.h"
#include "cuda_runtime.h"
#include "corecrt_math.h"
#include <stdexcept>
#include <string>
#include "utils.h"
#include "test.h"
// To let __syncthreads() pass the syntax check
#ifndef __CUDACC__
#define __CUDACC__
#endif // !__CUDACC__
#include "device_launch_parameters.h"
#include "device_functions.h"
#include <cstdio>
namespace cudaKernel {
__device__ float interpolationValue(
float l, float r, size_t lOffset, size_t totalNum) {
return l + static_cast<float>(lOffset) * (r - l) / static_cast<float>(totalNum);
}
__global__ void interpolationMatrix(
float* array, size_t size, size_t nInterpolation) {
float temp;
size_t idx_x = threadIdx.x;
size_t idx_y = threadIdx.y;
float* basePtr = array + idx_x * size;
size_t idx = idx_y / (nInterpolation + 1);
size_t lOffset = idx_y % (nInterpolation + 1);
if (lOffset == 0) {
temp = basePtr[idx];
} else {
temp = interpolationValue(
basePtr[idx], basePtr[idx + 1], lOffset, nInterpolation + 1);
}
deviceDebugPrint("%llu, %llu, %llu %llu, %f\n",
idx_x, idx_y, idx_x * size, idx, temp);
__syncthreads();
array[idx_x * blockDim.y + idx_y] = temp;
}
__global__ void interpolationMatrixOut(float* arrayOut,
const float* arrayIn, size_t size, size_t nInterpolation) {
size_t bid = blockIdx.x;
size_t tid = threadIdx.x;
const float* basePtr = arrayIn + bid * size;
size_t idx = tid / (nInterpolation + 1);
size_t lOffset = tid % (nInterpolation + 1);
if (lOffset == 0) {
arrayOut[bid * blockDim.x + tid] = basePtr[idx];
}
else {
arrayOut[bid * blockDim.x + tid] = interpolationValue(
basePtr[idx], basePtr[idx + 1], lOffset, nInterpolation + 1);
}
}
void interpolation(
float* dArray, size_t nGroups, size_t size, size_t nInterpolation) {
if (nGroups * (size + nInterpolation * (size - 1)) < kMmaxBlockDim) {
dim3 dimBlock(nGroups, size + nInterpolation * (size - 1));
interpolationMatrix << <1, dimBlock >> > (
dArray, size, nInterpolation);
CUDACHECK(cudaGetLastError());
}
else {
if (size + nInterpolation * (size - 1) > kMmaxBlockDim) {
throw std::runtime_error("Max result length allowed is "
+ std::to_string(kMmaxBlockDim) + "!");
}
float* tempArray;
cudaMallocAndCopy(tempArray, dArray, nGroups * size);
size_t nBlockDim = size + nInterpolation * (size - 1);
interpolationMatrixOut <<<nGroups, nBlockDim >>> (
dArray, tempArray, size, nInterpolation);
CUDACHECK(cudaGetLastError());
cudaFreeAll(tempArray);
}
}
__global__ void interpolationOffsets(size_t* dGroupOffsets,
const size_t* dGroupOffsetsBrfore, size_t nInterpolation) {
size_t tid = threadIdx.x;
dGroupOffsets[tid + 1] = dGroupOffsets[tid + 1] * (nInterpolation + 1) -
(tid + 1) * nInterpolation;
}
__global__ void interpolationPoints(float* points, float* colors, float* sizes,
const float* pointsIn, const float* colorsIn, const float* sizesIn,
const size_t* dGroupOffsetsBrfore, const size_t* dGroupOffsets,
size_t nInterpolation) {
size_t bidx = blockIdx.x;
size_t tidx = threadIdx.x;
const float* pPointsIn = pointsIn + 3 * dGroupOffsetsBrfore[bidx];
const float* pColorsIn = colorsIn + 3 * dGroupOffsetsBrfore[bidx];
const float* pSizesIn = sizesIn + dGroupOffsetsBrfore[bidx];
__syncthreads();
if (tidx < dGroupOffsets[bidx + 1] - dGroupOffsets[bidx]) {
float* pPointsOut = points + 3 * dGroupOffsets[bidx];
float* pColorsOut = colors + 3 * dGroupOffsets[bidx];
float* pSizezOut = sizes + dGroupOffsets[bidx];
size_t idx = tidx / (nInterpolation + 1);
size_t lOffset = tidx % (nInterpolation + 1);
if (lOffset == 0) {
pPointsOut[tidx * 3] = pPointsIn[3 * idx];
pPointsOut[tidx * 3 + 1] = pPointsIn[3 * idx + 1];
pPointsOut[tidx * 3 + 2] = pPointsIn[3 * idx + 2];
pColorsOut[tidx * 3] = pColorsIn[3 * idx];
pColorsOut[tidx * 3 + 1] = pColorsIn[3 * idx + 1];
pColorsOut[tidx * 3 + 2] = pColorsIn[3 * idx + 2];
pSizezOut[tidx] = pSizesIn[idx];
}
else {
pPointsOut[tidx * 3] = interpolationValue(
pPointsIn[3 * idx],
pPointsIn[3 * idx + 3], lOffset, nInterpolation + 1);
pPointsOut[tidx * 3 + 1] = interpolationValue(
pPointsIn[3 * idx + 1],
pPointsIn[3 * idx + 4], lOffset, nInterpolation + 1);
pPointsOut[tidx * 3 + 2] = interpolationValue(
pPointsIn[3 * idx + 2],
pPointsIn[3 * idx + 5], lOffset, nInterpolation + 1);
pColorsOut[tidx * 3] = interpolationValue(
pColorsIn[3 * idx],
pColorsIn[3 * idx + 3], lOffset, nInterpolation + 1);
pColorsOut[tidx * 3 + 1] = interpolationValue(
pColorsIn[3 * idx + 1],
pColorsIn[3 * idx + 4], lOffset, nInterpolation + 1);
pColorsOut[tidx * 3 + 2] = interpolationValue(
pColorsIn[3 * idx + 2],
pColorsIn[3 * idx + 5], lOffset, nInterpolation + 1);
pSizezOut[tidx] = interpolationValue(
pSizesIn[idx],
pSizesIn[idx + 1], lOffset, nInterpolation + 1);
}
}
}
void interpolation(
float* dPoints, float* dColors, float* dSizes, size_t* dGroupOffsets,
size_t nGroups, size_t maxSize, size_t nInterpolation) {
float *dPointsTemp, *dColorsTemp, *dSizesTemp;
size_t *dGroupOffsetsTemp;
cudaMallocAndCopy(dPointsTemp, dPoints, 3 * nGroups * maxSize);
cudaMallocAndCopy(dColorsTemp, dColors, 3 * nGroups * maxSize);
cudaMallocAndCopy(dSizesTemp, dSizes, nGroups * maxSize);
cudaMallocAndCopy(dGroupOffsetsTemp, dGroupOffsets, nGroups + 1);
CUDACHECK(cudaDeviceSynchronize());
/*printSplitLine("points");
show(dPointsTemp, dGroupOffsets, nGroups, 3);
printSplitLine("colors");
show(dColorsTemp, dGroupOffsets, nGroups, 3);
printSplitLine("sizes");
show(dSizesTemp, dGroupOffsets, nGroups, 1);
printSplitLine("end");
printf("%llu %llu %llu\n",
nGroups, maxSize, maxSize + nInterpolation * (maxSize - 1));*/
interpolationOffsets << <1, nGroups >> > (
dGroupOffsets, dGroupOffsetsTemp, nInterpolation);
interpolationPoints <<<nGroups, maxSize * (nInterpolation + 1)>>> (
dPoints, dColors, dSizes, dPointsTemp, dColorsTemp,
dSizesTemp, dGroupOffsetsTemp, dGroupOffsets, nInterpolation);
CUDACHECK(cudaGetLastError());
cudaFreeAll(dPointsTemp, dColorsTemp, dSizesTemp, dGroupOffsetsTemp);
CUDACHECK(cudaDeviceSynchronize());
}
} |
0598a74945a7c599d9bb8ccf4c17fdec5c6ee334.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "wb.h"
#define BLOCK_SIZE 512 //TODO: You can change this
#define wbCheck(ans) gpuAssert((ans), __FILE__, __LINE__)
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) {
if (code != hipSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
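// exclusiveScan: work-efficient tree-based scan over one block held in shared memory. The
// input is shifted right by one element (element 0 gets 0) so the inclusive scan of the shifted
// data is the exclusive scan of the original, and each block's last partial sum is written to S
// for the recursive pass below.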
__global__ void exclusiveScan(const int *input, int *output, int *S, int N) {
extern __shared__ int sharedInput[];
unsigned int tx = threadIdx.x;
int i = tx + blockIdx.x * blockDim.x;
if (i < N && i != 0) {
sharedInput[tx] = input[i - 1];
} else {
sharedInput[tx] = 0;
}
// Down phase
for (unsigned int stride = 1; stride < blockDim.x; stride <<= 1) {
__syncthreads();
int idx = (tx + 1) * 2 * stride - 1;
if (idx < blockDim.x) {
sharedInput[idx] += sharedInput[idx - stride];
}
}
// Up phase
for (int stride = blockDim.x / 4; stride > 0; stride >>= 1) {
__syncthreads();
int idx = (tx + 1) * 2 * stride - 1;
if (idx + stride < blockDim.x) {
sharedInput[idx + stride] += sharedInput[idx];
}
}
__syncthreads();
if (i < N) {
output[i] = sharedInput[tx];
if (S != nullptr && tx == (BLOCK_SIZE - 1)) {
S[blockIdx.x] = sharedInput[tx];
}
}
}
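// auxMerge: adds the scanned per-block sums back onto each BLOCK_SIZE-wide slice of the output,
// turning the independent per-block scans into a single global scan.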
__global__ void auxMerge(const int *offsets, int *input, int N) {
const unsigned int tx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int startIdx = tx * BLOCK_SIZE;
for (unsigned int i = 0; i < BLOCK_SIZE; i++) {
unsigned int idx = i + startIdx;
if (idx < N) {
input[idx] += offsets[tx];
}
}
}
void recursiveScan(int *input, int *output, int numInputs) {
const int scanGridSize = (numInputs + BLOCK_SIZE - 1) / BLOCK_SIZE;
if (scanGridSize == 1) {
hipLaunchKernelGGL(( exclusiveScan), dim3(scanGridSize), dim3(BLOCK_SIZE), BLOCK_SIZE * sizeof(int), 0, input, output, nullptr, numInputs);
wbCheck(hipDeviceSynchronize());
return;
} else {
int *aux;
int *scannedAux;
wbCheck(hipMalloc((void **) &aux, scanGridSize * sizeof(int)));
wbCheck(hipMalloc((void **) &scannedAux, scanGridSize * sizeof(int)));
hipLaunchKernelGGL(( exclusiveScan), dim3(scanGridSize), dim3(BLOCK_SIZE), BLOCK_SIZE * sizeof(int), 0, input, output, aux, numInputs);
wbCheck(hipDeviceSynchronize());
recursiveScan(aux, scannedAux, scanGridSize);
int mergeGrids = (scanGridSize + BLOCK_SIZE - 1) / BLOCK_SIZE;
hipLaunchKernelGGL(( auxMerge), dim3(mergeGrids), dim3(BLOCK_SIZE), 0, 0, scannedAux, output, numInputs);
wbCheck(hipDeviceSynchronize());
hipFree(scannedAux);
hipFree(aux);
}
}
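// Radix-sort building blocks: checkBits writes 1 where the current bit of the key is 0,
// recursiveScan exclusive-scans those flags, and scatter performs the stable split (keys with a
// 0 bit keep their order in front, keys with a 1 bit follow). sort() below runs 16 such passes,
// so only the low 16 bits of each int take part in the ordering.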
__global__ void checkBits(const int *__restrict__ input, int *__restrict__ output, const int N, const int radix) {
extern __shared__ int sharedInput[];
unsigned int tx = threadIdx.x;
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < N) {
sharedInput[tx] = input[idx];
output[idx] = ~(sharedInput[tx] >> radix) & 1;
}
}
__global__ void scatter(const int *__restrict__ input, const int *__restrict__ bitArray, const int *__restrict__ scannedBits, int *__restrict__ output, const int N) {
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < N) {
unsigned int totalFalses = bitArray[N - 1] + scannedBits[N - 1];
unsigned int target = idx - scannedBits[idx] + totalFalses;
unsigned int destination = bitArray[idx] ? scannedBits[idx] : target;
output[destination] = input[idx];
}
}
__global__ void copyMemory(const int *__restrict__ input, int *__restrict output, const int N) {
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < N) {
output[idx] = input[idx];
}
}
void sort(int *d_deviceInput, int *d_deviceOutput, int numElements) {
int *bitArray;
int *scannedBits;
hipMalloc((void **) &bitArray, numElements * sizeof(int));
hipMalloc((void **) &scannedBits, numElements * sizeof(int));
dim3 blockSize(BLOCK_SIZE, 1, 1);
dim3 gridSize((numElements + BLOCK_SIZE - 1) / BLOCK_SIZE, 1, 1);
for (int i = 0; i < 16; i++) {
hipLaunchKernelGGL(( checkBits), dim3(gridSize), dim3(blockSize), BLOCK_SIZE * sizeof(int), 0, d_deviceInput, bitArray, numElements, i);
hipDeviceSynchronize();
recursiveScan(bitArray, scannedBits, numElements);
hipLaunchKernelGGL(( scatter), dim3(gridSize), dim3(blockSize), 0, 0, d_deviceInput, bitArray, scannedBits, d_deviceOutput, numElements);
hipDeviceSynchronize();
if (i != 15) {
hipLaunchKernelGGL(( copyMemory), dim3(gridSize), dim3(blockSize), 0, 0, d_deviceOutput, d_deviceInput, numElements);
}
}
hipFree(bitArray);
hipFree(scannedBits);
}
int main(int argc, char **argv) {
wbArg_t args;
int *hostInput; // The input 1D list
int *hostOutput; // The output list
int *deviceInput;
int *deviceOutput;
int numElements; // number of elements in the list
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (int *) wbImport(wbArg_getInputFile(args, 0), &numElements, "integral_vector");
hipHostMalloc(&hostOutput, numElements * sizeof(int), hipHostMallocDefault);
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The number of input elements in the input is ", numElements);
wbTime_start(GPU, "Allocating GPU memory.");
wbCheck(hipMalloc((void **) &deviceInput, numElements * sizeof(int)));
wbCheck(hipMalloc((void **) &deviceOutput, numElements * sizeof(int)));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Clearing output memory.");
wbCheck(hipMemset(deviceOutput, 0, numElements * sizeof(int)));
wbTime_stop(GPU, "Clearing output memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
wbCheck(hipMemcpy(deviceInput, hostInput, numElements * sizeof(int),
hipMemcpyHostToDevice));
wbTime_stop(GPU, "Copying input memory to the GPU.");
wbTime_start(Compute, "Performing CUDA computation");
sort(deviceInput, deviceOutput, numElements);
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
wbCheck(hipMemcpy(hostOutput, deviceOutput, numElements * sizeof(int), hipMemcpyDeviceToHost));
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
hipFree(deviceInput);
hipFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, numElements);
free(hostInput);
hipHostFree(hostOutput);
#if LAB_DEBUG
system("pause");
#endif
return 0;
}
| 0598a74945a7c599d9bb8ccf4c17fdec5c6ee334.cu | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "wb.h"
#define BLOCK_SIZE 512 //TODO: You can change this
#define wbCheck(ans) gpuAssert((ans), __FILE__, __LINE__)
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) {
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void exclusiveScan(const int *input, int *output, int *S, int N) {
extern __shared__ int sharedInput[];
unsigned int tx = threadIdx.x;
int i = tx + blockIdx.x * blockDim.x;
if (i < N && i != 0) {
sharedInput[tx] = input[i - 1];
} else {
sharedInput[tx] = 0;
}
// Down phase
for (unsigned int stride = 1; stride < blockDim.x; stride <<= 1) {
__syncthreads();
int idx = (tx + 1) * 2 * stride - 1;
if (idx < blockDim.x) {
sharedInput[idx] += sharedInput[idx - stride];
}
}
// Up phase
for (int stride = blockDim.x / 4; stride > 0; stride >>= 1) {
__syncthreads();
int idx = (tx + 1) * 2 * stride - 1;
if (idx + stride < blockDim.x) {
sharedInput[idx + stride] += sharedInput[idx];
}
}
__syncthreads();
if (i < N) {
output[i] = sharedInput[tx];
if (S != nullptr && tx == (BLOCK_SIZE - 1)) {
S[blockIdx.x] = sharedInput[tx];
}
}
}
__global__ void auxMerge(const int *offsets, int *input, int N) {
const unsigned int tx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int startIdx = tx * BLOCK_SIZE;
for (unsigned int i = 0; i < BLOCK_SIZE; i++) {
unsigned int idx = i + startIdx;
if (idx < N) {
input[idx] += offsets[tx];
}
}
}
void recursiveScan(int *input, int *output, int numInputs) {
const int scanGridSize = (numInputs + BLOCK_SIZE - 1) / BLOCK_SIZE;
if (scanGridSize == 1) {
exclusiveScan<<<scanGridSize, BLOCK_SIZE, BLOCK_SIZE * sizeof(int)>>>(input, output, nullptr, numInputs);
wbCheck(cudaDeviceSynchronize());
return;
} else {
int *aux;
int *scannedAux;
wbCheck(cudaMalloc((void **) &aux, scanGridSize * sizeof(int)));
wbCheck(cudaMalloc((void **) &scannedAux, scanGridSize * sizeof(int)));
exclusiveScan<<<scanGridSize, BLOCK_SIZE, BLOCK_SIZE * sizeof(int)>>>(input, output, aux, numInputs);
wbCheck(cudaDeviceSynchronize());
recursiveScan(aux, scannedAux, scanGridSize);
int mergeGrids = (scanGridSize + BLOCK_SIZE - 1) / BLOCK_SIZE;
auxMerge<<<mergeGrids, BLOCK_SIZE>>>(scannedAux, output, numInputs);
wbCheck(cudaDeviceSynchronize());
cudaFree(scannedAux);
cudaFree(aux);
}
}
__global__ void checkBits(const int *__restrict__ input, int *__restrict__ output, const int N, const int radix) {
extern __shared__ int sharedInput[];
unsigned int tx = threadIdx.x;
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < N) {
sharedInput[tx] = input[idx];
output[idx] = ~(sharedInput[tx] >> radix) & 1;
}
}
__global__ void scatter(const int *__restrict__ input, const int *__restrict__ bitArray, const int *__restrict__ scannedBits, int *__restrict__ output, const int N) {
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < N) {
unsigned int totalFalses = bitArray[N - 1] + scannedBits[N - 1];
unsigned int target = idx - scannedBits[idx] + totalFalses;
unsigned int destination = bitArray[idx] ? scannedBits[idx] : target;
output[destination] = input[idx];
}
}
__global__ void copyMemory(const int *__restrict__ input, int *__restrict output, const int N) {
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < N) {
output[idx] = input[idx];
}
}
void sort(int *d_deviceInput, int *d_deviceOutput, int numElements) {
int *bitArray;
int *scannedBits;
cudaMalloc((void **) &bitArray, numElements * sizeof(int));
cudaMalloc((void **) &scannedBits, numElements * sizeof(int));
dim3 blockSize(BLOCK_SIZE, 1, 1);
dim3 gridSize((numElements + BLOCK_SIZE - 1) / BLOCK_SIZE, 1, 1);
for (int i = 0; i < 16; i++) {
checkBits<<<gridSize, blockSize, BLOCK_SIZE * sizeof(int)>>>(d_deviceInput, bitArray, numElements, i);
cudaDeviceSynchronize();
recursiveScan(bitArray, scannedBits, numElements);
scatter<<<gridSize, blockSize>>>(d_deviceInput, bitArray, scannedBits, d_deviceOutput, numElements);
cudaDeviceSynchronize();
if (i != 15) {
copyMemory<<<gridSize, blockSize>>>(d_deviceOutput, d_deviceInput, numElements);
}
}
cudaFree(bitArray);
cudaFree(scannedBits);
}
int main(int argc, char **argv) {
wbArg_t args;
int *hostInput; // The input 1D list
int *hostOutput; // The output list
int *deviceInput;
int *deviceOutput;
int numElements; // number of elements in the list
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (int *) wbImport(wbArg_getInputFile(args, 0), &numElements, "integral_vector");
cudaHostAlloc(&hostOutput, numElements * sizeof(int), cudaHostAllocDefault);
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The number of input elements in the input is ", numElements);
wbTime_start(GPU, "Allocating GPU memory.");
wbCheck(cudaMalloc((void **) &deviceInput, numElements * sizeof(int)));
wbCheck(cudaMalloc((void **) &deviceOutput, numElements * sizeof(int)));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Clearing output memory.");
wbCheck(cudaMemset(deviceOutput, 0, numElements * sizeof(int)));
wbTime_stop(GPU, "Clearing output memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
wbCheck(cudaMemcpy(deviceInput, hostInput, numElements * sizeof(int),
cudaMemcpyHostToDevice));
wbTime_stop(GPU, "Copying input memory to the GPU.");
wbTime_start(Compute, "Performing CUDA computation");
sort(deviceInput, deviceOutput, numElements);
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
wbCheck(cudaMemcpy(hostOutput, deviceOutput, numElements * sizeof(int), cudaMemcpyDeviceToHost));
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
cudaFree(deviceInput);
cudaFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, numElements);
free(hostInput);
cudaFreeHost(hostOutput);
#if LAB_DEBUG
system("pause");
#endif
return 0;
}
|
2204a70b0f9f3014fed22732445ebe4e53d69ff3.hip | // !!! This is a file automatically generated by hipify!!!
//******************************************************************
//cuSTSG reconstructs high-quality NDVI time series data (MODIS/SPOT) based on STSG.
//
//This file is the source code of the first version of cuSTSG.
//It is a parallel implementation that runs on the GPU.
//
//Coded by Yang Xue
// Reference: Xue Yang, Jin Chen, Qingfeng Guan, Huan Gao, and Wei Xia.
// Enhanced Spatial-Temporal Savitzky-Golay Method for Reconstructing High-quality NDVI Time Series: Reduced Sensitivity
// to Quality Flags and Improved Computational Efficiency. Transactions on Geoscience and Remote Sensing
//******************************************************************
#include <iostream>
#include <algorithm>
#include <fstream>
#include <chrono>
#include <hip/hip_runtime.h>
#include <gdal/gdal_priv.h>
#include "Filter.h"
using namespace std;
int main(int argc, char *argv[])
{
GDALAllRegister();
//parameters
if (argc != 2)
{
cout << "No parameter file!" << endl;
return 1;
}
ifstream parameter(argv[1]);
if (!parameter)
{
cout << "Can't open parameter file!" << endl;
return 1;
}
int* Years = nullptr;
string NDVI_path, Reliability_path, STSG_Test_path;
float cosyear, sampcorr;
int win_year, win, snow_address, n_Years;
string par;
while (getline(parameter, par))
{
if (par.substr(0, 2) == "//" || par == "")
continue;
for (int i = 0; i < par.size(); )
{
if (isspace(par[i]))
par.erase(i,1);
else
i++;
}
if (par.substr(0, par.find("=")) == "Years")
{
vector<int> year;
while (par.rfind(",") < par.size())
{
year.push_back(stoi(par.substr(par.rfind(",") + 1)));
par = par.substr(0, par.rfind(","));
}
year.push_back(stoi(par.substr(par.rfind("=") + 1)));
n_Years = year.size();
Years = new int[n_Years];
for (int i = 0; i < n_Years; i++)
Years[i] = year[n_Years - i - 1];
}
else if (par.substr(0, par.find("=")) == "NDVI_path")
NDVI_path = par.substr(par.find("=") + 1);
else if (par.substr(0, par.find("=")) == "Reliability_path")
Reliability_path = par.substr(par.find("=") + 1);
else if (par.substr(0, par.find("=")) == "STSG_Test_path")
STSG_Test_path = par.substr(par.find("=") + 1);
else if (par.substr(0, par.find("=")) == "cosyear")
cosyear = stof(par.substr(par.find("=") + 1));
else if (par.substr(0, par.find("=")) == "win_year")
win_year = stoi(par.substr(par.find("=") + 1));
else if (par.substr(0, par.find("=")) == "win")
win = stoi(par.substr(par.find("=") + 1));
else if (par.substr(0, par.find("=")) == "sampcorr")
sampcorr = stof(par.substr(par.find("=") + 1));
else if (par.substr(0, par.find("=")) == "snow_address")
snow_address = stoi(par.substr(par.find("=") + 1));
}
parameter.close();
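// Example of a parameter file accepted by the parser above (hypothetical paths and values;
// the key names are the ones matched above, whitespace is stripped, and '//' lines are ignored):
//   // cuSTSG parameters
//   Years = 2015, 2016, 2017
//   NDVI_path = /data/NDVI_
//   Reliability_path = /data/QA_
//   STSG_Test_path = /data/STSG_out_
//   cosyear = 0.12
//   win_year = 2
//   win = 2
//   sampcorr = 0.9
//   snow_address = 1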
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
cout << "Device name:" << prop.name << endl;
size_t const totalGlobalMem = ::min(prop.totalGlobalMem, 4UL*1024*1024*1024);
cout << "Device global memory used: " << totalGlobalMem / 1024 / 1024 << " MB" << endl;
vector<GDALDataset*> NDVI(n_Years);
vector<GDALDataset*> QA(n_Years);
int n_X, n_Y, n_B;
GDALDataType type_NDVI, type_QA;
for (int i = 0; i < n_Years; i++)
{
string FileName = NDVI_path + to_string(Years[i]);
NDVI[i] = (GDALDataset*)GDALOpen(FileName.c_str(), GA_ReadOnly);
if (i == 0)
{
n_X = NDVI[i]->GetRasterXSize();
n_Y = NDVI[i]->GetRasterYSize();
n_B = NDVI[i]->GetRasterCount();
type_NDVI = NDVI[i]->GetRasterBand(1)->GetRasterDataType();
}
FileName = Reliability_path + to_string(Years[i]);
QA[i] = (GDALDataset*)GDALOpen(FileName.c_str(), GA_ReadOnly);
if (i == 0)
type_QA = QA[i]->GetRasterBand(1)->GetRasterDataType();
}
cout << "Execution start " << endl;
size_t PerYSize = n_X*n_B *(n_Years * sizeof(short) + n_Years * sizeof(unsigned char) + 2 * n_Years * sizeof(float) + sizeof(int) + sizeof(float) + n_Years * sizeof(float)) + n_X*(2 * win + 1)*(2 * win + 1) *(7 * sizeof(float) + 3 * sizeof(int));
if (totalGlobalMem <= 2 * win*n_X*n_B* (n_Years * sizeof(short) + n_Years * sizeof(unsigned char) + 2 * n_Years * sizeof(float) + sizeof(float)) + n_X*n_Y*n_B*n_Years*sizeof(float))
{
cout << "Size of vector_out is larger than total device global memory. Exit!" << endl;
return 1;
}
size_t PerStep = (totalGlobalMem - 2 * win*n_X*n_B* (n_Years * sizeof(short) + n_Years * sizeof(unsigned char) + 2 * n_Years * sizeof(float) + sizeof(float)) - n_X*n_Y*n_B*n_Years*sizeof(float)) / PerYSize;
int Loops = 1;
if (PerStep < n_Y)
{
Loops = n_Y / PerStep + 1;
PerStep = n_Y / Loops + 1;
}
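// The scene is processed in horizontal strips of PerStep rows, sized above so that the
// per-strip device buffers plus the full output volume fit in the (capped) device global
// memory; each strip is later read with a win-row halo above and below (Buffer_Up/Buffer_Dn).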
float *d_vector_out;
size_t nBytes = n_X*n_Y*n_B*n_Years * sizeof(float);
hipMalloc((void**)&d_vector_out, nBytes);
hipMemset((void*)d_vector_out, 0, nBytes);
nBytes = win*n_X*(2 * win + 1)*(2 * win + 1) * 4 * sizeof(float);
float *res = (float*)malloc(nBytes);
memset((void*)res, 0, nBytes);
int last_Buffer_Dn = 0;
printf("Number of loops: %d\n", Loops);
for (int i = 1, StartY = 0; i <= Loops && StartY < n_Y; i++, StartY += PerStep)
{
cout << "Loops " << i << endl;
if (i == Loops)
PerStep = n_Y - StartY;
int Buffer_Up = 0;
int Buffer_Dn = 0;
if (StartY + PerStep < n_Y - win)
Buffer_Dn = win;
else
Buffer_Dn = n_Y - PerStep - StartY;
if (StartY >= win)
Buffer_Up = win;
else
Buffer_Up = StartY;
int blkwidth = 16;
int blkheight = 16;
dim3 blocks(blkwidth, blkheight);
dim3 grids(n_X % blkwidth == 0 ? n_X / blkwidth : n_X / blkwidth + 1, (PerStep + Buffer_Up + Buffer_Dn) % blkheight == 0 ? (PerStep + Buffer_Up + Buffer_Dn) / blkheight : (PerStep + Buffer_Up + Buffer_Dn) / blkheight + 1);
short *img_NDVI = new short[(PerStep + Buffer_Up + Buffer_Dn)*n_X*n_B*n_Years];
unsigned char *img_QA = new unsigned char[(PerStep + Buffer_Up + Buffer_Dn)*n_X*n_B*n_Years];
for (int i = 0; i < n_Years; i++)
{
NDVI[i]->RasterIO(GF_Read, 0, StartY - Buffer_Up, n_X, (PerStep + Buffer_Up + Buffer_Dn), &img_NDVI[i*(PerStep + Buffer_Up + Buffer_Dn)*n_X*n_B], n_X, (PerStep + Buffer_Up + Buffer_Dn), type_NDVI, n_B, nullptr, 0, 0, 0);
QA[i]->RasterIO(GF_Read, 0, StartY - Buffer_Up, n_X, (PerStep + Buffer_Up + Buffer_Dn), &img_QA[i*(PerStep + Buffer_Up + Buffer_Dn)*n_X*n_B], n_X, (PerStep + Buffer_Up + Buffer_Dn), type_QA, n_B, nullptr, 0, 0, 0);
}
short *d_imgNDVI;
nBytes = (PerStep + Buffer_Up + Buffer_Dn)*n_X*n_B*n_Years * sizeof(short);
hipMalloc((void**)&d_imgNDVI, nBytes);
hipMemcpy((void*)d_imgNDVI, (void*)img_NDVI, nBytes, hipMemcpyHostToDevice);
unsigned char *d_imgQA;
nBytes = (PerStep + Buffer_Up + Buffer_Dn)*n_X*n_B*n_Years * sizeof(unsigned char);
hipMalloc((void**)&d_imgQA, nBytes);
hipMemcpy((void*)d_imgQA, (void*)img_QA, nBytes, hipMemcpyHostToDevice);
float *d_img_NDVI, *d_img_QA;
nBytes = (PerStep + Buffer_Up + Buffer_Dn)*n_X*n_B*n_Years * sizeof(float);
hipMalloc((void**)&d_img_NDVI, nBytes);
hipMalloc((void**)&d_img_QA, nBytes);
hipMemset((void*)d_img_NDVI, 0, nBytes);
hipMemset((void*)d_img_QA, 0, nBytes);
float *d_NDVI_Reference, *d_res;
nBytes = (PerStep + Buffer_Up + Buffer_Dn)*n_X*n_B * sizeof(float);
hipMalloc((void**)&d_NDVI_Reference, nBytes);
hipMemset((void*)d_NDVI_Reference, 0, nBytes);
nBytes = (PerStep + Buffer_Dn)*n_X*(2 * win + 1)*(2 * win + 1) * 4 * sizeof(float);
hipMalloc((void**)&d_res, nBytes);
hipMemset((void*)d_res, 0, nBytes);
nBytes = last_Buffer_Dn*n_X*(2 * win + 1)*(2 * win + 1) * 4 * sizeof(float);
hipMemcpy((void*)d_res, (void*)res, nBytes, hipMemcpyHostToDevice);
int *d_res_vec_res1;
nBytes = (PerStep + Buffer_Up + Buffer_Dn)*n_X*n_B * sizeof(int);
hipMalloc((void**)&d_res_vec_res1, nBytes);
hipMemset((void*)d_res_vec_res1, 0, nBytes);
float *d_vector_in, *d_res_3;
nBytes = PerStep*n_X* n_B * sizeof(float);
hipMalloc((void**)&d_vector_in, nBytes);
hipMemset((void*)d_vector_in, 0, nBytes);
nBytes = PerStep*n_X*(2 * win + 1)*(2 * win + 1) * 3 * sizeof(float);
hipMalloc((void**)&d_res_3, nBytes);//(slope_intercept(2);corr_similar;)
hipMemset((void*)d_res_3, 0, nBytes);
int *d_index;
nBytes = PerStep*n_X*(2 * win + 1)*(2 * win + 1) * 3 * sizeof(int);
hipMalloc((void**)&d_index, nBytes);//(similar_index(2);new_corr;)
hipMemset((void*)d_index, 0, nBytes);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
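// Per-strip kernel pipeline (the kernels are presumably declared in Filter.h; their definitions
// are not shown here): Short_to_Float converts the raw short NDVI and byte QA rasters to float,
// Generate_NDVI_reference builds the reference NDVI curve per pixel, Compute_d_res fills the
// neighbourhood statistics in d_res, and STSG_filter writes the filtered series into d_vector_out.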
hipLaunchKernelGGL(( Short_to_Float) , dim3(grids), dim3(blocks) , 0, 0, d_imgNDVI, d_imgQA, n_X, (PerStep + Buffer_Up + Buffer_Dn), n_B, n_Years, d_img_NDVI, d_img_QA);
hipLaunchKernelGGL(( Generate_NDVI_reference) , dim3(grids), dim3(blocks) , 0, 0, cosyear, win_year, d_img_NDVI, d_img_QA, n_X, (PerStep + Buffer_Up + Buffer_Dn), n_B, n_Years, d_NDVI_Reference, d_res_3, d_res_vec_res1);
nBytes = PerStep*n_X*(2 * win + 1)*(2 * win + 1) * 3 * sizeof(float);
hipMemset((void*)d_res_3, 0, nBytes);
hipLaunchKernelGGL(( Compute_d_res) , dim3(grids), dim3(blocks) , 0, 0, d_img_NDVI, d_img_QA, d_NDVI_Reference, StartY, n_Y, Buffer_Up, Buffer_Dn, n_X, (PerStep + Buffer_Up + Buffer_Dn), n_B, n_Years, win, d_res);
hipLaunchKernelGGL(( STSG_filter) , dim3(grids), dim3(blocks) , 0, 0, d_img_NDVI, d_img_QA, d_NDVI_Reference, StartY, n_Y, Buffer_Up, Buffer_Dn, n_X, PerStep, n_B, n_Years, win, sampcorr, snow_address, d_vector_out, d_vector_in, d_res, d_res_3, d_index);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
cout << "Total kernel time: " << time * 1e-9f << " s" << endl;
nBytes = win*n_X*(2 * win + 1)*(2 * win + 1) * 4 * sizeof(float);
memset((void*)res, 0, nBytes);
nBytes = Buffer_Dn*n_X*(2 * win + 1)*(2 * win + 1) * 4 * sizeof(float);
hipMemcpy((void*)res, (void*)&d_res[(PerStep + Buffer_Dn - win)*n_X*(2 * win + 1)*(2 * win + 1) * 4], nBytes, hipMemcpyDeviceToHost);
last_Buffer_Dn = Buffer_Dn;
delete[] img_NDVI;
delete[] img_QA;
hipFree((void*)d_imgNDVI);
hipFree((void*)d_imgQA);
hipFree((void*)d_img_NDVI);
hipFree((void*)d_img_QA);
hipFree((void*)d_NDVI_Reference);
hipFree((void*)d_res);
hipFree((void*)d_res_vec_res1);
hipFree((void*)d_vector_in);
hipFree((void*)d_res_3);
hipFree((void*)d_index);
}
free((void*)res);
float *vector_out = new float[n_X*n_Y*n_B*n_Years];
nBytes = n_X*n_Y*n_B*n_Years* sizeof(float);
hipMemcpy((void*)vector_out, (void*)d_vector_out, nBytes, hipMemcpyDeviceToHost);
hipFree((void*)d_vector_out);
long cnt = 0;
double sum = 0;
for (int i = 0; i < n_X*n_Y*n_B*n_Years; i++) {
if (vector_out[i] < 1.f || vector_out[i] > 0.f) {
sum += vector_out[i];
cnt++;
}
}
cout << "Checksum: " << sum << " " << cnt << " " << sum / cnt << endl;
delete[] vector_out;
return 0;
}
| 2204a70b0f9f3014fed22732445ebe4e53d69ff3.cu | //******************************************************************
//cuSTSG reconstructs high-quality NDVI time series data (MODIS/SPOT) based on STSG.
//
//This file is the source code of the first version of cuSTSG.
//It is a parallel implementation that runs on the GPU.
//
//Coded by Yang Xue
// Reference: Xue Yang, Jin Chen, Qingfeng Guan, Huan Gao, and Wei Xia.
// Enhanced Spatial-Temporal Savitzky-Golay Method for Reconstructing High-quality NDVI Time Series: Reduced Sensitivity
// to Quality Flags and Improved Computational Efficiency. Transactions on Geoscience and Remote Sensing
//******************************************************************
#include <iostream>
#include <algorithm>
#include <fstream>
#include <chrono>
#include <hip/hip_runtime.h>
#include <gdal/gdal_priv.h>
#include "Filter.h"
using namespace std;
int main(int argc, char *argv[])
{
GDALAllRegister();
//parameters
if (argc != 2)
{
cout << "No parameter file!" << endl;
return 1;
}
ifstream parameter(argv[1]);
if (!parameter)
{
cout << "Can't open parameter file!" << endl;
return 1;
}
int* Years = nullptr;
string NDVI_path, Reliability_path, STSG_Test_path;
float cosyear, sampcorr;
int win_year, win, snow_address, n_Years;
string par;
while (getline(parameter, par))
{
if (par.substr(0, 2) == "//" || par == "")
continue;
for (int i = 0; i < par.size(); )
{
if (isspace(par[i]))
par.erase(i,1);
else
i++;
}
if (par.substr(0, par.find("=")) == "Years")
{
vector<int> year;
while (par.rfind(",") < par.size())
{
year.push_back(stoi(par.substr(par.rfind(",") + 1)));
par = par.substr(0, par.rfind(","));
}
year.push_back(stoi(par.substr(par.rfind("=") + 1)));
n_Years = year.size();
Years = new int[n_Years];
for (int i = 0; i < n_Years; i++)
Years[i] = year[n_Years - i - 1];
}
else if (par.substr(0, par.find("=")) == "NDVI_path")
NDVI_path = par.substr(par.find("=") + 1);
else if (par.substr(0, par.find("=")) == "Reliability_path")
Reliability_path = par.substr(par.find("=") + 1);
else if (par.substr(0, par.find("=")) == "STSG_Test_path")
STSG_Test_path = par.substr(par.find("=") + 1);
else if (par.substr(0, par.find("=")) == "cosyear")
cosyear = stof(par.substr(par.find("=") + 1));
else if (par.substr(0, par.find("=")) == "win_year")
win_year = stoi(par.substr(par.find("=") + 1));
else if (par.substr(0, par.find("=")) == "win")
win = stoi(par.substr(par.find("=") + 1));
else if (par.substr(0, par.find("=")) == "sampcorr")
sampcorr = stof(par.substr(par.find("=") + 1));
else if (par.substr(0, par.find("=")) == "snow_address")
snow_address = stoi(par.substr(par.find("=") + 1));
}
parameter.close();
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
cout << "Device name:" << prop.name << endl;
size_t const totalGlobalMem = std::min(prop.totalGlobalMem, 4UL*1024*1024*1024);
cout << "Device global memory used: " << totalGlobalMem / 1024 / 1024 << " MB" << endl;
vector<GDALDataset*> NDVI(n_Years);
vector<GDALDataset*> QA(n_Years);
int n_X, n_Y, n_B;
GDALDataType type_NDVI, type_QA;
for (int i = 0; i < n_Years; i++)
{
string FileName = NDVI_path + to_string(Years[i]);
NDVI[i] = (GDALDataset*)GDALOpen(FileName.c_str(), GA_ReadOnly);
if (i == 0)
{
n_X = NDVI[i]->GetRasterXSize();
n_Y = NDVI[i]->GetRasterYSize();
n_B = NDVI[i]->GetRasterCount();
type_NDVI = NDVI[i]->GetRasterBand(1)->GetRasterDataType();
}
FileName = Reliability_path + to_string(Years[i]);
QA[i] = (GDALDataset*)GDALOpen(FileName.c_str(), GA_ReadOnly);
if (i == 0)
type_QA = QA[i]->GetRasterBand(1)->GetRasterDataType();
}
cout << "Execution start " << endl;
size_t PerYSize = n_X*n_B *(n_Years * sizeof(short) + n_Years * sizeof(unsigned char) + 2 * n_Years * sizeof(float) + sizeof(int) + sizeof(float) + n_Years * sizeof(float)) + n_X*(2 * win + 1)*(2 * win + 1) *(7 * sizeof(float) + 3 * sizeof(int));
if (totalGlobalMem <= 2 * win*n_X*n_B* (n_Years * sizeof(short) + n_Years * sizeof(unsigned char) + 2 * n_Years * sizeof(float) + sizeof(float)) + n_X*n_Y*n_B*n_Years*sizeof(float))
{
cout << "Size of vector_out is larger than total device global memory. Exit!" << endl;
return 1;
}
size_t PerStep = (totalGlobalMem - 2 * win*n_X*n_B* (n_Years * sizeof(short) + n_Years * sizeof(unsigned char) + 2 * n_Years * sizeof(float) + sizeof(float)) - n_X*n_Y*n_B*n_Years*sizeof(float)) / PerYSize;
int Loops = 1;
if (PerStep < n_Y)
{
Loops = n_Y / PerStep + 1;
PerStep = n_Y / Loops + 1;
}
float *d_vector_out;
size_t nBytes = n_X*n_Y*n_B*n_Years * sizeof(float);
hipMalloc((void**)&d_vector_out, nBytes);
hipMemset((void*)d_vector_out, 0, nBytes);
nBytes = win*n_X*(2 * win + 1)*(2 * win + 1) * 4 * sizeof(float);
float *res = (float*)malloc(nBytes);
memset((void*)res, 0, nBytes);
int last_Buffer_Dn = 0;
printf("Number of loops: %d\n", Loops);
for (int i = 1, StartY = 0; i <= Loops && StartY < n_Y; i++, StartY += PerStep)
{
cout << "Loops " << i << endl;
if (i == Loops)
PerStep = n_Y - StartY;
int Buffer_Up = 0;
int Buffer_Dn = 0;
if (StartY + PerStep < n_Y - win)
Buffer_Dn = win;
else
Buffer_Dn = n_Y - PerStep - StartY;
if (StartY >= win)
Buffer_Up = win;
else
Buffer_Up = StartY;
int blkwidth = 16;
int blkheight = 16;
dim3 blocks(blkwidth, blkheight);
dim3 grids(n_X % blkwidth == 0 ? n_X / blkwidth : n_X / blkwidth + 1, (PerStep + Buffer_Up + Buffer_Dn) % blkheight == 0 ? (PerStep + Buffer_Up + Buffer_Dn) / blkheight : (PerStep + Buffer_Up + Buffer_Dn) / blkheight + 1);
short *img_NDVI = new short[(PerStep + Buffer_Up + Buffer_Dn)*n_X*n_B*n_Years];
unsigned char *img_QA = new unsigned char[(PerStep + Buffer_Up + Buffer_Dn)*n_X*n_B*n_Years];
for (int i = 0; i < n_Years; i++)
{
NDVI[i]->RasterIO(GF_Read, 0, StartY - Buffer_Up, n_X, (PerStep + Buffer_Up + Buffer_Dn), &img_NDVI[i*(PerStep + Buffer_Up + Buffer_Dn)*n_X*n_B], n_X, (PerStep + Buffer_Up + Buffer_Dn), type_NDVI, n_B, nullptr, 0, 0, 0);
QA[i]->RasterIO(GF_Read, 0, StartY - Buffer_Up, n_X, (PerStep + Buffer_Up + Buffer_Dn), &img_QA[i*(PerStep + Buffer_Up + Buffer_Dn)*n_X*n_B], n_X, (PerStep + Buffer_Up + Buffer_Dn), type_QA, n_B, nullptr, 0, 0, 0);
}
short *d_imgNDVI;
nBytes = (PerStep + Buffer_Up + Buffer_Dn)*n_X*n_B*n_Years * sizeof(short);
hipMalloc((void**)&d_imgNDVI, nBytes);
hipMemcpy((void*)d_imgNDVI, (void*)img_NDVI, nBytes, hipMemcpyHostToDevice);
unsigned char *d_imgQA;
nBytes = (PerStep + Buffer_Up + Buffer_Dn)*n_X*n_B*n_Years * sizeof(unsigned char);
hipMalloc((void**)&d_imgQA, nBytes);
hipMemcpy((void*)d_imgQA, (void*)img_QA, nBytes, hipMemcpyHostToDevice);
float *d_img_NDVI, *d_img_QA;
nBytes = (PerStep + Buffer_Up + Buffer_Dn)*n_X*n_B*n_Years * sizeof(float);
hipMalloc((void**)&d_img_NDVI, nBytes);
hipMalloc((void**)&d_img_QA, nBytes);
hipMemset((void*)d_img_NDVI, 0, nBytes);
hipMemset((void*)d_img_QA, 0, nBytes);
float *d_NDVI_Reference, *d_res;
nBytes = (PerStep + Buffer_Up + Buffer_Dn)*n_X*n_B * sizeof(float);
hipMalloc((void**)&d_NDVI_Reference, nBytes);
hipMemset((void*)d_NDVI_Reference, 0, nBytes);
nBytes = (PerStep + Buffer_Dn)*n_X*(2 * win + 1)*(2 * win + 1) * 4 * sizeof(float);
hipMalloc((void**)&d_res, nBytes);
hipMemset((void*)d_res, 0, nBytes);
nBytes = last_Buffer_Dn*n_X*(2 * win + 1)*(2 * win + 1) * 4 * sizeof(float);
hipMemcpy((void*)d_res, (void*)res, nBytes, hipMemcpyHostToDevice);
int *d_res_vec_res1;
nBytes = (PerStep + Buffer_Up + Buffer_Dn)*n_X*n_B * sizeof(int);
hipMalloc((void**)&d_res_vec_res1, nBytes);
hipMemset((void*)d_res_vec_res1, 0, nBytes);
float *d_vector_in, *d_res_3;
nBytes = PerStep*n_X* n_B * sizeof(float);
hipMalloc((void**)&d_vector_in, nBytes);
hipMemset((void*)d_vector_in, 0, nBytes);
nBytes = PerStep*n_X*(2 * win + 1)*(2 * win + 1) * 3 * sizeof(float);
hipMalloc((void**)&d_res_3, nBytes);//(slope_intercept(2);corr_similar;)
hipMemset((void*)d_res_3, 0, nBytes);
int *d_index;
nBytes = PerStep*n_X*(2 * win + 1)*(2 * win + 1) * 3 * sizeof(int);
hipMalloc((void**)&d_index, nBytes);//(similar_index(2);new_corr;)
hipMemset((void*)d_index, 0, nBytes);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
Short_to_Float <<<grids, blocks >>>(d_imgNDVI, d_imgQA, n_X, (PerStep + Buffer_Up + Buffer_Dn), n_B, n_Years, d_img_NDVI, d_img_QA);
Generate_NDVI_reference <<<grids, blocks >>>(cosyear, win_year, d_img_NDVI, d_img_QA, n_X, (PerStep + Buffer_Up + Buffer_Dn), n_B, n_Years, d_NDVI_Reference, d_res_3, d_res_vec_res1);
nBytes = PerStep*n_X*(2 * win + 1)*(2 * win + 1) * 3 * sizeof(float);
hipMemset((void*)d_res_3, 0, nBytes);
Compute_d_res <<<grids, blocks >>>(d_img_NDVI, d_img_QA, d_NDVI_Reference, StartY, n_Y, Buffer_Up, Buffer_Dn, n_X, (PerStep + Buffer_Up + Buffer_Dn), n_B, n_Years, win, d_res);
STSG_filter <<<grids, blocks >>>(d_img_NDVI, d_img_QA, d_NDVI_Reference, StartY, n_Y, Buffer_Up, Buffer_Dn, n_X, PerStep, n_B, n_Years, win, sampcorr, snow_address, d_vector_out, d_vector_in, d_res, d_res_3, d_index);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
cout << "Total kernel time: " << time * 1e-9f << " s" << endl;
nBytes = win*n_X*(2 * win + 1)*(2 * win + 1) * 4 * sizeof(float);
memset((void*)res, 0, nBytes);
nBytes = Buffer_Dn*n_X*(2 * win + 1)*(2 * win + 1) * 4 * sizeof(float);
hipMemcpy((void*)res, (void*)&d_res[(PerStep + Buffer_Dn - win)*n_X*(2 * win + 1)*(2 * win + 1) * 4], nBytes, hipMemcpyDeviceToHost);
last_Buffer_Dn = Buffer_Dn;
delete[] img_NDVI;
delete[] img_QA;
hipFree((void*)d_imgNDVI);
hipFree((void*)d_imgQA);
hipFree((void*)d_img_NDVI);
hipFree((void*)d_img_QA);
hipFree((void*)d_NDVI_Reference);
hipFree((void*)d_res);
hipFree((void*)d_res_vec_res1);
hipFree((void*)d_vector_in);
hipFree((void*)d_res_3);
hipFree((void*)d_index);
}
free((void*)res);
float *vector_out = new float[n_X*n_Y*n_B*n_Years];
nBytes = n_X*n_Y*n_B*n_Years* sizeof(float);
hipMemcpy((void*)vector_out, (void*)d_vector_out, nBytes, hipMemcpyDeviceToHost);
hipFree((void*)d_vector_out);
long cnt = 0;
double sum = 0;
for (int i = 0; i < n_X*n_Y*n_B*n_Years; i++) {
if (vector_out[i] < 1.f || vector_out[i] > 0.f) {
sum += vector_out[i];
cnt++;
}
}
cout << "Checksum: " << sum << " " << cnt << " " << sum / cnt << endl;
delete[] vector_out;
return 0;
}
|
f21acd575e2dcc6c23eaf99a1122105a91acb341.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by henri on 09/01/2020.
//
#include "matrix_conv.hh"
#include "kernels/kernel_mat_op.hh"
double* mat_conv(double* A, double* K, int NA, int MA, int NK, bool padding_valid) {
if (NK % 2 == 0) {
std::cerr << "shape error" << std::endl;
return nullptr;
}
hipError_t rc = hipSuccess;
int SIZE_A = NA*MA;
int SIZE_K = NK*NK;
// Allocate memory on the device
double* d_A;
double* d_B;
double* d_C;
hipMalloc(&d_A, SIZE_A * sizeof(double));
hipMalloc(&d_B, SIZE_K * sizeof(double));
// Copy to device
rc = hipMemcpy(d_A, &A[0], SIZE_A * sizeof(double), hipMemcpyHostToDevice);
if (rc)
std::cout << "error memcpy\n";
hipMemcpy(d_B, &K[0], SIZE_K * sizeof(double), hipMemcpyHostToDevice);
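// Two padding modes: 'same' (padding_valid == false) keeps the NA x MA output size, while
// 'valid' shrinks it to (NA - 2*(NK/2)) x (MA - 2*(NK/2)), i.e. (NA-NK+1) x (MA-NK+1) for odd
// NK. matrixConvSame / matrixConvValid are declared in kernels/kernel_mat_op.hh (not shown).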
if (!padding_valid) {
int SIZE_C = SIZE_A;
auto *C = (double *) malloc(SIZE_C * sizeof(double));
hipMalloc(&d_C, SIZE_C * sizeof(double));
hipMemset(d_C, 0, SIZE_C * sizeof(double));
// call the kernel
matrixConvSame(d_A, d_B, d_C, NA, MA, NK);
hipDeviceSynchronize();
// copy memory back to host
hipMemcpy(&C[0], d_C, SIZE_C * sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return C;
}
else {
int SIZE_C = (NA - 2*(NK/2)) * (MA - 2*(NK/2));
auto *C = (double *) malloc(SIZE_C * sizeof(double));
rc = hipMalloc(&d_C, SIZE_C * sizeof(double));
if (rc)
std::cout << "error malloc\n";
rc = hipMemset(d_C, 0, SIZE_C * sizeof(double));
if (rc)
std::cout << "error memset\n";
// call the kernel
matrixConvValid(d_A, d_B, d_C, NA, MA, NK);
hipDeviceSynchronize();
// copy memory back to host
rc = hipMemcpy(&C[0], d_C, SIZE_C * sizeof(double), hipMemcpyDeviceToHost);
if (rc)
std::cout << "error memcpy\n";
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return C;
}
}
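// Usage sketch (hypothetical sizes; the caller owns the returned host buffer and must free it):
//   double A[5 * 5] = { /* image */ }, K[3 * 3] = { /* kernel */ };
//   double* C = mat_conv(A, K, 5, 5, 3, /*padding_valid=*/true);  // C is 3 x 3
//   free(C);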
| f21acd575e2dcc6c23eaf99a1122105a91acb341.cu | //
// Created by henri on 09/01/2020.
//
#include "matrix_conv.hh"
#include "kernels/kernel_mat_op.hh"
double* mat_conv(double* A, double* K, int NA, int MA, int NK, bool padding_valid) {
if (NK % 2 == 0) {
std::cerr << "shape error" << std::endl;
return nullptr;
}
cudaError_t rc = cudaSuccess;
int SIZE_A = NA*MA;
int SIZE_K = NK*NK;
// Allocate memory on the device
double* d_A;
double* d_B;
double* d_C;
cudaMalloc(&d_A, SIZE_A * sizeof(double));
cudaMalloc(&d_B, SIZE_K * sizeof(double));
// Copy to device
rc = cudaMemcpy(d_A, &A[0], SIZE_A * sizeof(double), cudaMemcpyHostToDevice);
if (rc)
std::cout << "error memcpy\n";
cudaMemcpy(d_B, &K[0], SIZE_K * sizeof(double), cudaMemcpyHostToDevice);
if (!padding_valid) {
int SIZE_C = SIZE_A;
auto *C = (double *) malloc(SIZE_C * sizeof(double));
cudaMalloc(&d_C, SIZE_C * sizeof(double));
cudaMemset(d_C, 0, SIZE_C * sizeof(double));
// call the kernel
matrixConvSame(d_A, d_B, d_C, NA, MA, NK);
cudaDeviceSynchronize();
// copy memory back to host
cudaMemcpy(&C[0], d_C, SIZE_C * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return C;
}
else {
int SIZE_C = (NA - 2*(NK/2)) * (MA - 2*(NK/2));
auto *C = (double *) malloc(SIZE_C * sizeof(double));
rc = cudaMalloc(&d_C, SIZE_C * sizeof(double));
if (rc)
std::cout << "error malloc\n";
rc = cudaMemset(d_C, 0, SIZE_C * sizeof(double));
if (rc)
std::cout << "error memset\n";
// call the kernel
matrixConvValid(d_A, d_B, d_C, NA, MA, NK);
cudaDeviceSynchronize();
// copy memory back to host
rc = cudaMemcpy(&C[0], d_C, SIZE_C * sizeof(double), cudaMemcpyDeviceToHost);
if (rc)
std::cout << "error memcpy\n";
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return C;
}
}
|
8a457a372e01acd30489fce6966767c4356d467d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=[16,32,1] --blockDim=[8,4,2] --warp-sync=32
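// GPUVerify test case: the //pass header and the --gridDim/--blockDim/--warp-sync options above
// drive the verifier, and __requires / __global_invariant / __other_int / __implies / __write
// below are its specification constructs rather than CUDA/HIP runtime calls.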
#include "common2.h"
__global__ void CUDAkernel2DCT(float *dst, float *src, int ImgStride)
{
__requires(ImgStride == 512);
__shared__ float block[KER2_BLOCK_HEIGHT * KER2_SMEMBLOCK_STRIDE];
int OffsThreadInRow = threadIdx.y * BLOCK_SIZE + threadIdx.x;
int OffsThreadInCol = threadIdx.z * BLOCK_SIZE;
src += FMUL(blockIdx.y * KER2_BLOCK_HEIGHT + OffsThreadInCol, ImgStride) + blockIdx.x * KER2_BLOCK_WIDTH + OffsThreadInRow;
dst += FMUL(blockIdx.y * KER2_BLOCK_HEIGHT + OffsThreadInCol, ImgStride) + blockIdx.x * KER2_BLOCK_WIDTH + OffsThreadInRow;
float *bl_ptr = block + OffsThreadInCol * KER2_SMEMBLOCK_STRIDE + OffsThreadInRow;
#pragma unroll
for (unsigned int i = 0;
#define tid (threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.x * blockDim.y)
__global_invariant(__implies(tid/32 == __other_int(tid)/32
& blockIdx.x == __other_int(blockIdx.x)
& blockIdx.y == __other_int(blockIdx.y)
& blockIdx.z == __other_int(blockIdx.z), !__write(block))),
i < BLOCK_SIZE; i++)
bl_ptr[i * KER2_SMEMBLOCK_STRIDE] = src[i * ImgStride];
//process rows
CUDAsubroutineInplaceDCTvector(block + (OffsThreadInCol + threadIdx.x) * KER2_SMEMBLOCK_STRIDE + OffsThreadInRow - threadIdx.x, 1);
//process columns
CUDAsubroutineInplaceDCTvector(bl_ptr, KER2_SMEMBLOCK_STRIDE);
for (unsigned int i = 0; i < BLOCK_SIZE; i++)
dst[i * ImgStride] = bl_ptr[i * KER2_SMEMBLOCK_STRIDE];
}
| 8a457a372e01acd30489fce6966767c4356d467d.cu | //pass
//--gridDim=[16,32,1] --blockDim=[8,4,2] --warp-sync=32
#include "common2.h"
__global__ void CUDAkernel2DCT(float *dst, float *src, int ImgStride)
{
__requires(ImgStride == 512);
__shared__ float block[KER2_BLOCK_HEIGHT * KER2_SMEMBLOCK_STRIDE];
int OffsThreadInRow = threadIdx.y * BLOCK_SIZE + threadIdx.x;
int OffsThreadInCol = threadIdx.z * BLOCK_SIZE;
src += FMUL(blockIdx.y * KER2_BLOCK_HEIGHT + OffsThreadInCol, ImgStride) + blockIdx.x * KER2_BLOCK_WIDTH + OffsThreadInRow;
dst += FMUL(blockIdx.y * KER2_BLOCK_HEIGHT + OffsThreadInCol, ImgStride) + blockIdx.x * KER2_BLOCK_WIDTH + OffsThreadInRow;
float *bl_ptr = block + OffsThreadInCol * KER2_SMEMBLOCK_STRIDE + OffsThreadInRow;
#pragma unroll
for (unsigned int i = 0;
#define tid (threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.x * blockDim.y)
__global_invariant(__implies(tid/32 == __other_int(tid)/32
& blockIdx.x == __other_int(blockIdx.x)
& blockIdx.y == __other_int(blockIdx.y)
& blockIdx.z == __other_int(blockIdx.z), !__write(block))),
i < BLOCK_SIZE; i++)
bl_ptr[i * KER2_SMEMBLOCK_STRIDE] = src[i * ImgStride];
//process rows
CUDAsubroutineInplaceDCTvector(block + (OffsThreadInCol + threadIdx.x) * KER2_SMEMBLOCK_STRIDE + OffsThreadInRow - threadIdx.x, 1);
//process columns
CUDAsubroutineInplaceDCTvector(bl_ptr, KER2_SMEMBLOCK_STRIDE);
for (unsigned int i = 0; i < BLOCK_SIZE; i++)
dst[i * ImgStride] = bl_ptr[i * KER2_SMEMBLOCK_STRIDE];
}
|
955a7cc3ab91d98defd3eef2700edc852761254a.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <opencv2/core.hpp>
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <fstream>
#include <string>
#include <chrono>
using namespace cv;
__global__
void vegetationKernel(float *in, float *out, int row, int col, int channel);
__global__
void detectionKernel(float *in, int8_t *out, int row, int col);
// Get pixel offset in a 2D matrix at position (i, j)
// row-major memory layout
__host__ __device__
int offset2D(int i, int j, int col) {
return i * col + j;
}
// Get pixel offset in a 3D matrix at position (i, j, k)
// row-major memory layout, but channel is the innermost loop increment unit
__host__ __device__
int offset3D(int i, int j, int k, int col, int channel) {
return (i * col + j) * channel + k;
}
/**************************************************************************************************************
* C++ version
*************************************************************************************************************/
// Calculate vegetation index
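// NDVI-style index: (b3 - b2) / (b3 + b2) over channels 1 and 2 of the interleaved image, with
// 0 where the denominator vanishes; getVegetationDetection below thresholds it at 0.1.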
void getVegetationIndex(float *img, float *out, int row, int col, int channel) {
for (int i = 0; i < row; i++) {
for (int j = 0; j < col; j++) {
float b2 = img[offset3D(i, j, 1, col, channel)];
float b3 = img[offset3D(i, j, 2, col, channel)];
if (b3 + b2 == 0) {
out[offset2D(i, j, col)] = 0;
}
else {
out[offset2D(i, j, col)] = (b3 - b2) / (b3 + b2);
}
}
}
return;
}
// Calculate vegetation detection
void getVegetationDetection(float *veg, int8_t *out, int row, int col) {
for (int i = 0; i < row; i++) {
for (int j = 0; j < col; j++) {
out[offset2D(i, j, col)] = veg[offset2D(i, j, col)] > 0.1 ? (int8_t)1 : (int8_t)0;
}
}
return;
}
/**************************************************************************************************************
* CUDA version
*************************************************************************************************************/
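// Host wrappers: allocate device buffers, copy the input over, launch the kernel on a 2D grid
// of 32 x 32 thread blocks covering the row x col image, and copy the result back.
// vegetationKernel and detectionKernel are declared at the top of the file; their definitions
// are not included in this excerpt.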
void getVegetationIndexCUDA(float *h_img, float *h_veg, int row, int col, int channel) {
int img_size = row * col * channel * sizeof(float);
int out_size = row * col * sizeof(float);
float *d_img, *d_out;
hipError_t err1 = hipMalloc((void **) &d_img, img_size);
hipError_t err2 = hipMalloc((void **) &d_out, out_size);
if (err1 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err1), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
if (err2 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err2), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
hipMemcpy(d_img, h_img, img_size, hipMemcpyHostToDevice);
dim3 blocksPerGrid(ceil(row/32.0), ceil(col/32.0), 1);
dim3 threadsPerBlock(32, 32, 1); // 1024 threads per block
hipLaunchKernelGGL(( vegetationKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_img, d_out, row, col, channel);
hipMemcpy(h_veg, d_out, out_size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if(error!=hipSuccess)
{
fprintf(stderr,"ERROR: %s\n", hipGetErrorString(error) );
exit(-1);
}
hipFree(d_img);
hipFree(d_out);
}
void getVegetationDetectionCUDA(float *h_veg, int8_t *h_out, int row, int col) {
int veg_size = row * col * sizeof(float);
int out_size = row * col * sizeof(int8_t);
float *d_veg;
int8_t *d_out;
hipError_t err1 = hipMalloc((void **) &d_veg, veg_size);
hipError_t err2 = hipMalloc((void **) &d_out, out_size);
if (err1 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err1), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
if (err2 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err2), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
hipMemcpy(d_veg, h_veg, veg_size, hipMemcpyHostToDevice);
dim3 blocksPerGrid(ceil(row/32.0), ceil(col/32.0), 1);
dim3 threadsPerBlock(32, 32, 1); // 1024 threads per block
hipLaunchKernelGGL(( detectionKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_veg, d_out, row, col);
hipMemcpy(h_out, d_out, out_size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if(error!=hipSuccess)
{
fprintf(stderr,"ERROR: %s\n", hipGetErrorString(error) );
exit(-1);
}
hipFree(d_veg);
hipFree(d_out);
}
/**************************************************************************************************************
* Driver code
*************************************************************************************************************/
void printImg(float *d, int row, int col) {
for (int i = 0; i < row; i++) {
for (int j = 0; j < col; j++) {
std::cout << d[offset2D(i, j, col)] << " ";
}
std::cout << std::endl;
}
}
void printImg(int8_t *d, int row, int col) {
for (int i = 0; i < row; i++) {
for (int j = 0; j < col; j++) {
std::cout << (int)d[offset2D(i, j, col)] << " ";
}
std::cout << std::endl;
}
}
void printCube(float *d, int row, int col) {
for (int i = 0; i < row; i++) {
for (int j = 0; j < col; j++) {
std::cout << "( " << d[offset3D(i, j, 0, col, 3)] << " " << d[offset3D(i, j, 1, col, 3)] << " " << d[offset3D(i, j, 2, col, 3)] << " )";
std::cout << std::endl;
}
std::cout << std::endl;
}
}
int main()
{
// Image metadata is hard coded for now, but can be passed as arguments to main
int height = 6058;
int width = 3320;
int channel= 3;
std::cout << "width " << width << " height " << height << std::endl;
// Read from tmp .txt file
int img_size = width * height * channel;
float *dat = new float[img_size]();
std::ifstream file("../tmp.txt");
std::string str;
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
for (int k = 0; k < channel; k++) {
std::getline(file, str);
dat[offset3D(i, j, k, width, channel)] = std::atof(str.c_str());
}
}
}
// // verify correct
// for (int i = 0; i < 1; i++) {
// for (int j = 0; j < 1; j++) {
// for (int k = 0; k < channel; k++) {
// std::cout << dat[offset3D(i, j, k, width, channel)] << " ";
// }
// }
// }
// C++ version
int out_size = height * width;
std::cout << "C++ ======================================\n";
float *veg_cpp = new float[out_size]();
int8_t *det_cpp = new int8_t[out_size]();
// time veg in cpp
auto t1 = std::chrono::high_resolution_clock::now();
getVegetationIndex(dat, veg_cpp, height, width, channel);
auto t2 = std::chrono::high_resolution_clock::now();
auto veg_cpp_duration = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count();
// time det in cpp
t1 = std::chrono::high_resolution_clock::now();
getVegetationDetection(veg_cpp, det_cpp, height, width);
t2 = std::chrono::high_resolution_clock::now();
auto det_cpp_duration = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count();
// verify results in cpp
std::cout << "Veg time : cpp " << veg_cpp_duration << std::endl;
std::cout << "Det time : cpp " << det_cpp_duration << std::endl;
std::cout << "Veg results : cpp " << std::endl;
printImg(veg_cpp, 5, 5);
std::cout << "Det results : cpp " << std::endl;
printImg(det_cpp, 5, 5);
// CUDA version
std::cout << "CUDA ======================================\n";
float *veg_cuda = new float[out_size]();
int8_t *det_cuda = new int8_t[out_size]();
// time veg in cuda
t1 = std::chrono::high_resolution_clock::now();
getVegetationIndexCUDA(dat, veg_cuda, height, width, channel);
t2 = std::chrono::high_resolution_clock::now();
auto veg_cuda_duration = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count();
// time det in cuda
t1 = std::chrono::high_resolution_clock::now();
getVegetationDetectionCUDA(veg_cuda, det_cuda, height, width);
t2 = std::chrono::high_resolution_clock::now();
auto det_cuda_duration = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count();
// verify results in cuda
std::cout << "Veg time : cuda " << veg_cuda_duration << std::endl;
std::cout << "Det time : cuda " << det_cuda_duration << std::endl;
std::cout << "Veg results : cuda" << std::endl;
printImg(veg_cuda, 5, 5);
std::cout << "Det results : cuda" << std::endl;
printImg(det_cuda, 5, 5);
// save results
std::cout << "Write results to image ...\n";
cv::imwrite("../veg_index.bmp", cv::Mat(height, width, CV_32FC1, veg_cpp));
cv::imwrite("../veg_detection.tif", cv::Mat(height, width, CV_8UC1, det_cpp));
return 0;
}
/**************************************************************************************************************
* CUDA kernel
*************************************************************************************************************/
__global__
void vegetationKernel(float *in, float *out, int row, int col, int channel){
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
if (i < row && j < col) {
float a = (in[offset3D(i, j, 2, col, channel)] - in[offset3D(i, j, 1, col, channel)] ) ;
float b = (in[offset3D(i, j, 2, col, channel)] + in[offset3D(i, j, 1, col, channel)] ) ;
if (b != 0) {
out[offset2D(i, j, col)] = a / b;
}
else {
out[offset2D(i, j, col)] = 0;
}
}
}
__global__
void detectionKernel(float *in, int8_t *out, int row, int col){
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
if (i < row && j < col) {
out[offset2D(i, j, col)] = in[offset2D(i, j, col)] > 0.1 ? 1 : 0;
}
} | 955a7cc3ab91d98defd3eef2700edc852761254a.cu | #include <iostream>
#include <cuda.h>
#include <opencv2/core.hpp>
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <fstream>
#include <string>
#include <chrono>
using namespace cv;
__global__
void vegetationKernel(float *in, float *out, int row, int col, int channel);
__global__
void detectionKernel(float *in, int8_t *out, int row, int col);
// Get pixel offset in a 2D matrix at position (i, j)
// row-major memory layout
__host__ __device__
int offset2D(int i, int j, int col) {
return i * col + j;
}
// Get pixel offset in a 3D matrix at position (i, j, k)
// row-major memory layout, but channel is the innermost loop increment unit
__host__ __device__
int offset3D(int i, int j, int k, int col, int channel) {
return (i * col + j) * channel + k;
}
/**************************************************************************************************************
* C++ version
*************************************************************************************************************/
// Calculate vegetation index
void getVegetationIndex(float *img, float *out, int row, int col, int channel) {
for (int i = 0; i < row; i++) {
for (int j = 0; j < col; j++) {
float b2 = img[offset3D(i, j, 1, col, channel)];
float b3 = img[offset3D(i, j, 2, col, channel)];
if (b3 + b2 == 0) {
out[offset2D(i, j, col)] = 0;
}
else {
out[offset2D(i, j, col)] = (b3 - b2) / (b3 + b2);
}
}
}
return;
}
// Calculate vegetation detection
void getVegetationDetection(float *veg, int8_t *out, int row, int col) {
for (int i = 0; i < row; i++) {
for (int j = 0; j < col; j++) {
out[offset2D(i, j, col)] = veg[offset2D(i, j, col)] > 0.1 ? (int8_t)1 : (int8_t)0;
}
}
return;
}
/**************************************************************************************************************
* CUDA version
*************************************************************************************************************/
void getVegetationIndexCUDA(float *h_img, float *h_veg, int row, int col, int channel) {
int img_size = row * col * channel * sizeof(float);
int out_size = row * col * sizeof(float);
float *d_img, *d_out;
cudaError_t err1 = cudaMalloc((void **) &d_img, img_size);
cudaError_t err2 = cudaMalloc((void **) &d_out, out_size);
if (err1 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err1), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
if (err2 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err2), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cudaMemcpy(d_img, h_img, img_size, cudaMemcpyHostToDevice);
dim3 blocksPerGrid(ceil(row/32.0), ceil(col/32.0), 1);
dim3 threadsPerBlock(32, 32, 1); // 1024 threads per block
vegetationKernel<<<blocksPerGrid, threadsPerBlock>>>(d_img, d_out, row, col, channel);
cudaMemcpy(h_veg, d_out, out_size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if(error!=cudaSuccess)
{
fprintf(stderr,"ERROR: %s\n", cudaGetErrorString(error) );
exit(-1);
}
cudaFree(d_img);
cudaFree(d_out);
}
void getVegetationDetectionCUDA(float *h_veg, int8_t *h_out, int row, int col) {
int veg_size = row * col * sizeof(float);
int out_size = row * col * sizeof(int8_t);
float *d_veg;
int8_t *d_out;
cudaError_t err1 = cudaMalloc((void **) &d_veg, veg_size);
cudaError_t err2 = cudaMalloc((void **) &d_out, out_size);
if (err1 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err1), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
if (err2 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err2), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cudaMemcpy(d_veg, h_veg, veg_size, cudaMemcpyHostToDevice);
dim3 blocksPerGrid(ceil(row/32.0), ceil(col/32.0), 1);
dim3 threadsPerBlock(32, 32, 1); // 1024 threads per block
detectionKernel<<<blocksPerGrid, threadsPerBlock>>>(d_veg, d_out, row, col);
cudaMemcpy(h_out, d_out, out_size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if(error!=cudaSuccess)
{
fprintf(stderr,"ERROR: %s\n", cudaGetErrorString(error) );
exit(-1);
}
cudaFree(d_veg);
cudaFree(d_out);
}
/**************************************************************************************************************
* Driver code
*************************************************************************************************************/
void printImg(float *d, int row, int col) {
for (int i = 0; i < row; i++) {
for (int j = 0; j < col; j++) {
std::cout << d[offset2D(i, j, col)] << " ";
}
std::cout << std::endl;
}
}
void printImg(int8_t *d, int row, int col) {
for (int i = 0; i < row; i++) {
for (int j = 0; j < col; j++) {
std::cout << (int)d[offset2D(i, j, col)] << " ";
}
std::cout << std::endl;
}
}
void printCube(float *d, int row, int col) {
for (int i = 0; i < row; i++) {
for (int j = 0; j < col; j++) {
std::cout << "( " << d[offset3D(i, j, 0, col, 3)] << " " << d[offset3D(i, j, 1, col, 3)] << " " << d[offset3D(i, j, 2, col, 3)] << " )";
std::cout << std::endl;
}
std::cout << std::endl;
}
}
int main()
{
// Image metadata is hard coded for now, but can be passed as arguments to main
int height = 6058;
int width = 3320;
int channel= 3;
std::cout << "width " << width << " height " << height << std::endl;
// Read from tmp .txt file
int img_size = width * height * channel;
float *dat = new float[img_size]();
std::ifstream file("../tmp.txt");
std::string str;
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
for (int k = 0; k < channel; k++) {
std::getline(file, str);
dat[offset3D(i, j, k, width, channel)] = std::atof(str.c_str());
}
}
}
// // verify correct
// for (int i = 0; i < 1; i++) {
// for (int j = 0; j < 1; j++) {
// for (int k = 0; k < channel; k++) {
// std::cout << dat[offset3D(i, j, k, width, channel)] << " ";
// }
// }
// }
// C++ version
int out_size = height * width;
std::cout << "C++ ======================================\n";
float *veg_cpp = new float[out_size]();
int8_t *det_cpp = new int8_t[out_size]();
// time veg in cpp
auto t1 = std::chrono::high_resolution_clock::now();
getVegetationIndex(dat, veg_cpp, height, width, channel);
auto t2 = std::chrono::high_resolution_clock::now();
auto veg_cpp_duration = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count();
// time det in cpp
t1 = std::chrono::high_resolution_clock::now();
getVegetationDetection(veg_cpp, det_cpp, height, width);
t2 = std::chrono::high_resolution_clock::now();
auto det_cpp_duration = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count();
// verify results in cpp
std::cout << "Veg time : cpp " << veg_cpp_duration << std::endl;
std::cout << "Det time : cpp " << det_cpp_duration << std::endl;
std::cout << "Veg results : cpp " << std::endl;
printImg(veg_cpp, 5, 5);
std::cout << "Det results : cpp " << std::endl;
printImg(det_cpp, 5, 5);
// CUDA version
std::cout << "CUDA ======================================\n";
float *veg_cuda = new float[out_size]();
int8_t *det_cuda = new int8_t[out_size]();
// time veg in cuda
t1 = std::chrono::high_resolution_clock::now();
getVegetationIndexCUDA(dat, veg_cuda, height, width, channel);
t2 = std::chrono::high_resolution_clock::now();
auto veg_cuda_duration = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count();
// time det in cuda
t1 = std::chrono::high_resolution_clock::now();
getVegetationDetectionCUDA(veg_cuda, det_cuda, height, width);
t2 = std::chrono::high_resolution_clock::now();
auto det_cuda_duration = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count();
// verify results in cuda
std::cout << "Veg time : cuda " << veg_cuda_duration << std::endl;
std::cout << "Det time : cuda " << det_cuda_duration << std::endl;
std::cout << "Veg results : cuda" << std::endl;
printImg(veg_cuda, 5, 5);
std::cout << "Det results : cuda" << std::endl;
printImg(det_cuda, 5, 5);
// save results
std::cout << "Write results to image ...\n";
cv::imwrite("../veg_index.bmp", cv::Mat(height, width, CV_32FC1, veg_cpp));
cv::imwrite("../veg_detection.tif", cv::Mat(height, width, CV_8UC1, det_cpp));
return 0;
}
/**************************************************************************************************************
* CUDA kernel
*************************************************************************************************************/
__global__
void vegetationKernel(float *in, float *out, int row, int col, int channel){
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
if (i < row && j < col) {
float a = (in[offset3D(i, j, 2, col, channel)] - in[offset3D(i, j, 1, col, channel)] ) ;
float b = (in[offset3D(i, j, 2, col, channel)] + in[offset3D(i, j, 1, col, channel)] ) ;
if (b != 0) {
out[offset2D(i, j, col)] = a / b;
}
else {
out[offset2D(i, j, col)] = 0;
}
}
}
__global__
void detectionKernel(float *in, int8_t *out, int row, int col){
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
if (i < row && j < col) {
out[offset2D(i, j, col)] = in[offset2D(i, j, col)] > 0.1 ? 1 : 0;
}
} |
9f93f1a11d74d3a04b439f3b783b58070cd73a65.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cu_inc/common.cuh"
#include "transpose_impl.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
__global__ void _TransposeKernel(int32_t shape_rank, const TArray<int64_t> input_strides,
const T* input_data, const TArray<fast_divmod> output_strides, T* output_data, CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
CUDA_LONG input_index = 0;
CUDA_LONG output_index = id;
#pragma unroll
for (auto dim = 0; dim < input_strides.GetCapacity(); ++dim) {
if (dim >= shape_rank) {
break;
}
int out_coord, r;
output_strides.data_[dim].divmod(output_index, out_coord, r);
output_index = r;
input_index += input_strides.data_[dim] * out_coord;
}
output_data[id] = input_data[input_index];
}
Status TransposeImpl(size_t element_size, int32_t shape_rank, const TArray<int64_t>& input_strides,
const void* input_data, const TArray<fast_divmod>& fdm_output_strides, void* output_data, int64_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
switch (element_size) {
case sizeof(int8_t):
hipLaunchKernelGGL(( _TransposeKernel<int8_t>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
shape_rank, input_strides,
reinterpret_cast<const ToCudaType<int8_t>::MappedType*>(input_data),
fdm_output_strides,
reinterpret_cast<ToCudaType<int8_t>::MappedType*>(output_data),
N);
break;
case sizeof(int16_t):
hipLaunchKernelGGL(( _TransposeKernel<int16_t>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
shape_rank, input_strides,
reinterpret_cast<const ToCudaType<int16_t>::MappedType*>(input_data),
fdm_output_strides,
reinterpret_cast<ToCudaType<int16_t>::MappedType*>(output_data),
N);
break;
case sizeof(int32_t):
hipLaunchKernelGGL(( _TransposeKernel<int32_t>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
shape_rank, input_strides,
reinterpret_cast<const ToCudaType<int32_t>::MappedType*>(input_data),
fdm_output_strides,
reinterpret_cast<ToCudaType<int32_t>::MappedType*>(output_data),
N);
break;
case sizeof(int64_t):
hipLaunchKernelGGL(( _TransposeKernel<int64_t>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
shape_rank, input_strides,
reinterpret_cast<const ToCudaType<int64_t>::MappedType*>(input_data),
fdm_output_strides,
reinterpret_cast<ToCudaType<int64_t>::MappedType*>(output_data),
N);
break;
default:
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for transpose on CUDA. Element size was ",
element_size);
}
return Status::OK();
}
} // namespace cuda
} // namespace onnxruntime
| 9f93f1a11d74d3a04b439f3b783b58070cd73a65.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cu_inc/common.cuh"
#include "transpose_impl.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
__global__ void _TransposeKernel(int32_t shape_rank, const TArray<int64_t> input_strides,
const T* input_data, const TArray<fast_divmod> output_strides, T* output_data, CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
CUDA_LONG input_index = 0;
CUDA_LONG output_index = id;
#pragma unroll
for (auto dim = 0; dim < input_strides.GetCapacity(); ++dim) {
if (dim >= shape_rank) {
break;
}
int out_coord, r;
output_strides.data_[dim].divmod(output_index, out_coord, r);
output_index = r;
input_index += input_strides.data_[dim] * out_coord;
}
output_data[id] = input_data[input_index];
}
Status TransposeImpl(size_t element_size, int32_t shape_rank, const TArray<int64_t>& input_strides,
const void* input_data, const TArray<fast_divmod>& fdm_output_strides, void* output_data, int64_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
switch (element_size) {
case sizeof(int8_t):
_TransposeKernel<int8_t><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
shape_rank, input_strides,
reinterpret_cast<const ToCudaType<int8_t>::MappedType*>(input_data),
fdm_output_strides,
reinterpret_cast<ToCudaType<int8_t>::MappedType*>(output_data),
N);
break;
case sizeof(int16_t):
_TransposeKernel<int16_t><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
shape_rank, input_strides,
reinterpret_cast<const ToCudaType<int16_t>::MappedType*>(input_data),
fdm_output_strides,
reinterpret_cast<ToCudaType<int16_t>::MappedType*>(output_data),
N);
break;
case sizeof(int32_t):
_TransposeKernel<int32_t><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
shape_rank, input_strides,
reinterpret_cast<const ToCudaType<int32_t>::MappedType*>(input_data),
fdm_output_strides,
reinterpret_cast<ToCudaType<int32_t>::MappedType*>(output_data),
N);
break;
case sizeof(int64_t):
_TransposeKernel<int64_t><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
shape_rank, input_strides,
reinterpret_cast<const ToCudaType<int64_t>::MappedType*>(input_data),
fdm_output_strides,
reinterpret_cast<ToCudaType<int64_t>::MappedType*>(output_data),
N);
break;
default:
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for transpose on CUDA. Element size was ",
element_size);
}
return Status::OK();
}
} // namespace cuda
} // namespace onnxruntime
|
01c0e8e264696d347afb1deae2bedec869eca518.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/cudapars.h"
#include "../include/paramssteeringtest1.h"
#include "../include/iobparams.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "../include/smaugcukernels.h"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
#include "../include/gradops_cd1.cuh"
#include "../include/dervfields_cd1.cuh"
#include "../include/usersource_cd1.cuh"
__device__ __host__
int divflux1(real *dw, real *wd, real *w, struct params *p,int *ii,int field,int dir) {
int direction;
int status=0;
real divflux=0;
//real g = grad3dn_cd1(wd,wd,p,ii,flux,dir);
dw[fencode3_cd1(p,ii,field)]+= grad3dn_cd1(wd,wd,p,ii,flux,dir);
/*if(field==rho && (p->ipe)==0 && ((p)->it)==1 && isnan(g))
{
printf("nant %d %d %lg %lg %lg %lg\n",ii[0],ii[1],g,wd[fencode3_cd1(p,ii,flux)],wd[fencode3_cd1(p,ii,delx1)],wd[fencode3_cd1(p,ii,delx2)] );
ii[0]+=1;
printf("nant 0+1 %d %d %lg %lg %lg\n",ii[0]+1,ii[1],wd[fencode3_cd1(p,ii,flux)],wd[fencode3_cd1(p,ii,delx1)],wd[fencode3_cd1(p,ii,delx2)] );
ii[0]-=1;
printf("nant 0-1 %d %d %lg %lg %lg\n",ii[0]-1,ii[1],wd[fencode3_cd1(p,ii,flux)],wd[fencode3_cd1(p,ii,delx1)],wd[fencode3_cd1(p,ii,delx2)] );
ii[1]+=1;
printf("nant 1+1 %d %d %lg %lg %lg\n",ii[0],ii[1]+1,wd[fencode3_cd1(p,ii,flux)],wd[fencode3_cd1(p,ii,delx1)],wd[fencode3_cd1(p,ii,delx2)] );
ii[0]-=1;
printf("nant %1-1 d %d %lg %lg %lg\n\n",ii[0],ii[1]-1,wd[fencode3_cd1(p,ii,flux)],wd[fencode3_cd1(p,ii,delx1)],wd[fencode3_cd1(p,ii,delx2)] );
}*/
return ( status);
}
__device__ __host__
real transportflux (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
#if defined USE_SAC || defined USE_SAC_3D
return(w[fencode3_cd1(p,ii,mom1+direction)]*w[fencode3_cd1(p,ii,field)]/(w[fencode3_cd1(p,ii,rho)]+w[fencode3_cd1(p,ii,rhob)]));
#else
return(w[fencode3_cd1(p,ii,mom1+direction)]*w[fencode3_cd1(p,ii,field)]/w[fencode3_cd1(p,ii,rho)]);
#endif
}
__device__ __host__
real fluxmom1 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
#if defined USE_SAC || defined USE_SAC_3D
return( -(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
#endif
}
__device__ __host__
real fluxmom10 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
/*real gtest=(direction==0?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
if( (p->ipe)==0 && ((p)->it)==1 && (isnan(gtest) || isnan(w[fencode3_cd1(p,ii,field)]) || w[fencode3_cd1(p,ii,field)]==0 ))
{
printf("nant %d %d %d %d %lg %lg\n",ii[0],ii[1],field, direction, w[fencode3_cd1(p,ii,rho)],w[fencode3_cd1(p,ii,field)] );
}*/
#if defined USE_SAC || defined USE_SAC_3D
return(direction==0?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
#endif
}
__device__ __host__
real fluxmom11 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
/*real gtest=(direction==1?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
if( (p->ipe)==0 && ((p)->it)==2 && (isnan(gtest) || isnan(w[fencode3_cd1(p,ii,field)])|| w[fencode3_cd1(p,ii,field)]==0 ))
{
printf("nant %d %d %d %d %lg %lg \n",ii[0],ii[1],field,direction, w[fencode3_cd1(p,ii,rho)],w[fencode3_cd1(p,ii,field)] );
}*/
#if defined USE_SAC || defined USE_SAC_3D
return(direction==1?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
#endif
}
__device__ __host__
real fluxmom12 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
#if defined USE_SAC || defined USE_SAC_3D
return(direction==2?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
#endif
}
__device__ __host__
int computefluxrho (real *dw, real *wd, real *w, struct params *p,int *ii,int direction) {
int field;
int status=0;
wd[fencode3_cd1(p,ii,flux)]=0.0;
#if defined USE_SAC || defined USE_SAC_3D
wd[fencode3_cd1(p,ii,flux)]= transportflux(dw,wd,w,p,ii,rho,direction)+(w[fencode3_cd1(p,ii,rhob)]*w[fencode3_cd1(p,ii,mom1+direction)])/(w[fencode3_cd1(p,ii,rhob)]+w[fencode3_cd1(p,ii,rho)]);
#else
wd[fencode3_cd1(p,ii,flux)]= transportflux(dw,wd,w,p,ii,rho,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxmom3 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int direction) {
int status=0;
#ifdef USE_SAC_3D
wd[fencode3_cd1(p,ii,flux)]=0.0;
wd[fencode3_cd1(p,ii,flux)]+=transportflux(dw,wd,w,p,ii,field,direction)+fluxmom12(dw,wd,w,p,ii,field,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxmom2 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int direction) {
int status=0;
wd[fencode3_cd1(p,ii,flux)]=0.0;
#ifdef USE_SAC
wd[fencode3_cd1(p,ii,flux)]+= transportflux(dw,wd,w,p,ii,field,direction)+fluxmom11(dw,wd,w,p,ii,field,direction);
#endif
#ifdef USE_SAC_3D
wd[fencode3_cd1(p,ii,flux)]+= transportflux(dw,wd,w,p,ii,field,direction)+fluxmom11(dw,wd,w,p,ii,field,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxmom1 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int direction) {
int status=0;
wd[fencode3_cd1(p,ii,flux)]=0.0;
#ifdef ADIABHYDRO
wd[fencode3_cd1(p,ii,flux)]+= transportflux(dw,wd,w,p,ii,field,direction);
#endif
#ifdef USE_SAC
wd[fencode3_cd1(p,ii,flux)]+= transportflux(dw,wd,w,p,ii,field,direction)+fluxmom10(dw,wd,w,p,ii,field,direction);
#endif
#ifdef USE_SAC_3D
wd[fencode3_cd1(p,ii,flux)]= transportflux(dw,wd,w,p,ii,field,direction)+fluxmom10(dw,wd,w,p,ii,field,direction);
#endif
return ( status);
}
//rho, mom1, mom2, mom3, energy, b1, b2, b3
__device__ __host__
void computeflux (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int dir) {
switch(field)
{
case rho:
computefluxrho(dw,wd,w,p,ii,dir);
break;
case mom1:
computefluxmom1(dw,wd,w,p,ii,field,dir);
break;
case mom2:
computefluxmom2(dw,wd,w,p,ii,field,dir);
break;
#ifdef USE_SAC_3D
case mom3:
computefluxmom3(dw,wd,w,p,ii,field,dir);
break;
#endif
}
}
__global__ void centdiff1init_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
{
dwn1[fencode3_cd1(p,ii,f)]=0.0;
wd[fencode3_cd1(p,ii,flux)]=0.0;
}
__syncthreads();
}
__global__ void centdiff1_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
switch(dir)
{
case 0:
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]>1 && ii[1]<(p->n[1]-2) && ii[2]>1 && ii[2]<(p->n[2]-2))
#else
if(ii[0]<p->n[0] && ii[1]>1 && ii[1]<(p->n[1]-2))
#endif
computeflux(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,0);
break;
case 1:
#ifdef USE_SAC_3D
if(ii[1]<p->n[1] && ii[0]>1 && ii[0]<(p->n[0]-2) && ii[2]>1 && ii[2]<(p->n[2]-2))
#else
if(ii[1]<p->n[1] && ii[0]>1 && ii[0]<(p->n[0]-2))
#endif
computeflux(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,1);
break;
#ifdef USE_SAC_3D
case 2:
if(ii[2]<p->n[2] && ii[0]>1 && ii[0]<(p->n[0]-2) && ii[1]>1 && ii[1]<(p->n[1]-2))
computeflux(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,2);
break;
#endif
}
__syncthreads();
}
__global__ void centdiff1a_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
real dz=p->dx[2];
#endif
#ifdef USE_SAC_3D
int kp;
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC
if(ii[0]>1 && ii[1] >1 && ii[0]<(ni-2) && ii[1]<(nj-2))
#endif
#ifdef USE_SAC_3D
if(ii[0]>1 && ii[1] >1 && ii[2] >1 && ii[0]<(ni-2) && ii[1]<(nj-2) && ii[2]<(nk-2))
#endif
divflux1(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,dir);
__syncthreads();
}
__global__ void centdiff1af_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
real dz=p->dx[2];
#endif
#ifdef USE_SAC_3D
int kp;
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
switch(dir)
{
case 0:
#ifdef USE_SAC
if(ii[1]>1 && ii[1] <(nj-2) && ii[0]<(ni) )
#endif
#ifdef USE_SAC_3D
if(ii[1]>1 && ii[1] <(nj-2) && ii[0]<(ni) && ii[2]>1 && ii[2] <(nk-2) )
#endif
wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd1(p,ii,f)];
break;
case 1:
#ifdef USE_SAC
if(ii[0]>1 && ii[1] <(nj) && ii[0]<(ni-2) )
#endif
#ifdef USE_SAC_3D
if(ii[0]>1 && ii[1] <(nj) && ii[0]<(ni-2) && ii[2]>1 && ii[2] <(nk-2) )
#endif
wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd1(p,ii,f)];
break;
case 2:
#ifdef USE_SAC_3D
if(ii[0]>1 && ii[0]<(ni-2) && ii[1]>1 && ii[1]<(nj-2) && ii[2] <(nk) )
wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd1(p,ii,f)];
#endif
break;
}
/* if( ii[1] <(nj) && ii[0]<(ni) )
if(p->ipe==0 && ii[0]==124 && (p->it)==2)
{
wmod[fencode3_cd1(p,ii,rho)]=0.225;
w[fencode3_cd1(p,ii,rho)]=0.225;
}*/
/* if( ii[1] <(nj) && ii[0]<(ni) )
if(p->ipe==3 && ii[1]==3 && (p->it)==2)
{
wmod[fencode3_cd1(p,ii,rho)]=0.22114;
w[fencode3_cd1(p,ii,rho)]=0.22114;
}*/
__syncthreads();
}
__global__ void centdiff1binit_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
{
dwn1[fencode3_cd1(p,ii,f)]=0.0;
}
__syncthreads();
}
__global__ void centdiff1b_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
real dz=p->dx[2];
#endif
#ifdef USE_SAC_3D
int kp;
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#if(defined(USE_USERSOURCE))
{
ii[0]=ip;
ii[1]=jp;
#endif
#if(defined(USE_SAC_3D) && defined(USE_USERSOURCE))
ii[2]=kp;
#endif
#if(defined(USE_SAC_3D) && defined(USE_USERSOURCE))
if(ii[0]<((p->n[0])) && ii[1]<((p->n[1])) && ii[2]<((p->n[2])) )
#endif
#if(defined(USE_SAC) && defined(USE_USERSOURCE))
if(ii[0]<(p->n[0]) && ii[1]<(p->n[1]))
#endif
#ifdef USE_USERSOURCE
addsourceterms1_cd1(dwn1,wd,wmod+ordero*NVAR*dimp,p,s,ii,f,dir);
}
__syncthreads();
#endif
// }
}
__global__ void centdiff1bf_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
real dz=p->dx[2];
#endif
#ifdef USE_SAC_3D
int kp;
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC
if( ii[1] <(nj) && ii[0]<(ni) )
#endif
#ifdef USE_SAC_3D
if(ii[1] <(nj) && ii[0]<(ni) && ii[2] <(nk) )
#endif
wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd1(p,ii,f)];
__syncthreads();
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
void checkErrors_cd1(char *label)
{
// we need to synchronise first to catch errors due to
// asynchronous operations that would otherwise
// potentially go unnoticed
hipError_t err;
err = hipDeviceSynchronize();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = hipGetLastError();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
int cucentdiff1(struct params **p, struct params **d_p,struct state **d_s, real **d_w, real **d_wmod, real **d_dwn1, real **d_wd, int order, int ordero, real dt, int field, int dir)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
hipMemcpy(*d_p, *p, sizeof(struct params), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( centdiff1init_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_s,*d_w,*d_wmod, *d_dwn1, *d_wd, order, ordero,dt,field,dir);
hipDeviceSynchronize();
hipLaunchKernelGGL(( centdiff1_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_s,*d_w,*d_wmod, *d_dwn1, *d_wd, order, ordero,dt,field,dir);
hipDeviceSynchronize();
hipLaunchKernelGGL(( centdiff1a_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_s,*d_w,*d_wmod, *d_dwn1, *d_wd, order, ordero,dt,field,dir);
hipDeviceSynchronize();
hipLaunchKernelGGL(( centdiff1af_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_s,*d_w,*d_wmod, *d_dwn1, *d_wd, order, ordero,dt,field,dir);
hipDeviceSynchronize();
  return 0;
}
| 01c0e8e264696d347afb1deae2bedec869eca518.cu | #include "../include/cudapars.h"
#include "../include/paramssteeringtest1.h"
#include "../include/iobparams.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "../include/smaugcukernels.h"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
#include "../include/gradops_cd1.cuh"
#include "../include/dervfields_cd1.cuh"
#include "../include/usersource_cd1.cuh"
__device__ __host__
int divflux1(real *dw, real *wd, real *w, struct params *p,int *ii,int field,int dir) {
int direction;
int status=0;
real divflux=0;
//real g = grad3dn_cd1(wd,wd,p,ii,flux,dir);
dw[fencode3_cd1(p,ii,field)]+= grad3dn_cd1(wd,wd,p,ii,flux,dir);
/*if(field==rho && (p->ipe)==0 && ((p)->it)==1 && isnan(g))
{
printf("nant %d %d %lg %lg %lg %lg\n",ii[0],ii[1],g,wd[fencode3_cd1(p,ii,flux)],wd[fencode3_cd1(p,ii,delx1)],wd[fencode3_cd1(p,ii,delx2)] );
ii[0]+=1;
printf("nant 0+1 %d %d %lg %lg %lg\n",ii[0]+1,ii[1],wd[fencode3_cd1(p,ii,flux)],wd[fencode3_cd1(p,ii,delx1)],wd[fencode3_cd1(p,ii,delx2)] );
ii[0]-=1;
printf("nant 0-1 %d %d %lg %lg %lg\n",ii[0]-1,ii[1],wd[fencode3_cd1(p,ii,flux)],wd[fencode3_cd1(p,ii,delx1)],wd[fencode3_cd1(p,ii,delx2)] );
ii[1]+=1;
printf("nant 1+1 %d %d %lg %lg %lg\n",ii[0],ii[1]+1,wd[fencode3_cd1(p,ii,flux)],wd[fencode3_cd1(p,ii,delx1)],wd[fencode3_cd1(p,ii,delx2)] );
ii[0]-=1;
printf("nant %1-1 d %d %lg %lg %lg\n\n",ii[0],ii[1]-1,wd[fencode3_cd1(p,ii,flux)],wd[fencode3_cd1(p,ii,delx1)],wd[fencode3_cd1(p,ii,delx2)] );
}*/
return ( status);
}
__device__ __host__
real transportflux (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
#if defined USE_SAC || defined USE_SAC_3D
return(w[fencode3_cd1(p,ii,mom1+direction)]*w[fencode3_cd1(p,ii,field)]/(w[fencode3_cd1(p,ii,rho)]+w[fencode3_cd1(p,ii,rhob)]));
#else
return(w[fencode3_cd1(p,ii,mom1+direction)]*w[fencode3_cd1(p,ii,field)]/w[fencode3_cd1(p,ii,rho)]);
#endif
}
__device__ __host__
real fluxmom1 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
#if defined USE_SAC || defined USE_SAC_3D
return( -(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
#endif
}
__device__ __host__
real fluxmom10 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
/*real gtest=(direction==0?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
if( (p->ipe)==0 && ((p)->it)==1 && (isnan(gtest) || isnan(w[fencode3_cd1(p,ii,field)]) || w[fencode3_cd1(p,ii,field)]==0 ))
{
printf("nant %d %d %d %d %lg %lg\n",ii[0],ii[1],field, direction, w[fencode3_cd1(p,ii,rho)],w[fencode3_cd1(p,ii,field)] );
}*/
#if defined USE_SAC || defined USE_SAC_3D
return(direction==0?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
#endif
}
__device__ __host__
real fluxmom11 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
/*real gtest=(direction==1?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
if( (p->ipe)==0 && ((p)->it)==2 && (isnan(gtest) || isnan(w[fencode3_cd1(p,ii,field)])|| w[fencode3_cd1(p,ii,field)]==0 ))
{
printf("nant %d %d %d %d %lg %lg \n",ii[0],ii[1],field,direction, w[fencode3_cd1(p,ii,rho)],w[fencode3_cd1(p,ii,field)] );
}*/
#if defined USE_SAC || defined USE_SAC_3D
return(direction==1?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
#endif
}
__device__ __host__
real fluxmom12 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
#if defined USE_SAC || defined USE_SAC_3D
return(direction==2?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
#endif
}
__device__ __host__
int computefluxrho (real *dw, real *wd, real *w, struct params *p,int *ii,int direction) {
int field;
int status=0;
wd[fencode3_cd1(p,ii,flux)]=0.0;
#if defined USE_SAC || defined USE_SAC_3D
wd[fencode3_cd1(p,ii,flux)]= transportflux(dw,wd,w,p,ii,rho,direction)+(w[fencode3_cd1(p,ii,rhob)]*w[fencode3_cd1(p,ii,mom1+direction)])/(w[fencode3_cd1(p,ii,rhob)]+w[fencode3_cd1(p,ii,rho)]);
#else
wd[fencode3_cd1(p,ii,flux)]= transportflux(dw,wd,w,p,ii,rho,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxmom3 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int direction) {
int status=0;
#ifdef USE_SAC_3D
wd[fencode3_cd1(p,ii,flux)]=0.0;
wd[fencode3_cd1(p,ii,flux)]+=transportflux(dw,wd,w,p,ii,field,direction)+fluxmom12(dw,wd,w,p,ii,field,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxmom2 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int direction) {
int status=0;
wd[fencode3_cd1(p,ii,flux)]=0.0;
#ifdef USE_SAC
wd[fencode3_cd1(p,ii,flux)]+= transportflux(dw,wd,w,p,ii,field,direction)+fluxmom11(dw,wd,w,p,ii,field,direction);
#endif
#ifdef USE_SAC_3D
wd[fencode3_cd1(p,ii,flux)]+= transportflux(dw,wd,w,p,ii,field,direction)+fluxmom11(dw,wd,w,p,ii,field,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxmom1 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int direction) {
int status=0;
wd[fencode3_cd1(p,ii,flux)]=0.0;
#ifdef ADIABHYDRO
wd[fencode3_cd1(p,ii,flux)]+= transportflux(dw,wd,w,p,ii,field,direction);
#endif
#ifdef USE_SAC
wd[fencode3_cd1(p,ii,flux)]+= transportflux(dw,wd,w,p,ii,field,direction)+fluxmom10(dw,wd,w,p,ii,field,direction);
#endif
#ifdef USE_SAC_3D
wd[fencode3_cd1(p,ii,flux)]= transportflux(dw,wd,w,p,ii,field,direction)+fluxmom10(dw,wd,w,p,ii,field,direction);
#endif
return ( status);
}
//rho, mom1, mom2, mom3, energy, b1, b2, b3
__device__ __host__
void computeflux (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int dir) {
switch(field)
{
case rho:
computefluxrho(dw,wd,w,p,ii,dir);
break;
case mom1:
computefluxmom1(dw,wd,w,p,ii,field,dir);
break;
case mom2:
computefluxmom2(dw,wd,w,p,ii,field,dir);
break;
#ifdef USE_SAC_3D
case mom3:
computefluxmom3(dw,wd,w,p,ii,field,dir);
break;
#endif
}
}
__global__ void centdiff1init_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
{
dwn1[fencode3_cd1(p,ii,f)]=0.0;
wd[fencode3_cd1(p,ii,flux)]=0.0;
}
__syncthreads();
}
__global__ void centdiff1_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
switch(dir)
{
case 0:
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]>1 && ii[1]<(p->n[1]-2) && ii[2]>1 && ii[2]<(p->n[2]-2))
#else
if(ii[0]<p->n[0] && ii[1]>1 && ii[1]<(p->n[1]-2))
#endif
computeflux(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,0);
break;
case 1:
#ifdef USE_SAC_3D
if(ii[1]<p->n[1] && ii[0]>1 && ii[0]<(p->n[0]-2) && ii[2]>1 && ii[2]<(p->n[2]-2))
#else
if(ii[1]<p->n[1] && ii[0]>1 && ii[0]<(p->n[0]-2))
#endif
computeflux(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,1);
break;
#ifdef USE_SAC_3D
case 2:
if(ii[2]<p->n[2] && ii[0]>1 && ii[0]<(p->n[0]-2) && ii[1]>1 && ii[1]<(p->n[1]-2))
computeflux(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,2);
break;
#endif
}
__syncthreads();
}
__global__ void centdiff1a_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
real dz=p->dx[2];
#endif
#ifdef USE_SAC_3D
int kp;
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC
if(ii[0]>1 && ii[1] >1 && ii[0]<(ni-2) && ii[1]<(nj-2))
#endif
#ifdef USE_SAC_3D
if(ii[0]>1 && ii[1] >1 && ii[2] >1 && ii[0]<(ni-2) && ii[1]<(nj-2) && ii[2]<(nk-2))
#endif
divflux1(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,dir);
__syncthreads();
}
__global__ void centdiff1af_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
real dz=p->dx[2];
#endif
#ifdef USE_SAC_3D
int kp;
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
switch(dir)
{
case 0:
#ifdef USE_SAC
if(ii[1]>1 && ii[1] <(nj-2) && ii[0]<(ni) )
#endif
#ifdef USE_SAC_3D
if(ii[1]>1 && ii[1] <(nj-2) && ii[0]<(ni) && ii[2]>1 && ii[2] <(nk-2) )
#endif
wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd1(p,ii,f)];
break;
case 1:
#ifdef USE_SAC
if(ii[0]>1 && ii[1] <(nj) && ii[0]<(ni-2) )
#endif
#ifdef USE_SAC_3D
if(ii[0]>1 && ii[1] <(nj) && ii[0]<(ni-2) && ii[2]>1 && ii[2] <(nk-2) )
#endif
wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd1(p,ii,f)];
break;
case 2:
#ifdef USE_SAC_3D
if(ii[0]>1 && ii[0]<(ni-2) && ii[1]>1 && ii[1]<(nj-2) && ii[2] <(nk) )
wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd1(p,ii,f)];
#endif
break;
}
/* if( ii[1] <(nj) && ii[0]<(ni) )
if(p->ipe==0 && ii[0]==124 && (p->it)==2)
{
wmod[fencode3_cd1(p,ii,rho)]=0.225;
w[fencode3_cd1(p,ii,rho)]=0.225;
}*/
/* if( ii[1] <(nj) && ii[0]<(ni) )
if(p->ipe==3 && ii[1]==3 && (p->it)==2)
{
wmod[fencode3_cd1(p,ii,rho)]=0.22114;
w[fencode3_cd1(p,ii,rho)]=0.22114;
}*/
__syncthreads();
}
__global__ void centdiff1binit_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
{
dwn1[fencode3_cd1(p,ii,f)]=0.0;
}
__syncthreads();
}
__global__ void centdiff1b_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
real dz=p->dx[2];
#endif
#ifdef USE_SAC_3D
int kp;
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#if(defined(USE_USERSOURCE))
{
ii[0]=ip;
ii[1]=jp;
#endif
#if(defined(USE_SAC_3D) && defined(USE_USERSOURCE))
ii[2]=kp;
#endif
#if(defined(USE_SAC_3D) && defined(USE_USERSOURCE))
if(ii[0]<((p->n[0])) && ii[1]<((p->n[1])) && ii[2]<((p->n[2])) )
#endif
#if(defined(USE_SAC) && defined(USE_USERSOURCE))
if(ii[0]<(p->n[0]) && ii[1]<(p->n[1]))
#endif
#ifdef USE_USERSOURCE
addsourceterms1_cd1(dwn1,wd,wmod+ordero*NVAR*dimp,p,s,ii,f,dir);
}
__syncthreads();
#endif
// }
}
__global__ void centdiff1bf_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
real dz=p->dx[2];
#endif
#ifdef USE_SAC_3D
int kp;
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC
if( ii[1] <(nj) && ii[0]<(ni) )
#endif
#ifdef USE_SAC_3D
if(ii[1] <(nj) && ii[0]<(ni) && ii[2] <(nk) )
#endif
wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd1(p,ii,f)];
__syncthreads();
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
void checkErrors_cd1(char *label)
{
// we need to synchronise first to catch errors due to
// asynchronous operations that would otherwise
// potentially go unnoticed
cudaError_t err;
err = cudaThreadSynchronize();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = cudaGetLastError();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
int cucentdiff1(struct params **p, struct params **d_p,struct state **d_s, real **d_w, real **d_wmod, real **d_dwn1, real **d_wd, int order, int ordero, real dt, int field, int dir)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
cudaMemcpy(*d_p, *p, sizeof(struct params), cudaMemcpyHostToDevice);
centdiff1init_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_s,*d_w,*d_wmod, *d_dwn1, *d_wd, order, ordero,dt,field,dir);
cudaThreadSynchronize();
centdiff1_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_s,*d_w,*d_wmod, *d_dwn1, *d_wd, order, ordero,dt,field,dir);
cudaThreadSynchronize();
centdiff1a_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_s,*d_w,*d_wmod, *d_dwn1, *d_wd, order, ordero,dt,field,dir);
cudaThreadSynchronize();
centdiff1af_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_s,*d_w,*d_wmod, *d_dwn1, *d_wd, order, ordero,dt,field,dir);
cudaThreadSynchronize();
  return 0;
}
|
3b96bf78de7b6876f5fa91546398f59c6bd725f6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <GL/glut.h>
#include <GL/gl.h>
#include <malloc.h>
#include <signal.h>
#include <hip/hip_runtime_api.h>
#include <errno.h>
/******************************************************************************
Displays two grey scale images. On the left is an image that has come from an
image processing pipeline, just after colour thresholding. On the right is
the result of applying an edge detection convolution operator to the left
image. This program performs that convolution.
Things to note:
- A single unsigned char stores a pixel intensity value. 0 is black, 255 is
white.
- The colour mode used is GL_LUMINANCE. This uses a single number to
represent a pixel's intensity. In this case we want 256 shades of grey,
which is best stored in eight bits, so GL_UNSIGNED_BYTE is specified as
the pixel data type.
To compile, adapt the command below to match your filenames:
nvcc -o cudaimg cudaImage.cu -lglut -lGL -lm
Dr Kevan Buckley, University of Wolverhampton, 2018
******************************************************************************/
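/* For reference: the edge detection operator applied in detect_edges below is
   the 4-neighbour Laplacian convolution mask
        0 -1  0
       -1  4 -1
        0 -1  0
   and any pixel with a positive response is marked as an edge (255). */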
#define width 100
#define height 72
unsigned char results[width * height];
unsigned char image[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,0,0,255,255,0,0,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,0,0,
255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,0,0,255,255,
255,255,255,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,255,255,255,255,255,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,0,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,
255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,0,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,0,255,255,
255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,0,
0,0,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,
255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,
255,255,255,255,0,0,255,255,255,255,255,0,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,
255,255,0,0,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,
255,0,0,255,255,255,255,255,255,0,0,255,255,255,255,0,0,255,255,
255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,255,0,0,
255,255,255,255,255,255,255,0,0,255,255,255,0,0,255,255,255,255,255,
255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,0,0,0,0,
0,0,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,
255,255,255,0,0,255,255,255,255,255,255,255,255,0,255,255,255,255,0,
255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,
0,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,0,0,255,255,0,0,255,255,255,255,0,0,255,255,255,255,
255,255,255,255,0,0,255,255,255,0,0,255,255,255,255,255,255,255,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,0,0,255,255,
255,255,255,255,255,255,255,255,0,0,255,255,255,0,0,255,255,255,255,
0,0,255,255,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,0,255,255,
255,255,0,0,255,255,255,255,255,255,255,255,255,0,0,255,255,255,0,
0,255,255,255,255,0,0,255,255,255,255,255,255,0,0,255,255,255,255,
255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,255,255,255,255,0,0,255,255,255,0,0,255,
255,255,0,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,0,
0,255,255,255,255,0,0,255,255,255,255,0,0,0,255,255,255,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,0,255,255,
255,255,0,0,255,255,255,0,255,255,255,255,0,0,255,255,255,255,255,
255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,
255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,0,0,
255,255,255,255,0,255,255,255,255,255,255,255,255,255,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,0,0,255,255,
255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,0,0,255,
255,255,255,255,0,255,255,255,255,0,0,255,255,255,255,255,255,255,255,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,
255,0,0,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,
255,255,255,0,0,255,255,255,255,0,0,255,255,255,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,
255,255,255,255,255,255,255,0,255,255,255,255,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,
0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,
255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,0,0,
0,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,0,0,255,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,
255,255,255,255,255,0,255,255,255,255,0,0,255,255,255,255,255,0,255,
255,255,255,255,0,255,255,255,255,255,255,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,0,255,255,255,
255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,
255,0,0,255,255,255,255,255,255,255,0,0,255,255,255,0,0,0,0,
0,0,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,0,
0,0,255,255,255,255,0,0,255,255,255,255,255,0,0,0,0,255,255,
255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,
255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,0,0,
255,255,255,255,0,0,0,0,255,255,255,0,0,255,255,255,255,255,0,
255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
255,255,255,0,0,255,255,255,0,0,0,0,0,255,255,255,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,
255,255,255,255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,
255,255,255,0,0,255,255,255,255,0,255,255,255,0,0,255,0,0,255,
255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,
0,0,0,255,0,0,0,0,0,0,255,255,255,255,0,0,255,255,0,
255,255,255,0,255,255,255,255,0,0,255,255,255,255,255,255,255,0,0,
255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,0,
0,0,255,255,255,255,0,0,0,0,0,255,0,0,255,255,255,255,255,
0,0,255,0,0,255,255,255,0,0,255,255,255,255,0,0,255,255,255,
255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
255,255,255,255,255,255,0,255,255,255,255,255,0,0,255,255,255,255,0,
255,255,255,255,255,0,0,0,0,0,255,255,255,0,0,255,255,255,255,
255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,255,255,255,255,255,255,0,0,255,255,255,255,0,0,
255,255,255,0,0,255,255,255,255,255,255,0,0,0,0,255,255,255,255,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,0,0,255,
255,255,255,255,0,0,255,255,0,0,255,255,255,255,255,255,0,0,0,
255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,
255,255,255,0,0,255,255,255,255,255,0,0,255,0,0,255,255,255,255,
255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,0,255,255,255,255,255,255,0,0,255,255,255,255,255,255,0,0,0,
0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,255,255,255,255,255,255,0,0,255,255,255,255,
255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,0,0,
255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,
255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255
};
__global__ void detect_edges(unsigned char *in, unsigned char *out) {
int i;
int n_pixels = width * height;
for(i=0;i<n_pixels;i++) {
int s, p; // the pixel of interest
int b, d, f, h; // the pixels adjacent to s,p used for the calculation
int r; // the result of the calculation
p = i / width;
s = i - (width * p);
if (s == 0 || p == 0 || s == width - 1 || p == height - 1) {
out[i] = 0;
} else {
b = i + width;
d = i - 1;
f = i + 1;
h = i - width;
r = (in[i] * 4) + (in[b] * -1) + (in[d] * -1) + (in[f] * -1)
+ (in[h] * -1);
if (r > 0) { // if the result is positive this is an edge pixel
out[i] = 255;
} else {
out[i] = 0;
}
}
}
}
void tidy_and_exit() {
exit(0);
}
void sigint_callback(int signal_number){
printf("\nInterrupt from keyboard\n");
tidy_and_exit();
}
static void display() {
glClear(GL_COLOR_BUFFER_BIT);
glRasterPos4i(-1, -1, 0, 1);
glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, image);
glRasterPos4i(0, -1, 0, 1);
glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, results);
glFlush();
}
static void key_pressed(unsigned char key, int s, int p) {
switch(key){
case 27: // escape
tidy_and_exit();
break;
default:
printf("\nPress escape to exit\n");
break;
}
}
int time_difference(struct timespec *start, struct timespec *finish,long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main(int argc, char **argv) {
unsigned char *d_results;
unsigned char *d_image;
hipMalloc((void**)&d_image, sizeof(unsigned char) * (width * height));
hipMalloc((void**)&d_results, sizeof(unsigned char) * (width * height));
hipMemcpy(d_image,&image,sizeof(unsigned char) * (width * height),hipMemcpyHostToDevice);
signal(SIGINT, sigint_callback);
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
hipLaunchKernelGGL(( detect_edges), dim3(100),dim3(72), 0, 0, d_image,d_results);
hipDeviceSynchronize();
hipMemcpy(&results, d_results, sizeof(unsigned char) * (width*height), hipMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
hipFree(d_image);
hipFree(d_results);
glutInit(&argc, argv);
glutInitWindowSize(width * 2,height);
glutInitDisplayMode(GLUT_SINGLE | GLUT_LUMINANCE);
glutCreateWindow("6CS005 Image Progessing Courework");
glutDisplayFunc(display);
glutKeyboardFunc(key_pressed);
glClearColor(0.0, 1.0, 0.0, 1.0);
glutMainLoop();
tidy_and_exit();
return 0;
}
| 3b96bf78de7b6876f5fa91546398f59c6bd725f6.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <GL/glut.h>
#include <GL/gl.h>
#include <malloc.h>
#include <signal.h>
#include <cuda_runtime_api.h>
#include <errno.h>
/******************************************************************************
Displays two grey scale images. On the left is an image that has come from an
image processing pipeline, just after colour thresholding. On the right is
the result of applying an edge detection convolution operator to the left
image. This program performs that convolution.
Things to note:
- A single unsigned char stores a pixel intensity value. 0 is black, 255 is
white.
- The colour mode used is GL_LUMINANCE. This uses a single number to
represent a pixel's intensity. In this case we want 256 shades of grey,
which is best stored in eight bits, so GL_UNSIGNED_BYTE is specified as
the pixel data type.
To compile, adapt the command below to match your filenames:
nvcc -o cudaimg cudaImage.cu -lglut -lGL -lm
Dr Kevan Buckley, University of Wolverhampton, 2018
******************************************************************************/
#define width 100
#define height 72
unsigned char results[width * height];
unsigned char image[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,0,0,255,255,0,0,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,0,0,
255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,0,0,255,255,
255,255,255,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,255,255,255,255,255,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,0,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,
255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,0,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,0,255,255,
255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,0,
0,0,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,
255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,
255,255,255,255,0,0,255,255,255,255,255,0,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,
255,255,0,0,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,
255,0,0,255,255,255,255,255,255,0,0,255,255,255,255,0,0,255,255,
255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,255,0,0,
255,255,255,255,255,255,255,0,0,255,255,255,0,0,255,255,255,255,255,
255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,0,0,0,0,
0,0,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,
255,255,255,0,0,255,255,255,255,255,255,255,255,0,255,255,255,255,0,
255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,
0,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,0,0,255,255,0,0,255,255,255,255,0,0,255,255,255,255,
255,255,255,255,0,0,255,255,255,0,0,255,255,255,255,255,255,255,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,0,0,255,255,
255,255,255,255,255,255,255,255,0,0,255,255,255,0,0,255,255,255,255,
0,0,255,255,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,0,255,255,
255,255,0,0,255,255,255,255,255,255,255,255,255,0,0,255,255,255,0,
0,255,255,255,255,0,0,255,255,255,255,255,255,0,0,255,255,255,255,
255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,255,255,255,255,0,0,255,255,255,0,0,255,
255,255,0,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,0,
0,255,255,255,255,0,0,255,255,255,255,0,0,0,255,255,255,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,0,255,255,
255,255,0,0,255,255,255,0,255,255,255,255,0,0,255,255,255,255,255,
255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,
255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,0,0,
255,255,255,255,0,255,255,255,255,255,255,255,255,255,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,0,0,255,255,
255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,0,0,255,
255,255,255,255,0,255,255,255,255,0,0,255,255,255,255,255,255,255,255,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,
255,0,0,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,
255,255,255,0,0,255,255,255,255,0,0,255,255,255,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,
255,255,255,255,255,255,255,0,255,255,255,255,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,
0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,
255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,0,0,
0,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,0,0,255,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,
255,255,255,255,255,0,255,255,255,255,0,0,255,255,255,255,255,0,255,
255,255,255,255,0,255,255,255,255,255,255,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,0,255,255,255,
255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,
255,0,0,255,255,255,255,255,255,255,0,0,255,255,255,0,0,0,0,
0,0,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,0,
0,0,255,255,255,255,0,0,255,255,255,255,255,0,0,0,0,255,255,
255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,
255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,0,0,
255,255,255,255,0,0,0,0,255,255,255,0,0,255,255,255,255,255,0,
255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
255,255,255,0,0,255,255,255,0,0,0,0,0,255,255,255,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,
255,255,255,255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,
255,255,255,0,0,255,255,255,255,0,255,255,255,0,0,255,0,0,255,
255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,
0,0,0,255,0,0,0,0,0,0,255,255,255,255,0,0,255,255,0,
255,255,255,0,255,255,255,255,0,0,255,255,255,255,255,255,255,0,0,
255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,0,
0,0,255,255,255,255,0,0,0,0,0,255,0,0,255,255,255,255,255,
0,0,255,0,0,255,255,255,0,0,255,255,255,255,0,0,255,255,255,
255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
255,255,255,255,255,255,0,255,255,255,255,255,0,0,255,255,255,255,0,
255,255,255,255,255,0,0,0,0,0,255,255,255,0,0,255,255,255,255,
255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,255,255,255,255,255,255,0,0,255,255,255,255,0,0,
255,255,255,0,0,255,255,255,255,255,255,0,0,0,0,255,255,255,255,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,0,0,255,
255,255,255,255,0,0,255,255,0,0,255,255,255,255,255,255,0,0,0,
255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,
255,255,255,0,0,255,255,255,255,255,0,0,255,0,0,255,255,255,255,
255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,0,255,255,255,255,255,255,0,0,255,255,255,255,255,255,0,0,0,
0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,255,255,255,255,255,255,0,0,255,255,255,255,
255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,0,0,
255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,
255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255
};
__global__ void detect_edges(unsigned char *in, unsigned char *out) {
int i;
int n_pixels = width * height;
for(i=0;i<n_pixels;i++) {
int s, p; // the pixel of interest
int b, d, f, h; // the pixels adjacent to s,p used for the calculation
int r; // the result of the calculation
p = i / width;
s = i - (width * p);
if (s == 0 || p == 0 || s == width - 1 || p == height - 1) {
out[i] = 0;
} else {
b = i + width;
d = i - 1;
f = i + 1;
h = i - width;
r = (in[i] * 4) + (in[b] * -1) + (in[d] * -1) + (in[f] * -1)
+ (in[h] * -1);
if (r > 0) { // if the result is positive this is an edge pixel
out[i] = 255;
} else {
out[i] = 0;
}
}
}
}
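/* A minimal alternative sketch (hypothetical, not called from main below):
   in detect_edges above every one of the 100*72 launched threads loops over
   the entire image, so the same work is repeated per thread. The kernel below
   maps one thread to one pixel instead; it assumes a launch that supplies at
   least width*height threads in total, e.g.
     detect_edges_per_pixel<<<(width*height + 255) / 256, 256>>>(d_image, d_results);
*/
__global__ void detect_edges_per_pixel(unsigned char *in, unsigned char *out) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= width * height) return;      // guard excess threads
  int p = i / width;                    // row of the pixel of interest
  int s = i - (width * p);              // column of the pixel of interest
  if (s == 0 || p == 0 || s == width - 1 || p == height - 1) {
    out[i] = 0;                         // image border is never an edge
  } else {
    // same 4-neighbour Laplacian as detect_edges above
    int r = (in[i] * 4) - in[i + width] - in[i - 1] - in[i + 1] - in[i - width];
    out[i] = (r > 0) ? 255 : 0;         // positive response marks an edge pixel
  }
}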
void tidy_and_exit() {
exit(0);
}
void sigint_callback(int signal_number){
printf("\nInterrupt from keyboard\n");
tidy_and_exit();
}
static void display() {
glClear(GL_COLOR_BUFFER_BIT);
glRasterPos4i(-1, -1, 0, 1);
glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, image);
glRasterPos4i(0, -1, 0, 1);
glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, results);
glFlush();
}
static void key_pressed(unsigned char key, int s, int p) {
switch(key){
case 27: // escape
tidy_and_exit();
break;
default:
printf("\nPress escape to exit\n");
break;
}
}
int time_difference(struct timespec *start, struct timespec *finish,long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main(int argc, char **argv) {
unsigned char *d_results;
unsigned char *d_image;
cudaMalloc((void**)&d_image, sizeof(unsigned char) * (width * height));
cudaMalloc((void**)&d_results, sizeof(unsigned char) * (width * height));
cudaMemcpy(d_image,&image,sizeof(unsigned char) * (width * height),cudaMemcpyHostToDevice);
signal(SIGINT, sigint_callback);
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
detect_edges<<<100,72>>>(d_image,d_results);
cudaThreadSynchronize();
cudaMemcpy(&results, d_results, sizeof(unsigned char) * (width*height), cudaMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
cudaFree(d_image);
cudaFree(d_results);
glutInit(&argc, argv);
glutInitWindowSize(width * 2,height);
glutInitDisplayMode(GLUT_SINGLE | GLUT_LUMINANCE);
glutCreateWindow("6CS005 Image Progessing Courework");
glutDisplayFunc(display);
glutKeyboardFunc(key_pressed);
glClearColor(0.0, 1.0, 0.0, 1.0);
glutMainLoop();
tidy_and_exit();
return 0;
}
|
fcf6deb9927ccd3784f783ecf1ae4943403f2021.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <torch/torch.h>
#include <iostream>
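// Each thread strides through the flattened M x N space of box pairs
// (stride idxJump) and writes the pairwise IoU into box_iou[b1_idx*N + b2_idx].
// Boxes are (xmin, ymin, xmax, ymax) float4s with inclusive pixel coordinates,
// hence the "+ 1" in the width, height and area computations.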
__global__ void box_iou_cuda_kernel(float *box_iou, float4 *box1, float4 *box2, long M,
long N, int idxJump) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
size_t b1_idx, b2_idx, b1_row_offset, b2_row_offset;
float xmin1, xmin2, xmax1, xmax2, ymin1, ymin2, ymax1, ymax2;
float x_tl, y_tl, x_br, y_br, w, h, inter, area1, area2, iou;
for (long i = idx; i < M * N; i += idxJump){
b1_idx = i / N;
b2_idx = i % N;
b1_row_offset = b1_idx;
b2_row_offset = b2_idx;
xmin1 = box1[b1_row_offset].x;
ymin1 = box1[b1_row_offset].y;
xmax1 = box1[b1_row_offset].z;
ymax1 = box1[b1_row_offset].w;
xmin2 = box2[b2_row_offset].x;
ymin2 = box2[b2_row_offset].y;
xmax2 = box2[b2_row_offset].z;
ymax2 = box2[b2_row_offset].w;
x_tl = fmaxf(xmin1, xmin2);
y_tl = fmaxf(ymin1, ymin2);
x_br = fminf(xmax1, xmax2);
y_br = fminf(ymax1, ymax2);
w = (x_br - x_tl + 1) < 0 ? 0.0f : (x_br - x_tl + 1);
h = (y_br - y_tl + 1) < 0 ? 0.0f : (y_br - y_tl + 1);
inter = w * h;
area1 = (xmax1 - xmin1 + 1) * (ymax1 - ymin1 + 1);
area2 = (xmax2 - xmin2 + 1) * (ymax2 - ymin2 + 1);
iou = inter / (area1 + area2 - inter);
box_iou[b1_idx * N + b2_idx] = iou;
}
}
at::Tensor box_iou_cuda(at::Tensor box1, at::Tensor box2){
int minGridSize;
int blockSize;
hipOccupancyMaxPotentialBlockSize(&minGridSize,
&blockSize,
(void*) box_iou_cuda_kernel,
0, // dynamic memory
0); // maximum utilized threads
long M = box1.size(0);
long N = box2.size(0);
auto box_iou = torch::ones({M, N}, torch::CUDA(at::kFloat));
dim3 gridDim(minGridSize);
dim3 blockDim(blockSize);
int idxJump = minGridSize * blockSize;
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( box_iou_cuda_kernel), dim3(gridDim), dim3(blockDim), 0, stream.stream(), box_iou.data_ptr<float>(),
(float4*) box1.data_ptr<float>(),
(float4*) box2.data_ptr<float>(),
M, N,
idxJump);
return box_iou;
}
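// Usage sketch (hypothetical host-side call, assuming two float CUDA tensors of [x1, y1, x2, y2] boxes):
//   at::Tensor b1 = torch::rand({128, 4}, torch::CUDA(at::kFloat));
//   at::Tensor b2 = torch::rand({256, 4}, torch::CUDA(at::kFloat));
//   at::Tensor iou = box_iou_cuda(b1, b2); // [128, 256] matrix of pairwise IoU values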
| fcf6deb9927ccd3784f783ecf1ae4943403f2021.cu | /**
* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <torch/torch.h>
#include <iostream>
__global__ void box_iou_cuda_kernel(float *box_iou, float4 *box1, float4 *box2, long M,
long N, int idxJump) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
size_t b1_idx, b2_idx, b1_row_offset, b2_row_offset;
float xmin1, xmin2, xmax1, xmax2, ymin1, ymin2, ymax1, ymax2;
float x_tl, y_tl, x_br, y_br, w, h, inter, area1, area2, iou;
for (long i = idx; i < M * N; i += idxJump){
b1_idx = i / N;
b2_idx = i % N;
b1_row_offset = b1_idx;
b2_row_offset = b2_idx;
xmin1 = box1[b1_row_offset].x;
ymin1 = box1[b1_row_offset].y;
xmax1 = box1[b1_row_offset].z;
ymax1 = box1[b1_row_offset].w;
xmin2 = box2[b2_row_offset].x;
ymin2 = box2[b2_row_offset].y;
xmax2 = box2[b2_row_offset].z;
ymax2 = box2[b2_row_offset].w;
x_tl = fmaxf(xmin1, xmin2);
y_tl = fmaxf(ymin1, ymin2);
x_br = fminf(xmax1, xmax2);
y_br = fminf(ymax1, ymax2);
w = (x_br - x_tl + 1) < 0 ? 0.0f : (x_br - x_tl + 1);
h = (y_br - y_tl + 1) < 0 ? 0.0f : (y_br - y_tl + 1);
inter = w * h;
area1 = (xmax1 - xmin1 + 1) * (ymax1 - ymin1 + 1);
area2 = (xmax2 - xmin2 + 1) * (ymax2 - ymin2 + 1);
iou = inter / (area1 + area2 - inter);
box_iou[b1_idx * N + b2_idx] = iou;
}
}
at::Tensor box_iou_cuda(at::Tensor box1, at::Tensor box2){
int minGridSize;
int blockSize;
cudaOccupancyMaxPotentialBlockSize(&minGridSize,
&blockSize,
(void*) box_iou_cuda_kernel,
0, // dynamic memory
0); // maximum utilized threads
long M = box1.size(0);
long N = box2.size(0);
auto box_iou = torch::ones({M, N}, torch::CUDA(at::kFloat));
dim3 gridDim(minGridSize);
dim3 blockDim(blockSize);
int idxJump = minGridSize * blockSize;
auto stream = at::cuda::getCurrentCUDAStream();
box_iou_cuda_kernel<<<gridDim, blockDim, 0, stream.stream()>>>(box_iou.data_ptr<float>(),
(float4*) box1.data_ptr<float>(),
(float4*) box2.data_ptr<float>(),
M, N,
idxJump);
return box_iou;
}
|
da781eb5556036c2f1bd07fccd735bb7191a2e63.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/* Program Parameters */
#define MAXN 8000 /* Max value of N */
int N; /* Matrix size */
// Thread block size
#define BLOCK_SIZE 16
/* Matrices */
float A[MAXN][MAXN], B[MAXN][MAXN];
/* junk */
#define randm() 4|2[uid]&3
/* Prototype */
/* ------------------ Cuda Code --------------------- */
/****** You will replace this routine with your own parallel version *******/
/* Provided global variables are MAXN, N, A[][] and B[][],
* defined in the beginning of this code. B[][] is initialized to zeros.
*/
/* returns a seed for srand based on the time */
__global__ void matrixSD(float* d_in, float* d_mean, float* d_sd, int N)
{
extern __shared__ float sdata1[];
//each thread loads one element from global to shared mem
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int tid = threadIdx.y;
unsigned int i = idx_y * N + idx_x;
sdata1[tid] = powf(d_in[i] - d_mean[blockIdx.x], 2.0);
__syncthreads();
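// Tree reduction over blockDim.y partial squared deviations held in dynamic shared memory
// (the launch must therefore pass blockDim.y * sizeof(float) as the shared-memory size).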
// do reduction in shared mem
for(unsigned int s=1; s < blockDim.y; s *= 2)
{
if(tid +s < N)
{
if(tid % (2*s) == 0)
{
sdata1[tid] += sdata1[tid + s];
}
}
__syncthreads();
}
// write result for this block to global mem
if(tid == 0)
d_sd[blockIdx.x] = sqrtf(sdata1[0]/(float) N);
} | da781eb5556036c2f1bd07fccd735bb7191a2e63.cu | #include "includes.h"
/* Program Parameters */
#define MAXN 8000 /* Max value of N */
int N; /* Matrix size */
// Thread block size
#define BLOCK_SIZE 16
/* Matrices */
float A[MAXN][MAXN], B[MAXN][MAXN];
/* junk */
#define randm() 4|2[uid]&3
/* Prototype */
/* ------------------ Cuda Code --------------------- */
/****** You will replace this routine with your own parallel version *******/
/* Provided global variables are MAXN, N, A[][] and B[][],
* defined in the beginning of this code. B[][] is initialized to zeros.
*/
/* returns a seed for srand based on the time */
__global__ void matrixSD(float* d_in, float* d_mean, float* d_sd, int N)
{
extern __shared__ float sdata1[];
//each thread loads one element from global to shared mem
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int tid = threadIdx.y;
unsigned int i = idx_y * N + idx_x;
sdata1[tid] = powf(d_in[i] - d_mean[blockIdx.x], 2.0);
__syncthreads();
// do reduction in shared mem
for(unsigned int s=1; s < blockDim.y; s *= 2)
{
if(tid +s < N)
{
if(tid % (2*s) == 0)
{
sdata1[tid] += sdata1[tid + s];
}
}
__syncthreads();
}
// write result for this block to global mem
if(tid == 0)
d_sd[blockIdx.x] = sqrtf(sdata1[0]/(float) N);
} |
aaf2d6a3e5706470e9a1532898bafe48ee1e2932.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Mandelbrot explorer, based on my old Julia demo plus parts of Nicolas Melot's Lab 1 code.
// CPU only! Your task: Rewrite for CUDA! Test and evaluate performance.
// Compile with:
// gcc interactiveMandelbrot.cpp -shared-libgcc -lstdc++-static -o interactiveMandelbrot -lglut -lGL
// or
// g++ interactiveMandelbrot.cpp -o interactiveMandelbrot -lglut -lGL
// Your CUDA version should compile with something like
// nvcc -lglut -lGL interactiveMandelbrotCUDA.cu -o interactiveMandelbrotCUDA
// Preliminary version 2014-11-30
// Cleaned a bit more 2014-12-01
// Corrected the missing glRasterPos2i 2014-12-03
#ifdef __APPLE__
#include <OpenGL/gl.h>
#include <GLUT/glut.h>
#else
#include <GL/glut.h>
#include <GL/gl.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define DIM 512
// Select precision here! float or double!
#define MYFLOAT float
// Complex number class
// Member functions are marked __host__ __device__ so that both the CPU reference
// path (mandelbrot) and the GPU kernel path (mandelbrotGPU) can use this class.
struct hipComplex
{
MYFLOAT r;
MYFLOAT i;
__host__ __device__ hipComplex( MYFLOAT a, MYFLOAT b ) : r(a), i(b) {}
__host__ __device__ float magnitude2( void )
{
return r * r + i * i;
}
__host__ __device__ hipComplex operator*(const hipComplex& a)
{
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__host__ __device__ hipComplex operator+(const hipComplex& a)
{
return hipComplex(r+a.r, i+a.i);
}
};
__device__
int mandelbrotGPU( int x, int y, int gImageWidth, int gImageHeight, int maxit)
{
MYFLOAT offsetx = -200, offsety = 0;//, zoom = 0;
MYFLOAT scale = 1.5;
MYFLOAT jx = scale * (MYFLOAT)(gImageWidth/2 - x + offsetx/scale)/(gImageWidth/2);
MYFLOAT jy = scale * (MYFLOAT)(gImageHeight/2 - y + offsety/scale)/(gImageWidth/2);
hipComplex c(jx, jy);
hipComplex a(jx, jy);
int i = 0;
for (i=0; i<maxit; i++)
{
a = a * a + c;
if (a.magnitude2() > 1000)
return i;
}
return i;
}
__global__
void fractalGPU(unsigned char *image, int gImageWidth, int gImageHeight){
int maxiter = 20;
int y = (blockIdx.y * blockDim.y + threadIdx.y);
int x = (blockIdx.x * blockDim.x + threadIdx.x);
int offset = x + y * gImageWidth;
int fractalValue = mandelbrotGPU(x, y, gImageWidth, gImageHeight, maxiter);
// Colorize it
int red = 255 * fractalValue/maxiter;
if (red > 255) red = 255 - red;
int green = 255 * fractalValue*4/maxiter;
if (green > 255) green = 255 - green;
int blue = 255 * fractalValue*20/maxiter;
if (blue > 255) blue = 255 - blue;
image[offset*4 + 0] = red;
image[offset*4 + 1] = green;
image[offset*4 + 2] = blue;
image[offset*4 + 3] = 255;
}
// Image data
unsigned char *pixels = NULL;
int gImageWidth, gImageHeight;
int imagesize;
unsigned char *gpuimage;
int blocksize = 16; // dimBlockmatrix is blocksize x blocksize, so keep blocksize*blocksize within the 1024-thread block limit
// Init image data
void initBitmap(int width, int height)
{
if (pixels) free(pixels);
pixels = new unsigned char[(width * height * 4)];
gImageWidth = width;
gImageHeight = height;
imagesize = gImageWidth*gImageHeight*4*sizeof(unsigned char);
hipMalloc( (void**)&gpuimage, imagesize );
}
// User controlled parameters
int maxiter = 200;
MYFLOAT offsetx = -200, offsety = 0, zoom = 0;
MYFLOAT scale = 1.5;
int mandelbrot( int x, int y)
{
MYFLOAT jx = scale * (MYFLOAT)(gImageWidth/2 - x + offsetx/scale)/(gImageWidth/2);
MYFLOAT jy = scale * (MYFLOAT)(gImageHeight/2 - y + offsety/scale)/(gImageWidth/2);
hipComplex c(jx, jy);
hipComplex a(jx, jy);
int i = 0;
for (i=0; i<maxiter; i++)
{
a = a * a + c;
if (a.magnitude2() > 1000)
return i;
}
return i;
}
void computeFractal( unsigned char *ptr)
{
// map from x, y to pixel position
for (int x = 0; x < gImageWidth; x++)
for (int y = 0; y < gImageHeight; y++)
{
int offset = x + y * gImageWidth;
// now calculate the value at that position
int fractalValue = mandelbrot( x, y);
// Colorize it
int red = 255 * fractalValue/maxiter;
if (red > 255) red = 255 - red;
int green = 255 * fractalValue*4/maxiter;
if (green > 255) green = 255 - green;
int blue = 255 * fractalValue*20/maxiter;
if (blue > 255) blue = 255 - blue;
ptr[offset*4 + 0] = red;
ptr[offset*4 + 1] = green;
ptr[offset*4 + 2] = blue;
ptr[offset*4 + 3] = 255;
}
}
char print_help = 0;
// Yuck, GLUT text is old junk that should be avoided... but it will have to do
static void print_str(void *font, const char *string)
{
int i;
for (i = 0; string[i]; i++)
glutBitmapCharacter(font, string[i]);
}
void PrintHelp()
{
if (print_help)
{
glPushMatrix();
glLoadIdentity();
glOrtho(-0.5, 639.5, -0.5, 479.5, -1.0, 1.0);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glColor4f(0.f, 0.f, 0.5f, 0.5f);
glRecti(40, 40, 600, 440);
glColor3f(1.f, 1.f, 1.f);
glRasterPos2i(300, 420);
print_str(GLUT_BITMAP_HELVETICA_18, "Help");
glRasterPos2i(60, 390);
print_str(GLUT_BITMAP_HELVETICA_18, "h - Toggle Help");
glRasterPos2i(60, 300);
print_str(GLUT_BITMAP_HELVETICA_18, "Left click + drag - move picture");
glRasterPos2i(60, 270);
print_str(GLUT_BITMAP_HELVETICA_18,
"Right click + drag up/down - unzoom/zoom");
glRasterPos2i(60, 240);
print_str(GLUT_BITMAP_HELVETICA_18, "+ - Increase max. iterations by 32");
glRasterPos2i(60, 210);
print_str(GLUT_BITMAP_HELVETICA_18, "- - Decrease max. iterations by 32");
glRasterPos2i(0, 0);
glDisable(GL_BLEND);
glPopMatrix();
}
}
// Compute fractal and display image
void Draw()
{
//computeFractal(pixels);
dim3 dimBlockmatrix( blocksize, blocksize );
dim3 dimGridmatrix( gImageWidth/blocksize, gImageHeight/blocksize );
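// One thread per pixel; the kernel has no bounds check, so the image width and height are assumed to be multiples of blocksize.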
hipLaunchKernelGGL(( fractalGPU), dim3(dimGridmatrix), dim3(dimBlockmatrix), 0, 0, gpuimage, gImageWidth, gImageHeight);
hipMemcpy( pixels, gpuimage, imagesize, hipMemcpyDeviceToHost );
// Dump the whole picture onto the screen. (Old-style OpenGL but without lots of geometry that doesn't matter so much.)
glClearColor( 0.0, 0.0, 0.0, 1.0 );
glClear( GL_COLOR_BUFFER_BIT );
glDrawPixels( gImageWidth, gImageHeight, GL_RGBA, GL_UNSIGNED_BYTE, pixels );
if (print_help)
PrintHelp();
glutSwapBuffers();
}
char explore = 1;
static void Reshape(int width, int height)
{
glViewport(0, 0, width, height);
glLoadIdentity();
glOrtho(-0.5f, width - 0.5f, -0.5f, height - 0.5f, -1.f, 1.f);
initBitmap(width, height);
glutPostRedisplay();
}
int mouse_x, mouse_y, mouse_btn;
// Mouse down
static void mouse_button(int button, int state, int x, int y)
{
if (state == GLUT_DOWN)
{
// Record start position
mouse_x = x;
mouse_y = y;
mouse_btn = button;
}
}
// Drag mouse
static void mouse_motion(int x, int y)
{
if (mouse_btn == 0)
// Ordinary mouse button - move
{
offsetx += (x - mouse_x)*scale;
mouse_x = x;
offsety += (mouse_y - y)*scale;
mouse_y = y;
glutPostRedisplay();
}
else
// Alt mouse button - scale
{
scale *= pow(1.1, y - mouse_y);
mouse_y = y;
glutPostRedisplay();
}
}
void KeyboardProc(unsigned char key, int x, int y)
{
switch (key)
{
case 27: /* Escape key */
case 'q':
case 'Q':
exit(0);
break;
case '+':
maxiter += maxiter < 1024 - 32 ? 32 : 0;
break;
case '-':
maxiter -= maxiter > 0 + 32 ? 32 : 0;
break;
case 'h':
print_help = !print_help;
break;
}
glutPostRedisplay();
}
// Main program, inits
int main( int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGBA );
glutInitWindowSize( DIM, DIM );
glutCreateWindow("Mandelbrot explorer (GPU)");
glutDisplayFunc(Draw);
glutMouseFunc(mouse_button);
glutMotionFunc(mouse_motion);
glutKeyboardFunc(KeyboardProc);
glutReshapeFunc(Reshape);
initBitmap(DIM, DIM);
glutMainLoop();
}
| aaf2d6a3e5706470e9a1532898bafe48ee1e2932.cu | // Mandelbrot explorer, based on my old Julia demo plus parts of Nicolas Melot's Lab 1 code.
// CPU only! Your task: Rewrite for CUDA! Test and evaluate performance.
// Compile with:
// gcc interactiveMandelbrot.cpp -shared-libgcc -lstdc++-static -o interactiveMandelbrot -lglut -lGL
// or
// g++ interactiveMandelbrot.cpp -o interactiveMandelbrot -lglut -lGL
// Your CUDA version should compile with something like
// nvcc -lglut -lGL interactiveMandelbrotCUDA.cu -o interactiveMandelbrotCUDA
// Preliminary version 2014-11-30
// Cleaned a bit more 2014-12-01
// Corrected the missing glRasterPos2i 2014-12-03
#ifdef __APPLE__
#include <OpenGL/gl.h>
#include <GLUT/glut.h>
#else
#include <GL/glut.h>
#include <GL/gl.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define DIM 512
// Select precision here! float or double!
#define MYFLOAT float
// Complex number class
// Member functions are marked __host__ __device__ so that both the CPU reference
// path (mandelbrot) and the GPU kernel path (mandelbrotGPU) can use this class.
struct cuComplex
{
MYFLOAT r;
MYFLOAT i;
__host__ __device__ cuComplex( MYFLOAT a, MYFLOAT b ) : r(a), i(b) {}
__host__ __device__ float magnitude2( void )
{
return r * r + i * i;
}
__host__ __device__ cuComplex operator*(const cuComplex& a)
{
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__host__ __device__ cuComplex operator+(const cuComplex& a)
{
return cuComplex(r+a.r, i+a.i);
}
};
__device__
int mandelbrotGPU( int x, int y, int gImageWidth, int gImageHeight, int maxit)
{
MYFLOAT offsetx = -200, offsety = 0;//, zoom = 0;
MYFLOAT scale = 1.5;
MYFLOAT jx = scale * (MYFLOAT)(gImageWidth/2 - x + offsetx/scale)/(gImageWidth/2);
MYFLOAT jy = scale * (MYFLOAT)(gImageHeight/2 - y + offsety/scale)/(gImageWidth/2);
cuComplex c(jx, jy);
cuComplex a(jx, jy);
int i = 0;
for (i=0; i<maxit; i++)
{
a = a * a + c;
if (a.magnitude2() > 1000)
return i;
}
return i;
}
__global__
void fractalGPU(unsigned char *image, int gImageWidth, int gImageHeight){
int maxiter = 20;
int y = (blockIdx.y * blockDim.y + threadIdx.y);
int x = (blockIdx.x * blockDim.x + threadIdx.x);
int offset = x + y * gImageWidth;
int fractalValue = mandelbrotGPU(x, y, gImageWidth, gImageHeight, maxiter);
// Colorize it
int red = 255 * fractalValue/maxiter;
if (red > 255) red = 255 - red;
int green = 255 * fractalValue*4/maxiter;
if (green > 255) green = 255 - green;
int blue = 255 * fractalValue*20/maxiter;
if (blue > 255) blue = 255 - blue;
image[offset*4 + 0] = red;
image[offset*4 + 1] = green;
image[offset*4 + 2] = blue;
image[offset*4 + 3] = 255;
}
// Image data
unsigned char *pixels = NULL;
int gImageWidth, gImageHeight;
int imagesize;
unsigned char *gpuimage;
int blocksize = 16; // dimBlockmatrix is blocksize x blocksize, so keep blocksize*blocksize within the 1024-thread block limit
// Init image data
void initBitmap(int width, int height)
{
if (pixels) free(pixels);
pixels = new unsigned char[(width * height * 4)];
gImageWidth = width;
gImageHeight = height;
imagesize = gImageWidth*gImageHeight*4*sizeof(unsigned char);
cudaMalloc( (void**)&gpuimage, imagesize );
}
// User controlled parameters
int maxiter = 200;
MYFLOAT offsetx = -200, offsety = 0, zoom = 0;
MYFLOAT scale = 1.5;
int mandelbrot( int x, int y)
{
MYFLOAT jx = scale * (MYFLOAT)(gImageWidth/2 - x + offsetx/scale)/(gImageWidth/2);
MYFLOAT jy = scale * (MYFLOAT)(gImageHeight/2 - y + offsety/scale)/(gImageWidth/2);
cuComplex c(jx, jy);
cuComplex a(jx, jy);
int i = 0;
for (i=0; i<maxiter; i++)
{
a = a * a + c;
if (a.magnitude2() > 1000)
return i;
}
return i;
}
void computeFractal( unsigned char *ptr)
{
// map from x, y to pixel position
for (int x = 0; x < gImageWidth; x++)
for (int y = 0; y < gImageHeight; y++)
{
int offset = x + y * gImageWidth;
// now calculate the value at that position
int fractalValue = mandelbrot( x, y);
// Colorize it
int red = 255 * fractalValue/maxiter;
if (red > 255) red = 255 - red;
int green = 255 * fractalValue*4/maxiter;
if (green > 255) green = 255 - green;
int blue = 255 * fractalValue*20/maxiter;
if (blue > 255) blue = 255 - blue;
ptr[offset*4 + 0] = red;
ptr[offset*4 + 1] = green;
ptr[offset*4 + 2] = blue;
ptr[offset*4 + 3] = 255;
}
}
char print_help = 0;
// Yuck, GLUT text is old junk that should be avoided... but it will have to do
static void print_str(void *font, const char *string)
{
int i;
for (i = 0; string[i]; i++)
glutBitmapCharacter(font, string[i]);
}
void PrintHelp()
{
if (print_help)
{
glPushMatrix();
glLoadIdentity();
glOrtho(-0.5, 639.5, -0.5, 479.5, -1.0, 1.0);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glColor4f(0.f, 0.f, 0.5f, 0.5f);
glRecti(40, 40, 600, 440);
glColor3f(1.f, 1.f, 1.f);
glRasterPos2i(300, 420);
print_str(GLUT_BITMAP_HELVETICA_18, "Help");
glRasterPos2i(60, 390);
print_str(GLUT_BITMAP_HELVETICA_18, "h - Toggle Help");
glRasterPos2i(60, 300);
print_str(GLUT_BITMAP_HELVETICA_18, "Left click + drag - move picture");
glRasterPos2i(60, 270);
print_str(GLUT_BITMAP_HELVETICA_18,
"Right click + drag up/down - unzoom/zoom");
glRasterPos2i(60, 240);
print_str(GLUT_BITMAP_HELVETICA_18, "+ - Increase max. iterations by 32");
glRasterPos2i(60, 210);
print_str(GLUT_BITMAP_HELVETICA_18, "- - Decrease max. iterations by 32");
glRasterPos2i(0, 0);
glDisable(GL_BLEND);
glPopMatrix();
}
}
// Compute fractal and display image
void Draw()
{
//computeFractal(pixels);
dim3 dimBlockmatrix( blocksize, blocksize );
dim3 dimGridmatrix( gImageWidth/blocksize, gImageHeight/blocksize );
fractalGPU<<<dimGridmatrix, dimBlockmatrix>>>(gpuimage, gImageWidth, gImageHeight);
cudaMemcpy( pixels, gpuimage, imagesize, cudaMemcpyDeviceToHost );
// Dump the whole picture onto the screen. (Old-style OpenGL but without lots of geometry that doesn't matter so much.)
glClearColor( 0.0, 0.0, 0.0, 1.0 );
glClear( GL_COLOR_BUFFER_BIT );
glDrawPixels( gImageWidth, gImageHeight, GL_RGBA, GL_UNSIGNED_BYTE, pixels );
if (print_help)
PrintHelp();
glutSwapBuffers();
}
char explore = 1;
static void Reshape(int width, int height)
{
glViewport(0, 0, width, height);
glLoadIdentity();
glOrtho(-0.5f, width - 0.5f, -0.5f, height - 0.5f, -1.f, 1.f);
initBitmap(width, height);
glutPostRedisplay();
}
int mouse_x, mouse_y, mouse_btn;
// Mouse down
static void mouse_button(int button, int state, int x, int y)
{
if (state == GLUT_DOWN)
{
// Record start position
mouse_x = x;
mouse_y = y;
mouse_btn = button;
}
}
// Drag mouse
static void mouse_motion(int x, int y)
{
if (mouse_btn == 0)
// Ordinary mouse button - move
{
offsetx += (x - mouse_x)*scale;
mouse_x = x;
offsety += (mouse_y - y)*scale;
mouse_y = y;
glutPostRedisplay();
}
else
// Alt mouse button - scale
{
scale *= pow(1.1, y - mouse_y);
mouse_y = y;
glutPostRedisplay();
}
}
void KeyboardProc(unsigned char key, int x, int y)
{
switch (key)
{
case 27: /* Escape key */
case 'q':
case 'Q':
exit(0);
break;
case '+':
maxiter += maxiter < 1024 - 32 ? 32 : 0;
break;
case '-':
maxiter -= maxiter > 0 + 32 ? 32 : 0;
break;
case 'h':
print_help = !print_help;
break;
}
glutPostRedisplay();
}
// Main program, inits
int main( int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGBA );
glutInitWindowSize( DIM, DIM );
glutCreateWindow("Mandelbrot explorer (GPU)");
glutDisplayFunc(Draw);
glutMouseFunc(mouse_button);
glutMotionFunc(mouse_motion);
glutKeyboardFunc(KeyboardProc);
glutReshapeFunc(Reshape);
initBitmap(DIM, DIM);
glutMainLoop();
}
|
1728b0fe2d013b9ed417f1475d56e7e614ea20c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#include "lbann/optimizers/sgd.hpp"
namespace lbann {
namespace {
template <typename TensorDataType>
__global__ void momentum_noncontiguous_kernel(size_t height,
size_t width,
TensorDataType learning_rate,
TensorDataType momentum,
TensorDataType * __restrict__ values,
size_t values_ldim,
const TensorDataType * __restrict__ gradient,
size_t gradient_ldim,
TensorDataType * __restrict__ velocity,
size_t velocity_ldim) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
if (gid < height * width) {
const auto& row = gid % height;
const auto& col = gid / height;
const auto& g = gradient[row + col * gradient_ldim];
auto& v = velocity[row + col * velocity_ldim];
auto& x = values[row + col * values_ldim];
v = momentum * v + g;
x -= learning_rate * v;
}
}
template <typename TensorDataType>
__global__ void momentum_contiguous_kernel(size_t size,
TensorDataType learning_rate,
TensorDataType momentum,
TensorDataType * __restrict__ values,
const TensorDataType * __restrict__ gradient,
TensorDataType * __restrict__ velocity) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
if (gid < size) {
const auto& g = gradient[gid];
auto& v = velocity[gid];
auto& x = values[gid];
v = momentum * v + g;
x -= learning_rate * v;
}
}
template <typename TensorDataType>
__global__ void nesterov_kernel(size_t height,
size_t width,
TensorDataType learning_rate,
TensorDataType momentum,
TensorDataType * __restrict__ values,
size_t values_ldim,
const TensorDataType * __restrict__ gradient,
size_t gradient_ldim,
TensorDataType * __restrict__ velocity,
size_t velocity_ldim) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
const size_t nthreads = gridDim.x * blockDim.x;
for (size_t pos = gid; pos < height * width; pos += nthreads) {
const auto& row = pos % height;
const auto& col = pos / height;
const auto& g = gradient[row + col * gradient_ldim];
auto& v = velocity[row + col * velocity_ldim];
auto& x = values[row + col * values_ldim];
v = momentum * v + g;
x -= learning_rate * (momentum * v + g);
}
}
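// Per-element update rules implemented by the kernels above:
//   momentum: v <- momentum * v + g;  x <- x - lr * v
//   Nesterov: v <- momentum * v + g;  x <- x - lr * (momentum * v + g)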
} // namespace
template <typename TensorDataType>
void sgd<TensorDataType>::momentum_step_gpu(AbsDistMatrixType& values,
const AbsDistMatrixType& gradient) {
// Get matrix dimensions
const size_t local_height = values.LocalHeight();
const size_t local_width = values.LocalWidth();
const size_t local_size = local_height * local_width;
if (local_size <= 0) { return; }
// Launch CUDA kernels for momentum SGD or NAG
constexpr size_t block_size = 256;
const size_t grid_size = (local_size + block_size - 1) / block_size;
auto&& stream = El::GPUManager::Stream();
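// The momentum kernels map one thread to one element, while the Nesterov kernel uses a grid-stride loop.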
if (m_nesterov) {
hipLaunchKernelGGL(( nesterov_kernel<TensorDataType>), dim3(grid_size), dim3(block_size), 0, stream,
local_height, local_width,
this->get_learning_rate(), m_momentum,
values.Buffer(), values.LDim(),
gradient.LockedBuffer(), gradient.LDim(),
m_velocity->Buffer(), m_velocity->LDim());
} else {
if (values.Contiguous() && gradient.Contiguous()
&& m_velocity->Contiguous()) {
hipLaunchKernelGGL(( momentum_contiguous_kernel<TensorDataType>), dim3(grid_size), dim3(block_size), 0, stream,
local_size, this->get_learning_rate(), m_momentum,
values.Buffer(), gradient.LockedBuffer(), m_velocity->Buffer());
} else {
hipLaunchKernelGGL(( momentum_noncontiguous_kernel<TensorDataType>), dim3(grid_size), dim3(block_size), 0, stream,
local_height, local_width,
this->get_learning_rate(), m_momentum,
values.Buffer(), values.LDim(),
gradient.LockedBuffer(), gradient.LDim(),
m_velocity->Buffer(), m_velocity->LDim());
}
}
}
#ifdef LBANN_HAS_HALF
template <>
void sgd<cpu_fp16>::momentum_step_gpu(AbsDistMatrixType&,
const AbsDistMatrixType&) {
LBANN_ERROR("Can't call this function with cpu_fp16!");
}
#endif // LBANN_HAS_HALF
#define PROTO(T) \
template void sgd<T>::momentum_step_gpu( \
El::AbstractDistMatrix<T>&, \
const El::AbstractDistMatrix<T>&)
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
| 1728b0fe2d013b9ed417f1475d56e7e614ea20c6.cu | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#include "lbann/optimizers/sgd.hpp"
namespace lbann {
namespace {
template <typename TensorDataType>
__global__ void momentum_noncontiguous_kernel(size_t height,
size_t width,
TensorDataType learning_rate,
TensorDataType momentum,
TensorDataType * __restrict__ values,
size_t values_ldim,
const TensorDataType * __restrict__ gradient,
size_t gradient_ldim,
TensorDataType * __restrict__ velocity,
size_t velocity_ldim) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
if (gid < height * width) {
const auto& row = gid % height;
const auto& col = gid / height;
const auto& g = gradient[row + col * gradient_ldim];
auto& v = velocity[row + col * velocity_ldim];
auto& x = values[row + col * values_ldim];
v = momentum * v + g;
x -= learning_rate * v;
}
}
template <typename TensorDataType>
__global__ void momentum_contiguous_kernel(size_t size,
TensorDataType learning_rate,
TensorDataType momentum,
TensorDataType * __restrict__ values,
const TensorDataType * __restrict__ gradient,
TensorDataType * __restrict__ velocity) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
if (gid < size) {
const auto& g = gradient[gid];
auto& v = velocity[gid];
auto& x = values[gid];
v = momentum * v + g;
x -= learning_rate * v;
}
}
template <typename TensorDataType>
__global__ void nesterov_kernel(size_t height,
size_t width,
TensorDataType learning_rate,
TensorDataType momentum,
TensorDataType * __restrict__ values,
size_t values_ldim,
const TensorDataType * __restrict__ gradient,
size_t gradient_ldim,
TensorDataType * __restrict__ velocity,
size_t velocity_ldim) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
const size_t nthreads = gridDim.x * blockDim.x;
for (size_t pos = gid; pos < height * width; pos += nthreads) {
const auto& row = pos % height;
const auto& col = pos / height;
const auto& g = gradient[row + col * gradient_ldim];
auto& v = velocity[row + col * velocity_ldim];
auto& x = values[row + col * values_ldim];
v = momentum * v + g;
x -= learning_rate * (momentum * v + g);
}
}
} // namespace
template <typename TensorDataType>
void sgd<TensorDataType>::momentum_step_gpu(AbsDistMatrixType& values,
const AbsDistMatrixType& gradient) {
// Get matrix dimensions
const size_t local_height = values.LocalHeight();
const size_t local_width = values.LocalWidth();
const size_t local_size = local_height * local_width;
if (local_size <= 0) { return; }
// Launch CUDA kernels for momentum SGD or NAG
constexpr size_t block_size = 256;
const size_t grid_size = (local_size + block_size - 1) / block_size;
auto&& stream = El::GPUManager::Stream();
if (m_nesterov) {
nesterov_kernel<TensorDataType><<<grid_size, block_size, 0, stream>>>(
local_height, local_width,
this->get_learning_rate(), m_momentum,
values.Buffer(), values.LDim(),
gradient.LockedBuffer(), gradient.LDim(),
m_velocity->Buffer(), m_velocity->LDim());
} else {
if (values.Contiguous() && gradient.Contiguous()
&& m_velocity->Contiguous()) {
momentum_contiguous_kernel<TensorDataType><<<grid_size, block_size, 0, stream>>>(
local_size, this->get_learning_rate(), m_momentum,
values.Buffer(), gradient.LockedBuffer(), m_velocity->Buffer());
} else {
momentum_noncontiguous_kernel<TensorDataType><<<grid_size, block_size, 0, stream>>>(
local_height, local_width,
this->get_learning_rate(), m_momentum,
values.Buffer(), values.LDim(),
gradient.LockedBuffer(), gradient.LDim(),
m_velocity->Buffer(), m_velocity->LDim());
}
}
}
#ifdef LBANN_HAS_HALF
template <>
void sgd<cpu_fp16>::momentum_step_gpu(AbsDistMatrixType&,
const AbsDistMatrixType&) {
LBANN_ERROR("Can't call this function with cpu_fp16!");
}
#endif // LBANN_HAS_HALF
#define PROTO(T) \
template void sgd<T>::momentum_step_gpu( \
El::AbstractDistMatrix<T>&, \
const El::AbstractDistMatrix<T>&)
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
|
6955e49c8d06c855be319af283c35e9eeec030e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file scalar_tensor_product_device.cu
* @author Daniel Nichols
* @version 0.1
* @date 2019-06-07
*
* @copyright Copyright (c) 2019
*/
#include "math/scalar_tensor_product.h"
#define BLK_SIZE 1024
namespace magmadnn {
namespace math {
template <typename T>
__global__ void kernel_scalar_tensor_product_device(T scalar, T *x, T *out, unsigned int size) {
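// Grid-stride loop: each thread scales a strided subset of the flattened tensor, out[i] = scalar * x[i].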
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < size; i += stride) {
out[i] = scalar * x[i];
}
}
template <typename T>
void scalar_tensor_product_device(T scalar, Tensor<T> *x, Tensor<T> *out) {
unsigned int size = out->get_size();
hipLaunchKernelGGL(( kernel_scalar_tensor_product_device), dim3((size + BLK_SIZE - 1) / BLK_SIZE), dim3(BLK_SIZE), 0, 0, scalar, x->get_ptr(),
out->get_ptr(), size);
}
template void scalar_tensor_product_device(int scalar, Tensor<int> *x, Tensor<int> *out);
template void scalar_tensor_product_device(float scalar, Tensor<float> *x, Tensor<float> *out);
template void scalar_tensor_product_device(double scalar, Tensor<double> *x, Tensor<double> *out);
} // namespace math
} // namespace magmadnn
#undef BLK_SIZE | 6955e49c8d06c855be319af283c35e9eeec030e1.cu | /**
* @file scalar_tensor_product_device.cu
* @author Daniel Nichols
* @version 0.1
* @date 2019-06-07
*
* @copyright Copyright (c) 2019
*/
#include "math/scalar_tensor_product.h"
#define BLK_SIZE 1024
namespace magmadnn {
namespace math {
template <typename T>
__global__ void kernel_scalar_tensor_product_device(T scalar, T *x, T *out, unsigned int size) {
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < size; i += stride) {
out[i] = scalar * x[i];
}
}
template <typename T>
void scalar_tensor_product_device(T scalar, Tensor<T> *x, Tensor<T> *out) {
unsigned int size = out->get_size();
kernel_scalar_tensor_product_device<<<(size + BLK_SIZE - 1) / BLK_SIZE, BLK_SIZE>>>(scalar, x->get_ptr(),
out->get_ptr(), size);
}
template void scalar_tensor_product_device(int scalar, Tensor<int> *x, Tensor<int> *out);
template void scalar_tensor_product_device(float scalar, Tensor<float> *x, Tensor<float> *out);
template void scalar_tensor_product_device(double scalar, Tensor<double> *x, Tensor<double> *out);
} // namespace math
} // namespace magmadnn
#undef BLK_SIZE |
gpujpeg_huffman_gpu_decoder.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2011, CESNET z.s.p.o
* Copyright (c) 2011, Silicon Genome, LLC.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "gpujpeg_huffman_gpu_decoder.h"
#include <libgpujpeg/gpujpeg_util.h>
/**
* Entry of pre-built Huffman fast-decoding table.
*/
struct gpujpeg_table_huffman_decoder_entry {
int value_nbits;
};
/**
* 4 pre-built tables for faster Huffman decoding (codewords up-to 16 bit length):
* 0x00000 to 0x0ffff: luminance DC table
* 0x10000 to 0x1ffff: luminance AC table
* 0x20000 to 0x2ffff: chrominance DC table
* 0x30000 to 0x3ffff: chrominance AC table
*
* Each entry consists of:
* - Number of bits of code corresponding to this entry (0 - 16, both inclusive) - bits 4 to 8
* - Number of run-length coded zeros before currently decoded coefficient + 1 (1 - 64, both inclusive) - bits 9 to 15
* - Number of bits representing the value of currently decoded coefficient (0 - 15, both inclusive) - bits 0 to 3
* bit #: 15 9 8 4 3 0
* +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
* value: | RLE zero count | code bit size | value bit size|
* +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
*/
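// Worked example: the packed entry 0x0273 = (1 << 9) + (7 << 4) + 3 describes a 7-bit codeword
// whose coefficient is stored in 3 value bits and is preceded by no run-length-coded zeros.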
__device__ uint16_t gpujpeg_huffman_gpu_decoder_tables_full[4 * (1 << 16)];
/** Number of code bits to be checked first (with high chance for the code to fit into this number of bits). */
#define QUICK_CHECK_BITS 10
#define QUICK_TABLE_ITEMS (4 * (1 << QUICK_CHECK_BITS))
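// 4 tables * 1024 entries * 2 bytes = 8 KiB, small enough to be mirrored in constant memory below.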
// TODO: try to tweak QUICK table size and memory space
/** Table with same format as the full table, except that all-zero-entry means that the full table should be consulted. */
__device__ uint16_t gpujpeg_huffman_gpu_decoder_tables_quick[QUICK_TABLE_ITEMS];
/** Same table as above, but copied into constant memory */
__constant__ uint16_t gpujpeg_huffman_gpu_decoder_tables_quick_const[QUICK_TABLE_ITEMS];
/** Natural order in constant memory */
__constant__ int gpujpeg_huffman_gpu_decoder_order_natural[GPUJPEG_ORDER_NATURAL_SIZE];
// /**
// * Fill more bit to current get buffer
// *
// * @param get_bits
// * @param get_buff
// * @param data
// * @param data_size
// * @return void
// */
// __device__ inline void
// gpujpeg_huffman_gpu_decoder_decode_fill_bit_buffer(int & get_bits, int & get_buff, uint8_t* & data, int & data_size)
// {
// while ( get_bits < 25 ) {
// //Are there some data?
// if( data_size > 0 ) {
// // Attempt to read a byte
// //printf("read byte %X 0x%X\n", (int)data, (unsigned char)*data);
// unsigned char uc = *data++;
// data_size--;
//
// // If it's 0xFF, check and discard stuffed zero byte
// if ( uc == 0xFF ) {
// while ( uc == 0xFF ) {
// //printf("read byte %X 0x%X\n", (int)data, (unsigned char)*data);
// uc = *data++;
// data_size--;
// }
//
// if ( uc == 0 ) {
// // Found FF/00, which represents an FF data byte
// uc = 0xFF;
// } else {
// // There should be enough bits still left in the data segment;
// // if so, just break out of the outer while loop.
// //if (m_nGetBits >= nbits)
// if ( get_bits >= 0 )
// break;
// }
// }
//
// get_buff = (get_buff << 8) | ((int) uc);
// get_bits += 8;
// }
// else
// break;
// }
// }
/**
* Loads at least specified number of bits into the register
*/
__device__ inline void
gpujpeg_huffman_gpu_decoder_load_bits(
const unsigned int required_bit_count, unsigned int & r_bit,
unsigned int & r_bit_count, uint4 * const s_byte, unsigned int & s_byte_idx
) {
// Add bytes until have enough
while(r_bit_count < required_bit_count) {
// Load byte value and posibly skip next stuffed byte if loaded byte's value is 0xFF
const uint8_t byte_value = ((const uint8_t*)s_byte)[s_byte_idx++];
if((uint8_t)0xFF == byte_value) {
s_byte_idx++;
}
// Add newly loaded byte to the buffer, updating bit count
r_bit = (r_bit << 8) + byte_value;
r_bit_count += 8;
}
}
/**
* Get bits
*
* @param nbits Number of bits to get
* @param get_bits
* @param get_buff
* @param data
* @param data_size
* @return bits
*/
__device__ inline unsigned int
gpujpeg_huffman_gpu_decoder_get_bits(
const unsigned int nbits, unsigned int & r_bit, unsigned int & r_bit_count,
uint4 * const s_byte, unsigned int & s_byte_idx)
{
// load bits into the register if haven't got enough
gpujpeg_huffman_gpu_decoder_load_bits(nbits, r_bit, r_bit_count, s_byte, s_byte_idx);
// update remaining bit count
r_bit_count -= nbits;
// return bits
return (r_bit >> r_bit_count) & ((1 << nbits) - 1);
}
/**
* Gets bits without removing them from the buffer.
*/
__device__ inline unsigned int
gpujpeg_huffman_gpu_decoder_peek_bits(
const unsigned int nbits, unsigned int & r_bit, unsigned int & r_bit_count,
uint4 * const s_byte, unsigned int & s_byte_idx)
{
// load bits into the register if haven't got enough
gpujpeg_huffman_gpu_decoder_load_bits(nbits, r_bit, r_bit_count, s_byte, s_byte_idx);
// return bits
return (r_bit >> (r_bit_count - nbits)) & ((1 << nbits) - 1);
}
/**
* Removes some bits from the buffer (assumes that they are there)
*/
__device__ inline void
gpujpeg_huffman_gpu_decoder_discard_bits(const unsigned int nb, unsigned int, unsigned int & r_bit_count) {
r_bit_count -= nb;
}
/**
* Special Huffman decode:
* (1) For codes with length > 8
* (2) For codes with length < 8 while data is finished
*
* @param table
* @param min_bits
* @param get_bits
* @param get_buff
* @param data
* @param data_size
* @return int
*/
__device__ inline int
gpujpeg_huffman_gpu_decoder_decode_special_decode(
const struct gpujpeg_table_huffman_decoder* const table, int min_bits, unsigned int & r_bit,
unsigned int & r_bit_count, uint4 * const s_byte, unsigned int & s_byte_idx)
{
// HUFF_DECODE has determined that the code is at least min_bits
// bits long, so fetch that many bits in one swoop.
int code = gpujpeg_huffman_gpu_decoder_get_bits(min_bits, r_bit, r_bit_count, s_byte, s_byte_idx);
// Collect the rest of the Huffman code one bit at a time.
// This is per Figure F.16 in the JPEG spec.
int l = min_bits;
while ( code > table->maxcode[l] ) {
code <<= 1;
code |= gpujpeg_huffman_gpu_decoder_get_bits(1, r_bit, r_bit_count, s_byte, s_byte_idx);
l++;
}
// With garbage input we may reach the sentinel value l = 17.
if ( l > 16 ) {
// Fake a zero as the safest result
return 0;
}
return table->huffval[table->valptr[l] + (int)(code - table->mincode[l])];
}
/**
* To find dc or ac value according to code and its bit length s
*/
__device__ inline int
gpujpeg_huffman_gpu_decoder_value_from_category(int nbits, int code)
{
// TODO: try to replace with __constant__ table lookup
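// e.g. for nbits = 3: codes 0..3 decode to -7..-4 and codes 4..7 decode to 4..7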
return code < ((1 << nbits) >> 1) ? (code + ((-1) << nbits) + 1) : code;
// // Method 1:
// // On some machines, a shift and add will be faster than a table lookup.
// // #define HUFF_EXTEND(x,s) \
// // ((x)< (1<<((s)-1)) ? (x) + (((-1)<<(s)) + 1) : (x))
//
// // Method 2: Table lookup
// // If (offset < half[category]), then value is below zero
// // Otherwise, value is above zero, and just the offset
// // entry n is 2**(n-1)
// const int half[16] = {
// 0x0000, 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040,
// 0x0080, 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000
// };
//
// //start[i] is the starting value in this category; surely it is below zero
// // entry n is (-1 << n) + 1
// const int start[16] = {
// 0, ((-1)<<1) + 1, ((-1)<<2) + 1, ((-1)<<3) + 1, ((-1)<<4) + 1,
// ((-1)<<5) + 1, ((-1)<<6) + 1, ((-1)<<7) + 1, ((-1)<<8) + 1,
// ((-1)<<9) + 1, ((-1)<<10) + 1, ((-1)<<11) + 1, ((-1)<<12) + 1,
// ((-1)<<13) + 1, ((-1)<<14) + 1, ((-1)<<15) + 1
// };
//
// return (code < half[nbits]) ? (code + start[nbits]) : code;
}
/**
* Decodes next coefficient, updating its output index
*
* @param table
* @param get_bits
* @param get_buff
* @param data
* @param data_size
* @return int
*/
__device__ inline int
gpujpeg_huffman_gpu_decoder_get_coefficient(
unsigned int & r_bit, unsigned int & r_bit_count, uint4* const s_byte,
unsigned int & s_byte_idx, const unsigned int table_offset, unsigned int & coefficient_idx)
{
// Peek next 16 bits and use them as an index into decoder table to find all the info.
const unsigned int table_idx = table_offset + gpujpeg_huffman_gpu_decoder_peek_bits(16, r_bit, r_bit_count, s_byte, s_byte_idx);
// Try the quick table first (use the full table only if not succeded with the quick table)
unsigned int packed_info = gpujpeg_huffman_gpu_decoder_tables_quick_const[table_idx >> (16 - QUICK_CHECK_BITS)];
if(0 == packed_info) {
packed_info = gpujpeg_huffman_gpu_decoder_tables_full[table_idx];
}
// remove the right number of bits from the bit buffer
gpujpeg_huffman_gpu_decoder_discard_bits((packed_info >> 4) & 0x1F, r_bit, r_bit_count);
// update coefficient index by skipping run-length encoded zeros
coefficient_idx += packed_info >> 9;
// read coefficient bits and decode the coefficient from them
const unsigned int value_nbits = packed_info & 0xF;
const unsigned int value_code = gpujpeg_huffman_gpu_decoder_get_bits(value_nbits, r_bit, r_bit_count, s_byte, s_byte_idx);
// return decoded coefficient
return gpujpeg_huffman_gpu_decoder_value_from_category(value_nbits, value_code);
}
/**
* Decode one 8x8 block
*
* @return 0 if succeeds, otherwise nonzero
*/
__device__ inline int
gpujpeg_huffman_gpu_decoder_decode_block(
int & dc, int16_t* const data_output, const unsigned int table_offset,
unsigned int & r_bit, unsigned int & r_bit_count, uint4* const s_byte,
unsigned int & s_byte_idx, const uint4* & d_byte, unsigned int & d_byte_chunk_count)
{
// TODO: try unified decoding of DC/AC coefficients
// Index of next coefficient to be decoded (in ZIG-ZAG order)
unsigned int coefficient_idx = 0;
// Section F.2.2.1: decode the DC coefficient difference
// Get the coefficient value (using DC coding table)
int dc_coefficient_value = gpujpeg_huffman_gpu_decoder_get_coefficient(r_bit, r_bit_count, s_byte, s_byte_idx, table_offset, coefficient_idx);
// Convert DC difference to actual value, update last_dc_val
dc = dc_coefficient_value += dc;
// Output the DC coefficient (assumes gpujpeg_natural_order[0] = 0)
// TODO: try to skip saving of zero coefficients
data_output[0] = dc_coefficient_value;
// TODO: error check: coefficient_idx must still be 0 in valid codestreams
coefficient_idx = 1;
// Section F.2.2.2: decode the AC coefficients
// Since zeroes are skipped, output area must be cleared beforehand
do {
// Possibly load more bytes into shared buffer from global memory
if(s_byte_idx >= 16) {
// Move remaining bytes to begin and update index of next byte
s_byte[0] = s_byte[1];
s_byte_idx -= 16;
// Load another byte chunk from global memory only if there is one
if(d_byte_chunk_count) {
s_byte[1] = *(d_byte++);
d_byte_chunk_count--;
}
}
// decode next coefficient, updating its destination index
const int coefficient_value = gpujpeg_huffman_gpu_decoder_get_coefficient(r_bit, r_bit_count, s_byte, s_byte_idx, table_offset + 0x10000, coefficient_idx);
// stop with this block if we already have all coefficients
if(coefficient_idx > 64) {
break;
}
// save the coefficient TODO: try to omit saving 0 coefficients
data_output[gpujpeg_huffman_gpu_decoder_order_natural[coefficient_idx - 1]] = coefficient_value;
} while(coefficient_idx < 64);
return 0;
}
/**
* Huffman decoder kernel
*
* @return void
*/
template <bool SINGLE_COMP, int THREADS_PER_TBLOCK>
__global__ void
#if __CUDA_ARCH__ < 200
__launch_bounds__(THREADS_PER_TBLOCK, 2)
#else
__launch_bounds__(THREADS_PER_TBLOCK, 6)
#endif
gpujpeg_huffman_decoder_decode_kernel(
struct gpujpeg_component* d_component,
struct gpujpeg_segment* d_segment,
int comp_count,
int segment_count,
uint8_t* d_data_compressed,
const uint64_t* d_block_list,
int16_t* d_data_quantized
) {
int segment_index = blockIdx.x * THREADS_PER_TBLOCK + threadIdx.x;
if ( segment_index >= segment_count )
return;
struct gpujpeg_segment* segment = &d_segment[segment_index];
// Byte buffers in shared memory
__shared__ uint4 s_byte_all[2 * THREADS_PER_TBLOCK]; // 32 bytes per thread
uint4 * const s_byte = s_byte_all + 2 * threadIdx.x;
// Last DC coefficient values TODO: try to move into shared memory
int dc[GPUJPEG_MAX_COMPONENT_COUNT];
for ( int comp = 0; comp < GPUJPEG_MAX_COMPONENT_COUNT; comp++ )
dc[comp] = 0;
// Get aligned compressed data chunk pointer and load first 2 chunks of the data
const unsigned int d_byte_begin_idx = segment->data_compressed_index;
const unsigned int d_byte_begin_idx_aligned = d_byte_begin_idx & ~15; // loading 16byte chunks
const uint4* d_byte = (uint4*)(d_data_compressed + d_byte_begin_idx_aligned);
// Get number of remaining global memory byte chunks (not to read bytes out of buffer)
const unsigned int d_byte_end_idx_aligned = (d_byte_begin_idx + segment->data_compressed_size + 15) & ~15;
unsigned int d_byte_chunk_count = (d_byte_end_idx_aligned - d_byte_begin_idx_aligned) / 16;
// Load first 2 chunks of compressed data into the shared memory buffer and remember index of first code byte (skipping bytes read due to alignment)
s_byte[0] = d_byte[0];
s_byte[1] = d_byte[1];
d_byte += 2;
d_byte_chunk_count = max(d_byte_chunk_count, 2) - 2;
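// max() guards against unsigned underflow when the segment spans fewer than two 16-byte chunks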
unsigned int s_byte_idx = d_byte_begin_idx - d_byte_begin_idx_aligned;
// bits loaded into the register and their count
unsigned int r_bit_count = 0;
unsigned int r_bit = 0; // LSB-aligned
// Non-interleaving mode
if ( SINGLE_COMP ) {
// Get component for current scan
const struct gpujpeg_component* const component = d_component + segment->scan_index;
// Get huffman tables offset
const unsigned int table_offset = component->type == GPUJPEG_COMPONENT_LUMINANCE ? 0x00000 : 0x20000;
// Size of MCUs in this segment's component
const int component_mcu_size = component->mcu_size;
// Pointer to first MCU's output block
int16_t* block = component->d_data_quantized + segment->scan_segment_index * component->segment_mcu_count * component_mcu_size;
// Encode MCUs in segment
for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) {
// Encode 8x8 block
if ( gpujpeg_huffman_gpu_decoder_decode_block(dc[0], block, table_offset, r_bit, r_bit_count, s_byte, s_byte_idx, d_byte, d_byte_chunk_count) != 0 )
break;
// advance to next block
block += component_mcu_size;
}
}
// Interleaving mode
else {
// Pointer to segment's list of 8x8 blocks and their count
const uint64_t* packed_block_info_ptr = d_block_list + segment->block_index_list_begin;
// Encode all blocks
for(int block_count = segment->block_count; block_count--;) {
// Get pointer to next block input data and info about its color type
const uint64_t packed_block_info = *(packed_block_info_ptr++);
// Get coder parameters
const int last_dc_idx = packed_block_info & 0x7f;
// Get offset to right part of huffman table
const unsigned int huffman_table_offset = packed_block_info & 0x80 ? 0x20000 : 0x00000;
// Source data pointer
int16_t* block = d_data_quantized + (packed_block_info >> 8);
// Encode 8x8 block
gpujpeg_huffman_gpu_decoder_decode_block(dc[last_dc_idx], block, huffman_table_offset, r_bit, r_bit_count, s_byte, s_byte_idx, d_byte, d_byte_chunk_count);
}
// // Encode MCUs in segment
// for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) {
//
//
//
//
//
//
//
//
// //assert(segment->scan_index == 0);
// for ( int comp = 0; comp < comp_count; comp++ ) {
// struct gpujpeg_component* component = &d_component[comp];
//
// // Prepare mcu indexes
// int mcu_index_x = (segment_index * component->segment_mcu_count + mcu_index) % component->mcu_count_x;
// int mcu_index_y = (segment_index * component->segment_mcu_count + mcu_index) / component->mcu_count_x;
// // Compute base data index
// int data_index_base = mcu_index_y * (component->mcu_size * component->mcu_count_x) + mcu_index_x * (component->mcu_size_x * GPUJPEG_BLOCK_SIZE);
//
// // For all vertical 8x8 blocks
// for ( int y = 0; y < component->sampling_factor.vertical; y++ ) {
// // Compute base row data index
// int data_index_row = data_index_base + y * (component->mcu_count_x * component->mcu_size_x * GPUJPEG_BLOCK_SIZE);
// // For all horizontal 8x8 blocks
// for ( int x = 0; x < component->sampling_factor.horizontal; x++ ) {
// // Compute 8x8 block data index
// int data_index = data_index_row + x * GPUJPEG_BLOCK_SIZE * GPUJPEG_BLOCK_SIZE;
//
// // Get component data for MCU
// int16_t* block = &component->d_data_quantized[data_index];
//
// // Get coder parameters
// int & component_dc = dc[comp];
//
// // Get huffman tables offset
// const unsigned int table_offset = component->type == GPUJPEG_COMPONENT_LUMINANCE ? 0x00000 : 0x20000;
//
// // Encode 8x8 block
// gpujpeg_huffman_gpu_decoder_decode_block(component_dc, block, table_offset, r_bit, r_bit_count, s_byte, s_byte_idx, d_byte, d_byte_chunk_count);
// }
// }
// }
// }
}
}
/**
* Setup of one Huffman table entry for fast decoding.
* @param bits bits to extract one codeword from (first bit is bit #15, then #14, ... last is #0)
* @param d_table_src source (slow-decoding) table pointer
* @param d_table_dest destination (fast-decoding) table pointer
*/
__device__ void
gpujpeg_huffman_gpu_decoder_table_setup(
const int bits,
const struct gpujpeg_table_huffman_decoder* const d_table_src,
const int table_idx
) {
// Decode one codeword from given bits to get the following:
// - minimal number of bits actually needed to decode the codeword (up to 16 bits, 0 for invalid ones)
// - category ID represented by the codeword, consisting of:
// - number of run-length-coded preceding zeros (up to 16, or 63 for both the special end-of-block symbol and invalid codewords)
// - bit-size of the actual value of coefficient (up to 16, 0 for invalid ones)
int code_nbits = 1, category_id = 0;
// First, decode codeword length (This is per Figure F.16 in the JPEG spec.)
int code_value = bits >> 15; // only single bit initially
while ( code_value > d_table_src->maxcode[code_nbits] ) {
code_value = bits >> (16 - ++code_nbits); // not enough to decide => try more bits
}
// With garbage input we may reach the sentinel value l = 17.
if ( code_nbits > 16 ) {
code_nbits = 0;
// category ID remains 0 for invalid symbols from garbage input
} else {
category_id = d_table_src->huffval[d_table_src->valptr[code_nbits] + code_value - d_table_src->mincode[code_nbits]];
}
// decompose category number into 1 + number of run-length coded zeros and length of the value
// (special category #0 contains all invalid codes and special end-of-block code -- all of those codes
// should terminate block decoding => use 64 run-length zeros and 0 value bits for such symbols)
const int value_nbits = 0xF & category_id;
const int rle_zero_count = category_id ? min(1 + (category_id >> 4), 64) : 64;
// save all the info into the right place in the destination table
const int packed_info = (rle_zero_count << 9) + (code_nbits << 4) + value_nbits;
gpujpeg_huffman_gpu_decoder_tables_full[(table_idx << 16) + bits] = packed_info;
// some threads also save entries into the quick table
const int dest_idx_quick = bits >> (16 - QUICK_CHECK_BITS);
if(bits == (dest_idx_quick << (16 - QUICK_CHECK_BITS))) {
// save info also into the quick table if number of required bits is less than quick
// check bit count, otherwise put 0 there to indicate that full table lookup consultation is needed
gpujpeg_huffman_gpu_decoder_tables_quick[(table_idx << QUICK_CHECK_BITS) + dest_idx_quick] = code_nbits <= QUICK_CHECK_BITS ? packed_info : 0;
}
}
/**
* Huffman decoder table setup kernel
* (Based on the original table, this kernel prepares another table, which is more suitable for fast decoding.)
*/
__global__ void
gpujpeg_huffman_decoder_table_kernel(
const struct gpujpeg_table_huffman_decoder* const d_table_y_dc,
const struct gpujpeg_table_huffman_decoder* const d_table_y_ac,
const struct gpujpeg_table_huffman_decoder* const d_table_cbcr_dc,
const struct gpujpeg_table_huffman_decoder* const d_table_cbcr_ac
) {
// Each thread uses all 4 Huffman tables to "decode" one symbol from its unique 16bits.
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
gpujpeg_huffman_gpu_decoder_table_setup(idx, d_table_y_dc, 0);
gpujpeg_huffman_gpu_decoder_table_setup(idx, d_table_y_ac, 1);
gpujpeg_huffman_gpu_decoder_table_setup(idx, d_table_cbcr_dc, 2);
gpujpeg_huffman_gpu_decoder_table_setup(idx, d_table_cbcr_ac, 3);
}
/** Documented at declaration */
int
gpujpeg_huffman_gpu_decoder_init()
{
// Copy natural order to constant device memory
hipMemcpyToSymbol(
gpujpeg_huffman_gpu_decoder_order_natural,
gpujpeg_order_natural,
GPUJPEG_ORDER_NATURAL_SIZE * sizeof(int),
0,
hipMemcpyHostToDevice
);
gpujpeg_cuda_check_error("Huffman decoder init");
return 0;
}
/** Documented at declaration */
int
gpujpeg_huffman_gpu_decoder_decode(struct gpujpeg_decoder* decoder)
{
// Get coder
struct gpujpeg_coder* coder = &decoder->coder;
assert(coder->param.restart_interval > 0);
int comp_count = 1;
if ( coder->param.interleaved == 1 )
comp_count = coder->param_image.comp_count;
assert(comp_count >= 1 && comp_count <= GPUJPEG_MAX_COMPONENT_COUNT);
// Number of decoder kernel threads per each threadblock
enum { THREADS_PER_TBLOCK = 192 };
// Configure more Shared memory for both kernels
hipFuncSetCacheConfig(gpujpeg_huffman_decoder_table_kernel, hipFuncCachePreferShared);
hipFuncSetCacheConfig(gpujpeg_huffman_decoder_decode_kernel<true, THREADS_PER_TBLOCK>, hipFuncCachePreferShared);
hipFuncSetCacheConfig(gpujpeg_huffman_decoder_decode_kernel<false, THREADS_PER_TBLOCK>, hipFuncCachePreferShared);
// Setup GPU tables (one thread for each of 65536 entries)
hipLaunchKernelGGL(( gpujpeg_huffman_decoder_table_kernel), dim3(256), dim3(256), 0, 0,
decoder->d_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC],
decoder->d_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC],
decoder->d_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC],
decoder->d_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC]
);
hipDeviceSynchronize();
gpujpeg_cuda_check_error("Huffman decoder table setup failed");
// Get pointer to quick decoding table in device memory
void * d_src_ptr = 0;
hipGetSymbolAddress(&d_src_ptr, gpujpeg_huffman_gpu_decoder_tables_quick);
hipDeviceSynchronize();
gpujpeg_cuda_check_error("Huffman decoder table address lookup failed");
// Copy quick decoding table into constant memory
hipMemcpyToSymbol(
gpujpeg_huffman_gpu_decoder_tables_quick_const,
d_src_ptr,
sizeof(*gpujpeg_huffman_gpu_decoder_tables_quick) * QUICK_TABLE_ITEMS,
0,
hipMemcpyDeviceToDevice
);
hipDeviceSynchronize();
gpujpeg_cuda_check_error("Huffman decoder table copy failed");
// Run decoding kernel
dim3 thread(THREADS_PER_TBLOCK);
dim3 grid(gpujpeg_div_and_round_up(decoder->segment_count, THREADS_PER_TBLOCK));
if(comp_count == 1) {
hipLaunchKernelGGL(( gpujpeg_huffman_decoder_decode_kernel<true, THREADS_PER_TBLOCK>), dim3(grid), dim3(thread), 0, 0,
coder->d_component,
coder->d_segment,
comp_count,
decoder->segment_count,
coder->d_data_compressed,
coder->d_block_list,
coder->d_data_quantized
);
} else {
hipLaunchKernelGGL(( gpujpeg_huffman_decoder_decode_kernel<false, THREADS_PER_TBLOCK>), dim3(grid), dim3(thread), 0, 0,
coder->d_component,
coder->d_segment,
comp_count,
decoder->segment_count,
coder->d_data_compressed,
coder->d_block_list,
coder->d_data_quantized
);
}
hipError_t cuerr = hipDeviceSynchronize();
gpujpeg_cuda_check_error("Huffman decoding failed");
return 0;
}
| gpujpeg_huffman_gpu_decoder.cu | /**
* Copyright (c) 2011, CESNET z.s.p.o
* Copyright (c) 2011, Silicon Genome, LLC.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "gpujpeg_huffman_gpu_decoder.h"
#include <libgpujpeg/gpujpeg_util.h>
/**
* Entry of pre-built Huffman fast-decoding table.
*/
struct gpujpeg_table_huffman_decoder_entry {
int value_nbits;
};
/**
* 4 pre-built tables for faster Huffman decoding (codewords up-to 16 bit length):
* 0x00000 to 0x0ffff: luminance DC table
* 0x10000 to 0x1ffff: luminance AC table
* 0x20000 to 0x2ffff: chrominance DC table
* 0x30000 to 0x3ffff: chrominance AC table
*
* Each entry consists of:
* - Number of bits of code corresponding to this entry (0 - 16, both inclusive) - bits 4 to 8
* - Number of run-length coded zeros before currently decoded coefficient + 1 (1 - 64, both inclusive) - bits 9 to 15
* - Number of bits representing the value of currently decoded coefficient (0 - 15, both inclusive) - bits 0 to 3
* bit #: 15 9 8 4 3 0
* +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
* value: | RLE zero count | code bit size | value bit size|
* +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
*/
__device__ uint16_t gpujpeg_huffman_gpu_decoder_tables_full[4 * (1 << 16)];
/** Number of code bits to be checked first (with high chance for the code to fit into this number of bits). */
#define QUICK_CHECK_BITS 10
#define QUICK_TABLE_ITEMS (4 * (1 << QUICK_CHECK_BITS))
// TODO: try to tweak QUICK table size and memory space
/** Table with same format as the full table, except that all-zero-entry means that the full table should be consulted. */
__device__ uint16_t gpujpeg_huffman_gpu_decoder_tables_quick[QUICK_TABLE_ITEMS];
/** Same table as above, but copied into constant memory */
__constant__ uint16_t gpujpeg_huffman_gpu_decoder_tables_quick_const[QUICK_TABLE_ITEMS];
/** Natural order in constant memory */
__constant__ int gpujpeg_huffman_gpu_decoder_order_natural[GPUJPEG_ORDER_NATURAL_SIZE];
// /**
// * Fill more bit to current get buffer
// *
// * @param get_bits
// * @param get_buff
// * @param data
// * @param data_size
// * @return void
// */
// __device__ inline void
// gpujpeg_huffman_gpu_decoder_decode_fill_bit_buffer(int & get_bits, int & get_buff, uint8_t* & data, int & data_size)
// {
// while ( get_bits < 25 ) {
// //Are there some data?
// if( data_size > 0 ) {
// // Attempt to read a byte
// //printf("read byte %X 0x%X\n", (int)data, (unsigned char)*data);
// unsigned char uc = *data++;
// data_size--;
//
// // If it's 0xFF, check and discard stuffed zero byte
// if ( uc == 0xFF ) {
// while ( uc == 0xFF ) {
// //printf("read byte %X 0x%X\n", (int)data, (unsigned char)*data);
// uc = *data++;
// data_size--;
// }
//
// if ( uc == 0 ) {
// // Found FF/00, which represents an FF data byte
// uc = 0xFF;
// } else {
// // There should be enough bits still left in the data segment;
// // if so, just break out of the outer while loop.
// //if (m_nGetBits >= nbits)
// if ( get_bits >= 0 )
// break;
// }
// }
//
// get_buff = (get_buff << 8) | ((int) uc);
// get_bits += 8;
// }
// else
// break;
// }
// }
/**
* Loads at least specified number of bits into the register
*/
__device__ inline void
gpujpeg_huffman_gpu_decoder_load_bits(
const unsigned int required_bit_count, unsigned int & r_bit,
unsigned int & r_bit_count, uint4 * const s_byte, unsigned int & s_byte_idx
) {
// Add bytes until have enough
while(r_bit_count < required_bit_count) {
// Load byte value and possibly skip next stuffed byte if loaded byte's value is 0xFF
const uint8_t byte_value = ((const uint8_t*)s_byte)[s_byte_idx++];
if((uint8_t)0xFF == byte_value) {
s_byte_idx++;
}
// Add newly loaded byte to the buffer, updating bit count
r_bit = (r_bit << 8) + byte_value;
r_bit_count += 8;
}
}
/**
* Get bits
*
* @param nbits Number of bits to get
* @param get_bits
* @param get_buff
* @param data
* @param data_size
* @return bits
*/
__device__ inline unsigned int
gpujpeg_huffman_gpu_decoder_get_bits(
const unsigned int nbits, unsigned int & r_bit, unsigned int & r_bit_count,
uint4 * const s_byte, unsigned int & s_byte_idx)
{
// load bits into the register if haven't got enough
gpujpeg_huffman_gpu_decoder_load_bits(nbits, r_bit, r_bit_count, s_byte, s_byte_idx);
// update remaining bit count
r_bit_count -= nbits;
// return bits
return (r_bit >> r_bit_count) & ((1 << nbits) - 1);
}
/**
* Gets bits without removing them from the buffer.
*/
__device__ inline unsigned int
gpujpeg_huffman_gpu_decoder_peek_bits(
const unsigned int nbits, unsigned int & r_bit, unsigned int & r_bit_count,
uint4 * const s_byte, unsigned int & s_byte_idx)
{
// load bits into the register if haven't got enough
gpujpeg_huffman_gpu_decoder_load_bits(nbits, r_bit, r_bit_count, s_byte, s_byte_idx);
// return bits
return (r_bit >> (r_bit_count - nbits)) & ((1 << nbits) - 1);
}
/**
* Removes some bits from the buffer (assumes that they are there)
*/
__device__ inline void
gpujpeg_huffman_gpu_decoder_discard_bits(const unsigned int nb, unsigned int, unsigned int & r_bit_count) {
r_bit_count -= nb;
}
/**
* Special Huffman decode:
* (1) For codes with length > 8
* (2) For codes with length < 8 while data is finished
*
* @param table
* @param min_bits
* @param get_bits
* @param get_buff
* @param data
* @param data_size
* @return int
*/
__device__ inline int
gpujpeg_huffman_gpu_decoder_decode_special_decode(
const struct gpujpeg_table_huffman_decoder* const table, int min_bits, unsigned int & r_bit,
unsigned int & r_bit_count, uint4 * const s_byte, unsigned int & s_byte_idx)
{
// HUFF_DECODE has determined that the code is at least min_bits
// bits long, so fetch that many bits in one swoop.
int code = gpujpeg_huffman_gpu_decoder_get_bits(min_bits, r_bit, r_bit_count, s_byte, s_byte_idx);
// Collect the rest of the Huffman code one bit at a time.
// This is per Figure F.16 in the JPEG spec.
int l = min_bits;
while ( code > table->maxcode[l] ) {
code <<= 1;
code |= gpujpeg_huffman_gpu_decoder_get_bits(1, r_bit, r_bit_count, s_byte, s_byte_idx);
l++;
}
// With garbage input we may reach the sentinel value l = 17.
if ( l > 16 ) {
// Fake a zero as the safest result
return 0;
}
return table->huffval[table->valptr[l] + (int)(code - table->mincode[l])];
}
/**
* To find dc or ac value according to code and its bit length s
*/
__device__ inline int
gpujpeg_huffman_gpu_decoder_value_from_category(int nbits, int code)
{
// TODO: try to replace with __constant__ table lookup
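// Worked example (added for illustration): for nbits = 3 the received codes 0..7 map to
// -7..-4 (codes 0..3, below the half point) and +4..+7 (codes 4..7); e.g. code 2 gives
// 2 + ((-1) << 3) + 1 = -5, matching JPEG's value-extension rule for a 3-bit category.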
return code < ((1 << nbits) >> 1) ? (code + ((-1) << nbits) + 1) : code;
// // Method 1:
// // On some machines, a shift and add will be faster than a table lookup.
// // #define HUFF_EXTEND(x,s) \
// // ((x)< (1<<((s)-1)) ? (x) + (((-1)<<(s)) + 1) : (x))
//
// // Method 2: Table lookup
// // If (offset < half[category]), then value is below zero
// // Otherwise, value is above zero, and just the offset
// // entry n is 2**(n-1)
// const int half[16] = {
// 0x0000, 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040,
// 0x0080, 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000
// };
//
// //start[i] is the starting value in this category; surely it is below zero
// // entry n is (-1 << n) + 1
// const int start[16] = {
// 0, ((-1)<<1) + 1, ((-1)<<2) + 1, ((-1)<<3) + 1, ((-1)<<4) + 1,
// ((-1)<<5) + 1, ((-1)<<6) + 1, ((-1)<<7) + 1, ((-1)<<8) + 1,
// ((-1)<<9) + 1, ((-1)<<10) + 1, ((-1)<<11) + 1, ((-1)<<12) + 1,
// ((-1)<<13) + 1, ((-1)<<14) + 1, ((-1)<<15) + 1
// };
//
// return (code < half[nbits]) ? (code + start[nbits]) : code;
}
/**
* Decodes next coefficient, updating its output index
*
* @param table
* @param get_bits
* @param get_buff
* @param data
* @param data_size
* @return int
*/
__device__ inline int
gpujpeg_huffman_gpu_decoder_get_coefficient(
unsigned int & r_bit, unsigned int & r_bit_count, uint4* const s_byte,
unsigned int & s_byte_idx, const unsigned int table_offset, unsigned int & coefficient_idx)
{
// Peek next 16 bits and use them as an index into decoder table to find all the info.
const unsigned int table_idx = table_offset + gpujpeg_huffman_gpu_decoder_peek_bits(16, r_bit, r_bit_count, s_byte, s_byte_idx);
// Try the quick table first (use the full table only if not succeded with the quick table)
unsigned int packed_info = gpujpeg_huffman_gpu_decoder_tables_quick_const[table_idx >> (16 - QUICK_CHECK_BITS)];
if(0 == packed_info) {
packed_info = gpujpeg_huffman_gpu_decoder_tables_full[table_idx];
}
// remove the right number of bits from the bit buffer
gpujpeg_huffman_gpu_decoder_discard_bits((packed_info >> 4) & 0x1F, r_bit, r_bit_count);
// update coefficient index by skipping run-length encoded zeros
coefficient_idx += packed_info >> 9;
// read coefficient bits and decode the coefficient from them
const unsigned int value_nbits = packed_info & 0xF;
const unsigned int value_code = gpujpeg_huffman_gpu_decoder_get_bits(value_nbits, r_bit, r_bit_count, s_byte, s_byte_idx);
// return decoded coefficient
return gpujpeg_huffman_gpu_decoder_value_from_category(value_nbits, value_code);
}
/**
* Decode one 8x8 block
*
* @return 0 if succeeds, otherwise nonzero
*/
__device__ inline int
gpujpeg_huffman_gpu_decoder_decode_block(
int & dc, int16_t* const data_output, const unsigned int table_offset,
unsigned int & r_bit, unsigned int & r_bit_count, uint4* const s_byte,
unsigned int & s_byte_idx, const uint4* & d_byte, unsigned int & d_byte_chunk_count)
{
// TODO: try unified decoding of DC/AC coefficients
// Index of next coefficient to be decoded (in ZIG-ZAG order)
unsigned int coefficient_idx = 0;
// Section F.2.2.1: decode the DC coefficient difference
// Get the coefficient value (using DC coding table)
int dc_coefficient_value = gpujpeg_huffman_gpu_decoder_get_coefficient(r_bit, r_bit_count, s_byte, s_byte_idx, table_offset, coefficient_idx);
// Convert DC difference to actual value, update last_dc_val
dc = dc_coefficient_value += dc;
// Output the DC coefficient (assumes gpujpeg_natural_order[0] = 0)
// TODO: try to skip saving of zero coefficients
data_output[0] = dc_coefficient_value;
// TODO: error check: coefficient_idx must still be 0 in valid codestreams
coefficient_idx = 1;
// Section F.2.2.2: decode the AC coefficients
// Since zeroes are skipped, output area must be cleared beforehand
do {
// Possibly load more bytes into shared buffer from global memory
if(s_byte_idx >= 16) {
// Move remaining bytes to begin and update index of next byte
s_byte[0] = s_byte[1];
s_byte_idx -= 16;
// Load another byte chunk from global memory only if there is one
if(d_byte_chunk_count) {
s_byte[1] = *(d_byte++);
d_byte_chunk_count--;
}
}
// decode next coefficient, updating its destination index
const int coefficient_value = gpujpeg_huffman_gpu_decoder_get_coefficient(r_bit, r_bit_count, s_byte, s_byte_idx, table_offset + 0x10000, coefficient_idx);
// stop with this block if have all coefficients
if(coefficient_idx > 64) {
break;
}
// save the coefficient TODO: try to omit saving 0 coefficients
data_output[gpujpeg_huffman_gpu_decoder_order_natural[coefficient_idx - 1]] = coefficient_value;
} while(coefficient_idx < 64);
return 0;
}
/**
* Huffman decoder kernel
*
* @return void
*/
template <bool SINGLE_COMP, int THREADS_PER_TBLOCK>
__global__ void
#if __CUDA_ARCH__ < 200
__launch_bounds__(THREADS_PER_TBLOCK, 2)
#else
__launch_bounds__(THREADS_PER_TBLOCK, 6)
#endif
gpujpeg_huffman_decoder_decode_kernel(
struct gpujpeg_component* d_component,
struct gpujpeg_segment* d_segment,
int comp_count,
int segment_count,
uint8_t* d_data_compressed,
const uint64_t* d_block_list,
int16_t* d_data_quantized
) {
int segment_index = blockIdx.x * THREADS_PER_TBLOCK + threadIdx.x;
if ( segment_index >= segment_count )
return;
struct gpujpeg_segment* segment = &d_segment[segment_index];
// Byte buffers in shared memory
__shared__ uint4 s_byte_all[2 * THREADS_PER_TBLOCK]; // 32 bytes per thread
uint4 * const s_byte = s_byte_all + 2 * threadIdx.x;
// Last DC coefficient values TODO: try to move into shared memory
int dc[GPUJPEG_MAX_COMPONENT_COUNT];
for ( int comp = 0; comp < GPUJPEG_MAX_COMPONENT_COUNT; comp++ )
dc[comp] = 0;
// Get aligned compressed data chunk pointer and load first 2 chunks of the data
const unsigned int d_byte_begin_idx = segment->data_compressed_index;
const unsigned int d_byte_begin_idx_aligned = d_byte_begin_idx & ~15; // loading 16byte chunks
const uint4* d_byte = (uint4*)(d_data_compressed + d_byte_begin_idx_aligned);
// Get number of remaining global memory byte chunks (not to read bytes out of buffer)
const unsigned int d_byte_end_idx_aligned = (d_byte_begin_idx + segment->data_compressed_size + 15) & ~15;
unsigned int d_byte_chunk_count = (d_byte_end_idx_aligned - d_byte_begin_idx_aligned) / 16;
// Load first 2 chunks of compressed data into the shared memory buffer and remember index of first code byte (skipping bytes read due to alignment)
s_byte[0] = d_byte[0];
s_byte[1] = d_byte[1];
d_byte += 2;
d_byte_chunk_count = max(d_byte_chunk_count, 2) - 2;
unsigned int s_byte_idx = d_byte_begin_idx - d_byte_begin_idx_aligned;
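// Note (explanatory comment, not in the original): each thread keeps a private 32-byte
// (2 x uint4) window of its segment in shared memory; s_byte_idx starts at the offset of
// the first real code byte inside the 16-byte-aligned chunk, and decode_block refills the
// window 16 bytes at a time once s_byte_idx runs past the first half.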
// bits loaded into the register and their count
unsigned int r_bit_count = 0;
unsigned int r_bit = 0; // LSB-aligned
// Non-interleaving mode
if ( SINGLE_COMP ) {
// Get component for current scan
const struct gpujpeg_component* const component = d_component + segment->scan_index;
// Get huffman tables offset
const unsigned int table_offset = component->type == GPUJPEG_COMPONENT_LUMINANCE ? 0x00000 : 0x20000;
// Size of MCUs in this segment's component
const int component_mcu_size = component->mcu_size;
// Pointer to first MCU's output block
int16_t* block = component->d_data_quantized + segment->scan_segment_index * component->segment_mcu_count * component_mcu_size;
// Encode MCUs in segment
for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) {
// Encode 8x8 block
if ( gpujpeg_huffman_gpu_decoder_decode_block(dc[0], block, table_offset, r_bit, r_bit_count, s_byte, s_byte_idx, d_byte, d_byte_chunk_count) != 0 )
break;
// advance to next block
block += component_mcu_size;
}
}
// Interleaving mode
else {
// Pointer to segment's list of 8x8 blocks and their count
const uint64_t* packed_block_info_ptr = d_block_list + segment->block_index_list_begin;
// Encode all blocks
for(int block_count = segment->block_count; block_count--;) {
// Get pointer to next block input data and info about its color type
const uint64_t packed_block_info = *(packed_block_info_ptr++);
// Get coder parameters
const int last_dc_idx = packed_block_info & 0x7f;
// Get offset to right part of huffman table
const unsigned int huffman_table_offset = packed_block_info & 0x80 ? 0x20000 : 0x00000;
// Source data pointer
int16_t* block = d_data_quantized + (packed_block_info >> 8);
// Encode 8x8 block
gpujpeg_huffman_gpu_decoder_decode_block(dc[last_dc_idx], block, huffman_table_offset, r_bit, r_bit_count, s_byte, s_byte_idx, d_byte, d_byte_chunk_count);
}
// // Encode MCUs in segment
// for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) {
//
//
//
//
//
//
//
//
// //assert(segment->scan_index == 0);
// for ( int comp = 0; comp < comp_count; comp++ ) {
// struct gpujpeg_component* component = &d_component[comp];
//
// // Prepare mcu indexes
// int mcu_index_x = (segment_index * component->segment_mcu_count + mcu_index) % component->mcu_count_x;
// int mcu_index_y = (segment_index * component->segment_mcu_count + mcu_index) / component->mcu_count_x;
// // Compute base data index
// int data_index_base = mcu_index_y * (component->mcu_size * component->mcu_count_x) + mcu_index_x * (component->mcu_size_x * GPUJPEG_BLOCK_SIZE);
//
// // For all vertical 8x8 blocks
// for ( int y = 0; y < component->sampling_factor.vertical; y++ ) {
// // Compute base row data index
// int data_index_row = data_index_base + y * (component->mcu_count_x * component->mcu_size_x * GPUJPEG_BLOCK_SIZE);
// // For all horizontal 8x8 blocks
// for ( int x = 0; x < component->sampling_factor.horizontal; x++ ) {
// // Compute 8x8 block data index
// int data_index = data_index_row + x * GPUJPEG_BLOCK_SIZE * GPUJPEG_BLOCK_SIZE;
//
// // Get component data for MCU
// int16_t* block = &component->d_data_quantized[data_index];
//
// // Get coder parameters
// int & component_dc = dc[comp];
//
// // Get huffman tables offset
// const unsigned int table_offset = component->type == GPUJPEG_COMPONENT_LUMINANCE ? 0x00000 : 0x20000;
//
// // Encode 8x8 block
// gpujpeg_huffman_gpu_decoder_decode_block(component_dc, block, table_offset, r_bit, r_bit_count, s_byte, s_byte_idx, d_byte, d_byte_chunk_count);
// }
// }
// }
// }
}
}
/**
* Setup of one Huffman table entry for fast decoding.
* @param bits bits to extract one codeword from (first bit is bit #15, then #14, ... last is #0)
* @param d_table_src source (slow-decoding) table pointer
* @param d_table_dest destination (fast-decoding) table pointer
*/
__device__ void
gpujpeg_huffman_gpu_decoder_table_setup(
const int bits,
const struct gpujpeg_table_huffman_decoder* const d_table_src,
const int table_idx
) {
// Decode one codeword from given bits to get following:
// - minimal number of bits actually needed to decode the codeword (up to 16 bits, 0 for invalid ones)
// - category ID represented by the codeword, consisting from:
// - number of run-length-coded preceding zeros (up to 16, or 63 for both the special end-of-block symbol and invalid codewords)
// - bit-size of the actual value of coefficient (up to 16, 0 for invalid ones)
int code_nbits = 1, category_id = 0;
// First, decode codeword length (This is per Figure F.16 in the JPEG spec.)
int code_value = bits >> 15; // only single bit initially
while ( code_value > d_table_src->maxcode[code_nbits] ) {
code_value = bits >> (16 - ++code_nbits); // not enough to decide => try more bits
}
// With garbage input we may reach the sentinel value l = 17.
if ( code_nbits > 16 ) {
code_nbits = 0;
// category ID remains 0 for invalid symbols from garbage input
} else {
category_id = d_table_src->huffval[d_table_src->valptr[code_nbits] + code_value - d_table_src->mincode[code_nbits]];
}
// decompose category number into 1 + number of run-length coded zeros and length of the value
// (special category #0 contains all invalid codes and special end-of-block code -- all of those codes
// should terminate block decoding => use 64 run-length zeros and 0 value bits for such symbols)
const int value_nbits = 0xF & category_id;
const int rle_zero_count = category_id ? min(1 + (category_id >> 4), 64) : 64;
// save all the info into the right place in the destination table
const int packed_info = (rle_zero_count << 9) + (code_nbits << 4) + value_nbits;
gpujpeg_huffman_gpu_decoder_tables_full[(table_idx << 16) + bits] = packed_info;
// some threads also save entries into the quick table
const int dest_idx_quick = bits >> (16 - QUICK_CHECK_BITS);
if(bits == (dest_idx_quick << (16 - QUICK_CHECK_BITS))) {
// save info also into the quick table if number of required bits is less than quick
// check bit count, otherwise put 0 there to indicate that full table lookup consultation is needed
gpujpeg_huffman_gpu_decoder_tables_quick[(table_idx << QUICK_CHECK_BITS) + dest_idx_quick] = code_nbits <= QUICK_CHECK_BITS ? packed_info : 0;
}
}
/**
* Huffman decoder table setup kernel
* (Based on the original table, this kernel prepares another table, which is more suitable for fast decoding.)
*/
__global__ void
gpujpeg_huffman_decoder_table_kernel(
const struct gpujpeg_table_huffman_decoder* const d_table_y_dc,
const struct gpujpeg_table_huffman_decoder* const d_table_y_ac,
const struct gpujpeg_table_huffman_decoder* const d_table_cbcr_dc,
const struct gpujpeg_table_huffman_decoder* const d_table_cbcr_ac
) {
// Each thread uses all 4 Huffman tables to "decode" one symbol from its unique 16bits.
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
gpujpeg_huffman_gpu_decoder_table_setup(idx, d_table_y_dc, 0);
gpujpeg_huffman_gpu_decoder_table_setup(idx, d_table_y_ac, 1);
gpujpeg_huffman_gpu_decoder_table_setup(idx, d_table_cbcr_dc, 2);
gpujpeg_huffman_gpu_decoder_table_setup(idx, d_table_cbcr_ac, 3);
}
/** Documented at declaration */
int
gpujpeg_huffman_gpu_decoder_init()
{
// Copy natural order to constant device memory
cudaMemcpyToSymbol(
gpujpeg_huffman_gpu_decoder_order_natural,
gpujpeg_order_natural,
GPUJPEG_ORDER_NATURAL_SIZE * sizeof(int),
0,
cudaMemcpyHostToDevice
);
gpujpeg_cuda_check_error("Huffman decoder init");
return 0;
}
/** Documented at declaration */
int
gpujpeg_huffman_gpu_decoder_decode(struct gpujpeg_decoder* decoder)
{
// Get coder
struct gpujpeg_coder* coder = &decoder->coder;
assert(coder->param.restart_interval > 0);
int comp_count = 1;
if ( coder->param.interleaved == 1 )
comp_count = coder->param_image.comp_count;
assert(comp_count >= 1 && comp_count <= GPUJPEG_MAX_COMPONENT_COUNT);
// Number of decoder kernel threads per each threadblock
enum { THREADS_PER_TBLOCK = 192 };
// Configure more Shared memory for both kernels
cudaFuncSetCacheConfig(gpujpeg_huffman_decoder_table_kernel, cudaFuncCachePreferShared);
cudaFuncSetCacheConfig(gpujpeg_huffman_decoder_decode_kernel<true, THREADS_PER_TBLOCK>, cudaFuncCachePreferShared);
cudaFuncSetCacheConfig(gpujpeg_huffman_decoder_decode_kernel<false, THREADS_PER_TBLOCK>, cudaFuncCachePreferShared);
// Setup GPU tables (one thread for each of 65536 entries)
gpujpeg_huffman_decoder_table_kernel<<<256, 256>>>(
decoder->d_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC],
decoder->d_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC],
decoder->d_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC],
decoder->d_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC]
);
cudaThreadSynchronize();
gpujpeg_cuda_check_error("Huffman decoder table setup failed");
// Get pointer to quick decoding table in device memory
void * d_src_ptr = 0;
cudaGetSymbolAddress(&d_src_ptr, gpujpeg_huffman_gpu_decoder_tables_quick);
cudaThreadSynchronize();
gpujpeg_cuda_check_error("Huffman decoder table address lookup failed");
// Copy quick decoding table into constant memory
cudaMemcpyToSymbol(
gpujpeg_huffman_gpu_decoder_tables_quick_const,
d_src_ptr,
sizeof(*gpujpeg_huffman_gpu_decoder_tables_quick) * QUICK_TABLE_ITEMS,
0,
cudaMemcpyDeviceToDevice
);
cudaThreadSynchronize();
gpujpeg_cuda_check_error("Huffman decoder table copy failed");
// Run decoding kernel
dim3 thread(THREADS_PER_TBLOCK);
dim3 grid(gpujpeg_div_and_round_up(decoder->segment_count, THREADS_PER_TBLOCK));
if(comp_count == 1) {
gpujpeg_huffman_decoder_decode_kernel<true, THREADS_PER_TBLOCK><<<grid, thread>>>(
coder->d_component,
coder->d_segment,
comp_count,
decoder->segment_count,
coder->d_data_compressed,
coder->d_block_list,
coder->d_data_quantized
);
} else {
gpujpeg_huffman_decoder_decode_kernel<false, THREADS_PER_TBLOCK><<<grid, thread>>>(
coder->d_component,
coder->d_segment,
comp_count,
decoder->segment_count,
coder->d_data_compressed,
coder->d_block_list,
coder->d_data_quantized
);
}
cudaError cuerr = cudaThreadSynchronize();
gpujpeg_cuda_check_error("Huffman decoding failed");
return 0;
}
|
2ff84b446a8c63c97511e5bca16c630feb192d07.hip | // !!! This is a file automatically generated by hipify!!!
/** GPU Laplace solver using optimized red-black Gauss–Seidel with SOR solver
*
* \author Kyle E. Niemeyer
* \date 09/21/2012
*
* Solves Laplace's equation in 2D (e.g., heat conduction in a rectangular plate)
* on GPU using CUDA with the red-black Gauss–Seidel with successive overrelaxation
* (SOR) that has been "optimized". This means that the red and black kernels
* only loop over their respective cells, instead of over all cells and skipping
* even/odd cells. This requires separate arrays for red and black cells.
*
* Boundary conditions:
* T = 0 at x = 0, x = L, y = 0
* T = TN at y = H
*/
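// Added note on storage (derived from main() below, kept here for orientation): the red and
// black temperature arrays each hold (NUM/2 + 2) x (NUM + 2) entries, i.e. half the interior
// cells per color plus a ring of boundary/ghost cells, while the coefficient arrays
// aP, aW, aE, aS, aN and b store one value per interior cell as NUM x NUM entries indexed by
// the "global" index used in the kernels.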
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "timer.h"
// CUDA libraries
#include <hip/hip_runtime.h>
/** Problem size along one side; total number of cells is this squared */
#define NUM 1024
// block size
#define BLOCK_SIZE 128
#define Real float
#define ZERO 0.0f
#define ONE 1.0f
#define TWO 2.0f
/** SOR relaxation parameter */
const Real omega = 1.85f;
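// The kernels below apply the standard SOR update (see temp_new in red_kernel/black_kernel):
// T_new = (1 - omega) * T_old + omega * ( (b + sum(a_nb * T_nb)) / aP )
// with omega = 1.85 as the over-relaxation factor.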
///////////////////////////////////////////////////////////////////////////////
/** Function to evaluate coefficient matrix and right-hand side vector.
*
* \param[in] rowmax number of rows
* \param[in] colmax number of columns
* \param[in] th_cond thermal conductivity
* \param[in] dx grid size in x dimension (uniform)
* \param[in] dy grid size in y dimension (uniform)
* \param[in] width width of plate (z dimension)
* \param[in] TN temperature at top boundary
* \param[out] aP array of self coefficients
* \param[out] aW array of west neighbor coefficients
* \param[out] aE array of east neighbor coefficients
* \param[out] aS array of south neighbor coefficients
* \param[out] aN array of north neighbor coefficients
* \param[out] b right-hand side array
*/
void fill_coeffs (int rowmax, int colmax, Real th_cond, Real dx, Real dy,
Real width, Real TN, Real * aP, Real * aW, Real * aE,
Real * aS, Real * aN, Real * b)
{
int col, row;
for (col = 0; col < colmax; ++col) {
for (row = 0; row < rowmax; ++row) {
int ind = col * rowmax + row;
b[ind] = ZERO;
Real SP = ZERO;
if (col == 0) {
// left BC: temp = 0
aW[ind] = ZERO;
SP = -TWO * th_cond * width * dy / dx;
} else {
aW[ind] = th_cond * width * dy / dx;
}
if (col == colmax - 1) {
// right BC: temp = 0
aE[ind] = ZERO;
SP = -TWO * th_cond * width * dy / dx;
} else {
aE[ind] = th_cond * width * dy / dx;
}
if (row == 0) {
// bottom BC: temp = 0
aS[ind] = ZERO;
SP = -TWO * th_cond * width * dx / dy;
} else {
aS[ind] = th_cond * width * dx / dy;
}
if (row == rowmax - 1) {
// top BC: temp = TN
aN[ind] = ZERO;
b[ind] = TWO * th_cond * width * dx * TN / dy;
SP = -TWO * th_cond * width * dx / dy;
} else {
aN[ind] = th_cond * width * dx / dy;
}
aP[ind] = aW[ind] + aE[ind] + aS[ind] + aN[ind] - SP;
} // end for row
} // end for col
} // end fill_coeffs
///////////////////////////////////////////////////////////////////////////////
/** Function to update temperature for red cells
*
* \param[in] aP array of self coefficients
* \param[in] aW array of west neighbor coefficients
* \param[in] aE array of east neighbor coefficients
* \param[in] aS array of south neighbor coefficients
* \param[in] aN array of north neighbor coefficients
* \param[in] b right-hand side array
* \param[in] temp_black temperatures of black cells, constant in this function
* \param[inout] temp_red temperatures of red cells
* \param[out] bl_norm_L2 array with residual information for blocks
*/
__global__ void red_kernel (const Real * aP, const Real * aW, const Real * aE,
const Real * aS, const Real * aN, const Real * b,
const Real * temp_black, Real * temp_red,
Real * norm_L2)
{
int row = 1 + (blockIdx.x * blockDim.x) + threadIdx.x;
int col = 1 + (blockIdx.y * blockDim.y) + threadIdx.y;
int ind_red = col * ((NUM >> 1) + 2) + row; // local (red) index
int ind = 2 * row - (col & 1) - 1 + NUM * (col - 1); // global index
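// Index mapping example (added for clarity, NUM = 1024): for col = 1 (odd) the red cells of
// that column sit at even global rows, e.g. row = 1 gives ind = 2*1 - 1 - 1 + 0 = 0, while
// for col = 2 (even) they sit at odd rows, e.g. row = 1 gives ind = 2*1 - 0 - 1 + 1024 = 1025,
// so the red cells form a checkerboard over the full NUM x NUM coefficient grid.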
Real temp_old = temp_red[ind_red];
Real res = b[ind]
+ (aW[ind] * temp_black[row + (col - 1) * ((NUM >> 1) + 2)]
+ aE[ind] * temp_black[row + (col + 1) * ((NUM >> 1) + 2)]
+ aS[ind] * temp_black[row - (col & 1) + col * ((NUM >> 1) + 2)]
+ aN[ind] * temp_black[row + ((col + 1) & 1) + col * ((NUM >> 1) + 2)]);
Real temp_new = temp_old * (ONE - omega) + omega * (res / aP[ind]);
temp_red[ind_red] = temp_new;
res = temp_new - temp_old;
norm_L2[ind_red] = res * res;
} // end red_kernel
///////////////////////////////////////////////////////////////////////////////
/** Function to update temperature for black cells
*
* \param[in] aP array of self coefficients
* \param[in] aW array of west neighbor coefficients
* \param[in] aE array of east neighbor coefficients
* \param[in] aS array of south neighbor coefficients
* \param[in] aN array of north neighbor coefficients
* \param[in] b right-hand side array
* \param[in] temp_red temperatures of red cells, constant in this function
* \param[inout] temp_black temperatures of black cells
* \param[out] bl_norm_L2 array with residual information for blocks
*/
__global__ void black_kernel (const Real * aP, const Real * aW, const Real * aE,
const Real * aS, const Real * aN, const Real * b,
const Real * temp_red, Real * temp_black,
Real * norm_L2)
{
int row = 1 + (blockIdx.x * blockDim.x) + threadIdx.x;
int col = 1 + (blockIdx.y * blockDim.y) + threadIdx.y;
int ind_black = col * ((NUM >> 1) + 2) + row; // local (black) index
int ind = 2 * row - ((col + 1) & 1) - 1 + NUM * (col - 1); // global index
Real temp_old = temp_black[ind_black];
Real res = b[ind]
+ (aW[ind] * temp_red[row + (col - 1) * ((NUM >> 1) + 2)]
+ aE[ind] * temp_red[row + (col + 1) * ((NUM >> 1) + 2)]
+ aS[ind] * temp_red[row - ((col + 1) & 1) + col * ((NUM >> 1) + 2)]
+ aN[ind] * temp_red[row + (col & 1) + col * ((NUM >> 1) + 2)]);
Real temp_new = temp_old * (ONE - omega) + omega * (res / aP[ind]);
temp_black[ind_black] = temp_new;
res = temp_new - temp_old;
norm_L2[ind_black] = res * res;
} // end black_kernel
///////////////////////////////////////////////////////////////////////////////
/** Main function that solves Laplace's equation in 2D (heat conduction in plate)
*
* Contains iteration loop for red-black Gauss-Seidel with SOR GPU kernels
*/
int main (void) {
// size of plate
Real L = 1.0;
Real H = 1.0;
Real width = 0.01;
// thermal conductivity
Real th_cond = 1.0;
// temperature at top boundary
Real TN = 1.0;
// SOR iteration tolerance
Real tol = 1.e-6;
// number of cells in x and y directions
// including unused boundary cells
int num_rows = (NUM / 2) + 2;
int num_cols = NUM + 2;
int size_temp = num_rows * num_cols;
int size = NUM * NUM;
// size of cells
Real dx = L / NUM;
Real dy = H / NUM;
// iterations for Red-Black Gauss-Seidel with SOR
int iter;
int it_max = 1e6;
// allocate memory
Real *aP, *aW, *aE, *aS, *aN, *b;
Real *temp_red, *temp_black;
// arrays of coefficients
aP = (Real *) calloc (size, sizeof(Real));
aW = (Real *) calloc (size, sizeof(Real));
aE = (Real *) calloc (size, sizeof(Real));
aS = (Real *) calloc (size, sizeof(Real));
aN = (Real *) calloc (size, sizeof(Real));
// RHS
b = (Real *) calloc (size, sizeof(Real));
// temperature arrays
temp_red = (Real *) calloc (size_temp, sizeof(Real));
temp_black = (Real *) calloc (size_temp, sizeof(Real));
// set coefficients
fill_coeffs (NUM, NUM, th_cond, dx, dy, width, TN, aP, aW, aE, aS, aN, b);
int i;
for (i = 0; i < size_temp; ++i) {
temp_red[i] = ZERO;
temp_black[i] = ZERO;
}
// block and grid dimensions
dim3 dimBlock (BLOCK_SIZE, 1);
dim3 dimGrid (NUM / (2 * BLOCK_SIZE), NUM);
// residual
Real *bl_norm_L2;
// one for each temperature value
int size_norm = size_temp;
bl_norm_L2 = (Real *) calloc (size_norm, sizeof(Real));
for (i = 0; i < size_norm; ++i) {
bl_norm_L2[i] = ZERO;
}
// print problem info
printf("Problem size: %d x %d \n", NUM, NUM);
StartTimer();
// allocate device memory
Real *aP_d, *aW_d, *aE_d, *aS_d, *aN_d, *b_d;
Real *temp_red_d;
Real *temp_black_d;
hipMalloc ((void**) &aP_d, size * sizeof(Real));
hipMalloc ((void**) &aW_d, size * sizeof(Real));
hipMalloc ((void**) &aE_d, size * sizeof(Real));
hipMalloc ((void**) &aS_d, size * sizeof(Real));
hipMalloc ((void**) &aN_d, size * sizeof(Real));
hipMalloc ((void**) &b_d, size * sizeof(Real));
hipMalloc ((void**) &temp_red_d, size_temp * sizeof(Real));
hipMalloc ((void**) &temp_black_d, size_temp * sizeof(Real));
// copy to device memory
hipMemcpy (aP_d, aP, size * sizeof(Real), hipMemcpyHostToDevice);
hipMemcpy (aW_d, aW, size * sizeof(Real), hipMemcpyHostToDevice);
hipMemcpy (aE_d, aE, size * sizeof(Real), hipMemcpyHostToDevice);
hipMemcpy (aS_d, aS, size * sizeof(Real), hipMemcpyHostToDevice);
hipMemcpy (aN_d, aN, size * sizeof(Real), hipMemcpyHostToDevice);
hipMemcpy (b_d, b, size * sizeof(Real), hipMemcpyHostToDevice);
hipMemcpy (temp_red_d, temp_red, size_temp * sizeof(Real), hipMemcpyHostToDevice);
hipMemcpy (temp_black_d, temp_black, size_temp * sizeof(Real), hipMemcpyHostToDevice);
// residual
Real *bl_norm_L2_d;
hipMalloc ((void**) &bl_norm_L2_d, size_norm * sizeof(Real));
hipMemcpy (bl_norm_L2_d, bl_norm_L2, size_norm * sizeof(Real), hipMemcpyHostToDevice);
// iteration loop
for (iter = 1; iter <= it_max; ++iter) {
Real norm_L2 = ZERO;
hipLaunchKernelGGL(( red_kernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, aP_d, aW_d, aE_d, aS_d, aN_d, b_d, temp_black_d, temp_red_d, bl_norm_L2_d);
// transfer residual value(s) back to CPU
hipMemcpy (bl_norm_L2, bl_norm_L2_d, size_norm * sizeof(Real), hipMemcpyDeviceToHost);
// add red cell contributions to residual
for (int i = 0; i < size_norm; ++i) {
norm_L2 += bl_norm_L2[i];
}
hipLaunchKernelGGL(( black_kernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, aP_d, aW_d, aE_d, aS_d, aN_d, b_d, temp_red_d, temp_black_d, bl_norm_L2_d);
// transfer residual value(s) back to CPU and
// add black cell contributions to residual
hipMemcpy (bl_norm_L2, bl_norm_L2_d, size_norm * sizeof(Real), hipMemcpyDeviceToHost);
for (int i = 0; i < size_norm; ++i) {
norm_L2 += bl_norm_L2[i];
}
// calculate residual
norm_L2 = sqrt(norm_L2 / ((Real)size));
if (iter % 5000 == 0) printf("%5d, %0.6f\n", iter, norm_L2);
// if tolerance has been reached, end SOR iterations
if (norm_L2 < tol) {
break;
}
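// NOTE (added): the unconditional break below exits the loop after a single red/black sweep,
// so the tolerance test above never gets a chance to run further iterations; this looks like
// a leftover from debugging/timing rather than intended solver behavior.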
break;
}
// transfer final temperature values back
hipMemcpy (temp_red, temp_red_d, size_temp * sizeof(Real), hipMemcpyDeviceToHost);
hipMemcpy (temp_black, temp_black_d, size_temp * sizeof(Real), hipMemcpyDeviceToHost);
double runtime = GetTimer();
printf("GPU\n");
printf("Iterations: %i\n", iter);
printf("Total time: %f s\n", runtime / 1000.0);
// print temperature data to file
FILE * pfile;
pfile = fopen("temperature.dat", "w");
if (pfile != NULL) {
fprintf(pfile, "#x\ty\ttemp(K)\n");
int row, col;
for (row = 1; row < NUM + 1; ++row) {
for (col = 1; col < NUM + 1; ++col) {
Real x_pos = (col - 1) * dx + (dx / 2);
Real y_pos = (row - 1) * dy + (dy / 2);
if ((row + col) % 2 == 0) {
// even, so red cell
int ind = col * num_rows + (row + (col % 2)) / 2;
fprintf(pfile, "%f\t%f\t%f\n", x_pos, y_pos, temp_red[ind]);
} else {
// odd, so black cell
int ind = col * num_rows + (row + ((col + 1) % 2)) / 2;
fprintf(pfile, "%f\t%f\t%f\n", x_pos, y_pos, temp_black[ind]);
}
}
fprintf(pfile, "\n");
}
}
fclose(pfile);
// free device memory
hipFree(aP_d);
hipFree(aW_d);
hipFree(aE_d);
hipFree(aS_d);
hipFree(aN_d);
hipFree(b_d);
hipFree(temp_red_d);
hipFree(temp_black_d);
hipFree(bl_norm_L2_d);
free(aP);
free(aW);
free(aE);
free(aS);
free(aN);
free(b);
free(temp_red);
free(temp_black);
free(bl_norm_L2);
return 0;
}
| 2ff84b446a8c63c97511e5bca16c630feb192d07.cu | /** GPU Laplace solver using optimized red-black Gauss–Seidel with SOR solver
*
* \author Kyle E. Niemeyer
* \date 09/21/2012
*
* Solves Laplace's equation in 2D (e.g., heat conduction in a rectangular plate)
* on GPU using CUDA with the red-black Gauss–Seidel with successive overrelaxation
* (SOR) that has been "optimized". This means that the red and black kernels
* only loop over their respective cells, instead of over all cells and skipping
* even/odd cells. This requires separate arrays for red and black cells.
*
* Boundary conditions:
* T = 0 at x = 0, x = L, y = 0
* T = TN at y = H
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "timer.h"
// CUDA libraries
#include <cuda.h>
/** Problem size along one side; total number of cells is this squared */
#define NUM 1024
// block size
#define BLOCK_SIZE 128
#define Real float
#define ZERO 0.0f
#define ONE 1.0f
#define TWO 2.0f
/** SOR relaxation parameter */
const Real omega = 1.85f;
///////////////////////////////////////////////////////////////////////////////
/** Function to evaluate coefficient matrix and right-hand side vector.
*
* \param[in] rowmax number of rows
* \param[in] colmax number of columns
* \param[in] th_cond thermal conductivity
* \param[in] dx grid size in x dimension (uniform)
* \param[in] dy grid size in y dimension (uniform)
* \param[in] width width of plate (z dimension)
* \param[in] TN temperature at top boundary
* \param[out] aP array of self coefficients
* \param[out] aW array of west neighbor coefficients
* \param[out] aE array of east neighbor coefficients
* \param[out] aS array of south neighbor coefficients
* \param[out] aN array of north neighbor coefficients
* \param[out] b right-hand side array
*/
void fill_coeffs (int rowmax, int colmax, Real th_cond, Real dx, Real dy,
Real width, Real TN, Real * aP, Real * aW, Real * aE,
Real * aS, Real * aN, Real * b)
{
int col, row;
for (col = 0; col < colmax; ++col) {
for (row = 0; row < rowmax; ++row) {
int ind = col * rowmax + row;
b[ind] = ZERO;
Real SP = ZERO;
if (col == 0) {
// left BC: temp = 0
aW[ind] = ZERO;
SP = -TWO * th_cond * width * dy / dx;
} else {
aW[ind] = th_cond * width * dy / dx;
}
if (col == colmax - 1) {
// right BC: temp = 0
aE[ind] = ZERO;
SP = -TWO * th_cond * width * dy / dx;
} else {
aE[ind] = th_cond * width * dy / dx;
}
if (row == 0) {
// bottom BC: temp = 0
aS[ind] = ZERO;
SP = -TWO * th_cond * width * dx / dy;
} else {
aS[ind] = th_cond * width * dx / dy;
}
if (row == rowmax - 1) {
// top BC: temp = TN
aN[ind] = ZERO;
b[ind] = TWO * th_cond * width * dx * TN / dy;
SP = -TWO * th_cond * width * dx / dy;
} else {
aN[ind] = th_cond * width * dx / dy;
}
aP[ind] = aW[ind] + aE[ind] + aS[ind] + aN[ind] - SP;
} // end for row
} // end for col
} // end fill_coeffs
///////////////////////////////////////////////////////////////////////////////
/** Function to update temperature for red cells
*
* \param[in] aP array of self coefficients
* \param[in] aW array of west neighbor coefficients
* \param[in] aE array of east neighbor coefficients
* \param[in] aS array of south neighbor coefficients
* \param[in] aN array of north neighbor coefficients
* \param[in] b right-hand side array
* \param[in] temp_black temperatures of black cells, constant in this function
* \param[inout] temp_red temperatures of red cells
* \param[out] bl_norm_L2 array with residual information for blocks
*/
__global__ void red_kernel (const Real * aP, const Real * aW, const Real * aE,
const Real * aS, const Real * aN, const Real * b,
const Real * temp_black, Real * temp_red,
Real * norm_L2)
{
int row = 1 + (blockIdx.x * blockDim.x) + threadIdx.x;
int col = 1 + (blockIdx.y * blockDim.y) + threadIdx.y;
int ind_red = col * ((NUM >> 1) + 2) + row; // local (red) index
int ind = 2 * row - (col & 1) - 1 + NUM * (col - 1); // global index
Real temp_old = temp_red[ind_red];
Real res = b[ind]
+ (aW[ind] * temp_black[row + (col - 1) * ((NUM >> 1) + 2)]
+ aE[ind] * temp_black[row + (col + 1) * ((NUM >> 1) + 2)]
+ aS[ind] * temp_black[row - (col & 1) + col * ((NUM >> 1) + 2)]
+ aN[ind] * temp_black[row + ((col + 1) & 1) + col * ((NUM >> 1) + 2)]);
Real temp_new = temp_old * (ONE - omega) + omega * (res / aP[ind]);
temp_red[ind_red] = temp_new;
res = temp_new - temp_old;
norm_L2[ind_red] = res * res;
} // end red_kernel
///////////////////////////////////////////////////////////////////////////////
/** Function to update temperature for black cells
*
* \param[in] aP array of self coefficients
* \param[in] aW array of west neighbor coefficients
* \param[in] aE array of east neighbor coefficients
* \param[in] aS array of south neighbor coefficients
* \param[in] aN array of north neighbor coefficients
* \param[in] b right-hand side array
* \param[in] temp_red temperatures of red cells, constant in this function
* \param[inout] temp_black temperatures of black cells
* \param[out] bl_norm_L2 array with residual information for blocks
*/
__global__ void black_kernel (const Real * aP, const Real * aW, const Real * aE,
const Real * aS, const Real * aN, const Real * b,
const Real * temp_red, Real * temp_black,
Real * norm_L2)
{
int row = 1 + (blockIdx.x * blockDim.x) + threadIdx.x;
int col = 1 + (blockIdx.y * blockDim.y) + threadIdx.y;
int ind_black = col * ((NUM >> 1) + 2) + row; // local (black) index
int ind = 2 * row - ((col + 1) & 1) - 1 + NUM * (col - 1); // global index
Real temp_old = temp_black[ind_black];
Real res = b[ind]
+ (aW[ind] * temp_red[row + (col - 1) * ((NUM >> 1) + 2)]
+ aE[ind] * temp_red[row + (col + 1) * ((NUM >> 1) + 2)]
+ aS[ind] * temp_red[row - ((col + 1) & 1) + col * ((NUM >> 1) + 2)]
+ aN[ind] * temp_red[row + (col & 1) + col * ((NUM >> 1) + 2)]);
Real temp_new = temp_old * (ONE - omega) + omega * (res / aP[ind]);
temp_black[ind_black] = temp_new;
res = temp_new - temp_old;
norm_L2[ind_black] = res * res;
} // end black_kernel
///////////////////////////////////////////////////////////////////////////////
/** Main function that solves Laplace's equation in 2D (heat conduction in plate)
*
* Contains iteration loop for red-black Gauss-Seidel with SOR GPU kernels
*/
int main (void) {
// size of plate
Real L = 1.0;
Real H = 1.0;
Real width = 0.01;
// thermal conductivity
Real th_cond = 1.0;
// temperature at top boundary
Real TN = 1.0;
// SOR iteration tolerance
Real tol = 1.e-6;
// number of cells in x and y directions
// including unused boundary cells
int num_rows = (NUM / 2) + 2;
int num_cols = NUM + 2;
int size_temp = num_rows * num_cols;
int size = NUM * NUM;
// size of cells
Real dx = L / NUM;
Real dy = H / NUM;
// iterations for Red-Black Gauss-Seidel with SOR
int iter;
int it_max = 1e6;
// allocate memory
Real *aP, *aW, *aE, *aS, *aN, *b;
Real *temp_red, *temp_black;
// arrays of coefficients
aP = (Real *) calloc (size, sizeof(Real));
aW = (Real *) calloc (size, sizeof(Real));
aE = (Real *) calloc (size, sizeof(Real));
aS = (Real *) calloc (size, sizeof(Real));
aN = (Real *) calloc (size, sizeof(Real));
// RHS
b = (Real *) calloc (size, sizeof(Real));
// temperature arrays
temp_red = (Real *) calloc (size_temp, sizeof(Real));
temp_black = (Real *) calloc (size_temp, sizeof(Real));
// set coefficients
fill_coeffs (NUM, NUM, th_cond, dx, dy, width, TN, aP, aW, aE, aS, aN, b);
int i;
for (i = 0; i < size_temp; ++i) {
temp_red[i] = ZERO;
temp_black[i] = ZERO;
}
// block and grid dimensions
dim3 dimBlock (BLOCK_SIZE, 1);
dim3 dimGrid (NUM / (2 * BLOCK_SIZE), NUM);
// residual
Real *bl_norm_L2;
// one for each temperature value
int size_norm = size_temp;
bl_norm_L2 = (Real *) calloc (size_norm, sizeof(Real));
for (i = 0; i < size_norm; ++i) {
bl_norm_L2[i] = ZERO;
}
// print problem info
printf("Problem size: %d x %d \n", NUM, NUM);
StartTimer();
// allocate device memory
Real *aP_d, *aW_d, *aE_d, *aS_d, *aN_d, *b_d;
Real *temp_red_d;
Real *temp_black_d;
cudaMalloc ((void**) &aP_d, size * sizeof(Real));
cudaMalloc ((void**) &aW_d, size * sizeof(Real));
cudaMalloc ((void**) &aE_d, size * sizeof(Real));
cudaMalloc ((void**) &aS_d, size * sizeof(Real));
cudaMalloc ((void**) &aN_d, size * sizeof(Real));
cudaMalloc ((void**) &b_d, size * sizeof(Real));
cudaMalloc ((void**) &temp_red_d, size_temp * sizeof(Real));
cudaMalloc ((void**) &temp_black_d, size_temp * sizeof(Real));
// copy to device memory
cudaMemcpy (aP_d, aP, size * sizeof(Real), cudaMemcpyHostToDevice);
cudaMemcpy (aW_d, aW, size * sizeof(Real), cudaMemcpyHostToDevice);
cudaMemcpy (aE_d, aE, size * sizeof(Real), cudaMemcpyHostToDevice);
cudaMemcpy (aS_d, aS, size * sizeof(Real), cudaMemcpyHostToDevice);
cudaMemcpy (aN_d, aN, size * sizeof(Real), cudaMemcpyHostToDevice);
cudaMemcpy (b_d, b, size * sizeof(Real), cudaMemcpyHostToDevice);
cudaMemcpy (temp_red_d, temp_red, size_temp * sizeof(Real), cudaMemcpyHostToDevice);
cudaMemcpy (temp_black_d, temp_black, size_temp * sizeof(Real), cudaMemcpyHostToDevice);
// residual
Real *bl_norm_L2_d;
cudaMalloc ((void**) &bl_norm_L2_d, size_norm * sizeof(Real));
cudaMemcpy (bl_norm_L2_d, bl_norm_L2, size_norm * sizeof(Real), cudaMemcpyHostToDevice);
// iteration loop
for (iter = 1; iter <= it_max; ++iter) {
Real norm_L2 = ZERO;
red_kernel <<<dimGrid, dimBlock>>> (aP_d, aW_d, aE_d, aS_d, aN_d, b_d, temp_black_d, temp_red_d, bl_norm_L2_d);
// transfer residual value(s) back to CPU
cudaMemcpy (bl_norm_L2, bl_norm_L2_d, size_norm * sizeof(Real), cudaMemcpyDeviceToHost);
// add red cell contributions to residual
for (int i = 0; i < size_norm; ++i) {
norm_L2 += bl_norm_L2[i];
}
black_kernel <<<dimGrid, dimBlock>>> (aP_d, aW_d, aE_d, aS_d, aN_d, b_d, temp_red_d, temp_black_d, bl_norm_L2_d);
// transfer residual value(s) back to CPU and
// add black cell contributions to residual
cudaMemcpy (bl_norm_L2, bl_norm_L2_d, size_norm * sizeof(Real), cudaMemcpyDeviceToHost);
for (int i = 0; i < size_norm; ++i) {
norm_L2 += bl_norm_L2[i];
}
// calculate residual
norm_L2 = sqrt(norm_L2 / ((Real)size));
if (iter % 5000 == 0) printf("%5d, %0.6f\n", iter, norm_L2);
// if tolerance has been reached, end SOR iterations
if (norm_L2 < tol) {
break;
}
break;
}
// transfer final temperature values back
cudaMemcpy (temp_red, temp_red_d, size_temp * sizeof(Real), cudaMemcpyDeviceToHost);
cudaMemcpy (temp_black, temp_black_d, size_temp * sizeof(Real), cudaMemcpyDeviceToHost);
double runtime = GetTimer();
printf("GPU\n");
printf("Iterations: %i\n", iter);
printf("Total time: %f s\n", runtime / 1000.0);
// print temperature data to file
FILE * pfile;
pfile = fopen("temperature.dat", "w");
if (pfile != NULL) {
fprintf(pfile, "#x\ty\ttemp(K)\n");
int row, col;
for (row = 1; row < NUM + 1; ++row) {
for (col = 1; col < NUM + 1; ++col) {
Real x_pos = (col - 1) * dx + (dx / 2);
Real y_pos = (row - 1) * dy + (dy / 2);
if ((row + col) % 2 == 0) {
// even, so red cell
int ind = col * num_rows + (row + (col % 2)) / 2;
fprintf(pfile, "%f\t%f\t%f\n", x_pos, y_pos, temp_red[ind]);
} else {
// odd, so black cell
int ind = col * num_rows + (row + ((col + 1) % 2)) / 2;
fprintf(pfile, "%f\t%f\t%f\n", x_pos, y_pos, temp_black[ind]);
}
}
fprintf(pfile, "\n");
}
}
fclose(pfile);
// free device memory
cudaFree(aP_d);
cudaFree(aW_d);
cudaFree(aE_d);
cudaFree(aS_d);
cudaFree(aN_d);
cudaFree(b_d);
cudaFree(temp_red_d);
cudaFree(temp_black_d);
cudaFree(bl_norm_L2_d);
free(aP);
free(aW);
free(aE);
free(aS);
free(aN);
free(b);
free(temp_red);
free(temp_black);
free(bl_norm_L2);
return 0;
}
|
317a3ae35d9c65e0d1add40629902998a446170c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "./prelu_layer.hpp"
#include "../util/math_functions.hpp"
namespace caffe {
// CUDA kernel for forward
__global__ void PReLUForward(const int n, const int channels, const int dim,
const real_t* in, real_t* out, const real_t* slope_data,
const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];
}
}
void PReLULayer::Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
const real_t* bottom_data = bottom[0]->gpu_data();
real_t* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
const real_t* slope_data = this->blobs_[0]->gpu_data();
const int div_factor = channel_shared_ ? channels : 1;
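// Added note: with channel_shared_ set, div_factor == channels, so the kernel's
// "(index / dim) % channels / div_factor" collapses to 0 and a single slope value is
// shared by all channels; otherwise each channel c reads its own slope_data[c].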
// For in-place computation
if (top[0] == bottom[0]) {
caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data());
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PReLUForward), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, channels, dim, bottom_data, top_data, slope_data, div_factor);
CUDA_POST_KERNEL_CHECK;
}
} // namespace caffe
| 317a3ae35d9c65e0d1add40629902998a446170c.cu | #include <algorithm>
#include <vector>
#include "./prelu_layer.hpp"
#include "../util/math_functions.hpp"
namespace caffe {
// CUDA kernel for forward
__global__ void PReLUForward(const int n, const int channels, const int dim,
const real_t* in, real_t* out, const real_t* slope_data,
const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];
}
}
void PReLULayer::Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
const real_t* bottom_data = bottom[0]->gpu_data();
real_t* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
const real_t* slope_data = this->blobs_[0]->gpu_data();
const int div_factor = channel_shared_ ? channels : 1;
// For in-place computation
if (top[0] == bottom[0]) {
caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data());
}
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUForward<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, channels, dim, bottom_data, top_data, slope_data, div_factor);
CUDA_POST_KERNEL_CHECK;
}
} // namespace caffe
|
b4f22811106be298edb93efe3ae8ef3772f881b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lattice.h"
__device__ int
get_global_index(dim3 tid,
dim3 bid,
dim3 bdim)
{
return tid.x + bid.x * bdim.x;
}
__device__
double compute(double coef,
double p,
double w1,
double w2,
double strike,
double up,
double down,
double price,
int ind,
int n,
int type)
{
double euro = coef * (p * w2 + (1 - p) * w1);
if (type == EUROPEAN) {
return euro;
}
else if (type == AMERICAN) {
// this is wrong because we need to take into account down for the drifting lattice
// this is also wrong because only the put payoff (strike - price) is handled here, so calls are not covered
return max(euro, max(strike - price * pow(up, 2 * ind - n - 1), 0.0));
}
return 0.0;
}
__global__ void
get_payoff(double* w,
double price,
double up,
double down,
int opttype,
double strike,
int n,
int base)
{
int index = get_global_index(threadIdx, blockIdx, blockDim);
double payoff;
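// Layout note (added): leaf index i holds price * down^(n-1-i) * up^i, so index 0 is the
// all-down path and index n-1 the all-up path; "base" is the grid stride that lets a launch
// with fewer than n threads cover every leaf.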
while (index < n) {
payoff = price * pow(down, n - 1 - index) * pow(up, index);
if (opttype == CALL) {
w[index] = payoff > strike ? payoff - strike : 0.0;
} else {
w[index] = strike > payoff ? strike - payoff : 0.0;
}
index += base;
}
}
__global__ void
smooth_payoff(double * w, const int n){
if (n < 5)
return;
int index = n / 2 - 2;
while (w[++index] != 0);
w[index-1] = (w[index-2] + w[index])/2;
w[index] = (w[index-1] + w[index+1])/2;
w[index+1] = (w[index] + w[index+2])/2;
}
__global__ void
backward_recursion(double* w1,
double* w2,
int n,
int base,
double coef,
double p,
double strike,
double up,
double down,
double price,
int type)
{
int index = get_global_index(threadIdx, blockIdx, blockDim);
while (index < n) {
w2[index] = compute(coef, p, w1[index], w1[index+1], strike, up, down, price, index, n, type);
index += base;
}
}
__global__ void
backward_recursion_lower_triangle(double* w,
int n,
int base,
int len,
double coef,
double p,
double strike,
double up,
double down,
double price,
int type)
{
int tid = threadIdx.x;
int index = get_global_index(threadIdx, blockIdx, blockDim);
int upper = min(THREAD_LIMIT, n);
for (int k = 1; k < upper; k++) {
if (tid < upper - k && index < n) {
int i = (k - 1) * len + index;
double res = compute(coef, p, w[i], w[i+1], strike, up, down, price, i, n, type);
w[i + len] = res;
}
__syncthreads();
}
}
__global__ void
backward_recursion_upper_triangle(double* w,
int n,
int base,
int len,
double coef,
double p,
double strike,
double up,
double down,
double price,
int type)
{
int tid = threadIdx.x;
int index = get_global_index(threadIdx, blockIdx, blockDim);
int upper = min(THREAD_LIMIT, n);
for (int k = 1; k <= upper; k++) {
if (tid >= upper - k && index < n) {
int i = (k - 1) * len + index;
double res = compute(coef, p, w[i], w[i+1], strike, up, down, price, i, n, type);
if (k == upper) {
w[index] = res;
} else {
w[i + len] = res;
}
}
__syncthreads();
}
}
| b4f22811106be298edb93efe3ae8ef3772f881b1.cu | #include "lattice.h"
__device__ int
get_global_index(dim3 tid,
dim3 bid,
dim3 bdim)
{
return tid.x + bid.x * bdim.x;
}
__device__
double compute(double coef,
double p,
double w1,
double w2,
double strike,
double up,
double down,
double price,
int ind,
int n,
int type)
{
double euro = coef * (p * w2 + (1 - p) * w1);
if (type == EUROPEAN) {
return euro;
}
else if (type == AMERICAN) {
// this is wrong because we need to take into account down for the drifting lattice
// this is also wrong because only the put payoff (strike - price) is handled here, so calls are not covered
return max(euro, max(strike - price * pow(up, 2 * ind - n - 1), 0.0));
}
return 0.0;
}
__global__ void
get_payoff(double* w,
double price,
double up,
double down,
int opttype,
double strike,
int n,
int base)
{
int index = get_global_index(threadIdx, blockIdx, blockDim);
double payoff;
while (index < n) {
payoff = price * pow(down, n - 1 - index) * pow(up, index);
if (opttype == CALL) {
w[index] = payoff > strike ? payoff - strike : 0.0;
} else {
w[index] = strike > payoff ? strike - payoff : 0.0;
}
index += base;
}
}
__global__ void
smooth_payoff(double * w, const int n){
if (n < 5)
return;
int index = n / 2 - 2;
while (w[++index] != 0);
w[index-1] = (w[index-2] + w[index])/2;
w[index] = (w[index-1] + w[index+1])/2;
w[index+1] = (w[index] + w[index+2])/2;
}
__global__ void
backward_recursion(double* w1,
double* w2,
int n,
int base,
double coef,
double p,
double strike,
double up,
double down,
double price,
int type)
{
int index = get_global_index(threadIdx, blockIdx, blockDim);
while (index < n) {
w2[index] = compute(coef, p, w1[index], w1[index+1], strike, up, down, price, index, n, type);
index += base;
}
}
__global__ void
backward_recursion_lower_triangle(double* w,
int n,
int base,
int len,
double coef,
double p,
double strike,
double up,
double down,
double price,
int type)
{
int tid = threadIdx.x;
int index = get_global_index(threadIdx, blockIdx, blockDim);
int upper = min(THREAD_LIMIT, n);
for (int k = 1; k < upper; k++) {
if (tid < upper - k && index < n) {
int i = (k - 1) * len + index;
double res = compute(coef, p, w[i], w[i+1], strike, up, down, price, i, n, type);
w[i + len] = res;
}
__syncthreads();
}
}
__global__ void
backward_recursion_upper_triangle(double* w,
int n,
int base,
int len,
double coef,
double p,
double strike,
double up,
double down,
double price,
int type)
{
int tid = threadIdx.x;
int index = get_global_index(threadIdx, blockIdx, blockDim);
int upper = min(THREAD_LIMIT, n);
for (int k = 1; k <= upper; k++) {
if (tid >= upper - k && index < n) {
int i = (k - 1) * len + index;
double res = compute(coef, p, w[i], w[i+1], strike, up, down, price, i, n, type);
if (k == upper) {
w[index] = res;
} else {
w[i + len] = res;
}
}
__syncthreads();
}
}
|
7d4fb9517dc43f2948bace4b3e0de25d68d1991f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void reduce (int *a, int *b, int N){
extern __shared__ int s_a[];
int tid = threadIdx.x;
int id = threadIdx.x + (blockDim.x*2) * blockIdx.x;
s_a[tid] = 0;
if (id >= N) return;
if (id+blockDim.x >= N)
s_a[tid] = a[id];
else
s_a[tid] = a[id] + a[id+blockDim.x];
__syncthreads();
for (int i = blockDim.x/2; i > 32; i>>=1){
if (tid < i)
s_a[tid] += s_a[tid + i];
__syncthreads();
}
if (tid < 32){
volatile int* s_b = s_a;
s_b[tid] += s_b[tid + 32];
}
int local = s_a[tid];
local += __shfl_down(local,16);
local += __shfl_down(local,8);
local += __shfl_down(local,4);
local += __shfl_down(local,2);
local += __shfl_down(local,1);
/*
s_b[tid] += s_b[tid + 16];
s_b[tid] += s_b[tid + 8];
s_b[tid] += s_b[tid + 4];
s_b[tid] += s_b[tid + 2];
s_b[tid] += s_b[tid + 1];
local = s_b[tid];
*/
if (tid == 0)
b[blockIdx.x] = local;
}
int main (int argc, char *argv[]){
int N = atoi(argv[1]);
int num_thread = 1024;
int num_block = ceil((float)N/1024);
num_block = ceil((float)num_block/2);
size_t size = N * sizeof(int);
size_t size_result = num_block * sizeof(int);
int *h_a, *h_b;
h_a = (int*)malloc(size);
h_b = (int*)malloc(size_result);
int *d_a, *d_b;
hipMalloc(&d_a,size);
hipMalloc(&d_b,size_result);
for (int i = 0; i < N; i++)
h_a[i] = 1;
hipMemcpy(d_a, h_a, size, hipMemcpyHostToDevice);
printf("Blocks: %d Threads: %d \n", num_block, num_thread);
hipLaunchKernelGGL(( reduce), dim3(num_block), dim3(num_thread), num_thread * sizeof(int), 0, d_a, d_b, N);
hipMemcpy(h_b, d_b, size_result, hipMemcpyDeviceToHost);
int result = 0;
for (int i = 0; i < num_block; i++){
result += h_b[i];
//printf("%d ", h_b[i]);
}
printf("\nResultado: %d\n", result);
return 0;
} | 7d4fb9517dc43f2948bace4b3e0de25d68d1991f.cu | #include <stdio.h>
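// Per-block sum reduction: each thread loads up to two elements, a shared-memory
// tree reduces the block to 64 partial sums, the first warp folds 64 -> 32 through
// volatile shared memory, and __shfl_down finishes the last 32 lanes; thread 0
// writes the block total to b[blockIdx.x], and the host sums the per-block results.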
__global__ void reduce (int *a, int *b, int N){
extern __shared__ int s_a[];
int tid = threadIdx.x;
int id = threadIdx.x + (blockDim.x*2) * blockIdx.x;
s_a[tid] = 0;
if (id >= N) return;
if (id+blockDim.x >= N)
s_a[tid] = a[id];
else
s_a[tid] = a[id] + a[id+blockDim.x];
__syncthreads();
for (int i = blockDim.x/2; i > 32; i>>=1){
if (tid < i)
s_a[tid] += s_a[tid + i];
__syncthreads();
}
if (tid < 32){
volatile int* s_b = s_a;
s_b[tid] += s_b[tid + 32];
}
int local = s_a[tid];
local += __shfl_down(local,16);
local += __shfl_down(local,8);
local += __shfl_down(local,4);
local += __shfl_down(local,2);
local += __shfl_down(local,1);
/*
s_b[tid] += s_b[tid + 16];
s_b[tid] += s_b[tid + 8];
s_b[tid] += s_b[tid + 4];
s_b[tid] += s_b[tid + 2];
s_b[tid] += s_b[tid + 1];
local = s_b[tid];
*/
if (tid == 0)
b[blockIdx.x] = local;
}
int main (int argc, char *argv[]){
int N = atoi(argv[1]);
int num_thread = 1024;
int num_block = ceil((float)N/1024);
num_block = ceil((float)num_block/2);
size_t size = N * sizeof(int);
size_t size_result = num_block * sizeof(int);
int *h_a, *h_b;
h_a = (int*)malloc(size);
h_b = (int*)malloc(size_result);
int *d_a, *d_b;
cudaMalloc(&d_a,size);
cudaMalloc(&d_b,size_result);
for (int i = 0; i < N; i++)
h_a[i] = 1;
cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
printf("Blocks: %d Threads: %d \n", num_block, num_thread);
reduce<<<num_block, num_thread, num_thread * sizeof(int)>>>(d_a, d_b, N);
cudaMemcpy(h_b, d_b, size_result, cudaMemcpyDeviceToHost);
int result = 0;
for (int i = 0; i < num_block; i++){
result += h_b[i];
//printf("%d ", h_b[i]);
}
printf("\nResultado: %d\n", result);
return 0;
} |
b7e927dd22e4012ad9f5e21e01047a5dab5f1c68.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <limits.h>
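// Three kernels over input values in [0, 999]:
// part_a bins each element into one of 10 global counters B with atomicAdd;
// part_b does the same, but accumulates buckets 1-9 in a block-local shared-memory
// histogram that is flushed to B once per block;
// part_c (run on a single thread) turns the per-bucket counts into cumulative totals.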
/* Part A */
__global__ void part_a(int n, int *A, int *B){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride) {
if (0 <= A[i] && A[i] <= 99) { atomicAdd(B, 1); }
else if (100 <= A[i] && A[i] <= 199) { atomicAdd(&B[1], 1); }
else if (200 <= A[i] && A[i] <= 299) { atomicAdd(&B[2], 1); }
else if (300 <= A[i] && A[i] <= 399) { atomicAdd(&B[3], 1); }
else if (400 <= A[i] && A[i] <= 499) { atomicAdd(&B[4], 1); }
else if (500 <= A[i] && A[i] <= 599) { atomicAdd(&B[5], 1); }
else if (600 <= A[i] && A[i] <= 699) { atomicAdd(&B[6], 1); }
else if (700 <= A[i] && A[i] <= 799) { atomicAdd(&B[7], 1); }
else if (800 <= A[i] && A[i] <= 899) { atomicAdd(&B[8], 1); }
else if (900 <= A[i] && A[i] <= 999) { atomicAdd(&B[9], 1); }
}
}
/* Part B */
__global__ void part_b(int n, int *A, int *B){
__shared__ int s[10];
// zero the block-local histogram before any thread increments it
if (threadIdx.x < 10) s[threadIdx.x] = 0;
__syncthreads();
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride) {
if (0 <= A[i] && A[i] <= 99) { atomicAdd(B, 1); }
else if (100 <= A[i] && A[i] <= 199) { atomicAdd(&s[1], 1); }
else if (200 <= A[i] && A[i] <= 299) { atomicAdd(&s[2], 1); }
else if (300 <= A[i] && A[i] <= 399) { atomicAdd(&s[3], 1); }
else if (400 <= A[i] && A[i] <= 499) { atomicAdd(&s[4], 1); }
else if (500 <= A[i] && A[i] <= 599) { atomicAdd(&s[5], 1); }
else if (600 <= A[i] && A[i] <= 699) { atomicAdd(&s[6], 1); }
else if (700 <= A[i] && A[i] <= 799) { atomicAdd(&s[7], 1); }
else if (800 <= A[i] && A[i] <= 899) { atomicAdd(&s[8], 1); }
else if (900 <= A[i] && A[i] <= 999) { atomicAdd(&s[9], 1); }
}
__syncthreads();
if (threadIdx.x == 0)
for (int i = 0; i < 10; i++)
atomicAdd(&B[i], s[i]);
}
/* Part C */
__global__ void part_c(int *B, int *C){
for (int i = 0; i < 10; i += 1) {
int sum = 0;
for (int j = 0; j < i; j++) { sum += B[j]; }
C[i] += sum;
}
}
int main() {
/* Open File */
FILE *fp;
fp = fopen("inp.txt", "r");
char buff[256];
const int M = 1<<20;
const int d = 10;
int *A = new int[M];
int *B = new int[d];
int *B2 = new int[d];
int *C = new int[d];
int i, count = 0;
/* Copy to GPU Memory */
hipMallocManaged(&A, M * sizeof(int));
hipMallocManaged(&B, d * sizeof(int));
hipMallocManaged(&B2, d * sizeof(int));
hipMallocManaged(&C, d * sizeof(int));
/* Read numbers as integers one by one */
while (fscanf(fp, "%d", &i) != EOF) {
A[count++] = i; // Add number to array
fscanf(fp, "%s", buff); // Read until whitespace
}
/* Close FilePointer */
fclose(fp);
/**************************************************/
/* Part A */
int blockSize = 256;
int numBlocks = (count + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( part_a), dim3(numBlocks), dim3(blockSize), 0, 0, count, A, B);
/* Wait for GPU */
hipDeviceSynchronize();
/* Part A to File */
FILE *f = fopen("q2a.txt", "w");
for (int i = 0; i < d; i++) {
fprintf(f, "%d", B[i]);
if (i + 1 != d) { fprintf(f, ", "); }
} fclose(f);
/* Print B */
printf("B: ");
for (int i = 0; i < d; i++) {
printf("%d", B[i]);
if (i + 1 != d ) printf(", ");
} printf("\n");
/* Copy B to C */
for (int i = 0; i < d; i++) { C[i] = B[i]; }
/**************************************************/
/* Part B */
hipLaunchKernelGGL(( part_b), dim3(numBlocks), dim3(blockSize), 0, 0, count, A, B2);
/* Wait for GPU */
hipDeviceSynchronize();
/* Part B to File */
FILE *f2 = fopen("q2b.txt", "w");
for (int i = 0; i < d; i++) {
fprintf(f2, "%d", B2[i]);
if (i + 1 != d) { fprintf(f2, ", "); }
} fclose(f2);
/* Print B2 */
printf("B2: ");
for (int i = 0; i < d; i++) {
printf("%d", B2[i]);
if (i + 1 != d ) printf(", ");
} printf("\n");
/**************************************************/
/* Part C */
hipLaunchKernelGGL(( part_c), dim3(1), dim3(1), 0, 0, B, C);
/* Wait for GPU */
hipDeviceSynchronize();
/* Part C to File */
FILE *f3 = fopen("q2c.txt", "w");
for (int i = 0; i < d; i++) {
fprintf(f3, "%d", C[i]);
if (i + 1 != d) { fprintf(f3, ", "); }
} fclose(f3);
/* Print C */
printf("C: ");
for (int i = 0; i < d; i++) {
printf("%d", C[i]);
if (i + 1 != d ) printf(", ");
} printf("\n");
/**************************************************/
/* Free Memory */
hipFree(A);
hipFree(B);
hipFree(B2);
hipFree(C);
return 0;
}
| b7e927dd22e4012ad9f5e21e01047a5dab5f1c68.cu | #include <stdio.h>
#include <limits.h>
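// Three kernels over input values in [0, 999]:
// part_a bins each element into one of 10 global counters B with atomicAdd;
// part_b does the same, but accumulates buckets 1-9 in a block-local shared-memory
// histogram that is flushed to B once per block;
// part_c (run on a single thread) turns the per-bucket counts into cumulative totals.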
/* Part A */
__global__ void part_a(int n, int *A, int *B){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride) {
if (0 <= A[i] && A[i] <= 99) { atomicAdd(B, 1); }
else if (100 <= A[i] && A[i] <= 199) { atomicAdd(&B[1], 1); }
else if (200 <= A[i] && A[i] <= 299) { atomicAdd(&B[2], 1); }
else if (300 <= A[i] && A[i] <= 399) { atomicAdd(&B[3], 1); }
else if (400 <= A[i] && A[i] <= 499) { atomicAdd(&B[4], 1); }
else if (500 <= A[i] && A[i] <= 599) { atomicAdd(&B[5], 1); }
else if (600 <= A[i] && A[i] <= 699) { atomicAdd(&B[6], 1); }
else if (700 <= A[i] && A[i] <= 799) { atomicAdd(&B[7], 1); }
else if (800 <= A[i] && A[i] <= 899) { atomicAdd(&B[8], 1); }
else if (900 <= A[i] && A[i] <= 999) { atomicAdd(&B[9], 1); }
}
}
/* Part B */
__global__ void part_b(int n, int *A, int *B){
__shared__ int s[10];
// zero the block-local histogram before any thread increments it
if (threadIdx.x < 10) s[threadIdx.x] = 0;
__syncthreads();
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride) {
if (0 <= A[i] && A[i] <= 99) { atomicAdd(B, 1); }
else if (100 <= A[i] && A[i] <= 199) { atomicAdd(&s[1], 1); }
else if (200 <= A[i] && A[i] <= 299) { atomicAdd(&s[2], 1); }
else if (300 <= A[i] && A[i] <= 399) { atomicAdd(&s[3], 1); }
else if (400 <= A[i] && A[i] <= 499) { atomicAdd(&s[4], 1); }
else if (500 <= A[i] && A[i] <= 599) { atomicAdd(&s[5], 1); }
else if (600 <= A[i] && A[i] <= 699) { atomicAdd(&s[6], 1); }
else if (700 <= A[i] && A[i] <= 799) { atomicAdd(&s[7], 1); }
else if (800 <= A[i] && A[i] <= 899) { atomicAdd(&s[8], 1); }
else if (900 <= A[i] && A[i] <= 999) { atomicAdd(&s[9], 1); }
}
__syncthreads();
if (threadIdx.x == 0)
for (int i = 0; i < 10; i++)
atomicAdd(&B[i], s[i]);
}
/* Part C */
__global__ void part_c(int *B, int *C){
for (int i = 0; i < 10; i += 1) {
int sum = 0;
for (int j = 0; j < i; j++) { sum += B[j]; }
C[i] += sum;
}
}
int main() {
/* Open File */
FILE *fp;
fp = fopen("inp.txt", "r");
char buff[256];
const int M = 1<<20;
const int d = 10;
int *A = new int[M];
int *B = new int[d];
int *B2 = new int[d];
int *C = new int[d];
int i, count = 0;
/* Copy to GPU Memory */
cudaMallocManaged(&A, M * sizeof(int));
cudaMallocManaged(&B, d * sizeof(int));
cudaMallocManaged(&B2, d * sizeof(int));
cudaMallocManaged(&C, d * sizeof(int));
/* Read numbers as integers one by one */
while (fscanf(fp, "%d", &i) != EOF) {
A[count++] = i; // Add number to array
fscanf(fp, "%s", buff); // Read until whitespace
}
/* Close FilePointer */
fclose(fp);
/**************************************************/
/* Part A */
int blockSize = 256;
int numBlocks = (count + blockSize - 1) / blockSize;
part_a<<<numBlocks, blockSize>>>(count, A, B);
/* Wait for GPU */
cudaDeviceSynchronize();
/* Part A to File */
FILE *f = fopen("q2a.txt", "w");
for (int i = 0; i < d; i++) {
fprintf(f, "%d", B[i]);
if (i + 1 != d) { fprintf(f, ", "); }
} fclose(f);
/* Print B */
printf("B: ");
for (int i = 0; i < d; i++) {
printf("%d", B[i]);
if (i + 1 != d ) printf(", ");
} printf("\n");
/* Copy B to C */
for (int i = 0; i < d; i++) { C[i] = B[i]; }
/**************************************************/
/* Part B */
part_b<<<numBlocks, blockSize>>>(count, A, B2);
/* Wait for GPU */
cudaDeviceSynchronize();
/* Part B to File */
FILE *f2 = fopen("q2b.txt", "w");
for (int i = 0; i < d; i++) {
fprintf(f2, "%d", B2[i]);
if (i + 1 != d) { fprintf(f2, ", "); }
} fclose(f2);
/* Print B2 */
printf("B2: ");
for (int i = 0; i < d; i++) {
printf("%d", B2[i]);
if (i + 1 != d ) printf(", ");
} printf("\n");
/**************************************************/
/* Part C */
part_c<<<1, 1>>>(B, C);
/* Wait for GPU */
cudaDeviceSynchronize();
/* Part C to File */
FILE *f3 = fopen("q2c.txt", "w");
for (int i = 0; i < d; i++) {
fprintf(f3, "%d", C[i]);
if (i + 1 != d) { fprintf(f3, ", "); }
} fclose(f3);
/* Print C */
printf("C: ");
for (int i = 0; i < d; i++) {
printf("%d", C[i]);
if (i + 1 != d ) printf(", ");
} printf("\n");
/**************************************************/
/* Free Memory */
cudaFree(A);
cudaFree(B);
cudaFree(B2);
cudaFree(C);
return 0;
}
|
bc6965e7977ed43354a85d73c5c254306ffe8fd1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Part of the following code in this file refs to
// https://github.com/msracver/Deformable-ConvNets/blob/master/faster_rcnn/operator_cxx/deformable_psroi_pooling.cu
//
// Copyright (c) 2017 Microsoft
// Licensed under The Apache-2.0 License [see LICENSE for details]
// \file deformable_psroi_pooling.cu
// \brief
// \author Yi Li, Guodong Zhang, Jifeng Dai
#pragma once
#include <stdio.h>
#include <algorithm>
#include <iostream>
#include <limits>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/operators/deformable_psroi_pooling_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
using paddle::platform::PADDLE_CUDA_NUM_THREADS;
static inline int GET_BLOCKS(const int N) {
return (N + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS;
}
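// Bilinearly interpolates data (a height x width map) at fractional coordinates (x, y).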
template <typename T>
__device__ T bilinear_interpolation(
const T* data, const T x, const T y, const int width, const int height) {
int x1 = floor(x);
int x2 = ceil(x);
int y1 = floor(y);
int y2 = ceil(y);
T dist_x = static_cast<T>(x - x1);
T dist_y = static_cast<T>(y - y1);
T value11 = data[y1 * width + x1];
T value12 = data[y2 * width + x1];
T value21 = data[y1 * width + x2];
T value22 = data[y2 * width + x2];
T value = (1 - dist_x) * (1 - dist_y) * value11 +
(1 - dist_x) * dist_y * value12 + dist_x * (1 - dist_y) * value21 +
dist_x * dist_y * value22;
return value;
}
template <typename T>
__global__ void DeformablePSROIPoolForwardKernel(const int count,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const T* bottom_rois,
const T* bottom_trans,
const bool no_trans,
const T trans_std,
const int sample_per_part,
const int output_dim,
const int group_height,
const int group_width,
const int part_height,
const int part_width,
const int num_classes,
const int channels_each_class,
T* top_data,
T* top_count,
int* roi_batch_id_data) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
const T* offset_bottom_rois = bottom_rois + n * 4;
int roi_batch_ind = roi_batch_id_data[n];
// location of roi on feature map
T roi_start_w =
static_cast<T>(round(offset_bottom_rois[0])) * spatial_scale - 0.5;
T roi_start_h =
static_cast<T>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
T roi_end_w =
static_cast<T>(round(offset_bottom_rois[2]) + 1.) * spatial_scale - 0.5;
T roi_end_h =
static_cast<T>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
// width and height of roi
T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
T roi_height = max(roi_end_h - roi_start_h, 0.1);
// width and height of each bin
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
// sampling interval in each bin
T sub_bin_size_h = bin_size_h / static_cast<T>(sample_per_part);
T sub_bin_size_w = bin_size_w / static_cast<T>(sample_per_part);
// obtain offset of roi
int part_h = floor(static_cast<T>(ph) / pooled_height * part_height);
int part_w = floor(static_cast<T>(pw) / pooled_width * part_width);
int class_id = ctop / channels_each_class;
T trans_x =
no_trans
? static_cast<T>(0)
: bottom_trans[(((n * num_classes + class_id) * 2) * part_height +
part_h) *
part_width +
part_w] *
static_cast<T>(trans_std);
T trans_y = no_trans
? static_cast<T>(0)
: bottom_trans[(((n * num_classes + class_id) * 2 + 1) *
part_height +
part_h) *
part_width +
part_w] *
static_cast<T>(trans_std);
// location of start after adding offset
T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
wstart += trans_x * roi_width;
T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
hstart += trans_y * roi_height;
T sum = 0;
int count = 0;
int gw = floor(static_cast<T>(pw) * group_width / pooled_width);
int gh = floor(static_cast<T>(ph) * group_height / pooled_height);
gw = min(max(gw, 0), group_width - 1);
gh = min(max(gh, 0), group_height - 1);
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels) * height * width;
// sampling in each bin
for (int ih = 0; ih < sample_per_part; ih++) {
for (int iw = 0; iw < sample_per_part; iw++) {
T w = wstart + iw * sub_bin_size_w;
T h = hstart + ih * sub_bin_size_h;
if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) {
continue;
}
w = min(max(w, 0.), width - 1.);
h = min(max(h, 0.), height - 1.);
int c = (ctop * group_height + gh) * group_width + gw;
// bilinear interpolation
T val = bilinear_interpolation(
offset_bottom_data + c * height * width, w, h, width, height);
sum += val;
count++;
}
}
top_data[index] = count == 0 ? static_cast<T>(0) : sum / count;
top_count[index] = count;
}
}
template <typename DeviceContext, typename T>
class DeformablePSROIPoolCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const Tensor* input = ctx.Input<Tensor>("Input");
const LoDTensor* rois = ctx.Input<LoDTensor>("ROIs");
const Tensor* trans = ctx.Input<Tensor>("Trans");
Tensor* out = ctx.Output<Tensor>("Output");
out->mutable_data<T>(ctx.GetPlace());
Tensor* top_count = ctx.Output<Tensor>("TopCount");
top_count->mutable_data<T>(ctx.GetPlace());
auto no_trans = ctx.Attr<bool>("no_trans");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto output_dim = ctx.Attr<int>("output_dim");
auto group_size = ctx.Attr<std::vector<int>>("group_size");
auto group_height = group_size[0];
auto group_width = group_size[1];
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto part_size = ctx.Attr<std::vector<int>>("part_size");
auto part_height = part_size[0];
auto part_width = part_size[1];
auto sample_per_part = ctx.Attr<int>("sample_per_part");
auto trans_std = ctx.Attr<float>("trans_std");
const int batch = static_cast<int>(input->dims()[0]);
const int channels = static_cast<int>(input->dims()[1]);
const int height = static_cast<int>(input->dims()[2]);
const int width = static_cast<int>(input->dims()[3]);
const int channels_trans = no_trans ? 2 : trans->dims()[1];
const int num_rois = rois->dims()[0];
PADDLE_ENFORCE_EQ(
num_rois,
out->dims()[0],
platform::errors::InvalidArgument(
"The number of Input(ROIs) should be same with the number of "
"Output(Output), but received ROIs number is:%d, Output number "
"is:%d.",
num_rois,
out->dims()[0]));
const int count = num_rois * output_dim * pooled_height * pooled_width;
const int num_classes = no_trans ? 1 : channels_trans / 2;
const int channels_each_class =
no_trans ? output_dim : output_dim / num_classes;
PADDLE_ENFORCE_GE(channels_each_class,
1,
platform::errors::InvalidArgument(
"channels_each_class should not be lower than 1, but "
"channels_each_class is:%d.",
channels_each_class));
const T* bottom_data = input->data<T>();
const T* bottom_rois = rois->data<T>();
const T* bottom_trans = no_trans ? NULL : trans->data<T>();
framework::Tensor roi_batch_id_list;
roi_batch_id_list.Resize({num_rois});
auto cplace = platform::CPUPlace();
int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size,
batch,
platform::errors::InvalidArgument(
"rois_batch_size should be equal to the batch_size, but "
"rois_batch_size is:%d, batch_size is:%d.",
rois_batch_size,
batch));
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(num_rois,
rois_num_with_lod,
platform::errors::InvalidArgument(
"The rois_num from input and lod must be same, but"
"rois_num from input is:%d, rois_num from lod is:%d.",
num_rois,
rois_num_with_lod));
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
auto& dev_ctx = ctx.cuda_device_context();
int bytes = roi_batch_id_list.numel() * sizeof(int);
auto roi_ptr = memory::Alloc(dev_ctx, bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
const auto gplace = ctx.GetPlace();
memory::Copy(gplace,
roi_id_data,
cplace,
roi_batch_id_data,
bytes,
dev_ctx.stream());
T* top_data = out->mutable_data<T>(ctx.GetPlace());
T* top_count_data = top_count->mutable_data<T>(ctx.GetPlace());
hipLaunchKernelGGL(( DeformablePSROIPoolForwardKernel), dim3(GET_BLOCKS(count)),
dim3(PADDLE_CUDA_NUM_THREADS),
0,
dev_ctx.stream(), count,
bottom_data,
(T)spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
bottom_rois,
bottom_trans,
no_trans,
(T)trans_std,
sample_per_part,
output_dim,
group_height,
group_width,
part_height,
part_width,
num_classes,
channels_each_class,
top_data,
top_count_data,
roi_id_data);
}
};
template <typename T>
__global__ void DeformablePSROIPoolBackwardAccKernel(
const int count,
const T* top_diff,
const T* top_count,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int output_dim,
T* bottom_data_diff,
T* bottom_trans_diff,
const T* bottom_data,
const T* bottom_rois,
const T* bottom_trans,
const bool no_trans,
const T trans_std,
const int sample_per_part,
const int group_height,
const int group_width,
const int part_height,
const int part_width,
const int num_classes,
const int channels_each_class,
int* roi_batch_id_data) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
int num_box = count / pooled_height / pooled_width / output_dim;
const T* offset_bottom_rois = bottom_rois + n * 4;
int roi_batch_ind = roi_batch_id_data[n];
// location of roi on feature map
T roi_start_w =
static_cast<T>(round(offset_bottom_rois[0])) * spatial_scale - 0.5;
T roi_start_h =
static_cast<T>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
T roi_end_w =
static_cast<T>(round(offset_bottom_rois[2]) + 1.) * spatial_scale - 0.5;
T roi_end_h =
static_cast<T>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
// width and height of roi
T roi_width = max(roi_end_w - roi_start_w, 0.1);
T roi_height = max(roi_end_h - roi_start_h, 0.1);
// width and height of each bin
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
// sampling interval in each bin
T sub_bin_size_h = bin_size_h / static_cast<T>(sample_per_part);
T sub_bin_size_w = bin_size_w / static_cast<T>(sample_per_part);
// obtain offset of roi
int part_h = floor(static_cast<T>(ph) / pooled_height * part_height);
int part_w = floor(static_cast<T>(pw) / pooled_width * part_width);
int class_id = ctop / channels_each_class;
T trans_x =
no_trans
? static_cast<T>(0)
: bottom_trans[(((n * num_classes + class_id) * 2) * part_height +
part_h) *
part_width +
part_w] *
static_cast<T>(trans_std);
T trans_y = no_trans
? static_cast<T>(0)
: bottom_trans[(((n * num_classes + class_id) * 2 + 1) *
part_height +
part_h) *
part_width +
part_w] *
static_cast<T>(trans_std);
// location of start after adding offset
T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
wstart += trans_x * roi_width;
T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
hstart += trans_y * roi_height;
if (top_count[index] <= 0) {
continue;
}
T diff_val = top_diff[index] / top_count[index];
const T* offset_bottom_data =
bottom_data + roi_batch_ind * channels * height * width;
int gw = floor(static_cast<T>(pw) * group_width / pooled_width);
int gh = floor(static_cast<T>(ph) * group_height / pooled_height);
gw = min(max(gw, 0), group_width - 1);
gh = min(max(gh, 0), group_height - 1);
int c = (ctop * group_height + gh) * group_width + gw;
int bottom_index_base = c * height * width;
int bottom_index =
roi_batch_ind * channels * height * width + bottom_index_base;
int trans_index_x =
(((n * num_classes + class_id) * 2) * part_height + part_h) *
part_width +
part_w;
int trans_index_y =
(((n * num_classes + class_id) * 2 + 1) * part_height + part_h) *
part_width +
part_w;
// sampling in each bin
for (int ih = 0; ih < sample_per_part; ih++) {
for (int iw = 0; iw < sample_per_part; iw++) {
T w = wstart + iw * sub_bin_size_w;
T h = hstart + ih * sub_bin_size_h;
if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) {
continue;
}
w = min(max(w, 0.), width - 1.);
h = min(max(h, 0.), height - 1.);
int x0 = floor(w);
int x1 = ceil(w);
int y0 = floor(h);
int y1 = ceil(h);
// compute coefficient of gradient
T dist_x = w - x0, dist_y = h - y0;
T q00 = (1 - dist_x) * (1 - dist_y);
T q01 = (1 - dist_x) * dist_y;
T q10 = dist_x * (1 - dist_y);
T q11 = dist_x * dist_y;
// compute gradient of input
if (bottom_data_diff) {
platform::CudaAtomicAdd(
bottom_data_diff + bottom_index + y0 * width + x0,
q00 * diff_val);
platform::CudaAtomicAdd(
bottom_data_diff + bottom_index + y1 * width + x0,
q01 * diff_val);
platform::CudaAtomicAdd(
bottom_data_diff + bottom_index + y0 * width + x1,
q10 * diff_val);
platform::CudaAtomicAdd(
bottom_data_diff + bottom_index + y1 * width + x1,
q11 * diff_val);
}
// compute gradient of trans
if (no_trans || bottom_trans_diff == NULL) {
continue;
}
T u00 = offset_bottom_data[bottom_index_base + y0 * width + x0];
T u01 = offset_bottom_data[bottom_index_base + y1 * width + x0];
T u10 = offset_bottom_data[bottom_index_base + y0 * width + x1];
T u11 = offset_bottom_data[bottom_index_base + y1 * width + x1];
T diff_x = (u11 * dist_y + u10 * (1 - dist_y) - u01 * dist_y -
u00 * (1 - dist_y)) *
trans_std * diff_val;
diff_x *= roi_width;
T diff_y = (u11 * dist_x + u01 * (1 - dist_x) - u10 * dist_x -
u00 * (1 - dist_x)) *
trans_std * diff_val;
diff_y *= roi_height;
platform::CudaAtomicAdd(bottom_trans_diff + trans_index_x, diff_x);
platform::CudaAtomicAdd(bottom_trans_diff + trans_index_y, diff_y);
}
}
}
}
template <typename DeviceContext, typename T>
class DeformablePSROIPoolGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const Tensor* input = ctx.Input<Tensor>("Input");
const LoDTensor* rois = ctx.Input<LoDTensor>("ROIs");
const Tensor* trans = ctx.Input<Tensor>("Trans");
const Tensor* top_count = ctx.Input<Tensor>("TopCount");
const Tensor* output_grad =
ctx.Input<Tensor>(framework::GradVarName("Output"));
Tensor* input_grad = ctx.Output<Tensor>(framework::GradVarName("Input"));
Tensor* trans_grad = ctx.Output<Tensor>(framework::GradVarName("Trans"));
phi::funcs::SetConstant<DeviceContext, T> set_zero;
auto& dev_ctx = ctx.cuda_device_context();
if (input_grad) {
input_grad->mutable_data<T>(ctx.GetPlace());
set_zero(dev_ctx, input_grad, static_cast<T>(0));
}
if (trans_grad) {
trans_grad->mutable_data<T>(ctx.GetPlace());
set_zero(dev_ctx, trans_grad, static_cast<T>(0));
}
auto no_trans = ctx.Attr<bool>("no_trans");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto output_dim = ctx.Attr<int>("output_dim");
auto group_size = ctx.Attr<std::vector<int>>("group_size");
auto group_height = group_size[0];
auto group_width = group_size[1];
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto part_size = ctx.Attr<std::vector<int>>("part_size");
auto part_height = part_size[0];
auto part_width = part_size[1];
auto sample_per_part = ctx.Attr<int>("sample_per_part");
auto trans_std = ctx.Attr<float>("trans_std");
const int batch = static_cast<int>(input->dims()[0]);
const int channels = static_cast<int>(input->dims()[1]);
const int height = static_cast<int>(input->dims()[2]);
const int width = static_cast<int>(input->dims()[3]);
const int channels_trans = no_trans ? 2 : trans->dims()[1];
const int num_rois = rois->dims()[0];
const int count = num_rois * output_dim * pooled_height * pooled_width;
const int num_classes = no_trans ? 1 : channels_trans / 2;
const int channels_each_class =
no_trans ? output_dim : output_dim / num_classes;
const T* top_diff = output_grad->data<T>();
const T* bottom_data = input->data<T>();
const T* bottom_rois = rois->data<T>();
const T* bottom_trans = no_trans ? NULL : trans->data<T>();
T* bottom_data_diff = NULL;
T* bottom_trans_diff = NULL;
if (input_grad) {
bottom_data_diff = input_grad->mutable_data<T>(ctx.GetPlace());
}
if (trans_grad) {
bottom_trans_diff =
no_trans ? NULL : trans_grad->mutable_data<T>(ctx.GetPlace());
}
const T* top_count_data = top_count->data<T>();
framework::Tensor roi_batch_id_list;
roi_batch_id_list.Resize({num_rois});
auto cplace = platform::CPUPlace();
int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size,
batch,
platform::errors::InvalidArgument(
"rois_batch_size should be equal to the batch_size, but "
"rois_batch_size is:%d, batch_size is:%d.",
rois_batch_size,
batch));
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(num_rois,
rois_num_with_lod,
platform::errors::InvalidArgument(
"The rois_num from input and lod must be same, but"
"rois_num from input is:%d, rois_num from lod is:%d.",
num_rois,
rois_num_with_lod));
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
int bytes = roi_batch_id_list.numel() * sizeof(int);
auto roi_ptr = memory::Alloc(dev_ctx, bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
const auto gplace = ctx.GetPlace();
memory::Copy(gplace,
roi_id_data,
cplace,
roi_batch_id_data,
bytes,
dev_ctx.stream());
hipLaunchKernelGGL(( DeformablePSROIPoolBackwardAccKernel), dim3(GET_BLOCKS(count)),
dim3(PADDLE_CUDA_NUM_THREADS),
0,
dev_ctx.stream(),
count,
top_diff,
top_count_data,
num_rois,
(T)spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
output_dim,
bottom_data_diff,
bottom_trans_diff,
bottom_data,
bottom_rois,
bottom_trans,
no_trans,
(T)trans_std,
sample_per_part,
group_height,
group_width,
part_height,
part_width,
num_classes,
channels_each_class,
roi_id_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
using CUDA = phi::GPUContext;
REGISTER_OP_CUDA_KERNEL(deformable_psroi_pooling,
ops::DeformablePSROIPoolCUDAKernel<CUDA, float>,
ops::DeformablePSROIPoolCUDAKernel<CUDA, double>);
REGISTER_OP_CUDA_KERNEL(deformable_psroi_pooling_grad,
ops::DeformablePSROIPoolGradCUDAKernel<CUDA, float>,
ops::DeformablePSROIPoolGradCUDAKernel<CUDA, double>);
| bc6965e7977ed43354a85d73c5c254306ffe8fd1.cu | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Part of the following code in this file refs to
// https://github.com/msracver/Deformable-ConvNets/blob/master/faster_rcnn/operator_cxx/deformable_psroi_pooling.cu
//
// Copyright (c) 2017 Microsoft
// Licensed under The Apache-2.0 License [see LICENSE for details]
// \file deformable_psroi_pooling.cu
// \brief
// \author Yi Li, Guodong Zhang, Jifeng Dai
#pragma once
#include <stdio.h>
#include <algorithm>
#include <iostream>
#include <limits>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/operators/deformable_psroi_pooling_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
using paddle::platform::PADDLE_CUDA_NUM_THREADS;
static inline int GET_BLOCKS(const int N) {
return (N + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS;
}
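// Bilinearly interpolates data (a height x width map) at fractional coordinates (x, y).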
template <typename T>
__device__ T bilinear_interpolation(
const T* data, const T x, const T y, const int width, const int height) {
int x1 = floor(x);
int x2 = ceil(x);
int y1 = floor(y);
int y2 = ceil(y);
T dist_x = static_cast<T>(x - x1);
T dist_y = static_cast<T>(y - y1);
T value11 = data[y1 * width + x1];
T value12 = data[y2 * width + x1];
T value21 = data[y1 * width + x2];
T value22 = data[y2 * width + x2];
T value = (1 - dist_x) * (1 - dist_y) * value11 +
(1 - dist_x) * dist_y * value12 + dist_x * (1 - dist_y) * value21 +
dist_x * dist_y * value22;
return value;
}
template <typename T>
__global__ void DeformablePSROIPoolForwardKernel(const int count,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const T* bottom_rois,
const T* bottom_trans,
const bool no_trans,
const T trans_std,
const int sample_per_part,
const int output_dim,
const int group_height,
const int group_width,
const int part_height,
const int part_width,
const int num_classes,
const int channels_each_class,
T* top_data,
T* top_count,
int* roi_batch_id_data) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
const T* offset_bottom_rois = bottom_rois + n * 4;
int roi_batch_ind = roi_batch_id_data[n];
// location of roi on feature map
T roi_start_w =
static_cast<T>(round(offset_bottom_rois[0])) * spatial_scale - 0.5;
T roi_start_h =
static_cast<T>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
T roi_end_w =
static_cast<T>(round(offset_bottom_rois[2]) + 1.) * spatial_scale - 0.5;
T roi_end_h =
static_cast<T>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
// width and height of roi
T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
T roi_height = max(roi_end_h - roi_start_h, 0.1);
// width and height of each bin
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
// sampling interval in each bin
T sub_bin_size_h = bin_size_h / static_cast<T>(sample_per_part);
T sub_bin_size_w = bin_size_w / static_cast<T>(sample_per_part);
// obtain offset of roi
int part_h = floor(static_cast<T>(ph) / pooled_height * part_height);
int part_w = floor(static_cast<T>(pw) / pooled_width * part_width);
int class_id = ctop / channels_each_class;
T trans_x =
no_trans
? static_cast<T>(0)
: bottom_trans[(((n * num_classes + class_id) * 2) * part_height +
part_h) *
part_width +
part_w] *
static_cast<T>(trans_std);
T trans_y = no_trans
? static_cast<T>(0)
: bottom_trans[(((n * num_classes + class_id) * 2 + 1) *
part_height +
part_h) *
part_width +
part_w] *
static_cast<T>(trans_std);
// location of start after adding offset
T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
wstart += trans_x * roi_width;
T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
hstart += trans_y * roi_height;
T sum = 0;
int count = 0;
int gw = floor(static_cast<T>(pw) * group_width / pooled_width);
int gh = floor(static_cast<T>(ph) * group_height / pooled_height);
gw = min(max(gw, 0), group_width - 1);
gh = min(max(gh, 0), group_height - 1);
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels) * height * width;
// sampling in each bin
for (int ih = 0; ih < sample_per_part; ih++) {
for (int iw = 0; iw < sample_per_part; iw++) {
T w = wstart + iw * sub_bin_size_w;
T h = hstart + ih * sub_bin_size_h;
if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) {
continue;
}
w = min(max(w, 0.), width - 1.);
h = min(max(h, 0.), height - 1.);
int c = (ctop * group_height + gh) * group_width + gw;
// bilinear interpolation
T val = bilinear_interpolation(
offset_bottom_data + c * height * width, w, h, width, height);
sum += val;
count++;
}
}
top_data[index] = count == 0 ? static_cast<T>(0) : sum / count;
top_count[index] = count;
}
}
template <typename DeviceContext, typename T>
class DeformablePSROIPoolCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const Tensor* input = ctx.Input<Tensor>("Input");
const LoDTensor* rois = ctx.Input<LoDTensor>("ROIs");
const Tensor* trans = ctx.Input<Tensor>("Trans");
Tensor* out = ctx.Output<Tensor>("Output");
out->mutable_data<T>(ctx.GetPlace());
Tensor* top_count = ctx.Output<Tensor>("TopCount");
top_count->mutable_data<T>(ctx.GetPlace());
auto no_trans = ctx.Attr<bool>("no_trans");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto output_dim = ctx.Attr<int>("output_dim");
auto group_size = ctx.Attr<std::vector<int>>("group_size");
auto group_height = group_size[0];
auto group_width = group_size[1];
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto part_size = ctx.Attr<std::vector<int>>("part_size");
auto part_height = part_size[0];
auto part_width = part_size[1];
auto sample_per_part = ctx.Attr<int>("sample_per_part");
auto trans_std = ctx.Attr<float>("trans_std");
const int batch = static_cast<int>(input->dims()[0]);
const int channels = static_cast<int>(input->dims()[1]);
const int height = static_cast<int>(input->dims()[2]);
const int width = static_cast<int>(input->dims()[3]);
const int channels_trans = no_trans ? 2 : trans->dims()[1];
const int num_rois = rois->dims()[0];
PADDLE_ENFORCE_EQ(
num_rois,
out->dims()[0],
platform::errors::InvalidArgument(
"The number of Input(ROIs) should be same with the number of "
"Output(Output), but received ROIs number is:%d, Output number "
"is:%d.",
num_rois,
out->dims()[0]));
const int count = num_rois * output_dim * pooled_height * pooled_width;
const int num_classes = no_trans ? 1 : channels_trans / 2;
const int channels_each_class =
no_trans ? output_dim : output_dim / num_classes;
PADDLE_ENFORCE_GE(channels_each_class,
1,
platform::errors::InvalidArgument(
"channels_each_class should not be lower than 1, but "
"channels_each_class is:%d.",
channels_each_class));
const T* bottom_data = input->data<T>();
const T* bottom_rois = rois->data<T>();
const T* bottom_trans = no_trans ? NULL : trans->data<T>();
framework::Tensor roi_batch_id_list;
roi_batch_id_list.Resize({num_rois});
auto cplace = platform::CPUPlace();
int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size,
batch,
platform::errors::InvalidArgument(
"rois_batch_size should be equal to the batch_size, but "
"rois_batch_size is:%d, batch_size is:%d.",
rois_batch_size,
batch));
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(num_rois,
rois_num_with_lod,
platform::errors::InvalidArgument(
"The rois_num from input and lod must be same, but"
"rois_num from input is:%d, rois_num from lod is:%d.",
num_rois,
rois_num_with_lod));
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
auto& dev_ctx = ctx.cuda_device_context();
int bytes = roi_batch_id_list.numel() * sizeof(int);
auto roi_ptr = memory::Alloc(dev_ctx, bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
const auto gplace = ctx.GetPlace();
memory::Copy(gplace,
roi_id_data,
cplace,
roi_batch_id_data,
bytes,
dev_ctx.stream());
T* top_data = out->mutable_data<T>(ctx.GetPlace());
T* top_count_data = top_count->mutable_data<T>(ctx.GetPlace());
DeformablePSROIPoolForwardKernel<<<GET_BLOCKS(count),
PADDLE_CUDA_NUM_THREADS,
0,
dev_ctx.stream()>>>(count,
bottom_data,
(T)spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
bottom_rois,
bottom_trans,
no_trans,
(T)trans_std,
sample_per_part,
output_dim,
group_height,
group_width,
part_height,
part_width,
num_classes,
channels_each_class,
top_data,
top_count_data,
roi_id_data);
}
};
template <typename T>
__global__ void DeformablePSROIPoolBackwardAccKernel(
const int count,
const T* top_diff,
const T* top_count,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int output_dim,
T* bottom_data_diff,
T* bottom_trans_diff,
const T* bottom_data,
const T* bottom_rois,
const T* bottom_trans,
const bool no_trans,
const T trans_std,
const int sample_per_part,
const int group_height,
const int group_width,
const int part_height,
const int part_width,
const int num_classes,
const int channels_each_class,
int* roi_batch_id_data) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
int num_box = count / pooled_height / pooled_width / output_dim;
const T* offset_bottom_rois = bottom_rois + n * 4;
int roi_batch_ind = roi_batch_id_data[n];
// location of roi on feature map
T roi_start_w =
static_cast<T>(round(offset_bottom_rois[0])) * spatial_scale - 0.5;
T roi_start_h =
static_cast<T>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
T roi_end_w =
static_cast<T>(round(offset_bottom_rois[2]) + 1.) * spatial_scale - 0.5;
T roi_end_h =
static_cast<T>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
// width and height of roi
T roi_width = max(roi_end_w - roi_start_w, 0.1);
T roi_height = max(roi_end_h - roi_start_h, 0.1);
// width and height of each bin
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
// sampling interval in each bin
T sub_bin_size_h = bin_size_h / static_cast<T>(sample_per_part);
T sub_bin_size_w = bin_size_w / static_cast<T>(sample_per_part);
// obtain offset of roi
int part_h = floor(static_cast<T>(ph) / pooled_height * part_height);
int part_w = floor(static_cast<T>(pw) / pooled_width * part_width);
int class_id = ctop / channels_each_class;
T trans_x =
no_trans
? static_cast<T>(0)
: bottom_trans[(((n * num_classes + class_id) * 2) * part_height +
part_h) *
part_width +
part_w] *
static_cast<T>(trans_std);
T trans_y = no_trans
? static_cast<T>(0)
: bottom_trans[(((n * num_classes + class_id) * 2 + 1) *
part_height +
part_h) *
part_width +
part_w] *
static_cast<T>(trans_std);
// location of start after adding offset
T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
wstart += trans_x * roi_width;
T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
hstart += trans_y * roi_height;
if (top_count[index] <= 0) {
continue;
}
T diff_val = top_diff[index] / top_count[index];
const T* offset_bottom_data =
bottom_data + roi_batch_ind * channels * height * width;
int gw = floor(static_cast<T>(pw) * group_width / pooled_width);
int gh = floor(static_cast<T>(ph) * group_height / pooled_height);
gw = min(max(gw, 0), group_width - 1);
gh = min(max(gh, 0), group_height - 1);
int c = (ctop * group_height + gh) * group_width + gw;
int bottom_index_base = c * height * width;
int bottom_index =
roi_batch_ind * channels * height * width + bottom_index_base;
int trans_index_x =
(((n * num_classes + class_id) * 2) * part_height + part_h) *
part_width +
part_w;
int trans_index_y =
(((n * num_classes + class_id) * 2 + 1) * part_height + part_h) *
part_width +
part_w;
// sampling in each bin
for (int ih = 0; ih < sample_per_part; ih++) {
for (int iw = 0; iw < sample_per_part; iw++) {
T w = wstart + iw * sub_bin_size_w;
T h = hstart + ih * sub_bin_size_h;
if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) {
continue;
}
w = min(max(w, 0.), width - 1.);
h = min(max(h, 0.), height - 1.);
int x0 = floor(w);
int x1 = ceil(w);
int y0 = floor(h);
int y1 = ceil(h);
// compute coefficient of gradient
T dist_x = w - x0, dist_y = h - y0;
T q00 = (1 - dist_x) * (1 - dist_y);
T q01 = (1 - dist_x) * dist_y;
T q10 = dist_x * (1 - dist_y);
T q11 = dist_x * dist_y;
// compute gradient of input
if (bottom_data_diff) {
platform::CudaAtomicAdd(
bottom_data_diff + bottom_index + y0 * width + x0,
q00 * diff_val);
platform::CudaAtomicAdd(
bottom_data_diff + bottom_index + y1 * width + x0,
q01 * diff_val);
platform::CudaAtomicAdd(
bottom_data_diff + bottom_index + y0 * width + x1,
q10 * diff_val);
platform::CudaAtomicAdd(
bottom_data_diff + bottom_index + y1 * width + x1,
q11 * diff_val);
}
// compute gradient of trans
if (no_trans || bottom_trans_diff == NULL) {
continue;
}
T u00 = offset_bottom_data[bottom_index_base + y0 * width + x0];
T u01 = offset_bottom_data[bottom_index_base + y1 * width + x0];
T u10 = offset_bottom_data[bottom_index_base + y0 * width + x1];
T u11 = offset_bottom_data[bottom_index_base + y1 * width + x1];
T diff_x = (u11 * dist_y + u10 * (1 - dist_y) - u01 * dist_y -
u00 * (1 - dist_y)) *
trans_std * diff_val;
diff_x *= roi_width;
T diff_y = (u11 * dist_x + u01 * (1 - dist_x) - u10 * dist_x -
u00 * (1 - dist_x)) *
trans_std * diff_val;
diff_y *= roi_height;
platform::CudaAtomicAdd(bottom_trans_diff + trans_index_x, diff_x);
platform::CudaAtomicAdd(bottom_trans_diff + trans_index_y, diff_y);
}
}
}
}
template <typename DeviceContext, typename T>
class DeformablePSROIPoolGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const Tensor* input = ctx.Input<Tensor>("Input");
const LoDTensor* rois = ctx.Input<LoDTensor>("ROIs");
const Tensor* trans = ctx.Input<Tensor>("Trans");
const Tensor* top_count = ctx.Input<Tensor>("TopCount");
const Tensor* output_grad =
ctx.Input<Tensor>(framework::GradVarName("Output"));
Tensor* input_grad = ctx.Output<Tensor>(framework::GradVarName("Input"));
Tensor* trans_grad = ctx.Output<Tensor>(framework::GradVarName("Trans"));
phi::funcs::SetConstant<DeviceContext, T> set_zero;
auto& dev_ctx = ctx.cuda_device_context();
if (input_grad) {
input_grad->mutable_data<T>(ctx.GetPlace());
set_zero(dev_ctx, input_grad, static_cast<T>(0));
}
if (trans_grad) {
trans_grad->mutable_data<T>(ctx.GetPlace());
set_zero(dev_ctx, trans_grad, static_cast<T>(0));
}
auto no_trans = ctx.Attr<bool>("no_trans");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto output_dim = ctx.Attr<int>("output_dim");
auto group_size = ctx.Attr<std::vector<int>>("group_size");
auto group_height = group_size[0];
auto group_width = group_size[1];
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto part_size = ctx.Attr<std::vector<int>>("part_size");
auto part_height = part_size[0];
auto part_width = part_size[1];
auto sample_per_part = ctx.Attr<int>("sample_per_part");
auto trans_std = ctx.Attr<float>("trans_std");
const int batch = static_cast<int>(input->dims()[0]);
const int channels = static_cast<int>(input->dims()[1]);
const int height = static_cast<int>(input->dims()[2]);
const int width = static_cast<int>(input->dims()[3]);
const int channels_trans = no_trans ? 2 : trans->dims()[1];
const int num_rois = rois->dims()[0];
const int count = num_rois * output_dim * pooled_height * pooled_width;
const int num_classes = no_trans ? 1 : channels_trans / 2;
const int channels_each_class =
no_trans ? output_dim : output_dim / num_classes;
const T* top_diff = output_grad->data<T>();
const T* bottom_data = input->data<T>();
const T* bottom_rois = rois->data<T>();
const T* bottom_trans = no_trans ? NULL : trans->data<T>();
T* bottom_data_diff = NULL;
T* bottom_trans_diff = NULL;
if (input_grad) {
bottom_data_diff = input_grad->mutable_data<T>(ctx.GetPlace());
}
if (trans_grad) {
bottom_trans_diff =
no_trans ? NULL : trans_grad->mutable_data<T>(ctx.GetPlace());
}
const T* top_count_data = top_count->data<T>();
framework::Tensor roi_batch_id_list;
roi_batch_id_list.Resize({num_rois});
auto cplace = platform::CPUPlace();
int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size,
batch,
platform::errors::InvalidArgument(
"rois_batch_size should be equal to the batch_size, but "
"rois_batch_size is:%d, batch_size is:%d.",
rois_batch_size,
batch));
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(num_rois,
rois_num_with_lod,
platform::errors::InvalidArgument(
"The rois_num from input and lod must be same, but"
"rois_num from input is:%d, rois_num from lod is:%d.",
num_rois,
rois_num_with_lod));
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
int bytes = roi_batch_id_list.numel() * sizeof(int);
auto roi_ptr = memory::Alloc(dev_ctx, bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
const auto gplace = ctx.GetPlace();
memory::Copy(gplace,
roi_id_data,
cplace,
roi_batch_id_data,
bytes,
dev_ctx.stream());
DeformablePSROIPoolBackwardAccKernel<<<GET_BLOCKS(count),
PADDLE_CUDA_NUM_THREADS,
0,
dev_ctx.stream()>>>(
count,
top_diff,
top_count_data,
num_rois,
(T)spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
output_dim,
bottom_data_diff,
bottom_trans_diff,
bottom_data,
bottom_rois,
bottom_trans,
no_trans,
(T)trans_std,
sample_per_part,
group_height,
group_width,
part_height,
part_width,
num_classes,
channels_each_class,
roi_id_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
using CUDA = phi::GPUContext;
REGISTER_OP_CUDA_KERNEL(deformable_psroi_pooling,
ops::DeformablePSROIPoolCUDAKernel<CUDA, float>,
ops::DeformablePSROIPoolCUDAKernel<CUDA, double>);
REGISTER_OP_CUDA_KERNEL(deformable_psroi_pooling_grad,
ops::DeformablePSROIPoolGradCUDAKernel<CUDA, float>,
ops::DeformablePSROIPoolGradCUDAKernel<CUDA, double>);
|
b5e724b965e11e9536783adc1846bfd0344cb161.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "cudaKernel.cuh"
static hipStream_t dataTransferStream;
static hipStream_t modelingStream;
extern "C"
int launch_cudaMalloc(void **devPtr, size_t size) {
if (hipMalloc(devPtr, size) != hipSuccess) {
return -1;
}
return 0;
}
extern "C"
int launch_cudaMallocHost(void **hostPtr, size_t size) {
if (hipHostMalloc(hostPtr, size) != hipSuccess) {
return -1;
}
return 0;
}
extern "C"
int launch_cudaMemset(void *devPtr, int value, size_t count) {
if (hipMemset(devPtr, value, count) != hipSuccess) {
return -1;
}
return 0;
}
extern "C"
int launch_cudaMemcpy(void *dst, const void *src, size_t count, int direction) {
if (direction == 0) {
if (hipMemcpy(dst, src, count, hipMemcpyHostToDevice) != hipSuccess) {
return -1;
}
} else {
if (hipMemcpy(dst, src, count, hipMemcpyDeviceToHost) != hipSuccess) {
return -1;
}
}
return 0;
}
extern "C"
void launch_cudaMemcpyAsync(void *dst, const void *src, size_t count, int direction) {
if (direction == 0) {
if (hipMemcpyAsync(dst, src, count, hipMemcpyHostToDevice, dataTransferStream) != hipSuccess) {
fprintf(stdout, "Error hipMemcpyAsync from host to device!\n");
exit(0);
}
} else {
if (hipMemcpyAsync(dst, src, count, hipMemcpyDeviceToHost, dataTransferStream) != hipSuccess) {
fprintf(stdout, "Error hipMemcpyAsync from device to host!\n");
exit(0);
}
}
}
extern "C"
void launch_cudaFree(void *devPtr) {
hipFree(devPtr);
}
extern "C"
void launch_cudaFreeHost(void *hostPtr) {
hipHostFree(hostPtr);
}
extern "C"
void launch_dstress_dxy(float *fx_in, float *fy_in, float *f_out, int isForward) {
if (isForward) {
hipLaunchKernelGGL(( dstress_dxy_forward), dim3(dim3(NX/32, NY/32, NZ)), dim3(dim3(32, 32, 1)), 0, modelingStream, fx_in, fy_in, f_out);
} else {
hipLaunchKernelGGL(( dstress_dxy_backward), dim3(dim3(NX/32, NY/32, NZ)), dim3(dim3(32, 32, 1)), 0, modelingStream, fx_in, fy_in, f_out);
}
}
extern "C"
void launch_dstress_dz(float *f_in, float *f_out, int isForward) {
if (isForward) {
hipLaunchKernelGGL(( dstress_dz_forward), dim3(dim3(NX/32, NY, 1)), dim3(dim3(32, 32, 1)), 0, modelingStream, f_in, f_out);
} else {
hipLaunchKernelGGL(( dstress_dz_backward), dim3(dim3(NX/32, NY, 1)), dim3(dim3(32, 32, 1)), 0, modelingStream, f_in, f_out);
}
}
extern "C"
void launch_dvxy_dxy(float *vx_in, float *vy_in, float *lambda_in, float *dsxy_out, float *dsxx_out, float *dsyy_out, float *dszz_out, int isForward) {
if (isForward) {
hipLaunchKernelGGL(( dvxy_dxy_forward), dim3(dim3(NX/32, NY/32, NZ)), dim3(dim3(32, 32, 1)), 0, modelingStream, vx_in, vy_in, lambda_in, dsxy_out, dsxx_out, dsyy_out, dszz_out);
} else {
hipLaunchKernelGGL(( dvxy_dxy_backward), dim3(dim3(NX/32, NY/32, NZ)), dim3(dim3(32, 32, 1)), 0, modelingStream, vx_in, vy_in, lambda_in, dsxy_out, dsxx_out, dsyy_out, dszz_out);
}
}
extern "C"
void launch_dvxz_dxz(float *vx_in, float *vz_in, float *lambda_in, float *dsxz_out, float *dsxx_out, float *dsyy_out, float *dszz_out, int isForward) {
if (isForward) {
hipLaunchKernelGGL(( dvxz_dxz_forward), dim3(dim3(NX/32, NY, NZ/32)), dim3(dim3(32, 32, 1)), 0, modelingStream, vx_in, vz_in, lambda_in, dsxz_out, dsxx_out, dsyy_out, dszz_out);
} else {
hipLaunchKernelGGL(( dvxz_dxz_backward), dim3(dim3(NX/32, NY, NZ/32)), dim3(dim3(32, 32, 1)), 0, modelingStream, vx_in, vz_in, lambda_in, dsxz_out, dsxx_out, dsyy_out, dszz_out);
}
}
extern "C"
void launch_dvelocity_dy(float *f_in, float *f_out, int isForward) {
if (isForward) {
hipLaunchKernelGGL(( dvelocity_dy_forward_32x32), dim3(dim3(NX/32, NY/32, NZ)), dim3(dim3(32, 32, 1)), 0, modelingStream, f_in, f_out);
} else {
hipLaunchKernelGGL(( dvelocity_dy_backward_32x32), dim3(dim3(NX/32, NY/32, NZ)), dim3(dim3(32, 32, 1)), 0, modelingStream, f_in, f_out);
}
}
extern "C"
void launch_dvelocity_dz(float *f_in, float *f_out, int isForward) {
if (isForward) {
hipLaunchKernelGGL(( dvelocity_dz_forward), dim3(dim3(NX/32, NY, 1)), dim3(dim3(32, 32, 1)), 0, modelingStream, f_in, f_out);
} else {
hipLaunchKernelGGL(( dvelocity_dz_backward), dim3(dim3(NX/32, NY, 1)), dim3(dim3(32, 32, 1)), 0, modelingStream, f_in, f_out);
}
}
extern "C"
void launch_pml_frontv(float *vx, float *vy, float *vz, float *vxx, float *vyx, float *vzx, float *dvxx, float *dvyx, float *dvzx, float *dsxx, float *dsyy, float *dszz, float *dsxy, float *dsxz, float *lambda_in, float scaleA, float scaleB, int isForward) {
if (isForward) {
hipLaunchKernelGGL(( pml_frontv_forward), dim3(dim3(NY/32, NZ, 1)), dim3(dim3(32, 32, 1)), 0, modelingStream, vx, vy, vz, vxx, vyx, vzx, dvxx, dvyx, dvzx, dsxx, dsyy, dszz, dsxy, dsxz, lambda_in, scaleA, scaleB);
} else {
hipLaunchKernelGGL(( pml_frontv_backward), dim3(dim3(NY/32, NZ, 1)), dim3(dim3(32, 32, 1)), 0, modelingStream, vx, vy, vz, vxx, vyx, vzx, dvxx, dvyx, dvzx, dsxx, dsyy, dszz, dsxy, dsxz, lambda_in, scaleA, scaleB);
}
}
extern "C"
void launch_pml_fronts(float *sxx, float *sxy, float *sxz, float *sxxx, float *sxyx, float *sxzx, float *dsxxx, float *dsxyx, float *dsxzx, float *dvx, float *dvy, float *dvz, float scaleA, float scaleB, int isForward) {
if (isForward) {
hipLaunchKernelGGL(( pml_fronts_forward), dim3(dim3(NY/32, NZ, 1)), dim3(dim3(32, 32, 1)), 0, modelingStream, sxx, sxy, sxz, sxxx, sxyx, sxzx, dsxxx, dsxyx, dsxzx, dvx, dvy, dvz, scaleA, scaleB);
} else {
hipLaunchKernelGGL(( pml_fronts_backward), dim3(dim3(NY/32, NZ, 1)), dim3(dim3(32, 32, 1)), 0, modelingStream, sxx, sxy, sxz, sxxx, sxyx, sxzx, dsxxx, dsxyx, dsxzx, dvx, dvy, dvz, scaleA, scaleB);
}
}
extern "C"
void launch_pml_backv(float *vx, float *vy, float *vz, float *vxx, float *vyx, float *vzx, float *dvxx, float *dvyx, float *dvzx, float *dsxx, float *dsyy, float *dszz, float *dsxy, float *dsxz, float *lambda_in, float scaleA, float scaleB, int isForward) {
if (isForward) {
hipLaunchKernelGGL(( pml_backv_forward), dim3(dim3(NY/32, NZ, 1)), dim3(dim3(32, 32, 1)), 0, modelingStream, vx, vy, vz, vxx, vyx, vzx, dvxx, dvyx, dvzx, dsxx, dsyy, dszz, dsxy, dsxz, lambda_in, scaleA, scaleB);
} else {
hipLaunchKernelGGL(( pml_backv_backward), dim3(dim3(NY/32, NZ, 1)), dim3(dim3(32, 32, 1)), 0, modelingStream, vx, vy, vz, vxx, vyx, vzx, dvxx, dvyx, dvzx, dsxx, dsyy, dszz, dsxy, dsxz, lambda_in, scaleA, scaleB);
}
}
extern "C"
void launch_pml_backs(float *sxx, float *sxy, float *sxz, float *sxxx, float *sxyx, float *sxzx, float *dsxxx, float *dsxyx, float *dsxzx, float *dvx, float *dvy, float *dvz, float scaleA, float scaleB, int isForward) {
if (isForward) {
hipLaunchKernelGGL(( pml_backs_forward), dim3(dim3(NY/32, NZ, 1)), dim3(dim3(32, 32, 1)), 0, modelingStream, sxx, sxy, sxz, sxxx, sxyx, sxzx, dsxxx, dsxyx, dsxzx, dvx, dvy, dvz, scaleA, scaleB);
} else {
hipLaunchKernelGGL(( pml_backs_backward), dim3(dim3(NY/32, NZ, 1)), dim3(dim3(32, 32, 1)), 0, modelingStream, sxx, sxy, sxz, sxxx, sxyx, sxzx, dsxxx, dsxyx, dsxzx, dvx, dvy, dvz, scaleA, scaleB);
}
}
extern "C"
void launch_pml_leftv(float *vx, float *vy, float *vz, float *vxy, float *vyy, float *vzy, float *dvxy, float *dvyy, float *dvzy, float *dsxx, float *dsyy, float *dszz, float *dsxy, float *dsyz, float *lambda_in, float scaleA, float scaleB, int isForward) {
if (isForward) {
hipLaunchKernelGGL(( pml_leftv_forward), dim3(dim3(NX/32, NZ, 1)), dim3(dim3(32, PML_NUM, 1)), 0, modelingStream, vx, vy, vz, vxy, vyy, vzy, dvxy, dvyy, dvzy, dsxx, dsyy, dszz, dsxy, dsyz, lambda_in, scaleA, scaleB);
} else {
hipLaunchKernelGGL(( pml_leftv_backward), dim3(dim3(NX/32, NZ, 1)), dim3(dim3(32, PML_NUM, 1)), 0, modelingStream, vx, vy, vz, vxy, vyy, vzy, dvxy, dvyy, dvzy, dsxx, dsyy, dszz, dsxy, dsyz, lambda_in, scaleA, scaleB);
}
}
extern "C"
void launch_pml_lefts(float *sxy, float *syy, float *syz, float *sxyy, float *syyy, float *syzy, float *dsxyy, float *dsyyy, float *dsyzy, float *dvx, float *dvy, float *dvz, float scaleA, float scaleB, int isForward) {
if (isForward) {
hipLaunchKernelGGL(( pml_lefts_forward), dim3(dim3(NX/32, NZ, 1)), dim3(dim3(32, PML_NUM, 1)), 0, modelingStream, sxy, syy, syz, sxyy, syyy, syzy, dsxyy, dsyyy, dsyzy, dvx, dvy, dvz, scaleA, scaleB);
} else {
hipLaunchKernelGGL(( pml_lefts_backward), dim3(dim3(NX/32, NZ, 1)), dim3(dim3(32, PML_NUM, 1)), 0, modelingStream, sxy, syy, syz, sxyy, syyy, syzy, dsxyy, dsyyy, dsyzy, dvx, dvy, dvz, scaleA, scaleB);
}
}
extern "C"
void launch_pml_rightv(float *vx, float *vy, float *vz, float *vxy, float *vyy, float *vzy, float *dvxy, float *dvyy, float *dvzy, float *dsxx, float *dsyy, float *dszz, float *dsxy, float *dsyz, float *lambda_in, float scaleA, float scaleB, int isForward) {
if (isForward) {
hipLaunchKernelGGL(( pml_rightv_forward), dim3(dim3(NX/32, NZ, 1)), dim3(dim3(32, PML_NUM, 1)), 0, modelingStream, vx, vy, vz, vxy, vyy, vzy, dvxy, dvyy, dvzy, dsxx, dsyy, dszz, dsxy, dsyz, lambda_in, scaleA, scaleB);
} else {
hipLaunchKernelGGL(( pml_rightv_backward), dim3(dim3(NX/32, NZ, 1)), dim3(dim3(32, PML_NUM, 1)), 0, modelingStream, vx, vy, vz, vxy, vyy, vzy, dvxy, dvyy, dvzy, dsxx, dsyy, dszz, dsxy, dsyz, lambda_in, scaleA, scaleB);
}
}
extern "C"
void launch_pml_rights(float *sxy, float *syy, float *syz, float *sxyy, float *syyy, float *syzy, float *dsxyy, float *dsyyy, float *dsyzy, float *dvx, float *dvy, float *dvz, float scaleA, float scaleB, int isForward) {
if (isForward) {
hipLaunchKernelGGL(( pml_rights_forward), dim3(dim3(NX/32, NZ, 1)), dim3(dim3(32, PML_NUM, 1)), 0, modelingStream, sxy, syy, syz, sxyy, syyy, syzy, dsxyy, dsyyy, dsyzy, dvx, dvy, dvz, scaleA, scaleB);
} else {
hipLaunchKernelGGL(( pml_rights_backward), dim3(dim3(NX/32, NZ, 1)), dim3(dim3(32, PML_NUM, 1)), 0, modelingStream, sxy, syy, syz, sxyy, syyy, syzy, dsxyy, dsyyy, dsyzy, dvx, dvy, dvz, scaleA, scaleB);
}
}
extern "C"
void launch_pml_bottomv(float *vx, float *vy, float *vz, float *vxz, float *vyz, float *vzz, float *dvxz, float *dvyz, float *dvzz, float *dsxx, float *dsyy, float *dszz, float *dsxz, float *dsyz, float *lambda_in, float scaleA, float scaleB, int isForward) {
if (isForward) {
hipLaunchKernelGGL(( pml_bottomv_forward), dim3(dim3(NX/32, NY, 1)), dim3(dim3(32, PML_NUM, 1)), 0, modelingStream, vx, vy, vz, vxz, vyz, vzz, dvxz, dvyz, dvzz, dsxx, dsyy, dszz, dsxz, dsyz, lambda_in, scaleA, scaleB);
} else {
hipLaunchKernelGGL(( pml_bottomv_backward), dim3(dim3(NX/32, NY, 1)), dim3(dim3(32, PML_NUM, 1)), 0, modelingStream, vx, vy, vz, vxz, vyz, vzz, dvxz, dvyz, dvzz, dsxx, dsyy, dszz, dsxz, dsyz, lambda_in, scaleA, scaleB);
}
}
extern "C"
void launch_pml_bottoms(float *sxz, float *syz, float *szz, float *sxzz, float *syzz, float *szzz, float *dsxzz, float *dsyzz, float *dszzz, float *dvx, float *dvy, float *dvz, float scaleA, float scaleB, int isForward) {
if (isForward) {
hipLaunchKernelGGL(( pml_bottoms_forward), dim3(dim3(NX/32, NY, 1)), dim3(dim3(32, PML_NUM, 1)), 0, modelingStream, sxz, syz, szz, sxzz, syzz, szzz, dsxzz, dsyzz, dszzz, dvx, dvy, dvz, scaleA, scaleB);
} else {
hipLaunchKernelGGL(( pml_bottoms_backward), dim3(dim3(NX/32, NY, 1)), dim3(dim3(32, PML_NUM, 1)), 0, modelingStream, sxz, syz, szz, sxzz, syzz, szzz, dsxzz, dsyzz, dszzz, dvx, dvy, dvz, scaleA, scaleB);
}
}
extern "C"
void launch_free_surface(float *dsxz, float *dsyz, float *dszz, float *dsxx, float *dsyy, float *lambda_in, float *vx, float *vy, float *vz, int isForward) {
if (isForward) {
hipLaunchKernelGGL(( free_surface_forward), dim3(NY), dim3(NX), 0, modelingStream, dsxz, dsyz, dszz, dsxx, dsyy, lambda_in);
} else {
hipLaunchKernelGGL(( free_surface_backward), dim3(NY), dim3(NX), 0, modelingStream, dsxz, dsyz, dszz, dsxx, dsyy, lambda_in, vx, vy, vz);
}
}
extern "C"
int launch_set_modeling_parameter(float dt, float dx) {
if (set_constant_memory(dt, dx) != 0 ||
hipStreamCreate(&dataTransferStream) != hipSuccess ||
hipStreamCreate(&modelingStream) != hipSuccess) {
return -1;
}
return 0;
}
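// The routines below appear to form the adjoint/imaging path (inferred from
// the kernel names and arguments): backward_xcoor cross-correlates forward
// and adjoint stress fields into lambda/mu sensitivity kernels,
// kernel_processing/add_kernel post-process and accumulate them, and
// update_mu applies a gradient step to the model.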
extern "C"
void launch_kernel_backward_xcoor(float *sxx1, float *sxy1, float *sxz1, float *syy1, float *syz1, float *szz1, float *sxx2, float *sxy2, float *sxz2, float *syy2, float *syz2, float *szz2, float *Klambda, float *Kmu, int zNum) {
hipLaunchKernelGGL(( backward_xcoor), dim3(dim3(NY, zNum, 1)), dim3(NX), 0, modelingStream, sxx1, sxy1, sxz1, syy1, syz1, szz1, sxx2, sxy2, sxz2, syy2, syz2, szz2, Klambda, Kmu);
}
extern "C"
void launch_kernel_finalize(float *Klambda, float *Kmu, float *lambda, float *mu, int zNum) {
hipLaunchKernelGGL(( kernel_processing), dim3(dim3(NY, zNum, 1)), dim3(NX), 0, modelingStream, Klambda, Kmu, lambda, mu);
}
extern "C"
void launch_station_clip(float *Klambda0, float *Kmu0, int *iX, int *iY, int numberOfStations) {
hipLaunchKernelGGL(( station_clip), dim3(dim3(numberOfStations, 5)), dim3(dim3(16, 16)), 0, modelingStream, Klambda0, Kmu0, iX, iY);
}
extern "C"
void launch_add_kernel(float *Klambda0, float *Kmu0, float *Klambda, float *Kmu, int zNum) {
hipLaunchKernelGGL(( add_kernel), dim3(dim3(NY, zNum, 1)), dim3(NX), 0, modelingStream, Klambda0, Kmu0, Klambda, Kmu);
}
extern "C"
void launch_update_stress(float *sxx, float *dsxx, float *syy, float *dsyy, float *szz, float *dszz, float *sxy, float *dsxy, float *sxz, float *dsxz, float *syz, float *dsyz, float *mu, float scaleA, float scaleB) {
hipLaunchKernelGGL(( update_stress), dim3(dim3(NY, NZ, 1)), dim3(NX), 0, modelingStream, sxx, dsxx, syy, dsyy, szz, dszz, sxy, dsxy, sxz, dsxz, syz, dsyz, mu, scaleA, scaleB);
}
extern "C"
void launch_update_velocity(float *fx, float *dfx, float *fy, float *dfy, float *fz, float *dfz, float *rho, float scaleA, float scaleB) {
hipLaunchKernelGGL(( update_velocity), dim3(dim3(NY, NZ, 1)), dim3(NX), 0, modelingStream, fx, dfx, fy, dfy, fz, dfz, rho, scaleA, scaleB);
}
extern "C"
void launch_source_inject_forward(float *f, int x, int y, float *skx, float *sky, float stf) {
hipLaunchKernelGGL(( source_inject), dim3(1), dim3(dim3(16, 16, 1)), 0, modelingStream, f, x, y, skx, sky, stf);
}
extern "C"
void launch_source_inject_forward_gaussian(float *f, int x, int y, float skx, float sky, float stf) {
hipLaunchKernelGGL(( source_inject_gaussian), dim3(1), dim3(dim3(16, 16, 1)), 0, modelingStream, f, x, y, skx, sky, stf);
}
extern "C"
void launch_source_inject_backward(int *iX, int *iY, float *kx, float *ky, float *vz, float *f, int numberOfStations) {
hipLaunchKernelGGL(( source_inject_station), dim3(numberOfStations), dim3(dim3(16, 16, 1)), 0, modelingStream, iX, iY, kx, ky, vz, f);
}
extern "C"
void launch_source_inject_backward_gaussian(int *iX, int *iY, float *kx, float *ky, float *vz, float *f, int numberOfStations) {
hipLaunchKernelGGL(( source_inject_station_gaussian), dim3(numberOfStations), dim3(dim3(16, 16, 1)), 0, modelingStream, iX, iY, kx, ky, vz, f);
}
extern "C"
void launch_station_extract(int *iX, int *iY, float *x, float *y, float *vz_out, float *vz_in, int myNumberOfStations) {
hipLaunchKernelGGL(( station_extract), dim3(myNumberOfStations), dim3(dim3(16, 16, 1)), 0, modelingStream, iX, iY, x, y, vz_out, vz_in);
}
extern "C"
void launch_update_model(float stepLength, float *Kmu, float *mu, float *rho, int zNum) {
hipLaunchKernelGGL(( update_mu), dim3(dim3(NY, zNum, 1)), dim3(NX), 0, modelingStream, stepLength, Kmu, mu, rho);
}
extern "C"
void launch_cuda_device_synchronize() {
hipDeviceSynchronize();
}
extern "C"
void launch_cuda_stream_synchronize() {
hipStreamSynchronize(dataTransferStream);
hipStreamSynchronize(modelingStream);
}
extern "C"
void launch_delete_modeling_parameter() {
hipStreamDestroy(dataTransferStream);
hipStreamDestroy(modelingStream);
}
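// The block below is a leftover device-query and bandwidth micro-benchmark
// for dstress_dx_forward; it is kept commented out and never compiled.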
/*
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
printf("\nDevice Name: %s\n", prop.name);
printf("Total global memory: %fGB\n", prop.totalGlobalMem/1024./1024./1024.);
printf("Total const memory: %u\n", prop.totalConstMem);
printf("Shared memory per block: %u\n", prop.sharedMemPerBlock);
printf("Texture alignment: %u\n", prop.textureAlignment);
printf("Regs per block: %d\n", prop.regsPerBlock);
printf("Warp size: %d\n", prop.warpSize);
printf("Max threads per block: %u\n", prop.maxThreadsPerBlock);
printf("Multi processor counts: %d\n", prop.multiProcessorCount);
printf("Compute Capability: %d.%d\n\n", prop.major, prop.minor);
hipEvent_t startEvent, stopEvent;
float time;
hipEventCreate(&startEvent);
hipEventCreate(&stopEvent);
hipEventRecord(startEvent, 0);
for (int i = 0; i < 1000; i++) {
dstress_dx_forward<<<dim3(NY/32, NZ, 1), dim3(32, 32)>>>(f_in, f_out);
}
hipEventRecord(stopEvent, 0);
hipEventSynchronize(stopEvent);
hipEventElapsedTime(&time, startEvent, stopEvent);
fprintf(stdout, "Total running time: %f\n", time);
fprintf(stdout, "Bandwidth (GB/s): %f\n", 2 * 256. * 256. * 256. * sizeof(float) * 1000. / 1024. / 1024 / time);
*/ | b5e724b965e11e9536783adc1846bfd0344cb161.cu | #include <stdio.h>
#include <stdlib.h>
#include "cudaKernel.cuh"
static cudaStream_t dataTransferStream;
static cudaStream_t modelingStream;
extern "C"
int launch_cudaMalloc(void **devPtr, size_t size) {
if (cudaMalloc(devPtr, size) != cudaSuccess) {
return -1;
}
return 0;
}
extern "C"
int launch_cudaMallocHost(void **hostPtr, size_t size) {
if (cudaMallocHost(hostPtr, size) != cudaSuccess) {
return -1;
}
return 0;
}
extern "C"
int launch_cudaMemset(void *devPtr, int value, size_t count) {
if (cudaMemset(devPtr, value, count) != cudaSuccess) {
return -1;
}
return 0;
}
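// Copy-direction convention used below: direction == 0 means host -> device,
// any other value means device -> host. The async variant is queued on
// dataTransferStream, presumably so transfers can overlap kernels running
// on modelingStream.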
extern "C"
int launch_cudaMemcpy(void *dst, const void *src, size_t count, int direction) {
if (direction == 0) {
if (cudaMemcpy(dst, src, count, cudaMemcpyHostToDevice) != cudaSuccess) {
return -1;
}
} else {
if (cudaMemcpy(dst, src, count, cudaMemcpyDeviceToHost) != cudaSuccess) {
return -1;
}
}
return 0;
}
extern "C"
void launch_cudaMemcpyAsync(void *dst, const void *src, size_t count, int direction) {
if (direction == 0) {
if (cudaMemcpyAsync(dst, src, count, cudaMemcpyHostToDevice, dataTransferStream) != cudaSuccess) {
fprintf(stdout, "Error cudaMemcpyAsync from host to device!\n");
exit(0);
}
} else {
if (cudaMemcpyAsync(dst, src, count, cudaMemcpyDeviceToHost, dataTransferStream) != cudaSuccess) {
fprintf(stdout, "Error cudaMemcpyAsync from device to host!\n");
exit(0);
}
}
}
extern "C"
void launch_cudaFree(void *devPtr) {
cudaFree(devPtr);
}
extern "C"
void launch_cudaFreeHost(void *hostPtr) {
cudaFreeHost(hostPtr);
}
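// The remaining launch_* wrappers are thin extern "C" entry points that pick
// the forward or backward (adjoint) kernel and enqueue it on modelingStream;
// they correspond one-to-one to the hipified versions of the same file above.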
extern "C"
void launch_dstress_dxy(float *fx_in, float *fy_in, float *f_out, int isForward) {
if (isForward) {
dstress_dxy_forward<<<dim3(NX/32, NY/32, NZ), dim3(32, 32, 1), 0, modelingStream>>>(fx_in, fy_in, f_out);
} else {
dstress_dxy_backward<<<dim3(NX/32, NY/32, NZ), dim3(32, 32, 1), 0, modelingStream>>>(fx_in, fy_in, f_out);
}
}
extern "C"
void launch_dstress_dz(float *f_in, float *f_out, int isForward) {
if (isForward) {
dstress_dz_forward<<<dim3(NX/32, NY, 1), dim3(32, 32, 1), 0, modelingStream>>>(f_in, f_out);
} else {
dstress_dz_backward<<<dim3(NX/32, NY, 1), dim3(32, 32, 1), 0, modelingStream>>>(f_in, f_out);
}
}
extern "C"
void launch_dvxy_dxy(float *vx_in, float *vy_in, float *lambda_in, float *dsxy_out, float *dsxx_out, float *dsyy_out, float *dszz_out, int isForward) {
if (isForward) {
dvxy_dxy_forward<<<dim3(NX/32, NY/32, NZ), dim3(32, 32, 1), 0, modelingStream>>> (vx_in, vy_in, lambda_in, dsxy_out, dsxx_out, dsyy_out, dszz_out);
} else {
dvxy_dxy_backward<<<dim3(NX/32, NY/32, NZ), dim3(32, 32, 1), 0, modelingStream>>> (vx_in, vy_in, lambda_in, dsxy_out, dsxx_out, dsyy_out, dszz_out);
}
}
extern "C"
void launch_dvxz_dxz(float *vx_in, float *vz_in, float *lambda_in, float *dsxz_out, float *dsxx_out, float *dsyy_out, float *dszz_out, int isForward) {
if (isForward) {
dvxz_dxz_forward<<<dim3(NX/32, NY, NZ/32), dim3(32, 32, 1), 0, modelingStream>>> (vx_in, vz_in, lambda_in, dsxz_out, dsxx_out, dsyy_out, dszz_out);
} else {
dvxz_dxz_backward<<<dim3(NX/32, NY, NZ/32), dim3(32, 32, 1), 0, modelingStream>>> (vx_in, vz_in, lambda_in, dsxz_out, dsxx_out, dsyy_out, dszz_out);
}
}
extern "C"
void launch_dvelocity_dy(float *f_in, float *f_out, int isForward) {
if (isForward) {
dvelocity_dy_forward_32x32<<<dim3(NX/32, NY/32, NZ), dim3(32, 32, 1), 0, modelingStream>>>(f_in, f_out);
} else {
dvelocity_dy_backward_32x32<<<dim3(NX/32, NY/32, NZ), dim3(32, 32, 1), 0, modelingStream>>>(f_in, f_out);
}
}
extern "C"
void launch_dvelocity_dz(float *f_in, float *f_out, int isForward) {
if (isForward) {
dvelocity_dz_forward<<<dim3(NX/32, NY, 1), dim3(32, 32, 1), 0, modelingStream>>>(f_in, f_out);
} else {
dvelocity_dz_backward<<<dim3(NX/32, NY, 1), dim3(32, 32, 1), 0, modelingStream>>>(f_in, f_out);
}
}
extern "C"
void launch_pml_frontv(float *vx, float *vy, float *vz, float *vxx, float *vyx, float *vzx, float *dvxx, float *dvyx, float *dvzx, float *dsxx, float *dsyy, float *dszz, float *dsxy, float *dsxz, float *lambda_in, float scaleA, float scaleB, int isForward) {
if (isForward) {
pml_frontv_forward<<<dim3(NY/32, NZ, 1), dim3(32, 32, 1), 0, modelingStream>>>(vx, vy, vz, vxx, vyx, vzx, dvxx, dvyx, dvzx, dsxx, dsyy, dszz, dsxy, dsxz, lambda_in, scaleA, scaleB);
} else {
pml_frontv_backward<<<dim3(NY/32, NZ, 1), dim3(32, 32, 1), 0, modelingStream>>>(vx, vy, vz, vxx, vyx, vzx, dvxx, dvyx, dvzx, dsxx, dsyy, dszz, dsxy, dsxz, lambda_in, scaleA, scaleB);
}
}
extern "C"
void launch_pml_fronts(float *sxx, float *sxy, float *sxz, float *sxxx, float *sxyx, float *sxzx, float *dsxxx, float *dsxyx, float *dsxzx, float *dvx, float *dvy, float *dvz, float scaleA, float scaleB, int isForward) {
if (isForward) {
pml_fronts_forward<<<dim3(NY/32, NZ, 1), dim3(32, 32, 1), 0, modelingStream>>>(sxx, sxy, sxz, sxxx, sxyx, sxzx, dsxxx, dsxyx, dsxzx, dvx, dvy, dvz, scaleA, scaleB);
} else {
pml_fronts_backward<<<dim3(NY/32, NZ, 1), dim3(32, 32, 1), 0, modelingStream>>>(sxx, sxy, sxz, sxxx, sxyx, sxzx, dsxxx, dsxyx, dsxzx, dvx, dvy, dvz, scaleA, scaleB);
}
}
extern "C"
void launch_pml_backv(float *vx, float *vy, float *vz, float *vxx, float *vyx, float *vzx, float *dvxx, float *dvyx, float *dvzx, float *dsxx, float *dsyy, float *dszz, float *dsxy, float *dsxz, float *lambda_in, float scaleA, float scaleB, int isForward) {
if (isForward) {
pml_backv_forward<<<dim3(NY/32, NZ, 1), dim3(32, 32, 1), 0, modelingStream>>>(vx, vy, vz, vxx, vyx, vzx, dvxx, dvyx, dvzx, dsxx, dsyy, dszz, dsxy, dsxz, lambda_in, scaleA, scaleB);
} else {
pml_backv_backward<<<dim3(NY/32, NZ, 1), dim3(32, 32, 1), 0, modelingStream>>>(vx, vy, vz, vxx, vyx, vzx, dvxx, dvyx, dvzx, dsxx, dsyy, dszz, dsxy, dsxz, lambda_in, scaleA, scaleB);
}
}
extern "C"
void launch_pml_backs(float *sxx, float *sxy, float *sxz, float *sxxx, float *sxyx, float *sxzx, float *dsxxx, float *dsxyx, float *dsxzx, float *dvx, float *dvy, float *dvz, float scaleA, float scaleB, int isForward) {
if (isForward) {
pml_backs_forward<<<dim3(NY/32, NZ, 1), dim3(32, 32, 1), 0, modelingStream>>>(sxx, sxy, sxz, sxxx, sxyx, sxzx, dsxxx, dsxyx, dsxzx, dvx, dvy, dvz, scaleA, scaleB);
} else {
pml_backs_backward<<<dim3(NY/32, NZ, 1), dim3(32, 32, 1), 0, modelingStream>>>(sxx, sxy, sxz, sxxx, sxyx, sxzx, dsxxx, dsxyx, dsxzx, dvx, dvy, dvz, scaleA, scaleB);
}
}
extern "C"
void launch_pml_leftv(float *vx, float *vy, float *vz, float *vxy, float *vyy, float *vzy, float *dvxy, float *dvyy, float *dvzy, float *dsxx, float *dsyy, float *dszz, float *dsxy, float *dsyz, float *lambda_in, float scaleA, float scaleB, int isForward) {
if (isForward) {
pml_leftv_forward<<<dim3(NX/32, NZ, 1), dim3(32, PML_NUM, 1), 0, modelingStream>>>(vx, vy, vz, vxy, vyy, vzy, dvxy, dvyy, dvzy, dsxx, dsyy, dszz, dsxy, dsyz, lambda_in, scaleA, scaleB);
} else {
pml_leftv_backward<<<dim3(NX/32, NZ, 1), dim3(32, PML_NUM, 1), 0, modelingStream>>>(vx, vy, vz, vxy, vyy, vzy, dvxy, dvyy, dvzy, dsxx, dsyy, dszz, dsxy, dsyz, lambda_in, scaleA, scaleB);
}
}
extern "C"
void launch_pml_lefts(float *sxy, float *syy, float *syz, float *sxyy, float *syyy, float *syzy, float *dsxyy, float *dsyyy, float *dsyzy, float *dvx, float *dvy, float *dvz, float scaleA, float scaleB, int isForward) {
if (isForward) {
pml_lefts_forward<<<dim3(NX/32, NZ, 1), dim3(32, PML_NUM, 1), 0, modelingStream>>>(sxy, syy, syz, sxyy, syyy, syzy, dsxyy, dsyyy, dsyzy, dvx, dvy, dvz, scaleA, scaleB);
} else {
pml_lefts_backward<<<dim3(NX/32, NZ, 1), dim3(32, PML_NUM, 1), 0, modelingStream>>>(sxy, syy, syz, sxyy, syyy, syzy, dsxyy, dsyyy, dsyzy, dvx, dvy, dvz, scaleA, scaleB);
}
}
extern "C"
void launch_pml_rightv(float *vx, float *vy, float *vz, float *vxy, float *vyy, float *vzy, float *dvxy, float *dvyy, float *dvzy, float *dsxx, float *dsyy, float *dszz, float *dsxy, float *dsyz, float *lambda_in, float scaleA, float scaleB, int isForward) {
if (isForward) {
pml_rightv_forward<<<dim3(NX/32, NZ, 1), dim3(32, PML_NUM, 1), 0, modelingStream>>>(vx, vy, vz, vxy, vyy, vzy, dvxy, dvyy, dvzy, dsxx, dsyy, dszz, dsxy, dsyz, lambda_in, scaleA, scaleB);
} else {
pml_rightv_backward<<<dim3(NX/32, NZ, 1), dim3(32, PML_NUM, 1), 0, modelingStream>>>(vx, vy, vz, vxy, vyy, vzy, dvxy, dvyy, dvzy, dsxx, dsyy, dszz, dsxy, dsyz, lambda_in, scaleA, scaleB);
}
}
extern "C"
void launch_pml_rights(float *sxy, float *syy, float *syz, float *sxyy, float *syyy, float *syzy, float *dsxyy, float *dsyyy, float *dsyzy, float *dvx, float *dvy, float *dvz, float scaleA, float scaleB, int isForward) {
if (isForward) {
pml_rights_forward<<<dim3(NX/32, NZ, 1), dim3(32, PML_NUM, 1), 0, modelingStream>>>(sxy, syy, syz, sxyy, syyy, syzy, dsxyy, dsyyy, dsyzy, dvx, dvy, dvz, scaleA, scaleB);
} else {
pml_rights_backward<<<dim3(NX/32, NZ, 1), dim3(32, PML_NUM, 1), 0, modelingStream>>>(sxy, syy, syz, sxyy, syyy, syzy, dsxyy, dsyyy, dsyzy, dvx, dvy, dvz, scaleA, scaleB);
}
}
extern "C"
void launch_pml_bottomv(float *vx, float *vy, float *vz, float *vxz, float *vyz, float *vzz, float *dvxz, float *dvyz, float *dvzz, float *dsxx, float *dsyy, float *dszz, float *dsxz, float *dsyz, float *lambda_in, float scaleA, float scaleB, int isForward) {
if (isForward) {
pml_bottomv_forward<<<dim3(NX/32, NY, 1), dim3(32, PML_NUM, 1), 0, modelingStream>>>(vx, vy, vz, vxz, vyz, vzz, dvxz, dvyz, dvzz, dsxx, dsyy, dszz, dsxz, dsyz, lambda_in, scaleA, scaleB);
} else {
pml_bottomv_backward<<<dim3(NX/32, NY, 1), dim3(32, PML_NUM, 1), 0, modelingStream>>>(vx, vy, vz, vxz, vyz, vzz, dvxz, dvyz, dvzz, dsxx, dsyy, dszz, dsxz, dsyz, lambda_in, scaleA, scaleB);
}
}
extern "C"
void launch_pml_bottoms(float *sxz, float *syz, float *szz, float *sxzz, float *syzz, float *szzz, float *dsxzz, float *dsyzz, float *dszzz, float *dvx, float *dvy, float *dvz, float scaleA, float scaleB, int isForward) {
if (isForward) {
pml_bottoms_forward<<<dim3(NX/32, NY, 1), dim3(32, PML_NUM, 1), 0, modelingStream>>>(sxz, syz, szz, sxzz, syzz, szzz, dsxzz, dsyzz, dszzz, dvx, dvy, dvz, scaleA, scaleB);
} else {
pml_bottoms_backward<<<dim3(NX/32, NY, 1), dim3(32, PML_NUM, 1), 0, modelingStream>>>(sxz, syz, szz, sxzz, syzz, szzz, dsxzz, dsyzz, dszzz, dvx, dvy, dvz, scaleA, scaleB);
}
}
extern "C"
void launch_free_surface(float *dsxz, float *dsyz, float *dszz, float *dsxx, float *dsyy, float *lambda_in, float *vx, float *vy, float *vz, int isForward) {
if (isForward) {
free_surface_forward<<<NY, NX, 0, modelingStream>>>(dsxz, dsyz, dszz, dsxx, dsyy, lambda_in);
} else {
free_surface_backward<<<NY, NX, 0, modelingStream>>>(dsxz, dsyz, dszz, dsxx, dsyy, lambda_in, vx, vy, vz);
}
}
extern "C"
int launch_set_modeling_parameter(float dt, float dx) {
if (set_constant_memory(dt, dx) != 0 ||
cudaStreamCreate(&dataTransferStream) != cudaSuccess ||
cudaStreamCreate(&modelingStream) != cudaSuccess) {
return -1;
}
return 0;
}
extern "C"
void launch_kernel_backward_xcoor(float *sxx1, float *sxy1, float *sxz1, float *syy1, float *syz1, float *szz1, float *sxx2, float *sxy2, float *sxz2, float *syy2, float *syz2, float *szz2, float *Klambda, float *Kmu, int zNum) {
backward_xcoor<<<dim3(NY, zNum, 1), NX, 0, modelingStream>>>(sxx1, sxy1, sxz1, syy1, syz1, szz1, sxx2, sxy2, sxz2, syy2, syz2, szz2, Klambda, Kmu);
}
extern "C"
void launch_kernel_finalize(float *Klambda, float *Kmu, float *lambda, float *mu, int zNum) {
kernel_processing<<<dim3(NY, zNum, 1), NX, 0, modelingStream>>>(Klambda, Kmu, lambda, mu);
}
extern "C"
void launch_station_clip(float *Klambda0, float *Kmu0, int *iX, int *iY, int numberOfStations) {
station_clip<<<dim3(numberOfStations, 5), dim3(16, 16), 0, modelingStream>>>(Klambda0, Kmu0, iX, iY);
}
extern "C"
void launch_add_kernel(float *Klambda0, float *Kmu0, float *Klambda, float *Kmu, int zNum) {
add_kernel<<<dim3(NY, zNum, 1), NX, 0, modelingStream>>>(Klambda0, Kmu0, Klambda, Kmu);
}
extern "C"
void launch_update_stress(float *sxx, float *dsxx, float *syy, float *dsyy, float *szz, float *dszz, float *sxy, float *dsxy, float *sxz, float *dsxz, float *syz, float *dsyz, float *mu, float scaleA, float scaleB) {
update_stress<<<dim3(NY, NZ, 1), NX, 0, modelingStream>>>(sxx, dsxx, syy, dsyy, szz, dszz, sxy, dsxy, sxz, dsxz, syz, dsyz, mu, scaleA, scaleB);
}
extern "C"
void launch_update_velocity(float *fx, float *dfx, float *fy, float *dfy, float *fz, float *dfz, float *rho, float scaleA, float scaleB) {
update_velocity<<<dim3(NY, NZ, 1), NX, 0, modelingStream>>>(fx, dfx, fy, dfy, fz, dfz, rho, scaleA, scaleB);
}
extern "C"
void launch_source_inject_forward(float *f, int x, int y, float *skx, float *sky, float stf) {
source_inject<<<1, dim3(16, 16, 1), 0, modelingStream>>>(f, x, y, skx, sky, stf);
}
extern "C"
void launch_source_inject_forward_gaussian(float *f, int x, int y, float skx, float sky, float stf) {
source_inject_gaussian<<<1, dim3(16, 16, 1), 0, modelingStream>>>(f, x, y, skx, sky, stf);
}
extern "C"
void launch_source_inject_backward(int *iX, int *iY, float *kx, float *ky, float *vz, float *f, int numberOfStations) {
source_inject_station<<<numberOfStations, dim3(16, 16, 1), 0, modelingStream>>>(iX, iY, kx, ky, vz, f);
}
extern "C"
void launch_source_inject_backward_gaussian(int *iX, int *iY, float *kx, float *ky, float *vz, float *f, int numberOfStations) {
source_inject_station_gaussian<<<numberOfStations, dim3(16, 16, 1), 0, modelingStream>>>(iX, iY, kx, ky, vz, f);
}
extern "C"
void launch_station_extract(int *iX, int *iY, float *x, float *y, float *vz_out, float *vz_in, int myNumberOfStations) {
station_extract<<<myNumberOfStations, dim3(16, 16, 1), 0, modelingStream>>>(iX, iY, x, y, vz_out, vz_in);
}
extern "C"
void launch_update_model(float stepLength, float *Kmu, float *mu, float *rho, int zNum) {
update_mu<<<dim3(NY, zNum, 1), NX, 0, modelingStream>>>(stepLength, Kmu, mu, rho);
}
extern "C"
void launch_cuda_device_synchronize() {
cudaDeviceSynchronize();
}
extern "C"
void launch_cuda_stream_synchronize() {
cudaStreamSynchronize(dataTransferStream);
cudaStreamSynchronize(modelingStream);
}
extern "C"
void launch_delete_modeling_parameter() {
cudaStreamDestroy(dataTransferStream);
cudaStreamDestroy(modelingStream);
}
/*
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
printf("\nDevice Name: %s\n", prop.name);
printf("Total global memory: %fGB\n", prop.totalGlobalMem/1024./1024./1024.);
printf("Total const memory: %u\n", prop.totalConstMem);
printf("Shared memory per block: %u\n", prop.sharedMemPerBlock);
printf("Texture alignment: %u\n", prop.textureAlignment);
printf("Regs per block: %d\n", prop.regsPerBlock);
printf("Warp size: %d\n", prop.warpSize);
printf("Max threads per block: %u\n", prop.maxThreadsPerBlock);
printf("Multi processor counts: %d\n", prop.multiProcessorCount);
printf("Compute Capability: %d.%d\n\n", prop.major, prop.minor);
cudaEvent_t startEvent, stopEvent;
float time;
cudaEventCreate(&startEvent);
cudaEventCreate(&stopEvent);
cudaEventRecord(startEvent, 0);
for (int i = 0; i < 1000; i++) {
dstress_dx_forward<<<dim3(NY/32, NZ, 1), dim3(32, 32)>>>(f_in, f_out);
}
cudaEventRecord(stopEvent, 0);
cudaEventSynchronize(stopEvent);
cudaEventElapsedTime(&time, startEvent, stopEvent);
fprintf(stdout, "Total running time: %f\n", time);
fprintf(stdout, "Bandwidth (GB/s): %f\n", 2 * 256. * 256. * 256. * sizeof(float) * 1000. / 1024. / 1024 / time);
*/ |
d41343f0987e21654dbe898bab42458c79c1d3fd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/utility.hpp"
#include "opencv2/gpu/device/reduce.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/vec_distance.hpp"
#include "opencv2/gpu/device/datamov_utils.hpp"
namespace cv { namespace gpu { namespace device
{
namespace bf_match
{
///////////////////////////////////////////////////////////////////////////////
// Reduction
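// findBestMatch reduces the BLOCK_SIZE per-thread candidates of one query row
// (held in shared memory) to a single best distance/index pair, using the
// key/value reduction from reduce.hpp with less<float>; the second overload
// also carries the image index for the multi-image case.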
template <int BLOCK_SIZE>
__device__ void findBestMatch(float& bestDistance, int& bestTrainIdx, float* s_distance, int* s_trainIdx)
{
s_distance += threadIdx.y * BLOCK_SIZE;
s_trainIdx += threadIdx.y * BLOCK_SIZE;
reduceKeyVal<BLOCK_SIZE>(s_distance, bestDistance, s_trainIdx, bestTrainIdx, threadIdx.x, less<float>());
}
template <int BLOCK_SIZE>
__device__ void findBestMatch(float& bestDistance, int& bestTrainIdx, int& bestImgIdx, float* s_distance, int* s_trainIdx, int* s_imgIdx)
{
s_distance += threadIdx.y * BLOCK_SIZE;
s_trainIdx += threadIdx.y * BLOCK_SIZE;
s_imgIdx += threadIdx.y * BLOCK_SIZE;
reduceKeyVal<BLOCK_SIZE>(s_distance, bestDistance, smem_tuple(s_trainIdx, s_imgIdx), thrust::tie(bestTrainIdx, bestImgIdx), threadIdx.x, less<float>());
}
///////////////////////////////////////////////////////////////////////////////
// Match Unrolled Cached
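// "Cached" variant: the query descriptor is staged once into shared memory
// (loadQueryToSmem) and reused for every tile of train descriptors. This
// relies on the descriptor length MAX_DESC_LEN being a compile-time constant
// small enough to keep resident per block.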
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename T, typename U>
__device__ void loadQueryToSmem(int queryIdx, const PtrStepSz<T>& query, U* s_query)
{
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_query[threadIdx.y * MAX_DESC_LEN + loadX] = loadX < query.cols ? query.ptr(::min(queryIdx, query.rows - 1))[loadX] : 0;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__device__ void loopUnrolledCached(int queryIdx, const PtrStepSz<T>& query, volatile int imgIdx, const PtrStepSz<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance, int& bestTrainIdx, int& bestImgIdx)
{
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
{
Dist dist;
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
if (loadX < train.cols)
{
T val;
ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val);
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * MAX_DESC_LEN + i * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
typename Dist::result_type distVal = dist;
const int trainIdx = t * BLOCK_SIZE + threadIdx.x;
if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx))
{
bestImgIdx = imgIdx;
bestDistance = distVal;
bestTrainIdx = trainIdx;
}
}
}
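// Kernel driver for the single-train cached path: stage the query row, scan
// all train descriptors accumulating each thread's best match, reduce across
// the block, then thread x == 0 of each query row writes the result.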
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolledCached(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN);
loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query);
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolledCached(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= BLOCK_SIZE ? MAX_DESC_LEN : BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, trainIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolledCached(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask,
int* bestTrainIdx, int* bestImgIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN);
loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query);
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
int myBestImgIdx = -1;
Mask m = mask;
for (int imgIdx = 0; imgIdx < n; ++imgIdx)
{
const PtrStepSz<T> train = trains[imgIdx];
m.next();
loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx);
}
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestImgIdx[queryIdx] = myBestImgIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolledCached(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= 2 * BLOCK_SIZE ? MAX_DESC_LEN : 2 * BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Match Unrolled
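// Non-cached unrolled variant: query tiles are reloaded from global memory on
// every iteration instead of being kept resident, trading reuse for a smaller
// shared-memory footprint. The wider specializations that would call it from
// the dispatcher (256/512/1024 columns) are currently commented out.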
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__device__ void loopUnrolled(int queryIdx, const PtrStepSz<T>& query, volatile int imgIdx, const PtrStepSz<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance, int& bestTrainIdx, int& bestImgIdx)
{
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
{
Dist dist;
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
if (loadX < query.cols)
{
T val;
ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val);
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val;
ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val);
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
typename Dist::result_type distVal = dist;
const int trainIdx = t * BLOCK_SIZE + threadIdx.x;
if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx))
{
bestImgIdx = imgIdx;
bestDistance = distVal;
bestTrainIdx = trainIdx;
}
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolled(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolled(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, trainIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolled(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask,
int* bestTrainIdx, int* bestImgIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
int myBestImgIdx = -1;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
Mask m = mask;
for (int imgIdx = 0; imgIdx < n; ++imgIdx)
{
const PtrStepSz<T> train = trains[imgIdx];
m.next();
loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx);
}
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
int* s_imgIdxIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdxIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestImgIdx[queryIdx] = myBestImgIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolled(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Match
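// Fully generic fallback: same tiling scheme, but the loop over the
// descriptor width is a runtime loop with no MAX_DESC_LEN unrolling; the
// dispatcher uses it whenever query.cols exceeds 128.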
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__device__ void loop(int queryIdx, const PtrStepSz<T>& query, volatile int imgIdx, const PtrStepSz<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance, int& bestTrainIdx, int& bestImgIdx)
{
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
{
Dist dist;
for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
if (loadX < query.cols)
{
T val;
ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val);
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val;
ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val);
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
typename Dist::result_type distVal = dist;
const int trainIdx = t * BLOCK_SIZE + threadIdx.x;
if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx))
{
bestImgIdx = imgIdx;
bestDistance = distVal;
bestTrainIdx = trainIdx;
}
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__global__ void match(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
loop<BLOCK_SIZE, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
void match(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( match<BLOCK_SIZE, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, trainIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__global__ void match(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask,
int* bestTrainIdx, int* bestImgIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
int myBestImgIdx = -1;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
Mask m = mask;
for (int imgIdx = 0; imgIdx < n; ++imgIdx)
{
const PtrStepSz<T> train = trains[imgIdx];
m.next();
loop<BLOCK_SIZE, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx);
}
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
int* s_imgIdxIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdxIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestImgIdx[queryIdx] = myBestImgIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
void match(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( match<BLOCK_SIZE, Dist>), dim3(grid), dim3(block), smemSize, stream, query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Match dispatcher
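// Chooses a kernel specialization from the descriptor width: <= 64 and <= 128
// columns use the unrolled cached kernels, anything wider falls back to the
// generic match kernel. BLOCK_SIZE is fixed at 16 for all paths.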
template <typename Dist, typename T, typename Mask>
void matchDispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
if (query.cols <= 64)
{
matchUnrolledCached<16, 64, Dist>(query, train, mask, trainIdx, distance, stream);
}
else if (query.cols <= 128)
{
matchUnrolledCached<16, 128, Dist>(query, train, mask, trainIdx, distance, stream);
}
/*else if (query.cols <= 256)
{
matchUnrolled<16, 256, Dist>(query, train, mask, trainIdx, distance, stream);
}
else if (query.cols <= 512)
{
matchUnrolled<16, 512, Dist>(query, train, mask, trainIdx, distance, stream);
}
else if (query.cols <= 1024)
{
matchUnrolled<16, 1024, Dist>(query, train, mask, trainIdx, distance, stream);
}*/
else
{
match<16, Dist>(query, train, mask, trainIdx, distance, stream);
}
}
template <typename Dist, typename T, typename Mask>
void matchDispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
if (query.cols <= 64)
{
matchUnrolledCached<16, 64, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
else if (query.cols <= 128)
{
matchUnrolledCached<16, 128, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
/*else if (query.cols <= 256)
{
matchUnrolled<16, 256, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
else if (query.cols <= 512)
{
matchUnrolled<16, 512, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
else if (query.cols <= 1024)
{
matchUnrolled<16, 1024, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}*/
else
{
match<16, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
}
///////////////////////////////////////////////////////////////////////////////
// Match caller
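// Public entry points: matchL1_gpu / matchL2_gpu / matchHamming_gpu select
// masked or unmasked dispatch and are explicitly instantiated below for the
// element types the brute-force matcher supports; unused combinations are
// left commented out.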
template <typename T> void matchL1_gpu(const PtrStepSzb& query, const PtrStepSzb& train, const PtrStepSzb& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
if (mask.data)
{
matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), SingleMask(mask),
trainIdx, distance,
stream);
}
else
{
matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), WithOutMask(),
trainIdx, distance,
stream);
}
}
template void matchL1_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchL1_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchL1_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchL1_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchL1_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchL1_gpu<float >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
template <typename T> void matchL2_gpu(const PtrStepSzb& query, const PtrStepSzb& train, const PtrStepSzb& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
if (mask.data)
{
matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), SingleMask(mask),
trainIdx, distance,
stream);
}
else
{
matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), WithOutMask(),
trainIdx, distance,
stream);
}
}
//template void matchL2_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchL2_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchL2_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchL2_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchL2_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchL2_gpu<float >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
template <typename T> void matchHamming_gpu(const PtrStepSzb& query, const PtrStepSzb& train, const PtrStepSzb& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
if (mask.data)
{
matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), SingleMask(mask),
trainIdx, distance,
stream);
}
else
{
matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), WithOutMask(),
trainIdx, distance,
stream);
}
}
template void matchHamming_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchHamming_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchHamming_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchHamming_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchHamming_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
template <typename T> void matchL1_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
if (masks.data)
{
matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data),
trainIdx, imgIdx, distance,
stream);
}
else
{
matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(),
trainIdx, imgIdx, distance,
stream);
}
}
template void matchL1_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchL1_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchL1_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchL1_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchL1_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchL1_gpu<float >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
template <typename T> void matchL2_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
if (masks.data)
{
matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data),
trainIdx, imgIdx, distance,
stream);
}
else
{
matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(),
trainIdx, imgIdx, distance,
stream);
}
}
//template void matchL2_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchL2_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchL2_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchL2_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchL2_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchL2_gpu<float >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& maskCollection, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
template <typename T> void matchHamming_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
if (masks.data)
{
matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data),
trainIdx, imgIdx, distance,
stream);
}
else
{
matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(),
trainIdx, imgIdx, distance,
stream);
}
}
template void matchHamming_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchHamming_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchHamming_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchHamming_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchHamming_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
} // namespace bf_match
}}} // namespace cv { namespace gpu { namespace device {
#endif /* CUDA_DISABLER */
| d41343f0987e21654dbe898bab42458c79c1d3fd.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/utility.hpp"
#include "opencv2/gpu/device/reduce.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/vec_distance.hpp"
#include "opencv2/gpu/device/datamov_utils.hpp"
namespace cv { namespace gpu { namespace device
{
namespace bf_match
{
///////////////////////////////////////////////////////////////////////////////
// Reduction
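        // The two findBestMatch overloads below perform a block-wide key/value min-reduction:
        // every thread in a BLOCK_SIZE-wide row of the thread block contributes its current best
        // (distance, trainIdx[, imgIdx]) candidate through shared memory, and reduceKeyVal keeps
        // the pair with the smallest distance. Each row of the block (one query descriptor per
        // threadIdx.y) is reduced independently, which is why the shared pointers are offset by
        // threadIdx.y * BLOCK_SIZE before the reduction.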
template <int BLOCK_SIZE>
__device__ void findBestMatch(float& bestDistance, int& bestTrainIdx, float* s_distance, int* s_trainIdx)
{
s_distance += threadIdx.y * BLOCK_SIZE;
s_trainIdx += threadIdx.y * BLOCK_SIZE;
reduceKeyVal<BLOCK_SIZE>(s_distance, bestDistance, s_trainIdx, bestTrainIdx, threadIdx.x, less<float>());
}
template <int BLOCK_SIZE>
__device__ void findBestMatch(float& bestDistance, int& bestTrainIdx, int& bestImgIdx, float* s_distance, int* s_trainIdx, int* s_imgIdx)
{
s_distance += threadIdx.y * BLOCK_SIZE;
s_trainIdx += threadIdx.y * BLOCK_SIZE;
s_imgIdx += threadIdx.y * BLOCK_SIZE;
reduceKeyVal<BLOCK_SIZE>(s_distance, bestDistance, smem_tuple(s_trainIdx, s_imgIdx), thrust::tie(bestTrainIdx, bestImgIdx), threadIdx.x, less<float>());
}
///////////////////////////////////////////////////////////////////////////////
// Match Unrolled Cached
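        // Strategy of the "unrolled cached" kernels: the query descriptor of each row of the
        // thread block is staged once into shared memory (s_query, padded to MAX_DESC_LEN
        // elements) and reused for every tile of train descriptors, while s_train is refilled
        // with a BLOCK_SIZE x BLOCK_SIZE tile per iteration. The inner loops are fully unrolled
        // because MAX_DESC_LEN is a compile-time constant. Worked example for the shared-memory
        // size used by the host wrapper below: with BLOCK_SIZE = 16 and MAX_DESC_LEN = 64,
        // smemSize = (16 * 64 + 16 * 16) * sizeof(int) = 1280 * 4 = 5120 bytes.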
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename T, typename U>
__device__ void loadQueryToSmem(int queryIdx, const PtrStepSz<T>& query, U* s_query)
{
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_query[threadIdx.y * MAX_DESC_LEN + loadX] = loadX < query.cols ? query.ptr(::min(queryIdx, query.rows - 1))[loadX] : 0;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
        __device__ void loopUnrolledCached(int queryIdx, const PtrStepSz<T>& query, volatile int imgIdx, const PtrStepSz<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance, int& bestTrainIdx, int& bestImgIdx)
{
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
{
Dist dist;
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
if (loadX < train.cols)
{
T val;
ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val);
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * MAX_DESC_LEN + i * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
typename Dist::result_type distVal = dist;
const int trainIdx = t * BLOCK_SIZE + threadIdx.x;
if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx))
{
bestImgIdx = imgIdx;
bestDistance = distVal;
bestTrainIdx = trainIdx;
}
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolledCached(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN);
loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query);
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolledCached(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= BLOCK_SIZE ? MAX_DESC_LEN : BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolledCached(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask,
int* bestTrainIdx, int* bestImgIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN);
loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query);
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
int myBestImgIdx = -1;
Mask m = mask;
for (int imgIdx = 0; imgIdx < n; ++imgIdx)
{
const PtrStepSz<T> train = trains[imgIdx];
m.next();
loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx);
}
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestImgIdx[queryIdx] = myBestImgIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolledCached(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= 2 * BLOCK_SIZE ? MAX_DESC_LEN : 2 * BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Match Unrolled
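        // The "unrolled" (non-cached) kernels differ from the cached variant above in that both
        // the query tile and the train tile are reloaded from global memory on every iteration of
        // the unrolled descriptor loop, so shared memory only holds two BLOCK_SIZE x BLOCK_SIZE
        // tiles. The dispatcher in this file only reaches them through the commented-out
        // 256/512/1024 branches further down; they are kept for descriptor lengths that exceed
        // the cached variant's shared-memory budget.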
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
        __device__ void loopUnrolled(int queryIdx, const PtrStepSz<T>& query, volatile int imgIdx, const PtrStepSz<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance, int& bestTrainIdx, int& bestImgIdx)
{
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
{
Dist dist;
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
if (loadX < query.cols)
{
T val;
ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val);
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val;
ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val);
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
typename Dist::result_type distVal = dist;
const int trainIdx = t * BLOCK_SIZE + threadIdx.x;
if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx))
{
bestImgIdx = imgIdx;
bestDistance = distVal;
bestTrainIdx = trainIdx;
}
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolled(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolled(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolled(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask,
int* bestTrainIdx, int* bestImgIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
int myBestImgIdx = -1;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
Mask m = mask;
for (int imgIdx = 0; imgIdx < n; ++imgIdx)
{
const PtrStepSz<T> train = trains[imgIdx];
m.next();
loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx);
}
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
int* s_imgIdxIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdxIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestImgIdx[queryIdx] = myBestImgIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolled(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Match
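        // Generic fallback kernels: the descriptor loop runs over query.cols in BLOCK_SIZE-sized
        // chunks with no compile-time unrolling, so they work for any descriptor length. The
        // dispatcher selects these whenever query.cols exceeds the unrolled-cached limits
        // (currently anything longer than 128 elements).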
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__device__ void loop(int queryIdx, const PtrStepSz<T>& query, volatile int imgIdx, const PtrStepSz<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance, int& bestTrainIdx, int& bestImgIdx)
{
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
{
Dist dist;
for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
if (loadX < query.cols)
{
T val;
ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val);
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val;
ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val);
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
typename Dist::result_type distVal = dist;
const int trainIdx = t * BLOCK_SIZE + threadIdx.x;
if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx))
{
bestImgIdx = imgIdx;
bestDistance = distVal;
bestTrainIdx = trainIdx;
}
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__global__ void match(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
loop<BLOCK_SIZE, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
void match(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
match<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__global__ void match(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask,
int* bestTrainIdx, int* bestImgIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
int myBestImgIdx = -1;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
Mask m = mask;
for (int imgIdx = 0; imgIdx < n; ++imgIdx)
{
const PtrStepSz<T> train = trains[imgIdx];
m.next();
loop<BLOCK_SIZE, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx);
}
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
int* s_imgIdxIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdxIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestImgIdx[queryIdx] = myBestImgIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
void match(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
match<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Match dispatcher
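        // Dispatch is keyed on the descriptor length (query.cols): descriptors of up to 64 or 128
        // elements go to the cached, fully unrolled kernels with BLOCK_SIZE = 16; everything
        // longer falls back to the generic match kernels. The intermediate 256/512/1024 unrolled
        // branches are kept commented out below.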
template <typename Dist, typename T, typename Mask>
void matchDispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
if (query.cols <= 64)
{
matchUnrolledCached<16, 64, Dist>(query, train, mask, trainIdx, distance, stream);
}
else if (query.cols <= 128)
{
matchUnrolledCached<16, 128, Dist>(query, train, mask, trainIdx, distance, stream);
}
/*else if (query.cols <= 256)
{
matchUnrolled<16, 256, Dist>(query, train, mask, trainIdx, distance, stream);
}
else if (query.cols <= 512)
{
matchUnrolled<16, 512, Dist>(query, train, mask, trainIdx, distance, stream);
}
else if (query.cols <= 1024)
{
matchUnrolled<16, 1024, Dist>(query, train, mask, trainIdx, distance, stream);
}*/
else
{
match<16, Dist>(query, train, mask, trainIdx, distance, stream);
}
}
template <typename Dist, typename T, typename Mask>
void matchDispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
if (query.cols <= 64)
{
matchUnrolledCached<16, 64, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
else if (query.cols <= 128)
{
matchUnrolledCached<16, 128, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
/*else if (query.cols <= 256)
{
matchUnrolled<16, 256, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
else if (query.cols <= 512)
{
matchUnrolled<16, 512, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
else if (query.cols <= 1024)
{
matchUnrolled<16, 1024, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}*/
else
{
match<16, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
}
///////////////////////////////////////////////////////////////////////////////
// Match caller
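        // Public entry points instantiated at the bottom of the file. Each caller picks the
        // distance functor (L1Dist<T>, L2Dist or HammingDist) and wraps the optional mask as
        // SingleMask / MaskCollection / WithOutMask before forwarding to matchDispatcher. These
        // functions are presumably reached from the brute-force matcher wrapper in OpenCV's gpu
        // module; only the template instantiations that wrapper actually needs are emitted, the
        // remaining ones are left commented out.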
template <typename T> void matchL1_gpu(const PtrStepSzb& query, const PtrStepSzb& train, const PtrStepSzb& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
if (mask.data)
{
matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), SingleMask(mask),
trainIdx, distance,
stream);
}
else
{
matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), WithOutMask(),
trainIdx, distance,
stream);
}
}
template void matchL1_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchL1_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchL1_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchL1_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchL1_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchL1_gpu<float >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
template <typename T> void matchL2_gpu(const PtrStepSzb& query, const PtrStepSzb& train, const PtrStepSzb& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
if (mask.data)
{
matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), SingleMask(mask),
trainIdx, distance,
stream);
}
else
{
matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), WithOutMask(),
trainIdx, distance,
stream);
}
}
//template void matchL2_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchL2_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchL2_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchL2_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchL2_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchL2_gpu<float >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
template <typename T> void matchHamming_gpu(const PtrStepSzb& query, const PtrStepSzb& train, const PtrStepSzb& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
if (mask.data)
{
matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), SingleMask(mask),
trainIdx, distance,
stream);
}
else
{
matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), WithOutMask(),
trainIdx, distance,
stream);
}
}
template void matchHamming_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchHamming_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchHamming_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchHamming_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchHamming_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
template <typename T> void matchL1_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
if (masks.data)
{
matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data),
trainIdx, imgIdx, distance,
stream);
}
else
{
matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(),
trainIdx, imgIdx, distance,
stream);
}
}
template void matchL1_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchL1_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchL1_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchL1_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchL1_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchL1_gpu<float >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
template <typename T> void matchL2_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
if (masks.data)
{
matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data),
trainIdx, imgIdx, distance,
stream);
}
else
{
matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(),
trainIdx, imgIdx, distance,
stream);
}
}
//template void matchL2_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchL2_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchL2_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchL2_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchL2_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchL2_gpu<float >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& maskCollection, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
template <typename T> void matchHamming_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
if (masks.data)
{
matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data),
trainIdx, imgIdx, distance,
stream);
}
else
{
matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(),
trainIdx, imgIdx, distance,
stream);
}
}
template void matchHamming_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchHamming_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchHamming_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchHamming_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchHamming_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
} // namespace bf_match
}}} // namespace cv { namespace gpu { namespace device {
#endif /* CUDA_DISABLER */
|
5463c8f33c33cd0dc4ae94787634bf3d7c74882b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void kernel_add_proj(float *d_a, float *d_b)
{
int idx = blockDim.x * gridDim.x * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
d_a[idx]=d_a[idx]+d_b[idx];
}
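// Both helper kernels operate on a flattened 2D projection with one float per detector pixel;
// the index expression
//     idx = blockDim.x * gridDim.x * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x
// maps a (gridDim.x x gridDim.y) grid of 1-D blocks onto that flat array. kernel_add_proj above
// accumulates one projection buffer into another element-wise, and kernel_divide_proj below
// computes the SART-style correction
//     c(i) = (p(i) - <a_i, f>) / sum_j a_ij
// where h_proj_weightedLen is expected to hold the forward projection <a_i, f> and
// h_proj_sumLen the total ray length sum_j a_ij; rays shorter than volumn_z * 1e-6 get a zero
// correction to avoid dividing by (near) zero.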
__global__ void kernel_divide_proj(float *h_proj_correction, float *h_proj_data, float *h_proj_sumLen, float *h_proj_weightedLen)
{
int idx = blockDim.x * gridDim.x * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
float temp = h_proj_sumLen[idx];
if ( temp < volumn_z*1e-6f)
h_proj_correction[idx] = 0.0f;
else
{
h_proj_correction[idx] = (h_proj_data[idx] - h_proj_weightedLen[idx])*1.0f / temp ;
}
}
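// forward_ray_driven_3d_kernel_correction_multiGPU below traces one ray per detector pixel from
// the source position X2 to the detector element X1 through the voxel grid (a Siddon/Jacobs-style
// incremental traversal; the numbered comments such as (9) or (25) appear to refer to equations
// of that algorithm in the paper the authors followed). Per ray it accumulates the length-weighted
// sum of voxel values (the forward projection) and the total intersection length, then stores
// either the forward projection (command == 0) or the SART correction (command == 1).
//
// One thread handles one detector pixel, so a plausible host-side launch for one sub-projection
// would look roughly like the sketch below (the host buffer names, theta and R_BLOCK are
// illustrative assumptions, not taken from this file, and R is assumed to be a multiple of
// R_BLOCK):
//
//     dim3 block(R_BLOCK, 1);                              // threads along the detector row
//     dim3 grid(R / R_BLOCK, Z_prj / Number_of_Devices);   // one block row per detector row of the slab
//     hipLaunchKernelGGL(forward_ray_driven_3d_kernel_correction_multiGPU,
//                        grid, block, 0, 0,
//                        d_volume, d_proj_correction, d_proj_data,
//                        sinf(theta), cosf(theta), subPrjIdx, /*command=*/1);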
__global__ void forward_ray_driven_3d_kernel_correction_multiGPU(float *d_f , float *d_proj_correction, float *d_proj_data, float sin_theta, float cos_theta, int subPrjIdx, int command)
{
// d_f: 3D object array; d_f[i,j,k] = d_f [k*M*N+j*M+i];
// d_proj_data: 2D projection acquired at the angle of t_theta (only a portion of the whole projection view)
    // d_proj_correction: 2D projection correction (output of this function, i.e. c(i) in the paper)
// subPrjIdx: sub projection portion index
int Detector_x_idx = threadIdx.x + blockDim.x * blockIdx.x;
int Detector_z_idx = blockIdx.y;
int proj_pixel_index = Detector_z_idx * R + Detector_x_idx;
    // Source position (X2): Coordinate in (x,y,z) system ---
float vertex_x2_x,vertex_x2_y,vertex_x2_z;
if (CT_style==0) //CBCT
{
vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
vertex_x2_z = Source_z;
}
else if (CT_style==1) //FBCT
{
vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
vertex_x2_z = Detector_Zmin + (Z_prj/Number_of_Devices*subPrjIdx+Detector_z_idx) * Detector_pixel_x;
}
else if (CT_style==2) //parallel beam
{
vertex_x2_x = Source_x * cos_theta - (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * sin_theta;
vertex_x2_y = Source_x * sin_theta + (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * cos_theta;
vertex_x2_z = Detector_Zmin + (Z_prj/Number_of_Devices*subPrjIdx+Detector_z_idx) * Detector_pixel_x;
}
// Detector element center positions (X1): Coordinate in (x,y,z) system ---
float vertex_x1_x,vertex_x1_y,vertex_x1_z;
vertex_x1_x = DOD * cos_theta - (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * sin_theta;
vertex_x1_y = DOD * sin_theta + (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * cos_theta;
vertex_x1_z = Detector_Zmin + (Z_prj/Number_of_Devices*subPrjIdx+Detector_z_idx) * Detector_pixel_x;
// Notice: in this system, vertex_x1_x < 0 < vertex_x2_x
float inv_x_diff = 1.0f / (vertex_x2_x - vertex_x1_x);
float inv_y_diff = 1.0f / (vertex_x2_y - vertex_x1_y);
float inv_z_diff = 1.0f / (vertex_x2_z - vertex_x1_z);
/*****************************************/
float alpha_x_min= 0.0f, alpha_y_min= 0.0f, alpha_z_min= 0.0f;
float alpha_x_max= 0.0f, alpha_y_max= 0.0f, alpha_z_max= 0.0f;
float alpha_min= 0.0f, alpha_max= 0.0f;
int i_min=0, j_min=0, k_min=0;
int i_max=0, j_max=0, k_max=0;
int i=0, j=0, k=0;
int voxel_i=0, voxel_j=0, voxel_k=0;
float alpha_x=0.0f, alpha_y=0.0f, alpha_z=0.0f;
float one_ray_sum = 0.0f;
float one_ray_length = 0.0f;
float alpha_c= 0.0f;
float d_x1_x2= 0.0f;
int N_total_sec=0;
int next_alpha_index;
/**** Step 1 :find out alpha_min, alpha_max ********/
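    // The ray is parametrized as p(alpha) = X1 + alpha * (X2 - X1) with alpha in [0,1]
    // (alpha = 0 at the detector element, alpha = 1 at the source). For each axis the two
    // bounding planes of the volume give a pair of parameter values; their per-axis min/max
    // are combined so that
    //     alpha_min = max(alpha_x_min, alpha_y_min, alpha_z_min)   -- ray enters the box
    //     alpha_max = min(alpha_x_max, alpha_y_max, alpha_z_max)   -- ray leaves the box
    // and the ray misses the volume whenever alpha_max <= alpha_min.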
alpha_min = (boundary_voxel_x + volumn_x*0 - vertex_x1_x )* inv_x_diff; //(9)
alpha_max = (boundary_voxel_x + volumn_x*M - vertex_x1_x )* inv_x_diff;
    // Note: at this point it is not yet determined which of the two is the parametric value of
    // the ray's first intersection with the x-planes; that depends on whether the source or the
    // detector lies on the left side of the reconstruction region for this view
alpha_x_min = fmin(alpha_min, alpha_max); //(5)
alpha_x_max = fmax(alpha_min, alpha_max ); //(6)
alpha_min = (boundary_voxel_y + volumn_y*0 - vertex_x1_y )* inv_y_diff;
alpha_max = (boundary_voxel_y + volumn_y*N - vertex_x1_y )* inv_y_diff;
alpha_y_min = fmin(alpha_min, alpha_max); //(7)
alpha_y_max = fmax(alpha_min, alpha_max ); //(8)
alpha_min = (boundary_voxel_z + volumn_z*0 - vertex_x1_z )* inv_z_diff;
alpha_max = (boundary_voxel_z + volumn_z*ZETA - vertex_x1_z )* inv_z_diff;
// Note: when (vertex_x2_z == vertex_x1_z), alpha_min = -inf, alpha_max = inf.
alpha_z_min = fmin(alpha_min, alpha_max);
alpha_z_max = fmax(alpha_min, alpha_max );
// alpha_min / alpha_max reused
alpha_min = fmax(fmax(alpha_x_min, alpha_y_min), fmax(alpha_y_min, alpha_z_min)); //(3)
// i.e. alpha_min = fmax(alpha_x_min,alpha_y_min,alpha_z_min)
// it indicates the point where the path interacts with the near boundary of reconstruction region
alpha_max = fmin(fmin(alpha_x_max, alpha_y_max), fmin(alpha_y_max, alpha_z_max)); //(4)
// i.e. alpha_max = fmin(alpha_x_max,alpha_y_max,alpha_z_max)
// it indicates the point where the path last interacts with the far boundary of reconstruction region
/********Step 2,3: Find i_max, i_min***************/
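    // i_min / i_max (and the analogous j/k ranges) are the indices of the first and last planes
    // of each axis that the ray crosses inside the volume. They are recovered from
    // alpha_min / alpha_max by inverting the plane equation, e.g.
    //     i = floor((alpha * (x2 - x1) + x1 - boundary_voxel_x) / volumn_x)
    // with the +1 / direction-dependent adjustments below accounting for whether the ray runs in
    // the positive or negative direction of that axis.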
if (alpha_max <= alpha_min) // It means no interaction of the ray and the volume
one_ray_length = 0.0f ;
else
{
// X direction
if (vertex_x1_x < vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_min = 1; //(11)
else //if (alpha_min != alpha_x_min)
i_min = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(12)
/* Note: i_min is the index of the 1st x plane where the path interacts inside the reconstruction region
* It is not the index of alpha_x_min
*/
if (alpha_max == alpha_x_max)
i_max = M; //(13)
else //if (alpha_max != alpha_x_max)
i_max = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(14)
// Note: i_max is the index of the last x plane where the path interacts with the reconstruction region (inside or boundary)
}
else //if (vertex_x1_x >= vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_max = M-1; //(15)
else //if (alpha_min != alpha_x_min)
i_max = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(16)
if (alpha_max == alpha_x_max)
i_min = 0; //(17)
else //if (alpha_max != alpha_x_max)
i_min = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(18)
}
// Note: overall, i_min is the most left x-plane, i_max the most right x-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Y direction
if (vertex_x1_y < vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_min = 1;
else //f (alpha_min != alpha_y_min)
j_min = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) + 1 ;
if (alpha_max == alpha_y_max)
j_max = N;
else //if (alpha_max != alpha_y_max)
j_max = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) ;
}
else //if (vertex_x1_y >= vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_max = N-1;
else //if (alpha_min != alpha_y_min)
j_max = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) ;
if (alpha_max == alpha_y_max)
j_min = 0;
else //if (alpha_max != alpha_y_max)
j_min = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) + 1 ;
}
// Note: overall, j_min is the most bottom y-plane, j_max the most top y-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Z direction
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6f )
{
k_min = floor(( vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
k_max = floor(( vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
// Note: this condition can be combined into either of the two branches.
}
else if (vertex_x1_z < vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_min = 1;
else //if (alpha_min != alpha_z_min)
k_min = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
if (alpha_max == alpha_z_max)
k_max = ZETA;
else //if (alpha_max != alpha_z_max)
k_max = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
}
else //if (vertex_x1_z > vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_max = ZETA-1;
else //if (alpha_min != alpha_z_min)
k_max = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
if (alpha_max == alpha_z_max)
k_min = 0;
else //if (alpha_max != alpha_z_max)
k_min = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z -boundary_voxel_z )*inv_volumn_z) + 1 ;
}
/************ initialization (i,j,k) (alpha_x_1,alpha_y_1,alpha_z_1)**************************/
// Note: (i,j,k) is the current x,y,z plane index (@ the initial point at the boundary)
// Note: (alpha_x,alpha_y,alpha_z) is the next x,y,z plane to go.
N_total_sec = i_max - i_min + 1 + j_max - j_min +1 + k_max-k_min +1;
// i.e. N_p (25)
if (fabs(vertex_x1_x-vertex_x2_x)<volumn_x*1e-6f )
{
alpha_x = MAX_infi;
i = i_min-1;
}
else if (vertex_x1_x < vertex_x2_x)
{
alpha_x = (volumn_x * i_min + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_min - 1;
}
else if (vertex_x1_x > vertex_x2_x)
{
alpha_x = (volumn_x * i_max + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_max + 1;
}
// Note: alpha_x_1 is the intersection where the path hit the 1st x plane inside the recon region
if (fabs(vertex_x1_y-vertex_x2_y)<volumn_y*1e-6f )
{
alpha_y = MAX_infi;
j = j_min-1;
}
else
if (vertex_x1_y < vertex_x2_y)
{
alpha_y = (volumn_y * j_min + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_min - 1;
}
else if (vertex_x1_y >= vertex_x2_y)
{
alpha_y = (volumn_y * j_max + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_max + 1;
}
// Note: alpha_y_1 is the intersection where the path hit the 1st y plane inside the recon region
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6f )
{
alpha_z = MAX_infi;
k = k_min-1;
}
else if (vertex_x1_z <= vertex_x2_z)
{
alpha_z = (volumn_z * k_min + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_min - 1;
}
else if (vertex_x1_z > vertex_x2_z)
{
alpha_z = (volumn_z * k_max + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_max + 1;
}
/************ initialization (voxel_i,voxel_j,voxel_k) **************************/
// Note: (voxel_i,voxel_j,voxel_k) is the current x,y,z voxel index (@ the initial point at the boundary)
if (vertex_x1_x < vertex_x2_x)
voxel_i = i_min-1;
else
voxel_i = i_max;
if (vertex_x1_y < vertex_x2_y)
voxel_j = j_min-1;
else
voxel_j = j_max;
if (vertex_x1_z < vertex_x2_z)
voxel_k = k_min-1;
else
voxel_k = k_max;
/***************** Updating alpha_x, alpha_y, alpha_z, ************************/
// Note: (alpha_x, alpha_y, alpha_z) the intersection where the path hit the next (i.e. 1st here ) x/y/z plane inside the recon
d_x1_x2 = sqrt((vertex_x2_x-vertex_x1_x)*(vertex_x2_x-vertex_x1_x) + (vertex_x2_y-vertex_x1_y)*(vertex_x2_y - vertex_x1_y) + (vertex_x2_z-vertex_x1_z)*(vertex_x2_z-vertex_x1_z) );
alpha_c = alpha_min; // intersection where the path hit the 1st plane at the boundary of recon region
    // Note: (i,j,k) is the (x,y,z) plane index of the current intersection (with a certain plane);
    // if the crossing would not fall exactly on an integer plane index, the index of its
    // predecessor along the ray is used
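    // Traversal loop: at every step the smallest of the three pending plane crossings
    // (alpha_x, alpha_y, alpha_z) is the next event along the ray. The segment between the
    // current position alpha_c and that event lies entirely inside one voxel, so its length
    // d_x1_x2 * (alpha_next - alpha_c) is added to one_ray_length and, scaled by the voxel
    // value, to one_ray_sum. The voxel index and the corresponding alpha are then advanced
    // along that axis; the extra branches handle ties where two or all three crossings
    // coincide (the ray hits an edge or a corner of the voxel grid).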
while (alpha_max - alpha_c > 1e-16f)
{
if ((voxel_i > M-1)||(voxel_i <0) || (voxel_j > N-1)||(voxel_j <0) || (voxel_k > ZETA-1)||(voxel_k <0))
{
alpha_c = alpha_max +1; // to terminate the loop
}
else
{
if ( (alpha_x < alpha_y) && (alpha_x < alpha_z))
// alpha_x is the nearest, so update alpha_x
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
//(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 1;
if (vertex_x1_x < vertex_x2_x)
{
i++;
voxel_i++;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i--; //(29)
voxel_i--;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
}
else if ( (alpha_y < alpha_x) && (alpha_y < alpha_z) )
// alpha_y is the nearest, so update alpha_y
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -1;
if (vertex_x1_y < vertex_x2_y)
{
j++;
voxel_j++;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j--;
voxel_j--;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_z < alpha_x) && (alpha_z < alpha_y) )
// alpha_z is the nearest, so update alpha_z
{
one_ray_length += d_x1_x2 * (alpha_z - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_z - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_z;
N_total_sec = N_total_sec -1;
if (vertex_x1_z < vertex_x2_z)
{
k++;
voxel_k++;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k--;
voxel_k--;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_y) && (alpha_x < alpha_z) )
//x = y < z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x < alpha_y))// && (sphere_range<=1.0f) )
// x = z < y;
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_y == alpha_z) && (alpha_y < alpha_x))// && (sphere_range<=1.0f) )
// y = z < x
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -2;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x == alpha_y))// && (sphere_range<=1.0f) )
// x=y=z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 3;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
}
}// end tracing the ray
}//end if the ray interacts with the volume
if (one_ray_length < volumn_z*1e-6f)
d_proj_correction[proj_pixel_index] = 0.0f;
else
{
if (command == 0)
d_proj_correction[proj_pixel_index] = one_ray_sum; // forward operator
else if (command == 1)
d_proj_correction[proj_pixel_index] = (d_proj_data[proj_pixel_index] - one_ray_sum)/one_ray_length; // projection correction (for SART)
}
// __syncthreads();
}
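// forward_ray_driven_3d_kernel_correction_separate below traces the same rays but writes the
// total intersection length and the length-weighted sum as two separate outputs instead of the
// combined correction, presumably so that per-device partial results over sub-volumes can be
// accumulated with kernel_add_proj and only then turned into the SART correction by
// kernel_divide_proj.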
__global__ void forward_ray_driven_3d_kernel_correction_separate(float *d_f , float *d_proj_sumLen, float *d_proj_weightedLen, float sin_theta, float cos_theta, int subVolIdx)
{
// d_f: 3D object array; d_f[i,j,k] = d_f [k*M*N+j*M+i];
    // d_proj_sumLen: per-ray total intersection length with the volume (output of this function)
    // d_proj_weightedLen: per-ray length-weighted sum of voxel values, i.e. the forward projection (output of this function)
int Detector_x_idx = threadIdx.x + blockDim.x * blockIdx.x;
int Detector_z_idx = blockIdx.y;
int proj_pixel_index = Detector_z_idx * R + Detector_x_idx;
// Source positions (X2): Coordinate in (x,y,z) system ---
float vertex_x2_x,vertex_x2_y,vertex_x2_z;
if (CT_style==0) //CBCT
{
vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
vertex_x2_z = Source_z;
}
else if (CT_style==1) //FBCT
{
vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
vertex_x2_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x;
}
else if (CT_style==2) //parallel beam
{
vertex_x2_x = Source_x * cos_theta - (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * sin_theta;
vertex_x2_y = Source_x * sin_theta + (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * cos_theta;
vertex_x2_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x;
}
// Detector element center positions (X1): Coordinate in (x,y,z) system ---
float vertex_x1_x,vertex_x1_y,vertex_x1_z;
vertex_x1_x = DOD * cos_theta - (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * sin_theta;
vertex_x1_y = DOD * sin_theta + (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * cos_theta;
vertex_x1_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x;
// Notice: in this system, vertex_x1_x < 0 < vertex_x2_x
float inv_x_diff = 1.0f / (vertex_x2_x - vertex_x1_x);
float inv_y_diff = 1.0f / (vertex_x2_y - vertex_x1_y);
float inv_z_diff = 1.0f / (vertex_x2_z - vertex_x1_z);
float BOUNDARY_VOXEL_Z = boundary_voxel_z + volumn_z*ZETA/Number_of_Devices*subVolIdx;
int ZETA_new = ZETA/Number_of_Devices;
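    // Multi-GPU decomposition: each device owns a contiguous slab of ZETA_new =
    // ZETA / Number_of_Devices voxel slices along z, so the slab's lower z boundary is shifted
    // to BOUNDARY_VOXEL_Z for sub-volume subVolIdx. For FBCT and parallel-beam geometries
    // (where rays stay within a z slice) detector rows outside the slab are skipped outright in
    // the guard below; for CBCT every ray may cross the slab and is traced against the reduced
    // [BOUNDARY_VOXEL_Z, BOUNDARY_VOXEL_Z + ZETA_new * volumn_z] extent.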
/*****************************************/
float alpha_x_min= 0.0f, alpha_y_min= 0.0f, alpha_z_min= 0.0f;
float alpha_x_max= 0.0f, alpha_y_max= 0.0f, alpha_z_max= 0.0f;
float alpha_min= 0.0f, alpha_max= 0.0f;
int i_min=0, j_min=0, k_min=0;
int i_max=0, j_max=0, k_max=0;
int i=0, j=0, k=0;
int voxel_i=0, voxel_j=0, voxel_k=0;
float alpha_x=0.0f, alpha_y=0.0f, alpha_z=0.0f;
float one_ray_sum = 0.0f;
float one_ray_length = 0.0f;
float alpha_c= 0.0f;
float d_x1_x2= 0.0f;
int N_total_sec=0;
int next_alpha_index;
/**** Step 1 :find out alpha_min, alpha_max ********/
if ( (CT_style==1 || CT_style==2) && (Detector_z_idx<Z_prj/Number_of_Devices*subVolIdx || Detector_z_idx>=Z_prj/Number_of_Devices*(subVolIdx+1)) )
{
one_ray_sum = 0.0f;
one_ray_length = 0.00f;
}
else // if ( (vertex_x1_x != vertex_x2_x) && (vertex_x1_y != vertex_x2_y) )
{
alpha_min = (boundary_voxel_x + volumn_x*0 - vertex_x1_x )* inv_x_diff; //(9)
alpha_max = (boundary_voxel_x + volumn_x*M - vertex_x1_x )* inv_x_diff;
        // Note: at this point it is not yet determined which of the two is the parametric value of
        // the ray's first intersection with the x-planes; that depends on whether the source or the
        // detector lies on the left side of the reconstruction region for this view
alpha_x_min = fmin(alpha_min, alpha_max); //(5)
alpha_x_max = fmax(alpha_min, alpha_max ); //(6)
alpha_min = (boundary_voxel_y + volumn_y*0 - vertex_x1_y )* inv_y_diff;
alpha_max = (boundary_voxel_y + volumn_y*N - vertex_x1_y )* inv_y_diff;
alpha_y_min = fmin(alpha_min, alpha_max); //(7)
alpha_y_max = fmax(alpha_min, alpha_max ); //(8)
alpha_min = (BOUNDARY_VOXEL_Z + volumn_z*0 - vertex_x1_z )* inv_z_diff;
alpha_max = (BOUNDARY_VOXEL_Z + volumn_z*ZETA_new - vertex_x1_z )* inv_z_diff;
alpha_z_min = fmin(alpha_min, alpha_max);
alpha_z_max = fmax(alpha_min, alpha_max );
// alpha_min / alpha_max reused
alpha_min = fmax(fmax(alpha_x_min, alpha_y_min), fmax(alpha_y_min, alpha_z_min)); //(3)
// i.e. alpha_min = fmax(alpha_x_min,alpha_y_min,alpha_z_min)
// it indicates the point where the path interacts with the near boundary of reconstruction region
alpha_max = fmin(fmin(alpha_x_max, alpha_y_max), fmin(alpha_y_max, alpha_z_max)); //(4)
// i.e. alpha_max = fmin(alpha_x_max,alpha_y_max,alpha_z_max)
// it indicates the point where the path last interacts with the far boundary of reconstruction region
/********Step 2,3: Find i_max, i_min***************/
if (alpha_max <= alpha_min) // It means no interaction of the ray and the volume
{
one_ray_length = 0.0f ;
one_ray_sum=0.0f;
}
else
{
// X direction
if (vertex_x1_x < vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_min = 1; //(11)
else //if (alpha_min != alpha_x_min)
i_min = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(12)
/* Note: i_min is the index of the 1st x plane where the path interacts inside the reconstruction region
* It is not the index of alpha_x_min
*/
if (alpha_max == alpha_x_max)
i_max = M; //(13)
else //if (alpha_max != alpha_x_max)
i_max = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(14)
// Note: i_max is the index of the last x plane where the path interacts with the reconstruction region (inside or boundary)
}
else //if (vertex_x1_x >= vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_max = M-1; //(15)
else //if (alpha_min != alpha_x_min)
i_max = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(16)
if (alpha_max == alpha_x_max)
i_min = 0; //(17)
else //if (alpha_max != alpha_x_max)
i_min = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(18)
}
// Note: overall, i_min is the most left x-plane, i_max the most right x-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Y direction
if (vertex_x1_y < vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_min = 1;
else //f (alpha_min != alpha_y_min)
j_min = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) + 1 ;
if (alpha_max == alpha_y_max)
j_max = N;
else //if (alpha_max != alpha_y_max)
j_max = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) ;
}
else //if (vertex_x1_y >= vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_max = N-1;
else //if (alpha_min != alpha_y_min)
j_max = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) ;
if (alpha_max == alpha_y_max)
j_min = 0;
else //if (alpha_max != alpha_y_max)
j_min = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) + 1 ;
}
// Note: overall, j_min is the most bottom y-plane, j_max the most top y-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Z direction
if (vertex_x1_z < vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_min = 1;
else //if (alpha_min != alpha_z_min)
k_min = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - BOUNDARY_VOXEL_Z )*inv_volumn_z) + 1 ;
if (alpha_max == alpha_z_max)
k_max = ZETA_new;
else //if (alpha_max != alpha_z_max)
k_max = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - BOUNDARY_VOXEL_Z )*inv_volumn_z) ;
}
else //if (vertex_x1_z >= vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_max = ZETA_new-1;
else //if (alpha_min != alpha_z_min)
k_max = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - BOUNDARY_VOXEL_Z )*inv_volumn_z) ;
if (alpha_max == alpha_z_max)
k_min = 0;
else //if (alpha_max != alpha_z_max)
k_min = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z -BOUNDARY_VOXEL_Z )*inv_volumn_z) + 1 ;
}
/************ initialization (i,j,k) (alpha_x_1,alpha_y_1,alpha_z_1)**************************/
// Note: (i,j,k) is the current x,y,z plane index (@ the initial point at the boundary)
// Note: (alpha_x,alpha_y,alpha_z) is the next x,y,z plane to go.
N_total_sec = i_max - i_min + 1 + j_max - j_min +1 + k_max-k_min +1;
// i.e. N_p (25)
if (fabs(vertex_x1_x-vertex_x2_x)<volumn_x*1e-6f )
{
alpha_x = MAX_infi;
i = i_min-1;
}
else if (vertex_x1_x < vertex_x2_x)
{
alpha_x = (volumn_x * i_min + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_min - 1;
}
else if (vertex_x1_x > vertex_x2_x)
{
alpha_x = (volumn_x * i_max + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_max + 1;
}
// Note: alpha_x_1 is the intersection where the path hit the 1st x plane inside the recon region
if (fabs(vertex_x1_y-vertex_x2_y)<volumn_y*1e-6f )
{
alpha_y = MAX_infi;
j = j_min-1;
}
else if (vertex_x1_y < vertex_x2_y)
{
alpha_y = (volumn_y * j_min + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_min - 1;
}
else if (vertex_x1_y > vertex_x2_y)
{
alpha_y = (volumn_y * j_max + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_max + 1;
}
// Note: alpha_y_1 is the intersection where the path hit the 1st y plane inside the recon region
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6f )
{
alpha_z = MAX_infi;
k = k_min-1;
}
else if (vertex_x1_z < vertex_x2_z)
{
alpha_z = (volumn_z * k_min + BOUNDARY_VOXEL_Z - vertex_x1_z )* inv_z_diff;
k = k_min - 1;
}
else if (vertex_x1_z > vertex_x2_z)
{
alpha_z = (volumn_z * k_max + BOUNDARY_VOXEL_Z - vertex_x1_z )* inv_z_diff;
k = k_max + 1;
}
/************ initialization (voxel_i,voxel_j,voxel_k) **************************/
// Note: (voxel_i,voxel_j,voxel_k) is the current x,y,z voxel index (@ the initial point at the boundary)
if (vertex_x1_x < vertex_x2_x)
voxel_i = i_min-1;
else
voxel_i = i_max;
if (vertex_x1_y < vertex_x2_y)
voxel_j = j_min-1;
else
voxel_j = j_max;
if (vertex_x1_z < vertex_x2_z)
voxel_k = k_min-1;
else
voxel_k = k_max;
/***************** Updating alpha_x, alpha_y, alpha_z, ************************/
// Note: (alpha_x, alpha_y, alpha_z) the intersection where the path hit the next (i.e. 1st here ) x/y/z plane inside the recon
d_x1_x2 = sqrt((vertex_x2_x-vertex_x1_x)*(vertex_x2_x-vertex_x1_x) + (vertex_x2_y-vertex_x1_y)*(vertex_x2_y - vertex_x1_y) + (vertex_x2_z-vertex_x1_z)*(vertex_x2_z-vertex_x1_z) );
alpha_c = alpha_min; // intersection where the path hit the 1st plane at the boundary of recon region
// Note : (i,j,k) is the (x,y,z) plane index of the current intersection (with a certain plane)
// If i, j or k would not be an integer here, take its predecessor (along the ray)
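// Siddon-style traversal: at each step jump to the nearest of the next x/y/z planes,
// accumulate the length-weighted voxel contribution, then advance the corresponding plane and voxel indices.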
while (alpha_max - alpha_c > 1e-16f)
{
if ((voxel_i > M-1)||(voxel_i <0) || (voxel_j > N-1)||(voxel_j <0) || (voxel_k > ZETA_new-1)||(voxel_k <0))
{
alpha_c = alpha_max +1; // to terminate the loop
}
else
{
if ( (alpha_x < alpha_y) && (alpha_x < alpha_z))
// alpha_x is the nearest, so update alpha_x
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
//(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 1;
if (vertex_x1_x < vertex_x2_x)
{
i++;
voxel_i++;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i--; //(29)
voxel_i--;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
}
else if ( (alpha_y < alpha_x) && (alpha_y < alpha_z) )
// alpha_y is the nearest, so update alpha_y
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -1;
if (vertex_x1_y < vertex_x2_y)
{
j++;
voxel_j++;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j--;
voxel_j--;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_z < alpha_x) && (alpha_z < alpha_y) )
// alpha_z is the nearest, so update alpha_z
{
one_ray_length += d_x1_x2 * (alpha_z - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_z - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_z;
N_total_sec = N_total_sec -1;
if (vertex_x1_z < vertex_x2_z)
{
k++;
voxel_k++;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k--;
voxel_k--;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + BOUNDARY_VOXEL_Z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_y) && (alpha_x < alpha_z) )
//x = y < z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x < alpha_y))// && (sphere_range<=1.0f) )
// x = z < y;
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + BOUNDARY_VOXEL_Z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_y == alpha_z) && (alpha_y < alpha_x))// && (sphere_range<=1.0f) )
// y = z < x
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -2;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + BOUNDARY_VOXEL_Z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x == alpha_y))// && (sphere_range<=1.0f) )
// x=y=z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 3;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + BOUNDARY_VOXEL_Z - vertex_x1_z )* inv_z_diff;
}
}
}// end tracing the ray
}//else if the ray interacts with the volume
}//else if the ray is oblique
d_proj_weightedLen[proj_pixel_index] = one_ray_sum ;
d_proj_sumLen[proj_pixel_index] = one_ray_length;
// __syncthreads();
}
__global__ void backprj_ray_driven_3d_kernel_multiGPU(float *d_volumn_kernel, float *d_proj_correction, float beta_temp, float sin_theta, float cos_theta, int subVolIdx, int command)
{
/*
* Reference: "Accelerating simultaneous algebraic reconstruction technique with motion compensation using CUDA-enabled GPU"
* Wai-Man Pang, CUHK
* Section: Back-projection and image update
* d_proj_correction : 2D projection correction, i.e. c(i) in the Wai-Man Pang, CUHK paper
* t_theta : projection angle
* beta_temp : lambda in the paper
* d_volumn: 3D object array
* d_volumn(j) = d_volumn(j) + beta_temp * sum_i (c(i)*w(ij)) / sum_i (w(ij)); where i is ray index, j is voxel index
*/
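// Thread-to-voxel mapping used below: one thread per voxel, with the x index taken from
// (threadIdx.x, blockIdx.x), the y index from blockIdx.y and the z index from blockIdx.z.
// The host-side launch is not shown here; presumably grid = (M/blockDim.x, N, ZETA/Number_of_Devices).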
int Idx_voxel_x = threadIdx.x + blockIdx.x * blockDim.x;
int Idx_voxel_y = blockIdx.y;
int Idx_voxel_z = blockIdx.z;
int image_voxel_index = M * N * Idx_voxel_z + M * Idx_voxel_y + Idx_voxel_x;
//coordinate of center of each voxel in x-y-z system
float coord_voxel_x = boundary_voxel_x + volumn_x*0.5f + Idx_voxel_x * volumn_x;
float coord_voxel_y = boundary_voxel_y + volumn_y*0.5f + Idx_voxel_y * volumn_y;
float coord_voxel_z = boundary_voxel_z + volumn_z*(ZETA/Number_of_Devices*subVolIdx+0.5f) + Idx_voxel_z * volumn_z;
/**************************************/
float coord_vertex_x=0.0f, coord_vertex_y=0.0f, coord_vertex_z=0.0f;
float coord_vertex_s=0.0f, coord_vertex_t=0.0f;
float coord_vertexOnDetector_x=0.0f, coord_vertexOnDetector_z=0.0f;
float minY = MAX_infi, minZ=MAX_infi, maxY=-MAX_infi, maxZ=-MAX_infi;
float coord_pixelOnDetector_x=0.0f, coord_pixelOnDetector_y=0.0f, coord_pixelOnDetector_z=0.0f;
float coord_source_x=0.0f, coord_source_y=0.0f, coord_source_z=0.0f;
float alpha_x_i_1=0.0f, alpha_x_i=0.0f;
float alpha_y_i_1=0.0f, alpha_y_i=0.0f;
float alpha_z_i_1=0.0f, alpha_z_i=0.0f;
float alpha_x_temp=0.0f, alpha_y_temp=0.0f, alpha_z_temp=0.0f;
float alpha_min=0.0f, alpha_max=0.0f;
int minY_index=0, maxY_index=0, minZ_index=0, maxZ_index=0;
float sumWeight=0.0f, sumLength=0.0f;
float d_x1_x2=0.0f;
float inv_Detector_pixel = 1.0f/Detector_pixel_x;
// float weight = 1.0f;
// float tao;
// float tao_m1 = atan( (float(R)*Detector_pixel_x/2.0f-abs(Offset)) / DSO);
/***********************************************************/
if ( (Idx_voxel_x-(float(M)*0.5f-0.5f)-M_Offset)*volumn_x*(Idx_voxel_x-(float(M)*0.5f-0.5f)-M_Offset)*volumn_x
+ (Idx_voxel_y-(float(N)*0.5f-0.5f))*volumn_y*(Idx_voxel_y-(float(N)*0.5f-0.5f))*volumn_y
>= (float(M)*0.5f-0.5f)*volumn_x*(float(N)*0.5f-0.5f)*volumn_y )
{
sumLength = 0.0f;
sumWeight = 0.0f;
}
else
// Note: The following code applies to all the voxels simultaneously
{
/******** investigate the eight vertices of each voxel ********/
for (int k=0;k<2;k++)
for (int j=0;j<2;j++)
for (int i=0;i<2;i++)
{
//coordinate for each of eight vertices of the voxel
coord_vertex_x = coord_voxel_x + (i)*volumn_x - 0.5f*volumn_x;
coord_vertex_y = coord_voxel_y + (j)*volumn_y - 0.5f*volumn_y;
coord_vertex_z = coord_voxel_z + (k)*volumn_z - 0.5f*volumn_z;
// <t-s> <----> <x,y>
coord_vertex_t = coord_vertex_x * cos_theta + coord_vertex_y * sin_theta;
coord_vertex_s = - coord_vertex_x * sin_theta + coord_vertex_y * cos_theta;
// Note: Now rotate the image volume (by -t_theta degrees) instead of the normal gantry rotation
// In the new coordinate system, the detector plane is unchanged and is perpendicular to the t axis
// the projection of the vertex of the voxel on the detector, in <t,s> system
if (CT_style==0) //CBCT geometry
{
coord_vertexOnDetector_x = (coord_vertex_t - DOD) / (DSO- coord_vertex_t) * (coord_vertex_s - Source_y) + coord_vertex_s ;
coord_vertexOnDetector_z = (coord_vertex_t - DOD) / (DSO- coord_vertex_t) * (coord_vertex_z - Source_z) + coord_vertex_z ;
}
else if (CT_style==1) //FBCT geometry, no magnification along z axis
{
coord_vertexOnDetector_x = (coord_vertex_t - DOD) / (DSO- coord_vertex_t) * (coord_vertex_s - Source_y) + coord_vertex_s ;
coord_vertexOnDetector_z = coord_voxel_z ;
}
else if (CT_style==2) //PBCT, direct projection
{
coord_vertexOnDetector_x = coord_vertex_s;
coord_vertexOnDetector_z = coord_voxel_z ;
}
// the projection of the vertex of the voxel
minY= fmin(minY, coord_vertexOnDetector_x);
maxY= fmax(maxY, coord_vertexOnDetector_x);
minZ= fmin(minZ, coord_vertexOnDetector_z);
maxZ= fmax(maxZ, coord_vertexOnDetector_z);
// form a minimum bounding rectangle (MBR) for these vertices
}
minY_index = floor( (minY - Detector_Ymin ) * inv_Detector_pixel +0.5f);
maxY_index = floor( (maxY - Detector_Ymin ) * inv_Detector_pixel +0.5f);
minZ_index = floor( (minZ - Detector_Zmin ) * inv_Detector_pixel +0.5f);
maxZ_index = floor( (maxZ - Detector_Zmin ) * inv_Detector_pixel +0.5f);
// index of pixels of MBR boundaries on the detector
/***********************************/
// If this voxel does not project on this detector plane, it means there is no ray passing through this voxel at this angle.
if ((minY_index<0) && (maxY_index <0) || minY_index>(R-1) && maxY_index >(R-1) || (minZ_index<0) && (maxZ_index <0) || (minZ_index>(Z_prj-1)) && (maxZ_index >(Z_prj -1)))
{
sumWeight = 0.0f;
sumLength = 0.0f;
}
else
// If this voxel projects on the detector plane
{
if (minY_index <=0)
minY_index = 0;
if (maxY_index >=(R-1) )
maxY_index = R-1;
if (minZ_index <=0)
minZ_index = 0;
if (maxZ_index >=(Z_prj-1) )
maxZ_index = Z_prj-1;
// coordinate of the source in (x,y,z) system after normal gantry rotation
if (CT_style==0) // CBCT geometry, single source
{
coord_source_x = Source_x * cos_theta - Source_y * sin_theta;
coord_source_y = Source_x * sin_theta + Source_y * cos_theta;
coord_source_z = Source_z;
}
else if (CT_style==1) // FBCT geometry, multiple sources
{
coord_source_x = Source_x * cos_theta - Source_y * sin_theta;
coord_source_y = Source_x * sin_theta + Source_y * cos_theta;
coord_source_z = coord_voxel_z;
}
else if (CT_style==2)
{
// NOT defined here.
// The source position goes with the detector element
}
// for those projection pixels whose coordinates locate inside the MBR
// Each pixel corresponds to a ray, and that ray must pass through this specific voxel
for (int j=minZ_index; j<=maxZ_index; j++)
for (int i=minY_index; i<=maxY_index; i++)
{
coord_pixelOnDetector_x = DOD * cos_theta - (Detector_Ymin + i*Detector_pixel_x) * sin_theta ;
coord_pixelOnDetector_y = DOD * sin_theta + (Detector_Ymin + i*Detector_pixel_x) * cos_theta ;
coord_pixelOnDetector_z = Detector_Zmin + j*Detector_pixel_x;
// coordinate of the detector pixel inside MBR in (x,y,z) system after normal gantry rotation
if (CT_style==2)
{
coord_source_x = Source_x * cos_theta - (Detector_Ymin + i*Detector_pixel_x) * sin_theta;
coord_source_y = Source_x * sin_theta + (Detector_Ymin + i*Detector_pixel_x) * cos_theta;
coord_source_z = coord_voxel_z;
}
/** Weighted Update for Half Detector **/
// if ( (float(i)*Detector_pixel_x) < 2.0f*abs(Offset) )
// weight = 1.0f;
// else
// {
// tao = atan( ( float(R/2-i)*Detector_pixel_x + abs(Offset) ) / DSO);
// weight = cos(PI/4*(tao/tao_m1 - 1));
// weight = weight * weight;
// }
/******/
// Next: investigate the line starting at x1 and ending at x2
alpha_x_i_1 = ( (coord_voxel_x - 0.5f*volumn_x) - coord_pixelOnDetector_x )/( coord_source_x - coord_pixelOnDetector_x );
alpha_x_i = ( (coord_voxel_x + 0.5f*volumn_x) - coord_pixelOnDetector_x )/( coord_source_x - coord_pixelOnDetector_x );
alpha_y_i_1 = ( (coord_voxel_y - 0.5f*volumn_y) - coord_pixelOnDetector_y )/( coord_source_y - coord_pixelOnDetector_y );
alpha_y_i = ( (coord_voxel_y + 0.5f*volumn_y) - coord_pixelOnDetector_y )/( coord_source_y - coord_pixelOnDetector_y );
alpha_z_i_1 = ( (coord_voxel_z - 0.5f*volumn_z) - coord_pixelOnDetector_z )/( coord_source_z - coord_pixelOnDetector_z );
alpha_z_i = ( (coord_voxel_z + 0.5f*volumn_z) - coord_pixelOnDetector_z )/( coord_source_z - coord_pixelOnDetector_z );
// find out indices of the two closest x planes near this specific voxel
alpha_x_temp = fmin((alpha_x_i_1), (alpha_x_i));
alpha_y_temp = fmin((alpha_y_i_1), (alpha_y_i));
alpha_z_temp = fmin((alpha_z_i_1), (alpha_z_i));
alpha_min = fmax(fmax(alpha_x_temp, alpha_y_temp), fmax(alpha_y_temp, alpha_z_temp));
// alpha_min is the entry point for one specific voxel
alpha_x_temp = fmax((alpha_x_i_1), (alpha_x_i));
alpha_y_temp = fmax((alpha_y_i_1), (alpha_y_i));
alpha_z_temp = fmax((alpha_z_i_1), (alpha_z_i));
alpha_max = fmin(fmin(alpha_x_temp, alpha_y_temp), fmin(alpha_y_temp, alpha_z_temp));
// alpha_max is the exit point of the line passing through this voxel
if (alpha_max-alpha_min>0) // if the value is negative, it means the ray does not pass through this voxel
{
d_x1_x2 = sqrt((coord_source_x-coord_pixelOnDetector_x)*(coord_source_x-coord_pixelOnDetector_x) + (coord_source_y-coord_pixelOnDetector_y)*(coord_source_y - coord_pixelOnDetector_y) + (coord_source_z-coord_pixelOnDetector_z)*(coord_source_z-coord_pixelOnDetector_z) );
float temp = d_x1_x2*(alpha_max-alpha_min);
if ( temp > volumn_x*1e-6f)
// the line passes through the voxel with a sufficient length;
{
sumWeight = sumWeight + temp*d_proj_correction[j*R + i];
// Note: d_proj_correction[j*R + i] is c(i) which has been previously calculated
// Note: d_x1_x2 * (alpha_max - alpha_min) is w(i) for ray i of this projection
sumLength = sumLength + temp;
}
}
}// end for loop: all the rays whose projection fits in the rectangle
}//end else if this voxel projects on this detector plane
}//end else if the reconstruction region is in the circle
if (sumLength < volumn_x*1e-6f)
{
// d_volumn_kernel[image_voxel_index] += 0.0f ;
}
else
{
if (command==0)
d_volumn_kernel[image_voxel_index] = sumWeight ; // matched adjoint operator, for test use
else if (command==1)
d_volumn_kernel[image_voxel_index] += beta_temp * sumWeight/sumLength ;
}
// __syncthreads();
}
| 5463c8f33c33cd0dc4ae94787634bf3d7c74882b.cu | __global__ void kernel_add_proj(float *d_a, float *d_b)
{
int idx = blockDim.x * gridDim.x * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
d_a[idx]=d_a[idx]+d_b[idx];
}
__global__ void kernel_divide_proj(float *h_proj_correction, float *h_proj_data, float *h_proj_sumLen, float *h_proj_weightedLen)
{
int idx = blockDim.x * gridDim.x * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
float temp = h_proj_sumLen[idx];
if ( temp < volumn_z*1e-6f)
h_proj_correction[idx] = 0.0f;
else
{
h_proj_correction[idx] = (h_proj_data[idx] - h_proj_weightedLen[idx])*1.0f / temp ;
}
}
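// Illustrative host-side sketch (not part of the original file): one possible way to combine
// per-device partial projections and form the correction with the two helper kernels above.
// The function name, the *_partial buffer names and the 128-thread block size are assumptions
// made for illustration; R is assumed to be a multiple of the block size, which matches the
// index arithmetic idx = blockDim.x*gridDim.x*blockIdx.y + blockDim.x*blockIdx.x + threadIdx.x.
static void example_form_projection_correction(float *d_proj_correction, float *d_proj_data,
                                               float *d_proj_sumLen, float *d_proj_weightedLen,
                                               float *d_sumLen_partial, float *d_weightedLen_partial)
{
    dim3 block(128, 1, 1);
    dim3 grid(R / 128, Z_prj, 1);   // one thread per detector pixel of an R x Z_prj projection
    // accumulate another device's partial ray sums into the full buffers
    kernel_add_proj<<<grid, block>>>(d_proj_sumLen, d_sumLen_partial);
    kernel_add_proj<<<grid, block>>>(d_proj_weightedLen, d_weightedLen_partial);
    // c(i) = (p(i) - weightedLen(i)) / sumLen(i), with a zero fallback for near-zero lengths
    kernel_divide_proj<<<grid, block>>>(d_proj_correction, d_proj_data,
                                        d_proj_sumLen, d_proj_weightedLen);
    cudaDeviceSynchronize();
}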
__global__ void forward_ray_driven_3d_kernel_correction_multiGPU(float *d_f , float *d_proj_correction, float *d_proj_data, float sin_theta, float cos_theta, int subPrjIdx, int command)
{
// d_f: 3D object array; d_f[i,j,k] = d_f [k*M*N+j*M+i];
// d_proj_data: 2D projection acquired at the angle of t_theta (only a portion of the whole projection view)
// d_proj_correction: 2D projection correction (output of this function, i.e. c(i) in the paper)
// subPrjIdx: sub projection portion index
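// command == 0: d_proj_correction receives the plain forward projection (one_ray_sum);
// command == 1: d_proj_correction receives the SART correction (d_proj_data - one_ray_sum) / one_ray_length.
// Thread mapping: one thread per detector pixel, x from (threadIdx.x, blockIdx.x), z from blockIdx.y.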
int Detector_x_idx = threadIdx.x + blockDim.x * blockIdx.x;
int Detector_z_idx = blockIdx.y;
int proj_pixel_index = Detector_z_idx * R + Detector_x_idx;
// Source position (X2): coordinate in (x,y,z) system .
float vertex_x2_x,vertex_x2_y,vertex_x2_z;
if (CT_style==0) //CBCT
{
vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
vertex_x2_z = Source_z;
}
else if (CT_style==1) //FBCT
{
vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
vertex_x2_z = Detector_Zmin + (Z_prj/Number_of_Devices*subPrjIdx+Detector_z_idx) * Detector_pixel_x;
}
else if (CT_style==2) //parallel beam
{
vertex_x2_x = Source_x * cos_theta - (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * sin_theta;
vertex_x2_y = Source_x * sin_theta + (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * cos_theta;
vertex_x2_z = Detector_Zmin + (Z_prj/Number_of_Devices*subPrjIdx+Detector_z_idx) * Detector_pixel_x;
}
// Detector element center positions (X1): Coordinate in (x,y,z) system ---
float vertex_x1_x,vertex_x1_y,vertex_x1_z;
vertex_x1_x = DOD * cos_theta - (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * sin_theta;
vertex_x1_y = DOD * sin_theta + (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * cos_theta;
vertex_x1_z = Detector_Zmin + (Z_prj/Number_of_Devices*subPrjIdx+Detector_z_idx) * Detector_pixel_x;
// Notice: in this system, vertex_x1_x < 0 < vertex_x2_x
float inv_x_diff = 1.0f / (vertex_x2_x - vertex_x1_x);
float inv_y_diff = 1.0f / (vertex_x2_y - vertex_x1_y);
float inv_z_diff = 1.0f / (vertex_x2_z - vertex_x1_z);
/*****************************************/
float alpha_x_min= 0.0f, alpha_y_min= 0.0f, alpha_z_min= 0.0f;
float alpha_x_max= 0.0f, alpha_y_max= 0.0f, alpha_z_max= 0.0f;
float alpha_min= 0.0f, alpha_max= 0.0f;
int i_min=0, j_min=0, k_min=0;
int i_max=0, j_max=0, k_max=0;
int i=0, j=0, k=0;
int voxel_i=0, voxel_j=0, voxel_k=0;
float alpha_x=0.0f, alpha_y=0.0f, alpha_z=0.0f;
float one_ray_sum = 0.0f;
float one_ray_length = 0.0f;
float alpha_c= 0.0f;
float d_x1_x2= 0.0f;
int N_total_sec=0;
int next_alpha_index;
/**** Step 1 :find out alpha_min, alpha_max ********/
alpha_min = (boundary_voxel_x + volumn_x*0 - vertex_x1_x )* inv_x_diff; //(9)
alpha_max = (boundary_voxel_x + volumn_x*M - vertex_x1_x )* inv_x_diff;
// Notice: it is still uncertain at this point which one is the parametric value of the first intersection point of the ray with the x-plane
// It depends on whether the source or the detector lies on the left side of the reconstruction region at this time
alpha_x_min = fmin(alpha_min, alpha_max); //(5)
alpha_x_max = fmax(alpha_min, alpha_max ); //(6)
alpha_min = (boundary_voxel_y + volumn_y*0 - vertex_x1_y )* inv_y_diff;
alpha_max = (boundary_voxel_y + volumn_y*N - vertex_x1_y )* inv_y_diff;
alpha_y_min = fmin(alpha_min, alpha_max); //(7)
alpha_y_max = fmax(alpha_min, alpha_max ); //(8)
alpha_min = (boundary_voxel_z + volumn_z*0 - vertex_x1_z )* inv_z_diff;
alpha_max = (boundary_voxel_z + volumn_z*ZETA - vertex_x1_z )* inv_z_diff;
// Note: when (vertex_x2_z == vertex_x1_z), alpha_min = -inf, alpha_max = inf.
alpha_z_min = fmin(alpha_min, alpha_max);
alpha_z_max = fmax(alpha_min, alpha_max );
// alpha_min / alpha_max reused
alpha_min = fmax(fmax(alpha_x_min, alpha_y_min), fmax(alpha_y_min, alpha_z_min)); //(3)
// i.e. alpha_min = fmax(alpha_x_min,alpha_y_min,alpha_z_min)
// it indicates the point where the path interacts with the near boundary of reconstruction region
alpha_max = fmin(fmin(alpha_x_max, alpha_y_max), fmin(alpha_y_max, alpha_z_max)); //(4)
// i.e. alpha_max = fmin(alpha_x_max,alpha_y_max,alpha_z_max)
// it indicates the point where the path last interacts with the far boundary of reconstruction region
/********Step 2,3: Find i_max, i_min***************/
if (alpha_max <= alpha_min) // It means no interaction of the ray and the volume
one_ray_length = 0.0f ;
else
{
// X direction
if (vertex_x1_x < vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_min = 1; //(11)
else //if (alpha_min != alpha_x_min)
i_min = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(12)
/* Note: i_min is the index of the 1st x plane where the path interacts inside the reconstruction region
* It is not the index of alpha_x_min
*/
if (alpha_max == alpha_x_max)
i_max = M; //(13)
else //if (alpha_max != alpha_x_max)
i_max = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(14)
// Note: i_max is the index of the last x plane where the path interacts with the reconstruction region (inside or boundary)
}
else //if (vertex_x1_x >= vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_max = M-1; //(15)
else //if (alpha_min != alpha_x_min)
i_max = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(16)
if (alpha_max == alpha_x_max)
i_min = 0; //(17)
else //if (alpha_max != alpha_x_max)
i_min = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(18)
}
// Note: overall, i_min is the most left x-plane, i_max the most right x-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Y direction
if (vertex_x1_y < vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_min = 1;
else //f (alpha_min != alpha_y_min)
j_min = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) + 1 ;
if (alpha_max == alpha_y_max)
j_max = N;
else //if (alpha_max != alpha_y_max)
j_max = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) ;
}
else //if (vertex_x1_y >= vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_max = N-1;
else //if (alpha_min != alpha_y_min)
j_max = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) ;
if (alpha_max == alpha_y_max)
j_min = 0;
else //if (alpha_max != alpha_y_max)
j_min = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) + 1 ;
}
// Note: overall, j_min is the most bottom y-plane, j_max the most top y-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Z direction
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6f )
{
k_min = floor(( vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
k_max = floor(( vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
// Note: this condition can be combined into either of the two branches.
}
else if (vertex_x1_z < vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_min = 1;
else //if (alpha_min != alpha_z_min)
k_min = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
if (alpha_max == alpha_z_max)
k_max = ZETA;
else //if (alpha_max != alpha_z_max)
k_max = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
}
else //if (vertex_x1_z > vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_max = ZETA-1;
else //if (alpha_min != alpha_z_min)
k_max = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
if (alpha_max == alpha_z_max)
k_min = 0;
else //if (alpha_max != alpha_z_max)
k_min = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z -boundary_voxel_z )*inv_volumn_z) + 1 ;
}
/************ initialization (i,j,k) (alpha_x_1,alpha_y_1,alpha_z_1)**************************/
// Note: (i,j,k) is the current x,y,z plane index (@ the initial point at the boundary)
// Note: (alpha_x,alpha_y,alpha_z) is the next x,y,z plane to go.
N_total_sec = i_max - i_min + 1 + j_max - j_min +1 + k_max-k_min +1;
// i.e. N_p (25)
if (fabs(vertex_x1_x-vertex_x2_x)<volumn_x*1e-6f )
{
alpha_x = MAX_infi;
i = i_min-1;
}
else if (vertex_x1_x < vertex_x2_x)
{
alpha_x = (volumn_x * i_min + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_min - 1;
}
else if (vertex_x1_x > vertex_x2_x)
{
alpha_x = (volumn_x * i_max + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_max + 1;
}
// Note: alpha_x_1 is the intersection where the path hit the 1st x plane inside the recon region
if (fabs(vertex_x1_y-vertex_x2_y)<volumn_y*1e-6f )
{
alpha_y = MAX_infi;
j = j_min-1;
}
else
if (vertex_x1_y < vertex_x2_y)
{
alpha_y = (volumn_y * j_min + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_min - 1;
}
else if (vertex_x1_y >= vertex_x2_y)
{
alpha_y = (volumn_y * j_max + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_max + 1;
}
// Note: alpha_y_1 is the intersection where the path hit the 1st y plane inside the recon region
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6f )
{
alpha_z = MAX_infi;
k = k_min-1;
}
else if (vertex_x1_z <= vertex_x2_z)
{
alpha_z = (volumn_z * k_min + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_min - 1;
}
else if (vertex_x1_z > vertex_x2_z)
{
alpha_z = (volumn_z * k_max + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_max + 1;
}
/************ initialization (voxel_i,voxel_j,voxel_k) **************************/
// Note: (voxel_i,voxel_j,voxel_k) is the current x,y,z voxel index (@ the initial point at the boundary)
if (vertex_x1_x < vertex_x2_x)
voxel_i = i_min-1;
else
voxel_i = i_max;
if (vertex_x1_y < vertex_x2_y)
voxel_j = j_min-1;
else
voxel_j = j_max;
if (vertex_x1_z < vertex_x2_z)
voxel_k = k_min-1;
else
voxel_k = k_max;
/***************** Updating alpha_x, alpha_y, alpha_z, ************************/
// Note: (alpha_x, alpha_y, alpha_z) the intersection where the path hit the next (i.e. 1st here ) x/y/z plane inside the recon
d_x1_x2 = sqrt((vertex_x2_x-vertex_x1_x)*(vertex_x2_x-vertex_x1_x) + (vertex_x2_y-vertex_x1_y)*(vertex_x2_y - vertex_x1_y) + (vertex_x2_z-vertex_x1_z)*(vertex_x2_z-vertex_x1_z) );
alpha_c = alpha_min; // intersection where the path hit the 1st plane at the boundary of recon region
// Note : (i,j,k) is the (x,y,z) plane index of the current intersection (with a certain plane)
// If i, j or k would not be an integer here, take its predecessor (along the ray)
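// Siddon-style traversal: at each step jump to the nearest of the next x/y/z planes,
// accumulate the length-weighted voxel contribution, then advance the corresponding plane and voxel indices.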
while (alpha_max - alpha_c > 1e-16f)
{
if ((voxel_i > M-1)||(voxel_i <0) || (voxel_j > N-1)||(voxel_j <0) || (voxel_k > ZETA-1)||(voxel_k <0))
{
alpha_c = alpha_max +1; // to terminate the loop
}
else
{
if ( (alpha_x < alpha_y) && (alpha_x < alpha_z))
// alpha_x is the nearest, so update alpha_x
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
//(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 1;
if (vertex_x1_x < vertex_x2_x)
{
i++;
voxel_i++;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i--; //(29)
voxel_i--;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
}
else if ( (alpha_y < alpha_x) && (alpha_y < alpha_z) )
// alpha_y is the nearest, so update alpha_y
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -1;
if (vertex_x1_y < vertex_x2_y)
{
j++;
voxel_j++;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j--;
voxel_j--;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_z < alpha_x) && (alpha_z < alpha_y) )
// alpha_z is the nearest, so update alpha_z
{
one_ray_length += d_x1_x2 * (alpha_z - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_z - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_z;
N_total_sec = N_total_sec -1;
if (vertex_x1_z < vertex_x2_z)
{
k++;
voxel_k++;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k--;
voxel_k--;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_y) && (alpha_x < alpha_z) )
//x = y < z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x < alpha_y))// && (sphere_range<=1.0f) )
// x = z < y;
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_y == alpha_z) && (alpha_y < alpha_x))// && (sphere_range<=1.0f) )
// y = z < x
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -2;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x == alpha_y))// && (sphere_range<=1.0f) )
// x=y=z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 3;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
}
}// end tracing the ray
}//end if the ray interacts with the volume
if (one_ray_length < volumn_z*1e-6f)
d_proj_correction[proj_pixel_index] = 0.0f;
else
{
if (command == 0)
d_proj_correction[proj_pixel_index] = one_ray_sum; // forward operator
else if (command == 1)
d_proj_correction[proj_pixel_index] = (d_proj_data[proj_pixel_index] - one_ray_sum)/one_ray_length; // projection correction (for SART)
}
// __syncthreads();
}
__global__ void forward_ray_driven_3d_kernel_correction_separate(float *d_f , float *d_proj_sumLen, float *d_proj_weightedLen, float sin_theta, float cos_theta, int subVolIdx)
{
// d_f: 3D object array; d_f[i,j,k] = d_f [k*M*N+j*M+i];
// d_proj_weightedLen: per-ray weighted path sum through this device's sub-volume (output of this function)
// d_proj_sumLen: per-ray total intersection length through this device's sub-volume (output of this function)
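// Note: this variant does not form the correction c(i) itself; presumably the host combines the
// per-device partial buffers (e.g. with kernel_add_proj) and then applies kernel_divide_proj,
// which computes c(i) = (data - weightedLen) / sumLen.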
int Detector_x_idx = threadIdx.x + blockDim.x * blockIdx.x;
int Detector_z_idx = blockIdx.y;
int proj_pixel_index = Detector_z_idx * R + Detector_x_idx;
// Source positions (X2): Coordinate in (x,y,z) system ---
float vertex_x2_x,vertex_x2_y,vertex_x2_z;
if (CT_style==0) //CBCT
{
vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
vertex_x2_z = Source_z;
}
else if (CT_style==1) //FBCT
{
vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
vertex_x2_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x;
}
else if (CT_style==2) //parallel beam
{
vertex_x2_x = Source_x * cos_theta - (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * sin_theta;
vertex_x2_y = Source_x * sin_theta + (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * cos_theta;
vertex_x2_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x;
}
// Detector element center positions (X1): Coordinate in (x,y,z) system ---
float vertex_x1_x,vertex_x1_y,vertex_x1_z;
vertex_x1_x = DOD * cos_theta - (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * sin_theta;
vertex_x1_y = DOD * sin_theta + (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * cos_theta;
vertex_x1_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x;
// Notice: in this system, vertex_x1_x < 0 < vertex_x2_x
float inv_x_diff = 1.0f / (vertex_x2_x - vertex_x1_x);
float inv_y_diff = 1.0f / (vertex_x2_y - vertex_x1_y);
float inv_z_diff = 1.0f / (vertex_x2_z - vertex_x1_z);
float BOUNDARY_VOXEL_Z = boundary_voxel_z + volumn_z*ZETA/Number_of_Devices*subVolIdx;
int ZETA_new = ZETA/Number_of_Devices;
/*****************************************/
float alpha_x_min= 0.0f, alpha_y_min= 0.0f, alpha_z_min= 0.0f;
float alpha_x_max= 0.0f, alpha_y_max= 0.0f, alpha_z_max= 0.0f;
float alpha_min= 0.0f, alpha_max= 0.0f;
int i_min=0, j_min=0, k_min=0;
int i_max=0, j_max=0, k_max=0;
int i=0, j=0, k=0;
int voxel_i=0, voxel_j=0, voxel_k=0;
float alpha_x=0.0f, alpha_y=0.0f, alpha_z=0.0f;
float one_ray_sum = 0.0f;
float one_ray_length = 0.0f;
float alpha_c= 0.0f;
float d_x1_x2= 0.0f;
int N_total_sec=0;
int next_alpha_index;
/**** Step 1 :find out alpha_min, alpha_max ********/
if ( (CT_style==1 || CT_style==2) && (Detector_z_idx<Z_prj/Number_of_Devices*subVolIdx || Detector_z_idx>=Z_prj/Number_of_Devices*(subVolIdx+1)) )
{
one_ray_sum = 0.0f;
one_ray_length = 0.00f;
}
else // if ( (vertex_x1_x != vertex_x2_x) && (vertex_x1_y != vertex_x2_y) )
{
alpha_min = (boundary_voxel_x + volumn_x*0 - vertex_x1_x )* inv_x_diff; //(9)
alpha_max = (boundary_voxel_x + volumn_x*M - vertex_x1_x )* inv_x_diff;
// Notice: it is still uncertain at this point which one is the parametric value of the first intersection point of the ray with the x-plane
// It depends on whether the source or the detector lies on the left side of the reconstruction region at this time
alpha_x_min = fmin(alpha_min, alpha_max); //(5)
alpha_x_max = fmax(alpha_min, alpha_max ); //(6)
alpha_min = (boundary_voxel_y + volumn_y*0 - vertex_x1_y )* inv_y_diff;
alpha_max = (boundary_voxel_y + volumn_y*N - vertex_x1_y )* inv_y_diff;
alpha_y_min = fmin(alpha_min, alpha_max); //(7)
alpha_y_max = fmax(alpha_min, alpha_max ); //(8)
alpha_min = (BOUNDARY_VOXEL_Z + volumn_z*0 - vertex_x1_z )* inv_z_diff;
alpha_max = (BOUNDARY_VOXEL_Z + volumn_z*ZETA_new - vertex_x1_z )* inv_z_diff;
alpha_z_min = fmin(alpha_min, alpha_max);
alpha_z_max = fmax(alpha_min, alpha_max );
// alpha_min / alpha_max reused
alpha_min = fmax(fmax(alpha_x_min, alpha_y_min), fmax(alpha_y_min, alpha_z_min)); //(3)
// i.e. alpha_min = fmax(alpha_x_min,alpha_y_min,alpha_z_min)
// it indicates the point where the path interacts with the near boundary of reconstruction region
alpha_max = fmin(fmin(alpha_x_max, alpha_y_max), fmin(alpha_y_max, alpha_z_max)); //(4)
// i.e. alpha_max = fmin(alpha_x_max,alpha_y_max,alpha_z_max)
// it indicates the point where the path last interacts with the far boundary of reconstruction region
/********Step 2,3: Find i_max, i_min***************/
if (alpha_max <= alpha_min) // It means no interaction of the ray and the volume
{
one_ray_length = 0.0f ;
one_ray_sum=0.0f;
}
else
{
// X direction
if (vertex_x1_x < vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_min = 1; //(11)
else //if (alpha_min != alpha_x_min)
i_min = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(12)
/* Note: i_min is the index of the 1st x plane where the path interacts inside the reconstruction region
* It is not the index of alpha_x_min
*/
if (alpha_max == alpha_x_max)
i_max = M; //(13)
else //if (alpha_max != alpha_x_max)
i_max = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(14)
// Note: i_max is the index of the last x plane where the path interacts with the reconstruction region (inside or boundary)
}
else //if (vertex_x1_x >= vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_max = M-1; //(15)
else //if (alpha_min != alpha_x_min)
i_max = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(16)
if (alpha_max == alpha_x_max)
i_min = 0; //(17)
else //if (alpha_max != alpha_x_max)
i_min = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(18)
}
// Note: overall, i_min is the most left x-plane, i_max the most right x-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Y direction
if (vertex_x1_y < vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_min = 1;
else //f (alpha_min != alpha_y_min)
j_min = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) + 1 ;
if (alpha_max == alpha_y_max)
j_max = N;
else //if (alpha_max != alpha_y_max)
j_max = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) ;
}
else //if (vertex_x1_y >= vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_max = N-1;
else //if (alpha_min != alpha_y_min)
j_max = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) ;
if (alpha_max == alpha_y_max)
j_min = 0;
else //if (alpha_max != alpha_y_max)
j_min = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) + 1 ;
}
// Note: overall, j_min is the most bottom y-plane, j_max the most top y-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Z direction
if (vertex_x1_z < vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_min = 1;
else //if (alpha_min != alpha_z_min)
k_min = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - BOUNDARY_VOXEL_Z )*inv_volumn_z) + 1 ;
if (alpha_max == alpha_z_max)
k_max = ZETA_new;
else //if (alpha_max != alpha_z_max)
k_max = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - BOUNDARY_VOXEL_Z )*inv_volumn_z) ;
}
else //if (vertex_x1_z >= vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_max = ZETA_new-1;
else //if (alpha_min != alpha_z_min)
k_max = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - BOUNDARY_VOXEL_Z )*inv_volumn_z) ;
if (alpha_max == alpha_z_max)
k_min = 0;
else //if (alpha_max != alpha_z_max)
k_min = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z -BOUNDARY_VOXEL_Z )*inv_volumn_z) + 1 ;
}
/************ initialization (i,j,k) (alpha_x_1,alpha_y_1,alpha_z_1)**************************/
// Note: (i,j,k) is the current x,y,z plane index (@ the initial point at the boundary)
// Note: (alpha_x,alpha_y,alpha_z) is the next x,y,z plane to go.
N_total_sec = i_max - i_min + 1 + j_max - j_min +1 + k_max-k_min +1;
// i.e. N_p (25)
if (fabs(vertex_x1_x-vertex_x2_x)<volumn_x*1e-6f )
{
alpha_x = MAX_infi;
i = i_min-1;
}
else if (vertex_x1_x < vertex_x2_x)
{
alpha_x = (volumn_x * i_min + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_min - 1;
}
else if (vertex_x1_x > vertex_x2_x)
{
alpha_x = (volumn_x * i_max + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_max + 1;
}
// Note: alpha_x_1 is the intersection where the path hit the 1st x plane inside the recon region
if (fabs(vertex_x1_y-vertex_x2_y)<volumn_y*1e-6f )
{
alpha_y = MAX_infi;
j = j_min-1;
}
else if (vertex_x1_y < vertex_x2_y)
{
alpha_y = (volumn_y * j_min + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_min - 1;
}
else if (vertex_x1_y > vertex_x2_y)
{
alpha_y = (volumn_y * j_max + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_max + 1;
}
// Note: alpha_y_1 is the intersection where the path hit the 1st y plane inside the recon region
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6f )
{
alpha_z = MAX_infi;
k = k_min-1;
}
else if (vertex_x1_z < vertex_x2_z)
{
alpha_z = (volumn_z * k_min + BOUNDARY_VOXEL_Z - vertex_x1_z )* inv_z_diff;
k = k_min - 1;
}
else if (vertex_x1_z > vertex_x2_z)
{
alpha_z = (volumn_z * k_max + BOUNDARY_VOXEL_Z - vertex_x1_z )* inv_z_diff;
k = k_max + 1;
}
/************ initialization (voxel_i,voxel_j,voxel_k) **************************/
// Note: (voxel_i,voxel_j,voxel_k) is the current x,y,z voxel index (@ the initial point at the boundary)
if (vertex_x1_x < vertex_x2_x)
voxel_i = i_min-1;
else
voxel_i = i_max;
if (vertex_x1_y < vertex_x2_y)
voxel_j = j_min-1;
else
voxel_j = j_max;
if (vertex_x1_z < vertex_x2_z)
voxel_k = k_min-1;
else
voxel_k = k_max;
/***************** Updating alpha_x, alpha_y, alpha_z, ************************/
// Note: (alpha_x, alpha_y, alpha_z) the intersection where the path hit the next (i.e. 1st here ) x/y/z plane inside the recon
d_x1_x2 = sqrt((vertex_x2_x-vertex_x1_x)*(vertex_x2_x-vertex_x1_x) + (vertex_x2_y-vertex_x1_y)*(vertex_x2_y - vertex_x1_y) + (vertex_x2_z-vertex_x1_z)*(vertex_x2_z-vertex_x1_z) );
alpha_c = alpha_min; // intersection where the path hit the 1st plane at the boundary of recon region
// Note : (i,j,k) is the (x,y,z) plane index of the current intersection (with a certain plane)
// If i, j or k would not be an integer here, take its predecessor (along the ray)
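// Siddon-style traversal: at each step jump to the nearest of the next x/y/z planes,
// accumulate the length-weighted voxel contribution, then advance the corresponding plane and voxel indices.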
while (alpha_max - alpha_c > 1e-16f)
{
if ((voxel_i > M-1)||(voxel_i <0) || (voxel_j > N-1)||(voxel_j <0) || (voxel_k > ZETA_new-1)||(voxel_k <0))
{
alpha_c = alpha_max +1; // to terminate the loop
}
else
{
if ( (alpha_x < alpha_y) && (alpha_x < alpha_z))
// alpha_x is the nearest, so update alpha_x
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
//(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 1;
if (vertex_x1_x < vertex_x2_x)
{
i++;
voxel_i++;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i--; //(29)
voxel_i--;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
}
else if ( (alpha_y < alpha_x) && (alpha_y < alpha_z) )
// alpha_y is the nearest, so update alpha_y
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -1;
if (vertex_x1_y < vertex_x2_y)
{
j++;
voxel_j++;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j--;
voxel_j--;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_z < alpha_x) && (alpha_z < alpha_y) )
// alpha_z is the nearest, so update alpha_z
{
one_ray_length += d_x1_x2 * (alpha_z - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_z - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_z;
N_total_sec = N_total_sec -1;
if (vertex_x1_z < vertex_x2_z)
{
k++;
voxel_k++;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k--;
voxel_k--;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + BOUNDARY_VOXEL_Z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_y) && (alpha_x < alpha_z) )
//x = y < z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x < alpha_y))// && (sphere_range<=1.0f) )
// x = z < y;
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + BOUNDARY_VOXEL_Z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_y == alpha_z) && (alpha_y < alpha_x))// && (sphere_range<=1.0f) )
// y = z < x
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -2;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + BOUNDARY_VOXEL_Z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x == alpha_y))// && (sphere_range<=1.0f) )
// x=y=z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 3;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + BOUNDARY_VOXEL_Z - vertex_x1_z )* inv_z_diff;
}
}
}// end tracing the ray
}//else if the ray interacts with the volume
}//else if the ray is oblique
d_proj_weightedLen[proj_pixel_index] = one_ray_sum ;
d_proj_sumLen[proj_pixel_index] = one_ray_length;
// __syncthreads();
}
__global__ void backprj_ray_driven_3d_kernel_multiGPU(float *d_volumn_kernel, float *d_proj_correction, float beta_temp, float sin_theta, float cos_theta, int subVolIdx, int command)
{
/*
* Reference: "Accelerating simultaneous algebraic reconstruction technique with motion compensation using CUDA-enabled GPU"
* Wai-Man Pang, CUHK
* Section: Back-projection and image update
* d_proj_correction : 2D projection correction, i.e. c(i) in the Wai-Man Pang, CUHK paper
* t_theta : projection angle
* beta_temp : lambda in the paper
* d_volumn: 3D object array
* d_volumn(j) = d_volumn(j) + beta_temp * sum_i (c(i)*w(ij)) / sum_i (w(ij)); where i is ray index, j is voxel index
*/
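// Strategy used below: each thread takes one voxel, projects the voxel's eight vertices onto the
// detector to form a minimum bounding rectangle (MBR), then loops over the detector pixels inside
// that MBR, accumulating sum_i w(ij) into sumLength and sum_i c(i)*w(ij) into sumWeight.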
int Idx_voxel_x = threadIdx.x + blockIdx.x * blockDim.x;
int Idx_voxel_y = blockIdx.y;
int Idx_voxel_z = blockIdx.z;
int image_voxel_index = M * N * Idx_voxel_z + M * Idx_voxel_y + Idx_voxel_x;
//coordinate of center of each voxel in x-y-z system
float coord_voxel_x = boundary_voxel_x + volumn_x*0.5f + Idx_voxel_x * volumn_x;
float coord_voxel_y = boundary_voxel_y + volumn_y*0.5f + Idx_voxel_y * volumn_y;
float coord_voxel_z = boundary_voxel_z + volumn_z*(ZETA/Number_of_Devices*subVolIdx+0.5f) + Idx_voxel_z * volumn_z;
/**************************************/
float coord_vertex_x=0.0f, coord_vertex_y=0.0f, coord_vertex_z=0.0f;
float coord_vertex_s=0.0f, coord_vertex_t=0.0f;
float coord_vertexOnDetector_x=0.0f, coord_vertexOnDetector_z=0.0f;
float minY = MAX_infi, minZ=MAX_infi, maxY=-MAX_infi, maxZ=-MAX_infi;
float coord_pixelOnDetector_x=0.0f, coord_pixelOnDetector_y=0.0f, coord_pixelOnDetector_z=0.0f;
float coord_source_x=0.0f, coord_source_y=0.0f, coord_source_z=0.0f;
float alpha_x_i_1=0.0f, alpha_x_i=0.0f;
float alpha_y_i_1=0.0f, alpha_y_i=0.0f;
float alpha_z_i_1=0.0f, alpha_z_i=0.0f;
float alpha_x_temp=0.0f, alpha_y_temp=0.0f, alpha_z_temp=0.0f;
float alpha_min=0.0f, alpha_max=0.0f;
int minY_index=0, maxY_index=0, minZ_index=0, maxZ_index=0;
float sumWeight=0.0f, sumLength=0.0f;
float d_x1_x2=0.0f;
float inv_Detector_pixel = 1.0f/Detector_pixel_x;
// float weight = 1.0f;
// float tao;
// float tao_m1 = atan( (float(R)*Detector_pixel_x/2.0f-abs(Offset)) / DSO);
/***********************************************************/
if ( (Idx_voxel_x-(float(M)*0.5f-0.5f)-M_Offset)*volumn_x*(Idx_voxel_x-(float(M)*0.5f-0.5f)-M_Offset)*volumn_x
+ (Idx_voxel_y-(float(N)*0.5f-0.5f))*volumn_y*(Idx_voxel_y-(float(N)*0.5f-0.5f))*volumn_y
>= (float(M)*0.5f-0.5f)*volumn_x*(float(N)*0.5f-0.5f)*volumn_y )
{
sumLength = 0.0f;
sumWeight = 0.0f;
}
else
// Note: The following code applies to all the voxels simultaneously
{
/******** investigate the eight vertices of each voxel ********/
for (int k=0;k<2;k++)
for (int j=0;j<2;j++)
for (int i=0;i<2;i++)
{
//coordinate for each of eight vertices of the voxel
coord_vertex_x = coord_voxel_x + (i)*volumn_x - 0.5f*volumn_x;
coord_vertex_y = coord_voxel_y + (j)*volumn_y - 0.5f*volumn_y;
coord_vertex_z = coord_voxel_z + (k)*volumn_z - 0.5f*volumn_z;
// <t-s> <----> <x,y>
coord_vertex_t = coord_vertex_x * cos_theta + coord_vertex_y * sin_theta;
coord_vertex_s = - coord_vertex_x * sin_theta + coord_vertex_y * cos_theta;
// Note: Now rotate the image volume (by -t_theta degrees) instead of the normal gantry rotation
// In the new coordinate system, the detector plane is unchanged and is perpendicular to the t axis
// the projection of the vertex of the voxel on the detector, in <t,s> system
if (CT_style==0) //CBCT geometry
{
coord_vertexOnDetector_x = (coord_vertex_t - DOD) / (DSO- coord_vertex_t) * (coord_vertex_s - Source_y) + coord_vertex_s ;
coord_vertexOnDetector_z = (coord_vertex_t - DOD) / (DSO- coord_vertex_t) * (coord_vertex_z - Source_z) + coord_vertex_z ;
}
else if (CT_style==1) //FBCT geometry, no magnification along z axis
{
coord_vertexOnDetector_x = (coord_vertex_t - DOD) / (DSO- coord_vertex_t) * (coord_vertex_s - Source_y) + coord_vertex_s ;
coord_vertexOnDetector_z = coord_voxel_z ;
}
else if (CT_style==2) //PBCT, direct projection
{
coord_vertexOnDetector_x = coord_vertex_s;
coord_vertexOnDetector_z = coord_voxel_z ;
}
// track the extent of the projections of the voxel vertices on the detector
minY= fmin(minY, coord_vertexOnDetector_x);
maxY= fmax(maxY, coord_vertexOnDetector_x);
minZ= fmin(minZ, coord_vertexOnDetector_z);
maxZ= fmax(maxZ, coord_vertexOnDetector_z);
// form a minimum bounding rectangle (MBR) for these vertices
}
minY_index = floor( (minY - Detector_Ymin ) * inv_Detector_pixel +0.5f);
maxY_index = floor( (maxY - Detector_Ymin ) * inv_Detector_pixel +0.5f);
minZ_index = floor( (minZ - Detector_Zmin ) * inv_Detector_pixel +0.5f);
maxZ_index = floor( (maxZ - Detector_Zmin ) * inv_Detector_pixel +0.5f);
// indices of the pixels at the MBR boundaries on the detector
/***********************************/
// If this voxel does not project onto this detector plane, it means there is no ray passing through this voxel at this angle.
if ( ((minY_index<0) && (maxY_index<0)) || ((minY_index>(R-1)) && (maxY_index>(R-1))) || ((minZ_index<0) && (maxZ_index<0)) || ((minZ_index>(Z_prj-1)) && (maxZ_index>(Z_prj-1))) )
{
sumWeight = 0.0f;
sumLength = 0.0f;
}
else
// If this voxel projects on the detector plane
{
if (minY_index <=0)
minY_index = 0;
if (maxY_index >=(R-1) )
maxY_index = R-1;
if (minZ_index <=0)
minZ_index = 0;
if (maxZ_index >=(Z_prj-1) )
maxZ_index = Z_prj-1;
// coordinate of the source in (x,y,z) system after normal gantry rotation
if (CT_style==0) // CBCT geometry, single source
{
coord_source_x = Source_x * cos_theta - Source_y * sin_theta;
coord_source_y = Source_x * sin_theta + Source_y * cos_theta;
coord_source_z = Source_z;
}
else if (CT_style==1) // FBCT geometry, multiple sources
{
coord_source_x = Source_x * cos_theta - Source_y * sin_theta;
coord_source_y = Source_x * sin_theta + Source_y * cos_theta;
coord_source_z = coord_voxel_z;
}
else if (CT_style==2)
{
// NOT defined here.
// The source position goes with the detector element
}
// for those projection pixels whose coordinates lie inside the MBR
// Each pixel corresponds to a ray, and that ray may pass through the specific voxel
for (int j=minZ_index; j<=maxZ_index; j++)
for (int i=minY_index; i<=maxY_index; i++)
{
coord_pixelOnDetector_x = DOD * cos_theta - (Detector_Ymin + i*Detector_pixel_x) * sin_theta ;
coord_pixelOnDetector_y = DOD * sin_theta + (Detector_Ymin + i*Detector_pixel_x) * cos_theta ;
coord_pixelOnDetector_z = Detector_Zmin + j*Detector_pixel_x;
// coordinate of the detector pixel inside MBR in (x,y,z) system after normal gantry rotation
if (CT_style==2)
{
coord_source_x = Source_x * cos_theta - (Detector_Ymin + i*Detector_pixel_x) * sin_theta;
coord_source_y = Source_x * sin_theta + (Detector_Ymin + i*Detector_pixel_x) * cos_theta;
coord_source_z = coord_voxel_z;
}
/** Weighted Update for Half Detector **/
// if ( (float(i)*Detector_pixel_x) < 2.0f*abs(Offset) )
// weight = 1.0f;
// else
// {
// tao = atan( ( float(R/2-i)*Detector_pixel_x + abs(Offset) ) / DSO);
// weight = cos(PI/4*(tao/tao_m1 - 1));
// weight = weight * weight;
// }
/******/
// Next: investigate the line starting at x1 and ending at x2
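// The ray is parametrized as p(alpha) = pixel + alpha * (source - pixel), so alpha = 0 at the
// detector pixel and alpha = 1 at the source; each alpha below is where the ray crosses one of
// the six bounding planes of this voxel.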
alpha_x_i_1 = ( (coord_voxel_x - 0.5f*volumn_x) - coord_pixelOnDetector_x )/( coord_source_x - coord_pixelOnDetector_x );
alpha_x_i = ( (coord_voxel_x + 0.5f*volumn_x) - coord_pixelOnDetector_x )/( coord_source_x - coord_pixelOnDetector_x );
alpha_y_i_1 = ( (coord_voxel_y - 0.5f*volumn_y) - coord_pixelOnDetector_y )/( coord_source_y - coord_pixelOnDetector_y );
alpha_y_i = ( (coord_voxel_y + 0.5f*volumn_y) - coord_pixelOnDetector_y )/( coord_source_y - coord_pixelOnDetector_y );
alpha_z_i_1 = ( (coord_voxel_z - 0.5f*volumn_z) - coord_pixelOnDetector_z )/( coord_source_z - coord_pixelOnDetector_z );
alpha_z_i = ( (coord_voxel_z + 0.5f*volumn_z) - coord_pixelOnDetector_z )/( coord_source_z - coord_pixelOnDetector_z );
// parametric values at which the ray crosses the two bounding planes of this voxel along each axis
alpha_x_temp = fmin((alpha_x_i_1), (alpha_x_i));
alpha_y_temp = fmin((alpha_y_i_1), (alpha_y_i));
alpha_z_temp = fmin((alpha_z_i_1), (alpha_z_i));
alpha_min = fmax(fmax(alpha_x_temp, alpha_y_temp), fmax(alpha_y_temp, alpha_z_temp));
// alpha_min is the entry point of the ray into this specific voxel
alpha_x_temp = fmax((alpha_x_i_1), (alpha_x_i));
alpha_y_temp = fmax((alpha_y_i_1), (alpha_y_i));
alpha_z_temp = fmax((alpha_z_i_1), (alpha_z_i));
alpha_max = fmin(fmin(alpha_x_temp, alpha_y_temp), fmin(alpha_y_temp, alpha_z_temp));
// alpha_max is the exit point of the line passing through this voxel
if (alpha_max-alpha_min>0) // if the value is negative, it means the ray does not pass through this voxel
{
d_x1_x2 = sqrt((coord_source_x-coord_pixelOnDetector_x)*(coord_source_x-coord_pixelOnDetector_x) + (coord_source_y-coord_pixelOnDetector_y)*(coord_source_y - coord_pixelOnDetector_y) + (coord_source_z-coord_pixelOnDetector_z)*(coord_source_z-coord_pixelOnDetector_z) );
float temp = d_x1_x2*(alpha_max-alpha_min);
if ( temp > volumn_x*1e-6f)
// the line passes through the voxel with a sufficient length;
{
sumWeight = sumWeight + temp*d_proj_correction[j*R + i];
// Note: d_proj_correction[j*R + i] is c(i) which has been previously calculated
// Note: d_x1_x2 * (alpha_max - alpha_min) is w(ij), the intersection length of ray i with this voxel j
sumLength = sumLength + temp;
}
}
}// end for loop: all the rays whose projection fits in the rectangle
}//end else if this voxel projects on this detector plane
}//end else if the reconstruction region is in the circle
if (sumLength < volumn_x*1e-6f)
{
// d_volumn_kernel[image_voxel_index] += 0.0f ;
}
else
{
if (command==0)
d_volumn_kernel[image_voxel_index] = sumWeight ; // matched adjoint operator, for test use
else if (command==1)
d_volumn_kernel[image_voxel_index] += beta_temp * sumWeight/sumLength ;
}
// __syncthreads();
}
|
5e4aa40cd7adc3d4aa8f97742293490aa87cc1d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "paddle/extension.h"
template <typename data_t>
__global__ void relu_cuda_forward_kernel(const data_t* x, data_t* y, const int num) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = gid; i < num; i += blockDim.x * gridDim.x) {
y[i] = max(x[i], static_cast<data_t>(0.));
}
}
template <typename data_t>
__global__ void relu_cuda_backward_kernel(const data_t* dy, const data_t* y, data_t* dx, const int num) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = gid; i < num; i += blockDim.x * gridDim.x) {
dx[i] = dy[i] * (y[i] > 0 ? 1. : 0.);
}
}
std::vector<paddle::Tensor> relu_cuda_forward(const paddle::Tensor& x) {
auto out = paddle::Tensor();
out.Reshape(x.shape());
int numel = x.size();
int block = 512;
int grid = (numel + block - 1) / block;
PD_DISPATCH_FLOATING_TYPES(x.type(), "relu_cuda_forward_kernel", ([&]{
hipLaunchKernelGGL(( relu_cuda_forward_kernel<data_t>), dim3(grid), dim3(block), 0, 0,
x.data<data_t>(),
out.mutable_data<data_t>(x.place()),
numel);
}));
return {out};
}
std::vector<paddle::Tensor> relu_cuda_backward(
const paddle::Tensor& grad_out,
const paddle::Tensor& out,
const paddle::Tensor& x) {
auto grad_x = paddle::Tensor();
grad_x.Reshape(x.shape());
int numel = out.size();
int block = 512;
int grid = (numel + block - 1) / block;
PD_DISPATCH_FLOATING_TYPES(out.type(), "relu_cuda_backward_kernel", ([&]{
hipLaunchKernelGGL(( relu_cuda_backward_kernel<data_t>), dim3(grid), dim3(block), 0, 0,
grad_out.data<data_t>(),
out.data<data_t>(),
grad_x.mutable_data<data_t>(x.place()),
numel);
}));
return {grad_x};
}
| 5e4aa40cd7adc3d4aa8f97742293490aa87cc1d2.cu | #include "paddle/extension.h"
template <typename data_t>
__global__ void relu_cuda_forward_kernel(const data_t* x, data_t* y, const int num) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = gid; i < num; i += blockDim.x * gridDim.x) {
y[i] = max(x[i], static_cast<data_t>(0.));
}
}
template <typename data_t>
__global__ void relu_cuda_backward_kernel(const data_t* dy, const data_t* y, data_t* dx, const int num) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = gid; i < num; i += blockDim.x * gridDim.x) {
dx[i] = dy[i] * (y[i] > 0 ? 1. : 0.);
}
}
std::vector<paddle::Tensor> relu_cuda_forward(const paddle::Tensor& x) {
auto out = paddle::Tensor();
out.Reshape(x.shape());
int numel = x.size();
int block = 512;
int grid = (numel + block - 1) / block;
PD_DISPATCH_FLOATING_TYPES(x.type(), "relu_cuda_forward_kernel", ([&]{
relu_cuda_forward_kernel<data_t><<<grid, block>>>(
x.data<data_t>(),
out.mutable_data<data_t>(x.place()),
numel);
}));
return {out};
}
std::vector<paddle::Tensor> relu_cuda_backward(
const paddle::Tensor& grad_out,
const paddle::Tensor& out,
const paddle::Tensor& x) {
auto grad_x = paddle::Tensor();
grad_x.Reshape(x.shape());
int numel = out.size();
int block = 512;
int grid = (numel + block - 1) / block;
PD_DISPATCH_FLOATING_TYPES(out.type(), "relu_cuda_backward_kernel", ([&]{
relu_cuda_backward_kernel<data_t><<<grid, block>>>(
grad_out.data<data_t>(),
out.data<data_t>(),
grad_x.mutable_data<data_t>(x.place()),
numel);
}));
return {grad_x};
}
|
0ec24c1fa9caf47de86ea370e9b70c8215fd6caf.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gemm.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
float *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
float *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
const float alpha = 1;
const float beta = 1;
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
const int input_size = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(gemm, dim3(gridBlock), dim3(threadBlock), 0, 0, a,b,c,alpha,beta,output,input_size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(gemm, dim3(gridBlock), dim3(threadBlock), 0, 0, a,b,c,alpha,beta,output,input_size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(gemm, dim3(gridBlock), dim3(threadBlock), 0, 0, a,b,c,alpha,beta,output,input_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0ec24c1fa9caf47de86ea370e9b70c8215fd6caf.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gemm.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
float *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
float *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
const float alpha = 1;
const float beta = 1;
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
const int input_size = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
gemm<<<gridBlock,threadBlock>>>(a,b,c,alpha,beta,output,input_size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
gemm<<<gridBlock,threadBlock>>>(a,b,c,alpha,beta,output,input_size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
gemm<<<gridBlock,threadBlock>>>(a,b,c,alpha,beta,output,input_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
a4c30c3fbb7dcd5ec129b9a5690ac76978211eba.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by Jacob Austin on 5/17/18.
//
#include "sim.h"
#include "stlparser.h"
#include <thrust/remove.h>
#include <thrust/execution_policy.h>
#ifdef GRAPHICS
#include <GLFW/glfw3.h>
#endif
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cuda_device_runtime_api.h>
#include <cuda_gl_interop.h>
#include <exception>
namespace titan {
#ifdef GRAPHICS
#ifndef SDL2
void framebuffer_size_callback(GLFWwindow* window, int width, int height);
#endif
#endif
__global__ void createSpringPointers(CUDA_SPRING ** ptrs, CUDA_SPRING * data, int size);
__global__ void createMassPointers(CUDA_MASS ** ptrs, CUDA_MASS * data, int size);
__global__ void computeSpringForces(CUDA_SPRING ** device_springs, int num_springs, double t);
__global__ void massForcesAndUpdate(CUDA_MASS ** d_mass, int num_masses, double dt, double T, Vec global_acc, CUDA_GLOBAL_CONSTRAINTS c);
bool Simulation::RUNNING;
bool Simulation::STARTED;
bool Simulation::ENDED;
bool Simulation::FREED;
bool Simulation::GPU_DONE;
#ifdef GRAPHICS
GLFWwindow * Simulation::window;
GLuint Simulation::VertexArrayID;
GLuint Simulation::programID;
GLuint Simulation::MatrixID;
glm::mat4 Simulation::MVP;
GLuint Simulation::vertices;
GLuint Simulation::colors;
GLuint Simulation::indices;
bool Simulation::update_indices;
bool Simulation::update_colors;
int Simulation::lineWidth;
int Simulation::pointSize;
bool Simulation::resize_buffers;
Vec Simulation::camera;
Vec Simulation::looks_at;
Vec Simulation::up;
#endif
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=false)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %d %s %s %d\n", code, hipGetErrorString(code), file, line);
if (abort) {
char buffer[200];
snprintf(buffer, sizeof(buffer), "GPUassert error in CUDA kernel: %s %s %d\n", hipGetErrorString(code), file, line);
std::string buffer_string = buffer;
throw std::runtime_error(buffer_string);
}
}
}
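// Typical usage: wrap every runtime call, e.g. gpuErrchk(hipMalloc((void **) &ptr, nbytes));
// kernel launches are checked separately with gpuErrchk(hipPeekAtLastError()) as in execute().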
Simulation::Simulation() {
dt = 0.0001;
RUNNING = false;
STARTED = false;
ENDED = false;
FREED = false;
GPU_DONE = false;
update_constraints = true;
_global_acc = Vec(0, 0, -9.81);
#ifdef GRAPHICS
resize_buffers = true;
update_colors = true;
update_indices = true;
lineWidth = 1;
pointSize = 3;
camera = Vec(15, 15, 7);
looks_at = Vec(0, 0, 2);
up = Vec(0, 0, 1);
#endif
}
void Simulation::reset() {
this -> masses.clear();
this -> springs.clear();
this -> containers.clear();
this -> constraints.clear();
RUNNING = false;
STARTED = false;
ENDED = false;
FREED = false;
GPU_DONE = false;
update_constraints = true;
_global_acc = Vec(0, 0, -9.81);
#ifdef GRAPHICS
resize_buffers = true;
update_colors = true;
update_indices = true;
lineWidth = 1;
pointSize = 3;
camera = Vec(15, 15, 7);
looks_at = Vec(0, 0, 2);
up = Vec(0, 0, 1);
#endif
}
void Simulation::freeGPU() {
for (Spring * s : springs) {
if (s -> _left && ! s -> _left -> valid) {
if (s -> _left -> arrayptr) {
gpuErrchk(hipFree(s -> _left -> arrayptr));
}
delete s -> _left;
}
if (s -> _right && ! s -> _right -> valid) {
if (s -> _right -> arrayptr) {
gpuErrchk(hipFree(s -> _right -> arrayptr));
}
delete s -> _right;
}
delete s;
}
for (Mass * m : masses) {
delete m;
}
for (Container * c : containers) {
delete c;
}
d_balls.clear();
d_balls.shrink_to_fit();
d_planes.clear();
d_planes.shrink_to_fit();
// freeSprings<<<springBlocksPerGrid, THREADS_PER_BLOCK>>>(d_spring, springs.size()); // MUST COME BEFORE freeMasses
// freeMasses<<<massBlocksPerGrid, THREADS_PER_BLOCK>>>(d_mass, masses.size());
// #ifdef GRAPHICS
// hipGLUnmapBufferObject(this -> colors);
// hipGLUnmapBufferObject(this -> indices);
// hipGLUnmapBufferObject(this -> vertices);
// hipGLUnregisterBufferObject(this -> colors);
// hipGLUnregisterBufferObject(this -> indices);
// hipGLUnregisterBufferObject(this -> vertices);
// #endif
FREED = true; // just to be safe
ENDED = true; // just to be safe
}
Simulation::~Simulation() {
std::cerr << "Simulation destructor called." << std::endl;
if (STARTED) {
waitForEvent();
ENDED = true; // TODO maybe race condition
while (!GPU_DONE) {
std::this_thread::sleep_for(std::chrono::milliseconds(10));
}
std::this_thread::sleep_for(std::chrono::milliseconds(10)); // TODO fix race condition
if (gpu_thread.joinable()) {
gpu_thread.join();
} else {
std::cout << "could not join GPU thread." << std::endl;
exit(1);
}
if (!FREED) {
freeGPU();
FREED = true;
}
} else {
for (Mass * m : masses) {
delete m;
}
for (Spring * s : springs) {
delete s;
}
for (Container * c : containers) {
delete c;
}
}
}
Mass * Simulation::createMass(Mass * m) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
m -> ref_count++;
if (!STARTED) {
masses.push_back(m);
return m;
} else {
if (RUNNING) {
throw std::runtime_error("The simulation is running. Stop the simulation to make changes.");
}
masses.push_back(m);
CUDA_MASS * d_mass;
gpuErrchk(hipMalloc((void **) &d_mass, sizeof(CUDA_MASS)));
m -> arrayptr = d_mass;
d_masses.push_back(d_mass);
CUDA_MASS temp = CUDA_MASS(*m);
gpuErrchk(hipMemcpy(d_mass, &temp, sizeof(CUDA_MASS), hipMemcpyHostToDevice));
#ifdef GRAPHICS
resize_buffers = true;
#endif
return m;
}
}
Spring * Simulation::getSpringByIndex(int i) {
assert(i < springs.size() && i >= 0);
return springs[i];
}
Mass * Simulation::getMassByIndex(int i) {
assert(i < masses.size() && i >= 0);
return masses[i];
}
Container * Simulation::getContainerByIndex(int i) {
assert(i < containers.size() && i >= 0);
return containers[i];
}
Mass * Simulation::createMass() {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
Mass * m = new Mass();
return createMass(m);
}
Mass * Simulation::createMass(const Vec & pos) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
Mass * m = new Mass(pos);
return createMass(m);
}
Spring * Simulation::createSpring(Spring * s) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
if (s -> _right) { s -> _right -> ref_count++; }
if (s -> _left) { s -> _left -> ref_count++; }
if (!STARTED) {
springs.push_back(s);
return s;
} else {
if (RUNNING) {
exit(1);
}
springs.push_back(s);
CUDA_SPRING * d_spring;
gpuErrchk(hipMalloc((void **) &d_spring, sizeof(CUDA_SPRING)));
s -> arrayptr = d_spring;
d_springs.push_back(d_spring);
CUDA_SPRING temp = CUDA_SPRING(*s);
gpuErrchk(hipMemcpy(d_spring, &temp, sizeof(CUDA_SPRING), hipMemcpyHostToDevice));
#ifdef GRAPHICS
resize_buffers = true;
#endif
return s;
}
}
Spring * Simulation::createSpring() {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
Spring * s = new Spring();
return createSpring(s);
}
Spring * Simulation::createSpring(Mass * m1, Mass * m2) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
Spring * s = new Spring(m1, m2);
return createSpring(s);
}
__global__ void invalidate(CUDA_MASS ** ptrs, CUDA_MASS * m, int size) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
if (ptrs[i] == m) {
m -> valid = false;
}
}
}
void Simulation::deleteMass(Mass * m) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
if (!STARTED) {
masses.resize(std::remove(masses.begin(), masses.end(), m) - masses.begin());
m -> decrementRefCount();
} else {
if (RUNNING) {
exit(1);
}
updateCudaParameters();
hipLaunchKernelGGL(( invalidate), dim3(massBlocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, d_mass, m -> arrayptr, masses.size());
m -> valid = false;
thrust::remove(thrust::device, d_masses.begin(), d_masses.begin() + masses.size(), m -> arrayptr);
masses.resize(std::remove(masses.begin(), masses.end(), m) - masses.begin());
d_masses.resize(masses.size());
m -> decrementRefCount();
#ifdef GRAPHICS
resize_buffers = true;
#endif
}
}
void Simulation::deleteSpring(Spring * s) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
if (!STARTED) {
springs.resize(std::remove(springs.begin(), springs.end(), s) - springs.begin());
if (s -> _left) { s -> _left -> decrementRefCount(); }
if (s -> _right) { s -> _right -> decrementRefCount(); }
} else {
if (RUNNING) {
exit(1);
}
gpuErrchk(hipFree(s -> arrayptr));
thrust::remove(thrust::device, d_springs.begin(), d_springs.begin() + springs.size(), s -> arrayptr);
springs.resize(std::remove(springs.begin(), springs.end(), s) - springs.begin());
if (s -> _left) { s -> _left -> decrementRefCount(); }
if (s -> _right) { s -> _right -> decrementRefCount(); }
delete s;
#ifdef GRAPHICS
resize_buffers = true;
#endif
}
}
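// The functors below are predicates for thrust::remove_if / std::remove (host and device);
// deleteContainer uses them to compact the mass and spring arrays in bulk instead of removing
// the container's members one at a time.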
struct mass_in_list {
__device__ __host__ mass_in_list(CUDA_MASS ** ptr, int n) : list(ptr), size(n) {};
__device__ __host__ bool operator()(CUDA_MASS * data) {
for (int i = 0; i < size; i++) {
if (list[i] == data) {
data -> valid = false;
return true;
}
}
return false;
}
CUDA_MASS ** list;
int size;
};
struct spring_in_list {
__device__ __host__ spring_in_list(CUDA_SPRING ** ptr, int n) : list(ptr), size(n) {};
__device__ __host__ bool operator()(CUDA_SPRING * data) {
for (int i = 0; i < size; i++) {
if (list[i] == data) {
return true;
}
}
return false;
}
CUDA_SPRING ** list;
int size;
};
struct host_mass_in_list {
__device__ __host__ host_mass_in_list(Mass ** ptr, int n) : list(ptr), size(n) {};
__device__ __host__ bool operator()(Mass * data) {
for (int i = 0; i < size; i++) {
if (list[i] == data) {
return true;
}
}
return false;
}
Mass ** list;
int size;
};
struct host_spring_in_list {
__device__ __host__ host_spring_in_list(Spring ** ptr, int n) : list(ptr), size(n) {};
__device__ __host__ bool operator()(Spring * data) {
for (int i = 0; i < size; i++) {
if (list[i] == data) {
return true;
}
}
return false;
}
Spring ** list;
int size;
};
void Simulation::deleteContainer(Container * c) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
if (RUNNING) {
throw std::runtime_error("The simulation is running. Stop the simulation to make changes.");
}
if (!STARTED) {
for (Mass * m : c -> masses) {
deleteMass(m);
}
for (Spring * s : c -> springs) {
deleteSpring(s);
}
delete c;
containers.resize(std::remove(containers.begin(), containers.end(), c) - containers.begin());
return;
}
{
CUDA_MASS ** d_ptrs = new CUDA_MASS * [c -> masses.size()];
for (int i = 0; i < c -> masses.size(); i++) {
d_ptrs[i] = c -> masses[i] -> arrayptr;
c -> masses[i] -> valid = false;
c -> masses[i] -> decrementRefCount();
}
masses.resize(thrust::remove_if(thrust::host, masses.begin(), masses.end(), host_mass_in_list(c -> masses.data(), c -> masses.size())) - masses.begin());
CUDA_MASS ** temp;
gpuErrchk(hipMalloc((void **) &temp, sizeof(CUDA_MASS *) * c -> masses.size()));
gpuErrchk(hipMemcpy(temp, d_ptrs, c -> masses.size() * sizeof(CUDA_MASS *), hipMemcpyHostToDevice));
delete [] d_ptrs;
thrust::remove_if(thrust::device, d_masses.begin(), d_masses.begin() + masses.size() + c -> masses.size(), mass_in_list(temp, c -> masses.size()));
d_masses.resize(masses.size());
gpuErrchk(hipFree(temp));
}
{
CUDA_SPRING ** d_ptrs = new CUDA_SPRING * [c -> springs.size()];
for (int i = 0; i < c -> springs.size(); i++) {
Spring * s = c -> springs[i];
d_ptrs[i] = s -> arrayptr;
gpuErrchk(hipFree(s -> arrayptr));
if (s -> _left) { s -> _left -> decrementRefCount(); }
if (s -> _right) { s -> _right -> decrementRefCount(); }
}
springs.resize(thrust::remove_if(thrust::host, springs.begin(), springs.end(), host_spring_in_list(c -> springs.data(), c -> springs.size())) - springs.begin());
CUDA_SPRING ** temp;
gpuErrchk(hipMalloc((void **) &temp, sizeof(CUDA_SPRING *) * c -> springs.size()));
gpuErrchk(hipMemcpy(temp, d_ptrs, c -> springs.size() * sizeof(CUDA_SPRING *), hipMemcpyHostToDevice));
delete [] d_ptrs;
thrust::remove_if(thrust::device, d_springs.begin(), d_springs.begin() + springs.size() + c -> springs.size(), spring_in_list(temp, c -> springs.size()));
d_springs.resize(springs.size());
gpuErrchk(hipFree(temp));
}
#ifdef GRAPHICS // TODO make a decision about this
resize_buffers = true;
#endif
delete c;
containers.resize(std::remove(containers.begin(), containers.end(), c) - containers.begin());
}
//void Simulation::deleteContainer(Container * c) {
// if (RUNNING) {
// exit(1);
// }
//
// std::cout << c -> masses.size() << " " << c -> springs.size() << std::endl;
//
// for (Mass * m : c -> masses) {
// deleteMass(m);
// }
//
// for (Spring * s : c -> springs) {
// deleteSpring(s);
// }
//
//#ifdef GRAPHICS
// resize_buffers = true;
//#endif
//
// delete c;
// containers.remove(c);
//}
void Simulation::get(Mass * m) {
if (!STARTED) {
std::cerr << "The simulation has not started. Get and set commands cannot be called before sim.start()" << std::endl;
return;
}
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
CUDA_MASS temp;
gpuErrchk(hipMemcpy(&temp, m -> arrayptr, sizeof(CUDA_MASS), hipMemcpyDeviceToHost));
*m = temp;
}
void Simulation::set(Mass * m) {
if (!STARTED) {
std::cerr << "The simulation has not started. Get and set commands cannot be called before sim.start()" << std::endl;
return;
}
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
CUDA_MASS temp = CUDA_MASS(*m);
gpuErrchk(hipMemcpy(m -> arrayptr, &temp, sizeof(CUDA_MASS), hipMemcpyHostToDevice));
}
void Simulation::get(Spring * s) {
if (!STARTED) {
std::cerr << "The simulation has not started. Get and set commands cannot be called before sim.start()" << std::endl;
return;
}
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
CUDA_SPRING temp;
gpuErrchk(hipMemcpy(&temp, s -> arrayptr, sizeof(CUDA_SPRING), hipMemcpyDeviceToHost));
s -> update(temp);
}
void Simulation::set(Spring * s) {
if (!STARTED) {
std::cerr << "The simulation has not started. Get and set commands cannot be called before sim.start()" << std::endl;
return;
}
CUDA_SPRING temp = CUDA_SPRING(*s);
gpuErrchk(hipMemcpy(s -> arrayptr, &temp, sizeof(CUDA_SPRING), hipMemcpyHostToDevice));
}
void Simulation::getAll() {
if (!STARTED) {
std::cerr << "The simulation has not started. Get and set commands cannot be called before sim.start()" << std::endl;
return;
}
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
massFromArray(); // TODO make a note of this
}
void Simulation::set(Container * c) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
if (!STARTED) {
std::cerr << "The simulation has not started. Get and set commands cannot be called before sim.start()" << std::endl;
return;
}
{
CUDA_MASS * h_data = new CUDA_MASS[c -> masses.size()]; // copy masses into single array for copying to the GPU, set GPU pointers
CUDA_MASS ** d_ptrs = new CUDA_MASS * [c -> masses.size()];
for (int i = 0; i < c -> masses.size(); i++) {
d_ptrs[i] = c -> masses[i] -> arrayptr;
h_data[i] = CUDA_MASS(*c -> masses[i]);
}
CUDA_MASS ** temp;
gpuErrchk(hipMalloc((void **) &temp, sizeof(CUDA_MASS *) * c -> masses.size()));
gpuErrchk(hipMemcpy(temp, d_ptrs, c -> masses.size() * sizeof(CUDA_MASS *), hipMemcpyHostToDevice));
delete [] d_ptrs;
CUDA_MASS * d_data; // copy to the GPU
gpuErrchk(hipMalloc((void **)&d_data, sizeof(CUDA_MASS) * c -> masses.size()));
gpuErrchk(hipMemcpy(d_data, h_data, sizeof(CUDA_MASS) * c -> masses.size(), hipMemcpyHostToDevice));
delete [] h_data;
updateCudaParameters();
hipLaunchKernelGGL(( createMassPointers), dim3(massBlocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, temp, d_data, c -> masses.size());
gpuErrchk(hipFree(d_data));
gpuErrchk(hipFree(temp));
}
{
CUDA_SPRING * h_spring = new CUDA_SPRING[c -> springs.size()];
CUDA_SPRING ** d_ptrs = new CUDA_SPRING *[c -> springs.size()];
int count = 0;
for (Spring * s : springs) {
d_ptrs[count] = c -> springs[count] -> arrayptr;
h_spring[count] = CUDA_SPRING(*s, s -> _left -> arrayptr, s -> _right -> arrayptr);
count++;
}
CUDA_SPRING ** temp;
gpuErrchk(hipMalloc((void **) &temp, sizeof(CUDA_SPRING *) * c -> springs.size()));
gpuErrchk(hipMemcpy(temp, d_ptrs, c -> springs.size() * sizeof(CUDA_SPRING *), hipMemcpyHostToDevice));
delete [] d_ptrs;
CUDA_SPRING * d_data;
gpuErrchk(hipMalloc((void **)& d_data, sizeof(CUDA_SPRING) * springs.size()));
gpuErrchk(hipMemcpy(d_data, h_spring, sizeof(CUDA_SPRING) * springs.size(), hipMemcpyHostToDevice));
delete [] h_spring;
hipLaunchKernelGGL(( createSpringPointers), dim3(springBlocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, temp, d_data, springs.size());
gpuErrchk(hipFree(d_data));
gpuErrchk(hipFree(temp));
}
}
void Simulation::setAll() {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
{
CUDA_MASS * h_data = new CUDA_MASS[masses.size()]; // copy masses into single array for copying to the GPU, set GPU pointers
int count = 0;
for (Mass * m : masses) {
h_data[count] = CUDA_MASS(*m);
count++;
}
CUDA_MASS * d_data; // copy to the GPU
gpuErrchk(hipMalloc((void **)&d_data, sizeof(CUDA_MASS) * masses.size()));
gpuErrchk(hipMemcpy(d_data, h_data, sizeof(CUDA_MASS) * masses.size(), hipMemcpyHostToDevice));
delete [] h_data;
updateCudaParameters();
hipLaunchKernelGGL(( createMassPointers), dim3(massBlocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, thrust::raw_pointer_cast(d_masses.data()), d_data, masses.size());
gpuErrchk(hipFree(d_data));
}
{
CUDA_SPRING * h_spring = new CUDA_SPRING[springs.size()];
int count = 0;
for (Spring * s : springs) {
h_spring[count] = CUDA_SPRING(*s, s -> _left -> arrayptr, s -> _right -> arrayptr);
count++;
}
CUDA_SPRING * d_data;
gpuErrchk(hipMalloc((void **)& d_data, sizeof(CUDA_SPRING) * springs.size()));
gpuErrchk(hipMemcpy(d_data, h_spring, sizeof(CUDA_SPRING) * springs.size(), hipMemcpyHostToDevice));
delete [] h_spring;
hipLaunchKernelGGL(( createSpringPointers), dim3(springBlocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, thrust::raw_pointer_cast(d_springs.data()), d_data, springs.size());
gpuErrchk(hipFree(d_data));
}
}
void Simulation::setAllSpringConstantValues(double k) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
for (Spring * s : springs) {
s -> _k = k;
}
}
void Simulation::defaultRestLengths() {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
for (Spring * s : springs) {
s -> defaultLength();
}
}
void Simulation::setAllMassValues(double m) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
for (Mass * mass : masses) {
mass -> m += m;
}
}
void Simulation::setTimeStep(double delta_t) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
if (delta_t <= 0) {
throw std::runtime_error("Cannot set time step to negative or zero value.");
}
this -> dt = delta_t;
}
double Simulation::getTimeStep() {
return this -> dt;
}
void Simulation::setBreakpoint(double time) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot set breakpoints after the end of the simulation run.");
}
bpts.insert(time); // TODO mutex breakpoints
}
__global__ void createMassPointers(CUDA_MASS ** ptrs, CUDA_MASS * data, int size) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
*ptrs[i] = data[i];
}
}
CUDA_MASS ** Simulation::massToArray() {
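// Each Mass gets its own device-side CUDA_MASS allocation; the device pointers are stored both
// in m -> arrayptr on the host and in the thrust device_vector d_masses. A packed staging array
// is then copied to the GPU and scattered into those allocations by createMassPointers.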
CUDA_MASS ** d_ptrs = new CUDA_MASS * [masses.size()]; // array of pointers
for (int i = 0; i < masses.size(); i++) { // potentially slow
gpuErrchk(hipMalloc((void **) (d_ptrs + i), sizeof(CUDA_MASS))); // TODO Fix this shit
}
d_masses = thrust::device_vector<CUDA_MASS *>(d_ptrs, d_ptrs + masses.size());
CUDA_MASS * h_data = new CUDA_MASS[masses.size()]; // copy masses into single array for copying to the GPU, set GPU pointers
int count = 0;
for (Mass * m : masses) {
m -> arrayptr = d_ptrs[count];
h_data[count] = CUDA_MASS(*m);
count++;
}
delete [] d_ptrs;
CUDA_MASS * d_data; // copy to the GPU
gpuErrchk(hipMalloc((void **)&d_data, sizeof(CUDA_MASS) * masses.size()));
gpuErrchk(hipMemcpy(d_data, h_data, sizeof(CUDA_MASS) * masses.size(), hipMemcpyHostToDevice));
delete [] h_data;
massBlocksPerGrid = (masses.size() + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (massBlocksPerGrid > MAX_BLOCKS) {
massBlocksPerGrid = MAX_BLOCKS;
}
if (massBlocksPerGrid < 1) {
massBlocksPerGrid = 1;
}
hipLaunchKernelGGL(( createMassPointers), dim3(massBlocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, thrust::raw_pointer_cast(d_masses.data()), d_data, masses.size());
gpuErrchk(hipFree(d_data));
return thrust::raw_pointer_cast(d_masses.data()); // doesn't really do anything
}
__global__ void createSpringPointers(CUDA_SPRING ** ptrs, CUDA_SPRING * data, int size) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
// ptrs[i] = (CUDA_SPRING *) malloc(sizeof(CUDA_SPRING));
*ptrs[i] = data[i];
}
}
CUDA_SPRING ** Simulation::springToArray() {
CUDA_SPRING ** d_ptrs = new CUDA_SPRING * [springs.size()]; // array of pointers
for (int i = 0; i < springs.size(); i++) { // potentially slow, allocate memory for every spring
gpuErrchk(hipMalloc((void **) d_ptrs + i, sizeof(CUDA_SPRING)));
}
d_springs = thrust::device_vector<CUDA_SPRING *>(d_ptrs, d_ptrs + springs.size()); // copy those pointers to the GPU using thrust
CUDA_SPRING * h_spring = new CUDA_SPRING[springs.size()]; // array for the springs themselves
int count = 0;
for (Spring * s : springs) {
s -> arrayptr = d_ptrs[count];
if (s -> _left && s -> _right) {
h_spring[count] = CUDA_SPRING(*s, s -> _left -> arrayptr, s -> _right -> arrayptr);
} else {
h_spring[count] = CUDA_SPRING(*s);
}
count++;
}
delete [] d_ptrs;
CUDA_SPRING * d_data;
gpuErrchk(hipMalloc((void **)& d_data, sizeof(CUDA_SPRING) * springs.size()));
gpuErrchk(hipMemcpy(d_data, h_spring, sizeof(CUDA_SPRING) * springs.size(), hipMemcpyHostToDevice));
delete [] h_spring;
hipLaunchKernelGGL(( createSpringPointers), dim3(springBlocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, thrust::raw_pointer_cast(d_springs.data()), d_data, springs.size());
gpuErrchk(hipFree(d_data));
return thrust::raw_pointer_cast(d_springs.data());
}
//void Simulation::constraintsToArray() {
// d_constraints.reserve(constraints.size());
//
// for (Constraint * c : constraints) {
// Constraint * d_temp;
// hipMalloc((void **)& d_temp, sizeof(Constraint));
// hipMemcpy(d_temp, c, sizeof(Constraint), hipMemcpyHostToDevice);
// d_constraints.push_back(d_temp);
// }
//}
void Simulation::toArray() {
CUDA_MASS ** d_mass = massToArray(); // must come first
CUDA_SPRING ** d_spring = springToArray();
}
__global__ void fromMassPointers(CUDA_MASS ** d_mass, CUDA_MASS * data, int size) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
data[i] = *d_mass[i];
}
}
void Simulation::get(Container *c) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot get updates from the GPU after the end of the simulation.");
} else if (!STARTED) {
std::cerr << "sim.get() does nothing if called before the simulation has started." << std::endl;
return;
}
CUDA_MASS ** temp;
gpuErrchk(hipMalloc((void **) &temp, sizeof(CUDA_MASS *) * c -> masses.size()));
CUDA_MASS ** d_ptrs = new CUDA_MASS * [c -> masses.size()];
for (int i = 0; i < c -> masses.size(); i++) {
d_ptrs[i] = c -> masses[i] -> arrayptr;
}
gpuErrchk(hipMemcpy(temp, d_ptrs, c -> masses.size() * sizeof(CUDA_MASS *), hipMemcpyHostToDevice));
delete [] d_ptrs;
CUDA_MASS * temp_data;
gpuErrchk(hipMalloc((void **) &temp_data, sizeof(CUDA_MASS) * c -> masses.size()));
updateCudaParameters();
hipLaunchKernelGGL(( fromMassPointers), dim3(massBlocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, temp, temp_data, c -> masses.size());
gpuErrchk(hipFree(temp));
CUDA_MASS * h_mass = new CUDA_MASS[masses.size()];
gpuErrchk(hipMemcpy(h_mass, temp_data, sizeof(CUDA_MASS) * masses.size(), hipMemcpyDeviceToHost));
gpuErrchk(hipFree(temp_data));
int count = 0;
for (Mass * m : c -> masses) {
*m = h_mass[count];
count++;
}
delete [] h_mass;
}
void Simulation::massFromArray() {
CUDA_MASS * temp;
gpuErrchk(hipMalloc((void **) &temp, sizeof(CUDA_MASS) * masses.size()));
hipLaunchKernelGGL(( fromMassPointers), dim3(massBlocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, d_mass, temp, masses.size());
CUDA_MASS * h_mass = new CUDA_MASS[masses.size()];
gpuErrchk(hipMemcpy(h_mass, temp, sizeof(CUDA_MASS) * masses.size(), hipMemcpyDeviceToHost));
gpuErrchk(hipFree(temp));
int count = 0;
Mass temp_data;
for (Mass * m : masses) {
*m = h_mass[count];
count++;
}
delete [] h_mass;
// hipFree(d_mass);
}
void Simulation::springFromArray() {
}
void Simulation::constraintsFromArray() {
}
void Simulation::fromArray() {
massFromArray();
springFromArray();
constraintsFromArray();
}
__global__ void printMasses(CUDA_MASS ** d_masses, int num_masses) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num_masses) {
CUDA_MASS data = *d_masses[i];
printf("%d: (%3f, %3f, %3f)", i, data.pos[0], data.pos[1], data.pos[2]);
}
}
__global__ void printForce(CUDA_MASS ** d_masses, int num_masses) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num_masses) {
Vec & data = d_masses[i] -> force;
printf("%d: (%3f, %3f, %3f)\n", i, data[0], data[1], data[2]);
}
}
__global__ void printSpring(CUDA_SPRING ** d_springs, int num_springs) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num_springs) {
CUDA_SPRING data = *d_springs[i];
printf("%d: left: (%5f, %5f, %5f), right: (%5f, %5f, %5f), k: %f, rest: %f\n ", i, data._left -> pos[0], data._left -> pos[1], data._left -> pos[2], data._right -> pos[0], data._right -> pos[1], data._right -> pos[2], data._k, data._rest);
}
}
__global__ void computeSpringForces(CUDA_SPRING ** d_spring, int num_springs, double t) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if ( i < num_springs ) {
CUDA_SPRING & spring = *d_spring[i];
if (spring._left == nullptr || spring._right == nullptr || ! spring._left -> valid || ! spring._right -> valid) // TODO might be expensive with CUDA instruction set
return;
Vec temp = (spring._right -> pos) - (spring._left -> pos);
double scale = 1.0;
if (spring._type == ACTIVE_CONTRACT_THEN_EXPAND){
scale = (1 - 0.2 * sin(spring._omega * t));
} else if (spring._type == ACTIVE_EXPAND_THEN_CONTRACT){
scale = (1 + 0.2 * sin(spring._omega * t));
}
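// Hooke's law along the spring axis: F = k * (rest * scale - |d|) * d_hat with d = right - left,
// plus a damping term proportional to the relative velocity projected onto d_hat.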
Vec force = spring._k * (spring._rest * scale - temp.norm()) * (temp / temp.norm()); // normal spring force
force += dot(spring._left -> vel - spring._right -> vel, temp / temp.norm()) * spring._damping * (temp / temp.norm()); // damping
#ifdef CONSTRAINTS
if (spring._right -> constraints.fixed == false) {
spring._right->force.atomicVecAdd(force); // need atomics here
}
if (spring._left -> constraints.fixed == false) {
spring._left->force.atomicVecAdd(-force);
}
#else
spring._right -> force.atomicVecAdd(force);
spring._left -> force.atomicVecAdd(-force);
#endif
}
}
double Simulation::time() {
return this -> T;
}
bool Simulation::running() {
return this -> RUNNING;
}
#ifdef RK2
template <bool step>
#endif
__global__ void massForcesAndUpdate(CUDA_MASS ** d_mass, int num_masses, double dt, double T, Vec global_acc, CUDA_GLOBAL_CONSTRAINTS c) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num_masses) {
CUDA_MASS &mass = *d_mass[i];
#ifdef CONSTRAINTS
if (mass.constraints.fixed == 1)
return;
#endif
mass.force += mass.m * global_acc;
mass.force += mass.extern_force;
// mass.force += mass.external;
for (int j = 0; j < c.num_planes; j++) { // global constraints
c.d_planes[j].applyForce(&mass);
}
for (int j = 0; j < c.num_balls; j++) {
c.d_balls[j].applyForce(&mass);
}
#ifdef CONSTRAINTS
for (int j = 0; j < mass.constraints.num_contact_planes; j++) { // local constraints
mass.constraints.contact_plane[j].applyForce(&mass);
}
for (int j = 0; j < mass.constraints.num_balls; j++) {
mass.constraints.ball[j].applyForce(&mass);
}
for (int j = 0; j < mass.constraints.num_constraint_planes; j++) {
mass.constraints.constraint_plane[j].applyForce(&mass);
}
for (int j = 0; j < mass.constraints.num_directions; j++) {
mass.constraints.direction[j].applyForce(&mass);
}
// NOTE TODO this is really janky. On certain platforms, the following code causes excessive memory usage on the GPU.
if (mass.vel.norm() != 0.0) {
double norm = mass.vel.norm();
mass.force += - mass.constraints.drag_coefficient * pow(norm, 2) * mass.vel / norm; // drag
}
#endif
#ifdef RK2
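// Midpoint (RK2) integration uses two kernel passes per time step: with step == true the current
// state is backed up and advanced by dt/2; with step == false the full step is taken from the
// backup using the forces evaluated at that midpoint (see the two launches in execute()).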
if constexpr(step) {
mass.acc = mass.force / mass.m;
mass.__rk2_backup_vel = mass.vel;
mass.__rk2_backup_pos = mass.pos;
mass.pos = mass.pos + 0.5 * mass.vel * dt;
mass.vel = mass.vel + 0.5 * mass.acc * dt;
mass.T += 0.5 * dt;
} else {
mass.acc = mass.force / mass.m;
mass.pos = mass.__rk2_backup_pos + mass.vel * dt;
mass.vel = mass.__rk2_backup_vel + mass.acc * dt;
mass.T += 0.5 * dt;
}
#elif VERLET
mass.vel += 0.5 * (mass.acc + mass.force / mass.m) * dt;
mass.acc = mass.force / mass.m;
mass.pos += mass.vel * dt + 0.5 * mass.acc * pow(dt, 2);
mass.T += dt;
#else // simple leapfrog Euler integration
mass.acc = mass.force / mass.m;
mass.vel = mass.vel + mass.acc * dt;
mass.pos = mass.pos + mass.vel * dt;
mass.T += dt;
#endif
mass.force = Vec(0, 0, 0);
}
}
#ifdef GRAPHICS
void Simulation::clearScreen() {
// Clear the screen
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // clear screen
// Use our shader
glUseProgram(programID);
// Send our transformation to the currently bound shader in the "MVP" uniform
glUniformMatrix4fv(MatrixID, 1, GL_FALSE, &MVP[0][0]);
}
void Simulation::renderScreen() {
// Swap buffers
#ifdef SDL2
SDL_GL_SwapWindow(window);
#else
glfwPollEvents();
glfwSwapBuffers(window);
#endif
}
#ifdef SDL2
void Simulation::createSDLWindow() {
if (SDL_Init(SDL_INIT_VIDEO) < 0)
{
std::cout << "Failed to init SDL\n";
return;
}
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 3);
SDL_GL_SetAttribute(SDL_GL_RED_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_ALPHA_SIZE, 8);
// Turn on double buffering with a 24bit Z buffer.
// You may need to change this to 16 or 32 for your system
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 24);
SDL_GL_SetAttribute(SDL_GL_MULTISAMPLEBUFFERS, 1);
SDL_GL_SetAttribute(SDL_GL_MULTISAMPLESAMPLES, 4);
SDL_GL_SetSwapInterval(1);
// Open a window and create its OpenGL context
window = SDL_CreateWindow("CUDA Physics Simulation", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, 1920, 1080, SDL_WINDOW_OPENGL);
SDL_SetWindowResizable(window, SDL_TRUE);
if (window == NULL) {
fprintf(stderr,
"Failed to open SDL window. If you have an Intel GPU, they are not 3.3 compatible. Try the 2.1 version of the tutorials.\n");
getchar();
SDL_Quit();
return;
}
context = SDL_GL_CreateContext(window);
// Initialize GLEW
glewExperimental = true; // Needed for core profile
if (glewInit() != GLEW_OK) {
fprintf(stderr, "Failed to initialize GLEW\n");
getchar();
SDL_Quit();
return;
}
glEnable(GL_DEPTH_TEST);
// // Accept fragment if it closer to the camera than the former one
glDepthFunc(GL_LESS);
// Dark blue background
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glEnable(GL_MULTISAMPLE);
}
#else
void framebuffer_size_callback(GLFWwindow* window, int width, int height)
{
glViewport(0, 0, width, height);
}
void Simulation::createGLFWWindow() {
// Initialise GLFW
if( !glfwInit() ) // TODO throw errors here
{
fprintf( stderr, "Failed to initialize GLFW\n" );
getchar();
exit(1);
}
glfwWindowHint(GLFW_SAMPLES, 4);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); // To make MacOS happy; should not be needed
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); //We don't want the old OpenGL
glfwWindowHint(GLFW_RESIZABLE, GL_TRUE);
glfwSwapInterval(1);
// Open a window and create its OpenGL context
window = glfwCreateWindow(1920, 1080, "CUDA Physics Simulation", NULL, NULL);
if (window == NULL) {
fprintf(stderr,
"Failed to open GLFW window. If you have an Intel GPU, they are not 3.3 compatible. Try the 2.1 version of the tutorials.\n");
getchar();
glfwTerminate();
exit(1);
}
glfwMakeContextCurrent(window);
glfwSetFramebufferSizeCallback(window, framebuffer_size_callback);
glEnable(GL_DEPTH_TEST);
// // Accept fragment if it closer to the camera than the former one
glDepthFunc(GL_LESS);
// Initialize GLEW
glewExperimental = true; // Needed for core profile
if (glewInit() != GLEW_OK) {
fprintf(stderr, "Failed to initialize GLEW\n");
getchar();
glfwTerminate();
exit(1);
}
// Ensure we can capture the escape key being pressed below
glfwSetInputMode(window, GLFW_STICKY_KEYS, GL_TRUE);
// Dark blue background
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
}
#endif
#endif
void Simulation::stop() { // no race condition actually
if (RUNNING) {
setBreakpoint(time());
waitForEvent();
}
ENDED = true;
freeGPU();
FREED = true;
return;
}
void Simulation::stop(double t) {
if (RUNNING) {
setBreakpoint(t);
waitForEvent();
}
ENDED = true;
freeGPU();
FREED = true;
return;
}
void Simulation::start() {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot call sim.start() after the end of the simulation.");
}
if (masses.size() == 0) {
throw std::runtime_error("No masses have been added. Please add masses before starting the simulation.");
}
std::cout << "Starting simulation with " << masses.size() << " masses and " << springs.size() << " springs." << std::endl;
RUNNING = true;
STARTED = true;
T = 0;
if (this -> dt <= 0) {
throw std::runtime_error("Simultation timestep Simulation::dt is invalid. Please choose a positive non-zero value.");
}
#ifdef GRAPHICS // SDL2 window needs to be created here for Mac OS
#ifdef SDL2
createSDLWindow();
#endif
#endif
updateCudaParameters();
d_constraints.d_balls = thrust::raw_pointer_cast(&d_balls[0]);
d_constraints.d_planes = thrust::raw_pointer_cast(&d_planes[0]);
d_constraints.num_balls = d_balls.size();
d_constraints.num_planes = d_planes.size();
update_constraints = false;
// hipDeviceSetLimit(hipLimitMallocHeapSize, 5 * (masses.size() * sizeof(CUDA_MASS) + springs.size() * sizeof(CUDA_SPRING)));
toArray();
d_mass = thrust::raw_pointer_cast(d_masses.data());
d_spring = thrust::raw_pointer_cast(d_springs.data());
gpu_thread = std::thread(&Simulation::_run, this);
}
void Simulation::_run() { // repeatedly start next
#ifdef GRAPHICS
#ifndef SDL2 // GLFW window needs to be created here for Windows
createGLFWWindow();
#endif
#ifdef SDL2
SDL_GL_MakeCurrent(window, context);
#endif
GLuint VertexArrayID;
glGenVertexArrays(1, &VertexArrayID);
glBindVertexArray(VertexArrayID);
// glEnable(GL_LIGHTING);
// glEnable(GL_LIGHT0);
// Create and compile our GLSL program from the shaders
this -> programID = LoadShaders(); // ("shaders/StandardShading.vertexshader", "shaders/StandardShading.fragmentshader"); //
// Get a handle for our "MVP" uniform
this -> MVP = getProjection(camera, looks_at, up); // compute perspective projection matrix
this -> MatrixID = glGetUniformLocation(programID, "MVP"); // doesn't seem to be necessary
generateBuffers(); // generate buffers for all masses and springs
for (Constraint * c : constraints) { // generate buffers for constraint objects
c -> generateBuffers();
}
#endif
execute();
GPU_DONE = true;
}
#ifdef GRAPHICS
glm::mat4 & Simulation::getProjectionMatrix() {
return this -> MVP;
}
void Simulation::setViewport(const Vec & camera_position, const Vec & target_location, const Vec & up_vector) {
if (RUNNING) {
throw std::runtime_error("The simulation is running. Cannot modify viewport during simulation run.");
}
this -> camera = camera_position;
this -> looks_at = target_location;
this -> up = up_vector;
if (STARTED) {
this -> MVP = getProjection(camera, looks_at, up); // compute perspective projection matrix
}
}
void Simulation::moveViewport(const Vec & displacement) {
if (RUNNING) {
throw std::runtime_error("The simulation is running. Cannot modify viewport during simulation run.");
}
this -> camera += displacement;
if (STARTED) {
this -> MVP = getProjection(camera, looks_at, up); // compute perspective projection matrix
}
}
#endif
void Simulation::updateCudaParameters() {
massBlocksPerGrid = (masses.size() + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
springBlocksPerGrid = (springs.size() + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (massBlocksPerGrid > MAX_BLOCKS) {
massBlocksPerGrid = MAX_BLOCKS;
}
if (springBlocksPerGrid > MAX_BLOCKS) {
springBlocksPerGrid = MAX_BLOCKS;
}
if (springBlocksPerGrid == 0) {
springBlocksPerGrid = 1;
}
d_mass = thrust::raw_pointer_cast(d_masses.data());
d_spring = thrust::raw_pointer_cast(d_springs.data());
}
void Simulation::resume() {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot resume the simulation.");
}
if (!STARTED) {
throw std::runtime_error("The simulation has not started. You cannot resume a simulation before calling sim.start().");
}
if (masses.size() == 0) {
throw std::runtime_error("No masses have been added. Please add masses before starting the simulation.");
}
updateCudaParameters();
hipDeviceSynchronize();
RUNNING = true;
}
void Simulation::execute() {
while (1) {
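// Main simulation loop on the GPU thread: when the earliest breakpoint time is reached the loop
// pauses (RUNNING = false) and spins until resume() restarts it or ENDED is set by stop() or the
// destructor; otherwise it advances the state by one time step per iteration.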
if (!bpts.empty() && *bpts.begin() <= T) {
hipDeviceSynchronize(); // synchronize before updating the springs and mass positions
// std::cout << "Breakpoint set for time " << *bpts.begin() << " reached at simulation time " << T << "!" << std::endl;
bpts.erase(bpts.begin());
RUNNING = false;
while (!RUNNING) {
std::this_thread::sleep_for(std::chrono::microseconds(1));
if (ENDED) {
for (Constraint * c : constraints) {
delete c;
}
#ifdef GRAPHICS
glDeleteBuffers(1, &vertices);
glDeleteBuffers(1, &colors);
glDeleteBuffers(1, &indices);
glDeleteProgram(programID);
glDeleteVertexArrays(1, &VertexArrayID);
// Close OpenGL window and terminate GLFW
#ifdef SDL2
SDL_GL_DeleteContext(context);
SDL_DestroyWindow(window);
SDL_Quit();
#else
glfwTerminate();
#endif
#endif
return;
}
}
#ifdef GRAPHICS
if (resize_buffers) {
resizeBuffers(); // needs to be run from GPU thread
resize_buffers = false;
update_colors = true;
update_indices = true;
}
#endif
#ifdef CONSTRAINTS
if (update_constraints) {
d_constraints.d_balls = thrust::raw_pointer_cast(&d_balls[0]);
d_constraints.d_planes = thrust::raw_pointer_cast(&d_planes[0]);
d_constraints.num_balls = d_balls.size();
d_constraints.num_planes = d_planes.size();
#ifdef GRAPHICS
for (Constraint * c : constraints) { // generate buffers for constraint objects
if (!c -> _initialized)
c -> generateBuffers();
}
#endif
update_constraints = false;
}
#endif
continue;
}
gpuErrchk( hipPeekAtLastError() );
hipDeviceSynchronize(); // synchronize before updating the springs and mass positions
#ifdef RK2
hipLaunchKernelGGL(( computeSpringForces), dim3(springBlocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, d_spring, springs.size(), T); // compute mass forces after syncing
gpuErrchk( hipPeekAtLastError() );
hipLaunchKernelGGL(( massForcesAndUpdate<true>), dim3(massBlocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, d_mass, masses.size(), dt, T, _global_acc, d_constraints);
gpuErrchk( hipPeekAtLastError() );
T += 0.5 * dt;
hipLaunchKernelGGL(( computeSpringForces), dim3(springBlocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, d_spring, springs.size(), T); // compute mass forces after syncing
gpuErrchk( hipPeekAtLastError() );
hipLaunchKernelGGL(( massForcesAndUpdate<false>), dim3(massBlocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, d_mass, masses.size(), dt, T, _global_acc, d_constraints);
gpuErrchk( hipPeekAtLastError() );
T += 0.5 * dt;
#else
hipLaunchKernelGGL(( computeSpringForces), dim3(springBlocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, d_spring, springs.size(), T); // compute mass forces after syncing
gpuErrchk( hipPeekAtLastError() );
hipLaunchKernelGGL(( massForcesAndUpdate), dim3(massBlocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, d_mass, masses.size(), dt, T, _global_acc, d_constraints);
gpuErrchk( hipPeekAtLastError() );
T += dt;
#endif
#ifdef GRAPHICS
if (fmod(T, 0.01) < dt) {
clearScreen();
updateBuffers();
draw();
for (Constraint * c : constraints) {
c->draw();
}
renderScreen();
#ifndef SDL2
if (glfwGetKey(window, GLFW_KEY_ESCAPE ) == GLFW_PRESS || glfwWindowShouldClose(window) != 0) {
// RUNNING = 0;
// ENDED = 1;
exit(1); // TODO maybe deal with memory leak here.
}
#endif
}
#endif
}
}
void Simulation::pause(double t) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Control functions cannot be called.");
}
setBreakpoint(t);
waitForEvent();
}
void Simulation::wait(double t) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Control functions cannot be called.");
}
double current_time = time();
while (RUNNING && time() <= current_time + t) {
std::this_thread::sleep_for(std::chrono::microseconds(10)); // TODO replace this with wait queue.
}
}
void Simulation::waitUntil(double t) {
if (ENDED && !FREED) {
throw std::runtime_error("The simulation has ended. Control functions cannot be called.");
}
while (RUNNING && time() <= t) {
std::this_thread::sleep_for(std::chrono::microseconds(10));
}
}
void Simulation::waitForEvent() {
if (ENDED && !FREED) {
throw std::runtime_error("The simulation has ended. Control functions cannot be called.");
}
while (RUNNING) {
std::this_thread::sleep_for(std::chrono::microseconds(10));
}
}
#ifdef GRAPHICS
void Simulation::resizeBuffers() {
// std::cout << "resizing buffers (" << masses.size() << " masses, " << springs.size() << " springs)." << std::endl;
// std::cout << "resizing buffers (" << d_masses.size() << " device masses, " << d_springs.size() << " device springs)." << std::endl;
{
hipGLUnregisterBufferObject(this -> colors);
glBindBuffer(GL_ARRAY_BUFFER, this -> colors);
glBufferData(GL_ARRAY_BUFFER, 3 * masses.size() * sizeof(GLfloat), NULL, GL_DYNAMIC_DRAW);
hipGLRegisterBufferObject(this -> colors);
}
{
hipGLUnregisterBufferObject(this -> indices);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, this -> indices);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, 2 * springs.size() * sizeof(GLuint), NULL, GL_DYNAMIC_DRAW); // second argument is number of bytes
hipGLRegisterBufferObject(this -> indices);
}
{
hipGLUnregisterBufferObject(this -> vertices);
glBindBuffer(GL_ARRAY_BUFFER, vertices);
glBufferData(GL_ARRAY_BUFFER, 3 * masses.size() * sizeof(GLfloat), NULL, GL_DYNAMIC_DRAW);
hipGLRegisterBufferObject(this -> vertices);
}
resize_buffers = false;
}
void Simulation::generateBuffers() {
{
GLuint colorbuffer; // bind colors to buffer colorbuffer
glGenBuffers(1, &colorbuffer);
glBindBuffer(GL_ARRAY_BUFFER, colorbuffer);
        glBufferData(GL_ARRAY_BUFFER, std::max(3 * masses.size() * sizeof(GLfloat), 3 * sizeof(GLfloat)), NULL, GL_DYNAMIC_DRAW);
hipGLRegisterBufferObject(colorbuffer);
this -> colors = colorbuffer;
}
{
        GLuint elementbuffer; // index buffer holding the spring endpoint indices
glGenBuffers(1, &elementbuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementbuffer);
        glBufferData(GL_ELEMENT_ARRAY_BUFFER, std::max(2 * springs.size() * sizeof(GLuint), 3 * sizeof(GLfloat)), NULL, GL_DYNAMIC_DRAW); // second argument is number of bytes
hipGLRegisterBufferObject(elementbuffer);
this -> indices = elementbuffer;
}
{
GLuint vertexbuffer;
        glGenBuffers(1, &vertexbuffer); // vertex buffer holding the mass positions
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
        glBufferData(GL_ARRAY_BUFFER, std::max(3 * masses.size() * sizeof(GLfloat), 3 * sizeof(GLfloat)), NULL, GL_DYNAMIC_DRAW);
hipGLRegisterBufferObject(vertexbuffer);
this -> vertices = vertexbuffer;
}
}
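// The buffers above follow the usual GPU/OpenGL interop pattern: each GL buffer is registered
// with the runtime once (hipGLRegisterBufferObject), then mapped every frame so a kernel can
// write positions/colors/indices into it directly, and unmapped again before drawing
// (see updateBuffers() below).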
__global__ void updateVertices(float * gl_ptr, CUDA_MASS ** d_mass, int num_masses) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num_masses) {
gl_ptr[3 * i] = (float) d_mass[i] -> pos[0];
gl_ptr[3 * i + 1] = (float) d_mass[i] -> pos[1];
gl_ptr[3 * i + 2] = (float) d_mass[i] -> pos[2];
}
}
__global__ void updateIndices(unsigned int * gl_ptr, CUDA_SPRING ** d_spring, CUDA_MASS ** d_mass, int num_springs, int num_masses) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num_springs) {
if (d_spring[i] -> _left == nullptr || d_spring[i] -> _right == nullptr || ! d_spring[i] -> _left -> valid || ! d_spring[i] -> _right -> valid) {
            gl_ptr[2*i] = 0;
            gl_ptr[2*i + 1] = 0;
return;
}
CUDA_MASS * left = d_spring[i] -> _left;
CUDA_MASS * right = d_spring[i] -> _right;
for (int j = 0; j < num_masses; j++) {
if (d_mass[j] == left) {
gl_ptr[2*i] = j;
}
if (d_mass[j] == right) {
gl_ptr[2*i + 1] = j;
}
}
}
}
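// Note: updateIndices resolves each spring endpoint by scanning the whole mass array, which is
// O(num_springs * num_masses) per call. It only runs when update_indices is set (i.e. after
// masses or springs change), so the cost is usually tolerable; caching an index on each mass
// would avoid the scan entirely (not implemented here).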
__global__ void updateColors(float * gl_ptr, CUDA_MASS ** d_mass, int num_masses) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num_masses) {
gl_ptr[3 * i] = (float) d_mass[i] -> color[0];
gl_ptr[3 * i + 1] = (float) d_mass[i] -> color[1];
gl_ptr[3 * i + 2] = (float) d_mass[i] -> color[2];
}
}
void Simulation::updateBuffers() {
if (update_colors) {
glBindBuffer(GL_ARRAY_BUFFER, colors);
        void *colorPointer; // if no masses, springs, or colors are changed/deleted, this only needs to be run once
hipGLMapBufferObject__(&colorPointer, colors);
hipLaunchKernelGGL(( updateColors), dim3(massBlocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, (float *) colorPointer, d_mass, masses.size());
hipGLUnmapBufferObject(colors);
update_colors = false;
}
if (update_indices) {
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indices);
        void *indexPointer; // if no masses or springs are deleted, this only needs to be run once
hipGLMapBufferObject__(&indexPointer, indices);
hipLaunchKernelGGL(( updateIndices), dim3(springBlocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, (unsigned int *) indexPointer, d_spring, d_mass, springs.size(), masses.size());
hipGLUnmapBufferObject(indices);
update_indices = false;
}
{
glBindBuffer(GL_ARRAY_BUFFER, vertices);
void *vertexPointer;
hipGLMapBufferObject__(&vertexPointer, vertices);
hipLaunchKernelGGL(( updateVertices), dim3(massBlocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, (float *) vertexPointer, d_mass, masses.size());
hipGLUnmapBufferObject(vertices);
}
}
void Simulation::draw() {
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, this -> vertices);
glPointSize(this -> pointSize);
glLineWidth(this -> lineWidth);
glVertexAttribPointer(
0, // attribute. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, this -> colors);
glVertexAttribPointer(
1, // attribute. No particular reason for 1, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
    glDrawArrays(GL_POINTS, 0, masses.size()); // one point per mass
    glDrawElements(GL_LINES, 2 * springs.size(), GL_UNSIGNED_INT, (void*) 0); // one line segment per spring
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(0);
}
#endif
Container * Simulation::createContainer() {
Container * c = new Container();
containers.push_back(c);
return c;
}
Cube * Simulation::createCube(const Vec & center, double side_length) { // creates a cube of masses and springs with the given center and side length
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot create new objects");
}
Cube * cube = new Cube(center, side_length);
d_masses.reserve(masses.size() + cube -> masses.size());
d_springs.reserve(springs.size() + cube -> springs.size());
for (Mass * m : cube -> masses) {
createMass(m);
}
for (Spring * s : cube -> springs) {
createSpring(s);
}
containers.push_back(cube);
return cube;
}
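// Illustrative usage of createCube() above (sketch only, not part of the original source;
// the values below are arbitrary):
//
//   titan::Simulation sim;
//   Cube * cube = sim.createCube(Vec(0, 0, 5), 2.0); // 2 x 2 x 2 cube centered at z = 5
//   sim.createPlane(Vec(0, 0, 1), 0.0);              // ground plane z = 0
//   sim.setTimeStep(0.0001);
//   sim.start();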
Container * Simulation::importFromSTL(const std::string & path, double density, int num_rays) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot import new STL objects");
}
stl::stlFile file = stl::parseSTL(path);
stl::BBox b = file.getBoundingBox();
double dimmax = max(max(b.xdim, b.ydim), b.zdim);
double dimx, dimy, dimz;
dimx = 10 * b.xdim / dimmax;
dimy = 10 * b.ydim / dimmax;
dimz = 10 * b.zdim / dimmax;
std::cout << b.xdim << " " << b.ydim << " " << b.zdim << " " << dimmax << " " << pow(10 / dimmax, 3) << " " << density * pow(10 / dimmax, 3) * b.xdim * b.ydim * b.zdim << " " << (int) cbrt(density * pow(10 / dimmax, 3) * b.xdim * b.ydim * b.zdim) << std::endl;
int num_pts = (int) cbrt(density * pow(10 / dimmax, 3) * b.xdim * b.ydim * b.zdim);
std::cout << "density is: " << density << " and num_pts is " << num_pts << std::endl;
Lattice * l1 = new Lattice(Vec(0, 0, dimz), Vec(dimx - 0.001, dimy - 0.001, dimz - 0.001), num_pts, num_pts, num_pts);
for (Mass * m : l1 -> masses) {
if (!file.inside(stl::Vec3D(b.center[0] + (b.xdim / dimx) * m -> pos[0], b.center[1] + (b.ydim / dimy) * m -> pos[1], (b.zdim / dimz) * (m -> pos[2] - dimz) + b.center[2]), num_rays)) {
m -> valid = false;
}
}
for (auto i = l1 -> springs.begin(); i != l1 -> springs.end();) {
Spring * s = *i;
if (!s ->_left -> valid || ! s -> _right -> valid) {
delete s;
i = l1 -> springs.erase(i);
} else {
++i;
}
}
for (auto i = l1 -> masses.begin(); i != l1 -> masses.end();) {
Mass * m = *i;
if (!m -> valid) {
delete m;
i = l1 -> masses.erase(i);
} else {
++i;
}
}
d_masses.reserve(masses.size() + l1 -> masses.size());
d_springs.reserve(springs.size() + l1 -> springs.size());
for (Mass * m : l1 -> masses) {
createMass(m);
}
for (Spring * s : l1 -> springs) {
createSpring(s);
}
containers.push_back(l1);
return l1;
}
Lattice * Simulation::createLattice(const Vec & center, const Vec & dims, int nx, int ny, int nz) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. New objects cannot be created.");
}
Lattice * l = new Lattice(center, dims, nx, ny, nz);
d_masses.reserve(masses.size() + l -> masses.size());
d_springs.reserve(springs.size() + l -> springs.size());
for (Mass * m : l -> masses) {
createMass(m);
}
for (Spring * s : l -> springs) {
createSpring(s);
}
containers.push_back(l);
return l;
}
#ifdef CONSTRAINTS
Beam * Simulation::createBeam(const Vec & center, const Vec & dims, int nx, int ny, int nz) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. New objects cannot be created.");
}
Beam * l = new Beam(center, dims, nx, ny, nz);
d_masses.reserve(masses.size() + l -> masses.size());
d_springs.reserve(springs.size() + l -> springs.size());
for (Mass * m : l -> masses) {
createMass(m);
}
for (Spring * s : l -> springs) {
createSpring(s);
}
containers.push_back(l);
return l;
}
#endif
// Robot * Simulation::createRobot(const Vec & center, const cppn& encoding, double side_length, double omega, double k_soft, double k_stiff){
// if (ENDED) {
// throw std::runtime_error("The simulation has ended. New objects cannot be created.");
// }
// Robot * l = new Robot(center, encoding, side_length, omega, k_soft, k_stiff);
// d_masses.reserve(masses.size() + l -> masses.size());
// d_springs.reserve(springs.size() + l -> springs.size());
// for (Mass * m : l -> masses) {
// createMass(m);
// }
// for (Spring * s : l -> springs) {
// createSpring(s);
// }
// containers.push_back(l);
// return l;
// }
void Simulation::createPlane(const Vec & abc, double d) { // creates half-space ax + by + cz < d
if (ENDED) {
throw std::runtime_error("The simulation has ended. New objects cannot be created.");
}
ContactPlane * new_plane = new ContactPlane(abc, d);
constraints.push_back(new_plane);
d_planes.push_back(CudaContactPlane(*new_plane));
update_constraints = true;
}
void Simulation::createPlane(const Vec & abc, double d, double FRICTION_K, double FRICTION_S) { // creates half-space ax + by + cz < d
if (ENDED) {
throw std::runtime_error("The simulation has ended. New objects cannot be created.");
}
ContactPlane * new_plane = new ContactPlane(abc, d);
new_plane -> _FRICTION_K = FRICTION_K;
new_plane -> _FRICTION_S = FRICTION_S;
constraints.push_back(new_plane);
d_planes.push_back(CudaContactPlane(*new_plane));
update_constraints = true;
}
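// For example, createPlane(Vec(0, 0, 1), 0.0) adds the ground plane z = 0; this overload
// additionally sets the kinetic (FRICTION_K) and static (FRICTION_S) friction coefficients
// stored on the contact plane.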
void Simulation::createBall(const Vec & center, double r ) { // creates ball with radius r at position center
if (ENDED) {
throw std::runtime_error("The simulation has ended. New constraints cannot be added.");
}
Ball * new_ball = new Ball(center, r);
constraints.push_back(new_ball);
d_balls.push_back(CudaBall(*new_ball));
update_constraints = true;
}
void Simulation::clearConstraints() { // clears global constraints only
this -> constraints.clear();
update_constraints = true;
}
void Simulation::printPositions() {
if (ENDED) {
throw std::runtime_error("The simulation has ended. You cannot view parameters of the simulation after it has been stopped.");
}
if (RUNNING) {
std::cout << "\nDEVICE MASSES: " << std::endl;
hipLaunchKernelGGL(( printMasses), dim3(massBlocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, d_mass, masses.size());
hipDeviceSynchronize();
}
else {
std::cout << "\nHOST MASSES: " << std::endl;
int count = 0;
for (Mass * m : masses) {
std::cout << count << ": " << m -> pos << std::endl;
count++;
}
}
std::cout << std::endl;
}
void Simulation::printSprings() {
if (ENDED) {
throw std::runtime_error("The simulation has ended. You cannot view parameters of the simulation after it has been stopped.");
}
if (RUNNING) {
std::cout << "\nDEVICE SPRINGS: " << std::endl;
hipLaunchKernelGGL(( printSpring), dim3(springBlocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, d_spring, springs.size());
hipDeviceSynchronize();
}
else {
std::cout << "\nHOST SPRINGS: " << std::endl;
}
std::cout << std::endl;
}
void Simulation::setGlobalAcceleration(const Vec & global_acc) {
if (RUNNING) {
throw std::runtime_error("The simulation is running. The global force parameter cannot be changed during runtime");
}
this -> _global_acc = global_acc;
}
} // namespace titan | a4c30c3fbb7dcd5ec129b9a5690ac76978211eba.cu | //
// Created by Jacob Austin on 5/17/18.
//
#include "sim.h"
#include "stlparser.h"
#include <thrust/remove.h>
#include <thrust/execution_policy.h>
#ifdef GRAPHICS
#include <GLFW/glfw3.h>
#endif
#include <cuda_runtime.h>
#include <cuda.h>
#include <cuda_device_runtime_api.h>
#include <cuda_gl_interop.h>
#include <exception>
namespace titan {
#ifdef GRAPHICS
#ifndef SDL2
void framebuffer_size_callback(GLFWwindow* window, int width, int height);
#endif
#endif
__global__ void createSpringPointers(CUDA_SPRING ** ptrs, CUDA_SPRING * data, int size);
__global__ void createMassPointers(CUDA_MASS ** ptrs, CUDA_MASS * data, int size);
__global__ void computeSpringForces(CUDA_SPRING ** device_springs, int num_springs, double t);
__global__ void massForcesAndUpdate(CUDA_MASS ** d_mass, int num_masses, double dt, double T, Vec global_acc, CUDA_GLOBAL_CONSTRAINTS c);
bool Simulation::RUNNING;
bool Simulation::STARTED;
bool Simulation::ENDED;
bool Simulation::FREED;
bool Simulation::GPU_DONE;
#ifdef GRAPHICS
GLFWwindow * Simulation::window;
GLuint Simulation::VertexArrayID;
GLuint Simulation::programID;
GLuint Simulation::MatrixID;
glm::mat4 Simulation::MVP;
GLuint Simulation::vertices;
GLuint Simulation::colors;
GLuint Simulation::indices;
bool Simulation::update_indices;
bool Simulation::update_colors;
int Simulation::lineWidth;
int Simulation::pointSize;
bool Simulation::resize_buffers;
Vec Simulation::camera;
Vec Simulation::looks_at;
Vec Simulation::up;
#endif
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=false)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %d %s %s %d\n", code, cudaGetErrorString(code), file, line);
if (abort) {
char buffer[200];
snprintf(buffer, sizeof(buffer), "GPUassert error in CUDA kernel: %s %s %d\n", cudaGetErrorString(code), file, line);
std::string buffer_string = buffer;
throw std::runtime_error(buffer_string);
}
}
}
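// gpuErrchk wraps the CUDA runtime calls below. With the abort parameter defaulted to false a
// failure is only logged to stderr; passing abort = true would instead throw a
// std::runtime_error containing the CUDA error string.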
Simulation::Simulation() {
dt = 0.0001;
RUNNING = false;
STARTED = false;
ENDED = false;
FREED = false;
GPU_DONE = false;
update_constraints = true;
_global_acc = Vec(0, 0, -9.81);
#ifdef GRAPHICS
resize_buffers = true;
update_colors = true;
update_indices = true;
lineWidth = 1;
pointSize = 3;
camera = Vec(15, 15, 7);
looks_at = Vec(0, 0, 2);
up = Vec(0, 0, 1);
#endif
}
void Simulation::reset() {
this -> masses.clear();
this -> springs.clear();
this -> containers.clear();
this -> constraints.clear();
RUNNING = false;
STARTED = false;
ENDED = false;
FREED = false;
GPU_DONE = false;
update_constraints = true;
_global_acc = Vec(0, 0, -9.81);
#ifdef GRAPHICS
resize_buffers = true;
update_colors = true;
update_indices = true;
lineWidth = 1;
pointSize = 3;
camera = Vec(15, 15, 7);
looks_at = Vec(0, 0, 2);
up = Vec(0, 0, 1);
#endif
}
void Simulation::freeGPU() {
for (Spring * s : springs) {
if (s -> _left && ! s -> _left -> valid) {
if (s -> _left -> arrayptr) {
gpuErrchk(cudaFree(s -> _left -> arrayptr));
}
delete s -> _left;
}
if (s -> _right && ! s -> _right -> valid) {
if (s -> _right -> arrayptr) {
gpuErrchk(cudaFree(s -> _right -> arrayptr));
}
delete s -> _right;
}
delete s;
}
for (Mass * m : masses) {
delete m;
}
for (Container * c : containers) {
delete c;
}
d_balls.clear();
d_balls.shrink_to_fit();
d_planes.clear();
d_planes.shrink_to_fit();
// freeSprings<<<springBlocksPerGrid, THREADS_PER_BLOCK>>>(d_spring, springs.size()); // MUST COME BEFORE freeMasses
// freeMasses<<<massBlocksPerGrid, THREADS_PER_BLOCK>>>(d_mass, masses.size());
// #ifdef GRAPHICS
// cudaGLUnmapBufferObject(this -> colors);
// cudaGLUnmapBufferObject(this -> indices);
// cudaGLUnmapBufferObject(this -> vertices);
// cudaGLUnregisterBufferObject(this -> colors);
// cudaGLUnregisterBufferObject(this -> indices);
// cudaGLUnregisterBufferObject(this -> vertices);
// #endif
FREED = true; // just to be safe
ENDED = true; // just to be safe
}
Simulation::~Simulation() {
std::cerr << "Simulation destructor called." << std::endl;
if (STARTED) {
waitForEvent();
ENDED = true; // TODO maybe race condition
while (!GPU_DONE) {
std::this_thread::sleep_for(std::chrono::milliseconds(10));
}
std::this_thread::sleep_for(std::chrono::milliseconds(10)); // TODO fix race condition
if (gpu_thread.joinable()) {
gpu_thread.join();
} else {
std::cout << "could not join GPU thread." << std::endl;
exit(1);
}
if (!FREED) {
freeGPU();
FREED = true;
}
} else {
for (Mass * m : masses) {
delete m;
}
for (Spring * s : springs) {
delete s;
}
for (Container * c : containers) {
delete c;
}
}
}
Mass * Simulation::createMass(Mass * m) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
m -> ref_count++;
if (!STARTED) {
masses.push_back(m);
return m;
} else {
if (RUNNING) {
throw std::runtime_error("The simulation is running. Stop the simulation to make changes.");
}
masses.push_back(m);
CUDA_MASS * d_mass;
gpuErrchk(cudaMalloc((void **) &d_mass, sizeof(CUDA_MASS)));
m -> arrayptr = d_mass;
d_masses.push_back(d_mass);
CUDA_MASS temp = CUDA_MASS(*m);
gpuErrchk(cudaMemcpy(d_mass, &temp, sizeof(CUDA_MASS), cudaMemcpyHostToDevice));
#ifdef GRAPHICS
resize_buffers = true;
#endif
return m;
}
}
Spring * Simulation::getSpringByIndex(int i) {
assert(i < springs.size() && i >= 0);
return springs[i];
}
Mass * Simulation::getMassByIndex(int i) {
assert(i < masses.size() && i >= 0);
return masses[i];
}
Container * Simulation::getContainerByIndex(int i) {
assert(i < containers.size() && i >= 0);
return containers[i];
}
Mass * Simulation::createMass() {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
Mass * m = new Mass();
return createMass(m);
}
Mass * Simulation::createMass(const Vec & pos) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
Mass * m = new Mass(pos);
return createMass(m);
}
Spring * Simulation::createSpring(Spring * s) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
if (s -> _right) { s -> _right -> ref_count++; }
if (s -> _left) { s -> _left -> ref_count++; }
if (!STARTED) {
springs.push_back(s);
return s;
} else {
if (RUNNING) {
exit(1);
}
springs.push_back(s);
CUDA_SPRING * d_spring;
gpuErrchk(cudaMalloc((void **) &d_spring, sizeof(CUDA_SPRING)));
s -> arrayptr = d_spring;
d_springs.push_back(d_spring);
CUDA_SPRING temp = CUDA_SPRING(*s);
gpuErrchk(cudaMemcpy(d_spring, &temp, sizeof(CUDA_SPRING), cudaMemcpyHostToDevice));
#ifdef GRAPHICS
resize_buffers = true;
#endif
return s;
}
}
Spring * Simulation::createSpring() {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
Spring * s = new Spring();
return createSpring(s);
}
Spring * Simulation::createSpring(Mass * m1, Mass * m2) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
Spring * s = new Spring(m1, m2);
return createSpring(s);
}
__global__ void invalidate(CUDA_MASS ** ptrs, CUDA_MASS * m, int size) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
if (ptrs[i] == m) {
m -> valid = false;
}
}
}
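// Deletion uses a mark-and-compact scheme: the kernel above flags the matching device mass as
// invalid, and deleteMass() then compacts the device pointer array with thrust::remove so
// subsequent kernels skip the removed mass.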
void Simulation::deleteMass(Mass * m) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
if (!STARTED) {
masses.resize(std::remove(masses.begin(), masses.end(), m) - masses.begin());
m -> decrementRefCount();
} else {
if (RUNNING) {
exit(1);
}
updateCudaParameters();
invalidate<<<massBlocksPerGrid, THREADS_PER_BLOCK>>>(d_mass, m -> arrayptr, masses.size());
m -> valid = false;
thrust::remove(thrust::device, d_masses.begin(), d_masses.begin() + masses.size(), m -> arrayptr);
masses.resize(std::remove(masses.begin(), masses.end(), m) - masses.begin());
d_masses.resize(masses.size());
m -> decrementRefCount();
#ifdef GRAPHICS
resize_buffers = true;
#endif
}
}
void Simulation::deleteSpring(Spring * s) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
if (!STARTED) {
springs.resize(std::remove(springs.begin(), springs.end(), s) - springs.begin());
if (s -> _left) { s -> _left -> decrementRefCount(); }
if (s -> _right) { s -> _right -> decrementRefCount(); }
} else {
if (RUNNING) {
exit(1);
}
gpuErrchk(cudaFree(s -> arrayptr));
thrust::remove(thrust::device, d_springs.begin(), d_springs.begin() + springs.size(), s -> arrayptr);
springs.resize(std::remove(springs.begin(), springs.end(), s) - springs.begin());
if (s -> _left) { s -> _left -> decrementRefCount(); }
if (s -> _right) { s -> _right -> decrementRefCount(); }
delete s;
#ifdef GRAPHICS
resize_buffers = true;
#endif
}
}
struct mass_in_list {
__device__ __host__ mass_in_list(CUDA_MASS ** ptr, int n) : list(ptr), size(n) {};
__device__ __host__ bool operator()(CUDA_MASS * data) {
for (int i = 0; i < size; i++) {
if (list[i] == data) {
data -> valid = false;
return true;
}
}
return false;
}
CUDA_MASS ** list;
int size;
};
struct spring_in_list {
__device__ __host__ spring_in_list(CUDA_SPRING ** ptr, int n) : list(ptr), size(n) {};
__device__ __host__ bool operator()(CUDA_SPRING * data) {
for (int i = 0; i < size; i++) {
if (list[i] == data) {
return true;
}
}
return false;
}
CUDA_SPRING ** list;
int size;
};
struct host_mass_in_list {
__device__ __host__ host_mass_in_list(Mass ** ptr, int n) : list(ptr), size(n) {};
__device__ __host__ bool operator()(Mass * data) {
for (int i = 0; i < size; i++) {
if (list[i] == data) {
return true;
}
}
return false;
}
Mass ** list;
int size;
};
struct host_spring_in_list {
__device__ __host__ host_spring_in_list(Spring ** ptr, int n) : list(ptr), size(n) {};
__device__ __host__ bool operator()(Spring * data) {
for (int i = 0; i < size; i++) {
if (list[i] == data) {
return true;
}
}
return false;
}
Spring ** list;
int size;
};
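// The four functors above are simple membership predicates (linear search over a pointer
// list); they are used with thrust::remove_if in deleteContainer() to compact the host and
// device mass/spring arrays when an entire Container is removed.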
void Simulation::deleteContainer(Container * c) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
if (RUNNING) {
throw std::runtime_error("The simulation is running. Stop the simulation to make changes.");
}
if (!STARTED) {
for (Mass * m : c -> masses) {
deleteMass(m);
}
for (Spring * s : c -> springs) {
deleteSpring(s);
}
delete c;
containers.resize(std::remove(containers.begin(), containers.end(), c) - containers.begin());
return;
}
{
CUDA_MASS ** d_ptrs = new CUDA_MASS * [c -> masses.size()];
for (int i = 0; i < c -> masses.size(); i++) {
d_ptrs[i] = c -> masses[i] -> arrayptr;
c -> masses[i] -> valid = false;
c -> masses[i] -> decrementRefCount();
}
masses.resize(thrust::remove_if(thrust::host, masses.begin(), masses.end(), host_mass_in_list(c -> masses.data(), c -> masses.size())) - masses.begin());
CUDA_MASS ** temp;
gpuErrchk(cudaMalloc((void **) &temp, sizeof(CUDA_MASS *) * c -> masses.size()));
gpuErrchk(cudaMemcpy(temp, d_ptrs, c -> masses.size() * sizeof(CUDA_MASS *), cudaMemcpyHostToDevice));
delete [] d_ptrs;
thrust::remove_if(thrust::device, d_masses.begin(), d_masses.begin() + masses.size() + c -> masses.size(), mass_in_list(temp, c -> masses.size()));
d_masses.resize(masses.size());
gpuErrchk(cudaFree(temp));
}
{
CUDA_SPRING ** d_ptrs = new CUDA_SPRING * [c -> springs.size()];
for (int i = 0; i < c -> springs.size(); i++) {
Spring * s = c -> springs[i];
d_ptrs[i] = s -> arrayptr;
gpuErrchk(cudaFree(s -> arrayptr));
if (s -> _left) { s -> _left -> decrementRefCount(); }
if (s -> _right) { s -> _right -> decrementRefCount(); }
}
springs.resize(thrust::remove_if(thrust::host, springs.begin(), springs.end(), host_spring_in_list(c -> springs.data(), c -> springs.size())) - springs.begin());
CUDA_SPRING ** temp;
gpuErrchk(cudaMalloc((void **) &temp, sizeof(CUDA_SPRING *) * c -> springs.size()));
gpuErrchk(cudaMemcpy(temp, d_ptrs, c -> springs.size() * sizeof(CUDA_SPRING *), cudaMemcpyHostToDevice));
delete [] d_ptrs;
thrust::remove_if(thrust::device, d_springs.begin(), d_springs.begin() + springs.size() + c -> springs.size(), spring_in_list(temp, c -> springs.size()));
d_springs.resize(springs.size());
gpuErrchk(cudaFree(temp));
}
#ifdef GRAPHICS // TODO make a decision about this
resize_buffers = true;
#endif
delete c;
containers.resize(std::remove(containers.begin(), containers.end(), c) - containers.begin());
}
//void Simulation::deleteContainer(Container * c) {
// if (RUNNING) {
// exit(1);
// }
//
// std::cout << c -> masses.size() << " " << c -> springs.size() << std::endl;
//
// for (Mass * m : c -> masses) {
// deleteMass(m);
// }
//
// for (Spring * s : c -> springs) {
// deleteSpring(s);
// }
//
//#ifdef GRAPHICS
// resize_buffers = true;
//#endif
//
// delete c;
// containers.remove(c);
//}
void Simulation::get(Mass * m) {
if (!STARTED) {
std::cerr << "The simulation has not started. Get and set commands cannot be called before sim.start()" << std::endl;
return;
}
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
CUDA_MASS temp;
gpuErrchk(cudaMemcpy(&temp, m -> arrayptr, sizeof(CUDA_MASS), cudaMemcpyDeviceToHost));
*m = temp;
}
void Simulation::set(Mass * m) {
if (!STARTED) {
std::cerr << "The simulation has not started. Get and set commands cannot be called before sim.start()" << std::endl;
return;
}
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
CUDA_MASS temp = CUDA_MASS(*m);
gpuErrchk(cudaMemcpy(m -> arrayptr, &temp, sizeof(CUDA_MASS), cudaMemcpyHostToDevice));
}
void Simulation::get(Spring * s) {
if (!STARTED) {
std::cerr << "The simulation has not started. Get and set commands cannot be called before sim.start()" << std::endl;
return;
}
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
CUDA_SPRING temp;
gpuErrchk(cudaMemcpy(&temp, s -> arrayptr, sizeof(CUDA_SPRING), cudaMemcpyDeviceToHost));
s -> update(temp);
}
void Simulation::set(Spring * s) {
if (!STARTED) {
std::cerr << "The simulation has not started. Get and set commands cannot be called before sim.start()" << std::endl;
return;
}
CUDA_SPRING temp = CUDA_SPRING(*s);
gpuErrchk(cudaMemcpy(s -> arrayptr, &temp, sizeof(CUDA_SPRING), cudaMemcpyHostToDevice));
}
void Simulation::getAll() {
if (!STARTED) {
std::cerr << "The simulation has not started. Get and set commands cannot be called before sim.start()" << std::endl;
return;
}
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
massFromArray(); // TODO make a note of this
}
void Simulation::set(Container * c) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
if (!STARTED) {
std::cerr << "The simulation has not started. Get and set commands cannot be called before sim.start()" << std::endl;
return;
}
{
CUDA_MASS * h_data = new CUDA_MASS[c -> masses.size()]; // copy masses into single array for copying to the GPU, set GPU pointers
CUDA_MASS ** d_ptrs = new CUDA_MASS * [c -> masses.size()];
for (int i = 0; i < c -> masses.size(); i++) {
d_ptrs[i] = c -> masses[i] -> arrayptr;
h_data[i] = CUDA_MASS(*c -> masses[i]);
}
CUDA_MASS ** temp;
gpuErrchk(cudaMalloc((void **) &temp, sizeof(CUDA_MASS *) * c -> masses.size()));
gpuErrchk(cudaMemcpy(temp, d_ptrs, c -> masses.size() * sizeof(CUDA_MASS *), cudaMemcpyHostToDevice));
delete [] d_ptrs;
CUDA_MASS * d_data; // copy to the GPU
gpuErrchk(cudaMalloc((void **)&d_data, sizeof(CUDA_MASS) * c -> masses.size()));
gpuErrchk(cudaMemcpy(d_data, h_data, sizeof(CUDA_MASS) * c -> masses.size(), cudaMemcpyHostToDevice));
delete [] h_data;
updateCudaParameters();
createMassPointers<<<massBlocksPerGrid, THREADS_PER_BLOCK>>>(temp, d_data, c -> masses.size());
gpuErrchk(cudaFree(d_data));
gpuErrchk(cudaFree(temp));
}
{
CUDA_SPRING * h_spring = new CUDA_SPRING[c -> springs.size()];
CUDA_SPRING ** d_ptrs = new CUDA_SPRING *[c -> springs.size()];
int count = 0;
        for (Spring * s : c -> springs) { // iterate over the container's springs only; the buffers above are sized to c -> springs.size()
            d_ptrs[count] = s -> arrayptr;
            h_spring[count] = CUDA_SPRING(*s, s -> _left -> arrayptr, s -> _right -> arrayptr);
            count++;
        }
        CUDA_SPRING ** temp;
        gpuErrchk(cudaMalloc((void **) &temp, sizeof(CUDA_SPRING *) * c -> springs.size()));
        gpuErrchk(cudaMemcpy(temp, d_ptrs, c -> springs.size() * sizeof(CUDA_SPRING *), cudaMemcpyHostToDevice));
        delete [] d_ptrs;
        CUDA_SPRING * d_data;
        gpuErrchk(cudaMalloc((void **)& d_data, sizeof(CUDA_SPRING) * c -> springs.size()));
        gpuErrchk(cudaMemcpy(d_data, h_spring, sizeof(CUDA_SPRING) * c -> springs.size(), cudaMemcpyHostToDevice));
        delete [] h_spring;
        createSpringPointers<<<springBlocksPerGrid, THREADS_PER_BLOCK>>>(temp, d_data, c -> springs.size());
gpuErrchk(cudaFree(d_data));
gpuErrchk(cudaFree(temp));
}
}
void Simulation::setAll() {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
{
CUDA_MASS * h_data = new CUDA_MASS[masses.size()]; // copy masses into single array for copying to the GPU, set GPU pointers
int count = 0;
for (Mass * m : masses) {
h_data[count] = CUDA_MASS(*m);
count++;
}
CUDA_MASS * d_data; // copy to the GPU
gpuErrchk(cudaMalloc((void **)&d_data, sizeof(CUDA_MASS) * masses.size()));
gpuErrchk(cudaMemcpy(d_data, h_data, sizeof(CUDA_MASS) * masses.size(), cudaMemcpyHostToDevice));
delete [] h_data;
updateCudaParameters();
createMassPointers<<<massBlocksPerGrid, THREADS_PER_BLOCK>>>(thrust::raw_pointer_cast(d_masses.data()), d_data, masses.size());
gpuErrchk(cudaFree(d_data));
}
{
CUDA_SPRING * h_spring = new CUDA_SPRING[springs.size()];
int count = 0;
for (Spring * s : springs) {
h_spring[count] = CUDA_SPRING(*s, s -> _left -> arrayptr, s -> _right -> arrayptr);
count++;
}
CUDA_SPRING * d_data;
gpuErrchk(cudaMalloc((void **)& d_data, sizeof(CUDA_SPRING) * springs.size()));
gpuErrchk(cudaMemcpy(d_data, h_spring, sizeof(CUDA_SPRING) * springs.size(), cudaMemcpyHostToDevice));
delete [] h_spring;
createSpringPointers<<<springBlocksPerGrid, THREADS_PER_BLOCK>>>(thrust::raw_pointer_cast(d_springs.data()), d_data, springs.size());
gpuErrchk(cudaFree(d_data));
}
}
void Simulation::setAllSpringConstantValues(double k) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
for (Spring * s : springs) {
s -> _k = k;
}
}
void Simulation::defaultRestLengths() {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
for (Spring * s : springs) {
s -> defaultLength();
}
}
void Simulation::setAllMassValues(double m) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
for (Mass * mass : masses) {
        mass -> m = m;
}
}
void Simulation::setTimeStep(double delta_t) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot modify simulation after the end of the simulation.");
}
if (delta_t <= 0) {
throw std::runtime_error("Cannot set time step to negative or zero value.");
}
this -> dt = delta_t;
}
double Simulation::getTimeStep() {
return this -> dt;
}
void Simulation::setBreakpoint(double time) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot set breakpoints after the end of the simulation run.");
}
bpts.insert(time); // TODO mutex breakpoints
}
__global__ void createMassPointers(CUDA_MASS ** ptrs, CUDA_MASS * data, int size) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
*ptrs[i] = data[i];
}
}
CUDA_MASS ** Simulation::massToArray() {
CUDA_MASS ** d_ptrs = new CUDA_MASS * [masses.size()]; // array of pointers
for (int i = 0; i < masses.size(); i++) { // potentially slow
        gpuErrchk(cudaMalloc((void **) (d_ptrs + i), sizeof(CUDA_MASS))); // TODO: replace these per-mass allocations with a single bulk cudaMalloc
}
d_masses = thrust::device_vector<CUDA_MASS *>(d_ptrs, d_ptrs + masses.size());
CUDA_MASS * h_data = new CUDA_MASS[masses.size()]; // copy masses into single array for copying to the GPU, set GPU pointers
int count = 0;
for (Mass * m : masses) {
m -> arrayptr = d_ptrs[count];
h_data[count] = CUDA_MASS(*m);
count++;
}
delete [] d_ptrs;
CUDA_MASS * d_data; // copy to the GPU
gpuErrchk(cudaMalloc((void **)&d_data, sizeof(CUDA_MASS) * masses.size()));
gpuErrchk(cudaMemcpy(d_data, h_data, sizeof(CUDA_MASS) * masses.size(), cudaMemcpyHostToDevice));
delete [] h_data;
massBlocksPerGrid = (masses.size() + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (massBlocksPerGrid > MAX_BLOCKS) {
massBlocksPerGrid = MAX_BLOCKS;
}
if (massBlocksPerGrid < 1) {
massBlocksPerGrid = 1;
}
createMassPointers<<<massBlocksPerGrid, THREADS_PER_BLOCK>>>(thrust::raw_pointer_cast(d_masses.data()), d_data, masses.size());
gpuErrchk(cudaFree(d_data));
return thrust::raw_pointer_cast(d_masses.data()); // doesn't really do anything
}
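// Layout note: every CUDA_MASS is allocated individually on the device and d_masses only
// stores pointers to them, so kernels dereference d_mass[i] rather than indexing one
// contiguous array of structs.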
__global__ void createSpringPointers(CUDA_SPRING ** ptrs, CUDA_SPRING * data, int size) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
// ptrs[i] = (CUDA_SPRING *) malloc(sizeof(CUDA_SPRING));
*ptrs[i] = data[i];
}
}
CUDA_SPRING ** Simulation::springToArray() {
CUDA_SPRING ** d_ptrs = new CUDA_SPRING * [springs.size()]; // array of pointers
for (int i = 0; i < springs.size(); i++) { // potentially slow, allocate memory for every spring
gpuErrchk(cudaMalloc((void **) d_ptrs + i, sizeof(CUDA_SPRING)));
}
d_springs = thrust::device_vector<CUDA_SPRING *>(d_ptrs, d_ptrs + springs.size()); // copy those pointers to the GPU using thrust
CUDA_SPRING * h_spring = new CUDA_SPRING[springs.size()]; // array for the springs themselves
int count = 0;
for (Spring * s : springs) {
s -> arrayptr = d_ptrs[count];
if (s -> _left && s -> _right) {
h_spring[count] = CUDA_SPRING(*s, s -> _left -> arrayptr, s -> _right -> arrayptr);
} else {
h_spring[count] = CUDA_SPRING(*s);
}
count++;
}
delete [] d_ptrs;
CUDA_SPRING * d_data;
gpuErrchk(cudaMalloc((void **)& d_data, sizeof(CUDA_SPRING) * springs.size()));
gpuErrchk(cudaMemcpy(d_data, h_spring, sizeof(CUDA_SPRING) * springs.size(), cudaMemcpyHostToDevice));
delete [] h_spring;
createSpringPointers<<<springBlocksPerGrid, THREADS_PER_BLOCK>>>(thrust::raw_pointer_cast(d_springs.data()), d_data, springs.size());
gpuErrchk(cudaFree(d_data));
return thrust::raw_pointer_cast(d_springs.data());
}
//void Simulation::constraintsToArray() {
// d_constraints.reserve(constraints.size());
//
// for (Constraint * c : constraints) {
// Constraint * d_temp;
// cudaMalloc((void **)& d_temp, sizeof(Constraint));
// cudaMemcpy(d_temp, c, sizeof(Constraint), cudaMemcpyHostToDevice);
// d_constraints.push_back(d_temp);
// }
//}
void Simulation::toArray() {
CUDA_MASS ** d_mass = massToArray(); // must come first
CUDA_SPRING ** d_spring = springToArray();
}
__global__ void fromMassPointers(CUDA_MASS ** d_mass, CUDA_MASS * data, int size) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
data[i] = *d_mass[i];
}
}
void Simulation::get(Container *c) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot get updates from the GPU after the end of the simulation.");
} else if (!STARTED) {
std::cerr << "sim.get() does nothing if called before the simulation has started." << std::endl;
return;
}
CUDA_MASS ** temp;
gpuErrchk(cudaMalloc((void **) &temp, sizeof(CUDA_MASS *) * c -> masses.size()));
CUDA_MASS ** d_ptrs = new CUDA_MASS * [c -> masses.size()];
for (int i = 0; i < c -> masses.size(); i++) {
d_ptrs[i] = c -> masses[i] -> arrayptr;
}
gpuErrchk(cudaMemcpy(temp, d_ptrs, c -> masses.size() * sizeof(CUDA_MASS *), cudaMemcpyHostToDevice));
delete [] d_ptrs;
CUDA_MASS * temp_data;
gpuErrchk(cudaMalloc((void **) &temp_data, sizeof(CUDA_MASS) * c -> masses.size()));
updateCudaParameters();
fromMassPointers<<<massBlocksPerGrid, THREADS_PER_BLOCK>>>(temp, temp_data, c -> masses.size());
gpuErrchk(cudaFree(temp));
CUDA_MASS * h_mass = new CUDA_MASS[masses.size()];
gpuErrchk(cudaMemcpy(h_mass, temp_data, sizeof(CUDA_MASS) * masses.size(), cudaMemcpyDeviceToHost));
gpuErrchk(cudaFree(temp_data));
int count = 0;
for (Mass * m : c -> masses) {
*m = h_mass[count];
count++;
}
delete [] h_mass;
}
void Simulation::massFromArray() {
CUDA_MASS * temp;
gpuErrchk(cudaMalloc((void **) &temp, sizeof(CUDA_MASS) * masses.size()));
fromMassPointers<<<massBlocksPerGrid, THREADS_PER_BLOCK>>>(d_mass, temp, masses.size());
CUDA_MASS * h_mass = new CUDA_MASS[masses.size()];
gpuErrchk(cudaMemcpy(h_mass, temp, sizeof(CUDA_MASS) * masses.size(), cudaMemcpyDeviceToHost));
gpuErrchk(cudaFree(temp));
    int count = 0;
for (Mass * m : masses) {
*m = h_mass[count];
count++;
}
delete [] h_mass;
// cudaFree(d_mass);
}
void Simulation::springFromArray() {
}
void Simulation::constraintsFromArray() {
}
void Simulation::fromArray() {
massFromArray();
springFromArray();
constraintsFromArray();
}
__global__ void printMasses(CUDA_MASS ** d_masses, int num_masses) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num_masses) {
CUDA_MASS data = *d_masses[i];
        printf("%d: (%3f, %3f, %3f)\n", i, data.pos[0], data.pos[1], data.pos[2]);
}
}
__global__ void printForce(CUDA_MASS ** d_masses, int num_masses) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num_masses) {
Vec & data = d_masses[i] -> force;
printf("%d: (%3f, %3f, %3f)\n", i, data[0], data[1], data[2]);
}
}
__global__ void printSpring(CUDA_SPRING ** d_springs, int num_springs) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num_springs) {
CUDA_SPRING data = *d_springs[i];
printf("%d: left: (%5f, %5f, %5f), right: (%5f, %5f, %5f), k: %f, rest: %f\n ", i, data._left -> pos[0], data._left -> pos[1], data._left -> pos[2], data._right -> pos[0], data._right -> pos[1], data._right -> pos[2], data._k, data._rest);
}
}
__global__ void computeSpringForces(CUDA_SPRING ** d_spring, int num_springs, double t) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if ( i < num_springs ) {
CUDA_SPRING & spring = *d_spring[i];
if (spring._left == nullptr || spring._right == nullptr || ! spring._left -> valid || ! spring._right -> valid) // TODO might be expensive with CUDA instruction set
return;
Vec temp = (spring._right -> pos) - (spring._left -> pos);
double scale = 1.0;
if (spring._type == ACTIVE_CONTRACT_THEN_EXPAND){
scale = (1 - 0.2 * sin(spring._omega * t));
} else if (spring._type == ACTIVE_EXPAND_THEN_CONTRACT){
scale = (1 + 0.2 * sin(spring._omega * t));
}
Vec force = spring._k * (spring._rest * scale - temp.norm()) * (temp / temp.norm()); // normal spring force
force += dot(spring._left -> vel - spring._right -> vel, temp / temp.norm()) * spring._damping * (temp / temp.norm()); // damping
#ifdef CONSTRAINTS
if (spring._right -> constraints.fixed == false) {
spring._right->force.atomicVecAdd(force); // need atomics here
}
if (spring._left -> constraints.fixed == false) {
spring._left->force.atomicVecAdd(-force);
}
#else
spring._right -> force.atomicVecAdd(force);
spring._left -> force.atomicVecAdd(-force);
#endif
}
}
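// Each spring applies a Hooke force k * (rest - length) along the spring axis plus a damping
// term proportional to the relative velocity projected onto that axis. ACTIVE_* spring types
// modulate the rest length sinusoidally (a breathing actuator), and forces are accumulated
// with atomic adds because many springs can share a mass.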
double Simulation::time() {
return this -> T;
}
bool Simulation::running() {
return this -> RUNNING;
}
#ifdef RK2
template <bool step>
#endif
__global__ void massForcesAndUpdate(CUDA_MASS ** d_mass, int num_masses, double dt, double T, Vec global_acc, CUDA_GLOBAL_CONSTRAINTS c) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num_masses) {
CUDA_MASS &mass = *d_mass[i];
#ifdef CONSTRAINTS
if (mass.constraints.fixed == 1)
return;
#endif
mass.force += mass.m * global_acc;
mass.force += mass.extern_force;
// mass.force += mass.external;
for (int j = 0; j < c.num_planes; j++) { // global constraints
c.d_planes[j].applyForce(&mass);
}
for (int j = 0; j < c.num_balls; j++) {
c.d_balls[j].applyForce(&mass);
}
#ifdef CONSTRAINTS
for (int j = 0; j < mass.constraints.num_contact_planes; j++) { // local constraints
mass.constraints.contact_plane[j].applyForce(&mass);
}
for (int j = 0; j < mass.constraints.num_balls; j++) {
mass.constraints.ball[j].applyForce(&mass);
}
for (int j = 0; j < mass.constraints.num_constraint_planes; j++) {
mass.constraints.constraint_plane[j].applyForce(&mass);
}
for (int j = 0; j < mass.constraints.num_directions; j++) {
mass.constraints.direction[j].applyForce(&mass);
}
// NOTE TODO this is really janky. On certain platforms, the following code causes excessive memory usage on the GPU.
if (mass.vel.norm() != 0.0) {
double norm = mass.vel.norm();
mass.force += - mass.constraints.drag_coefficient * pow(norm, 2) * mass.vel / norm; // drag
}
#endif
#ifdef RK2
if constexpr(step) {
mass.acc = mass.force / mass.m;
mass.__rk2_backup_vel = mass.vel;
mass.__rk2_backup_pos = mass.pos;
mass.pos = mass.pos + 0.5 * mass.vel * dt;
mass.vel = mass.vel + 0.5 * mass.acc * dt;
mass.T += 0.5 * dt;
} else {
mass.acc = mass.force / mass.m;
mass.pos = mass.__rk2_backup_pos + mass.vel * dt;
mass.vel = mass.__rk2_backup_vel + mass.acc * dt;
mass.T += 0.5 * dt;
}
#elif defined(VERLET)
mass.vel += 0.5 * (mass.acc + mass.force / mass.m) * dt;
mass.acc = mass.force / mass.m;
mass.pos += mass.vel * dt + 0.5 * mass.acc * pow(dt, 2);
mass.T += dt;
#else // default: semi-implicit (symplectic) Euler integration
mass.acc = mass.force / mass.m;
mass.vel = mass.vel + mass.acc * dt;
mass.pos = mass.pos + mass.vel * dt;
mass.T += dt;
#endif
mass.force = Vec(0, 0, 0);
}
}
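// The integrator is chosen at compile time: RK2 runs this kernel twice per step using the
// __rk2_backup_pos / __rk2_backup_vel fields (explicit midpoint), VERLET performs a
// velocity-Verlet-style update, and the default branch is a simple semi-implicit Euler step.
// Masses flagged as fixed (when CONSTRAINTS is enabled) are skipped entirely.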
#ifdef GRAPHICS
void Simulation::clearScreen() {
// Clear the screen
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // clear screen
// Use our shader
glUseProgram(programID);
// Send our transformation to the currently bound shader in the "MVP" uniform
glUniformMatrix4fv(MatrixID, 1, GL_FALSE, &MVP[0][0]);
}
void Simulation::renderScreen() {
// Swap buffers
#ifdef SDL2
SDL_GL_SwapWindow(window);
#else
glfwPollEvents();
glfwSwapBuffers(window);
#endif
}
#ifdef SDL2
void Simulation::createSDLWindow() {
if (SDL_Init(SDL_INIT_VIDEO) < 0)
{
std::cout << "Failed to init SDL\n";
return;
}
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 3);
SDL_GL_SetAttribute(SDL_GL_RED_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_ALPHA_SIZE, 8);
// Turn on double buffering with a 24bit Z buffer.
// You may need to change this to 16 or 32 for your system
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 24);
SDL_GL_SetAttribute(SDL_GL_MULTISAMPLEBUFFERS, 1);
SDL_GL_SetAttribute(SDL_GL_MULTISAMPLESAMPLES, 4);
SDL_GL_SetSwapInterval(1);
// Open a window and create its OpenGL context
window = SDL_CreateWindow("CUDA Physics Simulation", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, 1920, 1080, SDL_WINDOW_OPENGL);
SDL_SetWindowResizable(window, SDL_TRUE);
if (window == NULL) {
fprintf(stderr,
"Failed to open SDL window. If you have an Intel GPU, they are not 3.3 compatible. Try the 2.1 version of the tutorials.\n");
getchar();
SDL_Quit();
return;
}
context = SDL_GL_CreateContext(window);
// Initialize GLEW
glewExperimental = true; // Needed for core profile
if (glewInit() != GLEW_OK) {
fprintf(stderr, "Failed to initialize GLEW\n");
getchar();
SDL_Quit();
return;
}
glEnable(GL_DEPTH_TEST);
    // Accept fragment if it is closer to the camera than the previous one
glDepthFunc(GL_LESS);
// Dark blue background
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glEnable(GL_MULTISAMPLE);
}
#else
void framebuffer_size_callback(GLFWwindow* window, int width, int height)
{
glViewport(0, 0, width, height);
}
void Simulation::createGLFWWindow() {
// Initialise GLFW
if( !glfwInit() ) // TODO throw errors here
{
fprintf( stderr, "Failed to initialize GLFW\n" );
getchar();
exit(1);
}
glfwWindowHint(GLFW_SAMPLES, 4);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); // To make MacOS happy; should not be needed
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); //We don't want the old OpenGL
glfwWindowHint(GLFW_RESIZABLE, GL_TRUE);
glfwSwapInterval(1);
// Open a window and create its OpenGL context
window = glfwCreateWindow(1920, 1080, "CUDA Physics Simulation", NULL, NULL);
if (window == NULL) {
fprintf(stderr,
"Failed to open GLFW window. If you have an Intel GPU, they are not 3.3 compatible. Try the 2.1 version of the tutorials.\n");
getchar();
glfwTerminate();
exit(1);
}
glfwMakeContextCurrent(window);
glfwSetFramebufferSizeCallback(window, framebuffer_size_callback);
glEnable(GL_DEPTH_TEST);
    // Accept fragment if it is closer to the camera than the previous one
glDepthFunc(GL_LESS);
// Initialize GLEW
glewExperimental = true; // Needed for core profile
if (glewInit() != GLEW_OK) {
fprintf(stderr, "Failed to initialize GLEW\n");
getchar();
glfwTerminate();
exit(1);
}
// Ensure we can capture the escape key being pressed below
glfwSetInputMode(window, GLFW_STICKY_KEYS, GL_TRUE);
// Dark blue background
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
}
#endif
#endif
void Simulation::stop() { // no race condition actually
if (RUNNING) {
setBreakpoint(time());
waitForEvent();
}
ENDED = true;
freeGPU();
FREED = true;
return;
}
void Simulation::stop(double t) {
if (RUNNING) {
setBreakpoint(t);
waitForEvent();
}
ENDED = true;
freeGPU();
FREED = true;
return;
}
void Simulation::start() {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot call sim.start() after the end of the simulation.");
}
if (masses.size() == 0) {
throw std::runtime_error("No masses have been added. Please add masses before starting the simulation.");
}
std::cout << "Starting simulation with " << masses.size() << " masses and " << springs.size() << " springs." << std::endl;
RUNNING = true;
STARTED = true;
T = 0;
if (this -> dt <= 0) {
throw std::runtime_error("Simultation timestep Simulation::dt is invalid. Please choose a positive non-zero value.");
}
#ifdef GRAPHICS // SDL2 window needs to be created here for Mac OS
#ifdef SDL2
createSDLWindow();
#endif
#endif
updateCudaParameters();
d_constraints.d_balls = thrust::raw_pointer_cast(&d_balls[0]);
d_constraints.d_planes = thrust::raw_pointer_cast(&d_planes[0]);
d_constraints.num_balls = d_balls.size();
d_constraints.num_planes = d_planes.size();
update_constraints = false;
// cudaDeviceSetLimit(cudaLimitMallocHeapSize, 5 * (masses.size() * sizeof(CUDA_MASS) + springs.size() * sizeof(CUDA_SPRING)));
toArray();
d_mass = thrust::raw_pointer_cast(d_masses.data());
d_spring = thrust::raw_pointer_cast(d_springs.data());
gpu_thread = std::thread(&Simulation::_run, this);
}
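// Illustrative end-to-end usage of the Simulation API (sketch only, not part of the original
// source; the values are arbitrary):
//
//   titan::Simulation sim;
//   sim.createLattice(Vec(0, 0, 10), Vec(5, 5, 5), 10, 10, 10);
//   sim.createPlane(Vec(0, 0, 1), 0.0);
//   sim.setTimeStep(0.0001);
//   sim.start();   // copies masses/springs to the GPU and launches the simulation thread
//   sim.wait(1.0); // block the host until ~1 s of simulated time has elapsed
//   sim.stop();    // breakpoint at the current time, then free GPU memory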
void Simulation::_run() { // repeatedly start next
#ifdef GRAPHICS
#ifndef SDL2 // GLFW window needs to be created here for Windows
createGLFWWindow();
#endif
#ifdef SDL2
SDL_GL_MakeCurrent(window, context);
#endif
GLuint VertexArrayID;
glGenVertexArrays(1, &VertexArrayID);
glBindVertexArray(VertexArrayID);
// glEnable(GL_LIGHTING);
// glEnable(GL_LIGHT0);
// Create and compile our GLSL program from the shaders
this -> programID = LoadShaders(); // ("shaders/StandardShading.vertexshader", "shaders/StandardShading.fragmentshader"); //
// Get a handle for our "MVP" uniform
this -> MVP = getProjection(camera, looks_at, up); // compute perspective projection matrix
this -> MatrixID = glGetUniformLocation(programID, "MVP"); // doesn't seem to be necessary
generateBuffers(); // generate buffers for all masses and springs
for (Constraint * c : constraints) { // generate buffers for constraint objects
c -> generateBuffers();
}
#endif
execute();
GPU_DONE = true;
}
#ifdef GRAPHICS
glm::mat4 & Simulation::getProjectionMatrix() {
return this -> MVP;
}
void Simulation::setViewport(const Vec & camera_position, const Vec & target_location, const Vec & up_vector) {
if (RUNNING) {
throw std::runtime_error("The simulation is running. Cannot modify viewport during simulation run.");
}
this -> camera = camera_position;
this -> looks_at = target_location;
this -> up = up_vector;
if (STARTED) {
this -> MVP = getProjection(camera, looks_at, up); // compute perspective projection matrix
}
}
void Simulation::moveViewport(const Vec & displacement) {
if (RUNNING) {
throw std::runtime_error("The simulation is running. Cannot modify viewport during simulation run.");
}
this -> camera += displacement;
if (STARTED) {
this -> MVP = getProjection(camera, looks_at, up); // compute perspective projection matrix
}
}
#endif
void Simulation::updateCudaParameters() {
massBlocksPerGrid = (masses.size() + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
springBlocksPerGrid = (springs.size() + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (massBlocksPerGrid > MAX_BLOCKS) {
massBlocksPerGrid = MAX_BLOCKS;
}
if (springBlocksPerGrid > MAX_BLOCKS) {
springBlocksPerGrid = MAX_BLOCKS;
}
if (springBlocksPerGrid == 0) {
springBlocksPerGrid = 1;
}
d_mass = thrust::raw_pointer_cast(d_masses.data());
d_spring = thrust::raw_pointer_cast(d_springs.data());
}
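// Launch configuration note: grids are sized to cover all masses/springs with
// THREADS_PER_BLOCK threads per block and are capped at MAX_BLOCKS. The kernels index with
// blockIdx.x * blockDim.x + threadIdx.x and use no grid-stride loop, so the cap implicitly
// assumes the element count never exceeds MAX_BLOCKS * THREADS_PER_BLOCK.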
void Simulation::resume() {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot resume the simulation.");
}
if (!STARTED) {
throw std::runtime_error("The simulation has not started. You cannot resume a simulation before calling sim.start().");
}
if (masses.size() == 0) {
throw std::runtime_error("No masses have been added. Please add masses before starting the simulation.");
}
updateCudaParameters();
cudaDeviceSynchronize();
RUNNING = true;
}
void Simulation::execute() {
while (1) {
if (!bpts.empty() && *bpts.begin() <= T) {
cudaDeviceSynchronize(); // synchronize before updating the springs and mass positions
// std::cout << "Breakpoint set for time " << *bpts.begin() << " reached at simulation time " << T << "!" << std::endl;
bpts.erase(bpts.begin());
RUNNING = false;
while (!RUNNING) {
std::this_thread::sleep_for(std::chrono::microseconds(1));
if (ENDED) {
for (Constraint * c : constraints) {
delete c;
}
#ifdef GRAPHICS
glDeleteBuffers(1, &vertices);
glDeleteBuffers(1, &colors);
glDeleteBuffers(1, &indices);
glDeleteProgram(programID);
glDeleteVertexArrays(1, &VertexArrayID);
// Close OpenGL window and terminate GLFW
#ifdef SDL2
SDL_GL_DeleteContext(context);
SDL_DestroyWindow(window);
SDL_Quit();
#else
glfwTerminate();
#endif
#endif
return;
}
}
#ifdef GRAPHICS
if (resize_buffers) {
resizeBuffers(); // needs to be run from GPU thread
resize_buffers = false;
update_colors = true;
update_indices = true;
}
#endif
#ifdef CONSTRAINTS
if (update_constraints) {
d_constraints.d_balls = thrust::raw_pointer_cast(&d_balls[0]);
d_constraints.d_planes = thrust::raw_pointer_cast(&d_planes[0]);
d_constraints.num_balls = d_balls.size();
d_constraints.num_planes = d_planes.size();
#ifdef GRAPHICS
for (Constraint * c : constraints) { // generate buffers for constraint objects
if (!c -> _initialized)
c -> generateBuffers();
}
#endif
update_constraints = false;
}
#endif
continue;
}
gpuErrchk( cudaPeekAtLastError() );
cudaDeviceSynchronize(); // synchronize before updating the springs and mass positions
#ifdef RK2
        computeSpringForces<<<springBlocksPerGrid, THREADS_PER_BLOCK>>>(d_spring, springs.size(), T); // compute spring forces after syncing
gpuErrchk( cudaPeekAtLastError() );
massForcesAndUpdate<true><<<massBlocksPerGrid, THREADS_PER_BLOCK>>>(d_mass, masses.size(), dt, T, _global_acc, d_constraints);
gpuErrchk( cudaPeekAtLastError() );
T += 0.5 * dt;
        computeSpringForces<<<springBlocksPerGrid, THREADS_PER_BLOCK>>>(d_spring, springs.size(), T); // compute spring forces after syncing
gpuErrchk( cudaPeekAtLastError() );
massForcesAndUpdate<false><<<massBlocksPerGrid, THREADS_PER_BLOCK>>>(d_mass, masses.size(), dt, T, _global_acc, d_constraints);
gpuErrchk( cudaPeekAtLastError() );
T += 0.5 * dt;
#else
        computeSpringForces<<<springBlocksPerGrid, THREADS_PER_BLOCK>>>(d_spring, springs.size(), T); // compute spring forces after syncing
gpuErrchk( cudaPeekAtLastError() );
massForcesAndUpdate<<<massBlocksPerGrid, THREADS_PER_BLOCK>>>(d_mass, masses.size(), dt, T, _global_acc, d_constraints);
gpuErrchk( cudaPeekAtLastError() );
T += dt;
#endif
#ifdef GRAPHICS
if (fmod(T, 0.01) < dt) {
clearScreen();
updateBuffers();
draw();
for (Constraint * c : constraints) {
c->draw();
}
renderScreen();
#ifndef SDL2
if (glfwGetKey(window, GLFW_KEY_ESCAPE ) == GLFW_PRESS || glfwWindowShouldClose(window) != 0) {
// RUNNING = 0;
// ENDED = 1;
exit(1); // TODO maybe deal with memory leak here.
}
#endif
}
#endif
}
}
void Simulation::pause(double t) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Control functions cannot be called.");
}
setBreakpoint(t);
waitForEvent();
}
void Simulation::wait(double t) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Control functions cannot be called.");
}
double current_time = time();
while (RUNNING && time() <= current_time + t) {
std::this_thread::sleep_for(std::chrono::microseconds(10)); // TODO replace this with wait queue.
}
}
void Simulation::waitUntil(double t) {
if (ENDED && !FREED) {
throw std::runtime_error("The simulation has ended. Control functions cannot be called.");
}
while (RUNNING && time() <= t) {
std::this_thread::sleep_for(std::chrono::microseconds(10));
}
}
void Simulation::waitForEvent() {
if (ENDED && !FREED) {
throw std::runtime_error("The simulation has ended. Control functions cannot be called.");
}
while (RUNNING) {
std::this_thread::sleep_for(std::chrono::microseconds(10));
}
}
#ifdef GRAPHICS
void Simulation::resizeBuffers() {
// std::cout << "resizing buffers (" << masses.size() << " masses, " << springs.size() << " springs)." << std::endl;
// std::cout << "resizing buffers (" << d_masses.size() << " device masses, " << d_springs.size() << " device springs)." << std::endl;
{
cudaGLUnregisterBufferObject(this -> colors);
glBindBuffer(GL_ARRAY_BUFFER, this -> colors);
glBufferData(GL_ARRAY_BUFFER, 3 * masses.size() * sizeof(GLfloat), NULL, GL_DYNAMIC_DRAW);
cudaGLRegisterBufferObject(this -> colors);
}
{
cudaGLUnregisterBufferObject(this -> indices);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, this -> indices);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, 2 * springs.size() * sizeof(GLuint), NULL, GL_DYNAMIC_DRAW); // second argument is number of bytes
cudaGLRegisterBufferObject(this -> indices);
}
{
cudaGLUnregisterBufferObject(this -> vertices);
glBindBuffer(GL_ARRAY_BUFFER, vertices);
glBufferData(GL_ARRAY_BUFFER, 3 * masses.size() * sizeof(GLfloat), NULL, GL_DYNAMIC_DRAW);
cudaGLRegisterBufferObject(this -> vertices);
}
resize_buffers = false;
}
void Simulation::generateBuffers() {
{
GLuint colorbuffer; // bind colors to buffer colorbuffer
glGenBuffers(1, &colorbuffer);
glBindBuffer(GL_ARRAY_BUFFER, colorbuffer);
glBufferData(GL_ARRAY_BUFFER, std::max(3 * masses.size() * sizeof(GLfloat), 3 * sizeof(GLfloat)), NULL, GL_DYNAMIC_DRAW);
cudaGLRegisterBufferObject(colorbuffer);
this -> colors = colorbuffer;
}
{
        GLuint elementbuffer; // index buffer holding the spring endpoint indices
glGenBuffers(1, &elementbuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementbuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, std::max(2 * springs.size() * sizeof(GLuint), 3 * sizeof(GLfloat)), NULL, GL_DYNAMIC_DRAW); // second argument is number of bytes
cudaGLRegisterBufferObject(elementbuffer);
this -> indices = elementbuffer;
}
{
GLuint vertexbuffer;
        glGenBuffers(1, &vertexbuffer); // vertex buffer holding the mass positions
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glBufferData(GL_ARRAY_BUFFER, std::max(3 * masses.size() * sizeof(GLfloat), 3 * sizeof(GLfloat)), NULL, GL_DYNAMIC_DRAW);
cudaGLRegisterBufferObject(vertexbuffer);
this -> vertices = vertexbuffer;
}
}
__global__ void updateVertices(float * gl_ptr, CUDA_MASS ** d_mass, int num_masses) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num_masses) {
gl_ptr[3 * i] = (float) d_mass[i] -> pos[0];
gl_ptr[3 * i + 1] = (float) d_mass[i] -> pos[1];
gl_ptr[3 * i + 2] = (float) d_mass[i] -> pos[2];
}
}
__global__ void updateIndices(unsigned int * gl_ptr, CUDA_SPRING ** d_spring, CUDA_MASS ** d_mass, int num_springs, int num_masses) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num_springs) {
if (d_spring[i] -> _left == nullptr || d_spring[i] -> _right == nullptr || ! d_spring[i] -> _left -> valid || ! d_spring[i] -> _right -> valid) {
            gl_ptr[2*i] = 0;
            gl_ptr[2*i + 1] = 0;
return;
}
CUDA_MASS * left = d_spring[i] -> _left;
CUDA_MASS * right = d_spring[i] -> _right;
for (int j = 0; j < num_masses; j++) {
if (d_mass[j] == left) {
gl_ptr[2*i] = j;
}
if (d_mass[j] == right) {
gl_ptr[2*i + 1] = j;
}
}
}
}
__global__ void updateColors(float * gl_ptr, CUDA_MASS ** d_mass, int num_masses) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num_masses) {
gl_ptr[3 * i] = (float) d_mass[i] -> color[0];
gl_ptr[3 * i + 1] = (float) d_mass[i] -> color[1];
gl_ptr[3 * i + 2] = (float) d_mass[i] -> color[2];
}
}
void Simulation::updateBuffers() {
if (update_colors) {
glBindBuffer(GL_ARRAY_BUFFER, colors);
        void *colorPointer; // if no masses, springs, or colors are changed/deleted, this only needs to be run once
cudaGLMapBufferObject(&colorPointer, colors);
updateColors<<<massBlocksPerGrid, THREADS_PER_BLOCK>>>((float *) colorPointer, d_mass, masses.size());
cudaGLUnmapBufferObject(colors);
update_colors = false;
}
if (update_indices) {
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indices);
        void *indexPointer; // if no masses or springs are deleted, this only needs to be run once
cudaGLMapBufferObject(&indexPointer, indices);
updateIndices<<<springBlocksPerGrid, THREADS_PER_BLOCK>>>((unsigned int *) indexPointer, d_spring, d_mass, springs.size(), masses.size());
cudaGLUnmapBufferObject(indices);
update_indices = false;
}
{
glBindBuffer(GL_ARRAY_BUFFER, vertices);
void *vertexPointer;
cudaGLMapBufferObject(&vertexPointer, vertices);
updateVertices<<<massBlocksPerGrid, THREADS_PER_BLOCK>>>((float *) vertexPointer, d_mass, masses.size());
cudaGLUnmapBufferObject(vertices);
}
}
void Simulation::draw() {
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, this -> vertices);
glPointSize(this -> pointSize);
glLineWidth(this -> lineWidth);
glVertexAttribPointer(
0, // attribute. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, this -> colors);
glVertexAttribPointer(
1, // attribute. No particular reason for 1, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
    glDrawArrays(GL_POINTS, 0, masses.size()); // one point per mass
    glDrawElements(GL_LINES, 2 * springs.size(), GL_UNSIGNED_INT, (void*) 0); // one line segment (two indices) per spring
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(0);
}
#endif
Container * Simulation::createContainer() {
Container * c = new Container();
containers.push_back(c);
return c;
}
Cube * Simulation::createCube(const Vec & center, double side_length) { // creates a cube of masses and springs with the given center and side length
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot create new objects");
}
Cube * cube = new Cube(center, side_length);
d_masses.reserve(masses.size() + cube -> masses.size());
d_springs.reserve(springs.size() + cube -> springs.size());
for (Mass * m : cube -> masses) {
createMass(m);
}
for (Spring * s : cube -> springs) {
createSpring(s);
}
containers.push_back(cube);
return cube;
}
Container * Simulation::importFromSTL(const std::string & path, double density, int num_rays) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. Cannot import new STL objects");
}
stl::stlFile file = stl::parseSTL(path);
stl::BBox b = file.getBoundingBox();
double dimmax = max(max(b.xdim, b.ydim), b.zdim);
double dimx, dimy, dimz;
dimx = 10 * b.xdim / dimmax;
dimy = 10 * b.ydim / dimmax;
dimz = 10 * b.zdim / dimmax;
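    // rescale the STL bounding box so its largest dimension spans 10 lattice units;
    // num_pts below is the per-axis lattice resolution implied by the requested point density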
std::cout << b.xdim << " " << b.ydim << " " << b.zdim << " " << dimmax << " " << pow(10 / dimmax, 3) << " " << density * pow(10 / dimmax, 3) * b.xdim * b.ydim * b.zdim << " " << (int) cbrt(density * pow(10 / dimmax, 3) * b.xdim * b.ydim * b.zdim) << std::endl;
int num_pts = (int) cbrt(density * pow(10 / dimmax, 3) * b.xdim * b.ydim * b.zdim);
std::cout << "density is: " << density << " and num_pts is " << num_pts << std::endl;
Lattice * l1 = new Lattice(Vec(0, 0, dimz), Vec(dimx - 0.001, dimy - 0.001, dimz - 0.001), num_pts, num_pts, num_pts);
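    // mark lattice masses that fall outside the STL surface as invalid (ray-casting test with num_rays rays),
    // then prune the invalid masses and any springs that reference them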
for (Mass * m : l1 -> masses) {
if (!file.inside(stl::Vec3D(b.center[0] + (b.xdim / dimx) * m -> pos[0], b.center[1] + (b.ydim / dimy) * m -> pos[1], (b.zdim / dimz) * (m -> pos[2] - dimz) + b.center[2]), num_rays)) {
m -> valid = false;
}
}
for (auto i = l1 -> springs.begin(); i != l1 -> springs.end();) {
Spring * s = *i;
if (!s ->_left -> valid || ! s -> _right -> valid) {
delete s;
i = l1 -> springs.erase(i);
} else {
++i;
}
}
for (auto i = l1 -> masses.begin(); i != l1 -> masses.end();) {
Mass * m = *i;
if (!m -> valid) {
delete m;
i = l1 -> masses.erase(i);
} else {
++i;
}
}
d_masses.reserve(masses.size() + l1 -> masses.size());
d_springs.reserve(springs.size() + l1 -> springs.size());
for (Mass * m : l1 -> masses) {
createMass(m);
}
for (Spring * s : l1 -> springs) {
createSpring(s);
}
containers.push_back(l1);
return l1;
}
Lattice * Simulation::createLattice(const Vec & center, const Vec & dims, int nx, int ny, int nz) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. New objects cannot be created.");
}
Lattice * l = new Lattice(center, dims, nx, ny, nz);
d_masses.reserve(masses.size() + l -> masses.size());
d_springs.reserve(springs.size() + l -> springs.size());
for (Mass * m : l -> masses) {
createMass(m);
}
for (Spring * s : l -> springs) {
createSpring(s);
}
containers.push_back(l);
return l;
}
#ifdef CONSTRAINTS
Beam * Simulation::createBeam(const Vec & center, const Vec & dims, int nx, int ny, int nz) {
if (ENDED) {
throw std::runtime_error("The simulation has ended. New objects cannot be created.");
}
Beam * l = new Beam(center, dims, nx, ny, nz);
d_masses.reserve(masses.size() + l -> masses.size());
d_springs.reserve(springs.size() + l -> springs.size());
for (Mass * m : l -> masses) {
createMass(m);
}
for (Spring * s : l -> springs) {
createSpring(s);
}
containers.push_back(l);
return l;
}
#endif
// Robot * Simulation::createRobot(const Vec & center, const cppn& encoding, double side_length, double omega, double k_soft, double k_stiff){
// if (ENDED) {
// throw std::runtime_error("The simulation has ended. New objects cannot be created.");
// }
// Robot * l = new Robot(center, encoding, side_length, omega, k_soft, k_stiff);
// d_masses.reserve(masses.size() + l -> masses.size());
// d_springs.reserve(springs.size() + l -> springs.size());
// for (Mass * m : l -> masses) {
// createMass(m);
// }
// for (Spring * s : l -> springs) {
// createSpring(s);
// }
// containers.push_back(l);
// return l;
// }
void Simulation::createPlane(const Vec & abc, double d) { // creates half-space ax + by + cz < d
if (ENDED) {
throw std::runtime_error("The simulation has ended. New objects cannot be created.");
}
ContactPlane * new_plane = new ContactPlane(abc, d);
constraints.push_back(new_plane);
d_planes.push_back(CudaContactPlane(*new_plane));
update_constraints = true;
}
void Simulation::createPlane(const Vec & abc, double d, double FRICTION_K, double FRICTION_S) { // creates half-space ax + by + cz < d
if (ENDED) {
throw std::runtime_error("The simulation has ended. New objects cannot be created.");
}
ContactPlane * new_plane = new ContactPlane(abc, d);
new_plane -> _FRICTION_K = FRICTION_K;
new_plane -> _FRICTION_S = FRICTION_S;
constraints.push_back(new_plane);
d_planes.push_back(CudaContactPlane(*new_plane));
update_constraints = true;
}
void Simulation::createBall(const Vec & center, double r ) { // creates ball with radius r at position center
if (ENDED) {
throw std::runtime_error("The simulation has ended. New constraints cannot be added.");
}
Ball * new_ball = new Ball(center, r);
constraints.push_back(new_ball);
d_balls.push_back(CudaBall(*new_ball));
update_constraints = true;
}
void Simulation::clearConstraints() { // clears global constraints only
this -> constraints.clear();
update_constraints = true;
}
void Simulation::printPositions() {
if (ENDED) {
throw std::runtime_error("The simulation has ended. You cannot view parameters of the simulation after it has been stopped.");
}
if (RUNNING) {
std::cout << "\nDEVICE MASSES: " << std::endl;
printMasses<<<massBlocksPerGrid, THREADS_PER_BLOCK>>>(d_mass, masses.size());
cudaDeviceSynchronize();
}
else {
std::cout << "\nHOST MASSES: " << std::endl;
int count = 0;
for (Mass * m : masses) {
std::cout << count << ": " << m -> pos << std::endl;
count++;
}
}
std::cout << std::endl;
}
void Simulation::printSprings() {
if (ENDED) {
throw std::runtime_error("The simulation has ended. You cannot view parameters of the simulation after it has been stopped.");
}
if (RUNNING) {
std::cout << "\nDEVICE SPRINGS: " << std::endl;
printSpring<<<springBlocksPerGrid, THREADS_PER_BLOCK>>>(d_spring, springs.size());
cudaDeviceSynchronize();
}
else {
std::cout << "\nHOST SPRINGS: " << std::endl;
}
std::cout << std::endl;
}
void Simulation::setGlobalAcceleration(const Vec & global_acc) {
if (RUNNING) {
throw std::runtime_error("The simulation is running. The global force parameter cannot be changed during runtime");
}
this -> _global_acc = global_acc;
}
} // namespace titan |
4a05ba6ff17a8274a0969dc04ceacec969aaa02f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "mInitVelocity.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
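// For every matrix size and block configuration below: pad the grid so it covers the matrix,
// run 10 warm-up launches, then time 1000 launches of mInitVelocity and print
// [elapsed_microseconds, (block dims), (matrix dims)].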
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *u_dimX = NULL;
hipMalloc(&u_dimX, XSIZE*YSIZE);
float *u_dimY = NULL;
hipMalloc(&u_dimY, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
mInitVelocity), dim3(gridBlock),dim3(threadBlock), 0, 0, u_dimX,u_dimY);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
mInitVelocity), dim3(gridBlock),dim3(threadBlock), 0, 0, u_dimX,u_dimY);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
mInitVelocity), dim3(gridBlock),dim3(threadBlock), 0, 0, u_dimX,u_dimY);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 4a05ba6ff17a8274a0969dc04ceacec969aaa02f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "mInitVelocity.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
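// For every matrix size and block configuration below: pad the grid so it covers the matrix,
// run 10 warm-up launches, then time 1000 launches of mInitVelocity and print
// [elapsed_microseconds, (block dims), (matrix dims)].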
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *u_dimX = NULL;
cudaMalloc(&u_dimX, XSIZE*YSIZE);
float *u_dimY = NULL;
cudaMalloc(&u_dimY, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
mInitVelocity<<<gridBlock,threadBlock>>>(u_dimX,u_dimY);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
mInitVelocity<<<gridBlock,threadBlock>>>(u_dimX,u_dimY);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
mInitVelocity<<<gridBlock,threadBlock>>>(u_dimX,u_dimY);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
e73a416787ab25fa9e61682b1de9232b66e17797.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
#include <chrono>
#include <iostream>
#include <fstream>
#include <helper_cuda.h>
using namespace std::chrono;
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
/**
* Host main routine
*/
int
main(void)
{
std::ofstream timeFile;
timeFile.open ("times.txt");
for(int numElements = 1; numElements<10e8; numElements *= 10){
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// Print the vector length to be used, and compute its size
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
float *d_A = NULL;
err = hipMalloc((void **)&d_A, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = hipMalloc((void **)&d_B, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = hipMalloc((void **)&d_C, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
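        // integer ceiling division: enough blocks so that every one of the numElements elements gets a thread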
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
auto start = high_resolution_clock::now();
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements);
        hipDeviceSynchronize(); // kernel launches are asynchronous: wait for completion so the timer measures execution, not just the launch
        auto stop = high_resolution_clock::now();
auto durationCuda = duration_cast<microseconds>(stop - start);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
start = high_resolution_clock::now();
// Calc again to measure time on cpu
for (int i = 0; i < numElements; ++i)
{
h_C[i] = h_A[i] + h_B[i];
}
stop = high_resolution_clock::now();
auto durationCpu = duration_cast<microseconds>(stop - start);
printf("Test PASSED\n");
// Free device global memory
err = hipFree(d_A);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_B);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_C);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
printf("Done for %d \n", numElements);
timeFile << numElements << " " << durationCuda.count() << " " << durationCpu.count() << std::endl;
}
timeFile.close();
std::cout << "end of tests";
return 0;
}
| e73a416787ab25fa9e61682b1de9232b66e17797.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <chrono>
#include <iostream>
#include <fstream>
#include <helper_cuda.h>
using namespace std::chrono;
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
/**
* Host main routine
*/
int
main(void)
{
std::ofstream timeFile;
timeFile.open ("times.txt");
for(int numElements = 1; numElements<10e8; numElements *= 10){
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// Print the vector length to be used, and compute its size
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
float *d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = cudaMalloc((void **)&d_B, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = cudaMalloc((void **)&d_C, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
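        // integer ceiling division: enough blocks so that every one of the numElements elements gets a thread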
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
auto start = high_resolution_clock::now();
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
        cudaDeviceSynchronize(); // kernel launches are asynchronous: wait for completion so the timer measures execution, not just the launch
        auto stop = high_resolution_clock::now();
auto durationCuda = duration_cast<microseconds>(stop - start);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
start = high_resolution_clock::now();
// Calc again to measure time on cpu
for (int i = 0; i < numElements; ++i)
{
h_C[i] = h_A[i] + h_B[i];
}
stop = high_resolution_clock::now();
auto durationCpu = duration_cast<microseconds>(stop - start);
printf("Test PASSED\n");
// Free device global memory
err = cudaFree(d_A);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_B);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_C);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
printf("Done for %d \n", numElements);
timeFile << numElements << " " << durationCuda.count() << " " << durationCpu.count() << std::endl;
}
timeFile.close();
std::cout << "end of tests";
return 0;
}
|
15f67614c70883cbedf8168c62b4861a1b320175.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2013-2018 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file scale_matrix.cu
*
 * \brief Contains implementation of CUDA kernels to scale matrix elements (rows or columns).
*/
#include "cuda_common.hpp"
#include "acc_runtime.hpp"
__global__ void scale_matrix_columns_gpu_kernel
(
int nrow,
acc_complex_double_t* mtrx,
double* a
)
{
int icol = blockIdx.y;
int irow = blockIdx.x * blockDim.x + threadIdx.x;
if (irow < nrow)
{
mtrx[array2D_offset(irow, icol, nrow)] =
accCmul(mtrx[array2D_offset(irow, icol, nrow)], make_accDoubleComplex(a[icol], 0));
}
}
// scale each column of the matrix by a column-dependent constant
extern "C" void scale_matrix_columns_gpu(int nrow,
int ncol,
acc_complex_double_t* mtrx,
double* a)
{
dim3 grid_t(64);
dim3 grid_b(num_blocks(nrow, grid_t.x), ncol);
accLaunchKernel((scale_matrix_columns_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0,
nrow,
mtrx,
a
);
}
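// Example usage (sketch, assuming the column-major layout implied by array2D_offset and
// hypothetical device pointers d_mtrx and d_a): scale column j of an nrow x ncol matrix by a[j]:
//   scale_matrix_columns_gpu(nrow, ncol, d_mtrx, d_a);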
__global__ void scale_matrix_rows_gpu_kernel
(
int nrow__,
acc_complex_double_t* mtrx__,
double const* v__
)
{
int icol = blockIdx.y;
int irow = blockDim.x * blockIdx.x + threadIdx.x;
if (irow < nrow__) {
acc_complex_double_t z = mtrx__[array2D_offset(irow, icol, nrow__)];
mtrx__[array2D_offset(irow, icol, nrow__)] = make_accDoubleComplex(z.x * v__[irow], z.y * v__[irow]);
}
}
// scale each row of the matrix by a row-dependent constant
extern "C" void scale_matrix_rows_gpu(int nrow__,
int ncol__,
acc_complex_double_t* mtrx__,
double const* v__)
{
dim3 grid_t(256);
dim3 grid_b(num_blocks(nrow__, grid_t.x), ncol__);
accLaunchKernel((scale_matrix_rows_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0,
nrow__,
mtrx__,
v__
);
}
__global__ void scale_matrix_elements_gpu_kernel
(
acc_complex_double_t* mtrx__,
int ld__,
int nrow__,
double beta__
)
{
int icol = blockIdx.y;
int irow = blockDim.x * blockIdx.x + threadIdx.x;
if (irow < nrow__) {
acc_complex_double_t z = mtrx__[array2D_offset(irow, icol, ld__)];
mtrx__[array2D_offset(irow, icol, ld__)] = make_accDoubleComplex(z.x * beta__, z.y * beta__);
}
}
extern "C" void scale_matrix_elements_gpu(acc_complex_double_t* ptr__,
int ld__,
int nrow__,
int ncol__,
double beta__)
{
dim3 grid_t(64);
dim3 grid_b(num_blocks(nrow__, grid_t.x), ncol__);
accLaunchKernel((scale_matrix_elements_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0,
ptr__,
ld__,
nrow__,
beta__
);
}
| 15f67614c70883cbedf8168c62b4861a1b320175.cu | // Copyright (c) 2013-2018 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file scale_matrix.cu
*
 * \brief Contains implementation of CUDA kernels to scale matrix elements (rows or columns).
*/
#include "cuda_common.hpp"
#include "acc_runtime.hpp"
__global__ void scale_matrix_columns_gpu_kernel
(
int nrow,
acc_complex_double_t* mtrx,
double* a
)
{
int icol = blockIdx.y;
int irow = blockIdx.x * blockDim.x + threadIdx.x;
if (irow < nrow)
{
mtrx[array2D_offset(irow, icol, nrow)] =
accCmul(mtrx[array2D_offset(irow, icol, nrow)], make_accDoubleComplex(a[icol], 0));
}
}
// scale each column of the matrix by a column-dependent constant
extern "C" void scale_matrix_columns_gpu(int nrow,
int ncol,
acc_complex_double_t* mtrx,
double* a)
{
dim3 grid_t(64);
dim3 grid_b(num_blocks(nrow, grid_t.x), ncol);
accLaunchKernel((scale_matrix_columns_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0,
nrow,
mtrx,
a
);
}
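// Example usage (sketch, assuming the column-major layout implied by array2D_offset and
// hypothetical device pointers d_mtrx and d_a): scale column j of an nrow x ncol matrix by a[j]:
//   scale_matrix_columns_gpu(nrow, ncol, d_mtrx, d_a);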
__global__ void scale_matrix_rows_gpu_kernel
(
int nrow__,
acc_complex_double_t* mtrx__,
double const* v__
)
{
int icol = blockIdx.y;
int irow = blockDim.x * blockIdx.x + threadIdx.x;
if (irow < nrow__) {
acc_complex_double_t z = mtrx__[array2D_offset(irow, icol, nrow__)];
mtrx__[array2D_offset(irow, icol, nrow__)] = make_accDoubleComplex(z.x * v__[irow], z.y * v__[irow]);
}
}
// scale each row of the matrix by a row-dependent constant
extern "C" void scale_matrix_rows_gpu(int nrow__,
int ncol__,
acc_complex_double_t* mtrx__,
double const* v__)
{
dim3 grid_t(256);
dim3 grid_b(num_blocks(nrow__, grid_t.x), ncol__);
accLaunchKernel((scale_matrix_rows_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0,
nrow__,
mtrx__,
v__
);
}
__global__ void scale_matrix_elements_gpu_kernel
(
acc_complex_double_t* mtrx__,
int ld__,
int nrow__,
double beta__
)
{
int icol = blockIdx.y;
int irow = blockDim.x * blockIdx.x + threadIdx.x;
if (irow < nrow__) {
acc_complex_double_t z = mtrx__[array2D_offset(irow, icol, ld__)];
mtrx__[array2D_offset(irow, icol, ld__)] = make_accDoubleComplex(z.x * beta__, z.y * beta__);
}
}
extern "C" void scale_matrix_elements_gpu(acc_complex_double_t* ptr__,
int ld__,
int nrow__,
int ncol__,
double beta__)
{
dim3 grid_t(64);
dim3 grid_b(num_blocks(nrow__, grid_t.x), ncol__);
accLaunchKernel((scale_matrix_elements_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0,
ptr__,
ld__,
nrow__,
beta__
);
}
|
97160bbfdf5eb0fb9ede209c8ecef24c88c0f502.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <assert.h>
#include "IndiceTools_GPU.h"
#include "Device.h"
#include "Mandelbrot.h"
#include <assert.h>
#include "DomaineMath_GPU.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void mandelbrot(uchar4* ptrDevPixels,uint w, uint h,float t, DomaineMath domaineMath);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
 |*  Constructor  *|
\*-------------------------*/
// Inputs
Mandelbrot::Mandelbrot(const Grid& grid, uint w, uint h, int dt, uint n, const DomaineMath& domaineMath) :
Animable_I<uchar4>(grid, w, h, "mandelbrot_CUDA_rgba_uchar4",domaineMath), variateurAnimation(Interval<float>(5, 250), 1)
{
// Input
this->n = n;
// Tools
this->t = 0;
}
Mandelbrot::~Mandelbrot()
{
// rien
}
/*-------------------------*\
 |*  Method  *|
\*-------------------------*/
/**
* Override
 * Called periodically by the API
 *
 * Note: domaineMath is not used here because this view is not zoomable
*/
void Mandelbrot::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
Device::lastCudaError("Mandelbrot rgba uchar4 (before kernel)"); // facultatif, for debug only, remove for release
t = variateurAnimation.get();
hipLaunchKernelGGL(( mandelbrot), dim3(dg),dim3(db), 0, 0, ptrDevPixels,w,h,t,domaineMath);
    // TODO launch the kernel with <<<dg,db>>>
    // the kernel is imported above (line 19)
Device::lastCudaError("mandelbrot rgba uchar4 (after kernel)"); // facultatif, for debug only, remove for release
Device::synchronize();
}
/**
* Override
 * Called periodically by the API
*/
void Mandelbrot::animationStep()
{
this->t = variateurAnimation.varierAndGet(); // in [0,120]
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 97160bbfdf5eb0fb9ede209c8ecef24c88c0f502.cu | #include <iostream>
#include <assert.h>
#include "IndiceTools_GPU.h"
#include "Device.h"
#include "Mandelbrot.h"
#include <assert.h>
#include "DomaineMath_GPU.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void mandelbrot(uchar4* ptrDevPixels,uint w, uint h,float t, DomaineMath domaineMath);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
 |*  Constructor  *|
\*-------------------------*/
// Inputs
Mandelbrot::Mandelbrot(const Grid& grid, uint w, uint h, int dt, uint n, const DomaineMath& domaineMath) :
Animable_I<uchar4>(grid, w, h, "mandelbrot_CUDA_rgba_uchar4",domaineMath), variateurAnimation(Interval<float>(5, 250), 1)
{
// Input
this->n = n;
// Tools
this->t = 0;
}
Mandelbrot::~Mandelbrot()
{
// rien
}
/*-------------------------*\
 |*  Method  *|
\*-------------------------*/
/**
* Override
 * Called periodically by the API
 *
 * Note: domaineMath is not used here because this view is not zoomable
*/
void Mandelbrot::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
Device::lastCudaError("Mandelbrot rgba uchar4 (before kernel)"); // facultatif, for debug only, remove for release
t = variateurAnimation.get();
mandelbrot<<<dg,db>>>(ptrDevPixels,w,h,t,domaineMath);
    // TODO launch the kernel with <<<dg,db>>>
    // the kernel is imported above (line 19)
Device::lastCudaError("mandelbrot rgba uchar4 (after kernel)"); // facultatif, for debug only, remove for release
Device::synchronize();
}
/**
* Override
 * Called periodically by the API
*/
void Mandelbrot::animationStep()
{
this->t = variateurAnimation.varierAndGet(); // in [0,120]
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
9593d610d1623aa8a5b33f5d68fb53294cb5ec37.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
//#include <pcl/gpu/utils/device/block.hpp>
//#include <pcl/gpu/utils/device/funcattrib.hpp>
#include "device.hpp"
#include "estimate_combined.h"
//#include <boost/graph/buffer_concepts.hpp>
#define RANGA_MODIFICATION_DEPTHWEIGHT_CURRENT_FRAME 1
//#define RANGA_MODIFICATION_ORIENTATION 1
using namespace pcl::device;
namespace pcl
{
namespace device
{
namespace kinfuLS
{
typedef double float_type;
template<int CTA_SIZE_, typename T>
static __device__ __forceinline__ void reduce(volatile T* buffer)
{
int tid = Block::flattenedThreadId();
T val = buffer[tid];
if (CTA_SIZE_ >= 1024) { if (tid < 512) buffer[tid] = val = val + buffer[tid + 512]; __syncthreads(); }
if (CTA_SIZE_ >= 512) { if (tid < 256) buffer[tid] = val = val + buffer[tid + 256]; __syncthreads(); }
if (CTA_SIZE_ >= 256) { if (tid < 128) buffer[tid] = val = val + buffer[tid + 128]; __syncthreads(); }
if (CTA_SIZE_ >= 128) { if (tid < 64) buffer[tid] = val = val + buffer[tid + 64]; __syncthreads(); }
if (tid < 32)
{
if (CTA_SIZE_ >= 64) { buffer[tid] = val = val + buffer[tid + 32]; }
if (CTA_SIZE_ >= 32) { buffer[tid] = val = val + buffer[tid + 16]; }
if (CTA_SIZE_ >= 16) { buffer[tid] = val = val + buffer[tid + 8]; }
if (CTA_SIZE_ >= 8) { buffer[tid] = val = val + buffer[tid + 4]; }
if (CTA_SIZE_ >= 4) { buffer[tid] = val = val + buffer[tid + 2]; }
if (CTA_SIZE_ >= 2) { buffer[tid] = val = val + buffer[tid + 1]; }
}
}
struct Combined
{
enum
{
CTA_SIZE_X = ESTIMATE_COMBINED_CUDA_GRID_X,
CTA_SIZE_Y = ESTIMATE_COMBINED_CUDA_GRID_Y,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y
};
Mat33 Rcurr;
float3 tcurr;
PtrStep<float> vmap_curr;
PtrStep<float> nmap_curr;
Mat33 Rprev_inv;
float3 tprev;
Intr intr;
PtrStep<float> vmap_g_prev;
PtrStep<float> nmap_g_prev;
float distThres;
float angleThres;
int cols;
int rows;
mutable PtrStep<float_type> gbuf;
__device__ __forceinline__ bool
search (int x, int y, float3& n, float3& d, float3& s) const
{
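          // projective data association: transform the current vertex into the global frame,
          // re-express it in the previous camera and project it into the previous maps, then
          // reject the match if the points are farther apart than distThres or the (sine of the)
          // angle between the normals exceeds angleThres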
float3 ncurr;
ncurr.x = nmap_curr.ptr (y)[x];
if (isnan (ncurr.x))
return (false);
float3 vcurr;
vcurr.x = vmap_curr.ptr (y )[x];
vcurr.y = vmap_curr.ptr (y + rows)[x];
vcurr.z = vmap_curr.ptr (y + 2 * rows)[x];
float3 vcurr_g = Rcurr * vcurr + tcurr;
float3 vcurr_cp = Rprev_inv * (vcurr_g - tprev); // prev camera coo space
int2 ukr; //projection
ukr.x = __float2int_rn (vcurr_cp.x * intr.fx / vcurr_cp.z + intr.cx); //4
ukr.y = __float2int_rn (vcurr_cp.y * intr.fy / vcurr_cp.z + intr.cy); //4
if (ukr.x < 0 || ukr.y < 0 || ukr.x >= cols || ukr.y >= rows || vcurr_cp.z < 0)
return (false);
float3 nprev_g;
nprev_g.x = nmap_g_prev.ptr (ukr.y)[ukr.x];
if (isnan (nprev_g.x))
return (false);
float3 vprev_g;
vprev_g.x = vmap_g_prev.ptr (ukr.y )[ukr.x];
vprev_g.y = vmap_g_prev.ptr (ukr.y + rows)[ukr.x];
vprev_g.z = vmap_g_prev.ptr (ukr.y + 2 * rows)[ukr.x];
float dist = norm (vprev_g - vcurr_g);
if (dist > distThres)
return (false);
ncurr.y = nmap_curr.ptr (y + rows)[x];
ncurr.z = nmap_curr.ptr (y + 2 * rows)[x];
float3 ncurr_g = Rcurr * ncurr;
nprev_g.y = nmap_g_prev.ptr (ukr.y + rows)[ukr.x];
nprev_g.z = nmap_g_prev.ptr (ukr.y + 2 * rows)[ukr.x];
float sine = norm (cross (ncurr_g, nprev_g));
if (sine >= angleThres)
return (false);
n = nprev_g;
d = vprev_g;
s = vcurr_g;
return (true);
}
__device__ __forceinline__ void
operator () () const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
float3 n, d, s, temp_1, temp_2;
bool found_coresp = false;
float depth = 0.f, min_depth = 0.8, max_depth = 4, weight = 1, step = 1;
float new_min_depth = 0;
if (x < cols && y < rows)
found_coresp = search (x, y, n, d, s);
float row[7];
float mind = 0.8, maxd = 4;
if (found_coresp)
{
depth = vmap_curr.ptr (y + 2 * rows)[x];
if ( depth >= 0.8 && depth <= max_depth )
{
#if RANGA_MODIFICATION_DEPTHWEIGHT_CUTOFF
step = ((1/(min_depth*min_depth)) - (1/(max_depth*max_depth)));
weight = (((1/(depth*depth)) - (1/(max_depth*max_depth))) / step);
weight = fabs(sqrt(weight));
if(weight < 0.25)
weight = 0.25;
#elif RANGA_MODIFICATION_DEPTHWEIGHT_CURRENT_FRAME
int less_then_1500 = intr.number_less_than_1000 + intr.number_less_than_1500;
int less_then_2000 = less_then_1500 + intr.number_less_than_2000;
int less_then_2500 = less_then_2000 + intr.number_less_than_2500;
int less_then_3000 = less_then_2500 + intr.number_less_than_3000;
int less_then_3500 = less_then_3000 + intr.number_less_than_3500;
int less_then_4000 = less_then_3500 + intr.number_less_than_4000;
int disable_weights = 0;
if(intr.number_less_than_1000 > (640*480/5)) // && ((intr.depth_max - intr.depth_min) > 1000))
{
new_min_depth = 0.8; //0.5;
//if(intr.number_less_than_1000 > (640*480 * 3/5))
//disable_weights = 1;
}
else if( less_then_1500 > (640*480/5))
{
new_min_depth = 1.25;
}
else if( less_then_2000 > (640*480/5))
{
new_min_depth = 1.75;
}
else if( less_then_2500 > (640*480/5))
{
new_min_depth = 2.25;
}
else if( less_then_3000 > (640*480/5))
{
new_min_depth = 2.75;
}
else if( less_then_3500 > (640*480/5))
{
new_min_depth = 3.25;
}
else
{
new_min_depth = 3.25;
}
//if(depth < 0.8)
//depth = 0.8;
if(!disable_weights)
{
//if(intr.depth_min != 0)
//mind = ((float)intr.depth_min)/1000;
mind = new_min_depth;
//if(intr.depth_max != 0)
maxd = ((float)max_depth);
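                // inverse-square depth weighting, normalized so that weight == 1 at the
                // near cutoff (depth == mind) and decreases as depth grows toward max_depth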
float temp_max_sqr = ((mind * mind * maxd * maxd * 15/16)/ (mind*mind - maxd*maxd/16));
step = ((1/(mind*mind)) - (1/(temp_max_sqr)));
weight = (((1/(depth*depth)) - (1/(temp_max_sqr))) / step);
//weight = weight * 64;
//weight = fabs(sqrt(weight));
}
else
            // Not enough points near the camera to apply weighted ICP (i.e., without big error in measurements)
// Switch to un-weighted ICP
{
weight = 1;
}
#if RANGA_MODIFICATION_ORIENTATION
//if(intr.number_less_than > (640*480/5))//((intr.depth_max - intr.depth_min) > 500))
{
float3 rayvector;
                rayvector.x = x - intr.cx;
rayvector.y = y - intr.cy;
rayvector.z = (intr.fx + intr.fy)/2;
float norm_value = norm(rayvector);
float3 normalvector;
float weight1 = 0.0f;
normalvector.x = nmap_curr.ptr(y ) [x];
normalvector.y = nmap_curr.ptr(y + rows) [x];
normalvector.z = nmap_curr.ptr(y + 2 * rows) [x];
float norm_value1 = norm(normalvector);
weight1 = abs(dot(rayvector, normalvector))/(norm_value * norm_value1);
if(weight1 > 0.6 && weight1 <= 1.0)
{
weight1 = (weight1 - 0.5)/ 0.5;
}
else if(weight1 > 1)
{
// This should not be reached
weight1 = 0;
}
else
weight1 = 1;
weight = weight * weight1;
//weight = fabs(sqrt(weight));
}
#endif
//weight = weight * 4;
weight = fabs(sqrt(weight));
//if(weight < 0.25)
//weight = 0.25;
#else
step = ((1/(min_depth)) - (1/(max_depth)));
weight = (((1/(depth)) - (1/(max_depth))) / step);
weight = fabs(sqrt(weight));
#endif
}
else if(depth > max_depth) // || depth < min_depth) // Minimum depth is removed as I found a case where in minimum depth is less than 0.4 m
// 0.8 is the minimum valid value for the kinect V1 sensor in default mode
// 4 is the maximum valid value for kinect V1 sensor
// http://msdn.microsoft.com/en-us/library/hh973078.aspx
{
weight = 0;
}
else
{
// As it should be square root of the actual weight
weight = 1; //8;
}
temp_1 = cross (s, n);
temp_2 = n;
temp_1.x = temp_1.x * weight ;
temp_1.y = temp_1.y * weight ;
temp_1.z = temp_1.z * weight ;
temp_2.x = n.x * weight;
temp_2.y = n.y * weight;
temp_2.z = n.z * weight;
#if 0
*(float3*)&row[0] = temp_1;
*(float3*)&row[3] = temp_2;
row[6] = weight * dot (n, d - s);
#else
*(float3*)&row[0] = cross (s, n);
*(float3*)&row[3] = n;
row[6] = dot (n, d - s);
#endif
}
else
row[0] = row[1] = row[2] = row[3] = row[4] = row[5] = row[6] = 0.f;
__shared__ float_type smem[CTA_SIZE];
int tid = Block::flattenedThreadId ();
int shift = 0;
for (int i = 0; i < 6; ++i) //rows
{
#pragma unroll
for (int j = i; j < 7; ++j) // cols + b
{
__syncthreads ();
smem[tid] = row[i] * row[j];
__syncthreads ();
reduce<CTA_SIZE>(smem);
if (tid == 0)
gbuf.ptr (shift++)[blockIdx.x + gridDim.x * blockIdx.y] = smem[0];
}
}
}
};
__global__ void
combinedKernel (const Combined cs)
{
cs ();
}
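      // gbuf holds one partial sum per thread block for each of the 27 reduced values:
      // the 21 upper-triangular entries of the symmetric 6x6 system matrix A plus the 6
      // entries of the right-hand side b; TranformReduction below sums each row across blocks.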
struct TranformReduction
{
enum
{
CTA_SIZE = 512,
STRIDE = CTA_SIZE,
B = 6, COLS = 6, ROWS = 6, DIAG = 6,
UPPER_DIAG_MAT = (COLS * ROWS - DIAG) / 2 + DIAG,
TOTAL = UPPER_DIAG_MAT + B,
GRID_X = TOTAL
};
PtrStep<float_type> gbuf;
int length;
mutable float_type* output;
__device__ __forceinline__ void
operator () () const
{
const float_type *beg = gbuf.ptr (blockIdx.x);
const float_type *end = beg + length;
int tid = threadIdx.x;
float_type sum = 0.f;
for (const float_type *t = beg + tid; t < end; t += STRIDE)
sum += *t;
__shared__ float_type smem[CTA_SIZE];
smem[tid] = sum;
__syncthreads ();
reduce<CTA_SIZE>(smem);
if (tid == 0)
output[blockIdx.x] = smem[0];
}
};
__global__ void
TransformEstimatorKernel2 (const TranformReduction tr)
{
tr ();
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void
estimateCombined (const Mat33& Rcurr, const float3& tcurr,
const MapArr& vmap_curr, const MapArr& nmap_curr,
const Mat33& Rprev_inv, const float3& tprev, const Intr& intr,
const MapArr& vmap_g_prev, const MapArr& nmap_g_prev,
float distThres, float angleThres,
DeviceArray2D<float_type>& gbuf, DeviceArray<float_type>& mbuf,
float_type* matrixA_host, float_type* vectorB_host)
{
int cols = vmap_curr.cols ();
int rows = vmap_curr.rows () / 3;
Combined cs;
cs.Rcurr = Rcurr;
cs.tcurr = tcurr;
cs.vmap_curr = vmap_curr;
cs.nmap_curr = nmap_curr;
cs.Rprev_inv = Rprev_inv;
cs.tprev = tprev;
cs.intr = intr;
cs.vmap_g_prev = vmap_g_prev;
cs.nmap_g_prev = nmap_g_prev;
cs.distThres = distThres;
cs.angleThres = angleThres;
cs.cols = cols;
cs.rows = rows;
//////////////////////////////
dim3 block (Combined::CTA_SIZE_X, Combined::CTA_SIZE_Y);
dim3 grid (1, 1, 1);
grid.x = divUp (cols, block.x);
grid.y = divUp (rows, block.y);
mbuf.create (TranformReduction::TOTAL);
if (gbuf.rows () != TranformReduction::TOTAL || gbuf.cols () < (int)(grid.x * grid.y))
gbuf.create (TranformReduction::TOTAL, grid.x * grid.y);
cs.gbuf = gbuf;
// Temporary: This has to be converted to CUDA code
// Find Min and Max of depth value for each frame
// Find the number of values that are less than 1.5 meters.
hipLaunchKernelGGL(( combinedKernel), dim3(grid), dim3(block), 0, 0, cs);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall(hipDeviceSynchronize());
//printFuncAttrib(combinedKernel);
TranformReduction tr;
tr.gbuf = gbuf;
tr.length = grid.x * grid.y;
tr.output = mbuf;
hipLaunchKernelGGL(( TransformEstimatorKernel2), dim3(TranformReduction::TOTAL), dim3(TranformReduction::CTA_SIZE), 0, 0, tr);
cudaSafeCall (hipGetLastError ());
cudaSafeCall (hipDeviceSynchronize ());
float_type host_data[TranformReduction::TOTAL];
mbuf.download (host_data);
int shift = 0;
for (int i = 0; i < 6; ++i) //rows
for (int j = i; j < 7; ++j) // cols + b
{
float_type value = host_data[shift++];
if (j == 6) // vector b
vectorB_host[i] = value;
else
matrixA_host[j * 6 + i] = matrixA_host[i * 6 + j] = value;
}
}
}
}
}
| 9593d610d1623aa8a5b33f5d68fb53294cb5ec37.cu | /*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
//#include <pcl/gpu/utils/device/block.hpp>
//#include <pcl/gpu/utils/device/funcattrib.hpp>
#include "device.hpp"
#include "estimate_combined.h"
//#include <boost/graph/buffer_concepts.hpp>
#define RANGA_MODIFICATION_DEPTHWEIGHT_CURRENT_FRAME 1
//#define RANGA_MODIFICATION_ORIENTATION 1
using namespace pcl::device;
namespace pcl
{
namespace device
{
namespace kinfuLS
{
typedef double float_type;
template<int CTA_SIZE_, typename T>
static __device__ __forceinline__ void reduce(volatile T* buffer)
{
int tid = Block::flattenedThreadId();
T val = buffer[tid];
if (CTA_SIZE_ >= 1024) { if (tid < 512) buffer[tid] = val = val + buffer[tid + 512]; __syncthreads(); }
if (CTA_SIZE_ >= 512) { if (tid < 256) buffer[tid] = val = val + buffer[tid + 256]; __syncthreads(); }
if (CTA_SIZE_ >= 256) { if (tid < 128) buffer[tid] = val = val + buffer[tid + 128]; __syncthreads(); }
if (CTA_SIZE_ >= 128) { if (tid < 64) buffer[tid] = val = val + buffer[tid + 64]; __syncthreads(); }
if (tid < 32)
{
if (CTA_SIZE_ >= 64) { buffer[tid] = val = val + buffer[tid + 32]; }
if (CTA_SIZE_ >= 32) { buffer[tid] = val = val + buffer[tid + 16]; }
if (CTA_SIZE_ >= 16) { buffer[tid] = val = val + buffer[tid + 8]; }
if (CTA_SIZE_ >= 8) { buffer[tid] = val = val + buffer[tid + 4]; }
if (CTA_SIZE_ >= 4) { buffer[tid] = val = val + buffer[tid + 2]; }
if (CTA_SIZE_ >= 2) { buffer[tid] = val = val + buffer[tid + 1]; }
}
}
struct Combined
{
enum
{
CTA_SIZE_X = ESTIMATE_COMBINED_CUDA_GRID_X,
CTA_SIZE_Y = ESTIMATE_COMBINED_CUDA_GRID_Y,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y
};
Mat33 Rcurr;
float3 tcurr;
PtrStep<float> vmap_curr;
PtrStep<float> nmap_curr;
Mat33 Rprev_inv;
float3 tprev;
Intr intr;
PtrStep<float> vmap_g_prev;
PtrStep<float> nmap_g_prev;
float distThres;
float angleThres;
int cols;
int rows;
mutable PtrStep<float_type> gbuf;
__device__ __forceinline__ bool
search (int x, int y, float3& n, float3& d, float3& s) const
{
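          // projective data association: transform the current vertex into the global frame,
          // re-express it in the previous camera and project it into the previous maps, then
          // reject the match if the points are farther apart than distThres or the (sine of the)
          // angle between the normals exceeds angleThres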
float3 ncurr;
ncurr.x = nmap_curr.ptr (y)[x];
if (isnan (ncurr.x))
return (false);
float3 vcurr;
vcurr.x = vmap_curr.ptr (y )[x];
vcurr.y = vmap_curr.ptr (y + rows)[x];
vcurr.z = vmap_curr.ptr (y + 2 * rows)[x];
float3 vcurr_g = Rcurr * vcurr + tcurr;
float3 vcurr_cp = Rprev_inv * (vcurr_g - tprev); // prev camera coo space
int2 ukr; //projection
ukr.x = __float2int_rn (vcurr_cp.x * intr.fx / vcurr_cp.z + intr.cx); //4
ukr.y = __float2int_rn (vcurr_cp.y * intr.fy / vcurr_cp.z + intr.cy); //4
if (ukr.x < 0 || ukr.y < 0 || ukr.x >= cols || ukr.y >= rows || vcurr_cp.z < 0)
return (false);
float3 nprev_g;
nprev_g.x = nmap_g_prev.ptr (ukr.y)[ukr.x];
if (isnan (nprev_g.x))
return (false);
float3 vprev_g;
vprev_g.x = vmap_g_prev.ptr (ukr.y )[ukr.x];
vprev_g.y = vmap_g_prev.ptr (ukr.y + rows)[ukr.x];
vprev_g.z = vmap_g_prev.ptr (ukr.y + 2 * rows)[ukr.x];
float dist = norm (vprev_g - vcurr_g);
if (dist > distThres)
return (false);
ncurr.y = nmap_curr.ptr (y + rows)[x];
ncurr.z = nmap_curr.ptr (y + 2 * rows)[x];
float3 ncurr_g = Rcurr * ncurr;
nprev_g.y = nmap_g_prev.ptr (ukr.y + rows)[ukr.x];
nprev_g.z = nmap_g_prev.ptr (ukr.y + 2 * rows)[ukr.x];
float sine = norm (cross (ncurr_g, nprev_g));
if (sine >= angleThres)
return (false);
n = nprev_g;
d = vprev_g;
s = vcurr_g;
return (true);
}
__device__ __forceinline__ void
operator () () const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
float3 n, d, s, temp_1, temp_2;
bool found_coresp = false;
float depth = 0.f, min_depth = 0.8, max_depth = 4, weight = 1, step = 1;
float new_min_depth = 0;
if (x < cols && y < rows)
found_coresp = search (x, y, n, d, s);
float row[7];
float mind = 0.8, maxd = 4;
if (found_coresp)
{
depth = vmap_curr.ptr (y + 2 * rows)[x];
if ( depth >= 0.8 && depth <= max_depth )
{
#if RANGA_MODIFICATION_DEPTHWEIGHT_CUTOFF
step = ((1/(min_depth*min_depth)) - (1/(max_depth*max_depth)));
weight = (((1/(depth*depth)) - (1/(max_depth*max_depth))) / step);
weight = fabs(sqrt(weight));
if(weight < 0.25)
weight = 0.25;
#elif RANGA_MODIFICATION_DEPTHWEIGHT_CURRENT_FRAME
int less_then_1500 = intr.number_less_than_1000 + intr.number_less_than_1500;
int less_then_2000 = less_then_1500 + intr.number_less_than_2000;
int less_then_2500 = less_then_2000 + intr.number_less_than_2500;
int less_then_3000 = less_then_2500 + intr.number_less_than_3000;
int less_then_3500 = less_then_3000 + intr.number_less_than_3500;
int less_then_4000 = less_then_3500 + intr.number_less_than_4000;
int disable_weights = 0;
if(intr.number_less_than_1000 > (640*480/5)) // && ((intr.depth_max - intr.depth_min) > 1000))
{
new_min_depth = 0.8; //0.5;
//if(intr.number_less_than_1000 > (640*480 * 3/5))
//disable_weights = 1;
}
else if( less_then_1500 > (640*480/5))
{
new_min_depth = 1.25;
}
else if( less_then_2000 > (640*480/5))
{
new_min_depth = 1.75;
}
else if( less_then_2500 > (640*480/5))
{
new_min_depth = 2.25;
}
else if( less_then_3000 > (640*480/5))
{
new_min_depth = 2.75;
}
else if( less_then_3500 > (640*480/5))
{
new_min_depth = 3.25;
}
else
{
new_min_depth = 3.25;
}
//if(depth < 0.8)
//depth = 0.8;
if(!disable_weights)
{
//if(intr.depth_min != 0)
//mind = ((float)intr.depth_min)/1000;
mind = new_min_depth;
//if(intr.depth_max != 0)
maxd = ((float)max_depth);
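                // inverse-square depth weighting, normalized so that weight == 1 at the
                // near cutoff (depth == mind) and decreases as depth grows toward max_depth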
float temp_max_sqr = ((mind * mind * maxd * maxd * 15/16)/ (mind*mind - maxd*maxd/16));
step = ((1/(mind*mind)) - (1/(temp_max_sqr)));
weight = (((1/(depth*depth)) - (1/(temp_max_sqr))) / step);
//weight = weight * 64;
//weight = fabs(sqrt(weight));
}
else
            // Not enough points near the camera to apply weighted ICP (i.e., without big error in measurements)
// Switch to un-weighted ICP
{
weight = 1;
}
#if RANGA_MODIFICATION_ORIENTATION
//if(intr.number_less_than > (640*480/5))//((intr.depth_max - intr.depth_min) > 500))
{
float3 rayvector;
                rayvector.x = x - intr.cx;
rayvector.y = y - intr.cy;
rayvector.z = (intr.fx + intr.fy)/2;
float norm_value = norm(rayvector);
float3 normalvector;
float weight1 = 0.0f;
normalvector.x = nmap_curr.ptr(y ) [x];
normalvector.y = nmap_curr.ptr(y + rows) [x];
normalvector.z = nmap_curr.ptr(y + 2 * rows) [x];
float norm_value1 = norm(normalvector);
weight1 = abs(dot(rayvector, normalvector))/(norm_value * norm_value1);
if(weight1 > 0.6 && weight1 <= 1.0)
{
weight1 = (weight1 - 0.5)/ 0.5;
}
else if(weight1 > 1)
{
// This should not be reached
weight1 = 0;
}
else
weight1 = 1;
weight = weight * weight1;
//weight = fabs(sqrt(weight));
}
#endif
//weight = weight * 4;
weight = fabs(sqrt(weight));
//if(weight < 0.25)
//weight = 0.25;
#else
step = ((1/(min_depth)) - (1/(max_depth)));
weight = (((1/(depth)) - (1/(max_depth))) / step);
weight = fabs(sqrt(weight));
#endif
}
else if(depth > max_depth) // || depth < min_depth) // Minimum depth is removed as I found a case where in minimum depth is less than 0.4 m
// 0.8 is the minimum valid value for the kinect V1 sensor in default mode
// 4 is the maximum valid value for kinect V1 sensor
// http://msdn.microsoft.com/en-us/library/hh973078.aspx
{
weight = 0;
}
else
{
// As it should be square root of the actual weight
weight = 1; //8;
}
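// Build one row of the linearized point-to-plane system: row = [ s x n | n | n.(d - s) ];
// the weighted variant (temp_1, temp_2) is currently compiled out via #if 0.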
temp_1 = cross (s, n);
temp_2 = n;
temp_1.x = temp_1.x * weight ;
temp_1.y = temp_1.y * weight ;
temp_1.z = temp_1.z * weight ;
temp_2.x = n.x * weight;
temp_2.y = n.y * weight;
temp_2.z = n.z * weight;
#if 0
*(float3*)&row[0] = temp_1;
*(float3*)&row[3] = temp_2;
row[6] = weight * dot (n, d - s);
#else
*(float3*)&row[0] = cross (s, n);
*(float3*)&row[3] = n;
row[6] = dot (n, d - s);
#endif
}
else
row[0] = row[1] = row[2] = row[3] = row[4] = row[5] = row[6] = 0.f;
__shared__ float_type smem[CTA_SIZE];
int tid = Block::flattenedThreadId ();
int shift = 0;
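// Block-wide tree reduction: each of the 27 products row[i]*row[j] (upper triangle of A plus the rhs b)
// is summed over the block and one partial sum per block is written to gbuf for the second-stage reduction.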
for (int i = 0; i < 6; ++i) //rows
{
#pragma unroll
for (int j = i; j < 7; ++j) // cols + b
{
__syncthreads ();
smem[tid] = row[i] * row[j];
__syncthreads ();
reduce<CTA_SIZE>(smem);
if (tid == 0)
gbuf.ptr (shift++)[blockIdx.x + gridDim.x * blockIdx.y] = smem[0];
}
}
}
};
__global__ void
combinedKernel (const Combined cs)
{
cs ();
}
struct TranformReduction
{
enum
{
CTA_SIZE = 512,
STRIDE = CTA_SIZE,
B = 6, COLS = 6, ROWS = 6, DIAG = 6,
UPPER_DIAG_MAT = (COLS * ROWS - DIAG) / 2 + DIAG,
TOTAL = UPPER_DIAG_MAT + B,
GRID_X = TOTAL
};
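// UPPER_DIAG_MAT = 21 entries of the symmetric 6x6 matrix A, B = 6 entries of the right-hand side b,
// so TOTAL = 27 independent sums; TransformEstimatorKernel2 launches one block per sum.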
PtrStep<float_type> gbuf;
int length;
mutable float_type* output;
__device__ __forceinline__ void
operator () () const
{
const float_type *beg = gbuf.ptr (blockIdx.x);
const float_type *end = beg + length;
int tid = threadIdx.x;
float_type sum = 0.f;
for (const float_type *t = beg + tid; t < end; t += STRIDE)
sum += *t;
__shared__ float_type smem[CTA_SIZE];
smem[tid] = sum;
__syncthreads ();
reduce<CTA_SIZE>(smem);
if (tid == 0)
output[blockIdx.x] = smem[0];
}
};
__global__ void
TransformEstimatorKernel2 (const TranformReduction tr)
{
tr ();
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void
estimateCombined (const Mat33& Rcurr, const float3& tcurr,
const MapArr& vmap_curr, const MapArr& nmap_curr,
const Mat33& Rprev_inv, const float3& tprev, const Intr& intr,
const MapArr& vmap_g_prev, const MapArr& nmap_g_prev,
float distThres, float angleThres,
DeviceArray2D<float_type>& gbuf, DeviceArray<float_type>& mbuf,
float_type* matrixA_host, float_type* vectorB_host)
{
int cols = vmap_curr.cols ();
int rows = vmap_curr.rows () / 3;
Combined cs;
cs.Rcurr = Rcurr;
cs.tcurr = tcurr;
cs.vmap_curr = vmap_curr;
cs.nmap_curr = nmap_curr;
cs.Rprev_inv = Rprev_inv;
cs.tprev = tprev;
cs.intr = intr;
cs.vmap_g_prev = vmap_g_prev;
cs.nmap_g_prev = nmap_g_prev;
cs.distThres = distThres;
cs.angleThres = angleThres;
cs.cols = cols;
cs.rows = rows;
//////////////////////////////
dim3 block (Combined::CTA_SIZE_X, Combined::CTA_SIZE_Y);
dim3 grid (1, 1, 1);
grid.x = divUp (cols, block.x);
grid.y = divUp (rows, block.y);
mbuf.create (TranformReduction::TOTAL);
if (gbuf.rows () != TranformReduction::TOTAL || gbuf.cols () < (int)(grid.x * grid.y))
gbuf.create (TranformReduction::TOTAL, grid.x * grid.y);
cs.gbuf = gbuf;
// Temporary: This has to be converted to CUDA code
// Find Min and Max of depth value for each frame
// Find the number of values that are less than 1.5 meters.
combinedKernel<<<grid, block>>>(cs);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall(cudaDeviceSynchronize());
//printFuncAttrib(combinedKernel);
TranformReduction tr;
tr.gbuf = gbuf;
tr.length = grid.x * grid.y;
tr.output = mbuf;
TransformEstimatorKernel2<<<TranformReduction::TOTAL, TranformReduction::CTA_SIZE>>>(tr);
cudaSafeCall (cudaGetLastError ());
cudaSafeCall (cudaDeviceSynchronize ());
float_type host_data[TranformReduction::TOTAL];
mbuf.download (host_data);
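// Unpack the 27 reduced sums into the symmetric 6x6 matrix A (mirrored across the diagonal) and the 6-vector b.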
int shift = 0;
for (int i = 0; i < 6; ++i) //rows
for (int j = i; j < 7; ++j) // cols + b
{
float_type value = host_data[shift++];
if (j == 6) // vector b
vectorB_host[i] = value;
else
matrixA_host[j * 6 + i] = matrixA_host[i * 6 + j] = value;
}
}
}
}
}
|
1a15319501213122067cbe54beeda19e3a08cca7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../Public/HistGPU.h"
HistGPU::HistGPU(int* inputArray_in, int inputArraySize_in) :
inputArray(inputArray_in), inputArraySize(inputArraySize_in)
{
HistogramGPU = new int[256]();
if( !inputArraySize_in || 0 == inputArraySize_in || !HistogramGPU)
throw std::invalid_argument("HistCPU class: Received invalid argument in constructor.");
}
HistGPU::~HistGPU() { }
/* ----------------------------------------------------------
* Function name: RunSingleTest_GPU
* Parameters: None
* Used to: Compute histogram on the GPU by adding every pixel value occurrence of the input image to a 256-bin histogram array.
* Return: None. Updating values of TotalComputeTime.
*/
void HistGPU::RunSingleTest_GPU(int blocks)
{
int* dev_inputArray = nullptr;
int* dev_Histogram = nullptr;
hipError_t cudaStatus;
hipEventRecord(beforeAlloc);
//Allocate space on GPU.
cudaStatus = hipMalloc((void**)&dev_inputArray, inputArraySize * sizeof(int));
if (cudaStatus != hipSuccess)
{
printf("hipMalloc() fail! Can not allocate memory on GPU.\n");
throw(cudaStatus);
}
cudaStatus = hipMalloc((void**)&dev_Histogram, 256 * sizeof(int));
if (cudaStatus != hipSuccess)
{
printf("hipMalloc() fail! Can not allocate memory on GPU.\n");
throw(cudaStatus);
}
// Initialize device Histogram with 0
cudaStatus = hipMemset(dev_Histogram, 0, 256 * sizeof(int));
if (cudaStatus != hipSuccess)
{
printf("hipMemset() fail! Can not set memory on GPU.\n");
throw(cudaStatus);
}
// Copy input to previously allocated memory on GPU.
cudaStatus = hipMemcpy(dev_inputArray, inputArray, inputArraySize * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
printf("hipMemcpy() fail! Can not copy data to GPU device.\n");
throw(cudaStatus);
}
hipEventRecord(beforeCompute);
//Launch kernel. ==============================================================================
GPU_Histogram_Kernel << <blocks*16, 256 >> > (dev_inputArray, inputArraySize, dev_Histogram);
hipEventRecord(afterCompute);
hipEventSynchronize(afterCompute);
// Check for kernel errors.
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
printf("GPU_Histogram() kernel launch failed: %s\n", hipGetErrorString(cudaStatus));
}
// Wait for kernel to finish work, and check for any errors during kernel work.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
printf("hipDeviceSynchronize() returned error code %d after launching!\n", cudaStatus);
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(HistogramGPU, dev_Histogram, 256 * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
printf("hipMemcpy() device to host failed!");
}
hipEventRecord(afterAlloc);
hipEventSynchronize(afterAlloc);
float withAllocation = 0;
float woAllocation = 0;
hipEventElapsedTime(&withAllocation, beforeAlloc, afterAlloc);
hipEventElapsedTime(&woAllocation, beforeCompute, afterCompute);
totalMiliseconds_withAllocation += withAllocation;
totalMiliseconds_woAllocation += woAllocation;
hipFree(dev_inputArray);
hipFree(dev_Histogram);
}
/* ----------------------------------------------------------
* Function name: Test_GPU
* Parameters: unsigned int NumberOfExec - How many times the GPU will be tested.
* Used to: Run the GPU test exactly NumberOfExec times and compute the mean execution time.
* Return: None. Updates values of mean compute time.
*/
void HistGPU::Test_GPU(unsigned int NumberOfExec)
{
hipError_t cudaStatus;
//Assume, we will use first GPU device.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess)
{
printf("hipSetDevice() fail! Do you have CUDA available device?\n");
throw(cudaStatus);
}
// Cuda events used to measure execution time.
CreateTimeEvents();
//Check available number of multiprocessors on GPU device- it will be used in kernel function.
hipDeviceProp_t properties;
cudaStatus = hipGetDeviceProperties(&properties, 0);
if (cudaStatus != hipSuccess)
{
printf("hipGetDeviceProperties() fail.");
throw(cudaStatus);
}
for (int TryNumber = 0; TryNumber < NumberOfExec; TryNumber++)
{
RunSingleTest_GPU( properties.multiProcessorCount );
}
hipEventDestroy(beforeAlloc);
hipEventDestroy(afterAlloc);
hipEventDestroy(beforeCompute);
hipEventDestroy(afterCompute);
ComputeMeanTimes(NumberOfExec);
}
/* ----------------------------------------------------------
* Function name: CreateTimeEvents
* Parameters: None.
* Used to: Create events used to measure compute time.
* Return: None. Events are created.
*/
void HistGPU::CreateTimeEvents()
{
hipError_t cudaStatus;
hipEvent_t* Event[4] = { &beforeAlloc, &beforeCompute, &afterAlloc, &afterCompute };
for (int i = 0; i < 4; i++)
{
cudaStatus = hipEventCreate(Event[i]);
if (cudaStatus != hipSuccess) {
printf("hipEventCreate() fail! Can not create beforeAlloc event to measure execution time.\n");
throw(cudaStatus);
}
}
}
/* ----------------------------------------------------------
* Function name: ComputeMeanTimes
* Parameters: unsigned int NumberOfExec - number of cycles, GPU was tested.
* Used to: Determine mean value of computing time.
* Return: None. Public values are updated.
*/
void HistGPU::ComputeMeanTimes(unsigned int NumberOfExec)
{
msWithAlloc = totalMiliseconds_withAllocation / NumberOfExec;
msWithoutAlloc = totalMiliseconds_woAllocation / NumberOfExec;
}
/* ----------------------------------------------------------
* Function name: PrintGPUInfo
* Parameters: None.
* Used to: Print to stdout information about GPU device.
* Return: None.
*/
void HistGPU::PrintGPUInfo()
{
hipDeviceProp_t inf;
HANDLE hConsole = GetStdHandle(STD_OUTPUT_HANDLE);
hipError_t cudaStatus = hipGetDeviceProperties(&inf, 0);
if (cudaStatus != hipSuccess)
{
printf("hipGetDeviceProperties() fail.");
throw(cudaStatus);
}
SetConsoleTextAttribute(hConsole, 11);
printf("************************** GPU Info ****************************\n");
SetConsoleTextAttribute(hConsole, 7);
printf("GPU Device Name: \t\t%s\n", inf.name);
printf("Number of Muliprocessors:\t%d\n", inf.multiProcessorCount);
printf("Clock rate:\t\t\t%f [GHz]\n", inf.clockRate/1000000.f);
printf("Major compute capability:\t\t%d\n", inf.major);
printf("Max size of each dimension block:\t%d, %d, %d\n", inf.maxThreadsDim[0], inf.maxThreadsDim[1], inf.maxThreadsDim[2]);
printf("Max number of threads per block:\t%d\n", inf.maxThreadsPerBlock);
SetConsoleTextAttribute(hConsole, 11);
printf("*******************************************************************\n");
SetConsoleTextAttribute(hConsole, 7);
}
/* ----------------------------------------------------------
* Function name: PrintMeanComputeTime
* Parameters: None.
* Used to: Print out computed values.
* Return: None.
*/
void HistGPU::PrintMeanComputeTime()
{
HANDLE hConsole = GetStdHandle(STD_OUTPUT_HANDLE);
if (msWithAlloc == 0 || msWithoutAlloc == 0)
{
printf("GPU mean compute time is 0. Something happen wrong. Did you choose valid image?\n");
hipError_t exception = hipError_t::hipErrorInvalidValue;
throw exception;
}
printf("Mean histogram computing time on GPU:\n");
printf(" - with memory allocation: ");
SetConsoleTextAttribute(hConsole, 10); printf("%f[ms]", msWithAlloc);
SetConsoleTextAttribute(hConsole, 7); printf(", which is about ");
SetConsoleTextAttribute(hConsole, 10); printf("%f[s]\n",(msWithAlloc / 1000.f));
SetConsoleTextAttribute(hConsole, 7); printf(" - without memory allocation: ");
SetConsoleTextAttribute(hConsole, 10); printf("%f[ms]", msWithoutAlloc);
SetConsoleTextAttribute(hConsole, 7); printf(", which is about ");
SetConsoleTextAttribute(hConsole, 10); printf("%f[s]\n\n", (msWithoutAlloc / 1000.f));
SetConsoleTextAttribute(hConsole, 7);
}
/* ----------------------------------------------------------
* Function name: GPU_Histogram_Kernel
* Parameters: int* inputArray - Pointer to input array of pixel values.
int inputArraySize - Size of input array.
int* HistogramGPU - Pointer to array storing computed values.
* Used to: Compute histogram with GPU. Main GPU function. Multithread function.
* Return: None. Histogram on GPU is computed.
*/
__global__ void GPU_Histogram_Kernel(int* inputArray, int inputArraySize, int* HistogramGPU)
{
//Create and set to 0 local memory for single block.
__shared__ unsigned int temp[256];
temp[threadIdx.x] = 0;
__syncthreads();
int i = threadIdx.x + blockIdx.x * blockDim.x;
int offset = blockDim.x * gridDim.x;
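// Grid-stride loop over the input; increments go to the fast per-block shared histogram first.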
while (i < inputArraySize)
{
atomicAdd(&temp[inputArray[i]], 1);
i += offset;
}
__syncthreads();
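// Merge this block's 256-bin partial histogram into the global result, one bin per thread.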
atomicAdd(&(HistogramGPU[threadIdx.x]), temp[threadIdx.x] );
} | 1a15319501213122067cbe54beeda19e3a08cca7.cu | #include "../Public/HistGPU.h"
HistGPU::HistGPU(int* inputArray_in, int inputArraySize_in) :
inputArray(inputArray_in), inputArraySize(inputArraySize_in)
{
HistogramGPU = new int[256]();
if( !inputArraySize_in || 0 == inputArraySize_in || !HistogramGPU)
throw std::invalid_argument("HistCPU class: Received invalid argument in constructor.");
}
HistGPU::~HistGPU() { }
/* ----------------------------------------------------------
* Function name: RunSingleTest_GPU
* Parameters: None
* Used to: Compute histogram on the GPU by adding every pixel value occurrence of the input image to a 256-bin histogram array.
* Return: None. Updating values of TotalComputeTime.
*/
void HistGPU::RunSingleTest_GPU(int blocks)
{
int* dev_inputArray = nullptr;
int* dev_Histogram = nullptr;
cudaError_t cudaStatus;
cudaEventRecord(beforeAlloc);
//Allocate space on GPU.
cudaStatus = cudaMalloc((void**)&dev_inputArray, inputArraySize * sizeof(int));
if (cudaStatus != cudaSuccess)
{
printf("cudaMalloc() fail! Can not allocate memory on GPU.\n");
throw(cudaStatus);
}
cudaStatus = cudaMalloc((void**)&dev_Histogram, 256 * sizeof(int));
if (cudaStatus != cudaSuccess)
{
printf("cudaMalloc() fail! Can not allocate memory on GPU.\n");
throw(cudaStatus);
}
// Initialize device Histogram with 0
cudaStatus = cudaMemset(dev_Histogram, 0, 256 * sizeof(int));
if (cudaStatus != cudaSuccess)
{
printf("cudaMemset() fail! Can not set memory on GPU.\n");
throw(cudaStatus);
}
// Copy input to previously allocated memory on GPU.
cudaStatus = cudaMemcpy(dev_inputArray, inputArray, inputArraySize * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
printf("cudaMemcpy() fail! Can not copy data to GPU device.\n");
throw(cudaStatus);
}
cudaEventRecord(beforeCompute);
//Launch kernel. ==============================================================================
GPU_Histogram_Kernel << <blocks*16, 256 >> > (dev_inputArray, inputArraySize, dev_Histogram);
cudaEventRecord(afterCompute);
cudaEventSynchronize(afterCompute);
// Check for kernel errors.
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
printf("GPU_Histogram() kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
}
// Wait for kernel to finish work, and check for any errors during kernel work.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
printf("cudaDeviceSynchronize() returned error code %d after launching!\n", cudaStatus);
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(HistogramGPU, dev_Histogram, 256 * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
printf("cudaMemcpy() device to host failed!");
}
cudaEventRecord(afterAlloc);
cudaEventSynchronize(afterAlloc);
float withAllocation = 0;
float woAllocation = 0;
cudaEventElapsedTime(&withAllocation, beforeAlloc, afterAlloc);
cudaEventElapsedTime(&woAllocation, beforeCompute, afterCompute);
totalMiliseconds_withAllocation += withAllocation;
totalMiliseconds_woAllocation += woAllocation;
cudaFree(dev_inputArray);
cudaFree(dev_Histogram);
}
/* ----------------------------------------------------------
* Function name: Test_GPU
* Parameters: unsigned int NumberOfExec - How many times the GPU will be tested.
* Used to: Run the GPU test exactly NumberOfExec times and compute the mean execution time.
* Return: None. Updates values of mean compute time.
*/
void HistGPU::Test_GPU(unsigned int NumberOfExec)
{
cudaError_t cudaStatus;
//Assume, we will use first GPU device.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess)
{
printf("cudaSetDevice() fail! Do you have CUDA available device?\n");
throw(cudaStatus);
}
// Cuda events used to measure execution time.
CreateTimeEvents();
//Check available number of multiprocessors on GPU device- it will be used in kernel function.
cudaDeviceProp properties;
cudaStatus = cudaGetDeviceProperties(&properties, 0);
if (cudaStatus != cudaSuccess)
{
printf("cudaGetDeviceProperties() fail.");
throw(cudaStatus);
}
for (int TryNumber = 0; TryNumber < NumberOfExec; TryNumber++)
{
RunSingleTest_GPU( properties.multiProcessorCount );
}
cudaEventDestroy(beforeAlloc);
cudaEventDestroy(afterAlloc);
cudaEventDestroy(beforeCompute);
cudaEventDestroy(afterCompute);
ComputeMeanTimes(NumberOfExec);
}
/* ----------------------------------------------------------
* Function name: CreateTimeEvents
* Parameters: None.
* Used to: Create events used to measure compute time.
* Return: None. Events are created.
*/
void HistGPU::CreateTimeEvents()
{
cudaError_t cudaStatus;
cudaEvent_t* Event[4] = { &beforeAlloc, &beforeCompute, &afterAlloc, &afterCompute };
for (int i = 0; i < 4; i++)
{
cudaStatus = cudaEventCreate(Event[i]);
if (cudaStatus != cudaSuccess) {
printf("cudaEventCreate() fail! Can not create beforeAlloc event to measure execution time.\n");
throw(cudaStatus);
}
}
}
/* ----------------------------------------------------------
* Function name: ComputeMeanTimes
* Parameters: unsigned int NumberOfExec - number of cycles, GPU was tested.
* Used to: Determine mean value of computing time.
* Return: None. Public values are updated.
*/
void HistGPU::ComputeMeanTimes(unsigned int NumberOfExec)
{
msWithAlloc = totalMiliseconds_withAllocation / NumberOfExec;
msWithoutAlloc = totalMiliseconds_woAllocation / NumberOfExec;
}
/* ----------------------------------------------------------
* Function name: PrintGPUInfo
* Parameters: None.
* Used to: Print to stdout information about GPU device.
* Return: None.
*/
void HistGPU::PrintGPUInfo()
{
cudaDeviceProp inf;
HANDLE hConsole = GetStdHandle(STD_OUTPUT_HANDLE);
cudaError_t cudaStatus = cudaGetDeviceProperties(&inf, 0);
if (cudaStatus != cudaSuccess)
{
printf("cudaGetDeviceProperties() fail.");
throw(cudaStatus);
}
SetConsoleTextAttribute(hConsole, 11);
printf("************************** GPU Info ****************************\n");
SetConsoleTextAttribute(hConsole, 7);
printf("GPU Device Name: \t\t%s\n", inf.name);
printf("Number of Muliprocessors:\t%d\n", inf.multiProcessorCount);
printf("Clock rate:\t\t\t%f [GHz]\n", inf.clockRate/1000000.f);
printf("Major compute capability:\t\t%d\n", inf.major);
printf("Max size of each dimension block:\t%d, %d, %d\n", inf.maxThreadsDim[0], inf.maxThreadsDim[1], inf.maxThreadsDim[2]);
printf("Max number of threads per block:\t%d\n", inf.maxThreadsPerBlock);
SetConsoleTextAttribute(hConsole, 11);
printf("*******************************************************************\n");
SetConsoleTextAttribute(hConsole, 7);
}
/* ----------------------------------------------------------
* Function name: PrintMeanComputeTime
* Parameters: None.
* Used to: Print out computed values.
* Return: None.
*/
void HistGPU::PrintMeanComputeTime()
{
HANDLE hConsole = GetStdHandle(STD_OUTPUT_HANDLE);
if (msWithAlloc == 0 || msWithoutAlloc == 0)
{
printf("GPU mean compute time is 0. Something happen wrong. Did you choose valid image?\n");
cudaError_t exception = cudaError_t::cudaErrorInvalidValue;
throw exception;
}
printf("Mean histogram computing time on GPU:\n");
printf(" - with memory allocation: ");
SetConsoleTextAttribute(hConsole, 10); printf("%f[ms]", msWithAlloc);
SetConsoleTextAttribute(hConsole, 7); printf(", which is about ");
SetConsoleTextAttribute(hConsole, 10); printf("%f[s]\n",(msWithAlloc / 1000.f));
SetConsoleTextAttribute(hConsole, 7); printf(" - without memory allocation: ");
SetConsoleTextAttribute(hConsole, 10); printf("%f[ms]", msWithoutAlloc);
SetConsoleTextAttribute(hConsole, 7); printf(", which is about ");
SetConsoleTextAttribute(hConsole, 10); printf("%f[s]\n\n", (msWithoutAlloc / 1000.f));
SetConsoleTextAttribute(hConsole, 7);
}
/* ----------------------------------------------------------
* Function name: GPU_Histogram_Kernel
* Parameters: int* inputArray - Pointer to input array of pixel values.
int inputArraySize - Size of input array.
int* HistogramGPU - Pointer to array storing computed values.
* Used to: Compute histogram with GPU. Main GPU function. Multithread function.
* Return: None. Histogram on GPU is computed.
*/
__global__ void GPU_Histogram_Kernel(int* inputArray, int inputArraySize, int* HistogramGPU)
{
//Create and set to 0 local memory for single block.
__shared__ unsigned int temp[256];
temp[threadIdx.x] = 0;
__syncthreads();
int i = threadIdx.x + blockIdx.x * blockDim.x;
int offset = blockDim.x * gridDim.x;
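// Grid-stride loop over the input; increments go to the fast per-block shared histogram first.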
while (i < inputArraySize)
{
atomicAdd(&temp[inputArray[i]], 1);
i += offset;
}
__syncthreads();
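// Merge this block's 256-bin partial histogram into the global result, one bin per thread.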
atomicAdd(&(HistogramGPU[threadIdx.x]), temp[threadIdx.x] );
} |
166d46297c03b6bab8c9a9007d46179095753be1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
# include <ReaDDyGPU.hpp>
# include <stdlib.h>
# include <stdio.h>
# include <time.h>
# include <math.h>
# include <vector>
# include <hip/hip_runtime.h>
# include <hiprand/hiprand.h>
# include <hip/hip_runtime.h>
# include <hiprand/hiprand_kernel.h>
# include <sm_11_atomic_functions.h>
#include <initializer_list>
/// //////////////////////////////////////////////////////////////////////////////////////////
/// /
/// TODO:
/// - data structures
/// - dynamic arrays (amortized linear runtime) +sort?
/// - periodic boundaries
/// - lattice + neighbor lists
/// - usage of shared mem
/// /
/// //////////////////////////////////////////////////////////////////////////////////////////
__global__ void update(double* cudaCoords, double* cudaForces, int* cudaTypes, double* cudaD, int * cudaNeighborList, int * cudaNeighborListBegins, double * cudaBoxSize, int * cudaSemaphore, hiprandState_t* globalRandStates, double dt, int numberParticles, double KB, double T, double maxCutoff, int * latticeSize);
__global__ void orderOne(double* cudaCoords, double* cudaForces, int* cudaTypes, int * cudaNeighborListBegins, int * cudaNeighborList, int * cudaLatticeSize, double * cudaBoxSize, hiprandState_t* globalRandStates, int numberParticles, int maxCutoff, int * cudaOrderOnePotentialsMatrix, CudaOrderOnePotential * cudaCudaOrderOnePotentials, int numberOfOrderOnePotentials, int numberOfParticleTypes, double * cudaCollisionRadiiMatrix);
__global__ void orderTwo(double* cudaCoords, double* cudaForces, int* cudaTypes, int * cudaNeighborListBegins, int * cudaNeighborList, int * cudaLatticeSize, double * cudaBoxSize, hiprandState_t* globalRandStates, int numberParticles, int maxCutoff);
__global__ void setup_kernel ( hiprandState_t * state, unsigned long seed, int n );
CudaSimulation::CudaSimulation(Simulation* simulation){
this->simulation = simulation;
}
CudaOrderOnePotential toCudaOrderOnePotential(OrderOnePotential* orderOnePotential){
CudaOrderOnePotential cudaOrderOnePotential = CudaOrderOnePotential();
cudaOrderOnePotential.subtype=orderOnePotential->subtypeID;
if(orderOnePotential->type.compare("DISK")==0){
DiskPotential * diskPotential = reinterpret_cast<DiskPotential*>(orderOnePotential);
cudaOrderOnePotential.type=1;
cudaOrderOnePotential.forceConst=diskPotential->forceConst;
std::copy ( diskPotential->center, diskPotential->center+3, cudaOrderOnePotential.origin );
//cudaOrderOnePotential.origin=diskPotential->center;
std::copy ( diskPotential->normal, diskPotential->normal+3, cudaOrderOnePotential.normal );
//cudaOrderOnePotential.normal=diskPotential->normal;
cudaOrderOnePotential.radius=diskPotential->radius;
}
else if(orderOnePotential->type.compare("CYLINDER")==0){
CylinderPotential * cylinderPotential = reinterpret_cast<CylinderPotential*>(orderOnePotential);
cudaOrderOnePotential.type=2;
cudaOrderOnePotential.forceConst=cylinderPotential->forceConst;
std::copy ( cylinderPotential->center, cylinderPotential->center+3, cudaOrderOnePotential.origin );
//cudaOrderOnePotential.origin=cylinderPotential->center;
std::copy ( cylinderPotential->normal, cylinderPotential->normal+3, cudaOrderOnePotential.normal );
//cudaOrderOnePotential.normal=cylinderPotential->normal;
cudaOrderOnePotential.radius=cylinderPotential->radius;
cudaOrderOnePotential.height=cylinderPotential->height;
}
return cudaOrderOnePotential;
}
int CudaSimulation::initialize(){
int numberOfCudaDevices = 0;
hipGetDeviceCount(&numberOfCudaDevices);
if(numberOfCudaDevices==0){
cout << "no cuda device availible" << endl;
return 1;
}
if(simulation->testmode)
cout << endl << endl << numberOfCudaDevices << " cuda devices found" << endl << endl;
for(int i=0; i<numberOfCudaDevices; ++i){
hipSetDevice(i);
struct hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
if(simulation->testmode){
cout << prop.name << endl;
cout << "compute capability: " << prop.major << "." << prop.minor << endl;
cout << "total global Memory: " << (float)prop.totalGlobalMem/1024.0f/1024.0f/1024.0f << "GB" << endl;
cout << "shared memory per block: " << (float)prop.sharedMemPerBlock/1024.0f << "KB" << endl;
cout << "total constant memory: " << (float)prop.totalConstMem/1024.0f << "KB" << endl;
cout << "memory clock rate: " << prop.memoryClockRate << "Hz" << endl;
cout << "memory bus width: " << prop.memoryBusWidth << "bits" << endl;
cout << "multi processors: " << prop.multiProcessorCount << endl;
cout << "clock rate: " << prop.clockRate << "Hz" << endl;
cout << "max threads per multiprocessor: " << prop.maxThreadsPerMultiProcessor << endl;
cout << "max threads dim: " << prop.maxThreadsDim[0] << " " << prop.maxThreadsDim[1] << " " << prop.maxThreadsDim[2] << endl;
cout << "max grid size: " << prop.maxGridSize[0] << " " << prop.maxGridSize[1] << " " << prop.maxGridSize[2] << endl;
cout << endl;
}
}
/// ////////////////////////////////////////////////////////////////////////
cudaDevice = 3;
numberOfThreads = 128;
/// ////////////////////////////////////////////////////////////////////////
hipSetDevice(cudaDevice);
gridSize = (simulation->numberParticles/numberOfThreads)+1;
blockSize = numberOfThreads;
//gridSize = 10;
//blockSize = 10;
if(simulation->testmode)
cout << "use " << gridSize<< " blocks (grid size) and " << blockSize << " threads (block size) each" << endl;
maxCutoff = simulation->maxCutoff;
boxSize = simulation->latticeBounds;
/// initialize cuRand
hipMalloc ( (void**)&globalRandStates, simulation->numberParticles * sizeof( hiprandState_t ) );
/// setup seeds
hipLaunchKernelGGL(( setup_kernel) , dim3(gridSize), dim3(blockSize) , 0, 0, globalRandStates, time(NULL), simulation->numberParticles );
/// Coords
hipMalloc((void**)&cudaCoords,( simulation->numberParticles * 3 * sizeof ( double ) ));
copyPosToDevice();
/// Forces
hipMalloc((void**)&cudaForces,( simulation->numberParticles * 3 * sizeof ( double ) ));
hipMemset( cudaForces,(double)0, ( simulation->numberParticles * 3 * sizeof ( double ) ));
/// Diffusion const.
double * hostDiffConst;
hostDiffConst = new double[simulation->particleTypes.size()];
for(int i=0; i<simulation->particleTypes.size(); ++i){
hostDiffConst[i]=simulation->particleTypes[i].D;
}
hipMalloc((void**)&cudaD,( simulation->particleTypes.size() * sizeof ( double ) ));
hipMemcpy(cudaD, hostDiffConst, ( simulation->particleTypes.size() * sizeof ( double ) ), hipMemcpyHostToDevice);
/// types
hipMalloc((void**)&cudaTypes,( simulation->numberParticles * sizeof ( int ) ));
hipMemcpy(cudaTypes, simulation->types, ( simulation->numberParticles * sizeof ( int ) ), hipMemcpyHostToDevice);
if(createNeighborList()!=0){
cout <<"neigborlist building problem" << endl;
return 1;
}
hipMemcpy(cudaNeighborList, hostNeighborList, ( simulation->numberParticles * 2 * sizeof ( int ) ), hipMemcpyHostToDevice);
hipMemcpy(cudaNeighborListBegins, hostNeighborListBegins, ( numberOfLatticeFields * sizeof ( int ) ), hipMemcpyHostToDevice);
hipMalloc ((void**)&cudaBoxSize, ( 6 * sizeof ( double ) ));
hipMemcpy( cudaBoxSize, boxSize, ( 6 * sizeof ( double ) ), hipMemcpyHostToDevice);
hipMalloc ( (void**)&cudaLatticeSize, ( 3 * sizeof ( int ) ));
hipMemcpy(cudaLatticeSize, latticeSize, ( 3 * sizeof ( int ) ), hipMemcpyHostToDevice);
/// cudaSemaphores for the lattice fields
hipMalloc((void**)&cudaSemaphore,( numberOfLatticeFields * sizeof ( int ) ));
hipMemset( cudaSemaphore,(int)0, ( numberOfLatticeFields * sizeof ( int ) ));
/// Matrix for order one potentials = matrix[pot][types] = matrix[simulation->orderOnePotentials.size()][simulation->particleTypes.size()]
int orderOnePotentialsMatrixSize = simulation->particleTypes.size() * simulation->orderOnePotentials.size();
hostOrderOnePotentialsMatrix = new int[orderOnePotentialsMatrixSize];
for(int i=0; i<simulation->orderOnePotentials.size(); ++i){
for(int j=0; j<simulation->particleTypes.size(); ++j){
hostOrderOnePotentialsMatrix[i*simulation->particleTypes.size()+j]=0;
}
for(int j=0; j<simulation->orderOnePotentials[i]->affectedParticleTypeIds.size(); ++j){
hostOrderOnePotentialsMatrix[i*simulation->particleTypes.size()+simulation->orderOnePotentials[i]->affectedParticleTypeIds[j]]=1;
}
}
hipMalloc((void**)&cudaOrderOnePotentialsMatrix,( orderOnePotentialsMatrixSize * sizeof ( int ) ));
hipMemcpy(cudaOrderOnePotentialsMatrix, hostOrderOnePotentialsMatrix, ( orderOnePotentialsMatrixSize * sizeof ( int ) ), hipMemcpyHostToDevice);
/// create cuda order one pot
hostCudaOrderOnePotentials = new CudaOrderOnePotential[simulation->orderOnePotentials.size()];
for(int i=0; i<simulation->orderOnePotentials.size(); ++i){
hostCudaOrderOnePotentials[i] = toCudaOrderOnePotential(simulation->orderOnePotentials[i]);
}
hipMalloc((void**)&cudaCudaOrderOnePotentials,( simulation->orderOnePotentials.size() * sizeof ( CudaOrderOnePotential ) ));
hipMemcpy(cudaCudaOrderOnePotentials, hostCudaOrderOnePotentials, ( simulation->orderOnePotentials.size() * sizeof ( CudaOrderOnePotential ) ), hipMemcpyHostToDevice);
/// create cuda collision radii matix -> matrix[nTypes+1]*[nTypes] (+1 for default)
hostCollisionRadiiMatrix = new double[(simulation->particleTypes.size()+1)*simulation->particleTypes.size()];
for(int i=0; i<simulation->particleTypes.size(); ++i){
hostCollisionRadiiMatrix[i]=simulation->particleTypes[i].defaultRadius;
}
for(int i=0; i<simulation->particleTypes.size(); ++i){
for(int j=0; j<simulation->particleTypes.size(); ++j){
hostCollisionRadiiMatrix[(i+1)*simulation->particleTypes.size()+j]=simulation->particleTypes[i].radiiMatrix[j];
}
}
hipMalloc((void**)&cudaCollisionRadiiMatrix,( (simulation->particleTypes.size()+1)*simulation->particleTypes.size() * sizeof ( double ) ));
hipMemcpy(cudaCollisionRadiiMatrix, hostCollisionRadiiMatrix, ( (simulation->particleTypes.size()+1)*simulation->particleTypes.size() * sizeof ( double ) ), hipMemcpyHostToDevice);
hipError_t error = hipGetLastError();
if ( hipSuccess != error ){
printf( "cuda error during initialization: %s\n",hipGetErrorString(error) );
return 1;
}
return 0;
}
int CudaSimulation::createNeighborList(){
numberOfLatticeFields = (boxSize[1]-boxSize[0])/maxCutoff*(boxSize[3]-boxSize[2])/maxCutoff*(boxSize[5]-boxSize[4])/maxCutoff;
latticeSize = new int[3];
latticeSize[0] = (boxSize[1]-boxSize[0])/maxCutoff;
latticeSize[1] = (boxSize[3]-boxSize[2])/maxCutoff;
latticeSize[2] = (boxSize[5]-boxSize[4])/maxCutoff;
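/// one cubic voxel per maxCutoff edge length, so pair interactions only need the 27 voxels around a particle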
hipMalloc((void**)&cudaNeighborList,( simulation->numberParticles * 2 * sizeof ( int ) ));
hipMalloc((void**)&cudaNeighborListBegins,( numberOfLatticeFields * sizeof ( int ) ));
hostNeighborList = new int[simulation->numberParticles * 2];
hostNeighborListBegins= new int[numberOfLatticeFields];
for(int i=0; i<numberOfLatticeFields; ++i){
hostNeighborListBegins[i]=-1;
}
if(simulation->testmode){
cout << "lattice informations: " << endl;
cout << "simulation size x[nm]: " << boxSize[1]-boxSize[0] << endl;
cout << "simulation size y[nm]: " << boxSize[3]-boxSize[2] << endl;
cout << "simulation size z[nm]: " << boxSize[5]-boxSize[4] << endl;
cout << "number of voxels: " << numberOfLatticeFields << endl;
cout << "voxel edge length: " << maxCutoff << endl;
cout << "lattice size x: " << latticeSize[0] << endl;
cout << "lattice size y: " << latticeSize[1] << endl;
cout << "lattice size z: " << latticeSize[2] << endl << endl;
}
for(int i=0; i<simulation->numberParticles; ++i){
int field=((int)floor((simulation->coords[3*i+2]-boxSize[4])/maxCutoff)%latticeSize[2])*latticeSize[0]*latticeSize[1]
+((int)floor((simulation->coords[3*i+1]-boxSize[2])/maxCutoff)%latticeSize[1])*latticeSize[0]
+((int)floor((simulation->coords[3*i+0]-boxSize[0])/maxCutoff)%latticeSize[0]);
/*
cout << "particle nr: " << i << endl;
cout << "x: " << simulation->coords[3*i+0] << endl;
cout << "y: " << simulation->coords[3*i+1] << endl;
cout << "z: " << simulation->coords[3*i+2] << endl;
cout << ((int)floor((simulation->coords[3*i+2]-boxSize[4])/maxCutoff)%latticeSize[2]) << endl;
cout << "-> " << ((int)floor((simulation->coords[3*i+2]-boxSize[4])/maxCutoff)%latticeSize[2])*latticeSize[0]*latticeSize[1] << endl;
cout << ((int)floor((simulation->coords[3*i+1]-boxSize[2])/maxCutoff)%latticeSize[1]) << endl;
cout << "-> " << ((int)floor((simulation->coords[3*i+1]-boxSize[2])/maxCutoff)%latticeSize[1])*latticeSize[0] << endl;
cout << ((int)floor((simulation->coords[3*i+0]-boxSize[0])/maxCutoff)%latticeSize[0]) << endl;
cout << "-> " << ((int)floor((simulation->coords[3*i+0]-boxSize[0])/maxCutoff)%latticeSize[0]) << endl;
cout << field << endl;*/
if(field<0 || field>numberOfLatticeFields){
cout << "particle is out of the Box: " << i << " [" <<simulation->coords[3*i+0] << ", " << simulation->coords[3*i+1] << ", " << simulation->coords[3*i+2] << "]" << endl;
return 1;
}
if(hostNeighborListBegins[field]==-1){
/// this particle is the first in this field. it is its own predecessor and successor
hostNeighborListBegins[field]=i;
hostNeighborList[2*i+1]=i;
hostNeighborList[2*i]=i;
}
else{
/// x f y -> x p f y
/// particles successor is the fields first particle
/// S'(p) = f
hostNeighborList[2*i+1]=hostNeighborListBegins[field];
/// sucessor of the first particles predecessor is the particle
/// S(P(f))=p , P(f)=x -> S'(x)=p
hostNeighborList[2*hostNeighborList[2*hostNeighborListBegins[field]]+1]=i;
/// particles predecessor is the predecessor of the fields first particle
/// P'(p)=P(f)=x
hostNeighborList[2*i]=hostNeighborList[2*hostNeighborListBegins[field]];
/// fields first particles new predecessor is the current particle
/// P'(f)=p
hostNeighborList[2*hostNeighborListBegins[field]]=i;
//hostNeighborListBegins[field]=i;
}
}
hipError_t error = hipGetLastError();
if ( hipSuccess != error ){
printf( "cuda error: %s\n",hipGetErrorString(error) );
return 1;
}
return 0;
}
int CudaSimulation::testNeighborList(){
hipMemcpy(hostNeighborList, cudaNeighborList, ( simulation->numberParticles * 2 * sizeof ( int ) ), hipMemcpyDeviceToHost);
hipMemcpy(hostNeighborListBegins, cudaNeighborListBegins, ( numberOfLatticeFields * sizeof ( int ) ), hipMemcpyDeviceToHost);
int count = 0;
int count2 = 0;
int x;
for(int i=0; i<numberOfLatticeFields; ++i){
x=hostNeighborListBegins[i];
//cout << i << ":" << x << endl;
if(x!=-1){
do{
count++;
//cout << hostNeighborList[2*x+0] << " " << x << " " << hostNeighborList[2*x+1] << endl;
x=hostNeighborList[2*x+1];
if(x==hostNeighborListBegins[i])
break;
//char a;
//cin >> a;
}while(true);
}
else{
++count2;
}
}
cout << "Neighborlist check:" << "count: "<< count << " part num: " << simulation->numberParticles << " (check 2:" << count2 << " empty fields)"<< endl;
if(count!=simulation->numberParticles){
cout << "Neighborlist broken!" << endl;
return 1;
}
//cout << "Neighborlist okay!" << endl;
hipError_t error = hipGetLastError();
if ( hipSuccess != error ){
printf( "cuda error: %s\n",hipGetErrorString(error) );
return 1;
}
return 0;
}
int CudaSimulation::copyPosToDevice(){
hipMemcpy(cudaCoords, simulation->coords, simulation->numberParticles * 3 * sizeof(double), hipMemcpyHostToDevice);
if(simulation->testmode){
hipError_t error = hipGetLastError();
if ( hipSuccess != error ){
printf( "cuda error: %s\n",hipGetErrorString(error) );
return 1;
}
}
return 0;
}
int CudaSimulation::copyPosFromDevice(){
hipMemcpy(simulation->coords, cudaCoords, simulation->numberParticles * 3 * sizeof ( double ), hipMemcpyDeviceToHost);
if(simulation->testmode){
hipError_t error = hipGetLastError();
if ( hipSuccess != error ){
printf( "cuda error: %s\n",hipGetErrorString(error) );
return 1;
}
}
return 0;
}
int CudaSimulation::simulate(){
//cout << "1" << endl;
//orderOne<<<1,1>>>(cudaCoords, cudaForces, cudaTypes, cudaNeighborListBegins, cudaNeighborList, cudaLatticeSize, cudaBoxSize, globalRandStates, simulation->numberParticles, maxCutoff, cudaOrderOnePotentialsMatrix, cudaCudaOrderOnePotentials, simulation->orderOnePotentials.size(), simulation->particleTypes.size(), cudaCollisionRadiiMatrix);
hipLaunchKernelGGL(( orderOne), dim3(gridSize),dim3(blockSize), 0, 0, cudaCoords, cudaForces, cudaTypes, cudaNeighborListBegins, cudaNeighborList, cudaLatticeSize, cudaBoxSize, globalRandStates, simulation->numberParticles, maxCutoff, cudaOrderOnePotentialsMatrix, cudaCudaOrderOnePotentials, simulation->orderOnePotentials.size(), simulation->particleTypes.size(), cudaCollisionRadiiMatrix);
if(simulation->testmode){
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if ( hipSuccess != error ){
printf( "order one, cuda error: %s\n",hipGetErrorString(error) );
return 1;
}
}
//cout << "2" << endl;
//orderTwo<<<gridSize,blockSize>>>(cudaCoords, cudaForces, cudaTypes, cudaNeighborListBegins, cudaNeighborList, cudaLatticeSize, cudaBoxSize, globalRandStates, simulation->numberParticles, maxCutoff);
if(simulation->testmode){
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if ( hipSuccess != error ){
printf( "order two, cuda error: %s\n",hipGetErrorString(error) );
return 1;
}
}
//cout << "3" << endl;
hipLaunchKernelGGL(( update), dim3(gridSize),dim3(blockSize), 0, 0, cudaCoords, cudaForces, cudaTypes, cudaD, cudaNeighborList, cudaNeighborListBegins, cudaBoxSize, cudaSemaphore, globalRandStates, simulation->stepSizeInPs, simulation->numberParticles, simulation->boltzmann, simulation->temperature, maxCutoff, cudaLatticeSize);
if(simulation->testmode){
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if ( hipSuccess != error ){
printf( "update, cuda error: %s\n",hipGetErrorString(error ));
return 1;
}
}
return 0;
}
/// /////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// cuda kernels ////////////////////////////////////////////////////////////////////////////////////////////////
/// /////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void orderOne(double* cudaCoords, double* cudaForces, int* cudaTypes, int * cudaNeighborListBegins, int * cudaNeighborList, int * cudaLatticeSize, double * cudaBoxSize, hiprandState_t* globalRandStates, int numberParticles, int maxCutoff, int * cudaOrderOnePotentialsMatrix, CudaOrderOnePotential * cudaCudaOrderOnePotentials, int numberOfOrderOnePotentials, int numberOfParticleTypes, double * cudaCollisionRadiiMatrix){
int particleNumber=blockIdx.x * blockDim.x + threadIdx.x;
if(particleNumber<numberParticles){
hiprandState_t localState = globalRandStates[particleNumber];
/// do calculation of forces and maybe reactions here ...
/// go through all order one potetntials
for(int orderOnePotential=0; orderOnePotential<numberOfOrderOnePotentials; ++orderOnePotential){
/// lookup in matrix whether they apply to the current particle type
if(cudaOrderOnePotentialsMatrix[orderOnePotential*numberOfParticleTypes+cudaTypes[particleNumber]]==1){
/// check what kind of potential it is
if(cudaCudaOrderOnePotentials[orderOnePotential].type==1){/// Disk
/// calculation depends on the normal vector. assign x,y and z coordinates to variables
int normal, side1, side2;
/// normal vector on x axis -> assign x to normal and y and z to the lateral (on Disk) directions
if(cudaCudaOrderOnePotentials[orderOnePotential].normal[0]==1){
normal=0;side1=1;side2=2;
}
/// y
else if(cudaCudaOrderOnePotentials[orderOnePotential].normal[1]==1){
normal=1;side1=0;side2=2;
}
/// z
else {
normal=2;side1=1;side2=0;
}
/// different subtypes
if(cudaCudaOrderOnePotentials[orderOnePotential].subtype==1){/// attractive
/* for(int i=0; i<3; ++i){
//cudaCoords[3*particleNumber+i]=cudaCudaOrderOnePotentials[orderOnePotential].origin[i];
//cudaForces[3*particleNumber+i]+= 10;
}
cudaCoords[3*particleNumber+0]=cudaCudaOrderOnePotentials[orderOnePotential].forceConst;
cudaCoords[3*particleNumber+1]=cudaCudaOrderOnePotentials[orderOnePotential].radius;
}
if(false && cudaCudaOrderOnePotentials[orderOnePotential].subtype==1){/// attractive
*/
/* r = distToDiskPlane;// actual
r0 = 0;// desired
if (r > r0) {
precompute = (k * (-r0 + r) / r);
gradient[0] = gradient[0]+precompute * (pointOnDiskPlane[0]-coords1[0]);
gradient[1] = gradient[1]+precompute * (pointOnDiskPlane[1]-coords1[1]);
gradient[2] = gradient[2]+precompute * (pointOnDiskPlane[2]-coords1[2]);
}*/
double r = cudaCudaOrderOnePotentials[orderOnePotential].origin[normal]-cudaCoords[3*particleNumber+normal];
cudaForces[3*particleNumber+normal]+=-cudaCudaOrderOnePotentials[orderOnePotential].forceConst*r;
//cudaForces[3*particleNumber+normal]+=cudaCudaOrderOnePotentials[orderOnePotential].origin[normal];
// force within disc plane
/*r = distToCenterWithinDiskPlane + pRadius;// actual
r0 = diskRadius;// desired
if (r > r0) {
precompute = (k * (-r0 + r) / r);
gradient[0] = gradient[0]+precompute * (center[0]-pointOnDiskPlane[0]);
gradient[1] = gradient[1]+precompute * (center[1]-pointOnDiskPlane[1]);
gradient[2] = gradient[2]+precompute * (center[2]-pointOnDiskPlane[2]);
}*/
/// particle radius!
r = sqrt(
pow(cudaCoords[3*particleNumber+side1]- cudaCudaOrderOnePotentials[orderOnePotential].origin[side1],2)
+
pow(cudaCoords[3*particleNumber+side2]- cudaCudaOrderOnePotentials[orderOnePotential].origin[side2],2)
);
if (r > cudaCudaOrderOnePotentials[orderOnePotential].radius) {
cudaForces[3*particleNumber+side1]+=
-cudaCudaOrderOnePotentials[orderOnePotential].forceConst
*(r-cudaCudaOrderOnePotentials[orderOnePotential].radius)
/r
*(cudaCudaOrderOnePotentials[orderOnePotential].origin[side1]-cudaCoords[3*particleNumber+side1]);
cudaForces[3*particleNumber+side2]+=
-cudaCudaOrderOnePotentials[orderOnePotential].forceConst
*(r-cudaCudaOrderOnePotentials[orderOnePotential].radius)
/r
*(cudaCudaOrderOnePotentials[orderOnePotential].origin[side2]-cudaCoords[3*particleNumber+side2]);
}
/*
double distToOriginWithinDisk = fminf(
cudaCudaOrderOnePotentials[orderOnePotential].radius
-
sqrt(
pow(cudaCoords[3*particleNumber+side1]- cudaCudaOrderOnePotentials[orderOnePotential].origin[side1],2)
+
pow(cudaCoords[3*particleNumber+side2]- cudaCudaOrderOnePotentials[orderOnePotential].origin[side2],2)
)
,0);
double force = cudaCudaOrderOnePotentials[orderOnePotential].forceConst * (
pow( cudaCudaOrderOnePotentials[orderOnePotential].origin[normal]-cudaCoords[3*particleNumber+normal], 2)
+
pow(distToOriginWithinDisk,2)
);
cudaForces[3*particleNumber+normal]-=force*(cudaCudaOrderOnePotentials[orderOnePotential].origin[normal]+cudaCoords[3*particleNumber+normal]);
if(distToDisk>cudaCudaOrderOnePotentials[orderOnePotential].radius){
// in my opinion this calculation is wrong, because we should take the distance to the edge of the disk, instead the distance to its center
cudaForces[3*particleNumber+side1]-=force*(cudaCudaOrderOnePotentials[orderOnePotential].origin[side1]+cudaCoords[3*particleNumber+side1]);
cudaForces[3*particleNumber+side2]-=force*(cudaCudaOrderOnePotentials[orderOnePotential].origin[side2]+cudaCoords[3*particleNumber+side2]);
}*/
}
else if(cudaCudaOrderOnePotentials[orderOnePotential].subtype==2){/// repulsive
// makes no sense ...
/*
// force along normal vector
r = distToDiskPlane;// actual
r0 = pRadius;// desired
double r_1 = distToCenterWithinDiskPlane - pRadius;
double r0_1 = diskRadius;
if (r < r0 && r_1 < r0_1) {
precompute = (k * (-r0 + r) / r);
gradient[0] = gradient[0]+ precompute * ( pointOnDiskPlane[0]-coords1[0]);
gradient[1] = gradient[1]+ precompute * ( pointOnDiskPlane[1]-coords1[1]);
gradient[2] = gradient[2]+ precompute * ( pointOnDiskPlane[2]-coords1[2]);
}*/
}
}/// end Disk
/****/ else if(cudaCudaOrderOnePotentials[orderOnePotential].type==2){/// Cylinder
int normal, side1, side2;
if(cudaCudaOrderOnePotentials[orderOnePotential].normal[0]==1){normal=0;side1=1;side2=2;}
else if(cudaCudaOrderOnePotentials[orderOnePotential].normal[1]==1){normal=1;side1=0;side2=2;}
else {normal=2;side1=1;side2=0;}
if(cudaCudaOrderOnePotentials[orderOnePotential].subtype==1){/// attractive
/* r = distToDiskPlane + pRadius;// actual
r0 = 0.5 * this.height;// desired
if (r > r0) {
precompute = (k * (-r0 + r) / r);
gradient[0] = gradient[0] + precompute * (pointOnDiskPlane[0] - coords1[0]);
gradient[1] = gradient[1] + precompute * (pointOnDiskPlane[1] - coords1[1]);
gradient[2] = gradient[2] + precompute * (pointOnDiskPlane[2] - coords1[2]);
}*/
double r = cudaCudaOrderOnePotentials[orderOnePotential].origin[normal]-cudaCoords[3*particleNumber+normal];
if(fabsf(r)>cudaCudaOrderOnePotentials[orderOnePotential].height*0.5)
cudaForces[3*particleNumber+normal]+=-cudaCudaOrderOnePotentials[orderOnePotential].forceConst*(fabsf(r)-cudaCudaOrderOnePotentials[orderOnePotential].height*0.5)/fabsf(r)*r;
/// particle radius!
r = sqrt(
pow(cudaCoords[3*particleNumber+side1]- cudaCudaOrderOnePotentials[orderOnePotential].origin[side1],2)
+
pow(cudaCoords[3*particleNumber+side2]- cudaCudaOrderOnePotentials[orderOnePotential].origin[side2],2)
);
if (r > cudaCudaOrderOnePotentials[orderOnePotential].radius) {
cudaForces[3*particleNumber+side1]+=
-cudaCudaOrderOnePotentials[orderOnePotential].forceConst
*(r-cudaCudaOrderOnePotentials[orderOnePotential].radius)
/r
*(cudaCudaOrderOnePotentials[orderOnePotential].origin[side1]-cudaCoords[3*particleNumber+side1]);
cudaForces[3*particleNumber+side2]+=
-cudaCudaOrderOnePotentials[orderOnePotential].forceConst
*(r-cudaCudaOrderOnePotentials[orderOnePotential].radius)
/r
*(cudaCudaOrderOnePotentials[orderOnePotential].origin[side2]-cudaCoords[3*particleNumber+side2]);
}
/* double distToDiskSide = fminf(
cudaCudaOrderOnePotentials[orderOnePotential].radius
-
sqrt(
pow(cudaCoords[3*particleNumber+side1]- cudaCudaOrderOnePotentials[orderOnePotential].origin[side1],2)
+
pow(cudaCoords[3*particleNumber+side2]- cudaCudaOrderOnePotentials[orderOnePotential].origin[side2],2)
)
,0);
double distToDiskPlane = cudaCudaOrderOnePotentials[orderOnePotential].origin[normal]+0.5*cudaCudaOrderOnePotentials[orderOnePotential].height<cudaCoords[3*particleNumber+normal]?
cudaCudaOrderOnePotentials[orderOnePotential].origin[normal]+0.5*cudaCudaOrderOnePotentials[orderOnePotential].height-cudaCoords[3*particleNumber+normal] : 0
+
cudaCudaOrderOnePotentials[orderOnePotential].origin[normal]-0.5*cudaCudaOrderOnePotentials[orderOnePotential].height>cudaCoords[3*particleNumber+normal]?
cudaCoords[3*particleNumber+normal]-cudaCudaOrderOnePotentials[orderOnePotential].origin[normal]-0.5*cudaCudaOrderOnePotentials[orderOnePotential].height : 0;
double force = cudaCudaOrderOnePotentials[orderOnePotential].forceConst * (
pow(distToDiskPlane, 2)
+
pow(distToDiskSide,2)
);
cudaForces[3*particleNumber+normal]-=force*(distToDiskPlane);
if(distToDiskSide>cudaCudaOrderOnePotentials[orderOnePotential].radius){
// in my opinion this calculation is wrong, because we should take the distance to the edge of the disk, instead the distance to its center
cudaForces[3*particleNumber+side1]-=force*(cudaCudaOrderOnePotentials[orderOnePotential].origin[side1]+cudaCoords[3*particleNumber+side1]);
cudaForces[3*particleNumber+side2]-=force*(cudaCudaOrderOnePotentials[orderOnePotential].origin[side2]+cudaCoords[3*particleNumber+side2]);
}*/
}
else if(cudaCudaOrderOnePotentials[orderOnePotential].subtype==2){/// repulsive
double distToDiskSide = fminf(
sqrt(
pow(cudaCoords[3*particleNumber+side1]- cudaCudaOrderOnePotentials[orderOnePotential].origin[side1],2)
+
pow(cudaCoords[3*particleNumber+side2]- cudaCudaOrderOnePotentials[orderOnePotential].origin[side2],2)
)
-
cudaCudaOrderOnePotentials[orderOnePotential].radius
,0);
double distToDiskPlane = cudaCudaOrderOnePotentials[orderOnePotential].origin[normal]+0.5*cudaCudaOrderOnePotentials[orderOnePotential].height>cudaCoords[3*particleNumber+normal]?
cudaCudaOrderOnePotentials[orderOnePotential].origin[normal]+0.5*cudaCudaOrderOnePotentials[orderOnePotential].height-cudaCoords[3*particleNumber+normal] : 0
+
cudaCudaOrderOnePotentials[orderOnePotential].origin[normal]-0.5*cudaCudaOrderOnePotentials[orderOnePotential].height<cudaCoords[3*particleNumber+normal]?
cudaCoords[3*particleNumber+normal]-cudaCudaOrderOnePotentials[orderOnePotential].origin[normal]-0.5*cudaCudaOrderOnePotentials[orderOnePotential].height : 0;
double force = cudaCudaOrderOnePotentials[orderOnePotential].forceConst * (
pow(distToDiskPlane, 2)
+
pow(distToDiskSide,2)
);
cudaForces[3*particleNumber+normal]-=force*(distToDiskPlane);
if(distToDiskSide>cudaCudaOrderOnePotentials[orderOnePotential].radius){
// in my opinion this calculation is wrong, because we should take the distance to the edge of the disk, instead of the distance to its center
cudaForces[3*particleNumber+side1]-=force*(cudaCudaOrderOnePotentials[orderOnePotential].origin[side1]+cudaCoords[3*particleNumber+side1]);
cudaForces[3*particleNumber+side2]-=force*(cudaCudaOrderOnePotentials[orderOnePotential].origin[side2]+cudaCoords[3*particleNumber+side2]);
}
}
}/// end Cylinder
}/// endif order one potentials matrix
}/// end iterate over order one potentials
globalRandStates[particleNumber] = localState;
}
return;
}
__global__ void orderTwo(double* cudaCoords, double* cudaForces, int* cudaTypes, int * cudaNeighborListBegins, int * cudaNeighborList, int * cudaLatticeSize, double * cudaBoxSize, hiprandState_t* globalRandStates, int numberParticles, int maxCutoff){
int k=blockIdx.x * blockDim.x + threadIdx.x;
if(k<numberParticles){
hiprandState_t localState = globalRandStates[k];
int todo[27];
int x,y,z;
int field=((int)floor((cudaCoords[3*k+2]-cudaBoxSize[4])/maxCutoff)%cudaLatticeSize[2])*cudaLatticeSize[0]*cudaLatticeSize[1]
+((int)floor((cudaCoords[3*k+1]-cudaBoxSize[2])/maxCutoff)%cudaLatticeSize[1])*cudaLatticeSize[0]
+((int)floor((cudaCoords[3*k+0]-cudaBoxSize[0])/maxCutoff)%cudaLatticeSize[0]);
/// collect the 27 surrounding voxel indices (with periodic wrap) that are relevant for this particle's pair interactions
/// TODO: CHECK! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
for(x=-1; x<2;x++){
for(y=-1; y<2;y++){
for(z=-1; z<2;z++){
todo[(x+1)+(y+1)*3+(z+1)*9]=
(
(
(field%(cudaLatticeSize[0]))
+x+cudaLatticeSize[0]
)
%cudaLatticeSize[0]
)
+cudaLatticeSize[0]*
(
(
(int)floorf
(
(float)(field%(cudaLatticeSize[0]*cudaLatticeSize[1]))
/
(float)(cudaLatticeSize[0])
)
+y+cudaLatticeSize[1]
)
%cudaLatticeSize[1]
)
+cudaLatticeSize[0]*cudaLatticeSize[1]*
(
(
(int)floorf
(
(float)(field)
/
(float)(cudaLatticeSize[0]*cudaLatticeSize[1])
)
+z+cudaLatticeSize[2]
)
%cudaLatticeSize[2]
);
}
}
}
/// do calculation of forces and maybe reactions here ...
/*
*
*first:
*do it plain
*think about parameter storage
*then dynamik arrays
*later think about accellerations due to ideas below
*
*/
/*
for every near particle with higher ID:
for every force
if Interaction Matrix != 0
calculate necessary forces
atomic add force to both interactiong particles
need: - interaction matices (radii, forces) for every force (aligned in one array, +array size)
- more parameter? how to store?
data alignment: x,y,z,type,rand?,force?
*/
/*
* call voxel per warp(n threads)
* load first n coords in shared mem
* calculate distances to particles in surrounding fields (always load one particle and calc. n dist.)
* calculate all necessary forces somehow
*/
globalRandStates[k] = localState;
}
return;
}
__global__ void update(double* cudaCoords, double* cudaForces, int* cudaTypes, double* cudaD, int * cudaNeighborList, int * cudaNeighborListBegins, double * cudaBoxSize, int * cudaSemaphore, hiprandState_t* globalRandStates, double dt, int numberParticles, double KB, double T, double maxCutoff, int * cudaLatticeSize){
int particleNumber=blockIdx.x * blockDim.x + threadIdx.x;
if(particleNumber<numberParticles){
hiprandState_t localState = globalRandStates[particleNumber];
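/// voxel currently occupied by the particle: linear index z*nx*ny + y*nx + x, each coordinate wrapped into the lattice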
int oldVoxel= ((int)floor((cudaCoords[3*particleNumber+2]-cudaBoxSize[4])/maxCutoff)%cudaLatticeSize[2])*cudaLatticeSize[0]*cudaLatticeSize[1]
+((int)floor((cudaCoords[3*particleNumber+1]-cudaBoxSize[2])/maxCutoff)%cudaLatticeSize[1])*cudaLatticeSize[0]
+((int)floor((cudaCoords[3*particleNumber+0]-cudaBoxSize[0])/maxCutoff)%cudaLatticeSize[0]);
/// check for periodic boundaries ...
/// /
for(int dimension=0; dimension<3; ++dimension){
/// apply diffusion and forces -> update positions
/// x(t+dt) = x(t) - dt*D*(F(x(t))/kT) + sqrt(2Ddt)*N(0,1)
cudaCoords[particleNumber*3+dimension] += -dt*cudaD[cudaTypes[particleNumber]]*cudaForces[particleNumber*3+dimension]/KB/T + sqrt(2*cudaD[cudaTypes[particleNumber]]*dt)*hiprand_normal( &localState );
//cudaCoords[particleNumber*3+dimension] += -dt*cudaD[cudaTypes[particleNumber]]*cudaForces[particleNumber*3+dimension]/KB/T ;
//cudaCoords[particleNumber*3+dimension] += cudaForces[particleNumber*3+dimension] ;
cudaForces[particleNumber*3+dimension]=0.0f;
/// periodic boundary condition
while(cudaCoords[3*particleNumber+dimension]>cudaBoxSize[dimension*2+1]){cudaCoords[3*particleNumber+dimension]=cudaCoords[3*particleNumber+dimension]-(cudaBoxSize[dimension*2+1]-cudaBoxSize[dimension*2+0]);}
while(cudaCoords[3*particleNumber+dimension]<cudaBoxSize[dimension*2+0]){cudaCoords[3*particleNumber+dimension]=cudaCoords[3*particleNumber+dimension]+(cudaBoxSize[dimension*2+1]-cudaBoxSize[dimension*2+0]);}
}
/// lattice field changed?
int newVoxel= ((int)floor((cudaCoords[3*particleNumber+2]-cudaBoxSize[4])/maxCutoff)%cudaLatticeSize[2])*cudaLatticeSize[0]*cudaLatticeSize[1]
+((int)floor((cudaCoords[3*particleNumber+1]-cudaBoxSize[2])/maxCutoff)%cudaLatticeSize[1])*cudaLatticeSize[0]
+((int)floor((cudaCoords[3*particleNumber+0]-cudaBoxSize[0])/maxCutoff)%cudaLatticeSize[0]);
/// apply voxel-changes ...
if(newVoxel!=oldVoxel){
bool leaveLoop = false;
/// delete form old list
while(!leaveLoop){
/// Lock
if(atomicExch(&(cudaSemaphore[oldVoxel]),1)==0){
int prev=cudaNeighborList[2*particleNumber];
int next=cudaNeighborList[2*particleNumber+1];
cudaNeighborList[2*prev+1]=next;
cudaNeighborList[2*next]=prev;
/// was this partilce begin of the linked list?
if(cudaNeighborListBegins[oldVoxel]==particleNumber){
/// was the particle the only one in this field?
if(cudaNeighborList[2*particleNumber]==particleNumber){
cudaNeighborListBegins[oldVoxel]=-1;
}
else{
cudaNeighborListBegins[oldVoxel]=cudaNeighborList[2*particleNumber+1];
}
}
leaveLoop=true;
/// unLock
atomicExch(&(cudaSemaphore[oldVoxel]),0);
}
}
leaveLoop = false;
/// push ontop of the new list
while(!leaveLoop){
/// Lock
if(atomicExch(&(cudaSemaphore[newVoxel]),1)==0){
/// is new list empty?
if(cudaNeighborListBegins[newVoxel]!=-1){/// no
cudaNeighborList[2*particleNumber]=cudaNeighborList[2*cudaNeighborListBegins[newVoxel]];
cudaNeighborList[2*particleNumber+1]=cudaNeighborListBegins[newVoxel];
cudaNeighborList[2*cudaNeighborList[2*cudaNeighborListBegins[newVoxel]]+1]=particleNumber;
cudaNeighborList[2*cudaNeighborListBegins[newVoxel]]=particleNumber;
cudaNeighborListBegins[newVoxel]=particleNumber;
}
else{/// first one in new list
cudaNeighborList[2*particleNumber+1]=particleNumber;
cudaNeighborList[2*particleNumber]=particleNumber;
cudaNeighborListBegins[newVoxel]=particleNumber;
}
leaveLoop=true;
/// unLock
atomicExch(&(cudaSemaphore[newVoxel]),0);
}
}
}
globalRandStates[particleNumber] = localState;
}
return;
}
__global__ void setup_kernel ( hiprandState_t * state, unsigned long seed, int n ){
int id=blockIdx.x * blockDim.x + threadIdx.x;
if(id<n){
hiprand_init ( seed, id, 0, &state[id] );
}
}
/// pos force radii forceconst types todo links linkbegins
__device__ void lennardJones(){
return;
}
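/// Illustrative sketch only (an assumption, not part of the original interface and not called
/// anywhere): one possible shape for the Lennard-Jones pair interaction hinted at by the comment
/// above. The parameters epsilon, sigma and cutoff are hypothetical placeholders.
/// Returns (-dU/dr)/r, i.e. the prefactor that multiplies the distance vector (x_i - x_j) to give
/// the force on particle i; note that the update kernel in this file accumulates the potential
/// gradient in cudaForces, which is the negative of this force.
__device__ double lennardJonesForceOverR( double r2, double epsilon, double sigma, double cutoff ){
    if( r2 <= 0.0 || r2 >= cutoff*cutoff ){ return 0.0; }
    double sr2 = sigma*sigma/r2;
    double sr6 = sr2*sr2*sr2;
    double sr12 = sr6*sr6;
    /// U(r) = 4*epsilon*( (sigma/r)^12 - (sigma/r)^6 )
    /// -dU/dr = 24*epsilon*( 2*(sigma/r)^12 - (sigma/r)^6 )/r
    return 24.0*epsilon*( 2.0*sr12 - sr6 )/r2;
}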
| 166d46297c03b6bab8c9a9007d46179095753be1.cu |
# include <ReaDDyGPU.hpp>
# include <stdlib.h>
# include <stdio.h>
# include <time.h>
# include <math.h>
# include <vector>
# include <cuda.h>
# include <curand.h>
# include <cuda_runtime.h>
# include <curand_kernel.h>
# include <sm_11_atomic_functions.h>
#include <initializer_list>
/// //////////////////////////////////////////////////////////////////////////////////////////
/// /
/// TODO:
/// - data structures
/// - dynamic arrays (amortized linear runtime) +sort?
/// - periodic boundaries
/// - lattice + neighbor lists
/// - usage of shared mem
/// /
/// //////////////////////////////////////////////////////////////////////////////////////////
__global__ void update(double* cudaCoords, double* cudaForces, int* cudaTypes, double* cudaD, int * cudaNeighborList, int * cudaNeighborListBegins, double * cudaBoxSize, int * cudaSemaphore, curandState* globalRandStates, double dt, int numberParticles, double KB, double T, double maxCutoff, int * latticeSize);
__global__ void orderOne(double* cudaCoords, double* cudaForces, int* cudaTypes, int * cudaNeighborListBegins, int * cudaNeighborList, int * cudaLatticeSize, double * cudaBoxSize, curandState* globalRandStates, int numberParticles, int maxCutoff, int * cudaOrderOnePotentialsMatrix, CudaOrderOnePotential * cudaCudaOrderOnePotentials, int numberOfOrderOnePotentials, int numberOfParticleTypes, double * cudaCollisionRadiiMatrix);
__global__ void orderTwo(double* cudaCoords, double* cudaForces, int* cudaTypes, int * cudaNeighborListBegins, int * cudaNeighborList, int * cudaLatticeSize, double * cudaBoxSize, curandState* globalRandStates, int numberParticles, int maxCutoff);
__global__ void setup_kernel ( curandState * state, unsigned long seed, int n );
CudaSimulation::CudaSimulation(Simulation* simulation){
this->simulation = simulation;
}
CudaOrderOnePotential toCudaOrderOnePotential(OrderOnePotential* orderOnePotential){
CudaOrderOnePotential cudaOrderOnePotential = CudaOrderOnePotential();
cudaOrderOnePotential.subtype=orderOnePotential->subtypeID;
if(orderOnePotential->type.compare("DISK")==0){
DiskPotential * diskPotential = reinterpret_cast<DiskPotential*>(orderOnePotential);
cudaOrderOnePotential.type=1;
cudaOrderOnePotential.forceConst=diskPotential->forceConst;
std::copy ( diskPotential->center, diskPotential->center+3, cudaOrderOnePotential.origin );
//cudaOrderOnePotential.origin=diskPotential->center;
std::copy ( diskPotential->normal, diskPotential->normal+3, cudaOrderOnePotential.normal );
//cudaOrderOnePotential.normal=diskPotential->normal;
cudaOrderOnePotential.radius=diskPotential->radius;
}
else if(orderOnePotential->type.compare("CYLINDER")==0){
CylinderPotential * cylinderPotential = reinterpret_cast<CylinderPotential*>(orderOnePotential);
cudaOrderOnePotential.type=2;
cudaOrderOnePotential.forceConst=cylinderPotential->forceConst;
std::copy ( cylinderPotential->center, cylinderPotential->center+3, cudaOrderOnePotential.origin );
//cudaOrderOnePotential.origin=cylinderPotential->center;
std::copy ( cylinderPotential->normal, cylinderPotential->normal+3, cudaOrderOnePotential.normal );
//cudaOrderOnePotential.normal=cylinderPotential->normal;
cudaOrderOnePotential.radius=cylinderPotential->radius;
cudaOrderOnePotential.height=cylinderPotential->height;
}
return cudaOrderOnePotential;
}
int CudaSimulation::initialize(){
int numberOfCudaDevices = 0;
cudaGetDeviceCount(&numberOfCudaDevices);
if(numberOfCudaDevices==0){
cout << "no cuda device availible" << endl;
return 1;
}
if(simulation->testmode)
cout << endl << endl << numberOfCudaDevices << " cuda devices found" << endl << endl;
for(int i=0; i<numberOfCudaDevices; ++i){
cudaSetDevice(i);
struct cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
if(simulation->testmode){
cout << prop.name << endl;
cout << "compute capability: " << prop.major << "." << prop.minor << endl;
cout << "total global Memory: " << (float)prop.totalGlobalMem/1024.0f/1024.0f/1024.0f << "GB" << endl;
cout << "shared memory per block: " << (float)prop.sharedMemPerBlock/1024.0f << "KB" << endl;
cout << "total constant memory: " << (float)prop.totalConstMem/1024.0f << "KB" << endl;
cout << "memory clock rate: " << prop.memoryClockRate << "Hz" << endl;
cout << "memory bus width: " << prop.memoryBusWidth << "bits" << endl;
cout << "multi processors: " << prop.multiProcessorCount << endl;
cout << "clock rate: " << prop.clockRate << "Hz" << endl;
cout << "max threads per multiprocessor: " << prop.maxThreadsPerMultiProcessor << endl;
cout << "max threads dim: " << prop.maxThreadsDim[0] << " " << prop.maxThreadsDim[1] << " " << prop.maxThreadsDim[2] << endl;
cout << "max grid size: " << prop.maxGridSize[0] << " " << prop.maxGridSize[1] << " " << prop.maxGridSize[2] << endl;
cout << endl;
}
}
/// ////////////////////////////////////////////////////////////////////////
cudaDevice = 3;
numberOfThreads = 128;
/// ////////////////////////////////////////////////////////////////////////
cudaSetDevice(cudaDevice);
gridSize = (simulation->numberParticles/numberOfThreads)+1;
blockSize = numberOfThreads;
//gridSize = 10;
//blockSize = 10;
if(simulation->testmode)
cout << "use " << gridSize<< " blocks (grid size) and " << blockSize << " threads (block size) each" << endl;
maxCutoff = simulation->maxCutoff;
boxSize = simulation->latticeBounds;
/// initialize cuRand
cudaMalloc ( (void**)&globalRandStates, simulation->numberParticles * sizeof( curandState ) );
/// setup seeds
setup_kernel <<< gridSize, blockSize >>> ( globalRandStates, time(NULL), simulation->numberParticles );
/// Coords
cudaMalloc((void**)&cudaCoords,( simulation->numberParticles * 3 * sizeof ( double ) ));
copyPosToDevice();
/// Forces
cudaMalloc((void**)&cudaForces,( simulation->numberParticles * 3 * sizeof ( double ) ));
cudaMemset( cudaForces,(double)0, ( simulation->numberParticles * 3 * sizeof ( double ) ));
/// Diffusion const.
double * hostDiffConst;
hostDiffConst = new double[simulation->particleTypes.size()];
for(int i=0; i<simulation->particleTypes.size(); ++i){
hostDiffConst[i]=simulation->particleTypes[i].D;
}
cudaMalloc((void**)&cudaD,( simulation->particleTypes.size() * sizeof ( double ) ));
cudaMemcpy(cudaD, hostDiffConst, ( simulation->particleTypes.size() * sizeof ( double ) ), cudaMemcpyHostToDevice);
/// types
cudaMalloc((void**)&cudaTypes,( simulation->numberParticles * sizeof ( int ) ));
cudaMemcpy(cudaTypes, simulation->types, ( simulation->numberParticles * sizeof ( int ) ), cudaMemcpyHostToDevice);
if(createNeighborList()!=0){
cout <<"neigborlist building problem" << endl;
return 1;
}
cudaMemcpy(cudaNeighborList, hostNeighborList, ( simulation->numberParticles * 2 * sizeof ( int ) ), cudaMemcpyHostToDevice);
cudaMemcpy(cudaNeighborListBegins, hostNeighborListBegins, ( numberOfLatticeFields * sizeof ( int ) ), cudaMemcpyHostToDevice);
cudaMalloc ((void**)&cudaBoxSize, ( 6 * sizeof ( double ) ));
cudaMemcpy( cudaBoxSize, boxSize, ( 6 * sizeof ( double ) ), cudaMemcpyHostToDevice);
cudaMalloc ( (void**)&cudaLatticeSize, ( 3 * sizeof ( int ) ));
cudaMemcpy(cudaLatticeSize, latticeSize, ( 3 * sizeof ( int ) ), cudaMemcpyHostToDevice);
/// cudaSemaphores for the lattice fields
cudaMalloc((void**)&cudaSemaphore,( numberOfLatticeFields * sizeof ( int ) ));
cudaMemset( cudaSemaphore,(int)0, ( numberOfLatticeFields * sizeof ( int ) ));
/// Matrix for order one potentials = matrix[pot][types] = matrix[simulation->orderOnePotentials.size()][simulation->particleTypes.size()]
int orderOnePotentialsMatrixSize = simulation->particleTypes.size() * simulation->orderOnePotentials.size();
hostOrderOnePotentialsMatrix = new int[orderOnePotentialsMatrixSize];
for(int i=0; i<simulation->orderOnePotentials.size(); ++i){
for(int j=0; j<simulation->particleTypes.size(); ++j){
hostOrderOnePotentialsMatrix[i*simulation->particleTypes.size()+j]=0;
}
for(int j=0; j<simulation->orderOnePotentials[i]->affectedParticleTypeIds.size(); ++j){
hostOrderOnePotentialsMatrix[i*simulation->particleTypes.size()+simulation->orderOnePotentials[i]->affectedParticleTypeIds[j]]=1;
}
}
cudaMalloc((void**)&cudaOrderOnePotentialsMatrix,( orderOnePotentialsMatrixSize * sizeof ( int ) ));
cudaMemcpy(cudaOrderOnePotentialsMatrix, hostOrderOnePotentialsMatrix, ( orderOnePotentialsMatrixSize * sizeof ( int ) ), cudaMemcpyHostToDevice);
/// create cuda order one pot
hostCudaOrderOnePotentials = new CudaOrderOnePotential[simulation->orderOnePotentials.size()];
for(int i=0; i<simulation->orderOnePotentials.size(); ++i){
hostCudaOrderOnePotentials[i] = toCudaOrderOnePotential(simulation->orderOnePotentials[i]);
}
cudaMalloc((void**)&cudaCudaOrderOnePotentials,( simulation->orderOnePotentials.size() * sizeof ( CudaOrderOnePotential ) ));
cudaMemcpy(cudaCudaOrderOnePotentials, hostCudaOrderOnePotentials, ( simulation->orderOnePotentials.size() * sizeof ( CudaOrderOnePotential ) ), cudaMemcpyHostToDevice);
/// create cuda collision radii matix -> matrix[nTypes+1]*[nTypes] (+1 for default)
hostCollisionRadiiMatrix = new double[(simulation->particleTypes.size()+1)*simulation->particleTypes.size()];
for(int i=0; i<simulation->particleTypes.size(); ++i){
hostCollisionRadiiMatrix[i]=simulation->particleTypes[i].defaultRadius;
}
for(int i=0; i<simulation->particleTypes.size(); ++i){
for(int j=0; j<simulation->particleTypes.size(); ++j){
hostCollisionRadiiMatrix[(i+1)*simulation->particleTypes.size()+j]=simulation->particleTypes[i].radiiMatrix[j];
}
}
cudaMalloc((void**)&cudaCollisionRadiiMatrix,( (simulation->particleTypes.size()+1)*simulation->particleTypes.size() * sizeof ( double ) ));
cudaMemcpy(cudaCollisionRadiiMatrix, hostCollisionRadiiMatrix, ( (simulation->particleTypes.size()+1)*simulation->particleTypes.size() * sizeof ( double ) ), cudaMemcpyHostToDevice);
cudaError_t error = cudaGetLastError();
if ( cudaSuccess != error ){
printf( "cuda error during initialization: %s\n",cudaGetErrorString(error) );
return 1;
}
return 0;
}
int CudaSimulation::createNeighborList(){
numberOfLatticeFields = (boxSize[1]-boxSize[0])/maxCutoff*(boxSize[3]-boxSize[2])/maxCutoff*(boxSize[5]-boxSize[4])/maxCutoff;
latticeSize = new int[3];
latticeSize[0] = (boxSize[1]-boxSize[0])/maxCutoff;
latticeSize[1] = (boxSize[3]-boxSize[2])/maxCutoff;
latticeSize[2] = (boxSize[5]-boxSize[4])/maxCutoff;
cudaMalloc((void**)&cudaNeighborList,( simulation->numberParticles * 2 * sizeof ( int ) ));
cudaMalloc((void**)&cudaNeighborListBegins,( numberOfLatticeFields * sizeof ( int ) ));
hostNeighborList = new int[simulation->numberParticles * 2];
hostNeighborListBegins= new int[numberOfLatticeFields];
for(int i=0; i<numberOfLatticeFields; ++i){
hostNeighborListBegins[i]=-1;
}
if(simulation->testmode){
cout << "lattice informations: " << endl;
cout << "simulation size x[nm]: " << boxSize[1]-boxSize[0] << endl;
cout << "simulation size y[nm]: " << boxSize[3]-boxSize[2] << endl;
cout << "simulation size z[nm]: " << boxSize[5]-boxSize[4] << endl;
cout << "number of voxels: " << numberOfLatticeFields << endl;
cout << "voxel edge length: " << maxCutoff << endl;
cout << "lattice size x: " << latticeSize[0] << endl;
cout << "lattice size y: " << latticeSize[1] << endl;
cout << "lattice size z: " << latticeSize[2] << endl << endl;
}
for(int i=0; i<simulation->numberParticles; ++i){
int field=((int)floor((simulation->coords[3*i+2]-boxSize[4])/maxCutoff)%latticeSize[2])*latticeSize[0]*latticeSize[1]
+((int)floor((simulation->coords[3*i+1]-boxSize[2])/maxCutoff)%latticeSize[1])*latticeSize[0]
+((int)floor((simulation->coords[3*i+0]-boxSize[0])/maxCutoff)%latticeSize[0]);
/*
cout << "particle nr: " << i << endl;
cout << "x: " << simulation->coords[3*i+0] << endl;
cout << "y: " << simulation->coords[3*i+1] << endl;
cout << "z: " << simulation->coords[3*i+2] << endl;
cout << ((int)floor((simulation->coords[3*i+2]-boxSize[4])/maxCutoff)%latticeSize[2]) << endl;
cout << "-> " << ((int)floor((simulation->coords[3*i+2]-boxSize[4])/maxCutoff)%latticeSize[2])*latticeSize[0]*latticeSize[1] << endl;
cout << ((int)floor((simulation->coords[3*i+1]-boxSize[2])/maxCutoff)%latticeSize[1]) << endl;
cout << "-> " << ((int)floor((simulation->coords[3*i+1]-boxSize[2])/maxCutoff)%latticeSize[1])*latticeSize[0] << endl;
cout << ((int)floor((simulation->coords[3*i+0]-boxSize[0])/maxCutoff)%latticeSize[0]) << endl;
cout << "-> " << ((int)floor((simulation->coords[3*i+0]-boxSize[0])/maxCutoff)%latticeSize[0]) << endl;
cout << field << endl;*/
if(field<0 || field>numberOfLatticeFields){
cout << "particle is out of the Box: " << i << " [" <<simulation->coords[3*i+0] << ", " << simulation->coords[3*i+1] << ", " << simulation->coords[3*i+2] << "]" << endl;
return 1;
}
if(hostNeighborListBegins[field]==-1){
/// this particle is the first in this field. it is its own predecessor and successor
hostNeighborListBegins[field]=i;
hostNeighborList[2*i+1]=i;
hostNeighborList[2*i]=i;
}
else{
/// x f y -> x p f y
/// the particle's successor is the field's first particle
/// S'(p) = f
hostNeighborList[2*i+1]=hostNeighborListBegins[field];
/// the successor of the first particle's predecessor is this particle
/// S(P(f))=p , P(f)=x -> S'(x)=p
hostNeighborList[2*hostNeighborList[2*hostNeighborListBegins[field]]+1]=i;
/// the particle's predecessor is the predecessor of the field's first particle
/// P'(p)=P(f)=x
hostNeighborList[2*i]=hostNeighborList[2*hostNeighborListBegins[field]];
/// the field's first particle's new predecessor is the current particle
/// P'(f)=p
hostNeighborList[2*hostNeighborListBegins[field]]=i;
//hostNeighborListBegins[field]=i;
}
}
cudaError_t error = cudaGetLastError();
if ( cudaSuccess != error ){
printf( "cuda error: %s\n",cudaGetErrorString(error) );
return 1;
}
return 0;
}
int CudaSimulation::testNeighborList(){
cudaMemcpy(hostNeighborList, cudaNeighborList, ( simulation->numberParticles * 2 * sizeof ( int ) ), cudaMemcpyDeviceToHost);
cudaMemcpy(hostNeighborListBegins, cudaNeighborListBegins, ( numberOfLatticeFields * sizeof ( int ) ), cudaMemcpyDeviceToHost);
int count = 0;
int count2 = 0;
int x;
for(int i=0; i<numberOfLatticeFields; ++i){
x=hostNeighborListBegins[i];
//cout << i << ":" << x << endl;
if(x!=-1){
do{
count++;
//cout << hostNeighborList[2*x+0] << " " << x << " " << hostNeighborList[2*x+1] << endl;
x=hostNeighborList[2*x+1];
if(x==hostNeighborListBegins[i])
break;
//char a;
//cin >> a;
}while(true);
}
else{
++count2;
}
}
cout << "Neighborlist check:" << "count: "<< count << " part num: " << simulation->numberParticles << " (check 2:" << count2 << " empty fields)"<< endl;
if(count!=simulation->numberParticles){
cout << "Neighborlist broken!" << endl;
return 1;
}
//cout << "Neighborlist okay!" << endl;
cudaError_t error = cudaGetLastError();
if ( cudaSuccess != error ){
printf( "cuda error: %s\n",cudaGetErrorString(error) );
return 1;
}
return 0;
}
int CudaSimulation::copyPosToDevice(){
cudaMemcpy(cudaCoords, simulation->coords, simulation->numberParticles * 3 * sizeof(double), cudaMemcpyHostToDevice);
if(simulation->testmode){
cudaError_t error = cudaGetLastError();
if ( cudaSuccess != error ){
printf( "cuda error: %s\n",cudaGetErrorString(error) );
return 1;
}
}
return 0;
}
int CudaSimulation::copyPosFromDevice(){
cudaMemcpy(simulation->coords, cudaCoords, simulation->numberParticles * 3 * sizeof ( double ), cudaMemcpyDeviceToHost);
if(simulation->testmode){
cudaError_t error = cudaGetLastError();
if ( cudaSuccess != error ){
printf( "cuda error: %s\n",cudaGetErrorString(error) );
return 1;
}
}
return 0;
}
int CudaSimulation::simulate(){
//cout << "1" << endl;
//orderOne<<<1,1>>>(cudaCoords, cudaForces, cudaTypes, cudaNeighborListBegins, cudaNeighborList, cudaLatticeSize, cudaBoxSize, globalRandStates, simulation->numberParticles, maxCutoff, cudaOrderOnePotentialsMatrix, cudaCudaOrderOnePotentials, simulation->orderOnePotentials.size(), simulation->particleTypes.size(), cudaCollisionRadiiMatrix);
orderOne<<<gridSize,blockSize>>>(cudaCoords, cudaForces, cudaTypes, cudaNeighborListBegins, cudaNeighborList, cudaLatticeSize, cudaBoxSize, globalRandStates, simulation->numberParticles, maxCutoff, cudaOrderOnePotentialsMatrix, cudaCudaOrderOnePotentials, simulation->orderOnePotentials.size(), simulation->particleTypes.size(), cudaCollisionRadiiMatrix);
if(simulation->testmode){
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if ( cudaSuccess != error ){
printf( "order one, cuda error: %s\n",cudaGetErrorString(error) );
return 1;
}
}
//cout << "2" << endl;
//orderTwo<<<gridSize,blockSize>>>(cudaCoords, cudaForces, cudaTypes, cudaNeighborListBegins, cudaNeighborList, cudaLatticeSize, cudaBoxSize, globalRandStates, simulation->numberParticles, maxCutoff);
if(simulation->testmode){
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if ( cudaSuccess != error ){
printf( "order two, cuda error: %s\n",cudaGetErrorString(error) );
return 1;
}
}
//cout << "3" << endl;
update<<<gridSize,blockSize>>>( cudaCoords, cudaForces, cudaTypes, cudaD, cudaNeighborList, cudaNeighborListBegins, cudaBoxSize, cudaSemaphore, globalRandStates, simulation->stepSizeInPs, simulation->numberParticles, simulation->boltzmann, simulation->temperature, maxCutoff, cudaLatticeSize);
if(simulation->testmode){
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if ( cudaSuccess != error ){
printf( "update, cuda error: %s\n",cudaGetErrorString(error ));
return 1;
}
}
return 0;
}
/// /////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// cuda kernels ////////////////////////////////////////////////////////////////////////////////////////////////
/// /////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void orderOne(double* cudaCoords, double* cudaForces, int* cudaTypes, int * cudaNeighborListBegins, int * cudaNeighborList, int * cudaLatticeSize, double * cudaBoxSize, curandState* globalRandStates, int numberParticles, int maxCutoff, int * cudaOrderOnePotentialsMatrix, CudaOrderOnePotential * cudaCudaOrderOnePotentials, int numberOfOrderOnePotentials, int numberOfParticleTypes, double * cudaCollisionRadiiMatrix){
int particleNumber=blockIdx.x * blockDim.x + threadIdx.x;
if(particleNumber<numberParticles){
curandState localState = globalRandStates[particleNumber];
/// do calculation of forces and maybe reactions here ...
/// go through all order one potetntials
for(int orderOnePotential=0; orderOnePotential<numberOfOrderOnePotentials; ++orderOnePotential){
/// lookup in matrix whether they apply to the current particle type
if(cudaOrderOnePotentialsMatrix[orderOnePotential*numberOfParticleTypes+cudaTypes[particleNumber]]==1){
/// check what kind of potential it is
if(cudaCudaOrderOnePotentials[orderOnePotential].type==1){/// Disk
/// calculation depends on the normal vector. assign x,y and z coordinates to variables
int normal, side1, side2;
/// normal vector on x axis -> assign x to normal and y and z to the lateral (on Disk) directions
if(cudaCudaOrderOnePotentials[orderOnePotential].normal[0]==1){
normal=0;side1=1;side2=2;
}
/// y
else if(cudaCudaOrderOnePotentials[orderOnePotential].normal[1]==1){
normal=1;side1=0;side2=2;
}
/// z
else {
normal=2;side1=1;side2=0;
}
/// different subtypes
if(cudaCudaOrderOnePotentials[orderOnePotential].subtype==1){/// attractive
/* for(int i=0; i<3; ++i){
//cudaCoords[3*particleNumber+i]=cudaCudaOrderOnePotentials[orderOnePotential].origin[i];
//cudaForces[3*particleNumber+i]+= 10;
}
cudaCoords[3*particleNumber+0]=cudaCudaOrderOnePotentials[orderOnePotential].forceConst;
cudaCoords[3*particleNumber+1]=cudaCudaOrderOnePotentials[orderOnePotential].radius;
}
if(false && cudaCudaOrderOnePotentials[orderOnePotential].subtype==1){/// attractive
*/
/* r = distToDiskPlane;// actual
r0 = 0;// desired
if (r > r0) {
precompute = (k * (-r0 + r) / r);
gradient[0] = gradient[0]+precompute * (pointOnDiskPlane[0]-coords1[0]);
gradient[1] = gradient[1]+precompute * (pointOnDiskPlane[1]-coords1[1]);
gradient[2] = gradient[2]+precompute * (pointOnDiskPlane[2]-coords1[2]);
}*/
double r = cudaCudaOrderOnePotentials[orderOnePotential].origin[normal]-cudaCoords[3*particleNumber+normal];
cudaForces[3*particleNumber+normal]+=-cudaCudaOrderOnePotentials[orderOnePotential].forceConst*r;
//cudaForces[3*particleNumber+normal]+=cudaCudaOrderOnePotentials[orderOnePotential].origin[normal];
// force within disc plane
/*r = distToCenterWithinDiskPlane + pRadius;// actual
r0 = diskRadius;// desired
if (r > r0) {
precompute = (k * (-r0 + r) / r);
gradient[0] = gradient[0]+precompute * (center[0]-pointOnDiskPlane[0]);
gradient[1] = gradient[1]+precompute * (center[1]-pointOnDiskPlane[1]);
gradient[2] = gradient[2]+precompute * (center[2]-pointOnDiskPlane[2]);
}*/
/// particle radius!
r = sqrt(
pow(cudaCoords[3*particleNumber+side1]- cudaCudaOrderOnePotentials[orderOnePotential].origin[side1],2)
+
pow(cudaCoords[3*particleNumber+side2]- cudaCudaOrderOnePotentials[orderOnePotential].origin[side2],2)
);
if (r > cudaCudaOrderOnePotentials[orderOnePotential].radius) {
cudaForces[3*particleNumber+side1]+=
-cudaCudaOrderOnePotentials[orderOnePotential].forceConst
*(r-cudaCudaOrderOnePotentials[orderOnePotential].radius)
/r
*(cudaCudaOrderOnePotentials[orderOnePotential].origin[side1]-cudaCoords[3*particleNumber+side1]);
cudaForces[3*particleNumber+side2]+=
-cudaCudaOrderOnePotentials[orderOnePotential].forceConst
*(r-cudaCudaOrderOnePotentials[orderOnePotential].radius)
/r
*(cudaCudaOrderOnePotentials[orderOnePotential].origin[side2]-cudaCoords[3*particleNumber+side2]);
}
/*
double distToOriginWithinDisk = fminf(
cudaCudaOrderOnePotentials[orderOnePotential].radius
-
sqrt(
pow(cudaCoords[3*particleNumber+side1]- cudaCudaOrderOnePotentials[orderOnePotential].origin[side1],2)
+
pow(cudaCoords[3*particleNumber+side2]- cudaCudaOrderOnePotentials[orderOnePotential].origin[side2],2)
)
,0);
double force = cudaCudaOrderOnePotentials[orderOnePotential].forceConst * (
pow( cudaCudaOrderOnePotentials[orderOnePotential].origin[normal]-cudaCoords[3*particleNumber+normal], 2)
+
pow(distToOriginWithinDisk,2)
);
cudaForces[3*particleNumber+normal]-=force*(cudaCudaOrderOnePotentials[orderOnePotential].origin[normal]+cudaCoords[3*particleNumber+normal]);
if(distToDisk>cudaCudaOrderOnePotentials[orderOnePotential].radius){
// in my opinion this calculation is wrong, because we should take the distance to the edge of the disk instead of the distance to its center
cudaForces[3*particleNumber+side1]-=force*(cudaCudaOrderOnePotentials[orderOnePotential].origin[side1]+cudaCoords[3*particleNumber+side1]);
cudaForces[3*particleNumber+side2]-=force*(cudaCudaOrderOnePotentials[orderOnePotential].origin[side2]+cudaCoords[3*particleNumber+side2]);
}*/
}
else if(cudaCudaOrderOnePotentials[orderOnePotential].subtype==2){/// repulsive
// makes no sense ...
/*
// force along normal vector
r = distToDiskPlane;// actual
r0 = pRadius;// desired
double r_1 = distToCenterWithinDiskPlane - pRadius;
double r0_1 = diskRadius;
if (r < r0 && r_1 < r0_1) {
precompute = (k * (-r0 + r) / r);
gradient[0] = gradient[0]+ precompute * ( pointOnDiskPlane[0]-coords1[0]);
gradient[1] = gradient[1]+ precompute * ( pointOnDiskPlane[1]-coords1[1]);
gradient[2] = gradient[2]+ precompute * ( pointOnDiskPlane[2]-coords1[2]);
}*/
}
}/// end Disk
/****/ else if(cudaCudaOrderOnePotentials[orderOnePotential].type==2){/// Cylinder
int normal, side1, side2;
if(cudaCudaOrderOnePotentials[orderOnePotential].normal[0]==1){normal=0;side1=1;side2=2;}
else if(cudaCudaOrderOnePotentials[orderOnePotential].normal[1]==1){normal=1;side1=0;side2=2;}
else {normal=2;side1=1;side2=0;}
if(cudaCudaOrderOnePotentials[orderOnePotential].subtype==1){/// attractive
/* r = distToDiskPlane + pRadius;// actual
r0 = 0.5 * this.height;// desired
if (r > r0) {
precompute = (k * (-r0 + r) / r);
gradient[0] = gradient[0] + precompute * (pointOnDiskPlane[0] - coords1[0]);
gradient[1] = gradient[1] + precompute * (pointOnDiskPlane[1] - coords1[1]);
gradient[2] = gradient[2] + precompute * (pointOnDiskPlane[2] - coords1[2]);
}*/
double r = cudaCudaOrderOnePotentials[orderOnePotential].origin[normal]-cudaCoords[3*particleNumber+normal];
if(fabsf(r)>cudaCudaOrderOnePotentials[orderOnePotential].height*0.5)
cudaForces[3*particleNumber+normal]+=-cudaCudaOrderOnePotentials[orderOnePotential].forceConst*(fabsf(r)-cudaCudaOrderOnePotentials[orderOnePotential].height*0.5)/fabsf(r)*r;
/// particle radius!
r = sqrt(
pow(cudaCoords[3*particleNumber+side1]- cudaCudaOrderOnePotentials[orderOnePotential].origin[side1],2)
+
pow(cudaCoords[3*particleNumber+side2]- cudaCudaOrderOnePotentials[orderOnePotential].origin[side2],2)
);
if (r > cudaCudaOrderOnePotentials[orderOnePotential].radius) {
cudaForces[3*particleNumber+side1]+=
-cudaCudaOrderOnePotentials[orderOnePotential].forceConst
*(r-cudaCudaOrderOnePotentials[orderOnePotential].radius)
/r
*(cudaCudaOrderOnePotentials[orderOnePotential].origin[side1]-cudaCoords[3*particleNumber+side1]);
cudaForces[3*particleNumber+side2]+=
-cudaCudaOrderOnePotentials[orderOnePotential].forceConst
*(r-cudaCudaOrderOnePotentials[orderOnePotential].radius)
/r
*(cudaCudaOrderOnePotentials[orderOnePotential].origin[side2]-cudaCoords[3*particleNumber+side2]);
}
/* double distToDiskSide = fminf(
cudaCudaOrderOnePotentials[orderOnePotential].radius
-
sqrt(
pow(cudaCoords[3*particleNumber+side1]- cudaCudaOrderOnePotentials[orderOnePotential].origin[side1],2)
+
pow(cudaCoords[3*particleNumber+side2]- cudaCudaOrderOnePotentials[orderOnePotential].origin[side2],2)
)
,0);
double distToDiskPlane = cudaCudaOrderOnePotentials[orderOnePotential].origin[normal]+0.5*cudaCudaOrderOnePotentials[orderOnePotential].height<cudaCoords[3*particleNumber+normal]?
cudaCudaOrderOnePotentials[orderOnePotential].origin[normal]+0.5*cudaCudaOrderOnePotentials[orderOnePotential].height-cudaCoords[3*particleNumber+normal] : 0
+
cudaCudaOrderOnePotentials[orderOnePotential].origin[normal]-0.5*cudaCudaOrderOnePotentials[orderOnePotential].height>cudaCoords[3*particleNumber+normal]?
cudaCoords[3*particleNumber+normal]-cudaCudaOrderOnePotentials[orderOnePotential].origin[normal]-0.5*cudaCudaOrderOnePotentials[orderOnePotential].height : 0;
double force = cudaCudaOrderOnePotentials[orderOnePotential].forceConst * (
pow(distToDiskPlane, 2)
+
pow(distToDiskSide,2)
);
cudaForces[3*particleNumber+normal]-=force*(distToDiskPlane);
if(distToDiskSide>cudaCudaOrderOnePotentials[orderOnePotential].radius){
// in my opinion this calculation is wrong, because we should take the distance to the edge of the disk instead of the distance to its center
cudaForces[3*particleNumber+side1]-=force*(cudaCudaOrderOnePotentials[orderOnePotential].origin[side1]+cudaCoords[3*particleNumber+side1]);
cudaForces[3*particleNumber+side2]-=force*(cudaCudaOrderOnePotentials[orderOnePotential].origin[side2]+cudaCoords[3*particleNumber+side2]);
}*/
}
else if(cudaCudaOrderOnePotentials[orderOnePotential].subtype==2){/// repulsive
double distToDiskSide = fminf(
sqrt(
pow(cudaCoords[3*particleNumber+side1]- cudaCudaOrderOnePotentials[orderOnePotential].origin[side1],2)
+
pow(cudaCoords[3*particleNumber+side2]- cudaCudaOrderOnePotentials[orderOnePotential].origin[side2],2)
)
-
cudaCudaOrderOnePotentials[orderOnePotential].radius
,0);
double distToDiskPlane = cudaCudaOrderOnePotentials[orderOnePotential].origin[normal]+0.5*cudaCudaOrderOnePotentials[orderOnePotential].height>cudaCoords[3*particleNumber+normal]?
cudaCudaOrderOnePotentials[orderOnePotential].origin[normal]+0.5*cudaCudaOrderOnePotentials[orderOnePotential].height-cudaCoords[3*particleNumber+normal] : 0
+
cudaCudaOrderOnePotentials[orderOnePotential].origin[normal]-0.5*cudaCudaOrderOnePotentials[orderOnePotential].height<cudaCoords[3*particleNumber+normal]?
cudaCoords[3*particleNumber+normal]-cudaCudaOrderOnePotentials[orderOnePotential].origin[normal]-0.5*cudaCudaOrderOnePotentials[orderOnePotential].height : 0;
double force = cudaCudaOrderOnePotentials[orderOnePotential].forceConst * (
pow(distToDiskPlane, 2)
+
pow(distToDiskSide,2)
);
cudaForces[3*particleNumber+normal]-=force*(distToDiskPlane);
if(distToDiskSide>cudaCudaOrderOnePotentials[orderOnePotential].radius){
// in my opinion this calculation is wrong, because we should take the distance to the edge of the disk instead of the distance to its center
cudaForces[3*particleNumber+side1]-=force*(cudaCudaOrderOnePotentials[orderOnePotential].origin[side1]+cudaCoords[3*particleNumber+side1]);
cudaForces[3*particleNumber+side2]-=force*(cudaCudaOrderOnePotentials[orderOnePotential].origin[side2]+cudaCoords[3*particleNumber+side2]);
}
}
}/// end Cylinder
}/// endif order one potentials matrix
}/// end iterate over order one potentials
globalRandStates[particleNumber] = localState;
}
return;
}
__global__ void orderTwo(double* cudaCoords, double* cudaForces, int* cudaTypes, int * cudaNeighborListBegins, int * cudaNeighborList, int * cudaLatticeSize, double * cudaBoxSize, curandState* globalRandStates, int numberParticles, int maxCutoff){
int k=blockIdx.x * blockDim.x + threadIdx.x;
if(k<numberParticles){
curandState localState = globalRandStates[k];
int todo[27];
int x,y,z;
int field=((int)floor((cudaCoords[3*k+2]-cudaBoxSize[4])/maxCutoff)%cudaLatticeSize[2])*cudaLatticeSize[0]*cudaLatticeSize[1]
+((int)floor((cudaCoords[3*k+1]-cudaBoxSize[2])/maxCutoff)%cudaLatticeSize[1])*cudaLatticeSize[0]
+((int)floor((cudaCoords[3*k+0]-cudaBoxSize[0])/maxCutoff)%cudaLatticeSize[0]);
/// surrounding fields that are important for the calculation
/// TODO: CHECK! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
for(x=-1; x<2;x++){
for(y=-1; y<2;y++){
for(z=-1; z<2;z++){
todo[(x+1)+(y+1)*3+(z+1)*9]=
(
(
(field%(cudaLatticeSize[0]))
+x+cudaLatticeSize[0]
)
%cudaLatticeSize[0]
)
+cudaLatticeSize[0]*
(
(
(int)floorf
(
(float)(field%(cudaLatticeSize[0]*cudaLatticeSize[1]))
/
(float)(cudaLatticeSize[0])
)
+y+cudaLatticeSize[1]
)
%cudaLatticeSize[1]
)
+cudaLatticeSize[0]*cudaLatticeSize[1]*
(
(
(int)floorf
(
(float)(field)
/
(float)(cudaLatticeSize[0]*cudaLatticeSize[1])
)
+z+cudaLatticeSize[2]
)
%cudaLatticeSize[2]
);
}
}
}
/// do calculation of forces and maybe reactions here ...
/*
*
*first:
*do it plain
*think about parameter storage
*then dynamic arrays
*later think about accelerations due to ideas below
*
*/
/*
for every near particle with higher ID:
for every force
if Interaction Matrix != 0
calculate necessary forces
atomic add force to both interacting particles
need: - interaction matrices (radii, forces) for every force (aligned in one array, +array size)
- more parameter? how to store?
data alignment: x,y,z,type,rand?,force?
*/
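        /// Hedged sketch of the pair loop described above (illustrative only; this kernel is
        /// currently not launched by the host code). The per-type radius/force-constant matrices
        /// are not passed to this kernel yet, so the two "sketch" constants below are hypothetical
        /// placeholders. Each thread accumulates forces only on its own particle k, so every pair
        /// is evaluated twice but no atomics are required.
        const double sketchInteractionRadius = 1.0; /// placeholder, would come from a type matrix
        const double sketchForceConst = 1.0; /// placeholder, would come from a type matrix
        for(int voxel=0; voxel<27; ++voxel){
            int j = cudaNeighborListBegins[todo[voxel]];
            if(j==-1){continue;}
            do{
                if(j!=k){
                    double dx[3];
                    double r2=0.0;
                    for(int d=0; d<3; ++d){
                        dx[d]=cudaCoords[3*k+d]-cudaCoords[3*j+d];
                        /// minimum-image convention for the periodic box
                        double boxLength=cudaBoxSize[2*d+1]-cudaBoxSize[2*d+0];
                        if(dx[d]> 0.5*boxLength){dx[d]-=boxLength;}
                        if(dx[d]<-0.5*boxLength){dx[d]+=boxLength;}
                        r2+=dx[d]*dx[d];
                    }
                    double r=sqrt(r2);
                    if(r>0.0 && r<sketchInteractionRadius){
                        /// harmonic repulsion U = forceConst/2*(R-r)^2; cudaForces stores the
                        /// potential gradient (see the update kernel), i.e. -forceConst*(R-r)/r*dx
                        double gradientPrefactor=-sketchForceConst*(sketchInteractionRadius-r)/r;
                        for(int d=0; d<3; ++d){
                            cudaForces[3*k+d]+=gradientPrefactor*dx[d];
                        }
                    }
                }
                j=cudaNeighborList[2*j+1];
            }while(j!=cudaNeighborListBegins[todo[voxel]]);
        }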
/*
* call voxel per warp(n threads)
* load first n coords in shared mem
* calculate distances to particles in surrounding fields (always load one particle and calc. n dist.)
* calculate all necessary forces somehow
*/
globalRandStates[k] = localState;
}
return;
}
__global__ void update(double* cudaCoords, double* cudaForces, int* cudaTypes, double* cudaD, int * cudaNeighborList, int * cudaNeighborListBegins, double * cudaBoxSize, int * cudaSemaphore, curandState* globalRandStates, double dt, int numberParticles, double KB, double T, double maxCutoff, int * cudaLatticeSize){
int particleNumber=blockIdx.x * blockDim.x + threadIdx.x;
if(particleNumber<numberParticles){
curandState localState = globalRandStates[particleNumber];
int oldVoxel= ((int)floor((cudaCoords[3*particleNumber+2]-cudaBoxSize[4])/maxCutoff)%cudaLatticeSize[2])*cudaLatticeSize[0]*cudaLatticeSize[1]
+((int)floor((cudaCoords[3*particleNumber+1]-cudaBoxSize[2])/maxCutoff)%cudaLatticeSize[1])*cudaLatticeSize[0]
+((int)floor((cudaCoords[3*particleNumber+0]-cudaBoxSize[0])/maxCutoff)%cudaLatticeSize[0]);
/// check for periodic boundaries ...
/// /
for(int dimension=0; dimension<3; ++dimension){
/// apply diffusion and forces -> update positions
/// x(t+dt) = x(t) - dt*D*(F(x(t))/kT) + sqrt(2Ddt)*N(0,1)
cudaCoords[particleNumber*3+dimension] += -dt*cudaD[cudaTypes[particleNumber]]*cudaForces[particleNumber*3+dimension]/KB/T + sqrt(2*cudaD[cudaTypes[particleNumber]]*dt)*curand_normal( &localState );
//cudaCoords[particleNumber*3+dimension] += -dt*cudaD[cudaTypes[particleNumber]]*cudaForces[particleNumber*3+dimension]/KB/T ;
//cudaCoords[particleNumber*3+dimension] += cudaForces[particleNumber*3+dimension] ;
cudaForces[particleNumber*3+dimension]=0.0f;
/// periodic boundary condition
while(cudaCoords[3*particleNumber+dimension]>cudaBoxSize[dimension*2+1]){cudaCoords[3*particleNumber+dimension]=cudaCoords[3*particleNumber+dimension]-(cudaBoxSize[dimension*2+1]-cudaBoxSize[dimension*2+0]);}
while(cudaCoords[3*particleNumber+dimension]<cudaBoxSize[dimension*2+0]){cudaCoords[3*particleNumber+dimension]=cudaCoords[3*particleNumber+dimension]+(cudaBoxSize[dimension*2+1]-cudaBoxSize[dimension*2+0]);}
}
/// lattice field changed?
int newVoxel= ((int)floor((cudaCoords[3*particleNumber+2]-cudaBoxSize[4])/maxCutoff)%cudaLatticeSize[2])*cudaLatticeSize[0]*cudaLatticeSize[1]
+((int)floor((cudaCoords[3*particleNumber+1]-cudaBoxSize[2])/maxCutoff)%cudaLatticeSize[1])*cudaLatticeSize[0]
+((int)floor((cudaCoords[3*particleNumber+0]-cudaBoxSize[0])/maxCutoff)%cudaLatticeSize[0]);
/// apply voxel-changes ...
if(newVoxel!=oldVoxel){
bool leaveLoop = false;
/// delete form old list
while(!leaveLoop){
/// Lock
if(atomicExch(&(cudaSemaphore[oldVoxel]),1)==0){
int prev=cudaNeighborList[2*particleNumber];
int next=cudaNeighborList[2*particleNumber+1];
cudaNeighborList[2*prev+1]=next;
cudaNeighborList[2*next]=prev;
/// was this particle the beginning of the linked list?
if(cudaNeighborListBegins[oldVoxel]==particleNumber){
/// was the particle the only one in this field?
if(cudaNeighborList[2*particleNumber]==particleNumber){
cudaNeighborListBegins[oldVoxel]=-1;
}
else{
cudaNeighborListBegins[oldVoxel]=cudaNeighborList[2*particleNumber+1];
}
}
leaveLoop=true;
/// unLock
atomicExch(&(cudaSemaphore[oldVoxel]),0);
}
}
leaveLoop = false;
/// push on top of the new list
while(!leaveLoop){
/// Lock
if(atomicExch(&(cudaSemaphore[newVoxel]),1)==0){
/// is new list empty?
if(cudaNeighborListBegins[newVoxel]!=-1){/// no
cudaNeighborList[2*particleNumber]=cudaNeighborList[2*cudaNeighborListBegins[newVoxel]];
cudaNeighborList[2*particleNumber+1]=cudaNeighborListBegins[newVoxel];
cudaNeighborList[2*cudaNeighborList[2*cudaNeighborListBegins[newVoxel]]+1]=particleNumber;
cudaNeighborList[2*cudaNeighborListBegins[newVoxel]]=particleNumber;
cudaNeighborListBegins[newVoxel]=particleNumber;
}
else{/// first one in new list
cudaNeighborList[2*particleNumber+1]=particleNumber;
cudaNeighborList[2*particleNumber]=particleNumber;
cudaNeighborListBegins[newVoxel]=particleNumber;
}
leaveLoop=true;
/// unLock
atomicExch(&(cudaSemaphore[newVoxel]),0);
}
}
}
globalRandStates[particleNumber] = localState;
}
return;
}
__global__ void setup_kernel ( curandState * state, unsigned long seed, int n ){
int id=blockIdx.x * blockDim.x + threadIdx.x;
if(id<n){
curand_init ( seed, id, 0, &state[id] );
}
}
/// intended inputs: positions, forces, radii, force constants, types, todo (surrounding voxels), neighbor links, link begins
__device__ void lennardJones(){
return;
}
|
6979954b4640f08a07182d9bb8997b594bee890d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CUAPI.h"
#include "CUFLU.h"
#ifdef GPU
#if ( MODEL == HYDRO )
#if ( FLU_SCHEME == RTVD )
__global__ void CUFLU_FluidSolver_RTVD(
real g_Fluid_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real g_Fluid_Out[][NCOMP_TOTAL][ CUBE(PS2) ],
real g_Flux [][9][NCOMP_TOTAL][ SQR(PS2) ],
const double g_Corner[][3],
const real g_Pot_USG[][ CUBE(USG_NXT_F) ],
const real dt, const real _dh, const bool StoreFlux,
const bool XYZ, const real MinDens, const real MinPres, const real MinEint,
const EoS_t EoS );
#elif ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP )
__global__
void CUFLU_FluidSolver_MHM(
const real g_Flu_Array_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real g_Flu_Array_Out[][NCOMP_TOTAL][ CUBE(PS2) ],
const real g_Mag_Array_In [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ],
real g_Mag_Array_Out[][NCOMP_MAG][ PS2P1*SQR(PS2) ],
char g_DE_Array_Out [][ CUBE(PS2) ],
real g_Flux_Array [][9][NCOMP_TOTAL][ SQR(PS2) ],
real g_Ele_Array [][9][NCOMP_ELE][ PS2P1*PS2 ],
const double g_Corner_Array [][3],
const real g_Pot_Array_USG[][ CUBE(USG_NXT_F) ],
real g_PriVar [] [NCOMP_LR ][ CUBE(FLU_NXT) ],
real g_Slope_PPM [][3][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ],
real g_FC_Var [][6][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ],
real g_FC_Flux [][3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ],
real g_FC_Mag_Half [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ],
real g_EC_Ele [][NCOMP_MAG][ CUBE(N_EC_ELE) ],
const real dt, const real dh,
const bool StoreFlux, const bool StoreElectric,
const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const double Time,
const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func,
const real MinDens, const real MinPres, const real MinEint,
const real DualEnergySwitch,
const bool NormPassive, const int NNorm,
const bool FracPassive, const int NFrac,
const bool JeansMinPres, const real JeansMinPres_Coeff,
const EoS_t EoS );
#elif ( FLU_SCHEME == CTU )
__global__
void CUFLU_FluidSolver_CTU(
const real g_Flu_Array_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real g_Flu_Array_Out[][NCOMP_TOTAL][ CUBE(PS2) ],
const real g_Mag_Array_In [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ],
real g_Mag_Array_Out[][NCOMP_MAG][ PS2P1*SQR(PS2) ],
char g_DE_Array_Out [][ CUBE(PS2) ],
real g_Flux_Array [][9][NCOMP_TOTAL][ SQR(PS2) ],
real g_Ele_Array [][9][NCOMP_ELE][ PS2P1*PS2 ],
const double g_Corner_Array [][3],
const real g_Pot_Array_USG[][ CUBE(USG_NXT_F) ],
real g_PriVar [] [NCOMP_LR ][ CUBE(FLU_NXT) ],
real g_Slope_PPM [][3][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ],
real g_FC_Var [][6][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ],
real g_FC_Flux [][3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ],
real g_FC_Mag_Half [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ],
real g_EC_Ele [][NCOMP_MAG][ CUBE(N_EC_ELE) ],
const real dt, const real dh,
const bool StoreFlux, const bool StoreElectric,
const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const double Time,
const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func,
const real MinDens, const real MinPres, const real MinEint,
const real DualEnergySwitch,
const bool NormPassive, const int NNorm,
const bool FracPassive, const int NFrac,
const bool JeansMinPres, const real JeansMinPres_Coeff,
const EoS_t EoS );
#endif // FLU_SCHEME
#elif ( MODEL == ELBDM )
__global__ void CUFLU_ELBDMSolver( real g_Fluid_In [][FLU_NIN ][ FLU_NXT*FLU_NXT*FLU_NXT ],
real g_Fluid_Out[][FLU_NOUT][ PS2*PS2*PS2 ],
real g_Flux [][9][NFLUX_TOTAL][ PS2*PS2 ],
const real dt, const real _dh, const real Eta, const bool StoreFlux,
const real Taylor3_Coeff, const bool XYZ, const real MinDens );
#else
#error : ERROR : unsupported MODEL !!
#endif // MODEL
#ifndef GRAVITY
static ExtAcc_t GPUExtAcc_Ptr = NULL;
#endif
// device pointers
extern real (*d_Flu_Array_F_In )[FLU_NIN ][ CUBE(FLU_NXT) ];
extern real (*d_Flu_Array_F_Out)[FLU_NOUT][ CUBE(PS2) ];
extern real (*d_Flux_Array)[9][NFLUX_TOTAL][ SQR(PS2) ];
extern double (*d_Corner_Array_F)[3];
#if ( MODEL == HYDRO )
#ifdef DUAL_ENERGY
extern char (*d_DE_Array_F_Out)[ CUBE(PS2) ];
#else
static char (*d_DE_Array_F_Out)[ CUBE(PS2) ] = NULL;
#endif
#ifdef MHD
extern real (*d_Mag_Array_F_In )[NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ];
extern real (*d_Mag_Array_F_Out)[NCOMP_MAG][ PS2P1*SQR(PS2) ];
extern real (*d_Ele_Array )[9][NCOMP_ELE][ PS2P1*PS2 ];
#else
static real (*d_Mag_Array_F_In )[NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ] = NULL;
static real (*d_Mag_Array_F_Out)[NCOMP_MAG][ PS2P1*SQR(PS2) ] = NULL;
static real (*d_Ele_Array )[9][NCOMP_ELE][ PS2P1*PS2 ] = NULL;
#endif
#if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU )
extern real (*d_PriVar) [NCOMP_LR ][ CUBE(FLU_NXT) ];
extern real (*d_Slope_PPM)[3][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ];
extern real (*d_FC_Var) [6][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ];
extern real (*d_FC_Flux) [3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ];
#ifdef MHD
extern real (*d_FC_Mag_Half)[NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ];
extern real (*d_EC_Ele )[NCOMP_MAG][ CUBE(N_EC_ELE) ];
#else
static real (*d_FC_Mag_Half)[NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ] = NULL;
static real (*d_EC_Ele )[NCOMP_MAG][ CUBE(N_EC_ELE) ] = NULL;
#endif // MHD
#endif // FLU_SCHEME
#endif // #if ( MODEL == HYDRO )
#ifdef UNSPLIT_GRAVITY
extern real (*d_Pot_Array_USG_F)[ CUBE(USG_NXT_F) ];
#else
static real (*d_Pot_Array_USG_F)[ CUBE(USG_NXT_F) ] = NULL;
#endif
extern hipStream_t *Stream;
//-------------------------------------------------------------------------------------------------------
// Function : CUAPI_Asyn_FluidSolver
// Description : 1. MODEL == HYDRO : use GPU to solve the Euler equations by different schemes
// --> invoke the kernel "CUFLU_FluidSolver_XXX"
// 2. MODEL == ELBDM : use GPU to solve the kinematic operator in the Schrodinger equation
// --> invoke the kernel "CUFLU_ELBDMSolver"
//
// ***********************************************************
// ** Asynchronous Function **
// ** **
// ** will return before the execution in GPU is complete **
// ***********************************************************
//
// Note : 1. Use streams for the asynchronous memory copy between device and host
// 2. Prefix "d" : for pointers pointing to the "Device" memory space
// Prefix "h" : for pointers pointing to the "Host" memory space
// 3. Use the input parameter "XYZ" to control the order of update for dimensional-splitting
// method (currently only RTVD)
// 4. Currently four hydro schemes are supported :
// 1. Relaxing TVD scheme (RTVD ) --> split
// 2. MUSCL-Hancock scheme (MHM ) --> unsplit
// 3. MUSCL-Hancock scheme with Riemann prediction (MHM_RP) --> unsplit
// 4. Corner-Transport-Upwind scheme (CTU ) --> unsplit
//
// Parameter : h_Flu_Array_In : Host array to store the input fluid variables
// h_Flu_Array_Out : Host array to store the output fluid variables
// h_Mag_Array_In : Host array storing the input B field (for MHD only)
// h_Mag_Array_Out : Host array to store the output B field (for MHD only)
// h_DE_Array_Out : Host array to store the dual-energy status
// h_Flux_Array : Host array to store the output fluxes
// h_Ele_Array : Host array to store the output electric field (for MHD only)
// h_Corner_Array : Host array storing the physical corner coordinates of each patch group
// h_Pot_Array_USG : Host array storing the input potential for UNSPLIT_GRAVITY
// NPatchGroup : Number of patch groups evaluated simultaneously by GPU
// dt : Time interval to advance solution
// dh : Cell size
// StoreFlux : true --> store the coarse-fine fluxes
// StoreElectric : true --> store the coarse-fine electric field
// XYZ : true : x->y->z ( forward sweep)
// false : z->y->x (backward sweep)
// ~ useless in directionally unsplit schemes
// LR_Limiter : Slope limiter for the data reconstruction in the MHM/MHM_RP/CTU schemes
// (0/1/2/3/4) = (vanLeer/generalized MinMod/vanAlbada/
// vanLeer + generalized MinMod/extrema-preserving) limiter
// MinMod_Coeff : Coefficient of the generalized MinMod limiter
// ELBDM_Eta : Particle mass / Planck constant
// ELBDM_Taylor3_Coeff : Coefficient in front of the third term in the Taylor expansion for ELBDM
// ELBDM_Taylor3_Auto : true --> Determine ELBDM_Taylor3_Coeff automatically by invoking the
// function "ELBDM_SetTaylor3Coeff"
// Time : Current physical time (for UNSPLIT_GRAVITY only)
// UsePot : Add self-gravity and/or external potential (for UNSPLIT_GRAVITY only)
// ExtAcc : Add external acceleration (for UNSPLIT_GRAVITY only)
// MinDens/Pres/Eint : Density, pressure, and internal energy floors
// DualEnergySwitch : Use the dual-energy formalism if E_int/E_kin < DualEnergySwitch
// NormPassive : true --> normalize passive scalars so that the sum of their mass density
// is equal to the gas mass density
// NNorm : Number of passive scalars to be normalized
// --> Should be set to the global variable "PassiveNorm_NVar"
// FracPassive : true --> convert passive scalars to mass fraction during data reconstruction
// NFrac : Number of passive scalars for the option "FracPassive"
// --> Should be set to the global variable "PassiveIntFrac_NVar"
// JeansMinPres : Apply minimum pressure estimated from the Jeans length
// JeansMinPres_Coeff : Coefficient used by JeansMinPres = G*(Jeans_NCell*Jeans_dh)^2/(Gamma*pi);
// GPU_NStream : Number of CUDA streams for the asynchronous memory copy
//-------------------------------------------------------------------------------------------------------
void CUAPI_Asyn_FluidSolver( real h_Flu_Array_In[][FLU_NIN ][ CUBE(FLU_NXT) ],
real h_Flu_Array_Out[][FLU_NOUT][ CUBE(PS2) ],
real h_Mag_Array_In[][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ],
real h_Mag_Array_Out[][NCOMP_MAG][ PS2P1*SQR(PS2) ],
char h_DE_Array_Out[][ CUBE(PS2) ],
real h_Flux_Array[][9][NFLUX_TOTAL][ SQR(PS2) ],
real h_Ele_Array[][9][NCOMP_ELE][ PS2P1*PS2 ],
const double h_Corner_Array[][3],
real h_Pot_Array_USG[][ CUBE(USG_NXT_F) ],
const int NPatchGroup, const real dt, const real dh,
const bool StoreFlux, const bool StoreElectric,
const bool XYZ, const LR_Limiter_t LR_Limiter, const real MinMod_Coeff,
const real ELBDM_Eta, real ELBDM_Taylor3_Coeff, const bool ELBDM_Taylor3_Auto,
const double Time, const bool UsePot, const OptExtAcc_t ExtAcc,
const real MinDens, const real MinPres, const real MinEint,
const real DualEnergySwitch,
const bool NormPassive, const int NNorm,
const bool FracPassive, const int NFrac,
const bool JeansMinPres, const real JeansMinPres_Coeff,
const int GPU_NStream )
{
// check
# ifdef GAMER_DEBUG
# if ( MODEL == HYDRO )
# ifdef UNSPLIT_GRAVITY
if ( UsePot )
{
if ( h_Pot_Array_USG == NULL ) Aux_Error( ERROR_INFO, "h_Pot_Array_USG == NULL !!\n" );
if ( d_Pot_Array_USG_F == NULL ) Aux_Error( ERROR_INFO, "d_Pot_Array_USG_F == NULL !!\n" );
}
if ( ExtAcc )
{
if ( h_Corner_Array == NULL ) Aux_Error( ERROR_INFO, "h_Corner_Array == NULL !!\n" );
if ( d_Corner_Array_F == NULL ) Aux_Error( ERROR_INFO, "d_Corner_Array_F == NULL !!\n" );
}
# endif
# elif ( MODEL == ELBDM )
# else
# warning : DO YOU WANT TO ADD SOMETHING HERE FOR THE NEW MODEL ??
# endif
if ( StoreFlux )
{
if ( d_Flux_Array == NULL ) Aux_Error( ERROR_INFO, "d_Flux_Array == NULL !!\n" );
if ( h_Flux_Array == NULL ) Aux_Error( ERROR_INFO, "h_Flux_Array == NULL !!\n" );
}
# ifdef MHD
if ( h_Mag_Array_In == NULL ) Aux_Error( ERROR_INFO, "h_Mag_Array_In == NULL !!\n" );
if ( d_Mag_Array_F_In == NULL ) Aux_Error( ERROR_INFO, "d_Mag_Array_F_In == NULL !!\n" );
if ( h_Mag_Array_Out == NULL ) Aux_Error( ERROR_INFO, "h_Mag_Array_Out == NULL !!\n" );
if ( d_Mag_Array_F_Out == NULL ) Aux_Error( ERROR_INFO, "d_Mag_Array_F_Out == NULL !!\n" );
if ( d_FC_Mag_Half == NULL ) Aux_Error( ERROR_INFO, "d_FC_Mag_Half == NULL !!\n" );
if ( d_EC_Ele == NULL ) Aux_Error( ERROR_INFO, "d_EC_Ele == NULL !!\n" );
if ( StoreElectric )
{
if ( d_Ele_Array == NULL ) Aux_Error( ERROR_INFO, "d_Ele_Array == NULL !!\n" );
if ( h_Ele_Array == NULL ) Aux_Error( ERROR_INFO, "h_Ele_Array == NULL !!\n" );
}
# endif
# endif // #ifdef GAMER_DEBUG
const dim3 BlockDim_FluidSolver ( FLU_BLOCK_SIZE_X, FLU_BLOCK_SIZE_Y, 1 ); // for the fluid solvers
// model-dependent operations
# if ( MODEL == HYDRO )
# elif ( MODEL == ELBDM )
// evaluate the optimized Taylor expansion coefficient
if ( ELBDM_Taylor3_Auto ) ELBDM_Taylor3_Coeff = ELBDM_SetTaylor3Coeff( dt, dh, ELBDM_Eta );
# else
# error : ERROR : unsupported MODEL !!
# endif // MODEL
int *NPatch_per_Stream = new int [GPU_NStream];
int *UsedPatch = new int [GPU_NStream];
int *Flu_MemSize_In = new int [GPU_NStream];
int *Flu_MemSize_Out = new int [GPU_NStream];
int *Flux_MemSize = new int [GPU_NStream];
# ifdef MHD
int *Mag_MemSize_In = new int [GPU_NStream];
int *Mag_MemSize_Out = new int [GPU_NStream];
int *Ele_MemSize = new int [GPU_NStream];
# endif
# ifdef UNSPLIT_GRAVITY
int *USG_MemSize = new int [GPU_NStream];
int *Corner_MemSize = new int [GPU_NStream];
# endif
# ifdef DUAL_ENERGY
int *DE_MemSize_Out = new int [GPU_NStream];
# endif
// set the number of patches of each stream
UsedPatch[0] = 0;
if ( GPU_NStream == 1 ) NPatch_per_Stream[0] = NPatchGroup;
else
{
for (int s=0; s<GPU_NStream-1; s++)
{
NPatch_per_Stream[s] = NPatchGroup / GPU_NStream;
UsedPatch[s+1] = UsedPatch[s] + NPatch_per_Stream[s];
}
NPatch_per_Stream[GPU_NStream-1] = NPatchGroup - UsedPatch[GPU_NStream-1];
}
// set the size of data to be transferred into GPU in each stream
for (int s=0; s<GPU_NStream; s++)
{
Flu_MemSize_In [s] = sizeof(real )*NPatch_per_Stream[s]*FLU_NIN *CUBE(FLU_NXT);
Flu_MemSize_Out[s] = sizeof(real )*NPatch_per_Stream[s]*FLU_NOUT*CUBE(PS2);
Flux_MemSize [s] = sizeof(real )*NPatch_per_Stream[s]*NFLUX_TOTAL*9*PS2*PS2;
# ifdef MHD
Mag_MemSize_In [s] = sizeof(real )*NPatch_per_Stream[s]*NCOMP_MAG*FLU_NXT_P1*SQR(FLU_NXT);
Mag_MemSize_Out[s] = sizeof(real )*NPatch_per_Stream[s]*NCOMP_MAG*PS2P1*SQR(PS2);
Ele_MemSize [s] = sizeof(real )*NPatch_per_Stream[s]*NCOMP_ELE*9*PS2P1*PS2;
# endif
# ifdef UNSPLIT_GRAVITY
USG_MemSize [s] = sizeof(real )*NPatch_per_Stream[s]*CUBE(USG_NXT_F);
Corner_MemSize [s] = sizeof(double)*NPatch_per_Stream[s]*3;
# endif
# ifdef DUAL_ENERGY
DE_MemSize_Out [s] = sizeof(char )*NPatch_per_Stream[s]*CUBE(PS2);
# endif
}
// a. copy data from host to device
//=========================================================================================
for (int s=0; s<GPU_NStream; s++)
{
if ( NPatch_per_Stream[s] == 0 ) continue;
CUDA_CHECK_ERROR( hipMemcpyAsync( d_Flu_Array_F_In + UsedPatch[s], h_Flu_Array_In + UsedPatch[s],
Flu_MemSize_In[s], hipMemcpyHostToDevice, Stream[s] ) );
# ifdef MHD
CUDA_CHECK_ERROR( hipMemcpyAsync( d_Mag_Array_F_In + UsedPatch[s], h_Mag_Array_In + UsedPatch[s],
Mag_MemSize_In[s], hipMemcpyHostToDevice, Stream[s] ) );
# endif
# ifdef UNSPLIT_GRAVITY
if ( UsePot )
CUDA_CHECK_ERROR( hipMemcpyAsync( d_Pot_Array_USG_F + UsedPatch[s], h_Pot_Array_USG + UsedPatch[s],
USG_MemSize [s], hipMemcpyHostToDevice, Stream[s] ) );
if ( ExtAcc )
CUDA_CHECK_ERROR( hipMemcpyAsync( d_Corner_Array_F + UsedPatch[s], h_Corner_Array + UsedPatch[s],
Corner_MemSize[s], hipMemcpyHostToDevice, Stream[s] ) );
# endif
} // for (int s=0; s<GPU_NStream; s++)
// b. execute the kernel
//=========================================================================================
for (int s=0; s<GPU_NStream; s++)
{
if ( NPatch_per_Stream[s] == 0 ) continue;
# if ( MODEL == HYDRO )
# if ( FLU_SCHEME == RTVD )
hipLaunchKernelGGL(( CUFLU_FluidSolver_RTVD) , dim3(NPatch_per_Stream[s]), dim3(BlockDim_FluidSolver), 0, Stream[s] ,
d_Flu_Array_F_In + UsedPatch[s],
d_Flu_Array_F_Out + UsedPatch[s],
d_Flux_Array + UsedPatch[s],
d_Corner_Array_F + UsedPatch[s],
d_Pot_Array_USG_F + UsedPatch[s],
dt, 1.0/dh, StoreFlux, XYZ, MinDens, MinPres, MinEint, EoS );
# elif ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP )
hipLaunchKernelGGL(( CUFLU_FluidSolver_MHM) , dim3(NPatch_per_Stream[s]), dim3(BlockDim_FluidSolver), 0, Stream[s] ,
d_Flu_Array_F_In + UsedPatch[s],
d_Flu_Array_F_Out + UsedPatch[s],
d_Mag_Array_F_In + UsedPatch[s],
d_Mag_Array_F_Out + UsedPatch[s],
d_DE_Array_F_Out + UsedPatch[s],
d_Flux_Array + UsedPatch[s],
d_Ele_Array + UsedPatch[s],
d_Corner_Array_F + UsedPatch[s],
d_Pot_Array_USG_F + UsedPatch[s],
d_PriVar + UsedPatch[s],
d_Slope_PPM + UsedPatch[s],
d_FC_Var + UsedPatch[s],
d_FC_Flux + UsedPatch[s],
d_FC_Mag_Half + UsedPatch[s],
d_EC_Ele + UsedPatch[s],
dt, dh, StoreFlux, StoreElectric, LR_Limiter, MinMod_Coeff,
Time, UsePot, ExtAcc, GPUExtAcc_Ptr, MinDens, MinPres, MinEint,
DualEnergySwitch, NormPassive, NNorm, FracPassive, NFrac,
JeansMinPres, JeansMinPres_Coeff, EoS );
# elif ( FLU_SCHEME == CTU )
hipLaunchKernelGGL(( CUFLU_FluidSolver_CTU) , dim3(NPatch_per_Stream[s]), dim3(BlockDim_FluidSolver), 0, Stream[s] ,
d_Flu_Array_F_In + UsedPatch[s],
d_Flu_Array_F_Out + UsedPatch[s],
d_Mag_Array_F_In + UsedPatch[s],
d_Mag_Array_F_Out + UsedPatch[s],
d_DE_Array_F_Out + UsedPatch[s],
d_Flux_Array + UsedPatch[s],
d_Ele_Array + UsedPatch[s],
d_Corner_Array_F + UsedPatch[s],
d_Pot_Array_USG_F + UsedPatch[s],
d_PriVar + UsedPatch[s],
d_Slope_PPM + UsedPatch[s],
d_FC_Var + UsedPatch[s],
d_FC_Flux + UsedPatch[s],
d_FC_Mag_Half + UsedPatch[s],
d_EC_Ele + UsedPatch[s],
dt, dh, StoreFlux, StoreElectric, LR_Limiter, MinMod_Coeff,
Time, UsePot, ExtAcc, GPUExtAcc_Ptr, MinDens, MinPres, MinEint,
DualEnergySwitch, NormPassive, NNorm, FracPassive, NFrac,
JeansMinPres, JeansMinPres_Coeff, EoS );
# else
# error : unsupported GPU hydro scheme
# endif // FLU_SCHEME
# elif ( MODEL == ELBDM )
hipLaunchKernelGGL(( CUFLU_ELBDMSolver) , dim3(NPatch_per_Stream[s]), dim3(BlockDim_FluidSolver), 0, Stream[s] ,
d_Flu_Array_F_In + UsedPatch[s],
d_Flu_Array_F_Out + UsedPatch[s],
d_Flux_Array + UsedPatch[s],
dt, 1.0/dh, ELBDM_Eta, StoreFlux, ELBDM_Taylor3_Coeff, XYZ, MinDens );
# else
# error : unsupported MODEL !!
# endif // MODEL
CUDA_CHECK_ERROR( hipGetLastError() );
} // for (int s=0; s<GPU_NStream; s++)
// c. copy data from device to host
//=========================================================================================
for (int s=0; s<GPU_NStream; s++)
{
if ( NPatch_per_Stream[s] == 0 ) continue;
CUDA_CHECK_ERROR( hipMemcpyAsync( h_Flu_Array_Out + UsedPatch[s], d_Flu_Array_F_Out + UsedPatch[s],
Flu_MemSize_Out[s], hipMemcpyDeviceToHost, Stream[s] ) );
if ( StoreFlux )
CUDA_CHECK_ERROR( hipMemcpyAsync( h_Flux_Array + UsedPatch[s], d_Flux_Array + UsedPatch[s],
Flux_MemSize[s], hipMemcpyDeviceToHost, Stream[s] ) );
# ifdef MHD
CUDA_CHECK_ERROR( hipMemcpyAsync( h_Mag_Array_Out + UsedPatch[s], d_Mag_Array_F_Out + UsedPatch[s],
Mag_MemSize_Out[s], hipMemcpyDeviceToHost, Stream[s] ) );
if ( StoreElectric )
CUDA_CHECK_ERROR( hipMemcpyAsync( h_Ele_Array + UsedPatch[s], d_Ele_Array + UsedPatch[s],
Ele_MemSize[s], hipMemcpyDeviceToHost, Stream[s] ) );
# endif
# ifdef DUAL_ENERGY
CUDA_CHECK_ERROR( hipMemcpyAsync( h_DE_Array_Out + UsedPatch[s], d_DE_Array_F_Out + UsedPatch[s],
DE_MemSize_Out[s], hipMemcpyDeviceToHost, Stream[s] ) );
# endif
} // for (int s=0; s<GPU_NStream; s++)
delete [] NPatch_per_Stream;
delete [] UsedPatch;
delete [] Flu_MemSize_In;
delete [] Flu_MemSize_Out;
delete [] Flux_MemSize;
# ifdef MHD
delete [] Mag_MemSize_In;
delete [] Mag_MemSize_Out;
delete [] Ele_MemSize;
# endif
# ifdef UNSPLIT_GRAVITY
delete [] USG_MemSize;
delete [] Corner_MemSize;
# endif
# ifdef DUAL_ENERGY
delete [] DE_MemSize_Out;
# endif
} // FUNCTION : CUAPI_Asyn_FluidSolver
#endif // #ifdef GPU
| 6979954b4640f08a07182d9bb8997b594bee890d.cu | #include "CUAPI.h"
#include "CUFLU.h"
#ifdef GPU
#if ( MODEL == HYDRO )
#if ( FLU_SCHEME == RTVD )
__global__ void CUFLU_FluidSolver_RTVD(
real g_Fluid_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real g_Fluid_Out[][NCOMP_TOTAL][ CUBE(PS2) ],
real g_Flux [][9][NCOMP_TOTAL][ SQR(PS2) ],
const double g_Corner[][3],
const real g_Pot_USG[][ CUBE(USG_NXT_F) ],
const real dt, const real _dh, const bool StoreFlux,
const bool XYZ, const real MinDens, const real MinPres, const real MinEint,
const EoS_t EoS );
#elif ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP )
__global__
void CUFLU_FluidSolver_MHM(
const real g_Flu_Array_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real g_Flu_Array_Out[][NCOMP_TOTAL][ CUBE(PS2) ],
const real g_Mag_Array_In [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ],
real g_Mag_Array_Out[][NCOMP_MAG][ PS2P1*SQR(PS2) ],
char g_DE_Array_Out [][ CUBE(PS2) ],
real g_Flux_Array [][9][NCOMP_TOTAL][ SQR(PS2) ],
real g_Ele_Array [][9][NCOMP_ELE][ PS2P1*PS2 ],
const double g_Corner_Array [][3],
const real g_Pot_Array_USG[][ CUBE(USG_NXT_F) ],
real g_PriVar [] [NCOMP_LR ][ CUBE(FLU_NXT) ],
real g_Slope_PPM [][3][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ],
real g_FC_Var [][6][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ],
real g_FC_Flux [][3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ],
real g_FC_Mag_Half [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ],
real g_EC_Ele [][NCOMP_MAG][ CUBE(N_EC_ELE) ],
const real dt, const real dh,
const bool StoreFlux, const bool StoreElectric,
const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const double Time,
const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func,
const real MinDens, const real MinPres, const real MinEint,
const real DualEnergySwitch,
const bool NormPassive, const int NNorm,
const bool FracPassive, const int NFrac,
const bool JeansMinPres, const real JeansMinPres_Coeff,
const EoS_t EoS );
#elif ( FLU_SCHEME == CTU )
__global__
void CUFLU_FluidSolver_CTU(
const real g_Flu_Array_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real g_Flu_Array_Out[][NCOMP_TOTAL][ CUBE(PS2) ],
const real g_Mag_Array_In [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ],
real g_Mag_Array_Out[][NCOMP_MAG][ PS2P1*SQR(PS2) ],
char g_DE_Array_Out [][ CUBE(PS2) ],
real g_Flux_Array [][9][NCOMP_TOTAL][ SQR(PS2) ],
real g_Ele_Array [][9][NCOMP_ELE][ PS2P1*PS2 ],
const double g_Corner_Array [][3],
const real g_Pot_Array_USG[][ CUBE(USG_NXT_F) ],
real g_PriVar [] [NCOMP_LR ][ CUBE(FLU_NXT) ],
real g_Slope_PPM [][3][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ],
real g_FC_Var [][6][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ],
real g_FC_Flux [][3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ],
real g_FC_Mag_Half [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ],
real g_EC_Ele [][NCOMP_MAG][ CUBE(N_EC_ELE) ],
const real dt, const real dh,
const bool StoreFlux, const bool StoreElectric,
const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const double Time,
const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func,
const real MinDens, const real MinPres, const real MinEint,
const real DualEnergySwitch,
const bool NormPassive, const int NNorm,
const bool FracPassive, const int NFrac,
const bool JeansMinPres, const real JeansMinPres_Coeff,
const EoS_t EoS );
#endif // FLU_SCHEME
#elif ( MODEL == ELBDM )
__global__ void CUFLU_ELBDMSolver( real g_Fluid_In [][FLU_NIN ][ FLU_NXT*FLU_NXT*FLU_NXT ],
real g_Fluid_Out[][FLU_NOUT][ PS2*PS2*PS2 ],
real g_Flux [][9][NFLUX_TOTAL][ PS2*PS2 ],
const real dt, const real _dh, const real Eta, const bool StoreFlux,
const real Taylor3_Coeff, const bool XYZ, const real MinDens );
#else
#error : ERROR : unsupported MODEL !!
#endif // MODEL
#ifndef GRAVITY
static ExtAcc_t GPUExtAcc_Ptr = NULL;
#endif
// device pointers
extern real (*d_Flu_Array_F_In )[FLU_NIN ][ CUBE(FLU_NXT) ];
extern real (*d_Flu_Array_F_Out)[FLU_NOUT][ CUBE(PS2) ];
extern real (*d_Flux_Array)[9][NFLUX_TOTAL][ SQR(PS2) ];
extern double (*d_Corner_Array_F)[3];
#if ( MODEL == HYDRO )
#ifdef DUAL_ENERGY
extern char (*d_DE_Array_F_Out)[ CUBE(PS2) ];
#else
static char (*d_DE_Array_F_Out)[ CUBE(PS2) ] = NULL;
#endif
#ifdef MHD
extern real (*d_Mag_Array_F_In )[NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ];
extern real (*d_Mag_Array_F_Out)[NCOMP_MAG][ PS2P1*SQR(PS2) ];
extern real (*d_Ele_Array )[9][NCOMP_ELE][ PS2P1*PS2 ];
#else
static real (*d_Mag_Array_F_In )[NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ] = NULL;
static real (*d_Mag_Array_F_Out)[NCOMP_MAG][ PS2P1*SQR(PS2) ] = NULL;
static real (*d_Ele_Array )[9][NCOMP_ELE][ PS2P1*PS2 ] = NULL;
#endif
#if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU )
extern real (*d_PriVar) [NCOMP_LR ][ CUBE(FLU_NXT) ];
extern real (*d_Slope_PPM)[3][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ];
extern real (*d_FC_Var) [6][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ];
extern real (*d_FC_Flux) [3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ];
#ifdef MHD
extern real (*d_FC_Mag_Half)[NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ];
extern real (*d_EC_Ele )[NCOMP_MAG][ CUBE(N_EC_ELE) ];
#else
static real (*d_FC_Mag_Half)[NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ] = NULL;
static real (*d_EC_Ele )[NCOMP_MAG][ CUBE(N_EC_ELE) ] = NULL;
#endif // MHD
#endif // FLU_SCHEME
#endif // #if ( MODEL == HYDRO )
#ifdef UNSPLIT_GRAVITY
extern real (*d_Pot_Array_USG_F)[ CUBE(USG_NXT_F) ];
#else
static real (*d_Pot_Array_USG_F)[ CUBE(USG_NXT_F) ] = NULL;
#endif
extern cudaStream_t *Stream;
//-------------------------------------------------------------------------------------------------------
// Function : CUAPI_Asyn_FluidSolver
// Description : 1. MODEL == HYDRO : use GPU to solve the Euler equations by different schemes
// --> invoke the kernel "CUFLU_FluidSolver_XXX"
// 2. MODEL == ELBDM : use GPU to solve the kinematic operator in the Schrodinger's equations
// --> invoke the kernel "CUFLU_ELBDMSolver"
//
// ***********************************************************
// ** Asynchronous Function **
// ** **
// ** will return before the execution in GPU is complete **
// ***********************************************************
//
// Note : 1. Use streams for the asynchronous memory copy between device and host
// 2. Prefix "d" : for pointers pointing to the "Device" memory space
// Prefix "h" : for pointers pointing to the "Host" memory space
// 3. Use the input parameter "XYZ" to control the order of update for dimensional-splitting
// method (currently only RTVD)
// 4. Currently five hydro schemes are supported :
// 1. Relaxing TVD scheme (RTVD ) --> split
// 2. MUSCL-Hancock scheme (MHM ) --> unsplit
// 3. MUSCL-Hancock scheme with Riemann prediction (MHM_RP) --> unsplit
// 4. Corner-Transport-Upwind scheme (CTU ) --> unsplit
//
// Parameter : h_Flu_Array_In : Host array to store the input fluid variables
// h_Flu_Array_Out : Host array to store the output fluid variables
// h_Mag_Array_In : Host array storing the input B field (for MHD only)
// h_Mag_Array_Out : Host array to store the output B field (for MHD only)
// h_DE_Array_Out : Host array to store the dual-energy status
// h_Flux_Array : Host array to store the output fluxes
// h_Ele_Array : Host array to store the output electric field (for MHD only)
// h_Corner_Array : Host array storing the physical corner coordinates of each patch group
// h_Pot_Array_USG : Host array storing the input potential for UNSPLIT_GRAVITY
// NPatchGroup : Number of patch groups evaluated simultaneously by GPU
// dt : Time interval to advance solution
// dh : Cell size
// StoreFlux : true --> store the coarse-fine fluxes
// StoreElectric : true --> store the coarse-fine electric field
// XYZ : true : x->y->z ( forward sweep)
// false : z->y->x (backward sweep)
// ~ useless in directionally unsplit schemes
// LR_Limiter : Slope limiter for the data reconstruction in the MHM/MHM_RP/CTU schemes
// (0/1/2/3/4) = (vanLeer/generalized MinMod/vanAlbada/
// vanLeer + generalized MinMod/extrema-preserving) limiter
// MinMod_Coeff : Coefficient of the generalized MinMod limiter
// ELBDM_Eta : Particle mass / Planck constant
// ELBDM_Taylor3_Coeff : Coefficient in front of the third term in the Taylor expansion for ELBDM
// ELBDM_Taylor3_Auto : true --> Determine ELBDM_Taylor3_Coeff automatically by invoking the
// function "ELBDM_SetTaylor3Coeff"
// Time : Current physical time (for UNSPLIT_GRAVITY only)
// UsePot : Add self-gravity and/or external potential (for UNSPLIT_GRAVITY only)
// ExtAcc : Add external acceleration (for UNSPLIT_GRAVITY only)
// MinDens/Pres/Eint : Density, pressure, and internal energy floors
// DualEnergySwitch : Use the dual-energy formalism if E_int/E_kin < DualEnergySwitch
// NormPassive : true --> normalize passive scalars so that the sum of their mass density
// is equal to the gas mass density
// NNorm : Number of passive scalars to be normalized
// --> Should be set to the global variable "PassiveNorm_NVar"
// FracPassive : true --> convert passive scalars to mass fraction during data reconstruction
// NFrac : Number of passive scalars for the option "FracPassive"
// --> Should be set to the global variable "PassiveIntFrac_NVar"
// JeansMinPres : Apply minimum pressure estimated from the Jeans length
// JeansMinPres_Coeff : Coefficient used by JeansMinPres = G*(Jeans_NCell*Jeans_dh)^2/(Gamma*pi);
// GPU_NStream : Number of CUDA streams for the asynchronous memory copy
//-------------------------------------------------------------------------------------------------------
void CUAPI_Asyn_FluidSolver( real h_Flu_Array_In[][FLU_NIN ][ CUBE(FLU_NXT) ],
real h_Flu_Array_Out[][FLU_NOUT][ CUBE(PS2) ],
real h_Mag_Array_In[][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ],
real h_Mag_Array_Out[][NCOMP_MAG][ PS2P1*SQR(PS2) ],
char h_DE_Array_Out[][ CUBE(PS2) ],
real h_Flux_Array[][9][NFLUX_TOTAL][ SQR(PS2) ],
real h_Ele_Array[][9][NCOMP_ELE][ PS2P1*PS2 ],
const double h_Corner_Array[][3],
real h_Pot_Array_USG[][ CUBE(USG_NXT_F) ],
const int NPatchGroup, const real dt, const real dh,
const bool StoreFlux, const bool StoreElectric,
const bool XYZ, const LR_Limiter_t LR_Limiter, const real MinMod_Coeff,
const real ELBDM_Eta, real ELBDM_Taylor3_Coeff, const bool ELBDM_Taylor3_Auto,
const double Time, const bool UsePot, const OptExtAcc_t ExtAcc,
const real MinDens, const real MinPres, const real MinEint,
const real DualEnergySwitch,
const bool NormPassive, const int NNorm,
const bool FracPassive, const int NFrac,
const bool JeansMinPres, const real JeansMinPres_Coeff,
const int GPU_NStream )
{
// check
# ifdef GAMER_DEBUG
# if ( MODEL == HYDRO )
# ifdef UNSPLIT_GRAVITY
if ( UsePot )
{
if ( h_Pot_Array_USG == NULL ) Aux_Error( ERROR_INFO, "h_Pot_Array_USG == NULL !!\n" );
if ( d_Pot_Array_USG_F == NULL ) Aux_Error( ERROR_INFO, "d_Pot_Array_USG_F == NULL !!\n" );
}
if ( ExtAcc )
{
if ( h_Corner_Array == NULL ) Aux_Error( ERROR_INFO, "h_Corner_Array == NULL !!\n" );
if ( d_Corner_Array_F == NULL ) Aux_Error( ERROR_INFO, "d_Corner_Array_F == NULL !!\n" );
}
# endif
# elif ( MODEL == ELBDM )
# else
# warning : DO YOU WANT TO ADD SOMETHING HERE FOR THE NEW MODEL ??
# endif
if ( StoreFlux )
{
if ( d_Flux_Array == NULL ) Aux_Error( ERROR_INFO, "d_Flux_Array == NULL !!\n" );
if ( h_Flux_Array == NULL ) Aux_Error( ERROR_INFO, "h_Flux_Array == NULL !!\n" );
}
# ifdef MHD
if ( h_Mag_Array_In == NULL ) Aux_Error( ERROR_INFO, "h_Mag_Array_In == NULL !!\n" );
if ( d_Mag_Array_F_In == NULL ) Aux_Error( ERROR_INFO, "d_Mag_Array_F_In == NULL !!\n" );
if ( h_Mag_Array_Out == NULL ) Aux_Error( ERROR_INFO, "h_Mag_Array_Out == NULL !!\n" );
if ( d_Mag_Array_F_Out == NULL ) Aux_Error( ERROR_INFO, "d_Mag_Array_F_Out == NULL !!\n" );
if ( d_FC_Mag_Half == NULL ) Aux_Error( ERROR_INFO, "d_FC_Mag_Half == NULL !!\n" );
if ( d_EC_Ele == NULL ) Aux_Error( ERROR_INFO, "d_EC_Ele == NULL !!\n" );
if ( StoreElectric )
{
if ( d_Ele_Array == NULL ) Aux_Error( ERROR_INFO, "d_Ele_Array == NULL !!\n" );
if ( h_Ele_Array == NULL ) Aux_Error( ERROR_INFO, "h_Ele_Array == NULL !!\n" );
}
# endif
# endif // #ifdef GAMER_DEBUG
const dim3 BlockDim_FluidSolver ( FLU_BLOCK_SIZE_X, FLU_BLOCK_SIZE_Y, 1 ); // for the fluidsolvers
// model-dependent operations
# if ( MODEL == HYDRO )
# elif ( MODEL == ELBDM )
// evaluate the optimized Taylor expansion coefficient
if ( ELBDM_Taylor3_Auto ) ELBDM_Taylor3_Coeff = ELBDM_SetTaylor3Coeff( dt, dh, ELBDM_Eta );
# else
# error : ERROR : unsupported MODEL !!
# endif // MODEL
int *NPatch_per_Stream = new int [GPU_NStream];
int *UsedPatch = new int [GPU_NStream];
int *Flu_MemSize_In = new int [GPU_NStream];
int *Flu_MemSize_Out = new int [GPU_NStream];
int *Flux_MemSize = new int [GPU_NStream];
# ifdef MHD
int *Mag_MemSize_In = new int [GPU_NStream];
int *Mag_MemSize_Out = new int [GPU_NStream];
int *Ele_MemSize = new int [GPU_NStream];
# endif
# ifdef UNSPLIT_GRAVITY
int *USG_MemSize = new int [GPU_NStream];
int *Corner_MemSize = new int [GPU_NStream];
# endif
# ifdef DUAL_ENERGY
int *DE_MemSize_Out = new int [GPU_NStream];
# endif
// set the number of patches of each stream
UsedPatch[0] = 0;
if ( GPU_NStream == 1 ) NPatch_per_Stream[0] = NPatchGroup;
else
{
for (int s=0; s<GPU_NStream-1; s++)
{
NPatch_per_Stream[s] = NPatchGroup / GPU_NStream;
UsedPatch[s+1] = UsedPatch[s] + NPatch_per_Stream[s];
}
NPatch_per_Stream[GPU_NStream-1] = NPatchGroup - UsedPatch[GPU_NStream-1];
}
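// Example (arbitrary values, for illustration only): with NPatchGroup = 10 and GPU_NStream = 4,
// the loop above gives NPatch_per_Stream = {2, 2, 2, 4} and UsedPatch = {0, 2, 4, 6},
// i.e. the remainder patches are assigned to the last stream.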
// set the size of data to be transferred into GPU in each stream
for (int s=0; s<GPU_NStream; s++)
{
Flu_MemSize_In [s] = sizeof(real )*NPatch_per_Stream[s]*FLU_NIN *CUBE(FLU_NXT);
Flu_MemSize_Out[s] = sizeof(real )*NPatch_per_Stream[s]*FLU_NOUT*CUBE(PS2);
Flux_MemSize [s] = sizeof(real )*NPatch_per_Stream[s]*NFLUX_TOTAL*9*PS2*PS2;
# ifdef MHD
Mag_MemSize_In [s] = sizeof(real )*NPatch_per_Stream[s]*NCOMP_MAG*FLU_NXT_P1*SQR(FLU_NXT);
Mag_MemSize_Out[s] = sizeof(real )*NPatch_per_Stream[s]*NCOMP_MAG*PS2P1*SQR(PS2);
Ele_MemSize [s] = sizeof(real )*NPatch_per_Stream[s]*NCOMP_ELE*9*PS2P1*PS2;
# endif
# ifdef UNSPLIT_GRAVITY
USG_MemSize [s] = sizeof(real )*NPatch_per_Stream[s]*CUBE(USG_NXT_F);
Corner_MemSize [s] = sizeof(double)*NPatch_per_Stream[s]*3;
# endif
# ifdef DUAL_ENERGY
DE_MemSize_Out [s] = sizeof(char )*NPatch_per_Stream[s]*CUBE(PS2);
# endif
}
// a. copy data from host to device
//=========================================================================================
for (int s=0; s<GPU_NStream; s++)
{
if ( NPatch_per_Stream[s] == 0 ) continue;
CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Flu_Array_F_In + UsedPatch[s], h_Flu_Array_In + UsedPatch[s],
Flu_MemSize_In[s], cudaMemcpyHostToDevice, Stream[s] ) );
# ifdef MHD
CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Mag_Array_F_In + UsedPatch[s], h_Mag_Array_In + UsedPatch[s],
Mag_MemSize_In[s], cudaMemcpyHostToDevice, Stream[s] ) );
# endif
# ifdef UNSPLIT_GRAVITY
if ( UsePot )
CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Pot_Array_USG_F + UsedPatch[s], h_Pot_Array_USG + UsedPatch[s],
USG_MemSize [s], cudaMemcpyHostToDevice, Stream[s] ) );
if ( ExtAcc )
CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Corner_Array_F + UsedPatch[s], h_Corner_Array + UsedPatch[s],
Corner_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) );
# endif
} // for (int s=0; s<GPU_NStream; s++)
// b. execute the kernel
//=========================================================================================
for (int s=0; s<GPU_NStream; s++)
{
if ( NPatch_per_Stream[s] == 0 ) continue;
# if ( MODEL == HYDRO )
# if ( FLU_SCHEME == RTVD )
CUFLU_FluidSolver_RTVD <<< NPatch_per_Stream[s], BlockDim_FluidSolver, 0, Stream[s] >>>
( d_Flu_Array_F_In + UsedPatch[s],
d_Flu_Array_F_Out + UsedPatch[s],
d_Flux_Array + UsedPatch[s],
d_Corner_Array_F + UsedPatch[s],
d_Pot_Array_USG_F + UsedPatch[s],
dt, 1.0/dh, StoreFlux, XYZ, MinDens, MinPres, MinEint, EoS );
# elif ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP )
CUFLU_FluidSolver_MHM <<< NPatch_per_Stream[s], BlockDim_FluidSolver, 0, Stream[s] >>>
( d_Flu_Array_F_In + UsedPatch[s],
d_Flu_Array_F_Out + UsedPatch[s],
d_Mag_Array_F_In + UsedPatch[s],
d_Mag_Array_F_Out + UsedPatch[s],
d_DE_Array_F_Out + UsedPatch[s],
d_Flux_Array + UsedPatch[s],
d_Ele_Array + UsedPatch[s],
d_Corner_Array_F + UsedPatch[s],
d_Pot_Array_USG_F + UsedPatch[s],
d_PriVar + UsedPatch[s],
d_Slope_PPM + UsedPatch[s],
d_FC_Var + UsedPatch[s],
d_FC_Flux + UsedPatch[s],
d_FC_Mag_Half + UsedPatch[s],
d_EC_Ele + UsedPatch[s],
dt, dh, StoreFlux, StoreElectric, LR_Limiter, MinMod_Coeff,
Time, UsePot, ExtAcc, GPUExtAcc_Ptr, MinDens, MinPres, MinEint,
DualEnergySwitch, NormPassive, NNorm, FracPassive, NFrac,
JeansMinPres, JeansMinPres_Coeff, EoS );
# elif ( FLU_SCHEME == CTU )
CUFLU_FluidSolver_CTU <<< NPatch_per_Stream[s], BlockDim_FluidSolver, 0, Stream[s] >>>
( d_Flu_Array_F_In + UsedPatch[s],
d_Flu_Array_F_Out + UsedPatch[s],
d_Mag_Array_F_In + UsedPatch[s],
d_Mag_Array_F_Out + UsedPatch[s],
d_DE_Array_F_Out + UsedPatch[s],
d_Flux_Array + UsedPatch[s],
d_Ele_Array + UsedPatch[s],
d_Corner_Array_F + UsedPatch[s],
d_Pot_Array_USG_F + UsedPatch[s],
d_PriVar + UsedPatch[s],
d_Slope_PPM + UsedPatch[s],
d_FC_Var + UsedPatch[s],
d_FC_Flux + UsedPatch[s],
d_FC_Mag_Half + UsedPatch[s],
d_EC_Ele + UsedPatch[s],
dt, dh, StoreFlux, StoreElectric, LR_Limiter, MinMod_Coeff,
Time, UsePot, ExtAcc, GPUExtAcc_Ptr, MinDens, MinPres, MinEint,
DualEnergySwitch, NormPassive, NNorm, FracPassive, NFrac,
JeansMinPres, JeansMinPres_Coeff, EoS );
# else
# error : unsupported GPU hydro scheme
# endif // FLU_SCHEME
# elif ( MODEL == ELBDM )
CUFLU_ELBDMSolver <<< NPatch_per_Stream[s], BlockDim_FluidSolver, 0, Stream[s] >>>
( d_Flu_Array_F_In + UsedPatch[s],
d_Flu_Array_F_Out + UsedPatch[s],
d_Flux_Array + UsedPatch[s],
dt, 1.0/dh, ELBDM_Eta, StoreFlux, ELBDM_Taylor3_Coeff, XYZ, MinDens );
# else
# error : unsupported MODEL !!
# endif // MODEL
CUDA_CHECK_ERROR( cudaGetLastError() );
} // for (int s=0; s<GPU_NStream; s++)
// c. copy data from device to host
//=========================================================================================
for (int s=0; s<GPU_NStream; s++)
{
if ( NPatch_per_Stream[s] == 0 ) continue;
CUDA_CHECK_ERROR( cudaMemcpyAsync( h_Flu_Array_Out + UsedPatch[s], d_Flu_Array_F_Out + UsedPatch[s],
Flu_MemSize_Out[s], cudaMemcpyDeviceToHost, Stream[s] ) );
if ( StoreFlux )
CUDA_CHECK_ERROR( cudaMemcpyAsync( h_Flux_Array + UsedPatch[s], d_Flux_Array + UsedPatch[s],
Flux_MemSize[s], cudaMemcpyDeviceToHost, Stream[s] ) );
# ifdef MHD
CUDA_CHECK_ERROR( cudaMemcpyAsync( h_Mag_Array_Out + UsedPatch[s], d_Mag_Array_F_Out + UsedPatch[s],
Mag_MemSize_Out[s], cudaMemcpyDeviceToHost, Stream[s] ) );
if ( StoreElectric )
CUDA_CHECK_ERROR( cudaMemcpyAsync( h_Ele_Array + UsedPatch[s], d_Ele_Array + UsedPatch[s],
Ele_MemSize[s], cudaMemcpyDeviceToHost, Stream[s] ) );
# endif
# ifdef DUAL_ENERGY
CUDA_CHECK_ERROR( cudaMemcpyAsync( h_DE_Array_Out + UsedPatch[s], d_DE_Array_F_Out + UsedPatch[s],
DE_MemSize_Out[s], cudaMemcpyDeviceToHost, Stream[s] ) );
# endif
} // for (int s=0; s<GPU_NStream; s++)
delete [] NPatch_per_Stream;
delete [] UsedPatch;
delete [] Flu_MemSize_In;
delete [] Flu_MemSize_Out;
delete [] Flux_MemSize;
# ifdef MHD
delete [] Mag_MemSize_In;
delete [] Mag_MemSize_Out;
delete [] Ele_MemSize;
# endif
# ifdef UNSPLIT_GRAVITY
delete [] USG_MemSize;
delete [] Corner_MemSize;
# endif
# ifdef DUAL_ENERGY
delete [] DE_MemSize_Out;
# endif
} // FUNCTION : CUAPI_Asyn_FluidSolver
#endif // #ifdef GPU
|
7e319fc18d097ee97474bd5fe1b57a9ed34ccff6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
//////////////////////////////////////////////////////////////////////////////////////////
__global__ void computeCost(const double *Params, const float *Ws, const float *mus, const float *W, const float *mu, const bool *iMatch, const int *iC, const int *Wh, float *cmax){
int j, tid, bid, Nspikes, my_chan, this_chan, Nchan, NrankPC, NchanNear, Nthreads, k;
float xsum = 0.0f, Ci;
Nspikes = (int) Params[0];
Nchan = (int) Params[7];
NrankPC = (int) Params[1];
NchanNear = (int) Params[6];
Nthreads = blockDim.x;
tid = threadIdx.x;
bid = blockIdx.x;
while(tid<Nspikes){
my_chan = Wh[tid];
if (iMatch[my_chan + bid*Nchan]){
xsum = 0.0f;
for (k=0;k<NchanNear;k++){
this_chan = iC[k + NchanNear * my_chan];
for (j=0;j<NrankPC;j++)
xsum += Ws[j + NrankPC*k + NrankPC*NchanNear * tid] *
W[j + NrankPC*this_chan + NrankPC*Nchan * bid];
}
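// xsum now holds the inner product of the spike's PC features (Ws) and template bid's waveform (W);
// assuming both feature vectors are unit-normalised, Ci below is the expanded squared distance
// || mus*Ws - mu*W ||^2 between the scaled spike and the scaled template.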
Ci = mu[bid]*mu[bid] + mus[tid]*mus[tid] -2*mus[tid]*mu[bid]*xsum;
cmax[tid + bid*Nspikes] = Ci;
}
tid+= Nthreads;
}
} | 7e319fc18d097ee97474bd5fe1b57a9ed34ccff6.cu | #include "includes.h"
//////////////////////////////////////////////////////////////////////////////////////////
__global__ void computeCost(const double *Params, const float *Ws, const float *mus, const float *W, const float *mu, const bool *iMatch, const int *iC, const int *Wh, float *cmax){
int j, tid, bid, Nspikes, my_chan, this_chan, Nchan, NrankPC, NchanNear, Nthreads, k;
float xsum = 0.0f, Ci;
Nspikes = (int) Params[0];
Nchan = (int) Params[7];
NrankPC = (int) Params[1];
NchanNear = (int) Params[6];
Nthreads = blockDim.x;
tid = threadIdx.x;
bid = blockIdx.x;
while(tid<Nspikes){
my_chan = Wh[tid];
if (iMatch[my_chan + bid*Nchan]){
xsum = 0.0f;
for (k=0;k<NchanNear;k++){
this_chan = iC[k + NchanNear * my_chan];
for (j=0;j<NrankPC;j++)
xsum += Ws[j + NrankPC*k + NrankPC*NchanNear * tid] *
W[j + NrankPC*this_chan + NrankPC*Nchan * bid];
}
Ci = mu[bid]*mu[bid] + mus[tid]*mus[tid] -2*mus[tid]*mu[bid]*xsum;
cmax[tid + bid*Nspikes] = Ci;
}
tid+= Nthreads;
}
} |
809e7e49be2ba0ad6b7a0e5bcb8056788f7bf952.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <avilib.h>
#include <avimod.h>
#include <hip/hip_runtime.h>
// STRUCTURES, GLOBAL STRUCTURE VARIABLES
#include "define.c"
params_common_change common_change;
__constant__ params_common_change d_common_change;
params_common common;
__constant__ params_common d_common;
params_unique unique[ALL_POINTS];// cannot determine size dynamically so choose more than usually needed
__constant__ params_unique d_unique[ALL_POINTS];
// KERNEL CODE
#include "kernel.hip"
// WRITE DATA FUNCTION
void write_data( char* filename,
int frameNo,
int frames_processed,
int endoPoints,
int* input_a,
int* input_b,
int epiPoints,
int* input_2a,
int* input_2b){
//================================================================================80
// VARIABLES
//================================================================================80
FILE* fid;
int i,j;
char c;
//================================================================================80
// OPEN FILE FOR READING
//================================================================================80
fid = fopen(filename, "w+");
if( fid == NULL ){
printf( "The file was not opened for writing\n" );
return;
}
//================================================================================80
// WRITE VALUES TO THE FILE
//================================================================================80
fprintf(fid, "Total AVI Frames: %d\n", frameNo);
fprintf(fid, "Frames Processed: %d\n", frames_processed);
fprintf(fid, "endoPoints: %d\n", endoPoints);
fprintf(fid, "epiPoints: %d", epiPoints);
for(j=0; j<frames_processed;j++)
{
fprintf(fid, "\n---Frame %d---",j);
fprintf(fid, "\n--endo--\n",j);
for(i=0; i<endoPoints; i++){
fprintf(fid, "%d\t", input_a[j+i*frameNo]);
}
fprintf(fid, "\n");
for(i=0; i<endoPoints; i++){
// if(input_b[j*size+i] > 2000) input_b[j*size+i]=0;
fprintf(fid, "%d\t", input_b[j+i*frameNo]);
}
fprintf(fid, "\n--epi--\n",j);
for(i=0; i<epiPoints; i++){
//if(input_2a[j*size_2+i] > 2000) input_2a[j*size_2+i]=0;
fprintf(fid, "%d\t", input_2a[j+i*frameNo]);
}
fprintf(fid, "\n");
for(i=0; i<epiPoints; i++){
//if(input_2b[j*size_2+i] > 2000) input_2b[j*size_2+i]=0;
fprintf(fid, "%d\t", input_2b[j+i*frameNo]);
}
}
// ================================================================================80
// CLOSE FILE
// ================================================================================80
fclose(fid);
}
int main(int argc, char *argv []){
printf("WG size of kernel = %d \n", NUMBER_THREADS);
// CUDA kernel execution parameters
dim3 threads;
dim3 blocks;
// counter
int i;
int frames_processed;
// frames
char* video_file_name;
avi_t* frames;
fp* frame;
if(argc!=3){
printf("ERROR: usage: heartwall <inputfile> <num of frames>\n");
exit(1);
}
// open movie file
video_file_name = argv[1];
frames = (avi_t*)AVI_open_input_file(video_file_name, 1); // added casting
if (frames == NULL) {
AVI_print_error((char *) "Error with AVI_open_input_file");
return -1;
}
// common
common.no_frames = AVI_video_frames(frames);
common.frame_rows = AVI_video_height(frames);
common.frame_cols = AVI_video_width(frames);
common.frame_elem = common.frame_rows * common.frame_cols;
common.frame_mem = sizeof(fp) * common.frame_elem;
// pointers
hipMalloc((void **)&common_change.d_frame, common.frame_mem);
// CHECK INPUT ARGUMENTS
frames_processed = atoi(argv[2]);
if(frames_processed<0 || frames_processed>common.no_frames){
printf("ERROR: %d is an incorrect number of frames specified, select in the range of 0-%d\n", frames_processed, common.no_frames);
return 0;
}
// HARDCODED INPUTS FROM MATLAB
//====================================================================================================
// CONSTANTS
//====================================================================================================
common.sSize = 40;
common.tSize = 25;
common.maxMove = 10;
common.alpha = 0.87;
//====================================================================================================
// ENDO POINTS
//====================================================================================================
common.endoPoints = ENDO_POINTS;
common.endo_mem = sizeof(int) * common.endoPoints;
common.endoRow = (int *)malloc(common.endo_mem);
common.endoRow[ 0] = 369;
common.endoRow[ 1] = 400;
common.endoRow[ 2] = 429;
common.endoRow[ 3] = 452;
common.endoRow[ 4] = 476;
common.endoRow[ 5] = 486;
common.endoRow[ 6] = 479;
common.endoRow[ 7] = 458;
common.endoRow[ 8] = 433;
common.endoRow[ 9] = 404;
common.endoRow[10] = 374;
common.endoRow[11] = 346;
common.endoRow[12] = 318;
common.endoRow[13] = 294;
common.endoRow[14] = 277;
common.endoRow[15] = 269;
common.endoRow[16] = 275;
common.endoRow[17] = 287;
common.endoRow[18] = 311;
common.endoRow[19] = 339;
hipMalloc((void **)&common.d_endoRow, common.endo_mem);
hipMemcpy(common.d_endoRow, common.endoRow, common.endo_mem, hipMemcpyHostToDevice);
common.endoCol = (int *)malloc(common.endo_mem);
common.endoCol[ 0] = 408;
common.endoCol[ 1] = 406;
common.endoCol[ 2] = 397;
common.endoCol[ 3] = 383;
common.endoCol[ 4] = 354;
common.endoCol[ 5] = 322;
common.endoCol[ 6] = 294;
common.endoCol[ 7] = 270;
common.endoCol[ 8] = 250;
common.endoCol[ 9] = 237;
common.endoCol[10] = 235;
common.endoCol[11] = 241;
common.endoCol[12] = 254;
common.endoCol[13] = 273;
common.endoCol[14] = 300;
common.endoCol[15] = 328;
common.endoCol[16] = 356;
common.endoCol[17] = 383;
common.endoCol[18] = 401;
common.endoCol[19] = 411;
hipMalloc((void **)&common.d_endoCol, common.endo_mem);
hipMemcpy(common.d_endoCol, common.endoCol, common.endo_mem, hipMemcpyHostToDevice);
common.tEndoRowLoc = (int *)malloc(common.endo_mem * common.no_frames);
hipMalloc((void **)&common.d_tEndoRowLoc, common.endo_mem * common.no_frames);
common.tEndoColLoc = (int *)malloc(common.endo_mem * common.no_frames);
hipMalloc((void **)&common.d_tEndoColLoc, common.endo_mem * common.no_frames);
//====================================================================================================
// EPI POINTS
//====================================================================================================
common.epiPoints = EPI_POINTS;
common.epi_mem = sizeof(int) * common.epiPoints;
common.epiRow = (int *)malloc(common.epi_mem);
common.epiRow[ 0] = 390;
common.epiRow[ 1] = 419;
common.epiRow[ 2] = 448;
common.epiRow[ 3] = 474;
common.epiRow[ 4] = 501;
common.epiRow[ 5] = 519;
common.epiRow[ 6] = 535;
common.epiRow[ 7] = 542;
common.epiRow[ 8] = 543;
common.epiRow[ 9] = 538;
common.epiRow[10] = 528;
common.epiRow[11] = 511;
common.epiRow[12] = 491;
common.epiRow[13] = 466;
common.epiRow[14] = 438;
common.epiRow[15] = 406;
common.epiRow[16] = 376;
common.epiRow[17] = 347;
common.epiRow[18] = 318;
common.epiRow[19] = 291;
common.epiRow[20] = 275;
common.epiRow[21] = 259;
common.epiRow[22] = 256;
common.epiRow[23] = 252;
common.epiRow[24] = 252;
common.epiRow[25] = 257;
common.epiRow[26] = 266;
common.epiRow[27] = 283;
common.epiRow[28] = 305;
common.epiRow[29] = 331;
common.epiRow[30] = 360;
hipMalloc((void **)&common.d_epiRow, common.epi_mem);
hipMemcpy(common.d_epiRow, common.epiRow, common.epi_mem, hipMemcpyHostToDevice);
common.epiCol = (int *)malloc(common.epi_mem);
common.epiCol[ 0] = 457;
common.epiCol[ 1] = 454;
common.epiCol[ 2] = 446;
common.epiCol[ 3] = 431;
common.epiCol[ 4] = 411;
common.epiCol[ 5] = 388;
common.epiCol[ 6] = 361;
common.epiCol[ 7] = 331;
common.epiCol[ 8] = 301;
common.epiCol[ 9] = 273;
common.epiCol[10] = 243;
common.epiCol[11] = 218;
common.epiCol[12] = 196;
common.epiCol[13] = 178;
common.epiCol[14] = 166;
common.epiCol[15] = 157;
common.epiCol[16] = 155;
common.epiCol[17] = 165;
common.epiCol[18] = 177;
common.epiCol[19] = 197;
common.epiCol[20] = 218;
common.epiCol[21] = 248;
common.epiCol[22] = 276;
common.epiCol[23] = 304;
common.epiCol[24] = 333;
common.epiCol[25] = 361;
common.epiCol[26] = 391;
common.epiCol[27] = 415;
common.epiCol[28] = 434;
common.epiCol[29] = 448;
common.epiCol[30] = 455;
hipMalloc((void **)&common.d_epiCol, common.epi_mem);
hipMemcpy(common.d_epiCol, common.epiCol, common.epi_mem, hipMemcpyHostToDevice);
common.tEpiRowLoc = (int *)malloc(common.epi_mem * common.no_frames);
hipMalloc((void **)&common.d_tEpiRowLoc, common.epi_mem * common.no_frames);
common.tEpiColLoc = (int *)malloc(common.epi_mem * common.no_frames);
hipMalloc((void **)&common.d_tEpiColLoc, common.epi_mem * common.no_frames);
//====================================================================================================
// ALL POINTS
//====================================================================================================
common.allPoints = ALL_POINTS;
// TEMPLATE SIZES
// common
common.in_rows = common.tSize + 1 + common.tSize;
common.in_cols = common.in_rows;
common.in_elem = common.in_rows * common.in_cols;
common.in_mem = sizeof(fp) * common.in_elem;
// CREATE ARRAY OF TEMPLATES FOR ALL POINTS
// common
hipMalloc((void **)&common.d_endoT, common.in_mem * common.endoPoints);
hipMalloc((void **)&common.d_epiT, common.in_mem * common.epiPoints);
// SPECIFIC TO ENDO OR EPI TO BE SET HERE
for(i=0; i<common.endoPoints; i++){
unique[i].point_no = i;
unique[i].d_Row = common.d_endoRow;
unique[i].d_Col = common.d_endoCol;
unique[i].d_tRowLoc = common.d_tEndoRowLoc;
unique[i].d_tColLoc = common.d_tEndoColLoc;
unique[i].d_T = common.d_endoT;
}
for(i=common.endoPoints; i<common.allPoints; i++){
unique[i].point_no = i-common.endoPoints;
unique[i].d_Row = common.d_epiRow;
unique[i].d_Col = common.d_epiCol;
unique[i].d_tRowLoc = common.d_tEpiRowLoc;
unique[i].d_tColLoc = common.d_tEpiColLoc;
unique[i].d_T = common.d_epiT;
}
// RIGHT TEMPLATE FROM TEMPLATE ARRAY
// pointers
for(i=0; i<common.allPoints; i++){
unique[i].in_pointer = unique[i].point_no * common.in_elem;
}
// AREA AROUND POINT FROM FRAME
// common
common.in2_rows = 2 * common.sSize + 1;
common.in2_cols = 2 * common.sSize + 1;
common.in2_elem = common.in2_rows * common.in2_cols;
common.in2_mem = sizeof(float) * common.in2_elem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in2, common.in2_mem);
}
// CONVOLUTION
// common
common.conv_rows = common.in_rows + common.in2_rows - 1;// number of rows in I
common.conv_cols = common.in_cols + common.in2_cols - 1;// number of columns in I
common.conv_elem = common.conv_rows * common.conv_cols; // number of elements
common.conv_mem = sizeof(float) * common.conv_elem;
common.ioffset = 0;
common.joffset = 0;
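// For reference, with the constants set above (tSize = 25, sSize = 40) the template is 51x51,
// the search area around each point is 81x81, and the convolution result is 131x131.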
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_conv, common.conv_mem);
}
// CUMULATIVE SUM
//====================================================================================================
// PADDING OF ARRAY, VERTICAL CUMULATIVE SUM
//====================================================================================================
// common
common.in2_pad_add_rows = common.in_rows;
common.in2_pad_add_cols = common.in_cols;
common.in2_pad_cumv_rows = common.in2_rows + 2*common.in2_pad_add_rows;
common.in2_pad_cumv_cols = common.in2_cols + 2*common.in2_pad_add_cols;
common.in2_pad_cumv_elem = common.in2_pad_cumv_rows * common.in2_pad_cumv_cols;
common.in2_pad_cumv_mem = sizeof(float) * common.in2_pad_cumv_elem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in2_pad_cumv, common.in2_pad_cumv_mem);
}
//====================================================================================================
// SELECTION
//====================================================================================================
// common
common.in2_pad_cumv_sel_rowlow = 1 + common.in_rows; // (1 to n+1)
common.in2_pad_cumv_sel_rowhig = common.in2_pad_cumv_rows - 1;
common.in2_pad_cumv_sel_collow = 1;
common.in2_pad_cumv_sel_colhig = common.in2_pad_cumv_cols;
common.in2_pad_cumv_sel_rows = common.in2_pad_cumv_sel_rowhig - common.in2_pad_cumv_sel_rowlow + 1;
common.in2_pad_cumv_sel_cols = common.in2_pad_cumv_sel_colhig - common.in2_pad_cumv_sel_collow + 1;
common.in2_pad_cumv_sel_elem = common.in2_pad_cumv_sel_rows * common.in2_pad_cumv_sel_cols;
common.in2_pad_cumv_sel_mem = sizeof(float) * common.in2_pad_cumv_sel_elem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in2_pad_cumv_sel, common.in2_pad_cumv_sel_mem);
}
//====================================================================================================
// SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM
//====================================================================================================
// common
common.in2_pad_cumv_sel2_rowlow = 1;
common.in2_pad_cumv_sel2_rowhig = common.in2_pad_cumv_rows - common.in_rows - 1;
common.in2_pad_cumv_sel2_collow = 1;
common.in2_pad_cumv_sel2_colhig = common.in2_pad_cumv_cols;
common.in2_sub_cumh_rows = common.in2_pad_cumv_sel2_rowhig - common.in2_pad_cumv_sel2_rowlow + 1;
common.in2_sub_cumh_cols = common.in2_pad_cumv_sel2_colhig - common.in2_pad_cumv_sel2_collow + 1;
common.in2_sub_cumh_elem = common.in2_sub_cumh_rows * common.in2_sub_cumh_cols;
common.in2_sub_cumh_mem = sizeof(float) * common.in2_sub_cumh_elem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in2_sub_cumh, common.in2_sub_cumh_mem);
}
//====================================================================================================
// SELECTION
//====================================================================================================
// common
common.in2_sub_cumh_sel_rowlow = 1;
common.in2_sub_cumh_sel_rowhig = common.in2_sub_cumh_rows;
common.in2_sub_cumh_sel_collow = 1 + common.in_cols;
common.in2_sub_cumh_sel_colhig = common.in2_sub_cumh_cols - 1;
common.in2_sub_cumh_sel_rows = common.in2_sub_cumh_sel_rowhig - common.in2_sub_cumh_sel_rowlow + 1;
common.in2_sub_cumh_sel_cols = common.in2_sub_cumh_sel_colhig - common.in2_sub_cumh_sel_collow + 1;
common.in2_sub_cumh_sel_elem = common.in2_sub_cumh_sel_rows * common.in2_sub_cumh_sel_cols;
common.in2_sub_cumh_sel_mem = sizeof(float) * common.in2_sub_cumh_sel_elem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in2_sub_cumh_sel, common.in2_sub_cumh_sel_mem);
}
//====================================================================================================
// SELECTION 2, SUBTRACTION
//====================================================================================================
// common
common.in2_sub_cumh_sel2_rowlow = 1;
common.in2_sub_cumh_sel2_rowhig = common.in2_sub_cumh_rows;
common.in2_sub_cumh_sel2_collow = 1;
common.in2_sub_cumh_sel2_colhig = common.in2_sub_cumh_cols - common.in_cols - 1;
common.in2_sub2_rows = common.in2_sub_cumh_sel2_rowhig - common.in2_sub_cumh_sel2_rowlow + 1;
common.in2_sub2_cols = common.in2_sub_cumh_sel2_colhig - common.in2_sub_cumh_sel2_collow + 1;
common.in2_sub2_elem = common.in2_sub2_rows * common.in2_sub2_cols;
common.in2_sub2_mem = sizeof(float) * common.in2_sub2_elem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in2_sub2, common.in2_sub2_mem);
}
// CUMULATIVE SUM 2
//====================================================================================================
// MULTIPLICATION
//====================================================================================================
// common
common.in2_sqr_rows = common.in2_rows;
common.in2_sqr_cols = common.in2_cols;
common.in2_sqr_elem = common.in2_elem;
common.in2_sqr_mem = common.in2_mem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in2_sqr, common.in2_sqr_mem);
}
//====================================================================================================
// SELECTION 2, SUBTRACTION
//====================================================================================================
// common
common.in2_sqr_sub2_rows = common.in2_sub2_rows;
common.in2_sqr_sub2_cols = common.in2_sub2_cols;
common.in2_sqr_sub2_elem = common.in2_sub2_elem;
common.in2_sqr_sub2_mem = common.in2_sub2_mem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in2_sqr_sub2, common.in2_sqr_sub2_mem);
}
// FINAL
// common
common.in_sqr_rows = common.in_rows;
common.in_sqr_cols = common.in_cols;
common.in_sqr_elem = common.in_elem;
common.in_sqr_mem = common.in_mem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in_sqr, common.in_sqr_mem);
}
// TEMPLATE MASK CREATE
// common
common.tMask_rows = common.in_rows + (common.sSize+1+common.sSize) - 1;
common.tMask_cols = common.tMask_rows;
common.tMask_elem = common.tMask_rows * common.tMask_cols;
common.tMask_mem = sizeof(float) * common.tMask_elem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_tMask, common.tMask_mem);
}
// POINT MASK INITIALIZE
// common
common.mask_rows = common.maxMove;
common.mask_cols = common.mask_rows;
common.mask_elem = common.mask_rows * common.mask_cols;
common.mask_mem = sizeof(float) * common.mask_elem;
// MASK CONVOLUTION
// common
common.mask_conv_rows = common.tMask_rows; // number of rows in I
common.mask_conv_cols = common.tMask_cols; // number of columns in I
common.mask_conv_elem = common.mask_conv_rows * common.mask_conv_cols;// number of elements
common.mask_conv_mem = sizeof(float) * common.mask_conv_elem;
common.mask_conv_ioffset = (common.mask_rows-1)/2;
if((common.mask_rows-1) % 2 > 0.5){
common.mask_conv_ioffset = common.mask_conv_ioffset + 1;
}
common.mask_conv_joffset = (common.mask_cols-1)/2;
if((common.mask_cols-1) % 2 > 0.5){
common.mask_conv_joffset = common.mask_conv_joffset + 1;
}
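// The remainder checks above round the half-width up, i.e. offset = ceil((mask_rows-1)/2);
// e.g. with maxMove = 10 this gives (10-1)/2 = 4, bumped to 5 by the odd-remainder branch.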
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_mask_conv, common.mask_conv_mem);
}
// KERNEL
//====================================================================================================
// THREAD BLOCK
//====================================================================================================
// All operations within the kernel use the same maximum number of threads. The block size is set to the size appropriate for the largest operation (on the padded matrix); the other operations use subsets of that.
threads.x = NUMBER_THREADS; // define the number of threads in the block
threads.y = 1;
blocks.x = common.allPoints; // define the number of blocks in the grid
blocks.y = 1;
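// i.e. one thread block per tracked point (the 20 endo + 31 epi points initialised above),
// with NUMBER_THREADS threads per block.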
//====================================================================================================
// COPY ARGUMENTS
//====================================================================================================
hipMemcpyToSymbol(d_common, &common, sizeof(params_common));
hipMemcpyToSymbol(d_unique, &unique, sizeof(params_unique)*ALL_POINTS);
//====================================================================================================
// PRINT FRAME PROGRESS START
//====================================================================================================
printf("frame progress: ");
fflush(NULL);
//====================================================================================================
// LAUNCH
//====================================================================================================
for(common_change.frame_no=0; common_change.frame_no<frames_processed; common_change.frame_no++){
// Extract a cropped version of the first frame from the video file
frame = get_frame( frames, // pointer to video file
common_change.frame_no, // number of frame that needs to be returned
0, // cropped?
0, // scaled?
1); // converted
// copy frame to GPU memory
hipMemcpy(common_change.d_frame, frame, common.frame_mem, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_common_change, &common_change, sizeof(params_common_change));
// launch GPU kernel
hipLaunchKernelGGL(( kernel), dim3(blocks), dim3(threads), 0, 0, );
// free frame after each loop iteration, since AVI library allocates memory for every frame fetched
free(frame);
// print frame progress
printf("%d ", common_change.frame_no);
fflush(NULL);
}
//====================================================================================================
// PRINT FRAME PROGRESS END
//====================================================================================================
printf("\n");
fflush(NULL);
//====================================================================================================
// OUTPUT
//====================================================================================================
hipMemcpy(common.tEndoRowLoc, common.d_tEndoRowLoc, common.endo_mem * common.no_frames, hipMemcpyDeviceToHost);
hipMemcpy(common.tEndoColLoc, common.d_tEndoColLoc, common.endo_mem * common.no_frames, hipMemcpyDeviceToHost);
hipMemcpy(common.tEpiRowLoc, common.d_tEpiRowLoc, common.epi_mem * common.no_frames, hipMemcpyDeviceToHost);
hipMemcpy(common.tEpiColLoc, common.d_tEpiColLoc, common.epi_mem * common.no_frames, hipMemcpyDeviceToHost);
#ifdef OUTPUT
//==================================================50
// DUMP DATA TO FILE
//==================================================50
write_data( "result.txt",
common.no_frames,
frames_processed,
common.endoPoints,
common.tEndoRowLoc,
common.tEndoColLoc,
common.epiPoints,
common.tEpiRowLoc,
common.tEpiColLoc);
//==================================================50
// End
//==================================================50
#endif
// DEALLOCATION
//====================================================================================================
// COMMON
//====================================================================================================
// frame
hipFree(common_change.d_frame);
// endo points
free(common.endoRow);
free(common.endoCol);
free(common.tEndoRowLoc);
free(common.tEndoColLoc);
hipFree(common.d_endoRow);
hipFree(common.d_endoCol);
hipFree(common.d_tEndoRowLoc);
hipFree(common.d_tEndoColLoc);
hipFree(common.d_endoT);
// epi points
free(common.epiRow);
free(common.epiCol);
free(common.tEpiRowLoc);
free(common.tEpiColLoc);
hipFree(common.d_epiRow);
hipFree(common.d_epiCol);
hipFree(common.d_tEpiRowLoc);
hipFree(common.d_tEpiColLoc);
hipFree(common.d_epiT);
//====================================================================================================
// POINTERS
//====================================================================================================
for(i=0; i<common.allPoints; i++){
hipFree(unique[i].d_in2);
hipFree(unique[i].d_conv);
hipFree(unique[i].d_in2_pad_cumv);
hipFree(unique[i].d_in2_pad_cumv_sel);
hipFree(unique[i].d_in2_sub_cumh);
hipFree(unique[i].d_in2_sub_cumh_sel);
hipFree(unique[i].d_in2_sub2);
hipFree(unique[i].d_in2_sqr);
hipFree(unique[i].d_in2_sqr_sub2);
hipFree(unique[i].d_in_sqr);
hipFree(unique[i].d_tMask);
hipFree(unique[i].d_mask_conv);
}
}
| 809e7e49be2ba0ad6b7a0e5bcb8056788f7bf952.cu | #include <stdlib.h>
#include <math.h>
#include <string.h>
#include <avilib.h>
#include <avimod.h>
#include <cuda.h>
// STRUCTURES, GLOBAL STRUCTURE VARIABLES
#include "define.c"
params_common_change common_change;
__constant__ params_common_change d_common_change;
params_common common;
__constant__ params_common d_common;
params_unique unique[ALL_POINTS];// cannot determine size dynamically so choose more than usually needed
__constant__ params_unique d_unique[ALL_POINTS];
// KERNEL CODE
#include "kernel.cu"
// WRITE DATA FUNCTION
void write_data( char* filename,
int frameNo,
int frames_processed,
int endoPoints,
int* input_a,
int* input_b,
int epiPoints,
int* input_2a,
int* input_2b){
//================================================================================80
// VARIABLES
//================================================================================80
FILE* fid;
int i,j;
char c;
//================================================================================80
// OPEN FILE FOR READING
//================================================================================80
fid = fopen(filename, "w+");
if( fid == NULL ){
printf( "The file was not opened for writing\n" );
return;
}
//================================================================================80
// WRITE VALUES TO THE FILE
//================================================================================80
fprintf(fid, "Total AVI Frames: %d\n", frameNo);
fprintf(fid, "Frames Processed: %d\n", frames_processed);
fprintf(fid, "endoPoints: %d\n", endoPoints);
fprintf(fid, "epiPoints: %d", epiPoints);
for(j=0; j<frames_processed;j++)
{
fprintf(fid, "\n---Frame %d---",j);
fprintf(fid, "\n--endo--\n",j);
for(i=0; i<endoPoints; i++){
fprintf(fid, "%d\t", input_a[j+i*frameNo]);
}
fprintf(fid, "\n");
for(i=0; i<endoPoints; i++){
// if(input_b[j*size+i] > 2000) input_b[j*size+i]=0;
fprintf(fid, "%d\t", input_b[j+i*frameNo]);
}
fprintf(fid, "\n--epi--\n",j);
for(i=0; i<epiPoints; i++){
//if(input_2a[j*size_2+i] > 2000) input_2a[j*size_2+i]=0;
fprintf(fid, "%d\t", input_2a[j+i*frameNo]);
}
fprintf(fid, "\n");
for(i=0; i<epiPoints; i++){
//if(input_2b[j*size_2+i] > 2000) input_2b[j*size_2+i]=0;
fprintf(fid, "%d\t", input_2b[j+i*frameNo]);
}
}
// ================================================================================80
// CLOSE FILE
// ================================================================================80
fclose(fid);
}
int main(int argc, char *argv []){
printf("WG size of kernel = %d \n", NUMBER_THREADS);
// CUDA kernel execution parameters
dim3 threads;
dim3 blocks;
// counter
int i;
int frames_processed;
// frames
char* video_file_name;
avi_t* frames;
fp* frame;
if(argc!=3){
printf("ERROR: usage: heartwall <inputfile> <num of frames>\n");
exit(1);
}
// open movie file
video_file_name = argv[1];
frames = (avi_t*)AVI_open_input_file(video_file_name, 1); // added casting
if (frames == NULL) {
AVI_print_error((char *) "Error with AVI_open_input_file");
return -1;
}
// common
common.no_frames = AVI_video_frames(frames);
common.frame_rows = AVI_video_height(frames);
common.frame_cols = AVI_video_width(frames);
common.frame_elem = common.frame_rows * common.frame_cols;
common.frame_mem = sizeof(fp) * common.frame_elem;
// pointers
cudaMalloc((void **)&common_change.d_frame, common.frame_mem);
// CHECK INPUT ARGUMENTS
frames_processed = atoi(argv[2]);
if(frames_processed<0 || frames_processed>common.no_frames){
printf("ERROR: %d is an incorrect number of frames specified, select in the range of 0-%d\n", frames_processed, common.no_frames);
return 0;
}
// HARDCODED INPUTS FROM MATLAB
//====================================================================================================
// CONSTANTS
//====================================================================================================
common.sSize = 40;
common.tSize = 25;
common.maxMove = 10;
common.alpha = 0.87;
//====================================================================================================
// ENDO POINTS
//====================================================================================================
common.endoPoints = ENDO_POINTS;
common.endo_mem = sizeof(int) * common.endoPoints;
common.endoRow = (int *)malloc(common.endo_mem);
common.endoRow[ 0] = 369;
common.endoRow[ 1] = 400;
common.endoRow[ 2] = 429;
common.endoRow[ 3] = 452;
common.endoRow[ 4] = 476;
common.endoRow[ 5] = 486;
common.endoRow[ 6] = 479;
common.endoRow[ 7] = 458;
common.endoRow[ 8] = 433;
common.endoRow[ 9] = 404;
common.endoRow[10] = 374;
common.endoRow[11] = 346;
common.endoRow[12] = 318;
common.endoRow[13] = 294;
common.endoRow[14] = 277;
common.endoRow[15] = 269;
common.endoRow[16] = 275;
common.endoRow[17] = 287;
common.endoRow[18] = 311;
common.endoRow[19] = 339;
cudaMalloc((void **)&common.d_endoRow, common.endo_mem);
cudaMemcpy(common.d_endoRow, common.endoRow, common.endo_mem, cudaMemcpyHostToDevice);
common.endoCol = (int *)malloc(common.endo_mem);
common.endoCol[ 0] = 408;
common.endoCol[ 1] = 406;
common.endoCol[ 2] = 397;
common.endoCol[ 3] = 383;
common.endoCol[ 4] = 354;
common.endoCol[ 5] = 322;
common.endoCol[ 6] = 294;
common.endoCol[ 7] = 270;
common.endoCol[ 8] = 250;
common.endoCol[ 9] = 237;
common.endoCol[10] = 235;
common.endoCol[11] = 241;
common.endoCol[12] = 254;
common.endoCol[13] = 273;
common.endoCol[14] = 300;
common.endoCol[15] = 328;
common.endoCol[16] = 356;
common.endoCol[17] = 383;
common.endoCol[18] = 401;
common.endoCol[19] = 411;
cudaMalloc((void **)&common.d_endoCol, common.endo_mem);
cudaMemcpy(common.d_endoCol, common.endoCol, common.endo_mem, cudaMemcpyHostToDevice);
common.tEndoRowLoc = (int *)malloc(common.endo_mem * common.no_frames);
cudaMalloc((void **)&common.d_tEndoRowLoc, common.endo_mem * common.no_frames);
common.tEndoColLoc = (int *)malloc(common.endo_mem * common.no_frames);
cudaMalloc((void **)&common.d_tEndoColLoc, common.endo_mem * common.no_frames);
//====================================================================================================
// EPI POINTS
//====================================================================================================
common.epiPoints = EPI_POINTS;
common.epi_mem = sizeof(int) * common.epiPoints;
common.epiRow = (int *)malloc(common.epi_mem);
common.epiRow[ 0] = 390;
common.epiRow[ 1] = 419;
common.epiRow[ 2] = 448;
common.epiRow[ 3] = 474;
common.epiRow[ 4] = 501;
common.epiRow[ 5] = 519;
common.epiRow[ 6] = 535;
common.epiRow[ 7] = 542;
common.epiRow[ 8] = 543;
common.epiRow[ 9] = 538;
common.epiRow[10] = 528;
common.epiRow[11] = 511;
common.epiRow[12] = 491;
common.epiRow[13] = 466;
common.epiRow[14] = 438;
common.epiRow[15] = 406;
common.epiRow[16] = 376;
common.epiRow[17] = 347;
common.epiRow[18] = 318;
common.epiRow[19] = 291;
common.epiRow[20] = 275;
common.epiRow[21] = 259;
common.epiRow[22] = 256;
common.epiRow[23] = 252;
common.epiRow[24] = 252;
common.epiRow[25] = 257;
common.epiRow[26] = 266;
common.epiRow[27] = 283;
common.epiRow[28] = 305;
common.epiRow[29] = 331;
common.epiRow[30] = 360;
cudaMalloc((void **)&common.d_epiRow, common.epi_mem);
cudaMemcpy(common.d_epiRow, common.epiRow, common.epi_mem, cudaMemcpyHostToDevice);
common.epiCol = (int *)malloc(common.epi_mem);
common.epiCol[ 0] = 457;
common.epiCol[ 1] = 454;
common.epiCol[ 2] = 446;
common.epiCol[ 3] = 431;
common.epiCol[ 4] = 411;
common.epiCol[ 5] = 388;
common.epiCol[ 6] = 361;
common.epiCol[ 7] = 331;
common.epiCol[ 8] = 301;
common.epiCol[ 9] = 273;
common.epiCol[10] = 243;
common.epiCol[11] = 218;
common.epiCol[12] = 196;
common.epiCol[13] = 178;
common.epiCol[14] = 166;
common.epiCol[15] = 157;
common.epiCol[16] = 155;
common.epiCol[17] = 165;
common.epiCol[18] = 177;
common.epiCol[19] = 197;
common.epiCol[20] = 218;
common.epiCol[21] = 248;
common.epiCol[22] = 276;
common.epiCol[23] = 304;
common.epiCol[24] = 333;
common.epiCol[25] = 361;
common.epiCol[26] = 391;
common.epiCol[27] = 415;
common.epiCol[28] = 434;
common.epiCol[29] = 448;
common.epiCol[30] = 455;
cudaMalloc((void **)&common.d_epiCol, common.epi_mem);
cudaMemcpy(common.d_epiCol, common.epiCol, common.epi_mem, cudaMemcpyHostToDevice);
common.tEpiRowLoc = (int *)malloc(common.epi_mem * common.no_frames);
cudaMalloc((void **)&common.d_tEpiRowLoc, common.epi_mem * common.no_frames);
common.tEpiColLoc = (int *)malloc(common.epi_mem * common.no_frames);
cudaMalloc((void **)&common.d_tEpiColLoc, common.epi_mem * common.no_frames);
//====================================================================================================
// ALL POINTS
//====================================================================================================
common.allPoints = ALL_POINTS;
// TEMPLATE SIZES
// common
common.in_rows = common.tSize + 1 + common.tSize;
common.in_cols = common.in_rows;
common.in_elem = common.in_rows * common.in_cols;
common.in_mem = sizeof(fp) * common.in_elem;
// CREATE ARRAY OF TEMPLATES FOR ALL POINTS
// common
cudaMalloc((void **)&common.d_endoT, common.in_mem * common.endoPoints);
cudaMalloc((void **)&common.d_epiT, common.in_mem * common.epiPoints);
// SPECIFIC TO ENDO OR EPI TO BE SET HERE
for(i=0; i<common.endoPoints; i++){
unique[i].point_no = i;
unique[i].d_Row = common.d_endoRow;
unique[i].d_Col = common.d_endoCol;
unique[i].d_tRowLoc = common.d_tEndoRowLoc;
unique[i].d_tColLoc = common.d_tEndoColLoc;
unique[i].d_T = common.d_endoT;
}
for(i=common.endoPoints; i<common.allPoints; i++){
unique[i].point_no = i-common.endoPoints;
unique[i].d_Row = common.d_epiRow;
unique[i].d_Col = common.d_epiCol;
unique[i].d_tRowLoc = common.d_tEpiRowLoc;
unique[i].d_tColLoc = common.d_tEpiColLoc;
unique[i].d_T = common.d_epiT;
}
// RIGHT TEMPLATE FROM TEMPLATE ARRAY
// pointers
for(i=0; i<common.allPoints; i++){
unique[i].in_pointer = unique[i].point_no * common.in_elem;
}
// AREA AROUND POINT FROM FRAME
// common
common.in2_rows = 2 * common.sSize + 1;
common.in2_cols = 2 * common.sSize + 1;
common.in2_elem = common.in2_rows * common.in2_cols;
common.in2_mem = sizeof(float) * common.in2_elem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in2, common.in2_mem);
}
// CONVOLUTION
// common
common.conv_rows = common.in_rows + common.in2_rows - 1;// number of rows in I
common.conv_cols = common.in_cols + common.in2_cols - 1;// number of columns in I
common.conv_elem = common.conv_rows * common.conv_cols; // number of elements
common.conv_mem = sizeof(float) * common.conv_elem;
common.ioffset = 0;
common.joffset = 0;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_conv, common.conv_mem);
}
// CUMULATIVE SUM
//====================================================================================================
// PADDING OF ARRAY, VERTICAL CUMULATIVE SUM
//====================================================================================================
// common
common.in2_pad_add_rows = common.in_rows;
common.in2_pad_add_cols = common.in_cols;
common.in2_pad_cumv_rows = common.in2_rows + 2*common.in2_pad_add_rows;
common.in2_pad_cumv_cols = common.in2_cols + 2*common.in2_pad_add_cols;
common.in2_pad_cumv_elem = common.in2_pad_cumv_rows * common.in2_pad_cumv_cols;
common.in2_pad_cumv_mem = sizeof(float) * common.in2_pad_cumv_elem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in2_pad_cumv, common.in2_pad_cumv_mem);
}
//====================================================================================================
// SELECTION
//====================================================================================================
// common
common.in2_pad_cumv_sel_rowlow = 1 + common.in_rows; // (1 to n+1)
common.in2_pad_cumv_sel_rowhig = common.in2_pad_cumv_rows - 1;
common.in2_pad_cumv_sel_collow = 1;
common.in2_pad_cumv_sel_colhig = common.in2_pad_cumv_cols;
common.in2_pad_cumv_sel_rows = common.in2_pad_cumv_sel_rowhig - common.in2_pad_cumv_sel_rowlow + 1;
common.in2_pad_cumv_sel_cols = common.in2_pad_cumv_sel_colhig - common.in2_pad_cumv_sel_collow + 1;
common.in2_pad_cumv_sel_elem = common.in2_pad_cumv_sel_rows * common.in2_pad_cumv_sel_cols;
common.in2_pad_cumv_sel_mem = sizeof(float) * common.in2_pad_cumv_sel_elem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in2_pad_cumv_sel, common.in2_pad_cumv_sel_mem);
}
//====================================================================================================
// SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM
//====================================================================================================
// common
common.in2_pad_cumv_sel2_rowlow = 1;
common.in2_pad_cumv_sel2_rowhig = common.in2_pad_cumv_rows - common.in_rows - 1;
common.in2_pad_cumv_sel2_collow = 1;
common.in2_pad_cumv_sel2_colhig = common.in2_pad_cumv_cols;
common.in2_sub_cumh_rows = common.in2_pad_cumv_sel2_rowhig - common.in2_pad_cumv_sel2_rowlow + 1;
common.in2_sub_cumh_cols = common.in2_pad_cumv_sel2_colhig - common.in2_pad_cumv_sel2_collow + 1;
common.in2_sub_cumh_elem = common.in2_sub_cumh_rows * common.in2_sub_cumh_cols;
common.in2_sub_cumh_mem = sizeof(float) * common.in2_sub_cumh_elem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in2_sub_cumh, common.in2_sub_cumh_mem);
}
//====================================================================================================
// SELECTION
//====================================================================================================
// common
common.in2_sub_cumh_sel_rowlow = 1;
common.in2_sub_cumh_sel_rowhig = common.in2_sub_cumh_rows;
common.in2_sub_cumh_sel_collow = 1 + common.in_cols;
common.in2_sub_cumh_sel_colhig = common.in2_sub_cumh_cols - 1;
common.in2_sub_cumh_sel_rows = common.in2_sub_cumh_sel_rowhig - common.in2_sub_cumh_sel_rowlow + 1;
common.in2_sub_cumh_sel_cols = common.in2_sub_cumh_sel_colhig - common.in2_sub_cumh_sel_collow + 1;
common.in2_sub_cumh_sel_elem = common.in2_sub_cumh_sel_rows * common.in2_sub_cumh_sel_cols;
common.in2_sub_cumh_sel_mem = sizeof(float) * common.in2_sub_cumh_sel_elem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in2_sub_cumh_sel, common.in2_sub_cumh_sel_mem);
}
//====================================================================================================
// SELECTION 2, SUBTRACTION
//====================================================================================================
// common
common.in2_sub_cumh_sel2_rowlow = 1;
common.in2_sub_cumh_sel2_rowhig = common.in2_sub_cumh_rows;
common.in2_sub_cumh_sel2_collow = 1;
common.in2_sub_cumh_sel2_colhig = common.in2_sub_cumh_cols - common.in_cols - 1;
common.in2_sub2_rows = common.in2_sub_cumh_sel2_rowhig - common.in2_sub_cumh_sel2_rowlow + 1;
common.in2_sub2_cols = common.in2_sub_cumh_sel2_colhig - common.in2_sub_cumh_sel2_collow + 1;
common.in2_sub2_elem = common.in2_sub2_rows * common.in2_sub2_cols;
common.in2_sub2_mem = sizeof(float) * common.in2_sub2_elem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in2_sub2, common.in2_sub2_mem);
}
// CUMULATIVE SUM 2
//====================================================================================================
// MULTIPLICATION
//====================================================================================================
// common
common.in2_sqr_rows = common.in2_rows;
common.in2_sqr_cols = common.in2_cols;
common.in2_sqr_elem = common.in2_elem;
common.in2_sqr_mem = common.in2_mem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in2_sqr, common.in2_sqr_mem);
}
//====================================================================================================
// SELECTION 2, SUBTRACTION
//====================================================================================================
// common
common.in2_sqr_sub2_rows = common.in2_sub2_rows;
common.in2_sqr_sub2_cols = common.in2_sub2_cols;
common.in2_sqr_sub2_elem = common.in2_sub2_elem;
common.in2_sqr_sub2_mem = common.in2_sub2_mem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in2_sqr_sub2, common.in2_sqr_sub2_mem);
}
// FINAL
// common
common.in_sqr_rows = common.in_rows;
common.in_sqr_cols = common.in_cols;
common.in_sqr_elem = common.in_elem;
common.in_sqr_mem = common.in_mem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in_sqr, common.in_sqr_mem);
}
// TEMPLATE MASK CREATE
// common
common.tMask_rows = common.in_rows + (common.sSize+1+common.sSize) - 1;
common.tMask_cols = common.tMask_rows;
common.tMask_elem = common.tMask_rows * common.tMask_cols;
common.tMask_mem = sizeof(float) * common.tMask_elem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_tMask, common.tMask_mem);
}
// POINT MASK INITIALIZE
// common
common.mask_rows = common.maxMove;
common.mask_cols = common.mask_rows;
common.mask_elem = common.mask_rows * common.mask_cols;
common.mask_mem = sizeof(float) * common.mask_elem;
// MASK CONVOLUTION
// common
common.mask_conv_rows = common.tMask_rows; // number of rows in I
common.mask_conv_cols = common.tMask_cols; // number of columns in I
common.mask_conv_elem = common.mask_conv_rows * common.mask_conv_cols;// number of elements
common.mask_conv_mem = sizeof(float) * common.mask_conv_elem;
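// offsets used to centre the mask within the convolution output (rounded up when the mask dimension minus one is odd)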
common.mask_conv_ioffset = (common.mask_rows-1)/2;
if((common.mask_rows-1) % 2 > 0.5){
common.mask_conv_ioffset = common.mask_conv_ioffset + 1;
}
common.mask_conv_joffset = (common.mask_cols-1)/2;
if((common.mask_cols-1) % 2 > 0.5){
common.mask_conv_joffset = common.mask_conv_joffset + 1;
}
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_mask_conv, common.mask_conv_mem);
}
// KERNEL
//====================================================================================================
// THREAD BLOCK
//====================================================================================================
// All operations within the kernel use the same maximum number of threads. The block size is set to the size appropriate for the largest operation (on the padded matrix); the other operations use subsets of that.
threads.x = NUMBER_THREADS; // define the number of threads in the block
threads.y = 1;
blocks.x = common.allPoints; // define the number of blocks in the grid
blocks.y = 1;
//====================================================================================================
// COPY ARGUMENTS
//====================================================================================================
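// copy the filled-in parameter structures to their device-side symbols (d_common, d_unique) so the kernel, which takes no arguments, can read them directly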
cudaMemcpyToSymbol(d_common, &common, sizeof(params_common));
cudaMemcpyToSymbol(d_unique, &unique, sizeof(params_unique)*ALL_POINTS);
//====================================================================================================
// PRINT FRAME PROGRESS START
//====================================================================================================
printf("frame progress: ");
fflush(NULL);
//====================================================================================================
// LAUNCH
//====================================================================================================
for(common_change.frame_no=0; common_change.frame_no<frames_processed; common_change.frame_no++){
// Extract a cropped version of the first frame from the video file
frame = get_frame( frames, // pointer to video file
common_change.frame_no, // number of frame that needs to be returned
0, // cropped?
0, // scaled?
1); // converted
// copy frame to GPU memory
cudaMemcpy(common_change.d_frame, frame, common.frame_mem, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_common_change, &common_change, sizeof(params_common_change));
// launch GPU kernel
kernel<<<blocks, threads>>>();
// free frame after each loop iteration, since AVI library allocates memory for every frame fetched
free(frame);
// print frame progress
printf("%d ", common_change.frame_no);
fflush(NULL);
}
//====================================================================================================
// PRINT FRAME PROGRESS END
//====================================================================================================
printf("\n");
fflush(NULL);
//====================================================================================================
// OUTPUT
//====================================================================================================
cudaMemcpy(common.tEndoRowLoc, common.d_tEndoRowLoc, common.endo_mem * common.no_frames, cudaMemcpyDeviceToHost);
cudaMemcpy(common.tEndoColLoc, common.d_tEndoColLoc, common.endo_mem * common.no_frames, cudaMemcpyDeviceToHost);
cudaMemcpy(common.tEpiRowLoc, common.d_tEpiRowLoc, common.epi_mem * common.no_frames, cudaMemcpyDeviceToHost);
cudaMemcpy(common.tEpiColLoc, common.d_tEpiColLoc, common.epi_mem * common.no_frames, cudaMemcpyDeviceToHost);
#ifdef OUTPUT
//==================================================50
// DUMP DATA TO FILE
//==================================================50
write_data( "result.txt",
common.no_frames,
frames_processed,
common.endoPoints,
common.tEndoRowLoc,
common.tEndoColLoc,
common.epiPoints,
common.tEpiRowLoc,
common.tEpiColLoc);
//==================================================50
// End
//==================================================50
#endif
// DEALLOCATION
//====================================================================================================
// COMMON
//====================================================================================================
// frame
cudaFree(common_change.d_frame);
// endo points
free(common.endoRow);
free(common.endoCol);
free(common.tEndoRowLoc);
free(common.tEndoColLoc);
cudaFree(common.d_endoRow);
cudaFree(common.d_endoCol);
cudaFree(common.d_tEndoRowLoc);
cudaFree(common.d_tEndoColLoc);
cudaFree(common.d_endoT);
// epi points
free(common.epiRow);
free(common.epiCol);
free(common.tEpiRowLoc);
free(common.tEpiColLoc);
cudaFree(common.d_epiRow);
cudaFree(common.d_epiCol);
cudaFree(common.d_tEpiRowLoc);
cudaFree(common.d_tEpiColLoc);
cudaFree(common.d_epiT);
//====================================================================================================
// POINTERS
//====================================================================================================
for(i=0; i<common.allPoints; i++){
cudaFree(unique[i].d_in2);
cudaFree(unique[i].d_conv);
cudaFree(unique[i].d_in2_pad_cumv);
cudaFree(unique[i].d_in2_pad_cumv_sel);
cudaFree(unique[i].d_in2_sub_cumh);
cudaFree(unique[i].d_in2_sub_cumh_sel);
cudaFree(unique[i].d_in2_sub2);
cudaFree(unique[i].d_in2_sqr);
cudaFree(unique[i].d_in2_sqr_sub2);
cudaFree(unique[i].d_in_sqr);
cudaFree(unique[i].d_tMask);
cudaFree(unique[i].d_mask_conv);
}
}
|
451980d5faa4cf80d7ce8249a6e028ddfbb0d8d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdafx.h"
#include "PipeWater.h"
#include "shader.h"
#include "camera.h"
#include "Pipeline.h"
#include "utilities.h"
#include "Renderer.h"
#include "CAMesh.h" // voxel mesh
#include "CommonDevice.cuh" // helpers, cudaCheck
#include "cuda_gl_interop.h"// gl interoperability
#include <map> // texture print
template class PipeWater<100, 1, 100>;
template class PipeWater<500, 1, 500>;
template class PipeWater<2000, 1, 2000>;
// texture storing the depth information
surface<void, 2> surfRef;
/*################################################################
##################################################################
KERNEL CODE
##################################################################
################################################################*/
// prints how many times each unique element of the texture appears in it
void printTex(int x, int y, GLuint texID)
{
int numElements = x * y;
float* data = new float[numElements];
glBindTexture(GL_TEXTURE_2D, texID);
glGetTexImage(GL_TEXTURE_2D, 0, GL_RED, GL_FLOAT, data);
glBindTexture(GL_TEXTURE_2D, 0);
std::map<float, int> unique;
for (int i = 0; i < numElements; i++)
{
unique[data[i]]++;
}
// print how many times f.first appears
for (auto f : unique)
std::cout << f.first << " :: " << f.second << '\n';
delete[] data;
}
// makes a splash
template<int X, int Y, int Z>
__global__ static void perturbGrid(SplashArgs args)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int n = X * Y * Z;
for (int i = index; i < n; i += stride)
{
glm::ivec2 tp = expand<X>(i);
float h = 0;
surf2Dread(&h, surfRef, tp.x * sizeof(float), tp.y);
// DHM
// y = Ae^(-bt)
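// here t is the cell's distance from the splash centre, so the added height falls off exponentially with distance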
float t = glm::distance({ tp.x, tp.y }, args.pos);
h += args.A * glm::pow(glm::e<float>(), -args.b * t);
surf2Dwrite(h, surfRef, tp.x * sizeof(float), tp.y);
}
}
// throttles pipes that would cause an update to lower water depth below 0
// TODO: this function is broken and not super necessary
template<int X, int Y, int Z>
__global__ static void clampGridPipes(Pipe* hPGrid, Pipe* vPGrid, float dt)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int n = X * Y * Z;
for (int i = index; i < n; i += stride)
{
glm::ivec2 tp = expand<X>(i);
float depth;
surf2Dread(&depth, surfRef, tp.x * sizeof(float), tp.y);
float sumflow = 0;
float flows[4];
flows[0] = hPGrid[flatten<X + 1>({ tp.y, tp.x })].flow; // flow from left +
flows[1] = hPGrid[flatten<X + 1>({ tp.y + 1, tp.x })].flow; // flow to right -
flows[2] = vPGrid[flatten<Z + 1>({ tp.x, tp.y })].flow; // flow from below +
flows[3] = vPGrid[flatten<Z + 1>({ tp.x + 1, tp.y })].flow; // flow to top -
sumflow += flows[0];
sumflow -= flows[1];
sumflow += flows[2];
sumflow -= flows[3];
float finalDepth = depth + (sumflow * -dt);
if (finalDepth < 0)
{
float scalar = depth / (sumflow * -dt);
if (fabs(scalar) > 1)
{
//printf("meme: %.3f\n", scalar);
//continue;
}
if (flows[0] < 0)
{
//printf("divisor: %.3f flow0: %.3f\n", divisor);
hPGrid[flatten<X + 1>({ tp.y, tp.x })].flow = flows[0] * scalar;
}
if (flows[1] > 0)
{
hPGrid[flatten<X + 1>({ tp.y + 1, tp.x })].flow = flows[1] * scalar;
}
if (flows[2] < 0)
{
vPGrid[flatten<Z + 1>({ tp.x, tp.y })].flow = flows[2] * scalar;
}
if (flows[3] > 0)
{
vPGrid[flatten<Z + 1>({ tp.x + 1, tp.y })].flow = flows[3] * scalar;
}
}
}
}
// update the height of each water cell based on its current height and the flow
// to and from neighboring pipes
template<int X, int Y, int Z>
__global__ static void updateGridWater(Pipe* hPGrid, Pipe* vPGrid, float dt)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int n = X * Y * Z;
for (int i = index; i < n; i += stride)
{
glm::ivec2 tp = expand<X>(i);
float depth;// = grid[i].depth;
surf2Dread(&depth, surfRef, tp.x * sizeof(float), tp.y);
// d += -dt*(SUM(Q)/(dx)^2)
// add to depth flow of adjacent pipes
float sumflow = 0;
// LEFT TO RIGHT FLOW
// tp.y is Z POSITION (vec2 constraint)
// add flow from left INTO this cell
// (vec2->pipe)
sumflow += hPGrid[flatten<X + 1>({ tp.y, tp.x })].flow; // flow from left
sumflow -= hPGrid[flatten<X + 1>({ tp.y + 1, tp.x })].flow; // flow to right
sumflow += vPGrid[flatten<Z + 1>({ tp.x, tp.y })].flow; // flow from below
sumflow -= vPGrid[flatten<Z + 1>({ tp.x + 1, tp.y })].flow; // flow to top
surf2Dwrite(depth + (sumflow * -dt), surfRef, tp.x * sizeof(float), tp.y);
}
}
// traverse the pipes that deliver water horizontally and update them based
// on height of neighboring water cells to left and right
template<int X, int Y, int Z>
__global__ static void updateHPipes(Pipe* hPGrid, PipeUpdateArgs args)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int n = (X+1) * (Z);
for (int i = index; i < n; i += stride)
{
float flow = hPGrid[i].flow;
// PIPE GRID ACCESS (X + 1) (pipe->vec2)
glm::ivec2 pipePos = expand<X+1>(i);
swap(pipePos.x, pipePos.y); // confusion
if (pipePos.x == 0 || pipePos.x == X)
{
hPGrid[i].flow = 0;
continue;
}
/*
0 1 2 <-- PIPE INDEX
| 0 | 1 | <-- CELL INDEX
This is why we need to do pipePos - { 1, 0 } to get the left cell,
but not for the right cell.
*/
// (vec2->normal!) USE NORMAL GRID INDEX
float leftHeight;
float rightHeight;
surf2Dread(&leftHeight, surfRef, pipePos.y * sizeof(float), pipePos.x - 1);
surf2Dread(&rightHeight, surfRef, pipePos.y * sizeof(float), pipePos.x);
// A = cross section
// A = d (w/ line above) * dx # OPTIONAL
// d (w/ line above) = upwind depth # OPTIONAL
// dh = surface height difference
// dh_(x+.5,y) = h_(x+1,y) - h_(x,y)
// dt = optional scalar
// Q += A*(g/dx)*dh*dt
float A = 1;
float g = 9.8;
float dt = .125;
float dx = 1; // CONSTANT (length of pipe)
float dh = rightHeight - leftHeight; // diff left->right
// flow from left to right
//thPGrid[i].flow = flow + (A * (g / dx) * dh * dt);
hPGrid[i].flow = flow + (A * (args.g / args.dx) * dh) * args.dt;
}
}
// traverse the pipes that deliver water vertically and update them based
// on height of neighboring water cells to top and bottom
template<int X, int Y, int Z>
__global__ static void updateVPipes(Pipe* vPGrid, PipeUpdateArgs args)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int n = (X) * (Z+1);
for (int i = index; i < n; i += stride)
{
float flow = vPGrid[i].flow;
glm::ivec2 pipePos = expand<Z + 1>(i);
//swap(pipePos.x, pipePos.y); // confusion
if (pipePos.y == 0 || pipePos.y == Z)
{
vPGrid[i].flow = 0;
continue;
}
float downheight;
float upheight;
//surf2Dread(&downheight, surfRef, pipePos.x * sizeof(float), pipePos.y - 1);
//surf2Dread(&upheight, surfRef, pipePos.x * sizeof(float), pipePos.y);
surf2Dread(&downheight, surfRef, (pipePos.y -1) * sizeof(float), pipePos.x );
surf2Dread(&upheight, surfRef, pipePos.y * sizeof(float), pipePos.x);
float A = 1;
float g = 9.8;
float dt = .125;
float dx = 1;
float dh = upheight - downheight;
//tvPGrid[i].flow = flow + (A * (g / dx) * dh * dt);
vPGrid[i].flow = flow + (A * (args.g / args.dx) * dh) * args.dt;
}
}
/*################################################################
##################################################################
END KERNEL CODE
##################################################################
################################################################*/
template<int X, int Y, int Z>
PipeWater<X, Y, Z>::PipeWater()
{
cudaCheck(hipMallocManaged(&hPGrid, (X + 1) * (Z) * sizeof(Pipe)));
cudaCheck(hipMallocManaged(&vPGrid, (X) * (Z + 1) * sizeof(Pipe)));
cudaCheck(hipMallocManaged(&temphPGrid, (X + 1) * (Z) * sizeof(Pipe)));
cudaCheck(hipMallocManaged(&tempvPGrid, (X) * (Z + 1) * sizeof(Pipe)));
}
template<int X, int Y, int Z>
PipeWater<X, Y, Z>::~PipeWater()
{
//cudaCheck(hipGraphicsUnregisterResource(imageResource));
}
#define _USE_MATH_DEFINES
#include <math.h>
template<int X, int Y, int Z>
void PipeWater<X, Y, Z>::Init()
{
initDepthTex();
// reset flow of
for (int i = 0; i < (X + 1) * Z; i++)
hPGrid[i].flow = 0;
for (int i = 0; i < (Z + 1) * X; i++)
vPGrid[i].flow = 0;
}
template<int X, int Y, int Z>
void PipeWater<X, Y, Z>::Update()
{
cudaCheck(hipGraphicsMapResources(1, &imageResource, 0));
cudaCheck(hipGraphicsSubResourceGetMappedArray(&arr, imageResource, 0, 0));
cudaCheck(hipBindSurfaceToArray(surfRef, arr));
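// the GL height texture is now accessible through surfRef for the kernels launched below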
// update pipes' flow
hipLaunchKernelGGL(( updateHPipes<X, Y, Z>), dim3(hPNumBlocks), dim3(PBlockSize), 0, 0, hPGrid, args);
hipLaunchKernelGGL(( updateVPipes<X, Y, Z>), dim3(vPNumBlocks), dim3(PBlockSize), 0, 0, vPGrid, args);
hipDeviceSynchronize();
//clampGridPipes<X, Y, Z><<<numBlocks, blockSize>>>(hPGrid, vPGrid, args.dt);
//hipDeviceSynchronize();
// update water depth
hipLaunchKernelGGL(( updateGridWater<X, Y, Z>), dim3(numBlocks), dim3(blockSize), 0, 0, hPGrid, vPGrid, args.dt);
hipDeviceSynchronize();
cudaCheck(hipGraphicsUnmapResources(1, &imageResource, 0));
}
template<int X, int Y, int Z>
void PipeWater<X, Y, Z>::Render()
{
//ShaderPtr sr = Shader::shaders["flatPhong"];
ShaderPtr sr = Shader::shaders["heightWater"];
sr->Use();
glm::mat4 model(1);
model = glm::translate(model, glm::vec3(150, 40, 80));
model = glm::scale(model, glm::vec3(.01, .01, .01));
sr->setMat4("u_proj", Renderer::GetPipeline()->GetCamera(0)->GetProj());
sr->setMat4("u_view", Renderer::GetPipeline()->GetCamera(0)->GetView());
sr->setMat4("u_model", model);
//sr->setVec3("u_color", { .2, .7, .9 });
sr->setVec3("u_viewpos", Renderer::GetPipeline()->GetCamera(0)->GetPos());
sr->setVec3("sun.ambient", { .1, .1, .1 });
sr->setVec3("sun.diffuse", { .8, .8, .8 });
sr->setVec3("sun.specular", { .8, .8, .8 });
sr->setVec3("sun.direction", { 0, -1, 0 });
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, HeightTex);
sr->setInt("heightTex", 0);
glDisable(GL_CULL_FACE);
pVao->Bind();
glDrawElements(GL_TRIANGLES, indices.size(), GL_UNSIGNED_INT, nullptr);
pVao->Unbind();
glEnable(GL_CULL_FACE);
{
ImGui::Begin("Piped Water Simulation");
ImGui::Text("Dimensions: X = %d, Z = %d", X, Z);
ImGui::Separator();
ImGui::Text("Changing settings may lead \n to explosive results");
ImGui::SliderFloat("dt", &args.dt, 0, 1, "%.2f s");
ImGui::SliderFloat("dx", &args.dx, 0, 5, "%.2f m");
ImGui::SliderFloat("g", &args.g, 0, 50, "%.2f m/s^2");
ImGui::Checkbox("Calculate Normals", &calcNormals);
if (ImGui::Button("Splash water"))
{
cudaCheck(hipGraphicsMapResources(1, &imageResource, 0));
cudaCheck(hipGraphicsSubResourceGetMappedArray(&arr, imageResource, 0, 0));
cudaCheck(hipBindSurfaceToArray(surfRef, arr));
hipLaunchKernelGGL(( perturbGrid<X, Y, Z>), dim3(numBlocks), dim3(blockSize), 0, 0, splash);
//for (int i = 0; i < 10; i++)
//{
// perturbGrid<X, Y, Z> << <numBlocks, blockSize >> > (splashLoc + glm::ivec2(i * 25, i * 25));
//}
cudaCheck(hipGraphicsUnmapResources(1, &imageResource, 0));
}
if (ImGui::Button("Random Splash"))
{
SplashArgs sp = splash;
sp.pos = { Utils::get_random(0, X), Utils::get_random(0, Z) };
cudaCheck(hipGraphicsMapResources(1, &imageResource, 0));
cudaCheck(hipGraphicsSubResourceGetMappedArray(&arr, imageResource, 0, 0));
cudaCheck(hipBindSurfaceToArray(surfRef, arr));
hipLaunchKernelGGL(( perturbGrid<X, Y, Z>), dim3(numBlocks), dim3(blockSize), 0, 0, sp);
cudaCheck(hipGraphicsUnmapResources(1, &imageResource, 0));
}
ImGui::Separator();
ImGui::Text("Splash Settings");
ImGui::InputFloat2("Location", &splash.pos[0]);
ImGui::InputFloat("Amplitude", &splash.A);
ImGui::InputFloat("Falloff", &splash.b);
//float sum = 0;
//for (int i = 0; i < X * Y * Z; i++)
// sum += this->Grid[i].depth;
//ImGui::Text("Sum of water: %.2f", sum);
//ImGui::Text("Avg height: %.2f", sum / (X * Y * Z));
ImGui::End();
}
//printTex(X, Z, HeightTex);
}
template<int X, int Y, int Z>
void PipeWater<X, Y, Z>::genMesh()
{
delete this->mesh_;
std::vector<Vertex> vertices;
std::vector<GLuint> indices;
auto skip = [](const WaterCell& elem)->bool
{
return elem.depth == 0;
};
auto height = [](const WaterCell& elem)->float
{
return elem.depth;
};
mesh_ = GenVoxelMesh(this->Grid, X, Y, Z, skip, height);
}
template<int X, int Y, int Z>
void PipeWater<X, Y, Z>::initDepthTex()
{
vertices2d.clear();
indices.clear();
//vertices2d = std::vector<glm::vec2>(X * Z * 2, glm::vec2(0)); // num cells * attributes (pos + normal)
vertices2d.reserve(X * Z * 2);
indices.reserve((X - 1) * (Z - 1) * 2 * 3); // num cells * num tris per cell * num verts per tri
for (int x = 0; x < X; x++)
{
for (int z = 0; z < Z; z++)
{
glm::dvec2 p(x, z);
glm::dvec2 P(X, Z);
vertices2d.push_back(p); // pos xz
//vertices2d.push_back({ float(x) / float(X), float(z) / float(Z) }); // texcoord
vertices2d.push_back(p / P); // texcoord
}
}
// init indices
for (int x = 0; x < X - 1; x++)
{
// for each cell
for (int z = 0; z < Z - 1; z++)
{
GLuint one = flatten<X>(glm::ivec2(x, z));
GLuint two = flatten<X>(glm::ivec2(x + 1, z));
GLuint three = flatten<X>(glm::ivec2(x + 1, z + 1));
GLuint four = flatten<X>(glm::ivec2(x, z + 1));
indices.push_back(one);
indices.push_back(two);
indices.push_back(three);
indices.push_back(one);
indices.push_back(three);
indices.push_back(four);
}
}
pVbo = new VBO(&vertices2d[0], vertices2d.size() * sizeof(glm::vec2), GL_DYNAMIC_DRAW);
VBOlayout layout;
layout.Push<float>(2); // pos xz
layout.Push<float>(2); // texCoord
pVao = new VAO();
pVao->AddBuffer(*pVbo, layout);
pIbo = new IBO(indices.data(), indices.size());
pVao->Unbind();
// Generate 2D texture with 1 float element
glGenTextures(1, &HeightTex);
glBindTexture(GL_TEXTURE_2D, HeightTex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32F, X, Z, 0, GL_RED, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glBindTexture(GL_TEXTURE_2D, 0);
GLfloat height = 0;
glClearTexImage(HeightTex, 0, GL_RED, GL_FLOAT, &height);
auto err = hipGraphicsGLRegisterImage(&imageResource, HeightTex, GL_TEXTURE_2D, hipGraphicsRegisterFlagsSurfaceLoadStore);
if (err != hipSuccess)
std::cout << "Error registering CUDA image: " << err << std::endl;
} | 451980d5faa4cf80d7ce8249a6e028ddfbb0d8d9.cu | #include "stdafx.h"
#include "PipeWater.h"
#include "shader.h"
#include "camera.h"
#include "Pipeline.h"
#include "utilities.h"
#include "Renderer.h"
#include "CAMesh.h" // voxel mesh
#include "CommonDevice.cuh" // helpers, cudaCheck
#include "cuda_gl_interop.h"// gl interoperability
#include <map> // texture print
template class PipeWater<100, 1, 100>;
template class PipeWater<500, 1, 500>;
template class PipeWater<2000, 1, 2000>;
// texture storing the depth information
surface<void, 2> surfRef;
/*################################################################
##################################################################
KERNEL CODE
##################################################################
################################################################*/
// prints how many times each unique element of the texture appears in it
void printTex(int x, int y, GLuint texID)
{
int numElements = x * y;
float* data = new float[numElements];
glBindTexture(GL_TEXTURE_2D, texID);
glGetTexImage(GL_TEXTURE_2D, 0, GL_RED, GL_FLOAT, data);
glBindTexture(GL_TEXTURE_2D, 0);
std::map<float, int> unique;
for (int i = 0; i < numElements; i++)
{
unique[data[i]]++;
}
// print how many times f.first appears
for (auto f : unique)
std::cout << f.first << " :: " << f.second << '\n';
delete[] data;
}
// makes a splash
template<int X, int Y, int Z>
__global__ static void perturbGrid(SplashArgs args)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int n = X * Y * Z;
for (int i = index; i < n; i += stride)
{
glm::ivec2 tp = expand<X>(i);
float h = 0;
surf2Dread(&h, surfRef, tp.x * sizeof(float), tp.y);
// DHM
// y = Ae^(-bt)
float t = glm::distance({ tp.x, tp.y }, args.pos);
h += args.A * glm::pow(glm::e<float>(), -args.b * t);
surf2Dwrite(h, surfRef, tp.x * sizeof(float), tp.y);
}
}
// throttles pipes that would cause an update to lower water depth below 0
// TODO: this function is broken and not super necessary
template<int X, int Y, int Z>
__global__ static void clampGridPipes(Pipe* hPGrid, Pipe* vPGrid, float dt)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int n = X * Y * Z;
for (int i = index; i < n; i += stride)
{
glm::ivec2 tp = expand<X>(i);
float depth;
surf2Dread(&depth, surfRef, tp.x * sizeof(float), tp.y);
float sumflow = 0;
float flows[4];
flows[0] = hPGrid[flatten<X + 1>({ tp.y, tp.x })].flow; // flow from left +
flows[1] = hPGrid[flatten<X + 1>({ tp.y + 1, tp.x })].flow; // flow to right -
flows[2] = vPGrid[flatten<Z + 1>({ tp.x, tp.y })].flow; // flow from below +
flows[3] = vPGrid[flatten<Z + 1>({ tp.x + 1, tp.y })].flow; // flow to top -
sumflow += flows[0];
sumflow -= flows[1];
sumflow += flows[2];
sumflow -= flows[3];
float finalDepth = depth + (sumflow * -dt);
if (finalDepth < 0)
{
float scalar = depth / (sumflow * -dt);
if (fabs(scalar) > 1)
{
//printf("meme: %.3f\n", scalar);
//continue;
}
if (flows[0] < 0)
{
//printf("divisor: %.3f flow0: %.3f\n", divisor);
hPGrid[flatten<X + 1>({ tp.y, tp.x })].flow = flows[0] * scalar;
}
if (flows[1] > 0)
{
hPGrid[flatten<X + 1>({ tp.y + 1, tp.x })].flow = flows[1] * scalar;
}
if (flows[2] < 0)
{
vPGrid[flatten<Z + 1>({ tp.x, tp.y })].flow = flows[2] * scalar;
}
if (flows[3] > 0)
{
vPGrid[flatten<Z + 1>({ tp.x + 1, tp.y })].flow = flows[3] * scalar;
}
}
}
}
// update the height of each water cell based on its current height and the flow
// to and from neighboring pipes
template<int X, int Y, int Z>
__global__ static void updateGridWater(Pipe* hPGrid, Pipe* vPGrid, float dt)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int n = X * Y * Z;
for (int i = index; i < n; i += stride)
{
glm::ivec2 tp = expand<X>(i);
float depth;// = grid[i].depth;
surf2Dread(&depth, surfRef, tp.x * sizeof(float), tp.y);
// d += -dt*(SUM(Q)/(dx)^2)
// add to depth flow of adjacent pipes
float sumflow = 0;
// LEFT TO RIGHT FLOW
// tp.y is Z POSITION (vec2 constraint)
// add flow from left INTO this cell
// (vec2->pipe)
sumflow += hPGrid[flatten<X + 1>({ tp.y, tp.x })].flow; // flow from left
sumflow -= hPGrid[flatten<X + 1>({ tp.y + 1, tp.x })].flow; // flow to right
sumflow += vPGrid[flatten<Z + 1>({ tp.x, tp.y })].flow; // flow from below
sumflow -= vPGrid[flatten<Z + 1>({ tp.x + 1, tp.y })].flow; // flow to top
surf2Dwrite(depth + (sumflow * -dt), surfRef, tp.x * sizeof(float), tp.y);
}
}
// traverse the pipes that deliver water horizontally and update them based
// on height of neighboring water cells to left and right
template<int X, int Y, int Z>
__global__ static void updateHPipes(Pipe* hPGrid, PipeUpdateArgs args)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int n = (X+1) * (Z);
for (int i = index; i < n; i += stride)
{
float flow = hPGrid[i].flow;
// PIPE GRID ACCESS (X + 1) (pipe->vec2)
glm::ivec2 pipePos = expand<X+1>(i);
swap(pipePos.x, pipePos.y); // confusion
if (pipePos.x == 0 || pipePos.x == X)
{
hPGrid[i].flow = 0;
continue;
}
/*
0 1 2 <-- PIPE INDEX
| 0 | 1 | <-- CELL INDEX
This is why we need to do pipePos - { 1, 0 } to get the left cell,
but not for the right cell.
*/
// (vec2->normal!) USE NORMAL GRID INDEX
float leftHeight;
float rightHeight;
surf2Dread(&leftHeight, surfRef, pipePos.y * sizeof(float), pipePos.x - 1);
surf2Dread(&rightHeight, surfRef, pipePos.y * sizeof(float), pipePos.x);
// A = cross section
// A = d (w/ line above) * dx # OPTIONAL
// d (w/ line above) = upwind depth # OPTIONAL
// dh = surface height difference
// dh_(x+.5,y) = h_(x+1,y) - h_(x,y)
// dt = optional scalar
// Q += A*(g/dx)*dh*dt
float A = 1;
float g = 9.8;
float dt = .125;
float dx = 1; // CONSTANT (length of pipe)
float dh = rightHeight - leftHeight; // diff left->right
// flow from left to right
//thPGrid[i].flow = flow + (A * (g / dx) * dh * dt);
hPGrid[i].flow = flow + (A * (args.g / args.dx) * dh) * args.dt;
}
}
// traverse the pipes that deliver water vertically and update them based
// on height of neighboring water cells to top and bottom
template<int X, int Y, int Z>
__global__ static void updateVPipes(Pipe* vPGrid, PipeUpdateArgs args)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int n = (X) * (Z+1);
for (int i = index; i < n; i += stride)
{
float flow = vPGrid[i].flow;
glm::ivec2 pipePos = expand<Z + 1>(i);
//swap(pipePos.x, pipePos.y); // confusion
if (pipePos.y == 0 || pipePos.y == Z)
{
vPGrid[i].flow = 0;
continue;
}
float downheight;
float upheight;
//surf2Dread(&downheight, surfRef, pipePos.x * sizeof(float), pipePos.y - 1);
//surf2Dread(&upheight, surfRef, pipePos.x * sizeof(float), pipePos.y);
surf2Dread(&downheight, surfRef, (pipePos.y -1) * sizeof(float), pipePos.x );
surf2Dread(&upheight, surfRef, pipePos.y * sizeof(float), pipePos.x);
float A = 1;
float g = 9.8;
float dt = .125;
float dx = 1;
float dh = upheight - downheight;
//tvPGrid[i].flow = flow + (A * (g / dx) * dh * dt);
vPGrid[i].flow = flow + (A * (args.g / args.dx) * dh) * args.dt;
}
}
/*################################################################
##################################################################
END KERNEL CODE
##################################################################
################################################################*/
template<int X, int Y, int Z>
PipeWater<X, Y, Z>::PipeWater()
{
cudaCheck(cudaMallocManaged(&hPGrid, (X + 1) * (Z) * sizeof(Pipe)));
cudaCheck(cudaMallocManaged(&vPGrid, (X) * (Z + 1) * sizeof(Pipe)));
cudaCheck(cudaMallocManaged(&temphPGrid, (X + 1) * (Z) * sizeof(Pipe)));
cudaCheck(cudaMallocManaged(&tempvPGrid, (X) * (Z + 1) * sizeof(Pipe)));
}
template<int X, int Y, int Z>
PipeWater<X, Y, Z>::~PipeWater()
{
//cudaCheck(cudaGraphicsUnregisterResource(imageResource));
}
#define _USE_MATH_DEFINES
#include <math.h>
template<int X, int Y, int Z>
void PipeWater<X, Y, Z>::Init()
{
initDepthTex();
// reset flow of
for (int i = 0; i < (X + 1) * Z; i++)
hPGrid[i].flow = 0;
for (int i = 0; i < (Z + 1) * X; i++)
vPGrid[i].flow = 0;
}
template<int X, int Y, int Z>
void PipeWater<X, Y, Z>::Update()
{
cudaCheck(cudaGraphicsMapResources(1, &imageResource, 0));
cudaCheck(cudaGraphicsSubResourceGetMappedArray(&arr, imageResource, 0, 0));
cudaCheck(cudaBindSurfaceToArray(surfRef, arr));
// update pipes' flow
updateHPipes<X, Y, Z><<<hPNumBlocks, PBlockSize>>>(hPGrid, args);
updateVPipes<X, Y, Z><<<vPNumBlocks, PBlockSize>>>(vPGrid, args);
cudaDeviceSynchronize();
//clampGridPipes<X, Y, Z><<<numBlocks, blockSize>>>(hPGrid, vPGrid, args.dt);
//cudaDeviceSynchronize();
// update water depth
updateGridWater<X, Y, Z><<<numBlocks, blockSize>>>(hPGrid, vPGrid, args.dt);
cudaDeviceSynchronize();
cudaCheck(cudaGraphicsUnmapResources(1, &imageResource, 0));
}
template<int X, int Y, int Z>
void PipeWater<X, Y, Z>::Render()
{
//ShaderPtr sr = Shader::shaders["flatPhong"];
ShaderPtr sr = Shader::shaders["heightWater"];
sr->Use();
glm::mat4 model(1);
model = glm::translate(model, glm::vec3(150, 40, 80));
model = glm::scale(model, glm::vec3(.01, .01, .01));
sr->setMat4("u_proj", Renderer::GetPipeline()->GetCamera(0)->GetProj());
sr->setMat4("u_view", Renderer::GetPipeline()->GetCamera(0)->GetView());
sr->setMat4("u_model", model);
//sr->setVec3("u_color", { .2, .7, .9 });
sr->setVec3("u_viewpos", Renderer::GetPipeline()->GetCamera(0)->GetPos());
sr->setVec3("sun.ambient", { .1, .1, .1 });
sr->setVec3("sun.diffuse", { .8, .8, .8 });
sr->setVec3("sun.specular", { .8, .8, .8 });
sr->setVec3("sun.direction", { 0, -1, 0 });
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, HeightTex);
sr->setInt("heightTex", 0);
glDisable(GL_CULL_FACE);
pVao->Bind();
glDrawElements(GL_TRIANGLES, indices.size(), GL_UNSIGNED_INT, nullptr);
pVao->Unbind();
glEnable(GL_CULL_FACE);
{
ImGui::Begin("Piped Water Simulation");
ImGui::Text("Dimensions: X = %d, Z = %d", X, Z);
ImGui::Separator();
ImGui::Text("Changing settings may lead \n to explosive results");
ImGui::SliderFloat("dt", &args.dt, 0, 1, "%.2f s");
ImGui::SliderFloat("dx", &args.dx, 0, 5, "%.2f m");
ImGui::SliderFloat("g", &args.g, 0, 50, "%.2f m/s^2");
ImGui::Checkbox("Calculate Normals", &calcNormals);
if (ImGui::Button("Splash water"))
{
cudaCheck(cudaGraphicsMapResources(1, &imageResource, 0));
cudaCheck(cudaGraphicsSubResourceGetMappedArray(&arr, imageResource, 0, 0));
cudaCheck(cudaBindSurfaceToArray(surfRef, arr));
perturbGrid<X, Y, Z><<<numBlocks, blockSize>>>(splash);
//for (int i = 0; i < 10; i++)
//{
// perturbGrid<X, Y, Z> << <numBlocks, blockSize >> > (splashLoc + glm::ivec2(i * 25, i * 25));
//}
cudaCheck(cudaGraphicsUnmapResources(1, &imageResource, 0));
}
if (ImGui::Button("Random Splash"))
{
SplashArgs sp = splash;
sp.pos = { Utils::get_random(0, X), Utils::get_random(0, Z) };
cudaCheck(cudaGraphicsMapResources(1, &imageResource, 0));
cudaCheck(cudaGraphicsSubResourceGetMappedArray(&arr, imageResource, 0, 0));
cudaCheck(cudaBindSurfaceToArray(surfRef, arr));
perturbGrid<X, Y, Z><<<numBlocks, blockSize>>>(sp);
cudaCheck(cudaGraphicsUnmapResources(1, &imageResource, 0));
}
ImGui::Separator();
ImGui::Text("Splash Settings");
ImGui::InputFloat2("Location", &splash.pos[0]);
ImGui::InputFloat("Amplitude", &splash.A);
ImGui::InputFloat("Falloff", &splash.b);
//float sum = 0;
//for (int i = 0; i < X * Y * Z; i++)
// sum += this->Grid[i].depth;
//ImGui::Text("Sum of water: %.2f", sum);
//ImGui::Text("Avg height: %.2f", sum / (X * Y * Z));
ImGui::End();
}
//printTex(X, Z, HeightTex);
}
template<int X, int Y, int Z>
void PipeWater<X, Y, Z>::genMesh()
{
delete this->mesh_;
std::vector<Vertex> vertices;
std::vector<GLuint> indices;
auto skip = [](const WaterCell& elem)->bool
{
return elem.depth == 0;
};
auto height = [](const WaterCell& elem)->float
{
return elem.depth;
};
mesh_ = GenVoxelMesh(this->Grid, X, Y, Z, skip, height);
}
template<int X, int Y, int Z>
void PipeWater<X, Y, Z>::initDepthTex()
{
vertices2d.clear();
indices.clear();
//vertices2d = std::vector<glm::vec2>(X * Z * 2, glm::vec2(0)); // num cells * attributes (pos + normal)
vertices2d.reserve(X * Z * 2);
indices.reserve((X - 1) * (Z - 1) * 2 * 3); // num cells * num tris per cell * num verts per tri
for (int x = 0; x < X; x++)
{
for (int z = 0; z < Z; z++)
{
glm::dvec2 p(x, z);
glm::dvec2 P(X, Z);
vertices2d.push_back(p); // pos xz
//vertices2d.push_back({ float(x) / float(X), float(z) / float(Z) }); // texcoord
vertices2d.push_back(p / P); // texcoord
}
}
// init indices
for (int x = 0; x < X - 1; x++)
{
// for each cell
for (int z = 0; z < Z - 1; z++)
{
GLuint one = flatten<X>(glm::ivec2(x, z));
GLuint two = flatten<X>(glm::ivec2(x + 1, z));
GLuint three = flatten<X>(glm::ivec2(x + 1, z + 1));
GLuint four = flatten<X>(glm::ivec2(x, z + 1));
indices.push_back(one);
indices.push_back(two);
indices.push_back(three);
indices.push_back(one);
indices.push_back(three);
indices.push_back(four);
}
}
pVbo = new VBO(&vertices2d[0], vertices2d.size() * sizeof(glm::vec2), GL_DYNAMIC_DRAW);
VBOlayout layout;
layout.Push<float>(2); // pos xz
layout.Push<float>(2); // texCoord
pVao = new VAO();
pVao->AddBuffer(*pVbo, layout);
pIbo = new IBO(indices.data(), indices.size());
pVao->Unbind();
// Generate 2D texture with 1 float element
glGenTextures(1, &HeightTex);
glBindTexture(GL_TEXTURE_2D, HeightTex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32F, X, Z, 0, GL_RED, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glBindTexture(GL_TEXTURE_2D, 0);
GLfloat height = 0;
glClearTexImage(HeightTex, 0, GL_RED, GL_FLOAT, &height);
auto err = cudaGraphicsGLRegisterImage(&imageResource, HeightTex, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsSurfaceLoadStore);
if (err != cudaSuccess)
std::cout << "Error registering CUDA image: " << err << std::endl;
} |
24a2cf649cc1e53f87c2a36c9a755d7d2faf40ae.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#define DATATYPE int
#define SMEMSIZE 512
#define REP 128
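// pointer-chasing kernel: measures the average shared-memory load latency, in clock cycles per access, by repeatedly following indices stored in two shared arrays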
__global__ void shared_model_1(double *time,DATATYPE *in1,DATATYPE *in2,DATATYPE *out,int its)
{
__shared__ DATATYPE smem1[SMEMSIZE];
__shared__ DATATYPE smem2[SMEMSIZE];
unsigned int tid=threadIdx.x;
while(tid<SMEMSIZE)
{
smem1[tid]=in1[tid];
smem2[tid]=in2[tid];
tid+=blockDim.x;
}
DATATYPE p,q=threadIdx.x;
double time_tmp=0.0;
unsigned int start_time=0,stop_time=0;
unsigned int i,j;
for (i=0;i<its;i++)
{
__syncthreads();
start_time=clock();
#pragma unroll
for (j=0;j<REP;j++)
{
p=smem1[q];
q=smem2[p];
}
stop_time=clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/REP/its;
out[blockDim.x*blockIdx.x+threadIdx.x] = p+q;
time[blockDim.x*blockIdx.x+threadIdx.x] = time_tmp;
}
int main_test(int blocks,int threads,DATATYPE *h_in1,DATATYPE *h_in2,int xxx)
{
int its=30;
DATATYPE *d_in1,*d_in2;
hipMalloc((void**)&d_in1,sizeof(DATATYPE)*SMEMSIZE);
hipMalloc((void**)&d_in2,sizeof(DATATYPE)*SMEMSIZE);
hipMemcpy(d_in1,h_in1,sizeof(DATATYPE)*SMEMSIZE,hipMemcpyHostToDevice);
hipMemcpy(d_in2,h_in2,sizeof(DATATYPE)*SMEMSIZE,hipMemcpyHostToDevice);
double *h_time,*d_time;
DATATYPE *d_out;
h_time=(double*)malloc(sizeof(double)*blocks*threads);
hipMalloc((void**)&d_time,sizeof(double)*blocks*threads);
hipMalloc((void**)&d_out,sizeof(DATATYPE)*blocks*threads);
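// note: d_in1 is passed for both index arrays in the launch below; d_in2 is allocated and copied but left unused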
hipLaunchKernelGGL(( shared_model_1), dim3(blocks),dim3(threads), 0, 0, d_time,d_in1,d_in1,d_out,its);
hipMemcpy(h_time,d_time,sizeof(double)*blocks*threads,hipMemcpyDeviceToHost);
double avert=0.0,maxt=0.0,mint=99999.9;
int nn=0;
for (int i=0;i<blocks;i++)
{
for (int j=0;j<threads;j+=32)
{
avert+=h_time[i*threads+j];
nn++;
if (maxt<h_time[i*threads+j])
{
maxt=h_time[i*threads+j];
}
if (mint>h_time[i*threads+j])
{
mint=h_time[i*threads+j];
}
}
}
avert/=nn;
printf("%d\t%d\t%d\t\t%f\t%f\t%f\n",xxx, blocks,threads,avert,mint,maxt);
hipFree(d_time);
hipFree(d_out);
hipFree(d_in1);
hipFree(d_in2);
free(h_time);
return 0;
}
void init_order(DATATYPE *a,int n)
{
for (int i=0;i<n;i++)
{
a[i]=i;
}
}
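// fill each aligned 32-element block of a with a random permutation of its own indices, so a pointer chase stays inside one warp-sized group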
void init_disordered_32(DATATYPE *a,int n)
{
DATATYPE p[32];
for (int i=0;i<32;i++)
{
p[i]=i;
}
for (int i=0;i<n;i+=32)
{
for (int j=0;j<32;j++)
{
int jj=rand()%(32-j);
a[i+j]=p[jj];
for (int k=jj;k<(32-j);k++)
{
p[k]=p[k+1];
}
}
for (int j=0;j<32;j++)
{
p[j]=a[i+j];
a[i+j]+=i;
}
}
}
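// build a permutation that keeps each value's position within its 32-element group (the low five bits) fixed while shuffling the order in which the n/32 groups are visited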
void init_disordered_512(DATATYPE *a,int n)
{
const int nn=n/32;
DATATYPE *q=(DATATYPE*)malloc(sizeof(DATATYPE)*nn);
DATATYPE *b=(DATATYPE*)malloc(sizeof(DATATYPE)*n);
init_order(q,nn);
for (int i=0;i<n;i+=nn)
{
for (int j=0;j<nn;j++)
{
int jj=rand()%(nn-j);
b[i+j]=q[jj];
for (int k=jj;k<(nn-j);k++)
{
q[k]=q[k+1];
}
}
for (int j=0;j<nn;j++)
{
q[j]=b[i+j];
}
}
DATATYPE p[32];
for (int i=0;i<32;i++)
{
p[i]=i;
}
for (int i=0;i<32;i++)
{
for (int j=0;j<nn;j++)
{
a[j*32+i]=b[i*nn+j]*32+p[i];
}
}
free(q);
free(b);
}
int main()
{
DATATYPE *h_in1, *h_in2, *h_in3;
h_in1 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE);
h_in2 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE);
h_in3 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE);
init_order(h_in1, SMEMSIZE);
init_disordered_32(h_in2, SMEMSIZE);
init_disordered_512(h_in3, SMEMSIZE);
printf("blocks\t threads\t aver \t min \t max \t(clocks)\n");
int blocks = 1;
for (int j = 0; j <= 512; j += 32) {
int threads = (j == 0 ? 1 : j);
main_test(blocks, threads, h_in1, h_in1, 1);
main_test(blocks, threads, h_in2, h_in2, 2);
main_test(blocks, threads, h_in3, h_in3, 3);
}
free(h_in1);
free(h_in2);
free(h_in3);
return 0;
} | 24a2cf649cc1e53f87c2a36c9a755d7d2faf40ae.cu | #include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#define DATATYPE int
#define SMEMSIZE 512
#define REP 128
__global__ void shared_model_1(double *time,DATATYPE *in1,DATATYPE *in2,DATATYPE *out,int its)
{
__shared__ DATATYPE smem1[SMEMSIZE];
__shared__ DATATYPE smem2[SMEMSIZE];
unsigned int tid=threadIdx.x;
while(tid<SMEMSIZE)
{
smem1[tid]=in1[tid];
smem2[tid]=in2[tid];
tid+=blockDim.x;
}
DATATYPE p,q=threadIdx.x;
double time_tmp=0.0;
unsigned int start_time=0,stop_time=0;
unsigned int i,j;
for (i=0;i<its;i++)
{
__syncthreads();
start_time=clock();
#pragma unroll
for (j=0;j<REP;j++)
{
p=smem1[q];
q=smem2[p];
}
stop_time=clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/REP/its;
out[blockDim.x*blockIdx.x+threadIdx.x] = p+q;
time[blockDim.x*blockIdx.x+threadIdx.x] = time_tmp;
}
int main_test(int blocks,int threads,DATATYPE *h_in1,DATATYPE *h_in2,int xxx)
{
int its=30;
DATATYPE *d_in1,*d_in2;
cudaMalloc((void**)&d_in1,sizeof(DATATYPE)*SMEMSIZE);
cudaMalloc((void**)&d_in2,sizeof(DATATYPE)*SMEMSIZE);
cudaMemcpy(d_in1,h_in1,sizeof(DATATYPE)*SMEMSIZE,cudaMemcpyHostToDevice);
cudaMemcpy(d_in2,h_in2,sizeof(DATATYPE)*SMEMSIZE,cudaMemcpyHostToDevice);
double *h_time,*d_time;
DATATYPE *d_out;
h_time=(double*)malloc(sizeof(double)*blocks*threads);
cudaMalloc((void**)&d_time,sizeof(double)*blocks*threads);
cudaMalloc((void**)&d_out,sizeof(DATATYPE)*blocks*threads);
shared_model_1<<<blocks,threads>>>(d_time,d_in1,d_in1,d_out,its);
cudaMemcpy(h_time,d_time,sizeof(double)*blocks*threads,cudaMemcpyDeviceToHost);
double avert=0.0,maxt=0.0,mint=99999.9;
int nn=0;
for (int i=0;i<blocks;i++)
{
for (int j=0;j<threads;j+=32)
{
avert+=h_time[i*threads+j];
nn++;
if (maxt<h_time[i*threads+j])
{
maxt=h_time[i*threads+j];
}
if (mint>h_time[i*threads+j])
{
mint=h_time[i*threads+j];
}
}
}
avert/=nn;
printf("%d\t%d\t%d\t\t%f\t%f\t%f\n",xxx, blocks,threads,avert,mint,maxt);
cudaFree(d_time);
cudaFree(d_out);
cudaFree(d_in1);
cudaFree(d_in2);
free(h_time);
return 0;
}
void init_order(DATATYPE *a,int n)
{
for (int i=0;i<n;i++)
{
a[i]=i;
}
}
void init_disordered_32(DATATYPE *a,int n)
{
DATATYPE p[32];
for (int i=0;i<32;i++)
{
p[i]=i;
}
for (int i=0;i<n;i+=32)
{
for (int j=0;j<32;j++)
{
int jj=rand()%(32-j);
a[i+j]=p[jj];
for (int k=jj;k<(32-j);k++)
{
p[k]=p[k+1];
}
}
for (int j=0;j<32;j++)
{
p[j]=a[i+j];
a[i+j]+=i;
}
}
}
void init_disordered_512(DATATYPE *a,int n)
{
const int nn=n/32;
DATATYPE *q=(DATATYPE*)malloc(sizeof(DATATYPE)*nn);
DATATYPE *b=(DATATYPE*)malloc(sizeof(DATATYPE)*n);
init_order(q,nn);
for (int i=0;i<n;i+=nn)
{
for (int j=0;j<nn;j++)
{
int jj=rand()%(nn-j);
b[i+j]=q[jj];
for (int k=jj;k<(nn-j);k++)
{
q[k]=q[k+1];
}
}
for (int j=0;j<nn;j++)
{
q[j]=b[i+j];
}
}
DATATYPE p[32];
for (int i=0;i<32;i++)
{
p[i]=i;
}
for (int i=0;i<32;i++)
{
for (int j=0;j<nn;j++)
{
a[j*32+i]=b[i*nn+j]*32+p[i];
}
}
free(q);
free(b);
}
int main()
{
DATATYPE *h_in1, *h_in2, *h_in3;
h_in1 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE);
h_in2 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE);
h_in3 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE);
init_order(h_in1, SMEMSIZE);
init_disordered_32(h_in2, SMEMSIZE);
init_disordered_512(h_in3, SMEMSIZE);
printf("blocks\t threads\t aver \t min \t max \t(clocks)\n");
int blocks = 1;
for (int j = 0; j <= 512; j += 32) {
int threads = (j == 0 ? 1 : j);
main_test(blocks, threads, h_in1, h_in1, 1);
main_test(blocks, threads, h_in2, h_in2, 2);
main_test(blocks, threads, h_in3, h_in3, 3);
}
free(h_in1);
free(h_in2);
free(h_in3);
return 0;
} |
bf2b94e250d0d8e0d22ac25d9a79a6d4c5f934a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
//__global__ --> GPU function which can be launched by many blocks and threads
//__device__ --> GPU function or variables
//__host__ --> CPU function or variables
__device__ char* CudaCrypt(char* rawPassword){
char * newPassword = (char *) malloc(sizeof(char) * 11);
newPassword[0] = rawPassword[0] + 2;
newPassword[1] = rawPassword[0] - 2;
newPassword[2] = rawPassword[0] + 1;
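// note: newPassword[3] is never assigned, so that byte is printed uninitialized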
newPassword[4] = rawPassword[1] - 3;
newPassword[5] = rawPassword[1] - 1;
newPassword[6] = rawPassword[2] + 2;
newPassword[7] = rawPassword[2] - 2;
newPassword[8] = rawPassword[3] + 4;
newPassword[9] = rawPassword[3] - 4;
newPassword[10] = '\0';
for(int i =0; i<10; i++){
if(i >= 0 && i < 6){ //checking all lower case letter limits
if(newPassword[i] > 122){
newPassword[i] = (newPassword[i] - 122) + 97;
}else if(newPassword[i] < 97){
newPassword[i] = (97 - newPassword[i]) + 97;
}
}else{ //checking number section
if(newPassword[i] > 57){
newPassword[i] = (newPassword[i] - 57) + 48;
}else if(newPassword[i] < 48){
newPassword[i] = (48 - newPassword[i]) + 48;
}
}
}
return newPassword;
}
__global__ void crack(char * alphabet, char * numbers){
char genRawPass[4];
genRawPass[0] = alphabet[blockIdx.x];
genRawPass[1] = alphabet[blockIdx.y];
genRawPass[2] = numbers[threadIdx.x];
genRawPass[3] = numbers[threadIdx.y];
//firstLetter - 'a' - 'z' (26 characters)
//secondLetter - 'a' - 'z' (26 characters)
//firstNum - '0' - '9' (10 characters)
//secondNum - '0' - '9' (10 characters)
//Idx --> gives current index of the block or thread
printf("%c %c %c %c = %s\n", genRawPass[0],genRawPass[1],genRawPass[2],genRawPass[3], CudaCrypt(genRawPass));
}
int main(int argc, char ** argv){
char cpuAlphabet[26] = {'A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z'};
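// note: these are upper-case letters (65-90), while CudaCrypt's wrap-around checks assume lower-case characters (97-122)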
char cpuNumbers[26] = {'0','1','2','3','4','5','6','7','8','9'};
char * gpuAlphabet;
hipMalloc( (void**) &gpuAlphabet, sizeof(char) * 26);
hipMemcpy(gpuAlphabet, cpuAlphabet, sizeof(char) * 26, hipMemcpyHostToDevice);
char * gpuNumbers;
hipMalloc( (void**) &gpuNumbers, sizeof(char) * 26);
hipMemcpy(gpuNumbers, cpuNumbers, sizeof(char) * 26, hipMemcpyHostToDevice);
hipLaunchKernelGGL(crack, dim3(26,26,1), dim3(10,10,1), 0, 0, gpuAlphabet, gpuNumbers);
hipDeviceSynchronize();
return 0;
}
| bf2b94e250d0d8e0d22ac25d9a79a6d4c5f934a2.cu | #include <stdio.h>
#include <stdlib.h>
//__global__ --> GPU function which can be launched by many blocks and threads
//__device__ --> GPU function or variables
//__host__ --> CPU function or variables
__device__ char* CudaCrypt(char* rawPassword){
char * newPassword = (char *) malloc(sizeof(char) * 11);
newPassword[0] = rawPassword[0] + 2;
newPassword[1] = rawPassword[0] - 2;
newPassword[2] = rawPassword[0] + 1;
newPassword[4] = rawPassword[1] - 3;
newPassword[5] = rawPassword[1] - 1;
newPassword[6] = rawPassword[2] + 2;
newPassword[7] = rawPassword[2] - 2;
newPassword[8] = rawPassword[3] + 4;
newPassword[9] = rawPassword[3] - 4;
newPassword[10] = '\0';
for(int i =0; i<10; i++){
if(i >= 0 && i < 6){ //checking all lower case letter limits
if(newPassword[i] > 122){
newPassword[i] = (newPassword[i] - 122) + 97;
}else if(newPassword[i] < 97){
newPassword[i] = (97 - newPassword[i]) + 97;
}
}else{ //checking number section
if(newPassword[i] > 57){
newPassword[i] = (newPassword[i] - 57) + 48;
}else if(newPassword[i] < 48){
newPassword[i] = (48 - newPassword[i]) + 48;
}
}
}
return newPassword;
}
__global__ void crack(char * alphabet, char * numbers){
char genRawPass[4];
genRawPass[0] = alphabet[blockIdx.x];
genRawPass[1] = alphabet[blockIdx.y];
genRawPass[2] = numbers[threadIdx.x];
genRawPass[3] = numbers[threadIdx.y];
//firstLetter - 'a' - 'z' (26 characters)
//secondLetter - 'a' - 'z' (26 characters)
//firstNum - '0' - '9' (10 characters)
//secondNum - '0' - '9' (10 characters)
//Idx --> gives current index of the block or thread
printf("%c %c %c %c = %s\n", genRawPass[0],genRawPass[1],genRawPass[2],genRawPass[3], CudaCrypt(genRawPass));
}
int main(int argc, char ** argv){
char cpuAlphabet[26] = {'A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z'};
char cpuNumbers[26] = {'0','1','2','3','4','5','6','7','8','9'};
char * gpuAlphabet;
cudaMalloc( (void**) &gpuAlphabet, sizeof(char) * 26);
cudaMemcpy(gpuAlphabet, cpuAlphabet, sizeof(char) * 26, cudaMemcpyHostToDevice);
char * gpuNumbers;
cudaMalloc( (void**) &gpuNumbers, sizeof(char) * 26);
cudaMemcpy(gpuNumbers, cpuNumbers, sizeof(char) * 26, cudaMemcpyHostToDevice);
crack<<< dim3(26,26,1), dim3(10,10,1) >>>( gpuAlphabet, gpuNumbers );
cudaThreadSynchronize();
return 0;
}
|
a164e35d1d94f2d4d773522b3dedc9040e303d08.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdbool.h>
#define PACKED
#define SEARCH_ALL_THE_BEST
#define COLLECT_LOG
#undef USE_PRECOMPUTED_HDIFF
#define BLOCK_DIM (32)
#define N_INIT_DISTRIBUTION (BLOCK_DIM * 64)
#define MAX_GPU_PLAN (24)
#define MAX_BUF_RATIO (256)
#define STATE_WIDTH 4
#define STATE_N (STATE_WIDTH * STATE_WIDTH)
typedef unsigned char uchar;
typedef signed char Direction;
#define dir_reverse(dir) ((Direction)(3 - (dir)))
#define DIR_N 4
#define DIR_FIRST 0
#define DIR_UP 0
#define DIR_RIGHT 1
#define DIR_LEFT 2
#define DIR_DOWN 3
#define POS_X(pos) ((pos) &3)
#define POS_Y(pos) ((pos) >> 2)
typedef struct search_stat_tag
{
bool solved;
int len;
unsigned long long int loads;
#ifdef COLLECT_LOG
unsigned long long int nodes_expanded;
#endif
//bool assert_failed;
} search_stat;
typedef struct input_tag
{
uchar tiles[STATE_N];
int init_depth;
Direction parent_dir;
} Input;
/* state implementation */
/*
* goal: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
*/
#ifdef USE_PRECOMPUTED_HDIFF
__device__ __shared__ static signed char h_diff_table_shared[STATE_N][STATE_N][DIR_N];
#endif
typedef struct state_tag
{
#ifndef PACKED
uchar tile[STATE_N];
#else
unsigned long long tile;
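/* 16 tiles, 4 bits each, packed into a single 64-bit word */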
#endif
uchar empty;
uchar depth;
Direction parent_dir;
uchar h_value; /* ub of h_value is STATE_WIDTH*2*(STATE_N-1), e.g. 90 */
} d_State;
#ifndef PACKED
#define state_tile_get(i) (state->tile[i])
#define state_tile_set(i, v) (state->tile[i] = (v))
#else
#define STATE_TILE_BITS 4
#define STATE_TILE_MASK ((1ull << STATE_TILE_BITS) - 1)
#define state_tile_ofs(i) (i << 2)
#define state_tile_get(i) \
((state->tile & (STATE_TILE_MASK << state_tile_ofs(i))) >> \
state_tile_ofs(i))
#define state_tile_set(i, val) \
do \
{ \
state->tile &= ~((STATE_TILE_MASK) << state_tile_ofs(i)); \
state->tile |= ((unsigned long long) val) << state_tile_ofs(i); \
} while (0)
#endif
#define distance(i, j) ((i) > (j) ? (i) - (j) : (j) - (i))
__device__ static void
state_init(d_State *state, Input *input)
{
state->depth = input->init_depth;
state->parent_dir = input->parent_dir;
for (int i = 0; i < STATE_N; ++i)
{
if (input->tiles[i] == 0)
state->empty = i;
state_tile_set(i, input->tiles[i]);
}
state->h_value = 0;
for (int i = 0; i < STATE_N; ++i)
{
uchar tile = state_tile_get(i);
if (tile == 0)
continue;
state->h_value += distance(POS_X(i), POS_X(tile));
state->h_value += distance(POS_Y(i), POS_Y(tile));
}
}
__device__ static inline bool
state_is_goal(d_State state)
{
return state.h_value == 0;
}
__device__ static inline int
state_get_f(d_State state)
{
return state.depth + state.h_value;
}
__device__ __shared__ static bool movable_table_shared[STATE_N][DIR_N];
__device__ static inline bool
state_movable(d_State state, Direction dir)
{
return movable_table_shared[state.empty][dir];
}
__device__ __constant__ const static int pos_diff_table[DIR_N] = {
-STATE_WIDTH, 1, -1, +STATE_WIDTH};
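/* returns the change (+1 or -1) in the Manhattan-distance heuristic for the tile that slides into the empty square */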
__device__ static inline int
calc_h_diff(int opponent, int from, int rev_dir)
{
int goal_x = POS_X(opponent), goal_y = POS_Y(opponent);
int from_x = POS_X(from), from_y = POS_Y(from);
if (rev_dir == DIR_LEFT)
return goal_x > from_x ? -1 : 1;
else if (rev_dir == DIR_RIGHT)
return goal_x < from_x ? -1 : 1;
else if (rev_dir == DIR_UP)
return goal_y > from_y ? -1 : 1;
else
return goal_y < from_y ? -1 : 1;
}
__device__ static inline void
state_move(d_State *state, Direction dir)
{
int new_empty = state->empty + pos_diff_table[dir];
int opponent = state_tile_get(new_empty);
#ifdef USE_PRECOMPUTED_HDIFF
state->h_value += h_diff_table_shared[opponent][new_empty][dir];
#else
state->h_value += calc_h_diff(opponent, new_empty, dir);
#endif
state_tile_set(state->empty, opponent);
state->empty = new_empty;
state->parent_dir = dir;
++state->depth;
}
/* stack implementation */
#define STACK_BUF_LEN (MAX_GPU_PLAN * (BLOCK_DIM/DIR_N))
typedef struct div_stack_tag
{
unsigned int n;
d_State buf[STACK_BUF_LEN];
} d_Stack;
__device__ static inline bool
stack_is_empty(d_Stack *stack)
{
bool ret = (stack->n == 0);
__syncthreads();
return ret;
}
__device__ static inline void
stack_put(d_Stack *stack, d_State *state, bool put)
{
if (put)
{
unsigned int i = atomicInc( &stack->n, UINT_MAX); /* slow? especially in old CC environment */
stack->buf[i] = *state;
}
__syncthreads();
}
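/* every four consecutive threads share one popped node: tid >> 2 selects the node, tid & 3 later selects the direction that thread expands */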
__device__ static inline bool
stack_pop(d_Stack *stack, d_State *state)
{
int tid = threadIdx.x;
int i = (int) stack->n - 1 - (int) (tid >> 2);
if (i >= 0)
*state = stack->buf[i];
__syncthreads();
if (tid == 0)
stack->n = stack->n >= BLOCK_DIM / DIR_N ?
stack->n - BLOCK_DIM / DIR_N : 0;
__syncthreads();
return i >= 0;
}
//__device__ __shared__ Direction candidate_dir_table[4][3] = {}
/*
* solver implementation
*/
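/* block-parallel depth-first search bounded by f_limit: each thread handles one (node, direction) pair per iteration, and children whose f-value stays within the limit are pushed back onto the shared stack */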
__device__ static void
idas_internal(d_Stack *stack, int f_limit, search_stat *stat)
{
d_State state;
unsigned long long int loop_cnt = 0;
#ifdef COLLECT_LOG
unsigned long long int nodes_expanded = 0;
#endif
if (threadIdx.x == 0)
stat->solved = false;
for (;;)
{
if (stack_is_empty(stack))
{
stat->loads = loop_cnt;
#ifdef COLLECT_LOG
atomicAdd(&stat->nodes_expanded, nodes_expanded);
#endif
break;
}
++loop_cnt;
bool found = stack_pop(stack, &state),
put = false;
if (found)
{
Direction dir = threadIdx.x & 3;
#ifdef COLLECT_LOG
nodes_expanded++;
#endif
/* NOTE: candidate_dir_table may be effective for avoiding divergence */
if (state.parent_dir == dir_reverse(dir))
continue;
if (state_movable(state, dir))
{
state_move(&state, dir);
if (state_get_f(state) <= f_limit)
{
if (state_is_goal(state))
{
#ifndef SEARCH_ALL_THE_BEST
asm("trap;");
#else
stat->loads = loop_cnt;
stat->len = state.depth;
stat->solved = true;
#endif
#ifdef COLLECT_LOG
atomicAdd(&stat->nodes_expanded, nodes_expanded);
#endif
}
else
put = true;
}
}
}
stack_put(stack, &state, put);
}
}
/* XXX: movable table is effective in this case? */
__global__ void
idas_kernel(Input *input, search_stat *stat, int f_limit,
signed char *h_diff_table, bool *movable_table)
{
__shared__ d_Stack stack;
int tid = threadIdx.x;
int bid = blockIdx.x;
if (tid == 0)
stat[bid].loads = 0;
d_State state;
state_init(&state, &input[bid]);
if (state_get_f(state) > f_limit)
return;
if (tid == 0)
{
stack.buf[0] = state;
stack.n = 1;
}
for (int i = tid; i < STATE_N * DIR_N; i += blockDim.x)
if (i < STATE_N * DIR_N)
movable_table_shared[i / DIR_N][i % DIR_N] = movable_table[i];
#ifdef USE_PRECOMPUTED_HDIFF
for (int dir = 0; dir < DIR_N; ++dir)
for (int j = tid; j < STATE_N * STATE_N; j += blockDim.x)
if (j < STATE_N * STATE_N)
h_diff_table_shared[j / STATE_N][j % STATE_N][dir] =
h_diff_table[j * DIR_N + dir];
#endif
__syncthreads();
idas_internal(&stack, f_limit, &stat[bid]);
}
/* host library implementation */
#include <errno.h>
#include <limits.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef UNABLE_LOG
#define elog(...) fprintf(stderr, __VA_ARGS__)
#else
#define elog(...) ;
#endif
void *
palloc(size_t size)
{
void *ptr = malloc(size);
if (!ptr)
elog("malloc failed\n");
return ptr;
}
void *
repalloc(void *old_ptr, size_t new_size)
{
void *ptr = realloc(old_ptr, new_size);
if (!ptr)
elog("realloc failed\n");
return ptr;
}
void
pfree(void *ptr)
{
if (!ptr)
elog("empty ptr\n");
free(ptr);
}
#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
typedef unsigned char idx_t;
/*
* [0,0] [1,0] [2,0] [3,0]
* [0,1] [1,1] [2,1] [3,1]
* [0,2] [1,2] [2,2] [3,2]
* [0,3] [1,3] [2,3] [3,3]
*/
/*
* goal state is
* [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
*/
typedef struct state_tag_cpu
{
int depth; /* XXX: needed? */
uchar pos[STATE_WIDTH][STATE_WIDTH];
idx_t i, j; /* pos of empty */
Direction parent_dir;
int h_value;
} * State;
#define v(state, i, j) ((state)->pos[i][j])
#define ev(state) (v(state, state->i, state->j))
#define lv(state) (v(state, state->i - 1, state->j))
#define dv(state) (v(state, state->i, state->j + 1))
#define rv(state) (v(state, state->i + 1, state->j))
#define uv(state) (v(state, state->i, state->j - 1))
static uchar from_x[STATE_WIDTH * STATE_WIDTH],
from_y[STATE_WIDTH * STATE_WIDTH];
static inline void
fill_from_xy(State from)
{
for (idx_t x = 0; x < STATE_WIDTH; ++x)
for (idx_t y = 0; y < STATE_WIDTH; ++y)
{
from_x[v(from, x, y)] = x;
from_y[v(from, x, y)] = y;
}
}
static inline int
heuristic_manhattan_distance(State from)
{
int h_value = 0;
fill_from_xy(from);
for (idx_t i = 1; i < STATE_N; ++i)
{
h_value += distance(from_x[i], i & 3);
h_value += distance(from_y[i], i >> 2);
}
return h_value;
}
bool
state_is_goal(State state)
{
return state->h_value == 0;
}
static inline State
state_alloc(void)
{
return (State) palloc(sizeof(struct state_tag_cpu));
}
static inline void
state_free(State state)
{
pfree(state);
}
State
state_init(uchar v_list[STATE_WIDTH * STATE_WIDTH], int init_depth)
{
State state = state_alloc();
int cnt = 0;
state->depth = init_depth;
state->parent_dir = (Direction) -1;
for (idx_t j = 0; j < STATE_WIDTH; ++j)
for (idx_t i = 0; i < STATE_WIDTH; ++i)
{
if (v_list[cnt] == 0)
{
state->i = i;
state->j = j;
}
v(state, i, j) = v_list[cnt++];
}
state->h_value = heuristic_manhattan_distance(state);
return state;
}
void
state_fini(State state)
{
state_free(state);
}
State
state_copy(State src)
{
State dst = state_alloc();
memcpy(dst, src, sizeof(*src));
return dst;
}
static inline bool
state_left_movable(State state)
{
return state->i != 0;
}
static inline bool
state_down_movable(State state)
{
return state->j != STATE_WIDTH - 1;
}
static inline bool
state_right_movable(State state)
{
return state->i != STATE_WIDTH - 1;
}
static inline bool
state_up_movable(State state)
{
return state->j != 0;
}
bool
state_movable(State state, Direction dir)
{
return (dir != DIR_LEFT || state_left_movable(state)) &&
(dir != DIR_DOWN || state_down_movable(state)) &&
(dir != DIR_RIGHT || state_right_movable(state)) &&
(dir != DIR_UP || state_up_movable(state));
}
#define h_diff(who, from_i, from_j, dir) \
(h_diff_table[((who) << 6) + ((from_j) << 4) + ((from_i) << 2) + (dir)])
static int h_diff_table[STATE_N * STATE_N * DIR_N] = {
1, 1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1,
1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1,
-1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1,
1, -1, 1, -1, 1, -1, 1, 1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1,
1, 1, -1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1,
1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1,
1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, 1, 1,
-1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1,
-1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1,
1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1,
-1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1,
-1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1,
-1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1,
1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1,
-1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1,
1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1,
-1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1,
-1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1,
1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1,
1, -1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1,
-1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1,
-1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, -1,
1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1,
-1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1,
1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1,
1, 1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1,
1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1,
1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, 1, -1,
1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1,
1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1,
-1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1,
1, -1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1,
1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1,
1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1,
1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1,
1, 1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1,
1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1,
1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1,
1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, 1,
-1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1,
-1, -1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1,
-1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1,
-1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1,
1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, 1, -1,
1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1,
-1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1,
-1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1,
1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1,
1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1,
-1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1,
1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1,
-1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1,
1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1,
-1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1};
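/*
 * Slide the tile adjacent to the empty square in direction dir and update the
 * Manhattan-distance heuristic incrementally via the precomputed h_diff table.
 */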
void
state_move(State state, Direction dir)
{
idx_t who;
assert(state_movable(state, dir));
switch (dir)
{
case DIR_LEFT:
who = ev(state) = lv(state);
state->i--;
break;
case DIR_DOWN:
who = ev(state) = dv(state);
state->j++;
break;
case DIR_RIGHT:
who = ev(state) = rv(state);
state->i++;
break;
case DIR_UP:
who = ev(state) = uv(state);
state->j--;
break;
default:
elog("unexpected direction");
assert(false);
}
state->h_value =
state->h_value + h_diff(who, state->i, state->j, dir_reverse(dir));
state->parent_dir = dir;
}
bool
state_pos_equal(State s1, State s2)
{
for (idx_t i = 0; i < STATE_WIDTH; ++i)
for (idx_t j = 0; j < STATE_WIDTH; ++j)
if (v(s1, i, j) != v(s2, i, j))
return false;
return true;
}
size_t
state_hash(State state)
{
size_t hash_value = 0;
for (idx_t i = 0; i < STATE_WIDTH; ++i)
for (idx_t j = 0; j < STATE_WIDTH; ++j)
hash_value ^= (v(state, i, j) << ((i * 3 + j) << 2));
return hash_value;
}
int
state_get_hvalue(State state)
{
return state->h_value;
}
int
state_get_depth(State state)
{
return state->depth;
}
static void
state_dump(State state)
{
elog("LOG(state): depth=%d, h=%d, f=%d, ", state->depth, state->h_value,
state->depth + state->h_value);
for (int i = 0; i < STATE_N; ++i)
elog("%d%c", i == state->i + STATE_WIDTH * state->j
? 0
: state->pos[i % STATE_WIDTH][i / STATE_WIDTH],
i == STATE_N - 1 ? '\n' : ',');
}
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif
typedef enum {
HT_SUCCESS = 0,
HT_FAILED_FOUND,
HT_FAILED_NOT_FOUND,
} HTStatus;
/* XXX: hash function for State should be surveyed */
inline static size_t
hashfunc(State key)
{
return state_hash(key);
}
typedef struct ht_entry_tag *HTEntry;
struct ht_entry_tag
{
HTEntry next;
State key;
int value;
};
static HTEntry
ht_entry_init(State key)
{
HTEntry entry = (HTEntry) palloc(sizeof(*entry));
entry->key = state_copy(key);
entry->next = NULL;
return entry;
}
static void
ht_entry_fini(HTEntry entry)
{
pfree(entry);
}
typedef struct ht_tag
{
size_t n_bins;
size_t n_elems;
HTEntry *bin;
} * HT;
static bool
ht_rehash_required(HT ht)
{
return ht->n_bins <= ht->n_elems;
}
static size_t
calc_n_bins(size_t required)
{
    /* NOTE: n_bins is used as a bit mask and hence must be a power of 2, for now */
size_t size = 1;
assert(required > 0);
while (required > size)
size <<= 1;
return size;
}
HT
ht_init(size_t init_size_hint)
{
size_t n_bins = calc_n_bins(init_size_hint);
HT ht = (HT) palloc(sizeof(*ht));
ht->n_bins = n_bins;
ht->n_elems = 0;
assert(sizeof(*ht->bin) <= SIZE_MAX / n_bins);
ht->bin = (HTEntry *) palloc(sizeof(*ht->bin) * n_bins);
memset(ht->bin, 0, sizeof(*ht->bin) * n_bins);
return ht;
}
static void
ht_rehash(HT ht)
{
HTEntry *new_bin;
size_t new_size = ht->n_bins << 1;
    assert(ht->n_bins < SIZE_MAX >> 1);
new_bin = (HTEntry *) palloc(sizeof(*new_bin) * new_size);
memset(new_bin, 0, sizeof(*new_bin) * new_size);
for (size_t i = 0; i < ht->n_bins; ++i)
{
HTEntry entry = ht->bin[i];
while (entry)
{
HTEntry next = entry->next;
size_t idx = hashfunc(entry->key) & (new_size - 1);
entry->next = new_bin[idx];
new_bin[idx] = entry;
entry = next;
}
}
pfree(ht->bin);
ht->n_bins = new_size;
ht->bin = new_bin;
}
void
ht_fini(HT ht)
{
for (size_t i = 0; i < ht->n_bins; ++i)
{
HTEntry entry = ht->bin[i];
while (entry)
{
HTEntry next = entry->next;
state_fini(entry->key);
ht_entry_fini(entry);
entry = next;
}
}
pfree(ht->bin);
pfree(ht);
}
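/*
 * Insert key into the hash table. Returns HT_SUCCESS on a fresh insert or
 * HT_FAILED_FOUND if an entry with the same board position already exists;
 * in both cases *value points at the entry's stored value (the depth).
 */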
HTStatus
ht_insert(HT ht, State key, int **value)
{
size_t i;
HTEntry entry, new_entry;
if (ht_rehash_required(ht))
ht_rehash(ht);
i = hashfunc(key) & (ht->n_bins - 1);
entry = ht->bin[i];
while (entry)
{
if (state_pos_equal(key, entry->key))
{
*value = &entry->value;
return HT_FAILED_FOUND;
}
entry = entry->next;
}
new_entry = ht_entry_init(key);
new_entry->next = ht->bin[i];
ht->bin[i] = new_entry;
*value = &new_entry->value;
assert(ht->n_elems < SIZE_MAX);
ht->n_elems++;
return HT_SUCCESS;
}
/*
* Priority Queue implementation
*/
#include <assert.h>
#include <stdint.h>
typedef struct pq_entry_tag
{
State state;
int f, g;
} PQEntryData;
typedef PQEntryData *PQEntry;
/* tiebreaking is done comparing g value */
static inline bool
pq_entry_higher_priority(PQEntry e1, PQEntry e2)
{
return e1->f < e2->f || (e1->f == e2->f && e1->g >= e2->g);
}
/*
* NOTE:
 * This priority queue is implemented as a doubly reallocated (growable) array.
 * It will only extend and will not shrink, for now.
 * It may be improved by using an array of layers of iteratively widened arrays.
*/
typedef struct pq_tag
{
size_t n_elems;
size_t capa;
PQEntryData *array;
} * PQ;
static inline size_t
calc_init_capa(size_t capa_hint)
{
size_t capa = 1;
assert(capa_hint > 0);
while (capa < capa_hint)
capa <<= 1;
return capa - 1;
}
PQ
pq_init(size_t init_capa_hint)
{
PQ pq = (PQ) palloc(sizeof(*pq));
pq->n_elems = 0;
pq->capa = calc_init_capa(init_capa_hint);
assert(pq->capa <= SIZE_MAX / sizeof(PQEntryData));
pq->array = (PQEntryData *) palloc(sizeof(PQEntryData) * pq->capa);
return pq;
}
void
pq_fini(PQ pq)
{
for (size_t i = 0; i < pq->n_elems; ++i)
state_fini(pq->array[i].state);
pfree(pq->array);
pfree(pq);
}
static inline bool
pq_is_full(PQ pq)
{
assert(pq->n_elems <= pq->capa);
return pq->n_elems == pq->capa;
}
static inline void
pq_extend(PQ pq)
{
pq->capa = (pq->capa << 1) + 1;
assert(pq->capa <= SIZE_MAX / sizeof(PQEntryData));
pq->array =
(PQEntryData *) repalloc(pq->array, sizeof(PQEntryData) * pq->capa);
}
static inline void
pq_swap_entry(PQ pq, size_t i, size_t j)
{
PQEntryData tmp = pq->array[i];
pq->array[i] = pq->array[j];
pq->array[j] = tmp;
}
static inline size_t
pq_up(size_t i)
{
/* NOTE: By using 1-origin, it may be written more simply, i >> 1 */
return (i - 1) >> 1;
}
static inline size_t
pq_left(size_t i)
{
return (i << 1) + 1;
}
static void
heapify_up(PQ pq)
{
for (size_t i = pq->n_elems; i > 0;)
{
size_t ui = pq_up(i);
assert(i > 0);
if (!pq_entry_higher_priority(&pq->array[i], &pq->array[ui]))
break;
pq_swap_entry(pq, i, ui);
i = ui;
}
}
void
pq_put(PQ pq, State state, int f, int g)
{
if (pq_is_full(pq))
pq_extend(pq);
pq->array[pq->n_elems].state = state_copy(state);
    pq->array[pq->n_elems].f = f; /* this may be redundant */
pq->array[pq->n_elems].g = g;
heapify_up(pq);
++pq->n_elems;
}
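/* Restore the heap property downwards from the root; used by pq_pop(). */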
static void
heapify_down(PQ pq)
{
size_t sentinel = pq->n_elems;
for (size_t i = 0;;)
{
size_t ri, li = pq_left(i);
if (li >= sentinel)
break;
ri = li + 1;
if (ri >= sentinel)
{
if (pq_entry_higher_priority(&pq->array[li], &pq->array[i]))
pq_swap_entry(pq, i, li);
/* Reached the bottom */
break;
}
/* NOTE: If p(ri) == p(li), it may be good to go right
* since the filling order is left-first */
if (pq_entry_higher_priority(&pq->array[li], &pq->array[ri]))
{
if (!pq_entry_higher_priority(&pq->array[li], &pq->array[i]))
break;
pq_swap_entry(pq, i, li);
i = li;
}
else
{
if (!pq_entry_higher_priority(&pq->array[ri], &pq->array[i]))
break;
pq_swap_entry(pq, i, ri);
i = ri;
}
}
}
State
pq_pop(PQ pq)
{
State ret_state;
if (pq->n_elems == 0)
return NULL;
ret_state = pq->array[0].state;
--pq->n_elems;
pq->array[0] = pq->array[pq->n_elems];
heapify_down(pq);
return ret_state;
}
void
pq_dump(PQ pq)
{
elog("%s: n_elems=%zu, capa=%zu\n", __func__, pq->n_elems, pq->capa);
for (size_t i = 0, cr_required = 1; i < pq->n_elems; i++)
{
if (i == cr_required)
{
elog("\n");
cr_required = (cr_required << 1) + 1;
}
elog("%d,", pq->array[i].f);
elog("%d ", pq->array[i].g);
}
elog("\n");
}
#include <stdlib.h>
#include <string.h>
int
rrand(int m)
{
return (int) ((double) m * (rand() / (RAND_MAX + 1.0)));
}
void
shuffle_input(Input input[], int n_inputs)
{
Input tmp;
size_t n = n_inputs;
while (n > 1)
{
size_t k = rrand(n--);
memcpy(&tmp, &input[n], sizeof(Input));
memcpy(&input[n], &input[k], sizeof(Input));
memcpy(&input[k], &tmp, sizeof(Input));
}
}
static HT closed;
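/*
 * Expand the initial state with A* on the host until about distr_n open nodes
 * remain; those frontier nodes are written into input[] as the root states
 * for the GPU search and *min_fvalue is set to their minimum f value.
 * Returns true if the goal is reached already during this distribution.
 */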
bool
distribute_astar(State init_state, Input input[], int distr_n, int *cnt_inputs,
int *min_fvalue)
{
int cnt = 0;
State state;
PQ q = pq_init(distr_n + 10);
HTStatus ht_status;
int * ht_value;
bool solved = false;
closed = ht_init(10000);
ht_status = ht_insert(closed, init_state, &ht_value);
*ht_value = 0;
pq_put(q, state_copy(init_state), state_get_hvalue(init_state), 0);
++cnt;
while ((state = pq_pop(q)))
{
--cnt;
if (state_is_goal(state))
{
solved = true;
break;
}
ht_status = ht_insert(closed, state, &ht_value);
if (ht_status == HT_FAILED_FOUND && *ht_value < state_get_depth(state))
{
state_fini(state);
continue;
}
else
*ht_value = state_get_depth(state);
for (int dir = 0; dir < DIR_N; ++dir)
{
if (state->parent_dir != dir_reverse(dir) &&
state_movable(state, (Direction) dir))
{
State next_state = state_copy(state);
state_move(next_state, (Direction) dir);
next_state->depth++;
ht_status = ht_insert(closed, next_state, &ht_value);
if (ht_status == HT_FAILED_FOUND &&
*ht_value <= state_get_depth(next_state))
state_fini(next_state);
else
{
++cnt;
*ht_value = state_get_depth(next_state);
pq_put(q, next_state,
*ht_value + state_get_hvalue(next_state), *ht_value);
}
}
}
state_fini(state);
if (cnt >= distr_n)
break;
}
*cnt_inputs = cnt;
elog("LOG: init_distr, cnt=%d\n", cnt);
if (!solved)
{
int minf = INT_MAX;
for (int id = 0; id < cnt; ++id)
{
State state = pq_pop(q);
assert(state);
for (int i = 0; i < STATE_N; ++i)
input[id].tiles[i] =
state->pos[i % STATE_WIDTH][i / STATE_WIDTH];
input[id].tiles[state->i + (state->j * STATE_WIDTH)] = 0;
input[id].init_depth = state_get_depth(state);
input[id].parent_dir = state->parent_dir;
if (minf > state_get_depth(state) + state_get_hvalue(state))
minf = state_get_depth(state) + state_get_hvalue(state);
}
assert(pq_pop(q) == NULL);
// shuffle_input(input, cnt);
*min_fvalue = minf;
}
pq_fini(q);
return solved;
}
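/*
 * Split the heavily loaded root input[i] into roughly devide_n finer-grained
 * roots by re-expanding it with A*, writing the new roots at the tail of
 * input[]. Returns the net number of roots added (cnt - 1).
 * NOTE: the repalloc() result is not propagated back to the caller, so the
 * host buffer is not actually grown here.
 */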
static int
input_devide(Input input[], search_stat stat[], int i, int devide_n, int tail,
int *buf_len)
{
int cnt = 0;
int * ht_value;
State state = state_init(input[i].tiles, input[i].init_depth);
state->parent_dir = input[i].parent_dir;
PQ pq = pq_init(devide_n);
HTStatus ht_status;
pq_put(pq, state, state_get_hvalue(state), 0);
++cnt;
assert(devide_n > 0);
while ((state = pq_pop(pq)))
{
--cnt;
if (state_is_goal(state))
{
/* It may not be optimal goal */
pq_put(pq, state, state_get_depth(state) + state_get_hvalue(state),
state_get_depth(state));
++cnt;
break;
}
ht_status = ht_insert(closed, state, &ht_value);
if (ht_status == HT_FAILED_FOUND && *ht_value < state_get_depth(state))
{
state_fini(state);
continue;
}
else
*ht_value = state_get_depth(state);
for (int dir = 0; dir < DIR_N; ++dir)
{
if (state->parent_dir != dir_reverse(dir) &&
state_movable(state, (Direction) dir))
{
State next_state = state_copy(state);
state_move(next_state, (Direction) dir);
next_state->depth++;
ht_status = ht_insert(closed, next_state, &ht_value);
if (ht_status == HT_FAILED_FOUND &&
*ht_value < state_get_depth(next_state))
state_fini(next_state);
else
{
++cnt;
*ht_value = state_get_depth(next_state);
pq_put(pq, next_state,
*ht_value + state_get_hvalue(next_state), *ht_value);
}
}
}
state_fini(state);
if (cnt >= devide_n)
break;
}
int new_buf_len = *buf_len;
while (tail + cnt >= new_buf_len)
new_buf_len <<= 1;
if (new_buf_len != *buf_len)
{
*buf_len = new_buf_len;
repalloc(input, sizeof(*input) * new_buf_len);
elog("LOG: host buf resize\n");
}
input[i] = input[tail - 1];
for (int id = 0; id < cnt; ++id)
{
int ofs = tail - 1 + id;
State state = pq_pop(pq);
assert(state);
for (int j = 0; j < STATE_N; ++j)
input[ofs].tiles[j] = state->pos[j % STATE_WIDTH][j / STATE_WIDTH];
input[ofs].tiles[state->i + (state->j * STATE_WIDTH)] = 0;
input[ofs].init_depth = state_get_depth(state);
input[ofs].parent_dir = state->parent_dir;
}
pq_fini(pq);
return cnt - 1;
}
/* main */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#define exit_failure(...) \
do \
{ \
printf(__VA_ARGS__); \
exit(EXIT_FAILURE); \
} while (0)
static int
pop_int_from_str(const char *str, char **end_ptr)
{
    errno = 0;
    long int rv = strtol(str, end_ptr, 0);
    if (errno != 0)
exit_failure("%s: %s cannot be converted into long\n", __func__, str);
else if (end_ptr && str == *end_ptr)
exit_failure("%s: reach end of string", __func__);
if (rv > INT_MAX || rv < INT_MIN)
exit_failure("%s: too big number, %ld\n", __func__, rv);
return (int) rv;
}
#define MAX_LINE_LEN 100
static void
load_state_from_file(const char *fname, uchar *s)
{
FILE *fp;
char str[MAX_LINE_LEN];
char *str_ptr = str, *end_ptr;
fp = fopen(fname, "r");
if (!fp)
exit_failure("%s: %s cannot be opened\n", __func__, fname);
if (!fgets(str, MAX_LINE_LEN, fp))
exit_failure("%s: fgets failed\n", __func__);
for (int i = 0; i < STATE_N; ++i)
{
s[i] = pop_int_from_str(str_ptr, &end_ptr);
str_ptr = end_ptr;
}
fclose(fp);
}
#undef MAX_LINE_LEN
#define CUDA_CHECK(call) \
do \
{ \
const hipError_t e = call; \
if (e != hipSuccess) \
exit_failure("Error: %s:%d code:%d, reason: %s\n", __FILE__, \
__LINE__, e, hipGetErrorString(e)); \
} while (0)
__host__ static void *
cudaPalloc(size_t size)
{
void *ptr;
CUDA_CHECK(hipMalloc(&ptr, size));
return ptr;
}
__host__ static void
cudaPfree(void *ptr)
{
CUDA_CHECK(hipFree(ptr));
}
#define h_d_t(op, i, dir) \
(h_diff_table[(op) *STATE_N * DIR_N + (i) *DIR_N + (dir)])
__host__ static void
init_mdist(signed char h_diff_table[])
{
for (int opponent = 0; opponent < STATE_N; ++opponent)
{
int goal_x = POS_X(opponent), goal_y = POS_Y(opponent);
for (int i = 0; i < STATE_N; ++i)
{
int from_x = POS_X(i), from_y = POS_Y(i);
for (uchar dir = 0; dir < DIR_N; ++dir)
{
if (dir == DIR_LEFT)
h_d_t(opponent, i, dir) = goal_x > from_x ? -1 : 1;
if (dir == DIR_RIGHT)
h_d_t(opponent, i, dir) = goal_x < from_x ? -1 : 1;
if (dir == DIR_UP)
h_d_t(opponent, i, dir) = goal_y > from_y ? -1 : 1;
if (dir == DIR_DOWN)
h_d_t(opponent, i, dir) = goal_y < from_y ? -1 : 1;
}
}
}
}
#undef h_d_t
#define m_t(i, d) (movable_table[(i) *DIR_N + (d)])
__host__ static void
init_movable_table(bool movable_table[])
{
for (int i = 0; i < STATE_N; ++i)
for (unsigned int d = 0; d < DIR_N; ++d)
{
if (d == DIR_RIGHT)
m_t(i, d) = (POS_X(i) < STATE_WIDTH - 1);
else if (d == DIR_LEFT)
m_t(i, d) = (POS_X(i) > 0);
else if (d == DIR_DOWN)
m_t(i, d) = (POS_Y(i) < STATE_WIDTH - 1);
else if (d == DIR_UP)
m_t(i, d) = (POS_Y(i) > 0);
}
}
#undef m_t
// static char dir_char[] = {'U', 'R', 'L', 'D'};
#define INPUT_SIZE (sizeof(Input) * buf_len)
#define STAT_SIZE (sizeof(search_stat) * buf_len)
#define MOVABLE_TABLE_SIZE (sizeof(bool) * STATE_N * DIR_N)
#define H_DIFF_TABLE_SIZE (STATE_N * STATE_N * DIR_N)
int
main(int argc, char *argv[])
{
int n_roots;
int buf_len = N_INIT_DISTRIBUTION * MAX_BUF_RATIO;
Input *input = (Input *) palloc(INPUT_SIZE),
*d_input = (Input *) cudaPalloc(INPUT_SIZE);
search_stat *stat = (search_stat *) palloc(STAT_SIZE),
*d_stat = (search_stat *) cudaPalloc(STAT_SIZE);
bool *movable_table = (bool *) palloc(MOVABLE_TABLE_SIZE),
*d_movable_table = (bool *) cudaPalloc(MOVABLE_TABLE_SIZE);
signed char *h_diff_table = (signed char *) palloc(H_DIFF_TABLE_SIZE),
*d_h_diff_table = (signed char *) cudaPalloc(H_DIFF_TABLE_SIZE);
int min_fvalue = 0;
if (argc != 2)
exit_failure("usage: bin/cumain <ifname>\n");
load_state_from_file(argv[1], input[0].tiles);
{
State init_state = state_init(input[0].tiles, 0);
state_dump(init_state);
if (distribute_astar(init_state, input, N_INIT_DISTRIBUTION, &n_roots,
&min_fvalue))
{
elog("solution is found by distributor\n");
goto solution_found;
}
state_fini(init_state);
}
init_mdist(h_diff_table);
init_movable_table(movable_table);
CUDA_CHECK(hipMemcpy(d_movable_table, movable_table, MOVABLE_TABLE_SIZE,
hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(d_h_diff_table, h_diff_table, H_DIFF_TABLE_SIZE,
hipMemcpyHostToDevice));
CUDA_CHECK(hipMemset(d_input, 0, INPUT_SIZE));
for (uchar f_limit = min_fvalue;; f_limit += 2)
{
CUDA_CHECK(hipMemset(d_stat, 0, STAT_SIZE));
CUDA_CHECK(
hipMemcpy(d_input, input, INPUT_SIZE, hipMemcpyHostToDevice));
elog("f_limit=%d\n", (int) f_limit);
hipLaunchKernelGGL(( idas_kernel), dim3(n_roots), dim3(BLOCK_DIM), 0, 0, d_input, d_stat, f_limit,
d_h_diff_table, d_movable_table);
CUDA_CHECK(
hipGetLastError()); /* asm trap is called when find solution */
CUDA_CHECK(hipMemcpy(stat, d_stat, STAT_SIZE, hipMemcpyDeviceToHost));
unsigned long long int loads_sum = 0;
for (int i = 0; i < n_roots; ++i)
loads_sum += stat[i].loads;
#ifdef COLLECT_LOG
elog("STAT: loop\n");
for (int i = 0; i < n_roots; ++i)
elog("%lld, ", stat[i].loads);
putchar('\n');
elog("STAT: nodes_expanded\n");
for (int i = 0; i < n_roots; ++i)
elog("%lld, ", stat[i].nodes_expanded);
putchar('\n');
elog("STAT: efficiency\n");
for (int i = 0; i < n_roots; ++i)
if (stat[i].loads != 0)
elog("%lld, ", stat[i].nodes_expanded / stat[i].loads);
putchar('\n');
#endif
int increased = 0;
unsigned long long int loads_av = loads_sum / n_roots;
int stat_cnt[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
for (int i = 0; i < n_roots; ++i)
{
if (stat[i].loads < loads_av)
stat_cnt[0]++;
else if (stat[i].loads < 2 * loads_av)
stat_cnt[1]++;
else if (stat[i].loads < 4 * loads_av)
stat_cnt[2]++;
else if (stat[i].loads < 8 * loads_av)
stat_cnt[3]++;
else if (stat[i].loads < 16 * loads_av)
stat_cnt[4]++;
else if (stat[i].loads < 32 * loads_av)
stat_cnt[5]++;
else if (stat[i].loads < 64 * loads_av)
stat_cnt[6]++;
else if (stat[i].loads < 128 * loads_av)
stat_cnt[7]++;
else
stat_cnt[8]++;
int policy = loads_av == 0 ? stat[i].loads
: (stat[i].loads - 1) / loads_av + 1;
int buf_len_old = buf_len;
if (policy > 1 && stat[i].loads > 10)
increased += input_devide(input, stat, i, policy,
n_roots + increased, &buf_len);
if (buf_len != buf_len_old)
{
elog("XXX: fix MAX_BUF_RATIO\n");
stat = (search_stat *) repalloc(stat, STAT_SIZE);
cudaPfree(d_input);
cudaPfree(d_stat);
d_input = (Input *) cudaPalloc(INPUT_SIZE);
d_stat = (search_stat *) cudaPalloc(STAT_SIZE);
}
}
elog("STAT: loads: sum=%lld, av=%lld\n", loads_sum, loads_av);
elog("STAT: distr: av=%d, 2av=%d, 4av=%d, 8av=%d, 16av=%d, 32av=%d, "
"64av=%d, 128av=%d, more=%d\n",
stat_cnt[0], stat_cnt[1], stat_cnt[2], stat_cnt[3], stat_cnt[4],
stat_cnt[5], stat_cnt[6], stat_cnt[7], stat_cnt[8]);
n_roots += increased;
elog("STAT: n_roots=%d(+%d)\n", n_roots, increased);
#ifdef SEARCH_ALL_THE_BEST
for (int i = 0; i < n_roots; ++i)
if (stat[i].solved)
{
elog("find all the optimal solution(s), at depth=%d\n", stat[i].len);
goto solution_found;
}
#endif
// shuffle_input(input, n_roots); /* it may not be needed in case of idas_global */
}
solution_found:
cudaPfree(d_input);
cudaPfree(d_stat);
cudaPfree(d_movable_table);
cudaPfree(d_h_diff_table);
CUDA_CHECK(hipDeviceReset());
pfree(input);
pfree(stat);
pfree(movable_table);
pfree(h_diff_table);
return 0;
}
| a164e35d1d94f2d4d773522b3dedc9040e303d08.cu | #include <stdbool.h>
#define PACKED
#define SEARCH_ALL_THE_BEST
#define COLLECT_LOG
#undef USE_PRECOMPUTED_HDIFF
#define BLOCK_DIM (32)
#define N_INIT_DISTRIBUTION (BLOCK_DIM * 64)
#define MAX_GPU_PLAN (24)
#define MAX_BUF_RATIO (256)
#define STATE_WIDTH 4
#define STATE_N (STATE_WIDTH * STATE_WIDTH)
typedef unsigned char uchar;
typedef signed char Direction;
#define dir_reverse(dir) ((Direction)(3 - (dir)))
#define DIR_N 4
#define DIR_FIRST 0
#define DIR_UP 0
#define DIR_RIGHT 1
#define DIR_LEFT 2
#define DIR_DOWN 3
#define POS_X(pos) ((pos) &3)
#define POS_Y(pos) ((pos) >> 2)
typedef struct search_stat_tag
{
bool solved;
int len;
unsigned long long int loads;
#ifdef COLLECT_LOG
unsigned long long int nodes_expanded;
#endif
//bool assert_failed;
} search_stat;
typedef struct input_tag
{
uchar tiles[STATE_N];
int init_depth;
Direction parent_dir;
} Input;
/* state implementation */
/*
* goal: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
*/
#ifdef USE_PRECOMPUTED_HDIFF
__device__ __shared__ static signed char h_diff_table_shared[STATE_N][STATE_N][DIR_N];
#endif
typedef struct state_tag
{
#ifndef PACKED
uchar tile[STATE_N];
#else
unsigned long long tile;
#endif
uchar empty;
uchar depth;
Direction parent_dir;
    uchar h_value; /* ub of h_value is (STATE_WIDTH-1)*2*(STATE_N-1), e.g. 90 */
} d_State;
#ifndef PACKED
#define state_tile_get(i) (state->tile[i])
#define state_tile_set(i, v) (state->tile[i] = (v))
#else
#define STATE_TILE_BITS 4
#define STATE_TILE_MASK ((1ull << STATE_TILE_BITS) - 1)
#define state_tile_ofs(i) (i << 2)
#define state_tile_get(i) \
((state->tile & (STATE_TILE_MASK << state_tile_ofs(i))) >> \
state_tile_ofs(i))
#define state_tile_set(i, val) \
do \
{ \
state->tile &= ~((STATE_TILE_MASK) << state_tile_ofs(i)); \
state->tile |= ((unsigned long long) val) << state_tile_ofs(i); \
} while (0)
#endif
#define distance(i, j) ((i) > (j) ? (i) - (j) : (j) - (i))
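/*
 * Unpack the tiles of an Input into a (packed) d_State, locate the empty
 * square and compute the initial Manhattan-distance heuristic.
 */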
__device__ static void
state_init(d_State *state, Input *input)
{
state->depth = input->init_depth;
state->parent_dir = input->parent_dir;
for (int i = 0; i < STATE_N; ++i)
{
if (input->tiles[i] == 0)
state->empty = i;
state_tile_set(i, input->tiles[i]);
}
state->h_value = 0;
for (int i = 0; i < STATE_N; ++i)
{
uchar tile = state_tile_get(i);
if (tile == 0)
continue;
state->h_value += distance(POS_X(i), POS_X(tile));
state->h_value += distance(POS_Y(i), POS_Y(tile));
}
}
__device__ static inline bool
state_is_goal(d_State state)
{
return state.h_value == 0;
}
__device__ static inline int
state_get_f(d_State state)
{
return state.depth + state.h_value;
}
__device__ __shared__ static bool movable_table_shared[STATE_N][DIR_N];
__device__ static inline bool
state_movable(d_State state, Direction dir)
{
return movable_table_shared[state.empty][dir];
}
__device__ __constant__ const static int pos_diff_table[DIR_N] = {
-STATE_WIDTH, 1, -1, +STATE_WIDTH};
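/*
 * Incremental heuristic update: change in the Manhattan distance when tile
 * `opponent` leaves position `from` because the empty square moved in the
 * given direction (-1 if the tile gets closer to its goal, +1 otherwise).
 */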
__device__ static inline int
calc_h_diff(int opponent, int from, int rev_dir)
{
int goal_x = POS_X(opponent), goal_y = POS_Y(opponent);
int from_x = POS_X(from), from_y = POS_Y(from);
if (rev_dir == DIR_LEFT)
return goal_x > from_x ? -1 : 1;
else if (rev_dir == DIR_RIGHT)
return goal_x < from_x ? -1 : 1;
else if (rev_dir == DIR_UP)
return goal_y > from_y ? -1 : 1;
else
return goal_y < from_y ? -1 : 1;
}
__device__ static inline void
state_move(d_State *state, Direction dir)
{
int new_empty = state->empty + pos_diff_table[dir];
int opponent = state_tile_get(new_empty);
#ifdef USE_PRECOMPUTED_HDIFF
state->h_value += h_diff_table_shared[opponent][new_empty][dir];
#else
state->h_value += calc_h_diff(opponent, new_empty, dir);
#endif
state_tile_set(state->empty, opponent);
state->empty = new_empty;
state->parent_dir = dir;
++state->depth;
}
/* stack implementation */
#define STACK_BUF_LEN (MAX_GPU_PLAN * (BLOCK_DIM/DIR_N))
typedef struct div_stack_tag
{
unsigned int n;
d_State buf[STACK_BUF_LEN];
} d_Stack;
__device__ static inline bool
stack_is_empty(d_Stack *stack)
{
bool ret = (stack->n == 0);
__syncthreads();
return ret;
}
__device__ static inline void
stack_put(d_Stack *stack, d_State *state, bool put)
{
if (put)
{
unsigned int i = atomicInc( &stack->n, UINT_MAX); /* slow? especially in old CC environment */
stack->buf[i] = *state;
}
__syncthreads();
}
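/*
 * Each group of DIR_N consecutive threads shares one popped entry; thread 0
 * then shrinks the stack by up to BLOCK_DIM / DIR_N entries. Returns false
 * for groups that did not receive a state.
 */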
__device__ static inline bool
stack_pop(d_Stack *stack, d_State *state)
{
int tid = threadIdx.x;
int i = (int) stack->n - 1 - (int) (tid >> 2);
if (i >= 0)
*state = stack->buf[i];
__syncthreads();
if (tid == 0)
stack->n = stack->n >= BLOCK_DIM / DIR_N ?
stack->n - BLOCK_DIM / DIR_N : 0;
__syncthreads();
return i >= 0;
}
//__device__ __shared__ Direction candidate_dir_table[4][3] = {}
/*
* solver implementation
*/
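/*
 * Block-wide f-limited DFS: every iteration each group of DIR_N threads pops
 * one state and each thread in the group tries one move direction; successors
 * with f <= f_limit are pushed back onto the shared stack. The loop ends when
 * the stack is empty, recording the iteration count in stat->loads.
 */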
__device__ static void
idas_internal(d_Stack *stack, int f_limit, search_stat *stat)
{
d_State state;
unsigned long long int loop_cnt = 0;
#ifdef COLLECT_LOG
unsigned long long int nodes_expanded = 0;
#endif
if (threadIdx.x == 0)
stat->solved = false;
for (;;)
{
if (stack_is_empty(stack))
{
stat->loads = loop_cnt;
#ifdef COLLECT_LOG
atomicAdd(&stat->nodes_expanded, nodes_expanded);
#endif
break;
}
++loop_cnt;
bool found = stack_pop(stack, &state),
put = false;
if (found)
{
Direction dir = threadIdx.x & 3;
#ifdef COLLECT_LOG
nodes_expanded++;
#endif
/* NOTE: candidate_dir_table may be effective to avoid divergence */
if (state.parent_dir == dir_reverse(dir))
continue;
if (state_movable(state, dir))
{
state_move(&state, dir);
if (state_get_f(state) <= f_limit)
{
if (state_is_goal(state))
{
#ifndef SEARCH_ALL_THE_BEST
asm("trap;");
#else
stat->loads = loop_cnt;
stat->len = state.depth;
stat->solved = true;
#endif
#ifdef COLLECT_LOG
atomicAdd(&stat->nodes_expanded, nodes_expanded);
#endif
}
else
put = true;
}
}
}
stack_put(stack, &state, put);
}
}
/* XXX: movable table is effective in this case? */
__global__ void
idas_kernel(Input *input, search_stat *stat, int f_limit,
signed char *h_diff_table, bool *movable_table)
{
__shared__ d_Stack stack;
int tid = threadIdx.x;
int bid = blockIdx.x;
if (tid == 0)
stat[bid].loads = 0;
d_State state;
state_init(&state, &input[bid]);
if (state_get_f(state) > f_limit)
return;
if (tid == 0)
{
stack.buf[0] = state;
stack.n = 1;
}
for (int i = tid; i < STATE_N * DIR_N; i += blockDim.x)
if (i < STATE_N * DIR_N)
movable_table_shared[i / DIR_N][i % DIR_N] = movable_table[i];
#ifdef USE_PRECOMPUTED_HDIFF
for (int dir = 0; dir < DIR_N; ++dir)
for (int j = tid; j < STATE_N * STATE_N; j += blockDim.x)
if (j < STATE_N * STATE_N)
h_diff_table_shared[j / STATE_N][j % STATE_N][dir] =
h_diff_table[j * DIR_N + dir];
#endif
__syncthreads();
idas_internal(&stack, f_limit, &stat[bid]);
}
/* host library implementation */
#include <errno.h>
#include <limits.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef UNABLE_LOG
#define elog(...) fprintf(stderr, __VA_ARGS__)
#else
#define elog(...) ;
#endif
void *
palloc(size_t size)
{
void *ptr = malloc(size);
if (!ptr)
elog("malloc failed\n");
return ptr;
}
void *
repalloc(void *old_ptr, size_t new_size)
{
void *ptr = realloc(old_ptr, new_size);
if (!ptr)
elog("realloc failed\n");
return ptr;
}
void
pfree(void *ptr)
{
if (!ptr)
elog("empty ptr\n");
free(ptr);
}
#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
typedef unsigned char idx_t;
/*
* [0,0] [1,0] [2,0] [3,0]
* [0,1] [1,1] [2,1] [3,1]
* [0,2] [1,2] [2,2] [3,2]
* [0,3] [1,3] [2,3] [3,3]
*/
/*
* goal state is
* [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
*/
typedef struct state_tag_cpu
{
int depth; /* XXX: needed? */
uchar pos[STATE_WIDTH][STATE_WIDTH];
idx_t i, j; /* pos of empty */
Direction parent_dir;
int h_value;
} * State;
#define v(state, i, j) ((state)->pos[i][j])
#define ev(state) (v(state, state->i, state->j))
#define lv(state) (v(state, state->i - 1, state->j))
#define dv(state) (v(state, state->i, state->j + 1))
#define rv(state) (v(state, state->i + 1, state->j))
#define uv(state) (v(state, state->i, state->j - 1))
static uchar from_x[STATE_WIDTH * STATE_WIDTH],
from_y[STATE_WIDTH * STATE_WIDTH];
static inline void
fill_from_xy(State from)
{
for (idx_t x = 0; x < STATE_WIDTH; ++x)
for (idx_t y = 0; y < STATE_WIDTH; ++y)
{
from_x[v(from, x, y)] = x;
from_y[v(from, x, y)] = y;
}
}
static inline int
heuristic_manhattan_distance(State from)
{
int h_value = 0;
fill_from_xy(from);
for (idx_t i = 1; i < STATE_N; ++i)
{
h_value += distance(from_x[i], i & 3);
h_value += distance(from_y[i], i >> 2);
}
return h_value;
}
bool
state_is_goal(State state)
{
return state->h_value == 0;
}
static inline State
state_alloc(void)
{
return (State) palloc(sizeof(struct state_tag_cpu));
}
static inline void
state_free(State state)
{
pfree(state);
}
State
state_init(uchar v_list[STATE_WIDTH * STATE_WIDTH], int init_depth)
{
State state = state_alloc();
int cnt = 0;
state->depth = init_depth;
state->parent_dir = (Direction) -1;
for (idx_t j = 0; j < STATE_WIDTH; ++j)
for (idx_t i = 0; i < STATE_WIDTH; ++i)
{
if (v_list[cnt] == 0)
{
state->i = i;
state->j = j;
}
v(state, i, j) = v_list[cnt++];
}
state->h_value = heuristic_manhattan_distance(state);
return state;
}
void
state_fini(State state)
{
state_free(state);
}
State
state_copy(State src)
{
State dst = state_alloc();
memcpy(dst, src, sizeof(*src));
return dst;
}
static inline bool
state_left_movable(State state)
{
return state->i != 0;
}
static inline bool
state_down_movable(State state)
{
return state->j != STATE_WIDTH - 1;
}
static inline bool
state_right_movable(State state)
{
return state->i != STATE_WIDTH - 1;
}
static inline bool
state_up_movable(State state)
{
return state->j != 0;
}
bool
state_movable(State state, Direction dir)
{
return (dir != DIR_LEFT || state_left_movable(state)) &&
(dir != DIR_DOWN || state_down_movable(state)) &&
(dir != DIR_RIGHT || state_right_movable(state)) &&
(dir != DIR_UP || state_up_movable(state));
}
#define h_diff(who, from_i, from_j, dir) \
(h_diff_table[((who) << 6) + ((from_j) << 4) + ((from_i) << 2) + (dir)])
static int h_diff_table[STATE_N * STATE_N * DIR_N] = {
1, 1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1,
1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1,
-1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1,
1, -1, 1, -1, 1, -1, 1, 1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1,
1, 1, -1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1,
1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1,
1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, 1, 1,
-1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1,
-1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1,
1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1,
-1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1,
-1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1,
-1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1,
1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1,
-1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1,
1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1,
-1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1,
-1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1,
1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1,
1, -1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1,
-1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1,
-1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, -1,
1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1,
-1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1,
1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1,
1, 1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1,
1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1,
1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, 1, -1,
1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1,
1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1,
-1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1,
1, -1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1,
1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1,
1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1,
1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1,
1, 1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1,
1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1,
1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1,
1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, 1,
-1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1,
-1, -1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1,
-1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1,
-1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1,
1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, 1, -1,
1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1,
-1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1,
-1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1,
1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1,
1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1,
-1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1,
1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1,
-1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1,
1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1,
-1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1};
void
state_move(State state, Direction dir)
{
idx_t who;
assert(state_movable(state, dir));
switch (dir)
{
case DIR_LEFT:
who = ev(state) = lv(state);
state->i--;
break;
case DIR_DOWN:
who = ev(state) = dv(state);
state->j++;
break;
case DIR_RIGHT:
who = ev(state) = rv(state);
state->i++;
break;
case DIR_UP:
who = ev(state) = uv(state);
state->j--;
break;
default:
elog("unexpected direction");
assert(false);
}
state->h_value =
state->h_value + h_diff(who, state->i, state->j, dir_reverse(dir));
state->parent_dir = dir;
}
bool
state_pos_equal(State s1, State s2)
{
for (idx_t i = 0; i < STATE_WIDTH; ++i)
for (idx_t j = 0; j < STATE_WIDTH; ++j)
if (v(s1, i, j) != v(s2, i, j))
return false;
return true;
}
size_t
state_hash(State state)
{
size_t hash_value = 0;
for (idx_t i = 0; i < STATE_WIDTH; ++i)
for (idx_t j = 0; j < STATE_WIDTH; ++j)
hash_value ^= (v(state, i, j) << ((i * 3 + j) << 2));
return hash_value;
}
int
state_get_hvalue(State state)
{
return state->h_value;
}
int
state_get_depth(State state)
{
return state->depth;
}
static void
state_dump(State state)
{
elog("LOG(state): depth=%d, h=%d, f=%d, ", state->depth, state->h_value,
state->depth + state->h_value);
for (int i = 0; i < STATE_N; ++i)
elog("%d%c", i == state->i + STATE_WIDTH * state->j
? 0
: state->pos[i % STATE_WIDTH][i / STATE_WIDTH],
i == STATE_N - 1 ? '\n' : ',');
}
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif
typedef enum {
HT_SUCCESS = 0,
HT_FAILED_FOUND,
HT_FAILED_NOT_FOUND,
} HTStatus;
/* XXX: hash function for State should be surveyed */
inline static size_t
hashfunc(State key)
{
return state_hash(key);
}
typedef struct ht_entry_tag *HTEntry;
struct ht_entry_tag
{
HTEntry next;
State key;
int value;
};
static HTEntry
ht_entry_init(State key)
{
HTEntry entry = (HTEntry) palloc(sizeof(*entry));
entry->key = state_copy(key);
entry->next = NULL;
return entry;
}
static void
ht_entry_fini(HTEntry entry)
{
pfree(entry);
}
typedef struct ht_tag
{
size_t n_bins;
size_t n_elems;
HTEntry *bin;
} * HT;
static bool
ht_rehash_required(HT ht)
{
return ht->n_bins <= ht->n_elems;
}
static size_t
calc_n_bins(size_t required)
{
    /* NOTE: n_bins is used as a bit mask and hence must be a power of 2, for now */
size_t size = 1;
assert(required > 0);
while (required > size)
size <<= 1;
return size;
}
HT
ht_init(size_t init_size_hint)
{
size_t n_bins = calc_n_bins(init_size_hint);
HT ht = (HT) palloc(sizeof(*ht));
ht->n_bins = n_bins;
ht->n_elems = 0;
assert(sizeof(*ht->bin) <= SIZE_MAX / n_bins);
ht->bin = (HTEntry *) palloc(sizeof(*ht->bin) * n_bins);
memset(ht->bin, 0, sizeof(*ht->bin) * n_bins);
return ht;
}
static void
ht_rehash(HT ht)
{
HTEntry *new_bin;
size_t new_size = ht->n_bins << 1;
    assert(ht->n_bins < SIZE_MAX >> 1);
new_bin = (HTEntry *) palloc(sizeof(*new_bin) * new_size);
memset(new_bin, 0, sizeof(*new_bin) * new_size);
for (size_t i = 0; i < ht->n_bins; ++i)
{
HTEntry entry = ht->bin[i];
while (entry)
{
HTEntry next = entry->next;
size_t idx = hashfunc(entry->key) & (new_size - 1);
entry->next = new_bin[idx];
new_bin[idx] = entry;
entry = next;
}
}
pfree(ht->bin);
ht->n_bins = new_size;
ht->bin = new_bin;
}
void
ht_fini(HT ht)
{
for (size_t i = 0; i < ht->n_bins; ++i)
{
HTEntry entry = ht->bin[i];
while (entry)
{
HTEntry next = entry->next;
state_fini(entry->key);
ht_entry_fini(entry);
entry = next;
}
}
pfree(ht->bin);
pfree(ht);
}
HTStatus
ht_insert(HT ht, State key, int **value)
{
size_t i;
HTEntry entry, new_entry;
if (ht_rehash_required(ht))
ht_rehash(ht);
i = hashfunc(key) & (ht->n_bins - 1);
entry = ht->bin[i];
while (entry)
{
if (state_pos_equal(key, entry->key))
{
*value = &entry->value;
return HT_FAILED_FOUND;
}
entry = entry->next;
}
new_entry = ht_entry_init(key);
new_entry->next = ht->bin[i];
ht->bin[i] = new_entry;
*value = &new_entry->value;
assert(ht->n_elems < SIZE_MAX);
ht->n_elems++;
return HT_SUCCESS;
}
/*
* Priority Queue implementation
*/
#include <assert.h>
#include <stdint.h>
typedef struct pq_entry_tag
{
State state;
int f, g;
} PQEntryData;
typedef PQEntryData *PQEntry;
/* tiebreaking is done comparing g value */
static inline bool
pq_entry_higher_priority(PQEntry e1, PQEntry e2)
{
return e1->f < e2->f || (e1->f == e2->f && e1->g >= e2->g);
}
/*
* NOTE:
 * This priority queue is implemented as a doubly reallocated (growable) array.
 * It will only extend and will not shrink, for now.
 * It may be improved by using an array of layers of iteratively widened arrays.
*/
typedef struct pq_tag
{
size_t n_elems;
size_t capa;
PQEntryData *array;
} * PQ;
static inline size_t
calc_init_capa(size_t capa_hint)
{
size_t capa = 1;
assert(capa_hint > 0);
while (capa < capa_hint)
capa <<= 1;
return capa - 1;
}
PQ
pq_init(size_t init_capa_hint)
{
PQ pq = (PQ) palloc(sizeof(*pq));
pq->n_elems = 0;
pq->capa = calc_init_capa(init_capa_hint);
assert(pq->capa <= SIZE_MAX / sizeof(PQEntryData));
pq->array = (PQEntryData *) palloc(sizeof(PQEntryData) * pq->capa);
return pq;
}
void
pq_fini(PQ pq)
{
for (size_t i = 0; i < pq->n_elems; ++i)
state_fini(pq->array[i].state);
pfree(pq->array);
pfree(pq);
}
static inline bool
pq_is_full(PQ pq)
{
assert(pq->n_elems <= pq->capa);
return pq->n_elems == pq->capa;
}
static inline void
pq_extend(PQ pq)
{
pq->capa = (pq->capa << 1) + 1;
assert(pq->capa <= SIZE_MAX / sizeof(PQEntryData));
pq->array =
(PQEntryData *) repalloc(pq->array, sizeof(PQEntryData) * pq->capa);
}
static inline void
pq_swap_entry(PQ pq, size_t i, size_t j)
{
PQEntryData tmp = pq->array[i];
pq->array[i] = pq->array[j];
pq->array[j] = tmp;
}
static inline size_t
pq_up(size_t i)
{
/* NOTE: By using 1-origin, it may be written more simply, i >> 1 */
return (i - 1) >> 1;
}
static inline size_t
pq_left(size_t i)
{
return (i << 1) + 1;
}
static void
heapify_up(PQ pq)
{
for (size_t i = pq->n_elems; i > 0;)
{
size_t ui = pq_up(i);
assert(i > 0);
if (!pq_entry_higher_priority(&pq->array[i], &pq->array[ui]))
break;
pq_swap_entry(pq, i, ui);
i = ui;
}
}
void
pq_put(PQ pq, State state, int f, int g)
{
if (pq_is_full(pq))
pq_extend(pq);
pq->array[pq->n_elems].state = state_copy(state);
    pq->array[pq->n_elems].f = f; /* this may be redundant */
pq->array[pq->n_elems].g = g;
heapify_up(pq);
++pq->n_elems;
}
static void
heapify_down(PQ pq)
{
size_t sentinel = pq->n_elems;
for (size_t i = 0;;)
{
size_t ri, li = pq_left(i);
if (li >= sentinel)
break;
ri = li + 1;
if (ri >= sentinel)
{
if (pq_entry_higher_priority(&pq->array[li], &pq->array[i]))
pq_swap_entry(pq, i, li);
/* Reached the bottom */
break;
}
/* NOTE: If p(ri) == p(li), it may be good to go right
* since the filling order is left-first */
if (pq_entry_higher_priority(&pq->array[li], &pq->array[ri]))
{
if (!pq_entry_higher_priority(&pq->array[li], &pq->array[i]))
break;
pq_swap_entry(pq, i, li);
i = li;
}
else
{
if (!pq_entry_higher_priority(&pq->array[ri], &pq->array[i]))
break;
pq_swap_entry(pq, i, ri);
i = ri;
}
}
}
State
pq_pop(PQ pq)
{
State ret_state;
if (pq->n_elems == 0)
return NULL;
ret_state = pq->array[0].state;
--pq->n_elems;
pq->array[0] = pq->array[pq->n_elems];
heapify_down(pq);
return ret_state;
}
void
pq_dump(PQ pq)
{
elog("%s: n_elems=%zu, capa=%zu\n", __func__, pq->n_elems, pq->capa);
for (size_t i = 0, cr_required = 1; i < pq->n_elems; i++)
{
if (i == cr_required)
{
elog("\n");
cr_required = (cr_required << 1) + 1;
}
elog("%d,", pq->array[i].f);
elog("%d ", pq->array[i].g);
}
elog("\n");
}
#include <stdlib.h>
#include <string.h>
int
rrand(int m)
{
return (int) ((double) m * (rand() / (RAND_MAX + 1.0)));
}
void
shuffle_input(Input input[], int n_inputs)
{
Input tmp;
size_t n = n_inputs;
while (n > 1)
{
size_t k = rrand(n--);
memcpy(&tmp, &input[n], sizeof(Input));
memcpy(&input[n], &input[k], sizeof(Input));
memcpy(&input[k], &tmp, sizeof(Input));
}
}
static HT closed;
bool
distribute_astar(State init_state, Input input[], int distr_n, int *cnt_inputs,
int *min_fvalue)
{
int cnt = 0;
State state;
PQ q = pq_init(distr_n + 10);
HTStatus ht_status;
int * ht_value;
bool solved = false;
closed = ht_init(10000);
ht_status = ht_insert(closed, init_state, &ht_value);
*ht_value = 0;
pq_put(q, state_copy(init_state), state_get_hvalue(init_state), 0);
++cnt;
while ((state = pq_pop(q)))
{
--cnt;
if (state_is_goal(state))
{
solved = true;
break;
}
ht_status = ht_insert(closed, state, &ht_value);
if (ht_status == HT_FAILED_FOUND && *ht_value < state_get_depth(state))
{
state_fini(state);
continue;
}
else
*ht_value = state_get_depth(state);
for (int dir = 0; dir < DIR_N; ++dir)
{
if (state->parent_dir != dir_reverse(dir) &&
state_movable(state, (Direction) dir))
{
State next_state = state_copy(state);
state_move(next_state, (Direction) dir);
next_state->depth++;
ht_status = ht_insert(closed, next_state, &ht_value);
if (ht_status == HT_FAILED_FOUND &&
*ht_value <= state_get_depth(next_state))
state_fini(next_state);
else
{
++cnt;
*ht_value = state_get_depth(next_state);
pq_put(q, next_state,
*ht_value + state_get_hvalue(next_state), *ht_value);
}
}
}
state_fini(state);
if (cnt >= distr_n)
break;
}
*cnt_inputs = cnt;
elog("LOG: init_distr, cnt=%d\n", cnt);
if (!solved)
{
int minf = INT_MAX;
for (int id = 0; id < cnt; ++id)
{
State state = pq_pop(q);
assert(state);
for (int i = 0; i < STATE_N; ++i)
input[id].tiles[i] =
state->pos[i % STATE_WIDTH][i / STATE_WIDTH];
input[id].tiles[state->i + (state->j * STATE_WIDTH)] = 0;
input[id].init_depth = state_get_depth(state);
input[id].parent_dir = state->parent_dir;
if (minf > state_get_depth(state) + state_get_hvalue(state))
minf = state_get_depth(state) + state_get_hvalue(state);
}
assert(pq_pop(q) == NULL);
// shuffle_input(input, cnt);
*min_fvalue = minf;
}
pq_fini(q);
return solved;
}
static int
input_devide(Input input[], search_stat stat[], int i, int devide_n, int tail,
int *buf_len)
{
int cnt = 0;
int * ht_value;
State state = state_init(input[i].tiles, input[i].init_depth);
state->parent_dir = input[i].parent_dir;
PQ pq = pq_init(devide_n);
HTStatus ht_status;
pq_put(pq, state, state_get_hvalue(state), 0);
++cnt;
assert(devide_n > 0);
while ((state = pq_pop(pq)))
{
--cnt;
if (state_is_goal(state))
{
/* It may not be optimal goal */
pq_put(pq, state, state_get_depth(state) + state_get_hvalue(state),
state_get_depth(state));
++cnt;
break;
}
ht_status = ht_insert(closed, state, &ht_value);
if (ht_status == HT_FAILED_FOUND && *ht_value < state_get_depth(state))
{
state_fini(state);
continue;
}
else
*ht_value = state_get_depth(state);
for (int dir = 0; dir < DIR_N; ++dir)
{
if (state->parent_dir != dir_reverse(dir) &&
state_movable(state, (Direction) dir))
{
State next_state = state_copy(state);
state_move(next_state, (Direction) dir);
next_state->depth++;
ht_status = ht_insert(closed, next_state, &ht_value);
if (ht_status == HT_FAILED_FOUND &&
*ht_value < state_get_depth(next_state))
state_fini(next_state);
else
{
++cnt;
*ht_value = state_get_depth(next_state);
pq_put(pq, next_state,
*ht_value + state_get_hvalue(next_state), *ht_value);
}
}
}
state_fini(state);
if (cnt >= devide_n)
break;
}
int new_buf_len = *buf_len;
while (tail + cnt >= new_buf_len)
new_buf_len <<= 1;
if (new_buf_len != *buf_len)
{
*buf_len = new_buf_len;
repalloc(input, sizeof(*input) * new_buf_len);
elog("LOG: host buf resize\n");
}
input[i] = input[tail - 1];
for (int id = 0; id < cnt; ++id)
{
int ofs = tail - 1 + id;
State state = pq_pop(pq);
assert(state);
for (int j = 0; j < STATE_N; ++j)
input[ofs].tiles[j] = state->pos[j % STATE_WIDTH][j / STATE_WIDTH];
input[ofs].tiles[state->i + (state->j * STATE_WIDTH)] = 0;
input[ofs].init_depth = state_get_depth(state);
input[ofs].parent_dir = state->parent_dir;
}
pq_fini(pq);
return cnt - 1;
}
/* main */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#define exit_failure(...) \
do \
{ \
printf(__VA_ARGS__); \
exit(EXIT_FAILURE); \
} while (0)
static int
pop_int_from_str(const char *str, char **end_ptr)
{
    errno = 0;
    long int rv = strtol(str, end_ptr, 0);
    if (errno != 0)
exit_failure("%s: %s cannot be converted into long\n", __func__, str);
else if (end_ptr && str == *end_ptr)
exit_failure("%s: reach end of string", __func__);
if (rv > INT_MAX || rv < INT_MIN)
exit_failure("%s: too big number, %ld\n", __func__, rv);
return (int) rv;
}
#define MAX_LINE_LEN 100
static void
load_state_from_file(const char *fname, uchar *s)
{
FILE *fp;
char str[MAX_LINE_LEN];
char *str_ptr = str, *end_ptr;
fp = fopen(fname, "r");
if (!fp)
exit_failure("%s: %s cannot be opened\n", __func__, fname);
if (!fgets(str, MAX_LINE_LEN, fp))
exit_failure("%s: fgets failed\n", __func__);
for (int i = 0; i < STATE_N; ++i)
{
s[i] = pop_int_from_str(str_ptr, &end_ptr);
str_ptr = end_ptr;
}
fclose(fp);
}
#undef MAX_LINE_LEN
#define CUDA_CHECK(call) \
do \
{ \
const cudaError_t e = call; \
if (e != cudaSuccess) \
exit_failure("Error: %s:%d code:%d, reason: %s\n", __FILE__, \
__LINE__, e, cudaGetErrorString(e)); \
} while (0)
__host__ static void *
cudaPalloc(size_t size)
{
void *ptr;
CUDA_CHECK(cudaMalloc(&ptr, size));
return ptr;
}
__host__ static void
cudaPfree(void *ptr)
{
CUDA_CHECK(cudaFree(ptr));
}
#define h_d_t(op, i, dir) \
(h_diff_table[(op) *STATE_N * DIR_N + (i) *DIR_N + (dir)])
__host__ static void
init_mdist(signed char h_diff_table[])
{
for (int opponent = 0; opponent < STATE_N; ++opponent)
{
int goal_x = POS_X(opponent), goal_y = POS_Y(opponent);
for (int i = 0; i < STATE_N; ++i)
{
int from_x = POS_X(i), from_y = POS_Y(i);
for (uchar dir = 0; dir < DIR_N; ++dir)
{
if (dir == DIR_LEFT)
h_d_t(opponent, i, dir) = goal_x > from_x ? -1 : 1;
if (dir == DIR_RIGHT)
h_d_t(opponent, i, dir) = goal_x < from_x ? -1 : 1;
if (dir == DIR_UP)
h_d_t(opponent, i, dir) = goal_y > from_y ? -1 : 1;
if (dir == DIR_DOWN)
h_d_t(opponent, i, dir) = goal_y < from_y ? -1 : 1;
}
}
}
}
#undef h_d_t
#define m_t(i, d) (movable_table[(i) *DIR_N + (d)])
__host__ static void
init_movable_table(bool movable_table[])
{
for (int i = 0; i < STATE_N; ++i)
for (unsigned int d = 0; d < DIR_N; ++d)
{
if (d == DIR_RIGHT)
m_t(i, d) = (POS_X(i) < STATE_WIDTH - 1);
else if (d == DIR_LEFT)
m_t(i, d) = (POS_X(i) > 0);
else if (d == DIR_DOWN)
m_t(i, d) = (POS_Y(i) < STATE_WIDTH - 1);
else if (d == DIR_UP)
m_t(i, d) = (POS_Y(i) > 0);
}
}
#undef m_t
// static char dir_char[] = {'U', 'R', 'L', 'D'};
#define INPUT_SIZE (sizeof(Input) * buf_len)
#define STAT_SIZE (sizeof(search_stat) * buf_len)
#define MOVABLE_TABLE_SIZE (sizeof(bool) * STATE_N * DIR_N)
#define H_DIFF_TABLE_SIZE (STATE_N * STATE_N * DIR_N)
int
main(int argc, char *argv[])
{
int n_roots;
int buf_len = N_INIT_DISTRIBUTION * MAX_BUF_RATIO;
Input *input = (Input *) palloc(INPUT_SIZE),
*d_input = (Input *) cudaPalloc(INPUT_SIZE);
search_stat *stat = (search_stat *) palloc(STAT_SIZE),
*d_stat = (search_stat *) cudaPalloc(STAT_SIZE);
bool *movable_table = (bool *) palloc(MOVABLE_TABLE_SIZE),
*d_movable_table = (bool *) cudaPalloc(MOVABLE_TABLE_SIZE);
signed char *h_diff_table = (signed char *) palloc(H_DIFF_TABLE_SIZE),
*d_h_diff_table = (signed char *) cudaPalloc(H_DIFF_TABLE_SIZE);
int min_fvalue = 0;
if (argc != 2)
exit_failure("usage: bin/cumain <ifname>\n");
load_state_from_file(argv[1], input[0].tiles);
{
State init_state = state_init(input[0].tiles, 0);
state_dump(init_state);
if (distribute_astar(init_state, input, N_INIT_DISTRIBUTION, &n_roots,
&min_fvalue))
{
elog("solution is found by distributor\n");
goto solution_found;
}
state_fini(init_state);
}
init_mdist(h_diff_table);
init_movable_table(movable_table);
CUDA_CHECK(cudaMemcpy(d_movable_table, movable_table, MOVABLE_TABLE_SIZE,
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(d_h_diff_table, h_diff_table, H_DIFF_TABLE_SIZE,
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemset(d_input, 0, INPUT_SIZE));
for (uchar f_limit = min_fvalue;; f_limit += 2)
{
CUDA_CHECK(cudaMemset(d_stat, 0, STAT_SIZE));
CUDA_CHECK(
cudaMemcpy(d_input, input, INPUT_SIZE, cudaMemcpyHostToDevice));
elog("f_limit=%d\n", (int) f_limit);
idas_kernel<<<n_roots, BLOCK_DIM>>>(d_input, d_stat, f_limit,
d_h_diff_table, d_movable_table);
CUDA_CHECK(
cudaGetLastError()); /* asm trap is called when find solution */
CUDA_CHECK(cudaMemcpy(stat, d_stat, STAT_SIZE, cudaMemcpyDeviceToHost));
unsigned long long int loads_sum = 0;
for (int i = 0; i < n_roots; ++i)
loads_sum += stat[i].loads;
#ifdef COLLECT_LOG
elog("STAT: loop\n");
for (int i = 0; i < n_roots; ++i)
elog("%lld, ", stat[i].loads);
putchar('\n');
elog("STAT: nodes_expanded\n");
for (int i = 0; i < n_roots; ++i)
elog("%lld, ", stat[i].nodes_expanded);
putchar('\n');
elog("STAT: efficiency\n");
for (int i = 0; i < n_roots; ++i)
if (stat[i].loads != 0)
elog("%lld, ", stat[i].nodes_expanded / stat[i].loads);
putchar('\n');
#endif
int increased = 0;
unsigned long long int loads_av = loads_sum / n_roots;
int stat_cnt[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
for (int i = 0; i < n_roots; ++i)
{
if (stat[i].loads < loads_av)
stat_cnt[0]++;
else if (stat[i].loads < 2 * loads_av)
stat_cnt[1]++;
else if (stat[i].loads < 4 * loads_av)
stat_cnt[2]++;
else if (stat[i].loads < 8 * loads_av)
stat_cnt[3]++;
else if (stat[i].loads < 16 * loads_av)
stat_cnt[4]++;
else if (stat[i].loads < 32 * loads_av)
stat_cnt[5]++;
else if (stat[i].loads < 64 * loads_av)
stat_cnt[6]++;
else if (stat[i].loads < 128 * loads_av)
stat_cnt[7]++;
else
stat_cnt[8]++;
int policy = loads_av == 0 ? stat[i].loads
: (stat[i].loads - 1) / loads_av + 1;
int buf_len_old = buf_len;
if (policy > 1 && stat[i].loads > 10)
increased += input_devide(input, stat, i, policy,
n_roots + increased, &buf_len);
if (buf_len != buf_len_old)
{
elog("XXX: fix MAX_BUF_RATIO\n");
stat = (search_stat *) repalloc(stat, STAT_SIZE);
cudaPfree(d_input);
cudaPfree(d_stat);
d_input = (Input *) cudaPalloc(INPUT_SIZE);
d_stat = (search_stat *) cudaPalloc(STAT_SIZE);
}
}
elog("STAT: loads: sum=%lld, av=%lld\n", loads_sum, loads_av);
elog("STAT: distr: av=%d, 2av=%d, 4av=%d, 8av=%d, 16av=%d, 32av=%d, "
"64av=%d, 128av=%d, more=%d\n",
stat_cnt[0], stat_cnt[1], stat_cnt[2], stat_cnt[3], stat_cnt[4],
stat_cnt[5], stat_cnt[6], stat_cnt[7], stat_cnt[8]);
n_roots += increased;
elog("STAT: n_roots=%d(+%d)\n", n_roots, increased);
#ifdef SEARCH_ALL_THE_BEST
for (int i = 0; i < n_roots; ++i)
if (stat[i].solved)
{
elog("find all the optimal solution(s), at depth=%d\n", stat[i].len);
goto solution_found;
}
#endif
// shuffle_input(input, n_roots); /* it may not be needed in case of idas_global */
}
solution_found:
cudaPfree(d_input);
cudaPfree(d_stat);
cudaPfree(d_movable_table);
cudaPfree(d_h_diff_table);
CUDA_CHECK(cudaDeviceReset());
pfree(input);
pfree(stat);
pfree(movable_table);
pfree(h_diff_table);
return 0;
}
|
2cf616be7984b889728cab474fc61aa805b4117e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include "common/book.h"
/*
To summarize, host pointers can access memory from host code, and device pointers can access memory from
device code.
You can pass pointers allocated with hipMalloc() to functions that
execute on the device.
You can use pointers allocated with hipMalloc()to read or write
memory from code that executes on the device.
You can pass pointers allocated with hipMalloc()to functions that
execute on the host.
You cannot use pointers allocated with hipMalloc()to read or write
memory from code that executes on the host.
*/
//This kernel function will run in the device
__global__ void add ( int a, int b, int *c ) {
*c = a + b;
}
int main ( void ) {
int c;
int *dev_c;
//pointer to a pointer and sizeof
HANDLE_ERROR( hipMalloc ( (void**) &dev_c, sizeof(int) ) );
//kernel call
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, 5, 26, dev_c);
HANDLE_ERROR( hipMemcpy ( &c, dev_c, sizeof(int), hipMemcpyDeviceToHost ) );
printf( "5 + 26 = %d\n", c);
hipFree (dev_c);
return 0;
}
| 2cf616be7984b889728cab474fc61aa805b4117e.cu | #include <stdio.h>
#include <iostream>
#include "common/book.h"
/*
To summarize, host pointers can access memory from host code, and device pointers can access memory from
device code.
You can pass pointers allocated with cudaMalloc() to functions that
execute on the device.
You can use pointers allocated with cudaMalloc()to read or write
memory from code that executes on the device.
You can pass pointers allocated with cudaMalloc()to functions that
execute on the host.
You cannot use pointers allocated with cudaMalloc()to read or write
memory from code that executes on the host.
*/
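// Illustrative sketch added for clarity (not part of the original sample); the
// variable names below are hypothetical and only demonstrate the rules above:
// a pointer returned by cudaMalloc() may be passed around on the host, but it
// must not be dereferenced there -- copy the value back with cudaMemcpy() first.
//
// int *dev_val; // device pointer, host-side handle only
// cudaMalloc( (void**) &dev_val, sizeof(int) );
// // printf( "%d", *dev_val ); // WRONG: host dereference of a device pointer
// int host_val;
// cudaMemcpy( &host_val, dev_val, sizeof(int), cudaMemcpyDeviceToHost ); // correct
// cudaFree( dev_val );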
//This kernel function will run in the device
__global__ void add ( int a, int b, int *c ) {
*c = a + b;
}
int main ( void ) {
int c;
int *dev_c;
//pointer to a pointer and sizeof
HANDLE_ERROR( cudaMalloc ( (void**) &dev_c, sizeof(int) ) );
//kernel call
add<<<1,1>>>( 5, 26, dev_c);
HANDLE_ERROR( cudaMemcpy ( &c, dev_c, sizeof(int), cudaMemcpyDeviceToHost ) );
printf( "5 + 26 = %d\n", c);
cudaFree (dev_c);
return 0;
}
|
1c56ae8f22fa56fbaf80730de1bc00c8a86b23d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/time.h>
#define MAX_CHAR_PER_LINE 128
#define FLT_MAX 3.40282347e+38
#define malloc2D(name, xDim, yDim, type) do { \
name = (type **)malloc(xDim * sizeof(type *)); \
name[0] = (type *)malloc(xDim * yDim * sizeof(type)); \
for (size_t i = 1; i < xDim; i++) \
name[i] = name[i-1] + yDim; \
} while (0)
float** file_read(char *filename, int *num_objs, int *num_coordinates)
{
float **objects;
int i, j, len;
ssize_t numBytesRead;
FILE *infile;
char *line, *ret;
int lineLen;
if ((infile = fopen(filename, "r")) == NULL) {
fprintf(stderr, "Error: no such file (%s)\n", filename);
return NULL;
}
lineLen = MAX_CHAR_PER_LINE;
line = (char*) malloc(lineLen);
(*num_objs) = 0;
while (fgets(line, lineLen, infile) != NULL) {
while (strlen(line) == lineLen-1) {
len = strlen(line);
fseek(infile, -len, SEEK_CUR);
lineLen += MAX_CHAR_PER_LINE;
line = (char*) realloc(line, lineLen);
ret = fgets(line, lineLen, infile);
}
if (strtok(line, " \t\n") != 0)
(*num_objs)++;
}
rewind(infile);
(*num_coordinates) = 0;
while (fgets(line, lineLen, infile) != NULL) {
if (strtok(line, " \t\n") != 0) {
while (strtok(NULL, " ,\t\n") != NULL) (*num_coordinates)++;
break;
}
}
rewind(infile);
len = (*num_objs) * (*num_coordinates);
objects = (float**)malloc((*num_objs) * sizeof(float*));
objects[0] = (float*) malloc(len * sizeof(float));
for (i=1; i<(*num_objs); i++)
objects[i] = objects[i-1] + (*num_coordinates);
i = 0;
while (fgets(line, lineLen, infile) != NULL) {
if (strtok(line, " \t\n") == NULL) continue;
for (j=0; j<(*num_coordinates); j++)
objects[i][j] = atof(strtok(NULL, " ,\t\n"));
i++;
}
fclose(infile);
free(line);
return objects;
}
static int cal_pow_2(int n) {
int res = 0;
while(n > 0){
n >>= 1;
res = (res<<1) | 1;
}
return (res+1);
}
__host__ __device__ static
float cal_dist(int num_coordinates, int num_objs, int num_clusters, float *objects, float *clusters, int objectId, int clusterId){
float ans=0.0;
for (int i = 0; i < num_coordinates; i++) {
float temp = (objects[num_objs * i + objectId] - clusters[num_clusters * i + clusterId]);
ans += pow(temp,2);
}
ans = sqrt(ans);
return ans;
}
__global__ static
void find_nearest_cluster(int num_coordinates, int num_objs, int num_clusters, float *objects, float *dev_clusters, int *relationship,
int *curr_temporaries){
extern __shared__ char sharedMemory[];
unsigned char *relationshipChanged = (unsigned char *)sharedMemory;
relationshipChanged[threadIdx.x] = 0;
int objectId = threadIdx.x + (blockDim.x * blockIdx.x);
if (objectId < num_objs) {
float min_dist;
int index = -1;
min_dist = FLT_MAX;
float *clusters = dev_clusters;
for (int i=0; i<num_clusters; i++) {
float dist = cal_dist(num_coordinates, num_objs, num_clusters,
objects, clusters, objectId, i);
index = (dist < min_dist ? (min_dist = dist, i): index);
}
if (relationship[objectId] != index) {
relationship[objectId] = index;
relationshipChanged[threadIdx.x] = 1;
		}
	}
	/* the block-wide reduction below must be reached by every thread in the
	   block: a __syncthreads() inside the divergent objectId guard would be
	   undefined behavior, so the barrier and the reduction sit outside it */
	__syncthreads();
	unsigned int s = blockDim.x / 2;
	while(s > 0) {
		relationshipChanged[threadIdx.x] += ((threadIdx.x < s) ? relationshipChanged[threadIdx.x + s] : 0);
		s >>= 1;
		__syncthreads();
	}
	if (!(threadIdx.x)) {
		curr_temporaries[blockIdx.x] = relationshipChanged[0];
	}
}
__global__ static
void compute_delta(int *devicetemporaries, int numtemporaries, int numtemporaries2){
numtemporaries2 >>= 1;
extern __shared__ unsigned int curr_temporaries[];
curr_temporaries[threadIdx.x] =
((threadIdx.x >= numtemporaries) ? 0 : devicetemporaries[threadIdx.x]);
__syncthreads();
unsigned int s = numtemporaries2;
while(s > 0) {
curr_temporaries[threadIdx.x] += ((threadIdx.x < s) ? curr_temporaries[threadIdx.x + s] : 0);
s >>= 1;
__syncthreads();
}
if (!(threadIdx.x)) {
devicetemporaries[0] = curr_temporaries[0];
}
}
float** cuda_kmeans(float **objects, int num_coordinates, int num_objs, int num_clusters, int *relationship){
float **dimObjects;
malloc2D(dimObjects, num_coordinates, num_objs, float);
for (int i = 0; i < num_coordinates; i++) {
for (int j = 0; j < num_objs; j++) {
dimObjects[i][j] = objects[j][i];
}
}
float *dev_clusters;
float **dimClusters;
malloc2D(dimClusters, num_coordinates, num_clusters, float);
for (int i = 0; i < num_coordinates; i++) {
for (int j = 0; j < num_clusters; j++) {
dimClusters[i][j] = dimObjects[i][j];
}
}
memset(relationship, -1, num_objs*sizeof(int));
int *newClusterSize;
newClusterSize = (int*) calloc(num_clusters, sizeof(int));
float **newClusters;
malloc2D(newClusters, num_coordinates, num_clusters, float);
memset(newClusters[0], 0, num_coordinates * num_clusters * sizeof(float));
unsigned int numThreadsPerClusterBlock = 128;
unsigned int numClusterBlocks =
(num_objs + numThreadsPerClusterBlock - 1) / numThreadsPerClusterBlock;
unsigned int clusterBlockSharedDataSize =
numThreadsPerClusterBlock * sizeof(unsigned char);
unsigned int numReductionThreads =
cal_pow_2(numClusterBlocks);
unsigned int reductionBlockSharedDataSize =
numReductionThreads * sizeof(unsigned int);
float *dev_objs;
int *dev_relationship;
int *devicetemporaries;
hipMalloc(&dev_objs, num_objs*num_coordinates*sizeof(float));
hipMalloc(&dev_clusters, num_clusters*num_coordinates*sizeof(float));
hipMalloc(&dev_relationship, num_objs*sizeof(int));
hipMalloc(&devicetemporaries, numReductionThreads*sizeof(unsigned int));
hipMemcpy(dev_objs, dimObjects[0], num_objs*num_coordinates*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_relationship, relationship, num_objs*sizeof(int), hipMemcpyHostToDevice);
for(int loop = 0; loop < 500; loop++){
hipMemcpy(dev_clusters, dimClusters[0], num_clusters*num_coordinates*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( find_nearest_cluster)
, dim3(numClusterBlocks), dim3(numThreadsPerClusterBlock), clusterBlockSharedDataSize , 0,
num_coordinates, num_objs, num_clusters,
dev_objs, dev_clusters, dev_relationship, devicetemporaries);
hipDeviceSynchronize();
hipLaunchKernelGGL(( compute_delta) , dim3(1), dim3(numReductionThreads), reductionBlockSharedDataSize , 0,
devicetemporaries, numClusterBlocks, numReductionThreads);
hipDeviceSynchronize();
int d;
hipMemcpy(&d, devicetemporaries, sizeof(int), hipMemcpyDeviceToHost);
float delta = (float)d;
hipMemcpy(relationship, dev_relationship, num_objs*sizeof(int), hipMemcpyDeviceToHost);
for (int i=0; i<num_objs; i++) {
newClusterSize[relationship[i]] += 1;
for (int j=0; j<num_coordinates; j++)
newClusters[j][relationship[i]] += objects[i][j];
}
for (int i=0; i<num_clusters; i++) {
for (int j=0; j<num_coordinates; j++) {
if (newClusterSize[i] != 0)
dimClusters[j][i] = (newClusters[j][i] / (1.0*newClusterSize[i]));
newClusters[j][i] = 0;
}
newClusterSize[i] = 0;
}
		/* converged: stop once (almost) no object changed its cluster assignment */
		if(delta < 0.001){
			break;
		}
}
float **clusters;
malloc2D(clusters, num_clusters, num_coordinates, float);
for (int i = 0; i < num_clusters; i++) {
for (int j = 0; j < num_coordinates; j++) {
clusters[i][j] = dimClusters[j][i];
}
}
return clusters;
}
int main(int argc, char **argv) {
int opt;
int num_clusters, num_coordinates, num_objs;
int *relationship;
char *filename;
float **objects;
float **clusters;
num_clusters = 0;
filename = NULL;
while ( (opt=getopt(argc,argv,"i:n:"))!= EOF) {
switch (opt) {
case 'i': filename=optarg;
break;
case 'n': num_clusters = atoi(optarg);
break;
case '?':
break;
default:
break;
}
}
struct timeval tvalBefore, tvalAfter;
objects = file_read(filename, &num_objs, &num_coordinates);
relationship = (int*) malloc(num_objs * sizeof(int));
gettimeofday (&tvalBefore, NULL);
clusters = cuda_kmeans(objects, num_coordinates, num_objs, num_clusters,
relationship);
gettimeofday (&tvalAfter, NULL);
printf("num_objs = %d\n", num_objs);
printf("num_coordinates = %d\n", num_coordinates);
printf("num_clusters = %d\n", num_clusters);
printf("Time: %ld microseconds\n",
((tvalAfter.tv_sec - tvalBefore.tv_sec)*1000000L
+tvalAfter.tv_usec) - tvalBefore.tv_usec
);
return(0);
} | 1c56ae8f22fa56fbaf80730de1bc00c8a86b23d2.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/time.h>
#define MAX_CHAR_PER_LINE 128
#define FLT_MAX 3.40282347e+38
#define malloc2D(name, xDim, yDim, type) do { \
name = (type **)malloc(xDim * sizeof(type *)); \
name[0] = (type *)malloc(xDim * yDim * sizeof(type)); \
for (size_t i = 1; i < xDim; i++) \
name[i] = name[i-1] + yDim; \
} while (0)
float** file_read(char *filename, int *num_objs, int *num_coordinates)
{
float **objects;
int i, j, len;
ssize_t numBytesRead;
FILE *infile;
char *line, *ret;
int lineLen;
if ((infile = fopen(filename, "r")) == NULL) {
fprintf(stderr, "Error: no such file (%s)\n", filename);
return NULL;
}
lineLen = MAX_CHAR_PER_LINE;
line = (char*) malloc(lineLen);
(*num_objs) = 0;
while (fgets(line, lineLen, infile) != NULL) {
while (strlen(line) == lineLen-1) {
len = strlen(line);
fseek(infile, -len, SEEK_CUR);
lineLen += MAX_CHAR_PER_LINE;
line = (char*) realloc(line, lineLen);
ret = fgets(line, lineLen, infile);
}
if (strtok(line, " \t\n") != 0)
(*num_objs)++;
}
rewind(infile);
(*num_coordinates) = 0;
while (fgets(line, lineLen, infile) != NULL) {
if (strtok(line, " \t\n") != 0) {
while (strtok(NULL, " ,\t\n") != NULL) (*num_coordinates)++;
break;
}
}
rewind(infile);
len = (*num_objs) * (*num_coordinates);
objects = (float**)malloc((*num_objs) * sizeof(float*));
objects[0] = (float*) malloc(len * sizeof(float));
for (i=1; i<(*num_objs); i++)
objects[i] = objects[i-1] + (*num_coordinates);
i = 0;
while (fgets(line, lineLen, infile) != NULL) {
if (strtok(line, " \t\n") == NULL) continue;
for (j=0; j<(*num_coordinates); j++)
objects[i][j] = atof(strtok(NULL, " ,\t\n"));
i++;
}
fclose(infile);
free(line);
return objects;
}
static int cal_pow_2(int n) {
int res = 0;
while(n > 0){
n >>= 1;
res = (res<<1) | 1;
}
return (res+1);
}
__host__ __device__ static
float cal_dist(int num_coordinates, int num_objs, int num_clusters, float *objects, float *clusters, int objectId, int clusterId){
float ans=0.0;
for (int i = 0; i < num_coordinates; i++) {
float temp = (objects[num_objs * i + objectId] - clusters[num_clusters * i + clusterId]);
ans += pow(temp,2);
}
ans = sqrt(ans);
return ans;
}
__global__ static
void find_nearest_cluster(int num_coordinates, int num_objs, int num_clusters, float *objects, float *dev_clusters, int *relationship,
int *curr_temporaries){
extern __shared__ char sharedMemory[];
unsigned char *relationshipChanged = (unsigned char *)sharedMemory;
relationshipChanged[threadIdx.x] = 0;
int objectId = threadIdx.x + (blockDim.x * blockIdx.x);
if (objectId < num_objs) {
float min_dist;
int index = -1;
min_dist = FLT_MAX;
float *clusters = dev_clusters;
for (int i=0; i<num_clusters; i++) {
float dist = cal_dist(num_coordinates, num_objs, num_clusters,
objects, clusters, objectId, i);
index = (dist < min_dist ? (min_dist = dist, i): index);
}
if (relationship[objectId] != index) {
relationship[objectId] = index;
relationshipChanged[threadIdx.x] = 1;
		}
	}
	/* the block-wide reduction below must be reached by every thread in the
	   block: a __syncthreads() inside the divergent objectId guard would be
	   undefined behavior, so the barrier and the reduction sit outside it */
	__syncthreads();
	unsigned int s = blockDim.x / 2;
	while(s > 0) {
		relationshipChanged[threadIdx.x] += ((threadIdx.x < s) ? relationshipChanged[threadIdx.x + s] : 0);
		s >>= 1;
		__syncthreads();
	}
	if (!(threadIdx.x)) {
		curr_temporaries[blockIdx.x] = relationshipChanged[0];
	}
}
__global__ static
void compute_delta(int *devicetemporaries, int numtemporaries, int numtemporaries2){
numtemporaries2 >>= 1;
extern __shared__ unsigned int curr_temporaries[];
curr_temporaries[threadIdx.x] =
((threadIdx.x >= numtemporaries) ? 0 : devicetemporaries[threadIdx.x]);
__syncthreads();
unsigned int s = numtemporaries2;
while(s > 0) {
curr_temporaries[threadIdx.x] += ((threadIdx.x < s) ? curr_temporaries[threadIdx.x + s] : 0);
s >>= 1;
__syncthreads();
}
if (!(threadIdx.x)) {
devicetemporaries[0] = curr_temporaries[0];
}
}
float** cuda_kmeans(float **objects, int num_coordinates, int num_objs, int num_clusters, int *relationship){
float **dimObjects;
malloc2D(dimObjects, num_coordinates, num_objs, float);
for (int i = 0; i < num_coordinates; i++) {
for (int j = 0; j < num_objs; j++) {
dimObjects[i][j] = objects[j][i];
}
}
float *dev_clusters;
float **dimClusters;
malloc2D(dimClusters, num_coordinates, num_clusters, float);
for (int i = 0; i < num_coordinates; i++) {
for (int j = 0; j < num_clusters; j++) {
dimClusters[i][j] = dimObjects[i][j];
}
}
memset(relationship, -1, num_objs*sizeof(int));
int *newClusterSize;
newClusterSize = (int*) calloc(num_clusters, sizeof(int));
float **newClusters;
malloc2D(newClusters, num_coordinates, num_clusters, float);
memset(newClusters[0], 0, num_coordinates * num_clusters * sizeof(float));
unsigned int numThreadsPerClusterBlock = 128;
unsigned int numClusterBlocks =
(num_objs + numThreadsPerClusterBlock - 1) / numThreadsPerClusterBlock;
unsigned int clusterBlockSharedDataSize =
numThreadsPerClusterBlock * sizeof(unsigned char);
unsigned int numReductionThreads =
cal_pow_2(numClusterBlocks);
unsigned int reductionBlockSharedDataSize =
numReductionThreads * sizeof(unsigned int);
float *dev_objs;
int *dev_relationship;
int *devicetemporaries;
cudaMalloc(&dev_objs, num_objs*num_coordinates*sizeof(float));
cudaMalloc(&dev_clusters, num_clusters*num_coordinates*sizeof(float));
cudaMalloc(&dev_relationship, num_objs*sizeof(int));
cudaMalloc(&devicetemporaries, numReductionThreads*sizeof(unsigned int));
cudaMemcpy(dev_objs, dimObjects[0], num_objs*num_coordinates*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_relationship, relationship, num_objs*sizeof(int), cudaMemcpyHostToDevice);
for(int loop = 0; loop < 500; loop++){
cudaMemcpy(dev_clusters, dimClusters[0], num_clusters*num_coordinates*sizeof(float), cudaMemcpyHostToDevice);
find_nearest_cluster
<<< numClusterBlocks, numThreadsPerClusterBlock, clusterBlockSharedDataSize >>>
(num_coordinates, num_objs, num_clusters,
dev_objs, dev_clusters, dev_relationship, devicetemporaries);
cudaDeviceSynchronize();
compute_delta <<< 1, numReductionThreads, reductionBlockSharedDataSize >>>
(devicetemporaries, numClusterBlocks, numReductionThreads);
cudaDeviceSynchronize();
int d;
cudaMemcpy(&d, devicetemporaries, sizeof(int), cudaMemcpyDeviceToHost);
float delta = (float)d;
cudaMemcpy(relationship, dev_relationship, num_objs*sizeof(int), cudaMemcpyDeviceToHost);
for (int i=0; i<num_objs; i++) {
newClusterSize[relationship[i]] += 1;
for (int j=0; j<num_coordinates; j++)
newClusters[j][relationship[i]] += objects[i][j];
}
for (int i=0; i<num_clusters; i++) {
for (int j=0; j<num_coordinates; j++) {
if (newClusterSize[i] != 0)
dimClusters[j][i] = (newClusters[j][i] / (1.0*newClusterSize[i]));
newClusters[j][i] = 0;
}
newClusterSize[i] = 0;
}
		/* converged: stop once (almost) no object changed its cluster assignment */
		if(delta < 0.001){
			break;
		}
}
float **clusters;
malloc2D(clusters, num_clusters, num_coordinates, float);
for (int i = 0; i < num_clusters; i++) {
for (int j = 0; j < num_coordinates; j++) {
clusters[i][j] = dimClusters[j][i];
}
}
return clusters;
}
int main(int argc, char **argv) {
int opt;
int num_clusters, num_coordinates, num_objs;
int *relationship;
char *filename;
float **objects;
float **clusters;
num_clusters = 0;
filename = NULL;
while ( (opt=getopt(argc,argv,"i:n:"))!= EOF) {
switch (opt) {
case 'i': filename=optarg;
break;
case 'n': num_clusters = atoi(optarg);
break;
case '?':
break;
default:
break;
}
}
struct timeval tvalBefore, tvalAfter;
objects = file_read(filename, &num_objs, &num_coordinates);
relationship = (int*) malloc(num_objs * sizeof(int));
gettimeofday (&tvalBefore, NULL);
clusters = cuda_kmeans(objects, num_coordinates, num_objs, num_clusters,
relationship);
gettimeofday (&tvalAfter, NULL);
printf("num_objs = %d\n", num_objs);
printf("num_coordinates = %d\n", num_coordinates);
printf("num_clusters = %d\n", num_clusters);
printf("Time: %ld microseconds\n",
((tvalAfter.tv_sec - tvalBefore.tv_sec)*1000000L
+tvalAfter.tv_usec) - tvalBefore.tv_usec
);
return(0);
} |
6458d3a9484939c3d2183bcd2320644e83233be7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "macro.hpp"
#include <algorithm>
#include <iostream>
#include <cstdlib>
#define double float
int RandomNumber(){return static_cast<double>(rand() % 1000);}
__global__ void sum3(double * a,
double * b,
double * c,
double * result,
unsigned size)
{
unsigned i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < size)
{
result[i] = (a[i] + b[i] + c[i]);
}
};
int main()
{
unsigned size = 1e7;
srand(0);
double* a = new double[size];
double* b = new double[size];
double* c = new double[size];
double* result = new double[size];
std::generate(a, a+size, RandomNumber);
std::generate(b, b+size, RandomNumber);
std::generate(c, c+size, RandomNumber);
double* ad, *bd,* cd;
double* resultd;
unsigned * sized;
hipMalloc((void**) &ad, size*sizeof(double)) ;
hipMalloc((void**) &bd, size*sizeof(double)) ;
hipMalloc((void**) &cd, size*sizeof(double)) ;
hipMalloc((void**) &resultd, size*sizeof(double));
hipMalloc((void**) &sized, sizeof(unsigned)) ;
for(int i = 0; i < 1000; ++i)
{
unsigned block_size = 515;
unsigned num_blocks = (size + block_size - 1) / block_size;
hipMemcpy(ad, a, size*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(bd, b, size*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(cd, c, size*sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( sum3), dim3(num_blocks), dim3(block_size), 0, 0, ad, bd, cd, resultd, size);
hipMemcpy(result, resultd, size*sizeof(double), hipMemcpyDeviceToHost);
}
#ifdef PRINT
for( int i = 0; i < size; ++i)
{
std::cout << a[i] << ", "<< b[i] <<"," << c[i] << "," << result[i]<< std::endl;
}
#endif
hipFree(ad);
hipFree(bd);
hipFree(cd);
hipFree(resultd);
delete[] a;
delete[] b;
delete[] c;
delete[] result;
return 0;
}
| 6458d3a9484939c3d2183bcd2320644e83233be7.cu | #include "macro.hpp"
#include <algorithm>
#include <iostream>
#include <cstdlib>
#define double float
int RandomNumber(){return static_cast<double>(rand() % 1000);}
__global__ void sum3(double * a,
double * b,
double * c,
double * result,
unsigned size)
{
unsigned i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < size)
{
result[i] = (a[i] + b[i] + c[i]);
}
};
int main()
{
unsigned size = 1e7;
srand(0);
double* a = new double[size];
double* b = new double[size];
double* c = new double[size];
double* result = new double[size];
std::generate(a, a+size, RandomNumber);
std::generate(b, b+size, RandomNumber);
std::generate(c, c+size, RandomNumber);
double* ad, *bd,* cd;
double* resultd;
unsigned * sized;
cudaMalloc((void**) &ad, size*sizeof(double)) ;
cudaMalloc((void**) &bd, size*sizeof(double)) ;
cudaMalloc((void**) &cd, size*sizeof(double)) ;
cudaMalloc((void**) &resultd, size*sizeof(double));
cudaMalloc((void**) &sized, sizeof(unsigned)) ;
for(int i = 0; i < 1000; ++i)
{
unsigned block_size = 515;
unsigned num_blocks = (size + block_size - 1) / block_size;
cudaMemcpy(ad, a, size*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(bd, b, size*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(cd, c, size*sizeof(double), cudaMemcpyHostToDevice);
sum3<<<num_blocks, block_size>>>(ad, bd, cd, resultd, size);
cudaMemcpy(result, resultd, size*sizeof(double), cudaMemcpyDeviceToHost);
}
#ifdef PRINT
for( int i = 0; i < size; ++i)
{
std::cout << a[i] << ", "<< b[i] <<"," << c[i] << "," << result[i]<< std::endl;
}
#endif
cudaFree(ad);
cudaFree(bd);
cudaFree(cd);
cudaFree(resultd);
delete[] a;
delete[] b;
delete[] c;
delete[] result;
return 0;
}
|
39a79f23a98b105a3666cdfc6d0b68963842489e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdint.h"
#include "Array2D.cu"
extern __device__ float clamp(float x, float minX, float maxX){
return max(minX, min(maxX, x));
}
extern __device__ int clamp(int x, int minX, int maxX){
return max(minX, min(maxX, x));
}
extern "C" __global__ void advect(Array2D<0> q, Array2D<1> qNew, Array2D<2> u, Array2D<3> v, const float dt, const float rdx)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int height = qNew.getCount(0);
int width = qNew.getCount(1);
if (i >= width || j >= height || i < 0 || j < 0)
return;
float pos_x = i - u[j][i] * dt * rdx;
float pos_y = j - v[j][i] * dt * rdx;
pos_x = clamp(pos_x, 0.0, (float)width-1);
pos_y = clamp(pos_y, 0.0, (float)height-1);
int x = (int) floor(pos_x);
int y = (int) floor(pos_y);
float t_x = pos_x - x;
float t_y = pos_y - y;
// bilinear interpolation
float pixel00 = q[y][x];
float pixel10 = q[y][clamp((x + 1), 0, width - 1)];
float pixel01 = q[clamp((y + 1), 0, height - 1)][x];
float pixel11 = q[clamp((y + 1), 0, height - 1)][clamp((x + 1), 0, width - 1)];
qNew[j][i] = (1.f - t_y)*((1.f - t_x)*pixel00 + t_x*pixel10) + t_y*((1.f - t_x)*pixel01 + t_x*pixel11);
}
extern "C" __global__ void jacobi(Array2D<0> x, Array2D<1> xNew, Array2D<2> b, const float alpha, const float rbeta)
{
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
size_t j = blockIdx.y * blockDim.y + threadIdx.y;
int height = xNew.getCount(0);
int width = xNew.getCount(1);
if (i >= width || j >= height || i < 0 || j < 0)
return;
xNew[j][i] = rbeta * (alpha * b[j][i]
+ x[j][clamp((i + 1), 0, width - 1)]
+ x[j][clamp((i - 1), 0, width - 1)]
+ x[clamp((j + 1), 0, height - 1)][i]
+ x[clamp((j - 1), 0, height - 1)][i]);
}
extern "C" __global__ void divergence(Array2D<0> u, Array2D<1> v, Array2D<2> div, const float halfrdx)
{
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
size_t j = blockIdx.y * blockDim.y + threadIdx.y;
int height = div.getCount(0);
int width = div.getCount(1);
if (i >= width || j >= height || i < 0 || j < 0)
return;
div[j][i] = halfrdx * (u[j][clamp(i + 1, 0, width - 1)]
- u[j][clamp(i - 1, 0, width - 1)]
+ v[clamp(j + 1, 0, height - 1)][i]
- v[clamp(j - 1, 0, height - 1)][i]);
}
extern "C" __global__ void subtractGradient(Array2D<0> p, Array2D<1> u, Array2D<2> v, Array2D<3> uNew, Array2D<4> vNew, const float halfrdx)
{
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
size_t j = blockIdx.y * blockDim.y + threadIdx.y;
int height = uNew.getCount(0);
int width = uNew.getCount(1);
if (i >= width || j >= height || i < 0 || j < 0)
return;
uNew[j][i] = u[j][i] - halfrdx * (p[j][clamp(i + 1, 0, width - 1)]
- p[j][clamp(i - 1, 0, width - 1)]);
vNew[j][i] = v[j][i] - halfrdx * (p[clamp(j + 1, 0, height - 1)][i]
- p[clamp(j - 1, 0, height - 1)][i]);
}
extern "C" __global__ void boundary(Array2D<0> x, float scale)
{
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
size_t j = blockIdx.y * blockDim.y + threadIdx.y;
int height = x.getCount(0);
int width = x.getCount(1);
if (i >= width || j >= height)
return;
if (i == 0)
x[j][i] = scale*x[j][i + 1];
else if (i == width - 1)
x[j][i] = scale*x[j][i - 1];
else if (j == 0)
x[j][i] = scale*x[j + 1][i];
else if (j == height - 1)
x[j][i] = scale*x[j - 1][i];
}
extern "C" __global__ void addInk(Array2D<0> u, Array2D<1> v, Array2D<2> ink, const int x, const int y, const float u_, const float v_, const float ink_)
{
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
size_t j = blockIdx.y * blockDim.y + threadIdx.y;
int height = u.getCount(0);
int width = u.getCount(1);
if (i >= width || j >= height || i < 0 || j < 0)
return;
int dx = i - x;
int dy = j - y;
float s = 1.f / pow(2., static_cast<double>(dx*dx + dy*dy) / 200.);
u[j][i] += u_ * s;
v[j][i] += v_ * s;
ink[j][i] += ink_ * s;
ink[j][i] = clamp(ink[j][i], 0.0, 255.0);
}
extern "C" __global__ void convertToColor(uint8_t *color, Array2D<0> x)
{
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
size_t j = blockIdx.y * blockDim.y + threadIdx.y;
int height = x.getCount(0);
int width = x.getCount(1);
size_t index = i + width * j;
if (i >= width || j >= height || i < 0 || j < 0)
return;
uint8_t value = 255 - static_cast<uint8_t>(x[j][i]);
color[4 * index] = value;
color[4 * index + 1] = value;
color[4 * index + 2] = value;
color[4 * index + 3] = 0;
}
extern "C" __global__ void convertToColor2(uint8_t *color, Array2D<0> r, Array2D<1> g, Array2D<2> b)
{
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
size_t j = blockIdx.y * blockDim.y + threadIdx.y;
int height = r.getCount(0);
int width = r.getCount(1);
size_t index = i + width * j;
if (i >= width || j >= height || i < 0 || j < 0)
return;
color[4 * index] = static_cast<uint8_t>(r[j][i]);
color[4 * index + 1] = static_cast<uint8_t>(g[j][i]);
color[4 * index + 2] = static_cast<uint8_t>(b[j][i]);
color[4 * index + 3] = 0;
} | 39a79f23a98b105a3666cdfc6d0b68963842489e.cu | #include "stdint.h"
#include "Array2D.cu"
extern __device__ float clamp(float x, float minX, float maxX){
return max(minX, min(maxX, x));
}
extern __device__ int clamp(int x, int minX, int maxX){
return max(minX, min(maxX, x));
}
extern "C" __global__ void advect(Array2D<0> q, Array2D<1> qNew, Array2D<2> u, Array2D<3> v, const float dt, const float rdx)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int height = qNew.getCount(0);
int width = qNew.getCount(1);
if (i >= width || j >= height || i < 0 || j < 0)
return;
float pos_x = i - u[j][i] * dt * rdx;
float pos_y = j - v[j][i] * dt * rdx;
pos_x = clamp(pos_x, 0.0, (float)width-1);
pos_y = clamp(pos_y, 0.0, (float)height-1);
int x = (int) floor(pos_x);
int y = (int) floor(pos_y);
float t_x = pos_x - x;
float t_y = pos_y - y;
// bilinear interpolation
float pixel00 = q[y][x];
float pixel10 = q[y][clamp((x + 1), 0, width - 1)];
float pixel01 = q[clamp((y + 1), 0, height - 1)][x];
float pixel11 = q[clamp((y + 1), 0, height - 1)][clamp((x + 1), 0, width - 1)];
qNew[j][i] = (1.f - t_y)*((1.f - t_x)*pixel00 + t_x*pixel10) + t_y*((1.f - t_x)*pixel01 + t_x*pixel11);
}
extern "C" __global__ void jacobi(Array2D<0> x, Array2D<1> xNew, Array2D<2> b, const float alpha, const float rbeta)
{
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
size_t j = blockIdx.y * blockDim.y + threadIdx.y;
int height = xNew.getCount(0);
int width = xNew.getCount(1);
if (i >= width || j >= height || i < 0 || j < 0)
return;
xNew[j][i] = rbeta * (alpha * b[j][i]
+ x[j][clamp((i + 1), 0, width - 1)]
+ x[j][clamp((i - 1), 0, width - 1)]
+ x[clamp((j + 1), 0, height - 1)][i]
+ x[clamp((j - 1), 0, height - 1)][i]);
}
extern "C" __global__ void divergence(Array2D<0> u, Array2D<1> v, Array2D<2> div, const float halfrdx)
{
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
size_t j = blockIdx.y * blockDim.y + threadIdx.y;
int height = div.getCount(0);
int width = div.getCount(1);
if (i >= width || j >= height || i < 0 || j < 0)
return;
div[j][i] = halfrdx * (u[j][clamp(i + 1, 0, width - 1)]
- u[j][clamp(i - 1, 0, width - 1)]
+ v[clamp(j + 1, 0, height - 1)][i]
- v[clamp(j - 1, 0, height - 1)][i]);
}
extern "C" __global__ void subtractGradient(Array2D<0> p, Array2D<1> u, Array2D<2> v, Array2D<3> uNew, Array2D<4> vNew, const float halfrdx)
{
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
size_t j = blockIdx.y * blockDim.y + threadIdx.y;
int height = uNew.getCount(0);
int width = uNew.getCount(1);
if (i >= width || j >= height || i < 0 || j < 0)
return;
uNew[j][i] = u[j][i] - halfrdx * (p[j][clamp(i + 1, 0, width - 1)]
- p[j][clamp(i - 1, 0, width - 1)]);
vNew[j][i] = v[j][i] - halfrdx * (p[clamp(j + 1, 0, height - 1)][i]
- p[clamp(j - 1, 0, height - 1)][i]);
}
extern "C" __global__ void boundary(Array2D<0> x, float scale)
{
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
size_t j = blockIdx.y * blockDim.y + threadIdx.y;
int height = x.getCount(0);
int width = x.getCount(1);
if (i >= width || j >= height)
return;
if (i == 0)
x[j][i] = scale*x[j][i + 1];
else if (i == width - 1)
x[j][i] = scale*x[j][i - 1];
else if (j == 0)
x[j][i] = scale*x[j + 1][i];
else if (j == height - 1)
x[j][i] = scale*x[j - 1][i];
}
extern "C" __global__ void addInk(Array2D<0> u, Array2D<1> v, Array2D<2> ink, const int x, const int y, const float u_, const float v_, const float ink_)
{
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
size_t j = blockIdx.y * blockDim.y + threadIdx.y;
int height = u.getCount(0);
int width = u.getCount(1);
if (i >= width || j >= height || i < 0 || j < 0)
return;
int dx = i - x;
int dy = j - y;
float s = 1.f / pow(2., static_cast<double>(dx*dx + dy*dy) / 200.);
u[j][i] += u_ * s;
v[j][i] += v_ * s;
ink[j][i] += ink_ * s;
ink[j][i] = clamp(ink[j][i], 0.0, 255.0);
}
extern "C" __global__ void convertToColor(uint8_t *color, Array2D<0> x)
{
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
size_t j = blockIdx.y * blockDim.y + threadIdx.y;
int height = x.getCount(0);
int width = x.getCount(1);
size_t index = i + width * j;
if (i >= width || j >= height || i < 0 || j < 0)
return;
uint8_t value = 255 - static_cast<uint8_t>(x[j][i]);
color[4 * index] = value;
color[4 * index + 1] = value;
color[4 * index + 2] = value;
color[4 * index + 3] = 0;
}
extern "C" __global__ void convertToColor2(uint8_t *color, Array2D<0> r, Array2D<1> g, Array2D<2> b)
{
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
size_t j = blockIdx.y * blockDim.y + threadIdx.y;
int height = r.getCount(0);
int width = r.getCount(1);
size_t index = i + width * j;
if (i >= width || j >= height || i < 0 || j < 0)
return;
color[4 * index] = static_cast<uint8_t>(r[j][i]);
color[4 * index + 1] = static_cast<uint8_t>(g[j][i]);
color[4 * index + 2] = static_cast<uint8_t>(b[j][i]);
color[4 * index + 3] = 0;
} |
3dfdc0eccb3f5def292edaae33d50deb175ab2db.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Implements the Landau kernel
*/
#include <petscconf.h>
#include <petsc/private/dmpleximpl.h> /*I "dmpleximpl.h" I*/
#include <petsclandau.h>
#define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1
#include <../src/mat/impls/aij/seq/aij.h>
#include <petscmat.h>
#include <petscdevice_cuda.h>
#include "../land_tensors.h"
#include <petscaijdevice.h>
PETSC_EXTERN PetscErrorCode LandauCUDACreateMatMaps(P4estVertexMaps maps[], pointInterpolationP4est (*pointMaps)[LANDAU_MAX_Q_FACE], PetscInt Nf[], PetscInt Nq, PetscInt grid)
{
P4estVertexMaps h_maps;
PetscFunctionBegin;
h_maps.num_elements = maps[grid].num_elements;
h_maps.num_face = maps[grid].num_face;
h_maps.num_reduced = maps[grid].num_reduced;
h_maps.deviceType = maps[grid].deviceType;
h_maps.Nf = Nf[grid];
h_maps.numgrids = maps[grid].numgrids;
PetscCallCUDA(hipMalloc((void **)&h_maps.c_maps, maps[grid].num_reduced * sizeof *pointMaps));
PetscCallCUDA(hipMemcpy(h_maps.c_maps, maps[grid].c_maps, maps[grid].num_reduced * sizeof *pointMaps, hipMemcpyHostToDevice));
PetscCallCUDA(hipMalloc((void **)&h_maps.gIdx, maps[grid].num_elements * sizeof *maps[grid].gIdx));
PetscCallCUDA(hipMemcpy(h_maps.gIdx, maps[grid].gIdx, maps[grid].num_elements * sizeof *maps[grid].gIdx, hipMemcpyHostToDevice));
PetscCallCUDA(hipMalloc((void **)&maps[grid].d_self, sizeof(P4estVertexMaps)));
PetscCallCUDA(hipMemcpy(maps[grid].d_self, &h_maps, sizeof(P4estVertexMaps), hipMemcpyHostToDevice));
PetscFunctionReturn(0);
}
PETSC_EXTERN PetscErrorCode LandauCUDADestroyMatMaps(P4estVertexMaps maps[], PetscInt num_grids)
{
PetscFunctionBegin;
for (PetscInt grid = 0; grid < num_grids; grid++) {
P4estVertexMaps *d_maps = maps[grid].d_self, h_maps;
PetscCallCUDA(hipMemcpy(&h_maps, d_maps, sizeof(P4estVertexMaps), hipMemcpyDeviceToHost));
PetscCallCUDA(hipFree(h_maps.c_maps));
PetscCallCUDA(hipFree(h_maps.gIdx));
PetscCallCUDA(hipFree(d_maps));
}
PetscFunctionReturn(0);
}
PetscErrorCode LandauCUDAStaticDataSet(DM plex, const PetscInt Nq, const PetscInt batch_sz, const PetscInt num_grids, PetscInt a_numCells[], PetscInt a_species_offset[], PetscInt a_mat_offset[], PetscReal nu_alpha[], PetscReal nu_beta[], PetscReal a_invMass[], PetscReal a_invJ[], PetscReal a_x[], PetscReal a_y[], PetscReal a_z[], PetscReal a_w[], LandauStaticData *SData_d)
{
PetscTabulation *Tf;
PetscReal *BB, *DD;
PetscInt dim, Nb = Nq, szf = sizeof(PetscReal), szs = sizeof(PetscScalar), szi = sizeof(PetscInt);
PetscInt h_ip_offset[LANDAU_MAX_GRIDS + 1], h_ipf_offset[LANDAU_MAX_GRIDS + 1], h_elem_offset[LANDAU_MAX_GRIDS + 1], nip, IPfdf_sz, Nf;
PetscDS prob;
PetscFunctionBegin;
PetscCall(DMGetDimension(plex, &dim));
PetscCall(DMGetDS(plex, &prob));
PetscCheck(LANDAU_DIM == dim, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " != LANDAU_DIM %d", dim, LANDAU_DIM);
PetscCall(PetscDSGetTabulation(prob, &Tf));
BB = Tf[0]->T[0];
DD = Tf[0]->T[1];
Nf = h_ip_offset[0] = h_ipf_offset[0] = h_elem_offset[0] = 0;
nip = 0;
IPfdf_sz = 0;
for (PetscInt grid = 0; grid < num_grids; grid++) {
PetscInt nfloc = a_species_offset[grid + 1] - a_species_offset[grid];
h_elem_offset[grid + 1] = h_elem_offset[grid] + a_numCells[grid];
nip += a_numCells[grid] * Nq;
h_ip_offset[grid + 1] = nip;
IPfdf_sz += Nq * nfloc * a_numCells[grid];
h_ipf_offset[grid + 1] = IPfdf_sz;
}
Nf = a_species_offset[num_grids];
{
PetscCallCUDA(hipMalloc((void **)&SData_d->B, Nq * Nb * szf)); // kernel input
PetscCallCUDA(hipMemcpy(SData_d->B, BB, Nq * Nb * szf, hipMemcpyHostToDevice));
PetscCallCUDA(hipMalloc((void **)&SData_d->D, Nq * Nb * dim * szf)); // kernel input
PetscCallCUDA(hipMemcpy(SData_d->D, DD, Nq * Nb * dim * szf, hipMemcpyHostToDevice));
PetscCallCUDA(hipMalloc((void **)&SData_d->alpha, Nf * szf)); // kernel input
PetscCallCUDA(hipMalloc((void **)&SData_d->beta, Nf * szf)); // kernel input
PetscCallCUDA(hipMalloc((void **)&SData_d->invMass, Nf * szf)); // kernel input
PetscCallCUDA(hipMemcpy(SData_d->alpha, nu_alpha, Nf * szf, hipMemcpyHostToDevice));
PetscCallCUDA(hipMemcpy(SData_d->beta, nu_beta, Nf * szf, hipMemcpyHostToDevice));
PetscCallCUDA(hipMemcpy(SData_d->invMass, a_invMass, Nf * szf, hipMemcpyHostToDevice));
// collect geometry
PetscCallCUDA(hipMalloc((void **)&SData_d->invJ, nip * dim * dim * szf)); // kernel input
PetscCallCUDA(hipMemcpy(SData_d->invJ, a_invJ, nip * dim * dim * szf, hipMemcpyHostToDevice));
PetscCallCUDA(hipMalloc((void **)&SData_d->x, nip * szf)); // kernel input
PetscCallCUDA(hipMemcpy(SData_d->x, a_x, nip * szf, hipMemcpyHostToDevice));
PetscCallCUDA(hipMalloc((void **)&SData_d->y, nip * szf)); // kernel input
PetscCallCUDA(hipMemcpy(SData_d->y, a_y, nip * szf, hipMemcpyHostToDevice));
#if LANDAU_DIM == 3
PetscCallCUDA(hipMalloc((void **)&SData_d->z, nip * szf)); // kernel input
PetscCallCUDA(hipMemcpy(SData_d->z, a_z, nip * szf, hipMemcpyHostToDevice));
#endif
PetscCallCUDA(hipMalloc((void **)&SData_d->w, nip * szf)); // kernel input
PetscCallCUDA(hipMemcpy(SData_d->w, a_w, nip * szf, hipMemcpyHostToDevice));
PetscCallCUDA(hipMalloc((void **)&SData_d->NCells, num_grids * szi));
PetscCallCUDA(hipMemcpy(SData_d->NCells, a_numCells, num_grids * szi, hipMemcpyHostToDevice));
PetscCallCUDA(hipMalloc((void **)&SData_d->species_offset, (num_grids + 1) * szi));
PetscCallCUDA(hipMemcpy(SData_d->species_offset, a_species_offset, (num_grids + 1) * szi, hipMemcpyHostToDevice));
PetscCallCUDA(hipMalloc((void **)&SData_d->mat_offset, (num_grids + 1) * szi));
PetscCallCUDA(hipMemcpy(SData_d->mat_offset, a_mat_offset, (num_grids + 1) * szi, hipMemcpyHostToDevice));
PetscCallCUDA(hipMalloc((void **)&SData_d->ip_offset, (num_grids + 1) * szi));
PetscCallCUDA(hipMemcpy(SData_d->ip_offset, h_ip_offset, (num_grids + 1) * szi, hipMemcpyHostToDevice));
PetscCallCUDA(hipMalloc((void **)&SData_d->ipf_offset, (num_grids + 1) * szi));
PetscCallCUDA(hipMemcpy(SData_d->ipf_offset, h_ipf_offset, (num_grids + 1) * szi, hipMemcpyHostToDevice));
PetscCallCUDA(hipMalloc((void **)&SData_d->elem_offset, (num_grids + 1) * szi));
PetscCallCUDA(hipMemcpy(SData_d->elem_offset, h_elem_offset, (num_grids + 1) * szi, hipMemcpyHostToDevice));
PetscCallCUDA(hipMalloc((void **)&SData_d->maps, num_grids * sizeof(P4estVertexMaps *)));
// allocate space for dynamic data once
PetscCallCUDA(hipMalloc((void **)&SData_d->Eq_m, Nf * szf)); // this could be for each vertex (todo?)
PetscCallCUDA(hipMalloc((void **)&SData_d->f, nip * Nf * szs * batch_sz)); // for each vertex in batch
PetscCallCUDA(hipMalloc((void **)&SData_d->dfdx, nip * Nf * szs * batch_sz));
PetscCallCUDA(hipMalloc((void **)&SData_d->dfdy, nip * Nf * szs * batch_sz));
#if LANDAU_DIM == 3
PetscCallCUDA(hipMalloc((void **)&SData_d->dfdz, nip * Nf * szs * batch_sz));
#endif
}
PetscFunctionReturn(0);
}
PetscErrorCode LandauCUDAStaticDataClear(LandauStaticData *SData_d)
{
PetscFunctionBegin;
if (SData_d->alpha) {
PetscCallCUDA(hipFree(SData_d->alpha));
SData_d->alpha = NULL;
PetscCallCUDA(hipFree(SData_d->beta));
PetscCallCUDA(hipFree(SData_d->invMass));
PetscCallCUDA(hipFree(SData_d->B));
PetscCallCUDA(hipFree(SData_d->D));
PetscCallCUDA(hipFree(SData_d->invJ));
#if LANDAU_DIM == 3
PetscCallCUDA(hipFree(SData_d->z));
#endif
PetscCallCUDA(hipFree(SData_d->x));
PetscCallCUDA(hipFree(SData_d->y));
PetscCallCUDA(hipFree(SData_d->w));
// dynamic data
PetscCallCUDA(hipFree(SData_d->Eq_m));
PetscCallCUDA(hipFree(SData_d->f));
PetscCallCUDA(hipFree(SData_d->dfdx));
PetscCallCUDA(hipFree(SData_d->dfdy));
#if LANDAU_DIM == 3
PetscCallCUDA(hipFree(SData_d->dfdz));
#endif
PetscCallCUDA(hipFree(SData_d->NCells));
PetscCallCUDA(hipFree(SData_d->species_offset));
PetscCallCUDA(hipFree(SData_d->mat_offset));
PetscCallCUDA(hipFree(SData_d->ip_offset));
PetscCallCUDA(hipFree(SData_d->ipf_offset));
PetscCallCUDA(hipFree(SData_d->elem_offset));
PetscCallCUDA(hipFree(SData_d->maps));
}
PetscFunctionReturn(0);
}
//
// The GPU Landau kernel
//
__global__ void landau_form_fdf(const PetscInt dim, const PetscInt Nb, const PetscInt num_grids, const PetscReal d_invJ[], const PetscReal *const BB, const PetscReal *const DD, PetscScalar *d_vertex_f, P4estVertexMaps *d_maps[], PetscReal d_f[], PetscReal d_dfdx[], PetscReal d_dfdy[],
#if LANDAU_DIM == 3
PetscReal d_dfdz[],
#endif
const PetscInt d_numCells[], const PetscInt d_species_offset[], const PetscInt d_mat_offset[], const PetscInt d_ip_offset[], const PetscInt d_ipf_offset[], const PetscInt d_elem_offset[]) // output
{
const PetscInt Nq = blockDim.y, myQi = threadIdx.y;
const PetscInt b_elem_idx = blockIdx.y, b_id = blockIdx.x, IPf_sz_glb = d_ipf_offset[num_grids];
const PetscReal *Bq = &BB[myQi * Nb], *Dq = &DD[myQi * Nb * dim];
PetscInt grid = 0, f, d, b, e, q;
while (b_elem_idx >= d_elem_offset[grid + 1]) grid++;
{
const PetscInt loc_nip = d_numCells[grid] * Nq, loc_Nf = d_species_offset[grid + 1] - d_species_offset[grid], loc_elem = b_elem_idx - d_elem_offset[grid];
const PetscInt moffset = LAND_MOFFSET(b_id, grid, gridDim.x, num_grids, d_mat_offset);
const PetscScalar *coef;
PetscReal u_x[LANDAU_DIM];
const PetscReal *invJ = &d_invJ[(d_ip_offset[grid] + loc_elem * Nq + myQi) * dim * dim];
PetscScalar coef_buff[LANDAU_MAX_SPECIES * LANDAU_MAX_NQ];
if (!d_maps) {
coef = &d_vertex_f[b_id * IPf_sz_glb + d_ipf_offset[grid] + loc_elem * Nb * loc_Nf]; // closure and IP indexing are the same
} else {
coef = coef_buff;
for (f = 0; f < loc_Nf; ++f) {
LandauIdx *const Idxs = &d_maps[grid]->gIdx[loc_elem][f][0];
for (b = 0; b < Nb; ++b) {
PetscInt idx = Idxs[b];
if (idx >= 0) {
coef_buff[f * Nb + b] = d_vertex_f[idx + moffset];
} else {
idx = -idx - 1;
coef_buff[f * Nb + b] = 0;
for (q = 0; q < d_maps[grid]->num_face; q++) {
PetscInt id = d_maps[grid]->c_maps[idx][q].gid;
PetscReal scale = d_maps[grid]->c_maps[idx][q].scale;
if (id >= 0) coef_buff[f * Nb + b] += scale * d_vertex_f[id + moffset];
}
}
}
}
}
/* get f and df */
for (f = threadIdx.x; f < loc_Nf; f += blockDim.x) {
PetscReal refSpaceDer[LANDAU_DIM];
const PetscInt idx = b_id * IPf_sz_glb + d_ipf_offset[grid] + f * loc_nip + loc_elem * Nq + myQi;
d_f[idx] = 0.0;
for (d = 0; d < LANDAU_DIM; ++d) refSpaceDer[d] = 0.0;
for (b = 0; b < Nb; ++b) {
const PetscInt cidx = b;
d_f[idx] += Bq[cidx] * PetscRealPart(coef[f * Nb + cidx]);
for (d = 0; d < dim; ++d) refSpaceDer[d] += Dq[cidx * dim + d] * PetscRealPart(coef[f * Nb + cidx]);
}
for (d = 0; d < dim; ++d) {
for (e = 0, u_x[d] = 0.0; e < dim; ++e) u_x[d] += invJ[e * dim + d] * refSpaceDer[e];
}
d_dfdx[idx] = u_x[0];
d_dfdy[idx] = u_x[1];
#if LANDAU_DIM == 3
d_dfdz[idx] = u_x[2];
#endif
}
}
}
__device__ void landau_jac_kernel(const PetscInt num_grids, const PetscInt jpidx, PetscInt nip_global, const PetscInt grid, const PetscReal xx[], const PetscReal yy[], const PetscReal ww[], const PetscReal invJj[], const PetscInt Nftot, const PetscReal nu_alpha[], const PetscReal nu_beta[], const PetscReal invMass[], const PetscReal Eq_m[], const PetscReal *const BB, const PetscReal *const DD, PetscScalar *elemMat, P4estVertexMaps *d_maps[], PetscSplitCSRDataStructure d_mat, // output
PetscScalar s_fieldMats[][LANDAU_MAX_NQ], // all these arrays are in shared memory
PetscReal s_scale[][LANDAU_MAX_Q_FACE], PetscInt s_idx[][LANDAU_MAX_Q_FACE], PetscReal s_g2[][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES], PetscReal s_g3[][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES], PetscReal s_gg2[][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES], PetscReal s_gg3[][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES], PetscReal s_nu_alpha[], PetscReal s_nu_beta[], PetscReal s_invMass[], PetscReal s_f[], PetscReal s_dfx[], PetscReal s_dfy[], PetscReal d_f[], PetscReal d_dfdx[], PetscReal d_dfdy[], // global memory
#if LANDAU_DIM == 3
const PetscReal zz[], PetscReal s_dfz[], PetscReal d_dfdz[],
#endif
const PetscInt d_numCells[], const PetscInt d_species_offset[], const PetscInt d_mat_offset[], const PetscInt d_ip_offset[], const PetscInt d_ipf_offset[], const PetscInt d_elem_offset[])
{
const PetscInt Nq = blockDim.y, myQi = threadIdx.y;
const PetscInt b_elem_idx = blockIdx.y, b_id = blockIdx.x, IPf_sz_glb = d_ipf_offset[num_grids];
const PetscInt loc_Nf = d_species_offset[grid + 1] - d_species_offset[grid], loc_elem = b_elem_idx - d_elem_offset[grid];
const PetscInt moffset = LAND_MOFFSET(b_id, grid, gridDim.x, num_grids, d_mat_offset);
int delta, d, f, g, d2, dp, d3, fieldA, ipidx_b;
PetscReal gg2_temp[LANDAU_DIM], gg3_temp[LANDAU_DIM][LANDAU_DIM];
#if LANDAU_DIM == 2
const PetscReal vj[3] = {xx[jpidx], yy[jpidx]};
constexpr int dim = 2;
#else
const PetscReal vj[3] = {xx[jpidx], yy[jpidx], zz[jpidx]};
constexpr int dim = 3;
#endif
const PetscInt f_off = d_species_offset[grid], Nb = Nq;
// create g2 & g3
for (f = threadIdx.x; f < loc_Nf; f += blockDim.x) {
for (d = 0; d < dim; d++) { // clear accumulation data D & K
s_gg2[d][myQi][f] = 0;
for (d2 = 0; d2 < dim; d2++) s_gg3[d][d2][myQi][f] = 0;
}
}
#pragma unroll
for (d2 = 0; d2 < dim; d2++) {
gg2_temp[d2] = 0;
#pragma unroll
for (d3 = 0; d3 < dim; d3++) gg3_temp[d2][d3] = 0;
}
if (threadIdx.y == 0) {
// copy species into shared memory
for (fieldA = threadIdx.x; fieldA < Nftot; fieldA += blockDim.x) {
s_nu_alpha[fieldA] = nu_alpha[fieldA];
s_nu_beta[fieldA] = nu_beta[fieldA];
s_invMass[fieldA] = invMass[fieldA];
}
}
__syncthreads();
// inner integral, collect gg2/3
for (ipidx_b = 0; ipidx_b < nip_global; ipidx_b += blockDim.x) {
const PetscInt ipidx = ipidx_b + threadIdx.x;
PetscInt f_off_r, grid_r, loc_Nf_r, nip_loc_r, ipidx_g, fieldB, IPf_idx_r;
__syncthreads();
if (ipidx < nip_global) {
grid_r = 0;
while (ipidx >= d_ip_offset[grid_r + 1]) grid_r++;
f_off_r = d_species_offset[grid_r];
ipidx_g = ipidx - d_ip_offset[grid_r];
nip_loc_r = d_numCells[grid_r] * Nq;
loc_Nf_r = d_species_offset[grid_r + 1] - d_species_offset[grid_r];
IPf_idx_r = b_id * IPf_sz_glb + d_ipf_offset[grid_r] + ipidx_g;
for (fieldB = threadIdx.y; fieldB < loc_Nf_r; fieldB += blockDim.y) {
const PetscInt idx = IPf_idx_r + fieldB * nip_loc_r;
s_f[fieldB * blockDim.x + threadIdx.x] = d_f[idx]; // all vector threads get copy of data
s_dfx[fieldB * blockDim.x + threadIdx.x] = d_dfdx[idx];
s_dfy[fieldB * blockDim.x + threadIdx.x] = d_dfdy[idx];
#if LANDAU_DIM == 3
s_dfz[fieldB * blockDim.x + threadIdx.x] = d_dfdz[idx];
#endif
}
}
__syncthreads();
if (ipidx < nip_global) {
const PetscReal wi = ww[ipidx], x = xx[ipidx], y = yy[ipidx];
PetscReal temp1[3] = {0, 0, 0}, temp2 = 0;
#if LANDAU_DIM == 2
PetscReal Ud[2][2], Uk[2][2], mask = (PetscAbs(vj[0] - x) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1] - y) < 100 * PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
LandauTensor2D(vj, x, y, Ud, Uk, mask);
#else
PetscReal U[3][3], z = zz[ipidx], mask = (PetscAbs(vj[0] - x) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1] - y) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[2] - z) < 100 * PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
LandauTensor3D(vj, x, y, z, U, mask);
#endif
for (int fieldB = 0; fieldB < loc_Nf_r; fieldB++) {
temp1[0] += s_dfx[fieldB * blockDim.x + threadIdx.x] * s_nu_beta[fieldB + f_off_r] * s_invMass[fieldB + f_off_r];
temp1[1] += s_dfy[fieldB * blockDim.x + threadIdx.x] * s_nu_beta[fieldB + f_off_r] * s_invMass[fieldB + f_off_r];
#if LANDAU_DIM == 3
temp1[2] += s_dfz[fieldB * blockDim.x + threadIdx.x] * s_nu_beta[fieldB + f_off_r] * s_invMass[fieldB + f_off_r];
#endif
temp2 += s_f[fieldB * blockDim.x + threadIdx.x] * s_nu_beta[fieldB + f_off_r];
}
temp1[0] *= wi;
temp1[1] *= wi;
#if LANDAU_DIM == 3
temp1[2] *= wi;
#endif
temp2 *= wi;
#if LANDAU_DIM == 2
#pragma unroll
for (d2 = 0; d2 < 2; d2++) {
#pragma unroll
for (d3 = 0; d3 < 2; ++d3) {
/* K = U * grad(f): g2=e: i,A */
gg2_temp[d2] += Uk[d2][d3] * temp1[d3];
/* D = -U * (I \kron (fx)): g3=f: i,j,A */
gg3_temp[d2][d3] += Ud[d2][d3] * temp2;
}
}
#else
#pragma unroll
for (d2 = 0; d2 < 3; ++d2) {
#pragma unroll
for (d3 = 0; d3 < 3; ++d3) {
/* K = U * grad(f): g2 = e: i,A */
gg2_temp[d2] += U[d2][d3] * temp1[d3];
/* D = -U * (I \kron (fx)): g3 = f: i,j,A */
gg3_temp[d2][d3] += U[d2][d3] * temp2;
}
}
#endif
}
} /* IPs */
/* reduce gg temp sums across threads */
for (delta = blockDim.x / 2; delta > 0; delta /= 2) {
#pragma unroll
for (d2 = 0; d2 < dim; d2++) {
gg2_temp[d2] += __shfl_xor_sync(0xffffffff, gg2_temp[d2], delta, blockDim.x);
#pragma unroll
for (d3 = 0; d3 < dim; d3++) gg3_temp[d2][d3] += __shfl_xor_sync(0xffffffff, gg3_temp[d2][d3], delta, blockDim.x);
}
}
// add alpha and put in gg2/3
for (fieldA = threadIdx.x; fieldA < loc_Nf; fieldA += blockDim.x) {
#pragma unroll
for (d2 = 0; d2 < dim; d2++) {
s_gg2[d2][myQi][fieldA] += gg2_temp[d2] * s_nu_alpha[fieldA + f_off];
#pragma unroll
for (d3 = 0; d3 < dim; d3++) s_gg3[d2][d3][myQi][fieldA] -= gg3_temp[d2][d3] * s_nu_alpha[fieldA + f_off] * s_invMass[fieldA + f_off];
}
}
__syncthreads();
/* add electric field term once per IP */
for (fieldA = threadIdx.x; fieldA < loc_Nf; fieldA += blockDim.x) s_gg2[dim - 1][myQi][fieldA] += Eq_m[fieldA + f_off];
__syncthreads();
/* Jacobian transform - g2 */
for (fieldA = threadIdx.x; fieldA < loc_Nf; fieldA += blockDim.x) {
PetscReal wj = ww[jpidx];
for (d = 0; d < dim; ++d) {
s_g2[d][myQi][fieldA] = 0.0;
for (d2 = 0; d2 < dim; ++d2) {
s_g2[d][myQi][fieldA] += invJj[d * dim + d2] * s_gg2[d2][myQi][fieldA];
s_g3[d][d2][myQi][fieldA] = 0.0;
for (d3 = 0; d3 < dim; ++d3) {
for (dp = 0; dp < dim; ++dp) s_g3[d][d2][myQi][fieldA] += invJj[d * dim + d3] * s_gg3[d3][dp][myQi][fieldA] * invJj[d2 * dim + dp];
}
s_g3[d][d2][myQi][fieldA] *= wj;
}
s_g2[d][myQi][fieldA] *= wj;
}
}
__syncthreads(); // Synchronize (ensure all the data is available) and sum IP matrices
/* FE matrix construction */
{
int fieldA, d, qj, d2, q, idx, totDim = Nb * loc_Nf;
/* assemble */
for (fieldA = 0; fieldA < loc_Nf; fieldA++) {
for (f = threadIdx.y; f < Nb; f += blockDim.y) {
for (g = threadIdx.x; g < Nb; g += blockDim.x) {
PetscScalar t = 0;
for (qj = 0; qj < Nq; qj++) {
const PetscReal *BJq = &BB[qj * Nb], *DIq = &DD[qj * Nb * dim];
for (d = 0; d < dim; ++d) {
t += DIq[f * dim + d] * s_g2[d][qj][fieldA] * BJq[g];
for (d2 = 0; d2 < dim; ++d2) t += DIq[f * dim + d] * s_g3[d][d2][qj][fieldA] * DIq[g * dim + d2];
}
}
if (elemMat) {
const PetscInt fOff = (fieldA * Nb + f) * totDim + fieldA * Nb + g;
elemMat[fOff] += t; // ????
} else s_fieldMats[f][g] = t;
}
}
if (s_fieldMats) {
PetscScalar vals[LANDAU_MAX_Q_FACE * LANDAU_MAX_Q_FACE];
PetscInt nr, nc;
const LandauIdx *const Idxs = &d_maps[grid]->gIdx[loc_elem][fieldA][0];
__syncthreads();
if (threadIdx.y == 0) {
for (f = threadIdx.x; f < Nb; f += blockDim.x) {
idx = Idxs[f];
if (idx >= 0) {
s_idx[f][0] = idx + moffset;
s_scale[f][0] = 1.;
} else {
idx = -idx - 1;
for (q = 0; q < d_maps[grid]->num_face; q++) {
if (d_maps[grid]->c_maps[idx][q].gid >= 0) s_idx[f][q] = d_maps[grid]->c_maps[idx][q].gid + moffset;
else s_idx[f][q] = -1;
s_scale[f][q] = d_maps[grid]->c_maps[idx][q].scale;
}
}
}
}
__syncthreads();
for (f = threadIdx.y; f < Nb; f += blockDim.y) {
idx = Idxs[f];
if (idx >= 0) {
nr = 1;
} else {
nr = d_maps[grid]->num_face;
}
for (g = threadIdx.x; g < Nb; g += blockDim.x) {
idx = Idxs[g];
if (idx >= 0) {
nc = 1;
} else {
nc = d_maps[grid]->num_face;
}
for (q = 0; q < nr; q++) {
for (d = 0; d < nc; d++) vals[q * nc + d] = s_scale[f][q] * s_scale[g][d] * s_fieldMats[f][g];
}
MatSetValuesDevice(d_mat, nr, s_idx[f], nc, s_idx[g], vals, ADD_VALUES);
}
}
__syncthreads();
}
}
}
}
//
// The CUDA Landau kernel
//
__global__ void __launch_bounds__(256, 2) landau_jacobian(const PetscInt nip_global, const PetscInt dim, const PetscInt Nb, const PetscInt num_grids, const PetscReal invJj[], const PetscInt Nftot, const PetscReal nu_alpha[], const PetscReal nu_beta[], const PetscReal invMass[], const PetscReal Eq_m[], const PetscReal *const BB, const PetscReal *const DD, const PetscReal xx[], const PetscReal yy[], const PetscReal ww[], PetscScalar d_elem_mats[], P4estVertexMaps *d_maps[], PetscSplitCSRDataStructure d_mat, PetscReal d_f[], PetscReal d_dfdx[], PetscReal d_dfdy[],
#if LANDAU_DIM == 3
const PetscReal zz[], PetscReal d_dfdz[],
#endif
const PetscInt d_numCells[], const PetscInt d_species_offset[], const PetscInt d_mat_offset[], const PetscInt d_ip_offset[], const PetscInt d_ipf_offset[], const PetscInt d_elem_offset[])
{
extern __shared__ PetscReal smem[];
int size = 0;
PetscReal(*s_g2)[LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES] = (PetscReal(*)[LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES]) & smem[size];
size += LANDAU_MAX_NQ * LANDAU_MAX_SPECIES * LANDAU_DIM;
PetscReal(*s_g3)[LANDAU_DIM][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES] = (PetscReal(*)[LANDAU_DIM][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES]) & smem[size];
size += LANDAU_DIM * LANDAU_DIM * LANDAU_MAX_NQ * LANDAU_MAX_SPECIES;
PetscReal(*s_gg2)[LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES] = (PetscReal(*)[LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES]) & smem[size];
size += LANDAU_MAX_NQ * LANDAU_MAX_SPECIES * LANDAU_DIM;
PetscReal(*s_gg3)[LANDAU_DIM][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES] = (PetscReal(*)[LANDAU_DIM][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES]) & smem[size];
size += LANDAU_DIM * LANDAU_DIM * LANDAU_MAX_NQ * LANDAU_MAX_SPECIES;
PetscReal *s_nu_alpha = &smem[size];
size += LANDAU_MAX_SPECIES;
PetscReal *s_nu_beta = &smem[size];
size += LANDAU_MAX_SPECIES;
PetscReal *s_invMass = &smem[size];
size += LANDAU_MAX_SPECIES;
PetscReal *s_f = &smem[size];
size += blockDim.x * LANDAU_MAX_SPECIES;
PetscReal *s_dfx = &smem[size];
size += blockDim.x * LANDAU_MAX_SPECIES;
PetscReal *s_dfy = &smem[size];
size += blockDim.x * LANDAU_MAX_SPECIES;
#if LANDAU_DIM == 3
PetscReal *s_dfz = &smem[size];
size += blockDim.x * LANDAU_MAX_SPECIES;
#endif
PetscScalar(*s_fieldMats)[LANDAU_MAX_NQ][LANDAU_MAX_NQ];
PetscReal(*s_scale)[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE];
PetscInt(*s_idx)[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE];
const PetscInt b_elem_idx = blockIdx.y, b_id = blockIdx.x;
PetscInt Nq = blockDim.y, grid = 0; // Nq == Nb
PetscScalar *elemMat = NULL; /* my output */
while (b_elem_idx >= d_elem_offset[grid + 1]) grid++;
{
const PetscInt loc_Nf = d_species_offset[grid + 1] - d_species_offset[grid], loc_elem = b_elem_idx - d_elem_offset[grid];
const PetscInt myQi = threadIdx.y;
const PetscInt jpidx = d_ip_offset[grid] + myQi + loc_elem * Nq;
const PetscReal *invJ = &invJj[jpidx * dim * dim];
if (d_elem_mats) {
PetscInt totDim = loc_Nf * Nb;
elemMat = d_elem_mats; // start a beginning and get to my element matrix
for (PetscInt b_id2 = 0; b_id2 < b_id; b_id2++) {
for (PetscInt grid2 = 0; grid2 < num_grids; grid2++) {
PetscInt Nfloc2 = d_species_offset[grid2 + 1] - d_species_offset[grid2], totDim2 = Nfloc2 * Nb;
elemMat += d_numCells[grid2] * totDim2 * totDim2; // jump past grids,could be in an offset
}
}
for (PetscInt grid2 = 0; grid2 < grid; grid2++) {
PetscInt Nfloc2 = d_species_offset[grid2 + 1] - d_species_offset[grid2], totDim2 = Nfloc2 * Nb;
elemMat += d_numCells[grid2] * totDim2 * totDim2; // jump past grids, could be in an offset
}
elemMat += loc_elem * totDim * totDim; // index into local matrix & zero out
for (int i = threadIdx.x + threadIdx.y * blockDim.x; i < totDim * totDim; i += blockDim.x * blockDim.y) elemMat[i] = 0;
}
__syncthreads();
if (d_maps) {
// reuse the space for fieldMats
s_fieldMats = (PetscScalar(*)[LANDAU_MAX_NQ][LANDAU_MAX_NQ]) & smem[size];
size += LANDAU_MAX_NQ * LANDAU_MAX_NQ;
s_scale = (PetscReal(*)[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE]) & smem[size];
size += LANDAU_MAX_NQ * LANDAU_MAX_Q_FACE;
s_idx = (PetscInt(*)[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE]) & smem[size];
size += LANDAU_MAX_NQ * LANDAU_MAX_Q_FACE; // this is too big, idx is an integer
} else {
s_fieldMats = NULL;
}
__syncthreads();
landau_jac_kernel(num_grids, jpidx, nip_global, grid, xx, yy, ww, invJ, Nftot, nu_alpha, nu_beta, invMass, Eq_m, BB, DD, elemMat, d_maps, d_mat, *s_fieldMats, *s_scale, *s_idx, *s_g2, *s_g3, *s_gg2, *s_gg3, s_nu_alpha, s_nu_beta, s_invMass, s_f, s_dfx, s_dfy, d_f, d_dfdx, d_dfdy,
#if LANDAU_DIM == 3
zz, s_dfz, d_dfdz,
#endif
d_numCells, d_species_offset, d_mat_offset, d_ip_offset, d_ipf_offset, d_elem_offset);
}
}
__global__ void __launch_bounds__(256, 4) landau_mass(const PetscInt dim, const PetscInt Nb, const PetscInt num_grids, const PetscReal d_w[], const PetscReal *const BB, const PetscReal *const DD, PetscScalar d_elem_mats[], P4estVertexMaps *d_maps[], PetscSplitCSRDataStructure d_mat, PetscReal shift, const PetscInt d_numCells[], const PetscInt d_species_offset[], const PetscInt d_mat_offset[], const PetscInt d_ip_offset[], const PetscInt d_elem_offset[])
{
extern __shared__ PetscReal smem[];
const PetscInt Nq = blockDim.y, b_elem_idx = blockIdx.y, b_id = blockIdx.x;
PetscScalar *elemMat = NULL; /* my output */
PetscInt fieldA, d, qj, q, idx, f, g, grid = 0, size = 0;
PetscScalar(*s_fieldMats)[LANDAU_MAX_NQ][LANDAU_MAX_NQ];
PetscReal(*s_scale)[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE];
PetscInt(*s_idx)[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE];
if (d_maps) {
// reuse the space for fieldMats
s_fieldMats = (PetscScalar(*)[LANDAU_MAX_NQ][LANDAU_MAX_NQ]) & smem[size];
size += LANDAU_MAX_NQ * LANDAU_MAX_NQ;
s_scale = (PetscReal(*)[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE]) & smem[size];
size += LANDAU_MAX_NQ * LANDAU_MAX_Q_FACE;
s_idx = (PetscInt(*)[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE]) & smem[size];
size += LANDAU_MAX_NQ * LANDAU_MAX_Q_FACE; // this is too big, idx is an integer
} else {
s_fieldMats = NULL;
}
while (b_elem_idx >= d_elem_offset[grid + 1]) grid++;
{
const PetscInt loc_Nf = d_species_offset[grid + 1] - d_species_offset[grid], loc_elem = b_elem_idx - d_elem_offset[grid];
const PetscInt moffset = LAND_MOFFSET(b_id, grid, gridDim.x, num_grids, d_mat_offset), totDim = loc_Nf * Nq;
if (d_elem_mats) {
elemMat = d_elem_mats; // start a beginning
for (PetscInt b_id2 = 0; b_id2 < b_id; b_id2++) {
for (PetscInt grid2 = 0; grid2 < num_grids; grid2++) {
PetscInt Nfloc2 = d_species_offset[grid2 + 1] - d_species_offset[grid2], totDim2 = Nfloc2 * Nb;
elemMat += d_numCells[grid2] * totDim2 * totDim2; // jump past grids,could be in an offset
}
}
for (PetscInt grid2 = 0; grid2 < grid; grid2++) {
PetscInt Nfloc2 = d_species_offset[grid2 + 1] - d_species_offset[grid2], totDim2 = Nfloc2 * Nb;
elemMat += d_numCells[grid2] * totDim2 * totDim2; // jump past grids,could be in an offset
}
elemMat += loc_elem * totDim * totDim;
for (int i = threadIdx.x + threadIdx.y * blockDim.x; i < totDim * totDim; i += blockDim.x * blockDim.y) elemMat[i] = 0;
}
__syncthreads();
/* FE mass matrix construction */
for (fieldA = 0; fieldA < loc_Nf; fieldA++) {
PetscScalar vals[LANDAU_MAX_Q_FACE * LANDAU_MAX_Q_FACE];
PetscInt nr, nc;
for (f = threadIdx.y; f < Nb; f += blockDim.y) {
for (g = threadIdx.x; g < Nb; g += blockDim.x) {
PetscScalar t = 0;
for (qj = 0; qj < Nq; qj++) {
const PetscReal *BJq = &BB[qj * Nb];
const PetscInt jpidx = d_ip_offset[grid] + qj + loc_elem * Nq;
if (dim == 2) {
t += BJq[f] * d_w[jpidx] * shift * BJq[g] * 2. * PETSC_PI;
} else {
t += BJq[f] * d_w[jpidx] * shift * BJq[g];
}
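            // the extra 2*pi in 2D presumably comes from the azimuthal integral of the axisymmetric
            // (cylindrical r-z) velocity-space formulation; the quadrature weight d_w supplies the
            // rest of the volume element in either dimension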
}
if (elemMat) {
const PetscInt fOff = (fieldA * Nb + f) * totDim + fieldA * Nb + g;
elemMat[fOff] += t; // ????
} else (*s_fieldMats)[f][g] = t;
}
}
if (!elemMat) {
const LandauIdx *const Idxs = &d_maps[grid]->gIdx[loc_elem][fieldA][0];
__syncthreads();
if (threadIdx.y == 0) {
for (f = threadIdx.x; f < Nb; f += blockDim.x) {
idx = Idxs[f];
if (idx >= 0) {
(*s_idx)[f][0] = idx + moffset;
(*s_scale)[f][0] = 1.;
} else {
idx = -idx - 1;
for (q = 0; q < d_maps[grid]->num_face; q++) {
if (d_maps[grid]->c_maps[idx][q].gid >= 0) (*s_idx)[f][q] = d_maps[grid]->c_maps[idx][q].gid + moffset;
else (*s_idx)[f][q] = -1;
(*s_scale)[f][q] = d_maps[grid]->c_maps[idx][q].scale;
}
}
}
}
__syncthreads();
for (f = threadIdx.y; f < Nb; f += blockDim.y) {
idx = Idxs[f];
if (idx >= 0) {
nr = 1;
} else {
nr = d_maps[grid]->num_face;
}
for (g = threadIdx.x; g < Nb; g += blockDim.x) {
idx = Idxs[g];
if (idx >= 0) {
nc = 1;
} else {
nc = d_maps[grid]->num_face;
}
for (q = 0; q < nr; q++) {
for (d = 0; d < nc; d++) vals[q * nc + d] = (*s_scale)[f][q] * (*s_scale)[g][d] * (*s_fieldMats)[f][g];
}
MatSetValuesDevice(d_mat, nr, (*s_idx)[f], nc, (*s_idx)[g], vals, ADD_VALUES);
}
}
}
__syncthreads();
}
}
}
PetscErrorCode LandauCUDAJacobian(DM plex[], const PetscInt Nq, const PetscInt batch_sz, const PetscInt num_grids, const PetscInt a_numCells[], PetscReal a_Eq_m[], PetscScalar a_elem_closure[], const PetscScalar a_xarray[], const LandauStaticData *SData_d, const PetscReal shift, const PetscLogEvent events[], const PetscInt a_mat_offset[], const PetscInt a_species_offset[], Mat subJ[], Mat JacP)
{
hipError_t cerr;
PetscInt Nb = Nq, dim, nip_global, num_cells_batch, elem_mat_size_tot;
PetscInt *d_numCells, *d_species_offset, *d_mat_offset, *d_ip_offset, *d_ipf_offset, *d_elem_offset;
PetscInt szf = sizeof(PetscReal), szs = sizeof(PetscScalar), Nftot = a_species_offset[num_grids];
PetscReal *d_BB = NULL, *d_DD = NULL, *d_invJj = NULL, *d_nu_alpha = NULL, *d_nu_beta = NULL, *d_invMass = NULL, *d_Eq_m = NULL, *d_x = NULL, *d_y = NULL, *d_w = NULL;
PetscScalar *d_elem_mats = NULL, *d_vertex_f = NULL;
PetscReal *d_f = NULL, *d_dfdx = NULL, *d_dfdy = NULL;
#if LANDAU_DIM == 3
PetscReal *d_dfdz = NULL, *d_z = NULL;
#endif
LandauCtx *ctx;
PetscSplitCSRDataStructure d_mat = NULL;
P4estVertexMaps **d_maps, *maps[LANDAU_MAX_GRIDS];
int nnn = 256 / Nq; // machine dependent
PetscContainer container;
PetscFunctionBegin;
PetscCall(PetscLogEventBegin(events[3], 0, 0, 0, 0));
while (nnn & nnn - 1) nnn = nnn & nnn - 1;
if (nnn > 16) nnn = 16;
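  // the loop above rounds nnn down to the largest power of two not exceeding 256/Nq (n &= n-1 clears the
  // lowest set bit), and the cap keeps the x extent of the thread blocks at <= 16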
PetscCall(DMGetApplicationContext(plex[0], &ctx));
PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
PetscCall(DMGetDimension(plex[0], &dim));
PetscCheck(dim == LANDAU_DIM, PETSC_COMM_SELF, PETSC_ERR_PLIB, "LANDAU_DIM %d != dim %" PetscInt_FMT, LANDAU_DIM, dim);
if (ctx->gpu_assembly) {
PetscCall(PetscObjectQuery((PetscObject)JacP, "assembly_maps", (PetscObject *)&container));
if (container) { // not here first call
static int init = 0; // hack. just do every time, or put in setup (but that is in base class code), or add init_maps flag
if (!init++) {
P4estVertexMaps *h_maps = NULL;
PetscCall(PetscContainerGetPointer(container, (void **)&h_maps));
for (PetscInt grid = 0; grid < num_grids; grid++) {
if (h_maps[grid].d_self) {
maps[grid] = h_maps[grid].d_self;
} else {
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "GPU assembly but no metadata in container");
}
}
PetscCallCUDA(hipMemcpy(SData_d->maps, maps, num_grids * sizeof(P4estVertexMaps *), hipMemcpyHostToDevice));
}
d_maps = (P4estVertexMaps **)SData_d->maps;
// this does the setup the first time called
PetscCall(MatCUSPARSEGetDeviceMatWrite(JacP, &d_mat));
} else {
d_maps = NULL;
}
} else {
container = NULL;
d_maps = NULL;
}
PetscCall(PetscLogEventEnd(events[3], 0, 0, 0, 0));
{
PetscInt elem_mat_size = 0;
nip_global = num_cells_batch = 0;
for (PetscInt grid = 0; grid < num_grids; grid++) {
PetscInt Nfloc = a_species_offset[grid + 1] - a_species_offset[grid], totDim = Nfloc * Nb;
nip_global += a_numCells[grid] * Nq;
num_cells_batch += a_numCells[grid]; // is in d_elem_offset, but not on host
elem_mat_size += a_numCells[grid] * totDim * totDim; // could save in an offset here -- batch major ordering
}
elem_mat_size_tot = d_maps ? 0 : elem_mat_size;
}
dim3 dimGrid(batch_sz, num_cells_batch);
if (elem_mat_size_tot) {
PetscCallCUDA(hipMalloc((void **)&d_elem_mats, batch_sz * elem_mat_size_tot * szs)); // kernel output - first call is on CPU
} else d_elem_mats = NULL;
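  // with GPU assembly (d_maps set) no element matrices are materialized (elem_mat_size_tot == 0); the
  // kernels scatter directly into the device matrix with MatSetValuesDevice, otherwise the element
  // matrices are copied back and assembled on the host after the launches below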
// create data
d_BB = (PetscReal *)SData_d->B;
d_DD = (PetscReal *)SData_d->D;
if (a_elem_closure || a_xarray) { // form f and df
PetscCall(PetscLogEventBegin(events[1], 0, 0, 0, 0));
PetscCallCUDA(hipMemcpy(SData_d->Eq_m, a_Eq_m, Nftot * szf, hipMemcpyHostToDevice));
d_invJj = (PetscReal *)SData_d->invJ;
d_nu_alpha = (PetscReal *)SData_d->alpha;
d_nu_beta = (PetscReal *)SData_d->beta;
d_invMass = (PetscReal *)SData_d->invMass;
d_x = (PetscReal *)SData_d->x;
d_y = (PetscReal *)SData_d->y;
d_w = (PetscReal *)SData_d->w;
d_Eq_m = (PetscReal *)SData_d->Eq_m;
d_dfdx = (PetscReal *)SData_d->dfdx;
d_dfdy = (PetscReal *)SData_d->dfdy;
#if LANDAU_DIM == 3
d_dfdz = (PetscReal *)SData_d->dfdz;
d_z = (PetscReal *)SData_d->z;
#endif
d_f = (PetscReal *)SData_d->f;
// get a d_vertex_f
if (a_elem_closure) {
PetscInt closure_sz = 0; // argh, don't have this on the host!!!
for (PetscInt grid = 0; grid < num_grids; grid++) {
PetscInt nfloc = a_species_offset[grid + 1] - a_species_offset[grid];
closure_sz += Nq * nfloc * a_numCells[grid];
}
closure_sz *= batch_sz;
PetscCallCUDA(hipMalloc((void **)&d_vertex_f, closure_sz * sizeof(*a_elem_closure)));
PetscCallCUDA(hipMemcpy(d_vertex_f, a_elem_closure, closure_sz * sizeof(*a_elem_closure), hipMemcpyHostToDevice));
} else {
d_vertex_f = (PetscScalar *)a_xarray;
}
PetscCall(PetscLogEventEnd(events[1], 0, 0, 0, 0));
} else {
d_w = (PetscReal *)SData_d->w; // mass just needs the weights
}
//
d_numCells = (PetscInt *)SData_d->NCells; // redundant -- remove
d_species_offset = (PetscInt *)SData_d->species_offset;
d_mat_offset = (PetscInt *)SData_d->mat_offset;
d_ip_offset = (PetscInt *)SData_d->ip_offset;
d_ipf_offset = (PetscInt *)SData_d->ipf_offset;
d_elem_offset = (PetscInt *)SData_d->elem_offset;
if (a_elem_closure || a_xarray) { // form f and df
dim3 dimBlockFDF(nnn > Nftot ? Nftot : nnn, Nq), dimBlock((nip_global > nnn) ? nnn : nip_global, Nq);
PetscCall(PetscLogEventBegin(events[8], 0, 0, 0, 0));
PetscCall(PetscLogGpuTimeBegin());
PetscCall(PetscInfo(plex[0], "Form F and dF/dx vectors: nip_global=%" PetscInt_FMT " num_grids=%" PetscInt_FMT "\n", nip_global, num_grids));
hipLaunchKernelGGL(( landau_form_fdf), dim3(dimGrid), dim3(dimBlockFDF), 0, 0, dim, Nb, num_grids, d_invJj, d_BB, d_DD, d_vertex_f, d_maps, d_f, d_dfdx, d_dfdy,
#if LANDAU_DIM == 3
d_dfdz,
#endif
d_numCells, d_species_offset, d_mat_offset, d_ip_offset, d_ipf_offset, d_elem_offset);
PetscCUDACheckLaunch;
PetscCall(PetscLogGpuFlops(batch_sz * nip_global * (PetscLogDouble)(2 * Nb * (1 + dim))));
if (a_elem_closure) {
PetscCallCUDA(hipFree(d_vertex_f));
d_vertex_f = NULL;
}
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogEventEnd(events[8], 0, 0, 0, 0));
// Jacobian
PetscCall(PetscLogEventBegin(events[4], 0, 0, 0, 0));
PetscCall(PetscLogGpuTimeBegin());
PetscCall(PetscLogGpuFlops(batch_sz * nip_global * (PetscLogDouble)(a_elem_closure ? (nip_global * (11 * Nftot + 4 * dim * dim) + 6 * Nftot * dim * dim * dim + 10 * Nftot * dim * dim + 4 * Nftot * dim + Nb * Nftot * Nb * Nq * dim * dim * 5) : Nb * Nftot * Nb * Nq * 4)));
PetscInt ii = 2 * LANDAU_MAX_NQ * LANDAU_MAX_SPECIES * LANDAU_DIM * (1 + LANDAU_DIM) + 3 * LANDAU_MAX_SPECIES + (1 + LANDAU_DIM) * dimBlock.x * LANDAU_MAX_SPECIES + LANDAU_MAX_NQ * LANDAU_MAX_NQ + 2 * LANDAU_MAX_NQ * LANDAU_MAX_Q_FACE;
if (ii * szf >= 49152) {
cerr = hipFuncSetAttribute(landau_jacobian, hipFuncAttributeMaxDynamicSharedMemorySize, 98304);
PetscCallCUDA(cerr);
}
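    // 49152 bytes (48 KiB) is the usual default per-block shared-memory limit; the attribute call opts
    // this kernel into a larger dynamic shared-memory carve-out (96 KiB here) -- the exact ceiling is
    // architecture dependent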
PetscCall(PetscInfo(plex[0], "Jacobian shared memory size: %" PetscInt_FMT " bytes, d_elem_mats=%p d_maps=%p\n", ii, d_elem_mats, d_maps));
hipLaunchKernelGGL(( landau_jacobian), dim3(dimGrid), dim3(dimBlock), ii * szf, 0, nip_global, dim, Nb, num_grids, d_invJj, Nftot, d_nu_alpha, d_nu_beta, d_invMass, d_Eq_m, d_BB, d_DD, d_x, d_y, d_w, d_elem_mats, d_maps, d_mat, d_f, d_dfdx, d_dfdy,
#if LANDAU_DIM == 3
d_z, d_dfdz,
#endif
d_numCells, d_species_offset, d_mat_offset, d_ip_offset, d_ipf_offset, d_elem_offset);
PetscCUDACheckLaunch; // has sync
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogEventEnd(events[4], 0, 0, 0, 0));
} else { // mass
dim3 dimBlock(nnn, Nq);
PetscInt ii = LANDAU_MAX_NQ * LANDAU_MAX_NQ + 2 * LANDAU_MAX_NQ * LANDAU_MAX_Q_FACE;
if (ii * szf >= 49152) {
cerr = hipFuncSetAttribute(landau_mass, hipFuncAttributeMaxDynamicSharedMemorySize, 98304);
PetscCallCUDA(cerr);
}
PetscCall(PetscInfo(plex[0], "Mass d_maps = %p. Nq=%" PetscInt_FMT ", vector size %d num_cells_batch=%" PetscInt_FMT ", %" PetscInt_FMT " shared memory words\n", d_maps, Nq, nnn, num_cells_batch, ii));
PetscCall(PetscLogEventBegin(events[16], 0, 0, 0, 0));
PetscCall(PetscLogGpuTimeBegin());
hipLaunchKernelGGL(( landau_mass), dim3(dimGrid), dim3(dimBlock), ii * szf, 0, dim, Nb, num_grids, d_w, d_BB, d_DD, d_elem_mats, d_maps, d_mat, shift, d_numCells, d_species_offset, d_mat_offset, d_ip_offset, d_elem_offset);
PetscCUDACheckLaunch; // has sync
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogEventEnd(events[16], 0, 0, 0, 0));
}
// First time assembly with or without GPU assembly
if (d_elem_mats) {
PetscInt elem_mats_idx = 0;
for (PetscInt b_id = 0; b_id < batch_sz; b_id++) { // OpenMP (once)
for (PetscInt grid = 0; grid < num_grids; grid++) { // elem_mats_idx += totDim*totDim*a_numCells[grid];
const PetscInt Nfloc = a_species_offset[grid + 1] - a_species_offset[grid], totDim = Nfloc * Nq;
PetscScalar *elemMats = NULL, *elMat;
PetscSection section, globalSection;
PetscInt cStart, cEnd, ej;
PetscInt moffset = LAND_MOFFSET(b_id, grid, batch_sz, num_grids, a_mat_offset), nloc, nzl, colbuf[1024], row;
const PetscInt *cols;
const PetscScalar *vals;
Mat B = subJ[LAND_PACK_IDX(b_id, grid)];
PetscCall(PetscLogEventBegin(events[5], 0, 0, 0, 0));
PetscCall(DMPlexGetHeightStratum(plex[grid], 0, &cStart, &cEnd));
PetscCall(DMGetLocalSection(plex[grid], §ion));
PetscCall(DMGetGlobalSection(plex[grid], &globalSection));
PetscCall(PetscMalloc1(totDim * totDim * a_numCells[grid], &elemMats));
PetscCallCUDA(hipMemcpy(elemMats, &d_elem_mats[elem_mats_idx], totDim * totDim * a_numCells[grid] * sizeof(*elemMats), hipMemcpyDeviceToHost));
PetscCall(PetscLogEventEnd(events[5], 0, 0, 0, 0));
PetscCall(PetscLogEventBegin(events[6], 0, 0, 0, 0));
for (ej = cStart, elMat = elemMats; ej < cEnd; ++ej, elMat += totDim * totDim) {
PetscCall(DMPlexMatSetClosure(plex[grid], section, globalSection, B, ej, elMat, ADD_VALUES));
if (ej == -1) {
int d, f;
PetscPrintf(PETSC_COMM_SELF, "GPU Element matrix\n");
for (d = 0; d < totDim; ++d) {
for (f = 0; f < totDim; ++f) PetscPrintf(PETSC_COMM_SELF, " %12.5e", PetscRealPart(elMat[d * totDim + f]));
PetscPrintf(PETSC_COMM_SELF, "\n");
}
}
}
PetscCall(PetscFree(elemMats));
PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY));
PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY));
// move nest matrix to global JacP
PetscCall(MatGetSize(B, &nloc, NULL));
for (int i = 0; i < nloc; i++) {
PetscCall(MatGetRow(B, i, &nzl, &cols, &vals));
PetscCheck(nzl <= 1024, PetscObjectComm((PetscObject)B), PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT, nzl);
for (int j = 0; j < nzl; j++) colbuf[j] = cols[j] + moffset;
row = i + moffset;
PetscCall(MatSetValues(JacP, 1, &row, nzl, colbuf, vals, ADD_VALUES));
PetscCall(MatRestoreRow(B, i, &nzl, &cols, &vals));
}
PetscCall(MatDestroy(&B));
PetscCall(PetscLogEventEnd(events[6], 0, 0, 0, 0));
elem_mats_idx += totDim * totDim * a_numCells[grid]; // this can be a stored offset?
} // grids
}
PetscCheck(elem_mats_idx == batch_sz * elem_mat_size_tot, PetscObjectComm((PetscObject)JacP), PETSC_ERR_PLIB, "elem_mats_idx != batch_sz*elem_mat_size_tot: %" PetscInt_FMT " %" PetscInt_FMT, elem_mats_idx, batch_sz * elem_mat_size_tot);
PetscCallCUDA(hipFree(d_elem_mats));
}
PetscFunctionReturn(0);
}
| 3dfdc0eccb3f5def292edaae33d50deb175ab2db.cu | /*
Implements the Landau kernel
*/
#include <petscconf.h>
#include <petsc/private/dmpleximpl.h> /*I "dmpleximpl.h" I*/
#include <petsclandau.h>
#define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1
#include <../src/mat/impls/aij/seq/aij.h>
#include <petscmat.h>
#include <petscdevice_cuda.h>
#include "../land_tensors.h"
#include <petscaijdevice.h>
PETSC_EXTERN PetscErrorCode LandauCUDACreateMatMaps(P4estVertexMaps maps[], pointInterpolationP4est (*pointMaps)[LANDAU_MAX_Q_FACE], PetscInt Nf[], PetscInt Nq, PetscInt grid)
{
P4estVertexMaps h_maps;
PetscFunctionBegin;
h_maps.num_elements = maps[grid].num_elements;
h_maps.num_face = maps[grid].num_face;
h_maps.num_reduced = maps[grid].num_reduced;
h_maps.deviceType = maps[grid].deviceType;
h_maps.Nf = Nf[grid];
h_maps.numgrids = maps[grid].numgrids;
PetscCallCUDA(cudaMalloc((void **)&h_maps.c_maps, maps[grid].num_reduced * sizeof *pointMaps));
PetscCallCUDA(cudaMemcpy(h_maps.c_maps, maps[grid].c_maps, maps[grid].num_reduced * sizeof *pointMaps, cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMalloc((void **)&h_maps.gIdx, maps[grid].num_elements * sizeof *maps[grid].gIdx));
PetscCallCUDA(cudaMemcpy(h_maps.gIdx, maps[grid].gIdx, maps[grid].num_elements * sizeof *maps[grid].gIdx, cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMalloc((void **)&maps[grid].d_self, sizeof(P4estVertexMaps)));
PetscCallCUDA(cudaMemcpy(maps[grid].d_self, &h_maps, sizeof(P4estVertexMaps), cudaMemcpyHostToDevice));
PetscFunctionReturn(0);
}
PETSC_EXTERN PetscErrorCode LandauCUDADestroyMatMaps(P4estVertexMaps maps[], PetscInt num_grids)
{
PetscFunctionBegin;
for (PetscInt grid = 0; grid < num_grids; grid++) {
P4estVertexMaps *d_maps = maps[grid].d_self, h_maps;
PetscCallCUDA(cudaMemcpy(&h_maps, d_maps, sizeof(P4estVertexMaps), cudaMemcpyDeviceToHost));
PetscCallCUDA(cudaFree(h_maps.c_maps));
PetscCallCUDA(cudaFree(h_maps.gIdx));
PetscCallCUDA(cudaFree(d_maps));
}
PetscFunctionReturn(0);
}
PetscErrorCode LandauCUDAStaticDataSet(DM plex, const PetscInt Nq, const PetscInt batch_sz, const PetscInt num_grids, PetscInt a_numCells[], PetscInt a_species_offset[], PetscInt a_mat_offset[], PetscReal nu_alpha[], PetscReal nu_beta[], PetscReal a_invMass[], PetscReal a_invJ[], PetscReal a_x[], PetscReal a_y[], PetscReal a_z[], PetscReal a_w[], LandauStaticData *SData_d)
{
PetscTabulation *Tf;
PetscReal *BB, *DD;
PetscInt dim, Nb = Nq, szf = sizeof(PetscReal), szs = sizeof(PetscScalar), szi = sizeof(PetscInt);
PetscInt h_ip_offset[LANDAU_MAX_GRIDS + 1], h_ipf_offset[LANDAU_MAX_GRIDS + 1], h_elem_offset[LANDAU_MAX_GRIDS + 1], nip, IPfdf_sz, Nf;
PetscDS prob;
PetscFunctionBegin;
PetscCall(DMGetDimension(plex, &dim));
PetscCall(DMGetDS(plex, &prob));
PetscCheck(LANDAU_DIM == dim, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " != LANDAU_DIM %d", dim, LANDAU_DIM);
PetscCall(PetscDSGetTabulation(prob, &Tf));
BB = Tf[0]->T[0];
DD = Tf[0]->T[1];
Nf = h_ip_offset[0] = h_ipf_offset[0] = h_elem_offset[0] = 0;
nip = 0;
IPfdf_sz = 0;
for (PetscInt grid = 0; grid < num_grids; grid++) {
PetscInt nfloc = a_species_offset[grid + 1] - a_species_offset[grid];
h_elem_offset[grid + 1] = h_elem_offset[grid] + a_numCells[grid];
nip += a_numCells[grid] * Nq;
h_ip_offset[grid + 1] = nip;
IPfdf_sz += Nq * nfloc * a_numCells[grid];
h_ipf_offset[grid + 1] = IPfdf_sz;
}
Nf = a_species_offset[num_grids];
{
PetscCallCUDA(cudaMalloc((void **)&SData_d->B, Nq * Nb * szf)); // kernel input
PetscCallCUDA(cudaMemcpy(SData_d->B, BB, Nq * Nb * szf, cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMalloc((void **)&SData_d->D, Nq * Nb * dim * szf)); // kernel input
PetscCallCUDA(cudaMemcpy(SData_d->D, DD, Nq * Nb * dim * szf, cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMalloc((void **)&SData_d->alpha, Nf * szf)); // kernel input
PetscCallCUDA(cudaMalloc((void **)&SData_d->beta, Nf * szf)); // kernel input
PetscCallCUDA(cudaMalloc((void **)&SData_d->invMass, Nf * szf)); // kernel input
PetscCallCUDA(cudaMemcpy(SData_d->alpha, nu_alpha, Nf * szf, cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMemcpy(SData_d->beta, nu_beta, Nf * szf, cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMemcpy(SData_d->invMass, a_invMass, Nf * szf, cudaMemcpyHostToDevice));
// collect geometry
PetscCallCUDA(cudaMalloc((void **)&SData_d->invJ, nip * dim * dim * szf)); // kernel input
PetscCallCUDA(cudaMemcpy(SData_d->invJ, a_invJ, nip * dim * dim * szf, cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMalloc((void **)&SData_d->x, nip * szf)); // kernel input
PetscCallCUDA(cudaMemcpy(SData_d->x, a_x, nip * szf, cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMalloc((void **)&SData_d->y, nip * szf)); // kernel input
PetscCallCUDA(cudaMemcpy(SData_d->y, a_y, nip * szf, cudaMemcpyHostToDevice));
#if LANDAU_DIM == 3
PetscCallCUDA(cudaMalloc((void **)&SData_d->z, nip * szf)); // kernel input
PetscCallCUDA(cudaMemcpy(SData_d->z, a_z, nip * szf, cudaMemcpyHostToDevice));
#endif
PetscCallCUDA(cudaMalloc((void **)&SData_d->w, nip * szf)); // kernel input
PetscCallCUDA(cudaMemcpy(SData_d->w, a_w, nip * szf, cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMalloc((void **)&SData_d->NCells, num_grids * szi));
PetscCallCUDA(cudaMemcpy(SData_d->NCells, a_numCells, num_grids * szi, cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMalloc((void **)&SData_d->species_offset, (num_grids + 1) * szi));
PetscCallCUDA(cudaMemcpy(SData_d->species_offset, a_species_offset, (num_grids + 1) * szi, cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMalloc((void **)&SData_d->mat_offset, (num_grids + 1) * szi));
PetscCallCUDA(cudaMemcpy(SData_d->mat_offset, a_mat_offset, (num_grids + 1) * szi, cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMalloc((void **)&SData_d->ip_offset, (num_grids + 1) * szi));
PetscCallCUDA(cudaMemcpy(SData_d->ip_offset, h_ip_offset, (num_grids + 1) * szi, cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMalloc((void **)&SData_d->ipf_offset, (num_grids + 1) * szi));
PetscCallCUDA(cudaMemcpy(SData_d->ipf_offset, h_ipf_offset, (num_grids + 1) * szi, cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMalloc((void **)&SData_d->elem_offset, (num_grids + 1) * szi));
PetscCallCUDA(cudaMemcpy(SData_d->elem_offset, h_elem_offset, (num_grids + 1) * szi, cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMalloc((void **)&SData_d->maps, num_grids * sizeof(P4estVertexMaps *)));
// allocate space for dynamic data once
PetscCallCUDA(cudaMalloc((void **)&SData_d->Eq_m, Nf * szf)); // this could be for each vertex (todo?)
PetscCallCUDA(cudaMalloc((void **)&SData_d->f, nip * Nf * szs * batch_sz)); // for each vertex in batch
PetscCallCUDA(cudaMalloc((void **)&SData_d->dfdx, nip * Nf * szs * batch_sz));
PetscCallCUDA(cudaMalloc((void **)&SData_d->dfdy, nip * Nf * szs * batch_sz));
#if LANDAU_DIM == 3
PetscCallCUDA(cudaMalloc((void **)&SData_d->dfdz, nip * Nf * szs * batch_sz));
#endif
}
PetscFunctionReturn(0);
}
PetscErrorCode LandauCUDAStaticDataClear(LandauStaticData *SData_d)
{
PetscFunctionBegin;
if (SData_d->alpha) {
PetscCallCUDA(cudaFree(SData_d->alpha));
SData_d->alpha = NULL;
PetscCallCUDA(cudaFree(SData_d->beta));
PetscCallCUDA(cudaFree(SData_d->invMass));
PetscCallCUDA(cudaFree(SData_d->B));
PetscCallCUDA(cudaFree(SData_d->D));
PetscCallCUDA(cudaFree(SData_d->invJ));
#if LANDAU_DIM == 3
PetscCallCUDA(cudaFree(SData_d->z));
#endif
PetscCallCUDA(cudaFree(SData_d->x));
PetscCallCUDA(cudaFree(SData_d->y));
PetscCallCUDA(cudaFree(SData_d->w));
// dynamic data
PetscCallCUDA(cudaFree(SData_d->Eq_m));
PetscCallCUDA(cudaFree(SData_d->f));
PetscCallCUDA(cudaFree(SData_d->dfdx));
PetscCallCUDA(cudaFree(SData_d->dfdy));
#if LANDAU_DIM == 3
PetscCallCUDA(cudaFree(SData_d->dfdz));
#endif
PetscCallCUDA(cudaFree(SData_d->NCells));
PetscCallCUDA(cudaFree(SData_d->species_offset));
PetscCallCUDA(cudaFree(SData_d->mat_offset));
PetscCallCUDA(cudaFree(SData_d->ip_offset));
PetscCallCUDA(cudaFree(SData_d->ipf_offset));
PetscCallCUDA(cudaFree(SData_d->elem_offset));
PetscCallCUDA(cudaFree(SData_d->maps));
}
PetscFunctionReturn(0);
}
//
// The GPU Landau kernel
//
__global__ void landau_form_fdf(const PetscInt dim, const PetscInt Nb, const PetscInt num_grids, const PetscReal d_invJ[], const PetscReal *const BB, const PetscReal *const DD, PetscScalar *d_vertex_f, P4estVertexMaps *d_maps[], PetscReal d_f[], PetscReal d_dfdx[], PetscReal d_dfdy[],
#if LANDAU_DIM == 3
PetscReal d_dfdz[],
#endif
const PetscInt d_numCells[], const PetscInt d_species_offset[], const PetscInt d_mat_offset[], const PetscInt d_ip_offset[], const PetscInt d_ipf_offset[], const PetscInt d_elem_offset[]) // output
{
const PetscInt Nq = blockDim.y, myQi = threadIdx.y;
const PetscInt b_elem_idx = blockIdx.y, b_id = blockIdx.x, IPf_sz_glb = d_ipf_offset[num_grids];
const PetscReal *Bq = &BB[myQi * Nb], *Dq = &DD[myQi * Nb * dim];
PetscInt grid = 0, f, d, b, e, q;
while (b_elem_idx >= d_elem_offset[grid + 1]) grid++;
{
const PetscInt loc_nip = d_numCells[grid] * Nq, loc_Nf = d_species_offset[grid + 1] - d_species_offset[grid], loc_elem = b_elem_idx - d_elem_offset[grid];
const PetscInt moffset = LAND_MOFFSET(b_id, grid, gridDim.x, num_grids, d_mat_offset);
const PetscScalar *coef;
PetscReal u_x[LANDAU_DIM];
const PetscReal *invJ = &d_invJ[(d_ip_offset[grid] + loc_elem * Nq + myQi) * dim * dim];
PetscScalar coef_buff[LANDAU_MAX_SPECIES * LANDAU_MAX_NQ];
if (!d_maps) {
coef = &d_vertex_f[b_id * IPf_sz_glb + d_ipf_offset[grid] + loc_elem * Nb * loc_Nf]; // closure and IP indexing are the same
} else {
coef = coef_buff;
for (f = 0; f < loc_Nf; ++f) {
LandauIdx *const Idxs = &d_maps[grid]->gIdx[loc_elem][f][0];
for (b = 0; b < Nb; ++b) {
PetscInt idx = Idxs[b];
if (idx >= 0) {
coef_buff[f * Nb + b] = d_vertex_f[idx + moffset];
} else {
idx = -idx - 1;
coef_buff[f * Nb + b] = 0;
for (q = 0; q < d_maps[grid]->num_face; q++) {
PetscInt id = d_maps[grid]->c_maps[idx][q].gid;
PetscReal scale = d_maps[grid]->c_maps[idx][q].scale;
if (id >= 0) coef_buff[f * Nb + b] += scale * d_vertex_f[id + moffset];
}
}
}
}
}
/* get f and df */
for (f = threadIdx.x; f < loc_Nf; f += blockDim.x) {
PetscReal refSpaceDer[LANDAU_DIM];
const PetscInt idx = b_id * IPf_sz_glb + d_ipf_offset[grid] + f * loc_nip + loc_elem * Nq + myQi;
d_f[idx] = 0.0;
for (d = 0; d < LANDAU_DIM; ++d) refSpaceDer[d] = 0.0;
for (b = 0; b < Nb; ++b) {
const PetscInt cidx = b;
d_f[idx] += Bq[cidx] * PetscRealPart(coef[f * Nb + cidx]);
for (d = 0; d < dim; ++d) refSpaceDer[d] += Dq[cidx * dim + d] * PetscRealPart(coef[f * Nb + cidx]);
}
for (d = 0; d < dim; ++d) {
for (e = 0, u_x[d] = 0.0; e < dim; ++e) u_x[d] += invJ[e * dim + d] * refSpaceDer[e];
}
d_dfdx[idx] = u_x[0];
d_dfdy[idx] = u_x[1];
#if LANDAU_DIM == 3
d_dfdz[idx] = u_x[2];
#endif
}
}
}
__device__ void landau_jac_kernel(const PetscInt num_grids, const PetscInt jpidx, PetscInt nip_global, const PetscInt grid, const PetscReal xx[], const PetscReal yy[], const PetscReal ww[], const PetscReal invJj[], const PetscInt Nftot, const PetscReal nu_alpha[], const PetscReal nu_beta[], const PetscReal invMass[], const PetscReal Eq_m[], const PetscReal *const BB, const PetscReal *const DD, PetscScalar *elemMat, P4estVertexMaps *d_maps[], PetscSplitCSRDataStructure d_mat, // output
PetscScalar s_fieldMats[][LANDAU_MAX_NQ], // all these arrays are in shared memory
PetscReal s_scale[][LANDAU_MAX_Q_FACE], PetscInt s_idx[][LANDAU_MAX_Q_FACE], PetscReal s_g2[][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES], PetscReal s_g3[][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES], PetscReal s_gg2[][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES], PetscReal s_gg3[][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES], PetscReal s_nu_alpha[], PetscReal s_nu_beta[], PetscReal s_invMass[], PetscReal s_f[], PetscReal s_dfx[], PetscReal s_dfy[], PetscReal d_f[], PetscReal d_dfdx[], PetscReal d_dfdy[], // global memory
#if LANDAU_DIM == 3
const PetscReal zz[], PetscReal s_dfz[], PetscReal d_dfdz[],
#endif
const PetscInt d_numCells[], const PetscInt d_species_offset[], const PetscInt d_mat_offset[], const PetscInt d_ip_offset[], const PetscInt d_ipf_offset[], const PetscInt d_elem_offset[])
{
const PetscInt Nq = blockDim.y, myQi = threadIdx.y;
const PetscInt b_elem_idx = blockIdx.y, b_id = blockIdx.x, IPf_sz_glb = d_ipf_offset[num_grids];
const PetscInt loc_Nf = d_species_offset[grid + 1] - d_species_offset[grid], loc_elem = b_elem_idx - d_elem_offset[grid];
const PetscInt moffset = LAND_MOFFSET(b_id, grid, gridDim.x, num_grids, d_mat_offset);
int delta, d, f, g, d2, dp, d3, fieldA, ipidx_b;
PetscReal gg2_temp[LANDAU_DIM], gg3_temp[LANDAU_DIM][LANDAU_DIM];
#if LANDAU_DIM == 2
const PetscReal vj[3] = {xx[jpidx], yy[jpidx]};
constexpr int dim = 2;
#else
const PetscReal vj[3] = {xx[jpidx], yy[jpidx], zz[jpidx]};
constexpr int dim = 3;
#endif
const PetscInt f_off = d_species_offset[grid], Nb = Nq;
// create g2 & g3
for (f = threadIdx.x; f < loc_Nf; f += blockDim.x) {
for (d = 0; d < dim; d++) { // clear accumulation data D & K
s_gg2[d][myQi][f] = 0;
for (d2 = 0; d2 < dim; d2++) s_gg3[d][d2][myQi][f] = 0;
}
}
#pragma unroll
for (d2 = 0; d2 < dim; d2++) {
gg2_temp[d2] = 0;
#pragma unroll
for (d3 = 0; d3 < dim; d3++) gg3_temp[d2][d3] = 0;
}
if (threadIdx.y == 0) {
// copy species into shared memory
for (fieldA = threadIdx.x; fieldA < Nftot; fieldA += blockDim.x) {
s_nu_alpha[fieldA] = nu_alpha[fieldA];
s_nu_beta[fieldA] = nu_beta[fieldA];
s_invMass[fieldA] = invMass[fieldA];
}
}
__syncthreads();
// inner integral, collect gg2/3
for (ipidx_b = 0; ipidx_b < nip_global; ipidx_b += blockDim.x) {
const PetscInt ipidx = ipidx_b + threadIdx.x;
PetscInt f_off_r, grid_r, loc_Nf_r, nip_loc_r, ipidx_g, fieldB, IPf_idx_r;
__syncthreads();
if (ipidx < nip_global) {
grid_r = 0;
while (ipidx >= d_ip_offset[grid_r + 1]) grid_r++;
f_off_r = d_species_offset[grid_r];
ipidx_g = ipidx - d_ip_offset[grid_r];
nip_loc_r = d_numCells[grid_r] * Nq;
loc_Nf_r = d_species_offset[grid_r + 1] - d_species_offset[grid_r];
IPf_idx_r = b_id * IPf_sz_glb + d_ipf_offset[grid_r] + ipidx_g;
for (fieldB = threadIdx.y; fieldB < loc_Nf_r; fieldB += blockDim.y) {
const PetscInt idx = IPf_idx_r + fieldB * nip_loc_r;
s_f[fieldB * blockDim.x + threadIdx.x] = d_f[idx]; // all vector threads get copy of data
s_dfx[fieldB * blockDim.x + threadIdx.x] = d_dfdx[idx];
s_dfy[fieldB * blockDim.x + threadIdx.x] = d_dfdy[idx];
#if LANDAU_DIM == 3
s_dfz[fieldB * blockDim.x + threadIdx.x] = d_dfdz[idx];
#endif
}
}
__syncthreads();
if (ipidx < nip_global) {
const PetscReal wi = ww[ipidx], x = xx[ipidx], y = yy[ipidx];
PetscReal temp1[3] = {0, 0, 0}, temp2 = 0;
#if LANDAU_DIM == 2
PetscReal Ud[2][2], Uk[2][2], mask = (PetscAbs(vj[0] - x) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1] - y) < 100 * PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
LandauTensor2D(vj, x, y, Ud, Uk, mask);
#else
PetscReal U[3][3], z = zz[ipidx], mask = (PetscAbs(vj[0] - x) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1] - y) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[2] - z) < 100 * PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
LandauTensor3D(vj, x, y, z, U, mask);
#endif
for (int fieldB = 0; fieldB < loc_Nf_r; fieldB++) {
temp1[0] += s_dfx[fieldB * blockDim.x + threadIdx.x] * s_nu_beta[fieldB + f_off_r] * s_invMass[fieldB + f_off_r];
temp1[1] += s_dfy[fieldB * blockDim.x + threadIdx.x] * s_nu_beta[fieldB + f_off_r] * s_invMass[fieldB + f_off_r];
#if LANDAU_DIM == 3
temp1[2] += s_dfz[fieldB * blockDim.x + threadIdx.x] * s_nu_beta[fieldB + f_off_r] * s_invMass[fieldB + f_off_r];
#endif
temp2 += s_f[fieldB * blockDim.x + threadIdx.x] * s_nu_beta[fieldB + f_off_r];
}
temp1[0] *= wi;
temp1[1] *= wi;
#if LANDAU_DIM == 3
temp1[2] *= wi;
#endif
temp2 *= wi;
#if LANDAU_DIM == 2
#pragma unroll
for (d2 = 0; d2 < 2; d2++) {
#pragma unroll
for (d3 = 0; d3 < 2; ++d3) {
/* K = U * grad(f): g2=e: i,A */
gg2_temp[d2] += Uk[d2][d3] * temp1[d3];
/* D = -U * (I \kron (fx)): g3=f: i,j,A */
gg3_temp[d2][d3] += Ud[d2][d3] * temp2;
}
}
#else
#pragma unroll
for (d2 = 0; d2 < 3; ++d2) {
#pragma unroll
for (d3 = 0; d3 < 3; ++d3) {
/* K = U * grad(f): g2 = e: i,A */
gg2_temp[d2] += U[d2][d3] * temp1[d3];
/* D = -U * (I \kron (fx)): g3 = f: i,j,A */
gg3_temp[d2][d3] += U[d2][d3] * temp2;
}
}
#endif
}
} /* IPs */
/* reduce gg temp sums across threads */
for (delta = blockDim.x / 2; delta > 0; delta /= 2) {
#pragma unroll
for (d2 = 0; d2 < dim; d2++) {
gg2_temp[d2] += __shfl_xor_sync(0xffffffff, gg2_temp[d2], delta, blockDim.x);
#pragma unroll
for (d3 = 0; d3 < dim; d3++) gg3_temp[d2][d3] += __shfl_xor_sync(0xffffffff, gg3_temp[d2][d3], delta, blockDim.x);
}
}
// add alpha and put in gg2/3
for (fieldA = threadIdx.x; fieldA < loc_Nf; fieldA += blockDim.x) {
#pragma unroll
for (d2 = 0; d2 < dim; d2++) {
s_gg2[d2][myQi][fieldA] += gg2_temp[d2] * s_nu_alpha[fieldA + f_off];
#pragma unroll
for (d3 = 0; d3 < dim; d3++) s_gg3[d2][d3][myQi][fieldA] -= gg3_temp[d2][d3] * s_nu_alpha[fieldA + f_off] * s_invMass[fieldA + f_off];
}
}
__syncthreads();
/* add electric field term once per IP */
for (fieldA = threadIdx.x; fieldA < loc_Nf; fieldA += blockDim.x) s_gg2[dim - 1][myQi][fieldA] += Eq_m[fieldA + f_off];
__syncthreads();
/* Jacobian transform - g2 */
for (fieldA = threadIdx.x; fieldA < loc_Nf; fieldA += blockDim.x) {
PetscReal wj = ww[jpidx];
for (d = 0; d < dim; ++d) {
s_g2[d][myQi][fieldA] = 0.0;
for (d2 = 0; d2 < dim; ++d2) {
s_g2[d][myQi][fieldA] += invJj[d * dim + d2] * s_gg2[d2][myQi][fieldA];
s_g3[d][d2][myQi][fieldA] = 0.0;
for (d3 = 0; d3 < dim; ++d3) {
for (dp = 0; dp < dim; ++dp) s_g3[d][d2][myQi][fieldA] += invJj[d * dim + d3] * s_gg3[d3][dp][myQi][fieldA] * invJj[d2 * dim + dp];
}
s_g3[d][d2][myQi][fieldA] *= wj;
}
s_g2[d][myQi][fieldA] *= wj;
}
}
__syncthreads(); // Synchronize (ensure all the data is available) and sum IP matrices
/* FE matrix construction */
{
int fieldA, d, qj, d2, q, idx, totDim = Nb * loc_Nf;
/* assemble */
for (fieldA = 0; fieldA < loc_Nf; fieldA++) {
for (f = threadIdx.y; f < Nb; f += blockDim.y) {
for (g = threadIdx.x; g < Nb; g += blockDim.x) {
PetscScalar t = 0;
for (qj = 0; qj < Nq; qj++) {
const PetscReal *BJq = &BB[qj * Nb], *DIq = &DD[qj * Nb * dim];
for (d = 0; d < dim; ++d) {
t += DIq[f * dim + d] * s_g2[d][qj][fieldA] * BJq[g];
for (d2 = 0; d2 < dim; ++d2) t += DIq[f * dim + d] * s_g3[d][d2][qj][fieldA] * DIq[g * dim + d2];
}
}
if (elemMat) {
const PetscInt fOff = (fieldA * Nb + f) * totDim + fieldA * Nb + g;
elemMat[fOff] += t; // ????
} else s_fieldMats[f][g] = t;
}
}
if (s_fieldMats) {
PetscScalar vals[LANDAU_MAX_Q_FACE * LANDAU_MAX_Q_FACE];
PetscInt nr, nc;
const LandauIdx *const Idxs = &d_maps[grid]->gIdx[loc_elem][fieldA][0];
__syncthreads();
if (threadIdx.y == 0) {
for (f = threadIdx.x; f < Nb; f += blockDim.x) {
idx = Idxs[f];
if (idx >= 0) {
s_idx[f][0] = idx + moffset;
s_scale[f][0] = 1.;
} else {
idx = -idx - 1;
for (q = 0; q < d_maps[grid]->num_face; q++) {
if (d_maps[grid]->c_maps[idx][q].gid >= 0) s_idx[f][q] = d_maps[grid]->c_maps[idx][q].gid + moffset;
else s_idx[f][q] = -1;
s_scale[f][q] = d_maps[grid]->c_maps[idx][q].scale;
}
}
}
}
__syncthreads();
for (f = threadIdx.y; f < Nb; f += blockDim.y) {
idx = Idxs[f];
if (idx >= 0) {
nr = 1;
} else {
nr = d_maps[grid]->num_face;
}
for (g = threadIdx.x; g < Nb; g += blockDim.x) {
idx = Idxs[g];
if (idx >= 0) {
nc = 1;
} else {
nc = d_maps[grid]->num_face;
}
for (q = 0; q < nr; q++) {
for (d = 0; d < nc; d++) vals[q * nc + d] = s_scale[f][q] * s_scale[g][d] * s_fieldMats[f][g];
}
MatSetValuesDevice(d_mat, nr, s_idx[f], nc, s_idx[g], vals, ADD_VALUES);
}
}
__syncthreads();
}
}
}
}
//
// The CUDA Landau kernel
//
__global__ void __launch_bounds__(256, 2) landau_jacobian(const PetscInt nip_global, const PetscInt dim, const PetscInt Nb, const PetscInt num_grids, const PetscReal invJj[], const PetscInt Nftot, const PetscReal nu_alpha[], const PetscReal nu_beta[], const PetscReal invMass[], const PetscReal Eq_m[], const PetscReal *const BB, const PetscReal *const DD, const PetscReal xx[], const PetscReal yy[], const PetscReal ww[], PetscScalar d_elem_mats[], P4estVertexMaps *d_maps[], PetscSplitCSRDataStructure d_mat, PetscReal d_f[], PetscReal d_dfdx[], PetscReal d_dfdy[],
#if LANDAU_DIM == 3
const PetscReal zz[], PetscReal d_dfdz[],
#endif
const PetscInt d_numCells[], const PetscInt d_species_offset[], const PetscInt d_mat_offset[], const PetscInt d_ip_offset[], const PetscInt d_ipf_offset[], const PetscInt d_elem_offset[])
{
extern __shared__ PetscReal smem[];
int size = 0;
PetscReal(*s_g2)[LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES] = (PetscReal(*)[LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES]) & smem[size];
size += LANDAU_MAX_NQ * LANDAU_MAX_SPECIES * LANDAU_DIM;
PetscReal(*s_g3)[LANDAU_DIM][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES] = (PetscReal(*)[LANDAU_DIM][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES]) & smem[size];
size += LANDAU_DIM * LANDAU_DIM * LANDAU_MAX_NQ * LANDAU_MAX_SPECIES;
PetscReal(*s_gg2)[LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES] = (PetscReal(*)[LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES]) & smem[size];
size += LANDAU_MAX_NQ * LANDAU_MAX_SPECIES * LANDAU_DIM;
PetscReal(*s_gg3)[LANDAU_DIM][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES] = (PetscReal(*)[LANDAU_DIM][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES]) & smem[size];
size += LANDAU_DIM * LANDAU_DIM * LANDAU_MAX_NQ * LANDAU_MAX_SPECIES;
PetscReal *s_nu_alpha = &smem[size];
size += LANDAU_MAX_SPECIES;
PetscReal *s_nu_beta = &smem[size];
size += LANDAU_MAX_SPECIES;
PetscReal *s_invMass = &smem[size];
size += LANDAU_MAX_SPECIES;
PetscReal *s_f = &smem[size];
size += blockDim.x * LANDAU_MAX_SPECIES;
PetscReal *s_dfx = &smem[size];
size += blockDim.x * LANDAU_MAX_SPECIES;
PetscReal *s_dfy = &smem[size];
size += blockDim.x * LANDAU_MAX_SPECIES;
#if LANDAU_DIM == 3
PetscReal *s_dfz = &smem[size];
size += blockDim.x * LANDAU_MAX_SPECIES;
#endif
PetscScalar(*s_fieldMats)[LANDAU_MAX_NQ][LANDAU_MAX_NQ];
PetscReal(*s_scale)[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE];
PetscInt(*s_idx)[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE];
const PetscInt b_elem_idx = blockIdx.y, b_id = blockIdx.x;
PetscInt Nq = blockDim.y, grid = 0; // Nq == Nb
PetscScalar *elemMat = NULL; /* my output */
while (b_elem_idx >= d_elem_offset[grid + 1]) grid++;
{
const PetscInt loc_Nf = d_species_offset[grid + 1] - d_species_offset[grid], loc_elem = b_elem_idx - d_elem_offset[grid];
const PetscInt myQi = threadIdx.y;
const PetscInt jpidx = d_ip_offset[grid] + myQi + loc_elem * Nq;
const PetscReal *invJ = &invJj[jpidx * dim * dim];
if (d_elem_mats) {
PetscInt totDim = loc_Nf * Nb;
      elemMat = d_elem_mats; // start at the beginning and get to my element matrix
for (PetscInt b_id2 = 0; b_id2 < b_id; b_id2++) {
for (PetscInt grid2 = 0; grid2 < num_grids; grid2++) {
PetscInt Nfloc2 = d_species_offset[grid2 + 1] - d_species_offset[grid2], totDim2 = Nfloc2 * Nb;
elemMat += d_numCells[grid2] * totDim2 * totDim2; // jump past grids,could be in an offset
}
}
for (PetscInt grid2 = 0; grid2 < grid; grid2++) {
PetscInt Nfloc2 = d_species_offset[grid2 + 1] - d_species_offset[grid2], totDim2 = Nfloc2 * Nb;
elemMat += d_numCells[grid2] * totDim2 * totDim2; // jump past grids, could be in an offset
}
elemMat += loc_elem * totDim * totDim; // index into local matrix & zero out
for (int i = threadIdx.x + threadIdx.y * blockDim.x; i < totDim * totDim; i += blockDim.x * blockDim.y) elemMat[i] = 0;
}
__syncthreads();
if (d_maps) {
// reuse the space for fieldMats
s_fieldMats = (PetscScalar(*)[LANDAU_MAX_NQ][LANDAU_MAX_NQ]) & smem[size];
size += LANDAU_MAX_NQ * LANDAU_MAX_NQ;
s_scale = (PetscReal(*)[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE]) & smem[size];
size += LANDAU_MAX_NQ * LANDAU_MAX_Q_FACE;
s_idx = (PetscInt(*)[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE]) & smem[size];
size += LANDAU_MAX_NQ * LANDAU_MAX_Q_FACE; // this is too big, idx is an integer
} else {
s_fieldMats = NULL;
}
__syncthreads();
landau_jac_kernel(num_grids, jpidx, nip_global, grid, xx, yy, ww, invJ, Nftot, nu_alpha, nu_beta, invMass, Eq_m, BB, DD, elemMat, d_maps, d_mat, *s_fieldMats, *s_scale, *s_idx, *s_g2, *s_g3, *s_gg2, *s_gg3, s_nu_alpha, s_nu_beta, s_invMass, s_f, s_dfx, s_dfy, d_f, d_dfdx, d_dfdy,
#if LANDAU_DIM == 3
zz, s_dfz, d_dfdz,
#endif
d_numCells, d_species_offset, d_mat_offset, d_ip_offset, d_ipf_offset, d_elem_offset);
}
}
__global__ void __launch_bounds__(256, 4) landau_mass(const PetscInt dim, const PetscInt Nb, const PetscInt num_grids, const PetscReal d_w[], const PetscReal *const BB, const PetscReal *const DD, PetscScalar d_elem_mats[], P4estVertexMaps *d_maps[], PetscSplitCSRDataStructure d_mat, PetscReal shift, const PetscInt d_numCells[], const PetscInt d_species_offset[], const PetscInt d_mat_offset[], const PetscInt d_ip_offset[], const PetscInt d_elem_offset[])
{
extern __shared__ PetscReal smem[];
const PetscInt Nq = blockDim.y, b_elem_idx = blockIdx.y, b_id = blockIdx.x;
PetscScalar *elemMat = NULL; /* my output */
PetscInt fieldA, d, qj, q, idx, f, g, grid = 0, size = 0;
PetscScalar(*s_fieldMats)[LANDAU_MAX_NQ][LANDAU_MAX_NQ];
PetscReal(*s_scale)[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE];
PetscInt(*s_idx)[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE];
if (d_maps) {
// reuse the space for fieldMats
s_fieldMats = (PetscScalar(*)[LANDAU_MAX_NQ][LANDAU_MAX_NQ]) & smem[size];
size += LANDAU_MAX_NQ * LANDAU_MAX_NQ;
s_scale = (PetscReal(*)[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE]) & smem[size];
size += LANDAU_MAX_NQ * LANDAU_MAX_Q_FACE;
s_idx = (PetscInt(*)[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE]) & smem[size];
size += LANDAU_MAX_NQ * LANDAU_MAX_Q_FACE; // this is too big, idx is an integer
} else {
s_fieldMats = NULL;
}
while (b_elem_idx >= d_elem_offset[grid + 1]) grid++;
{
const PetscInt loc_Nf = d_species_offset[grid + 1] - d_species_offset[grid], loc_elem = b_elem_idx - d_elem_offset[grid];
const PetscInt moffset = LAND_MOFFSET(b_id, grid, gridDim.x, num_grids, d_mat_offset), totDim = loc_Nf * Nq;
if (d_elem_mats) {
      elemMat = d_elem_mats; // start at the beginning
for (PetscInt b_id2 = 0; b_id2 < b_id; b_id2++) {
for (PetscInt grid2 = 0; grid2 < num_grids; grid2++) {
PetscInt Nfloc2 = d_species_offset[grid2 + 1] - d_species_offset[grid2], totDim2 = Nfloc2 * Nb;
elemMat += d_numCells[grid2] * totDim2 * totDim2; // jump past grids,could be in an offset
}
}
for (PetscInt grid2 = 0; grid2 < grid; grid2++) {
PetscInt Nfloc2 = d_species_offset[grid2 + 1] - d_species_offset[grid2], totDim2 = Nfloc2 * Nb;
elemMat += d_numCells[grid2] * totDim2 * totDim2; // jump past grids,could be in an offset
}
elemMat += loc_elem * totDim * totDim;
for (int i = threadIdx.x + threadIdx.y * blockDim.x; i < totDim * totDim; i += blockDim.x * blockDim.y) elemMat[i] = 0;
}
__syncthreads();
/* FE mass matrix construction */
for (fieldA = 0; fieldA < loc_Nf; fieldA++) {
PetscScalar vals[LANDAU_MAX_Q_FACE * LANDAU_MAX_Q_FACE];
PetscInt nr, nc;
for (f = threadIdx.y; f < Nb; f += blockDim.y) {
for (g = threadIdx.x; g < Nb; g += blockDim.x) {
PetscScalar t = 0;
for (qj = 0; qj < Nq; qj++) {
const PetscReal *BJq = &BB[qj * Nb];
const PetscInt jpidx = d_ip_offset[grid] + qj + loc_elem * Nq;
if (dim == 2) {
t += BJq[f] * d_w[jpidx] * shift * BJq[g] * 2. * PETSC_PI;
} else {
t += BJq[f] * d_w[jpidx] * shift * BJq[g];
}
}
if (elemMat) {
const PetscInt fOff = (fieldA * Nb + f) * totDim + fieldA * Nb + g;
elemMat[fOff] += t; // ????
} else (*s_fieldMats)[f][g] = t;
}
}
if (!elemMat) {
const LandauIdx *const Idxs = &d_maps[grid]->gIdx[loc_elem][fieldA][0];
__syncthreads();
if (threadIdx.y == 0) {
for (f = threadIdx.x; f < Nb; f += blockDim.x) {
idx = Idxs[f];
if (idx >= 0) {
(*s_idx)[f][0] = idx + moffset;
(*s_scale)[f][0] = 1.;
} else {
idx = -idx - 1;
for (q = 0; q < d_maps[grid]->num_face; q++) {
if (d_maps[grid]->c_maps[idx][q].gid >= 0) (*s_idx)[f][q] = d_maps[grid]->c_maps[idx][q].gid + moffset;
else (*s_idx)[f][q] = -1;
(*s_scale)[f][q] = d_maps[grid]->c_maps[idx][q].scale;
}
}
}
}
__syncthreads();
for (f = threadIdx.y; f < Nb; f += blockDim.y) {
idx = Idxs[f];
if (idx >= 0) {
nr = 1;
} else {
nr = d_maps[grid]->num_face;
}
for (g = threadIdx.x; g < Nb; g += blockDim.x) {
idx = Idxs[g];
if (idx >= 0) {
nc = 1;
} else {
nc = d_maps[grid]->num_face;
}
for (q = 0; q < nr; q++) {
for (d = 0; d < nc; d++) vals[q * nc + d] = (*s_scale)[f][q] * (*s_scale)[g][d] * (*s_fieldMats)[f][g];
}
MatSetValuesDevice(d_mat, nr, (*s_idx)[f], nc, (*s_idx)[g], vals, ADD_VALUES);
}
}
}
__syncthreads();
}
}
}
PetscErrorCode LandauCUDAJacobian(DM plex[], const PetscInt Nq, const PetscInt batch_sz, const PetscInt num_grids, const PetscInt a_numCells[], PetscReal a_Eq_m[], PetscScalar a_elem_closure[], const PetscScalar a_xarray[], const LandauStaticData *SData_d, const PetscReal shift, const PetscLogEvent events[], const PetscInt a_mat_offset[], const PetscInt a_species_offset[], Mat subJ[], Mat JacP)
{
cudaError_t cerr;
PetscInt Nb = Nq, dim, nip_global, num_cells_batch, elem_mat_size_tot;
PetscInt *d_numCells, *d_species_offset, *d_mat_offset, *d_ip_offset, *d_ipf_offset, *d_elem_offset;
PetscInt szf = sizeof(PetscReal), szs = sizeof(PetscScalar), Nftot = a_species_offset[num_grids];
PetscReal *d_BB = NULL, *d_DD = NULL, *d_invJj = NULL, *d_nu_alpha = NULL, *d_nu_beta = NULL, *d_invMass = NULL, *d_Eq_m = NULL, *d_x = NULL, *d_y = NULL, *d_w = NULL;
PetscScalar *d_elem_mats = NULL, *d_vertex_f = NULL;
PetscReal *d_f = NULL, *d_dfdx = NULL, *d_dfdy = NULL;
#if LANDAU_DIM == 3
PetscReal *d_dfdz = NULL, *d_z = NULL;
#endif
LandauCtx *ctx;
PetscSplitCSRDataStructure d_mat = NULL;
P4estVertexMaps **d_maps, *maps[LANDAU_MAX_GRIDS];
int nnn = 256 / Nq; // machine dependent
PetscContainer container;
PetscFunctionBegin;
PetscCall(PetscLogEventBegin(events[3], 0, 0, 0, 0));
while (nnn & nnn - 1) nnn = nnn & nnn - 1;
if (nnn > 16) nnn = 16;
PetscCall(DMGetApplicationContext(plex[0], &ctx));
PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
PetscCall(DMGetDimension(plex[0], &dim));
PetscCheck(dim == LANDAU_DIM, PETSC_COMM_SELF, PETSC_ERR_PLIB, "LANDAU_DIM %d != dim %" PetscInt_FMT, LANDAU_DIM, dim);
if (ctx->gpu_assembly) {
PetscCall(PetscObjectQuery((PetscObject)JacP, "assembly_maps", (PetscObject *)&container));
if (container) { // not here first call
static int init = 0; // hack. just do every time, or put in setup (but that is in base class code), or add init_maps flag
if (!init++) {
P4estVertexMaps *h_maps = NULL;
PetscCall(PetscContainerGetPointer(container, (void **)&h_maps));
for (PetscInt grid = 0; grid < num_grids; grid++) {
if (h_maps[grid].d_self) {
maps[grid] = h_maps[grid].d_self;
} else {
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "GPU assembly but no metadata in container");
}
}
PetscCallCUDA(cudaMemcpy(SData_d->maps, maps, num_grids * sizeof(P4estVertexMaps *), cudaMemcpyHostToDevice));
}
d_maps = (P4estVertexMaps **)SData_d->maps;
// this does the setup the first time called
PetscCall(MatCUSPARSEGetDeviceMatWrite(JacP, &d_mat));
} else {
d_maps = NULL;
}
} else {
container = NULL;
d_maps = NULL;
}
PetscCall(PetscLogEventEnd(events[3], 0, 0, 0, 0));
{
PetscInt elem_mat_size = 0;
nip_global = num_cells_batch = 0;
for (PetscInt grid = 0; grid < num_grids; grid++) {
PetscInt Nfloc = a_species_offset[grid + 1] - a_species_offset[grid], totDim = Nfloc * Nb;
nip_global += a_numCells[grid] * Nq;
num_cells_batch += a_numCells[grid]; // is in d_elem_offset, but not on host
elem_mat_size += a_numCells[grid] * totDim * totDim; // could save in an offset here -- batch major ordering
}
elem_mat_size_tot = d_maps ? 0 : elem_mat_size;
}
dim3 dimGrid(batch_sz, num_cells_batch);
if (elem_mat_size_tot) {
PetscCallCUDA(cudaMalloc((void **)&d_elem_mats, batch_sz * elem_mat_size_tot * szs)); // kernel output - first call is on CPU
} else d_elem_mats = NULL;
// create data
d_BB = (PetscReal *)SData_d->B;
d_DD = (PetscReal *)SData_d->D;
if (a_elem_closure || a_xarray) { // form f and df
PetscCall(PetscLogEventBegin(events[1], 0, 0, 0, 0));
PetscCallCUDA(cudaMemcpy(SData_d->Eq_m, a_Eq_m, Nftot * szf, cudaMemcpyHostToDevice));
d_invJj = (PetscReal *)SData_d->invJ;
d_nu_alpha = (PetscReal *)SData_d->alpha;
d_nu_beta = (PetscReal *)SData_d->beta;
d_invMass = (PetscReal *)SData_d->invMass;
d_x = (PetscReal *)SData_d->x;
d_y = (PetscReal *)SData_d->y;
d_w = (PetscReal *)SData_d->w;
d_Eq_m = (PetscReal *)SData_d->Eq_m;
d_dfdx = (PetscReal *)SData_d->dfdx;
d_dfdy = (PetscReal *)SData_d->dfdy;
#if LANDAU_DIM == 3
d_dfdz = (PetscReal *)SData_d->dfdz;
d_z = (PetscReal *)SData_d->z;
#endif
d_f = (PetscReal *)SData_d->f;
// get a d_vertex_f
if (a_elem_closure) {
PetscInt closure_sz = 0; // argh, don't have this on the host!!!
for (PetscInt grid = 0; grid < num_grids; grid++) {
PetscInt nfloc = a_species_offset[grid + 1] - a_species_offset[grid];
closure_sz += Nq * nfloc * a_numCells[grid];
}
closure_sz *= batch_sz;
PetscCallCUDA(cudaMalloc((void **)&d_vertex_f, closure_sz * sizeof(*a_elem_closure)));
PetscCallCUDA(cudaMemcpy(d_vertex_f, a_elem_closure, closure_sz * sizeof(*a_elem_closure), cudaMemcpyHostToDevice));
} else {
d_vertex_f = (PetscScalar *)a_xarray;
}
PetscCall(PetscLogEventEnd(events[1], 0, 0, 0, 0));
} else {
d_w = (PetscReal *)SData_d->w; // mass just needs the weights
}
//
d_numCells = (PetscInt *)SData_d->NCells; // redundant -- remove
d_species_offset = (PetscInt *)SData_d->species_offset;
d_mat_offset = (PetscInt *)SData_d->mat_offset;
d_ip_offset = (PetscInt *)SData_d->ip_offset;
d_ipf_offset = (PetscInt *)SData_d->ipf_offset;
d_elem_offset = (PetscInt *)SData_d->elem_offset;
if (a_elem_closure || a_xarray) { // form f and df
dim3 dimBlockFDF(nnn > Nftot ? Nftot : nnn, Nq), dimBlock((nip_global > nnn) ? nnn : nip_global, Nq);
PetscCall(PetscLogEventBegin(events[8], 0, 0, 0, 0));
PetscCall(PetscLogGpuTimeBegin());
PetscCall(PetscInfo(plex[0], "Form F and dF/dx vectors: nip_global=%" PetscInt_FMT " num_grids=%" PetscInt_FMT "\n", nip_global, num_grids));
landau_form_fdf<<<dimGrid, dimBlockFDF>>>(dim, Nb, num_grids, d_invJj, d_BB, d_DD, d_vertex_f, d_maps, d_f, d_dfdx, d_dfdy,
#if LANDAU_DIM == 3
d_dfdz,
#endif
d_numCells, d_species_offset, d_mat_offset, d_ip_offset, d_ipf_offset, d_elem_offset);
PetscCUDACheckLaunch;
PetscCall(PetscLogGpuFlops(batch_sz * nip_global * (PetscLogDouble)(2 * Nb * (1 + dim))));
if (a_elem_closure) {
PetscCallCUDA(cudaFree(d_vertex_f));
d_vertex_f = NULL;
}
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogEventEnd(events[8], 0, 0, 0, 0));
// Jacobian
PetscCall(PetscLogEventBegin(events[4], 0, 0, 0, 0));
PetscCall(PetscLogGpuTimeBegin());
PetscCall(PetscLogGpuFlops(batch_sz * nip_global * (PetscLogDouble)(a_elem_closure ? (nip_global * (11 * Nftot + 4 * dim * dim) + 6 * Nftot * dim * dim * dim + 10 * Nftot * dim * dim + 4 * Nftot * dim + Nb * Nftot * Nb * Nq * dim * dim * 5) : Nb * Nftot * Nb * Nq * 4)));
PetscInt ii = 2 * LANDAU_MAX_NQ * LANDAU_MAX_SPECIES * LANDAU_DIM * (1 + LANDAU_DIM) + 3 * LANDAU_MAX_SPECIES + (1 + LANDAU_DIM) * dimBlock.x * LANDAU_MAX_SPECIES + LANDAU_MAX_NQ * LANDAU_MAX_NQ + 2 * LANDAU_MAX_NQ * LANDAU_MAX_Q_FACE;
if (ii * szf >= 49152) {
cerr = cudaFuncSetAttribute(landau_jacobian, cudaFuncAttributeMaxDynamicSharedMemorySize, 98304);
PetscCallCUDA(cerr);
}
PetscCall(PetscInfo(plex[0], "Jacobian shared memory size: %" PetscInt_FMT " bytes, d_elem_mats=%p d_maps=%p\n", ii, d_elem_mats, d_maps));
landau_jacobian<<<dimGrid, dimBlock, ii * szf>>>(nip_global, dim, Nb, num_grids, d_invJj, Nftot, d_nu_alpha, d_nu_beta, d_invMass, d_Eq_m, d_BB, d_DD, d_x, d_y, d_w, d_elem_mats, d_maps, d_mat, d_f, d_dfdx, d_dfdy,
#if LANDAU_DIM == 3
d_z, d_dfdz,
#endif
d_numCells, d_species_offset, d_mat_offset, d_ip_offset, d_ipf_offset, d_elem_offset);
PetscCUDACheckLaunch; // has sync
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogEventEnd(events[4], 0, 0, 0, 0));
} else { // mass
dim3 dimBlock(nnn, Nq);
PetscInt ii = LANDAU_MAX_NQ * LANDAU_MAX_NQ + 2 * LANDAU_MAX_NQ * LANDAU_MAX_Q_FACE;
if (ii * szf >= 49152) {
cerr = cudaFuncSetAttribute(landau_mass, cudaFuncAttributeMaxDynamicSharedMemorySize, 98304);
PetscCallCUDA(cerr);
}
PetscCall(PetscInfo(plex[0], "Mass d_maps = %p. Nq=%" PetscInt_FMT ", vector size %d num_cells_batch=%" PetscInt_FMT ", %" PetscInt_FMT " shared memory words\n", d_maps, Nq, nnn, num_cells_batch, ii));
PetscCall(PetscLogEventBegin(events[16], 0, 0, 0, 0));
PetscCall(PetscLogGpuTimeBegin());
landau_mass<<<dimGrid, dimBlock, ii * szf>>>(dim, Nb, num_grids, d_w, d_BB, d_DD, d_elem_mats, d_maps, d_mat, shift, d_numCells, d_species_offset, d_mat_offset, d_ip_offset, d_elem_offset);
PetscCUDACheckLaunch; // has sync
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogEventEnd(events[16], 0, 0, 0, 0));
}
// First time assembly with or without GPU assembly
if (d_elem_mats) {
PetscInt elem_mats_idx = 0;
for (PetscInt b_id = 0; b_id < batch_sz; b_id++) { // OpenMP (once)
for (PetscInt grid = 0; grid < num_grids; grid++) { // elem_mats_idx += totDim*totDim*a_numCells[grid];
const PetscInt Nfloc = a_species_offset[grid + 1] - a_species_offset[grid], totDim = Nfloc * Nq;
PetscScalar *elemMats = NULL, *elMat;
PetscSection section, globalSection;
PetscInt cStart, cEnd, ej;
PetscInt moffset = LAND_MOFFSET(b_id, grid, batch_sz, num_grids, a_mat_offset), nloc, nzl, colbuf[1024], row;
const PetscInt *cols;
const PetscScalar *vals;
Mat B = subJ[LAND_PACK_IDX(b_id, grid)];
PetscCall(PetscLogEventBegin(events[5], 0, 0, 0, 0));
PetscCall(DMPlexGetHeightStratum(plex[grid], 0, &cStart, &cEnd));
PetscCall(DMGetLocalSection(plex[grid], §ion));
PetscCall(DMGetGlobalSection(plex[grid], &globalSection));
PetscCall(PetscMalloc1(totDim * totDim * a_numCells[grid], &elemMats));
PetscCallCUDA(cudaMemcpy(elemMats, &d_elem_mats[elem_mats_idx], totDim * totDim * a_numCells[grid] * sizeof(*elemMats), cudaMemcpyDeviceToHost));
PetscCall(PetscLogEventEnd(events[5], 0, 0, 0, 0));
PetscCall(PetscLogEventBegin(events[6], 0, 0, 0, 0));
for (ej = cStart, elMat = elemMats; ej < cEnd; ++ej, elMat += totDim * totDim) {
PetscCall(DMPlexMatSetClosure(plex[grid], section, globalSection, B, ej, elMat, ADD_VALUES));
if (ej == -1) {
int d, f;
PetscPrintf(PETSC_COMM_SELF, "GPU Element matrix\n");
for (d = 0; d < totDim; ++d) {
for (f = 0; f < totDim; ++f) PetscPrintf(PETSC_COMM_SELF, " %12.5e", PetscRealPart(elMat[d * totDim + f]));
PetscPrintf(PETSC_COMM_SELF, "\n");
}
}
}
PetscCall(PetscFree(elemMats));
PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY));
PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY));
// move nest matrix to global JacP
PetscCall(MatGetSize(B, &nloc, NULL));
for (int i = 0; i < nloc; i++) {
PetscCall(MatGetRow(B, i, &nzl, &cols, &vals));
PetscCheck(nzl <= 1024, PetscObjectComm((PetscObject)B), PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT, nzl);
for (int j = 0; j < nzl; j++) colbuf[j] = cols[j] + moffset;
row = i + moffset;
PetscCall(MatSetValues(JacP, 1, &row, nzl, colbuf, vals, ADD_VALUES));
PetscCall(MatRestoreRow(B, i, &nzl, &cols, &vals));
}
PetscCall(MatDestroy(&B));
PetscCall(PetscLogEventEnd(events[6], 0, 0, 0, 0));
elem_mats_idx += totDim * totDim * a_numCells[grid]; // this can be a stored offset?
} // grids
}
PetscCheck(elem_mats_idx == batch_sz * elem_mat_size_tot, PetscObjectComm((PetscObject)JacP), PETSC_ERR_PLIB, "elem_mats_idx != batch_sz*elem_mat_size_tot: %" PetscInt_FMT " %" PetscInt_FMT, elem_mats_idx, batch_sz * elem_mat_size_tot);
PetscCallCUDA(cudaFree(d_elem_mats));
}
PetscFunctionReturn(0);
}
|
16425102c2dd1906784c6704ffac67bfc1ec3e23.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//---------------------------------------------------------------------------------------------------
// prefix "g" for pointer pointing to "Global" memory space in device
// prefix "s" for pointer pointing to "Shared" memory space in device
//
// prefix "I/J" for varialbe of "I/J" particles
// each thread calculates the acceleration, and jerk of "(I_Base+tx)th" "I" particle
// from all "J" particles
//
// "I" particles are packed into "N/GroupSize_I" "IGroup"s with "GroupSize_I" particles within each group
// "J" particles are packed into "N/GroupSize_J" "JGroup"s with "GroupSize_J" particles within each group
// Variables Definition:
// NSplit : number of times to split
// Segment : a segment of threads containing all I particles and ceil to a multiple of block size
// Pseudo-Particle : the pseudo IDs of I-particles when ceiling the size of segment to a multiple of block size
// : eg. Ni_perSeg = 128, Ni = 126 --> Pseudo-Particle Ids = 126,127
// Ni_perSeg : number of I particles per segment
// eg. BLOCK_SIZE=128, Ni=129~256 --> Ni_perSeg = 128*2 = 256
// eg. BLOCK_SIZE=64, Ni=129~192 --> Ni_perSeg = 64*3 = 192
// Ni_allSeg : total number of I particles in all segments (including the pseudo-particles)
// NBlock_perSeg : number of blocks in each segment
// NthSeg : the segment being calculated (0, 1, ... NSplit-1)
// Nj_afterSplit_List : number of J-particles calculated by each segment
//---------------------------------------------------------------------------------------------------
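// Illustrative example (hypothetical numbers): BLOCK_SIZE=128, GRID_SIZE=2, Ni=200, NSplit=2
// -> Ni_perSeg = 256 (200 ceiled to a multiple of 128), NBlock_perSeg = 2, Ni_allSeg = 512;
// segment 0 handles the first Nj_afterSplit_List[0] J-particles and segment 1 the remainder,
// while a thread maps to pseudo index ii = (I_Base+tx) % Ni_perSeg and real particle i = ii % Ni
//---------------------------------------------------------------------------------------------------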
#include "Dori.h"
#define I_Start __umul24(bx, BLOCK_SIZE)
#define J_Start __umul24(NthSeg, Nj_afterSplit_List[0])
#define I_End Ni_allSeg
#define J_End ( J_Start + Nj_afterSplit_List[NthSeg] )
#define GroupSize_I GRID_SIZE * BLOCK_SIZE
#define GroupSize_J BLOCK_SIZE
__global__ void CUCAL_Acc_Jerk_Split( const int Nj, real gJ_Mass[], real gJ_Pos[][3],
real gJ_Vel[][3], const int Ni, real gI_Pos[][3], real gI_Vel[][3],
real gI_Acc[][3], real gI_Jerk[][3], const real Eps2,
const unsigned int NSplit, const unsigned int NBlock_perSeg,
const unsigned int Ni_perSeg, const unsigned int Ni_allSeg,
const unsigned int Nj_afterSplit_List[] )
{
const unsigned int tx = threadIdx.x;
const unsigned int bx = blockIdx.x;
__shared__ real sJ_Mass [BLOCK_SIZE];
__shared__ real sJ_Pos_x[BLOCK_SIZE];
__shared__ real sJ_Pos_y[BLOCK_SIZE];
__shared__ real sJ_Pos_z[BLOCK_SIZE];
__shared__ real sJ_Vel_x[BLOCK_SIZE];
__shared__ real sJ_Vel_y[BLOCK_SIZE];
__shared__ real sJ_Vel_z[BLOCK_SIZE];
int I_Iter = 0;
// (I/J)_Base : Base Address for (I/J)Group
for ( int I_Base=I_Start; I_Base<I_End; I_Base+=GroupSize_I, I_Iter++ )
{
real_acc Acc [3] = { (real_acc)0.0, (real_acc)0.0, (real_acc)0.0 };
real_acc Jerk[3] = { (real_acc)0.0, (real_acc)0.0, (real_acc)0.0 };
int NthSeg = (bx+I_Iter*GRID_SIZE) / NBlock_perSeg;
int I_Seq = I_Base+tx;
int ii = I_Seq%Ni_perSeg;
int i = ii%Ni; // prevent from loading the pseudo-I-particles
real I_Pos_x = gI_Pos[i][0];
real I_Pos_y = gI_Pos[i][1];
real I_Pos_z = gI_Pos[i][2];
real I_Vel_x = gI_Vel[i][0];
real I_Vel_y = gI_Vel[i][1];
real I_Vel_z = gI_Vel[i][2];
for (int J_Base=J_Start; J_Base<J_End; J_Base+=GroupSize_J)
{
int jj = J_Base+tx;
int j = jj%Nj; // deal with the case that Nj is not a multiple of block size
sJ_Mass [tx] = gJ_Mass[j];
sJ_Pos_x[tx] = gJ_Pos [j][0];
sJ_Pos_y[tx] = gJ_Pos [j][1];
sJ_Pos_z[tx] = gJ_Pos [j][2];
sJ_Vel_x[tx] = gJ_Vel [j][0];
sJ_Vel_y[tx] = gJ_Vel [j][1];
sJ_Vel_z[tx] = gJ_Vel [j][2];
__syncthreads();
// k : kth particle in JGroup
for (int k=0; k<GroupSize_J; k++)
{
# ifndef N_IS_MULTIPLE_OF_BS
int kk = J_Base+k;
# endif
// evaluate the gravitational acceleration and jerk
//---------------------------------------------------------------------
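            // for each pair: dR = xJ - xI, dV = vJ - vI, r^2 = |dR|^2 (+ Eps2 when SOFTEN is defined)
            // Acc_I  += m_J * dR / r^3
            // Jerk_I += m_J * ( dV / r^3 - 3 (dR.dV) dR / r^5 )
            // the -3 (dR.dV)/r^2 factor is held in "Temp" below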
real dx = sJ_Pos_x[k] - I_Pos_x;
real dy = sJ_Pos_y[k] - I_Pos_y;
real dz = sJ_Pos_z[k] - I_Pos_z;
# ifdef SOFTEN
real R2 = dx*dx + Eps2;
# else
real R2 = dx*dx;
# endif
R2 += dy*dy;
R2 += dz*dz;
real Rinv = (real)1.0 / SQRT(R2);
real R2inv = Rinv*Rinv;
real R3inv = R2inv*Rinv;
real MR3inv = sJ_Mass[k]*R3inv;
real dVx = sJ_Vel_x[k] - I_Vel_x;
real dVy = sJ_Vel_y[k] - I_Vel_y;
real dVz = sJ_Vel_z[k] - I_Vel_z;
real dR_dot_dV = dx*dVx + dy*dVy + dz*dVz;
real Temp = -(real)3.0*dR_dot_dV*R2inv;
# ifndef N_IS_MULTIPLE_OF_BS
if ( kk < J_End )
{
# endif
Acc[0] += MR3inv*dx;
Acc[1] += MR3inv*dy;
Acc[2] += MR3inv*dz;
Jerk[0] += MR3inv*( dVx + Temp*dx );
Jerk[1] += MR3inv*( dVy + Temp*dy );
Jerk[2] += MR3inv*( dVz + Temp*dz );
# ifndef N_IS_MULTIPLE_OF_BS
}
# endif
} // for (int k=0; k<GroupSize_J; k++)
__syncthreads();
} // for (int J_Base=J_Start; J_Base<J_End; J_Base+=GroupSize_J)
if ( ii < Ni )
{
const unsigned int SaveIndex = ii + NthSeg*Ni;
gI_Acc [SaveIndex][0] = Acc [0];
gI_Acc [SaveIndex][1] = Acc [1];
gI_Acc [SaveIndex][2] = Acc [2];
gI_Jerk[SaveIndex][0] = Jerk[0];
gI_Jerk[SaveIndex][1] = Jerk[1];
gI_Jerk[SaveIndex][2] = Jerk[2];
}
} // for ( int I_Base=I_Start; I_Base<I_End; I_Base+=GroupSize_I, I_Iter++ )
}
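#if 0
// Minimal host-side launch sketch, assuming the split bookkeeping (NSplit, NBlock_perSeg, Ni_perSeg,
// Ni_allSeg, Nj_afterSplit_List) was computed on the host; the device-pointer names (dJ_Mass, dJ_Pos,
// ..., dNj_afterSplit_List) are hypothetical placeholders for the arrays allocated elsewhere.
hipLaunchKernelGGL( CUCAL_Acc_Jerk_Split, dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0,
                    Nj, dJ_Mass, dJ_Pos, dJ_Vel, Ni, dI_Pos, dI_Vel, dI_Acc, dI_Jerk, Eps2,
                    NSplit, NBlock_perSeg, Ni_perSeg, Ni_allSeg, dNj_afterSplit_List );
#endif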
| 16425102c2dd1906784c6704ffac67bfc1ec3e23.cu | //---------------------------------------------------------------------------------------------------
// prefix "g" for pointer pointing to "Global" memory space in device
// prefix "s" for pointer pointing to "Shared" memory space in device
//
// prefix "I/J" for varialbe of "I/J" particles
// each thread calculates the acceleration, and jerk of "(I_Base+tx)th" "I" particle
// from all "J" particles
//
// "I" particles are packed into "N/GroupSize_I" "IGroup"s with "GroupSize_I" particles within each group
// "J" particles are packed into "N/GroupSize_J" "JGroup"s with "GroupSize_J" particles within each group
// Variables Definition:
// NSplit : number of times to split
// Segment : a segment of threads containing all I particles and ceil to a multiple of block size
// Pseudo-Particle : the pseudo IDs of I-particles when ceiling the size of segment to a multiple of block size
// : eg. Ni_perSeg = 128, Ni = 126 --> Pseudo-Particle Ids = 126,127
// Ni_perSeg : number of I particles per segment
// eg. BLOCK_SIZE=128, Ni=129~256 --> Ni_perSeg = 128*2 = 256
// eg. BLOCK_SIZE=64, Ni=129~192 --> Ni_perSeg = 64*3 = 192
// Ni_allSeg : total number of I particles in all segments (including the pseudo-particles)
// NBlock_perSeg : number of blocks in each segment
// NthSeg : the segment being calculated (0, 1, ... NSplit-1)
// Nj_afterSplit_List : number of J-particles calculated by each segment
//---------------------------------------------------------------------------------------------------
#include "Dori.h"
#define I_Start __umul24(bx, BLOCK_SIZE)
#define J_Start __umul24(NthSeg, Nj_afterSplit_List[0])
#define I_End Ni_allSeg
#define J_End ( J_Start + Nj_afterSplit_List[NthSeg] )
#define GroupSize_I GRID_SIZE * BLOCK_SIZE
#define GroupSize_J BLOCK_SIZE
__global__ void CUCAL_Acc_Jerk_Split( const int Nj, real gJ_Mass[], real gJ_Pos[][3],
real gJ_Vel[][3], const int Ni, real gI_Pos[][3], real gI_Vel[][3],
real gI_Acc[][3], real gI_Jerk[][3], const real Eps2,
const unsigned int NSplit, const unsigned int NBlock_perSeg,
const unsigned int Ni_perSeg, const unsigned int Ni_allSeg,
const unsigned int Nj_afterSplit_List[] )
{
const unsigned int tx = threadIdx.x;
const unsigned int bx = blockIdx.x;
__shared__ real sJ_Mass [BLOCK_SIZE];
__shared__ real sJ_Pos_x[BLOCK_SIZE];
__shared__ real sJ_Pos_y[BLOCK_SIZE];
__shared__ real sJ_Pos_z[BLOCK_SIZE];
__shared__ real sJ_Vel_x[BLOCK_SIZE];
__shared__ real sJ_Vel_y[BLOCK_SIZE];
__shared__ real sJ_Vel_z[BLOCK_SIZE];
int I_Iter = 0;
// (I/J)_Base : Base Address for (I/J)Group
for ( int I_Base=I_Start; I_Base<I_End; I_Base+=GroupSize_I, I_Iter++ )
{
real_acc Acc [3] = { (real_acc)0.0, (real_acc)0.0, (real_acc)0.0 };
real_acc Jerk[3] = { (real_acc)0.0, (real_acc)0.0, (real_acc)0.0 };
int NthSeg = (bx+I_Iter*GRID_SIZE) / NBlock_perSeg;
int I_Seq = I_Base+tx;
int ii = I_Seq%Ni_perSeg;
      int i = ii%Ni; // avoid loading the pseudo-I-particles
real I_Pos_x = gI_Pos[i][0];
real I_Pos_y = gI_Pos[i][1];
real I_Pos_z = gI_Pos[i][2];
real I_Vel_x = gI_Vel[i][0];
real I_Vel_y = gI_Vel[i][1];
real I_Vel_z = gI_Vel[i][2];
for (int J_Base=J_Start; J_Base<J_End; J_Base+=GroupSize_J)
{
int jj = J_Base+tx;
int j = jj%Nj; // deal with the case that Nj is not a multiple of block size
sJ_Mass [tx] = gJ_Mass[j];
sJ_Pos_x[tx] = gJ_Pos [j][0];
sJ_Pos_y[tx] = gJ_Pos [j][1];
sJ_Pos_z[tx] = gJ_Pos [j][2];
sJ_Vel_x[tx] = gJ_Vel [j][0];
sJ_Vel_y[tx] = gJ_Vel [j][1];
sJ_Vel_z[tx] = gJ_Vel [j][2];
__syncthreads();
// k : kth particle in JGroup
for (int k=0; k<GroupSize_J; k++)
{
# ifndef N_IS_MULTIPLE_OF_BS
int kk = J_Base+k;
# endif
// evaluate the gravitational acceleration and jerk
//---------------------------------------------------------------------
real dx = sJ_Pos_x[k] - I_Pos_x;
real dy = sJ_Pos_y[k] - I_Pos_y;
real dz = sJ_Pos_z[k] - I_Pos_z;
# ifdef SOFTEN
real R2 = dx*dx + Eps2;
# else
real R2 = dx*dx;
# endif
R2 += dy*dy;
R2 += dz*dz;
real Rinv = (real)1.0 / SQRT(R2);
real R2inv = Rinv*Rinv;
real R3inv = R2inv*Rinv;
real MR3inv = sJ_Mass[k]*R3inv;
real dVx = sJ_Vel_x[k] - I_Vel_x;
real dVy = sJ_Vel_y[k] - I_Vel_y;
real dVz = sJ_Vel_z[k] - I_Vel_z;
real dR_dot_dV = dx*dVx + dy*dVy + dz*dVz;
real Temp = -(real)3.0*dR_dot_dV*R2inv;
# ifndef N_IS_MULTIPLE_OF_BS
if ( kk < J_End )
{
# endif
Acc[0] += MR3inv*dx;
Acc[1] += MR3inv*dy;
Acc[2] += MR3inv*dz;
Jerk[0] += MR3inv*( dVx + Temp*dx );
Jerk[1] += MR3inv*( dVy + Temp*dy );
Jerk[2] += MR3inv*( dVz + Temp*dz );
# ifndef N_IS_MULTIPLE_OF_BS
}
# endif
} // for (int k=0; k<GroupSize_J; k++)
__syncthreads();
} // for (int J_Base=J_Start; J_Base<J_End; J_Base+=GroupSize_J)
if ( ii < Ni )
{
const unsigned int SaveIndex = ii + NthSeg*Ni;
gI_Acc [SaveIndex][0] = Acc [0];
gI_Acc [SaveIndex][1] = Acc [1];
gI_Acc [SaveIndex][2] = Acc [2];
gI_Jerk[SaveIndex][0] = Jerk[0];
gI_Jerk[SaveIndex][1] = Jerk[1];
gI_Jerk[SaveIndex][2] = Jerk[2];
}
} // for ( int I_Base=I_Start; I_Base<I_End; I_Base+=GroupSize_I, I_Iter++ )
}
|
1825b89314c803bee43e0da8dee1905b74720ee5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <fstream>
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
using namespace std;
__global__ void hello()
{
printf("hello from block %d %d thread %d %d \n", blockIdx.x,blockIdx.y,threadIdx.x,threadIdx.y);
}
int main()
{
dim3 dimBlock(8, 16);
dim3 dimGrid(2, 4);
hello << <dimGrid, dimBlock >> > ();
return 0;
} | 1825b89314c803bee43e0da8dee1905b74720ee5.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <time.h>
#include <fstream>
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
using namespace std;
__global__ void hello()
{
printf("hello from block %d %d thread %d %d \n", blockIdx.x,blockIdx.y,threadIdx.x,threadIdx.y);
}
int main()
{
dim3 dimBlock(8, 16);
dim3 dimGrid(2, 4);
hello << <dimGrid, dimBlock >> > ();
return 0;
} |
1bc1381aaba7c2ef72c6de4ad294ad95e79f9b09.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void reduceNeighboredLess (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if(idx >= n) return;
// in-place reduction in global memory
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
// convert tid into local array index
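        // e.g. with blockDim.x = 8: stride 1 -> tids 0..3 update elements 0,2,4,6;
        // stride 2 -> tids 0,1 update elements 0,4; stride 4 -> tid 0 updates element 0.
        // Keeping the active tids contiguous reduces warp divergence compared with the
        // naive (tid % (2*stride) == 0) neighbored reduction.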
int index = 2 * stride * tid;
if (index < blockDim.x)
{
idata[index] += idata[index + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
} | 1bc1381aaba7c2ef72c6de4ad294ad95e79f9b09.cu | #include "includes.h"
__global__ void reduceNeighboredLess (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if(idx >= n) return;
// in-place reduction in global memory
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
// convert tid into local array index
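        // e.g. with blockDim.x = 8: stride 1 -> tids 0..3 update elements 0,2,4,6;
        // stride 2 -> tids 0,1 update elements 0,4; stride 4 -> tid 0 updates element 0.
        // Keeping the active tids contiguous reduces warp divergence compared with the
        // naive (tid % (2*stride) == 0) neighbored reduction.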
int index = 2 * stride * tid;
if (index < blockDim.x)
{
idata[index] += idata[index + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
} |
23b2b3b34f40df92657639917ee8cf27ceba5f28.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "convertPixelFormat.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
uint8_t *inputBgra = NULL;
hipMalloc(&inputBgra, XSIZE*YSIZE);
uint8_t *outputYuv = NULL;
hipMalloc(&outputYuv, XSIZE*YSIZE);
int numPixels = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
  hipFree(0);
  hipLaunchKernelGGL((convertPixelFormat), dim3(gridBlock),dim3(threadBlock), 0, 0, inputBgra,outputYuv,numPixels);
hipDeviceSynchronize();
  for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
    hipLaunchKernelGGL((convertPixelFormat), dim3(gridBlock),dim3(threadBlock), 0, 0, inputBgra,outputYuv,numPixels);
}
auto start = steady_clock::now();
  for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    hipLaunchKernelGGL((convertPixelFormat), dim3(gridBlock),dim3(threadBlock), 0, 0, inputBgra,outputYuv,numPixels);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 23b2b3b34f40df92657639917ee8cf27ceba5f28.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "convertPixelFormat.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
uint8_t *inputBgra = NULL;
cudaMalloc(&inputBgra, XSIZE*YSIZE);
uint8_t *outputYuv = NULL;
cudaMalloc(&outputYuv, XSIZE*YSIZE);
int numPixels = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
convertPixelFormat<<<gridBlock,threadBlock>>>(inputBgra,outputYuv,numPixels);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
convertPixelFormat<<<gridBlock,threadBlock>>>(inputBgra,outputYuv,numPixels);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
convertPixelFormat<<<gridBlock,threadBlock>>>(inputBgra,outputYuv,numPixels);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
9270f730f7b5c92a23d027f5b6696ece16ade3d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
__device__ void adt_calc_gpu( const double *x1, const double *x2, const double *x3,
const double *x4, const double *q, double *adt) {
double dx, dy, ri, u, v, c;
ri = 1.0f / q[0];
u = ri * q[1];
v = ri * q[2];
c = sqrt(gam * gm1 * (ri * q[3] - 0.5f * (u * u + v * v)));
dx = x2[0] - x1[0];
dy = x2[1] - x1[1];
*adt = fabs(u * dy - v * dx) + c * sqrt(dx * dx + dy * dy);
dx = x3[0] - x2[0];
dy = x3[1] - x2[1];
*adt += fabs(u * dy - v * dx) + c * sqrt(dx * dx + dy * dy);
dx = x4[0] - x3[0];
dy = x4[1] - x3[1];
*adt += fabs(u * dy - v * dx) + c * sqrt(dx * dx + dy * dy);
dx = x1[0] - x4[0];
dy = x1[1] - x4[1];
*adt += fabs(u * dy - v * dx) + c * sqrt(dx * dx + dy * dy);
*adt = (*adt) * (1.0f / cfl);
}
// CUDA kernel function
__global__ void op_cuda_adt_calc(
const double *__restrict ind_arg0,
const int *__restrict opDat0Map,
const double *__restrict arg4,
double *arg5,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
return;
}
if (threadIdx.x==0) {
//get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
}
__syncthreads(); // make sure all of above completed
for ( int n=threadIdx.x; n<nelem; n+=blockDim.x ){
int map0idx;
int map1idx;
int map2idx;
int map3idx;
map0idx = opDat0Map[n + offset_b + set_size * 0];
map1idx = opDat0Map[n + offset_b + set_size * 1];
map2idx = opDat0Map[n + offset_b + set_size * 2];
map3idx = opDat0Map[n + offset_b + set_size * 3];
//user-supplied kernel call
adt_calc_gpu(ind_arg0+map0idx*2,
ind_arg0+map1idx*2,
ind_arg0+map2idx*2,
ind_arg0+map3idx*2,
arg4+(n+offset_b)*4,
arg5+(n+offset_b)*1);
}
}
//host stub function
void op_par_loop_adt_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5){
int nargs = 6;
op_arg args[6];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(1);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[1].name = name;
OP_kernels[1].count += 1;
int ninds = 1;
int inds[6] = {0,0,0,0,-1,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: adt_calc\n");
}
//get plan
#ifdef OP_PART_SIZE_1
int part_size = OP_PART_SIZE_1;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
//execute plan
int block_offset = 0;
for ( int col=0; col<Plan->ncolors; col++ ){
if (col==Plan->ncolors_core) {
op_mpi_wait_all_cuda(nargs, args);
}
#ifdef OP_BLOCK_SIZE_1
int nthread = OP_BLOCK_SIZE_1;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
hipLaunchKernelGGL(( op_cuda_adt_calc), dim3(nblocks),dim3(nthread), 0, 0,
(double *)arg0.data_d,
arg0.map_data_d,
(double*)arg4.data_d,
(double*)arg5.data_d,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set->size+set->exec_size);
}
block_offset += Plan->ncolblk[col];
}
OP_kernels[1].transfer += Plan->transfer;
OP_kernels[1].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(hipDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[1].time += wall_t2 - wall_t1;
}
| 9270f730f7b5c92a23d027f5b6696ece16ade3d2.cu | //
// auto-generated by op2.py
//
//user function
__device__ void adt_calc_gpu( const double *x1, const double *x2, const double *x3,
const double *x4, const double *q, double *adt) {
double dx, dy, ri, u, v, c;
ri = 1.0f / q[0];
u = ri * q[1];
v = ri * q[2];
c = sqrt(gam * gm1 * (ri * q[3] - 0.5f * (u * u + v * v)));
dx = x2[0] - x1[0];
dy = x2[1] - x1[1];
*adt = fabs(u * dy - v * dx) + c * sqrt(dx * dx + dy * dy);
dx = x3[0] - x2[0];
dy = x3[1] - x2[1];
*adt += fabs(u * dy - v * dx) + c * sqrt(dx * dx + dy * dy);
dx = x4[0] - x3[0];
dy = x4[1] - x3[1];
*adt += fabs(u * dy - v * dx) + c * sqrt(dx * dx + dy * dy);
dx = x1[0] - x4[0];
dy = x1[1] - x4[1];
*adt += fabs(u * dy - v * dx) + c * sqrt(dx * dx + dy * dy);
*adt = (*adt) * (1.0f / cfl);
}
// CUDA kernel function
__global__ void op_cuda_adt_calc(
const double *__restrict ind_arg0,
const int *__restrict opDat0Map,
const double *__restrict arg4,
double *arg5,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
return;
}
if (threadIdx.x==0) {
//get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
}
__syncthreads(); // make sure all of above completed
for ( int n=threadIdx.x; n<nelem; n+=blockDim.x ){
int map0idx;
int map1idx;
int map2idx;
int map3idx;
map0idx = opDat0Map[n + offset_b + set_size * 0];
map1idx = opDat0Map[n + offset_b + set_size * 1];
map2idx = opDat0Map[n + offset_b + set_size * 2];
map3idx = opDat0Map[n + offset_b + set_size * 3];
//user-supplied kernel call
adt_calc_gpu(ind_arg0+map0idx*2,
ind_arg0+map1idx*2,
ind_arg0+map2idx*2,
ind_arg0+map3idx*2,
arg4+(n+offset_b)*4,
arg5+(n+offset_b)*1);
}
}
//host stub function
void op_par_loop_adt_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5){
int nargs = 6;
op_arg args[6];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(1);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[1].name = name;
OP_kernels[1].count += 1;
int ninds = 1;
int inds[6] = {0,0,0,0,-1,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: adt_calc\n");
}
//get plan
#ifdef OP_PART_SIZE_1
int part_size = OP_PART_SIZE_1;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
//execute plan
int block_offset = 0;
for ( int col=0; col<Plan->ncolors; col++ ){
if (col==Plan->ncolors_core) {
op_mpi_wait_all_cuda(nargs, args);
}
#ifdef OP_BLOCK_SIZE_1
int nthread = OP_BLOCK_SIZE_1;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
op_cuda_adt_calc<<<nblocks,nthread>>>(
(double *)arg0.data_d,
arg0.map_data_d,
(double*)arg4.data_d,
(double*)arg5.data_d,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set->size+set->exec_size);
}
block_offset += Plan->ncolblk[col];
}
OP_kernels[1].transfer += Plan->transfer;
OP_kernels[1].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(cudaDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[1].time += wall_t2 - wall_t1;
}
|
a0df9c709e3ee17f65b0a29c6487c32f6aeff191.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "main.h"
#include "svtsim_functions.h"
#include "functionkernel.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/scatter.h>
#include <thrust/functional.h>
#include <thrust/binary_search.h>
#include <thrust/gather.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/constant_iterator.h>
// typedefs for shorthand
typedef thrust::tuple<unsigned int, unsigned int> DataPair;
// (layer, out1, out2, out3)
typedef thrust::tuple<unsigned int, unsigned int,
unsigned int, unsigned int> UnpackTuple;
typedef thrust::device_vector<unsigned int>::iterator IntIterator;
typedef thrust::tuple<IntIterator, IntIterator,
IntIterator, IntIterator> IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
struct Hitmap {
int hitmap[6];
};
typedef thrust::device_vector<Hitmap> HitmapVector;
#define COLUMNS 3 //var for testkernel
#define ROWS 2 //var for testkernel
#define N_BLOCKS 1
#define N_THREADS_PER_BLOCK 16
#define CUDA_TIMING
#define DEBUG
#define MAX(x,y) ((x)>(y) ? (x):(y))
// CUDA timer macros
hipEvent_t c_start, c_stop;
inline void CTSTART() {
#ifdef CUDA_TIMING
hipEventCreate(&c_start);
hipEventCreate(&c_stop);
hipEventRecord(c_start, 0);
#endif
}
inline void CTSTOP(const char *file) {
#ifdef CUDA_TIMING
hipEventRecord(c_stop, 0);
hipEventSynchronize(c_stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, c_start, c_stop);
elapsedTime *= 1000.0; // ms to us
char filePath[100];
sprintf(filePath, "timer/%s.txt", file);
FILE *outFile = fopen(filePath, "a");
if (outFile != NULL) {
fprintf(outFile, "%f\n", elapsedTime);
fclose(outFile);
} else {
printf("Warning: cannot open %s\n", filePath);
}
#endif
}
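// Typical usage of the timing macros above (illustrative; "my_kernel" is just a label):
//   CTSTART();
//   /* ... kernel launch or memcpy ... */
//   CTSTOP("my_kernel"); // appends the elapsed time in microseconds to timer/my_kernel.txt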
// CUDA variables
int *ids, *out1, *out2, *out3;
int *d_ids, *d_out1, *d_out2, *d_out3;
unsigned int *d_data_in;
long sizeW;
#define gf_mask_gpu(x) (d_gf_maskdata[(x)])
__constant__ int d_gf_maskdata[33] = {
0x00000000,
0x00000001, 0x00000003, 0x00000007, 0x0000000f,
0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
0x0001ffff, 0x0003ffff, 0x0007ffff, 0x000fffff,
0x001fffff, 0x003fffff, 0x007fffff, 0x00ffffff,
0x01ffffff, 0x03ffffff, 0x07ffffff, 0x0fffffff,
0x1fffffff, 0x3fffffff, 0x7fffffff, 0xffffffff
};
void GPU_Init(int n_words)
{
sizeW = sizeof(int) * n_words;
ids = (int *)malloc(sizeW);
out1 = (int *)malloc(sizeW);
out2 = (int *)malloc(sizeW);
out3 = (int *)malloc(sizeW);
hipMalloc((void **)&d_data_in, sizeW);
hipMalloc((void **)&d_ids, sizeW);
hipMalloc((void **)&d_out1, sizeW);
hipMalloc((void **)&d_out2, sizeW);
hipMalloc((void **)&d_out3, sizeW);
}
void GPU_Destroy()
{
free(ids);
free(out1);
free(out2);
free(out3);
hipFree(d_data_in);
hipFree(d_ids);
hipFree(d_out1);
hipFree(d_out2);
hipFree(d_out3);
}
template <typename Vector>
void print(const char *title, const Vector &v)
{
std::cout << title << ": ";
for(size_t i = 0; i < v.size(); i++)
std::cout << v[i] << " ";
std::cout << "\n";
}
// unpacking function itself. unpack the passed-in int to the tuple.
struct unpacker : public thrust::unary_function<DataPair, UnpackTuple> {
/* parallel word_decode kernel.
each word is decoded and layer (id) and output values are set.
we only use 3 output arrays since depending on the layer,
we only need 3 different values. this saves allocating/copying empty arrays
format (out1, out2, out3):
id < XFT_LYR: zid, lcl, hit
id == XFT_LYR: crv, crv_sign, phi
id == IP_LYR: sector, amroad, 0
id == EE_LYR: ee_word
*/
__host__ __device__
UnpackTuple operator()(DataPair t) {
unsigned int word = thrust::get<0>(t);
unsigned int prev_word = thrust::get<1>(t);
unsigned int val1 = 0, val2 = 0, val3 = 0;
int ee, ep, lyr;
lyr = -999; /* Any invalid numbers != 0-7 */
/*
if (word > gf_mask_gpu(SVT_WORD_WIDTH)) {
//printf("gf_iword_decode: Input data is larger than the maximum SVT word");
//return SVTSIM_GF_ERR;
return;
}
*/
/* check if this is a EP or EE word */
ee = (word >> SVT_EE_BIT) & gf_mask_gpu(1);
ep = (word >> SVT_EP_BIT) & gf_mask_gpu(1);
// check if this is the second XFT word
//int prev_word = (idx==0) ? 0 : words[idx-1];
bool xft = ((prev_word >> SVT_LYR_LSB) & gf_mask_gpu(SVT_LYR_WIDTH)) == XFT_LYR ? 1 : 0;
if (ee && ep) { /* End of Event word */
val1 = word; // ee_word
//*parity_in = (word >> SVT_PAR_BIT) & gf_mask_gpu(1);
lyr = EE_LYR;
} else if (ee) { /* only EE bit ON is error condition */
//*err |= (1 << UNKNOWN_ERR);
lyr = EE_LYR; /* We have to check */
} else if (ep) { /* End of Packet word */
lyr = EP_LYR;
val1 = 6; // sector
/* *sector = (word >> SVT_SECT_LSB) & gf_mask_gpu(SVT_SECT_WIDTH); */
val2 = word & gf_mask_gpu(AMROAD_WORD_WIDTH); // amroad
} else if (xft) { /* Second XFT word */
val1 = (word >> SVT_CRV_LSB) & gf_mask_gpu(SVT_CRV_WIDTH); // crv
val2 = (word >> (SVT_CRV_LSB + SVT_CRV_WIDTH)) & gf_mask_gpu(1); // crv_sign
val3 = word & gf_mask_gpu(SVT_PHI_WIDTH); // phi
lyr = XFT_LYR_2;
} else { /* SVX hits or the first XFT word */
lyr = (word >> SVT_LYR_LSB) & gf_mask_gpu(SVT_LYR_WIDTH);
if (lyr == XFT_LYRID) lyr = XFT_LYR; // probably don't need - stp
val1 = (word >> SVT_Z_LSB) & gf_mask_gpu(SVT_Z_WIDTH); // zid
val2 = (word >> SVT_LCLS_BIT) & gf_mask_gpu(1); // lcl
val3 = word & gf_mask_gpu(SVT_HIT_WIDTH); // hit
}
return thrust::make_tuple(lyr,val1,val2,val3);
}
};
struct isNewRoad : public thrust::unary_function<unsigned int, bool> {
__host__ __device__ bool operator()(const unsigned int &id) {
//return id == EP_LYR;
return id == EP_LYR || id == EE_LYR;
}
};
struct isNewHit : public thrust::unary_function<unsigned int, bool> {
__host__ __device__ bool operator()(const unsigned int &id) {
return id < XFT_LYR || id == XFT_LYR_2;
}
};
struct isNewEvt : public thrust::unary_function<unsigned int, bool> {
__host__ __device__ bool operator()(const unsigned int &id) {
return id == EE_LYR;
}
};
struct lhitToHitmap : public thrust::unary_function<DataPair, Hitmap> {
__host__ __device__ Hitmap operator()(const DataPair &t) {
int layer = thrust::get<0>(t);
int lhit = thrust::get<1>(t);
if (layer == XFT_LYR_2) layer = XFT_LYR;
Hitmap h;
for (int i=0; i<=XFT_LYR; i++)
h.hitmap[i] = layer == i ? lhit : 0;
return h;
}
};
struct tupleSecond {// : public thrust::unary_function<T, bool> {
template <typename T>
__host__ __device__ bool operator()(const T &t) {
return thrust::get<1>(t);
}
};
struct isEqualLayer : public thrust::binary_function<unsigned int, unsigned int, bool>
{
__host__ __device__ bool operator()(const unsigned int &a, const unsigned int &b) {
return a == b || ((a == XFT_LYR || a == XFT_LYR_2) && (b == XFT_LYR || b == XFT_LYR_2));
}
};
struct layerHitMultiply
{
template <typename T>
__host__ __device__ T operator()(const T &a, const T &b) {
//return a * (b>1 ? b:1);
return MAX(a,1) * MAX(b,1);
}
};
struct hitmapAccumulate
{
template <typename T>
__host__ __device__ T operator()(const T& a, const T& b) {
Hitmap r;
for (int i=0; i<=XFT_LYR; i++)
r.hitmap[i] = MAX(a.hitmap[i], b.hitmap[i]);
return r;
}
};
struct hitmapComb : public thrust::unary_function<DataPair,Hitmap>
{
Hitmap *d_hitmap;
hitmapComb(Hitmap *_hm) : d_hitmap(_hm) {} // constructor
template <typename T>
__host__ __device__ Hitmap operator()(const T& t) {
unsigned int road = thrust::get<0>(t);
unsigned int ic = thrust::get<1>(t) -1;
Hitmap hm = d_hitmap[road];
Hitmap r;
for (int i=0; i<=XFT_LYR; i++) {
int nh = hm.hitmap[i];
if (nh == 0) {
r.hitmap[i] = 0;
} else {
r.hitmap[i] = ic % nh + 1;
ic /= nh;
}
}
return r;
}
};
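// Example of the mixed-radix decode done by hitmapComb: for a road whose per-layer
// hit counts are {2,1,3,0,0,1} (2*1*3*1*1*1 = 6 combinations), combination number 5
// gives ic = 4 and decodes layer by layer to {4%2+1, 2%1+1, 2%3+1, 0, 0, 0%1+1}
// = {1,1,3,0,0,1}, i.e. 1-based hit numbers per layer, 0 for an empty layer.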
struct hitmapAbsoluteIndices : public thrust::unary_function<DataPair,Hitmap>
{
Hitmap *d_hitmap;
unsigned int *d_road_indices;
hitmapAbsoluteIndices(Hitmap *_hm, unsigned int *_ri) : d_hitmap(_hm), d_road_indices(_ri) {} // constructor
template <typename T>
__host__ __device__ Hitmap operator()(const T& t) {
unsigned int road = thrust::get<0>(t);
Hitmap hm_c = thrust::get<1>(t);
Hitmap hm = d_hitmap[road];
int offset = d_road_indices[road];
Hitmap r;
int ihits = 0;
for (int i=0; i<=XFT_LYR; i++) {
int ih = hm_c.hitmap[i] - 1;
if (i == XFT_LYR) ih += 1+ih; // to account for unused first XFT word
if (ih < 0) r.hitmap[i] = -1;
else r.hitmap[i] = offset + ihits + ih;
ihits += hm.hitmap[i];
}
return r;
}
};
// BinaryPredicate for the head flag segment representation
// equivalent to thrust::not2(thrust::project2nd<int,int>()));
template <typename HeadFlagType>
struct head_flag_predicate : public thrust::binary_function<HeadFlagType,HeadFlagType,bool>
{
__host__ __device__
bool operator()(HeadFlagType left, HeadFlagType right) const {
return !right;
}
};
struct fill_tf_gpu
{
tf_arrays_t tf; // pointer in device memory
fill_tf_gpu(tf_arrays_t _tf) : tf(_tf) {} // constructor
template <typename Tuple>
__device__ void operator()(Tuple t) {
unsigned int id = thrust::get<0>(t);
unsigned int id_next = thrust::get<1>(t);
unsigned int out1 = thrust::get<2>(t);
unsigned int out2 = thrust::get<3>(t);
unsigned int out3 = thrust::get<4>(t);
unsigned int evt = thrust::get<5>(t);
unsigned int road = thrust::get<6>(t);
unsigned int rhit = thrust::get<7>(t) -1;
unsigned int lhit = thrust::get<8>(t) -1;
// SVX Data
if (id < XFT_LYR) {
int zid = out1;
int lcl = out2;
int hit = out3;
tf->evt_hit[evt][road][id][lhit] = hit;
tf->evt_hitZ[evt][road][id][lhit] = zid;
tf->evt_lcl[evt][road][id][lhit] = lcl;
tf->evt_lclforcut[evt][road][id][lhit] = lcl;
tf->evt_layerZ[evt][road][id] = zid;
if (rhit == 0) {
atomicOr(&tf->evt_zid[evt][road], zid & gf_mask_gpu(GF_SUBZ_WIDTH));
} else if (id_next == XFT_LYR) {
atomicOr(&tf->evt_zid[evt][road], (zid & gf_mask_gpu(GF_SUBZ_WIDTH)) << GF_SUBZ_WIDTH);
}
//tf->evt_nhits[evt][road][id]++;
atomicAdd(&tf->evt_nhits[evt][road][id], 1);
// Error Checking
if (lhit == MAX_HIT) tf->evt_err[evt][road] |= (1 << OFLOW_HIT_BIT);
//if (id < id_last) tf->evt_err[evt][road] |= (1 << OUTORDER_BIT);
} else if (id == XFT_LYR) {
// we ignore but leave here to not trigger 'else' case - stp
} else if (id == XFT_LYR_2) {
id = XFT_LYR; // for XFT_LYR_2 kludge - stp
int crv = out1;
int crv_sign = out2;
int phi = out3;
tf->evt_crv[evt][road][lhit] = crv;
tf->evt_crv_sign[evt][road][lhit] = crv_sign;
tf->evt_phi[evt][road][lhit] = phi;
//tf->evt_nhits[evt][road][id]++;
atomicAdd(&tf->evt_nhits[evt][road][id], 1);
// Error Checking
if (lhit == MAX_HIT) tf->evt_err[evt][road] |= (1 << OFLOW_HIT_BIT);
//if (id < id_last) tf->evt_err[evt][road] |= (1 << OUTORDER_BIT);
} else if (id == EP_LYR) {
int sector = out1;
int amroad = out2;
tf->evt_cable_sect[evt][road] = sector;
tf->evt_sect[evt][road] = sector;
tf->evt_road[evt][road] = amroad;
tf->evt_err_sum[evt] |= tf->evt_err[evt][road];
//tf->evt_nroads[evt]++;
atomicAdd(&tf->evt_nroads[evt], 1);
if (road > MAXROAD) {
;
//printf("The limit on the number of roads fitted by the TF is %d\n",MAXROAD);
//printf("You reached that limit evt->road = %d\n",road);
}
//for (id = 0; id <= XFT_LYR; id++)
// tf->evt_nhits[evt][road][id] = 0;
//tf->evt_err[evt][road] = 0;
//tf->evt_zid[evt][road] = -1;
} else if (id == EE_LYR) {
int ee_word = out1;
tf->evt_ee_word[evt] = ee_word;
//tf->totEvts++;
atomicAdd(&tf->totEvts, 1);
} else {
//printf("Error INV_DATA_BIT: layer = %u\n", id);
tf->evt_err[evt][road] |= (1 << INV_DATA_BIT);
}
}
};
__global__ void
k_word_decode(int N, unsigned int *words, int *ids, int *out1, int *out2, int *out3)
{
/* parallel word_decode kernel.
each word is decoded and layer (id) and output values are set.
we only use 3 output arrays since depending on the layer,
we only need 3 different values. this saves allocating/copying empty arrays
format (out1, out2, out3):
id < XFT_LYR: zid, lcl, hit
id == XFT_LYR: crv, crv_sign, phi
id == IP_LYR: sector, amroad, 0
id == EE_LYR: ee_word
*/
long idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > N) return;
//ids[idx] = idx; return;
int word = words[idx];
int ee, ep, lyr;
lyr = -999; /* Any invalid numbers != 0-7 */
out1[idx] = 0;
out2[idx] = 0;
out3[idx] = 0;
if (word > gf_mask_gpu(SVT_WORD_WIDTH)) {
//printf("gf_iword_decode: Input data is larger than the maximum SVT word");
//return SVTSIM_GF_ERR;
ids[idx] = lyr;
return;
}
/* check if this is a EP or EE word */
ee = (word >> SVT_EE_BIT) & gf_mask_gpu(1);
ep = (word >> SVT_EP_BIT) & gf_mask_gpu(1);
// check if this is the second XFT word
int prev_word = (idx==0) ? 0 : words[idx-1];
bool xft = ((prev_word >> SVT_LYR_LSB) & gf_mask_gpu(SVT_LYR_WIDTH)) == XFT_LYR ? 1 : 0;
if (ee && ep) { /* End of Event word */
out1[idx] = word; // ee_word
//*parity_in = (word >> SVT_PAR_BIT) & gf_mask_gpu(1);
lyr = EE_LYR;
} else if (ee) { /* only EE bit ON is error condition */
//*err |= (1 << UNKNOWN_ERR);
lyr = EE_LYR; /* We have to check */
} else if (ep) { /* End of Packet word */
lyr = EP_LYR;
out1[idx] = 6; // sector
/* *sector = (word >> SVT_SECT_LSB) & gf_mask_gpu(SVT_SECT_WIDTH); */
out2[idx] = word & gf_mask_gpu(AMROAD_WORD_WIDTH); // amroad
} else if (xft) { /* Second XFT word */
out1[idx] = (word >> SVT_CRV_LSB) & gf_mask_gpu(SVT_CRV_WIDTH); // crv
out2[idx] = (word >> (SVT_CRV_LSB + SVT_CRV_WIDTH)) & gf_mask_gpu(1); // crv_sign
out3[idx] = word & gf_mask_gpu(SVT_PHI_WIDTH); // phi
lyr = XFT_LYR_2;
} else { /* SVX hits or the first XFT word */
lyr = (word >> SVT_LYR_LSB) & gf_mask_gpu(SVT_LYR_WIDTH);
if (lyr == XFT_LYRID) lyr = XFT_LYR; // probably don't need - stp
out1[idx] = (word >> SVT_Z_LSB) & gf_mask_gpu(SVT_Z_WIDTH); // zid
out2[idx] = (word >> SVT_LCLS_BIT) & gf_mask_gpu(1); // lcl
out3[idx] = word & gf_mask_gpu(SVT_HIT_WIDTH); // hit
}
ids[idx] = lyr;
}
void scan_threads_per_block_fep(int n_words, unsigned int *words, int *ids, int *out1, int *out2, int *out3)
{
hipEvent_t c_start, c_stop;
hipEventCreate(&c_start);
hipEventCreate(&c_stop);
int step = 2; //64;
int n_threads_max;
float elapsedTime, totalTime = 0;
int i;
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
n_threads_max = deviceProp.maxThreadsPerBlock;
printf("n_threads_max = %d\n", n_threads_max);
// call once without timing to be sure GPU is initialized
i = n_threads_max;
hipLaunchKernelGGL(( k_word_decode) , dim3((n_words+i-1)/i), dim3(i), 0, 0,
n_words, d_data_in, d_ids, d_out1, d_out2, d_out3);
for (i=1; i<n_threads_max; i+=step) {
int j, n_iter = 10;
totalTime = 0;
for (j = 0; j < n_iter; j++) {
hipEventRecord(c_start, 0);
hipLaunchKernelGGL(( k_word_decode) , dim3((n_words+i-1)/i), dim3(i), 0, 0,
n_words, d_data_in, d_ids, d_out1, d_out2, d_out3);
hipEventRecord(c_stop, 0);
hipEventSynchronize(c_stop);
hipEventElapsedTime(&elapsedTime, c_start, c_stop);
elapsedTime *= 1000.0; // ms to us
totalTime += elapsedTime;
}
elapsedTime = totalTime / n_iter;
FILE *outFile = fopen("threads_scan.txt", "a");
fprintf(outFile, "%d\t%f\n", i, elapsedTime);
fclose(outFile);
if (i==1) i = 0;
}
}
void launchFepUnpackKernel(tf_arrays_t tf, unsigned int *data_in, int n_words)
{
/* initializing arrays */
int ie, id;
tf->totEvts = 0;
for (ie = 0; ie < NEVTS; ie++) {
tf->evt_nroads[ie] = 0;
tf->evt_err_sum[ie] = 0;
for (id = 0; id <= NSVX_PLANE; id++) {
tf->evt_layerZ[ie][tf->evt_nroads[ie]][id] = 0;
}
for (id = 0; id <= XFT_LYR; id++) {
tf->evt_nhits[ie][tf->evt_nroads[ie]][id] = 0;
}
tf->evt_err[ie][tf->evt_nroads[ie]] = 0;
//tf->evt_zid[ie][tf->evt_nroads[ie]] = -1;
tf->evt_zid[ie][tf->evt_nroads[ie]] = 0; // we need to or these - stp
// printf("tf->evt_nroads[%d] = %d, tf->evt_zid[%d][tf->evt_nroads[%d]] = %d\n", ie, tf->evt_nroads[ie], ie, ie, tf->evt_zid[ie][tf->evt_nroads[ie]]);
//}
}
CTSTART();
// Copy data to the Device
hipMemcpy(d_data_in, data_in, sizeW, hipMemcpyHostToDevice);
CTSTOP("copyWordsToDevice_CUDA");
//scan_threads_per_block_fep(n_words, d_data_in, d_ids, d_out1, d_out2, d_out3);
CTSTART();
hipLaunchKernelGGL(( k_word_decode) , dim3((n_words+N_THREADS_PER_BLOCK-1)/N_THREADS_PER_BLOCK), dim3(N_THREADS_PER_BLOCK), 0, 0,
n_words, d_data_in, d_ids, d_out1, d_out2, d_out3);
CTSTOP("k_word_decode");
// Copy output to the Host
CTSTART();
hipMemcpy(ids, d_ids, sizeW, hipMemcpyDeviceToHost);
hipMemcpy(out1, d_out1, sizeW, hipMemcpyDeviceToHost);
hipMemcpy(out2, d_out2, sizeW, hipMemcpyDeviceToHost);
hipMemcpy(out3, d_out3, sizeW, hipMemcpyDeviceToHost);
CTSTOP("k_word_decode_copyToHost");
//////////////////// also do with THRUST
// input data
thrust::device_vector<unsigned int> d_vec(n_words); // this would be done in the GPU_Init()
CTSTART();
thrust::copy(data_in, data_in + n_words, d_vec.begin());
CTSTOP("copyWordsToDevice_thrust");
// output vectors
thrust::device_vector<unsigned int> d_idt(n_words);
thrust::device_vector<unsigned int> d_out1t(n_words);
thrust::device_vector<unsigned int> d_out2t(n_words);
thrust::device_vector<unsigned int> d_out3t(n_words);
// unpack
CTSTART();
thrust::transform(
thrust::make_zip_iterator(thrust::make_tuple(d_vec.begin(), d_vec.begin()-1)),
thrust::make_zip_iterator(thrust::make_tuple(d_vec.end(), d_vec.end()-1)),
thrust::make_zip_iterator(thrust::make_tuple(d_idt.begin(), d_out1t.begin(),
d_out2t.begin(), d_out3t.begin())),
unpacker());
CTSTOP("thrust_unpacker");
// copy to CPU for verification
thrust::host_vector<unsigned int> h_test0 = d_idt;
thrust::host_vector<unsigned int> h_test1 = d_out1t;
thrust::host_vector<unsigned int> h_test2 = d_out2t;
thrust::host_vector<unsigned int> h_test3 = d_out3t;
/*
int ndiff = 0;
for (int i=0; i<n_words; i++) {
if (h_test0[i] != ids[i]) ndiff++;
if (h_test1[i] != out1[i]) ndiff++;
if (h_test2[i] != out2[i]) ndiff++;
if (h_test3[i] != out3[i]) ndiff++;
}
printf("ndiff = %d\n", ndiff);
printf("nmatch = %d\n", 4*n_words - ndiff);
*/
//// fill nevt, nroad, nhit arrays
//// want to restart counting according to evt > road > layer > hit
thrust::device_vector<unsigned int> d_evt(n_words);
thrust::device_vector<unsigned int> d_road(n_words);
thrust::device_vector<unsigned int> d_rhit(n_words);
thrust::device_vector<unsigned int> d_lhit(n_words);
CTSTART();
thrust::transform(d_idt.begin(), d_idt.end(), d_road.begin(), isNewRoad());
CTSTOP("scans_singleTransform");
CTSTART();
thrust::exclusive_scan(
thrust::make_transform_iterator(d_idt.begin(), isNewEvt()),
thrust::make_transform_iterator(d_idt.end(), isNewEvt()),
d_evt.begin());
CTSTOP("scans_exclusive_scan");
CTSTART();
thrust::exclusive_scan_by_key(
d_evt.begin(), d_evt.end(), // keys
thrust::make_transform_iterator(d_idt.begin(), isNewRoad()), // vals
d_road.begin());
CTSTOP("scans_exclusive_scan_by_key");
CTSTART();
thrust::inclusive_scan_by_key(
d_road.begin(), d_road.end(), // keys
thrust::make_transform_iterator(d_idt.begin(), isNewHit()), //vals
d_rhit.begin());
CTSTOP("scans_inclusive_scan_by_key_rhit");
CTSTART();
thrust::inclusive_scan_by_key(
d_idt.begin(), d_idt.end(), // keys
thrust::make_transform_iterator(d_idt.begin(), isNewHit()), //vals
d_lhit.begin(),
isEqualLayer()); // binary predicate
CTSTOP("scans_inclusive_scan_by_key_lhit");
//// alternate method of segmenting based on flags instead of scans
thrust::device_vector<unsigned int> d_evt_flag(n_words);
thrust::device_vector<unsigned int> d_road_flag(n_words);
//thrust::device_vector<unsigned int> d_rhit_flag(n_words);
//thrust::device_vector<unsigned int> d_lhit_flag(n_words);
thrust::transform(d_idt.begin(), d_idt.end(), d_evt_flag.begin(), isNewEvt());
thrust::transform(d_idt.begin(), d_idt.end(), d_road_flag.begin(), isNewRoad());
CTSTART();
// can do key-based operations on flags instead of scans
thrust::inclusive_scan_by_key(
d_road_flag.begin(), d_road_flag.end(), // keys
thrust::make_transform_iterator(d_idt.begin(), isNewHit()), //vals
d_rhit.begin(),
head_flag_predicate<unsigned int>());
CTSTOP("scan_inclusive_scan_by_key_rhit_flags");
//// calculate number of combinations per road
// for the size of these, only need n_roads, but might be slower(?) to wait for
// that result to come back to CPU
thrust::device_vector<unsigned int> d_roadKey(n_words); // will soon only take up n_roads
thrust::device_vector<unsigned int> d_ncomb(n_words); // will soon only take up n_roads
CTSTART();
size_t n_roads = thrust::reduce_by_key(
d_road.begin(), d_road.end(), // keys
d_lhit.begin(), // vals
d_roadKey.begin(), // keys output
d_ncomb.begin(), // vals output
thrust::equal_to<int>(), // binary predicate
layerHitMultiply() // binary operator
).first - d_roadKey.begin(); // new output size
CTSTOP("reduce_by_key");
#ifdef DEBUG
for (int i=0; i<n_words; i++) {
unsigned int evt = d_evt[i];
unsigned int road = d_road[i];
unsigned int rhit = d_rhit[i];
unsigned int lhit = d_lhit[i];
printf("%.6x\tevt = %d\troad = %d\trhit = %d\tlayer = %d\tlhit = %d\tout=(%.6x,%.6x,%.6x)\n", data_in[i], evt, road, rhit, h_test0[i], lhit, h_test1[i], h_test2[i], h_test3[i]);
}
#endif
#ifdef DEBUG
for (int i=0; i<n_roads; i++) {
unsigned int road = d_roadKey[i];
unsigned int ncomb = d_ncomb[i];
printf("road %d has %d combinations\n", road, ncomb);
}
#endif
// get global road offset indices
CTSTART();
thrust::device_vector<unsigned int> d_road_indices(n_roads);
thrust::copy_if(thrust::make_zip_iterator(thrust::make_tuple(thrust::counting_iterator<int>(1), d_road_flag.begin())),
thrust::make_zip_iterator(thrust::make_tuple(thrust::counting_iterator<int>(n_words+1), d_road_flag.end())),
thrust::make_zip_iterator(thrust::make_tuple(d_road_indices.begin(), thrust::constant_iterator<int>(0))),
tupleSecond());
CTSTOP("road_indices");
#ifdef DEBUG
print("road_indices", d_road_indices);
#endif
CTSTART();
thrust::device_vector<unsigned int> d_ncomb_scan(n_roads);
thrust::inclusive_scan(d_ncomb.begin(), d_ncomb.begin() + n_roads, d_ncomb_scan.begin());
//unsigned int n_combs = thrust::reduce(d_ncomb.begin(), d_ncomb.end());
unsigned int n_combs = d_ncomb_scan.back();
#ifdef DEBUG
printf("total combinations: %d\n", n_combs);
#endif
// get the combination indices. might be able to do this better with an iterator, like
// https://github.com/thrust/thrust/blob/master/examples/repeated_range.cu
thrust::device_vector<unsigned int> d_indices(n_combs);
thrust::lower_bound(d_ncomb_scan.begin(), d_ncomb_scan.end(),
thrust::counting_iterator<unsigned int>(1),
thrust::counting_iterator<unsigned int>(n_combs + 1),
d_indices.begin());
thrust::device_vector<unsigned int> d_indices_road(d_indices);
#ifdef DEBUG
print("indices_road", d_indices_road);
#endif
/* // can also do with a scan but will take longer
thrust::inclusive_scan_by_key(
d_indices.begin(), d_indices.end(),
thrust::constant_iterator<int>(1),
d_indices.begin());
*/
thrust::gather(d_indices.begin(), d_indices.end(),
d_ncomb_scan.begin(),
d_indices.begin());
thrust::transform(d_indices.begin(), d_indices.end(),
thrust::constant_iterator<int>(*d_ncomb_scan.begin()),
d_indices.begin(),
thrust::minus<int>());
thrust::transform(thrust::counting_iterator<int>(1),
thrust::counting_iterator<int>(n_combs + 1),
d_indices.begin(),
d_indices.begin(),
thrust::minus<int>());
CTSTOP("indices");
#ifdef DEBUG
print("ncomb_scan", d_ncomb_scan);
#endif
#ifdef DEBUG
printf("indices: ");
for (int i=0; i<n_combs; i++) {
unsigned int index = d_indices[i];
printf("%d ", index);
}
printf("\n");
#endif
CTSTART();
HitmapVector d_hitmap(n_roads);
// faster way would be to copy_if (layer,lhit) according to the isNewLayer flag to grab the last lhit
// then reduce_by_key(road) and write into tuple (no collision -> no MAX() needed)
thrust::reduce_by_key(
d_road.begin(), d_road.end(), // keys
thrust::make_transform_iterator(
thrust::make_zip_iterator(thrust::make_tuple(d_idt.begin(), d_lhit.begin())),
lhitToHitmap()), // vals
d_roadKey.begin(), // keys output
d_hitmap.begin(), // vals output
thrust::equal_to<int>(), // binary predicate
hitmapAccumulate()); // binary operator
CTSTOP("hitmaps");
#ifdef DEBUG
for (int i=0; i<n_roads; i++) {
Hitmap t = d_hitmap[i];
printf("road = %d, hitmap = (%d, %d, %d, %d, %d, %d)\n", i,
t.hitmap[0], t.hitmap[1], t.hitmap[2],
t.hitmap[3], t.hitmap[4], t.hitmap[5]);
}
#endif
// get combination hitmaps
CTSTART();
HitmapVector d_hitmap_combs(n_combs);
thrust::transform(
thrust::make_zip_iterator(thrust::make_tuple(d_indices_road.begin(), d_indices.begin())),
thrust::make_zip_iterator(thrust::make_tuple(d_indices_road.end(), d_indices.end())),
d_hitmap_combs.begin(),
hitmapComb(thrust::raw_pointer_cast(&d_hitmap[0])));
CTSTOP("hitmapComb");
#ifdef DEBUG
for (int i=0; i<n_combs; i++) {
unsigned int road = d_indices_road[i];
uint comb = d_indices[i];
Hitmap t = d_hitmap_combs[i];
printf("road = %d, comb = %d, hitmap = (%d, %d, %d, %d, %d, %d)\n", road, comb,
t.hitmap[0], t.hitmap[1], t.hitmap[2],
t.hitmap[3], t.hitmap[4], t.hitmap[5]);
}
#endif
// get absolute hit indices in the word data list
CTSTART();
thrust::transform(
thrust::make_zip_iterator(thrust::make_tuple(d_indices_road.begin(), d_hitmap_combs.begin())),
thrust::make_zip_iterator(thrust::make_tuple(d_indices_road.end(), d_hitmap_combs.end())),
d_hitmap_combs.begin(),
hitmapAbsoluteIndices(
thrust::raw_pointer_cast(&d_hitmap[0]),
thrust::raw_pointer_cast(&d_road_indices[0])));
CTSTOP("hitmapCombAbs");
#ifdef DEBUG
printf("\nabsolute combinations:\n");
for (int i=0; i<n_combs; i++) {
unsigned int road = d_indices_road[i];
uint comb = d_indices[i];
Hitmap t = d_hitmap_combs[i];
printf("road = %d, comb = %d, hitmap = (%d, %d, %d, %d, %d, %d)\n", road, comb,
t.hitmap[0], t.hitmap[1], t.hitmap[2],
t.hitmap[3], t.hitmap[4], t.hitmap[5]);
}
#endif
///////////////// fill tf on GPU
// Copy tf to the Device
long tfSize = sizeof(struct tf_arrays);
tf_arrays_t d_tf;
hipMalloc((void **)&d_tf, tfSize);
hipMemcpy(d_tf, tf, tfSize, hipMemcpyHostToDevice);
CTSTART();
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(
d_idt.begin(), d_idt.begin()+1, d_out1t.begin(), d_out2t.begin(), d_out3t.begin(),
d_evt.begin(), d_road.begin(), d_rhit.begin(), d_lhit.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
d_idt.end(), d_idt.end()+1, d_out1t.end(), d_out2t.end(), d_out3t.end(),
d_evt.end(), d_road.end(), d_rhit.end(), d_lhit.end())),
fill_tf_gpu(d_tf));
CTSTOP("thrust_fill");
/*
// Copy tf to the Host
CTSTART();
hipMemcpy(tf, d_tf, tfSize, hipMemcpyDeviceToHost);
CTSTOP("copyTFtoHost");
// for informational purposes
CTSTART();
hipMemcpy(d_tf, tf, tfSize, hipMemcpyHostToDevice);
CTSTOP("copyTFtoDevice");
*/
hipFree(d_tf);
///////////////// now fill tf (gf_fep_unpack)
for (ie = 0; ie < NEVTS; ie++) {
tf->evt_zid[ie][tf->evt_nroads[ie]] = -1; // because we set it to 0 for GPU version
}
CTSTART();
int id_last = -1;
int evt = EVT;
unsigned int *data = (unsigned int *) data_in;
for (int i = 0; i < n_words; i++) {
id = ids[i];
bool gf_xft = 0;
if (id == XFT_LYR_2) { // compatibility - stp
id = XFT_LYR;
gf_xft = 1;
}
int nroads = tf->evt_nroads[evt];
int nhits = tf->evt_nhits[evt][nroads][id];
// SVX Data
if (id < XFT_LYR) {
int zid = out1[i];
int lcl = out2[i];
int hit = out3[i];
tf->evt_hit[evt][nroads][id][nhits] = hit;
tf->evt_hitZ[evt][nroads][id][nhits] = zid;
tf->evt_lcl[evt][nroads][id][nhits] = lcl;
tf->evt_lclforcut[evt][nroads][id][nhits] = lcl;
tf->evt_layerZ[evt][nroads][id] = zid;
if (tf->evt_zid[evt][nroads] == -1) {
tf->evt_zid[evt][nroads] = zid & gf_mask(GF_SUBZ_WIDTH);
} else {
tf->evt_zid[evt][nroads] = (((zid & gf_mask(GF_SUBZ_WIDTH)) << GF_SUBZ_WIDTH)
+ (tf->evt_zid[evt][nroads] & gf_mask(GF_SUBZ_WIDTH)));
}
nhits = ++tf->evt_nhits[evt][nroads][id];
// Error Checking
if (nhits == MAX_HIT) tf->evt_err[evt][nroads] |= (1 << OFLOW_HIT_BIT);
if (id < id_last) tf->evt_err[evt][nroads] |= (1 << OUTORDER_BIT);
} else if (id == XFT_LYR && gf_xft == 0) {
// we ignore - stp
} else if (id == XFT_LYR && gf_xft == 1) {
int crv = out1[i];
int crv_sign = out2[i];
int phi = out3[i];
tf->evt_crv[evt][nroads][nhits] = crv;
tf->evt_crv_sign[evt][nroads][nhits] = crv_sign;
tf->evt_phi[evt][nroads][nhits] = phi;
nhits = ++tf->evt_nhits[evt][nroads][id];
// Error Checking
if (nhits == MAX_HIT) tf->evt_err[evt][nroads] |= (1 << OFLOW_HIT_BIT);
if (id < id_last) tf->evt_err[evt][nroads] |= (1 << OUTORDER_BIT);
} else if (id == EP_LYR) {
int sector = out1[i];
int amroad = out2[i];
tf->evt_cable_sect[evt][nroads] = sector;
tf->evt_sect[evt][nroads] = sector;
tf->evt_road[evt][nroads] = amroad;
tf->evt_err_sum[evt] |= tf->evt_err[evt][nroads];
nroads = ++tf->evt_nroads[evt];
if (nroads > MAXROAD) {
printf("The limit on the number of roads fitted by the TF is %d\n",MAXROAD);
printf("You reached that limit evt->nroads = %d\n",nroads);
}
for (id = 0; id <= XFT_LYR; id++)
tf->evt_nhits[evt][nroads][id] = 0;
tf->evt_err[evt][nroads] = 0;
tf->evt_zid[evt][nroads] = -1;
id = -1; id_last = -1;
} else if (id == EE_LYR) {
int ee_word = out1[i];
tf->evt_ee_word[evt] = ee_word;
tf->totEvts++;
evt++;
id = -1; id_last = -1;
} else {
printf("Error INV_DATA_BIT: layer = %u\n", id);
tf->evt_err[evt][nroads] |= (1 << INV_DATA_BIT);
}
id_last = id;
} //end loop on input words
CTSTOP("fill_CPU");
}
///////////////////////////////////////////////////////////////////////////////////////////
__global__ void
kTestKernel_tf(tf_arrays_t tf)
{
long idx = blockIdx.x * blockDim.x + threadIdx.x;
tf->gf_emsk = 1; // test - stp
}
void launchTestKernel_tf(tf_arrays_t tf, unsigned int *data_in, int n_words)
{
printf("sizeof(tf_arrays) = %u\n", sizeof(struct tf_arrays));
printf("sizeof(tf) = %u\n", sizeof(tf));
//printf("%d\n", tf);
//printf("before kernel: %u\n", tf->dummy);
printf("before kernel: %u\n", tf->gf_emsk);
long tfSize = sizeof(struct tf_arrays);
// Allocate device tf array
tf_arrays_t d_tf;
hipMalloc((void **)&d_tf, tfSize);
// Copy tf to the Device
hipMemcpy(d_tf, tf, tfSize, hipMemcpyHostToDevice);
// Kernel
hipLaunchKernelGGL(( kTestKernel_tf), dim3(n_words), dim3(1), 0, 0, d_tf);
// Copy tf to the Host
hipMemcpy(tf, d_tf, tfSize, hipMemcpyDeviceToHost);
printf("after kernel: %u\n", tf->gf_emsk);
}
///////////////////////////////////////////////////////////////////////////////////////////
__global__ void
kFepComb(unsigned int *data_out, unsigned int *data_in)
{
/*
This function calculates all the combinations of hits given a certain road.
For each road we can have multiple hits per layer.
the input of this function is the set of "evt_" arrays, the output is:
int fep_ncmb[NEVTS][MAXROAD];
int fep_hit[NEVTS][MAXROAD][MAXCOMB][NSVX_PLANE];
int fep_phi[NEVTS][MAXROAD][MAXCOMB];
int fep_crv[NEVTS][MAXROAD][MAXCOMB];
int fep_lcl[NEVTS][MAXROAD][MAXCOMB];
int fep_lclforcut[NEVTS][MAXROAD][MAXCOMB];
int fep_hitmap[NEVTS][MAXROAD][MAXCOMB];
int fep_zid[NEVTS][MAXROAD];
int fep_road[NEVTS][MAXROAD];
int fep_sect[NEVTS][MAXROAD];
int fep_cable_sect[NEVTS][MAXROAD];
int fep_err[NEVTS][MAXROAD][MAXCOMB][MAXCOMB5H];
int fep_crv_sign[NEVTS][MAXROAD][MAXCOMB];
int fep_ncomb5h[NEVTS][MAXROAD][MAXCOMB];
int fep_hitZ[NEVTS][MAXROAD][MAXCOMB][NSVX_PLANE];
int fep_nroads[NEVTS];
int fep_ee_word[NEVTS];
int fep_err_sum[NEVTS];
*/
}
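// Note: the thrust pipeline in launchFepUnpackKernel above (d_ncomb, d_indices_road,
// d_indices, hitmapComb, hitmapAbsoluteIndices) already enumerates the per-road hit
// combinations and their absolute word indices; a device implementation of kFepComb
// would essentially write that expansion into the fep_* arrays listed in the comment.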
void launchFepComb(unsigned int *data_res, unsigned int *data_in)
{
hipLaunchKernelGGL(( kFepComb) , dim3(N_BLOCKS), dim3(N_THREADS_PER_BLOCK), 0, 0, data_res, data_in);
}
/////////////////////////////////////////////////////////////////////////////////
__global__ void
kFit(int *fit_fit_dev, int *fep_ncmb_dev)
{
/*
This function, for each road, for each combination:
- retrieves the correct constant set, based on
tf->fep_hitmap[ie][ir][ic], tf->fep_lcl[ie][ir][ic], tf->fep_zid[ie][ir]
- performs the scalar product.
It handles the 5/5 tracks as well (for each 5/5 track, 5 4/5 fits are run, and only the
best is kept).
The inputs of this function are the fep arrays, the output are:
long long int fit_fit[NEVTS][6][MAXROAD][MAXCOMB][MAXCOMB5H];
int fit_err[NEVTS][MAXROAD][MAXCOMB][MAXCOMB5H];
int fit_err_sum[NEVTS];
All the arrays needed by the function (constants, etc..) need to be stored on
memory easily accessible by the GPU.
*/
/*
int ir, ic, ip, ih, ihit, il, i;
int rc = 1;
int hit[SVTNHITS];
long long int coeff[NFITTER][SVTNHITS];
int coe_addr, int_addr; // Address for coefficients and intercept
int mka_addr; // Address for MKADDR memory
long long int theintcp = 0;
int sign_crv = 0;
int which, lwhich;
int iz;
int newhitmap;
int g = 0;
int p0[6], ix[7];
int ie;
// struct fep_out *fep;
//struct fit_out *trk;
int map[7][7] = {
{ 0, 1, 2, 3, -1, 4, 5 }, // 01235
{ 0, 1, 2, -1, 3, 4, 5 }, // 01245
{ 0, 1, -1, 2, 3, 4, 5 }, // 01345
{ 0, -1, 1, 2, 3, 4, 5 }, // 02345
{ -1, 0, 1, 2, 3, 4, 5 }, // 12345
{ 0, 1, 2, 3, -1, 4, 5 }, // (??)
{ 0, 1, 2, 3, -1, 4, 5 } // (??)
};
*/
/* --------- Executable starts here ------------ */
//the following are just test...
// ie =blockIdx.x;
// if(ie != 0) ie = ie + (MAXROAD-1);
// for(ir = 0; ir < MAXROAD; ir++) {
// i = ie+ir;
// fit_fit_dev[i] = fep_ncmb_dev[i];
// }
//
// fit_fit_dev[ie] = fep_nroads_dev[ie];
//
// int x = blockIdx.x;
//
// i=0;
// for(ir = 0; ir < 100; ir++) {
// i = blockIdx.x + 100*ir;
//
// fit_fit_dev[i] = fep_ncmb_dev[i];
// }
}
void launchFitKernel(int *fit_fit_dev, int *fep_ncmb_dev)
{
hipLaunchKernelGGL(( kFit) , dim3(NEVTS), dim3(1), 0, 0, fit_fit_dev, fep_ncmb_dev);
}
//////////////////////////////////////////////////////////
__global__ void kTestKernel(int *a, int *b, int *c)
{
int x = blockIdx.x;
int y;
int i;
for (y = 0; y < ROWS; y++) {
i = x + (COLUMNS * y);
c[i] = a[i] + b[i];
}
}
void launchTestKernel(int *dev_a, int *dev_b, int *dev_c)
{
hipLaunchKernelGGL(( kTestKernel) , dim3(COLUMNS), dim3(1), 0, 0, dev_a, dev_b, dev_c);
}
| a0df9c709e3ee17f65b0a29c6487c32f6aeff191.cu | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "main.h"
#include "svtsim_functions.h"
#include "functionkernel.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/scatter.h>
#include <thrust/functional.h>
#include <thrust/binary_search.h>
#include <thrust/gather.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/constant_iterator.h>
// typedefs for shorthand
typedef thrust::tuple<unsigned int, unsigned int> DataPair;
// (layer, out1, out2, out3)
typedef thrust::tuple<unsigned int, unsigned int,
unsigned int, unsigned int> UnpackTuple;
typedef thrust::device_vector<unsigned int>::iterator IntIterator;
typedef thrust::tuple<IntIterator, IntIterator,
IntIterator, IntIterator> IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
struct Hitmap {
int hitmap[6];
};
typedef thrust::device_vector<Hitmap> HitmapVector;
#define COLUMNS 3 //var for testkernel
#define ROWS 2 //var for testkernel
#define N_BLOCKS 1
#define N_THREADS_PER_BLOCK 16
#define CUDA_TIMING
#define DEBUG
#define MAX(x,y) ((x)>(y) ? (x):(y))
// CUDA timer macros
cudaEvent_t c_start, c_stop;
inline void CTSTART() {
#ifdef CUDA_TIMING
cudaEventCreate(&c_start);
cudaEventCreate(&c_stop);
cudaEventRecord(c_start, 0);
#endif
}
inline void CTSTOP(const char *file) {
#ifdef CUDA_TIMING
cudaEventRecord(c_stop, 0);
cudaEventSynchronize(c_stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, c_start, c_stop);
elapsedTime *= 1000.0; // ms to us
char filePath[100];
sprintf(filePath, "timer/%s.txt", file);
FILE *outFile = fopen(filePath, "a");
if (outFile != NULL) {
fprintf(outFile, "%f\n", elapsedTime);
fclose(outFile);
} else {
printf("Warning: cannot open %s\n", filePath);
}
#endif
}
// CUDA variables
int *ids, *out1, *out2, *out3;
int *d_ids, *d_out1, *d_out2, *d_out3;
unsigned int *d_data_in;
long sizeW;
#define gf_mask_gpu(x) (d_gf_maskdata[(x)])
__constant__ int d_gf_maskdata[33] = {
0x00000000,
0x00000001, 0x00000003, 0x00000007, 0x0000000f,
0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
0x0001ffff, 0x0003ffff, 0x0007ffff, 0x000fffff,
0x001fffff, 0x003fffff, 0x007fffff, 0x00ffffff,
0x01ffffff, 0x03ffffff, 0x07ffffff, 0x0fffffff,
0x1fffffff, 0x3fffffff, 0x7fffffff, 0xffffffff
};
void GPU_Init(int n_words)
{
sizeW = sizeof(int) * n_words;
ids = (int *)malloc(sizeW);
out1 = (int *)malloc(sizeW);
out2 = (int *)malloc(sizeW);
out3 = (int *)malloc(sizeW);
cudaMalloc((void **)&d_data_in, sizeW);
cudaMalloc((void **)&d_ids, sizeW);
cudaMalloc((void **)&d_out1, sizeW);
cudaMalloc((void **)&d_out2, sizeW);
cudaMalloc((void **)&d_out3, sizeW);
}
void GPU_Destroy()
{
free(ids);
free(out1);
free(out2);
free(out3);
cudaFree(d_data_in);
cudaFree(d_ids);
cudaFree(d_out1);
cudaFree(d_out2);
cudaFree(d_out3);
}
template <typename Vector>
void print(const char *title, const Vector &v)
{
std::cout << title << ": ";
for(size_t i = 0; i < v.size(); i++)
std::cout << v[i] << " ";
std::cout << "\n";
}
// unpacking function itself. unpack the passed-in int to the tuple.
struct unpacker : public thrust::unary_function<DataPair, UnpackTuple> {
/* parallel word_decode kernel.
each word is decoded and layer (id) and output values are set.
we only use 3 output arrays since depending on the layer,
we only need 3 different values. this saves allocating/copying empty arrays
format (out1, out2, out3):
id < XFT_LYR: zid, lcl, hit
id == XFT_LYR: crv, crv_sign, phi
id == IP_LYR: sector, amroad, 0
id == EE_LYR: ee_word
*/
__host__ __device__
UnpackTuple operator()(DataPair t) {
unsigned int word = thrust::get<0>(t);
unsigned int prev_word = thrust::get<1>(t);
unsigned int val1 = 0, val2 = 0, val3 = 0;
int ee, ep, lyr;
lyr = -999; /* Any invalid numbers != 0-7 */
/*
if (word > gf_mask_gpu(SVT_WORD_WIDTH)) {
//printf("gf_iword_decode: Input data is larger than the maximum SVT word");
//return SVTSIM_GF_ERR;
return;
}
*/
/* check if this is a EP or EE word */
ee = (word >> SVT_EE_BIT) & gf_mask_gpu(1);
ep = (word >> SVT_EP_BIT) & gf_mask_gpu(1);
// check if this is the second XFT word
//int prev_word = (idx==0) ? 0 : words[idx-1];
bool xft = ((prev_word >> SVT_LYR_LSB) & gf_mask_gpu(SVT_LYR_WIDTH)) == XFT_LYR ? 1 : 0;
if (ee && ep) { /* End of Event word */
val1 = word; // ee_word
//*parity_in = (word >> SVT_PAR_BIT) & gf_mask_gpu(1);
lyr = EE_LYR;
} else if (ee) { /* only EE bit ON is error condition */
//*err |= (1 << UNKNOWN_ERR);
lyr = EE_LYR; /* We have to check */
} else if (ep) { /* End of Packet word */
lyr = EP_LYR;
val1 = 6; // sector
/* *sector = (word >> SVT_SECT_LSB) & gf_mask_gpu(SVT_SECT_WIDTH); */
val2 = word & gf_mask_gpu(AMROAD_WORD_WIDTH); // amroad
} else if (xft) { /* Second XFT word */
val1 = (word >> SVT_CRV_LSB) & gf_mask_gpu(SVT_CRV_WIDTH); // crv
val2 = (word >> (SVT_CRV_LSB + SVT_CRV_WIDTH)) & gf_mask_gpu(1); // crv_sign
val3 = word & gf_mask_gpu(SVT_PHI_WIDTH); // phi
lyr = XFT_LYR_2;
} else { /* SVX hits or the first XFT word */
lyr = (word >> SVT_LYR_LSB) & gf_mask_gpu(SVT_LYR_WIDTH);
if (lyr == XFT_LYRID) lyr = XFT_LYR; // probably don't need - stp
val1 = (word >> SVT_Z_LSB) & gf_mask_gpu(SVT_Z_WIDTH); // zid
val2 = (word >> SVT_LCLS_BIT) & gf_mask_gpu(1); // lcl
val3 = word & gf_mask_gpu(SVT_HIT_WIDTH); // hit
}
return thrust::make_tuple(lyr,val1,val2,val3);
}
};
struct isNewRoad : public thrust::unary_function<unsigned int, bool> {
__host__ __device__ bool operator()(const unsigned int &id) {
//return id == EP_LYR;
return id == EP_LYR || id == EE_LYR;
}
};
struct isNewHit : public thrust::unary_function<unsigned int, bool> {
__host__ __device__ bool operator()(const unsigned int &id) {
return id < XFT_LYR || id == XFT_LYR_2;
}
};
struct isNewEvt : public thrust::unary_function<unsigned int, bool> {
__host__ __device__ bool operator()(const unsigned int &id) {
return id == EE_LYR;
}
};
struct lhitToHitmap : public thrust::unary_function<DataPair, Hitmap> {
__host__ __device__ Hitmap operator()(const DataPair &t) {
int layer = thrust::get<0>(t);
int lhit = thrust::get<1>(t);
if (layer == XFT_LYR_2) layer = XFT_LYR;
Hitmap h;
for (int i=0; i<=XFT_LYR; i++)
h.hitmap[i] = layer == i ? lhit : 0;
return h;
}
};
struct tupleSecond {// : public thrust::unary_function<T, bool> {
template <typename T>
__host__ __device__ bool operator()(const T &t) {
return thrust::get<1>(t);
}
};
struct isEqualLayer : public thrust::binary_function<unsigned int, unsigned int, bool>
{
__host__ __device__ bool operator()(const unsigned int &a, const unsigned int &b) {
return a == b || ((a == XFT_LYR || a == XFT_LYR_2) && (b == XFT_LYR || b == XFT_LYR_2));
}
};
struct layerHitMultiply
{
template <typename T>
__host__ __device__ T operator()(const T &a, const T &b) {
//return a * (b>1 ? b:1);
return MAX(a,1) * MAX(b,1);
}
};
struct hitmapAccumulate
{
template <typename T>
__host__ __device__ T operator()(const T& a, const T& b) {
Hitmap r;
for (int i=0; i<=XFT_LYR; i++)
r.hitmap[i] = MAX(a.hitmap[i], b.hitmap[i]);
return r;
}
};
struct hitmapComb : public thrust::unary_function<DataPair,Hitmap>
{
Hitmap *d_hitmap;
hitmapComb(Hitmap *_hm) : d_hitmap(_hm) {} // constructor
template <typename T>
__host__ __device__ Hitmap operator()(const T& t) {
unsigned int road = thrust::get<0>(t);
unsigned int ic = thrust::get<1>(t) -1;
Hitmap hm = d_hitmap[road];
Hitmap r;
for (int i=0; i<=XFT_LYR; i++) {
int nh = hm.hitmap[i];
if (nh == 0) {
r.hitmap[i] = 0;
} else {
r.hitmap[i] = ic % nh + 1;
ic /= nh;
}
}
return r;
}
};
struct hitmapAbsoluteIndices : public thrust::unary_function<DataPair,Hitmap>
{
Hitmap *d_hitmap;
unsigned int *d_road_indices;
hitmapAbsoluteIndices(Hitmap *_hm, unsigned int *_ri) : d_hitmap(_hm), d_road_indices(_ri) {} // constructor
template <typename T>
__host__ __device__ Hitmap operator()(const T& t) {
unsigned int road = thrust::get<0>(t);
Hitmap hm_c = thrust::get<1>(t);
Hitmap hm = d_hitmap[road];
int offset = d_road_indices[road];
Hitmap r;
int ihits = 0;
for (int i=0; i<=XFT_LYR; i++) {
int ih = hm_c.hitmap[i] - 1;
if (i == XFT_LYR) ih += 1+ih; // to account for unused first XFT word
if (ih < 0) r.hitmap[i] = -1;
else r.hitmap[i] = offset + ihits + ih;
ihits += hm.hitmap[i];
}
return r;
}
};
// BinaryPredicate for the head flag segment representation
// equivalent to thrust::not2(thrust::project2nd<int,int>()));
template <typename HeadFlagType>
struct head_flag_predicate : public thrust::binary_function<HeadFlagType,HeadFlagType,bool>
{
__host__ __device__
bool operator()(HeadFlagType left, HeadFlagType right) const {
return !right;
}
};
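// Example: with head flags [1,0,0,1,0] the predicate (!right) makes a *_scan_by_key
// start a new segment exactly where a flag is 1, i.e. it produces the segments {0,1,2} and {3,4}.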
struct fill_tf_gpu
{
tf_arrays_t tf; // pointer in device memory
fill_tf_gpu(tf_arrays_t _tf) : tf(_tf) {} // constructor
template <typename Tuple>
__device__ void operator()(Tuple t) {
unsigned int id = thrust::get<0>(t);
unsigned int id_next = thrust::get<1>(t);
unsigned int out1 = thrust::get<2>(t);
unsigned int out2 = thrust::get<3>(t);
unsigned int out3 = thrust::get<4>(t);
unsigned int evt = thrust::get<5>(t);
unsigned int road = thrust::get<6>(t);
unsigned int rhit = thrust::get<7>(t) -1;
unsigned int lhit = thrust::get<8>(t) -1;
// SVX Data
if (id < XFT_LYR) {
int zid = out1;
int lcl = out2;
int hit = out3;
tf->evt_hit[evt][road][id][lhit] = hit;
tf->evt_hitZ[evt][road][id][lhit] = zid;
tf->evt_lcl[evt][road][id][lhit] = lcl;
tf->evt_lclforcut[evt][road][id][lhit] = lcl;
tf->evt_layerZ[evt][road][id] = zid;
if (rhit == 0) {
atomicOr(&tf->evt_zid[evt][road], zid & gf_mask_gpu(GF_SUBZ_WIDTH));
} else if (id_next == XFT_LYR) {
atomicOr(&tf->evt_zid[evt][road], (zid & gf_mask_gpu(GF_SUBZ_WIDTH)) << GF_SUBZ_WIDTH);
}
//tf->evt_nhits[evt][road][id]++;
atomicAdd(&tf->evt_nhits[evt][road][id], 1);
// Error Checking
if (lhit == MAX_HIT) tf->evt_err[evt][road] |= (1 << OFLOW_HIT_BIT);
//if (id < id_last) tf->evt_err[evt][road] |= (1 << OUTORDER_BIT);
} else if (id == XFT_LYR) {
// we ignore but leave here to not trigger 'else' case - stp
} else if (id == XFT_LYR_2) {
id = XFT_LYR; // for XFT_LYR_2 kludge - stp
int crv = out1;
int crv_sign = out2;
int phi = out3;
tf->evt_crv[evt][road][lhit] = crv;
tf->evt_crv_sign[evt][road][lhit] = crv_sign;
tf->evt_phi[evt][road][lhit] = phi;
//tf->evt_nhits[evt][road][id]++;
atomicAdd(&tf->evt_nhits[evt][road][id], 1);
// Error Checking
if (lhit == MAX_HIT) tf->evt_err[evt][road] |= (1 << OFLOW_HIT_BIT);
//if (id < id_last) tf->evt_err[evt][road] |= (1 << OUTORDER_BIT);
} else if (id == EP_LYR) {
int sector = out1;
int amroad = out2;
tf->evt_cable_sect[evt][road] = sector;
tf->evt_sect[evt][road] = sector;
tf->evt_road[evt][road] = amroad;
tf->evt_err_sum[evt] |= tf->evt_err[evt][road];
//tf->evt_nroads[evt]++;
atomicAdd(&tf->evt_nroads[evt], 1);
if (road > MAXROAD) {
;
//printf("The limit on the number of roads fitted by the TF is %d\n",MAXROAD);
//printf("You reached that limit evt->road = %d\n",road);
}
//for (id = 0; id <= XFT_LYR; id++)
// tf->evt_nhits[evt][road][id] = 0;
//tf->evt_err[evt][road] = 0;
//tf->evt_zid[evt][road] = -1;
} else if (id == EE_LYR) {
int ee_word = out1;
tf->evt_ee_word[evt] = ee_word;
//tf->totEvts++;
atomicAdd(&tf->totEvts, 1);
} else {
//printf("Error INV_DATA_BIT: layer = %u\n", id);
tf->evt_err[evt][road] |= (1 << INV_DATA_BIT);
}
}
};
__global__ void
k_word_decode(int N, unsigned int *words, int *ids, int *out1, int *out2, int *out3)
{
/* parallel word_decode kernel.
each word is decoded and layer (id) and output values are set.
we only use 3 output arrays since depending on the layer,
we only need 3 different values. this saves allocating/copying empty arrays
format (out1, out2, out3):
id < XFT_LYR: zid, lcl, hit
id == XFT_LYR: crv, crv_sign, phi
id == IP_LYR: sector, amroad, 0
id == EE_LYR: ee_word
*/
long idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) return;
//ids[idx] = idx; return;
int word = words[idx];
int ee, ep, lyr;
lyr = -999; /* Any invalid numbers != 0-7 */
out1[idx] = 0;
out2[idx] = 0;
out3[idx] = 0;
if (word > gf_mask_gpu(SVT_WORD_WIDTH)) {
//printf("gf_iword_decode: Input data is larger than the maximum SVT word");
//return SVTSIM_GF_ERR;
ids[idx] = lyr;
return;
}
/* check if this is a EP or EE word */
ee = (word >> SVT_EE_BIT) & gf_mask_gpu(1);
ep = (word >> SVT_EP_BIT) & gf_mask_gpu(1);
// check if this is the second XFT word
int prev_word = (idx==0) ? 0 : words[idx-1];
bool xft = ((prev_word >> SVT_LYR_LSB) & gf_mask_gpu(SVT_LYR_WIDTH)) == XFT_LYR ? 1 : 0;
if (ee && ep) { /* End of Event word */
out1[idx] = word; // ee_word
//*parity_in = (word >> SVT_PAR_BIT) & gf_mask_gpu(1);
lyr = EE_LYR;
} else if (ee) { /* only EE bit ON is error condition */
//*err |= (1 << UNKNOWN_ERR);
lyr = EE_LYR; /* We have to check */
} else if (ep) { /* End of Packet word */
lyr = EP_LYR;
out1[idx] = 6; // sector
/* *sector = (word >> SVT_SECT_LSB) & gf_mask_gpu(SVT_SECT_WIDTH); */
out2[idx] = word & gf_mask_gpu(AMROAD_WORD_WIDTH); // amroad
} else if (xft) { /* Second XFT word */
out1[idx] = (word >> SVT_CRV_LSB) & gf_mask_gpu(SVT_CRV_WIDTH); // crv
out2[idx] = (word >> (SVT_CRV_LSB + SVT_CRV_WIDTH)) & gf_mask_gpu(1); // crv_sign
out3[idx] = word & gf_mask_gpu(SVT_PHI_WIDTH); // phi
lyr = XFT_LYR_2;
} else { /* SVX hits or the first XFT word */
lyr = (word >> SVT_LYR_LSB) & gf_mask_gpu(SVT_LYR_WIDTH);
if (lyr == XFT_LYRID) lyr = XFT_LYR; // probably don't need - stp
out1[idx] = (word >> SVT_Z_LSB) & gf_mask_gpu(SVT_Z_WIDTH); // zid
out2[idx] = (word >> SVT_LCLS_BIT) & gf_mask_gpu(1); // lcl
out3[idx] = word & gf_mask_gpu(SVT_HIT_WIDTH); // hit
}
ids[idx] = lyr;
}
void scan_threads_per_block_fep(int n_words, unsigned int *words, int *ids, int *out1, int *out2, int *out3)
{
cudaEvent_t c_start, c_stop;
cudaEventCreate(&c_start);
cudaEventCreate(&c_stop);
int step = 2; //64;
int n_threads_max;
float elapsedTime, totalTime = 0;
int i;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
n_threads_max = deviceProp.maxThreadsPerBlock;
printf("n_threads_max = %d\n", n_threads_max);
// call once without timing to be sure GPU is initialized
i = n_threads_max;
k_word_decode <<<(n_words+i-1)/i, i>>>
(n_words, d_data_in, d_ids, d_out1, d_out2, d_out3);
for (i=1; i<n_threads_max; i+=step) {
int j, n_iter = 10;
totalTime = 0;
for (j = 0; j < n_iter; j++) {
cudaEventRecord(c_start, 0);
k_word_decode <<<(n_words+i-1)/i, i>>>
(n_words, d_data_in, d_ids, d_out1, d_out2, d_out3);
cudaEventRecord(c_stop, 0);
cudaEventSynchronize(c_stop);
cudaEventElapsedTime(&elapsedTime, c_start, c_stop);
elapsedTime *= 1000.0; // ms to us
totalTime += elapsedTime;
}
elapsedTime = totalTime / n_iter;
FILE *outFile = fopen("threads_scan.txt", "a");
fprintf(outFile, "%d\t%f\n", i, elapsedTime);
fclose(outFile);
if (i==1) i = 0;
}
}
void launchFepUnpackKernel(tf_arrays_t tf, unsigned int *data_in, int n_words)
{
/* initializing arrays */
int ie, id;
tf->totEvts = 0;
for (ie = 0; ie < NEVTS; ie++) {
tf->evt_nroads[ie] = 0;
tf->evt_err_sum[ie] = 0;
for (id = 0; id <= NSVX_PLANE; id++) {
tf->evt_layerZ[ie][tf->evt_nroads[ie]][id] = 0;
}
for (id = 0; id <= XFT_LYR; id++) {
tf->evt_nhits[ie][tf->evt_nroads[ie]][id] = 0;
}
tf->evt_err[ie][tf->evt_nroads[ie]] = 0;
//tf->evt_zid[ie][tf->evt_nroads[ie]] = -1;
tf->evt_zid[ie][tf->evt_nroads[ie]] = 0; // we need to or these - stp
// printf("tf->evt_nroads[%d] = %d, tf->evt_zid[%d][tf->evt_nroads[%d]] = %d\n", ie, tf->evt_nroads[ie], ie, ie, tf->evt_zid[ie][tf->evt_nroads[ie]]);
//}
}
CTSTART();
// Copy data to the Device
cudaMemcpy(d_data_in, data_in, sizeW, cudaMemcpyHostToDevice);
CTSTOP("copyWordsToDevice_CUDA");
//scan_threads_per_block_fep(n_words, d_data_in, d_ids, d_out1, d_out2, d_out3);
CTSTART();
k_word_decode <<<(n_words+N_THREADS_PER_BLOCK-1)/N_THREADS_PER_BLOCK, N_THREADS_PER_BLOCK>>>
(n_words, d_data_in, d_ids, d_out1, d_out2, d_out3);
CTSTOP("k_word_decode");
// Copy output to the Host
CTSTART();
cudaMemcpy(ids, d_ids, sizeW, cudaMemcpyDeviceToHost);
cudaMemcpy(out1, d_out1, sizeW, cudaMemcpyDeviceToHost);
cudaMemcpy(out2, d_out2, sizeW, cudaMemcpyDeviceToHost);
cudaMemcpy(out3, d_out3, sizeW, cudaMemcpyDeviceToHost);
CTSTOP("k_word_decode_copyToHost");
//////////////////// also do with THRUST
// input data
thrust::device_vector<unsigned int> d_vec(n_words); // this would be done in the GPU_Init()
CTSTART();
thrust::copy(data_in, data_in + n_words, d_vec.begin());
CTSTOP("copyWordsToDevice_thrust");
// output vectors
thrust::device_vector<unsigned int> d_idt(n_words);
thrust::device_vector<unsigned int> d_out1t(n_words);
thrust::device_vector<unsigned int> d_out2t(n_words);
thrust::device_vector<unsigned int> d_out3t(n_words);
// unpack
CTSTART();
thrust::transform(
thrust::make_zip_iterator(thrust::make_tuple(d_vec.begin(), d_vec.begin()-1)),
thrust::make_zip_iterator(thrust::make_tuple(d_vec.end(), d_vec.end()-1)),
thrust::make_zip_iterator(thrust::make_tuple(d_idt.begin(), d_out1t.begin(),
d_out2t.begin(), d_out3t.begin())),
unpacker());
CTSTOP("thrust_unpacker");
// copy to CPU for verification
thrust::host_vector<unsigned int> h_test0 = d_idt;
thrust::host_vector<unsigned int> h_test1 = d_out1t;
thrust::host_vector<unsigned int> h_test2 = d_out2t;
thrust::host_vector<unsigned int> h_test3 = d_out3t;
/*
int ndiff = 0;
for (int i=0; i<n_words; i++) {
if (h_test0[i] != ids[i]) ndiff++;
if (h_test1[i] != out1[i]) ndiff++;
if (h_test2[i] != out2[i]) ndiff++;
if (h_test3[i] != out3[i]) ndiff++;
}
printf("ndiff = %d\n", ndiff);
printf("nmatch = %d\n", 4*n_words - ndiff);
*/
//// fill nevt, nroad, nhit arrays
//// want to restart counting according to evt > road > layer > hit
thrust::device_vector<unsigned int> d_evt(n_words);
thrust::device_vector<unsigned int> d_road(n_words);
thrust::device_vector<unsigned int> d_rhit(n_words);
thrust::device_vector<unsigned int> d_lhit(n_words);
CTSTART();
thrust::transform(d_idt.begin(), d_idt.end(), d_road.begin(), isNewRoad());
CTSTOP("scans_singleTransform");
CTSTART();
thrust::exclusive_scan(
thrust::make_transform_iterator(d_idt.begin(), isNewEvt()),
thrust::make_transform_iterator(d_idt.end(), isNewEvt()),
d_evt.begin());
CTSTOP("scans_exclusive_scan");
CTSTART();
thrust::exclusive_scan_by_key(
d_evt.begin(), d_evt.end(), // keys
thrust::make_transform_iterator(d_idt.begin(), isNewRoad()), // vals
d_road.begin());
CTSTOP("scans_exclusive_scan_by_key");
CTSTART();
thrust::inclusive_scan_by_key(
d_road.begin(), d_road.end(), // keys
thrust::make_transform_iterator(d_idt.begin(), isNewHit()), //vals
d_rhit.begin());
CTSTOP("scans_inclusive_scan_by_key_rhit");
CTSTART();
thrust::inclusive_scan_by_key(
d_idt.begin(), d_idt.end(), // keys
thrust::make_transform_iterator(d_idt.begin(), isNewHit()), //vals
d_lhit.begin(),
isEqualLayer()); // binary predicate
CTSTOP("scans_inclusive_scan_by_key_lhit");
//// alternate method of segmenting based on flags instead of scans
thrust::device_vector<unsigned int> d_evt_flag(n_words);
thrust::device_vector<unsigned int> d_road_flag(n_words);
//thrust::device_vector<unsigned int> d_rhit_flag(n_words);
//thrust::device_vector<unsigned int> d_lhit_flag(n_words);
thrust::transform(d_idt.begin(), d_idt.end(), d_evt_flag.begin(), isNewEvt());
thrust::transform(d_idt.begin(), d_idt.end(), d_road_flag.begin(), isNewRoad());
CTSTART();
// can do key-based operations on flags instead of scans
thrust::inclusive_scan_by_key(
d_road_flag.begin(), d_road_flag.end(), // keys
thrust::make_transform_iterator(d_idt.begin(), isNewHit()), //vals
d_rhit.begin(),
head_flag_predicate<unsigned int>());
CTSTOP("scan_inclusive_scan_by_key_rhit_flags");
//// calculate number of combinations per road
// for the size of these, only need n_roads, but might be slower(?) to wait for
// that result to come back to CPU
thrust::device_vector<unsigned int> d_roadKey(n_words); // will soon only take up n_roads
thrust::device_vector<unsigned int> d_ncomb(n_words); // will soon only take up n_roads
CTSTART();
size_t n_roads = thrust::reduce_by_key(
d_road.begin(), d_road.end(), // keys
d_lhit.begin(), // vals
d_roadKey.begin(), // keys output
d_ncomb.begin(), // vals output
thrust::equal_to<int>(), // binary predicate
layerHitMultiply() // binary operator
).first - d_roadKey.begin(); // new output size
CTSTOP("reduce_by_key");
#ifdef DEBUG
for (int i=0; i<n_words; i++) {
unsigned int evt = d_evt[i];
unsigned int road = d_road[i];
unsigned int rhit = d_rhit[i];
unsigned int lhit = d_lhit[i];
printf("%.6x\tevt = %d\troad = %d\trhit = %d\tlayer = %d\tlhit = %d\tout=(%.6x,%.6x,%.6x)\n", data_in[i], evt, road, rhit, h_test0[i], lhit, h_test1[i], h_test2[i], h_test3[i]);
}
#endif
#ifdef DEBUG
for (int i=0; i<n_roads; i++) {
unsigned int road = d_roadKey[i];
unsigned int ncomb = d_ncomb[i];
printf("road %d has %d combinations\n", road, ncomb);
}
#endif
// get global road offset indices
CTSTART();
thrust::device_vector<unsigned int> d_road_indices(n_roads);
thrust::copy_if(thrust::make_zip_iterator(thrust::make_tuple(thrust::counting_iterator<int>(1), d_road_flag.begin())),
thrust::make_zip_iterator(thrust::make_tuple(thrust::counting_iterator<int>(n_words+1), d_road_flag.end())),
thrust::make_zip_iterator(thrust::make_tuple(d_road_indices.begin(), thrust::constant_iterator<int>(0))),
tupleSecond());
CTSTOP("road_indices");
#ifdef DEBUG
print("road_indices", d_road_indices);
#endif
CTSTART();
thrust::device_vector<unsigned int> d_ncomb_scan(n_roads);
thrust::inclusive_scan(d_ncomb.begin(), d_ncomb.begin() + n_roads, d_ncomb_scan.begin());
//unsigned int n_combs = thrust::reduce(d_ncomb.begin(), d_ncomb.end());
unsigned int n_combs = d_ncomb_scan.back();
#ifdef DEBUG
printf("total combinations: %d\n", n_combs);
#endif
// get the combination indices. might be able to do this better with an iterator, like
// https://github.com/thrust/thrust/blob/master/examples/repeated_range.cu
thrust::device_vector<unsigned int> d_indices(n_combs);
thrust::lower_bound(d_ncomb_scan.begin(), d_ncomb_scan.end(),
thrust::counting_iterator<unsigned int>(1),
thrust::counting_iterator<unsigned int>(n_combs + 1),
d_indices.begin());
thrust::device_vector<unsigned int> d_indices_road(d_indices);
#ifdef DEBUG
print("indices_road", d_indices_road);
#endif
/* // can also do with a scan but will take longer
thrust::inclusive_scan_by_key(
d_indices.begin(), d_indices.end(),
thrust::constant_iterator<int>(1),
d_indices.begin());
*/
thrust::gather(d_indices.begin(), d_indices.end(),
d_ncomb_scan.begin(),
d_indices.begin());
thrust::transform(d_indices.begin(), d_indices.end(),
thrust::constant_iterator<int>(*d_ncomb_scan.begin()),
d_indices.begin(),
thrust::minus<int>());
thrust::transform(thrust::counting_iterator<int>(1),
thrust::counting_iterator<int>(n_combs + 1),
d_indices.begin(),
d_indices.begin(),
thrust::minus<int>());
CTSTOP("indices");
#ifdef DEBUG
print("ncomb_scan", d_ncomb_scan);
#endif
#ifdef DEBUG
printf("indices: ");
for (int i=0; i<n_combs; i++) {
unsigned int index = d_indices[i];
printf("%d ", index);
}
printf("\n");
#endif
CTSTART();
HitmapVector d_hitmap(n_roads);
// faster way would be to copy_if (layer,lhit) according to the isNewLayer flag to grab the last lhit
// then reduce_by_key(road) and write into tuple (no collision -> no MAX() needed)
thrust::reduce_by_key(
d_road.begin(), d_road.end(), // keys
thrust::make_transform_iterator(
thrust::make_zip_iterator(thrust::make_tuple(d_idt.begin(), d_lhit.begin())),
lhitToHitmap()), // vals
d_roadKey.begin(), // keys output
d_hitmap.begin(), // vals output
thrust::equal_to<int>(), // binary predicate
hitmapAccumulate()); // binary operator
CTSTOP("hitmaps");
#ifdef DEBUG
for (int i=0; i<n_roads; i++) {
Hitmap t = d_hitmap[i];
printf("road = %d, hitmap = (%d, %d, %d, %d, %d, %d)\n", i,
t.hitmap[0], t.hitmap[1], t.hitmap[2],
t.hitmap[3], t.hitmap[4], t.hitmap[5]);
}
#endif
// get combination hitmaps
CTSTART();
HitmapVector d_hitmap_combs(n_combs);
thrust::transform(
thrust::make_zip_iterator(thrust::make_tuple(d_indices_road.begin(), d_indices.begin())),
thrust::make_zip_iterator(thrust::make_tuple(d_indices_road.end(), d_indices.end())),
d_hitmap_combs.begin(),
hitmapComb(thrust::raw_pointer_cast(&d_hitmap[0])));
CTSTOP("hitmapComb");
#ifdef DEBUG
for (int i=0; i<n_combs; i++) {
unsigned int road = d_indices_road[i];
uint comb = d_indices[i];
Hitmap t = d_hitmap_combs[i];
printf("road = %d, comb = %d, hitmap = (%d, %d, %d, %d, %d, %d)\n", road, comb,
t.hitmap[0], t.hitmap[1], t.hitmap[2],
t.hitmap[3], t.hitmap[4], t.hitmap[5]);
}
#endif
// get absolute hit indices in the word data list
CTSTART();
thrust::transform(
thrust::make_zip_iterator(thrust::make_tuple(d_indices_road.begin(), d_hitmap_combs.begin())),
thrust::make_zip_iterator(thrust::make_tuple(d_indices_road.end(), d_hitmap_combs.end())),
d_hitmap_combs.begin(),
hitmapAbsoluteIndices(
thrust::raw_pointer_cast(&d_hitmap[0]),
thrust::raw_pointer_cast(&d_road_indices[0])));
CTSTOP("hitmapCombAbs");
#ifdef DEBUG
printf("\nabsolute combinations:\n");
for (int i=0; i<n_combs; i++) {
unsigned int road = d_indices_road[i];
uint comb = d_indices[i];
Hitmap t = d_hitmap_combs[i];
printf("road = %d, comb = %d, hitmap = (%d, %d, %d, %d, %d, %d)\n", road, comb,
t.hitmap[0], t.hitmap[1], t.hitmap[2],
t.hitmap[3], t.hitmap[4], t.hitmap[5]);
}
#endif
///////////////// fill tf on GPU
// Copy tf to the Device
long tfSize = sizeof(struct tf_arrays);
tf_arrays_t d_tf;
cudaMalloc((void **)&d_tf, tfSize);
cudaMemcpy(d_tf, tf, tfSize, cudaMemcpyHostToDevice);
CTSTART();
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(
d_idt.begin(), d_idt.begin()+1, d_out1t.begin(), d_out2t.begin(), d_out3t.begin(),
d_evt.begin(), d_road.begin(), d_rhit.begin(), d_lhit.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
d_idt.end(), d_idt.end()+1, d_out1t.end(), d_out2t.end(), d_out3t.end(),
d_evt.end(), d_road.end(), d_rhit.end(), d_lhit.end())),
fill_tf_gpu(d_tf));
CTSTOP("thrust_fill");
/*
// Copy tf to the Host
CTSTART();
cudaMemcpy(tf, d_tf, tfSize, cudaMemcpyDeviceToHost);
CTSTOP("copyTFtoHost");
// for informational purposes
CTSTART();
cudaMemcpy(d_tf, tf, tfSize, cudaMemcpyHostToDevice);
CTSTOP("copyTFtoDevice");
*/
cudaFree(d_tf);
///////////////// now fill tf (gf_fep_unpack)
for (ie = 0; ie < NEVTS; ie++) {
tf->evt_zid[ie][tf->evt_nroads[ie]] = -1; // because we set it to 0 for GPU version
}
CTSTART();
int id_last = -1;
int evt = EVT;
unsigned int *data = (unsigned int *) data_in;
for (int i = 0; i < n_words; i++) {
id = ids[i];
bool gf_xft = 0;
if (id == XFT_LYR_2) { // compatibility - stp
id = XFT_LYR;
gf_xft = 1;
}
int nroads = tf->evt_nroads[evt];
int nhits = tf->evt_nhits[evt][nroads][id];
// SVX Data
if (id < XFT_LYR) {
int zid = out1[i];
int lcl = out2[i];
int hit = out3[i];
tf->evt_hit[evt][nroads][id][nhits] = hit;
tf->evt_hitZ[evt][nroads][id][nhits] = zid;
tf->evt_lcl[evt][nroads][id][nhits] = lcl;
tf->evt_lclforcut[evt][nroads][id][nhits] = lcl;
tf->evt_layerZ[evt][nroads][id] = zid;
if (tf->evt_zid[evt][nroads] == -1) {
tf->evt_zid[evt][nroads] = zid & gf_mask(GF_SUBZ_WIDTH);
} else {
tf->evt_zid[evt][nroads] = (((zid & gf_mask(GF_SUBZ_WIDTH)) << GF_SUBZ_WIDTH)
+ (tf->evt_zid[evt][nroads] & gf_mask(GF_SUBZ_WIDTH)));
}
nhits = ++tf->evt_nhits[evt][nroads][id];
// Error Checking
if (nhits == MAX_HIT) tf->evt_err[evt][nroads] |= (1 << OFLOW_HIT_BIT);
if (id < id_last) tf->evt_err[evt][nroads] |= (1 << OUTORDER_BIT);
} else if (id == XFT_LYR && gf_xft == 0) {
// we ignore - stp
} else if (id == XFT_LYR && gf_xft == 1) {
int crv = out1[i];
int crv_sign = out2[i];
int phi = out3[i];
tf->evt_crv[evt][nroads][nhits] = crv;
tf->evt_crv_sign[evt][nroads][nhits] = crv_sign;
tf->evt_phi[evt][nroads][nhits] = phi;
nhits = ++tf->evt_nhits[evt][nroads][id];
// Error Checking
if (nhits == MAX_HIT) tf->evt_err[evt][nroads] |= (1 << OFLOW_HIT_BIT);
if (id < id_last) tf->evt_err[evt][nroads] |= (1 << OUTORDER_BIT);
} else if (id == EP_LYR) {
int sector = out1[i];
int amroad = out2[i];
tf->evt_cable_sect[evt][nroads] = sector;
tf->evt_sect[evt][nroads] = sector;
tf->evt_road[evt][nroads] = amroad;
tf->evt_err_sum[evt] |= tf->evt_err[evt][nroads];
nroads = ++tf->evt_nroads[evt];
if (nroads > MAXROAD) {
printf("The limit on the number of roads fitted by the TF is %d\n",MAXROAD);
printf("You reached that limit evt->nroads = %d\n",nroads);
}
for (id = 0; id <= XFT_LYR; id++)
tf->evt_nhits[evt][nroads][id] = 0;
tf->evt_err[evt][nroads] = 0;
tf->evt_zid[evt][nroads] = -1;
id = -1; id_last = -1;
} else if (id == EE_LYR) {
int ee_word = out1[i];
tf->evt_ee_word[evt] = ee_word;
tf->totEvts++;
evt++;
id = -1; id_last = -1;
} else {
printf("Error INV_DATA_BIT: layer = %u\n", id);
tf->evt_err[evt][nroads] |= (1 << INV_DATA_BIT);
}
id_last = id;
} //end loop on input words
CTSTOP("fill_CPU");
}
///////////////////////////////////////////////////////////////////////////////////////////
__global__ void
kTestKernel_tf(tf_arrays_t tf)
{
long idx = blockIdx.x * blockDim.x + threadIdx.x;
tf->gf_emsk = 1; // test - stp
}
void launchTestKernel_tf(tf_arrays_t tf, unsigned int *data_in, int n_words)
{
printf("sizeof(tf_arrays) = %u\n", sizeof(struct tf_arrays));
printf("sizeof(tf) = %u\n", sizeof(tf));
//printf("%d\n", tf);
//printf("before kernel: %u\n", tf->dummy);
printf("before kernel: %u\n", tf->gf_emsk);
long tfSize = sizeof(struct tf_arrays);
// Allocate device tf array
tf_arrays_t d_tf;
cudaMalloc((void **)&d_tf, tfSize);
// Copy tf to the Device
cudaMemcpy(d_tf, tf, tfSize, cudaMemcpyHostToDevice);
// Kernel
kTestKernel_tf<<<n_words, 1>>>(d_tf);
// Copy tf to the Host
cudaMemcpy(tf, d_tf, tfSize, cudaMemcpyDeviceToHost);
printf("after kernel: %u\n", tf->gf_emsk);
}
///////////////////////////////////////////////////////////////////////////////////////////
__global__ void
kFepComb(unsigned int *data_out, unsigned int *data_in)
{
/*
This function calculates all the combinations of hits given a certain road.
For each road we can have multiple hits per layer.
the input of this function is the set of "evt_" arrays, the output is:
int fep_ncmb[NEVTS][MAXROAD];
int fep_hit[NEVTS][MAXROAD][MAXCOMB][NSVX_PLANE];
int fep_phi[NEVTS][MAXROAD][MAXCOMB];
int fep_crv[NEVTS][MAXROAD][MAXCOMB];
int fep_lcl[NEVTS][MAXROAD][MAXCOMB];
int fep_lclforcut[NEVTS][MAXROAD][MAXCOMB];
int fep_hitmap[NEVTS][MAXROAD][MAXCOMB];
int fep_zid[NEVTS][MAXROAD];
int fep_road[NEVTS][MAXROAD];
int fep_sect[NEVTS][MAXROAD];
int fep_cable_sect[NEVTS][MAXROAD];
int fep_err[NEVTS][MAXROAD][MAXCOMB][MAXCOMB5H];
int fep_crv_sign[NEVTS][MAXROAD][MAXCOMB];
int fep_ncomb5h[NEVTS][MAXROAD][MAXCOMB];
int fep_hitZ[NEVTS][MAXROAD][MAXCOMB][NSVX_PLANE];
int fep_nroads[NEVTS];
int fep_ee_word[NEVTS];
int fep_err_sum[NEVTS];
*/
}
void launchFepComb(unsigned int *data_res, unsigned int *data_in)
{
kFepComb <<< N_BLOCKS, N_THREADS_PER_BLOCK>>>(data_res, data_in);
}
/////////////////////////////////////////////////////////////////////////////////
__global__ void
kFit(int *fit_fit_dev, int *fep_ncmb_dev)
{
/*
This function, for each road, for each combination:
- retrieves the correct constant set, based on
tf->fep_hitmap[ie][ir][ic], tf->fep_lcl[ie][ir][ic], tf->fep_zid[ie][ir]
- performs the scalar product.
It handles the 5/5 tracks as well (for each 5/5 track, 5 4/5 fits are run, and only the
best is kept).
The inputs of this function are the fep arrays, the output are:
long long int fit_fit[NEVTS][6][MAXROAD][MAXCOMB][MAXCOMB5H];
int fit_err[NEVTS][MAXROAD][MAXCOMB][MAXCOMB5H];
int fit_err_sum[NEVTS];
All the arrays needed by the function (constants, etc..) need to be stored on
memory easily accessible by the GPU.
*/
/*
int ir, ic, ip, ih, ihit, il, i;
int rc = 1;
int hit[SVTNHITS];
long long int coeff[NFITTER][SVTNHITS];
int coe_addr, int_addr; // Address for coefficients and intercept
int mka_addr; // Address for MKADDR memory
long long int theintcp = 0;
int sign_crv = 0;
int which, lwhich;
int iz;
int newhitmap;
int g = 0;
int p0[6], ix[7];
int ie;
// struct fep_out *fep;
//struct fit_out *trk;
int map[7][7] = {
{ 0, 1, 2, 3, -1, 4, 5 }, // 01235
{ 0, 1, 2, -1, 3, 4, 5 }, // 01245
{ 0, 1, -1, 2, 3, 4, 5 }, // 01345
{ 0, -1, 1, 2, 3, 4, 5 }, // 02345
{ -1, 0, 1, 2, 3, 4, 5 }, // 12345
{ 0, 1, 2, 3, -1, 4, 5 }, // (??)
{ 0, 1, 2, 3, -1, 4, 5 } // (??)
};
*/
/* --------- Executable starts here ------------ */
//the following are just test...
// ie =blockIdx.x;
// if(ie != 0) ie = ie + (MAXROAD-1);
// for(ir = 0; ir < MAXROAD; ir++) {
// i = ie+ir;
// fit_fit_dev[i] = fep_ncmb_dev[i];
// }
//
// fit_fit_dev[ie] = fep_nroads_dev[ie];
//
// int x = blockIdx.x;
//
// i=0;
// for(ir = 0; ir < 100; ir++) {
// i = blockIdx.x + 100*ir;
//
// fit_fit_dev[i] = fep_ncmb_dev[i];
// }
}
void launchFitKernel(int *fit_fit_dev, int *fep_ncmb_dev)
{
kFit <<< NEVTS, 1>>>(fit_fit_dev, fep_ncmb_dev);
}
//////////////////////////////////////////////////////////
__global__ void kTestKernel(int *a, int *b, int *c)
{
int x = blockIdx.x;
int y;
int i;
for (y = 0; y < ROWS; y++) {
i = x + (COLUMNS * y);
c[i] = a[i] + b[i];
}
}
void launchTestKernel(int *dev_a, int *dev_b, int *dev_c)
{
kTestKernel <<< COLUMNS, 1>>>(dev_a, dev_b, dev_c);
}
|
72f268284039ba28c809a1c764a13950dc8cdb48.hip | // !!! This is a file automatically generated by hipify!!!
#include "grid.cuh"
#include "cuda_utils.hpp"
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void gridLabelKernel(uint *dev_pt_ids, uint *dev_grid_labels,
float *dev_coords,
float min_x, float min_y, float side_len,
uint grid_x_size, int num) {
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
for (; idx < num; idx += blockDim.x) {
dev_pt_ids[idx] = idx;
uint x = (uint) ((dev_coords[2*idx] - min_x) / side_len);
uint y = (uint) ((dev_coords[2*idx+1] - min_y) / side_len);
uint label = y*grid_x_size + x;
dev_grid_labels[idx] = label;
}
}
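// Worked example for gridLabelKernel above: with min_x = min_y = 0, side_len = 2.0 and
// grid_x_size = 4, the point (5.0, 3.0) lands in cell x = 2, y = 1 and gets label 1*4 + 2 = 6.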
__global__ void gridMarkCoreCells(uint *d_index_counts, uint unique_key_count,
uint *d_values, bool *isCore, uint min_points) {
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
for (; idx < unique_key_count; idx += blockDim.x) {
uint start = d_index_counts[2*idx];
uint length = d_index_counts[2*idx + 1];
if (length >= min_points) {
for (uint i = start; i < start + length; i++) {
isCore[d_values[i]] = true;
}
}
}
}
// Always called with one block since key_count <= 21
__global__ void gridCheckCore(float *dev_coords, uint *d_index_counts,
uint key_count, uint *d_values, bool *d_isCore,
uint min_points, float EPS_SQ, float x, float y,
int pt_idx) {
__shared__ int count;
if (threadIdx.x == 0)
count = 0;
__syncthreads();
uint start = d_index_counts[2*threadIdx.x];
uint length = d_index_counts[2*threadIdx.x+1];
for (uint i = start; i < start + length && count < min_points; i++) {
float x2 = dev_coords[d_values[i]*2];
float y2 = dev_coords[d_values[i]*2 + 1];
if ((x2 - x) * (x2 - x) + (y2 - y) * (y2 - y) <= EPS_SQ) {
atomicAdd(&count, 1);
}
}
__syncthreads();
if (threadIdx.x == 0 && count >= min_points) {
d_isCore[pt_idx] = true;
}
}
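// gridCheckCore runs in a single block whose threads each scan one neighbouring grid cell
// (key_count <= 21): every thread walks its cell's point list, counts points within EPS of
// (x, y) using squared distances (EPS_SQ saves a sqrt per pair), and thread 0 finally marks
// the point as a core point if at least min_points neighbours were found.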
void callGridLabelKernel(uint blocks, uint threadsPerBlock,
uint *dev_pt_ids, uint *dev_grid_labels,
float *dev_coords,
float min_x, float min_y, float side_len,
uint grid_x_size, int num) {
hipLaunchKernelGGL(( gridLabelKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, dev_pt_ids, dev_grid_labels,
dev_coords, min_x, min_y,
side_len, grid_x_size, num);
CUDA_KERNEL_CHECK();
}
void callGridMarkCoreCells(uint blocks, uint threadsPerBlock,
uint *d_index_counts, uint unique_key_count,
uint *d_values, bool *isCore, uint min_points) {
hipLaunchKernelGGL(( gridMarkCoreCells), dim3(blocks), dim3(threadsPerBlock), 0, 0,
d_index_counts, unique_key_count, d_values, isCore, min_points);
CUDA_KERNEL_CHECK();
}
void callGridCheckCore(float *dev_coords, uint *d_index_counts,
uint key_count, uint *d_values, bool *d_isCore,
uint min_points, float EPS_SQ, float x, float y,
int pt_idx) {
hipLaunchKernelGGL(( gridCheckCore), dim3(1), dim3(key_count), 0, 0, dev_coords, d_index_counts,
key_count, d_values, d_isCore, min_points,
EPS_SQ, x, y, pt_idx);
CUDA_KERNEL_CHECK();
}
| 72f268284039ba28c809a1c764a13950dc8cdb48.cu | #include "grid.cuh"
#include "cuda_utils.hpp"
#include <cuda_runtime.h>
#include <stdio.h>
__global__ void gridLabelKernel(uint *dev_pt_ids, uint *dev_grid_labels,
float *dev_coords,
float min_x, float min_y, float side_len,
uint grid_x_size, int num) {
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
for (; idx < num; idx += blockDim.x) {
dev_pt_ids[idx] = idx;
uint x = (uint) ((dev_coords[2*idx] - min_x) / side_len);
uint y = (uint) ((dev_coords[2*idx+1] - min_y) / side_len);
uint label = y*grid_x_size + x;
dev_grid_labels[idx] = label;
}
}
__global__ void gridMarkCoreCells(uint *d_index_counts, uint unique_key_count,
uint *d_values, bool *isCore, uint min_points) {
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
for (; idx < unique_key_count; idx += blockDim.x) {
uint start = d_index_counts[2*idx];
uint length = d_index_counts[2*idx + 1];
if (length >= min_points) {
for (uint i = start; i < start + length; i++) {
isCore[d_values[i]] = true;
}
}
}
}
// Always called with one block since key_count <= 21
__global__ void gridCheckCore(float *dev_coords, uint *d_index_counts,
uint key_count, uint *d_values, bool *d_isCore,
uint min_points, float EPS_SQ, float x, float y,
int pt_idx) {
__shared__ int count;
if (threadIdx.x == 0)
count = 0;
__syncthreads();
uint start = d_index_counts[2*threadIdx.x];
uint length = d_index_counts[2*threadIdx.x+1];
for (uint i = start; i < start + length && count < min_points; i++) {
float x2 = dev_coords[d_values[i]*2];
float y2 = dev_coords[d_values[i]*2 + 1];
if ((x2 - x) * (x2 - x) + (y2 - y) * (y2 - y) <= EPS_SQ) {
atomicAdd(&count, 1);
}
}
__syncthreads();
if (threadIdx.x == 0 && count >= min_points) {
d_isCore[pt_idx] = true;
}
}
void callGridLabelKernel(uint blocks, uint threadsPerBlock,
uint *dev_pt_ids, uint *dev_grid_labels,
float *dev_coords,
float min_x, float min_y, float side_len,
uint grid_x_size, int num) {
gridLabelKernel<<<blocks, threadsPerBlock>>>(dev_pt_ids, dev_grid_labels,
dev_coords, min_x, min_y,
side_len, grid_x_size, num);
CUDA_KERNEL_CHECK();
}
void callGridMarkCoreCells(uint blocks, uint threadsPerBlock,
uint *d_index_counts, uint unique_key_count,
uint *d_values, bool *isCore, uint min_points) {
gridMarkCoreCells<<<blocks, threadsPerBlock>>>(
d_index_counts, unique_key_count, d_values, isCore, min_points);
CUDA_KERNEL_CHECK();
}
void callGridCheckCore(float *dev_coords, uint *d_index_counts,
uint key_count, uint *d_values, bool *d_isCore,
uint min_points, float EPS_SQ, float x, float y,
int pt_idx) {
gridCheckCore<<<1, key_count>>>(dev_coords, d_index_counts,
key_count, d_values, d_isCore, min_points,
EPS_SQ, x, y, pt_idx);
CUDA_KERNEL_CHECK();
}
|
b49c187d0fa2d2922208f0be86f5868f63fca8f8.hip | // !!! This is a file automatically generated by hipify!!!
#include <benchmark/benchmark.h>
#include <iostream>
#include <numeric>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <hip/hip_runtime.h>
#include "init/init.hpp"
#include "utils/utils.hpp"
#include "launch/args.hpp"
enum class CUDA_LAUNCH_IMPLEMENTATION : int { EMPTY = 1, ADDTWO, RELU };
static inline std::string CUDA_LAUNCH_IMPLEMENTATION_STRING(const CUDA_LAUNCH_IMPLEMENTATION impl) {
switch (impl) {
case CUDA_LAUNCH_IMPLEMENTATION::EMPTY:
return "EMPTY";
case CUDA_LAUNCH_IMPLEMENTATION::ADDTWO:
return "ADDTWO";
case CUDA_LAUNCH_IMPLEMENTATION::RELU:
return "RELU";
default:
return "UNDEFINED";
}
}
template <typename T, int ITERATION_COUNT, int BLOCK_SIZE>
__global__ void cuda_empty_kernel(T *vec, size_t len) {
#pragma unroll
for (int ii = 0; ii < ITERATION_COUNT; ii++) {
}
}
template <typename T, int ITERATION_COUNT, int BLOCK_SIZE>
__global__ void cuda_add_two_kernel(T *vec, size_t len) {
int index = threadIdx.x + blockIdx.x * BLOCK_SIZE;
if (index < len) {
#pragma unroll
for (int ii = 0; ii < ITERATION_COUNT; ii++) {
vec[index] += 2;
}
}
}
template <typename T, int ITERATION_COUNT, int BLOCK_SIZE>
__global__ void cuda_relu_kernel(T *vec, size_t len) {
int index = threadIdx.x + blockIdx.x * BLOCK_SIZE;
const T zero{0};
if (index < len) {
#pragma unroll
for (int ii = 0; ii < ITERATION_COUNT; ii++) {
vec[index] = vec[index] > zero ? vec[index] : zero;
}
}
}
template <CUDA_LAUNCH_IMPLEMENTATION IMPLEMENTATION, typename T, int LAUNCH_COUNT = 1, int ITERATION_COUNT = 1,
int BLOCK_SIZE = 128>
static void CUDA_LAUNCH(benchmark::State &state) {
const std::string IMPLEMENTATION_NAME = CUDA_LAUNCH_IMPLEMENTATION_STRING(IMPLEMENTATION);
state.SetLabel(fmt::format("CUDA/LAUNCH/{}", IMPLEMENTATION_NAME));
if (!has_cuda) {
state.SkipWithError(fmt::format("CUDA/LAUNCH/{} no CUDA device found", IMPLEMENTATION_NAME).c_str());
return;
}
const size_t N = state.range(0);
const dim3 blockDim(BLOCK_SIZE);
const dim3 gridDim(ceil(((float) N) / blockDim.x));
if (gridDim.x >= cuda_device_prop.maxGridSize[0]) {
const auto str = fmt::format("CUDA/LAUNCH/{} the grid dimension {} exceeds the max grid dimensions {}",
IMPLEMENTATION_NAME, gridDim.x, cuda_device_prop.maxGridSize[0]);
state.SkipWithError(str.c_str());
return;
}
if (gridDim.x >= CUDA_MAX_GRID_SIZE) {
const auto str = fmt::format("CUDA/LAUNCH/{} the grid dimension {} exceeds the max grid dimensions {}",
IMPLEMENTATION_NAME, gridDim.x, CUDA_MAX_GRID_SIZE);
state.SkipWithError(str.c_str());
return;
}
auto a = std::vector<T>(N);
std::fill(a.begin(), a.end(), 1);
T *d_a{nullptr};
if (PRINT_IF_ERROR(hipMalloc((void **) &d_a, a.size() * sizeof(*a.data())))) {
LOG(critical, "CUDA/LAUNCH/{} device memory allocation failed for vector A", IMPLEMENTATION_NAME);
return;
}
defer(hipFree(d_a));
if (PRINT_IF_ERROR(hipMemcpy(d_a, a.data(), a.size() * sizeof(*a.data()), hipMemcpyHostToDevice))) {
LOG(critical, "CUDA/LAUNCH/{} failed to copy vector to device", IMPLEMENTATION_NAME);
return;
}
#ifdef USE_CUDA_EVENTS
hipEvent_t start, stop;
PRINT_IF_ERROR(hipEventCreate(&start));
PRINT_IF_ERROR(hipEventCreate(&stop));
#endif // USE_CUDA_EVENTS
for (auto _ : state) {
#ifdef USE_CUDA_EVENTS
hipEventRecord(start, NULL);
#endif // USE_CUDA_EVENTS
for (int ii = 0; ii < LAUNCH_COUNT; ii++) {
switch (IMPLEMENTATION) {
case CUDA_LAUNCH_IMPLEMENTATION::EMPTY:
hipLaunchKernelGGL(( cuda_empty_kernel<T, ITERATION_COUNT, BLOCK_SIZE>), dim3(gridDim), dim3(blockDim), 0, 0, d_a, N);
break;
case CUDA_LAUNCH_IMPLEMENTATION::ADDTWO:
hipLaunchKernelGGL(( cuda_add_two_kernel<T, ITERATION_COUNT, BLOCK_SIZE>), dim3(gridDim), dim3(blockDim), 0, 0, d_a, N);
break;
case CUDA_LAUNCH_IMPLEMENTATION::RELU:
hipLaunchKernelGGL(( cuda_relu_kernel<T, ITERATION_COUNT, BLOCK_SIZE>), dim3(gridDim), dim3(blockDim), 0, 0, d_a, N);
break;
}
}
#ifdef USE_CUDA_EVENTS
hipEventRecord(stop, NULL);
const auto cuda_err = hipEventSynchronize(stop);
#else // USE_CUDA_EVENTS
const auto cuda_err = hipDeviceSynchronize();
#endif
state.PauseTiming();
if (PRINT_IF_ERROR(cuda_err)) {
state.SkipWithError(fmt::format("CUDA/LAUNCH/{} failed to synchronize", IMPLEMENTATION_NAME).c_str());
break;
}
#ifdef USE_CUDA_EVENTS
float msecTotal = 0.0f;
if (PRINT_IF_ERROR(hipEventElapsedTime(&msecTotal, start, stop))) {
state.SkipWithError(fmt::format("CUDA/LAUNCH/{} failed to get elapsed time", IMPLEMENTATION_NAME).c_str());
break;
}
state.SetIterationTime(msecTotal / 1000);
#endif // USE_CUDA_EVENTS
state.ResumeTiming();
}
state.counters.insert({{"N", N},
{"BLOCK_SIZE", BLOCK_SIZE},
{"THREAD_BLOCKS", gridDim.x},
{"IMPLEMENTATION_TYPE", (int) IMPLEMENTATION},
{"ITERATION_COUNT", ITERATION_COUNT},
{"LAUNCH_COUNT", LAUNCH_COUNT}});
state.SetBytesProcessed(int64_t(state.iterations()) * ITERATION_COUNT * LAUNCH_COUNT * N);
}
template <typename T, int LAUNCH_COUNT, int ITERATION_COUNT, int BLOCK_SIZE>
static void CUDA_LAUNCH_EMPTY(benchmark::State &state) {
return CUDA_LAUNCH<CUDA_LAUNCH_IMPLEMENTATION::EMPTY, T, LAUNCH_COUNT, ITERATION_COUNT, BLOCK_SIZE>(state);
}
template <typename T, int LAUNCH_COUNT, int ITERATION_COUNT, int BLOCK_SIZE>
static void CUDA_LAUNCH_ADDTWO(benchmark::State &state) {
return CUDA_LAUNCH<CUDA_LAUNCH_IMPLEMENTATION::ADDTWO, T, LAUNCH_COUNT, ITERATION_COUNT, BLOCK_SIZE>(state);
}
template <typename T, int LAUNCH_COUNT, int ITERATION_COUNT, int BLOCK_SIZE>
static void CUDA_LAUNCH_RELU(benchmark::State &state) {
return CUDA_LAUNCH<CUDA_LAUNCH_IMPLEMENTATION::RELU, T, LAUNCH_COUNT, ITERATION_COUNT, BLOCK_SIZE>(state);
}
#ifdef USE_CUDA_EVENTS
#define BENCHMARK_CUDA_LAUNCH0(B, ...) BENCHMARK_TEMPLATE(B, __VA_ARGS__)->ALL_ARGS()->UseManualTime();
#else // USE_CUDA_EVENTS
#define BENCHMARK_CUDA_LAUNCH0(B, ...) BENCHMARK_TEMPLATE(B, __VA_ARGS__)->ALL_ARGS()
#endif // USE_CUDA_EVENTS
#define BENCHMARK_CUDA_LAUNCH(B, ...) \
BENCHMARK_CUDA_LAUNCH0(B, char, __VA_ARGS__); \
BENCHMARK_CUDA_LAUNCH0(B, int, __VA_ARGS__); \
BENCHMARK_CUDA_LAUNCH0(B, float, __VA_ARGS__); \
BENCHMARK_CUDA_LAUNCH0(B, double, __VA_ARGS__)
#define BENCHMARK_CUDA_LAUNCH_EMPTY(...) BENCHMARK_CUDA_LAUNCH(CUDA_LAUNCH_EMPTY, __VA_ARGS__)
#define BENCHMARK_CUDA_LAUNCH_ADDTWO(...) BENCHMARK_CUDA_LAUNCH(CUDA_LAUNCH_ADDTWO, __VA_ARGS__)
#define BENCHMARK_CUDA_LAUNCH_RELU(...) BENCHMARK_CUDA_LAUNCH(CUDA_LAUNCH_RELU, __VA_ARGS__)
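// Each registration below expands through BENCHMARK_CUDA_LAUNCH into four BENCHMARK_TEMPLATE
// instantiations (char, int, float, double); e.g. BENCHMARK_CUDA_LAUNCH_EMPTY(256, 1, 128)
// registers CUDA_LAUNCH_EMPTY<T, 256, 1, 128>, i.e. 256 kernel launches per timed iteration
// with 128-thread blocks.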
#ifndef FAST_MODE
BENCHMARK_CUDA_LAUNCH_EMPTY(1, 1, 128);
BENCHMARK_CUDA_LAUNCH_EMPTY(4, 1, 128);
BENCHMARK_CUDA_LAUNCH_EMPTY(16, 1, 128);
BENCHMARK_CUDA_LAUNCH_EMPTY(32, 1, 128);
BENCHMARK_CUDA_LAUNCH_EMPTY(64, 1, 128);
BENCHMARK_CUDA_LAUNCH_EMPTY(128, 1, 128);
#endif // FAST_MODE
BENCHMARK_CUDA_LAUNCH_EMPTY(256, 1, 128);
BENCHMARK_CUDA_LAUNCH_EMPTY(512, 1, 128);
BENCHMARK_CUDA_LAUNCH_EMPTY(1024, 1, 128);
BENCHMARK_CUDA_LAUNCH_EMPTY(2048, 1, 128);
#ifndef FAST_MODE
BENCHMARK_CUDA_LAUNCH_ADDTWO(1, 1, 128);
BENCHMARK_CUDA_LAUNCH_ADDTWO(4, 1, 128);
BENCHMARK_CUDA_LAUNCH_ADDTWO(16, 1, 128);
BENCHMARK_CUDA_LAUNCH_ADDTWO(32, 1, 128);
BENCHMARK_CUDA_LAUNCH_ADDTWO(64, 1, 128);
BENCHMARK_CUDA_LAUNCH_ADDTWO(128, 1, 128);
#endif // FAST_MODE
BENCHMARK_CUDA_LAUNCH_ADDTWO(256, 1, 128);
BENCHMARK_CUDA_LAUNCH_ADDTWO(512, 1, 128);
BENCHMARK_CUDA_LAUNCH_ADDTWO(1024, 1, 128);
BENCHMARK_CUDA_LAUNCH_ADDTWO(2048, 1, 128);
#ifndef FAST_MODE
BENCHMARK_CUDA_LAUNCH_RELU(1, 1, 128);
BENCHMARK_CUDA_LAUNCH_RELU(4, 1, 128);
BENCHMARK_CUDA_LAUNCH_RELU(16, 1, 128);
BENCHMARK_CUDA_LAUNCH_RELU(32, 1, 128);
BENCHMARK_CUDA_LAUNCH_RELU(64, 1, 128);
BENCHMARK_CUDA_LAUNCH_RELU(128, 1, 128);
#endif // FAST_MODE
BENCHMARK_CUDA_LAUNCH_RELU(256, 1, 128);
BENCHMARK_CUDA_LAUNCH_RELU(512, 1, 128);
BENCHMARK_CUDA_LAUNCH_RELU(1024, 1, 128);
BENCHMARK_CUDA_LAUNCH_RELU(2048, 1, 128);
| b49c187d0fa2d2922208f0be86f5868f63fca8f8.cu | #include <benchmark/benchmark.h>
#include <iostream>
#include <numeric>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <cuda_runtime.h>
#include "init/init.hpp"
#include "utils/utils.hpp"
#include "launch/args.hpp"
enum class CUDA_LAUNCH_IMPLEMENTATION : int { EMPTY = 1, ADDTWO, RELU };
static inline std::string CUDA_LAUNCH_IMPLEMENTATION_STRING(const CUDA_LAUNCH_IMPLEMENTATION impl) {
switch (impl) {
case CUDA_LAUNCH_IMPLEMENTATION::EMPTY:
return "EMPTY";
case CUDA_LAUNCH_IMPLEMENTATION::ADDTWO:
return "ADDTWO";
case CUDA_LAUNCH_IMPLEMENTATION::RELU:
return "RELU";
default:
return "UNDEFINED";
}
}
template <typename T, int ITERATION_COUNT, int BLOCK_SIZE>
__global__ void cuda_empty_kernel(T *vec, size_t len) {
#pragma unroll
for (int ii = 0; ii < ITERATION_COUNT; ii++) {
}
}
template <typename T, int ITERATION_COUNT, int BLOCK_SIZE>
__global__ void cuda_add_two_kernel(T *vec, size_t len) {
int index = threadIdx.x + blockIdx.x * BLOCK_SIZE;
if (index < len) {
#pragma unroll
for (int ii = 0; ii < ITERATION_COUNT; ii++) {
vec[index] += 2;
}
}
}
template <typename T, int ITERATION_COUNT, int BLOCK_SIZE>
__global__ void cuda_relu_kernel(T *vec, size_t len) {
int index = threadIdx.x + blockIdx.x * BLOCK_SIZE;
const T zero{0};
if (index < len) {
#pragma unroll
for (int ii = 0; ii < ITERATION_COUNT; ii++) {
vec[index] = vec[index] > zero ? vec[index] : zero;
}
}
}
template <CUDA_LAUNCH_IMPLEMENTATION IMPLEMENTATION, typename T, int LAUNCH_COUNT = 1, int ITERATION_COUNT = 1,
int BLOCK_SIZE = 128>
static void CUDA_LAUNCH(benchmark::State &state) {
const std::string IMPLEMENTATION_NAME = CUDA_LAUNCH_IMPLEMENTATION_STRING(IMPLEMENTATION);
state.SetLabel(fmt::format("CUDA/LAUNCH/{}", IMPLEMENTATION_NAME));
if (!has_cuda) {
state.SkipWithError(fmt::format("CUDA/LAUNCH/{} no CUDA device found", IMPLEMENTATION_NAME).c_str());
return;
}
const size_t N = state.range(0);
const dim3 blockDim(BLOCK_SIZE);
const dim3 gridDim(ceil(((float) N) / blockDim.x));
if (gridDim.x >= cuda_device_prop.maxGridSize[0]) {
const auto str = fmt::format("CUDA/LAUNCH/{} the grid dimension {} exceeds the max grid dimensions {}",
IMPLEMENTATION_NAME, gridDim.x, cuda_device_prop.maxGridSize[0]);
state.SkipWithError(str.c_str());
return;
}
if (gridDim.x >= CUDA_MAX_GRID_SIZE) {
const auto str = fmt::format("CUDA/LAUNCH/{} the grid dimension {} exceeds the max grid dimensions {}",
IMPLEMENTATION_NAME, gridDim.x, CUDA_MAX_GRID_SIZE);
state.SkipWithError(str.c_str());
return;
}
auto a = std::vector<T>(N);
std::fill(a.begin(), a.end(), 1);
T *d_a{nullptr};
if (PRINT_IF_ERROR(cudaMalloc((void **) &d_a, a.size() * sizeof(*a.data())))) {
LOG(critical, "CUDA/LAUNCH/{} device memory allocation failed for vector A", IMPLEMENTATION_NAME);
return;
}
defer(cudaFree(d_a));
if (PRINT_IF_ERROR(cudaMemcpy(d_a, a.data(), a.size() * sizeof(*a.data()), cudaMemcpyHostToDevice))) {
LOG(critical, "CUDA/LAUNCH/{} failed to copy vector to device", IMPLEMENTATION_NAME);
return;
}
#ifdef USE_CUDA_EVENTS
cudaEvent_t start, stop;
PRINT_IF_ERROR(cudaEventCreate(&start));
PRINT_IF_ERROR(cudaEventCreate(&stop));
#endif // USE_CUDA_EVENTS
for (auto _ : state) {
#ifdef USE_CUDA_EVENTS
cudaEventRecord(start, NULL);
#endif // USE_CUDA_EVENTS
for (int ii = 0; ii < LAUNCH_COUNT; ii++) {
switch (IMPLEMENTATION) {
case CUDA_LAUNCH_IMPLEMENTATION::EMPTY:
cuda_empty_kernel<T, ITERATION_COUNT, BLOCK_SIZE><<<gridDim, blockDim>>>(d_a, N);
break;
case CUDA_LAUNCH_IMPLEMENTATION::ADDTWO:
cuda_add_two_kernel<T, ITERATION_COUNT, BLOCK_SIZE><<<gridDim, blockDim>>>(d_a, N);
break;
case CUDA_LAUNCH_IMPLEMENTATION::RELU:
cuda_relu_kernel<T, ITERATION_COUNT, BLOCK_SIZE><<<gridDim, blockDim>>>(d_a, N);
break;
}
}
#ifdef USE_CUDA_EVENTS
cudaEventRecord(stop, NULL);
const auto cuda_err = cudaEventSynchronize(stop);
#else // USE_CUDA_EVENTS
const auto cuda_err = cudaDeviceSynchronize();
#endif
state.PauseTiming();
if (PRINT_IF_ERROR(cuda_err)) {
state.SkipWithError(fmt::format("CUDA/LAUNCH/{} failed to synchronize", IMPLEMENTATION_NAME).c_str());
break;
}
#ifdef USE_CUDA_EVENTS
float msecTotal = 0.0f;
if (PRINT_IF_ERROR(cudaEventElapsedTime(&msecTotal, start, stop))) {
state.SkipWithError(fmt::format("CUDA/LAUNCH/{} failed to get elapsed time", IMPLEMENTATION_NAME).c_str());
break;
}
state.SetIterationTime(msecTotal / 1000);
#endif // USE_CUDA_EVENTS
state.ResumeTiming();
}
state.counters.insert({{"N", N},
{"BLOCK_SIZE", BLOCK_SIZE},
{"THREAD_BLOCKS", gridDim.x},
{"IMPLEMENTATION_TYPE", (int) IMPLEMENTATION},
{"ITERATION_COUNT", ITERATION_COUNT},
{"LAUNCH_COUNT", LAUNCH_COUNT}});
state.SetBytesProcessed(int64_t(state.iterations()) * ITERATION_COUNT * LAUNCH_COUNT * N);
}
template <typename T, int LAUNCH_COUNT, int ITERATION_COUNT, int BLOCK_SIZE>
static void CUDA_LAUNCH_EMPTY(benchmark::State &state) {
return CUDA_LAUNCH<CUDA_LAUNCH_IMPLEMENTATION::EMPTY, T, LAUNCH_COUNT, ITERATION_COUNT, BLOCK_SIZE>(state);
}
template <typename T, int LAUNCH_COUNT, int ITERATION_COUNT, int BLOCK_SIZE>
static void CUDA_LAUNCH_ADDTWO(benchmark::State &state) {
return CUDA_LAUNCH<CUDA_LAUNCH_IMPLEMENTATION::ADDTWO, T, LAUNCH_COUNT, ITERATION_COUNT, BLOCK_SIZE>(state);
}
template <typename T, int LAUNCH_COUNT, int ITERATION_COUNT, int BLOCK_SIZE>
static void CUDA_LAUNCH_RELU(benchmark::State &state) {
return CUDA_LAUNCH<CUDA_LAUNCH_IMPLEMENTATION::RELU, T, LAUNCH_COUNT, ITERATION_COUNT, BLOCK_SIZE>(state);
}
#ifdef USE_CUDA_EVENTS
#define BENCHMARK_CUDA_LAUNCH0(B, ...) BENCHMARK_TEMPLATE(B, __VA_ARGS__)->ALL_ARGS()->UseManualTime();
#else // USE_CUDA_EVENTS
#define BENCHMARK_CUDA_LAUNCH0(B, ...) BENCHMARK_TEMPLATE(B, __VA_ARGS__)->ALL_ARGS()
#endif // USE_CUDA_EVENTS
#define BENCHMARK_CUDA_LAUNCH(B, ...) \
BENCHMARK_CUDA_LAUNCH0(B, char, __VA_ARGS__); \
BENCHMARK_CUDA_LAUNCH0(B, int, __VA_ARGS__); \
BENCHMARK_CUDA_LAUNCH0(B, float, __VA_ARGS__); \
BENCHMARK_CUDA_LAUNCH0(B, double, __VA_ARGS__)
#define BENCHMARK_CUDA_LAUNCH_EMPTY(...) BENCHMARK_CUDA_LAUNCH(CUDA_LAUNCH_EMPTY, __VA_ARGS__)
#define BENCHMARK_CUDA_LAUNCH_ADDTWO(...) BENCHMARK_CUDA_LAUNCH(CUDA_LAUNCH_ADDTWO, __VA_ARGS__)
#define BENCHMARK_CUDA_LAUNCH_RELU(...) BENCHMARK_CUDA_LAUNCH(CUDA_LAUNCH_RELU, __VA_ARGS__)
#ifndef FAST_MODE
BENCHMARK_CUDA_LAUNCH_EMPTY(1, 1, 128);
BENCHMARK_CUDA_LAUNCH_EMPTY(4, 1, 128);
BENCHMARK_CUDA_LAUNCH_EMPTY(16, 1, 128);
BENCHMARK_CUDA_LAUNCH_EMPTY(32, 1, 128);
BENCHMARK_CUDA_LAUNCH_EMPTY(64, 1, 128);
BENCHMARK_CUDA_LAUNCH_EMPTY(128, 1, 128);
#endif // FAST_MODE
BENCHMARK_CUDA_LAUNCH_EMPTY(256, 1, 128);
BENCHMARK_CUDA_LAUNCH_EMPTY(512, 1, 128);
BENCHMARK_CUDA_LAUNCH_EMPTY(1024, 1, 128);
BENCHMARK_CUDA_LAUNCH_EMPTY(2048, 1, 128);
#ifndef FAST_MODE
BENCHMARK_CUDA_LAUNCH_ADDTWO(1, 1, 128);
BENCHMARK_CUDA_LAUNCH_ADDTWO(4, 1, 128);
BENCHMARK_CUDA_LAUNCH_ADDTWO(16, 1, 128);
BENCHMARK_CUDA_LAUNCH_ADDTWO(32, 1, 128);
BENCHMARK_CUDA_LAUNCH_ADDTWO(64, 1, 128);
BENCHMARK_CUDA_LAUNCH_ADDTWO(128, 1, 128);
#endif // FAST_MODE
BENCHMARK_CUDA_LAUNCH_ADDTWO(256, 1, 128);
BENCHMARK_CUDA_LAUNCH_ADDTWO(512, 1, 128);
BENCHMARK_CUDA_LAUNCH_ADDTWO(1024, 1, 128);
BENCHMARK_CUDA_LAUNCH_ADDTWO(2048, 1, 128);
#ifndef FAST_MODE
BENCHMARK_CUDA_LAUNCH_RELU(1, 1, 128);
BENCHMARK_CUDA_LAUNCH_RELU(4, 1, 128);
BENCHMARK_CUDA_LAUNCH_RELU(16, 1, 128);
BENCHMARK_CUDA_LAUNCH_RELU(32, 1, 128);
BENCHMARK_CUDA_LAUNCH_RELU(64, 1, 128);
BENCHMARK_CUDA_LAUNCH_RELU(128, 1, 128);
#endif // FAST_MODE
BENCHMARK_CUDA_LAUNCH_RELU(256, 1, 128);
BENCHMARK_CUDA_LAUNCH_RELU(512, 1, 128);
BENCHMARK_CUDA_LAUNCH_RELU(1024, 1, 128);
BENCHMARK_CUDA_LAUNCH_RELU(2048, 1, 128);
|
f6a0e2c06cd3407408b5a766aacb35f5badd5230.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by Francisco José González García.
// Copyright (c) 2020 Universidad de Granada. All rights reserved.
//
#include <iostream>
#include "jacobiCuda.h"
#include "kernels_hip.cuh"
#include "utilidades.h"
double *jacobi_CUDA::multiplicacionMV() {
dim3 block_size(BLOCK_SIZE);
dim3 grid_size{};
const unsigned int warp_size = 32; /// One warp per row
grid_size.x = (warp_size * getFilas() + block_size.x - 1) / block_size.x;
hipMemcpy(x_d, x.data(), getColumnas() * sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( matrix_vector_multiplication<double>), dim3(grid_size), dim3(block_size), 0, 0, A, col_ind, row_ptr, x_d, y_d, getFilas());
hipMemcpy(y, y_d, getFilas() * sizeof(double), hipMemcpyDeviceToHost);
return y;
}
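// norma() below divides the maximum of the residual vector by the maximum of the current
// iterate (both computed with utilidades::reduce_max_CUDA) and uses that ratio as the
// stopping criterion of the Jacobi iteration.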
double jacobi_CUDA::norma() {
double r_max = utilidades::reduce_max_CUDA(r_d, getFilas(), BLOCK_SIZE);
double x_max = utilidades::reduce_max_CUDA(x_d, getFilas(), BLOCK_SIZE);
double norma = r_max / x_max;
// cout << "r_max: " << r_max;
// cout << " x_max: " << x_max << endl;
// cout << " norma: " << norma << endl;
return norma;
}
void jacobi_CUDA::obtenerNuevaX() {
dim3 block_size(BLOCK_SIZE);
dim3 grid_size{};
grid_size.x = (getFilas() + block_size.x - 1) / block_size.x;
hipMemcpy(r_d, r, getFilas() * sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( nuevaX<double>), dim3(grid_size), dim3(block_size), 0, 0, x.size(), x_d, r_d);
}
void jacobi_CUDA::actualizaX() {
hipMemcpy(x.data(), x_d, getColumnas() * sizeof(double), hipMemcpyDeviceToHost);
}
jacobi_CUDA::jacobi_CUDA(const CSR &m, const vector<double> &aprox_inicial, const int &block_size_arg)
: jacobi(m, aprox_inicial),
BLOCK_SIZE(block_size_arg) {
hipMalloc(&r_d, sizeof(double) * getFilas());
hipMalloc(&A, sizeof(double) * matriz.getVal().size());
hipMalloc(&col_ind, sizeof(int) * matriz.getColInd().size());
hipMalloc(&row_ptr, sizeof(int) * matriz.getRowPtr().size());
hipMalloc(&x_d, sizeof(double) * getFilas());
hipMalloc(&y_d, sizeof(double) * getFilas());
hipMalloc(&inversa_d, sizeof(double) * getFilas());
hipMemcpy(A, matriz.getVal().data(), matriz.getVal().size() * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(col_ind, matriz.getColInd().data(), matriz.getColInd().size() * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(row_ptr, matriz.getRowPtr().data(), matriz.getRowPtr().size() * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(inversa_d, inversa, getFilas() * sizeof(double), hipMemcpyHostToDevice);
}
jacobi_CUDA::jacobi_CUDA(const CSR &m, const int &block_size_arg) :
jacobi_CUDA(m, vector<double>(m.getFilas(), 1), block_size_arg) {}
jacobi_CUDA::~jacobi_CUDA() {
hipFree(A);
hipFree(x_d);
hipFree(y_d);
hipFree(r_d);
hipFree(col_ind);
hipFree(row_ptr);
hipFree(inversa_d);
};
| f6a0e2c06cd3407408b5a766aacb35f5badd5230.cu | //
// Created by Francisco José González García.
// Copyright (c) 2020 Universidad de Granada. All rights reserved.
//
#include <iostream>
#include "jacobiCuda.h"
#include "kernels.cuh"
#include "utilidades.h"
double *jacobi_CUDA::multiplicacionMV() {
dim3 block_size(BLOCK_SIZE);
dim3 grid_size{};
const unsigned int warp_size = 32; /// One warp per row
grid_size.x = (warp_size * getFilas() + block_size.x - 1) / block_size.x;
cudaMemcpy(x_d, x.data(), getColumnas() * sizeof(double), cudaMemcpyHostToDevice);
matrix_vector_multiplication<double><<<grid_size, block_size>>>(A, col_ind, row_ptr, x_d, y_d, getFilas());
cudaMemcpy(y, y_d, getFilas() * sizeof(double), cudaMemcpyDeviceToHost);
return y;
}
double jacobi_CUDA::norma() {
double r_max = utilidades::reduce_max_CUDA(r_d, getFilas(), BLOCK_SIZE);
double x_max = utilidades::reduce_max_CUDA(x_d, getFilas(), BLOCK_SIZE);
double norma = r_max / x_max;
// cout << "r_max: " << r_max;
// cout << " x_max: " << x_max << endl;
// cout << " norma: " << norma << endl;
return norma;
}
void jacobi_CUDA::obtenerNuevaX() {
dim3 block_size(BLOCK_SIZE);
dim3 grid_size{};
grid_size.x = (getFilas() + block_size.x - 1) / block_size.x;
cudaMemcpy(r_d, r, getFilas() * sizeof(double), cudaMemcpyHostToDevice);
nuevaX<double><<<grid_size, block_size>>>(x.size(), x_d, r_d);
}
void jacobi_CUDA::actualizaX() {
cudaMemcpy(x.data(), x_d, getColumnas() * sizeof(double), cudaMemcpyDeviceToHost);
}
jacobi_CUDA::jacobi_CUDA(const CSR &m, const vector<double> &aprox_inicial, const int &block_size_arg)
: jacobi(m, aprox_inicial),
BLOCK_SIZE(block_size_arg) {
cudaMalloc(&r_d, sizeof(double) * getFilas());
cudaMalloc(&A, sizeof(double) * matriz.getVal().size());
cudaMalloc(&col_ind, sizeof(int) * matriz.getColInd().size());
cudaMalloc(&row_ptr, sizeof(int) * matriz.getRowPtr().size());
cudaMalloc(&x_d, sizeof(double) * getFilas());
cudaMalloc(&y_d, sizeof(double) * getFilas());
cudaMalloc(&inversa_d, sizeof(double) * getFilas());
cudaMemcpy(A, matriz.getVal().data(), matriz.getVal().size() * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(col_ind, matriz.getColInd().data(), matriz.getColInd().size() * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(row_ptr, matriz.getRowPtr().data(), matriz.getRowPtr().size() * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(inversa_d, inversa, getFilas() * sizeof(double), cudaMemcpyHostToDevice);
}
jacobi_CUDA::jacobi_CUDA(const CSR &m, const int &block_size_arg) :
jacobi_CUDA(m, vector<double>(m.getFilas(), 1), block_size_arg) {}
jacobi_CUDA::~jacobi_CUDA() {
cudaFree(A);
cudaFree(x_d);
cudaFree(y_d);
cudaFree(r_d);
cudaFree(col_ind);
cudaFree(row_ptr);
cudaFree(inversa_d);
};
|
7670ebfab3509db25e4583781ad1b279d8b9fba5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void reorderData(uint startbit, uint *outKeys, uint *outValues, uint2 *keys, uint2 *values, uint *blockOffsets, uint *offsets, uint *sizes, uint totalBlocks)
{
uint GROUP_SIZE = blockDim.x;
__shared__ uint2 sKeys2[256];
__shared__ uint2 sValues2[256];
__shared__ uint sOffsets[16];
__shared__ uint sBlockOffsets[16];
uint* sKeys1 = (uint*) sKeys2;
uint* sValues1 = (uint*) sValues2;
uint blockId = blockIdx.x;
uint i = blockId * blockDim.x + threadIdx.x;
sKeys2[threadIdx.x] = keys[i];
sValues2[threadIdx.x] = values[i];
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks +
blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
}
__syncthreads();
uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF;
uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix];
outKeys[globalOffset] = sKeys1[threadIdx.x];
outValues[globalOffset] = sValues1[threadIdx.x];
radix = (sKeys1[threadIdx.x + GROUP_SIZE] >> startbit) & 0xF;
globalOffset = sOffsets[radix] + threadIdx.x + GROUP_SIZE -
sBlockOffsets[radix];
outKeys[globalOffset] = sKeys1[threadIdx.x + GROUP_SIZE];
outValues[globalOffset] = sValues1[threadIdx.x + GROUP_SIZE];
} | 7670ebfab3509db25e4583781ad1b279d8b9fba5.cu | #include "includes.h"
__global__ void reorderData(uint startbit, uint *outKeys, uint *outValues, uint2 *keys, uint2 *values, uint *blockOffsets, uint *offsets, uint *sizes, uint totalBlocks)
{
uint GROUP_SIZE = blockDim.x;
__shared__ uint2 sKeys2[256];
__shared__ uint2 sValues2[256];
__shared__ uint sOffsets[16];
__shared__ uint sBlockOffsets[16];
uint* sKeys1 = (uint*) sKeys2;
uint* sValues1 = (uint*) sValues2;
uint blockId = blockIdx.x;
uint i = blockId * blockDim.x + threadIdx.x;
sKeys2[threadIdx.x] = keys[i];
sValues2[threadIdx.x] = values[i];
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks +
blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
}
__syncthreads();
uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF;
uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix];
outKeys[globalOffset] = sKeys1[threadIdx.x];
outValues[globalOffset] = sValues1[threadIdx.x];
radix = (sKeys1[threadIdx.x + GROUP_SIZE] >> startbit) & 0xF;
globalOffset = sOffsets[radix] + threadIdx.x + GROUP_SIZE -
sBlockOffsets[radix];
outKeys[globalOffset] = sKeys1[threadIdx.x + GROUP_SIZE];
outValues[globalOffset] = sValues1[threadIdx.x + GROUP_SIZE];
} |
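// Editor's illustration between rows, not part of either file above: a serial
// host-side reference of the stable per-digit scatter that reorderData performs,
// useful when checking GPU results. Function and parameter names are assumptions;
// digit_offsets[] here is the global starting position of each 4-bit digit.
#include <cstdint>
#include <vector>
void reorder_reference(uint32_t startbit, const std::vector<uint32_t> &keys,
                       const std::vector<uint32_t> &digit_offsets,
                       std::vector<uint32_t> &out_keys) {
    std::vector<uint32_t> emitted(16, 0);          // how many keys of each digit written so far
    out_keys.resize(keys.size());
    for (size_t i = 0; i < keys.size(); ++i) {
        const uint32_t digit = (keys[i] >> startbit) & 0xFu;
        out_keys[digit_offsets[digit] + emitted[digit]++] = keys[i];
    }
}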
b8d98f2b45483f91540cea0af5438aeafec7863b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
__global__ void vectorAdd(int* a, int* b, int* c, int n) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
        c[tid] = a[tid] + b[tid];
}
int main() {
int n = 1 << 20;
//Host pointers
int* h_a;
int* h_b;
int* h_c;
//Device pointers
int* d_a;
int* d_b;
int* d_c;
size_t bytes = n * sizeof(int);
//Allocate memory (RAM)
h_a = (int*) malloc(bytes);
h_b = (int*) malloc(bytes);
h_c = (int*) malloc(bytes);
for(int i = 0; i < n; ++i) {
h_a[i] = 1;
h_b[i] = 2;
}
//Allocate memory (VRAM)
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
//Init block and grid size
int block_size = 1024;
int grid_size = (int) ceil((float) n / block_size);
printf("Grid size is %d\n", grid_size);
//Copying mem...
    hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
    hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( vectorAdd), dim3(grid_size), dim3(block_size), 0, 0, d_a, d_b, d_c, n);
    hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost);
for(int i = 0; i < n; ++i) {
        if(h_c[i] != 3){
printf("Error!\n");
break;
}
}
printf("Completed successfully!\n");
//Free mem...
free(h_a);
free(h_b);
free(h_c);
//Free vram
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
} | b8d98f2b45483f91540cea0af5438aeafec7863b.cu | #include <stdio.h>
#include <stdlib.h>
__global__ void vectorAdd(int* a, int* b, int* c, int n) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
        c[tid] = a[tid] + b[tid];
}
int main() {
int n = 1 << 20;
//Host pointers
int* h_a;
int* h_b;
int* h_c;
//Device pointers
int* d_a;
int* d_b;
int* d_c;
size_t bytes = n * sizeof(int);
//Allocate memory (RAM)
h_a = (int*) malloc(bytes);
h_b = (int*) malloc(bytes);
h_c = (int*) malloc(bytes);
for(int i = 0; i < n; ++i) {
h_a[i] = 1;
h_b[i] = 2;
}
//Allocate memory (VRAM)
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
//Init block and grid size
int block_size = 1024;
int grid_size = (int) ceil((float) n / block_size);
printf("Grid size is %d\n", grid_size);
//Copying mem...
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
vectorAdd<<<grid_size, block_size>>>(d_a, d_b, d_c, n);
    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
for(int i = 0; i < n; ++i) {
        if(h_c[i] != 3){
printf("Error!\n");
break;
}
}
printf("Completed successfully!\n");
//Free mem...
free(h_a);
free(h_b);
free(h_c);
//Free vram
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
} |
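// Editor's illustration between rows, not part of either file above: this pair
// shows the substitutions hipify applies to a simple vector add --
// cudaMalloc/cudaFree -> hipMalloc/hipFree, cudaMemcpy and its
// cudaMemcpyHostToDevice/DeviceToHost flags -> hipMemcpy and the hip* flags,
// and kernel<<<grid, block>>>(...) -> hipLaunchKernelGGL(...). Below is a
// minimal HIP sketch of the two equivalent launch spellings; kernel and
// wrapper names are assumptions.
#include "hip/hip_runtime.h"
__global__ void scale_by_two(int *p, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) p[i] *= 2;
}
void launch_both_ways(int *d_p, int n) {
    int block = 256;
    int grid = (n + block - 1) / block;
    scale_by_two<<<dim3(grid), dim3(block), 0, 0>>>(d_p, n);         // CUDA-style launch, accepted by hipcc
    hipLaunchKernelGGL(scale_by_two, dim3(grid), dim3(block), 0, 0,  // hipify's portable rewrite
                       d_p, n);
}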
32130f7450cdb02aaa2c1be261af1a00052c7490.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMathReduce.cu"
#else
THC_API void
THCTensor_(sum)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim<real>(state, self, src,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim<real>(state, self, src,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(1),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(mean)(THCState *state, THCTensor *self, THCTensor *src, int dim, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
const accreal size = scalar_cast<accreal>(THCTensor_(size)(state, src, dim));
if (!THC_reduceDim<real>(state, self, src,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
ReduceDivide<accreal>{size},
scalar_cast<accreal>(0),
dim,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
THC_API void
THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, real value, int dimension, real maxnorm)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THCTensor *self_;
THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0);
THCTensor *data = THCTensor_(newClone)(state, src_);
int64_t numel = THCTensor_(nElement)(state, data);
THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "invalid dimension");
THArgCheck(THCNumerics<real>::gt(value, scalar_cast<real>(0)), 2, "non-positive-norm not supported");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) > 1, 1, "need at least 2 dimensions");
if (numel > 0) {
ptrdiff_t size = numel / THTensor_sizeLegacyNoScalars(data, 0);
dim3 grid( THTensor_sizeLegacyNoScalars(data, 0));
dim3 threads(32);
hipLaunchKernelGGL(( THCTensor_kernel_renorm<real, accreal>)
, dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, data), scalar_cast<accreal>(value), size, scalar_cast<accreal>(maxnorm));
hipError_t errcode = hipGetLastError();
if(errcode != hipSuccess)
THError(hipGetErrorString(errcode));
}
THCTensor_(free)(state, src_);
self_ = THCTensor_(newTranspose)(state, data, dimension, 0);
THCTensor_(resizeAs)(state, self, self_);
THCTensor_(freeCopyTo)(state, self_, self);
THCTensor_(free)(state, data);
}
THC_API void
THCTensor_(std)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
THCTensor_preserveReduceDimSemantics(
state, self_, THCTensor_(nDimensionLegacyAll)(state, src), dimension, keepdim);
std::vector<int64_t> dim = THTensor_sizesLegacyNoScalars(src);
dim[dimension] = 1;
THCTensor_(resize)(state, self_, dim, {});
THCTensor *self = THCTensor_(newContiguous)(state, self_);
src = THCTensor_(newContiguous)(state, src);
if (dimension == THCTensor_(nDimensionLegacyAll)(state, src) - 1) {
THCTensor_varInnermostDim<THCTensor, real, accreal, true>(state, self, src, biased);
} else {
THCTensor_varOuterDim<THCTensor, real, accreal, true>(state, self, src, dimension, biased);
}
THCTensor_(free)(state, src);
THCTensor_(freeCopyTo)(state, self, self_);
if (!keepdim) {
THCTensor_(squeeze1d)(state, self_, self_, dimension);
}
}
THC_API void
THCTensor_(var)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
THCTensor_preserveReduceDimSemantics(
state, self_, THCTensor_(nDimensionLegacyAll)(state, src), dimension, keepdim);
std::vector<int64_t> dim = THTensor_sizesLegacyNoScalars(src);
dim[dimension] = 1;
THCTensor_(resize)(state, self_, dim, {});
THCTensor *self = THCTensor_(newContiguous)(state, self_);
src = THCTensor_(newContiguous)(state, src);
if (dimension == THCTensor_(nDimensionLegacyAll)(state, src) - 1) {
THCTensor_varInnermostDim<THCTensor, real, accreal, false>(state, self, src, biased);
} else {
THCTensor_varOuterDim<THCTensor, real, accreal, false>(state, self, src, dimension, biased);
}
THCTensor_(free)(state, src);
THCTensor_(freeCopyTo)(state, self, self_);
if (!keepdim) {
THCTensor_(squeeze1d)(state, self_, self_, dimension);
}
}
THC_API accreal
THCTensor_(stdall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCNumerics<accreal>::sqrt((THCTensor_(varall)(state, self, biased)));
}
THC_API accreal
THCTensor_(varall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal mean = THCTensor_(meanall)(state, self);
accreal val;
if (!THC_reduceAll<real>(state, self,
SquareFunctor<accreal>(mean),
ReduceAdd<accreal>(),
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
val = THCNumerics<accreal>::div(
val,
scalar_cast<accreal>(std::max<int64_t>(0, THCTensor_(nElement)(state, self) - (biased ? 0 : 1)))
);
THCudaCheck(hipGetLastError());
return val;
}
THC_API void
THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, real _value, int dimension, int keepdim)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
THC_reduceDim<real>(state, self, src,
TensorNonZeroOp<accreal>{},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) {
THC_reduceDim<real>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) {
THC_reduceDim<real>(state, self, src,
TensorNormOp<accreal, 2>{value},
ReduceAdd<accreal>{},
ReducePow<accreal>{scalar_cast<accreal>(.5)},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
THC_reduceDim<real>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceMax<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else {
THC_reduceDim<real>(state, self, src,
TensorNormOp<accreal, -1>{value},
ReduceAdd<accreal>{},
ReducePow<accreal>{THCNumerics<accreal>::cinv(value)},
scalar_cast<accreal>(0),
dimension, keepdim);
}
THCudaCheck(hipGetLastError());
}
THC_API accreal
THCTensor_(normall)(THCState *state, THCTensor *self, real _value)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal result;
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
THC_reduceAll<real>(state, self,
TensorNonZeroOp<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) {
THC_reduceAll<real>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) {
THC_reduceAll<real>(state, self,
TensorNormOp<accreal, 2>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
result = THCNumerics<accreal>::sqrt(result);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
THC_reduceAll<real>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceMax<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else {
THC_reduceAll<real>(state, self,
TensorNormOp<accreal, -1>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
result = THCNumerics<accreal>::pow(result,
THCNumerics<accreal>::cinv(value));
}
THCudaCheck(hipGetLastError());
return result;
}
accreal THCTensor_(dist)(THCState *state, THCTensor *self,
THCTensor *src, real _value)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
self = THCTensor_(newContiguous)(state, self);
ptrdiff_t size = THCTensor_(nElement)(state, self);
src = THCTensor_(newContiguous)(state, src);
thrust::device_ptr<real> self_data(THCTensor_(data)(state, self));
thrust::device_ptr<real> src_data(THCTensor_(data)(state, src));
THCThrustAllocator thrustAlloc(state);
accreal result = thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(0),
thrust::plus<accreal>(),
ThrustTensorDistOp<real, accreal>(value));
THCTensor_(free)(state, src);
THCTensor_(free)(state, self);
return THCNumerics<accreal>::pow(result, THCNumerics<accreal>::cinv(value));
}
#endif
THC_API accreal
THCTensor_(sumall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<real>(state, self,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return val;
}
THC_API accreal
THCTensor_(prodall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<real>(state, self,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
scalar_cast<accreal>(1),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return val;
}
THC_API accreal
THCTensor_(meanall)(THCState *state, THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self);
}
THC_API real
THCTensor_(minall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<real>(state, self,
thrust::identity<accreal>{},
ReduceMin<accreal>{},
THCNumerics<accreal>::upper_bound(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return scalar_cast<real>(val);
}
THC_API real
THCTensor_(maxall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<real>(state, self,
thrust::identity<accreal>{},
ReduceMax<accreal>{},
THCNumerics<accreal>::lower_bound(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return scalar_cast<real>(val);
}
THC_API real
THCTensor_(medianall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
real val;
ptrdiff_t nelem, k;
nelem = THCTensor_(nElement)(state, self);
k = (nelem-1) >> 1;
THCTensor *view = THCTensor_(newView)(state, self, {nelem});
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, indices, view, 0, 0);
val = THCTensor_(get1d)(state, sorted, k);
THCTensor_(free)(state, view);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, indices);
THCudaCheck(hipGetLastError());
return val;
}
THC_API void
THCTensor_(median)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *self,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
int64_t t_size_dim, k;
t_size_dim = THCTensor_(size)(state, self, dimension);
k = (t_size_dim-1) >> 1;
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *sorted_indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, sorted_indices, self, dimension, 0);
THCTensor *newValues = THCTensor_(newNarrow)(state, sorted, dimension, k, 1);
THCudaLongTensor *newIndices = THCudaLongTensor_newNarrow(state, sorted_indices, dimension, k, 1);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, sorted_indices);
if (!keepdim) {
THCTensor_(squeeze1d)(state, newValues, newValues, dimension);
THCudaLongTensor_squeeze1d(state, newIndices, newIndices, dimension);
}
THCTensor_(resizeAs)(state, values, newValues);
THCudaLongTensor_resizeAs(state, indices, newIndices);
THCTensor_(copy)(state, values, newValues);
THCudaLongTensor_copy(state, indices, newIndices);
THCTensor_(free)(state, newValues);
THCudaLongTensor_free(state, newIndices);
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(max)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<real, int64_t>
init =
thrust::make_pair<real, int64_t>(
THCNumerics<real>::lower_bound(), 0);
return THC_reduceDimIndex<real, int64_t>(
state, values, indices, src, dimension, keepdim, init,
MaxValuePair<real, int64_t>());
}
THC_API void
THCTensor_(min)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<real, int64_t>
init =
thrust::make_pair<real, int64_t>(
THCNumerics<real>::upper_bound(), 0);
return THC_reduceDimIndex<real, int64_t>(
state, values, indices, src, dimension, keepdim, init,
MinValuePair<real, int64_t>());
}
#endif
| 32130f7450cdb02aaa2c1be261af1a00052c7490.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMathReduce.cu"
#else
THC_API void
THCTensor_(sum)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim<real>(state, self, src,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim<real>(state, self, src,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(1),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(mean)(THCState *state, THCTensor *self, THCTensor *src, int dim, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
const accreal size = scalar_cast<accreal>(THCTensor_(size)(state, src, dim));
if (!THC_reduceDim<real>(state, self, src,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
ReduceDivide<accreal>{size},
scalar_cast<accreal>(0),
dim,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
THC_API void
THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, real value, int dimension, real maxnorm)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THCTensor *self_;
THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0);
THCTensor *data = THCTensor_(newClone)(state, src_);
int64_t numel = THCTensor_(nElement)(state, data);
THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "invalid dimension");
THArgCheck(THCNumerics<real>::gt(value, scalar_cast<real>(0)), 2, "non-positive-norm not supported");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) > 1, 1, "need at least 2 dimensions");
if (numel > 0) {
ptrdiff_t size = numel / THTensor_sizeLegacyNoScalars(data, 0);
dim3 grid( THTensor_sizeLegacyNoScalars(data, 0));
dim3 threads(32);
THCTensor_kernel_renorm<real, accreal>
<<<grid, threads, 0, THCState_getCurrentStream(state)>>>
(THCTensor_(data)(state, data), scalar_cast<accreal>(value), size, scalar_cast<accreal>(maxnorm));
cudaError errcode = cudaGetLastError();
if(errcode != cudaSuccess)
THError(cudaGetErrorString(errcode));
}
THCTensor_(free)(state, src_);
self_ = THCTensor_(newTranspose)(state, data, dimension, 0);
THCTensor_(resizeAs)(state, self, self_);
THCTensor_(freeCopyTo)(state, self_, self);
THCTensor_(free)(state, data);
}
THC_API void
THCTensor_(std)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
THCTensor_preserveReduceDimSemantics(
state, self_, THCTensor_(nDimensionLegacyAll)(state, src), dimension, keepdim);
std::vector<int64_t> dim = THTensor_sizesLegacyNoScalars(src);
dim[dimension] = 1;
THCTensor_(resize)(state, self_, dim, {});
THCTensor *self = THCTensor_(newContiguous)(state, self_);
src = THCTensor_(newContiguous)(state, src);
if (dimension == THCTensor_(nDimensionLegacyAll)(state, src) - 1) {
THCTensor_varInnermostDim<THCTensor, real, accreal, true>(state, self, src, biased);
} else {
THCTensor_varOuterDim<THCTensor, real, accreal, true>(state, self, src, dimension, biased);
}
THCTensor_(free)(state, src);
THCTensor_(freeCopyTo)(state, self, self_);
if (!keepdim) {
THCTensor_(squeeze1d)(state, self_, self_, dimension);
}
}
THC_API void
THCTensor_(var)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
THCTensor_preserveReduceDimSemantics(
state, self_, THCTensor_(nDimensionLegacyAll)(state, src), dimension, keepdim);
std::vector<int64_t> dim = THTensor_sizesLegacyNoScalars(src);
dim[dimension] = 1;
THCTensor_(resize)(state, self_, dim, {});
THCTensor *self = THCTensor_(newContiguous)(state, self_);
src = THCTensor_(newContiguous)(state, src);
if (dimension == THCTensor_(nDimensionLegacyAll)(state, src) - 1) {
THCTensor_varInnermostDim<THCTensor, real, accreal, false>(state, self, src, biased);
} else {
THCTensor_varOuterDim<THCTensor, real, accreal, false>(state, self, src, dimension, biased);
}
THCTensor_(free)(state, src);
THCTensor_(freeCopyTo)(state, self, self_);
if (!keepdim) {
THCTensor_(squeeze1d)(state, self_, self_, dimension);
}
}
THC_API accreal
THCTensor_(stdall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCNumerics<accreal>::sqrt((THCTensor_(varall)(state, self, biased)));
}
THC_API accreal
THCTensor_(varall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal mean = THCTensor_(meanall)(state, self);
accreal val;
if (!THC_reduceAll<real>(state, self,
SquareFunctor<accreal>(mean),
ReduceAdd<accreal>(),
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
val = THCNumerics<accreal>::div(
val,
scalar_cast<accreal>(std::max<int64_t>(0, THCTensor_(nElement)(state, self) - (biased ? 0 : 1)))
);
THCudaCheck(cudaGetLastError());
return val;
}
THC_API void
THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, real _value, int dimension, int keepdim)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
THC_reduceDim<real>(state, self, src,
TensorNonZeroOp<accreal>{},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) {
THC_reduceDim<real>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) {
THC_reduceDim<real>(state, self, src,
TensorNormOp<accreal, 2>{value},
ReduceAdd<accreal>{},
ReducePow<accreal>{scalar_cast<accreal>(.5)},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
THC_reduceDim<real>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceMax<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else {
THC_reduceDim<real>(state, self, src,
TensorNormOp<accreal, -1>{value},
ReduceAdd<accreal>{},
ReducePow<accreal>{THCNumerics<accreal>::cinv(value)},
scalar_cast<accreal>(0),
dimension, keepdim);
}
THCudaCheck(cudaGetLastError());
}
THC_API accreal
THCTensor_(normall)(THCState *state, THCTensor *self, real _value)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal result;
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
THC_reduceAll<real>(state, self,
TensorNonZeroOp<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) {
THC_reduceAll<real>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) {
THC_reduceAll<real>(state, self,
TensorNormOp<accreal, 2>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
result = THCNumerics<accreal>::sqrt(result);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
THC_reduceAll<real>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceMax<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else {
THC_reduceAll<real>(state, self,
TensorNormOp<accreal, -1>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
result = THCNumerics<accreal>::pow(result,
THCNumerics<accreal>::cinv(value));
}
THCudaCheck(cudaGetLastError());
return result;
}
accreal THCTensor_(dist)(THCState *state, THCTensor *self,
THCTensor *src, real _value)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
self = THCTensor_(newContiguous)(state, self);
ptrdiff_t size = THCTensor_(nElement)(state, self);
src = THCTensor_(newContiguous)(state, src);
thrust::device_ptr<real> self_data(THCTensor_(data)(state, self));
thrust::device_ptr<real> src_data(THCTensor_(data)(state, src));
THCThrustAllocator thrustAlloc(state);
accreal result = thrust::inner_product(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(0),
thrust::plus<accreal>(),
ThrustTensorDistOp<real, accreal>(value));
THCTensor_(free)(state, src);
THCTensor_(free)(state, self);
return THCNumerics<accreal>::pow(result, THCNumerics<accreal>::cinv(value));
}
#endif
THC_API accreal
THCTensor_(sumall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<real>(state, self,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return val;
}
THC_API accreal
THCTensor_(prodall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<real>(state, self,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
scalar_cast<accreal>(1),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return val;
}
THC_API accreal
THCTensor_(meanall)(THCState *state, THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self);
}
THC_API real
THCTensor_(minall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<real>(state, self,
thrust::identity<accreal>{},
ReduceMin<accreal>{},
THCNumerics<accreal>::upper_bound(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return scalar_cast<real>(val);
}
THC_API real
THCTensor_(maxall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<real>(state, self,
thrust::identity<accreal>{},
ReduceMax<accreal>{},
THCNumerics<accreal>::lower_bound(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return scalar_cast<real>(val);
}
THC_API real
THCTensor_(medianall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
real val;
ptrdiff_t nelem, k;
nelem = THCTensor_(nElement)(state, self);
k = (nelem-1) >> 1;
THCTensor *view = THCTensor_(newView)(state, self, {nelem});
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, indices, view, 0, 0);
val = THCTensor_(get1d)(state, sorted, k);
THCTensor_(free)(state, view);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, indices);
THCudaCheck(cudaGetLastError());
return val;
}
THC_API void
THCTensor_(median)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *self,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
int64_t t_size_dim, k;
t_size_dim = THCTensor_(size)(state, self, dimension);
k = (t_size_dim-1) >> 1;
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *sorted_indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, sorted_indices, self, dimension, 0);
THCTensor *newValues = THCTensor_(newNarrow)(state, sorted, dimension, k, 1);
THCudaLongTensor *newIndices = THCudaLongTensor_newNarrow(state, sorted_indices, dimension, k, 1);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, sorted_indices);
if (!keepdim) {
THCTensor_(squeeze1d)(state, newValues, newValues, dimension);
THCudaLongTensor_squeeze1d(state, newIndices, newIndices, dimension);
}
THCTensor_(resizeAs)(state, values, newValues);
THCudaLongTensor_resizeAs(state, indices, newIndices);
THCTensor_(copy)(state, values, newValues);
THCudaLongTensor_copy(state, indices, newIndices);
THCTensor_(free)(state, newValues);
THCudaLongTensor_free(state, newIndices);
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(max)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<real, int64_t>
init =
thrust::make_pair<real, int64_t>(
THCNumerics<real>::lower_bound(), 0);
return THC_reduceDimIndex<real, int64_t>(
state, values, indices, src, dimension, keepdim, init,
MaxValuePair<real, int64_t>());
}
THC_API void
THCTensor_(min)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<real, int64_t>
init =
thrust::make_pair<real, int64_t>(
THCNumerics<real>::upper_bound(), 0);
return THC_reduceDimIndex<real, int64_t>(
state, values, indices, src, dimension, keepdim, init,
MinValuePair<real, int64_t>());
}
#endif
|
74024cdbd9fd5b5bfa4c162e0f333fcc3eae0688.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// REQUIRES: x86-registered-target, amdgpu-registered-target
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -std=c++11 \
// RUN: -emit-llvm -o - -x hip %s | FileCheck \
// RUN: -check-prefixes=COMMON,DEV,NORDC-D %s
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -std=c++11 \
// RUN: -emit-llvm -fgpu-rdc -cuid=abc -o - -x hip %s > %t.dev
// RUN: cat %t.dev | FileCheck -check-prefixes=COMMON,DEV,RDC-D %s
// RUN: %clang_cc1 -triple x86_64-gnu-linux -std=c++11 \
// RUN: -emit-llvm -o - -x hip %s | FileCheck \
// RUN: -check-prefixes=COMMON,HOST,NORDC %s
// RUN: %clang_cc1 -triple x86_64-gnu-linux -std=c++11 \
// RUN: -emit-llvm -fgpu-rdc -cuid=abc -o - -x hip %s > %t.host
// RUN: cat %t.host | FileCheck -check-prefixes=COMMON,HOST,RDC %s
// Check device and host compilation use the same postfix for static
// variable name.
// RUN: cat %t.dev %t.host | FileCheck -check-prefix=POSTFIX %s
#include "Inputs/cuda.h"
struct vec {
float x,y,z;
};
// DEV-DAG: @x.managed = dso_local addrspace(1) externally_initialized global i32 1, align 4
// DEV-DAG: @x = dso_local addrspace(1) externally_initialized global i32 addrspace(1)* null
// NORDC-DAG: @x.managed = internal global i32 1
// RDC-DAG: @x.managed = dso_local global i32 1
// NORDC-DAG: @x = internal externally_initialized global i32* null
// RDC-DAG: @x = dso_local externally_initialized global i32* null
// HOST-DAG: @[[DEVNAMEX:[0-9]+]] = {{.*}}c"x\00"
__managed__ int x = 1;
// DEV-DAG: @v.managed = dso_local addrspace(1) externally_initialized global [100 x %struct.vec] zeroinitializer, align 4
// DEV-DAG: @v = dso_local addrspace(1) externally_initialized global [100 x %struct.vec] addrspace(1)* null
__managed__ vec v[100];
// DEV-DAG: @v2.managed = dso_local addrspace(1) externally_initialized global <{ %struct.vec, [99 x %struct.vec] }> <{ %struct.vec { float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 }, [99 x %struct.vec] zeroinitializer }>, align 4
// DEV-DAG: @v2 = dso_local addrspace(1) externally_initialized global <{ %struct.vec, [99 x %struct.vec] }> addrspace(1)* null
__managed__ vec v2[100] = {{1, 1, 1}};
// DEV-DAG: @ex.managed = external addrspace(1) global i32, align 4
// DEV-DAG: @ex = external addrspace(1) externally_initialized global i32 addrspace(1)*
// HOST-DAG: @ex.managed = external global i32
// HOST-DAG: @ex = external externally_initialized global i32*
extern __managed__ int ex;
// NORDC-D-DAG: @_ZL2sx.managed = dso_local addrspace(1) externally_initialized global i32 1, align 4
// NORDC-D-DAG: @_ZL2sx = dso_local addrspace(1) externally_initialized global i32 addrspace(1)* null
// RDC-D-DAG: @_ZL2sx.static.[[HASH:.*]].managed = dso_local addrspace(1) externally_initialized global i32 1, align 4
// RDC-D-DAG: @_ZL2sx.static.[[HASH]] = dso_local addrspace(1) externally_initialized global i32 addrspace(1)* null
// HOST-DAG: @_ZL2sx.managed = internal global i32 1
// HOST-DAG: @_ZL2sx = internal externally_initialized global i32* null
// NORDC-DAG: @[[DEVNAMESX:[0-9]+]] = {{.*}}c"_ZL2sx\00"
// RDC-DAG: @[[DEVNAMESX:[0-9]+]] = {{.*}}c"_ZL2sx.static.[[HASH:.*]]\00"
// POSTFIX: @_ZL2sx.static.[[HASH:.*]] = dso_local addrspace(1) externally_initialized global i32 addrspace(1)* null
// POSTFIX: @[[DEVNAMESX:[0-9]+]] = {{.*}}c"_ZL2sx.static.[[HASH]]\00"
static __managed__ int sx = 1;
// DEV-DAG: @llvm.compiler.used
// DEV-SAME-DAG: @x.managed
// DEV-SAME-DAG: @x
// DEV-SAME-DAG: @v.managed
// DEV-SAME-DAG: @v
// DEV-SAME-DAG: @_ZL2sx.managed
// DEV-SAME-DAG: @_ZL2sx
// Force ex and sx emitted in device compilation.
__global__ void foo(int *z) {
*z = x + ex + sx;
v[1].x = 2;
}
// Force ex and sx emitted in host compilation.
int foo2() {
return ex + sx;
}
// COMMON-LABEL: define {{.*}}@_Z4loadv()
// DEV: %ld.managed = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @x, align 4
// DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32*
// DEV: %1 = load i32, i32* %0, align 4
// DEV: ret i32 %1
// HOST: %ld.managed = load i32*, i32** @x, align 4
// HOST: %0 = load i32, i32* %ld.managed, align 4
// HOST: ret i32 %0
__device__ __host__ int load() {
return x;
}
// COMMON-LABEL: define {{.*}}@_Z5storev()
// DEV: %ld.managed = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @x, align 4
// DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32*
// DEV: store i32 2, i32* %0, align 4
// HOST: %ld.managed = load i32*, i32** @x, align 4
// HOST: store i32 2, i32* %ld.managed, align 4
__device__ __host__ void store() {
x = 2;
}
// COMMON-LABEL: define {{.*}}@_Z10addr_takenv()
// DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32*
// DEV: store i32* %0, i32** %p.ascast, align 8
// DEV: %1 = load i32*, i32** %p.ascast, align 8
// DEV: store i32 3, i32* %1, align 4
// HOST: %ld.managed = load i32*, i32** @x, align 4
// HOST: store i32* %ld.managed, i32** %p, align 8
// HOST: %0 = load i32*, i32** %p, align 8
// HOST: store i32 3, i32* %0, align 4
__device__ __host__ void addr_taken() {
int *p = &x;
*p = 3;
}
// HOST-LABEL: define {{.*}}@_Z5load2v()
// HOST: %ld.managed = load [100 x %struct.vec]*, [100 x %struct.vec]** @v, align 16
// HOST: %0 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %ld.managed, i64 0, i64 1, i32 0
// HOST: %1 = load float, float* %0, align 4
// HOST: ret float %1
__device__ __host__ float load2() {
return v[1].x;
}
// HOST-LABEL: define {{.*}}@_Z5load3v()
// HOST: %ld.managed = load <{ %struct.vec, [99 x %struct.vec] }>*, <{ %struct.vec, [99 x %struct.vec] }>** @v2, align 16
// HOST: %0 = bitcast <{ %struct.vec, [99 x %struct.vec] }>* %ld.managed to [100 x %struct.vec]*
// HOST: %1 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %0, i64 0, i64 1, i32 1
// HOST: %2 = load float, float* %1, align 4
// HOST: ret float %2
float load3() {
return v2[1].y;
}
// HOST-LABEL: define {{.*}}@_Z11addr_taken2v()
// HOST: %ld.managed = load [100 x %struct.vec]*, [100 x %struct.vec]** @v, align 16
// HOST: %0 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %ld.managed, i64 0, i64 1, i32 0
// HOST: %1 = ptrtoint float* %0 to i64
// HOST: %ld.managed1 = load <{ %struct.vec, [99 x %struct.vec] }>*, <{ %struct.vec, [99 x %struct.vec] }>** @v2, align 16
// HOST: %2 = bitcast <{ %struct.vec, [99 x %struct.vec] }>* %ld.managed1 to [100 x %struct.vec]*
// HOST: %3 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %2, i64 0, i64 1, i32 1
// HOST: %4 = ptrtoint float* %3 to i64
// HOST: %5 = sub i64 %4, %1
// HOST: %6 = sdiv i64 %5, 4
// HOST: %7 = sitofp i64 %6 to float
// HOST: ret float %7
float addr_taken2() {
return (float)reinterpret_cast<long>(&(v2[1].y)-&(v[1].x));
}
// COMMON-LABEL: define {{.*}}@_Z5load4v()
// DEV: %ld.managed = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @ex, align 4
// DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32*
// DEV: %1 = load i32, i32* %0, align 4
// DEV: ret i32 %1
// HOST: %ld.managed = load i32*, i32** @ex, align 4
// HOST: %0 = load i32, i32* %ld.managed, align 4
// HOST: ret i32 %0
__device__ __host__ int load4() {
return ex;
}
// HOST-DAG: __hipRegisterManagedVar({{.*}}@x {{.*}}@x.managed {{.*}}@[[DEVNAMEX]]{{.*}}, i64 4, i32 4)
// HOST-DAG: __hipRegisterManagedVar({{.*}}@_ZL2sx {{.*}}@_ZL2sx.managed {{.*}}@[[DEVNAMESX]]
// HOST-NOT: __hipRegisterManagedVar({{.*}}@ex {{.*}}@ex.managed
// HOST-DAG: declare void @__hipRegisterManagedVar(i8**, i8*, i8*, i8*, i64, i32)
| 74024cdbd9fd5b5bfa4c162e0f333fcc3eae0688.cu | // REQUIRES: x86-registered-target, amdgpu-registered-target
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -std=c++11 \
// RUN: -emit-llvm -o - -x hip %s | FileCheck \
// RUN: -check-prefixes=COMMON,DEV,NORDC-D %s
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -std=c++11 \
// RUN: -emit-llvm -fgpu-rdc -cuid=abc -o - -x hip %s > %t.dev
// RUN: cat %t.dev | FileCheck -check-prefixes=COMMON,DEV,RDC-D %s
// RUN: %clang_cc1 -triple x86_64-gnu-linux -std=c++11 \
// RUN: -emit-llvm -o - -x hip %s | FileCheck \
// RUN: -check-prefixes=COMMON,HOST,NORDC %s
// RUN: %clang_cc1 -triple x86_64-gnu-linux -std=c++11 \
// RUN: -emit-llvm -fgpu-rdc -cuid=abc -o - -x hip %s > %t.host
// RUN: cat %t.host | FileCheck -check-prefixes=COMMON,HOST,RDC %s
// Check device and host compilation use the same postfix for static
// variable name.
// RUN: cat %t.dev %t.host | FileCheck -check-prefix=POSTFIX %s
#include "Inputs/cuda.h"
struct vec {
float x,y,z;
};
// DEV-DAG: @x.managed = dso_local addrspace(1) externally_initialized global i32 1, align 4
// DEV-DAG: @x = dso_local addrspace(1) externally_initialized global i32 addrspace(1)* null
// NORDC-DAG: @x.managed = internal global i32 1
// RDC-DAG: @x.managed = dso_local global i32 1
// NORDC-DAG: @x = internal externally_initialized global i32* null
// RDC-DAG: @x = dso_local externally_initialized global i32* null
// HOST-DAG: @[[DEVNAMEX:[0-9]+]] = {{.*}}c"x\00"
__managed__ int x = 1;
// DEV-DAG: @v.managed = dso_local addrspace(1) externally_initialized global [100 x %struct.vec] zeroinitializer, align 4
// DEV-DAG: @v = dso_local addrspace(1) externally_initialized global [100 x %struct.vec] addrspace(1)* null
__managed__ vec v[100];
// DEV-DAG: @v2.managed = dso_local addrspace(1) externally_initialized global <{ %struct.vec, [99 x %struct.vec] }> <{ %struct.vec { float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 }, [99 x %struct.vec] zeroinitializer }>, align 4
// DEV-DAG: @v2 = dso_local addrspace(1) externally_initialized global <{ %struct.vec, [99 x %struct.vec] }> addrspace(1)* null
__managed__ vec v2[100] = {{1, 1, 1}};
// DEV-DAG: @ex.managed = external addrspace(1) global i32, align 4
// DEV-DAG: @ex = external addrspace(1) externally_initialized global i32 addrspace(1)*
// HOST-DAG: @ex.managed = external global i32
// HOST-DAG: @ex = external externally_initialized global i32*
extern __managed__ int ex;
// NORDC-D-DAG: @_ZL2sx.managed = dso_local addrspace(1) externally_initialized global i32 1, align 4
// NORDC-D-DAG: @_ZL2sx = dso_local addrspace(1) externally_initialized global i32 addrspace(1)* null
// RDC-D-DAG: @_ZL2sx.static.[[HASH:.*]].managed = dso_local addrspace(1) externally_initialized global i32 1, align 4
// RDC-D-DAG: @_ZL2sx.static.[[HASH]] = dso_local addrspace(1) externally_initialized global i32 addrspace(1)* null
// HOST-DAG: @_ZL2sx.managed = internal global i32 1
// HOST-DAG: @_ZL2sx = internal externally_initialized global i32* null
// NORDC-DAG: @[[DEVNAMESX:[0-9]+]] = {{.*}}c"_ZL2sx\00"
// RDC-DAG: @[[DEVNAMESX:[0-9]+]] = {{.*}}c"_ZL2sx.static.[[HASH:.*]]\00"
// POSTFIX: @_ZL2sx.static.[[HASH:.*]] = dso_local addrspace(1) externally_initialized global i32 addrspace(1)* null
// POSTFIX: @[[DEVNAMESX:[0-9]+]] = {{.*}}c"_ZL2sx.static.[[HASH]]\00"
static __managed__ int sx = 1;
// DEV-DAG: @llvm.compiler.used
// DEV-SAME-DAG: @x.managed
// DEV-SAME-DAG: @x
// DEV-SAME-DAG: @v.managed
// DEV-SAME-DAG: @v
// DEV-SAME-DAG: @_ZL2sx.managed
// DEV-SAME-DAG: @_ZL2sx
// Force ex and sx emitted in device compilation.
__global__ void foo(int *z) {
*z = x + ex + sx;
v[1].x = 2;
}
// Force ex and sx emitted in host compilation.
int foo2() {
return ex + sx;
}
// COMMON-LABEL: define {{.*}}@_Z4loadv()
// DEV: %ld.managed = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @x, align 4
// DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32*
// DEV: %1 = load i32, i32* %0, align 4
// DEV: ret i32 %1
// HOST: %ld.managed = load i32*, i32** @x, align 4
// HOST: %0 = load i32, i32* %ld.managed, align 4
// HOST: ret i32 %0
__device__ __host__ int load() {
return x;
}
// COMMON-LABEL: define {{.*}}@_Z5storev()
// DEV: %ld.managed = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @x, align 4
// DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32*
// DEV: store i32 2, i32* %0, align 4
// HOST: %ld.managed = load i32*, i32** @x, align 4
// HOST: store i32 2, i32* %ld.managed, align 4
__device__ __host__ void store() {
x = 2;
}
// COMMON-LABEL: define {{.*}}@_Z10addr_takenv()
// DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32*
// DEV: store i32* %0, i32** %p.ascast, align 8
// DEV: %1 = load i32*, i32** %p.ascast, align 8
// DEV: store i32 3, i32* %1, align 4
// HOST: %ld.managed = load i32*, i32** @x, align 4
// HOST: store i32* %ld.managed, i32** %p, align 8
// HOST: %0 = load i32*, i32** %p, align 8
// HOST: store i32 3, i32* %0, align 4
__device__ __host__ void addr_taken() {
int *p = &x;
*p = 3;
}
// HOST-LABEL: define {{.*}}@_Z5load2v()
// HOST: %ld.managed = load [100 x %struct.vec]*, [100 x %struct.vec]** @v, align 16
// HOST: %0 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %ld.managed, i64 0, i64 1, i32 0
// HOST: %1 = load float, float* %0, align 4
// HOST: ret float %1
__device__ __host__ float load2() {
return v[1].x;
}
// HOST-LABEL: define {{.*}}@_Z5load3v()
// HOST: %ld.managed = load <{ %struct.vec, [99 x %struct.vec] }>*, <{ %struct.vec, [99 x %struct.vec] }>** @v2, align 16
// HOST: %0 = bitcast <{ %struct.vec, [99 x %struct.vec] }>* %ld.managed to [100 x %struct.vec]*
// HOST: %1 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %0, i64 0, i64 1, i32 1
// HOST: %2 = load float, float* %1, align 4
// HOST: ret float %2
float load3() {
return v2[1].y;
}
// HOST-LABEL: define {{.*}}@_Z11addr_taken2v()
// HOST: %ld.managed = load [100 x %struct.vec]*, [100 x %struct.vec]** @v, align 16
// HOST: %0 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %ld.managed, i64 0, i64 1, i32 0
// HOST: %1 = ptrtoint float* %0 to i64
// HOST: %ld.managed1 = load <{ %struct.vec, [99 x %struct.vec] }>*, <{ %struct.vec, [99 x %struct.vec] }>** @v2, align 16
// HOST: %2 = bitcast <{ %struct.vec, [99 x %struct.vec] }>* %ld.managed1 to [100 x %struct.vec]*
// HOST: %3 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %2, i64 0, i64 1, i32 1
// HOST: %4 = ptrtoint float* %3 to i64
// HOST: %5 = sub i64 %4, %1
// HOST: %6 = sdiv i64 %5, 4
// HOST: %7 = sitofp i64 %6 to float
// HOST: ret float %7
float addr_taken2() {
return (float)reinterpret_cast<long>(&(v2[1].y)-&(v[1].x));
}
// COMMON-LABEL: define {{.*}}@_Z5load4v()
// DEV: %ld.managed = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @ex, align 4
// DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32*
// DEV: %1 = load i32, i32* %0, align 4
// DEV: ret i32 %1
// HOST: %ld.managed = load i32*, i32** @ex, align 4
// HOST: %0 = load i32, i32* %ld.managed, align 4
// HOST: ret i32 %0
__device__ __host__ int load4() {
return ex;
}
// HOST-DAG: __hipRegisterManagedVar({{.*}}@x {{.*}}@x.managed {{.*}}@[[DEVNAMEX]]{{.*}}, i64 4, i32 4)
// HOST-DAG: __hipRegisterManagedVar({{.*}}@_ZL2sx {{.*}}@_ZL2sx.managed {{.*}}@[[DEVNAMESX]]
// HOST-NOT: __hipRegisterManagedVar({{.*}}@ex {{.*}}@ex.managed
// HOST-DAG: declare void @__hipRegisterManagedVar(i8**, i8*, i8*, i8*, i64, i32)
|
f3b422a02ba4c3c1517395cc1ac5a7422ab2468d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "EstimateInputSize.cuh"
__global__ void estimate_input_size(
char* dev_raw_input,
uint* dev_raw_input_offsets,
uint* dev_estimated_input_size,
uint* dev_module_cluster_num,
uint* dev_event_candidate_num,
uint32_t* dev_cluster_candidates,
uint8_t* dev_velo_candidate_ks
) {
const uint event_number = blockIdx.x;
const uint raw_bank_starting_chunk = threadIdx.y; // up to 26
const uint raw_bank_chunk_size = VeloTracking::n_sensors / blockDim.y; // blockDim.y = 26 -> chunk_size = 8
const char* raw_input = dev_raw_input + dev_raw_input_offsets[event_number];
uint* estimated_input_size = dev_estimated_input_size + event_number * VeloTracking::n_modules;
uint* module_cluster_num = dev_module_cluster_num + event_number * VeloTracking::n_modules;
uint* event_candidate_num = dev_event_candidate_num + event_number;
uint32_t* cluster_candidates = dev_cluster_candidates + event_number * VeloClustering::max_candidates_event;
// Initialize estimated_input_size, module_cluster_num and dev_module_candidate_num to 0
for (int i=0; i<(VeloTracking::n_modules + blockDim.x - 1) / blockDim.x; ++i) {
const auto index = i*blockDim.x + threadIdx.x;
if (index < VeloTracking::n_modules) {
estimated_input_size[index] = 0;
module_cluster_num[index] = 0;
}
}
*event_candidate_num = 0;
__syncthreads();
// Read raw event
const auto raw_event = VeloRawEvent(raw_input);
for (int raw_bank_rel_number = 0; raw_bank_rel_number < raw_bank_chunk_size; ++raw_bank_rel_number) {
const int raw_bank_number = raw_bank_starting_chunk * raw_bank_chunk_size + raw_bank_rel_number;
if (raw_bank_number < raw_event.number_of_raw_banks) {
// Read raw bank
const auto raw_bank = VeloRawBank(raw_event.payload + raw_event.raw_bank_offset[raw_bank_number]);
uint* estimated_module_size = estimated_input_size + (raw_bank.sensor_index >> 2);
for (int i=0; i<(raw_bank.sp_count + blockDim.x - 1) / blockDim.x; ++i) {
const auto sp_index = i*blockDim.x + threadIdx.x;
if (sp_index < raw_bank.sp_count) {
// Decode sp
const uint32_t sp_word = raw_bank.sp_word[sp_index];
const uint32_t no_sp_neighbours = sp_word & 0x80000000U;
const uint32_t sp_addr = (sp_word & 0x007FFF00U) >> 8;
const uint8_t sp = sp_word & 0xFFU;
if (no_sp_neighbours) {
// The SP does not have any neighbours
// The problem is as simple as a lookup pattern
// It can be implemented in two operations
// Pattern 0:
// (x x)
// o o
// (x x
// x x)
//
// Note: Pixel order in sp
// 0x08 | 0x80
// 0x04 | 0x40
// 0x02 | 0x20
// 0x01 | 0x10
const bool pattern_0 = sp&0x88 && !(sp&0x44) && sp&0x33;
// Pattern 1:
// (x x
// x x)
// o o
// (x x)
const bool pattern_1 = sp&0xCC && !(sp&0x22) && sp&0x11;
const uint number_of_clusters = 1 + (pattern_0 | pattern_1);
// Add the found clusters
uint current_estimated_module_size = atomicAdd(estimated_module_size, number_of_clusters);
assert( current_estimated_module_size < VeloTracking::max_numhits_in_module);
} else {
// Find candidates that follow this condition:
// For pixel o, all pixels x should *not* be populated
// x x
// o x
// x
// Load required neighbouring pixels in order to check the condition
// x x x
// o o x
// o o x
// o o x
// o o x
// x x
//
// Use an int for storing and calculating
// Bit order
//
// 4 10 16
// 3 9 15
// 2 8 14
// 1 7 13
// 0 6 12
// 5 11
//
// Bit masks
//
// 0x10 0x0400 0x010000
// 0x08 0x0200 0x8000
// 0x04 0x0100 0x4000
// 0x02 0x80 0x2000
// 0x01 0x40 0x1000
// 0x20 0x0800
uint32_t pixels = sp&0x0F | ((sp&0xF0) << 2);
// Current row and col
const uint32_t sp_row = sp_addr & 0x3FU;
const uint32_t sp_col = sp_addr >> 6;
for (uint k=0; k<raw_bank.sp_count; ++k) {
const uint32_t other_sp_word = raw_bank.sp_word[k];
const uint32_t other_no_sp_neighbours = other_sp_word & 0x80000000U;
if (!other_no_sp_neighbours) {
const uint32_t other_sp_addr = (other_sp_word & 0x007FFF00U) >> 8;
const uint32_t other_sp_row = other_sp_addr & 0x3FU;
const uint32_t other_sp_col = (other_sp_addr >> 6);
const uint8_t other_sp = other_sp_word & 0xFFU;
// Populate pixels
// Note: Pixel order in sp
// 0x08 | 0x80
// 0x04 | 0x40
// 0x02 | 0x20
// 0x01 | 0x10
const bool is_top = other_sp_row==(sp_row+1) && other_sp_col==sp_col;
const bool is_top_right = other_sp_row==(sp_row+1) && other_sp_col==(sp_col+1);
const bool is_right = other_sp_row==sp_row && other_sp_col==(sp_col+1);
const bool is_right_bottom = other_sp_row==(sp_row-1) && other_sp_col==(sp_col+1);
const bool is_bottom = other_sp_row==(sp_row-1) && other_sp_col==sp_col;
if (is_top || is_top_right || is_right || is_right_bottom || is_bottom) {
pixels |= is_top*((other_sp&0x01 | ((other_sp&0x10) << 2)) << 4);
pixels |= is_top_right*((other_sp&0x01) << 16);
pixels |= is_right*((other_sp&0x0F) << 12);
pixels |= is_right_bottom*((other_sp&0x08) << 8);
pixels |= is_bottom*((other_sp&0x80) >> 2);
}
}
}
// 16 1024 65536
// 8 512 32768
// 4 256 16384
// 2 128 8192
// 1 64 4096
// 32 2048
//
// Look up pattern
// x x
// o x
// x
//
uint found_cluster_candidates = 0;
assert(raw_bank_number < VeloTracking::n_sensors);
const uint32_t sp_inside_pixel = pixels & 0x3CF;
const uint32_t mask = (sp_inside_pixel << 1)
| (sp_inside_pixel << 5)
| (sp_inside_pixel << 6)
| (sp_inside_pixel << 7);
const uint32_t working_cluster = mask & (~pixels);
const uint32_t candidates_temp = (working_cluster >> 1)
& (working_cluster >> 5)
& (working_cluster >> 6)
& (working_cluster >> 7);
const uint32_t candidates = candidates_temp & pixels;
const uint8_t candidates_uint8 = (candidates & 0x03) | ((candidates & 0xC0) >> 4)
| ((candidates & 0x0C) << 2) | ((candidates & 0x0300) >> 2);
// Add candidates 0, 1, 4, 5
// Only one of those candidates can be flagged at a time
if (candidates_uint8 & 0xF) {
// if ((candidates_uint8 & 0xF) >= 9) {
// auto print_candidates8 = [] (const uint8_t& candidates) {
// printf("%i%i\n%i%i\n%i%i\n%i%i\n\n",
// (candidates & 0x80) > 0, (candidates & 0x40) > 0,
// (candidates & 0x20) > 0, (candidates & 0x10) > 0,
// (candidates & 0x8) > 0, (candidates & 0x4) > 0,
// (candidates & 0x2) > 0, candidates & 0x1
// );
// };
// auto print_candidates = [] (const uint32_t& candidates) {
// printf("%i%i%i\n%i%i%i\n%i%i%i\n%i%i%i\n%i%i%i\n %i%i\n\n",
// (candidates & 0x10) > 0, (candidates & 0x0400) > 0, (candidates & 0x010000) > 0,
// (candidates & 0x08) > 0, (candidates & 0x0200) > 0, (candidates & 0x8000) > 0,
// (candidates & 0x04) > 0, (candidates & 0x0100) > 0, (candidates & 0x4000) > 0,
// (candidates & 0x02) > 0, (candidates & 0x80) > 0, (candidates & 0x2000) > 0,
// (candidates & 0x01) > 0, (candidates & 0x40) > 0, (candidates & 0x1000) > 0,
// (candidates & 0x20) > 0, (candidates & 0x0800) > 0
// );
// };
// printf("pixels:\n");
// print_candidates(pixels);
// printf("sp_inside_pixel:\n");
// print_candidates(sp_inside_pixel);
// printf("mask:\n");
// print_candidates(mask);
// printf("working_cluster:\n");
// print_candidates(working_cluster);
// printf("candidates:\n");
// print_candidates(candidates);
// printf("candidates_uint8:\n");
// print_candidates8(candidates_uint8);
// }
// Verify candidates are correctly created
assert((candidates_uint8 & 0xF) < 9);
// Decode the candidate number (ie. find out the active bit)
const uint8_t k = dev_velo_candidate_ks[candidates_uint8 & 0xF];
auto current_cluster_candidate = atomicAdd(event_candidate_num, 1);
const uint32_t candidate = (sp_index << 11)
| (raw_bank_number << 3)
| k;
assert(current_cluster_candidate < blockDim.x * VeloClustering::max_candidates_event);
cluster_candidates[current_cluster_candidate] = candidate;
++found_cluster_candidates;
}
// Add candidates 2, 3, 6, 7
// Only one of those candidates can be flagged at a time
if (candidates_uint8 & 0xF0) {
assert(((candidates_uint8 >> 4) & 0xF) < 9);
const uint8_t k = dev_velo_candidate_ks[(candidates_uint8 >> 4)] + 2;
auto current_cluster_candidate = atomicAdd(event_candidate_num, 1);
const uint32_t candidate = (sp_index << 11)
| (raw_bank_number << 3)
| k;
assert(current_cluster_candidate < blockDim.x * VeloClustering::max_candidates_event);
cluster_candidates[current_cluster_candidate] = candidate;
++found_cluster_candidates;
}
// Add the found cluster candidates
if (found_cluster_candidates > 0) {
uint current_estimated_module_size = atomicAdd(estimated_module_size, found_cluster_candidates);
assert(current_estimated_module_size < VeloTracking::max_numhits_in_module);
}
}
}
}
}
}
}
| f3b422a02ba4c3c1517395cc1ac5a7422ab2468d.cu | #include "EstimateInputSize.cuh"
__global__ void estimate_input_size(
char* dev_raw_input,
uint* dev_raw_input_offsets,
uint* dev_estimated_input_size,
uint* dev_module_cluster_num,
uint* dev_event_candidate_num,
uint32_t* dev_cluster_candidates,
uint8_t* dev_velo_candidate_ks
) {
const uint event_number = blockIdx.x;
const uint raw_bank_starting_chunk = threadIdx.y; // up to 26
const uint raw_bank_chunk_size = VeloTracking::n_sensors / blockDim.y; // blockDim.y = 26 -> chunk_size = 8
const char* raw_input = dev_raw_input + dev_raw_input_offsets[event_number];
uint* estimated_input_size = dev_estimated_input_size + event_number * VeloTracking::n_modules;
uint* module_cluster_num = dev_module_cluster_num + event_number * VeloTracking::n_modules;
uint* event_candidate_num = dev_event_candidate_num + event_number;
uint32_t* cluster_candidates = dev_cluster_candidates + event_number * VeloClustering::max_candidates_event;
// Initialize estimated_input_size, module_cluster_num and dev_module_candidate_num to 0
for (int i=0; i<(VeloTracking::n_modules + blockDim.x - 1) / blockDim.x; ++i) {
const auto index = i*blockDim.x + threadIdx.x;
if (index < VeloTracking::n_modules) {
estimated_input_size[index] = 0;
module_cluster_num[index] = 0;
}
}
*event_candidate_num = 0;
__syncthreads();
// Read raw event
const auto raw_event = VeloRawEvent(raw_input);
for (int raw_bank_rel_number = 0; raw_bank_rel_number < raw_bank_chunk_size; ++raw_bank_rel_number) {
const int raw_bank_number = raw_bank_starting_chunk * raw_bank_chunk_size + raw_bank_rel_number;
if (raw_bank_number < raw_event.number_of_raw_banks) {
// Read raw bank
const auto raw_bank = VeloRawBank(raw_event.payload + raw_event.raw_bank_offset[raw_bank_number]);
uint* estimated_module_size = estimated_input_size + (raw_bank.sensor_index >> 2);
for (int i=0; i<(raw_bank.sp_count + blockDim.x - 1) / blockDim.x; ++i) {
const auto sp_index = i*blockDim.x + threadIdx.x;
if (sp_index < raw_bank.sp_count) {
// Decode sp
const uint32_t sp_word = raw_bank.sp_word[sp_index];
const uint32_t no_sp_neighbours = sp_word & 0x80000000U;
const uint32_t sp_addr = (sp_word & 0x007FFF00U) >> 8;
const uint8_t sp = sp_word & 0xFFU;
if (no_sp_neighbours) {
// The SP does not have any neighbours
// The problem is as simple as a lookup pattern
// It can be implemented in two operations
// Pattern 0:
// (x x)
// o o
// (x x
// x x)
//
// Note: Pixel order in sp
// 0x08 | 0x80
// 0x04 | 0x40
// 0x02 | 0x20
// 0x01 | 0x10
const bool pattern_0 = sp&0x88 && !(sp&0x44) && sp&0x33;
// Pattern 1:
// (x x
// x x)
// o o
// (x x)
const bool pattern_1 = sp&0xCC && !(sp&0x22) && sp&0x11;
const uint number_of_clusters = 1 + (pattern_0 | pattern_1);
// Add the found clusters
uint current_estimated_module_size = atomicAdd(estimated_module_size, number_of_clusters);
assert( current_estimated_module_size < VeloTracking::max_numhits_in_module);
} else {
// Find candidates that follow this condition:
// For pixel o, all pixels x should *not* be populated
// x x
// o x
// x
// Load required neighbouring pixels in order to check the condition
// x x x
// o o x
// o o x
// o o x
// o o x
// x x
//
// Use an int for storing and calculating
// Bit order
//
// 4 10 16
// 3 9 15
// 2 8 14
// 1 7 13
// 0 6 12
// 5 11
//
// Bit masks
//
// 0x10 0x0400 0x010000
// 0x08 0x0200 0x8000
// 0x04 0x0100 0x4000
// 0x02 0x80 0x2000
// 0x01 0x40 0x1000
// 0x20 0x0800
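            // Worked example (added for illustration, not part of the original
            // code): the current SP's own eight pixels occupy the middle four
            // rows of the first two columns above. For instance, sp = 0x0F
            // (left SP column fully hit) gives pixels = 0x0000000F below,
            // while sp = 0xF0 (right SP column fully hit) gives
            // pixels = 0x000003C0 after the << 2 shift.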
uint32_t pixels = sp&0x0F | ((sp&0xF0) << 2);
// Current row and col
const uint32_t sp_row = sp_addr & 0x3FU;
const uint32_t sp_col = sp_addr >> 6;
for (uint k=0; k<raw_bank.sp_count; ++k) {
const uint32_t other_sp_word = raw_bank.sp_word[k];
              const uint32_t other_no_sp_neighbours = other_sp_word & 0x80000000U;
if (!other_no_sp_neighbours) {
const uint32_t other_sp_addr = (other_sp_word & 0x007FFF00U) >> 8;
const uint32_t other_sp_row = other_sp_addr & 0x3FU;
const uint32_t other_sp_col = (other_sp_addr >> 6);
const uint8_t other_sp = other_sp_word & 0xFFU;
// Populate pixels
// Note: Pixel order in sp
// 0x08 | 0x80
// 0x04 | 0x40
// 0x02 | 0x20
// 0x01 | 0x10
const bool is_top = other_sp_row==(sp_row+1) && other_sp_col==sp_col;
const bool is_top_right = other_sp_row==(sp_row+1) && other_sp_col==(sp_col+1);
const bool is_right = other_sp_row==sp_row && other_sp_col==(sp_col+1);
const bool is_right_bottom = other_sp_row==(sp_row-1) && other_sp_col==(sp_col+1);
const bool is_bottom = other_sp_row==(sp_row-1) && other_sp_col==sp_col;
if (is_top || is_top_right || is_right || is_right_bottom || is_bottom) {
pixels |= is_top*((other_sp&0x01 | ((other_sp&0x10) << 2)) << 4);
pixels |= is_top_right*((other_sp&0x01) << 16);
pixels |= is_right*((other_sp&0x0F) << 12);
pixels |= is_right_bottom*((other_sp&0x08) << 8);
pixels |= is_bottom*((other_sp&0x80) >> 2);
}
}
}
// 16 1024 65536
// 8 512 32768
// 4 256 16384
// 2 128 8192
// 1 64 4096
// 32 2048
//
// Look up pattern
// x x
// o x
// x
//
uint found_cluster_candidates = 0;
assert(raw_bank_number < VeloTracking::n_sensors);
const uint32_t sp_inside_pixel = pixels & 0x3CF;
const uint32_t mask = (sp_inside_pixel << 1)
| (sp_inside_pixel << 5)
| (sp_inside_pixel << 6)
| (sp_inside_pixel << 7);
const uint32_t working_cluster = mask & (~pixels);
const uint32_t candidates_temp = (working_cluster >> 1)
& (working_cluster >> 5)
& (working_cluster >> 6)
& (working_cluster >> 7);
const uint32_t candidates = candidates_temp & pixels;
const uint8_t candidates_uint8 = (candidates & 0x03) | ((candidates & 0xC0) >> 4)
| ((candidates & 0x0C) << 2) | ((candidates & 0x0300) >> 2);
// Add candidates 0, 1, 4, 5
// Only one of those candidates can be flagged at a time
if (candidates_uint8 & 0xF) {
// if ((candidates_uint8 & 0xF) >= 9) {
// auto print_candidates8 = [] (const uint8_t& candidates) {
// printf("%i%i\n%i%i\n%i%i\n%i%i\n\n",
// (candidates & 0x80) > 0, (candidates & 0x40) > 0,
// (candidates & 0x20) > 0, (candidates & 0x10) > 0,
// (candidates & 0x8) > 0, (candidates & 0x4) > 0,
// (candidates & 0x2) > 0, candidates & 0x1
// );
// };
// auto print_candidates = [] (const uint32_t& candidates) {
// printf("%i%i%i\n%i%i%i\n%i%i%i\n%i%i%i\n%i%i%i\n %i%i\n\n",
// (candidates & 0x10) > 0, (candidates & 0x0400) > 0, (candidates & 0x010000) > 0,
// (candidates & 0x08) > 0, (candidates & 0x0200) > 0, (candidates & 0x8000) > 0,
// (candidates & 0x04) > 0, (candidates & 0x0100) > 0, (candidates & 0x4000) > 0,
// (candidates & 0x02) > 0, (candidates & 0x80) > 0, (candidates & 0x2000) > 0,
// (candidates & 0x01) > 0, (candidates & 0x40) > 0, (candidates & 0x1000) > 0,
// (candidates & 0x20) > 0, (candidates & 0x0800) > 0
// );
// };
// printf("pixels:\n");
// print_candidates(pixels);
// printf("sp_inside_pixel:\n");
// print_candidates(sp_inside_pixel);
// printf("mask:\n");
// print_candidates(mask);
// printf("working_cluster:\n");
// print_candidates(working_cluster);
// printf("candidates:\n");
// print_candidates(candidates);
// printf("candidates_uint8:\n");
// print_candidates8(candidates_uint8);
// }
// Verify candidates are correctly created
assert((candidates_uint8 & 0xF) < 9);
// Decode the candidate number (ie. find out the active bit)
const uint8_t k = dev_velo_candidate_ks[candidates_uint8 & 0xF];
auto current_cluster_candidate = atomicAdd(event_candidate_num, 1);
const uint32_t candidate = (sp_index << 11)
| (raw_bank_number << 3)
| k;
assert(current_cluster_candidate < blockDim.x * VeloClustering::max_candidates_event);
cluster_candidates[current_cluster_candidate] = candidate;
++found_cluster_candidates;
}
// Add candidates 2, 3, 6, 7
// Only one of those candidates can be flagged at a time
if (candidates_uint8 & 0xF0) {
assert(((candidates_uint8 >> 4) & 0xF) < 9);
const uint8_t k = dev_velo_candidate_ks[(candidates_uint8 >> 4)] + 2;
auto current_cluster_candidate = atomicAdd(event_candidate_num, 1);
const uint32_t candidate = (sp_index << 11)
| (raw_bank_number << 3)
| k;
assert(current_cluster_candidate < blockDim.x * VeloClustering::max_candidates_event);
cluster_candidates[current_cluster_candidate] = candidate;
++found_cluster_candidates;
}
// Add the found cluster candidates
if (found_cluster_candidates > 0) {
uint current_estimated_module_size = atomicAdd(estimated_module_size, found_cluster_candidates);
assert(current_estimated_module_size < VeloTracking::max_numhits_in_module);
}
}
}
}
}
}
}
|
26d41e20912674fe3deb83e27b2f3eade36e2d94.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include <hipcub/hipcub.hpp>
namespace oneflow {
namespace {
template<typename T>
class TmpBufferManager final {
public:
OF_DISALLOW_COPY_AND_MOVE(TmpBufferManager);
TmpBufferManager(int32_t capacity, void* ptr, int32_t instance_num)
: capacity_{capacity}, key_value_out_elem_cnt_{instance_num} {
const int32_t key_value_out_aligned_bytes =
GetCudaAlignedSize(key_value_out_elem_cnt_ * sizeof(hipcub::KeyValuePair<int32_t, T>));
key_value_out_ptr_ = reinterpret_cast<hipcub::KeyValuePair<int32_t, T>*>(ptr);
temp_storage_ptr_ = reinterpret_cast<void*>(reinterpret_cast<char*>(key_value_out_ptr_)
+ key_value_out_aligned_bytes);
temp_storage_bytes_ = capacity_ - key_value_out_aligned_bytes;
CHECK_GE(temp_storage_bytes_, 0);
}
~TmpBufferManager() = default;
hipcub::KeyValuePair<int32_t, T>* KeyValueOutPtr() const { return key_value_out_ptr_; }
void* TempStoragePtr() const { return temp_storage_ptr_; }
int32_t KeyValueOutElemCnt() const { return key_value_out_elem_cnt_; }
int32_t TempStorageBytes() const { return temp_storage_bytes_; }
private:
int32_t capacity_;
hipcub::KeyValuePair<int32_t, T>* key_value_out_ptr_;
void* temp_storage_ptr_;
int32_t key_value_out_elem_cnt_;
int32_t temp_storage_bytes_;
};
class MultiplyFunctor final {
public:
MultiplyFunctor(int32_t num_col) : num_col_(num_col) {}
__host__ __device__ __forceinline__ int32_t operator()(int32_t idx) const {
return idx * num_col_;
}
private:
int32_t num_col_;
};
template<typename T>
size_t InferTempStorageForArgMax(int32_t num_row, int32_t num_col) {
using SegmentOffsetIter =
hipcub::TransformInputIterator<int32_t, MultiplyFunctor, hipcub::CountingInputIterator<int32_t>>;
hipcub::CountingInputIterator<int32_t> counting_iter(0);
MultiplyFunctor multiply_functor(num_col);
SegmentOffsetIter segment_offset_iter(counting_iter, multiply_functor);
size_t temp_storage_bytes = -1;
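  // Note: calling hipcub::DeviceSegmentedReduce::ArgMax with a null
  // d_temp_storage pointer performs no reduction; it only writes the required
  // scratch size into temp_storage_bytes. ArgMax() below then repeats the call
  // with a real buffer of at least that size.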
auto err =
hipcub::DeviceSegmentedReduce::ArgMax<T*, hipcub::KeyValuePair<int32_t, T>*, SegmentOffsetIter>(
/* d_temp_storage */ nullptr, /* temp_storage_bytes */ temp_storage_bytes,
/* d_in */ nullptr, /* d_out */ nullptr, /* num_segments */ num_row,
/* d_begin_offsets */ segment_offset_iter, /* d_end_offsets */ segment_offset_iter + 1,
/* stream */ 0);
OF_CUDA_CHECK(err);
return temp_storage_bytes;
}
template<typename T>
void ArgMax(const T* in_ptr, int32_t num_row, int32_t num_col, void* temp_storage_ptr,
int32_t temp_storage_bytes, hipcub::KeyValuePair<int32_t, T>* out_ptr,
hipStream_t stream) {
size_t rt_inferred_temp_storage_bytes = InferTempStorageForArgMax<T>(num_row, num_col);
CHECK_LE(rt_inferred_temp_storage_bytes, temp_storage_bytes);
using SegmentOffsetIter =
hipcub::TransformInputIterator<int32_t, MultiplyFunctor, hipcub::CountingInputIterator<int32_t>>;
hipcub::CountingInputIterator<int32_t> counting_iter(0);
MultiplyFunctor multiply_functor(num_col);
SegmentOffsetIter segment_offset_iter(counting_iter, multiply_functor);
auto err = hipcub::DeviceSegmentedReduce::ArgMax(
/* d_temp_storage */ temp_storage_ptr,
/* temp_storage_bytes */ rt_inferred_temp_storage_bytes,
/* d_in */ in_ptr,
/* d_out */ out_ptr,
/* num_segments */ num_row,
/* d_begin_offsets */ segment_offset_iter,
/* d_end_offsets */ segment_offset_iter + 1,
/* stream */ stream);
OF_CUDA_CHECK(err);
}
template<typename T>
__global__ void WriteKeysToOutput(const int32_t instance_num,
const hipcub::KeyValuePair<int32_t, T>* key_value_out_ptr,
int32_t* out_ptr) {
CUDA_1D_KERNEL_LOOP(i, instance_num) { out_ptr[i] = key_value_out_ptr[i].key; }
}
} // namespace
template<typename T>
class GpuArgMaxKernel final : public user_op::OpKernel {
public:
GpuArgMaxKernel() = default;
~GpuArgMaxKernel() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const int32_t elem_cnt = in->shape().elem_cnt();
const int32_t instance_size = in->shape().At(in->shape().NumAxes() - 1);
const int32_t instance_num = elem_cnt / instance_size;
TmpBufferManager<T> buffer_manager(tmp_buffer->shape().elem_cnt(), tmp_buffer->mut_dptr<void>(),
instance_num);
ArgMax(in->dptr<T>(), instance_num, instance_size, buffer_manager.TempStoragePtr(),
buffer_manager.TempStorageBytes(), buffer_manager.KeyValueOutPtr(),
ctx->device_ctx()->cuda_stream());
hipLaunchKernelGGL(( WriteKeysToOutput<T>), dim3(BlocksNum4ThreadsNum(instance_num)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->device_ctx()->cuda_stream(),
instance_num, buffer_manager.KeyValueOutPtr(), out->mut_dptr<int32_t>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_GPU_ARGMAX_KERNEL(dtype) \
REGISTER_USER_KERNEL("argmax") \
.SetCreateFn<GpuArgMaxKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU) \
& (user_op::HobDataType("in", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](user_op::InferContext* ctx) { \
const Shape* in_shape = ctx->Shape4ArgNameAndIndex("in", 0); \
const int32_t instance_size = in_shape->dim_vec().back(); \
const int32_t instance_num = in_shape->elem_cnt() / instance_size; \
\
/* Key-Value Out */ \
int32_t key_value_out_bytes = \
GetCudaAlignedSize(instance_num * sizeof(hipcub::KeyValuePair<int32_t, dtype>)); \
\
/* CUB Temp Storage */ \
size_t temp_storage_bytes = InferTempStorageForArgMax<dtype>(instance_num, instance_size); \
\
return key_value_out_bytes + temp_storage_bytes; \
});
REGISTER_GPU_ARGMAX_KERNEL(float)
REGISTER_GPU_ARGMAX_KERNEL(double)
REGISTER_GPU_ARGMAX_KERNEL(int32_t)
REGISTER_GPU_ARGMAX_KERNEL(int64_t)
} // namespace oneflow
| 26d41e20912674fe3deb83e27b2f3eade36e2d94.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include <cub/cub.cuh>
namespace oneflow {
namespace {
template<typename T>
class TmpBufferManager final {
public:
OF_DISALLOW_COPY_AND_MOVE(TmpBufferManager);
TmpBufferManager(int32_t capacity, void* ptr, int32_t instance_num)
: capacity_{capacity}, key_value_out_elem_cnt_{instance_num} {
const int32_t key_value_out_aligned_bytes =
GetCudaAlignedSize(key_value_out_elem_cnt_ * sizeof(cub::KeyValuePair<int32_t, T>));
key_value_out_ptr_ = reinterpret_cast<cub::KeyValuePair<int32_t, T>*>(ptr);
temp_storage_ptr_ = reinterpret_cast<void*>(reinterpret_cast<char*>(key_value_out_ptr_)
+ key_value_out_aligned_bytes);
temp_storage_bytes_ = capacity_ - key_value_out_aligned_bytes;
CHECK_GE(temp_storage_bytes_, 0);
}
~TmpBufferManager() = default;
cub::KeyValuePair<int32_t, T>* KeyValueOutPtr() const { return key_value_out_ptr_; }
void* TempStoragePtr() const { return temp_storage_ptr_; }
int32_t KeyValueOutElemCnt() const { return key_value_out_elem_cnt_; }
int32_t TempStorageBytes() const { return temp_storage_bytes_; }
private:
int32_t capacity_;
cub::KeyValuePair<int32_t, T>* key_value_out_ptr_;
void* temp_storage_ptr_;
int32_t key_value_out_elem_cnt_;
int32_t temp_storage_bytes_;
};
class MultiplyFunctor final {
public:
MultiplyFunctor(int32_t num_col) : num_col_(num_col) {}
__host__ __device__ __forceinline__ int32_t operator()(int32_t idx) const {
return idx * num_col_;
}
private:
int32_t num_col_;
};
template<typename T>
size_t InferTempStorageForArgMax(int32_t num_row, int32_t num_col) {
using SegmentOffsetIter =
cub::TransformInputIterator<int32_t, MultiplyFunctor, cub::CountingInputIterator<int32_t>>;
cub::CountingInputIterator<int32_t> counting_iter(0);
MultiplyFunctor multiply_functor(num_col);
SegmentOffsetIter segment_offset_iter(counting_iter, multiply_functor);
size_t temp_storage_bytes = -1;
auto err =
cub::DeviceSegmentedReduce::ArgMax<T*, cub::KeyValuePair<int32_t, T>*, SegmentOffsetIter>(
/* d_temp_storage */ nullptr, /* temp_storage_bytes */ temp_storage_bytes,
/* d_in */ nullptr, /* d_out */ nullptr, /* num_segments */ num_row,
/* d_begin_offsets */ segment_offset_iter, /* d_end_offsets */ segment_offset_iter + 1,
/* stream */ 0);
OF_CUDA_CHECK(err);
return temp_storage_bytes;
}
template<typename T>
void ArgMax(const T* in_ptr, int32_t num_row, int32_t num_col, void* temp_storage_ptr,
int32_t temp_storage_bytes, cub::KeyValuePair<int32_t, T>* out_ptr,
cudaStream_t stream) {
size_t rt_inferred_temp_storage_bytes = InferTempStorageForArgMax<T>(num_row, num_col);
CHECK_LE(rt_inferred_temp_storage_bytes, temp_storage_bytes);
using SegmentOffsetIter =
cub::TransformInputIterator<int32_t, MultiplyFunctor, cub::CountingInputIterator<int32_t>>;
cub::CountingInputIterator<int32_t> counting_iter(0);
MultiplyFunctor multiply_functor(num_col);
SegmentOffsetIter segment_offset_iter(counting_iter, multiply_functor);
auto err = cub::DeviceSegmentedReduce::ArgMax(
/* d_temp_storage */ temp_storage_ptr,
/* temp_storage_bytes */ rt_inferred_temp_storage_bytes,
/* d_in */ in_ptr,
/* d_out */ out_ptr,
/* num_segments */ num_row,
/* d_begin_offsets */ segment_offset_iter,
/* d_end_offsets */ segment_offset_iter + 1,
/* stream */ stream);
OF_CUDA_CHECK(err);
}
template<typename T>
__global__ void WriteKeysToOutput(const int32_t instance_num,
const cub::KeyValuePair<int32_t, T>* key_value_out_ptr,
int32_t* out_ptr) {
CUDA_1D_KERNEL_LOOP(i, instance_num) { out_ptr[i] = key_value_out_ptr[i].key; }
}
} // namespace
template<typename T>
class GpuArgMaxKernel final : public user_op::OpKernel {
public:
GpuArgMaxKernel() = default;
~GpuArgMaxKernel() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const int32_t elem_cnt = in->shape().elem_cnt();
const int32_t instance_size = in->shape().At(in->shape().NumAxes() - 1);
const int32_t instance_num = elem_cnt / instance_size;
TmpBufferManager<T> buffer_manager(tmp_buffer->shape().elem_cnt(), tmp_buffer->mut_dptr<void>(),
instance_num);
ArgMax(in->dptr<T>(), instance_num, instance_size, buffer_manager.TempStoragePtr(),
buffer_manager.TempStorageBytes(), buffer_manager.KeyValueOutPtr(),
ctx->device_ctx()->cuda_stream());
WriteKeysToOutput<T><<<BlocksNum4ThreadsNum(instance_num), kCudaThreadsNumPerBlock, 0,
ctx->device_ctx()->cuda_stream()>>>(
instance_num, buffer_manager.KeyValueOutPtr(), out->mut_dptr<int32_t>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_GPU_ARGMAX_KERNEL(dtype) \
REGISTER_USER_KERNEL("argmax") \
.SetCreateFn<GpuArgMaxKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU) \
& (user_op::HobDataType("in", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](user_op::InferContext* ctx) { \
const Shape* in_shape = ctx->Shape4ArgNameAndIndex("in", 0); \
const int32_t instance_size = in_shape->dim_vec().back(); \
const int32_t instance_num = in_shape->elem_cnt() / instance_size; \
\
/* Key-Value Out */ \
int32_t key_value_out_bytes = \
GetCudaAlignedSize(instance_num * sizeof(cub::KeyValuePair<int32_t, dtype>)); \
\
/* CUB Temp Storage */ \
size_t temp_storage_bytes = InferTempStorageForArgMax<dtype>(instance_num, instance_size); \
\
return key_value_out_bytes + temp_storage_bytes; \
});
REGISTER_GPU_ARGMAX_KERNEL(float)
REGISTER_GPU_ARGMAX_KERNEL(double)
REGISTER_GPU_ARGMAX_KERNEL(int32_t)
REGISTER_GPU_ARGMAX_KERNEL(int64_t)
} // namespace oneflow
|
14e98b805fe082ac3177d13d0dd0b0b9a6d1ca61.hip | // !!! This is a file automatically generated by hipify!!!
// Program to test the allocation and sending of quarter tiles to perform matrix multiplication on devices 2 and 3
#include <stdio.h>
#include "../DIEKUHDA/kuhda.h"
#include "omp.h"
#define NUMTHREADS 5
// Run with:
// nvcc -O3 lXcompiler -fopenmp -lcublas ../DIEKUHDA/kuhda.c TwoDeviceTileMultiplication.c && ./a.out
// What do we want to test: (in parallel)
// Send d_A1 and d_B1 to device 3 and d_A2 and d_B3 to device 2
// call kuhdamm() to try and compute in parallel
// Results: everything works, see how every element in the quarter tile = n
// Remark: computations can run in parallel, but depends on many factors, like matrix size etc
int main()
{
//omp_set_num_threads(2);
//omp_set_dynamic(0);
//gpuErrchk(hipSetDevice(0));
unsigned long n = 1000;
int x = n/2; // x * x = dimension of quarter tile
// Containers for host and device matrices
matrix *h_A = kuhdaMallocMP1(n, n); // diagonal A matrix
matrix *h_B = kuhdaMallocMP1(n, n); // diagonal B matrix
matrix *h_C = kuhdaMallocMP(n, n); // empty C matrix
int abc, ABC = 3, device, devicecount = 4;
matrix *d_All[devicecount][ABC];
hipStream_t d_streams[devicecount], mainstream;
hipEvent_t start[devicecount], stop[devicecount], mainstart, mainstop;
gpuErrchk(hipStreamCreate(&mainstream));
gpuErrchk(hipEventCreate(&mainstart));
gpuErrchk(hipEventCreate(&mainstop));
float ms_timer[4] = {0.0, 0.0, 0.0, 0.0}, mainstreamtimer;
#pragma omp parallel for private(abc) private(device) num_threads(NUMTHREADS)
	// Create all dependencies:
for (device = 0; device < devicecount; device++){
//printf("Number of threads = %d\n", omp_get_thread_num());
gpuErrchk(hipSetDevice(device));
printf("Allocating tiles A, B and C on device %d\n", device);
gpuErrchk(hipStreamCreate(&d_streams[device]));
gpuErrchk(hipEventCreate(&start[device]));
gpuErrchk(hipEventCreate(&stop[device]));
for (abc = 0; abc < ABC; ++abc){
d_All[device][abc] = kuhdaMallocDeviceM(x, x);
}
		// Send the first quarter tiles of A and B to the current device...
TileHostToGPU(0, x, 0, x, h_A, d_All[device][0], d_streams[device]);
TileHostToGPU(0, x, 0, x, h_B, d_All[device][1], d_streams[device]);
}
gpuErrchk(hipStreamSynchronize(mainstream));
gpuErrchk(hipEventRecord(mainstart, mainstream));
int checkCorrectDevice = 0, rep = 0, reps = 500;
#pragma omp parallel for private(device) private(rep) num_threads(NUMTHREADS)
for (device = 0; device < devicecount; device++){
gpuErrchk(hipSetDevice(device));
gpuErrchk(hipGetDevice(&checkCorrectDevice));
if (checkCorrectDevice != device) printf("Wrong device?");
//gpuErrchk(hipStreamSynchronize(d_streams[device]));
gpuErrchk(hipEventRecord(start[device], d_streams[device]));
// Matrix multiplication: damm man that's fast
for (rep = 0; rep < reps; ++rep) kuhdamm(d_All[device][0], d_All[device][1], d_All[device][2], d_streams[device], 0);
//gpuErrchk(hipStreamSynchronize(d_streams[device]));
gpuErrchk(hipEventRecord(stop[device], d_streams[device]));
gpuErrchk(hipEventSynchronize(stop[device]));
gpuErrchk(hipEventElapsedTime(&ms_timer[device], start[device], stop[device]));
printf("Multiplication on device %d took %lf seconds\n", device, ms_timer[device]/1000);
// ...retrieve it again into C on the host
//TileGPUAddToHost(0, x, 0, x, d_All[device][2], h_C, d_streams[device]);
}
#pragma omp parallel for private(device) num_threads(NUMTHREADS)
for (device = 0; device < devicecount; device++){
gpuErrchk(hipSetDevice(device));
gpuErrchk(hipStreamSynchronize(d_streams[device]));
TileGPUAddToHost(0, x, 0, x, d_All[device][2], h_C, d_streams[device]);
}
gpuErrchk(hipEventRecord(mainstop, mainstream));
gpuErrchk(hipEventSynchronize(mainstop));
gpuErrchk(hipEventElapsedTime(&mainstreamtimer, mainstart, mainstop));
int timerindex;
for (timerindex = 1; timerindex < devicecount; timerindex++) ms_timer[0] += ms_timer[timerindex];
printf("Everything: %lf s, multiplication: = %lf s\n", mainstreamtimer/1000, ms_timer[0]/1000);
//kuhdaTestM(0, x, 0, x, h_C);
//kuhdaPrintM(h_C);
//printf("%lf %lf \n%lf %lf \n", h_C->data[(n-1)*x-1], h_C->data[(n-1)*x], h_C->data[n*x-1], h_C->data[n*x]);
// free all matrices
printf("Cleaning up ..\n");
gpuErrchk(hipStreamDestroy(mainstream));
gpuErrchk(hipEventDestroy(mainstart));
gpuErrchk(hipEventDestroy(mainstop));
kuhdaFreeM(h_A, 'p');
kuhdaFreeM(h_B, 'p');
kuhdaFreeM(h_C, 'p');
	for (device = 0; device < devicecount; device++){ // clean up every device that was set up above
gpuErrchk(hipSetDevice(device));
gpuErrchk(hipStreamDestroy(d_streams[device]));
gpuErrchk(hipEventDestroy(start[device]));
gpuErrchk(hipEventDestroy(stop[device]));
for (abc = 0; abc < ABC; ++abc){
kuhdaFreeM(d_All[device][abc], 'c');
}
// Takes NO arguments
gpuErrchk(hipDeviceReset());
}
// Dis work??
// hipblasShutdown();
return 0;
}
| 14e98b805fe082ac3177d13d0dd0b0b9a6d1ca61.cu | // Program to test the allocation and sending of quarter tiles to perform matrix multiplication on devices 2 and 3
#include <stdio.h>
#include "../DIEKUHDA/kuhda.h"
#include "omp.h"
#define NUMTHREADS 5
// Run with:
// nvcc -O3 lXcompiler -fopenmp -lcublas ../DIEKUHDA/kuhda.c TwoDeviceTileMultiplication.c && ./a.out
// What do we want to test: (in parallel)
// Send d_A1 and d_B1 to device 3 and d_A2 and d_B3 to device 2
// call kuhdamm() to try and compute in parallel
// Results: everything works, see how every element in the quarter tile = n
// Remark: computations can run in parallel, but depends on many factors, like matrix size etc
int main()
{
//omp_set_num_threads(2);
//omp_set_dynamic(0);
//gpuErrchk(cudaSetDevice(0));
unsigned long n = 1000;
int x = n/2; // x * x = dimension of quarter tile
// Containers for host and device matrices
matrix *h_A = kuhdaMallocMP1(n, n); // diagonal A matrix
matrix *h_B = kuhdaMallocMP1(n, n); // diagonal B matrix
matrix *h_C = kuhdaMallocMP(n, n); // empty C matrix
int abc, ABC = 3, device, devicecount = 4;
matrix *d_All[devicecount][ABC];
cudaStream_t d_streams[devicecount], mainstream;
cudaEvent_t start[devicecount], stop[devicecount], mainstart, mainstop;
gpuErrchk(cudaStreamCreate(&mainstream));
gpuErrchk(cudaEventCreate(&mainstart));
gpuErrchk(cudaEventCreate(&mainstop));
float ms_timer[4] = {0.0, 0.0, 0.0, 0.0}, mainstreamtimer;
#pragma omp parallel for private(abc) private(device) num_threads(NUMTHREADS)
	// Create all dependencies:
for (device = 0; device < devicecount; device++){
//printf("Number of threads = %d\n", omp_get_thread_num());
gpuErrchk(cudaSetDevice(device));
printf("Allocating tiles A, B and C on device %d\n", device);
gpuErrchk(cudaStreamCreate(&d_streams[device]));
gpuErrchk(cudaEventCreate(&start[device]));
gpuErrchk(cudaEventCreate(&stop[device]));
for (abc = 0; abc < ABC; ++abc){
d_All[device][abc] = kuhdaMallocDeviceM(x, x);
}
		// Send the first quarter tiles of A and B to the current device...
TileHostToGPU(0, x, 0, x, h_A, d_All[device][0], d_streams[device]);
TileHostToGPU(0, x, 0, x, h_B, d_All[device][1], d_streams[device]);
}
gpuErrchk(cudaStreamSynchronize(mainstream));
gpuErrchk(cudaEventRecord(mainstart, mainstream));
int checkCorrectDevice = 0, rep = 0, reps = 500;
#pragma omp parallel for private(device) private(rep) num_threads(NUMTHREADS)
for (device = 0; device < devicecount; device++){
gpuErrchk(cudaSetDevice(device));
gpuErrchk(cudaGetDevice(&checkCorrectDevice));
if (checkCorrectDevice != device) printf("Wrong device?");
//gpuErrchk(cudaStreamSynchronize(d_streams[device]));
gpuErrchk(cudaEventRecord(start[device], d_streams[device]));
// Matrix multiplication: damm man that's fast
for (rep = 0; rep < reps; ++rep) kuhdamm(d_All[device][0], d_All[device][1], d_All[device][2], d_streams[device], 0);
//gpuErrchk(cudaStreamSynchronize(d_streams[device]));
gpuErrchk(cudaEventRecord(stop[device], d_streams[device]));
gpuErrchk(cudaEventSynchronize(stop[device]));
gpuErrchk(cudaEventElapsedTime(&ms_timer[device], start[device], stop[device]));
printf("Multiplication on device %d took %lf seconds\n", device, ms_timer[device]/1000);
// ...retrieve it again into C on the host
//TileGPUAddToHost(0, x, 0, x, d_All[device][2], h_C, d_streams[device]);
}
#pragma omp parallel for private(device) num_threads(NUMTHREADS)
for (device = 0; device < devicecount; device++){
gpuErrchk(cudaSetDevice(device));
gpuErrchk(cudaStreamSynchronize(d_streams[device]));
TileGPUAddToHost(0, x, 0, x, d_All[device][2], h_C, d_streams[device]);
}
gpuErrchk(cudaEventRecord(mainstop, mainstream));
gpuErrchk(cudaEventSynchronize(mainstop));
gpuErrchk(cudaEventElapsedTime(&mainstreamtimer, mainstart, mainstop));
int timerindex;
for (timerindex = 1; timerindex < devicecount; timerindex++) ms_timer[0] += ms_timer[timerindex];
printf("Everything: %lf s, multiplication: = %lf s\n", mainstreamtimer/1000, ms_timer[0]/1000);
//kuhdaTestM(0, x, 0, x, h_C);
//kuhdaPrintM(h_C);
//printf("%lf %lf \n%lf %lf \n", h_C->data[(n-1)*x-1], h_C->data[(n-1)*x], h_C->data[n*x-1], h_C->data[n*x]);
// free all matrices
printf("Cleaning up ..\n");
gpuErrchk(cudaStreamDestroy(mainstream));
gpuErrchk(cudaEventDestroy(mainstart));
gpuErrchk(cudaEventDestroy(mainstop));
kuhdaFreeM(h_A, 'p');
kuhdaFreeM(h_B, 'p');
kuhdaFreeM(h_C, 'p');
	for (device = 0; device < devicecount; device++){ // clean up every device that was set up above
gpuErrchk(cudaSetDevice(device));
gpuErrchk(cudaStreamDestroy(d_streams[device]));
gpuErrchk(cudaEventDestroy(start[device]));
gpuErrchk(cudaEventDestroy(stop[device]));
for (abc = 0; abc < ABC; ++abc){
kuhdaFreeM(d_All[device][abc], 'c');
}
// Takes NO arguments
gpuErrchk(cudaDeviceReset());
}
// Dis work??
// cublasShutdown();
return 0;
}
|
8df61d138534a95647039d77bd6e02c7c888c896.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "cuda_knn.h"
#define SHARED_SIZE 3060
#define TRAIN_SIZE 100
#define TEST_SIZE 10
/*
ComputeDistance: Compute the distances between test instances and training instances.
Save the distance in device_distances
*/
__global__ void kernelComputeDistance(double *trainAttr, double *testAttr,
double* device_distances, int trainSize, int testSize, int attrSize){
//48KB shared array
//Warp size: 32. the Number of instances should fit the warp size.
__shared__ double trainData[SHARED_SIZE];//Number of attributes X Number of Train instances in this batch
__shared__ double testData[SHARED_SIZE];//Number of attributes X Number of Test instances in this batch
int trainIdx = threadIdx.x;
int testIdx = threadIdx.y;
int trainOffset = blockDim.x * blockIdx.x;
int testOffset = blockDim.y * blockIdx.y;
trainIdx += trainOffset;
	testIdx += testOffset;
//Each thread compute a distance of x to y.
//Read train data
//Threads that need the same train instance will read it together
for(int i = 0;i < attrSize;i += blockDim.y){
int tmpIdx = i + threadIdx.y;
if(tmpIdx < attrSize){
trainData[threadIdx.x * attrSize + threadIdx.y + i * blockDim.y] =
trainAttr[trainIdx * attrSize + threadIdx.y + i * blockDim.y];
testData[threadIdx.y * attrSize + threadIdx.y + i * blockDim.y] =
testAttr[testIdx * attrSize + threadIdx.y + i * blockDim.y];
}
}
	__syncthreads(); // wait for the cooperative shared-memory loads above to finish
	double distance = 0.f;
	//Compute the distance as a sum of squared attribute differences (the original
	//loop body was left unfinished; this completion is the assumed intent)
	for(int i = 0;i < attrSize;i++){
		double diff = trainData[threadIdx.x * attrSize + i] - testData[threadIdx.y * attrSize + i];
		distance += diff * diff;
	}
	//Save the distance in device_distances, as described in the kernel's comment block
	device_distances[testIdx * trainSize + trainIdx] = distance;
}
int *cuPredict(double *trainAttr, int* trainLabels, int trainSize, int trainLength,
double *testAttr, int testSize, int testLength, int attrSize){
	double *device_trainAttr, *device_testAttr, *device_distances;
	int *device_trainLabels;
hipMalloc((void **)&device_trainAttr, sizeof(double) * trainSize);
hipMalloc((void **)&device_trainLabels, sizeof(int) * trainLength);
hipMalloc((void **)&device_testAttr, sizeof(double) * testSize);
hipMalloc((void **)&device_distances, sizeof(double) * testLength * trainLength);
hipMemcpy(device_trainAttr, trainAttr, sizeof(double) * trainSize, hipMemcpyHostToDevice);
hipMemcpy(device_trainLabels, trainLabels, sizeof(int) * trainLength, hipMemcpyHostToDevice);
hipMemcpy(device_testAttr, testAttr, sizeof(double) * testSize, hipMemcpyHostToDevice);
hipFree(device_trainAttr);
hipFree(device_testAttr);
hipFree(device_trainLabels);
//Get distance
//Sort distance
//find nearest neighbor
//return labels
} | 8df61d138534a95647039d77bd6e02c7c888c896.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "cuda_knn.h"
#define SHARED_SIZE 3060
#define TRAIN_SIZE 100
#define TEST_SIZE 10
/*
ComputeDistance: Compute the distances between test instances and training instances.
Save the distance in device_distances
*/
__global__ void kernelComputeDistance(double *trainAttr, double *testAttr,
double* device_distances, int trainSize, int testSize, int attrSize){
//48KB shared array
//Warp size: 32. the Number of instances should fit the warp size.
__shared__ double trainData[SHARED_SIZE];//Number of attributes X Number of Train instances in this batch
__shared__ double testData[SHARED_SIZE];//Number of attributes X Number of Test instances in this batch
int trainIdx = threadIdx.x;
int testIdx = threadIdx.y;
int trainOffset = blockDim.x * blockIdx.x;
int testOffset = blockDim.y * blockIdx.y;
trainIdx += trainOffset;
	testIdx += testOffset;
//Each thread compute a distance of x to y.
//Read train data
//Threads that need the same train instance will read it together
for(int i = 0;i < attrSize;i += blockDim.y){
int tmpIdx = i + threadIdx.y;
if(tmpIdx < attrSize){
trainData[threadIdx.x * attrSize + threadIdx.y + i * blockDim.y] =
trainAttr[trainIdx * attrSize + threadIdx.y + i * blockDim.y];
testData[threadIdx.y * attrSize + threadIdx.y + i * blockDim.y] =
testAttr[testIdx * attrSize + threadIdx.y + i * blockDim.y];
}
}
	__syncthreads(); // wait for the cooperative shared-memory loads above to finish
	double distance = 0.f;
	//Compute the distance as a sum of squared attribute differences (the original
	//loop body was left unfinished; this completion is the assumed intent)
	for(int i = 0;i < attrSize;i++){
		double diff = trainData[threadIdx.x * attrSize + i] - testData[threadIdx.y * attrSize + i];
		distance += diff * diff;
	}
	//Save the distance in device_distances, as described in the kernel's comment block
	device_distances[testIdx * trainSize + trainIdx] = distance;
}
int *cuPredict(double *trainAttr, int* trainLabels, int trainSize, int trainLength,
double *testAttr, int testSize, int testLength, int attrSize){
	double *device_trainAttr, *device_testAttr, *device_distances;
	int *device_trainLabels;
cudaMalloc((void **)&device_trainAttr, sizeof(double) * trainSize);
cudaMalloc((void **)&device_trainLabels, sizeof(int) * trainLength);
cudaMalloc((void **)&device_testAttr, sizeof(double) * testSize);
cudaMalloc((void **)&device_distances, sizeof(double) * testLength * trainLength);
cudaMemcpy(device_trainAttr, trainAttr, sizeof(double) * trainSize, cudaMemcpyHostToDevice);
cudaMemcpy(device_trainLabels, trainLabels, sizeof(int) * trainLength, cudaMemcpyHostToDevice);
cudaMemcpy(device_testAttr, testAttr, sizeof(double) * testSize, cudaMemcpyHostToDevice);
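	// Sketch (not in the original file) of how the distance kernel above might
	// be launched before the buffers are freed; the block shape is an assumption
	// based on the TRAIN_SIZE/TEST_SIZE defines at the top of this file:
	// dim3 block(TRAIN_SIZE, TEST_SIZE);
	// dim3 grid((trainLength + TRAIN_SIZE - 1) / TRAIN_SIZE,
	//           (testLength + TEST_SIZE - 1) / TEST_SIZE);
	// kernelComputeDistance<<<grid, block>>>(device_trainAttr, device_testAttr,
	//                                        device_distances, trainLength,
	//                                        testLength, attrSize);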
cudaFree(device_trainAttr);
cudaFree(device_testAttr);
cudaFree(device_trainLabels);
//Get distance
//Sort distance
//find nearest neighbor
//return labels
} |
c394da296bdfceb6479d63f0946a7c5ad5120883.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <cstdlib>
#include <string>
#include "hipcub/hipcub.hpp"
#include "cnmem.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/init.h"
#include "caffe2/core/logging.h"
#include "caffe2/utils/string_utils.h"
#define CNMEM_CHECK(condition) \
do { \
cnmemStatus_t error = condition; \
CHECK_EQ(error, CNMEM_STATUS_SUCCESS) << cnmemGetErrorString(error); \
} while (0)
DEFINE_string(caffe2_cuda_memory_pool, "",
"Sets the memory pool used by caffe2. Possible values are "
"none, cnmen and cub.");
DEFINE_double(caffe2_cnmem_reserve, 0.8,
"Sets the proportion of memory pre-allocated by the memory "
"pool if you use cnmem.");
DEFINE_string(caffe2_cnmem_gpus, "",
"A comma separated list containing the index of gpus that "
"we will set the memory pool on. If not set, we will set "
"up the memory pool on all available GPUs. This only applies "
"to cnmem.");
// TODO(jiayq): Figure out the best default values for the params below.
// Currently we are using the setting copied from caffe.
DEFINE_int32(caffe2_cub_bin_growth, 2,
"If using cub as the memory allocator, sets the growth of bins "
"used by the cub pool.");
DEFINE_int32(caffe2_cub_min_bin, 6,
"If using cub as the memory allocator, sets the min number of "
"bins.");
DEFINE_int32(caffe2_cub_max_bin, 16,
"If using cub as the memory allocator, sets the max number of "
"bins.");
namespace caffe2 {
thread_local ThreadLocalCUDAObjects CUDAContext::cuda_objects_;
// Static global variables for setting up the memory pool.
CudaMemoryPoolType g_cuda_memory_pool_type;
bool g_memory_allocation_already_called = false;
// For cnmem allocator
vector<bool> g_cnmem_available_for_device(NumCudaDevices(), false);
// For cub allocator
unique_ptr<hipcub::CachingDeviceAllocator> g_cub_allocator;
CudaMemoryPoolType GetCudaMemoryPoolType() {
return g_cuda_memory_pool_type;
}
void* CUDAContext::New(size_t nbytes) {
g_memory_allocation_already_called = true;
void* ptr = nullptr;
switch (g_cuda_memory_pool_type) {
case CudaMemoryPoolType::NONE:
CUDA_CHECK(hipMalloc(&ptr, nbytes));
return ptr;
case CudaMemoryPoolType::CNMEM:
CAFFE_ENFORCE(
g_cnmem_available_for_device[GetCurrentGPUID()],
"Trying to allocate on device ", GetCurrentGPUID(),
" but cnmem pool is not set up for it.");
CNMEM_CHECK(cnmemMalloc(&ptr, nbytes, nullptr));
return ptr;
case CudaMemoryPoolType::CUB:
CUDA_CHECK(g_cub_allocator->DeviceAllocate(&ptr, nbytes));
return ptr;
}
return nullptr;
}
void CUDAContext::Delete(void* ptr) {
switch (g_cuda_memory_pool_type) {
case CudaMemoryPoolType::NONE: {
// If memory pool is not set up, use simple hipFree.
hipError_t error = hipFree(ptr);
// For some reason, in Python runtime we sometimes delete a data pointer
// after the cuda runtime exits - this is odd but is probably caused by
// a static workspace that pycaffe2 uses, and the destruction got
// entangled in some race condition. Anyway, since cuda runtime is exiting
// anyway, we will not need to worry about memory leak, so we basically
// ignore it. This is definitely not ideal but works for now.
if (error != hipSuccess && error != hipErrorDeinitialized) {
LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": "
<< hipGetErrorString(error);
}
break; }
case CudaMemoryPoolType::CNMEM:
CNMEM_CHECK(cnmemFree(ptr, nullptr));
break;
case CudaMemoryPoolType::CUB:
CUDA_CHECK(g_cub_allocator->DeviceFree(ptr));
break;
}
}
static void SetUpCNMEM() {
VLOG(1) << "Setting up cnmem memory pool.";
vector<int> device_ids;
// If the cnmem gpus are not set, set up all gpus.
if (FLAGS_caffe2_cnmem_gpus.size() == 0) {
device_ids.resize(NumCudaDevices());
for (int i = 0; i < device_ids.size(); ++i) {
device_ids[i] = i;
}
} else {
vector<string> device_ids_str = split(',', FLAGS_caffe2_cnmem_gpus);
for (const string& id_str : device_ids_str) {
int id = 0;
try {
id = std::stoi(id_str);
} catch (...) {
CAFFE_THROW(
"Cannot parse device id ",
id_str,
" to a valid int number.");
}
device_ids.push_back(id);
}
}
CAFFE_ENFORCE(FLAGS_caffe2_cnmem_reserve >= 0 &&
FLAGS_caffe2_cnmem_reserve < 1.0,
"caffe2_cnmem_reserve number must be in [0, 1)");
vector<cnmemDevice_t> cnmem_devs(device_ids.size());
for (int i = 0; i < device_ids.size(); ++i) {
const int id = device_ids[i];
CAFFE_ENFORCE(
id >= 0 && id < NumCudaDevices(),
"GPU id ", id, " out of the range of available GPUs.");
DeviceGuard guard(id);
size_t free, used;
CUDA_CHECK(hipMemGetInfo(&free, &used));
VLOG(1) << "Reserving " << FLAGS_caffe2_cnmem_reserve * 100
<< " percent of the free memory (total " << free
<< ") on device " << id;
// Note: we create a dummy non-null stream for memory allocations, so that
// any malloc can be called from any cuda stream, since caffe2 uses a lot of
// non-default streams for computation. We will allocate all the reserved
// memory to that non-null stream.
cnmem_devs[i].device = id;
cnmem_devs[i].size = size_t(FLAGS_caffe2_cnmem_reserve * free);
cnmem_devs[i].numStreams = 0;
cnmem_devs[i].streamSizes = nullptr;
g_cnmem_available_for_device[id] = true;
}
CNMEM_CHECK(
cnmemInit(cnmem_devs.size(), cnmem_devs.data(), CNMEM_FLAGS_DEFAULT));
VLOG(1) << "Done setting up cnmem memory pool.";
}
static void SetUpCub() {
VLOG(1) << "Setting up cub memory pool.";
const bool k_cub_debug =
#ifdef NDEBUG
false;
#else
true;
#endif
// Sets up the cub memory pool
try {
g_cub_allocator.reset(new hipcub::CachingDeviceAllocator(
FLAGS_caffe2_cub_bin_growth,
FLAGS_caffe2_cub_min_bin,
FLAGS_caffe2_cub_max_bin,
static_cast<size_t>(-1),
false,
k_cub_debug));
} catch (...) {
CAFFE_THROW("Some error happened at cub initialization.");
}
VLOG(1) << "Done setting up cub memory pool.";
}
// Global initialization function to set up the cuda memory pool during
// construction time.
bool Caffe2SetCUDAMemoryPool(int*, char***) {
if (!HasCudaGPU()) {
VLOG(1) << "No GPU present. I won't set up cuda memory pool";
return true;
}
if (g_memory_allocation_already_called) {
LOG(ERROR) << "Caffe2SetCUDAMemoryPool should always be called before "
"any CUDAContext::New() calls are made.";
return false;
}
if (FLAGS_caffe2_cuda_memory_pool == "" ||
FLAGS_caffe2_cuda_memory_pool == "none") {
g_cuda_memory_pool_type = CudaMemoryPoolType::NONE;
return true;
} else if (FLAGS_caffe2_cuda_memory_pool == "cnmem") {
// sets up cnmem.
g_cuda_memory_pool_type = CudaMemoryPoolType::CNMEM;
SetUpCNMEM();
return true;
} else if (FLAGS_caffe2_cuda_memory_pool == "cub") {
// Sets up cub.
g_cuda_memory_pool_type = CudaMemoryPoolType::CUB;
SetUpCub();
return true;
}
LOG(ERROR) << "Unrecognized cuda memory pool type: "
<< FLAGS_caffe2_cuda_memory_pool;
return false;
}
// An initialization function that sets the CPU side to use pinned cpu
// allocator.
bool Caffe2UsePinnedCPUAllocator(int*, char***) {
#ifdef __SANITIZE_ADDRESS__
// Note(jiayq): for more details, see
// https://github.com/google/sanitizers/issues/629
LOG(WARNING) << "There are known issues between address sanitizer and "
"hipHostMalloc. As a result, caffe2 will not enable pinned "
"memory allocation in asan mode. If you are expecting any "
"behavior that depends on asan, be advised that it is not "
"turned on.";
return true;
#else
if (!HasCudaGPU()) {
VLOG(1) << "No GPU present. I won't use pinned allocator then.";
return true;
}
VLOG(1) << "Caffe2 gpu: setting CPUAllocator to PinnedCPUAllocator.";
SetCPUAllocator(new PinnedCPUAllocator());
return true;
#endif
}
REGISTER_CAFFE2_INIT_FUNCTION(Caffe2SetCUDAMemoryPool,
&Caffe2SetCUDAMemoryPool,
"Sets up the cuda memory pool.");
REGISTER_CAFFE2_INIT_FUNCTION(Caffe2UsePinnedCPUAllocator,
&Caffe2UsePinnedCPUAllocator,
"Make the CPU side use pinned memory.");
} // namespace caffe2
| c394da296bdfceb6479d63f0946a7c5ad5120883.cu | #include <algorithm>
#include <cstdlib>
#include <string>
#include "cub/util_allocator.cuh"
#include "cnmem.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/init.h"
#include "caffe2/core/logging.h"
#include "caffe2/utils/string_utils.h"
#define CNMEM_CHECK(condition) \
do { \
cnmemStatus_t error = condition; \
CHECK_EQ(error, CNMEM_STATUS_SUCCESS) << cnmemGetErrorString(error); \
} while (0)
DEFINE_string(caffe2_cuda_memory_pool, "",
"Sets the memory pool used by caffe2. Possible values are "
"none, cnmen and cub.");
DEFINE_double(caffe2_cnmem_reserve, 0.8,
"Sets the proportion of memory pre-allocated by the memory "
"pool if you use cnmem.");
DEFINE_string(caffe2_cnmem_gpus, "",
"A comma separated list containing the index of gpus that "
"we will set the memory pool on. If not set, we will set "
"up the memory pool on all available GPUs. This only applies "
"to cnmem.");
// TODO(jiayq): Figure out the best default values for the params below.
// Currently we are using the setting copied from caffe.
DEFINE_int32(caffe2_cub_bin_growth, 2,
"If using cub as the memory allocator, sets the growth of bins "
"used by the cub pool.");
DEFINE_int32(caffe2_cub_min_bin, 6,
"If using cub as the memory allocator, sets the min number of "
"bins.");
DEFINE_int32(caffe2_cub_max_bin, 16,
"If using cub as the memory allocator, sets the max number of "
"bins.");
namespace caffe2 {
thread_local ThreadLocalCUDAObjects CUDAContext::cuda_objects_;
// Static global variables for setting up the memory pool.
CudaMemoryPoolType g_cuda_memory_pool_type;
bool g_memory_allocation_already_called = false;
// For cnmem allocator
vector<bool> g_cnmem_available_for_device(NumCudaDevices(), false);
// For cub allocator
unique_ptr<cub::CachingDeviceAllocator> g_cub_allocator;
CudaMemoryPoolType GetCudaMemoryPoolType() {
return g_cuda_memory_pool_type;
}
void* CUDAContext::New(size_t nbytes) {
g_memory_allocation_already_called = true;
void* ptr = nullptr;
switch (g_cuda_memory_pool_type) {
case CudaMemoryPoolType::NONE:
CUDA_CHECK(cudaMalloc(&ptr, nbytes));
return ptr;
case CudaMemoryPoolType::CNMEM:
CAFFE_ENFORCE(
g_cnmem_available_for_device[GetCurrentGPUID()],
"Trying to allocate on device ", GetCurrentGPUID(),
" but cnmem pool is not set up for it.");
CNMEM_CHECK(cnmemMalloc(&ptr, nbytes, nullptr));
return ptr;
case CudaMemoryPoolType::CUB:
CUDA_CHECK(g_cub_allocator->DeviceAllocate(&ptr, nbytes));
return ptr;
}
return nullptr;
}
void CUDAContext::Delete(void* ptr) {
switch (g_cuda_memory_pool_type) {
case CudaMemoryPoolType::NONE: {
// If memory pool is not set up, use simple cudaFree.
cudaError_t error = cudaFree(ptr);
// For some reason, in Python runtime we sometimes delete a data pointer
// after the cuda runtime exits - this is odd but is probably caused by
// a static workspace that pycaffe2 uses, and the destruction got
// entangled in some race condition. Anyway, since cuda runtime is exiting
// anyway, we will not need to worry about memory leak, so we basically
// ignore it. This is definitely not ideal but works for now.
if (error != cudaSuccess && error != cudaErrorCudartUnloading) {
LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": "
<< cudaGetErrorString(error);
}
break; }
case CudaMemoryPoolType::CNMEM:
CNMEM_CHECK(cnmemFree(ptr, nullptr));
break;
case CudaMemoryPoolType::CUB:
CUDA_CHECK(g_cub_allocator->DeviceFree(ptr));
break;
}
}
static void SetUpCNMEM() {
VLOG(1) << "Setting up cnmem memory pool.";
vector<int> device_ids;
// If the cnmem gpus are not set, set up all gpus.
if (FLAGS_caffe2_cnmem_gpus.size() == 0) {
device_ids.resize(NumCudaDevices());
for (int i = 0; i < device_ids.size(); ++i) {
device_ids[i] = i;
}
} else {
vector<string> device_ids_str = split(',', FLAGS_caffe2_cnmem_gpus);
for (const string& id_str : device_ids_str) {
int id = 0;
try {
id = std::stoi(id_str);
} catch (...) {
CAFFE_THROW(
"Cannot parse device id ",
id_str,
" to a valid int number.");
}
device_ids.push_back(id);
}
}
CAFFE_ENFORCE(FLAGS_caffe2_cnmem_reserve >= 0 &&
FLAGS_caffe2_cnmem_reserve < 1.0,
"caffe2_cnmem_reserve number must be in [0, 1)");
vector<cnmemDevice_t> cnmem_devs(device_ids.size());
for (int i = 0; i < device_ids.size(); ++i) {
const int id = device_ids[i];
CAFFE_ENFORCE(
id >= 0 && id < NumCudaDevices(),
"GPU id ", id, " out of the range of available GPUs.");
DeviceGuard guard(id);
size_t free, used;
CUDA_CHECK(cudaMemGetInfo(&free, &used));
VLOG(1) << "Reserving " << FLAGS_caffe2_cnmem_reserve * 100
<< " percent of the free memory (total " << free
<< ") on device " << id;
// Note: we create a dummy non-null stream for memory allocations, so that
// any malloc can be called from any cuda stream, since caffe2 uses a lot of
// non-default streams for computation. We will allocate all the reserved
// memory to that non-null stream.
cnmem_devs[i].device = id;
cnmem_devs[i].size = size_t(FLAGS_caffe2_cnmem_reserve * free);
cnmem_devs[i].numStreams = 0;
cnmem_devs[i].streamSizes = nullptr;
g_cnmem_available_for_device[id] = true;
}
CNMEM_CHECK(
cnmemInit(cnmem_devs.size(), cnmem_devs.data(), CNMEM_FLAGS_DEFAULT));
VLOG(1) << "Done setting up cnmem memory pool.";
}
static void SetUpCub() {
VLOG(1) << "Setting up cub memory pool.";
const bool k_cub_debug =
#ifdef NDEBUG
false;
#else
true;
#endif
// Sets up the cub memory pool
try {
g_cub_allocator.reset(new cub::CachingDeviceAllocator(
FLAGS_caffe2_cub_bin_growth,
FLAGS_caffe2_cub_min_bin,
FLAGS_caffe2_cub_max_bin,
static_cast<size_t>(-1),
false,
k_cub_debug));
} catch (...) {
CAFFE_THROW("Some error happened at cub initialization.");
}
VLOG(1) << "Done setting up cub memory pool.";
}
// Global initialization function to set up the cuda memory pool during
// construction time.
bool Caffe2SetCUDAMemoryPool(int*, char***) {
if (!HasCudaGPU()) {
VLOG(1) << "No GPU present. I won't set up cuda memory pool";
return true;
}
if (g_memory_allocation_already_called) {
LOG(ERROR) << "Caffe2SetCUDAMemoryPool should always be called before "
"any CUDAContext::New() calls are made.";
return false;
}
if (FLAGS_caffe2_cuda_memory_pool == "" ||
FLAGS_caffe2_cuda_memory_pool == "none") {
g_cuda_memory_pool_type = CudaMemoryPoolType::NONE;
return true;
} else if (FLAGS_caffe2_cuda_memory_pool == "cnmem") {
// sets up cnmem.
g_cuda_memory_pool_type = CudaMemoryPoolType::CNMEM;
SetUpCNMEM();
return true;
} else if (FLAGS_caffe2_cuda_memory_pool == "cub") {
// Sets up cub.
g_cuda_memory_pool_type = CudaMemoryPoolType::CUB;
SetUpCub();
return true;
}
LOG(ERROR) << "Unrecognized cuda memory pool type: "
<< FLAGS_caffe2_cuda_memory_pool;
return false;
}
// An initialization function that sets the CPU side to use pinned cpu
// allocator.
bool Caffe2UsePinnedCPUAllocator(int*, char***) {
#ifdef __SANITIZE_ADDRESS__
// Note(jiayq): for more details, see
// https://github.com/google/sanitizers/issues/629
LOG(WARNING) << "There are known issues between address sanitizer and "
"cudaMallocHost. As a result, caffe2 will not enable pinned "
"memory allocation in asan mode. If you are expecting any "
"behavior that depends on asan, be advised that it is not "
"turned on.";
return true;
#else
if (!HasCudaGPU()) {
VLOG(1) << "No GPU present. I won't use pinned allocator then.";
return true;
}
VLOG(1) << "Caffe2 gpu: setting CPUAllocator to PinnedCPUAllocator.";
SetCPUAllocator(new PinnedCPUAllocator());
return true;
#endif
}
REGISTER_CAFFE2_INIT_FUNCTION(Caffe2SetCUDAMemoryPool,
&Caffe2SetCUDAMemoryPool,
"Sets up the cuda memory pool.");
REGISTER_CAFFE2_INIT_FUNCTION(Caffe2UsePinnedCPUAllocator,
&Caffe2UsePinnedCPUAllocator,
"Make the CPU side use pinned memory.");
} // namespace caffe2
|
5457b1ea554140cb1ecb8a3515a86f292805032d.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHStorage.h"
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
void THCudaStorage_fill(THCState *state, THCudaStorage *self, float value)
{
thrust::device_ptr<float> self_data(self->data);
thrust::fill(self_data, self_data+self->size, value);
}
void THCudaStorage_resize(THCState *state, THCudaStorage *self, long size)
{
THArgCheck(size >= 0, 2, "invalid size");
if(!(self->flag & TH_STORAGE_RESIZABLE))
return;
if(size == 0)
{
if(self->flag & TH_STORAGE_FREEMEM)
THCudaCheck(hipFree(self->data));
self->data = NULL;
self->size = 0;
}
else
{
float *data;
THCudaCheck(hipMalloc((void**)(&data), size * sizeof(float)));
THCudaCheck(hipMemcpyAsync(data, self->data, THMin(self->size, size) * sizeof(float), hipMemcpyDeviceToDevice));
THCudaCheck(hipFree(self->data));
self->data = data;
self->size = size;
}
}
| 5457b1ea554140cb1ecb8a3515a86f292805032d.cu | #include "THCStorage.h"
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
void THCudaStorage_fill(THCState *state, THCudaStorage *self, float value)
{
thrust::device_ptr<float> self_data(self->data);
thrust::fill(self_data, self_data+self->size, value);
}
void THCudaStorage_resize(THCState *state, THCudaStorage *self, long size)
{
THArgCheck(size >= 0, 2, "invalid size");
if(!(self->flag & TH_STORAGE_RESIZABLE))
return;
if(size == 0)
{
if(self->flag & TH_STORAGE_FREEMEM)
THCudaCheck(cudaFree(self->data));
self->data = NULL;
self->size = 0;
}
else
{
float *data;
THCudaCheck(cudaMalloc((void**)(&data), size * sizeof(float)));
THCudaCheck(cudaMemcpyAsync(data, self->data, THMin(self->size, size) * sizeof(float), cudaMemcpyDeviceToDevice));
THCudaCheck(cudaFree(self->data));
self->data = data;
self->size = size;
}
}
|
3262ca37e9daada6865511a5b07245752678da68.hip | // !!! This is a file automatically generated by hipify!!!
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include "cuda_utils.cuh"
#include "functions/sigmoid.cuh"
#include "test_utils.h"
namespace MLCommon {
namespace Functions {
template <typename T>
struct SigmoidInputs {
T tolerance;
int len;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const SigmoidInputs<T>& dims) {
return os;
}
template <typename T>
class SigmoidTest : public ::testing::TestWithParam<SigmoidInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<SigmoidInputs<T>>::GetParam();
int len = params.len;
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
allocate(data, len);
T data_h[params.len] = {2.1, -4.5, -0.34, 10.0};
updateDevice(data, data_h, len, stream);
allocate(result, len);
allocate(result_ref, len);
T result_ref_h[params.len] = {0.89090318, 0.01098694, 0.41580948,
0.9999546};
updateDevice(result_ref, result_ref_h, len, stream);
sigmoid(result, data, len, stream);
CUDA_CHECK(hipStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(hipFree(data));
CUDA_CHECK(hipFree(result));
CUDA_CHECK(hipFree(result_ref));
}
protected:
SigmoidInputs<T> params;
T *data, *result, *result_ref;
};
const std::vector<SigmoidInputs<float>> inputsf2 = {{0.001f, 4}};
const std::vector<SigmoidInputs<double>> inputsd2 = {{0.001, 4}};
typedef SigmoidTest<float> SigmoidTestValF;
TEST_P(SigmoidTestValF, Result) {
ASSERT_TRUE(devArrMatch(result_ref, result, params.len,
CompareApproxAbs<float>(params.tolerance)));
}
typedef SigmoidTest<double> SigmoidTestValD;
TEST_P(SigmoidTestValD, Result) {
ASSERT_TRUE(devArrMatch(result_ref, result, params.len,
CompareApproxAbs<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(SigmoidTests, SigmoidTestValF,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(SigmoidTests, SigmoidTestValD,
::testing::ValuesIn(inputsd2));
} // end namespace Functions
} // end namespace MLCommon
| 3262ca37e9daada6865511a5b07245752678da68.cu | #include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include "cuda_utils.cuh"
#include "functions/sigmoid.cuh"
#include "test_utils.h"
namespace MLCommon {
namespace Functions {
template <typename T>
struct SigmoidInputs {
T tolerance;
int len;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const SigmoidInputs<T>& dims) {
return os;
}
template <typename T>
class SigmoidTest : public ::testing::TestWithParam<SigmoidInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<SigmoidInputs<T>>::GetParam();
int len = params.len;
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
allocate(data, len);
T data_h[params.len] = {2.1, -4.5, -0.34, 10.0};
updateDevice(data, data_h, len, stream);
allocate(result, len);
allocate(result_ref, len);
T result_ref_h[params.len] = {0.89090318, 0.01098694, 0.41580948,
0.9999546};
updateDevice(result_ref, result_ref_h, len, stream);
sigmoid(result, data, len, stream);
CUDA_CHECK(cudaStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(cudaFree(data));
CUDA_CHECK(cudaFree(result));
CUDA_CHECK(cudaFree(result_ref));
}
protected:
SigmoidInputs<T> params;
T *data, *result, *result_ref;
};
const std::vector<SigmoidInputs<float>> inputsf2 = {{0.001f, 4}};
const std::vector<SigmoidInputs<double>> inputsd2 = {{0.001, 4}};
typedef SigmoidTest<float> SigmoidTestValF;
TEST_P(SigmoidTestValF, Result) {
ASSERT_TRUE(devArrMatch(result_ref, result, params.len,
CompareApproxAbs<float>(params.tolerance)));
}
typedef SigmoidTest<double> SigmoidTestValD;
TEST_P(SigmoidTestValD, Result) {
ASSERT_TRUE(devArrMatch(result_ref, result, params.len,
CompareApproxAbs<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(SigmoidTests, SigmoidTestValF,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(SigmoidTests, SigmoidTestValD,
::testing::ValuesIn(inputsd2));
} // end namespace Functions
} // end namespace MLCommon
|
8ba2151f420ac1b83f672015908d691b48759b62.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
__device__ int clamp(int pos, int maxpos) {
pos = pos > 0 ? pos : 0;
pos = pos < (maxpos - 1) ? pos : (maxpos - 1);
return pos;
}
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
  // NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
float res = 0.f;
for (int r = -filterWidth / 2; r <= filterWidth / 2; ++r) {
for (int c = -filterWidth / 2; c <= filterWidth / 2; ++c) {
res += inputChannel[clamp(thread_2D_pos.y + r, numRows) * numCols + clamp(thread_2D_pos.x + c, numCols)]
*filter[(r + filterWidth / 2) * filterWidth + c + filterWidth / 2];
}
}
outputChannel[thread_1D_pos] = res;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
uchar4 origin = inputImageRGBA[thread_1D_pos];
redChannel[thread_1D_pos] = origin.x;
greenChannel[thread_1D_pos] = origin.y;
blueChannel[thread_1D_pos] = origin.z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
checkCudaErrors(hipMalloc(&d_filter, filterWidth * filterWidth * sizeof(float)));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter, filterWidth * filterWidth * sizeof(float), hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize((numCols - 1) / 2 + 1, (numRows - 1) / 512 + 1, 1);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
  //from the image size and block size.
const dim3 gridSize(2, 512, 1);
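  //NOTE: with this setup blockDim.x = ceil(numCols/2) and blockDim.y = ceil(numRows/512),
  //so the fixed 2x512 grid covers the image, but the block itself exceeds the usual
  //1024 threads-per-block limit once the image grows; a fixed block size with a grid
  //computed from the image dimensions would scale better.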
//TODO: Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA,
numRows,
numCols,
d_red,
d_green,
d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red,
d_redBlurred,
numRows, numCols,
d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green,
d_greenBlurred,
numRows, numCols,
d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue,
d_blueBlurred,
numRows, numCols,
d_filter, filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
}
| 8ba2151f420ac1b83f672015908d691b48759b62.cu | // Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
__device__ int clamp(int pos, int maxpos) {
pos = pos > 0 ? pos : 0;
pos = pos < (maxpos - 1) ? pos : (maxpos - 1);
return pos;
}
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
  // NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
float res = 0.f;
for (int r = -filterWidth / 2; r <= filterWidth / 2; ++r) {
for (int c = -filterWidth / 2; c <= filterWidth / 2; ++c) {
res += inputChannel[clamp(thread_2D_pos.y + r, numRows) * numCols + clamp(thread_2D_pos.x + c, numCols)]
*filter[(r + filterWidth / 2) * filterWidth + c + filterWidth / 2];
}
}
outputChannel[thread_1D_pos] = res;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
uchar4 origin = inputImageRGBA[thread_1D_pos];
redChannel[thread_1D_pos] = origin.x;
greenChannel[thread_1D_pos] = origin.y;
blueChannel[thread_1D_pos] = origin.z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
checkCudaErrors(cudaMalloc(&d_filter, filterWidth * filterWidth * sizeof(float)));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter, filterWidth * filterWidth * sizeof(float), cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize((numCols - 1) / 2 + 1, (numRows - 1) / 512 + 1, 1);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
  //from the image size and block size.
const dim3 gridSize(2, 512, 1);
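  //NOTE: with this setup blockDim.x = ceil(numCols/2) and blockDim.y = ceil(numRows/512),
  //so the fixed 2x512 grid covers the image, but the block itself exceeds the usual
  //1024 threads-per-block limit once the image grows; a fixed block size with a grid
  //computed from the image dimensions would scale better.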
//TODO: Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA,
numRows,
numCols,
d_red,
d_green,
d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize, blockSize>>>(d_red,
d_redBlurred,
numRows, numCols,
d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_green,
d_greenBlurred,
numRows, numCols,
d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_blue,
d_blueBlurred,
numRows, numCols,
d_filter, filterWidth);
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
}
|
338d985a22c0a8b40f2fc99ff68e2482fcdb76b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zmgeelltmv.cu normal z -> s, Fri Jul 18 17:34:28 2014
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
__global__ void
smgeelltmv_kernel( int num_rows,
int num_cols,
int num_vecs,
int num_cols_per_row,
float alpha,
float *d_val,
magma_index_t *d_colind,
float *d_x,
float beta,
float *d_y)
{
extern __shared__ float dot[];
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = d_colind [ num_rows * n + row ];
float val = d_val [ num_rows * n + row ];
if( val != 0){
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x + i*blockDim.x ] +=
val * d_x[col + i * num_cols ];
}
}
for( int i=0; i<num_vecs; i++ )
d_y[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ]
* alpha + beta * d_y [ row + i*num_cols ];
}
}
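// Layout note: the ELL slab is stored column-major, so slot n of row r is read from
// d_val[num_rows*n + r] / d_colind[num_rows*n + r] and consecutive threads touch
// consecutive addresses. The dynamic shared array "dot" keeps one running sum per
// thread and per vector (num_vecs * blockDim.x entries).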
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is ELL.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
    num_vecs    magma_int_t
number of vectors
@param
nnz_per_row magma_int_t
number of elements in the longest row
@param
alpha float
scalar multiplier
@param
d_val float*
array containing values of A in ELL
@param
d_colind magma_int_t*
                column indices of A in ELL
@param
d_x float*
input vector x
@param
beta float
scalar multiplier
@param
d_y float*
input/output vector y
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_smgeelltmv( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t nnz_per_row,
float alpha,
float *d_val,
magma_index_t *d_colind,
float *d_x,
float beta,
float *d_y ){
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( float ); // num_vecs vectors
hipLaunchKernelGGL(( smgeelltmv_kernel), dim3(grid), dim3(BLOCK_SIZE), MEM_SIZE , 0,
m, n, num_vecs, nnz_per_row, alpha, d_val, d_colind, d_x, beta, d_y );
return MAGMA_SUCCESS;
}
| 338d985a22c0a8b40f2fc99ff68e2482fcdb76b9.cu | /*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zmgeelltmv.cu normal z -> s, Fri Jul 18 17:34:28 2014
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
__global__ void
smgeelltmv_kernel( int num_rows,
int num_cols,
int num_vecs,
int num_cols_per_row,
float alpha,
float *d_val,
magma_index_t *d_colind,
float *d_x,
float beta,
float *d_y)
{
extern __shared__ float dot[];
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = d_colind [ num_rows * n + row ];
float val = d_val [ num_rows * n + row ];
if( val != 0){
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x + i*blockDim.x ] +=
val * d_x[col + i * num_cols ];
}
}
for( int i=0; i<num_vecs; i++ )
d_y[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ]
* alpha + beta * d_y [ row + i*num_cols ];
}
}
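// Layout note: the ELL slab is stored column-major, so slot n of row r is read from
// d_val[num_rows*n + r] / d_colind[num_rows*n + r] and consecutive threads touch
// consecutive addresses. The dynamic shared array "dot" keeps one running sum per
// thread and per vector (num_vecs * blockDim.x entries).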
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is ELL.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
    num_vecs    magma_int_t
number of vectors
@param
nnz_per_row magma_int_t
number of elements in the longest row
@param
alpha float
scalar multiplier
@param
d_val float*
array containing values of A in ELL
@param
d_colind magma_int_t*
                column indices of A in ELL
@param
d_x float*
input vector x
@param
beta float
scalar multiplier
@param
d_y float*
input/output vector y
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_smgeelltmv( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t nnz_per_row,
float alpha,
float *d_val,
magma_index_t *d_colind,
float *d_x,
float beta,
float *d_y ){
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( float ); // num_vecs vectors
smgeelltmv_kernel<<< grid, BLOCK_SIZE, MEM_SIZE >>>
( m, n, num_vecs, nnz_per_row, alpha, d_val, d_colind, d_x, beta, d_y );
return MAGMA_SUCCESS;
}
|
148bc4d0c9432c67ce6bdbea17a19644403ed36d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
#define G_1 1000000000
__global__ void
vectorAdd(int *A, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N) {
A[i] *= A[i];
}
}
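// Note: despite its name, this kernel squares each element of A in place
// (A[i] *= A[i]); it only serves as a small workload for the timing runs below.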
int cmpfunc (const void *a, const void *b)
{
double a_d = *(double *) a;
double b_d = *(double *) b;
if(a_d > b_d) {
return 1;
} else if(a_d < b_d) {
return -1;
} else {
return 0;
}
}
void dummy_run(int *h_A, int *d_A, int num_pkts)
{
int err = hipSuccess;
int threadsPerBlock = 256;
int blocksPerGrid = (num_pkts + threadsPerBlock - 1) / threadsPerBlock;
err = hipMemcpy(d_A, h_A, num_pkts * sizeof(int), hipMemcpyHostToDevice);
CPE(err != hipSuccess, "H2D memcpy failed\n");
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, num_pkts);
err = hipGetLastError();
CPE(err != hipSuccess, "Kernel launch failed\n");
err = hipMemcpy(h_A, d_A, num_pkts * sizeof(int), hipMemcpyDeviceToHost);
CPE(err != hipSuccess, "D2H memcpy failed\n");
}
void gpu_run(int *h_A, int *d_A, int num_pkts)
{
int err = hipSuccess;
struct timespec h2d_start[ITERS], h2d_end[ITERS];
struct timespec kernel_start[ITERS], kernel_end[ITERS];
struct timespec d2h_start[ITERS], d2h_end[ITERS];
/** < Microseconds */
double h2d_diff[ITERS], kernel_diff[ITERS], d2h_diff[ITERS];
double h2d_tot = 0, kernel_tot = 0, d2h_tot = 0;
int i, j;
int threadsPerBlock = 256;
int blocksPerGrid = (num_pkts + threadsPerBlock - 1) / threadsPerBlock;
/** < Do a dummy run for warmup */
dummy_run(h_A, d_A, num_pkts);
/** < Run several iterations */
for(i = 0; i < ITERS; i ++) {
for(j = 0; j < num_pkts; j++) {
h_A[j] = i;
}
/** < Host-to-device memcpy */
clock_gettime(CLOCK_REALTIME, &h2d_start[i]);
err = hipMemcpy(d_A, h_A, num_pkts * sizeof(int), hipMemcpyHostToDevice);
CPE(err != hipSuccess, "H2D memcpy failed\n");
clock_gettime(CLOCK_REALTIME, &h2d_end[i]);
/** < Kernel launch */
clock_gettime(CLOCK_REALTIME, &kernel_start[i]);
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, num_pkts);
err = hipGetLastError();
CPE(err != hipSuccess, "Kernel launch failed\n");
hipDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &kernel_end[i]);
/** < Device-to-host memcpy */
clock_gettime(CLOCK_REALTIME, &d2h_start[i]);
err = hipMemcpy(h_A, d_A, num_pkts * sizeof(int), hipMemcpyDeviceToHost);
CPE(err != hipSuccess, "D2H memcpy failed\n");
clock_gettime(CLOCK_REALTIME, &d2h_end[i]);
/** < Measure the difference */
h2d_diff[i] =
(double) (h2d_end[i].tv_nsec - h2d_start[i].tv_nsec) / 1000 +
(h2d_end[i].tv_sec - h2d_start[i].tv_sec) * 1000000;
kernel_diff[i] =
(double) (kernel_end[i].tv_nsec - kernel_start[i].tv_nsec) / 1000 +
(kernel_end[i].tv_sec - kernel_start[i].tv_sec) * 1000000;
d2h_diff[i] =
(double) (d2h_end[i].tv_nsec - d2h_start[i].tv_nsec) / 1000 +
(d2h_end[i].tv_sec - d2h_start[i].tv_sec) * 1000000;
printf("ITER %d: h2d: %f us, kernel: %f us, d2h us: %f\n", i,
h2d_diff[i], kernel_diff[i], d2h_diff[i]);
h2d_tot += h2d_diff[i];
kernel_tot += kernel_diff[i];
d2h_tot += d2h_diff[i];
}
/** < Sort the times for percentiles */
qsort(h2d_diff, ITERS, sizeof(double), cmpfunc);
qsort(kernel_diff, ITERS, sizeof(double), cmpfunc);
qsort(d2h_diff, ITERS, sizeof(double), cmpfunc);
int i_5 = (ITERS * 5) / 100;
int i_95 = (ITERS * 95) / 100;
red_printf("H2D average %.2f us 5th %.2f us 95th %.2f\n",
h2d_tot / ITERS, h2d_diff[i_5], h2d_diff[i_95]);
red_printf("Kernel average %.2f us 5th %.2f us 95th %.2f\n",
kernel_tot / ITERS, kernel_diff[i_5], kernel_diff[i_95]);
red_printf("D2H average %.2f us 5th %.2f us 95th %.2f\n",
d2h_tot / ITERS, d2h_diff[i_5], d2h_diff[i_95]);
red_printf("TOT average %.2f us 5th %.2f us 95th %.2f\n",
(h2d_tot + kernel_tot + d2h_tot) / ITERS,
(h2d_diff[i_5] + kernel_diff[i_5] + d2h_diff[i_5]),
		(h2d_diff[i_95] + kernel_diff[i_95] + d2h_diff[i_95]));
}
int main(int argc, char *argv[])
{
int err = hipSuccess;
int *h_A, *d_A;
assert(argc == 2);
int num_pkts = atoi(argv[1]);
printDeviceProperties();
/** < Allocate host and device buffers */
h_A = (int *) malloc(num_pkts * sizeof(int));
err = hipMalloc((void **) &d_A, num_pkts * sizeof(int));
CPE(err != hipSuccess, "Failed to hipMalloc\n");
if (h_A == NULL) {
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
/** < Run the measurement code */
gpu_run(h_A, d_A, num_pkts);
/** < Free host and device memory */
free(h_A);
hipFree(d_A);
// Reset the device and exit
err = hipDeviceReset();
CPE(err != hipSuccess, "Failed to de-initialize the device\n");
return 0;
}
| 148bc4d0c9432c67ce6bdbea17a19644403ed36d.cu | #include "common.h"
#define G_1 1000000000
__global__ void
vectorAdd(int *A, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N) {
A[i] *= A[i];
}
}
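// Note: despite its name, this kernel squares each element of A in place
// (A[i] *= A[i]); it only serves as a small workload for the timing runs below.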
int cmpfunc (const void *a, const void *b)
{
double a_d = *(double *) a;
double b_d = *(double *) b;
if(a_d > b_d) {
return 1;
} else if(a_d < b_d) {
return -1;
} else {
return 0;
}
}
void dummy_run(int *h_A, int *d_A, int num_pkts)
{
int err = cudaSuccess;
int threadsPerBlock = 256;
int blocksPerGrid = (num_pkts + threadsPerBlock - 1) / threadsPerBlock;
err = cudaMemcpy(d_A, h_A, num_pkts * sizeof(int), cudaMemcpyHostToDevice);
CPE(err != cudaSuccess, "H2D memcpy failed\n");
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, num_pkts);
err = cudaGetLastError();
CPE(err != cudaSuccess, "Kernel launch failed\n");
err = cudaMemcpy(h_A, d_A, num_pkts * sizeof(int), cudaMemcpyDeviceToHost);
CPE(err != cudaSuccess, "D2H memcpy failed\n");
}
void gpu_run(int *h_A, int *d_A, int num_pkts)
{
int err = cudaSuccess;
struct timespec h2d_start[ITERS], h2d_end[ITERS];
struct timespec kernel_start[ITERS], kernel_end[ITERS];
struct timespec d2h_start[ITERS], d2h_end[ITERS];
/** < Microseconds */
double h2d_diff[ITERS], kernel_diff[ITERS], d2h_diff[ITERS];
double h2d_tot = 0, kernel_tot = 0, d2h_tot = 0;
int i, j;
int threadsPerBlock = 256;
int blocksPerGrid = (num_pkts + threadsPerBlock - 1) / threadsPerBlock;
/** < Do a dummy run for warmup */
dummy_run(h_A, d_A, num_pkts);
/** < Run several iterations */
for(i = 0; i < ITERS; i ++) {
for(j = 0; j < num_pkts; j++) {
h_A[j] = i;
}
/** < Host-to-device memcpy */
clock_gettime(CLOCK_REALTIME, &h2d_start[i]);
err = cudaMemcpy(d_A, h_A, num_pkts * sizeof(int), cudaMemcpyHostToDevice);
CPE(err != cudaSuccess, "H2D memcpy failed\n");
clock_gettime(CLOCK_REALTIME, &h2d_end[i]);
/** < Kernel launch */
clock_gettime(CLOCK_REALTIME, &kernel_start[i]);
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, num_pkts);
err = cudaGetLastError();
CPE(err != cudaSuccess, "Kernel launch failed\n");
cudaDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &kernel_end[i]);
/** < Device-to-host memcpy */
clock_gettime(CLOCK_REALTIME, &d2h_start[i]);
err = cudaMemcpy(h_A, d_A, num_pkts * sizeof(int), cudaMemcpyDeviceToHost);
CPE(err != cudaSuccess, "D2H memcpy failed\n");
clock_gettime(CLOCK_REALTIME, &d2h_end[i]);
/** < Measure the difference */
h2d_diff[i] =
(double) (h2d_end[i].tv_nsec - h2d_start[i].tv_nsec) / 1000 +
(h2d_end[i].tv_sec - h2d_start[i].tv_sec) * 1000000;
kernel_diff[i] =
(double) (kernel_end[i].tv_nsec - kernel_start[i].tv_nsec) / 1000 +
(kernel_end[i].tv_sec - kernel_start[i].tv_sec) * 1000000;
d2h_diff[i] =
(double) (d2h_end[i].tv_nsec - d2h_start[i].tv_nsec) / 1000 +
(d2h_end[i].tv_sec - d2h_start[i].tv_sec) * 1000000;
printf("ITER %d: h2d: %f us, kernel: %f us, d2h us: %f\n", i,
h2d_diff[i], kernel_diff[i], d2h_diff[i]);
h2d_tot += h2d_diff[i];
kernel_tot += kernel_diff[i];
d2h_tot += d2h_diff[i];
}
/** < Sort the times for percentiles */
qsort(h2d_diff, ITERS, sizeof(double), cmpfunc);
qsort(kernel_diff, ITERS, sizeof(double), cmpfunc);
qsort(d2h_diff, ITERS, sizeof(double), cmpfunc);
int i_5 = (ITERS * 5) / 100;
int i_95 = (ITERS * 95) / 100;
red_printf("H2D average %.2f us 5th %.2f us 95th %.2f\n",
h2d_tot / ITERS, h2d_diff[i_5], h2d_diff[i_95]);
red_printf("Kernel average %.2f us 5th %.2f us 95th %.2f\n",
kernel_tot / ITERS, kernel_diff[i_5], kernel_diff[i_95]);
red_printf("D2H average %.2f us 5th %.2f us 95th %.2f\n",
d2h_tot / ITERS, d2h_diff[i_5], d2h_diff[i_95]);
red_printf("TOT average %.2f us 5th %.2f us 95th %.2f\n",
(h2d_tot + kernel_tot + d2h_tot) / ITERS,
(h2d_diff[i_5] + kernel_diff[i_5] + d2h_diff[i_5]),
		(h2d_diff[i_95] + kernel_diff[i_95] + d2h_diff[i_95]));
}
int main(int argc, char *argv[])
{
int err = cudaSuccess;
int *h_A, *d_A;
assert(argc == 2);
int num_pkts = atoi(argv[1]);
printDeviceProperties();
/** < Allocate host and device buffers */
h_A = (int *) malloc(num_pkts * sizeof(int));
err = cudaMalloc((void **) &d_A, num_pkts * sizeof(int));
CPE(err != cudaSuccess, "Failed to cudaMalloc\n");
if (h_A == NULL) {
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
/** < Run the measurement code */
gpu_run(h_A, d_A, num_pkts);
/** < Free host and device memory */
free(h_A);
cudaFree(d_A);
// Reset the device and exit
err = cudaDeviceReset();
CPE(err != cudaSuccess, "Failed to de-initialize the device\n");
return 0;
}
|
8437f8667fc6519db380898e2f11809083167d46.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated ds Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
#define PRECISION_d
#define blksize 64
// TODO get rid of global variable!
static __device__ int flag = 0;
__global__ void
magmaint_dlag2s( int M, int N,
const double *A, int lda,
float *SA, int ldsa,
double RMAX )
{
const double *Aend = A + lda*N;
double tmp;
double mRMAX = - RMAX;
int mym = blockIdx.x * blksize + threadIdx.x;
if ( mym < M ){
A += mym;
SA+= mym;
tmp = *A;
for ( ; A < Aend; )
{
A += lda;
if( ((tmp) < mRMAX) || ((tmp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((tmp) < mRMAX) || ((tmp) > RMAX)
#endif
)
{
flag = 1;
}
*SA = (float)( tmp );
tmp = *A;
SA += ldsa;
}
}
}
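// Each thread owns one matrix row and walks across the N columns (stride lda),
// casting every double entry to float; whenever an entry falls outside
// [-RMAX, RMAX] (the single precision overflow threshold from SLAMCH('O'))
// the device-side flag is set so the host can report INFO = 1.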
extern "C" void
magmablas_dlag2s( magma_int_t M, magma_int_t N ,
const double *A, magma_int_t lda,
float *SA, magma_int_t ldsa,
magma_int_t *info )
{
/*
Note
====
- We have to provide INFO at the end that dlag2s isn't doable now.
- Transfer a single value TO/FROM CPU/GPU
- SLAMCH that's needed is called from underlying BLAS
- Only used in iterative refinement
- Do we want to provide this in the release?
Purpose
=======
DLAG2S converts a DOUBLE PRECISION matrix A to a SINGLE PRECISION
matrix SA.
RMAX is the overflow for the SINGLE PRECISION arithmetic.
DLAG2S checks that all the entries of A are between -RMAX and
    RMAX. If not, the conversion is aborted and a flag is raised.
Arguments
=========
M (input) INTEGER
The number of lines of the matrix A. M >= 0.
N (input) INTEGER
The number of columns of the matrix A. N >= 0.
A (input) DOUBLE PRECISION array, dimension (LDA,N)
On entry, the M-by-N coefficient matrix A.
LDA (input) INTEGER
The leading dimension of the array A. LDA >= max(1,M).
SA (output) SINGLE PRECISION array, dimension (LDSA,N)
On exit, if INFO=0, the M-by-N coefficient matrix SA; if
INFO>0, the content of SA is unspecified.
LDSA (input) INTEGER
The leading dimension of the array SA. LDSA >= max(1,M).
INFO (output) INTEGER
= 0: successful exit.
< 0: if INFO = -i, the i-th argument had an illegal value
= 1: an entry of the matrix A is greater than the SINGLE PRECISION
overflow threshold, in this case, the content
of SA in exit is unspecified.
===================================================================== */
*info = 0;
if ( M < 0 )
*info = -1;
else if ( N < 0 )
*info = -2;
else if ( lda < max(1,M) )
*info = -4;
else if ( ldsa < max(1,M) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
//return *info;
}
double RMAX = (double)lapackf77_slamch("O");
dim3 threads( blksize, 1, 1 );
dim3 grid( (M+blksize-1)/blksize, 1, 1);
hipMemcpyToSymbol( flag, info, sizeof(flag) ); // flag = 0
hipLaunchKernelGGL(( magmaint_dlag2s), dim3(grid), dim3(threads), 0, magma_stream , M, N, A, lda, SA, ldsa, RMAX ) ;
hipMemcpyFromSymbol( info, flag, sizeof(flag) ); // info = flag
}
| 8437f8667fc6519db380898e2f11809083167d46.cu | /*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated ds Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
#define PRECISION_d
#define blksize 64
// TODO get rid of global variable!
static __device__ int flag = 0;
__global__ void
magmaint_dlag2s( int M, int N,
const double *A, int lda,
float *SA, int ldsa,
double RMAX )
{
const double *Aend = A + lda*N;
double tmp;
double mRMAX = - RMAX;
int mym = blockIdx.x * blksize + threadIdx.x;
if ( mym < M ){
A += mym;
SA+= mym;
tmp = *A;
for ( ; A < Aend; )
{
A += lda;
if( ((tmp) < mRMAX) || ((tmp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((tmp) < mRMAX) || ((tmp) > RMAX)
#endif
)
{
flag = 1;
}
*SA = (float)( tmp );
tmp = *A;
SA += ldsa;
}
}
}
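// Each thread owns one matrix row and walks across the N columns (stride lda),
// casting every double entry to float; whenever an entry falls outside
// [-RMAX, RMAX] (the single precision overflow threshold from SLAMCH('O'))
// the device-side flag is set so the host can report INFO = 1.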
extern "C" void
magmablas_dlag2s( magma_int_t M, magma_int_t N ,
const double *A, magma_int_t lda,
float *SA, magma_int_t ldsa,
magma_int_t *info )
{
/*
Note
====
- We have to provide INFO at the end that dlag2s isn't doable now.
- Transfer a single value TO/FROM CPU/GPU
- SLAMCH that's needed is called from underlying BLAS
- Only used in iterative refinement
- Do we want to provide this in the release?
Purpose
=======
DLAG2S converts a DOUBLE PRECISION matrix A to a SINGLE PRECISION
matrix SA.
RMAX is the overflow for the SINGLE PRECISION arithmetic.
DLAG2S checks that all the entries of A are between -RMAX and
    RMAX. If not, the conversion is aborted and a flag is raised.
Arguments
=========
M (input) INTEGER
The number of lines of the matrix A. M >= 0.
N (input) INTEGER
The number of columns of the matrix A. N >= 0.
A (input) DOUBLE PRECISION array, dimension (LDA,N)
On entry, the M-by-N coefficient matrix A.
LDA (input) INTEGER
The leading dimension of the array A. LDA >= max(1,M).
SA (output) SINGLE PRECISION array, dimension (LDSA,N)
On exit, if INFO=0, the M-by-N coefficient matrix SA; if
INFO>0, the content of SA is unspecified.
LDSA (input) INTEGER
The leading dimension of the array SA. LDSA >= max(1,M).
INFO (output) INTEGER
= 0: successful exit.
< 0: if INFO = -i, the i-th argument had an illegal value
= 1: an entry of the matrix A is greater than the SINGLE PRECISION
overflow threshold, in this case, the content
of SA in exit is unspecified.
===================================================================== */
*info = 0;
if ( M < 0 )
*info = -1;
else if ( N < 0 )
*info = -2;
else if ( lda < max(1,M) )
*info = -4;
else if ( ldsa < max(1,M) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
//return *info;
}
double RMAX = (double)lapackf77_slamch("O");
dim3 threads( blksize, 1, 1 );
dim3 grid( (M+blksize-1)/blksize, 1, 1);
cudaMemcpyToSymbol( flag, info, sizeof(flag) ); // flag = 0
magmaint_dlag2s<<< grid, threads, 0, magma_stream >>>( M, N, A, lda, SA, ldsa, RMAX ) ;
cudaMemcpyFromSymbol( info, flag, sizeof(flag) ); // info = flag
}
|
7297e9e238ab0b72567afc575241950d72b075f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
//--------------------------------------------------------------------
//A : m x l
//B : l x n
//C : m x n (C=A*B)
//--------------------------------------------------------------------
void host_mm(float* C, float* A, float* B, int m, int n, int l){
for(int i=0; i<m; i++)
for(int j=0; j<n; j++)
{
float s=0;
for (int k=0; k<l; k++)
{
float a = A[i*l + k];
float b = B[k*n + j];
s += a * b;
}
C[i*n + j] = s;
}
}
//--------------------------------------------------------------------
__global__ void gpu_mm(float* C, float* A, float* B, int m, int n, int l){
//// 2D Thread ID
int tx = threadIdx.x;
int ty = threadIdx.y;
// Pvalue is used to store the element of the matrix
// that is computed by the thread
float Pvalue = 0;
for (int k = 0; k < l; ++k)
{
float Aelement = A[ty * l + k];
float Belement = B[k * n + tx];
Pvalue += Aelement * Belement;
}
// Write the matrix to device memory;
// each thread writes one element
C[ty * n + tx] = Pvalue;
}
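// Each thread computes one element C[ty*n + tx] as a dot product of length l.
// Note: testMatrix launches this with a single block of m x n threads, so it only
// works while m*n stays within the per-block thread limit (typically 1024, hence
// the 32x32 sizes used in main).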
//----------------------------------------------
double diff(float* a, float* b, int n){
double s=0, r=0;
for(int k=0; k<n; k++)
{
double w=a[k]-b[k];
s+=w*w;
r+=a[k]*a[k];
}
return sqrt(s/r);
}
void random(float* a, int n){
for(int k=0; k<n; k++){
a[k]=(float)rand()/RAND_MAX*2-1;
}
}
//----------------------------------------------
void testMatrix(int m, int n, int l)
{
//initialize
float *a = (float*)malloc(sizeof(float)*m*l);
float *b = (float*)malloc(sizeof(float)*l*n);
float *c1 = (float*)malloc(sizeof(float)*m*n);
float *c2 = (float*)malloc(sizeof(float)*m*n);
srand(time(0));
random(a,m*l);
random(b,l*n);
memset(c1, 0, sizeof(float)*m*n);
memset(c2, 0, sizeof(float)*m*n);
float *ga,*gb,*gc;
hipMalloc((void**)&ga, m*l*sizeof(float));
hipMalloc((void**)&gb, l*n*sizeof(float));
hipMalloc((void**)&gc, m*n*sizeof(float));
hipMemcpy(ga, a, m*l*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(gb, b, l*n*sizeof(float), hipMemcpyHostToDevice);
hipMemset(gc, 0, m*n*sizeof(float));
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
//SBMT(Single Block, Multiple Threads)
hipLaunchKernelGGL(( gpu_mm), dim3(dim3(1,1,1)), dim3(dim3(m, n, 1)), 0, 0, gc,ga,gb,m,n,l);
hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
hipMemcpy(c2, gc, m*n*sizeof(float), hipMemcpyDeviceToHost);
double c_start,c_stop;
double CPU_execution_time;
c_start = (double)clock();
host_mm(c1, a, b, m, n, l);
c_stop = (double)clock();
CPU_execution_time = (c_stop - c_start)/(double)CLOCKS_PER_SEC;
//check precision
double err=diff(c1,c2,m*n);
printf("err = %g\n", err);
	printf(" ======== Execution Information ========\n");
	printf(" Execution Time on GPU: %3.20f s\n",elapsedTime/1000);
	printf(" Execution Time on CPU: %3.20f s\n",CPU_execution_time);
printf(" Speed up = %f\n",(CPU_execution_time/(elapsedTime/1000)));
printf(" ======================================\n\n");
free(a);
free(b);
free(c1);
free(c2);
hipFree(ga);
hipFree(gb);
hipFree(gc);
}
//----------------------------------------------
int main()
{
int m=32;
int n=32;
int l=32;
testMatrix(m,n,l);
return 0;
} | 7297e9e238ab0b72567afc575241950d72b075f9.cu | #include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
//--------------------------------------------------------------------
//A : m x l
//B : l x n
//C : m x n (C=A*B)
//--------------------------------------------------------------------
void host_mm(float* C, float* A, float* B, int m, int n, int l){
for(int i=0; i<m; i++)
for(int j=0; j<n; j++)
{
float s=0;
for (int k=0; k<l; k++)
{
float a = A[i*l + k];
float b = B[k*n + j];
s += a * b;
}
C[i*n + j] = s;
}
}
//--------------------------------------------------------------------
__global__ void gpu_mm(float* C, float* A, float* B, int m, int n, int l){
//// 2D Thread ID
int tx = threadIdx.x;
int ty = threadIdx.y;
// Pvalue is used to store the element of the matrix
// that is computed by the thread
float Pvalue = 0;
for (int k = 0; k < l; ++k)
{
float Aelement = A[ty * l + k];
float Belement = B[k * n + tx];
Pvalue += Aelement * Belement;
}
// Write the matrix to device memory;
// each thread writes one element
C[ty * n + tx] = Pvalue;
}
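// Each thread computes one element C[ty*n + tx] as a dot product of length l.
// Note: testMatrix launches this with a single block of m x n threads, so it only
// works while m*n stays within the per-block thread limit (typically 1024, hence
// the 32x32 sizes used in main).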
//----------------------------------------------
double diff(float* a, float* b, int n){
double s=0, r=0;
for(int k=0; k<n; k++)
{
double w=a[k]-b[k];
s+=w*w;
r+=a[k]*a[k];
}
return sqrt(s/r);
}
void random(float* a, int n){
for(int k=0; k<n; k++){
a[k]=(float)rand()/RAND_MAX*2-1;
}
}
//----------------------------------------------
void testMatrix(int m, int n, int l)
{
//initialize
float *a = (float*)malloc(sizeof(float)*m*l);
float *b = (float*)malloc(sizeof(float)*l*n);
float *c1 = (float*)malloc(sizeof(float)*m*n);
float *c2 = (float*)malloc(sizeof(float)*m*n);
srand(time(0));
random(a,m*l);
random(b,l*n);
memset(c1, 0, sizeof(float)*m*n);
memset(c2, 0, sizeof(float)*m*n);
float *ga,*gb,*gc;
cudaMalloc((void**)&ga, m*l*sizeof(float));
cudaMalloc((void**)&gb, l*n*sizeof(float));
cudaMalloc((void**)&gc, m*n*sizeof(float));
cudaMemcpy(ga, a, m*l*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(gb, b, l*n*sizeof(float), cudaMemcpyHostToDevice);
cudaMemset(gc, 0, m*n*sizeof(float));
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
//SBMT(Single Block, Multiple Threads)
gpu_mm<<<dim3(1,1,1), dim3(m, n, 1)>>> (gc,ga,gb,m,n,l);
cudaThreadSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaMemcpy(c2, gc, m*n*sizeof(float), cudaMemcpyDeviceToHost);
double c_start,c_stop;
double CPU_execution_time;
c_start = (double)clock();
host_mm(c1, a, b, m, n, l);
c_stop = (double)clock();
CPU_execution_time = (c_stop - c_start)/(double)CLOCKS_PER_SEC;
//check precision
double err=diff(c1,c2,m*n);
printf("err = %g\n", err);
	printf(" ======== Execution Information ========\n");
	printf(" Execution Time on GPU: %3.20f s\n",elapsedTime/1000);
	printf(" Execution Time on CPU: %3.20f s\n",CPU_execution_time);
printf(" Speed up = %f\n",(CPU_execution_time/(elapsedTime/1000)));
printf(" ======================================\n\n");
free(a);
free(b);
free(c1);
free(c2);
cudaFree(ga);
cudaFree(gb);
cudaFree(gc);
}
//----------------------------------------------
int main()
{
int m=32;
int n=32;
int l=32;
testMatrix(m,n,l);
return 0;
} |
c0ab94d3a2ac039b2455c54535f5df79b8fb62ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ceil_div.h>
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Dispatch.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/NativeFunctions.h>
#include <ATen/native/Resize.h>
#include <ATen/hip/HIPApplyUtils.cuh>
namespace at {
namespace native {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triu/tril ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t, typename IndexType, bool upper>
C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize())
__global__ void triu_tril_kernel(
cuda::detail::TensorInfo<scalar_t, IndexType> result_info,
const cuda::detail::TensorInfo<scalar_t, IndexType> self_info,
const int64_t k,
const int64_t N) {
int64_t linear_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_idx >= N) {
return;
}
auto dims = self_info.dims;
IndexType self_offset = 0, result_offset = 0;
// Compute column index and corresponding offset
IndexType col = linear_idx % self_info.sizes[dims - 1];
linear_idx /= self_info.sizes[dims - 1];
self_offset += self_info.strides[dims - 1] * col;
result_offset += result_info.strides[dims - 1] * col;
// Compute row index and corresponding offset
IndexType row = linear_idx % self_info.sizes[dims - 2];
linear_idx /= self_info.sizes[dims - 2];
self_offset += self_info.strides[dims - 2] * row;
result_offset += result_info.strides[dims - 2] * row;
// Compute remaining offsets
IndexType running_index;
#pragma unroll
for (IndexType i = dims - 3; i >= 0; --i) {
running_index = linear_idx % self_info.sizes[i];
linear_idx /= self_info.sizes[i];
self_offset += running_index * self_info.strides[i];
result_offset += running_index * result_info.strides[i];
}
bool mask = upper ? (col - row >= k) : (col - row <= k);
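  // e.g. for triu (upper == true) with k == 0 this keeps col >= row, i.e. the main
  // diagonal and everything above it; tril with k == 0 keeps col <= row. Entries
  // outside the kept triangle are zeroed below.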
result_info.data[result_offset] = mask ? self_info.data[self_offset] : scalar_t(0);
}
template <bool upper>
void triu_tril_cuda_template(const Tensor& result, const Tensor& self, int64_t k, const char* name) {
int64_t N = self.numel();
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid((N + dim_block.x - 1) / dim_block.x);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(at::ScalarType::Half, at::ScalarType::Bool, self.scalar_type(), "triu_tril_cuda_template", [&]{
if (cuda::detail::canUse32BitIndexMath(result) && cuda::detail::canUse32BitIndexMath(self)) {
auto result_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(result);
auto self_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(self);
hipLaunchKernelGGL(( triu_tril_kernel<scalar_t, int32_t, upper>)
, dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result_info, self_info, k, N);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
auto result_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(result);
auto self_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(self);
hipLaunchKernelGGL(( triu_tril_kernel<scalar_t, int64_t, upper>)
, dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result_info, self_info, k, N);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
});
}
TORCH_IMPL_FUNC(tril_cuda)(const Tensor& self, int64_t k, const Tensor &result) {
if (self.numel() != 0) {
triu_tril_cuda_template<false>(result, self, k, "tril");
}
}
TORCH_IMPL_FUNC(triu_cuda)(const Tensor& self, int64_t k, const Tensor &result) {
if (self.numel() != 0) {
triu_tril_cuda_template<true>(result, self, k, "triu");
}
}
// Copy the kth diagonal of a matrix B to a vector A.
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void copy_from_diagonal_kernel(
scalar_t* a,
scalar_t* b,
std::ptrdiff_t start,
std::ptrdiff_t size,
std::ptrdiff_t strideSum,
std::ptrdiff_t strideA) {
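  // Grid-stride loop: each thread starts at its global index and advances by
  // gridDim.x * blockDim.x, so any launch configuration covers all `size` elements.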
for (std::ptrdiff_t linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < size;
linearIndex += gridDim.x * blockDim.x) {
const std::ptrdiff_t bOffset = start + strideSum * linearIndex;
a[strideA * linearIndex] = b[bOffset];
}
}
// Copy vector B to the kth diagonal of a matrix A
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void copy_to_diagonal_kernel(
scalar_t* a,
scalar_t* b,
std::ptrdiff_t start,
std::ptrdiff_t size,
std::ptrdiff_t strideSum,
std::ptrdiff_t strideB) {
for (std::ptrdiff_t linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < size;
linearIndex += gridDim.x * blockDim.x) {
const std::ptrdiff_t aOffset = start + strideSum * linearIndex;
a[aOffset] = b[strideB * linearIndex];
}
}
template <typename scalar_t>
Tensor& apply_diag(Tensor& result, const Tensor& self, int64_t dimension) {
TORCH_CHECK(
self.dim() == 1 || self.dim() == 2, "matrix or a vector expected");
TensorArg result_arg{result, "result", 1};
TensorArg self_arg{self, "self", 2};
checkAllSameGPU(__func__, {result_arg, self_arg});
checkSameType(__func__, result_arg, self_arg);
int nDimension = self.dim();
if (nDimension == 2) {
auto self_stride_0 = self.stride(0);
auto self_stride_1 = self.stride(1);
int sz;
if (dimension > 0) {
sz = ::min(self.size(0), self.size(1) - dimension);
} else {
sz = ::min(self.size(0) + dimension, self.size(1));
}
at::native::resize_output(result, {sz});
if (sz > 0) {
at::assert_no_internal_overlap(result);
auto result_stride = result.stride(0);
const dim3 threads(::min(
int(sz),
int(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock)));
const dim3 grid(
::min(int(1024), ceil_div(int(sz), int(threads.x))));
auto start =
(dimension >= 0 ? dimension * self_stride_1
: -dimension * self_stride_0);
// Kernel Launch
hipLaunchKernelGGL(( copy_from_diagonal_kernel<scalar_t>)
, dim3(grid), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result.data_ptr<scalar_t>(),
self.data_ptr<scalar_t>(),
start,
sz,
self_stride_0 + self_stride_1,
result_stride);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
} else {
auto n_elems = self.numel();
auto sz = (dimension > 0) ? n_elems + dimension : n_elems - dimension;
auto self_stride = self.stride(0);
at::native::resize_output(result, {sz, sz});
result.zero_();
if (sz > 0) {
at::assert_no_internal_overlap(result);
auto result_stride_0 = result.stride(0);
auto result_stride_1 = result.stride(1);
const dim3 threads(::min(
int(sz), at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock));
const dim3 grid(
::min(int(1024), ceil_div(int(sz), int(threads.x))));
auto start =
(dimension >= 0 ? dimension * result_stride_1
: -dimension * result_stride_0);
// Kernel Launch
hipLaunchKernelGGL(( copy_to_diagonal_kernel<scalar_t>)
, dim3(grid), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result.data_ptr<scalar_t>(),
self.data_ptr<scalar_t>(),
start,
n_elems,
result_stride_0 + result_stride_1,
self_stride);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
}
return result;
}
Tensor& diag_cuda_out(const Tensor& self, int64_t dimension, Tensor& result) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool,
self.scalar_type(), "diag_cuda",
[&] {
apply_diag<scalar_t>(result, self, dimension);
});
return result;
}
Tensor trace_cuda(const Tensor& self) {
TORCH_CHECK(self.dim() == 2, "expected a matrix");
int dimension = 0;
auto result = at::diag(self, dimension);
return result.sum();
}
} // namespace native
} // namespace at
| c0ab94d3a2ac039b2455c54535f5df79b8fb62ab.cu | #include <ATen/ceil_div.h>
#include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Dispatch.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/NativeFunctions.h>
#include <ATen/native/Resize.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
namespace at {
namespace native {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triu/tril ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t, typename IndexType, bool upper>
C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize())
__global__ void triu_tril_kernel(
cuda::detail::TensorInfo<scalar_t, IndexType> result_info,
const cuda::detail::TensorInfo<scalar_t, IndexType> self_info,
const int64_t k,
const int64_t N) {
int64_t linear_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_idx >= N) {
return;
}
auto dims = self_info.dims;
IndexType self_offset = 0, result_offset = 0;
// Compute column index and corresponding offset
IndexType col = linear_idx % self_info.sizes[dims - 1];
linear_idx /= self_info.sizes[dims - 1];
self_offset += self_info.strides[dims - 1] * col;
result_offset += result_info.strides[dims - 1] * col;
// Compute row index and corresponding offset
IndexType row = linear_idx % self_info.sizes[dims - 2];
linear_idx /= self_info.sizes[dims - 2];
self_offset += self_info.strides[dims - 2] * row;
result_offset += result_info.strides[dims - 2] * row;
// Compute remaining offsets
IndexType running_index;
#pragma unroll
for (IndexType i = dims - 3; i >= 0; --i) {
running_index = linear_idx % self_info.sizes[i];
linear_idx /= self_info.sizes[i];
self_offset += running_index * self_info.strides[i];
result_offset += running_index * result_info.strides[i];
}
bool mask = upper ? (col - row >= k) : (col - row <= k);
result_info.data[result_offset] = mask ? self_info.data[self_offset] : scalar_t(0);
}
template <bool upper>
void triu_tril_cuda_template(const Tensor& result, const Tensor& self, int64_t k, const char* name) {
int64_t N = self.numel();
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid((N + dim_block.x - 1) / dim_block.x);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(at::ScalarType::Half, at::ScalarType::Bool, self.scalar_type(), "triu_tril_cuda_template", [&]{
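// Use 32-bit index arithmetic when both tensors allow it, otherwise fall back to 64-bit indexing.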
if (cuda::detail::canUse32BitIndexMath(result) && cuda::detail::canUse32BitIndexMath(self)) {
auto result_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(result);
auto self_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(self);
triu_tril_kernel<scalar_t, int32_t, upper>
<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
result_info, self_info, k, N);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
auto result_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(result);
auto self_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(self);
triu_tril_kernel<scalar_t, int64_t, upper>
<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
result_info, self_info, k, N);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
});
}
TORCH_IMPL_FUNC(tril_cuda)(const Tensor& self, int64_t k, const Tensor &result) {
if (self.numel() != 0) {
triu_tril_cuda_template<false>(result, self, k, "tril");
}
}
TORCH_IMPL_FUNC(triu_cuda)(const Tensor& self, int64_t k, const Tensor &result) {
if (self.numel() != 0) {
triu_tril_cuda_template<true>(result, self, k, "triu");
}
}
// Copy the kth diagonal of a matrix B to a vector A.
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void copy_from_diagonal_kernel(
scalar_t* a,
scalar_t* b,
std::ptrdiff_t start,
std::ptrdiff_t size,
std::ptrdiff_t strideSum,
std::ptrdiff_t strideA) {
for (std::ptrdiff_t linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < size;
linearIndex += gridDim.x * blockDim.x) {
const std::ptrdiff_t bOffset = start + strideSum * linearIndex;
a[strideA * linearIndex] = b[bOffset];
}
}
// Copy vector B to the kth diagonal of a matrix A
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void copy_to_diagonal_kernel(
scalar_t* a,
scalar_t* b,
std::ptrdiff_t start,
std::ptrdiff_t size,
std::ptrdiff_t strideSum,
std::ptrdiff_t strideB) {
for (std::ptrdiff_t linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < size;
linearIndex += gridDim.x * blockDim.x) {
const std::ptrdiff_t aOffset = start + strideSum * linearIndex;
a[aOffset] = b[strideB * linearIndex];
}
}
template <typename scalar_t>
Tensor& apply_diag(Tensor& result, const Tensor& self, int64_t dimension) {
TORCH_CHECK(
self.dim() == 1 || self.dim() == 2, "matrix or a vector expected");
TensorArg result_arg{result, "result", 1};
TensorArg self_arg{self, "self", 2};
checkAllSameGPU(__func__, {result_arg, self_arg});
checkSameType(__func__, result_arg, self_arg);
int nDimension = self.dim();
if (nDimension == 2) {
auto self_stride_0 = self.stride(0);
auto self_stride_1 = self.stride(1);
int sz;
if (dimension > 0) {
sz = std::min(self.size(0), self.size(1) - dimension);
} else {
sz = std::min(self.size(0) + dimension, self.size(1));
}
at::native::resize_output(result, {sz});
if (sz > 0) {
at::assert_no_internal_overlap(result);
auto result_stride = result.stride(0);
const dim3 threads(std::min(
int(sz),
int(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock)));
const dim3 grid(
std::min(int(1024), ceil_div(int(sz), int(threads.x))));
auto start =
(dimension >= 0 ? dimension * self_stride_1
: -dimension * self_stride_0);
// Kernel Launch
copy_from_diagonal_kernel<scalar_t>
<<<grid, threads, 0, c10::cuda::getCurrentCUDAStream()>>>(
result.data_ptr<scalar_t>(),
self.data_ptr<scalar_t>(),
start,
sz,
self_stride_0 + self_stride_1,
result_stride);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
} else {
auto n_elems = self.numel();
auto sz = (dimension > 0) ? n_elems + dimension : n_elems - dimension;
auto self_stride = self.stride(0);
at::native::resize_output(result, {sz, sz});
result.zero_();
if (sz > 0) {
at::assert_no_internal_overlap(result);
auto result_stride_0 = result.stride(0);
auto result_stride_1 = result.stride(1);
const dim3 threads(std::min(
int(sz), at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock));
const dim3 grid(
std::min(int(1024), ceil_div(int(sz), int(threads.x))));
auto start =
(dimension >= 0 ? dimension * result_stride_1
: -dimension * result_stride_0);
// Kernel Launch
copy_to_diagonal_kernel<scalar_t>
<<<grid, threads, 0, c10::cuda::getCurrentCUDAStream()>>>(
result.data_ptr<scalar_t>(),
self.data_ptr<scalar_t>(),
start,
n_elems,
result_stride_0 + result_stride_1,
self_stride);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
}
return result;
}
Tensor& diag_cuda_out(const Tensor& self, int64_t dimension, Tensor& result) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool,
self.scalar_type(), "diag_cuda",
[&] {
apply_diag<scalar_t>(result, self, dimension);
});
return result;
}
Tensor trace_cuda(const Tensor& self) {
TORCH_CHECK(self.dim() == 2, "expected a matrix");
int dimension = 0;
auto result = at::diag(self, dimension);
return result.sum();
}
} // namespace native
} // namespace at
|
2245305a81b478822a94af879400c10a00df4f4b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/hip_cooperative_groups.h>
#include <cooperative_groups/reduce.h>
#include "CompareTextRegions.h"
namespace cg = cooperative_groups;
template<typename T>
__device__ __forceinline__ void write_pixel_gs(T* grayscale, std::int32_t batch, std::int32_t x, std::int32_t y, T val, std::int32_t width, std::int32_t height)
{
*(grayscale + width * height * batch + width * y + x) = val;
}
template<typename T>
__device__ __forceinline__ void write_pixel(T* rgb, std::int32_t batch, std::int32_t x, std::int32_t y, T r, T g, T b, std::int32_t width, std::int32_t height)
{
*(rgb + width * height * 3 * batch + width * height * 0 + width * y + x) = r;
*(rgb + width * height * 3 * batch + width * height * 1 + width * y + x) = g;
*(rgb + width * height * 3 * batch + width * height * 2 + width * y + x) = b;
}
template<typename T>
__device__ __forceinline__ T read_pixel(T const* const grayscale, std::int32_t batch, std::int32_t x, std::int32_t y, std::int32_t channel, std::int32_t width, std::int32_t height)
{
return *(grayscale + width * height * 3 * batch + width * height * channel + width * y + x);
}
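// Union view of a uint4: one 16-byte vectorized load that still exposes the individual bytes.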
union load_16bytes
{
uint4 u128;
struct
{
std::uint8_t u8s[16];
};
};
__global__ void CompareTextRegions_stage_1_kernel(
uchar const* const regions,
comparison_pair const* const compares,
std::uint32_t* out_intermediate_results,
std::int32_t reduced_region_width,
std::int32_t region_height
)
{
std::int32_t ix(blockIdx.x * blockDim.x + threadIdx.x), iy(blockIdx.y * blockDim.y + threadIdx.y), iz(blockIdx.z);
auto img1_idx(compares[iz].first), img2_idx(compares[iz].second);
load_16bytes const* const in_16bytes(reinterpret_cast<load_16bytes const* const>(regions));
if (ix >= reduced_region_width)
return;
std::uint32_t absdiff(0);
for (std::int32_t channel(0); channel < 3; ++channel)
{
// step 2: load 16 bytes from the current channel of image img1
auto img1_segment(read_pixel(in_16bytes, img1_idx, ix, iy, channel, reduced_region_width, region_height));
// step 3: load 16 bytes from the same channel of image img2
auto img2_segment(read_pixel(in_16bytes, img2_idx, ix, iy, channel, reduced_region_width, region_height));
// step 4: compute absdiff
for (std::int32_t i(0); i < 16; ++i)
absdiff += std::abs(static_cast<std::int32_t>(img1_segment.u8s[i]) - static_cast<std::int32_t>(img2_segment.u8s[i]));
}
// step 5: sum result of 3 channels and store
write_pixel_gs(out_intermediate_results, iz, ix, iy, absdiff, reduced_region_width, region_height);
}
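// Block-wide sum reduction: each 32-thread tile is reduced with cg::reduce,
// then thread 0 accumulates the per-tile partial sums into sdata[0].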
__device__ void reduceBlock_2(std::uint32_t* sdata, const cg::thread_block& cta)
{
const unsigned int tid = cta.thread_rank();
cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
sdata[tid] = cg::reduce(tile32, sdata[tid], cg::plus<std::uint32_t>());
cg::sync(cta);
std::uint32_t sum(0);
if (cta.thread_rank() == 0)
{
sum = 0;
for (int i = 0; i < blockDim.x; i += tile32.size())
{
sum += sdata[i];
}
sdata[0] = sum;
}
cg::sync(cta);
}
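// Stage 2: one block per comparison pair sums the per-segment absolute
// differences produced by stage 1 into a single absdiff value.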
__global__ void CompareTextRegions_stage_2_kernel(
std::uint32_t const* const in_intermediate_results,
std::uint32_t* out_absdiffs,
std::int32_t array_length // value is (H*W/16)
)
{
cg::thread_block block = cg::this_thread_block();
__shared__ std::uint32_t smem[1024];
smem[block.thread_rank()] = 0;
for (std::int32_t i(block.thread_rank()); i < array_length; i += block.size())
smem[block.thread_rank()] += in_intermediate_results[i + array_length * blockIdx.x];
cg::sync(block);
reduceBlock_2(smem, block);
if (block.thread_rank() == 0)
{
out_absdiffs[blockIdx.x] = smem[0];
}
}
void CompareTextRegions(
cudawrapper::CUDADeviceMemoryUnique<uchar> const& in_all_text_regions,
std::vector<comparison_pair> const& in_comparison_pairs,
cudawrapper::CUDAHostMemoryUnique<std::uint32_t>& out_comparsion_result, // absdiffs
cudawrapper::CUDADeviceMemoryUnique<comparison_pair>& tmp_comparison_pairs_gpu,
cudawrapper::CUDADeviceMemoryUnique<std::uint32_t>& tmp_comparsion_result_gpu,
cudawrapper::CUDADeviceMemoryUnique<std::uint32_t>& tmp_intermediate_results,
std::size_t text_region_count,
std::int32_t region_width,
std::int32_t region_height,
hipStream_t stream
)
{
std::int32_t reduced_region_width(region_width / 16);
std::size_t num_comparisons(in_comparison_pairs.size());
std::size_t intermediate_results_size(num_comparisons * region_width * region_height);
if (tmp_intermediate_results.empty() || tmp_intermediate_results.size() < intermediate_results_size)
tmp_intermediate_results.reallocate(intermediate_results_size);
std::size_t out_absdiffs_size(num_comparisons);
if (out_comparsion_result.empty() || out_comparsion_result.size() != num_comparisons)
out_comparsion_result.reallocate(num_comparisons);
if (tmp_comparsion_result_gpu.empty() || tmp_comparsion_result_gpu.size() != num_comparisons)
tmp_comparsion_result_gpu.reallocate(num_comparisons);
tmp_comparison_pairs_gpu.upload(in_comparison_pairs, stream);
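// Launch configuration: each stage-1 thread handles one 16-byte segment of a row,
// and the grid z dimension indexes the comparison pair.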
dim3 stage1_block(32, 32, 1);
dim3 stage1_grid((reduced_region_width - 1) / 32 + 1, region_height / 32, num_comparisons);
// launch stage 1
hipLaunchKernelGGL(( CompareTextRegions_stage_1_kernel), dim3(stage1_grid), dim3(stage1_block), 0, stream,
in_all_text_regions, tmp_comparison_pairs_gpu, tmp_intermediate_results, reduced_region_width, region_height);
ck2(hipGetLastError());
dim3 stage2_block(1024, 1, 1);
dim3 stage2_grid(num_comparisons, 1, 1);
// launch stage 2
hipLaunchKernelGGL(( CompareTextRegions_stage_2_kernel), dim3(stage2_grid), dim3(stage2_block), 0, stream, tmp_intermediate_results, tmp_comparsion_result_gpu, reduced_region_width * region_height);
ck2(hipGetLastError());
// copy back absdiffs
tmp_comparsion_result_gpu.download_block(out_comparsion_result, stream);
}
| 2245305a81b478822a94af879400c10a00df4f4b.cu |
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cooperative_groups.h>
#include <cooperative_groups/reduce.h>
#include "CompareTextRegions.h"
namespace cg = cooperative_groups;
template<typename T>
__device__ __forceinline__ void write_pixel_gs(T* grayscale, std::int32_t batch, std::int32_t x, std::int32_t y, T val, std::int32_t width, std::int32_t height)
{
*(grayscale + width * height * batch + width * y + x) = val;
}
template<typename T>
__device__ __forceinline__ void write_pixel(T* rgb, std::int32_t batch, std::int32_t x, std::int32_t y, T r, T g, T b, std::int32_t width, std::int32_t height)
{
*(rgb + width * height * 3 * batch + width * height * 0 + width * y + x) = r;
*(rgb + width * height * 3 * batch + width * height * 1 + width * y + x) = g;
*(rgb + width * height * 3 * batch + width * height * 2 + width * y + x) = b;
}
template<typename T>
__device__ __forceinline__ T read_pixel(T const* const grayscale, std::int32_t batch, std::int32_t x, std::int32_t y, std::int32_t channel, std::int32_t width, std::int32_t height)
{
return *(grayscale + width * height * 3 * batch + width * height * channel + width * y + x);
}
union load_16bytes
{
uint4 u128;
struct
{
std::uint8_t u8s[16];
};
};
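// Stage 1: each thread compares one 16-byte segment of the two regions and
// writes the per-byte absolute difference summed over the three channels.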
__global__ void CompareTextRegions_stage_1_kernel(
uchar const* const regions,
comparison_pair const* const compares,
std::uint32_t* out_intermediate_results,
std::int32_t reduced_region_width,
std::int32_t region_height
)
{
std::int32_t ix(blockIdx.x * blockDim.x + threadIdx.x), iy(blockIdx.y * blockDim.y + threadIdx.y), iz(blockIdx.z);
auto img1_idx(compares[iz].first), img2_idx(compares[iz].second);
load_16bytes const* const in_16bytes(reinterpret_cast<load_16bytes const* const>(regions));
if (ix >= reduced_region_width)
return;
std::uint32_t absdiff(0);
for (std::int32_t channel(0); channel < 3; ++channel)
{
// step 2: load 16 bytes from the current channel of image img1
auto img1_segment(read_pixel(in_16bytes, img1_idx, ix, iy, channel, reduced_region_width, region_height));
// step 3: load 16 bytes from the same channel of image img2
auto img2_segment(read_pixel(in_16bytes, img2_idx, ix, iy, channel, reduced_region_width, region_height));
// step 4: compute absdiff
for (std::int32_t i(0); i < 16; ++i)
absdiff += std::abs(static_cast<std::int32_t>(img1_segment.u8s[i]) - static_cast<std::int32_t>(img2_segment.u8s[i]));
}
// step 5: sum result of 3 channels and store
write_pixel_gs(out_intermediate_results, iz, ix, iy, absdiff, reduced_region_width, region_height);
}
__device__ void reduceBlock_2(std::uint32_t* sdata, const cg::thread_block& cta)
{
const unsigned int tid = cta.thread_rank();
cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
sdata[tid] = cg::reduce(tile32, sdata[tid], cg::plus<std::uint32_t>());
cg::sync(cta);
std::uint32_t sum(0);
if (cta.thread_rank() == 0)
{
sum = 0;
for (int i = 0; i < blockDim.x; i += tile32.size())
{
sum += sdata[i];
}
sdata[0] = sum;
}
cg::sync(cta);
}
__global__ void CompareTextRegions_stage_2_kernel(
std::uint32_t const* const in_intermediate_results,
std::uint32_t* out_absdiffs,
std::int32_t array_length // value is (H*W/16)
)
{
cg::thread_block block = cg::this_thread_block();
__shared__ std::uint32_t smem[1024];
smem[block.thread_rank()] = 0;
for (std::int32_t i(block.thread_rank()); i < array_length; i += block.size())
smem[block.thread_rank()] += in_intermediate_results[i + array_length * blockIdx.x];
cg::sync(block);
reduceBlock_2(smem, block);
if (block.thread_rank() == 0)
{
out_absdiffs[blockIdx.x] = smem[0];
}
}
void CompareTextRegions(
cudawrapper::CUDADeviceMemoryUnique<uchar> const& in_all_text_regions,
std::vector<comparison_pair> const& in_comparison_pairs,
cudawrapper::CUDAHostMemoryUnique<std::uint32_t>& out_comparsion_result, // absdiffs
cudawrapper::CUDADeviceMemoryUnique<comparison_pair>& tmp_comparison_pairs_gpu,
cudawrapper::CUDADeviceMemoryUnique<std::uint32_t>& tmp_comparsion_result_gpu,
cudawrapper::CUDADeviceMemoryUnique<std::uint32_t>& tmp_intermediate_results,
std::size_t text_region_count,
std::int32_t region_width,
std::int32_t region_height,
CUstream stream
)
{
std::int32_t reduced_region_width(region_width / 16);
std::size_t num_comparisons(in_comparison_pairs.size());
std::size_t intermediate_results_size(num_comparisons * region_width * region_height);
if (tmp_intermediate_results.empty() || tmp_intermediate_results.size() < intermediate_results_size)
tmp_intermediate_results.reallocate(intermediate_results_size);
std::size_t out_absdiffs_size(num_comparisons);
if (out_comparsion_result.empty() || out_comparsion_result.size() != num_comparisons)
out_comparsion_result.reallocate(num_comparisons);
if (tmp_comparsion_result_gpu.empty() || tmp_comparsion_result_gpu.size() != num_comparisons)
tmp_comparsion_result_gpu.reallocate(num_comparisons);
tmp_comparison_pairs_gpu.upload(in_comparison_pairs, stream);
dim3 stage1_block(32, 32, 1);
dim3 stage1_grid((reduced_region_width - 1) / 32 + 1, region_height / 32, num_comparisons);
// launch stage 1
CompareTextRegions_stage_1_kernel<<<stage1_grid, stage1_block, 0, stream>>>
(in_all_text_regions, tmp_comparison_pairs_gpu, tmp_intermediate_results, reduced_region_width, region_height);
ck2(cudaGetLastError());
dim3 stage2_block(1024, 1, 1);
dim3 stage2_grid(num_comparisons, 1, 1);
// launch stage 2
CompareTextRegions_stage_2_kernel<<<stage2_grid, stage2_block, 0, stream>>>(tmp_intermediate_results, tmp_comparsion_result_gpu, reduced_region_width * region_height);
ck2(cudaGetLastError());
// copy back absdiffs
tmp_comparsion_result_gpu.download_block(out_comparsion_result, stream);
}
|
5bc52a1d0ef47674b59a38922243164f98c798a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "stacked_autoencoder.h"
int main()
{
int visibleSize = 28*28; // Size of the input vectors
int hiddenSizeL1 = 196; // Hidden size of the first autoencoder layer
int hiddenSizeL2 = 196; // Hidden size of the second autoencoder layer
int numClasses = 10; // Number of different class labels
int trainSize = 5000; // Size of the training set
int testSize = 10000; // Size of the test set
float* d_data;
float* d_label;
float* d_testData;
float* d_testLabel;
float* h_data = new(float[visibleSize * trainSize]);
float* h_label = new(float[trainSize]);
float* h_testData = new(float[visibleSize * testSize]);
float* h_testLabel = new(float[testSize]);
// Load the training set in host memory
load("data/trainData.dat", h_data, trainSize*visibleSize);
load("data/trainLabel.dat", h_label, trainSize);
// Load the test set and labels in host memory
load("data/testData.dat", h_testData, testSize*visibleSize);
load("data/testLabel.dat", h_testLabel, testSize);
// Allocate memory in device for the training data and labels
CUDA_SAFE_CALL(hipMalloc(&d_data, visibleSize * trainSize * sizeof(float)));
CUDA_SAFE_CALL(hipMalloc(&d_label, trainSize * sizeof(float)));
// Copy training set and label to device memory
CUDA_SAFE_CALL(hipMemcpy(d_data, h_data, visibleSize * trainSize * sizeof(float), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_label, h_label, trainSize * sizeof(float), hipMemcpyHostToDevice));
// Create two sparse autoencoders and one softmax layer
SparseAutoencoder* sa1 = new SparseAutoencoder(visibleSize, hiddenSizeL1);
SparseAutoencoder* sa2 = new SparseAutoencoder(hiddenSizeL1, hiddenSizeL2);
Softmax* sm = new Softmax(hiddenSizeL2, numClasses);
// Create a stacked autoencoder object
StackAutoencoder* sa = new StackAutoencoder();
// Add the autoencoders to the stack (the order is important)
sa->addAutoencoder(sa1);
sa->addAutoencoder(sa2);
// Add the softmax layer to the stack (the order is important)
sa->addSoftmax(sm);
// Pre-train the network (layer-wise)
PROFILE("pre-train",
sa->train(d_data, d_label, trainSize, 2000);
);
// Allocate memory in device for the test data and labels
CUDA_SAFE_CALL(hipMalloc(&d_testData, visibleSize * testSize * sizeof(float)));
CUDA_SAFE_CALL(hipMalloc(&d_testLabel, testSize * sizeof(float)));
// Copy test set and label to device memory
CUDA_SAFE_CALL(hipMemcpy(d_testData, h_testData, visibleSize * testSize * sizeof(float), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_testLabel, h_testLabel, testSize * sizeof(float), hipMemcpyHostToDevice));
// Calculate the accuracy of the sparse autoencoder before fine-tuning
sa->test(d_testData, d_testLabel, testSize);
// Free the memory allocated for the test data
CUDA_SAFE_CALL(hipFree(d_testData));
CUDA_SAFE_CALL(hipFree(d_testLabel));
// Train the network for fine-tuning
PROFILE("finetune",
sa->fineTune(d_data, d_label, trainSize, 2000);
);
// Allocate memory in device for the test data and labels
CUDA_SAFE_CALL(hipMalloc(&d_testData, visibleSize * testSize * sizeof(float)));
CUDA_SAFE_CALL(hipMalloc(&d_testLabel, testSize * sizeof(float)));
// Copy test set and label to device memory
CUDA_SAFE_CALL(hipMemcpy(d_testData, h_testData, visibleSize * testSize * sizeof(float), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_testLabel, h_testLabel, testSize * sizeof(float), hipMemcpyHostToDevice));
// Calculate the accuracy of the sparse autoencoder after fine-tuning
sa->test(d_testData, d_testLabel, testSize);
// Clean the resources
delete sa;
CUDA_SAFE_CALL(hipFree(d_testData));
CUDA_SAFE_CALL(hipFree(d_testLabel));
CUDA_SAFE_CALL(hipFree(d_data));
CUDA_SAFE_CALL(hipFree(d_label));
CUDA_SAFE_CALL(hipDeviceReset());
return 0;
} | 5bc52a1d0ef47674b59a38922243164f98c798a5.cu | #include "stacked_autoencoder.h"
int main()
{
int visibleSize = 28*28; // Size of the input vectors
int hiddenSizeL1 = 196; // Hidden size of the first autoencoder layer
int hiddenSizeL2 = 196; // Hidden size of the second autoencoder layer
int numClasses = 10; // Number of different class labels
int trainSize = 5000; // Size of the training set
int testSize = 10000; // Size of the test set
float* d_data;
float* d_label;
float* d_testData;
float* d_testLabel;
float* h_data = new(float[visibleSize * trainSize]);
float* h_label = new(float[trainSize]);
float* h_testData = new(float[visibleSize * testSize]);
float* h_testLabel = new(float[testSize]);
// Load the training set in host memory
load("data/trainData.dat", h_data, trainSize*visibleSize);
load("data/trainLabel.dat", h_label, trainSize);
// Load the test set and labels in host memory
load("data/testData.dat", h_testData, testSize*visibleSize);
load("data/testLabel.dat", h_testLabel, testSize);
// Allocate memory in device for the training data and labels
CUDA_SAFE_CALL(cudaMalloc(&d_data, visibleSize * trainSize * sizeof(float)));
CUDA_SAFE_CALL(cudaMalloc(&d_label, trainSize * sizeof(float)));
// Copy training set and label to device memory
CUDA_SAFE_CALL(cudaMemcpy(d_data, h_data, visibleSize * trainSize * sizeof(float), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_label, h_label, trainSize * sizeof(float), cudaMemcpyHostToDevice));
// Create two sparse autoencoders and one softmax layer
SparseAutoencoder* sa1 = new SparseAutoencoder(visibleSize, hiddenSizeL1);
SparseAutoencoder* sa2 = new SparseAutoencoder(hiddenSizeL1, hiddenSizeL2);
Softmax* sm = new Softmax(hiddenSizeL2, numClasses);
// Create a stacked autoencoder object
StackAutoencoder* sa = new StackAutoencoder();
// Add the autoencoders to the stack (the order is important)
sa->addAutoencoder(sa1);
sa->addAutoencoder(sa2);
// Add the softmax layer to the stack (the order is important)
sa->addSoftmax(sm);
// Pre-train the network (layer-wise)
PROFILE("pre-train",
sa->train(d_data, d_label, trainSize, 2000);
);
// Allocate memory in device for the test data and labels
CUDA_SAFE_CALL(cudaMalloc(&d_testData, visibleSize * testSize * sizeof(float)));
CUDA_SAFE_CALL(cudaMalloc(&d_testLabel, testSize * sizeof(float)));
// Copy test set and label to device memory
CUDA_SAFE_CALL(cudaMemcpy(d_testData, h_testData, visibleSize * testSize * sizeof(float), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_testLabel, h_testLabel, testSize * sizeof(float), cudaMemcpyHostToDevice));
// Calculate the accuracy of the sparse autoencoder before fine-tuning
sa->test(d_testData, d_testLabel, testSize);
// Free the memory allocated for the test data
CUDA_SAFE_CALL(cudaFree(d_testData));
CUDA_SAFE_CALL(cudaFree(d_testLabel));
// Train the network for fine-tuning
PROFILE("finetune",
sa->fineTune(d_data, d_label, trainSize, 2000);
);
// Allocate memory in device for the test data and labels
CUDA_SAFE_CALL(cudaMalloc(&d_testData, visibleSize * testSize * sizeof(float)));
CUDA_SAFE_CALL(cudaMalloc(&d_testLabel, testSize * sizeof(float)));
// Copy test set and label to device memory
CUDA_SAFE_CALL(cudaMemcpy(d_testData, h_testData, visibleSize * testSize * sizeof(float), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_testLabel, h_testLabel, testSize * sizeof(float), cudaMemcpyHostToDevice));
// Calculate the accuracy of the sparse autoencoder after fine-tuning
sa->test(d_testData, d_testLabel, testSize);
// Clean the resources
delete sa;
CUDA_SAFE_CALL(cudaFree(d_testData));
CUDA_SAFE_CALL(cudaFree(d_testLabel));
CUDA_SAFE_CALL(cudaFree(d_data));
CUDA_SAFE_CALL(cudaFree(d_label));
CUDA_SAFE_CALL(cudaDeviceReset());
return 0;
} |
a845be45f0f294568e35b63b5f08e8246512ee83.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "file_io.c"
#include "kCuda.cu"
#include "kmeans.h"
static void usage(char *argv0) {
char help[] =
"Usage: %s -i filename -n num_clusters\n"
" -i filename : file containing data to be clustered\n"
" -n num_clusters: number of clusters (K must > 1)\n";
fprintf(stderr, help, argv0);
exit(-1);
}
int main(int argc, char **argv) {
hipEvent_t event1,event2;
int opt;
int num_clusters = 0;
int num_obj = 0;
int num_coord;
int *membership;
char *filename = NULL;
float **objects;
float **clusters;
float dt_ms;
while ((opt = getopt(argc,argv,"i:n:"))!= -1) {
switch (opt) {
case 'i': filename=optarg;
break;
case 'n': num_clusters = atoi(optarg);
break;
default: usage(argv[0]);
break;
}
}
if (filename == 0 || num_clusters <= 1) usage(argv[0]);
printf("...READING DATA FROM %s...\n",filename);
objects = file_read(filename, &num_obj, &num_coord);
if(objects == NULL) {
printf("ERROR: 3D space was not found.\n");
exit(1);
}
if (num_obj < num_clusters) {
printf("ERROR: number of clusters exeedes number of objects.\n");
free(objects[0]);
free(objects);
exit(1);
}
/* allocate a 2D space for clusters[] (coordinates of cluster centers)
this array should be the same across all processes */
clusters = (float**) malloc(num_clusters * sizeof(float*)); assert(clusters != NULL);
clusters[0] = (float*) malloc(num_clusters * num_coord * sizeof(float)); assert(clusters[0] != NULL);
membership = (int*) malloc(num_obj * sizeof(int)); assert(membership != NULL);
for (int i=1; i<num_clusters; i++)
clusters[i] = clusters[i-1] + num_coord;
//printf("...SELECTING %i INITIAL CLUSTERS...\n",num_clusters);
printf("...COMPUTING...\n");
gpuErrchk(hipEventCreate(&event1));
gpuErrchk(hipEventCreate(&event2));
gpuErrchk(hipEventRecord(event1));
//--------------------------------------------------------------------------------------------------------------------------------------------------------------------
clusters = cuda_kmeans(objects, num_obj, num_clusters, membership);
//--------------------------------------------------------------------------------------------------------------------------------------------------------------------
gpuErrchk(hipEventRecord(event2));
gpuErrchk(hipEventSynchronize(event2));
gpuErrchk(hipDeviceSynchronize());
hipEventElapsedTime(&dt_ms, event1,event2);
printf("...EXEQUTION TIME : %f sec. ...\n", dt_ms/1000);
file_write(filename, num_clusters, num_obj, num_coord, clusters, membership);
free(objects[0]);
free(objects);
free(membership);
free(clusters[0]);
free(clusters);
exit(0);
} | a845be45f0f294568e35b63b5f08e8246512ee83.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "file_io.c"
#include "kCuda.cu"
#include "kmeans.h"
static void usage(char *argv0) {
char help[] =
"Usage: %s -i filename -n num_clusters\n"
" -i filename : file containing data to be clustered\n"
" -n num_clusters: number of clusters (K must > 1)\n";
fprintf(stderr, help, argv0);
exit(-1);
}
int main(int argc, char **argv) {
cudaEvent_t event1,event2;
int opt;
int num_clusters = 0;
int num_obj = 0;
int num_coord;
int *membership;
char *filename = NULL;
float **objects;
float **clusters;
float dt_ms;
while ((opt = getopt(argc,argv,"i:n:"))!= -1) {
switch (opt) {
case 'i': filename=optarg;
break;
case 'n': num_clusters = atoi(optarg);
break;
default: usage(argv[0]);
break;
}
}
if (filename == 0 || num_clusters <= 1) usage(argv[0]);
printf("...READING DATA FROM %s...\n",filename);
objects = file_read(filename, &num_obj, &num_coord);
if(objects == NULL) {
printf("ERROR: 3D space was not found.\n");
exit(1);
}
if (num_obj < num_clusters) {
printf("ERROR: number of clusters exeedes number of objects.\n");
free(objects[0]);
free(objects);
exit(1);
}
/* allocate a 2D space for clusters[] (coordinates of cluster centers)
this array should be the same across all processes */
clusters = (float**) malloc(num_clusters * sizeof(float*)); assert(clusters != NULL);
clusters[0] = (float*) malloc(num_clusters * num_coord * sizeof(float)); assert(clusters[0] != NULL);
membership = (int*) malloc(num_obj * sizeof(int)); assert(membership != NULL);
for (int i=1; i<num_clusters; i++)
clusters[i] = clusters[i-1] + num_coord;
//printf("...SELECTING %i INITIAL CLUSTERS...\n",num_clusters);
printf("...COMPUTING...\n");
gpuErrchk(cudaEventCreate(&event1));
gpuErrchk(cudaEventCreate(&event2));
gpuErrchk(cudaEventRecord(event1));
//--------------------------------------------------------------------------------------------------------------------------------------------------------------------
clusters = cuda_kmeans(objects, num_obj, num_clusters, membership);
//--------------------------------------------------------------------------------------------------------------------------------------------------------------------
gpuErrchk(cudaEventRecord(event2));
gpuErrchk(cudaEventSynchronize(event2));
gpuErrchk(cudaDeviceSynchronize());
cudaEventElapsedTime(&dt_ms, event1,event2);
printf("...EXEQUTION TIME : %f sec. ...\n", dt_ms/1000);
file_write(filename, num_clusters, num_obj, num_coord, clusters, membership);
free(objects[0]);
free(objects);
free(membership);
free(clusters[0]);
free(clusters);
exit(0);
} |
892c7f6a1e80c7d7fa990bc56a9d73c4b9c2fa61.hip | // !!! This is a file automatically generated by hipify!!!
/*----------------------------------------------------------------------------------*
* Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, *
* Sergio Losilla, Elias Toivanen, Jonas Juselius *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in all*
* copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
*----------------------------------------------------------------------------------*/
#include "grid.h"
#include "streamcontainer.h"
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include "memory_leak_operators.h"
#define X_ 0
#define Y_ 1
#define Z_ 2
__host__ inline void check_grid_errors(const char *filename, const int line_number) {
#ifdef DEBUG_CUDA
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if(error != hipSuccess)
{
printf("CUDA error at %s:%i: %s\n", filename, line_number, hipGetErrorString(error));
exit(-1);
}
#endif
}
/***************************************************
* Grid1D implementation *
* *
***************************************************/
Grid1D::Grid1D() {
}
Grid1D::Grid1D(int ncell, int nlip, double r_max, double *h, double *d, double *gridpoints, double *lip, double *derivative_lip, double *lower_derivative_lip, double *base_integrals, StreamContainer *streamContainer, bool init_device_memory) {
this->ncell = ncell;
this->nlip = nlip;
this->r_max = r_max;
this->streamContainer = streamContainer;
// allocate space for device pointers
this->device_h = new double*[streamContainer->getNumberOfDevices()];
this->device_d = new double*[streamContainer->getNumberOfDevices()];
this->device_lip = new double*[streamContainer->getNumberOfDevices()];
this->device_derivative_lip = new double*[streamContainer->getNumberOfDevices()];
this->device_lower_derivative_lip = new double*[streamContainer->getNumberOfDevices()];
this->device_gridpoints = new double*[streamContainer->getNumberOfDevices()];
this->device_integrals = new double*[streamContainer->getNumberOfDevices()];
this->device_copies = new Grid1D *[streamContainer->getNumberOfDevices()];
// allocate the memory at device
if (init_device_memory) {
for (int device = 0; device < streamContainer->getNumberOfDevices(); device ++) {
streamContainer->setDevice(device);
size_t sz = sizeof(double)*this->ncell;
hipMalloc(&this->device_h[device], sz);
hipMalloc(&this->device_d[device], sz);
sz = sizeof(double)*(this->ncell * (this->nlip-1) +1);
hipMalloc(&this->device_gridpoints[device], sz);
hipMalloc(&this->device_integrals[device], sz);
sz=sizeof(double)*(nlip)*(nlip);
hipMalloc(&this->device_lip[device], sz);
sz=sizeof(double)*(nlip)*(nlip-1);
hipMalloc(&this->device_derivative_lip[device], sz);
sz=sizeof(double)*(nlip-2)*(nlip-1);
hipMalloc(&this->device_lower_derivative_lip[device], sz);
this->h = this->device_h[device];
this->d = this->device_d[device];
this->gridpoints = this->device_gridpoints[device];
this->integrals = this->device_integrals[device];
this->lip = this->device_lip[device];
this->derivative_lip = this->device_derivative_lip[device];
this->lower_derivative_lip = this->device_lower_derivative_lip[device];
hipMalloc(&this->device_copies[device], sizeof(Grid1D));
hipMemcpy(this->device_copies[device], this, sizeof(Grid1D), hipMemcpyHostToDevice);
}
// set the host variables and register them for faster data transfer
hipHostMalloc((void **)&this->h, sizeof(double)*this->ncell, hipHostMallocPortable);
hipHostMalloc((void **)&this->d, sizeof(double)*this->ncell, hipHostMallocPortable);
hipHostMalloc((void **)&this->lip, sizeof(double)*(nlip)*(nlip), hipHostMallocPortable);
hipHostMalloc((void **)&this->derivative_lip, sizeof(double)*(nlip)*(nlip-1), hipHostMallocPortable);
hipHostMalloc((void **)&this->lower_derivative_lip, sizeof(double)*(nlip-1)*(nlip-2), hipHostMallocPortable);
hipHostMalloc((void **)&this->base_integrals, sizeof(double)*(nlip), hipHostMallocPortable);
hipHostMalloc((void **)&this->gridpoints, sizeof(double)*((nlip-1)*(ncell)+1), hipHostMallocPortable);
for (int i = 0; i < this->ncell; i++) {
this->h[i] = h[i];
this->d[i] = d[i];
}
for (int i = 0; i < nlip*nlip; i++) {
this->lip[i] = lip[i];
}
for (int i = 0; i < (nlip)*(nlip-1); i++) {
this->derivative_lip[i] = derivative_lip[i];
}
for (int i = 0; i < (nlip-1)*(nlip-2); i++) {
this->lower_derivative_lip[i] = lower_derivative_lip[i];
}
for (int i = 0; i < nlip; i++) {
this->base_integrals[i] = base_integrals[i];
}
for (int i = 0; i < (nlip-1)*(ncell)+1; i++) {
this->gridpoints[i] = gridpoints[i];
}
}
else {
this->h = h;
this->d = d;
this->lip = lip;
this->derivative_lip = derivative_lip;
this->lower_derivative_lip = lower_derivative_lip;
this->base_integrals = base_integrals;
this->gridpoints = gridpoints;
//this->integrals = this->calculateIntegrals();
}
this->integrals = this->calculateIntegrals();
// upload the memory to device, if there is any memory allocated
if (init_device_memory) {
this->upload();
}
}
void Grid1D::upload() {
double *device_h, *device_d, *device_gridpoints, *device_integrals, *host_h, *host_d, *host_gridpoints, *host_integrals;
int cells_per_stream, gridpoints_per_stream;
for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
streamContainer->setDevice(device);
// get the preallocated device pointers
device_h = this->device_h[device];
device_d = this->device_d[device];
device_gridpoints = this->device_gridpoints[device];
device_integrals = this->device_integrals[device];
// NOTE: for all devices the first pointer points to the first value of each array
host_h = this->h;
host_d = this->d;
host_gridpoints = this->gridpoints;
host_integrals = this->integrals;
// upload the lip to the device
hipMemcpyAsync(this->device_lip[device], this->lip, sizeof(double)*(this->nlip)*(this->nlip), hipMemcpyHostToDevice, *this->streamContainer->getStream(device, 0));
hipMemcpyAsync(this->device_derivative_lip[device], this->derivative_lip, sizeof(double)*(this->nlip)*(this->nlip-1), hipMemcpyHostToDevice, *this->streamContainer->getStream(device, 0));
hipMemcpyAsync(this->device_lower_derivative_lip[device], this->lower_derivative_lip, sizeof(double)*(this->nlip-2)*(this->nlip-1), hipMemcpyHostToDevice, *this->streamContainer->getStream(device, 0));
for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream++) {
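// Split the cells and grid points across streams; the first (n % streams) streams take one extra element.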
cells_per_stream = (this->ncell / this->streamContainer->getStreamsPerDevice()) +
((this->ncell % this->streamContainer->getStreamsPerDevice()) > stream);
gridpoints_per_stream = ((this->ncell*(this->nlip-1)+1) / this->streamContainer->getStreamsPerDevice()) +
(((this->ncell*(this->nlip-1)+1) % this->streamContainer->getStreamsPerDevice()) > stream);
// upload the data to device
hipMemcpyAsync(device_h, host_h, sizeof(double)*cells_per_stream, hipMemcpyHostToDevice, *this->streamContainer->getStream(device, stream));
hipMemcpyAsync(device_d, host_d, sizeof(double)*cells_per_stream, hipMemcpyHostToDevice, *this->streamContainer->getStream(device, stream));
hipMemcpyAsync(device_gridpoints, host_gridpoints, sizeof(double)*gridpoints_per_stream, hipMemcpyHostToDevice, *this->streamContainer->getStream(device, stream));
hipMemcpyAsync(device_integrals, host_integrals, sizeof(double)*gridpoints_per_stream, hipMemcpyHostToDevice, *this->streamContainer->getStream(device, stream));
// add to the pointers
device_h += cells_per_stream;
device_d += cells_per_stream;
device_gridpoints += gridpoints_per_stream;
device_integrals += gridpoints_per_stream;
host_h += cells_per_stream;
host_d += cells_per_stream;
host_gridpoints += gridpoints_per_stream;
host_integrals += gridpoints_per_stream;
}
}
// synchronize the host with all devices
for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
this->streamContainer->setDevice(device);
hipDeviceSynchronize();
}
}
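// Builds a subgrid covering cells [start_cell_index, end_cell_index): the per-device
// arrays are shared with the parent grid and only the integrals are allocated anew.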
Grid1D *Grid1D::getSubGrid(int start_cell_index, int end_cell_index, StreamContainer *streamContainer) {
Grid1D *subgrid = new Grid1D(end_cell_index-start_cell_index, this->nlip, this->r_max, &this->h[start_cell_index],
&this->d[start_cell_index], &this->gridpoints[start_cell_index*(this->nlip-1)],
this->lip, this->derivative_lip, this->lower_derivative_lip, this->base_integrals, streamContainer, false);
subgrid->is_subgrid = true;
double *host_integrals = subgrid->integrals;
for (int device = 0; device < streamContainer->getNumberOfDevices(); device ++) {
streamContainer->setDevice(device);
// get the order number of current device in the context of the streamContainer of 'this'
int device_number = streamContainer->getDeviceNumber(device);
int device_order_number = this->streamContainer->getDeviceOrderNumber(device_number);
// get the pointers to the device arrays
subgrid->device_h[device] = &this->device_h[device_order_number][start_cell_index];
subgrid->device_d[device] = &this->device_d[device_order_number][start_cell_index];
subgrid->device_gridpoints[device] = &this->device_gridpoints[device_order_number][start_cell_index*(subgrid->nlip-1)];
subgrid->device_lip[device] = this->device_lip[device_order_number];
subgrid->device_derivative_lip[device] = this->device_derivative_lip[device_order_number];
subgrid->device_lower_derivative_lip[device] = this->device_lower_derivative_lip[device_order_number];
// allocate & upload the integrals (cannot be taken from this)
size_t sz = sizeof(double)*(subgrid->ncell * (subgrid->nlip-1) +1);
hipMalloc(&subgrid->device_integrals[device], sz);
hipMemcpy(subgrid->device_integrals[device], host_integrals, sz, hipMemcpyHostToDevice);
check_grid_errors(__FILE__, __LINE__);
// set the pointers to the device copy
subgrid->gridpoints = subgrid->device_gridpoints[device];
subgrid->integrals = subgrid->device_integrals[device];
subgrid->h = subgrid->device_h[device];
subgrid->d = subgrid->device_d[device];
subgrid->lip = subgrid->device_lip[device];
subgrid->derivative_lip = subgrid->device_derivative_lip[device];
subgrid->lower_derivative_lip = subgrid->device_lower_derivative_lip[device];
hipMalloc(&subgrid->device_copies[device], sizeof(Grid1D));
hipMemcpy(subgrid->device_copies[device], subgrid, sizeof(Grid1D), hipMemcpyHostToDevice);
}
// set the pointers to the host arrays
subgrid->h = &this->h[start_cell_index];
subgrid->d = &this->d[start_cell_index];
subgrid->gridpoints = &this->gridpoints[start_cell_index*(subgrid->nlip-1)];
subgrid->integrals = host_integrals;
subgrid->lip = this->lip;
return subgrid;
}
double **Grid1D::getDeviceIntegrals() {
return this->device_integrals;
}
double *Grid1D::getDeviceIntegrals(int device) {
return this->device_integrals[device];
}
int Grid1D::getShape() {
return this->ncell * (this->nlip-1) + 1;
}
double *Grid1D::getIntegrals(int first_cell) {
return &this->integrals[first_cell*(this->nlip-1)];
}
/*
* Calculates the values needed for integration of a vector with the shape of this grid.
*
* NOTE: this is a host function, meaning that it does not use anything on the GPUs
* NOTE: first_cell and last_cell must be in C indexing (starting from 0)
*/
double *Grid1D::calculateIntegrals(int first_cell, int last_cell) {
if (last_cell == -1) last_cell = this->ncell-1;
// init the result array;
double *result = new double[(last_cell-first_cell+1)*(this->nlip-1) +1];
// init it to zero
for (int i = 0; i < (last_cell-first_cell+1)*(this->nlip-1) +1; i++) {
result[i] = 0.0;
}
// calculate the values
for (int i = 0; i < last_cell-first_cell+1; i++) {
int icell = first_cell +i;
for (int ilip = 0; ilip < this->nlip; ilip ++) {
result[icell*(this->nlip-1) + ilip] += this->base_integrals[ilip] * this->h[icell];
}
}
return result;
}
// destroy all cuda related objects
void Grid1D::destroy() {
check_errors_and_lock(__FILE__, __LINE__);
// determine whether this is a subgrid, if not delete everything normally
if (!this->is_subgrid) {
for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device++) {
this->streamContainer->setDevice(device);
hipFree(this->device_h[device]);
hipFree(this->device_d[device]);
hipFree(this->device_gridpoints[device]);
hipFree(this->device_lip[device]);
hipFree(this->device_derivative_lip[device]);
hipFree(this->device_lower_derivative_lip[device]);
}
hipHostFree(this->h);
hipHostFree(this->d);
hipHostFree(this->lip);
hipHostFree(this->derivative_lip);
hipHostFree(this->lower_derivative_lip);
hipHostFree(this->base_integrals);
hipHostFree(this->gridpoints);
}
// if this is a subgrid, delete only the device copy and integrals
for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device++) {
this->streamContainer->setDevice(device);
hipFree(this->device_integrals[device]);
hipFree(this->device_copies[device]);
check_errors_and_lock(__FILE__, __LINE__);
}
delete[] this->device_integrals;
delete[] this->device_gridpoints;
delete[] this->device_copies;
delete[] this->device_h;
delete[] this->device_d;
delete[] this->device_lip;
delete[] this->device_derivative_lip;
delete[] this->device_lower_derivative_lip;
delete[] this->integrals;
check_grid_errors(__FILE__, __LINE__);
check_errors_and_lock(__FILE__, __LINE__);
}
/***************************************************
* Grid3D implementation *
* *
***************************************************/
Grid3D::Grid3D() {
}
Grid3D::Grid3D(Grid1D **axis, StreamContainer *streamContainer) {
this->streamContainer = streamContainer;
Grid1D **temp_axis;
// set the shape parameter
this->shape[0] = axis[0]->ncell * (axis[0]->nlip-1) +1;
this->shape[1] = axis[1]->ncell * (axis[0]->nlip-1) +1;
this->shape[2] = axis[2]->ncell * (axis[0]->nlip-1) +1;
// allocate memory for the pointers of arrays
this->device_copies = new Grid3D *[streamContainer->getNumberOfDevices()];
temp_axis = new Grid1D *[3];
this->device_axis = new Grid1D **[streamContainer->getNumberOfDevices()];
for (int device = 0; device < streamContainer->getNumberOfDevices(); device ++) {
streamContainer->setDevice(device);
// set the device axis
temp_axis[0] = axis[0]->device_copies[device];
temp_axis[1] = axis[1]->device_copies[device];
temp_axis[2] = axis[2]->device_copies[device];
// copy the device axis to device
hipMalloc(&this->device_axis[device], sizeof(Grid1D *)*3);
hipMemcpy(this->device_axis[device], temp_axis, sizeof(Grid1D *) * 3, hipMemcpyHostToDevice);
this->axis = this->device_axis[device];
// allocate the device memory and copy
hipMalloc(&this->device_copies[device], sizeof(Grid3D));
hipMemcpy(this->device_copies[device], this, sizeof(Grid3D), hipMemcpyHostToDevice);
}
temp_axis[0] = axis[0];
temp_axis[1] = axis[1];
temp_axis[2] = axis[2];
// set the host pointers to the returned object
this->axis = temp_axis;
}
void Grid3D::destroy() {
// destroy the Grid1D objects owned by this object
this->axis[0]->destroy();
check_errors_and_lock(__FILE__, __LINE__);
delete this->axis[0];
this->axis[1]->destroy();
check_errors_and_lock(__FILE__, __LINE__);
delete this->axis[1];
this->axis[2]->destroy();
check_errors_and_lock(__FILE__, __LINE__);
delete this->axis[2];
// free the device_copies
for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
this->streamContainer->setDevice(device);
check_errors_and_lock(__FILE__, __LINE__);
hipFree(this->device_copies[device]);
check_errors_and_lock(__FILE__, __LINE__);
hipFree(this->device_axis[device]);
check_errors_and_lock(__FILE__, __LINE__);
}
check_errors_and_lock(__FILE__, __LINE__);
// free the host parameters
delete[] this->device_copies;
delete[] this->device_axis;
delete[] this->axis;
check_grid_errors(__FILE__, __LINE__);
}
Grid3D *Grid3D::getSubGrid(int start_cell_indices[3], int end_cell_indices[3], StreamContainer *streamContainer) {
Grid3D *subgrid = new Grid3D();
subgrid->streamContainer = streamContainer;
Grid1D **temp_axis;
// allocate memory for the pointers of arrays
subgrid->device_copies = new Grid3D*[streamContainer->getNumberOfDevices()];
temp_axis = new Grid1D*[3];
subgrid->device_axis = new Grid1D **[streamContainer->getNumberOfDevices()];
// mark this grid to be a subgrid
subgrid->is_subgrid = true;
Grid1D **axis;
axis = new Grid1D *[3];
// init the axis
axis[0] = this->axis[0]->getSubGrid(start_cell_indices[0], end_cell_indices[0], streamContainer);
axis[1] = this->axis[1]->getSubGrid(start_cell_indices[1], end_cell_indices[1], streamContainer);
axis[2] = this->axis[2]->getSubGrid(start_cell_indices[2], end_cell_indices[2], streamContainer);
// set the shape parameter
subgrid->shape[0] = axis[0]->ncell * (axis[0]->nlip-1) +1;
subgrid->shape[1] = axis[1]->ncell * (axis[1]->nlip-1) +1;
subgrid->shape[2] = axis[2]->ncell * (axis[2]->nlip-1) +1;
for (int device = 0; device < streamContainer->getNumberOfDevices(); device ++) {
streamContainer->setDevice(device);
// set the device copies to the axis pointers
temp_axis[0] = axis[0]->device_copies[device];
temp_axis[1] = axis[1]->device_copies[device];
temp_axis[2] = axis[2]->device_copies[device];
// copy the device axis to device
hipMalloc(&subgrid->device_axis[device], sizeof(Grid1D *)*3);
hipMemcpy( subgrid->device_axis[device], temp_axis, sizeof(Grid1D *) * 3, hipMemcpyHostToDevice);
subgrid->axis = subgrid->device_axis[device];
// allocate the device memory and copy
hipMalloc(&subgrid->device_copies[device], sizeof(Grid3D));
check_grid_errors(__FILE__, __LINE__);
hipMemcpy(subgrid->device_copies[device], subgrid, sizeof(Grid3D), hipMemcpyHostToDevice);
check_grid_errors(__FILE__, __LINE__);
}
temp_axis[0] = axis[0];
temp_axis[1] = axis[1];
temp_axis[2] = axis[2];
// set the host pointers to the returned object
subgrid->axis = temp_axis;
delete[] axis;
return subgrid;
}
int *Grid3D::getShape() {
return this->shape;
}
int Grid3D::getShape(int axis) {
return this->shape[axis];
}
/***************************************************
* Fortran interfaces *
* *
***************************************************/
extern "C" Grid1D *grid1d_init_cuda(int ncell, int nlip, double r_max, double *h, double *d, double *gridpoints, double *lip, double *derivative_lip, double *lower_derivative_lip, double *base_integrals, StreamContainer *streamContainer) {
Grid1D *new_grid = new Grid1D(ncell, nlip, r_max, h, d, gridpoints, lip, derivative_lip, lower_derivative_lip, base_integrals, streamContainer);
return new_grid;
}
extern "C" void grid1d_upload_cuda(Grid1D *grid) {
grid->upload();
}
extern "C" void grid1d_destroy_cuda(Grid1D *grid) {
grid->destroy();
delete grid;
}
extern "C" Grid3D *grid3d_init_cuda(Grid1D **axis, StreamContainer *streamContainer) {
Grid3D *new_grid = new Grid3D(axis, streamContainer);
return new_grid;
}
extern "C" void grid3d_destroy_cuda(Grid3D *grid) {
grid->destroy();
delete grid;
} | 892c7f6a1e80c7d7fa990bc56a9d73c4b9c2fa61.cu | /*----------------------------------------------------------------------------------*
* Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, *
* Sergio Losilla, Elias Toivanen, Jonas Juselius *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in all*
* copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
*----------------------------------------------------------------------------------*/
#include "grid.h"
#include "streamcontainer.h"
#include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include "memory_leak_operators.h"
#define X_ 0
#define Y_ 1
#define Z_ 2
__host__ inline void check_grid_errors(const char *filename, const int line_number) {
#ifdef DEBUG_CUDA
cudaThreadSynchronize();
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("CUDA error at %s:%i: %s\n", filename, line_number, cudaGetErrorString(error));
exit(-1);
}
#endif
}
/***************************************************
* Grid1D implementation *
* *
***************************************************/
Grid1D::Grid1D() {
}
Grid1D::Grid1D(int ncell, int nlip, double r_max, double *h, double *d, double *gridpoints, double *lip, double *derivative_lip, double *lower_derivative_lip, double *base_integrals, StreamContainer *streamContainer, bool init_device_memory) {
this->ncell = ncell;
this->nlip = nlip;
this->r_max = r_max;
this->streamContainer = streamContainer;
// allocate space for device pointers
this->device_h = new double*[streamContainer->getNumberOfDevices()];
this->device_d = new double*[streamContainer->getNumberOfDevices()];
this->device_lip = new double*[streamContainer->getNumberOfDevices()];
this->device_derivative_lip = new double*[streamContainer->getNumberOfDevices()];
this->device_lower_derivative_lip = new double*[streamContainer->getNumberOfDevices()];
this->device_gridpoints = new double*[streamContainer->getNumberOfDevices()];
this->device_integrals = new double*[streamContainer->getNumberOfDevices()];
this->device_copies = new Grid1D *[streamContainer->getNumberOfDevices()];
// allocate the memory at device
if (init_device_memory) {
for (int device = 0; device < streamContainer->getNumberOfDevices(); device ++) {
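// Every device gets its own copy of the 1-D grid arrays plus a device-resident Grid1D object.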
streamContainer->setDevice(device);
size_t sz = sizeof(double)*this->ncell;
cudaMalloc(&this->device_h[device], sz);
cudaMalloc(&this->device_d[device], sz);
sz = sizeof(double)*(this->ncell * (this->nlip-1) +1);
cudaMalloc(&this->device_gridpoints[device], sz);
cudaMalloc(&this->device_integrals[device], sz);
sz=sizeof(double)*(nlip)*(nlip);
cudaMalloc(&this->device_lip[device], sz);
sz=sizeof(double)*(nlip)*(nlip-1);
cudaMalloc(&this->device_derivative_lip[device], sz);
sz=sizeof(double)*(nlip-2)*(nlip-1);
cudaMalloc(&this->device_lower_derivative_lip[device], sz);
this->h = this->device_h[device];
this->d = this->device_d[device];
this->gridpoints = this->device_gridpoints[device];
this->integrals = this->device_integrals[device];
this->lip = this->device_lip[device];
this->derivative_lip = this->device_derivative_lip[device];
this->lower_derivative_lip = this->device_lower_derivative_lip[device];
cudaMalloc(&this->device_copies[device], sizeof(Grid1D));
cudaMemcpy(this->device_copies[device], this, sizeof(Grid1D), cudaMemcpyHostToDevice);
}
// set the host variables and register them for faster data transfer
cudaHostAlloc((void **)&this->h, sizeof(double)*this->ncell, cudaHostAllocPortable);
cudaHostAlloc((void **)&this->d, sizeof(double)*this->ncell, cudaHostAllocPortable);
cudaHostAlloc((void **)&this->lip, sizeof(double)*(nlip)*(nlip), cudaHostAllocPortable);
cudaHostAlloc((void **)&this->derivative_lip, sizeof(double)*(nlip)*(nlip-1), cudaHostAllocPortable);
cudaHostAlloc((void **)&this->lower_derivative_lip, sizeof(double)*(nlip-1)*(nlip-2), cudaHostAllocPortable);
cudaHostAlloc((void **)&this->base_integrals, sizeof(double)*(nlip), cudaHostAllocPortable);
cudaHostAlloc((void **)&this->gridpoints, sizeof(double)*((nlip-1)*(ncell)+1), cudaHostAllocPortable);
for (int i = 0; i < this->ncell; i++) {
this->h[i] = h[i];
this->d[i] = d[i];
}
for (int i = 0; i < nlip*nlip; i++) {
this->lip[i] = lip[i];
}
for (int i = 0; i < (nlip)*(nlip-1); i++) {
this->derivative_lip[i] = derivative_lip[i];
}
for (int i = 0; i < (nlip-1)*(nlip-2); i++) {
this->lower_derivative_lip[i] = lower_derivative_lip[i];
}
for (int i = 0; i < nlip; i++) {
this->base_integrals[i] = base_integrals[i];
}
for (int i = 0; i < (nlip-1)*(ncell)+1; i++) {
this->gridpoints[i] = gridpoints[i];
}
}
else {
this->h = h;
this->d = d;
this->lip = lip;
this->derivative_lip = derivative_lip;
this->lower_derivative_lip = lower_derivative_lip;
this->base_integrals = base_integrals;
this->gridpoints = gridpoints;
//this->integrals = this->calculateIntegrals();
}
this->integrals = this->calculateIntegrals();
// upload the memory to device, if there is any memory allocated
if (init_device_memory) {
this->upload();
}
}
void Grid1D::upload() {
double *device_h, *device_d, *device_gridpoints, *device_integrals, *host_h, *host_d, *host_gridpoints, *host_integrals;
int cells_per_stream, gridpoints_per_stream;
for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
streamContainer->setDevice(device);
// get the preallocated device pointers
device_h = this->device_h[device];
device_d = this->device_d[device];
device_gridpoints = this->device_gridpoints[device];
device_integrals = this->device_integrals[device];
// NOTE: for all devices the first pointer points to the first value of each array
host_h = this->h;
host_d = this->d;
host_gridpoints = this->gridpoints;
host_integrals = this->integrals;
// upload the lip to the device
cudaMemcpyAsync(this->device_lip[device], this->lip, sizeof(double)*(this->nlip)*(this->nlip), cudaMemcpyHostToDevice, *this->streamContainer->getStream(device, 0));
cudaMemcpyAsync(this->device_derivative_lip[device], this->derivative_lip, sizeof(double)*(this->nlip)*(this->nlip-1), cudaMemcpyHostToDevice, *this->streamContainer->getStream(device, 0));
cudaMemcpyAsync(this->device_lower_derivative_lip[device], this->lower_derivative_lip, sizeof(double)*(this->nlip-2)*(this->nlip-1), cudaMemcpyHostToDevice, *this->streamContainer->getStream(device, 0));
for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream++) {
cells_per_stream = (this->ncell / this->streamContainer->getStreamsPerDevice()) +
((this->ncell % this->streamContainer->getStreamsPerDevice()) > stream);
gridpoints_per_stream = ((this->ncell*(this->nlip-1)+1) / this->streamContainer->getStreamsPerDevice()) +
(((this->ncell*(this->nlip-1)+1) % this->streamContainer->getStreamsPerDevice()) > stream);
// upload the data to device
cudaMemcpyAsync(device_h, host_h, sizeof(double)*cells_per_stream, cudaMemcpyHostToDevice, *this->streamContainer->getStream(device, stream));
cudaMemcpyAsync(device_d, host_d, sizeof(double)*cells_per_stream, cudaMemcpyHostToDevice, *this->streamContainer->getStream(device, stream));
cudaMemcpyAsync(device_gridpoints, host_gridpoints, sizeof(double)*gridpoints_per_stream, cudaMemcpyHostToDevice, *this->streamContainer->getStream(device, stream));
cudaMemcpyAsync(device_integrals, host_integrals, sizeof(double)*gridpoints_per_stream, cudaMemcpyHostToDevice, *this->streamContainer->getStream(device, stream));
// add to the pointers
device_h += cells_per_stream;
device_d += cells_per_stream;
device_gridpoints += gridpoints_per_stream;
device_integrals += gridpoints_per_stream;
host_h += cells_per_stream;
host_d += cells_per_stream;
host_gridpoints += gridpoints_per_stream;
host_integrals += gridpoints_per_stream;
}
}
    // synchronize the host with all devices
for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
this->streamContainer->setDevice(device);
cudaDeviceSynchronize();
}
}
Grid1D *Grid1D::getSubGrid(int start_cell_index, int end_cell_index, StreamContainer *streamContainer) {
Grid1D *subgrid = new Grid1D(end_cell_index-start_cell_index, this->nlip, this->r_max, &this->h[start_cell_index],
&this->d[start_cell_index], &this->gridpoints[start_cell_index*(this->nlip-1)],
this->lip, this->derivative_lip, this->lower_derivative_lip, this->base_integrals, streamContainer, false);
subgrid->is_subgrid = true;
double *host_integrals = subgrid->integrals;
for (int device = 0; device < streamContainer->getNumberOfDevices(); device ++) {
streamContainer->setDevice(device);
// get the order number of current device in the context of the streamContainer of 'this'
int device_number = streamContainer->getDeviceNumber(device);
int device_order_number = this->streamContainer->getDeviceOrderNumber(device_number);
// get the pointers to the device arrays
subgrid->device_h[device] = &this->device_h[device_order_number][start_cell_index];
subgrid->device_d[device] = &this->device_d[device_order_number][start_cell_index];
subgrid->device_gridpoints[device] = &this->device_gridpoints[device_order_number][start_cell_index*(subgrid->nlip-1)];
subgrid->device_lip[device] = this->device_lip[device_order_number];
subgrid->device_derivative_lip[device] = this->device_derivative_lip[device_order_number];
subgrid->device_lower_derivative_lip[device] = this->device_lower_derivative_lip[device_order_number];
// allocate & upload the integrals (cannot be taken from this)
size_t sz = sizeof(double)*(subgrid->ncell * (subgrid->nlip-1) +1);
cudaMalloc(&subgrid->device_integrals[device], sz);
cudaMemcpy(subgrid->device_integrals[device], host_integrals, sz, cudaMemcpyHostToDevice);
check_grid_errors(__FILE__, __LINE__);
// set the pointers to the device copy
subgrid->gridpoints = subgrid->device_gridpoints[device];
subgrid->integrals = subgrid->device_integrals[device];
subgrid->h = subgrid->device_h[device];
subgrid->d = subgrid->device_d[device];
subgrid->lip = subgrid->device_lip[device];
subgrid->derivative_lip = subgrid->device_derivative_lip[device];
subgrid->lower_derivative_lip = subgrid->device_lower_derivative_lip[device];
cudaMalloc(&subgrid->device_copies[device], sizeof(Grid1D));
cudaMemcpy(subgrid->device_copies[device], subgrid, sizeof(Grid1D), cudaMemcpyHostToDevice);
}
// set the pointers to the host arrays
subgrid->h = &this->h[start_cell_index];
subgrid->d = &this->d[start_cell_index];
subgrid->gridpoints = &this->gridpoints[start_cell_index*(subgrid->nlip-1)];
subgrid->integrals = host_integrals;
subgrid->lip = this->lip;
return subgrid;
}
double **Grid1D::getDeviceIntegrals() {
return this->device_integrals;
}
double *Grid1D::getDeviceIntegrals(int device) {
return this->device_integrals[device];
}
int Grid1D::getShape() {
return this->ncell * (this->nlip-1) + 1;
}
double *Grid1D::getIntegrals(int first_cell) {
return &this->integrals[first_cell*(this->nlip-1)];
}
/*
     * Calculates the values needed for integration of a vector with the shape of this grid.
*
* NOTE: this is a host function, meaning that it does not use anything at gpus
* NOTE: first_cell and last_cell must be in C indexing (starting from 0)
*/
double *Grid1D::calculateIntegrals(int first_cell, int last_cell) {
if (last_cell == -1) last_cell = this->ncell-1;
// init the result array;
double *result = new double[(last_cell-first_cell+1)*(this->nlip-1) +1];
// init it to zero
for (int i = 0; i < (last_cell-first_cell+1)*(this->nlip-1) +1; i++) {
result[i] = 0.0;
}
// calculate the values
for (int i = 0; i < last_cell-first_cell+1; i++) {
int icell = first_cell +i;
for (int ilip = 0; ilip < this->nlip; ilip ++) {
result[icell*(this->nlip-1) + ilip] += this->base_integrals[ilip] * this->h[icell];
}
}
return result;
}
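/*
 * A minimal host-side sketch of how the weights computed above are meant to be
 * used (the function name below is illustrative, not part of the original API):
 * the integral of a function sampled on this grid is approximated by the dot
 * product of its point values with the weight array.
 */
static inline double integrate_with_weights_sketch(const double *values, const double *weights, int npoints) {
    double sum = 0.0;
    for (int i = 0; i < npoints; i++) {
        sum += values[i] * weights[i];
    }
    return sum;
}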
// destroy all cuda related objects
void Grid1D::destroy() {
check_errors_and_lock(__FILE__, __LINE__);
// determine whether this is a subgrid, if not delete everything normally
if (!this->is_subgrid) {
for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device++) {
this->streamContainer->setDevice(device);
cudaFree(this->device_h[device]);
cudaFree(this->device_d[device]);
cudaFree(this->device_gridpoints[device]);
cudaFree(this->device_lip[device]);
cudaFree(this->device_derivative_lip[device]);
cudaFree(this->device_lower_derivative_lip[device]);
}
cudaFreeHost(this->h);
cudaFreeHost(this->d);
cudaFreeHost(this->lip);
cudaFreeHost(this->derivative_lip);
cudaFreeHost(this->lower_derivative_lip);
cudaFreeHost(this->base_integrals);
cudaFreeHost(this->gridpoints);
}
    // if it is a subgrid, delete only the device copy and integrals
for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device++) {
this->streamContainer->setDevice(device);
cudaFree(this->device_integrals[device]);
cudaFree(this->device_copies[device]);
check_errors_and_lock(__FILE__, __LINE__);
}
delete[] this->device_integrals;
delete[] this->device_gridpoints;
delete[] this->device_copies;
delete[] this->device_h;
delete[] this->device_d;
delete[] this->device_lip;
delete[] this->device_derivative_lip;
delete[] this->device_lower_derivative_lip;
delete[] this->integrals;
check_grid_errors(__FILE__, __LINE__);
check_errors_and_lock(__FILE__, __LINE__);
}
/***************************************************
* Grid3D implementation *
* *
***************************************************/
Grid3D::Grid3D() {
}
Grid3D::Grid3D(Grid1D **axis, StreamContainer *streamContainer) {
this->streamContainer = streamContainer;
Grid1D **temp_axis;
// set the shape parameter
this->shape[0] = axis[0]->ncell * (axis[0]->nlip-1) +1;
this->shape[1] = axis[1]->ncell * (axis[0]->nlip-1) +1;
this->shape[2] = axis[2]->ncell * (axis[0]->nlip-1) +1;
// allocate memory for the pointers of arrays
this->device_copies = new Grid3D *[streamContainer->getNumberOfDevices()];
temp_axis = new Grid1D *[3];
this->device_axis = new Grid1D **[streamContainer->getNumberOfDevices()];
for (int device = 0; device < streamContainer->getNumberOfDevices(); device ++) {
streamContainer->setDevice(device);
// set the device axis
temp_axis[0] = axis[0]->device_copies[device];
temp_axis[1] = axis[1]->device_copies[device];
temp_axis[2] = axis[2]->device_copies[device];
// copy the device axis to device
cudaMalloc(&this->device_axis[device], sizeof(Grid1D *)*3);
cudaMemcpy(this->device_axis[device], temp_axis, sizeof(Grid1D *) * 3, cudaMemcpyHostToDevice);
this->axis = this->device_axis[device];
// allocate the device memory and copy
cudaMalloc(&this->device_copies[device], sizeof(Grid3D));
cudaMemcpy(this->device_copies[device], this, sizeof(Grid3D), cudaMemcpyHostToDevice);
}
temp_axis[0] = axis[0];
temp_axis[1] = axis[1];
temp_axis[2] = axis[2];
// set the host pointers to the returned object
this->axis = temp_axis;
}
void Grid3D::destroy() {
// destroy the Grid1D objects owned by this object
this->axis[0]->destroy();
check_errors_and_lock(__FILE__, __LINE__);
delete this->axis[0];
this->axis[1]->destroy();
check_errors_and_lock(__FILE__, __LINE__);
delete this->axis[1];
this->axis[2]->destroy();
check_errors_and_lock(__FILE__, __LINE__);
delete this->axis[2];
// free the device_copies
for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
this->streamContainer->setDevice(device);
check_errors_and_lock(__FILE__, __LINE__);
cudaFree(this->device_copies[device]);
check_errors_and_lock(__FILE__, __LINE__);
cudaFree(this->device_axis[device]);
check_errors_and_lock(__FILE__, __LINE__);
}
check_errors_and_lock(__FILE__, __LINE__);
// free the host parameters
delete[] this->device_copies;
delete[] this->device_axis;
delete[] this->axis;
check_grid_errors(__FILE__, __LINE__);
}
Grid3D *Grid3D::getSubGrid(int start_cell_indices[3], int end_cell_indices[3], StreamContainer *streamContainer) {
Grid3D *subgrid = new Grid3D();
subgrid->streamContainer = streamContainer;
Grid1D **temp_axis;
// allocate memory for the pointers of arrays
subgrid->device_copies = new Grid3D*[streamContainer->getNumberOfDevices()];
temp_axis = new Grid1D*[3];
subgrid->device_axis = new Grid1D **[streamContainer->getNumberOfDevices()];
// mark this grid to be a subgrid
subgrid->is_subgrid = true;
Grid1D **axis;
axis = new Grid1D *[3];
// init the axis
axis[0] = this->axis[0]->getSubGrid(start_cell_indices[0], end_cell_indices[0], streamContainer);
axis[1] = this->axis[1]->getSubGrid(start_cell_indices[1], end_cell_indices[1], streamContainer);
axis[2] = this->axis[2]->getSubGrid(start_cell_indices[2], end_cell_indices[2], streamContainer);
// set the shape parameter
subgrid->shape[0] = axis[0]->ncell * (axis[0]->nlip-1) +1;
subgrid->shape[1] = axis[1]->ncell * (axis[1]->nlip-1) +1;
subgrid->shape[2] = axis[2]->ncell * (axis[2]->nlip-1) +1;
for (int device = 0; device < streamContainer->getNumberOfDevices(); device ++) {
streamContainer->setDevice(device);
// set the device copies to the axis pointers
temp_axis[0] = axis[0]->device_copies[device];
temp_axis[1] = axis[1]->device_copies[device];
temp_axis[2] = axis[2]->device_copies[device];
// copy the device axis to device
cudaMalloc(&subgrid->device_axis[device], sizeof(Grid1D *)*3);
cudaMemcpy( subgrid->device_axis[device], temp_axis, sizeof(Grid1D *) * 3, cudaMemcpyHostToDevice);
subgrid->axis = subgrid->device_axis[device];
// allocate the device memory and copy
cudaMalloc(&subgrid->device_copies[device], sizeof(Grid3D));
check_grid_errors(__FILE__, __LINE__);
cudaMemcpy(subgrid->device_copies[device], subgrid, sizeof(Grid3D), cudaMemcpyHostToDevice);
check_grid_errors(__FILE__, __LINE__);
}
temp_axis[0] = axis[0];
temp_axis[1] = axis[1];
temp_axis[2] = axis[2];
// set the host pointers to the returned object
subgrid->axis = temp_axis;
delete[] axis;
return subgrid;
}
int *Grid3D::getShape() {
return this->shape;
}
int Grid3D::getShape(int axis) {
return this->shape[axis];
}
/***************************************************
* Fortran interfaces *
* *
***************************************************/
extern "C" Grid1D *grid1d_init_cuda(int ncell, int nlip, double r_max, double *h, double *d, double *gridpoints, double *lip, double *derivative_lip, double *lower_derivative_lip, double *base_integrals, StreamContainer *streamContainer) {
Grid1D *new_grid = new Grid1D(ncell, nlip, r_max, h, d, gridpoints, lip, derivative_lip, lower_derivative_lip, base_integrals, streamContainer);
return new_grid;
}
extern "C" void grid1d_upload_cuda(Grid1D *grid) {
grid->upload();
}
extern "C" void grid1d_destroy_cuda(Grid1D *grid) {
grid->destroy();
delete grid;
}
extern "C" Grid3D *grid3d_init_cuda(Grid1D **axis, StreamContainer *streamContainer) {
Grid3D *new_grid = new Grid3D(axis, streamContainer);
return new_grid;
}
extern "C" void grid3d_destroy_cuda(Grid3D *grid) {
grid->destroy();
delete grid;
} |
0301053fafb49b301b517b0a5fbdbf14ebc739d2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "accumulateRowsKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
int channels = 1;
int h = YSIZE;
int w = XSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
accumulateRowsKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,channels,h,w);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
accumulateRowsKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,channels,h,w);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
accumulateRowsKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,channels,h,w);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0301053fafb49b301b517b0a5fbdbf14ebc739d2.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "accumulateRowsKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
int channels = 1;
int h = YSIZE;
int w = XSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
accumulateRowsKernel<<<gridBlock,threadBlock>>>(input,output,channels,h,w);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
accumulateRowsKernel<<<gridBlock,threadBlock>>>(input,output,channels,h,w);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
accumulateRowsKernel<<<gridBlock,threadBlock>>>(input,output,channels,h,w);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
dd2b059f913f910dc2a5ee0f215ff45d5f561d0a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<stdlib.h>
#include<math.h>
#include<float.h>
#include "hip/hip_runtime.h"
#define MAX_NUM_THREADS 1024
__device__ int *resultIndexCluster;
__device__ Point *clusters, *points;
/*
 * Finds the index of the closest cluster to each point.
 * Returns an array of cluster indexes.
*/
__global__ void minDistanceCluster(Point *clusters, Point *points, int* resultIndexCluster, GlobalVar data)
{
double pX, pY, cX, cY;
double currentD, minD = DBL_MAX;
int pIndex;
pIndex = blockIdx.x * MAX_NUM_THREADS + threadIdx.x;
if (pIndex < data.sizePointsArr)
{
pX = points[pIndex].x;
pY = points[pIndex].y;
for (int i = 0; i < data.sizeClusterArr; i++)
{
cX = clusters[i].x;
cY = clusters[i].y;
currentD = (cX - pX) * (cX - pX) + (cY - pY) * (cY - pY);
if (currentD < minD){
minD = currentD;
resultIndexCluster[pIndex] = i;
}
}
}
}
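/*
 * Note on the kernel above: distances are compared squared; since sqrt() is
 * monotonic this selects the same closest cluster while avoiding the square root.
 */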
/*
* memcopy clusters and call kernel
*/
void closestClusterToPoint(int* indexMinCluster, Point* clusterArr, GlobalVar data)
{
int i;
int numBlock = (int)ceil((double)data.sizePointsArr / MAX_NUM_THREADS);
hipError_t cudaStatus;
dim3 dimGrid(numBlock);
dim3 dimBlock(MAX_NUM_THREADS);
//Copy cluster array to device:
cudaStatus = hipMemcpy(clusters, clusterArr, data.sizeClusterArr * sizeof(Point), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stdout, "hipMemcpy (clusters) failed!\n");
fflush(stdout);
return;
}
hipLaunchKernelGGL(( minDistanceCluster) , dim3(dimGrid), dim3(dimBlock) , 0, 0, clusters, points, resultIndexCluster, data);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stdout, "distanceKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
fflush(stdout);
return;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stdout, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fflush(stdout);
return;
}
//Read result from device:
hipMemcpy(indexMinCluster, resultIndexCluster, data.sizePointsArr*sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stdout, "hipMemcpy failed!\n");
fflush(stdout);
return;
}
}
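/*
 * Typical host-side call order (a sketch inferred from the functions in this file,
 * not a prescribed API): prepForCuda(data) and copyPoints(pointsArr, data) to set up
 * device memory, closestClusterToPoint(...) for each new set of cluster centers,
 * and freeAllocationCuda() once finished.
 */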
/*
* Malloc and memcopy only once per iteration
*/
void prepForCuda(GlobalVar data)
{
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system. ///------------- should be according to device id?? --------------
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stdout, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
fflush(stdout);
return;
}
//allocate memory in device for cluster array:
cudaStatus = hipMalloc(&clusters, data.sizeClusterArr *sizeof(Point));
if (cudaStatus != hipSuccess) {
fprintf(stdout, "hipMalloc (clusters) failed!\n");
fflush(stdout);
return;
}
//allocate memory in device for points array:
cudaStatus = hipMalloc(&points, data.sizePointsArr *sizeof(Point));
if (cudaStatus != hipSuccess) {
fprintf(stdout, "hipMalloc (points) failed!\n");
fflush(stdout);
return;
}
//allocate memory in device for result:
cudaStatus = hipMalloc(&resultIndexCluster, data.sizePointsArr *sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stdout, "hipMalloc failed!\n");
fflush(stdout);
return;
}
}
//free memory from device:
void freeAllocationCuda()
{
hipFree(resultIndexCluster);
hipFree(clusters);
hipFree(points);
}
void copyPoints(Point* pointsArr, GlobalVar data)
{
hipError_t cudaStatus;
//Copy points array to device:
cudaStatus = hipMemcpy(points, pointsArr, data.sizePointsArr * sizeof(Point), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stdout, "hipMemcpy (points) failed!\n");
fflush(stdout);
return;
}
} | dd2b059f913f910dc2a5ee0f215ff45d5f561d0a.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<stdlib.h>
#include<math.h>
#include<float.h>
#include "cuda.h"
#define MAX_NUM_THREADS 1024
__device__ int *resultIndexCluster;
__device__ Point *clusters, *points;
/*
 * Finds the index of the closest cluster to each point.
 * Returns an array of cluster indexes.
*/
__global__ void minDistanceCluster(Point *clusters, Point *points, int* resultIndexCluster, GlobalVar data)
{
double pX, pY, cX, cY;
double currentD, minD = DBL_MAX;
int pIndex;
pIndex = blockIdx.x * MAX_NUM_THREADS + threadIdx.x;
if (pIndex < data.sizePointsArr)
{
pX = points[pIndex].x;
pY = points[pIndex].y;
for (int i = 0; i < data.sizeClusterArr; i++)
{
cX = clusters[i].x;
cY = clusters[i].y;
currentD = (cX - pX) * (cX - pX) + (cY - pY) * (cY - pY);
if (currentD < minD){
minD = currentD;
resultIndexCluster[pIndex] = i;
}
}
}
}
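/*
 * Note on the kernel above: distances are compared squared; since sqrt() is
 * monotonic this selects the same closest cluster while avoiding the square root.
 */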
/*
* memcopy clusters and call kernel
*/
void closestClusterToPoint(int* indexMinCluster, Point* clusterArr, GlobalVar data)
{
int i;
int numBlock = (int)ceil((double)data.sizePointsArr / MAX_NUM_THREADS);
cudaError_t cudaStatus;
dim3 dimGrid(numBlock);
dim3 dimBlock(MAX_NUM_THREADS);
//Copy cluster array to device:
cudaStatus = cudaMemcpy(clusters, clusterArr, data.sizeClusterArr * sizeof(Point), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stdout, "cudaMemcpy (clusters) failed!\n");
fflush(stdout);
return;
}
minDistanceCluster <<<dimGrid, dimBlock >>>(clusters, points, resultIndexCluster, data);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stdout, "distanceKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
fflush(stdout);
return;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stdout, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fflush(stdout);
return;
}
//Read result from device:
cudaMemcpy(indexMinCluster, resultIndexCluster, data.sizePointsArr*sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stdout, "cudaMemcpy failed!\n");
fflush(stdout);
return;
}
}
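/*
 * Typical host-side call order (a sketch inferred from the functions in this file,
 * not a prescribed API): prepForCuda(data) and copyPoints(pointsArr, data) to set up
 * device memory, closestClusterToPoint(...) for each new set of cluster centers,
 * and freeAllocationCuda() once finished.
 */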
/*
* Malloc and memcopy only once per iteration
*/
void prepForCuda(GlobalVar data)
{
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system. ///------------- should be according to device id?? --------------
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stdout, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
fflush(stdout);
return;
}
//allocate memory in device for cluster array:
cudaStatus = cudaMalloc(&clusters, data.sizeClusterArr *sizeof(Point));
if (cudaStatus != cudaSuccess) {
fprintf(stdout, "cudaMalloc (clusters) failed!\n");
fflush(stdout);
return;
}
//allocate memory in device for points array:
cudaStatus = cudaMalloc(&points, data.sizePointsArr *sizeof(Point));
if (cudaStatus != cudaSuccess) {
fprintf(stdout, "cudaMalloc (points) failed!\n");
fflush(stdout);
return;
}
//allocate memory in device for result:
cudaStatus = cudaMalloc(&resultIndexCluster, data.sizePointsArr *sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stdout, "cudaMalloc failed!\n");
fflush(stdout);
return;
}
}
//free memory from device:
void freeAllocationCuda()
{
cudaFree(resultIndexCluster);
cudaFree(clusters);
cudaFree(points);
}
void copyPoints(Point* pointsArr, GlobalVar data)
{
cudaError_t cudaStatus;
//Copy points array to device:
cudaStatus = cudaMemcpy(points, pointsArr, data.sizePointsArr * sizeof(Point), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stdout, "cudaMemcpy (points) failed!\n");
fflush(stdout);
return;
}
} |
3eb25e46c53dfcae65850a017b2ab6d19aab9042.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "pow_kerneld.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *v = NULL;
hipMalloc(&v, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
double e = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
pow_kerneld), dim3(gridBlock),dim3(threadBlock), 0, 0, v,n,e);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
pow_kerneld), dim3(gridBlock),dim3(threadBlock), 0, 0, v,n,e);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
pow_kerneld), dim3(gridBlock),dim3(threadBlock), 0, 0, v,n,e);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 3eb25e46c53dfcae65850a017b2ab6d19aab9042.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "pow_kerneld.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *v = NULL;
cudaMalloc(&v, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
double e = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
pow_kerneld<<<gridBlock,threadBlock>>>(v,n,e);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
pow_kerneld<<<gridBlock,threadBlock>>>(v,n,e);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
pow_kerneld<<<gridBlock,threadBlock>>>(v,n,e);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
9af323f4f0a3a1ab5f596c65b7ed9781fe39150d.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef picket_fence_cuda
#define picket_fence_cuda
#pragma once
#include <hip/hip_runtime.h>
#include <math.h>
//////// kernel version ///////////////////////////////////////////
// Calculates the IR band Rosseland mean opacity (local T) according to the
// Freedman et al. (2014) fit and coefficents
__device__ void kernel_k_Ross_Freedman(double Tin, double Pin, double met, double &k_IR) {
    // dependencies
//// powl from math
//// log10l from math
//// atan from math
//// onedivpi -> namespace constants::onedivpi
// Input:
// T - Local gas temperature [K]
// P - Local gas pressure [pa]
    // met - Local metallicity [M/H] (log10 relative to solar, solar [M/H] = 0.0)
// Call by reference (Input&Output):
// k_IR - IR band Rosseland mean opacity [m2 kg-1]
const double pi = atan((double)(1)) * 4;
const double onedivpi = 1.0 / pi;
    // Coefficient parameters for Freedman et al. (2014) table fit
double c1 = 10.602;
double c2 = 2.882;
double c3 = 6.09e-15;
double c4 = 2.954;
double c5 = -2.526;
double c6 = 0.843;
double c7 = -5.490;
double c8_l = -14.051, c8_h = 82.241;
double c9_l = 3.055, c9_h = -55.456;
double c10_l = 0.024, c10_h = 8.754;
double c11_l = 1.877, c11_h = 0.7048;
double c12_l = -0.445, c12_h = -0.0414;
double c13_l = 0.8321, c13_h = 0.8321;
// work variables
double k_lowP;
double k_hiP;
double T;
double P;
double Tl10;
double Pl10;
// start operations
T = Tin;
P = Pin * ((double)10.0); // Convert to dyne cm-2
Tl10 = log10((double)(T));
Pl10 = log10((double)(P));
// Low pressure expression
k_lowP = c1 * atan((double)(Tl10 - c2)) -
(c3 / (Pl10 + c4)) * exp((double)(pow((double)(Tl10 - c5), 2.0))) + c6 * met + c7;
// De log10l
k_lowP = pow((double)(10.0), k_lowP);
    // Temperature split for coefficients = 800 K
if (T <= 800.0)
{
k_hiP = c8_l + c9_l * Tl10 + c10_l * pow((double)(Tl10), 2.0) +
Pl10 * (c11_l + c12_l * Tl10) +
c13_l * met * (0.5 + onedivpi * atan((double)((Tl10 - ((double)2.5)) / (double)0.2)));
}
else
{
k_hiP = c8_h + c9_h * Tl10 +
c10_h * pow((double)(Tl10), 2.0) + Pl10 * (c11_h + c12_h * Tl10) +
c13_h * met * (0.5 + onedivpi * atan((double)((Tl10 - ((double)2.5)) / (double)0.2)));
}
// De log10l
k_hiP = pow((double)(10.0), k_hiP);
// Total Rosseland mean opacity - converted to m2 kg-1
k_IR = (k_lowP + k_hiP) / ((double)10.0);
// Avoid divergence in fit for large values
if (k_IR > 1.0e10)
{
k_IR = 1.0e10;
}
}
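// A minimal illustrative launch wrapper for the device function above (a sketch,
// not part of the original interface; the kernel name and flat array layout are
// assumptions): one thread evaluates the opacity for one (T, P) sample.
__global__ void k_Ross_Freedman_sketch(const double* T, const double* P, double met,
                                       double* k_IR, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        kernel_k_Ross_Freedman(T[i], P[i], met, k_IR[i]);
    }
}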
__device__ void Ray_dry_adj(int id, int nlay, int nlay1, double t_step, double kappa,
double* Tl, double* pl,
double* pe, double*& dT_conv, double* Tl_cc__df_l, double* d_p__df_l) {
    // dependencies
//// main_parameters::nlay -> "FMS_RC_para_&_const.cpp"
//// powl -> math
//// logl10 -> math
//// expl -> math
// Input:
//
// Call by reference (Input & Output):
//
// constants & parameters
int itermax = 5;
const double small = 1e-6;
// work variables
int i, iter;
bool did_adj;
double pfact, Tbar;
double condi;
// start operations
for (i = 0; i < nlay; i++)
{
Tl_cc__df_l[id * nlay + i] = Tl[id * nlay + i];
d_p__df_l[id * nlay + i] = pe[id * nlay1 + i + 1] - pe[id * nlay1 + i];
}
for (iter = 0; iter < itermax; iter++)
{
did_adj = false;
// Downward pass
for (i = 0; i < nlay - 1; i++)
{
pfact = pow((double)(pl[id * nlay + i] / pl[id * nlay + i + 1]), kappa);
condi = (Tl_cc__df_l[id * nlay + i + 1] * pfact - small);
if (Tl_cc__df_l[id * nlay + i] < condi) {
Tbar = (d_p__df_l[id * nlay + i] * Tl_cc__df_l[id * nlay + i] + d_p__df_l[id * nlay + i + 1] * Tl_cc__df_l[id * nlay + i + 1]) /
(d_p__df_l[id * nlay + i] + d_p__df_l[id * nlay + i + 1]);
Tl_cc__df_l[id * nlay + i + 1] = (d_p__df_l[id * nlay + i] + d_p__df_l[id * nlay + i + 1]) * Tbar /
(d_p__df_l[id * nlay + i + 1] + pfact * d_p__df_l[id * nlay + i]);
Tl_cc__df_l[id * nlay + i] = Tl_cc__df_l[id * nlay + i + 1] * pfact;
did_adj = true;
}
}
// Upward pass
for (i = nlay - 2; i > -1; i--) {
pfact = pow((double)(pl[id * nlay + i] / pl[id * nlay + i + 1]), kappa);
condi = (Tl_cc__df_l[id * nlay + i + 1] * pfact - small);
if (Tl_cc__df_l[id * nlay + i] < condi) {
Tbar = (d_p__df_l[id * nlay + i] * Tl_cc__df_l[id * nlay + i] + d_p__df_l[id * nlay + i + 1] * Tl_cc__df_l[id * nlay + i + 1]) /
(d_p__df_l[id * nlay + i] + d_p__df_l[id * nlay + i + 1]);
Tl_cc__df_l[id * nlay + i + 1] = (d_p__df_l[id * nlay + i] + d_p__df_l[id * nlay + i + 1]) * Tbar /
(d_p__df_l[id * nlay + i + 1] + pfact * d_p__df_l[id * nlay + i]);
Tl_cc__df_l[id * nlay + i] = Tl_cc__df_l[id * nlay + i + 1] * pfact;
did_adj = true;
}
}
// ! If no adjustment required, exit the loop
if (did_adj == false)
{
break;
}
}
// Change in temperature is Tl_cc - Tl
// adjust on timescale of 1 timestep
for (i = 0; i < nlay; i++)
{
dT_conv[id * nlay + i] = (Tl_cc__df_l[id * nlay + i] - Tl[id * nlay + i]) / t_step;
}
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
__device__ void linear_log_interp(double xval, double x1, double x2, double y1, double y2, double& yval) {
    // dependencies
//// powll from math
//// log10f from math
// work variables
double lxval;
double ly1;
double ly2;
double lx1;
double lx2;
double norm;
// start operations
lxval = log10((double)(xval));
lx1 = log10((double)(x1));
lx2 = log10((double)(x2));
ly1 = log10((double)(y1));
ly2 = log10((double)(y2));
norm = ((double)1.0) / (lx2 - lx1);
yval = pow((double)(10.0), ((ly1 * (lx2 - lxval) + ly2 * (lxval - lx1)) * norm));
}
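// Equivalently, in closed form:
//   log10(yval) = ( log10(y1)*(log10(x2) - log10(xval))
//                 + log10(y2)*(log10(xval) - log10(x1)) ) / (log10(x2) - log10(x1)),
// i.e. ordinary linear interpolation carried out in log-log space.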
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
__device__ void tau_struct(int id, int nlev, double grav,
double* p_half, double* kRoss,
int channel, double* tau_struc_e) {
// dependencies
//// nlay -> namespace main_parameters
//// nlay1 -> namespace main_parameters
// work variables
double tau_sum;
double tau_lay;
double delP;
int k;
// running sum of optical depth
tau_sum = 0.0;
// start operations
// Upper most tau_struc is given by some low pressure value (here 1e-9 bar = 1e-4 pa)
//dP = (p_half(1) - 1e-4)
//tau_lay = (kRoss(1) * dP) / grav
//tau_sum = tau_sum + tau_lay
tau_struc_e[id*(nlev+1)+0] = tau_sum;
// Integrate from top to bottom
for (k = 0; k < nlev; k++)
{
// Pressure difference between layer edges
delP = (p_half[id*(nlev+1)+ k + 1] - p_half[id*(nlev+1)+k]);
        // Optical depth of layer assuming hydrostatic equilibrium
tau_lay = (kRoss[id*nlev*3+channel * nlev + k] * delP) / grav;
// Add to running sum
tau_sum = tau_sum + tau_lay;
// Optical depth structure is running sum
tau_struc_e[id*(nlev+1)+k + 1] = tau_sum;
}
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
__device__ void sw_grey_down(int id, int nlay1, double solar,
double* solar_tau, double* sw_down__df_e, double mu) {
// dependencies
//// expll -> math
// work variables
int i;
// start operations
for (i = 0; i < nlay1; i++)
{
sw_down__df_e[id * nlay1 + i] = solar * mu * exp((double)(-solar_tau[id * nlay1 + i] / mu));
}
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
__device__ void lw_grey_updown_linear(int id, int nlay, int nlay1,
double* be__df_e, double* tau_IRe__df_e,
double* lw_up__df_e, double* lw_down__df_e,
double* dtau__dff_l, double* del__dff_l,
double* edel__dff_l, double* e0i__dff_l, double* e1i__dff_l,
double* Am__dff_l, double* Bm__dff_l,
double* lw_up_g__dff_l, double* lw_down_g__dff_l) {
// dependencies
//// expll -> math
//// main_parameters::nlay1
//// main_parameters::nlay
//// constants::gauss_ng
//// constants::twopi
const double pi = atan((double)(1)) * 4;
const double twopi = 2.0 * pi;
// Work variables and arrays
int k, g;
//Gauss quadrature variables
const int gauss_ng = 2;
double uarr[gauss_ng];
double w[gauss_ng];
uarr[0] = 0.21132487;
uarr[1] = 0.78867513;
w[0] = 0.5;
w[1] = 0.5;
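    // These are the two-point Gauss-Legendre nodes mapped onto [0, 1]:
    // uarr[0] = (1 - 1/sqrt(3))/2, uarr[1] = (1 + 1/sqrt(3))/2, each with weight 1/2.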
for (k = 0; k < nlay; k++)
{
dtau__dff_l[id*nlay + k] = (tau_IRe__df_e[id*(nlay+1)+k + 1] - tau_IRe__df_e[id*(nlay +1)+k]);
}
// Zero the flux arrays
for (k = 0; k < nlay1; k++)
{
lw_down__df_e[id*(nlay+1)+k] = 0.0;
lw_up__df_e[id*nlay1 + k] = 0.0;
}
// Start loops to integrate in mu space
for (g = 0; g < gauss_ng; g++)
{
// Prepare loop
for (k = 0; k < nlay; k++)
{
// Olson & Kunasz (1987) parameters
del__dff_l[id*nlay + k] = dtau__dff_l[id * nlay + k] / uarr[g];
edel__dff_l[id * nlay + k] = exp((double)(-del__dff_l[id * nlay + k]));
e0i__dff_l[id * nlay + k] = 1.0 - edel__dff_l[id * nlay + k];
e1i__dff_l[id * nlay + k] = del__dff_l[id * nlay + k] - e0i__dff_l[id * nlay + k];
Am__dff_l[id * nlay + k] = e0i__dff_l[id * nlay + k] - e1i__dff_l[id * nlay + k] / del__dff_l[id * nlay + k]; // Am[k] = Gp[k], just indexed differently
Bm__dff_l[id * nlay + k] = e1i__dff_l[id * nlay + k] / del__dff_l[id * nlay + k]; // Bm[k] = Bp[k], just indexed differently
}
        // Perform downward loop first
// Top boundary condition
lw_down_g__dff_l[0] = 0.0;
for (k = 0; k < nlay; k++)
{
lw_down_g__dff_l[id * nlay + k + 1] = lw_down_g__dff_l[id * nlay + k] * edel__dff_l[id * nlay + k] + Am__dff_l[id * nlay + k] * be__df_e[id * nlay1 + k] + Bm__dff_l[id * nlay + k] * be__df_e[id * nlay1 + k + 1]; // TS intensity
}
        // Perform upward loop
// Lower boundary condition
lw_up_g__dff_l[id * nlay1 + nlay1 - 1] = be__df_e[id * nlay1 + nlay1 - 1];
for (k = nlay - 1; k > -1; k--)
{
lw_up_g__dff_l[id * nlay + k] = lw_up_g__dff_l[id * nlay + k + 1] * edel__dff_l[id * nlay + k] +
Bm__dff_l[id * nlay + k] * be__df_e[id * nlay1 + k] + Am__dff_l[id * nlay + k] * be__df_e[id * nlay1 + k + 1]; // TS intensity
}
// Sum up flux arrays with Gauss weights and points
for (k = 0; k < nlay1; k++)
{
lw_down__df_e[id * nlay1 + k] = lw_down__df_e[id * nlay1 + k] + lw_down_g__dff_l[id * nlay + k] * w[g] * uarr[g];
lw_up__df_e[id * nlay1 + k] = lw_up__df_e[id * nlay1 + k] + lw_up_g__dff_l[id * nlay + k] * w[g] * uarr[g];
}
}
for (k = 0; k < nlay1; k++)
{
lw_down__df_e[id * nlay1 + k] = twopi * lw_down__df_e[id * nlay1 + k];
lw_up__df_e[id * nlay1 + k] = twopi * lw_up__df_e[id * nlay1 + k];
}
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
__device__ void lw_grey_updown_poly(int nlay, int nlay1, double* be__df_e,
double* tau_IRe__df_e, double* lw_up__df_e,
double* lw_down__df_e, double* dtau__dff_l, double* del__dff_l,
double* edel__dff_l, double* e0i__dff_l, double* e1i__dff_l,
double* e2i__dff_l, double* Am__dff_l, double* Bm__dff_l,
double* Gm__dff_l, double* lw_up_g__dff_l, double* lw_down_g__dff_l) {
// dependencies
//// expll -> math
//// powll -> math
//// main_parameters::nlay1
//// main_parameters::nlay
//// constants::gauss_ng
//// constants::twopi
const double pi = atan((double)(1)) * 4;
const double twopi = 2.0 * pi;
// Work variables and arrays
int k, g;
//Gauss quadrature variables
const int gauss_ng = 2;
double uarr[gauss_ng];
double w[gauss_ng];
uarr[0] = 0.21132487;
uarr[1] = 0.78867513;
w[0] = 0.5;
w[1] = 0.5;
for (k = 0; k < nlay; k++)
{
dtau__dff_l[k] = (tau_IRe__df_e[k + 1] - tau_IRe__df_e[k]);
}
// Zero the flux arrays
for (k = 0; k < nlay1; k++)
{
lw_up__df_e[k] = 0.0;
lw_down__df_e[k] = 0.0;
}
// Start loops to integrate in mu space
for (g = 0; g < gauss_ng; g++)
{
// Prepare loop
for (k = 0; k < nlay; k++)
{
// Olson & Kunasz (1987) parameters
del__dff_l[k] = dtau__dff_l[k] / uarr[g];
edel__dff_l[k] = exp((double)(-del__dff_l[k]));
e0i__dff_l[k] = ((double)(1.0)) - edel__dff_l[k];
e1i__dff_l[k] = del__dff_l[k] - e0i__dff_l[k];
e2i__dff_l[k] = pow((double)(del__dff_l[k]), 2) - 2.0 * e1i__dff_l[k];
}
for (k = 0; k < nlay; k++) {
// For boundary conditions assume linear interpolation at edges
            if (k == 0 || k == nlay - 1)
{
Am__dff_l[k] = e0i__dff_l[k] - e1i__dff_l[k] / del__dff_l[k]; // Am[k] = Gp[k], just indexed differently
Bm__dff_l[k] = e1i__dff_l[k] / del__dff_l[k]; // Bm[k] = Bp[k], just indexed differently
Gm__dff_l[k] = 0.0;// Gm(k) = Ap(k)
}
else
{
Am__dff_l[k] = e0i__dff_l[k] + (e2i__dff_l[k] - (del__dff_l[k + 1] + 2.0 * del__dff_l[k]) * e1i__dff_l[k]) / (del__dff_l[k] * (del__dff_l[k + 1] + del__dff_l[k])); // Am[k] = Gp[k], just indexed differently
Bm__dff_l[k] = ((del__dff_l[k + 1] + del__dff_l[k]) * e1i__dff_l[k] - e2i__dff_l[k]) / (del__dff_l[k] * del__dff_l[k + 1]); // Bm[k] = Bp[k], just indexed differently
Gm__dff_l[k] = (e2i__dff_l[k] - del__dff_l[k] * e1i__dff_l[k]) / (del__dff_l[k + 1] * (del__dff_l[k + 1] + del__dff_l[k])); // Gm[k] = Ap[k], just indexed differently
}
}
        // Perform downward loop first
// Top boundary condition
lw_down_g__dff_l[0] = 0.0;
lw_down_g__dff_l[1] = lw_down_g__dff_l[0] * edel__dff_l[0] + Am__dff_l[0] * be__df_e[0] + Bm__dff_l[0] * be__df_e[1];
for (k = 1; k < nlay - 1; k++)
{
lw_down_g__dff_l[k + 1] = lw_down_g__dff_l[k] * edel__dff_l[k] + Am__dff_l[k] * be__df_e[k] + Bm__dff_l[k] * be__df_e[k + 1] +
Gm__dff_l[k] * be__df_e[k - 1]; // TS intensity
}
lw_down_g__dff_l[nlay1 - 1] = lw_down_g__dff_l[nlay - 1] * edel__dff_l[nlay - 1] + Am__dff_l[nlay - 1] * be__df_e[nlay - 1] + Bm__dff_l[nlay - 1] * be__df_e[nlay1 - 1];
        // Perform upward loop
// Lower boundary condition
lw_up_g__dff_l[nlay1 - 1] = be__df_e[nlay1 - 1];
lw_up_g__dff_l[nlay - 1] = lw_up_g__dff_l[nlay1 - 1] * edel__dff_l[nlay - 1] + Bm__dff_l[nlay - 1] * be__df_e[nlay - 1] + Am__dff_l[nlay - 1] * be__df_e[nlay1 - 1];
for (k = nlay - 2; k > 0; k--)
{
lw_up_g__dff_l[k] = lw_up_g__dff_l[k + 1] * edel__dff_l[k] + Gm__dff_l[k] * be__df_e[k - 1] + Bm__dff_l[k] * be__df_e[k] + Am__dff_l[k] * be__df_e[k + 1]; // TS intensity
}
lw_up_g__dff_l[0] = lw_up_g__dff_l[1] * edel__dff_l[0] + Bm__dff_l[0] * be__df_e[0] + Am__dff_l[0] * be__df_e[1];
// Sum up flux arrays with Gauss weights and points
for (k = 0; k < nlay1; k++)
{
lw_down__df_e[k] = lw_down__df_e[k] + lw_down_g__dff_l[k] * w[g] * uarr[g];
lw_up__df_e[k] = lw_up__df_e[k] + lw_up_g__dff_l[k] * w[g] * uarr[g];
}
}
for (k = 0; k < nlay1; k++)
{
lw_down__df_e[k] = twopi * lw_down__df_e[k];
lw_up__df_e[k] = twopi * lw_up__df_e[k];
}
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
__device__ void Kitzmann_TS_noscatt(int id, const int nlay, const int nlay1, double *Tl,
double *pl, double *pe,
double *k_V_l, double *k_IR_l,
double *Beta_V, double *Beta, double *&net_F,
double mu_s, double Finc, double Fint, double grav, double AB,
double *tau_Ve__df_e, double *tau_IRe__df_e, double *Te__df_e, double *be__df_e, //Kitzman working variables
double *sw_down__df_e, double *sw_down_b__df_e, double *sw_up__df_e,
double *lw_down__df_e, double *lw_down_b__df_e,
double *lw_up__df_e, double *lw_up_b__df_e,
double *lw_net__df_e, double *sw_net__df_e,
double *dtau__dff_l, double *del__dff_l, // lw_grey_updown_linear working variables
double *edel__dff_l, double *e0i__dff_l, double *e1i__dff_l,
double *Am__dff_l, double *Bm__dff_l,
double *lw_up_g__dff_l, double *lw_down_g__dff_l) {
    // dependencies
//// powll -> include math
//// log10f -> include math
//// nlay -> namespace main_parameters
//// nlay1 -> namespace main_parameters
    //// linear_log_interp -> namespace Kitzmann
    //// tau_struct -> namespace Kitzmann
    //// sw_grey_down -> namespace Kitzmann
    //// lw_grey_updown_linear -> namespace Kitzmann
    //// (lw_grey_updown_poly) -> namespace Kitzmann
const double pi = atan((double)(1)) * 4;
const double twopi = 2.0 * pi;
const double StBC = 5.670374419e-8;
// work variables
double Finc_B;
// start operation
// Find temperature at layer edges through linear interpolation and extrapolation
for (int i = 1; i < nlay; i++)
{
linear_log_interp(pe[id*nlay + i], pl[id * nlay + i - 1], pl[id * nlay + i], Tl[id * nlay + i - 1], Tl[id * nlay + i], Te__df_e[id * nlay + i]);
}
Te__df_e[id * nlay + 0] = Tl[id * nlay + 0] + (pe[id * nlay + 0] - pe[id * nlay + 1]) /
(pl[id * nlay + 0] - pe[id * nlay + 1]) * (Tl[id * nlay + 0] - Te__df_e[id * nlay + 1]);
Te__df_e[id * nlay1 + nlay1 - 1] = Tl[id * nlay + nlay - 1] + (pe[id * nlay1 + nlay1 - 1] - pe[id * nlay + nlay - 1]) /
(pl[id * nlay + nlay - 1] - pe[id * nlay + nlay - 1]) *
(Tl[id * nlay + nlay - 1] - Te__df_e[id * nlay + nlay - 1]);
// Shortwave fluxes
for (int i = 0; i < nlay1; i++)
{
sw_down__df_e[id * nlay1 + i] = 0.0;
sw_up__df_e[id * nlay1 + i] = 0.0;
}
for (int channel = 0; channel < 3; channel++)
{
// Find the opacity structure
tau_struct(id, nlay, grav, pe, k_V_l, channel, tau_Ve__df_e);
// Incident flux in band
Finc_B = Finc * Beta_V[id * 3 + channel];
// Calculate sw flux
sw_grey_down(id, nlay, Finc_B, tau_Ve__df_e, sw_down_b__df_e, mu_s);
// Sum all bands
for (int i = 0; i < nlay1; i++)
{
sw_down__df_e[id * nlay1 + i] = sw_down__df_e[id * nlay1 + i] + sw_down_b__df_e[id * nlay1 + i];
}
}
// Long wave two-stream fluxes
for (int i = 0; i < nlay1; i++)
{
lw_down__df_e[id * nlay1 + i] = 0.0;
lw_up__df_e[id * nlay1 + i] = 0.0;
}
for (int channel = 0; channel < 2; channel++)
{
// Find the opacity structure
tau_struct(id,nlay, grav, pe, k_IR_l, channel, tau_IRe__df_e);
// Blackbody fluxes (note divide by pi for correct units)
for (int i = 0; i < nlay1; i++)
{
be__df_e[id * nlay1 + i] = StBC * pow((double)(Te__df_e[id * nlay1 + i]), ((double)4.0)) / pi * Beta[id * 2 + channel];
}
// Calculate lw flux
lw_grey_updown_linear(id,nlay, nlay1, be__df_e, tau_IRe__df_e, lw_up_b__df_e, lw_down_b__df_e,
dtau__dff_l, del__dff_l, edel__dff_l, e0i__dff_l, e1i__dff_l,
Am__dff_l, Bm__dff_l, lw_up_g__dff_l, lw_down_g__dff_l);
//lw_grey_updown_poly(nlay, nlay1, be__df_e, tau_IRe__df_e, lw_up_b__df_e, lw_down_b__df_e,
//dtau__dff_l, del__dff_l, edel__dff_l, e0i__dff_l, e1i__dff_l,
// e2i__dff_l, Am__dff_l, Bm__dff_l, Gm__dff_l, lw_up_g__dff_l, lw_down_g__dff_l);
// Sum all bands
for (int i = 0; i < nlay1; i++)
{
lw_up__df_e[id * nlay1 + i] = lw_up__df_e[id * nlay1 + i] + lw_up_b__df_e[id * nlay1 + i];
lw_down__df_e[id * nlay1 + i] = lw_down__df_e[id * nlay1 + i] + lw_down_b__df_e[id * nlay1 + i];
}
}
// Net fluxes
for (int i = 0; i < nlay1; i++)
{
lw_net__df_e[id * nlay1 + i] = lw_up__df_e[id * nlay1 + i] - lw_down__df_e[id * nlay1 + i];
sw_net__df_e[id * nlay1 + i] = sw_up__df_e[id * nlay1 + i] - sw_down__df_e[id * nlay1 + i];
net_F[id * nlay1 + i] = lw_net__df_e[id * nlay1 + i] + sw_net__df_e[id * nlay1 + i];
}
net_F[id * nlay1 + nlay1 - 1] = Fint;
}
#endif // picket_fence_cuda
| 9af323f4f0a3a1ab5f596c65b7ed9781fe39150d.cu | #ifndef picket_fence_cuda
#define picket_fence_cuda
#pragma once
#include <cuda_runtime.h>
#include <math.h>
//////// kernel version ///////////////////////////////////////////
// Calculates the IR band Rosseland mean opacity (local T) according to the
// Freedman et al. (2014) fit and coefficents
__device__ void kernel_k_Ross_Freedman(double Tin, double Pin, double met, double &k_IR) {
    // dependencies
//// powl from math
//// log10l from math
//// atan from math
//// onedivpi -> namespace constants::onedivpi
// Input:
// T - Local gas temperature [K]
// P - Local gas pressure [pa]
    // met - Local metallicity [M/H] (log10 relative to solar, solar [M/H] = 0.0)
// Call by reference (Input&Output):
// k_IR - IR band Rosseland mean opacity [m2 kg-1]
const double pi = atan((double)(1)) * 4;
const double onedivpi = 1.0 / pi;
    // Coefficient parameters for Freedman et al. (2014) table fit
double c1 = 10.602;
double c2 = 2.882;
double c3 = 6.09e-15;
double c4 = 2.954;
double c5 = -2.526;
double c6 = 0.843;
double c7 = -5.490;
double c8_l = -14.051, c8_h = 82.241;
double c9_l = 3.055, c9_h = -55.456;
double c10_l = 0.024, c10_h = 8.754;
double c11_l = 1.877, c11_h = 0.7048;
double c12_l = -0.445, c12_h = -0.0414;
double c13_l = 0.8321, c13_h = 0.8321;
// work variables
double k_lowP;
double k_hiP;
double T;
double P;
double Tl10;
double Pl10;
// start operations
T = Tin;
P = Pin * ((double)10.0); // Convert to dyne cm-2
Tl10 = log10((double)(T));
Pl10 = log10((double)(P));
// Low pressure expression
k_lowP = c1 * atan((double)(Tl10 - c2)) -
(c3 / (Pl10 + c4)) * exp((double)(pow((double)(Tl10 - c5), 2.0))) + c6 * met + c7;
// De log10l
k_lowP = pow((double)(10.0), k_lowP);
    // Temperature split for coefficients = 800 K
if (T <= 800.0)
{
k_hiP = c8_l + c9_l * Tl10 + c10_l * pow((double)(Tl10), 2.0) +
Pl10 * (c11_l + c12_l * Tl10) +
c13_l * met * (0.5 + onedivpi * atan((double)((Tl10 - ((double)2.5)) / (double)0.2)));
}
else
{
k_hiP = c8_h + c9_h * Tl10 +
c10_h * pow((double)(Tl10), 2.0) + Pl10 * (c11_h + c12_h * Tl10) +
c13_h * met * (0.5 + onedivpi * atan((double)((Tl10 - ((double)2.5)) / (double)0.2)));
}
// De log10l
k_hiP = pow((double)(10.0), k_hiP);
// Total Rosseland mean opacity - converted to m2 kg-1
k_IR = (k_lowP + k_hiP) / ((double)10.0);
// Avoid divergence in fit for large values
if (k_IR > 1.0e10)
{
k_IR = 1.0e10;
}
}
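// A minimal illustrative launch wrapper for the device function above (a sketch,
// not part of the original interface; the kernel name and flat array layout are
// assumptions): one thread evaluates the opacity for one (T, P) sample.
__global__ void k_Ross_Freedman_sketch(const double* T, const double* P, double met,
                                       double* k_IR, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        kernel_k_Ross_Freedman(T[i], P[i], met, k_IR[i]);
    }
}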
__device__ void Ray_dry_adj(int id, int nlay, int nlay1, double t_step, double kappa,
double* Tl, double* pl,
double* pe, double*& dT_conv, double* Tl_cc__df_l, double* d_p__df_l) {
    // dependencies
//// main_parameters::nlay -> "FMS_RC_para_&_const.cpp"
//// powl -> math
//// logl10 -> math
//// expl -> math
// Input:
//
// Call by reference (Input & Output):
//
// constants & parameters
int itermax = 5;
const double small = 1e-6;
// work variables
int i, iter;
bool did_adj;
double pfact, Tbar;
double condi;
// start operations
for (i = 0; i < nlay; i++)
{
Tl_cc__df_l[id * nlay + i] = Tl[id * nlay + i];
d_p__df_l[id * nlay + i] = pe[id * nlay1 + i + 1] - pe[id * nlay1 + i];
}
for (iter = 0; iter < itermax; iter++)
{
did_adj = false;
// Downward pass
for (i = 0; i < nlay - 1; i++)
{
pfact = pow((double)(pl[id * nlay + i] / pl[id * nlay + i + 1]), kappa);
condi = (Tl_cc__df_l[id * nlay + i + 1] * pfact - small);
if (Tl_cc__df_l[id * nlay + i] < condi) {
Tbar = (d_p__df_l[id * nlay + i] * Tl_cc__df_l[id * nlay + i] + d_p__df_l[id * nlay + i + 1] * Tl_cc__df_l[id * nlay + i + 1]) /
(d_p__df_l[id * nlay + i] + d_p__df_l[id * nlay + i + 1]);
Tl_cc__df_l[id * nlay + i + 1] = (d_p__df_l[id * nlay + i] + d_p__df_l[id * nlay + i + 1]) * Tbar /
(d_p__df_l[id * nlay + i + 1] + pfact * d_p__df_l[id * nlay + i]);
Tl_cc__df_l[id * nlay + i] = Tl_cc__df_l[id * nlay + i + 1] * pfact;
did_adj = true;
}
}
// Upward pass
for (i = nlay - 2; i > -1; i--) {
pfact = pow((double)(pl[id * nlay + i] / pl[id * nlay + i + 1]), kappa);
condi = (Tl_cc__df_l[id * nlay + i + 1] * pfact - small);
if (Tl_cc__df_l[id * nlay + i] < condi) {
Tbar = (d_p__df_l[id * nlay + i] * Tl_cc__df_l[id * nlay + i] + d_p__df_l[id * nlay + i + 1] * Tl_cc__df_l[id * nlay + i + 1]) /
(d_p__df_l[id * nlay + i] + d_p__df_l[id * nlay + i + 1]);
Tl_cc__df_l[id * nlay + i + 1] = (d_p__df_l[id * nlay + i] + d_p__df_l[id * nlay + i + 1]) * Tbar /
(d_p__df_l[id * nlay + i + 1] + pfact * d_p__df_l[id * nlay + i]);
Tl_cc__df_l[id * nlay + i] = Tl_cc__df_l[id * nlay + i + 1] * pfact;
did_adj = true;
}
}
// ! If no adjustment required, exit the loop
if (did_adj == false)
{
break;
}
}
// Change in temperature is Tl_cc - Tl
// adjust on timescale of 1 timestep
for (i = 0; i < nlay; i++)
{
dT_conv[id * nlay + i] = (Tl_cc__df_l[id * nlay + i] - Tl[id * nlay + i]) / t_step;
}
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
__device__ void linear_log_interp(double xval, double x1, double x2, double y1, double y2, double& yval) {
    // dependencies
//// powll from math
//// log10f from math
// work variables
double lxval;
double ly1;
double ly2;
double lx1;
double lx2;
double norm;
// start operations
lxval = log10((double)(xval));
lx1 = log10((double)(x1));
lx2 = log10((double)(x2));
ly1 = log10((double)(y1));
ly2 = log10((double)(y2));
norm = ((double)1.0) / (lx2 - lx1);
yval = pow((double)(10.0), ((ly1 * (lx2 - lxval) + ly2 * (lxval - lx1)) * norm));
}
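// Equivalently, in closed form:
//   log10(yval) = ( log10(y1)*(log10(x2) - log10(xval))
//                 + log10(y2)*(log10(xval) - log10(x1)) ) / (log10(x2) - log10(x1)),
// i.e. ordinary linear interpolation carried out in log-log space.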
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
__device__ void tau_struct(int id, int nlev, double grav,
double* p_half, double* kRoss,
int channel, double* tau_struc_e) {
// dependencies
//// nlay -> namespace main_parameters
//// nlay1 -> namespace main_parameters
// work variables
double tau_sum;
double tau_lay;
double delP;
int k;
// running sum of optical depth
tau_sum = 0.0;
// start operations
// Upper most tau_struc is given by some low pressure value (here 1e-9 bar = 1e-4 pa)
//dP = (p_half(1) - 1e-4)
//tau_lay = (kRoss(1) * dP) / grav
//tau_sum = tau_sum + tau_lay
tau_struc_e[id*(nlev+1)+0] = tau_sum;
// Integrate from top to bottom
for (k = 0; k < nlev; k++)
{
// Pressure difference between layer edges
delP = (p_half[id*(nlev+1)+ k + 1] - p_half[id*(nlev+1)+k]);
        // Optical depth of layer assuming hydrostatic equilibrium
tau_lay = (kRoss[id*nlev*3+channel * nlev + k] * delP) / grav;
// Add to running sum
tau_sum = tau_sum + tau_lay;
// Optical depth structure is running sum
tau_struc_e[id*(nlev+1)+k + 1] = tau_sum;
}
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
__device__ void sw_grey_down(int id, int nlay1, double solar,
double* solar_tau, double* sw_down__df_e, double mu) {
// dependencies
//// expll -> math
// work variables
int i;
// start operations
for (i = 0; i < nlay1; i++)
{
sw_down__df_e[id * nlay1 + i] = solar * mu * exp((double)(-solar_tau[id * nlay1 + i] / mu));
}
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
__device__ void lw_grey_updown_linear(int id, int nlay, int nlay1,
double* be__df_e, double* tau_IRe__df_e,
double* lw_up__df_e, double* lw_down__df_e,
double* dtau__dff_l, double* del__dff_l,
double* edel__dff_l, double* e0i__dff_l, double* e1i__dff_l,
double* Am__dff_l, double* Bm__dff_l,
double* lw_up_g__dff_l, double* lw_down_g__dff_l) {
// dependencies
//// expll -> math
//// main_parameters::nlay1
//// main_parameters::nlay
//// constants::gauss_ng
//// constants::twopi
const double pi = atan((double)(1)) * 4;
const double twopi = 2.0 * pi;
// Work variables and arrays
int k, g;
//Gauss quadrature variables
const int gauss_ng = 2;
double uarr[gauss_ng];
double w[gauss_ng];
uarr[0] = 0.21132487;
uarr[1] = 0.78867513;
w[0] = 0.5;
w[1] = 0.5;
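    // These are the two-point Gauss-Legendre nodes mapped onto [0, 1]:
    // uarr[0] = (1 - 1/sqrt(3))/2, uarr[1] = (1 + 1/sqrt(3))/2, each with weight 1/2.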
for (k = 0; k < nlay; k++)
{
dtau__dff_l[id*nlay + k] = (tau_IRe__df_e[id*(nlay+1)+k + 1] - tau_IRe__df_e[id*(nlay +1)+k]);
}
// Zero the flux arrays
for (k = 0; k < nlay1; k++)
{
lw_down__df_e[id*(nlay+1)+k] = 0.0;
lw_up__df_e[id*nlay1 + k] = 0.0;
}
// Start loops to integrate in mu space
for (g = 0; g < gauss_ng; g++)
{
// Prepare loop
for (k = 0; k < nlay; k++)
{
// Olson & Kunasz (1987) parameters
del__dff_l[id*nlay + k] = dtau__dff_l[id * nlay + k] / uarr[g];
edel__dff_l[id * nlay + k] = exp((double)(-del__dff_l[id * nlay + k]));
e0i__dff_l[id * nlay + k] = 1.0 - edel__dff_l[id * nlay + k];
e1i__dff_l[id * nlay + k] = del__dff_l[id * nlay + k] - e0i__dff_l[id * nlay + k];
Am__dff_l[id * nlay + k] = e0i__dff_l[id * nlay + k] - e1i__dff_l[id * nlay + k] / del__dff_l[id * nlay + k]; // Am[k] = Gp[k], just indexed differently
Bm__dff_l[id * nlay + k] = e1i__dff_l[id * nlay + k] / del__dff_l[id * nlay + k]; // Bm[k] = Bp[k], just indexed differently
}
        // Perform downward loop first
// Top boundary condition
lw_down_g__dff_l[0] = 0.0;
for (k = 0; k < nlay; k++)
{
lw_down_g__dff_l[id * nlay + k + 1] = lw_down_g__dff_l[id * nlay + k] * edel__dff_l[id * nlay + k] + Am__dff_l[id * nlay + k] * be__df_e[id * nlay1 + k] + Bm__dff_l[id * nlay + k] * be__df_e[id * nlay1 + k + 1]; // TS intensity
}
        // Perform upward loop
// Lower boundary condition
lw_up_g__dff_l[id * nlay1 + nlay1 - 1] = be__df_e[id * nlay1 + nlay1 - 1];
for (k = nlay - 1; k > -1; k--)
{
lw_up_g__dff_l[id * nlay + k] = lw_up_g__dff_l[id * nlay + k + 1] * edel__dff_l[id * nlay + k] +
Bm__dff_l[id * nlay + k] * be__df_e[id * nlay1 + k] + Am__dff_l[id * nlay + k] * be__df_e[id * nlay1 + k + 1]; // TS intensity
}
// Sum up flux arrays with Gauss weights and points
for (k = 0; k < nlay1; k++)
{
lw_down__df_e[id * nlay1 + k] = lw_down__df_e[id * nlay1 + k] + lw_down_g__dff_l[id * nlay + k] * w[g] * uarr[g];
lw_up__df_e[id * nlay1 + k] = lw_up__df_e[id * nlay1 + k] + lw_up_g__dff_l[id * nlay + k] * w[g] * uarr[g];
}
}
for (k = 0; k < nlay1; k++)
{
lw_down__df_e[id * nlay1 + k] = twopi * lw_down__df_e[id * nlay1 + k];
lw_up__df_e[id * nlay1 + k] = twopi * lw_up__df_e[id * nlay1 + k];
}
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
__device__ void lw_grey_updown_poly(int nlay, int nlay1, double* be__df_e,
double* tau_IRe__df_e, double* lw_up__df_e,
double* lw_down__df_e, double* dtau__dff_l, double* del__dff_l,
double* edel__dff_l, double* e0i__dff_l, double* e1i__dff_l,
double* e2i__dff_l, double* Am__dff_l, double* Bm__dff_l,
double* Gm__dff_l, double* lw_up_g__dff_l, double* lw_down_g__dff_l) {
// dependencies
//// expll -> math
//// powll -> math
//// main_parameters::nlay1
//// main_parameters::nlay
//// constants::gauss_ng
//// constants::twopi
const double pi = atan((double)(1)) * 4;
const double twopi = 2.0 * pi;
// Work variables and arrays
int k, g;
//Gauss quadrature variables
const int gauss_ng = 2;
double uarr[gauss_ng];
double w[gauss_ng];
uarr[0] = 0.21132487;
uarr[1] = 0.78867513;
w[0] = 0.5;
w[1] = 0.5;
for (k = 0; k < nlay; k++)
{
dtau__dff_l[k] = (tau_IRe__df_e[k + 1] - tau_IRe__df_e[k]);
}
// Zero the flux arrays
for (k = 0; k < nlay1; k++)
{
lw_up__df_e[k] = 0.0;
lw_down__df_e[k] = 0.0;
}
// Start loops to integrate in mu space
for (g = 0; g < gauss_ng; g++)
{
// Prepare loop
for (k = 0; k < nlay; k++)
{
// Olson & Kunasz (1987) parameters
del__dff_l[k] = dtau__dff_l[k] / uarr[g];
edel__dff_l[k] = exp((double)(-del__dff_l[k]));
e0i__dff_l[k] = ((double)(1.0)) - edel__dff_l[k];
e1i__dff_l[k] = del__dff_l[k] - e0i__dff_l[k];
e2i__dff_l[k] = pow((double)(del__dff_l[k]), 2) - 2.0 * e1i__dff_l[k];
}
for (k = 0; k < nlay; k++) {
// For boundary conditions assume linear interpolation at edges
			if (k == 0 || k == nlay - 1) // first and last layers fall back to the linear form
{
Am__dff_l[k] = e0i__dff_l[k] - e1i__dff_l[k] / del__dff_l[k]; // Am[k] = Gp[k], just indexed differently
Bm__dff_l[k] = e1i__dff_l[k] / del__dff_l[k]; // Bm[k] = Bp[k], just indexed differently
Gm__dff_l[k] = 0.0;// Gm(k) = Ap(k)
}
else
{
Am__dff_l[k] = e0i__dff_l[k] + (e2i__dff_l[k] - (del__dff_l[k + 1] + 2.0 * del__dff_l[k]) * e1i__dff_l[k]) / (del__dff_l[k] * (del__dff_l[k + 1] + del__dff_l[k])); // Am[k] = Gp[k], just indexed differently
Bm__dff_l[k] = ((del__dff_l[k + 1] + del__dff_l[k]) * e1i__dff_l[k] - e2i__dff_l[k]) / (del__dff_l[k] * del__dff_l[k + 1]); // Bm[k] = Bp[k], just indexed differently
Gm__dff_l[k] = (e2i__dff_l[k] - del__dff_l[k] * e1i__dff_l[k]) / (del__dff_l[k + 1] * (del__dff_l[k + 1] + del__dff_l[k])); // Gm[k] = Ap[k], just indexed differently
}
}
		// Perform downward loop first
// Top boundary condition
lw_down_g__dff_l[0] = 0.0;
lw_down_g__dff_l[1] = lw_down_g__dff_l[0] * edel__dff_l[0] + Am__dff_l[0] * be__df_e[0] + Bm__dff_l[0] * be__df_e[1];
for (k = 1; k < nlay - 1; k++)
{
lw_down_g__dff_l[k + 1] = lw_down_g__dff_l[k] * edel__dff_l[k] + Am__dff_l[k] * be__df_e[k] + Bm__dff_l[k] * be__df_e[k + 1] +
Gm__dff_l[k] * be__df_e[k - 1]; // TS intensity
}
lw_down_g__dff_l[nlay1 - 1] = lw_down_g__dff_l[nlay - 1] * edel__dff_l[nlay - 1] + Am__dff_l[nlay - 1] * be__df_e[nlay - 1] + Bm__dff_l[nlay - 1] * be__df_e[nlay1 - 1];
		// Perform upward loop
// Lower boundary condition
lw_up_g__dff_l[nlay1 - 1] = be__df_e[nlay1 - 1];
lw_up_g__dff_l[nlay - 1] = lw_up_g__dff_l[nlay1 - 1] * edel__dff_l[nlay - 1] + Bm__dff_l[nlay - 1] * be__df_e[nlay - 1] + Am__dff_l[nlay - 1] * be__df_e[nlay1 - 1];
for (k = nlay - 2; k > 0; k--)
{
lw_up_g__dff_l[k] = lw_up_g__dff_l[k + 1] * edel__dff_l[k] + Gm__dff_l[k] * be__df_e[k - 1] + Bm__dff_l[k] * be__df_e[k] + Am__dff_l[k] * be__df_e[k + 1]; // TS intensity
}
lw_up_g__dff_l[0] = lw_up_g__dff_l[1] * edel__dff_l[0] + Bm__dff_l[0] * be__df_e[0] + Am__dff_l[0] * be__df_e[1];
// Sum up flux arrays with Gauss weights and points
for (k = 0; k < nlay1; k++)
{
lw_down__df_e[k] = lw_down__df_e[k] + lw_down_g__dff_l[k] * w[g] * uarr[g];
lw_up__df_e[k] = lw_up__df_e[k] + lw_up_g__dff_l[k] * w[g] * uarr[g];
}
}
for (k = 0; k < nlay1; k++)
{
lw_down__df_e[k] = twopi * lw_down__df_e[k];
lw_up__df_e[k] = twopi * lw_up__df_e[k];
}
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
__device__ void Kitzmann_TS_noscatt(int id, const int nlay, const int nlay1, double *Tl,
double *pl, double *pe,
double *k_V_l, double *k_IR_l,
double *Beta_V, double *Beta, double *&net_F,
double mu_s, double Finc, double Fint, double grav, double AB,
double *tau_Ve__df_e, double *tau_IRe__df_e, double *Te__df_e, double *be__df_e, //Kitzman working variables
double *sw_down__df_e, double *sw_down_b__df_e, double *sw_up__df_e,
double *lw_down__df_e, double *lw_down_b__df_e,
double *lw_up__df_e, double *lw_up_b__df_e,
double *lw_net__df_e, double *sw_net__df_e,
double *dtau__dff_l, double *del__dff_l, // lw_grey_updown_linear working variables
double *edel__dff_l, double *e0i__dff_l, double *e1i__dff_l,
double *Am__dff_l, double *Bm__dff_l,
double *lw_up_g__dff_l, double *lw_down_g__dff_l) {
	// dependencies
//// powll -> include math
//// log10f -> include math
//// nlay -> namespace main_parameters
//// nlay1 -> namespace main_parameters
//// linear_log_interp -> namespace Kitsmann
//// tau_struct -> namespace Kitsmann
//// sw_grey_down -> namespace Kitsmann
//// lw_grey_updown_linear -> namespace Kitsmann
//// (lw_grey_updown_poly) -> namespace Kitsmann
const double pi = atan((double)(1)) * 4;
const double twopi = 2.0 * pi;
const double StBC = 5.670374419e-8;
// work variables
double Finc_B;
// start operation
// Find temperature at layer edges through linear interpolation and extrapolation
for (int i = 1; i < nlay; i++)
{
linear_log_interp(pe[id*nlay + i], pl[id * nlay + i - 1], pl[id * nlay + i], Tl[id * nlay + i - 1], Tl[id * nlay + i], Te__df_e[id * nlay + i]);
}
Te__df_e[id * nlay + 0] = Tl[id * nlay + 0] + (pe[id * nlay + 0] - pe[id * nlay + 1]) /
(pl[id * nlay + 0] - pe[id * nlay + 1]) * (Tl[id * nlay + 0] - Te__df_e[id * nlay + 1]);
Te__df_e[id * nlay1 + nlay1 - 1] = Tl[id * nlay + nlay - 1] + (pe[id * nlay1 + nlay1 - 1] - pe[id * nlay + nlay - 1]) /
(pl[id * nlay + nlay - 1] - pe[id * nlay + nlay - 1]) *
(Tl[id * nlay + nlay - 1] - Te__df_e[id * nlay + nlay - 1]);
// Shortwave fluxes
for (int i = 0; i < nlay1; i++)
{
sw_down__df_e[id * nlay1 + i] = 0.0;
sw_up__df_e[id * nlay1 + i] = 0.0;
}
for (int channel = 0; channel < 3; channel++)
{
// Find the opacity structure
tau_struct(id, nlay, grav, pe, k_V_l, channel, tau_Ve__df_e);
// Incident flux in band
Finc_B = Finc * Beta_V[id * 3 + channel];
// Calculate sw flux
sw_grey_down(id, nlay, Finc_B, tau_Ve__df_e, sw_down_b__df_e, mu_s);
// Sum all bands
for (int i = 0; i < nlay1; i++)
{
sw_down__df_e[id * nlay1 + i] = sw_down__df_e[id * nlay1 + i] + sw_down_b__df_e[id * nlay1 + i];
}
}
// Long wave two-stream fluxes
for (int i = 0; i < nlay1; i++)
{
lw_down__df_e[id * nlay1 + i] = 0.0;
lw_up__df_e[id * nlay1 + i] = 0.0;
}
for (int channel = 0; channel < 2; channel++)
{
// Find the opacity structure
tau_struct(id,nlay, grav, pe, k_IR_l, channel, tau_IRe__df_e);
// Blackbody fluxes (note divide by pi for correct units)
for (int i = 0; i < nlay1; i++)
{
be__df_e[id * nlay1 + i] = StBC * pow((double)(Te__df_e[id * nlay1 + i]), ((double)4.0)) / pi * Beta[id * 2 + channel];
}
// Calculate lw flux
lw_grey_updown_linear(id,nlay, nlay1, be__df_e, tau_IRe__df_e, lw_up_b__df_e, lw_down_b__df_e,
dtau__dff_l, del__dff_l, edel__dff_l, e0i__dff_l, e1i__dff_l,
Am__dff_l, Bm__dff_l, lw_up_g__dff_l, lw_down_g__dff_l);
//lw_grey_updown_poly(nlay, nlay1, be__df_e, tau_IRe__df_e, lw_up_b__df_e, lw_down_b__df_e,
//dtau__dff_l, del__dff_l, edel__dff_l, e0i__dff_l, e1i__dff_l,
// e2i__dff_l, Am__dff_l, Bm__dff_l, Gm__dff_l, lw_up_g__dff_l, lw_down_g__dff_l);
// Sum all bands
for (int i = 0; i < nlay1; i++)
{
lw_up__df_e[id * nlay1 + i] = lw_up__df_e[id * nlay1 + i] + lw_up_b__df_e[id * nlay1 + i];
lw_down__df_e[id * nlay1 + i] = lw_down__df_e[id * nlay1 + i] + lw_down_b__df_e[id * nlay1 + i];
}
}
// Net fluxes
for (int i = 0; i < nlay1; i++)
{
lw_net__df_e[id * nlay1 + i] = lw_up__df_e[id * nlay1 + i] - lw_down__df_e[id * nlay1 + i];
sw_net__df_e[id * nlay1 + i] = sw_up__df_e[id * nlay1 + i] - sw_down__df_e[id * nlay1 + i];
net_F[id * nlay1 + i] = lw_net__df_e[id * nlay1 + i] + sw_net__df_e[id * nlay1 + i];
}
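	// Lower boundary condition: the net flux at the bottom edge is pinned to the supplied internal flux Fint.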
net_F[id * nlay1 + nlay1 - 1] = Fint;
}
#endif // picket_fence_cuda
|
4fd62d99f3c41593e0b7b245afe001cb376b4b9c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include "Indice2D.h"
//#include "cudaTools.h"
//#include "Device.h"
//#include "MandelbrotMath.h"
//
//#include "IndiceTools_GPU.h"
//using namespace gpu;
//
//// Note: the choice of the file name is important!
//// VagueDevice.cu and not Vague.cu
//// In the latter case there is a linkage problem, because the .cu file has the same name as a (host) .cpp file
//// Device (or anything else) was therefore appended so that the names are different!
//
///*----------------------------------------------------------------------*\
// |* Declaration *|
// \*---------------------------------------------------------------------*/
//
///*--------------------------------------*\
// |* Imported *|
// \*-------------------------------------*/
//
///*--------------------------------------*\
// |* Public *|
// \*-------------------------------------*/
//
//__global__ void mandelbrot(uchar4* ptrDevPixels,uint w, uint h,float t);
//
///*--------------------------------------*\
// |* Private *|
// \*-------------------------------------*/
//
///*----------------------------------------------------------------------*\
// |* Implementation *|
// \*---------------------------------------------------------------------*/
//
///*--------------------------------------*\
// |* Public *|
// \*-------------------------------------*/
//
//__global__ void mandelbrot(uchar4* ptrDevPixels, uint w, uint h, float t)
// {
// MandelbrotMath mandelbrotMath = MandelbrotMath(w);
//
// const int WH=w*h;
// const int TID = Indice2D::tid();
// const int NB_THREAD = Indice2D::nbThread();
//
// int i; // in [0,h[
// int j; // in [0,w[
//
// int s = TID; // in [0,...
// while (s < WH)
// {
// IndiceTools::toIJ(s, w, &i, &j); // update (i, j)
//
// mandelbrotMath.colorIJ(&ptrDevPixels[s],i, j, t); // update ptrDevPixels[s]
//
// s += NB_THREAD;
// }
// }
//
///*--------------------------------------*\
// |* Private *|
// \*-------------------------------------*/
//
///*----------------------------------------------------------------------*\
// |* End *|
// \*---------------------------------------------------------------------*/
//
| 4fd62d99f3c41593e0b7b245afe001cb376b4b9c.cu | //#include "Indice2D.h"
//#include "cudaTools.h"
//#include "Device.h"
//#include "MandelbrotMath.h"
//
//#include "IndiceTools_GPU.h"
//using namespace gpu;
//
//// Note: the choice of the file name is important!
//// VagueDevice.cu and not Vague.cu
//// In the latter case there is a linkage problem, because the .cu file has the same name as a (host) .cpp file
//// Device (or anything else) was therefore appended so that the names are different!
//
///*----------------------------------------------------------------------*\
// |* Declaration *|
// \*---------------------------------------------------------------------*/
//
///*--------------------------------------*\
// |* Imported *|
// \*-------------------------------------*/
//
///*--------------------------------------*\
// |* Public *|
// \*-------------------------------------*/
//
//__global__ void mandelbrot(uchar4* ptrDevPixels,uint w, uint h,float t);
//
///*--------------------------------------*\
// |* Private *|
// \*-------------------------------------*/
//
///*----------------------------------------------------------------------*\
// |* Implementation *|
// \*---------------------------------------------------------------------*/
//
///*--------------------------------------*\
// |* Public *|
// \*-------------------------------------*/
//
//__global__ void mandelbrot(uchar4* ptrDevPixels, uint w, uint h, float t)
// {
// MandelbrotMath mandelbrotMath = MandelbrotMath(w);
//
// const int WH=w*h;
// const int TID = Indice2D::tid();
// const int NB_THREAD = Indice2D::nbThread();
//
// int i; // in [0,h[
// int j; // in [0,w[
//
// int s = TID; // in [0,...
// while (s < WH)
// {
// IndiceTools::toIJ(s, w, &i, &j); // update (i, j)
//
// mandelbrotMath.colorIJ(&ptrDevPixels[s],i, j, t); // update ptrDevPixels[s]
//
// s += NB_THREAD;
// }
// }
//
///*--------------------------------------*\
// |* Private *|
// \*-------------------------------------*/
//
///*----------------------------------------------------------------------*\
// |* End *|
// \*---------------------------------------------------------------------*/
//
|
0cbef204d8be95a05d13e94b97f367f338679f3f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* \brief gemm: C = A * B.
*/
#include "cuda_util.h"
// Initialize the input data.
void GenMatrix(const int height, const int width, float *mat) {
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
mat[i*width + j] = (float)rand() / RAND_MAX + (float)rand() / (RAND_MAX*RAND_MAX);
}
}
}
// Just for checking the result.
float GetMean(const float* mat, const int height, const int width) {
int num = height * width;
float total = 0;
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
total += mat[i*width + j];
}
}
return total / num;
}
// Just for checking the result too.
void MatrixPrint(const float* mat, const int height, const int width) {
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
std::cout << mat[i*width + j] << ",";
}
std::cout << std::endl;
}
}
// CPU version 1: 1583 ms
// Normal version in cpu as a reference
void MatrixMulCPUv1(const int M, const int N, const int K, const float ALPHA,
const float *A, const int lda,
const float *B, const int ldb,
float *C, const int ldc) {
int i, j, k;
memset(C, 0, sizeof(float) * ldc * M);
for (i = 0; i < M; ++i) {
for (k = 0; k < K; ++k) {
register float A_PART = ALPHA*A[i*lda + k];
for (j = 0; j < N; ++j) {
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
}
}
}
// CPU version 2: 3389 ms
// Block based matrix multiplication in cpu.
void MatrixMulCPUv2(const int M, const int N, const int K, const float ALPHA,
const float *A, const int lda,
const float *B, const int ldb,
float *C, const int ldc) {
int bi, bj, bk;
int i, j, k;
const int block_size = 32;
int block_num_M = M / block_size;
int block_num_N = N / block_size;
int block_num_K = K / block_size;
memset(C, 0, sizeof(float) * ldc * M);
// Loop over all of the blocks.
for (bi = 0; bi < block_num_M; ++bi) {
for (bj = 0; bj < block_num_N; ++bj) {
for (bk = 0; bk < block_num_K; ++bk) {
// Loop over all of the elements in a block.
for (i = bi*block_size; i < (bi + 1)*block_size; ++i) {
for (k = bk*block_size; k < (bk + 1)*block_size; ++k) {
for (j = bj*block_size; j < (bj + 1)*block_size; ++j) {
C[i*ldc + j] += A[i*lda + k] * B[k*ldb + j];
}
}
}
}
}
}
}
// CUDA version 1: 72 ms
// It is rewritten from MatrixMulCPUv2.
// bi,bj can be replaced by blockIdx.x,blockIdx.y
// i,j can be replaced by threadIdx.x,threadIdx.y
// so just bk and k are left. The grid and block are sized from the destination matrix.
//
// \ C[ty, tx] = A[ty, k] * B[k, tx]
// for bk -> bk_num_per_grid
// for k -> k_num_per_block
//      C[bi*bs + ty, bj*bs + tx] = A[bi*bs + ty, bk*bs + k] * B[bk*bs + k, bj*bs + tx]
__global__ void MatrixMulKernelv1(const int M, const int N, const int K, const float ALPHA,
const float *A, const int lda,
const float *B, const int ldb,
float *C, const int ldc) {
	// Derived from MatrixMulCPUv2 by dropping the per-block thread loops (i/j) and the block loops (bi/bj);
	// each thread runs in parallel, so those loops can simply be omitted.
//float c_sub_acc = 0;
//for (int bk = 0; bk < K / BLOCK_SIZE; bk++) {
// for (int k = bk * BLOCK_SIZE; k < (bk + 1)* BLOCK_SIZE; k++) {
// c_sub_acc += A[(blockIdx.y * BLOCK_SIZE + threadIdx.y) * lda + k] *
// B[k * ldb + (blockIdx.x * BLOCK_SIZE + threadIdx.x)];
// }
//}
//C[(blockIdx.y * BLOCK_SIZE + threadIdx.y) * ldc + (blockIdx.x * BLOCK_SIZE + threadIdx.x)] += c_sub_acc;
	// After adjusting the indices as below, each thread computes one element of the output matrix.
for (int i = blockIdx.y * blockDim.y + threadIdx.y,
j = blockIdx.x * blockDim.x + threadIdx.x;
i < M && j < N;
i += gridDim.y * blockDim.y,
j += gridDim.x * blockDim.x) {
float c_sub_acc = 0;
for (int k = 0; k < K; k++) {
c_sub_acc += A[i * lda + k] * B[k * ldb + j];
}
C[i * ldc + j] = c_sub_acc;
}
}
// CUDA version 2.
// Use shared memory.
// Block based. The length and width can only be an integral multiple of BLOCK_SIZE.
template <int BLOCK_SIZE>
__global__ void MatrixMulKernelv2(const int M, const int N, const int K, const float ALPHA,
const float *A, const int lda,
const float *B, const int ldb,
float *C, const int ldc) {
for (int i = blockIdx.y * blockDim.y + threadIdx.y,
j = blockIdx.x * blockDim.x + threadIdx.x;
i < M && j < N;
i += gridDim.y * blockDim.y,
j += gridDim.x * blockDim.x) {
__shared__ float a_shared[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float b_shared[BLOCK_SIZE][BLOCK_SIZE];
float c_sub_acc = 0;
// For blocks in grid.
for (int bk = 0; bk < K / BLOCK_SIZE; bk++) {
a_shared[threadIdx.y][threadIdx.x] = A[i * lda + (bk * BLOCK_SIZE + threadIdx.x)];
b_shared[threadIdx.y][threadIdx.x] = B[(bk * BLOCK_SIZE + threadIdx.y) * ldb + j];
// Wait for data to complete loading to Shared memory.
__syncthreads();
// For elements in a block.
for (int k = 0; k < BLOCK_SIZE; k++) {
c_sub_acc += a_shared[threadIdx.y][k] * b_shared[k][threadIdx.x];
}
			// Make sure every thread has finished using the current tile in shared memory
			// before the next tile is loaded.
__syncthreads();
}
C[i * ldc + j] += c_sub_acc;
}
}
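// CUDA version 3 (appears unfinished): each thread stages one element of A and B into shared
// memory, but the inner-product loop is commented out, so C never accumulates any products.
// Versions 1 and 2 above are the working kernels.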
template <int BLOCK_SIZE>
__global__ void MatrixMulKernelv3(const int M, const int N, const int K, const float ALPHA,
const float *A, const int lda,
const float *B, const int ldb,
float *C, const int ldc) {
__shared__ float a_shared[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float b_shared[BLOCK_SIZE][BLOCK_SIZE];
for (int i = blockIdx.y * blockDim.y + threadIdx.y,
j = blockIdx.x * blockDim.x + threadIdx.x;
i < M && j < N;
i += gridDim.y * blockDim.y,
j += gridDim.x * blockDim.x) {
a_shared[threadIdx.y][threadIdx.x] = A[i * lda + j];
b_shared[threadIdx.y][threadIdx.x] = B[i * ldb + j];
__syncthreads();
float c_sub_acc = 0;
//for (int k = 0; k < BLOCK_SIZE; k++) {
// c_sub_acc += a_shared[threadIdx.y][k] * b_shared[k][threadIdx.x];
//}
//__syncthreads();
C[i * ldc + j] += c_sub_acc;
}
}
//#define TEST_CUDA_V1
float MatrixMulCUDA(const int M, const int N, const int K, const float ALPHA,
const float *A, const int lda,
const float *B, const int ldb,
float *C, const int ldc) {
cjmcv_cuda_util::GpuTimer gpu_timer;
const int block_side_size = 32;
dim3 threads_per_block(block_side_size, block_side_size);
dim3 blocks_per_grid((N + threads_per_block.x - 1) / threads_per_block.x, (M + threads_per_block.y - 1) / threads_per_block.y);
// Warm up.
MatrixMulKernelv1<< <blocks_per_grid, threads_per_block >> >
(M, N, K, 1.0, A, lda, B, ldb, C, ldc);
hipMemset(C, 0, sizeof(float) * M * N);
// Record the start event
gpu_timer.Start();
#ifdef TEST_CUDA_V1
MatrixMulKernelv1<< <blocks_per_grid, threads_per_block >> >
(M, N, K, 1.0, A, lda, B, ldb, C, ldc);
#else
MatrixMulKernelv2<block_side_size> << <blocks_per_grid, threads_per_block >> >
(M, N, K, 1.0, A, lda, B, ldb, C, ldc);
#endif
// Record the stop event
gpu_timer.Stop();
return gpu_timer.ElapsedMillis();
}
int main() {
int ret = cjmcv_cuda_util::InitEnvironment(0);
if (ret != 0) {
printf("Failed to initialize the environment for cuda.");
return -1;
}
int height_a = 2560, width_a = 800;
int height_b = 800, width_b = 3200;
if (width_a != height_b) {
printf("width_a should be equal to height_b.\n");
return 1;
}
const int mem_size_a = sizeof(float) * height_a * width_a;
const int mem_size_b = sizeof(float) * height_b * width_b;
const int mem_size_c = sizeof(float) * height_a * width_b;
float *h_a = (float *)malloc(mem_size_a);
float *h_b = (float *)malloc(mem_size_b);
float *h_c = (float *)malloc(mem_size_c);
if (h_a == NULL || h_b == NULL || h_c == NULL) {
printf("Fail to malloc.\n");
return 1;
}
// Initialize
srand(0);
GenMatrix(height_a, width_a, h_a);
GenMatrix(height_b, width_b, h_b);
// CPU
time_t t = clock();
MatrixMulCPUv1(height_a, width_b, width_a, 1.0, h_a, width_a,h_b, width_b, h_c, width_b);
printf("In cpu version 1, msec_total = %lld, mean = %f\n", clock() - t, GetMean(h_c, height_a, width_b));
//MatrixPrint(h_c, height_a, width_b);
t = clock();
MatrixMulCPUv2(height_a, width_b, width_a, 1.0, h_a, width_a, h_b, width_b, h_c, width_b);
printf("In cpu version 2, msec_total = %lld, mean = %f\n", clock() - t, GetMean(h_c, height_a, width_b));
//MatrixPrint(h_c, height_a, width_b);
// GPU
// Allocate memory in host.
float msec_total;
float *d_a, *d_b, *d_c;
CUDA_CHECK(hipMalloc((void **)&d_a, mem_size_a));
CUDA_CHECK(hipMalloc((void **)&d_b, mem_size_b));
CUDA_CHECK(hipMalloc((void **)&d_c, mem_size_c));
// Copy host memory to device
CUDA_CHECK(hipMemcpy(d_a, h_a, mem_size_a, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(d_b, h_b, mem_size_b, hipMemcpyHostToDevice));
msec_total = MatrixMulCUDA(height_a, width_b, width_a, 1.0, d_a, width_a, d_b, width_b, d_c, width_b);
// Copy memory back to host.
CUDA_CHECK(hipMemcpy(h_c, d_c, mem_size_c, hipMemcpyDeviceToHost));
#ifdef TEST_CUDA_V1
printf("In gpu version 1, msec_total = %f, mean = %f\n", msec_total, GetMean(h_c, height_a, width_b));
#else
printf("In gpu version 2, msec_total = %f, mean = %f\n", msec_total, GetMean(h_c, height_a, width_b));
#endif
//MatrixPrint(h_c, height_a, width_b);
free(h_a);
free(h_b);
free(h_c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
cjmcv_cuda_util::CleanUpEnvironment();
return 0;
}
| 0cbef204d8be95a05d13e94b97f367f338679f3f.cu | /*!
* \brief gemm: C = A * B.
*/
#include "cuda_util.h"
// Initialize the input data.
void GenMatrix(const int height, const int width, float *mat) {
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
mat[i*width + j] = (float)rand() / RAND_MAX + (float)rand() / (RAND_MAX*RAND_MAX);
}
}
}
// Just for checking the result.
float GetMean(const float* mat, const int height, const int width) {
int num = height * width;
float total = 0;
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
total += mat[i*width + j];
}
}
return total / num;
}
// Just for checking the result too.
void MatrixPrint(const float* mat, const int height, const int width) {
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
std::cout << mat[i*width + j] << ",";
}
std::cout << std::endl;
}
}
// CPU version 1: 1583 ms
// Normal version in cpu as a reference
void MatrixMulCPUv1(const int M, const int N, const int K, const float ALPHA,
const float *A, const int lda,
const float *B, const int ldb,
float *C, const int ldc) {
int i, j, k;
memset(C, 0, sizeof(float) * ldc * M);
for (i = 0; i < M; ++i) {
for (k = 0; k < K; ++k) {
register float A_PART = ALPHA*A[i*lda + k];
for (j = 0; j < N; ++j) {
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
}
}
}
// CPU version 2: 3389 ms
// Block based matrix multiplication in cpu.
void MatrixMulCPUv2(const int M, const int N, const int K, const float ALPHA,
const float *A, const int lda,
const float *B, const int ldb,
float *C, const int ldc) {
int bi, bj, bk;
int i, j, k;
const int block_size = 32;
int block_num_M = M / block_size;
int block_num_N = N / block_size;
int block_num_K = K / block_size;
memset(C, 0, sizeof(float) * ldc * M);
// Loop over all of the blocks.
for (bi = 0; bi < block_num_M; ++bi) {
for (bj = 0; bj < block_num_N; ++bj) {
for (bk = 0; bk < block_num_K; ++bk) {
// Loop over all of the elements in a block.
for (i = bi*block_size; i < (bi + 1)*block_size; ++i) {
for (k = bk*block_size; k < (bk + 1)*block_size; ++k) {
for (j = bj*block_size; j < (bj + 1)*block_size; ++j) {
C[i*ldc + j] += A[i*lda + k] * B[k*ldb + j];
}
}
}
}
}
}
}
// CUDA version 1: 72 ms
// It is rewritten from MatrixMulCPUv2.
// bi,bj can be replaced by blockIdx.x,blockIdx.y
// i,j can be replaced by threadIdx.x,threadIdx.y
// so just bk and k are left. The grid and block are sized from the destination matrix.
//
// \ C[ty, tx] = A[ty, k] * B[k, tx]
// for bk -> bk_num_per_grid
// for k -> k_num_per_block
//      C[bi*bs + ty, bj*bs + tx] = A[bi*bs + ty, bk*bs + k] * B[bk*bs + k, bj*bs + tx]
__global__ void MatrixMulKernelv1(const int M, const int N, const int K, const float ALPHA,
const float *A, const int lda,
const float *B, const int ldb,
float *C, const int ldc) {
	// Derived from MatrixMulCPUv2 by dropping the per-block thread loops (i/j) and the block loops (bi/bj);
	// each thread runs in parallel, so those loops can simply be omitted.
//float c_sub_acc = 0;
//for (int bk = 0; bk < K / BLOCK_SIZE; bk++) {
// for (int k = bk * BLOCK_SIZE; k < (bk + 1)* BLOCK_SIZE; k++) {
// c_sub_acc += A[(blockIdx.y * BLOCK_SIZE + threadIdx.y) * lda + k] *
// B[k * ldb + (blockIdx.x * BLOCK_SIZE + threadIdx.x)];
// }
//}
//C[(blockIdx.y * BLOCK_SIZE + threadIdx.y) * ldc + (blockIdx.x * BLOCK_SIZE + threadIdx.x)] += c_sub_acc;
	// After adjusting the indices as below, each thread is responsible for one element of the output matrix.
for (int i = blockIdx.y * blockDim.y + threadIdx.y,
j = blockIdx.x * blockDim.x + threadIdx.x;
i < M && j < N;
i += gridDim.y * blockDim.y,
j += gridDim.x * blockDim.x) {
float c_sub_acc = 0;
for (int k = 0; k < K; k++) {
c_sub_acc += A[i * lda + k] * B[k * ldb + j];
}
C[i * ldc + j] = c_sub_acc;
}
}
// CUDA version 2.
// Use shared memory.
// Block based. The length and width can only be an integral multiple of BLOCK_SIZE.
template <int BLOCK_SIZE>
__global__ void MatrixMulKernelv2(const int M, const int N, const int K, const float ALPHA,
const float *A, const int lda,
const float *B, const int ldb,
float *C, const int ldc) {
for (int i = blockIdx.y * blockDim.y + threadIdx.y,
j = blockIdx.x * blockDim.x + threadIdx.x;
i < M && j < N;
i += gridDim.y * blockDim.y,
j += gridDim.x * blockDim.x) {
__shared__ float a_shared[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float b_shared[BLOCK_SIZE][BLOCK_SIZE];
float c_sub_acc = 0;
// For blocks in grid.
for (int bk = 0; bk < K / BLOCK_SIZE; bk++) {
a_shared[threadIdx.y][threadIdx.x] = A[i * lda + (bk * BLOCK_SIZE + threadIdx.x)];
b_shared[threadIdx.y][threadIdx.x] = B[(bk * BLOCK_SIZE + threadIdx.y) * ldb + j];
// Wait for data to complete loading to Shared memory.
__syncthreads();
// For elements in a block.
for (int k = 0; k < BLOCK_SIZE; k++) {
c_sub_acc += a_shared[threadIdx.y][k] * b_shared[k][threadIdx.x];
}
			// Make sure every thread has finished using the current tile in shared memory
			// before the next tile is loaded.
__syncthreads();
}
C[i * ldc + j] += c_sub_acc;
}
}
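// CUDA version 3 (appears unfinished): each thread stages one element of A and B into shared
// memory, but the inner-product loop is commented out, so C never accumulates any products.
// Versions 1 and 2 above are the working kernels.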
template <int BLOCK_SIZE>
__global__ void MatrixMulKernelv3(const int M, const int N, const int K, const float ALPHA,
const float *A, const int lda,
const float *B, const int ldb,
float *C, const int ldc) {
__shared__ float a_shared[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float b_shared[BLOCK_SIZE][BLOCK_SIZE];
for (int i = blockIdx.y * blockDim.y + threadIdx.y,
j = blockIdx.x * blockDim.x + threadIdx.x;
i < M && j < N;
i += gridDim.y * blockDim.y,
j += gridDim.x * blockDim.x) {
a_shared[threadIdx.y][threadIdx.x] = A[i * lda + j];
b_shared[threadIdx.y][threadIdx.x] = B[i * ldb + j];
__syncthreads();
float c_sub_acc = 0;
//for (int k = 0; k < BLOCK_SIZE; k++) {
// c_sub_acc += a_shared[threadIdx.y][k] * b_shared[k][threadIdx.x];
//}
//__syncthreads();
C[i * ldc + j] += c_sub_acc;
}
}
//#define TEST_CUDA_V1
float MatrixMulCUDA(const int M, const int N, const int K, const float ALPHA,
const float *A, const int lda,
const float *B, const int ldb,
float *C, const int ldc) {
cjmcv_cuda_util::GpuTimer gpu_timer;
const int block_side_size = 32;
dim3 threads_per_block(block_side_size, block_side_size);
dim3 blocks_per_grid((N + threads_per_block.x - 1) / threads_per_block.x, (M + threads_per_block.y - 1) / threads_per_block.y);
// Warm up.
MatrixMulKernelv1<< <blocks_per_grid, threads_per_block >> >
(M, N, K, 1.0, A, lda, B, ldb, C, ldc);
cudaMemset(C, 0, sizeof(float) * M * N);
// Record the start event
gpu_timer.Start();
#ifdef TEST_CUDA_V1
MatrixMulKernelv1<< <blocks_per_grid, threads_per_block >> >
(M, N, K, 1.0, A, lda, B, ldb, C, ldc);
#else
MatrixMulKernelv2<block_side_size> << <blocks_per_grid, threads_per_block >> >
(M, N, K, 1.0, A, lda, B, ldb, C, ldc);
#endif
// Record the stop event
gpu_timer.Stop();
return gpu_timer.ElapsedMillis();
}
int main() {
int ret = cjmcv_cuda_util::InitEnvironment(0);
if (ret != 0) {
printf("Failed to initialize the environment for cuda.");
return -1;
}
int height_a = 2560, width_a = 800;
int height_b = 800, width_b = 3200;
if (width_a != height_b) {
printf("width_a should be equal to height_b.\n");
return 1;
}
const int mem_size_a = sizeof(float) * height_a * width_a;
const int mem_size_b = sizeof(float) * height_b * width_b;
const int mem_size_c = sizeof(float) * height_a * width_b;
float *h_a = (float *)malloc(mem_size_a);
float *h_b = (float *)malloc(mem_size_b);
float *h_c = (float *)malloc(mem_size_c);
if (h_a == NULL || h_b == NULL || h_c == NULL) {
printf("Fail to malloc.\n");
return 1;
}
// Initialize
srand(0);
GenMatrix(height_a, width_a, h_a);
GenMatrix(height_b, width_b, h_b);
// CPU
time_t t = clock();
MatrixMulCPUv1(height_a, width_b, width_a, 1.0, h_a, width_a,h_b, width_b, h_c, width_b);
printf("In cpu version 1, msec_total = %lld, mean = %f\n", clock() - t, GetMean(h_c, height_a, width_b));
//MatrixPrint(h_c, height_a, width_b);
t = clock();
MatrixMulCPUv2(height_a, width_b, width_a, 1.0, h_a, width_a, h_b, width_b, h_c, width_b);
printf("In cpu version 2, msec_total = %lld, mean = %f\n", clock() - t, GetMean(h_c, height_a, width_b));
//MatrixPrint(h_c, height_a, width_b);
// GPU
// Allocate memory in host.
float msec_total;
float *d_a, *d_b, *d_c;
CUDA_CHECK(cudaMalloc((void **)&d_a, mem_size_a));
CUDA_CHECK(cudaMalloc((void **)&d_b, mem_size_b));
CUDA_CHECK(cudaMalloc((void **)&d_c, mem_size_c));
// Copy host memory to device
CUDA_CHECK(cudaMemcpy(d_a, h_a, mem_size_a, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(d_b, h_b, mem_size_b, cudaMemcpyHostToDevice));
msec_total = MatrixMulCUDA(height_a, width_b, width_a, 1.0, d_a, width_a, d_b, width_b, d_c, width_b);
// Copy memory back to host.
CUDA_CHECK(cudaMemcpy(h_c, d_c, mem_size_c, cudaMemcpyDeviceToHost));
#ifdef TEST_CUDA_V1
printf("In gpu version 1, msec_total = %f, mean = %f\n", msec_total, GetMean(h_c, height_a, width_b));
#else
printf("In gpu version 2, msec_total = %f, mean = %f\n", msec_total, GetMean(h_c, height_a, width_b));
#endif
//MatrixPrint(h_c, height_a, width_b);
free(h_a);
free(h_b);
free(h_c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cjmcv_cuda_util::CleanUpEnvironment();
return 0;
}
|
c829e4ce25ddcc08acf3409157c96ec2b3d7c9c9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include "support.h"
#define NUM_LETTERS 26
#define MAX_PASS_LENGTH 11
#define MAX_LETTERS 8
int main(int argc, char *argv[]) {
Timer timer;
printf("Setting up the problem and allocating variables...\n");
startTime(&timer);
char * password;
char * found_password;
char * characters;
int * found_flag;
password = (char *) malloc(sizeof(char) * MAX_PASS_LENGTH);
found_password = (char *) malloc(sizeof(char) * MAX_PASS_LENGTH);
characters = (char *) malloc(sizeof(char) * (NUM_LETTERS+1));
found_flag = (int *) malloc(sizeof(int));
if(argc == 1) {
password = "hello";
} else if(argc == 2) {
password = argv[1];
}
characters = "abcdefghijklmnopqrstuvwxyz";
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
hipDeviceSynchronize();
printf("\nTrying to find password: %s\n\n", password);
printf("Launching CPU password finder...\n"); fflush(stdout);
startTime(&timer);
int i;
for(i=0; i < MAX_LETTERS; i++) {
uint64_t total_words = NUM_LETTERS;
int j;
for(j=0; j<i; j++) {
total_words *= NUM_LETTERS;
}
printf("Total number of words: %lu\n\n", total_words);
uint64_t curr_word = 0;
uint64_t k;
char word[MAX_PASS_LENGTH] = "";
int check = 0;
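		// Decode k as a base-26 number, most significant letter first, so every candidate
		// word of length i+1 is enumerated exactly once.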
for(k=0; k<total_words; k++) {
curr_word = k;
			// each candidate word has i+1 letters, matching total_words = 26^(i+1)
			for(j=0; j<=i; j++) {
				word[i-j] = characters[(curr_word % NUM_LETTERS)];
				curr_word /= NUM_LETTERS;
			}
word[i+1] = '\0';
check = 1;
for(j=0; j<i+1; j++) {
if(password[j] != word[j]){
check = 0;
}
}
			if(check && password[i+1] == '\0') { // also require the password length to match
printf("Password is found!\n");
printf("Found: %s\n", word);
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
return 0;
}
}
}
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
}
| c829e4ce25ddcc08acf3409157c96ec2b3d7c9c9.cu | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include "support.h"
#define NUM_LETTERS 26
#define MAX_PASS_LENGTH 11
#define MAX_LETTERS 8
int main(int argc, char *argv[]) {
Timer timer;
printf("Setting up the problem and allocating variables...\n");
startTime(&timer);
char * password;
char * found_password;
char * characters;
int * found_flag;
password = (char *) malloc(sizeof(char) * MAX_PASS_LENGTH);
found_password = (char *) malloc(sizeof(char) * MAX_PASS_LENGTH);
characters = (char *) malloc(sizeof(char) * (NUM_LETTERS+1));
found_flag = (int *) malloc(sizeof(int));
if(argc == 1) {
password = "hello";
} else if(argc == 2) {
password = argv[1];
}
characters = "abcdefghijklmnopqrstuvwxyz";
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
cudaDeviceSynchronize();
printf("\nTrying to find password: %s\n\n", password);
printf("Launching CPU password finder...\n"); fflush(stdout);
startTime(&timer);
int i;
for(i=0; i < MAX_LETTERS; i++) {
uint64_t total_words = NUM_LETTERS;
int j;
for(j=0; j<i; j++) {
total_words *= NUM_LETTERS;
}
printf("Total number of words: %lu\n\n", total_words);
uint64_t curr_word = 0;
uint64_t k;
char word[MAX_PASS_LENGTH] = "";
int check = 0;
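		// Decode k as a base-26 number, most significant letter first, so every candidate
		// word of length i+1 is enumerated exactly once.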
for(k=0; k<total_words; k++) {
curr_word = k;
			// each candidate word has i+1 letters, matching total_words = 26^(i+1)
			for(j=0; j<=i; j++) {
				word[i-j] = characters[(curr_word % NUM_LETTERS)];
				curr_word /= NUM_LETTERS;
			}
word[i+1] = '\0';
check = 1;
for(j=0; j<i+1; j++) {
if(password[j] != word[j]){
check = 0;
}
}
			if(check && password[i+1] == '\0') { // also require the password length to match
printf("Password is found!\n");
printf("Found: %s\n", word);
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
return 0;
}
}
}
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
}
|
89b763d6a8faf55d753d0b52b4f507181c72161d.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
*
* smooth.cu
*
 * This provides a CUDA implementation of a kernel smoother.
* http://en.wikipedia.org/wiki/Kernel_smoother
* The particular smoother in this file is a nearest neighbor smoother
* in order to keep the code as simple to understand as possible.
*
 * This is implemented for 2-d square grids.
*
* Parameters of note:
* dataWidth -- size of the data is dataWidth^2
* halfWidth -- region around point x,y to smooth
* k smooths box with corners [x-k,y-k] to [x+k,y+k]
*
* The smoothed region is only defined for the interior that has the kernel
* defined inside the boundary, e.g. for gridWidth=10, halfWidth=2 the
* region from 2,2 to 7,7 will be smoothed.
*
********************************************************************************/
/*******************************************************************************
*
* CUDA concepts
*
* This file shows how to use many features of CUDA:
* 2d grids
* pitch allocation
*
********************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
// Data is of size dataWidth * dataWidth
const unsigned int dataWidth = 4112;
// Parameter to express the smoothing kernel halfwidth
const unsigned int halfWidth = 8;
// Size of the CUDA threadBlock
const unsigned int blockWidth = 16;
/* Small values good for testing */
// Data is of size dataWidth * dataWidth
//const unsigned int dataWidth = 8;
// Parameter to express the smoothing kernel halfwidth
//const unsigned int halfWidth = 1;
// Size of the CUDA threadBlock
//const unsigned int blockWidth = 2;
/*------------------------------------------------------------------------------
* Name: NNSmoothKernel
* Action: The CUDA kernel that implements kernel smoothing.
* Yuck, that's two senses of kernel.
*-----------------------------------------------------------------------------*/
__global__ void NNSmoothKernel ( float* pFieldIn, float* pFieldOut, size_t pitch, unsigned int halfwidth )
{
// pitch is in bytes, figure out the number of elements for addressing
unsigned pitchels = pitch/sizeof(float);
// The grid indexes start from
unsigned xindex = ( blockIdx.x * blockDim.x + threadIdx.x);
unsigned yindex = ( blockIdx.y * blockDim.y + threadIdx.y);
// Variable to accumulate the smoothed value
float value = 0.0;
// Get the value from the kernel
for ( unsigned j=0; j<=2*halfwidth; j++ )
{
for ( unsigned i=0; i<=2*halfwidth; i++ )
{
value += pFieldIn [ pitchels*(yindex + j) + xindex + i ];
}
}
// Divide by the number of elements in the kernel
value /= float((2*halfWidth+1)*(2*halfWidth+1));
// Write the value out
pFieldOut [ (yindex+halfwidth)*pitchels + xindex+halfwidth ] = value;
}
/*------------------------------------------------------------------------------
* Name: SmoothField
* Action: Host entry point to kernel smoother
*-----------------------------------------------------------------------------*/
bool SmoothField ( float* pHostFieldIn, float *pHostFieldOut )
{
float * pDeviceFieldIn = 0;
float * pDeviceFieldOut = 0;
size_t pitch, pitchout;
struct timeval ta, tb, tc, td;
// Check the grid dimensions and extract parameters. See top description about restrictions
// printf ( "%d, %d, %d\n", datWidth, halfWidth, blockSize
assert(((dataWidth-2*halfWidth) % blockWidth) == 0 );
gettimeofday ( &ta, NULL );
// Place the data set on device memory
hipMallocPitch((void**)&pDeviceFieldIn, &pitch, dataWidth*sizeof(float), dataWidth );
hipMemcpy2D ( pDeviceFieldIn, pitch,
pHostFieldIn, dataWidth*sizeof(float), dataWidth*sizeof(float), dataWidth,
hipMemcpyHostToDevice);
// Allocate the output
hipMallocPitch((void**)&pDeviceFieldOut, &pitchout, dataWidth*sizeof(float), dataWidth );
gettimeofday ( &tb, NULL );
// Construct a 2d grid/block
const dim3 DimBlock ( blockWidth, blockWidth );
const dim3 DimGrid ( (dataWidth-(2*halfWidth))/blockWidth,
(dataWidth-(2*halfWidth))/blockWidth );
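    // The grid covers only the (dataWidth - 2*halfWidth)^2 interior points; the kernel writes each
    // result at an offset of +halfWidth, leaving the border of the output untouched.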
// Invoke the kernel
hipLaunchKernelGGL(( NNSmoothKernel) , dim3(DimGrid),dim3(DimBlock), 0, 0, pDeviceFieldIn, pDeviceFieldOut, pitch, halfWidth );
gettimeofday ( &tc, NULL );
// Retrieve the results
hipMemcpy2D(pHostFieldOut, dataWidth*sizeof(float),
pDeviceFieldOut, pitchout, dataWidth*sizeof(float), dataWidth,
hipMemcpyDeviceToHost);
gettimeofday ( &td, NULL );
if ( ta.tv_usec < td.tv_usec )
{
printf ("Elapsed total time (s/m): %d:%d\n", td.tv_sec - ta.tv_sec, td.tv_usec - ta.tv_usec );
} else {
printf ("Elapsed total time (s/m): %d:%d\n", td.tv_sec - ta.tv_sec - 1, 1000000 - td.tv_usec + ta.tv_usec );
}
if ( tb.tv_usec < tc.tv_usec )
{
printf ("Elapsed kernel time (s/m): %d:%d\n", tc.tv_sec - tb.tv_sec, tc.tv_usec - tb.tv_usec );
} else {
printf ("Elapsed kernel time (s/m): %d:%d\n", tc.tv_sec - tb.tv_sec - 1, 1000000 - tc.tv_usec + tb.tv_usec );
}
return true;
}
/*------------------------------------------------------------------------------
* Name: initField
* Action: Initialize a field to predictable values.
* This is a useful format for debugging, because values
* accumulate to their initial value.
*-----------------------------------------------------------------------------*/
void initField ( unsigned dim, float* pField )
{
for ( unsigned j=0; j<dim; j++ )
{
for ( unsigned i=0; i<dim; i++ )
{
pField[j*dim+i] = j + i;
}
}
}
/*------------------------------------------------------------------------------
* Name: main
* Action: Entry point
*-----------------------------------------------------------------------------*/
int main ()
{
// Create the input field
float *field = (float *) malloc ( dataWidth * dataWidth * sizeof(float));
initField ( dataWidth, field );
// Create the output field
float *out = (float *) malloc ( dataWidth * dataWidth * sizeof(float));
// Call the kernel
SmoothField ( field, out );
    // Print the output field (for debugging purposes).
for ( unsigned j=0; j< dataWidth; j++ )
{
for ( unsigned i=0; i< dataWidth; i++ )
{
if ( ( i >= halfWidth ) &&
( j >= halfWidth ) &&
( i < ( dataWidth - halfWidth )) &&
( j < ( dataWidth - halfWidth )) )
{
printf ("%4.0f, ", out[j*dataWidth + i]);
}
else
{
printf (" na, ");
}
}
printf ("\n");
}
}
| 89b763d6a8faf55d753d0b52b4f507181c72161d.cu | /*******************************************************************************
*
* smooth.cu
*
* This provides a CUDA implementation of a kernel smooother.
* http://en.wikipedia.org/wiki/Kernel_smoother
* The particular smoother in this file is a nearest neighbor smoother
* in order to keep the code as simple to understand as possible.
*
 * This is implemented for 2-d square grids.
*
* Parameters of note:
* dataWidth -- size of the data is dataWidth^2
* halfWidth -- region around point x,y to smooth
* k smooths box with corners [x-k,y-k] to [x+k,y+k]
*
* The smoothed region is only defined for the interior that has the kernel
* defined inside the boundary, e.g. for gridWidth=10, halfWidth=2 the
* region from 2,2 to 7,7 will be smoothed.
*
********************************************************************************/
/*******************************************************************************
*
* CUDA concepts
*
* This file shows how to use many features of CUDA:
* 2d grids
* pitch allocation
*
********************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <cuda.h>
// Data is of size dataWidth * dataWidth
const unsigned int dataWidth = 4112;
// Parameter to express the smoothing kernel halfwidth
const unsigned int halfWidth = 8;
// Size of the CUDA threadBlock
const unsigned int blockWidth = 16;
/* Small values good for testing */
// Data is of size dataWidth * dataWidth
//const unsigned int dataWidth = 8;
// Parameter to express the smoothing kernel halfwidth
//const unsigned int halfWidth = 1;
// Size of the CUDA threadBlock
//const unsigned int blockWidth = 2;
/*------------------------------------------------------------------------------
* Name: NNSmoothKernel
* Action: The CUDA kernel that implements kernel smoothing.
* Yuck, that's two senses of kernel.
*-----------------------------------------------------------------------------*/
__global__ void NNSmoothKernel ( float* pFieldIn, float* pFieldOut, size_t pitch, unsigned int halfwidth )
{
// pitch is in bytes, figure out the number of elements for addressing
unsigned pitchels = pitch/sizeof(float);
// The grid indexes start from
unsigned xindex = ( blockIdx.x * blockDim.x + threadIdx.x);
unsigned yindex = ( blockIdx.y * blockDim.y + threadIdx.y);
// Variable to accumulate the smoothed value
float value = 0.0;
// Get the value from the kernel
for ( unsigned j=0; j<=2*halfwidth; j++ )
{
for ( unsigned i=0; i<=2*halfwidth; i++ )
{
value += pFieldIn [ pitchels*(yindex + j) + xindex + i ];
}
}
// Divide by the number of elements in the kernel
value /= float((2*halfWidth+1)*(2*halfWidth+1));
// Write the value out
pFieldOut [ (yindex+halfwidth)*pitchels + xindex+halfwidth ] = value;
}
/*------------------------------------------------------------------------------
* Name: SmoothField
* Action: Host entry point to kernel smoother
*-----------------------------------------------------------------------------*/
bool SmoothField ( float* pHostFieldIn, float *pHostFieldOut )
{
float * pDeviceFieldIn = 0;
float * pDeviceFieldOut = 0;
size_t pitch, pitchout;
struct timeval ta, tb, tc, td;
// Check the grid dimensions and extract parameters. See top description about restrictions
// printf ( "%d, %d, %d\n", datWidth, halfWidth, blockSize
assert(((dataWidth-2*halfWidth) % blockWidth) == 0 );
gettimeofday ( &ta, NULL );
// Place the data set on device memory
cudaMallocPitch((void**)&pDeviceFieldIn, &pitch, dataWidth*sizeof(float), dataWidth );
cudaMemcpy2D ( pDeviceFieldIn, pitch,
pHostFieldIn, dataWidth*sizeof(float), dataWidth*sizeof(float), dataWidth,
cudaMemcpyHostToDevice);
// Allocate the output
cudaMallocPitch((void**)&pDeviceFieldOut, &pitchout, dataWidth*sizeof(float), dataWidth );
gettimeofday ( &tb, NULL );
// Construct a 2d grid/block
const dim3 DimBlock ( blockWidth, blockWidth );
const dim3 DimGrid ( (dataWidth-(2*halfWidth))/blockWidth,
(dataWidth-(2*halfWidth))/blockWidth );
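    // The grid covers only the (dataWidth - 2*halfWidth)^2 interior points; the kernel writes each
    // result at an offset of +halfWidth, leaving the border of the output untouched.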
// Invoke the kernel
NNSmoothKernel <<<DimGrid,DimBlock>>> ( pDeviceFieldIn, pDeviceFieldOut, pitch, halfWidth );
gettimeofday ( &tc, NULL );
// Retrieve the results
cudaMemcpy2D(pHostFieldOut, dataWidth*sizeof(float),
pDeviceFieldOut, pitchout, dataWidth*sizeof(float), dataWidth,
cudaMemcpyDeviceToHost);
gettimeofday ( &td, NULL );
if ( ta.tv_usec < td.tv_usec )
{
printf ("Elapsed total time (s/m): %d:%d\n", td.tv_sec - ta.tv_sec, td.tv_usec - ta.tv_usec );
} else {
printf ("Elapsed total time (s/m): %d:%d\n", td.tv_sec - ta.tv_sec - 1, 1000000 - td.tv_usec + ta.tv_usec );
}
if ( tb.tv_usec < tc.tv_usec )
{
printf ("Elapsed kernel time (s/m): %d:%d\n", tc.tv_sec - tb.tv_sec, tc.tv_usec - tb.tv_usec );
} else {
printf ("Elapsed kernel time (s/m): %d:%d\n", tc.tv_sec - tb.tv_sec - 1, 1000000 - tc.tv_usec + tb.tv_usec );
}
return true;
}
/*------------------------------------------------------------------------------
* Name: initField
* Action: Initialize a field to predictable values.
* This is a useful format for debugging, because values
* accumulate to their initial value.
*-----------------------------------------------------------------------------*/
void initField ( unsigned dim, float* pField )
{
for ( unsigned j=0; j<dim; j++ )
{
for ( unsigned i=0; i<dim; i++ )
{
pField[j*dim+i] = j + i;
}
}
}
/*------------------------------------------------------------------------------
* Name: main
* Action: Entry point
*-----------------------------------------------------------------------------*/
int main ()
{
// Create the input field
float *field = (float *) malloc ( dataWidth * dataWidth * sizeof(float));
initField ( dataWidth, field );
// Create the output field
float *out = (float *) malloc ( dataWidth * dataWidth * sizeof(float));
// Call the kernel
SmoothField ( field, out );
// Print the output field (for debugging purposes.
for ( unsigned j=0; j< dataWidth; j++ )
{
for ( unsigned i=0; i< dataWidth; i++ )
{
if ( ( i >= halfWidth ) &&
( j >= halfWidth ) &&
( i < ( dataWidth - halfWidth )) &&
( j < ( dataWidth - halfWidth )) )
{
printf ("%4.0f, ", out[j*dataWidth + i]);
}
else
{
printf (" na, ");
}
}
printf ("\n");
}
}
|
a360dd9007c26827aa67a06c6be3cc48b4281696.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <hipsparse.h>
#include "mmio.h"
/* #include <cusparse_v2.h> */
#define CLEANUP(s) \
do{ \
printf("%s\n", s);\
if(f) fclose(f);\
if(cooRow) free(cooRow);\
if(cooCol) free(cooCol);\
if(cooVal) free(cooVal);\
if(matrix) free(matrix);\
if(csrptr) free(csrptr);\
if(d_cooCol) hipFree(d_cooCol);\
if(d_cooRow) hipFree(d_cooRow);\
if(d_cooVal) hipFree(d_cooVal);\
if(descr) hipsparseDestroyMatDescr(descr);\
if(handle) hipsparseDestroy(handle);\
if(output1) fclose(output1);\
if(output2) fclose(output2);\
if(output3) fclose(output3);\
if(x) free(x);\
if(b) free(b);\
hipDeviceReset(); \
fflush(stdout); \
}while(0)
typedef struct{
int row;
int col;
double val;
}mat;
int comp(const void *x, const void *y)
{
if(x == y) return 0;
if(x == 0) return -1;
if(y == 0) return 1;
const mat *ptr1 = (const mat*)x, *ptr2 = (const mat*)y;
if(ptr1->row < ptr2->row) return -1;
if(ptr1->row > ptr2->row) return 1;
if(ptr1->col < ptr2->col) return -1;
if(ptr1->col > ptr2->col) return 1;
return 0;
}
int main(int argc, char *argv[]){
srand((unsigned)time(NULL));
int *cooRow=0;
int *cooCol=0;
double *cooVal=0;
mat *matrix;
int *d_cooRow = 0;
int *d_cooCol = 0;
double *d_cooVal = 0;
int N, M;
int NNZ;
hipError_t stat1, stat2, stat3;
hipsparseHandle_t handle = 0;
hipsparseMatDescr_t descr = 0;
int *d_csrptr = 0;
int *csrptr = 0;
int size_read;
MM_typecode type_read;
FILE *f;
FILE *output1;
FILE *output2;
double getMAX = 0;
double getMIN = 0;
FILE *output3;
double *x = 0;
double *b = 0;
double xvm;
if(argc<2)
{
fprintf(stderr, "Usage: %s [matrix-market-filename]\n", argv[0]);
exit(0);
}else
{
if((f = fopen(argv[1], "r")) == NULL)
exit(1);
}
if(mm_read_banner(f, &type_read)!= 0)
{
printf("could not process matrix market banner.\n");
exit(1);
}
if(mm_is_complex(type_read) && mm_is_matrix(type_read) && mm_is_sparse(type_read))
{
printf("this app does not support ");
printf("Market market type '[%s]\n", mm_typecode_to_str(type_read));
}
if((size_read = mm_read_mtx_crd_size(f, &M, &N, &NNZ))!= 0)
exit(1);
printf("~~~~~~~~~~~~~~~~~~~~~~~~~\n");
mm_write_mtx_crd_size(stdout, M, N, NNZ);
printf("~~~~~~~~~~~~~~~~~~~~~~~~~\n");
if(M!=N){
CLEANUP("M!=N size error");
return -1;
}
printf("N=%d NNZ=%d\n",N,NNZ);
cooRow=(int *)malloc(sizeof(cooRow[0])*NNZ);
cooCol=(int *)malloc(sizeof(cooCol[0])*NNZ);
cooVal=(double *)malloc(sizeof(cooVal[0])*NNZ);
matrix = (mat *)malloc(sizeof(mat)*NNZ);
if((!cooRow)||(!cooCol)||(!cooVal)){
CLEANUP("Host malloc matrix coo failed");
return -1;
}
if((!matrix)){
CLEANUP("Host malloc matrix failed");
exit(1);
}
int r_i;
int r_j;
double r_val;
printf("Input data_______\n");
for(int i = 0;i<NNZ;i++)
{
fscanf(f, "%d %d %lg\n", &r_i, &r_j, &r_val);
/* cooRow[i] = r_i - 1; */
/* cooCol[i] = r_j - 1; */
/* cooVal[i] = r_val; */
matrix[i].row = r_i - 1;
matrix[i].col = r_j - 1;
matrix[i].val = r_val;
}
/* int NNZcount = 0; */
/* for(int i = 0;i<NNZ_old;i++) */
/* { */
/* if(matrix[i].row != matrix[i].col) */
/* { */
/* cooRow[NNZ_old+NNZcount] = cooCol[i]; */
/* cooCol[NNZ_old+NNZcount] = cooRow[i]; */
/* cooVal[NNZ_old+NNZcount] = cooVal[i]; */
/* matrix[NNZ_old+NNZcount].row = matrix[i].col; */
/* matrix[NNZ_old+NNZcount].col = matrix[i].row; */
/* matrix[NNZ_old+NNZcount].val = matrix[i].val; */
/* NNZcount++; */
/* } */
/* } */
/* for(int i = 0;i<NNZ;i++){ */
/* printf("index[%d]->", i); */
/* printf("Row=%d ", matrix[i].row); */
/* printf("Col=%d ", matrix[i].col); */
/* printf("Val=%f\n", matrix[i].val); */
/* } */
printf("Input over\n");
printf("start sort_________\n");
/* sort(cooRow, cooCol, cooVal, NNZ); */
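	// Sort the COO triplets by (row, col): the COO-to-CSR conversion later on assumes the row
	// indices are in ascending order.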
qsort(matrix, NNZ, sizeof(mat), comp);
/* for(int i = 0;i<NNZ;i++){ */
/* printf("index[%d]->", i); */
/* printf("Row=%d ", cooRow[i]); */
/* printf("Col=%d ", cooCol[i]); */
/* printf("Val=%f\n", cooVal[i]); */
/* } */
printf("sort over\n");
printf("getMAX getMIN\n");
	getMAX = getMIN = matrix[0].val; // cooVal is not filled until after this loop
for(int i = 0;i<NNZ;i++)
{
/* if(cooVal[i]>=getMAX){ */
if(matrix[i].val>=getMAX){
/* getMAX = cooVal[i]; */
getMAX = matrix[i].val;
}
/* if(cooVal[i]<=getMIN){ */
if(matrix[i].val<=getMIN){
/* getMIN = cooVal[i]; */
getMIN = matrix[i].val;
}
}
for(int i = 0;i<NNZ;i++)
{
cooRow[i] = matrix[i].row;
cooCol[i] = matrix[i].col;
cooVal[i] = matrix[i].val;
}
printf("MAX = %f\nMIN = %f\n", getMAX, getMIN);
printf("coo device malloc\n");
	stat1 = hipMalloc((void**)&d_cooRow, sizeof(d_cooRow[0])*NNZ);
	stat2 = hipMalloc((void**)&d_cooCol, sizeof(d_cooCol[0])*NNZ);
	stat3 = hipMalloc((void**)&d_cooVal, sizeof(d_cooVal[0])*NNZ);
if((stat1!= hipSuccess)||
(stat2!= hipSuccess)||
(stat3!= hipSuccess)){
CLEANUP("Device malloc failed");
return -1;
}
printf("memcpy coo to device\n");
stat1 = hipMemcpy(d_cooRow, cooRow, (size_t)(sizeof(d_cooRow[0])*NNZ), hipMemcpyHostToDevice);
stat2 = hipMemcpy(d_cooCol, cooCol, (size_t)(sizeof(d_cooCol[0])*NNZ), hipMemcpyHostToDevice);
stat3 = hipMemcpy(d_cooVal, cooVal, (size_t)(sizeof(d_cooVal[0])*NNZ), hipMemcpyHostToDevice);
if((stat1!= hipSuccess)||
(stat2!= hipSuccess)||
(stat3!= hipSuccess)){
CLEANUP("Memcpy host 2 device failed");
return -1;
}
hipsparseStatus_t custat;
printf("create handle and descriptor\n");
custat = hipsparseCreate(&handle);
if(custat!= HIPSPARSE_STATUS_SUCCESS){
CLEANUP("CUSPARSE lib initialization failed");
return 1;
}
custat = hipsparseCreateMatDescr(&descr);
if(custat!= HIPSPARSE_STATUS_SUCCESS){
CLEANUP("Matrix descriptor initialization failed");
return 1;
}
printf("set descriptor type and indexbase\n");
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
csrptr = (int *)malloc(sizeof(csrptr[0])*(N + 1));
if(!csrptr){
CLEANUP("csrptr malloc failed");
return -1;
}
stat1 = hipMalloc((void**)&d_csrptr, sizeof(d_csrptr[0])*(N+1));
if(stat1!=hipSuccess){
CLEANUP("Device malloc failed");
return 1;
}
printf("conversion ptr vec___________\n");
custat = hipsparseXcoo2csr(handle, d_cooRow, NNZ, N, d_csrptr, HIPSPARSE_INDEX_BASE_ZERO);
if(custat!=HIPSPARSE_STATUS_SUCCESS){
CLEANUP("conversion coo 2 csr failed");
return 1;
}
stat1 = hipMemcpy(csrptr, d_csrptr, (size_t)(sizeof(d_csrptr[0])*(N+1)), hipMemcpyDeviceToHost);
if(stat1!=hipSuccess){
CLEANUP("Memcpy d_csrptr ->csrptr failed ");
return 1;
}
printf("conversion done\n");
/* for(int i = 0;i<N +1;i++){ */
/* printf("csrptr[%d]=%d\n", i, csrptr[i]); */
/* } */
printf("create x random by %f~%f\n", getMIN, getMAX);
x = (double *)malloc(sizeof(x[0])*N);
b = (double *)malloc(sizeof(b[0])*N);
for(int i = 0;i<N;i++)
{
/* if(fabs(getMAX)>=RAND_MAX || fabs(getMIN)>=RAND_MAX) */
/* { */
/* x[i] = rand(); */
/* }else{ */
/* x[i] = (double)(rand()%((int)getMAX*10 - (int)getMIN*10 + 1)+(int)getMIN*10)/10.0; */
/* } */
x[i] = 1.0;
}
printf("create b by x and Matrix\n");
for(int i = 0;i<N;i++)
{
xvm = 0.0;
for(int j = csrptr[i];j<csrptr[i+1];j++)
{
xvm+=cooVal[j]*x[cooCol[j]];
}
b[i] = xvm;
}
printf("ready to output________\n");
if((output1 = fopen("./ColVal.txt", "w")) == NULL)
exit(1);
if((output2 = fopen("./Ptr.txt", "w")) == NULL)
exit(1);
if((output3 = fopen("./bx.txt", "w")) == NULL)
exit(1);
printf("output col val\n");
fprintf(output1, "%d %d %d\n", N, M, NNZ);
for(int i = 0;i<NNZ;i++)
{
fprintf(output1, "%d %.16e\n", cooCol[i], cooVal[i]);
}
printf("output ptr\n");
fprintf(output2, "%d %d %d\n", N, M, NNZ);
for(int i = 0;i<N+1; i++)
{
fprintf(output2, "%d\n", csrptr[i]);
}
printf("output b x\n");
fprintf(output3, "%d %d %d\n", N, M, NNZ);
for(int i = 0;i<N;i++)
{
fprintf(output3, "%.16e %.16e\n", b[i], x[i]);
}
printf("output over \n");
CLEANUP("Program safety end\n");
return 0;
}
| a360dd9007c26827aa67a06c6be3cc48b4281696.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime.h>
#include <cusparse.h>
#include "mmio.h"
/* #include <cusparse_v2.h> */
#define CLEANUP(s) \
do{ \
printf("%s\n", s);\
if(f) fclose(f);\
if(cooRow) free(cooRow);\
if(cooCol) free(cooCol);\
if(cooVal) free(cooVal);\
if(matrix) free(matrix);\
if(csrptr) free(csrptr);\
if(d_cooCol) cudaFree(d_cooCol);\
if(d_cooRow) cudaFree(d_cooRow);\
if(d_cooVal) cudaFree(d_cooVal);\
if(descr) cusparseDestroyMatDescr(descr);\
if(handle) cusparseDestroy(handle);\
if(output1) fclose(output1);\
if(output2) fclose(output2);\
if(output3) fclose(output3);\
if(x) free(x);\
if(b) free(b);\
cudaDeviceReset(); \
fflush(stdout); \
}while(0)
typedef struct{
int row;
int col;
double val;
}mat;
int comp(const void *x, const void *y)
{
if(x == y) return 0;
if(x == 0) return -1;
if(y == 0) return 1;
const mat *ptr1 = (const mat*)x, *ptr2 = (const mat*)y;
if(ptr1->row < ptr2->row) return -1;
if(ptr1->row > ptr2->row) return 1;
if(ptr1->col < ptr2->col) return -1;
if(ptr1->col > ptr2->col) return 1;
return 0;
}
int main(int argc, char *argv[]){
srand((unsigned)time(NULL));
int *cooRow=0;
int *cooCol=0;
double *cooVal=0;
mat *matrix;
int *d_cooRow = 0;
int *d_cooCol = 0;
double *d_cooVal = 0;
int N, M;
int NNZ;
cudaError_t stat1, stat2, stat3;
cusparseHandle_t handle = 0;
cusparseMatDescr_t descr = 0;
int *d_csrptr = 0;
int *csrptr = 0;
int size_read;
MM_typecode type_read;
FILE *f;
FILE *output1;
FILE *output2;
double getMAX = 0;
double getMIN = 0;
FILE *output3;
double *x = 0;
double *b = 0;
double xvm;
if(argc<2)
{
fprintf(stderr, "Usage: %s [matrix-market-filename]\n", argv[0]);
exit(0);
}else
{
if((f = fopen(argv[1], "r")) == NULL)
exit(1);
}
if(mm_read_banner(f, &type_read)!= 0)
{
printf("could not process matrix market banner.\n");
exit(1);
}
if(mm_is_complex(type_read) && mm_is_matrix(type_read) && mm_is_sparse(type_read))
{
printf("this app does not support ");
printf("Market market type '[%s]\n", mm_typecode_to_str(type_read));
}
if((size_read = mm_read_mtx_crd_size(f, &M, &N, &NNZ))!= 0)
exit(1);
printf("~~~~~~~~~~~~~~~~~~~~~~~~~\n");
mm_write_mtx_crd_size(stdout, M, N, NNZ);
printf("~~~~~~~~~~~~~~~~~~~~~~~~~\n");
if(M!=N){
CLEANUP("M!=N size error");
return -1;
}
printf("N=%d NNZ=%d\n",N,NNZ);
cooRow=(int *)malloc(sizeof(cooRow[0])*NNZ);
cooCol=(int *)malloc(sizeof(cooCol[0])*NNZ);
cooVal=(double *)malloc(sizeof(cooVal[0])*NNZ);
matrix = (mat *)malloc(sizeof(mat)*NNZ);
if((!cooRow)||(!cooCol)||(!cooVal)){
CLEANUP("Host malloc matrix coo failed");
return -1;
}
if((!matrix)){
CLEANUP("Host malloc matrix failed");
exit(1);
}
int r_i;
int r_j;
double r_val;
printf("Input data_______\n");
for(int i = 0;i<NNZ;i++)
{
fscanf(f, "%d %d %lg\n", &r_i, &r_j, &r_val);
/* cooRow[i] = r_i - 1; */
/* cooCol[i] = r_j - 1; */
/* cooVal[i] = r_val; */
matrix[i].row = r_i - 1;
matrix[i].col = r_j - 1;
matrix[i].val = r_val;
}
/* int NNZcount = 0; */
/* for(int i = 0;i<NNZ_old;i++) */
/* { */
/* if(matrix[i].row != matrix[i].col) */
/* { */
/* cooRow[NNZ_old+NNZcount] = cooCol[i]; */
/* cooCol[NNZ_old+NNZcount] = cooRow[i]; */
/* cooVal[NNZ_old+NNZcount] = cooVal[i]; */
/* matrix[NNZ_old+NNZcount].row = matrix[i].col; */
/* matrix[NNZ_old+NNZcount].col = matrix[i].row; */
/* matrix[NNZ_old+NNZcount].val = matrix[i].val; */
/* NNZcount++; */
/* } */
/* } */
/* for(int i = 0;i<NNZ;i++){ */
/* printf("index[%d]->", i); */
/* printf("Row=%d ", matrix[i].row); */
/* printf("Col=%d ", matrix[i].col); */
/* printf("Val=%f\n", matrix[i].val); */
/* } */
printf("Input over\n");
printf("start sort_________\n");
/* sort(cooRow, cooCol, cooVal, NNZ); */
qsort(matrix, NNZ, sizeof(mat), comp);
/* for(int i = 0;i<NNZ;i++){ */
/* printf("index[%d]->", i); */
/* printf("Row=%d ", cooRow[i]); */
/* printf("Col=%d ", cooCol[i]); */
/* printf("Val=%f\n", cooVal[i]); */
/* } */
printf("sort over\n");
printf("getMAX getMIN\n");
    getMAX = getMIN = matrix[0].val;
for(int i = 0;i<NNZ;i++)
{
/* if(cooVal[i]>=getMAX){ */
if(matrix[i].val>=getMAX){
/* getMAX = cooVal[i]; */
getMAX = matrix[i].val;
}
/* if(cooVal[i]<=getMIN){ */
if(matrix[i].val<=getMIN){
/* getMIN = cooVal[i]; */
getMIN = matrix[i].val;
}
}
for(int i = 0;i<NNZ;i++)
{
cooRow[i] = matrix[i].row;
cooCol[i] = matrix[i].col;
cooVal[i] = matrix[i].val;
}
printf("MAX = %f\nMIN = %f\n", getMAX, getMIN);
printf("coo device malloc\n");
stat1 = cudaMalloc((void**)&d_cooRow, sizeof(d_cooRow)*NNZ);
stat2 = cudaMalloc((void**)&d_cooCol, sizeof(d_cooCol)*NNZ);
stat3 = cudaMalloc((void**)&d_cooVal, sizeof(d_cooVal)*NNZ);
if((stat1!= cudaSuccess)||
(stat2!= cudaSuccess)||
(stat3!= cudaSuccess)){
CLEANUP("Device malloc failed");
return -1;
}
printf("memcpy coo to device\n");
stat1 = cudaMemcpy(d_cooRow, cooRow, (size_t)(sizeof(d_cooRow[0])*NNZ), cudaMemcpyHostToDevice);
stat2 = cudaMemcpy(d_cooCol, cooCol, (size_t)(sizeof(d_cooCol[0])*NNZ), cudaMemcpyHostToDevice);
stat3 = cudaMemcpy(d_cooVal, cooVal, (size_t)(sizeof(d_cooVal[0])*NNZ), cudaMemcpyHostToDevice);
if((stat1!= cudaSuccess)||
(stat2!= cudaSuccess)||
(stat3!= cudaSuccess)){
CLEANUP("Memcpy host 2 device failed");
return -1;
}
cusparseStatus_t custat;
printf("create handle and descriptor\n");
custat = cusparseCreate(&handle);
if(custat!= CUSPARSE_STATUS_SUCCESS){
CLEANUP("CUSPARSE lib initialization failed");
return 1;
}
custat = cusparseCreateMatDescr(&descr);
if(custat!= CUSPARSE_STATUS_SUCCESS){
CLEANUP("Matrix descriptor initialization failed");
return 1;
}
printf("set descriptor type and indexbase\n");
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
csrptr = (int *)malloc(sizeof(csrptr[0])*(N + 1));
if(!csrptr){
CLEANUP("csrptr malloc failed");
return -1;
}
stat1 = cudaMalloc((void**)&d_csrptr, sizeof(d_csrptr[0])*(N+1));
if(stat1!=cudaSuccess){
CLEANUP("Device malloc failed");
return 1;
}
printf("conversion ptr vec___________\n");
custat = cusparseXcoo2csr(handle, d_cooRow, NNZ, N, d_csrptr, CUSPARSE_INDEX_BASE_ZERO);
if(custat!=CUSPARSE_STATUS_SUCCESS){
CLEANUP("conversion coo 2 csr failed");
return 1;
}
stat1 = cudaMemcpy(csrptr, d_csrptr, (size_t)(sizeof(d_csrptr[0])*(N+1)), cudaMemcpyDeviceToHost);
if(stat1!=cudaSuccess){
CLEANUP("Memcpy d_csrptr ->csrptr failed ");
return 1;
}
printf("conversion done\n");
/* for(int i = 0;i<N +1;i++){ */
/* printf("csrptr[%d]=%d\n", i, csrptr[i]); */
/* } */
printf("create x random by %f~%f\n", getMIN, getMAX);
x = (double *)malloc(sizeof(x[0])*N);
b = (double *)malloc(sizeof(b[0])*N);
for(int i = 0;i<N;i++)
{
/* if(fabs(getMAX)>=RAND_MAX || fabs(getMIN)>=RAND_MAX) */
/* { */
/* x[i] = rand(); */
/* }else{ */
/* x[i] = (double)(rand()%((int)getMAX*10 - (int)getMIN*10 + 1)+(int)getMIN*10)/10.0; */
/* } */
x[i] = 1.0;
}
printf("create b by x and Matrix\n");
for(int i = 0;i<N;i++)
{
xvm = 0.0;
for(int j = csrptr[i];j<csrptr[i+1];j++)
{
xvm+=cooVal[j]*x[cooCol[j]];
}
b[i] = xvm;
}
printf("ready to output________\n");
if((output1 = fopen("./ColVal.txt", "w")) == NULL)
exit(1);
if((output2 = fopen("./Ptr.txt", "w")) == NULL)
exit(1);
if((output3 = fopen("./bx.txt", "w")) == NULL)
exit(1);
printf("output col val\n");
fprintf(output1, "%d %d %d\n", N, M, NNZ);
for(int i = 0;i<NNZ;i++)
{
fprintf(output1, "%d %.16e\n", cooCol[i], cooVal[i]);
}
printf("output ptr\n");
fprintf(output2, "%d %d %d\n", N, M, NNZ);
for(int i = 0;i<N+1; i++)
{
fprintf(output2, "%d\n", csrptr[i]);
}
printf("output b x\n");
fprintf(output3, "%d %d %d\n", N, M, NNZ);
for(int i = 0;i<N;i++)
{
fprintf(output3, "%.16e %.16e\n", b[i], x[i]);
}
printf("output over \n");
CLEANUP("Program safety end\n");
return 0;
}
|
afec55be44729cc8a224c83f32773a0289316234.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <string>
#include <rocblas.h>
#include <xtensor/xarray.hpp>
#include <xtensor/xio.hpp>
#include <xtensor/xview.hpp>
#include <xtensor/xnpy.hpp>
#include <xtensor/xsort.hpp>
#include <boost/filesystem.hpp>
// GLOBAL VARIABLES
uint LAYER_WIDTH = 512;
uint MODEL_SEED = 52233264;
template <class _Tp>
xt::xarray<_Tp> matVec_cublas (xt::xarray<_Tp> matrix_A,
xt::xarray<_Tp> vector_B)
{
unsigned int n_rows = matrix_A.shape()[0];
unsigned int n_cols = matrix_A.shape()[1];
unsigned int size_A = n_rows * n_cols;
unsigned int size_B = n_cols;
assert (vector_B.shape()[0] == size_B && "matrix A and vector B shape mismatch.");
assert (vector_B.shape()[1] == 1 && "vector B no. of columns != 1");
unsigned int size_C = n_rows;
//cublas handle
hipblasHandle_t handle;
hipblasCreate(&handle);
// declare matrices for GPU and allocate memory
    // A, B and C live in unified memory allocated below
    _Tp *A = nullptr;
    _Tp *B = nullptr;
    _Tp *C = nullptr;
// gpc_id *myid = new gpc_id[size_C];
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&A, size_A*sizeof(_Tp));
hipMallocManaged(&B, size_B*sizeof(_Tp));
hipMallocManaged(&C, size_C*sizeof(_Tp));
// hipMallocManaged(&myid, size_C*sizeof(gpc_id));
// Fill the matrix values from xtensor to C++ array
for (int i = 0; i < size_A; i++)
A[i] = matrix_A.flat(i);
for (int i = 0; i < size_B; i++)
B[i] = vector_B.flat(i);
//run mat-vec multiplication
float alpha = 1.0f, beta = 0.0f;
hipDeviceSynchronize();
// time the matvel multiplication operation
// https://developer.nvidia.com/blog/how-implement-performance-metrics-cuda-cc/
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
// https://docs.nvidia.com/cuda/cublas/index.html#cublas-lt-t-gt-gemm
// https://stackoverflow.com/questions/16376804/clarification-of-the-leading-dimension-in-cublas-when-transposing
// A (stored in row-major) is read as A_T when read in column major
// So instead of A.B (in row-major), we do B_T.A_T
// B_T = 1 x n_cols
// A_T = n_cols x n_rows
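    // With m=1, n=n_rows, k=n_cols this computes C = B_T.A_T = (A.B)_T,
    // i.e. the n_rows-element product stored contiguously in C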
hipblasSgemm(handle,
HIPBLAS_OP_N, HIPBLAS_OP_N, // B is read as B_T and A is read as A_T
1, // rows of matrix B_T
n_rows, // cols of A_T
n_cols, // cols of matrix B_T
&alpha,
B, 1,
A, n_cols,
&beta,
C, 1);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
std::cout << "Execution Time: " << milliseconds << " ms" << std::endl;
// Convert product vector to xtensor
xt::xarray<double>::shape_type C_shape = {size_C, 1};
xt::xarray<_Tp> vec_C = xt::adapt(C, size_C, xt::no_ownership(), C_shape);
// free memory
hipFree(A);
hipFree(B);
hipFree(C);
return vec_C;
}
int main()
{
// load weights from npy files
boost::filesystem::path weight_folder("../weights");
const std::string dense_weights_folder = "../weights/mnist_dense-w" +
std::to_string(LAYER_WIDTH) +
"x" +
std::to_string(LAYER_WIDTH) +
"-" +
std::to_string(MODEL_SEED);
const std::string dense_weights_file = dense_weights_folder + "/mnist_dense-w" +
std::to_string(LAYER_WIDTH) +
"x" +
std::to_string(LAYER_WIDTH) +
"-" +
std::to_string(MODEL_SEED) + "_dense_weights.npy";
// std::cout << "******************************" << std::endl;
// std::cout << "Weights: " << dense_weights_file << std::endl;
xt::xarray<float> dense_weights = xt::load_npy<float>(dense_weights_file);
xt::xarray<float> tr_dense_weights = xt::transpose(dense_weights);
// load input vector from npy file
uint image_no = 69999;
const std::string input_vector_file = "../data/vector_" + std::to_string(image_no) + ".npy";
// std::cout << "Input: " << input_vector_file << std::endl;
xt::xarray<float> input_vector = xt::load_npy<float>(input_vector_file);
// std::cout << "******************************" << std::endl;
// std::cout << "Transposed Weight Matrix Shape: "<< xt::adapt(tr_dense_weights.shape()) << std::endl;
// std::cout << "Input Vector Shape: "<< xt::adapt(input_vector.shape()) << std::endl;
// std::cout << "******************************" << std::endl;
// for (int i = 0; i < 10; ++i)
// {
// matVec_cublas(tr_dense_weights, input_vector);
// }
// std::cout << "******************************" << std::endl;
// Display Output
auto matvecproduct = matVec_cublas(tr_dense_weights, input_vector);
// std::cout << "Matrix-Vector Product Shape: " << xt::adapt(matvecproduct.shape()) << std::endl;
// std::cout << "Matrix-Vector Product" << std::endl;
// std::cout << matvecproduct << std::endl;
// std::cout << "******************************" << std::endl;
return 0;
}
| afec55be44729cc8a224c83f32773a0289316234.cu | #include <iostream>
#include <string>
#include <cublas_v2.h>
#include <xtensor/xarray.hpp>
#include <xtensor/xio.hpp>
#include <xtensor/xview.hpp>
#include <xtensor/xnpy.hpp>
#include <xtensor/xsort.hpp>
#include <boost/filesystem.hpp>
// GLOBAL VARIABLES
uint LAYER_WIDTH = 512;
uint MODEL_SEED = 52233264;
template <class _Tp>
xt::xarray<_Tp> matVec_cublas (xt::xarray<_Tp> matrix_A,
xt::xarray<_Tp> vector_B)
{
unsigned int n_rows = matrix_A.shape()[0];
unsigned int n_cols = matrix_A.shape()[1];
unsigned int size_A = n_rows * n_cols;
unsigned int size_B = n_cols;
assert (vector_B.shape()[0] == size_B && "matrix A and vector B shape mismatch.");
assert (vector_B.shape()[1] == 1 && "vector B no. of columns != 1");
unsigned int size_C = n_rows;
//cublas handle
cublasHandle_t handle;
cublasCreate(&handle);
// declare matrices for GPU and allocate memory
    // A, B and C live in unified memory allocated below
    _Tp *A = nullptr;
    _Tp *B = nullptr;
    _Tp *C = nullptr;
// gpc_id *myid = new gpc_id[size_C];
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&A, size_A*sizeof(_Tp));
cudaMallocManaged(&B, size_B*sizeof(_Tp));
cudaMallocManaged(&C, size_C*sizeof(_Tp));
// cudaMallocManaged(&myid, size_C*sizeof(gpc_id));
// Fill the matrix values from xtensor to C++ array
for (int i = 0; i < size_A; i++)
A[i] = matrix_A.flat(i);
for (int i = 0; i < size_B; i++)
B[i] = vector_B.flat(i);
//run mat-vec multiplication
float alpha = 1.0f, beta = 0.0f;
cudaDeviceSynchronize();
// time the matvel multiplication operation
// https://developer.nvidia.com/blog/how-implement-performance-metrics-cuda-cc/
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
// https://docs.nvidia.com/cuda/cublas/index.html#cublas-lt-t-gt-gemm
// https://stackoverflow.com/questions/16376804/clarification-of-the-leading-dimension-in-cublas-when-transposing
// A (stored in row-major) is read as A_T when read in column major
// So instead of A.B (in row-major), we do B_T.A_T
// B_T = 1 x n_cols
// A_T = n_cols x n_rows
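    // With m=1, n=n_rows, k=n_cols this computes C = B_T.A_T = (A.B)_T,
    // i.e. the n_rows-element product stored contiguously in C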
cublasSgemm(handle,
CUBLAS_OP_N, CUBLAS_OP_N, // B is read as B_T and A is read as A_T
1, // rows of matrix B_T
n_rows, // cols of A_T
n_cols, // cols of matrix B_T
&alpha,
B, 1,
A, n_cols,
&beta,
C, 1);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << "Execution Time: " << milliseconds << " ms" << std::endl;
// Convert product vector to xtensor
xt::xarray<double>::shape_type C_shape = {size_C, 1};
xt::xarray<_Tp> vec_C = xt::adapt(C, size_C, xt::no_ownership(), C_shape);
// free memory
cudaFree(A);
cudaFree(B);
cudaFree(C);
return vec_C;
}
int main()
{
// load weights from npy files
boost::filesystem::path weight_folder("../weights");
const std::string dense_weights_folder = "../weights/mnist_dense-w" +
std::to_string(LAYER_WIDTH) +
"x" +
std::to_string(LAYER_WIDTH) +
"-" +
std::to_string(MODEL_SEED);
const std::string dense_weights_file = dense_weights_folder + "/mnist_dense-w" +
std::to_string(LAYER_WIDTH) +
"x" +
std::to_string(LAYER_WIDTH) +
"-" +
std::to_string(MODEL_SEED) + "_dense_weights.npy";
// std::cout << "******************************" << std::endl;
// std::cout << "Weights: " << dense_weights_file << std::endl;
xt::xarray<float> dense_weights = xt::load_npy<float>(dense_weights_file);
xt::xarray<float> tr_dense_weights = xt::transpose(dense_weights);
// load input vector from npy file
uint image_no = 69999;
const std::string input_vector_file = "../data/vector_" + std::to_string(image_no) + ".npy";
// std::cout << "Input: " << input_vector_file << std::endl;
xt::xarray<float> input_vector = xt::load_npy<float>(input_vector_file);
// std::cout << "******************************" << std::endl;
// std::cout << "Transposed Weight Matrix Shape: "<< xt::adapt(tr_dense_weights.shape()) << std::endl;
// std::cout << "Input Vector Shape: "<< xt::adapt(input_vector.shape()) << std::endl;
// std::cout << "******************************" << std::endl;
// for (int i = 0; i < 10; ++i)
// {
// matVec_cublas(tr_dense_weights, input_vector);
// }
// std::cout << "******************************" << std::endl;
// Display Output
auto matvecproduct = matVec_cublas(tr_dense_weights, input_vector);
// std::cout << "Matrix-Vector Product Shape: " << xt::adapt(matvecproduct.shape()) << std::endl;
// std::cout << "Matrix-Vector Product" << std::endl;
// std::cout << matvecproduct << std::endl;
// std::cout << "******************************" << std::endl;
return 0;
}
|
2b7ba0d900889926865efbbedee4e31249dbf39a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "grid/counting_sort.h"
__global__ void CountingSort2DKernel(
const float *__restrict__ points, // (N, P, 2)
const long *__restrict__ lengths, // (N,)
const int *__restrict__ grid_cell, // (N, P)
const int *__restrict__ grid_idx, // (N, P)
const int *__restrict__ grid_off, // (N, G)
float *__restrict__ sorted_points, // (N, P, 2)
// sorted[n, i] = unsorted[n, idxs[i]]
int *__restrict__ sorted_points_idxs, // (N, P)
int N, int P, int G) {
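  // Points are processed in chunks of blockDim.x; each chunk lies entirely
  // inside one cloud n, and blocks stride over all N * chunks_per_cloud chunks.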
int chunks_per_cloud = (1 + (P - 1) / blockDim.x);
int chunks_to_do = N * chunks_per_cloud;
for (int chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) {
int n = chunk / chunks_per_cloud;
int start_point = blockDim.x * (chunk % chunks_per_cloud);
int p = start_point + threadIdx.x;
if (p >= lengths[n]) continue;
int cell_idx = grid_cell[n * P + p];
int idx = grid_idx[n * P + p];
assert(cell_idx < G);
int sorted_idx = grid_off[n * G + cell_idx] + idx;
assert(sorted_idx >= 0 && sorted_idx < lengths[n]);
sorted_points[n * P * 2 + sorted_idx * 2] = points[n * P * 2 + p * 2];
sorted_points[n * P * 2 + sorted_idx * 2 + 1] =
points[n * P * 2 + p * 2 + 1];
sorted_points_idxs[n * P + sorted_idx] = p;
}
}
/*
__global__ void CountingSort3DKernel(
const float *__restrict__ points, // (N, P, 3)
const long *__restrict__ lengths, // (N,)
const int *__restrict__ grid_cell, // (N, P)
const int *__restrict__ grid_idx, // (N, P)
const int *__restrict__ grid_off, // (N, G)
float *__restrict__ sorted_points, // (N, P, 3)
int *__restrict__ sorted_points_idxs, // (N, P): sorted[n, i] = unsorted[n,
// idxs[i]]
int N, int P, int G) {
int chunks_per_cloud = (1 + (P - 1) / blockDim.x);
int chunks_to_do = N * chunks_per_cloud;
for (int chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) {
int n = chunk / chunks_per_cloud;
int start_point = blockDim.x * (chunk % chunks_per_cloud);
int p = start_point + threadIdx.x;
if (p >= lengths[n]) continue;
int cell_idx = grid_cell[n * P + p];
int idx = grid_idx[n * P + p];
assert(cell_idx < G);
int sorted_idx = grid_off[n * G + cell_idx] + idx;
assert(sorted_idx >= 0 && sorted_idx < lengths[n]);
sorted_points[n * P * 3 + sorted_idx * 3] = points[n * P * 3 + p * 3];
sorted_points[n * P * 3 + sorted_idx * 3 + 1] =
points[n * P * 3 + p * 3 + 1];
sorted_points[n * P * 3 + sorted_idx * 3 + 2] =
points[n * P * 3 + p * 3 + 2];
sorted_points_idxs[n * P + sorted_idx] = p;
}
}
*/
template <int D>
__global__ void CountingSortNDKernel(
const float *__restrict__ points, // (N, P, 3)
const long *__restrict__ lengths, // (N,)
const int *__restrict__ grid_cell, // (N, P)
const int *__restrict__ grid_idx, // (N, P)
const int *__restrict__ grid_off, // (N, G)
float *__restrict__ sorted_points, // (N, P, 3)
int *__restrict__ sorted_points_idxs, // (N, P):
// sorted[n, i] = unsorted[n,idxs[i]]
int N, int P, int G) {
int chunks_per_cloud = (1 + (P - 1) / blockDim.x);
int chunks_to_do = N * chunks_per_cloud;
for (int chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) {
int n = chunk / chunks_per_cloud;
int start_point = blockDim.x * (chunk % chunks_per_cloud);
int p = start_point + threadIdx.x;
if (p >= lengths[n]) continue;
int cell_idx = grid_cell[n * P + p];
int idx = grid_idx[n * P + p];
assert(cell_idx < G);
int sorted_idx = grid_off[n * G + cell_idx] + idx;
assert(sorted_idx >= 0 && sorted_idx < lengths[n]);
for (int d = 0; d < D; ++d) {
sorted_points[n * P * D + sorted_idx * D + d] =
points[n * P * D + p * D + d];
}
sorted_points_idxs[n * P + sorted_idx] = p;
}
}
template <int D>
struct CountingSortNDKernelFunctor {
static void run(int blocks, int threads, const float *__restrict__ points,
const long *__restrict__ lengths,
const int *__restrict__ grid_cell,
const int *__restrict__ grid_idx,
const int *__restrict__ grid_off,
float *__restrict__ sorted_points,
int *__restrict__ sorted_points_idxs, int N, int P, int G) {
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( CountingSortNDKernel<D>), dim3(blocks), dim3(threads), 0, stream,
points, lengths, grid_cell, grid_idx, grid_off, sorted_points,
sorted_points_idxs, N, P, G);
}
};
void CountingSortCUDA(const at::Tensor points, const at::Tensor lengths,
const at::Tensor grid_cell, const at::Tensor grid_idx,
const at::Tensor grid_off, at::Tensor sorted_points,
at::Tensor sorted_points_idxs) {
at::TensorArg points_t{points, "points", 1};
at::TensorArg lengths_t{lengths, "lengths", 2};
at::TensorArg grid_cell_t{grid_cell, "grid_cell", 3};
at::TensorArg grid_idx_t{grid_idx, "grid_idx", 4};
at::TensorArg grid_off_t{grid_off, "grid_off", 5};
at::TensorArg sorted_points_t{sorted_points, "sorted_points", 6};
at::TensorArg sorted_points_idxs_t{sorted_points_idxs, "sorted_points_idxs",
7};
at::CheckedFrom c = "CountingSortCUDA";
at::checkAllSameGPU(c, {points_t, lengths_t, grid_cell_t, grid_idx_t,
grid_off_t, sorted_points_t, sorted_points_idxs_t});
at::checkAllSameType(
c, {grid_cell_t, grid_idx_t, grid_off_t, sorted_points_idxs_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
int threads = 256;
int blocks = 256;
int N = points.size(0);
int P = points.size(1);
int D = points.size(2);
// assert(D == 2 || D == 3);
int G = grid_off.size(1);
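  // D == 2 uses the specialized 2D kernel; any other D dispatches the
  // templated ND kernel through DispatchKernel1D.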
if (D == 2) {
hipLaunchKernelGGL(( CountingSort2DKernel), dim3(blocks), dim3(threads), 0, stream,
points.contiguous().data_ptr<float>(),
lengths.contiguous().data_ptr<long>(),
grid_cell.contiguous().data_ptr<int>(),
grid_idx.contiguous().data_ptr<int>(),
grid_off.contiguous().data_ptr<int>(),
sorted_points.contiguous().data_ptr<float>(),
sorted_points_idxs.contiguous().data_ptr<int>(), N, P, G);
} else {
// CountingSort3DKernel<<<blocks, threads, 0, stream>>>(
// points.contiguous().data_ptr<float>(),
// lengths.contiguous().data_ptr<long>(),
// grid_cell.contiguous().data_ptr<int>(),
// grid_idx.contiguous().data_ptr<int>(),
// grid_off.contiguous().data_ptr<int>(),
// sorted_points.contiguous().data_ptr<float>(),
// sorted_points_idxs.contiguous().data_ptr<int>(), N, P, G);
DispatchKernel1D<CountingSortNDKernelFunctor, V0_MIN_D, V0_MAX_D>(
D, blocks, threads, points.contiguous().data_ptr<float>(),
lengths.contiguous().data_ptr<long>(),
grid_cell.contiguous().data_ptr<int>(),
grid_idx.contiguous().data_ptr<int>(),
grid_off.contiguous().data_ptr<int>(),
sorted_points.contiguous().data_ptr<float>(),
sorted_points_idxs.contiguous().data_ptr<int>(), N, P, G);
}
AT_CUDA_CHECK(hipGetLastError());
} | 2b7ba0d900889926865efbbedee4e31249dbf39a.cu | #include "grid/counting_sort.h"
__global__ void CountingSort2DKernel(
const float *__restrict__ points, // (N, P, 2)
const long *__restrict__ lengths, // (N,)
const int *__restrict__ grid_cell, // (N, P)
const int *__restrict__ grid_idx, // (N, P)
const int *__restrict__ grid_off, // (N, G)
float *__restrict__ sorted_points, // (N, P, 2)
// sorted[n, i] = unsorted[n, idxs[i]]
int *__restrict__ sorted_points_idxs, // (N, P)
int N, int P, int G) {
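  // Points are processed in chunks of blockDim.x; each chunk lies entirely
  // inside one cloud n, and blocks stride over all N * chunks_per_cloud chunks.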
int chunks_per_cloud = (1 + (P - 1) / blockDim.x);
int chunks_to_do = N * chunks_per_cloud;
for (int chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) {
int n = chunk / chunks_per_cloud;
int start_point = blockDim.x * (chunk % chunks_per_cloud);
int p = start_point + threadIdx.x;
if (p >= lengths[n]) continue;
int cell_idx = grid_cell[n * P + p];
int idx = grid_idx[n * P + p];
assert(cell_idx < G);
int sorted_idx = grid_off[n * G + cell_idx] + idx;
assert(sorted_idx >= 0 && sorted_idx < lengths[n]);
sorted_points[n * P * 2 + sorted_idx * 2] = points[n * P * 2 + p * 2];
sorted_points[n * P * 2 + sorted_idx * 2 + 1] =
points[n * P * 2 + p * 2 + 1];
sorted_points_idxs[n * P + sorted_idx] = p;
}
}
/*
__global__ void CountingSort3DKernel(
const float *__restrict__ points, // (N, P, 3)
const long *__restrict__ lengths, // (N,)
const int *__restrict__ grid_cell, // (N, P)
const int *__restrict__ grid_idx, // (N, P)
const int *__restrict__ grid_off, // (N, G)
float *__restrict__ sorted_points, // (N, P, 3)
int *__restrict__ sorted_points_idxs, // (N, P): sorted[n, i] = unsorted[n,
// idxs[i]]
int N, int P, int G) {
int chunks_per_cloud = (1 + (P - 1) / blockDim.x);
int chunks_to_do = N * chunks_per_cloud;
for (int chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) {
int n = chunk / chunks_per_cloud;
int start_point = blockDim.x * (chunk % chunks_per_cloud);
int p = start_point + threadIdx.x;
if (p >= lengths[n]) continue;
int cell_idx = grid_cell[n * P + p];
int idx = grid_idx[n * P + p];
assert(cell_idx < G);
int sorted_idx = grid_off[n * G + cell_idx] + idx;
assert(sorted_idx >= 0 && sorted_idx < lengths[n]);
sorted_points[n * P * 3 + sorted_idx * 3] = points[n * P * 3 + p * 3];
sorted_points[n * P * 3 + sorted_idx * 3 + 1] =
points[n * P * 3 + p * 3 + 1];
sorted_points[n * P * 3 + sorted_idx * 3 + 2] =
points[n * P * 3 + p * 3 + 2];
sorted_points_idxs[n * P + sorted_idx] = p;
}
}
*/
template <int D>
__global__ void CountingSortNDKernel(
const float *__restrict__ points, // (N, P, 3)
const long *__restrict__ lengths, // (N,)
const int *__restrict__ grid_cell, // (N, P)
const int *__restrict__ grid_idx, // (N, P)
const int *__restrict__ grid_off, // (N, G)
float *__restrict__ sorted_points, // (N, P, 3)
int *__restrict__ sorted_points_idxs, // (N, P):
// sorted[n, i] = unsorted[n,idxs[i]]
int N, int P, int G) {
int chunks_per_cloud = (1 + (P - 1) / blockDim.x);
int chunks_to_do = N * chunks_per_cloud;
for (int chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) {
int n = chunk / chunks_per_cloud;
int start_point = blockDim.x * (chunk % chunks_per_cloud);
int p = start_point + threadIdx.x;
if (p >= lengths[n]) continue;
int cell_idx = grid_cell[n * P + p];
int idx = grid_idx[n * P + p];
assert(cell_idx < G);
int sorted_idx = grid_off[n * G + cell_idx] + idx;
assert(sorted_idx >= 0 && sorted_idx < lengths[n]);
for (int d = 0; d < D; ++d) {
sorted_points[n * P * D + sorted_idx * D + d] =
points[n * P * D + p * D + d];
}
sorted_points_idxs[n * P + sorted_idx] = p;
}
}
template <int D>
struct CountingSortNDKernelFunctor {
static void run(int blocks, int threads, const float *__restrict__ points,
const long *__restrict__ lengths,
const int *__restrict__ grid_cell,
const int *__restrict__ grid_idx,
const int *__restrict__ grid_off,
float *__restrict__ sorted_points,
int *__restrict__ sorted_points_idxs, int N, int P, int G) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
CountingSortNDKernel<D><<<blocks, threads, 0, stream>>>(
points, lengths, grid_cell, grid_idx, grid_off, sorted_points,
sorted_points_idxs, N, P, G);
}
};
void CountingSortCUDA(const at::Tensor points, const at::Tensor lengths,
const at::Tensor grid_cell, const at::Tensor grid_idx,
const at::Tensor grid_off, at::Tensor sorted_points,
at::Tensor sorted_points_idxs) {
at::TensorArg points_t{points, "points", 1};
at::TensorArg lengths_t{lengths, "lengths", 2};
at::TensorArg grid_cell_t{grid_cell, "grid_cell", 3};
at::TensorArg grid_idx_t{grid_idx, "grid_idx", 4};
at::TensorArg grid_off_t{grid_off, "grid_off", 5};
at::TensorArg sorted_points_t{sorted_points, "sorted_points", 6};
at::TensorArg sorted_points_idxs_t{sorted_points_idxs, "sorted_points_idxs",
7};
at::CheckedFrom c = "CountingSortCUDA";
at::checkAllSameGPU(c, {points_t, lengths_t, grid_cell_t, grid_idx_t,
grid_off_t, sorted_points_t, sorted_points_idxs_t});
at::checkAllSameType(
c, {grid_cell_t, grid_idx_t, grid_off_t, sorted_points_idxs_t});
at::cuda::CUDAGuard device_guard(points.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
int threads = 256;
int blocks = 256;
int N = points.size(0);
int P = points.size(1);
int D = points.size(2);
// assert(D == 2 || D == 3);
int G = grid_off.size(1);
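  // D == 2 uses the specialized 2D kernel; any other D dispatches the
  // templated ND kernel through DispatchKernel1D.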
if (D == 2) {
CountingSort2DKernel<<<blocks, threads, 0, stream>>>(
points.contiguous().data_ptr<float>(),
lengths.contiguous().data_ptr<long>(),
grid_cell.contiguous().data_ptr<int>(),
grid_idx.contiguous().data_ptr<int>(),
grid_off.contiguous().data_ptr<int>(),
sorted_points.contiguous().data_ptr<float>(),
sorted_points_idxs.contiguous().data_ptr<int>(), N, P, G);
} else {
// CountingSort3DKernel<<<blocks, threads, 0, stream>>>(
// points.contiguous().data_ptr<float>(),
// lengths.contiguous().data_ptr<long>(),
// grid_cell.contiguous().data_ptr<int>(),
// grid_idx.contiguous().data_ptr<int>(),
// grid_off.contiguous().data_ptr<int>(),
// sorted_points.contiguous().data_ptr<float>(),
// sorted_points_idxs.contiguous().data_ptr<int>(), N, P, G);
DispatchKernel1D<CountingSortNDKernelFunctor, V0_MIN_D, V0_MAX_D>(
D, blocks, threads, points.contiguous().data_ptr<float>(),
lengths.contiguous().data_ptr<long>(),
grid_cell.contiguous().data_ptr<int>(),
grid_idx.contiguous().data_ptr<int>(),
grid_off.contiguous().data_ptr<int>(),
sorted_points.contiguous().data_ptr<float>(),
sorted_points_idxs.contiguous().data_ptr<int>(), N, P, G);
}
AT_CUDA_CHECK(cudaGetLastError());
} |
32bc0810e0b7f5d5adf9a7a1665e68cdd22b06d9.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zbcsrlugemm.cu normal z -> c, Wed Sep 17 15:08:43 2014
*/
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include <rocblas.h> // include before magma.h
#include "magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
#define PRECISION_c
#define Ablockinfo(i,j) Ablockinfo[(i)*c_blocks + (j)]
#define Bblockinfo(i,j) Bblockinfo[(i)*c_blocks + (j)]
#define A(i,j) ((Ablockinfo(i,j)-1)*size_b*size_b)
#define B(i,j) ((Bblockinfo(i,j)-1)*size_b*size_b)
//============================================================
#define ldb m
#define lda m
#define ldc m
#define fetch_x_A(i) (((i)<m*m)?Aval[i]:0)
#define fetch_x_B(i) (((i)<m*m)?B[i]:0)
// every multiprocessor handles one BCSR-block
__global__ void
cbcsr_gemm_kernel32(
int m,
int n,
int kblocks,
float **Avals,
float **Bval,
float **Cval)
{
#if (__CUDA_ARCH__ >= 200)
#if defined(PRECISION_d)
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int idt = ty * 64 + tx;
const int tx2 = idt%16;
const int ty2 = idt/16;
float xxB[4];
float *B;
int trackA = __mul24( ty2, lda) + tx2 ;
float *Aval = Avals[blockIdx.z];
__shared__ float Abs[64][65];
__shared__ float Bb[16][65];
for(int j=ty2; j<64; j+=16){
for(int y=tx2; y<64; y+=16){
Abs[y][j] = fetch_x_A(trackA + y-tx2) ;
}
trackA += __mul24( 16, m);
}
for(int k=0; k<kblocks; k++){
B = Bval[k];
int trackB = tx2+ __mul24( ty2 * 16, ldb );
// Prefetch part of B
#pragma unroll
for(int y=0; y<4; y++){
Bb[tx2][ty2*4+y] = fetch_x_B( trackB + y * ldb) ;
}
__syncthreads(); // this is necessary!!!
float Axs[4];
float Bxp[4];
float Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0};
int k1;
for(k1=0; k1<m-16; k1+=16)
{
trackB += 16;
#pragma unroll
for( int y=0; y<4; y++)
xxB[y] = fetch_x_B( trackB + y*ldb);
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<4; y++){
Axs[y] = Abs[tx2+y*16][j1+k1] ;
}
#pragma unroll
for( int y=0; y<4; y++){
Bxp[y]= Bb[j1][ty2+y*16];
}
#pragma unroll
for( int x=0; x<4; x++)
{
#pragma unroll
for( int y=0; y<4; y++)
{
Cb[x*4+y] += Axs[x]*Bxp[y];
}
}
}
#pragma unroll
for(int y=0; y<4; y++)
Bb[tx2][ty2*4 + y] = xxB[y];
__syncthreads(); // this is necessary!!!
}
// Prepare where to write the result
float *C = Cval[blockIdx.z * kblocks + k];
C += tx2 + __mul24 (ty2 ,ldc);
#pragma unroll
for(int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<4; y++)
Axs[y] = Abs[tx2 + y*16][j1+k1] ;
#pragma unroll
for( int y=0; y<4; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<4; x++)
{
#pragma unroll
for( int y=0;y<4; y++)
{
Cb[x*4 + y] += Axs[x]*Bxp[y];
}
}
}
int gy = ty2;
#pragma unroll
for( int y=0;y<4;y++, gy+=16)
{
int gx = tx2;
#pragma unroll
for(int x=0;x<4;x++, gx+=16)
{
if (gx < m && gy < n){
C[x*16] -= Cb[y+x*4];
}
}
C += ldc*16;
}
}
#endif
#endif
}
// every multiprocessor handles one BCSR-block
__global__ void
cbcsr_gemm_kernel64(
int m,
int n,
int kblocks,
float **Avals,
float **Bval,
float **Cval)
{
#if (__CUDA_ARCH__ >= 200)
#if defined(PRECISION_d)
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int idt = ty * 64 + tx;
const int tx2 = idt%16;
const int ty2 = idt/16;
float xxB[4];
float *B;
int trackA = __mul24( ty2, lda) + tx2 ;
float *Aval = Avals[blockIdx.z];
__shared__ float Abs[64][65];
__shared__ float Bb[16][65];
for(int j=ty2; j<64; j+=16){
for(int y=tx2; y<64; y+=16){
Abs[y][j] = fetch_x_A(trackA + y-tx2) ;
}
trackA += __mul24( 16, m);
}
for(int k=0; k<kblocks; k++){
B = Bval[k];
int trackB = tx2+ __mul24( ty2 * 4, ldb );
// Prefetch part of B
#pragma unroll
for(int y=0; y<4; y++){
Bb[tx2][ty2*4+y] = fetch_x_B( trackB + y * ldb) ;
}
__syncthreads(); // this is necessary!!!
float Axs[4];
float Bxp[4];
float Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0};
int k1;
for(k1=0; k1<m-16; k1+=16)
{
trackB += 16;
#pragma unroll
for( int y=0; y<4; y++)
xxB[y] = fetch_x_B( trackB + y*ldb);
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<4; y++){
Axs[y] = Abs[tx2+y*16][j1+k1] ;
}
#pragma unroll
for( int y=0; y<4; y++){
Bxp[y]= Bb[j1][ty2+y*16];
}
#pragma unroll
for( int x=0; x<4; x++)
{
#pragma unroll
for( int y=0; y<4; y++)
{
Cb[x*4+y] += Axs[x]*Bxp[y];
}
}
}
__syncthreads();
#pragma unroll
for(int y=0; y<4; y++)
Bb[tx2][ty2*4 + y] = xxB[y];
__syncthreads(); // this is necessary!!!
}
// Prepare where to write the result
float *C = Cval[blockIdx.z * kblocks + k];
C += tx2 + __mul24 (ty2 ,ldc);
#pragma unroll
for(int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<4; y++)
Axs[y] = Abs[tx2 + y*16][j1+k1] ;
#pragma unroll
for( int y=0; y<4; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<4; x++)
{
#pragma unroll
for( int y=0;y<4; y++)
{
Cb[x*4 + y] += Axs[x]*Bxp[y];
}
}
}
int gy = ty2;
#pragma unroll
for( int y=0;y<4;y++, gy+=16)
{
int gx = tx2;
#pragma unroll
for(int x=0;x<4;x++, gx+=16)
{
if (gx < m && gy < n){
C[x*16] -= Cb[y+x*4];
}
}
C += ldc*16;
}
}
#endif
#endif
}
/**
Purpose
-------
For a Block-CSR ILU factorization, this routine updates all blocks in
the trailing matrix.
Arguments
---------
@param
size_b magma_int_t
blocksize in BCSR
@param
num_brows magma_int_t
number of block rows
@param
kblocks magma_int_t
number of blocks in row
@param
dA magmaFloatComplex**
input blocks of matrix A
@param
dB magmaFloatComplex**
input blocks of matrix B
@param
dC magmaFloatComplex**
output blocks of matrix C
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_cbcsrluegemm( magma_int_t size_b,
magma_int_t num_brows,
magma_int_t kblocks,
magmaFloatComplex **dA,
magmaFloatComplex **dB,
magmaFloatComplex **dC ){
#if defined(PRECISION_d)
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
printf("error: magma_cbcsrluegemm needs a CUDA architecture"
" with at least 48K shared memory (Fermi +).\n"
"Please run cbcsrlu.cpp using CUBLAS batched.\n");
}
else {
dim3 threads( 64, 4 );
dim3 grid(1, 1, num_brows);
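        // one 64x4 thread block per block row; blockIdx.z selects the block row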
hipLaunchKernelGGL(( cbcsr_gemm_kernel64), dim3(grid), dim3(threads), 0, magma_stream ,
size_b, size_b, kblocks, dA, dB, dC );
}
#else
printf("error: currently only supported for real.\n"
"Please run cbcsrlu.cpp using CUBLAS batched.\n");
#endif
return MAGMA_SUCCESS;
}
| 32bc0810e0b7f5d5adf9a7a1665e68cdd22b06d9.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zbcsrlugemm.cu normal z -> c, Wed Sep 17 15:08:43 2014
*/
#include <stdio.h>
#include <cuda_runtime_api.h>
#include <cublas_v2.h> // include before magma.h
#include "magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
#define PRECISION_c
#define Ablockinfo(i,j) Ablockinfo[(i)*c_blocks + (j)]
#define Bblockinfo(i,j) Bblockinfo[(i)*c_blocks + (j)]
#define A(i,j) ((Ablockinfo(i,j)-1)*size_b*size_b)
#define B(i,j) ((Bblockinfo(i,j)-1)*size_b*size_b)
//============================================================
#define ldb m
#define lda m
#define ldc m
#define fetch_x_A(i) (((i)<m*m)?Aval[i]:0)
#define fetch_x_B(i) (((i)<m*m)?B[i]:0)
// every multiprocessor handles one BCSR-block
__global__ void
cbcsr_gemm_kernel32(
int m,
int n,
int kblocks,
float **Avals,
float **Bval,
float **Cval)
{
#if (__CUDA_ARCH__ >= 200)
#if defined(PRECISION_d)
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int idt = ty * 64 + tx;
const int tx2 = idt%16;
const int ty2 = idt/16;
float xxB[4];
float *B;
int trackA = __mul24( ty2, lda) + tx2 ;
float *Aval = Avals[blockIdx.z];
__shared__ float Abs[64][65];
__shared__ float Bb[16][65];
for(int j=ty2; j<64; j+=16){
for(int y=tx2; y<64; y+=16){
Abs[y][j] = fetch_x_A(trackA + y-tx2) ;
}
trackA += __mul24( 16, m);
}
for(int k=0; k<kblocks; k++){
B = Bval[k];
int trackB = tx2+ __mul24( ty2 * 16, ldb );
// Prefetch part of B
#pragma unroll
for(int y=0; y<4; y++){
Bb[tx2][ty2*4+y] = fetch_x_B( trackB + y * ldb) ;
}
__syncthreads(); // this is necessary!!!
float Axs[4];
float Bxp[4];
float Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0};
int k1;
for(k1=0; k1<m-16; k1+=16)
{
trackB += 16;
#pragma unroll
for( int y=0; y<4; y++)
xxB[y] = fetch_x_B( trackB + y*ldb);
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<4; y++){
Axs[y] = Abs[tx2+y*16][j1+k1] ;
}
#pragma unroll
for( int y=0; y<4; y++){
Bxp[y]= Bb[j1][ty2+y*16];
}
#pragma unroll
for( int x=0; x<4; x++)
{
#pragma unroll
for( int y=0; y<4; y++)
{
Cb[x*4+y] += Axs[x]*Bxp[y];
}
}
}
#pragma unroll
for(int y=0; y<4; y++)
Bb[tx2][ty2*4 + y] = xxB[y];
__syncthreads(); // this is necessary!!!
}
// Prepare where to write the result
float *C = Cval[blockIdx.z * kblocks + k];
C += tx2 + __mul24 (ty2 ,ldc);
#pragma unroll
for(int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<4; y++)
Axs[y] = Abs[tx2 + y*16][j1+k1] ;
#pragma unroll
for( int y=0; y<4; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<4; x++)
{
#pragma unroll
for( int y=0;y<4; y++)
{
Cb[x*4 + y] += Axs[x]*Bxp[y];
}
}
}
int gy = ty2;
#pragma unroll
for( int y=0;y<4;y++, gy+=16)
{
int gx = tx2;
#pragma unroll
for(int x=0;x<4;x++, gx+=16)
{
if (gx < m && gy < n){
C[x*16] -= Cb[y+x*4];
}
}
C += ldc*16;
}
}
#endif
#endif
}
// every multiprocessor handles one BCSR-block
__global__ void
cbcsr_gemm_kernel64(
int m,
int n,
int kblocks,
float **Avals,
float **Bval,
float **Cval)
{
#if (__CUDA_ARCH__ >= 200)
#if defined(PRECISION_d)
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int idt = ty * 64 + tx;
const int tx2 = idt%16;
const int ty2 = idt/16;
float xxB[4];
float *B;
int trackA = __mul24( ty2, lda) + tx2 ;
float *Aval = Avals[blockIdx.z];
__shared__ float Abs[64][65];
__shared__ float Bb[16][65];
for(int j=ty2; j<64; j+=16){
for(int y=tx2; y<64; y+=16){
Abs[y][j] = fetch_x_A(trackA + y-tx2) ;
}
trackA += __mul24( 16, m);
}
for(int k=0; k<kblocks; k++){
B = Bval[k];
int trackB = tx2+ __mul24( ty2 * 4, ldb );
// Prefetch part of B
#pragma unroll
for(int y=0; y<4; y++){
Bb[tx2][ty2*4+y] = fetch_x_B( trackB + y * ldb) ;
}
__syncthreads(); // this is necessary!!!
float Axs[4];
float Bxp[4];
float Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0};
int k1;
for(k1=0; k1<m-16; k1+=16)
{
trackB += 16;
#pragma unroll
for( int y=0; y<4; y++)
xxB[y] = fetch_x_B( trackB + y*ldb);
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<4; y++){
Axs[y] = Abs[tx2+y*16][j1+k1] ;
}
#pragma unroll
for( int y=0; y<4; y++){
Bxp[y]= Bb[j1][ty2+y*16];
}
#pragma unroll
for( int x=0; x<4; x++)
{
#pragma unroll
for( int y=0; y<4; y++)
{
Cb[x*4+y] += Axs[x]*Bxp[y];
}
}
}
__syncthreads();
#pragma unroll
for(int y=0; y<4; y++)
Bb[tx2][ty2*4 + y] = xxB[y];
__syncthreads(); // this is necessary!!!
}
// Prepare where to write the result
float *C = Cval[blockIdx.z * kblocks + k];
C += tx2 + __mul24 (ty2 ,ldc);
#pragma unroll
for(int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<4; y++)
Axs[y] = Abs[tx2 + y*16][j1+k1] ;
#pragma unroll
for( int y=0; y<4; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<4; x++)
{
#pragma unroll
for( int y=0;y<4; y++)
{
Cb[x*4 + y] += Axs[x]*Bxp[y];
}
}
}
int gy = ty2;
#pragma unroll
for( int y=0;y<4;y++, gy+=16)
{
int gx = tx2;
#pragma unroll
for(int x=0;x<4;x++, gx+=16)
{
if (gx < m && gy < n){
C[x*16] -= Cb[y+x*4];
}
}
C += ldc*16;
}
}
#endif
#endif
}
/**
Purpose
-------
For a Block-CSR ILU factorization, this routine updates all blocks in
the trailing matrix.
Arguments
---------
@param
size_b magma_int_t
blocksize in BCSR
@param
num_brows magma_int_t
number of block rows
@param
kblocks magma_int_t
number of blocks in row
@param
dA magmaFloatComplex**
input blocks of matrix A
@param
dB magmaFloatComplex**
input blocks of matrix B
@param
dC magmaFloatComplex**
output blocks of matrix C
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_cbcsrluegemm( magma_int_t size_b,
magma_int_t num_brows,
magma_int_t kblocks,
magmaFloatComplex **dA,
magmaFloatComplex **dB,
magmaFloatComplex **dC ){
#if defined(PRECISION_d)
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
printf("error: magma_cbcsrluegemm needs a CUDA architecture"
" with at least 48K shared memory (Fermi +).\n"
"Please run cbcsrlu.cpp using CUBLAS batched.\n");
}
else {
dim3 threads( 64, 4 );
dim3 grid(1, 1, num_brows);
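        // one 64x4 thread block per block row; blockIdx.z selects the block row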
cbcsr_gemm_kernel64<<< grid, threads, 0, magma_stream >>>(
size_b, size_b, kblocks, dA, dB, dC );
}
#else
printf("error: currently only supported for real.\n"
"Please run cbcsrlu.cpp using CUBLAS batched.\n");
#endif
return MAGMA_SUCCESS;
}
|
574915874a85575e8a8164e269586cc14b02e128.hip | // !!! This is a file automatically generated by hipify!!!
/**
* CUDA C/C++ implementation for Accelerating Graph Betweenness Centrality for Sparse Graphs
*
* @author Ashwin Joisa
* @author Praveen Gupta
**/
//=============================================================================================//
// Include header files
#include <iostream>
#include <hip/hip_runtime.h>
// Include custom header file for implementation of Graphs
#include "Graph.h"
//=============================================================================================//
#define MAX_THREAD_COUNT 1024
#define CEIL(a, b) ((a - 1) / b + 1)
//=============================================================================================//
using namespace std;
//=============================================================================================//
#define catchCudaError(error) \
{ \
gpuAssert((error), __FILE__, __LINE__); \
}
float device_time_taken;
void printTime(float ms)
{
int h = ms / (1000 * 3600);
int m = (((int)ms) / (1000 * 60)) % 60;
int s = (((int)ms) / 1000) % 60;
int intMS = ms;
intMS %= 1000;
printf("Time Taken (Parallel) = %dh %dm %ds %dms\n", h, m, s, intMS);
printf("Time Taken in milliseconds : %d\n", (int)ms);
}
// Catch Cuda errors
inline void gpuAssert(hipError_t error, const char *file, int line, bool abort = false)
{
if (error != hipSuccess)
{
printf("\n====== Cuda Error Code %i ======\n %s", error, hipGetErrorString(error));
printf("\nIn file :%s\nOn line: %d", file, line);
if (abort)
exit(-1);
}
}
//=============================================================================================//
__global__ void betweennessCentralityKernel(Graph *graph, float *bwCentrality, int nodeCount,
int *sigma, int *distance, float *dependency)
{
int idx = threadIdx.x;
if(idx >= max((2*(graph->edgeCount)), nodeCount))
return;
__shared__ int s;
__shared__ int current_depth;
__shared__ bool done;
if(idx == 0) {
s = -1;
// printf("Progress... %3d%%", 0);
}
__syncthreads();
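    // Brandes' algorithm: all threads of the single block cooperate on one
    // source vertex s per iteration of this loop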
while(s < nodeCount - 1)
{
if (idx == 0)
{
++s;
// printf("\rProgress... %5.2f%%", (s+1)*100.0/nodeCount);
done = false;
current_depth = -1;
}
__syncthreads();
for (int i = idx; i < nodeCount; i += blockDim.x)
{
if (i == s)
{
distance[i] = 0;
sigma[i] = 1;
}
else
{
distance[i] = INT_MAX;
sigma[i] = 0;
}
dependency[i]= 0.0;
}
__syncthreads();
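        // Forward phase: level-synchronous BFS over the edge list, accumulating
        // shortest-path counts from s in sigma[]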
while (!done)
{
__syncthreads();
if (threadIdx.x == 0){
current_depth++;
}
done = true;
__syncthreads();
for (int i = idx; i < (2*(graph->edgeCount)); i += blockDim.x)
{
int v = graph->edgeList1[i];
if (distance[v] == current_depth)
{
int w = graph->edgeList2[i];
if (distance[w] == INT_MAX)
{
distance[w] = distance[v] + 1;
done = false;
}
if (distance[w] == (distance[v] + 1))
{
atomicAdd(&sigma[w], sigma[v]);
}
}
}
__syncthreads();
}
__syncthreads();
// Reverse BFS
while(current_depth)
{
if(idx == 0){
current_depth--;
}
__syncthreads();
for (int i = idx; i < (2*(graph->edgeCount)); i += blockDim.x)
{
int v = graph->edgeList1[i];
if(distance[v] == current_depth)
{
// for(int r = graph->adjacencyListPointers[v]; r < graph->adjacencyListPointers[v + 1]; r++)
// {
int w = graph->edgeList2[i];
if(distance[w] == (distance[v] + 1))
{
if (sigma[w] != 0) {
atomicAdd(dependency + v, (sigma[v] * 1.0 / sigma[w]) * (1 + dependency[w]));
}
}
}
}
__syncthreads();
}
for(int v=idx; v<nodeCount; v+=blockDim.x){
if (v != s)
{
// Each shortest path is counted twice. So, each partial shortest path dependency is halved.
bwCentrality[v] += dependency[v] / 2;
}
}
__syncthreads();
}
}
float *betweennessCentrality(Graph *graph, int nodeCount)
{
float *bwCentrality = new float[nodeCount]();
float *device_bwCentrality, *dependency;
int *sigma, *distance;
catchCudaError(hipMalloc((void **)&device_bwCentrality, sizeof(float) * nodeCount));
catchCudaError(hipMalloc((void **)&sigma, sizeof(int) * nodeCount));
catchCudaError(hipMalloc((void **)&distance, sizeof(int) * nodeCount));
catchCudaError(hipMalloc((void **)&dependency, sizeof(float) * nodeCount));
catchCudaError(hipMemcpy(device_bwCentrality, bwCentrality, sizeof(float) * nodeCount, hipMemcpyHostToDevice));
// Timer
hipEvent_t device_start, device_end;
catchCudaError(hipEventCreate(&device_start));
catchCudaError(hipEventCreate(&device_end));
catchCudaError(hipEventRecord(device_start));
hipLaunchKernelGGL(( betweennessCentralityKernel), dim3(1), dim3(MAX_THREAD_COUNT), 0, 0, graph, device_bwCentrality, nodeCount, sigma, distance, dependency);
hipDeviceSynchronize();
//End of progress bar
// cout << endl;
// Timer
catchCudaError(hipEventRecord(device_end));
catchCudaError(hipEventSynchronize(device_end));
hipEventElapsedTime(&device_time_taken, device_start, device_end);
// Copy back and free memory
catchCudaError(hipMemcpy(bwCentrality, device_bwCentrality, sizeof(float) * nodeCount, hipMemcpyDeviceToHost));
catchCudaError(hipFree(device_bwCentrality));
catchCudaError(hipFree(sigma));
catchCudaError(hipFree(dependency));
catchCudaError(hipFree(distance));
return bwCentrality;
}
int main(int argc, char *argv[])
{
if (argc < 2)
{
cout << "Usage: " << argv[0] << " <graph_input_file> [output_file]\n";
return 0;
}
char choice;
cout << "Would you like to print the Graph Betweenness Centrality for all nodes? (y/n) ";
cin >> choice;
freopen(argv[1], "r", stdin);
Graph *host_graph = new Graph();
Graph *device_graph;
catchCudaError(hipMalloc((void **)&device_graph, sizeof(Graph)));
host_graph->readGraph();
host_graph->convertToCOO();
int nodeCount = host_graph->getNodeCount();
int edgeCount = host_graph->getEdgeCount();
catchCudaError(hipMemcpy(device_graph, host_graph, sizeof(Graph), hipMemcpyHostToDevice));
// Copy edge List to device
int *edgeList1;
int *edgeList2;
    // Allocate device memory and copy
catchCudaError(hipMalloc((void **)&edgeList1, sizeof(int) * (2 * edgeCount + 1)));
catchCudaError(hipMemcpy(edgeList1, host_graph->edgeList1, sizeof(int) * (2 * edgeCount + 1), hipMemcpyHostToDevice));
catchCudaError(hipMalloc((void **)&edgeList2, sizeof(int) * (2 * edgeCount + 1)));
catchCudaError(hipMemcpy(edgeList2, host_graph->edgeList2, sizeof(int) * (2 * edgeCount + 1), hipMemcpyHostToDevice));
// Update the pointer to this, in device_graph
catchCudaError(hipMemcpy(&(device_graph->edgeList1), &edgeList1, sizeof(int *), hipMemcpyHostToDevice));
catchCudaError(hipMemcpy(&(device_graph->edgeList2), &edgeList2, sizeof(int *), hipMemcpyHostToDevice));
float *bwCentrality = betweennessCentrality(device_graph, nodeCount);
float maxBetweenness = -1;
for (int i = 0; i < nodeCount; i++)
{
maxBetweenness = max(maxBetweenness, bwCentrality[i]);
if (choice == 'y' || choice == 'Y')
printf("Node %d => Betweeness Centrality %0.2lf\n", i, bwCentrality[i]);
}
cout << endl;
printf("\nMaximum Betweenness Centrality ==> %0.2lf\n", maxBetweenness);
printTime(device_time_taken);
if (argc == 3)
{
freopen(argv[2], "w", stdout);
for (int i = 0; i < nodeCount; i++)
cout << bwCentrality[i] << " ";
cout << endl;
}
// Free all memory
delete[] bwCentrality;
catchCudaError(hipFree(edgeList1));
catchCudaError(hipFree(edgeList2));
catchCudaError(hipFree(device_graph));
} | 574915874a85575e8a8164e269586cc14b02e128.cu | /**
* CUDA C/C++ implementation for Accelerating Graph Betweenness Centrality for Sparse Graphs
*
* @author Ashwin Joisa
* @author Praveen Gupta
**/
//=============================================================================================//
// Include header files
#include <iostream>
#include <cuda.h>
// Include custom header file for implementation of Graphs
#include "Graph.h"
//=============================================================================================//
#define MAX_THREAD_COUNT 1024
#define CEIL(a, b) ((a - 1) / b + 1)
//=============================================================================================//
using namespace std;
//=============================================================================================//
#define catchCudaError(error) \
{ \
gpuAssert((error), __FILE__, __LINE__); \
}
float device_time_taken;
void printTime(float ms)
{
int h = ms / (1000 * 3600);
int m = (((int)ms) / (1000 * 60)) % 60;
int s = (((int)ms) / 1000) % 60;
int intMS = ms;
intMS %= 1000;
printf("Time Taken (Parallel) = %dh %dm %ds %dms\n", h, m, s, intMS);
printf("Time Taken in milliseconds : %d\n", (int)ms);
}
// Catch Cuda errors
inline void gpuAssert(cudaError_t error, const char *file, int line, bool abort = false)
{
if (error != cudaSuccess)
{
printf("\n====== Cuda Error Code %i ======\n %s", error, cudaGetErrorString(error));
printf("\nIn file :%s\nOn line: %d", file, line);
if (abort)
exit(-1);
}
}
//=============================================================================================//
__global__ void betweennessCentralityKernel(Graph *graph, float *bwCentrality, int nodeCount,
int *sigma, int *distance, float *dependency)
{
int idx = threadIdx.x;
if(idx >= max((2*(graph->edgeCount)), nodeCount))
return;
__shared__ int s;
__shared__ int current_depth;
__shared__ bool done;
if(idx == 0) {
s = -1;
// printf("Progress... %3d%%", 0);
}
__syncthreads();
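    // Brandes' algorithm: all threads of the single block cooperate on one
    // source vertex s per iteration of this loop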
while(s < nodeCount - 1)
{
if (idx == 0)
{
++s;
// printf("\rProgress... %5.2f%%", (s+1)*100.0/nodeCount);
done = false;
current_depth = -1;
}
__syncthreads();
for (int i = idx; i < nodeCount; i += blockDim.x)
{
if (i == s)
{
distance[i] = 0;
sigma[i] = 1;
}
else
{
distance[i] = INT_MAX;
sigma[i] = 0;
}
dependency[i]= 0.0;
}
__syncthreads();
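        // Forward phase: level-synchronous BFS over the edge list, accumulating
        // shortest-path counts from s in sigma[]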
while (!done)
{
__syncthreads();
if (threadIdx.x == 0){
current_depth++;
}
done = true;
__syncthreads();
for (int i = idx; i < (2*(graph->edgeCount)); i += blockDim.x)
{
int v = graph->edgeList1[i];
if (distance[v] == current_depth)
{
int w = graph->edgeList2[i];
if (distance[w] == INT_MAX)
{
distance[w] = distance[v] + 1;
done = false;
}
if (distance[w] == (distance[v] + 1))
{
atomicAdd(&sigma[w], sigma[v]);
}
}
}
__syncthreads();
}
__syncthreads();
// Reverse BFS
while(current_depth)
{
if(idx == 0){
current_depth--;
}
__syncthreads();
for (int i = idx; i < (2*(graph->edgeCount)); i += blockDim.x)
{
int v = graph->edgeList1[i];
if(distance[v] == current_depth)
{
// for(int r = graph->adjacencyListPointers[v]; r < graph->adjacencyListPointers[v + 1]; r++)
// {
int w = graph->edgeList2[i];
if(distance[w] == (distance[v] + 1))
{
if (sigma[w] != 0) {
atomicAdd(dependency + v, (sigma[v] * 1.0 / sigma[w]) * (1 + dependency[w]));
}
}
}
}
__syncthreads();
}
for(int v=idx; v<nodeCount; v+=blockDim.x){
if (v != s)
{
// Each shortest path is counted twice. So, each partial shortest path dependency is halved.
bwCentrality[v] += dependency[v] / 2;
}
}
__syncthreads();
}
}
float *betweennessCentrality(Graph *graph, int nodeCount)
{
float *bwCentrality = new float[nodeCount]();
float *device_bwCentrality, *dependency;
int *sigma, *distance;
catchCudaError(cudaMalloc((void **)&device_bwCentrality, sizeof(float) * nodeCount));
catchCudaError(cudaMalloc((void **)&sigma, sizeof(int) * nodeCount));
catchCudaError(cudaMalloc((void **)&distance, sizeof(int) * nodeCount));
catchCudaError(cudaMalloc((void **)&dependency, sizeof(float) * nodeCount));
catchCudaError(cudaMemcpy(device_bwCentrality, bwCentrality, sizeof(float) * nodeCount, cudaMemcpyHostToDevice));
// Timer
cudaEvent_t device_start, device_end;
catchCudaError(cudaEventCreate(&device_start));
catchCudaError(cudaEventCreate(&device_end));
catchCudaError(cudaEventRecord(device_start));
betweennessCentralityKernel<<<1, MAX_THREAD_COUNT>>>(graph, device_bwCentrality, nodeCount, sigma, distance, dependency);
cudaDeviceSynchronize();
//End of progress bar
// cout << endl;
// Timer
catchCudaError(cudaEventRecord(device_end));
catchCudaError(cudaEventSynchronize(device_end));
cudaEventElapsedTime(&device_time_taken, device_start, device_end);
// Copy back and free memory
catchCudaError(cudaMemcpy(bwCentrality, device_bwCentrality, sizeof(float) * nodeCount, cudaMemcpyDeviceToHost));
catchCudaError(cudaFree(device_bwCentrality));
catchCudaError(cudaFree(sigma));
catchCudaError(cudaFree(dependency));
catchCudaError(cudaFree(distance));
return bwCentrality;
}
int main(int argc, char *argv[])
{
if (argc < 2)
{
cout << "Usage: " << argv[0] << " <graph_input_file> [output_file]\n";
return 0;
}
char choice;
cout << "Would you like to print the Graph Betweenness Centrality for all nodes? (y/n) ";
cin >> choice;
freopen(argv[1], "r", stdin);
Graph *host_graph = new Graph();
Graph *device_graph;
catchCudaError(cudaMalloc((void **)&device_graph, sizeof(Graph)));
host_graph->readGraph();
host_graph->convertToCOO();
int nodeCount = host_graph->getNodeCount();
int edgeCount = host_graph->getEdgeCount();
catchCudaError(cudaMemcpy(device_graph, host_graph, sizeof(Graph), cudaMemcpyHostToDevice));
// Copy edge List to device
int *edgeList1;
int *edgeList2;
    // Allocate device memory and copy
catchCudaError(cudaMalloc((void **)&edgeList1, sizeof(int) * (2 * edgeCount + 1)));
catchCudaError(cudaMemcpy(edgeList1, host_graph->edgeList1, sizeof(int) * (2 * edgeCount + 1), cudaMemcpyHostToDevice));
catchCudaError(cudaMalloc((void **)&edgeList2, sizeof(int) * (2 * edgeCount + 1)));
catchCudaError(cudaMemcpy(edgeList2, host_graph->edgeList2, sizeof(int) * (2 * edgeCount + 1), cudaMemcpyHostToDevice));
// Update the pointer to this, in device_graph
catchCudaError(cudaMemcpy(&(device_graph->edgeList1), &edgeList1, sizeof(int *), cudaMemcpyHostToDevice));
catchCudaError(cudaMemcpy(&(device_graph->edgeList2), &edgeList2, sizeof(int *), cudaMemcpyHostToDevice));
float *bwCentrality = betweennessCentrality(device_graph, nodeCount);
float maxBetweenness = -1;
for (int i = 0; i < nodeCount; i++)
{
maxBetweenness = max(maxBetweenness, bwCentrality[i]);
if (choice == 'y' || choice == 'Y')
printf("Node %d => Betweeness Centrality %0.2lf\n", i, bwCentrality[i]);
}
cout << endl;
printf("\nMaximum Betweenness Centrality ==> %0.2lf\n", maxBetweenness);
printTime(device_time_taken);
if (argc == 3)
{
freopen(argv[2], "w", stdout);
for (int i = 0; i < nodeCount; i++)
cout << bwCentrality[i] << " ";
cout << endl;
}
// Free all memory
delete[] bwCentrality;
catchCudaError(cudaFree(edgeList1));
catchCudaError(cudaFree(edgeList2));
catchCudaError(cudaFree(device_graph));
} |
6661c5abff8bd049fb131d6e2f8d5a90fa039ba3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "nBodyKernels.cuh"
#include <cmath>
__global__ void updateSimple(float4 * positions, float4 * velocities)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= N) return;
float position_i_x = positions[i].x; //the fourth value of position is the scale
float position_i_y = positions[i].y;
float position_i_z = positions[i].z;
float position_i_w = positions[i].w;
float velocity_i_x = velocities[i].x;
float velocity_i_y = velocities[i].y;
float velocity_i_z = velocities[i].z;
float position_j_x, position_j_y, position_j_z, position_j_w;
float newAcc_i_x = 0, newAcc_i_y = 0, newAcc_i_z = 0;
float dist_ij_x, dist_ij_y, dist_ij_z;
for (int j = 0; j < N; j++) {
position_j_x = positions[j].x;
position_j_y = positions[j].y;
position_j_z = positions[j].z;
position_j_w = positions[j].w;
dist_ij_x = position_j_x - position_i_x;
dist_ij_y = position_j_y - position_i_y;
dist_ij_z = position_j_z - position_i_z;
float distSqr = dist_ij_x * dist_ij_x + dist_ij_y * dist_ij_y + dist_ij_z * dist_ij_z + EPS_SQUARED;
float denom = (sqrtf(distSqr * distSqr * distSqr));
float mass_jOverDenom = position_j_w * MASS_MULTIPLIER / denom;
newAcc_i_x += mass_jOverDenom * dist_ij_x;
newAcc_i_y += mass_jOverDenom * dist_ij_y;
newAcc_i_z += mass_jOverDenom * dist_ij_z;
};
newAcc_i_x *= G;
newAcc_i_y *= G;
newAcc_i_z *= G;
//Integration step
velocity_i_x += newAcc_i_x * TIME_STEP;
velocity_i_y += newAcc_i_y * TIME_STEP;
velocity_i_z += newAcc_i_z * TIME_STEP;
positions[i] = {
position_i_x + velocity_i_x * TIME_STEP,
position_i_y + velocity_i_y * TIME_STEP,
position_i_z + velocity_i_z * TIME_STEP,
position_i_w
};
velocities[i] = { velocity_i_x, velocity_i_y, velocity_i_z, 0.f };
}
__global__ void updateSimpleLoopUnroll(float4 * positions, float4 * velocities)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= N) return;
float position_i_x = positions[i].x;
float position_i_y = positions[i].y;
float position_i_z = positions[i].z;
float position_i_w = positions[i].w;
float velocity_i_x = velocities[i].x;
float velocity_i_y = velocities[i].y;
float velocity_i_z = velocities[i].z;
float position_j_x, position_j_y, position_j_z, position_j_w;
float newAcc_i_x = 0, newAcc_i_y = 0, newAcc_i_z = 0;
float dist_ij_x, dist_ij_y, dist_ij_z, distSqr, denom, mass_jOverDenom;
for (int j = 0; j < N; j+=2) {
position_j_x = positions[j].x;
position_j_y = positions[j].y;
position_j_z = positions[j].z;
position_j_w = positions[j].w;
dist_ij_x = position_j_x - position_i_x;
dist_ij_y = position_j_y - position_i_y;
dist_ij_z = position_j_z - position_i_z;
distSqr = dist_ij_x * dist_ij_x + dist_ij_y * dist_ij_y + dist_ij_z * dist_ij_z + EPS_SQUARED;
denom = (sqrtf(distSqr * distSqr * distSqr));
mass_jOverDenom = position_j_w * MASS_MULTIPLIER / denom;
newAcc_i_x += mass_jOverDenom * dist_ij_x;
newAcc_i_y += mass_jOverDenom * dist_ij_y;
newAcc_i_z += mass_jOverDenom * dist_ij_z;
position_j_x = positions[j+1].x;
position_j_y = positions[j+1].y;
position_j_z = positions[j+1].z;
position_j_w = positions[j+1].w;
dist_ij_x = position_j_x - position_i_x;
dist_ij_y = position_j_y - position_i_y;
dist_ij_z = position_j_z - position_i_z;
distSqr = dist_ij_x * dist_ij_x + dist_ij_y * dist_ij_y + dist_ij_z * dist_ij_z + EPS_SQUARED;
denom = (sqrtf(distSqr * distSqr * distSqr));
mass_jOverDenom = position_j_w * MASS_MULTIPLIER / denom;
newAcc_i_x += mass_jOverDenom * dist_ij_x;
newAcc_i_y += mass_jOverDenom * dist_ij_y;
newAcc_i_z += mass_jOverDenom * dist_ij_z;
};
newAcc_i_x *= G;
newAcc_i_y *= G;
newAcc_i_z *= G;
//Integration step
velocity_i_x += newAcc_i_x * TIME_STEP;
velocity_i_y += newAcc_i_y * TIME_STEP;
velocity_i_z += newAcc_i_z * TIME_STEP;
positions[i] = {
position_i_x + velocity_i_x * TIME_STEP,
position_i_y + velocity_i_y * TIME_STEP,
position_i_z + velocity_i_z * TIME_STEP,
position_i_w
};
velocities[i] = { velocity_i_x, velocity_i_y, velocity_i_z, 0.f };
}
__global__ void updateShared(float4 * positions, float4 * velocities)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= N) return;
__shared__ float4 sharedPositions[BLOCK_DIM];
float position_i_x = positions[i].x; //the fourth value of position is the scale
float position_i_y = positions[i].y;
float position_i_z = positions[i].z;
float position_i_w = positions[i].w;
float velocity_i_x = velocities[i].x;
float velocity_i_y = velocities[i].y;
float velocity_i_z = velocities[i].z;
float newAcc_i_x = 0, newAcc_i_y = 0, newAcc_i_z = 0;
float dist_ij_x, dist_ij_y, dist_ij_z, distSqr, denom, mass_jOverDenom;
int block_i, size_curr;
int n_blocks = (N + BLOCK_DIM) / BLOCK_DIM;
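// Tile the interaction loop through shared memory: on each pass the block
// cooperatively loads up to BLOCK_DIM bodies, synchronizes, and every thread
// accumulates the contribution of that tile before the next load.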
for (int i = 0; i < n_blocks; i++) {
block_i = i * BLOCK_DIM;
if (block_i + threadIdx.x < N) {
sharedPositions[threadIdx.x] = positions[block_i + threadIdx.x];
}
__syncthreads();
size_curr = i == n_blocks - 1 ? N - i * BLOCK_DIM : BLOCK_DIM;
for (int j = 0; j < size_curr; j++) {
dist_ij_x = sharedPositions[j].x - position_i_x;
dist_ij_y = sharedPositions[j].y - position_i_y;
dist_ij_z = sharedPositions[j].z - position_i_z;
distSqr = dist_ij_x * dist_ij_x + dist_ij_y * dist_ij_y + dist_ij_z * dist_ij_z + EPS_SQUARED;
mass_jOverDenom = sharedPositions[j].w * MASS_MULTIPLIER / (sqrtf(distSqr * distSqr * distSqr));
newAcc_i_x += mass_jOverDenom * dist_ij_x;
newAcc_i_y += mass_jOverDenom * dist_ij_y;
newAcc_i_z += mass_jOverDenom * dist_ij_z;
}
__syncthreads();
}
newAcc_i_x *= G;
newAcc_i_y *= G;
newAcc_i_z *= G;
//Integration step
velocity_i_x += newAcc_i_x * TIME_STEP;
velocity_i_y += newAcc_i_y * TIME_STEP;
velocity_i_z += newAcc_i_z * TIME_STEP;
positions[i] = {
position_i_x + velocity_i_x * TIME_STEP,
position_i_y + velocity_i_y * TIME_STEP,
position_i_z + velocity_i_z * TIME_STEP,
position_i_w
};
velocities[i] = { velocity_i_x, velocity_i_y, velocity_i_z, 0.f };
}
__global__ void updateSharedLoopUnroll(float4 * positions, float4 * velocities)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= N) return;
__shared__ float4 sharedPositions[BLOCK_DIM];
float position_i_x = positions[i].x; //the fourth value of position is the scale
float position_i_y = positions[i].y;
float position_i_z = positions[i].z;
float position_i_w = positions[i].w;
float velocity_i_x = velocities[i].x;
float velocity_i_y = velocities[i].y;
float velocity_i_z = velocities[i].z;
float newAcc_i_x = 0, newAcc_i_y = 0, newAcc_i_z = 0;
float dist_ij_x, dist_ij_y, dist_ij_z, distSqr, denom, mass_jOverDenom;
int block_i, size_curr;
int n_blocks = (N + BLOCK_DIM) / BLOCK_DIM;
for (int i = 0; i < n_blocks; i++) {
block_i = i * BLOCK_DIM;
if (block_i + threadIdx.x < N) {
sharedPositions[threadIdx.x] = positions[block_i + threadIdx.x];
}
__syncthreads();
size_curr = i == n_blocks - 1 ? N - i * BLOCK_DIM : BLOCK_DIM;
for (int j = 0; j < size_curr; j += 2) { // unrolled by two: handles bodies j and j+1 each pass (assumes an even tile size, as updateSimpleLoopUnroll assumes an even N)
dist_ij_x = sharedPositions[j].x - position_i_x;
dist_ij_y = sharedPositions[j].y - position_i_y;
dist_ij_z = sharedPositions[j].z - position_i_z;
distSqr = dist_ij_x * dist_ij_x + dist_ij_y * dist_ij_y + dist_ij_z * dist_ij_z + EPS_SQUARED;
mass_jOverDenom = sharedPositions[j].w * MASS_MULTIPLIER / (sqrtf(distSqr * distSqr * distSqr));
newAcc_i_x += mass_jOverDenom * dist_ij_x;
newAcc_i_y += mass_jOverDenom * dist_ij_y;
newAcc_i_z += mass_jOverDenom * dist_ij_z;
dist_ij_x = sharedPositions[j+1].x - position_i_x;
dist_ij_y = sharedPositions[j+1].y - position_i_y;
dist_ij_z = sharedPositions[j+1].z - position_i_z;
distSqr = dist_ij_x * dist_ij_x + dist_ij_y * dist_ij_y + dist_ij_z * dist_ij_z + EPS_SQUARED;
mass_jOverDenom = sharedPositions[j+1].w * MASS_MULTIPLIER / (sqrtf(distSqr * distSqr * distSqr));
newAcc_i_x += mass_jOverDenom * dist_ij_x;
newAcc_i_y += mass_jOverDenom * dist_ij_y;
newAcc_i_z += mass_jOverDenom * dist_ij_z;
}
__syncthreads();
}
newAcc_i_x *= G;
newAcc_i_y *= G;
newAcc_i_z *= G;
//Integration step
velocity_i_x += newAcc_i_x * TIME_STEP;
velocity_i_y += newAcc_i_y * TIME_STEP;
velocity_i_z += newAcc_i_z * TIME_STEP;
positions[i] = {
position_i_x + velocity_i_x * TIME_STEP,
position_i_y + velocity_i_y * TIME_STEP,
position_i_z + velocity_i_z * TIME_STEP,
position_i_w
};
velocities[i] = { velocity_i_x, velocity_i_y, velocity_i_z, 0.f };
}
__global__ void generatePointInsideSphere(float4 * points, hiprandState_t * states)
{
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= N) return;
hiprand_init(tid, tid, tid, &states[tid]);
float x, y, z, w, trial = 0;
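// Rejection sampling: draw candidate coordinates from the bounding cube and
// retry until the acceptance test below passes, capping the number of
// attempts at MAX_TRIALS so no thread can spin forever.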
do {
x = (hiprand_uniform(&states[tid]) * 2 - 1) * RADIUS * 5;
y = (hiprand_uniform(&states[tid]) * 2 - 1) * RADIUS * 5;
z = (hiprand_uniform(&states[tid]) * 2 - 1) * RADIUS * 5;
w = hiprand_uniform(&states[tid]) * SCALE;
trial += 1;
} while (x * x + y * y + z * z > RADIUS && trial <= MAX_TRIALS);
points[tid] = { x, y, z, w };
printf("Index: %d, x: %.3f, y: %.3f, z: %.3f, mass: %.3f \n", tid, x, y, z, w);
}
| 6661c5abff8bd049fb131d6e2f8d5a90fa039ba3.cu | #include "nBodyKernels.cuh"
#include <cmath>
__global__ void updateSimple(float4 * positions, float4 * velocities)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= N) return;
float position_i_x = positions[i].x; //the fourth value of position is the scale
float position_i_y = positions[i].y;
float position_i_z = positions[i].z;
float position_i_w = positions[i].w;
float velocity_i_x = velocities[i].x;
float velocity_i_y = velocities[i].y;
float velocity_i_z = velocities[i].z;
float position_j_x, position_j_y, position_j_z, position_j_w;
float newAcc_i_x = 0, newAcc_i_y = 0, newAcc_i_z = 0;
float dist_ij_x, dist_ij_y, dist_ij_z;
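// Accumulate the softened pairwise acceleration
//   a_i = G * sum_j m_j * (r_j - r_i) / (|r_j - r_i|^2 + EPS^2)^(3/2)
// where m_j = positions[j].w * MASS_MULTIPLIER. The EPS_SQUARED softening term
// keeps the j == i self-term finite (it contributes zero because r_j - r_i vanishes).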
for (int j = 0; j < N; j++) {
position_j_x = positions[j].x;
position_j_y = positions[j].y;
position_j_z = positions[j].z;
position_j_w = positions[j].w;
dist_ij_x = position_j_x - position_i_x;
dist_ij_y = position_j_y - position_i_y;
dist_ij_z = position_j_z - position_i_z;
float distSqr = dist_ij_x * dist_ij_x + dist_ij_y * dist_ij_y + dist_ij_z * dist_ij_z + EPS_SQUARED;
float denom = (sqrtf(distSqr * distSqr * distSqr));
float mass_jOverDenom = position_j_w * MASS_MULTIPLIER / denom;
newAcc_i_x += mass_jOverDenom * dist_ij_x;
newAcc_i_y += mass_jOverDenom * dist_ij_y;
newAcc_i_z += mass_jOverDenom * dist_ij_z;
};
newAcc_i_x *= G;
newAcc_i_y *= G;
newAcc_i_z *= G;
//Integration step
velocity_i_x += newAcc_i_x * TIME_STEP;
velocity_i_y += newAcc_i_y * TIME_STEP;
velocity_i_z += newAcc_i_z * TIME_STEP;
positions[i] = {
position_i_x + velocity_i_x * TIME_STEP,
position_i_y + velocity_i_y * TIME_STEP,
position_i_z + velocity_i_z * TIME_STEP,
position_i_w
};
velocities[i] = { velocity_i_x, velocity_i_y, velocity_i_z, 0.f };
}
__global__ void updateSimpleLoopUnroll(float4 * positions, float4 * velocities)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= N) return;
float position_i_x = positions[i].x;
float position_i_y = positions[i].y;
float position_i_z = positions[i].z;
float position_i_w = positions[i].w;
float velocity_i_x = velocities[i].x;
float velocity_i_y = velocities[i].y;
float velocity_i_z = velocities[i].z;
float position_j_x, position_j_y, position_j_z, position_j_w;
float newAcc_i_x = 0, newAcc_i_y = 0, newAcc_i_z = 0;
float dist_ij_x, dist_ij_y, dist_ij_z, distSqr, denom, mass_jOverDenom;
for (int j = 0; j < N; j+=2) {
position_j_x = positions[j].x;
position_j_y = positions[j].y;
position_j_z = positions[j].z;
position_j_w = positions[j].w;
dist_ij_x = position_j_x - position_i_x;
dist_ij_y = position_j_y - position_i_y;
dist_ij_z = position_j_z - position_i_z;
distSqr = dist_ij_x * dist_ij_x + dist_ij_y * dist_ij_y + dist_ij_z * dist_ij_z + EPS_SQUARED;
denom = (sqrtf(distSqr * distSqr * distSqr));
mass_jOverDenom = position_j_w * MASS_MULTIPLIER / denom;
newAcc_i_x += mass_jOverDenom * dist_ij_x;
newAcc_i_y += mass_jOverDenom * dist_ij_y;
newAcc_i_z += mass_jOverDenom * dist_ij_z;
position_j_x = positions[j+1].x;
position_j_y = positions[j+1].y;
position_j_z = positions[j+1].z;
position_j_w = positions[j+1].w;
dist_ij_x = position_j_x - position_i_x;
dist_ij_y = position_j_y - position_i_y;
dist_ij_z = position_j_z - position_i_z;
distSqr = dist_ij_x * dist_ij_x + dist_ij_y * dist_ij_y + dist_ij_z * dist_ij_z + EPS_SQUARED;
denom = (sqrtf(distSqr * distSqr * distSqr));
mass_jOverDenom = position_j_w * MASS_MULTIPLIER / denom;
newAcc_i_x += mass_jOverDenom * dist_ij_x;
newAcc_i_y += mass_jOverDenom * dist_ij_y;
newAcc_i_z += mass_jOverDenom * dist_ij_z;
};
newAcc_i_x *= G;
newAcc_i_y *= G;
newAcc_i_z *= G;
//Integration step
velocity_i_x += newAcc_i_x * TIME_STEP;
velocity_i_y += newAcc_i_y * TIME_STEP;
velocity_i_z += newAcc_i_z * TIME_STEP;
positions[i] = {
position_i_x + velocity_i_x * TIME_STEP,
position_i_y + velocity_i_y * TIME_STEP,
position_i_z + velocity_i_z * TIME_STEP,
position_i_w
};
velocities[i] = { velocity_i_x, velocity_i_y, velocity_i_z, 0.f };
}
__global__ void updateShared(float4 * positions, float4 * velocities)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= N) return;
__shared__ float4 sharedPositions[BLOCK_DIM];
float position_i_x = positions[i].x; //the fourth value of position is the scale
float position_i_y = positions[i].y;
float position_i_z = positions[i].z;
float position_i_w = positions[i].w;
float velocity_i_x = velocities[i].x;
float velocity_i_y = velocities[i].y;
float velocity_i_z = velocities[i].z;
float newAcc_i_x = 0, newAcc_i_y = 0, newAcc_i_z = 0;
float dist_ij_x, dist_ij_y, dist_ij_z, distSqr, denom, mass_jOverDenom;
int block_i, size_curr;
int n_blocks = (N + BLOCK_DIM) / BLOCK_DIM;
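// Tile the interaction loop through shared memory: on each pass the block
// cooperatively loads up to BLOCK_DIM bodies, synchronizes, and every thread
// accumulates the contribution of that tile before the next load.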
for (int i = 0; i < n_blocks; i++) {
block_i = i * BLOCK_DIM;
if (block_i + threadIdx.x < N) {
sharedPositions[threadIdx.x] = positions[block_i + threadIdx.x];
}
__syncthreads();
size_curr = i == n_blocks - 1 ? N - i * BLOCK_DIM : BLOCK_DIM;
for (int j = 0; j < size_curr; j++) {
dist_ij_x = sharedPositions[j].x - position_i_x;
dist_ij_y = sharedPositions[j].y - position_i_y;
dist_ij_z = sharedPositions[j].z - position_i_z;
distSqr = dist_ij_x * dist_ij_x + dist_ij_y * dist_ij_y + dist_ij_z * dist_ij_z + EPS_SQUARED;
mass_jOverDenom = sharedPositions[j].w * MASS_MULTIPLIER / (sqrtf(distSqr * distSqr * distSqr));
newAcc_i_x += mass_jOverDenom * dist_ij_x;
newAcc_i_y += mass_jOverDenom * dist_ij_y;
newAcc_i_z += mass_jOverDenom * dist_ij_z;
}
__syncthreads();
}
newAcc_i_x *= G;
newAcc_i_y *= G;
newAcc_i_z *= G;
//Integration step
velocity_i_x += newAcc_i_x * TIME_STEP;
velocity_i_y += newAcc_i_y * TIME_STEP;
velocity_i_z += newAcc_i_z * TIME_STEP;
positions[i] = {
position_i_x + velocity_i_x * TIME_STEP,
position_i_y + velocity_i_y * TIME_STEP,
position_i_z + velocity_i_z * TIME_STEP,
position_i_w
};
velocities[i] = { velocity_i_x, velocity_i_y, velocity_i_z, 0.f };
}
__global__ void updateSharedLoopUnroll(float4 * positions, float4 * velocities)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= N) return;
__shared__ float4 sharedPositions[BLOCK_DIM];
float position_i_x = positions[i].x; //the fourth value of position is the scale
float position_i_y = positions[i].y;
float position_i_z = positions[i].z;
float position_i_w = positions[i].w;
float velocity_i_x = velocities[i].x;
float velocity_i_y = velocities[i].y;
float velocity_i_z = velocities[i].z;
float newAcc_i_x = 0, newAcc_i_y = 0, newAcc_i_z = 0;
float dist_ij_x, dist_ij_y, dist_ij_z, distSqr, denom, mass_jOverDenom;
int block_i, size_curr;
int n_blocks = (N + BLOCK_DIM) / BLOCK_DIM;
for (int i = 0; i < n_blocks; i++) {
block_i = i * BLOCK_DIM;
if (block_i + threadIdx.x < N) {
sharedPositions[threadIdx.x] = positions[block_i + threadIdx.x];
}
__syncthreads();
size_curr = i == n_blocks - 1 ? N - i * BLOCK_DIM : BLOCK_DIM;
for (int j = 0; j < size_curr; j += 2) { // unrolled by two: handles bodies j and j+1 each pass (assumes an even tile size, as updateSimpleLoopUnroll assumes an even N)
dist_ij_x = sharedPositions[j].x - position_i_x;
dist_ij_y = sharedPositions[j].y - position_i_y;
dist_ij_z = sharedPositions[j].z - position_i_z;
distSqr = dist_ij_x * dist_ij_x + dist_ij_y * dist_ij_y + dist_ij_z * dist_ij_z + EPS_SQUARED;
mass_jOverDenom = sharedPositions[j].w * MASS_MULTIPLIER / (sqrtf(distSqr * distSqr * distSqr));
newAcc_i_x += mass_jOverDenom * dist_ij_x;
newAcc_i_y += mass_jOverDenom * dist_ij_y;
newAcc_i_z += mass_jOverDenom * dist_ij_z;
dist_ij_x = sharedPositions[j+1].x - position_i_x;
dist_ij_y = sharedPositions[j+1].y - position_i_y;
dist_ij_z = sharedPositions[j+1].z - position_i_z;
distSqr = dist_ij_x * dist_ij_x + dist_ij_y * dist_ij_y + dist_ij_z * dist_ij_z + EPS_SQUARED;
mass_jOverDenom = sharedPositions[j+1].w * MASS_MULTIPLIER / (sqrtf(distSqr * distSqr * distSqr));
newAcc_i_x += mass_jOverDenom * dist_ij_x;
newAcc_i_y += mass_jOverDenom * dist_ij_y;
newAcc_i_z += mass_jOverDenom * dist_ij_z;
}
__syncthreads();
}
newAcc_i_x *= G;
newAcc_i_y *= G;
newAcc_i_z *= G;
//Integration step
velocity_i_x += newAcc_i_x * TIME_STEP;
velocity_i_y += newAcc_i_y * TIME_STEP;
velocity_i_z += newAcc_i_z * TIME_STEP;
positions[i] = {
position_i_x + velocity_i_x * TIME_STEP,
position_i_y + velocity_i_y * TIME_STEP,
position_i_z + velocity_i_z * TIME_STEP,
position_i_w
};
velocities[i] = { velocity_i_x, velocity_i_y, velocity_i_z, 0.f };
}
__global__ void generatePointInsideSphere(float4 * points, curandState * states)
{
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= N) return;
curand_init(tid, tid, tid, &states[tid]);
float x, y, z, w, trial = 0;
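// Rejection sampling: draw candidate coordinates from the bounding cube and
// retry until the acceptance test below passes, capping the number of
// attempts at MAX_TRIALS so no thread can spin forever.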
do {
x = (curand_uniform(&states[tid]) * 2 - 1) * RADIUS * 5;
y = (curand_uniform(&states[tid]) * 2 - 1) * RADIUS * 5;
z = (curand_uniform(&states[tid]) * 2 - 1) * RADIUS * 5;
w = curand_uniform(&states[tid]) * SCALE;
trial += 1;
} while (x * x + y * y + z * z > RADIUS && trial <= MAX_TRIALS);
points[tid] = { x, y, z, w };
printf("Index: %d, x: %.3f, y: %.3f, z: %.3f, mass: %.3f \n", tid, x, y, z, w);
}
|
9e7df1aec420a942b7283b0d5168950cb18f239f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <rocblas.h>
#include <time.h>
#define size 65536
__global__ void vector_add(int *a, int *b, int *c){
int my_id;
my_id = blockIdx.x*blockDim.x + threadIdx.x;
c[my_id] = a[my_id] + b[my_id];
}
int main(){
int i;
int *a ;
int *b ;
int *c ;
hipHostMalloc((void**)&a, sizeof(int)*size);
hipHostMalloc((void**)&b, sizeof(int)*size);
hipHostMalloc((void**)&c, sizeof(int)*size);
for(i=0; i<size; i++){
a[i]=100;
b[i]=2;
}
int *gpu_a, *gpu_b, *gpu_c;
hipMalloc((void**)&gpu_a, sizeof(int)*size);
hipMalloc((void**)&gpu_b, sizeof(int)*size);
hipMalloc((void**)&gpu_c, sizeof(int)*size);
dim3 dimGrid(32);
dim3 dimBlock(1024);
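// Split the work across two streams: each half of the arrays gets its own
// H2D copies, kernel launch and D2H copy, so the copies of one half can
// overlap with the compute of the other. The async copies can only overlap
// because the host buffers were allocated as pinned memory above.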
hipStream_t stream[2];
hipStreamCreate (&stream[0]);
hipStreamCreate (&stream[1]);
hipMemcpyAsync(gpu_a, a, sizeof(int)*size/2, hipMemcpyHostToDevice, stream[0]);
hipMemcpyAsync(gpu_b, b, sizeof(int)*size/2, hipMemcpyHostToDevice, stream[0]);
hipLaunchKernelGGL(( vector_add), dim3(dimGrid), dim3(dimBlock), 0, stream[0], gpu_a, gpu_b, gpu_c);
hipMemcpyAsync(c, gpu_c, sizeof(int)*size/2, hipMemcpyDeviceToHost, stream[0]);
hipMemcpyAsync(&gpu_a[size/2], &a[size/2], sizeof(int)*size/2, hipMemcpyHostToDevice, stream[1]);
hipMemcpyAsync(&gpu_b[size/2], &b[size/2], sizeof(int)*size/2, hipMemcpyHostToDevice, stream[1]);
hipLaunchKernelGGL(( vector_add), dim3(dimGrid), dim3(dimBlock), 0, stream[1], &gpu_a[size/2], &gpu_b[size/2], &gpu_c[size/2]);
hipMemcpyAsync(&c[size/2], &gpu_c[size/2], sizeof(int)*size/2, hipMemcpyDeviceToHost, stream[1]);
hipStreamSynchronize(stream[0]);
hipStreamSynchronize(stream[1]);
hipStreamDestroy(stream[0]);
hipStreamDestroy(stream[1]);
for(i=size/2; i<size/2+12; i++)
printf("c[%d]=%d ", i, c[i]);
hipHostFree(a);
hipHostFree(b);
hipHostFree(c);
hipFree(gpu_a);
hipFree(gpu_b);
hipFree(gpu_c);
return 0;
}
| 9e7df1aec420a942b7283b0d5168950cb18f239f.cu | #include <stdlib.h>
#include <stdio.h>
#include <cublas.h>
#include <time.h>
#define size 65536
__global__ void vector_add(int *a, int *b, int *c){
int my_id;
my_id = blockIdx.x*blockDim.x + threadIdx.x;
c[my_id] = a[my_id] + b[my_id];
}
int main(){
int i;
int *a ;
int *b ;
int *c ;
cudaMallocHost((void**)&a, sizeof(int)*size);
cudaMallocHost((void**)&b, sizeof(int)*size);
cudaMallocHost((void**)&c, sizeof(int)*size);
for(i=0; i<size; i++){
a[i]=100;
b[i]=2;
}
int *gpu_a, *gpu_b, *gpu_c;
cudaMalloc((void**)&gpu_a, sizeof(int)*size);
cudaMalloc((void**)&gpu_b, sizeof(int)*size);
cudaMalloc((void**)&gpu_c, sizeof(int)*size);
dim3 dimGrid(32);
dim3 dimBlock(1024);
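// Split the work across two streams: each half of the arrays gets its own
// H2D copies, kernel launch and D2H copy, so the copies of one half can
// overlap with the compute of the other. The async copies can only overlap
// because the host buffers were allocated as pinned memory above.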
cudaStream_t stream[2];
cudaStreamCreate (&stream[0]);
cudaStreamCreate (&stream[1]);
cudaMemcpyAsync(gpu_a, a, sizeof(int)*size/2, cudaMemcpyHostToDevice, stream[0]);
cudaMemcpyAsync(gpu_b, b, sizeof(int)*size/2, cudaMemcpyHostToDevice, stream[0]);
vector_add<<<dimGrid, dimBlock, 0, stream[0]>>>(gpu_a, gpu_b, gpu_c);
cudaMemcpyAsync(c, gpu_c, sizeof(int)*size/2, cudaMemcpyDeviceToHost, stream[0]);
cudaMemcpyAsync(&gpu_a[size/2], &a[size/2], sizeof(int)*size/2, cudaMemcpyHostToDevice, stream[1]);
cudaMemcpyAsync(&gpu_b[size/2], &b[size/2], sizeof(int)*size/2, cudaMemcpyHostToDevice, stream[1]);
vector_add<<<dimGrid, dimBlock, 0, stream[1]>>>(&gpu_a[size/2], &gpu_b[size/2], &gpu_c[size/2]);
cudaMemcpyAsync(&c[size/2], &gpu_c[size/2], sizeof(int)*size/2, cudaMemcpyDeviceToHost, stream[1]);
cudaStreamSynchronize(stream[0]);
cudaStreamSynchronize(stream[1]);
cudaStreamDestroy(stream[0]);
cudaStreamDestroy(stream[1]);
for(i=size/2; i<size/2+12; i++)
printf("c[%d]=%d ", i, c[i]);
cudaFreeHost(a);
cudaFreeHost(b);
cudaFreeHost(c);
cudaFree(gpu_a);
cudaFree(gpu_b);
cudaFree(gpu_c);
return 0;
}
|
615e5960d77ddf7d5204c473d39b2be5ddbe20b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* Portions Copyright (c) 1993-2014 NVIDIA Corporation. All rights reserved.
* Portions Copyright (c) 2009 Mike Giles, Oxford University. All rights reserved.
* Portions Copyright (c) 2008 Frances Y. Kuo and Stephen Joe. All rights reserved.
*
* Sobol Quasi-random Number Generator example
*
* Based on CUDA code submitted by Mike Giles, Oxford University, United Kingdom
* http://people.maths.ox.ac.uk/~gilesm/
*
* and C code developed by Stephen Joe, University of Waikato, New Zealand
* and Frances Kuo, University of New South Wales, Australia
* http://web.maths.unsw.edu.au/~fkuo/sobol/
*
* For theoretical background see:
*
* P. Bratley and B.L. Fox.
* Implementing Sobol's quasirandom sequence generator
* http://portal.acm.org/citation.cfm?id=42288
* ACM Trans. on Math. Software, 14(1):88-100, 1988
*
* S. Joe and F. Kuo.
* Remark on algorithm 659: implementing Sobol's quasirandom sequence generator.
* http://portal.acm.org/citation.cfm?id=641879
* ACM Trans. on Math. Software, 29(1):49-57, 2003
*
*/
#include "sobol.h"
#include "sobol_gpu.h"
#include <helper_cuda.h>
#define k_2powneg32 2.3283064E-10F
__global__ void sobolGPU_kernel(unsigned n_vectors, unsigned n_dimensions, unsigned *d_directions, float *d_output)
{
__shared__ unsigned int v[n_directions];
// Offset into the correct dimension as specified by the
// block y coordinate
d_directions = d_directions + n_directions * blockIdx.y;
d_output = d_output + n_vectors * blockIdx.y;
// Copy the direction numbers for this dimension into shared
// memory - there are only 32 direction numbers so only the
// first 32 (n_directions) threads need participate.
if (threadIdx.x < n_directions)
{
v[threadIdx.x] = d_directions[threadIdx.x];
}
__syncthreads();
// Set initial index (i.e. which vector this thread is
// computing first) and stride (i.e. step to the next vector
// for this thread)
int i0 = threadIdx.x + blockIdx.x * blockDim.x;
int stride = gridDim.x * blockDim.x;
// Get the gray code of the index
// c.f. Numerical Recipes in C, chapter 20
// http://www.nrbook.com/a/bookcpdf/c20-2.pdf
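// e.g. i0 = 5 (101b) gives g = 5 ^ 2 = 7 (111b); consecutive indices yield
// gray codes that differ in exactly one bit, which underpins Sobol's
// incremental XOR construction used below.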
unsigned int g = i0 ^ (i0 >> 1);
// Initialisation for first point x[i0]
// In the Bratley and Fox paper this is equation (*), where
// we are computing the value for x[n] without knowing the
// value of x[n-1].
unsigned int X = 0;
unsigned int mask;
for (unsigned int k = 0 ; k < __ffs(stride) - 1 ; k++)
{
// We want X ^= g_k * v[k], where g_k is one or zero.
// We do this by setting a mask with all bits equal to
// g_k. In reality we keep shifting g so that g_k is the
// LSB of g. This way we avoid multiplication.
mask = - (g & 1);
X ^= mask & v[k];
g = g >> 1;
}
if (i0 < n_vectors)
{
d_output[i0] = (float)X * k_2powneg32;
}
// Now do rest of points, using the stride
// Here we want to generate x[i] from x[i-stride] where we
// don't have any of the x in between, therefore we have to
// revisit the equation (**), this is easiest with an example
// so assume stride is 16.
// From x[n] to x[n+16] there will be:
// 8 changes in the first bit
// 4 changes in the second bit
// 2 changes in the third bit
// 1 change in the fourth
// 1 change in one of the remaining bits
//
// What this means is that in the equation:
// x[n+1] = x[n] ^ v[p]
// x[n+2] = x[n+1] ^ v[q] = x[n] ^ v[p] ^ v[q]
// ...
// We will apply xor with v[1] eight times, v[2] four times,
// v[3] twice, v[4] once and one other direction number once.
// Since two xors cancel out, we can skip even applications
// and just apply xor with v[4] (i.e. log2(16)) and with
// the current applicable direction number.
// Note that all these indices count from 1, so we need to
// subtract 1 from them all to account for C arrays counting
// from zero.
unsigned int v_log2stridem1 = v[__ffs(stride) - 2];
unsigned int v_stridemask = stride - 1;
for (unsigned int i = i0 + stride ; i < n_vectors ; i += stride)
{
// x[i] = x[i-stride] ^ v[b] ^ v[c]
// where b is log2(stride) minus 1 for C array indexing
// where c is the index of the rightmost zero bit in i,
// not including the bottom log2(stride) bits, minus 1
// for C array indexing
// In the Bratley and Fox paper this is equation (**)
X ^= v_log2stridem1 ^ v[__ffs(~((i - stride) | v_stridemask)) - 1];
d_output[i] = (float)X * k_2powneg32;
}
}
extern "C"
void sobolGPU(int n_vectors, int n_dimensions, unsigned int *d_directions, float *d_output)
{
const int threadsperblock = 64;
// Set up the execution configuration
dim3 dimGrid;
dim3 dimBlock;
int device;
hipDeviceProp_t prop;
checkCudaErrors(hipGetDevice(&device));
checkCudaErrors(hipGetDeviceProperties(&prop, device));
// This implementation of the generator outputs all the draws for
// one dimension in a contiguous region of memory, followed by the
// next dimension and so on.
// Therefore all threads within a block will be processing different
// vectors from the same dimension. As a result we want the total
// number of blocks to be a multiple of the number of dimensions.
dimGrid.y = n_dimensions;
// If the number of dimensions is large then we will set the number
// of blocks to equal the number of dimensions (i.e. dimGrid.x = 1)
// but if the number of dimensions is small (e.g. less than four per
// multiprocessor) then we'll partition the vectors across blocks
// (as well as threads).
if (n_dimensions < (4 * prop.multiProcessorCount))
{
dimGrid.x = 4 * prop.multiProcessorCount;
}
else
{
dimGrid.x = 1;
}
// Cap the dimGrid.x if the number of vectors is small
if (dimGrid.x > (unsigned int)(n_vectors / threadsperblock))
{
dimGrid.x = (n_vectors + threadsperblock - 1) / threadsperblock;
}
// Round up to a power of two, required for the algorithm so that
// stride is a power of two.
unsigned int targetDimGridX = dimGrid.x;
for (dimGrid.x = 1 ; dimGrid.x < targetDimGridX ; dimGrid.x *= 2);
// Fix the number of threads
dimBlock.x = threadsperblock;
// Execute GPU kernel
hipLaunchKernelGGL(( sobolGPU_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, n_vectors, n_dimensions, d_directions, d_output);
}
| 615e5960d77ddf7d5204c473d39b2be5ddbe20b0.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* Portions Copyright (c) 1993-2014 NVIDIA Corporation. All rights reserved.
* Portions Copyright (c) 2009 Mike Giles, Oxford University. All rights reserved.
* Portions Copyright (c) 2008 Frances Y. Kuo and Stephen Joe. All rights reserved.
*
* Sobol Quasi-random Number Generator example
*
* Based on CUDA code submitted by Mike Giles, Oxford University, United Kingdom
* http://people.maths.ox.ac.uk/~gilesm/
*
* and C code developed by Stephen Joe, University of Waikato, New Zealand
* and Frances Kuo, University of New South Wales, Australia
* http://web.maths.unsw.edu.au/~fkuo/sobol/
*
* For theoretical background see:
*
* P. Bratley and B.L. Fox.
* Implementing Sobol's quasirandom sequence generator
* http://portal.acm.org/citation.cfm?id=42288
* ACM Trans. on Math. Software, 14(1):88-100, 1988
*
* S. Joe and F. Kuo.
* Remark on algorithm 659: implementing Sobol's quasirandom sequence generator.
* http://portal.acm.org/citation.cfm?id=641879
* ACM Trans. on Math. Software, 29(1):49-57, 2003
*
*/
#include "sobol.h"
#include "sobol_gpu.h"
#include <helper_cuda.h>
#define k_2powneg32 2.3283064E-10F
__global__ void sobolGPU_kernel(unsigned n_vectors, unsigned n_dimensions, unsigned *d_directions, float *d_output)
{
__shared__ unsigned int v[n_directions];
// Offset into the correct dimension as specified by the
// block y coordinate
d_directions = d_directions + n_directions * blockIdx.y;
d_output = d_output + n_vectors * blockIdx.y;
// Copy the direction numbers for this dimension into shared
// memory - there are only 32 direction numbers so only the
// first 32 (n_directions) threads need participate.
if (threadIdx.x < n_directions)
{
v[threadIdx.x] = d_directions[threadIdx.x];
}
__syncthreads();
// Set initial index (i.e. which vector this thread is
// computing first) and stride (i.e. step to the next vector
// for this thread)
int i0 = threadIdx.x + blockIdx.x * blockDim.x;
int stride = gridDim.x * blockDim.x;
// Get the gray code of the index
// c.f. Numerical Recipes in C, chapter 20
// http://www.nrbook.com/a/bookcpdf/c20-2.pdf
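// e.g. i0 = 5 (101b) gives g = 5 ^ 2 = 7 (111b); consecutive indices yield
// gray codes that differ in exactly one bit, which underpins Sobol's
// incremental XOR construction used below.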
unsigned int g = i0 ^ (i0 >> 1);
// Initialisation for first point x[i0]
// In the Bratley and Fox paper this is equation (*), where
// we are computing the value for x[n] without knowing the
// value of x[n-1].
unsigned int X = 0;
unsigned int mask;
for (unsigned int k = 0 ; k < __ffs(stride) - 1 ; k++)
{
// We want X ^= g_k * v[k], where g_k is one or zero.
// We do this by setting a mask with all bits equal to
// g_k. In reality we keep shifting g so that g_k is the
// LSB of g. This way we avoid multiplication.
mask = - (g & 1);
X ^= mask & v[k];
g = g >> 1;
}
if (i0 < n_vectors)
{
d_output[i0] = (float)X * k_2powneg32;
}
// Now do rest of points, using the stride
// Here we want to generate x[i] from x[i-stride] where we
// don't have any of the x in between, therefore we have to
// revisit the equation (**), this is easiest with an example
// so assume stride is 16.
// From x[n] to x[n+16] there will be:
// 8 changes in the first bit
// 4 changes in the second bit
// 2 changes in the third bit
// 1 change in the fourth
// 1 change in one of the remaining bits
//
// What this means is that in the equation:
// x[n+1] = x[n] ^ v[p]
// x[n+2] = x[n+1] ^ v[q] = x[n] ^ v[p] ^ v[q]
// ...
// We will apply xor with v[1] eight times, v[2] four times,
// v[3] twice, v[4] once and one other direction number once.
// Since two xors cancel out, we can skip even applications
// and just apply xor with v[4] (i.e. log2(16)) and with
// the current applicable direction number.
// Note that all these indices count from 1, so we need to
// subtract 1 from them all to account for C arrays counting
// from zero.
unsigned int v_log2stridem1 = v[__ffs(stride) - 2];
unsigned int v_stridemask = stride - 1;
for (unsigned int i = i0 + stride ; i < n_vectors ; i += stride)
{
// x[i] = x[i-stride] ^ v[b] ^ v[c]
// where b is log2(stride) minus 1 for C array indexing
// where c is the index of the rightmost zero bit in i,
// not including the bottom log2(stride) bits, minus 1
// for C array indexing
// In the Bratley and Fox paper this is equation (**)
X ^= v_log2stridem1 ^ v[__ffs(~((i - stride) | v_stridemask)) - 1];
d_output[i] = (float)X * k_2powneg32;
}
}
extern "C"
void sobolGPU(int n_vectors, int n_dimensions, unsigned int *d_directions, float *d_output)
{
const int threadsperblock = 64;
// Set up the execution configuration
dim3 dimGrid;
dim3 dimBlock;
int device;
cudaDeviceProp prop;
checkCudaErrors(cudaGetDevice(&device));
checkCudaErrors(cudaGetDeviceProperties(&prop, device));
// This implementation of the generator outputs all the draws for
// one dimension in a contiguous region of memory, followed by the
// next dimension and so on.
// Therefore all threads within a block will be processing different
// vectors from the same dimension. As a result we want the total
// number of blocks to be a multiple of the number of dimensions.
dimGrid.y = n_dimensions;
// If the number of dimensions is large then we will set the number
// of blocks to equal the number of dimensions (i.e. dimGrid.x = 1)
// but if the number of dimensions is small (e.g. less than four per
// multiprocessor) then we'll partition the vectors across blocks
// (as well as threads).
if (n_dimensions < (4 * prop.multiProcessorCount))
{
dimGrid.x = 4 * prop.multiProcessorCount;
}
else
{
dimGrid.x = 1;
}
// Cap the dimGrid.x if the number of vectors is small
if (dimGrid.x > (unsigned int)(n_vectors / threadsperblock))
{
dimGrid.x = (n_vectors + threadsperblock - 1) / threadsperblock;
}
// Round up to a power of two, required for the algorithm so that
// stride is a power of two.
unsigned int targetDimGridX = dimGrid.x;
for (dimGrid.x = 1 ; dimGrid.x < targetDimGridX ; dimGrid.x *= 2);
// Fix the number of threads
dimBlock.x = threadsperblock;
// Execute GPU kernel
sobolGPU_kernel<<<dimGrid, dimBlock>>>(n_vectors, n_dimensions, d_directions, d_output);
}
|
2c961eb5e6f09857aad7a34a64d486cc98be7f8d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2019-2020, NVIDIA CORPORATION.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <thrust/complex.h>
///////////////////////////////////////////////////////////////////////////////
// WRITER //
///////////////////////////////////////////////////////////////////////////////
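// _cupy_pack reinterprets an input buffer of any element type as raw bytes and
// copies it into an unsigned char output buffer with a grid-stride loop, so a
// single launch covers all N bytes regardless of grid size. The extern "C"
// wrappers below simply instantiate the template for each supported dtype.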
template<typename T>
__device__ void _cupy_pack( const size_t N, T *__restrict__ input, unsigned char *__restrict__ output ) {
const int tx { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) };
const int stride { static_cast<int>( blockDim.x * gridDim.x ) };
for ( int tid = tx; tid < N; tid += stride ) {
output[tid] = reinterpret_cast<unsigned char *>( input )[tid];
}
}
extern "C" __global__ void __launch_bounds__( 512 )
_cupy_pack_int8( const size_t N, char *__restrict__ input, unsigned char *__restrict__ output ) {
_cupy_pack<char>( N, input, output );
}
extern "C" __global__ void __launch_bounds__( 512 )
_cupy_pack_uint8( const size_t N, unsigned char *__restrict__ input, unsigned char *__restrict__ output ) {
_cupy_pack<unsigned char>( N, input, output );
}
extern "C" __global__ void __launch_bounds__( 512 )
_cupy_pack_int16( const size_t N, short *__restrict__ input, unsigned char *__restrict__ output ) {
_cupy_pack<short>( N, input, output );
}
extern "C" __global__ void __launch_bounds__( 512 )
_cupy_pack_uint16( const size_t N, unsigned short *__restrict__ input, unsigned char *__restrict__ output ) {
_cupy_pack<unsigned short>( N, input, output );
}
extern "C" __global__ void __launch_bounds__( 512 )
_cupy_pack_int32( const size_t N, int *__restrict__ input, unsigned char *__restrict__ output ) {
_cupy_pack<int>( N, input, output );
}
extern "C" __global__ void __launch_bounds__( 512 )
_cupy_pack_uint32( const size_t N, unsigned int *__restrict__ input, unsigned char *__restrict__ output ) {
_cupy_pack<unsigned int>( N, input, output );
}
extern "C" __global__ void __launch_bounds__( 512 )
_cupy_pack_float32( const size_t N, float *__restrict__ input, unsigned char *__restrict__ output ) {
_cupy_pack<float>( N, input, output );
}
extern "C" __global__ void __launch_bounds__( 512 )
_cupy_pack_float64( const size_t N, double *__restrict__ input, unsigned char *__restrict__ output ) {
_cupy_pack<double>( N, input, output );
}
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_pack_complex64( const size_t N,
thrust::complex<float> *__restrict__ input,
unsigned char *__restrict__ output ) {
_cupy_pack<thrust::complex<float>>( N, input, output );
}
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_pack_complex128( const size_t N,
thrust::complex<double> *__restrict__ input,
unsigned char *__restrict__ output ) {
_cupy_pack<thrust::complex<double>>( N, input, output );
}
| 2c961eb5e6f09857aad7a34a64d486cc98be7f8d.cu | // Copyright (c) 2019-2020, NVIDIA CORPORATION.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <thrust/complex.h>
///////////////////////////////////////////////////////////////////////////////
// WRITER //
///////////////////////////////////////////////////////////////////////////////
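// _cupy_pack reinterprets an input buffer of any element type as raw bytes and
// copies it into an unsigned char output buffer with a grid-stride loop, so a
// single launch covers all N bytes regardless of grid size. The extern "C"
// wrappers below simply instantiate the template for each supported dtype.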
template<typename T>
__device__ void _cupy_pack( const size_t N, T *__restrict__ input, unsigned char *__restrict__ output ) {
const int tx { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) };
const int stride { static_cast<int>( blockDim.x * gridDim.x ) };
for ( int tid = tx; tid < N; tid += stride ) {
output[tid] = reinterpret_cast<unsigned char *>( input )[tid];
}
}
extern "C" __global__ void __launch_bounds__( 512 )
_cupy_pack_int8( const size_t N, char *__restrict__ input, unsigned char *__restrict__ output ) {
_cupy_pack<char>( N, input, output );
}
extern "C" __global__ void __launch_bounds__( 512 )
_cupy_pack_uint8( const size_t N, unsigned char *__restrict__ input, unsigned char *__restrict__ output ) {
_cupy_pack<unsigned char>( N, input, output );
}
extern "C" __global__ void __launch_bounds__( 512 )
_cupy_pack_int16( const size_t N, short *__restrict__ input, unsigned char *__restrict__ output ) {
_cupy_pack<short>( N, input, output );
}
extern "C" __global__ void __launch_bounds__( 512 )
_cupy_pack_uint16( const size_t N, unsigned short *__restrict__ input, unsigned char *__restrict__ output ) {
_cupy_pack<unsigned short>( N, input, output );
}
extern "C" __global__ void __launch_bounds__( 512 )
_cupy_pack_int32( const size_t N, int *__restrict__ input, unsigned char *__restrict__ output ) {
_cupy_pack<int>( N, input, output );
}
extern "C" __global__ void __launch_bounds__( 512 )
_cupy_pack_uint32( const size_t N, unsigned int *__restrict__ input, unsigned char *__restrict__ output ) {
_cupy_pack<unsigned int>( N, input, output );
}
extern "C" __global__ void __launch_bounds__( 512 )
_cupy_pack_float32( const size_t N, float *__restrict__ input, unsigned char *__restrict__ output ) {
_cupy_pack<float>( N, input, output );
}
extern "C" __global__ void __launch_bounds__( 512 )
_cupy_pack_float64( const size_t N, double *__restrict__ input, unsigned char *__restrict__ output ) {
_cupy_pack<double>( N, input, output );
}
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_pack_complex64( const size_t N,
thrust::complex<float> *__restrict__ input,
unsigned char *__restrict__ output ) {
_cupy_pack<thrust::complex<float>>( N, input, output );
}
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_pack_complex128( const size_t N,
thrust::complex<double> *__restrict__ input,
unsigned char *__restrict__ output ) {
_cupy_pack<thrust::complex<double>>( N, input, output );
}
|
b4c4a25bdee14014e44afbcd678fd0d3e4e341ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/balanced_softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void BSMLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() >= 2) {
top[1]->ShareData(prob_);
}
if (top.size() >= 3) {
// Output per-instance loss
caffe_gpu_memcpy(top[2]->count() * sizeof(Dtype), loss_data, top[2]->mutable_gpu_data());
}
// Fix a bug, which happens when propagate_down[0] = false in backward
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff());
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
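// Backward_gpu seeds bottom_diff with a copy of the softmax probabilities, so
// after the kernel above subtracts 1 at the ground-truth class each element
// holds the usual softmax-with-NLL gradient p_k - [k == label]. BSM_scale_GPU
// below then re-weights every element by the histogram statistic his_stat_
// indexed by that gradient's magnitude (when b_his_ is set).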
template <typename Dtype>
__global__ void BSM_scale_GPU(const int nthreads,
Dtype* bottom_diff, const Dtype* his_stat_, int bin_num_, bool b_cls_, bool b_his_){
CUDA_KERNEL_LOOP(i, nthreads) {
if ( b_his_ )
bottom_diff[ i ] = bottom_diff[ i ] * his_stat_[ (int)floorf(fabs(bottom_diff[ i ]) * bin_num_) ];
}
}
template <typename Dtype>
void BSMLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use to to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
CUDA_POST_KERNEL_CHECK;
BSM_statistics(bottom[ 0 ]->mutable_cpu_diff(), bottom[0]->count());
hipLaunchKernelGGL(( BSM_scale_GPU<Dtype>), dim3(CAFFE_GET_BLOCKS(bottom[ 0 ]->count())),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, bottom[ 0 ]->count(), bottom[ 0 ]->mutable_gpu_diff(), his_stat_.gpu_data(), bin_num_, b_cls_, b_his_ );
CUDA_POST_KERNEL_CHECK;
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
const Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BSMLossLayer);
}// namespace caffe | b4c4a25bdee14014e44afbcd678fd0d3e4e341ed.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/balanced_softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void BSMLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() >= 2) {
top[1]->ShareData(prob_);
}
if (top.size() >= 3) {
// Output per-instance loss
caffe_gpu_memcpy(top[2]->count() * sizeof(Dtype), loss_data, top[2]->mutable_gpu_data());
}
// Fix a bug, which happens when propagate_down[0] = false in backward
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff());
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
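// Backward_gpu seeds bottom_diff with a copy of the softmax probabilities, so
// after the kernel above subtracts 1 at the ground-truth class each element
// holds the usual softmax-with-NLL gradient p_k - [k == label]. BSM_scale_GPU
// below then re-weights every element by the histogram statistic his_stat_
// indexed by that gradient's magnitude (when b_his_ is set).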
template <typename Dtype>
__global__ void BSM_scale_GPU(const int nthreads,
Dtype* bottom_diff, const Dtype* his_stat_, int bin_num_, bool b_cls_, bool b_his_){
CUDA_KERNEL_LOOP(i, nthreads) {
if ( b_his_ )
bottom_diff[ i ] = bottom_diff[ i ] * his_stat_[ (int)floorf(fabs(bottom_diff[ i ]) * bin_num_) ];
}
}
template <typename Dtype>
void BSMLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use to to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
CUDA_POST_KERNEL_CHECK;
BSM_statistics(bottom[ 0 ]->mutable_cpu_diff(), bottom[0]->count());
BSM_scale_GPU<Dtype> << <CAFFE_GET_BLOCKS(bottom[ 0 ]->count()),
CAFFE_CUDA_NUM_THREADS >> >( bottom[ 0 ]->count(), bottom[ 0 ]->mutable_gpu_diff(), his_stat_.gpu_data(), bin_num_, b_cls_, b_his_ );
CUDA_POST_KERNEL_CHECK;
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
const Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BSMLossLayer);
}// namespace caffe |
b106d47d3176fd148c62558cffe8c2b4ccc4220c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
// -----------------------------------------------------------------------
// Fast CUDA Radix Sort Implementation
//
// The parallel radix sort algorithm implemented by this code is described
// in the following paper.
//
// Satish, N., Harris, M., and Garland, M. "Designing Efficient Sorting
// Algorithms for Manycore GPUs". In Proceedings of IEEE International
// Parallel & Distributed Processing Symposium 2009 (IPDPS 2009).
//
// -----------------------------------------------------------------------
#include "radixsort.h"
#include "cudpp/cudpp.h"
#include <stdio.h>
#include <assert.h>
#if (CUDART_VERSION < 2020)
#error CUDA runtime version 2.2 or later required!
#endif
namespace nvRadixSort
{
// Used for creating a mapping of kernel functions to the number of CTAs to launch for each
typedef void* KernelPointer;
int getNumCTAs(KernelPointer kernel);
void setNumCTAs(KernelPointer kernel, int numCTAs);
void computeNumCTAs(KernelPointer kernel, int smemDynamicBytes, bool bManualCoalesce);
bool bManualCoalesce = false;
bool bUsePersistentCTAs = false;
unsigned int persistentCTAThreshold[2] = { 0, 0 };
unsigned int persistentCTAThresholdFullBlocks[2] = { 0, 0 };
template <typename T>
int numCTAs(T kernel)
{
return getNumCTAs((KernelPointer)kernel);
}
template <typename T>
void numCTAs(T kernel, int numCTAs)
{
setNumCTAs((KernelPointer)kernel, numCTAs);
}
template <typename T>
void computeNumCTAs(T kernel, int smemDynamicBytes)
{
computeNumCTAs((KernelPointer)kernel, smemDynamicBytes, bManualCoalesce);
}
// In emulation mode, we need __syncthreads() inside warp-synchronous code,
// but we don't in code running on the GPU, so we define this macro to use
// in the warp-scan portion of the radix sort (see CUDPP for information
// on the warp scan algorithm.
#ifdef __DEVICE_EMULATION__
#define __SYNC __syncthreads();
#else
#define __SYNC
#endif
typedef unsigned int uint;
__global__ void emptyKernel() {}
// -----------------------------------------------------------------------------------------------
// The floatFlip and floatUnflip functions below are based on code in the web article
// "Radix Tricks" by Michael Herf (http://www.stereopsis.com/radix.html). They are used to convert
// floating point values into sortable unsigned integers (and back).
//
// Paraphrasing Michael: Binary single-precision floating point numbers have two features that
// keep them from being directly sortable. First, the sign bit is set when the value is negative,
// which means that all negative numbers are bigger than positive ones. Second, the values are
// signed-magnitude, so "more negative" floating point numbers actually look bigger to a normal
// bitwise comparison.
//
// "To fix our floating point numbers, we define the following rules:
//
// 1. Always flip the sign bit.
// 2. If the sign bit was set, flip the other bits too.
//
// To get back, we flip the sign bit always, and if the sign bit was not set, we flip the other
// bits too."
//
// This is a very inexpensive operation and it is only done on the first and last steps of the
// sort.
// -----------------------------------------------------------------------------------------------
// ================================================================================================
// Flip a float for sorting
// finds SIGN of fp number.
// if it's 1 (negative float), it flips all bits
// if it's 0 (positive float), it flips the sign only
// ================================================================================================
template <bool doFlip>
__device__ uint floatFlip(uint f)
{
if (doFlip)
{
uint mask = -int(f >> 31) | 0x80000000;
return f ^ mask;
}
else
return f;
}
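// Illustrative bit patterns (IEEE-754 single precision):
// +1.0f = 0x3F800000 -> floatFlip -> 0xBF800000
// -1.0f = 0xBF800000 -> floatFlip -> 0x407FFFFF
// so after flipping, a plain unsigned comparison orders -1.0f before +1.0f.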
// ================================================================================================
// flip a float back (invert FloatFlip)
// signed was flipped from above, so:
// if sign is 1 (negative), it flips the sign bit back
// if sign is 0 (positive), it flips all bits back
// ================================================================================================
template <bool doFlip>
__device__ uint floatUnflip(uint f)
{
if (doFlip)
{
uint mask = ((f >> 31) - 1) | 0x80000000;
return f ^ mask;
}
else
return f;
}
// ================================================================================================
// Kernel to flip all floats in an array (see floatFlip, above)
// Each thread flips four values (each 256-thread CTA flips 1024 values).
// ================================================================================================
__global__ void flipFloats(uint *values, uint numValues)
{
uint index = __umul24(blockDim.x*4, blockIdx.x) + threadIdx.x;
if (index < numValues) values[index] = floatFlip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatFlip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatFlip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatFlip<true>(values[index]);
}
// ================================================================================================
// Kernel to unflip all floats in an array (see floatUnflip, above)
// Each thread unflips four values (each 256-thread CTA unflips 1024 values).
// ================================================================================================
__global__ void unflipFloats(uint *values, uint numValues)
{
uint index = __umul24(blockDim.x*4, blockIdx.x) + threadIdx.x;
if (index < numValues) values[index] = floatUnflip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatUnflip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatUnflip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatUnflip<true>(values[index]);
}
//----------------------------------------------------------------------------
// Scans each warp in parallel ("warp-scan"), one element per thread.
// uses 2 elements of shared memory per thread (64 elements per warp)
//----------------------------------------------------------------------------
template<class T, int maxlevel>
__device__ T scanwarp(T val, volatile T* sData)
{
// The following is the same as 2 * RadixSort::WARP_SIZE * warpId + threadInWarp =
// 64*(threadIdx.x >> 5) + (threadIdx.x & (RadixSort::WARP_SIZE - 1))
int idx = 2 * threadIdx.x - (threadIdx.x & (RadixSort::WARP_SIZE - 1));
sData[idx] = 0;
idx += RadixSort::WARP_SIZE;
T t = sData[idx] = val; __SYNC
#ifdef __DEVICE_EMULATION__
t = sData[idx - 1]; __SYNC
sData[idx] += t; __SYNC
t = sData[idx - 2]; __SYNC
sData[idx] += t; __SYNC
t = sData[idx - 4]; __SYNC
sData[idx] += t; __SYNC
t = sData[idx - 8]; __SYNC
sData[idx] += t; __SYNC
t = sData[idx - 16]; __SYNC
sData[idx] += t; __SYNC
#else
if (0 <= maxlevel) { sData[idx] = t = t + sData[idx - 1]; } __SYNC
if (1 <= maxlevel) { sData[idx] = t = t + sData[idx - 2]; } __SYNC
if (2 <= maxlevel) { sData[idx] = t = t + sData[idx - 4]; } __SYNC
if (3 <= maxlevel) { sData[idx] = t = t + sData[idx - 8]; } __SYNC
if (4 <= maxlevel) { sData[idx] = t = t + sData[idx -16]; } __SYNC
#endif
return sData[idx] - val; // convert inclusive -> exclusive
}
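//----------------------------------------------------------------------------
// Illustrative sketch (ours, not used by the sort): a sequential reference for
// what scanwarp computes per warp. Every lane ends up holding the exclusive
// prefix sum of the warp's inputs, i.e. the inclusive Hillis-Steele result
// minus the lane's own value ("convert inclusive -> exclusive" above).
//----------------------------------------------------------------------------
static inline void hostWarpExclusiveScan(const uint *in, uint *out, int n /* n <= WARP_SIZE */)
{
    uint running = 0;
    for (int i = 0; i < n; ++i)
    {
        out[i] = running;        // exclusive: sum of in[0..i-1]
        running += in[i];
    }
}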
//----------------------------------------------------------------------------
// scan4 scans 4*RadixSort::CTA_SIZE numElements in a block (4 per thread), using
// a warp-scan algorithm
//----------------------------------------------------------------------------
__device__ uint4 scan4(uint4 idata)
{
extern __shared__ uint ptr[];
uint idx = threadIdx.x;
uint4 val4 = idata;
uint sum[3];
sum[0] = val4.x;
sum[1] = val4.y + sum[0];
sum[2] = val4.z + sum[1];
uint val = val4.w + sum[2];
val = scanwarp<uint, 4>(val, ptr);
__syncthreads();
if ((idx & (RadixSort::WARP_SIZE - 1)) == RadixSort::WARP_SIZE - 1)
{
ptr[idx >> 5] = val + val4.w + sum[2];
}
__syncthreads();
#ifndef __DEVICE_EMULATION__
if (idx < RadixSort::WARP_SIZE)
#endif
{
ptr[idx] = scanwarp<uint, 2>(ptr[idx], ptr);
}
__syncthreads();
val += ptr[idx >> 5];
val4.x = val;
val4.y = val + sum[0];
val4.z = val + sum[1];
val4.w = val + sum[2];
return val4;
}
//----------------------------------------------------------------------------
//
// Rank is the core of the radix sort loop. Given a predicate, it
// computes the output position for each thread in an ordering where all
// True threads come first, followed by all False threads.
//
// This version handles 4 predicates per thread; hence, "rank4".
//
//----------------------------------------------------------------------------
template <int ctasize>
__device__ uint4 rank4(uint4 preds)
{
uint4 address = scan4(preds);
__shared__ uint numtrue;
if (threadIdx.x == ctasize-1)
{
numtrue = address.w + preds.w;
}
__syncthreads();
uint4 rank;
uint idx = threadIdx.x << 2;
rank.x = (preds.x) ? address.x : numtrue + idx - address.x;
rank.y = (preds.y) ? address.y : numtrue + idx + 1 - address.y;
rank.z = (preds.z) ? address.z : numtrue + idx + 2 - address.z;
rank.w = (preds.w) ? address.w : numtrue + idx + 3 - address.w;
return rank;
}
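//----------------------------------------------------------------------------
// Illustrative sketch (ours): the "split" rule that rank4 implements, written
// sequentially over a flat array of 0/1 predicates. True elements are packed
// to the front in their original order, false elements follow in order, which
// is what keeps every 1-bit pass of the block sort stable.
//----------------------------------------------------------------------------
static inline void hostSplitRank(const uint *preds, uint *rank, uint n)
{
    uint numtrue = 0;
    for (uint i = 0; i < n; ++i) numtrue += preds[i];
    uint scanned = 0;                                 // exclusive scan of preds
    for (uint i = 0; i < n; ++i)
    {
        rank[i] = preds[i] ? scanned : numtrue + i - scanned;
        scanned += preds[i];
    }
}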
//----------------------------------------------------------------------------
// Uses rank to sort one bit at a time: Sorts a block according
// to bits startbit -> nbits + startbit
//
// Each thread sorts 4 elements by nbits bits
//----------------------------------------------------------------------------
template<uint nbits, uint startbit>
__device__ void radixSortBlock(uint4 &key, uint4 &value)
{
extern __shared__ uint sMem1[];
for(uint shift = startbit; shift < (startbit + nbits); ++shift)
{
uint4 lsb;
lsb.x = !((key.x >> shift) & 0x1);
lsb.y = !((key.y >> shift) & 0x1);
lsb.z = !((key.z >> shift) & 0x1);
lsb.w = !((key.w >> shift) & 0x1);
uint4 r = rank4<RadixSort::CTA_SIZE>(lsb);
// This arithmetic strides the ranks across 4 CTA_SIZE regions
sMem1[(r.x & 3) * RadixSort::CTA_SIZE + (r.x >> 2)] = key.x;
sMem1[(r.y & 3) * RadixSort::CTA_SIZE + (r.y >> 2)] = key.y;
sMem1[(r.z & 3) * RadixSort::CTA_SIZE + (r.z >> 2)] = key.z;
sMem1[(r.w & 3) * RadixSort::CTA_SIZE + (r.w >> 2)] = key.w;
__syncthreads();
// The above allows us to read without 4-way bank conflicts:
key.x = sMem1[threadIdx.x];
key.y = sMem1[threadIdx.x + RadixSort::CTA_SIZE];
key.z = sMem1[threadIdx.x + 2 * RadixSort::CTA_SIZE];
key.w = sMem1[threadIdx.x + 3 * RadixSort::CTA_SIZE];
__syncthreads();
sMem1[(r.x & 3) * RadixSort::CTA_SIZE + (r.x >> 2)] = value.x;
sMem1[(r.y & 3) * RadixSort::CTA_SIZE + (r.y >> 2)] = value.y;
sMem1[(r.z & 3) * RadixSort::CTA_SIZE + (r.z >> 2)] = value.z;
sMem1[(r.w & 3) * RadixSort::CTA_SIZE + (r.w >> 2)] = value.w;
__syncthreads();
value.x = sMem1[threadIdx.x];
value.y = sMem1[threadIdx.x + RadixSort::CTA_SIZE];
value.z = sMem1[threadIdx.x + 2 * RadixSort::CTA_SIZE];
value.w = sMem1[threadIdx.x + 3 * RadixSort::CTA_SIZE];
__syncthreads();
}
}
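//----------------------------------------------------------------------------
// Illustrative sketch (ours): the scatter index used above, in isolation. Rank
// r is stored at (r % 4) * CTA_SIZE + (r / 4), so when thread t later reads
// offsets t, t + CTA_SIZE, t + 2*CTA_SIZE, t + 3*CTA_SIZE it gets back the
// elements with ranks 4t..4t+3, and each of those four warp-wide reads touches
// consecutive addresses (conflict-free) rather than a 4-way strided pattern.
//----------------------------------------------------------------------------
static inline uint strideRankToSmem(uint r, uint ctaSize)
{
    return (r & 3) * ctaSize + (r >> 2);
}
// e.g. with ctaSize = 256, ranks 0,1,2,3 land at 0, 256, 512, 768 and are read
// back together by thread 0.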
//----------------------------------------------------------------------------
//
// radixSortBlocks sorts all blocks of data independently in shared
// memory. Each thread block (CTA) sorts one block of 4*CTA_SIZE elements
//
// The radix sort is done in two stages. This stage calls radixSortBlock on each
// block independently, sorting on the basis of bits (startbit) -> (startbit + nbits)
//
// Template parameters are used to generate efficient code for various special cases
// For example, we have to handle arrays that are a multiple of the block size (fullBlocks)
// differently than arrays that are not. "flip" is used to only compile in the
// float flip code when float keys are used. "loop" is used when persistent CTAs
// are used.
//
// By persistent CTAs we mean that we launch only as many thread blocks as can
// be resident in the GPU and no more, rather than launching as many threads as
// we have elements. Persistent CTAs loop over blocks of elements until all work
// is complete. This can be faster in some cases. In our tests it is faster
// for large sorts (and the threshold is higher on compute version 1.1 and earlier
// GPUs than it is on compute version 1.2 GPUs).
//----------------------------------------------------------------------------
template<uint nbits, uint startbit, bool fullBlocks, bool flip, bool loop>
__global__ void radixSortBlocks(uint4* keysOut, uint4* valuesOut,
uint4* keysIn, uint4* valuesIn,
uint numElements, uint totalBlocks)
{
extern __shared__ uint4 sMem[];
uint4 key, value;
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint i = blockId * blockDim.x + threadIdx.x;
uint idx = i << 2;
// handle non-full last block if array is not multiple of 1024 numElements
if (!fullBlocks && idx+3 >= numElements)
{
if (idx >= numElements)
{
key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
value = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
}
else
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysIn;
uint *values1 = (uint*)valuesIn;
key.x = (idx < numElements) ? floatFlip<flip>(keys1[idx]) : UINT_MAX;
key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX;
key.z = (idx+2 < numElements) ? floatFlip<flip>(keys1[idx+2]) : UINT_MAX;
key.w = UINT_MAX;
value.x = (idx < numElements) ? values1[idx] : UINT_MAX;
value.y = (idx+1 < numElements) ? values1[idx+1] : UINT_MAX;
value.z = (idx+2 < numElements) ? values1[idx+2] : UINT_MAX;
value.w = UINT_MAX;
}
}
else
{
key = keysIn[i];
value = valuesIn[i];
if (flip)
{
key.x = floatFlip<flip>(key.x);
key.y = floatFlip<flip>(key.y);
key.z = floatFlip<flip>(key.z);
key.w = floatFlip<flip>(key.w);
}
}
__syncthreads();
radixSortBlock<nbits, startbit>(key, value);
// handle non-full last block if array is not multiple of 1024 numElements
if(!fullBlocks && idx+3 >= numElements)
{
if (idx < numElements)
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysOut;
uint *values1 = (uint*)valuesOut;
keys1[idx] = key.x;
values1[idx] = value.x;
if (idx + 1 < numElements)
{
keys1[idx + 1] = key.y;
values1[idx + 1] = value.y;
if (idx + 2 < numElements)
{
keys1[idx + 2] = key.z;
values1[idx + 2] = value.z;
}
}
}
}
else
{
keysOut[i] = key;
valuesOut[i] = value;
}
if (loop)
blockId += gridDim.x;
else
break;
}
}
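//----------------------------------------------------------------------------
// Illustrative sketch (ours): the persistent-CTA pattern that the "loop"
// template parameter enables, reduced to its skeleton. The grid is sized to
// what the device can keep resident, and each CTA strides through logical
// blocks of work until totalBlocks is exhausted, instead of one CTA being
// launched per block of elements. Assumes data holds totalBlocks * blockDim.x
// elements.
//----------------------------------------------------------------------------
__global__ void persistentCtaSkeleton(uint *data, uint totalBlocks)
{
    for (uint blockId = blockIdx.x; blockId < totalBlocks; blockId += gridDim.x)
    {
        uint i = blockId * blockDim.x + threadIdx.x;   // same indexing as the kernels above
        data[i] += 1;                                  // placeholder for the real per-block work
    }
}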
//----------------------------------------------------------------------------
// Given an array with blocks sorted according to a 4-bit radix group, each
// block counts the number of keys that fall into each radix in the group, and
// finds the starting offset of each radix in the block. It then writes the radix
// counts to the counters array, and the starting offsets to the blockOffsets array.
//
// Template parameters are used to generate efficient code for various special cases
// For example, we have to handle arrays that are a multiple of the block size
// (fullBlocks) differently than arrays that are not. "loop" is used when persistent
// CTAs are used.
//
// By persistent CTAs we mean that we launch only as many thread blocks as can
// be resident in the GPU and no more, rather than launching as many threads as
// we have elements. Persistent CTAs loop over blocks of elements until all work
// is complete. This can be faster in some cases. In our tests it is faster
// for large sorts (and the threshold is higher on compute version 1.1 and earlier
// GPUs than it is on compute version 1.2 GPUs).
//
//----------------------------------------------------------------------------
template<uint startbit, bool fullBlocks, bool loop>
__global__ void findRadixOffsets(uint2 *keys,
uint *counters,
uint *blockOffsets,
uint numElements,
uint totalBlocks)
{
extern __shared__ uint sRadix1[];
__shared__ uint sStartPointers[16];
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint2 radix2;
uint i = blockId * blockDim.x + threadIdx.x;
// handle non-full last block if array is not multiple of 1024 numElements
if(!fullBlocks && ((i + 1) << 1 ) > numElements )
{
// handle uint1 rather than uint2 for non-full blocks
uint *keys1 = (uint*)keys;
uint j = i << 1;
radix2.x = (j < numElements) ? keys1[j] : UINT_MAX;
j++;
radix2.y = (j < numElements) ? keys1[j] : UINT_MAX;
}
else
{
radix2 = keys[i];
}
sRadix1[2 * threadIdx.x] = (radix2.x >> startbit) & 0xF;
sRadix1[2 * threadIdx.x + 1] = (radix2.y >> startbit) & 0xF;
// Finds the position where the sRadix1 entries differ and stores start
// index for each radix.
if(threadIdx.x < 16)
{
sStartPointers[threadIdx.x] = 0;
}
__syncthreads();
if((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]) )
{
sStartPointers[sRadix1[threadIdx.x]] = threadIdx.x;
}
if(sRadix1[threadIdx.x + RadixSort::CTA_SIZE] != sRadix1[threadIdx.x + RadixSort::CTA_SIZE - 1])
{
sStartPointers[sRadix1[threadIdx.x + RadixSort::CTA_SIZE]] = threadIdx.x + RadixSort::CTA_SIZE;
}
__syncthreads();
if(threadIdx.x < 16)
{
blockOffsets[blockId*16 + threadIdx.x] = sStartPointers[threadIdx.x];
}
__syncthreads();
// Compute the sizes of each block.
if((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]) )
{
sStartPointers[sRadix1[threadIdx.x - 1]] =
threadIdx.x - sStartPointers[sRadix1[threadIdx.x - 1]];
}
if(sRadix1[threadIdx.x + RadixSort::CTA_SIZE] != sRadix1[threadIdx.x + RadixSort::CTA_SIZE - 1] )
{
sStartPointers[sRadix1[threadIdx.x + RadixSort::CTA_SIZE - 1]] =
threadIdx.x + RadixSort::CTA_SIZE - sStartPointers[sRadix1[threadIdx.x + RadixSort::CTA_SIZE - 1]];
}
if(threadIdx.x == RadixSort::CTA_SIZE - 1)
{
sStartPointers[sRadix1[2 * RadixSort::CTA_SIZE - 1]] =
2 * RadixSort::CTA_SIZE - sStartPointers[sRadix1[2 * RadixSort::CTA_SIZE - 1]];
}
__syncthreads();
if(threadIdx.x < 16)
{
counters[threadIdx.x * totalBlocks + blockId] =
sStartPointers[threadIdx.x];
}
if (loop)
blockId += gridDim.x;
else
break;
}
}
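//----------------------------------------------------------------------------
// Illustrative sketch (ours): why the counters are stored radix-major
// (counters[radix * totalBlocks + blockId]). An exclusive add-scan over that
// flattened array -- the layout cudppScan is handed below -- yields, for each
// (radix, block) pair, the global position where that block's run of the
// radix begins: all of digit 0 first, then digit 1, and so on. That is exactly
// the "offsets" array reorderData consumes.
//----------------------------------------------------------------------------
static inline void hostScanCounters(const uint *counters, uint *offsets, uint totalBlocks)
{
    uint running = 0;
    for (uint i = 0; i < 16 * totalBlocks; ++i)       // same element order cudppScan sees
    {
        offsets[i] = running;
        running += counters[i];
    }
}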
//----------------------------------------------------------------------------
// reorderData shuffles data in the array globally after the radix offsets
// have been found. On compute version 1.1 and earlier GPUs, this code depends
// on RadixSort::CTA_SIZE being 16 * number of radices (i.e. 16 * 2^nbits).
//
// On compute version 1.1 GPUs ("manualCoalesce=true") this function ensures
// that all writes are coalesced using extra work in the kernel. On later
// GPUs coalescing rules have been relaxed, so this extra overhead hurts
// performance. On these GPUs we set manualCoalesce=false and directly store
// the results.
//
// Template parameters are used to generate efficient code for various special cases
// For example, we have to handle arrays that are a multiple of the block size
// (fullBlocks) differently than arrays that are not. "loop" is used when persistent
// CTAs are used.
//
// By persistent CTAs we mean that we launch only as many thread blocks as can
// be resident in the GPU and no more, rather than launching as many threads as
// we have elements. Persistent CTAs loop over blocks of elements until all work
// is complete. This can be faster in some cases. In our tests it is faster
// for large sorts (and the threshold is higher on compute version 1.1 and earlier
// GPUs than it is on compute version 1.2 GPUs).
//----------------------------------------------------------------------------
template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip, bool loop>
__global__ void reorderData(uint *outKeys,
uint *outValues,
uint2 *keys,
uint2 *values,
uint *blockOffsets,
uint *offsets,
uint *sizes,
uint numElements,
uint totalBlocks)
{
__shared__ uint2 sKeys2[RadixSort::CTA_SIZE];
__shared__ uint2 sValues2[RadixSort::CTA_SIZE];
__shared__ uint sOffsets[16];
__shared__ uint sBlockOffsets[16];
uint *sKeys1 = (uint*)sKeys2;
uint *sValues1 = (uint*)sValues2;
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint i = blockId * blockDim.x + threadIdx.x;
// handle non-full last block if array is not multiple of 1024 numElements
if(!fullBlocks && (((i + 1) << 1) > numElements))
{
uint *keys1 = (uint*)keys;
uint *values1 = (uint*)values;
uint j = i << 1;
sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX;
sValues1[threadIdx.x << 1] = (j < numElements) ? values1[j] : UINT_MAX;
j++;
sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? keys1[j] : UINT_MAX;
sValues1[(threadIdx.x << 1) + 1] = (j < numElements) ? values1[j] : UINT_MAX;
}
else
{
sKeys2[threadIdx.x] = keys[i];
sValues2[threadIdx.x] = values[i];
}
if (!manualCoalesce)
{
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
}
__syncthreads();
uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF;
uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]);
outValues[globalOffset] = sValues1[threadIdx.x];
}
radix = (sKeys1[threadIdx.x + RadixSort::CTA_SIZE] >> startbit) & 0xF;
globalOffset = sOffsets[radix] + threadIdx.x + RadixSort::CTA_SIZE - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + RadixSort::CTA_SIZE]);
outValues[globalOffset] = sValues1[threadIdx.x + RadixSort::CTA_SIZE];
}
}
else
{
__shared__ uint sSizes[16];
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
sSizes[threadIdx.x] = sizes[threadIdx.x * totalBlocks + blockId];
}
__syncthreads();
// 1 half-warp is responsible for writing out all values for 1 radix.
// Loops if there are more than 16 values to be written out.
// All start indices are rounded down to the nearest multiple of 16, and
// all end indices are rounded up to the nearest multiple of 16.
// Thus it can do extra work if the start and end indices are not multiples of 16
// This is bounded by a factor of 2 (it can do 2X more work at most).
const uint halfWarpID = threadIdx.x >> 4;
const uint halfWarpOffset = threadIdx.x & 0xF;
const uint leadingInvalid = sOffsets[halfWarpID] & 0xF;
uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0;
uint endPos = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 -
((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF);
uint numIterations = endPos - startPos;
uint outOffset = startPos + halfWarpOffset;
uint inOffset = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset;
for(uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16)
{
if( (outOffset >= sOffsets[halfWarpID]) &&
(inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID]))
{
if(blockId < totalBlocks - 1 || outOffset < numElements)
{
outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]);
outValues[outOffset] = sValues1[inOffset];
}
}
}
}
if (loop)
{
blockId += gridDim.x;
__syncthreads();
}
else
break;
}
}
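//----------------------------------------------------------------------------
// Illustrative sketch (ours): the rounding math used by the manualCoalesce
// path above, for one radix handled by one half-warp. With a global offset of
// 37 and 5 elements, startPos = 37 & ~0xF = 32 and endPos rounds 42 up to 48,
// so the half-warp sweeps the aligned range [32, 48) in one 16-wide iteration
// and the guards inside the loop mask off the 11 lanes outside [37, 42).
//----------------------------------------------------------------------------
static inline void hostCoalescedRange(uint offset, uint size, uint &startPos, uint &endPos)
{
    startPos = offset & 0xFFFFFFF0;                                // round start down to multiple of 16
    endPos   = (offset + size) + 15 - ((offset + size - 1) & 0xF); // round end up to multiple of 16
}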
//----------------------------------------------------------------------------
// Optimization for sorts of WARP_SIZE or fewer elements
//----------------------------------------------------------------------------
template <bool flip>
__global__
void radixSortSingleWarp(uint *keys,
uint *values,
uint numElements)
{
volatile __shared__ uint sKeys[RadixSort::WARP_SIZE];
volatile __shared__ uint sValues[RadixSort::WARP_SIZE];
volatile __shared__ uint sFlags[RadixSort::WARP_SIZE];
sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]);
sValues[threadIdx.x] = values[threadIdx.x];
__SYNC // emulation only
for(uint i = 1; i < numElements; i++)
{
uint key_i = sKeys[i];
uint val_i = sValues[i];
sFlags[threadIdx.x] = 0;
if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) )
{
uint temp = sKeys[threadIdx.x];
uint tempval = sValues[threadIdx.x];
sFlags[threadIdx.x] = 1;
sKeys[threadIdx.x + 1] = temp;
sValues[threadIdx.x + 1] = tempval;
sFlags[threadIdx.x + 1] = 0;
}
if(sFlags[threadIdx.x] == 1 )
{
sKeys[threadIdx.x] = key_i;
sValues[threadIdx.x] = val_i;
}
__SYNC // emulation only
}
keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]);
values[threadIdx.x] = sValues[threadIdx.x];
}
//----------------------------------------------------------------------------
// Key-only Sorts
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
// Optimization for sorts of WARP_SIZE or fewer elements
//----------------------------------------------------------------------------
template <bool flip>
__global__
void radixSortSingleWarpKeysOnly(uint *keys,
uint numElements)
{
volatile __shared__ uint sKeys[RadixSort::WARP_SIZE];
volatile __shared__ uint sFlags[RadixSort::WARP_SIZE];
sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]);
__SYNC // emulation only
for(uint i = 1; i < numElements; i++)
{
uint key_i = sKeys[i];
sFlags[threadIdx.x] = 0;
if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) )
{
uint temp = sKeys[threadIdx.x];
sFlags[threadIdx.x] = 1;
sKeys[threadIdx.x + 1] = temp;
sFlags[threadIdx.x + 1] = 0;
}
if(sFlags[threadIdx.x] == 1 )
{
sKeys[threadIdx.x] = key_i;
}
__SYNC // emulation only
}
keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]);
}
//----------------------------------------------------------------------------
// Uses rank to sort one bit at a time: Sorts a block according
// to bits startbit -> nbits + startbit
//----------------------------------------------------------------------------
template<uint nbits, uint startbit>
__device__ void radixSortBlockKeysOnly(uint4 &key)
{
extern __shared__ uint sMem1[];
for(uint shift = startbit; shift < (startbit + nbits); ++shift)
{
uint4 lsb;
lsb.x = !((key.x >> shift) & 0x1);
lsb.y = !((key.y >> shift) & 0x1);
lsb.z = !((key.z >> shift) & 0x1);
lsb.w = !((key.w >> shift) & 0x1);
uint4 r = rank4<RadixSort::CTA_SIZE>(lsb);
#if 1
// This arithmetic strides the ranks across 4 CTA_SIZE regions
sMem1[(r.x & 3) * RadixSort::CTA_SIZE + (r.x >> 2)] = key.x;
sMem1[(r.y & 3) * RadixSort::CTA_SIZE + (r.y >> 2)] = key.y;
sMem1[(r.z & 3) * RadixSort::CTA_SIZE + (r.z >> 2)] = key.z;
sMem1[(r.w & 3) * RadixSort::CTA_SIZE + (r.w >> 2)] = key.w;
__syncthreads();
// The above allows us to read without 4-way bank conflicts:
key.x = sMem1[threadIdx.x];
key.y = sMem1[threadIdx.x + RadixSort::CTA_SIZE];
key.z = sMem1[threadIdx.x + 2 * RadixSort::CTA_SIZE];
key.w = sMem1[threadIdx.x + 3 * RadixSort::CTA_SIZE];
#else
sMem1[r.x] = key.x;
sMem1[r.y] = key.y;
sMem1[r.z] = key.z;
sMem1[r.w] = key.w;
__syncthreads();
// This access has 4-way bank conflicts
key = sMem[threadIdx.x];
#endif
__syncthreads();
}
}
//----------------------------------------------------------------------------
//
// radixSortBlocks sorts all blocks of data independently in shared
// memory. Each thread block (CTA) sorts one block of 4*CTA_SIZE elements
//
// The radix sort is done in two stages. This stage calls radixSortBlock on each
// block independently, sorting on the basis of bits (startbit) -> (startbit + nbits)
//
// Template parameters are used to generate efficient code for various special cases
// For example, we have to handle arrays that are a multiple of the block size (fullBlocks)
// differently than arrays that are not. "flip" is used to only compile in the
// float flip code when float keys are used. "loop" is used when persistent CTAs
// are used.
//
// By persistent CTAs we mean that we launch only as many thread blocks as can
// be resident in the GPU and no more, rather than launching as many threads as
// we have elements. Persistent CTAs loop over blocks of elements until all work
// is complete. This can be faster in some cases. In our tests it is faster
// for large sorts (and the threshold is higher on compute version 1.1 and earlier
// GPUs than it is on compute version 1.2 GPUs).
//----------------------------------------------------------------------------
template<uint nbits, uint startbit, bool fullBlocks, bool flip, bool loop>
__global__ void radixSortBlocksKeysOnly(uint4* keysOut, uint4* keysIn, uint numElements, uint totalBlocks)
{
extern __shared__ uint4 sMem[];
uint4 key;
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint i = blockId * blockDim.x + threadIdx.x;
uint idx = i << 2;
// handle non-full last block if array is not multiple of 1024 numElements
if (!fullBlocks && idx+3 >= numElements)
{
if (idx >= numElements)
{
key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
}
else
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysIn;
key.x = (idx < numElements) ? floatFlip<flip>(keys1[idx]) : UINT_MAX;
key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX;
key.z = (idx+2 < numElements) ? floatFlip<flip>(keys1[idx+2]) : UINT_MAX;
key.w = UINT_MAX;
}
}
else
{
key = keysIn[i];
if (flip)
{
key.x = floatFlip<flip>(key.x);
key.y = floatFlip<flip>(key.y);
key.z = floatFlip<flip>(key.z);
key.w = floatFlip<flip>(key.w);
}
}
__syncthreads();
radixSortBlockKeysOnly<nbits, startbit>(key);
// handle non-full last block if array is not multiple of 1024 numElements
if(!fullBlocks && idx+3 >= numElements)
{
if (idx < numElements)
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysOut;
keys1[idx] = key.x;
if (idx + 1 < numElements)
{
keys1[idx + 1] = key.y;
if (idx + 2 < numElements)
{
keys1[idx + 2] = key.z;
}
}
}
}
else
{
keysOut[i] = key;
}
if (loop)
blockId += gridDim.x;
else
break;
}
}
//----------------------------------------------------------------------------
// reorderData shuffles data in the array globally after the radix offsets
// have been found. On compute version 1.1 and earlier GPUs, this code depends
// on RadixSort::CTA_SIZE being 16 * number of radices (i.e. 16 * 2^nbits).
//
// On compute version 1.1 GPUs ("manualCoalesce=true") this function ensures
// that all writes are coalesced using extra work in the kernel. On later
// GPUs coalescing rules have been relaxed, so this extra overhead hurts
// performance. On these GPUs we set manualCoalesce=false and directly store
// the results.
//
// Template parameters are used to generate efficient code for various special cases
// For example, we have to handle arrays that are a multiple of the block size
// (fullBlocks) differently than arrays that are not. "loop" is used when persistent
// CTAs are used.
//
// By persistent CTAs we mean that we launch only as many thread blocks as can
// be resident in the GPU and no more, rather than launching as many threads as
// we have elements. Persistent CTAs loop over blocks of elements until all work
// is complete. This can be faster in some cases. In our tests it is faster
// for large sorts (and the threshold is higher on compute version 1.1 and earlier
// GPUs than it is on compute version 1.2 GPUs).
//----------------------------------------------------------------------------
template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip, bool loop>
__global__ void reorderDataKeysOnly(uint *outKeys,
uint2 *keys,
uint *blockOffsets,
uint *offsets,
uint *sizes,
uint numElements,
uint totalBlocks)
{
__shared__ uint2 sKeys2[RadixSort::CTA_SIZE];
__shared__ uint sOffsets[16];
__shared__ uint sBlockOffsets[16];
uint *sKeys1 = (uint*)sKeys2;
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint i = blockId * blockDim.x + threadIdx.x;
// handle non-full last block if array is not multiple of 1024 numElements
if(!fullBlocks && (((i + 1) << 1) > numElements))
{
uint *keys1 = (uint*)keys;
uint j = i << 1;
sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX;
j++;
sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? keys1[j] : UINT_MAX;
}
else
{
sKeys2[threadIdx.x] = keys[i];
}
if (!manualCoalesce)
{
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
}
__syncthreads();
uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF;
uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]);
}
radix = (sKeys1[threadIdx.x + RadixSort::CTA_SIZE] >> startbit) & 0xF;
globalOffset = sOffsets[radix] + threadIdx.x + RadixSort::CTA_SIZE - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + RadixSort::CTA_SIZE]);
}
}
else
{
__shared__ uint sSizes[16];
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
sSizes[threadIdx.x] = sizes[threadIdx.x * totalBlocks + blockId];
}
__syncthreads();
// 1 half-warp is responsible for writing out all values for 1 radix.
// Loops if there are more than 16 values to be written out.
// All start indices are rounded down to the nearest multiple of 16, and
// all end indices are rounded up to the nearest multiple of 16.
// Thus it can do extra work if the start and end indices are not multiples of 16
// This is bounded by a factor of 2 (it can do 2X more work at most).
const uint halfWarpID = threadIdx.x >> 4;
const uint halfWarpOffset = threadIdx.x & 0xF;
const uint leadingInvalid = sOffsets[halfWarpID] & 0xF;
uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0;
uint endPos = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 -
((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF);
uint numIterations = endPos - startPos;
uint outOffset = startPos + halfWarpOffset;
uint inOffset = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset;
for(uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16)
{
if( (outOffset >= sOffsets[halfWarpID]) &&
(inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID]))
{
if(blockId < totalBlocks - 1 || outOffset < numElements)
{
outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]);
}
}
}
}
if (loop)
{
blockId += gridDim.x;
__syncthreads();
}
else
break;
}
}
void checkCudaError(const char *msg)
{
#if defined(_DEBUG) || defined(DEBUG)
hipError_t e = hipDeviceSynchronize();
if( e != hipSuccess )
{
fprintf(stderr, "CUDA Error %s : %s\n", msg, hipGetErrorString(e));
exit(EXIT_FAILURE);
}
e = hipGetLastError();
if( e != hipSuccess )
{
fprintf(stderr, "CUDA Error %s : %s\n", msg, hipGetErrorString(e));
exit(EXIT_FAILURE);
}
#endif
}
//----------------------------------------------------------------------------
// Perform one step of the radix sort. Sorts by nbits key bits per step,
// starting at startbit.
//
// Uses cudppScan() for the prefix sum of radix counters.
//----------------------------------------------------------------------------
template<uint nbits, uint startbit, bool flip, bool unflip>
void radixSortStep(uint *keys,
uint *values,
uint *tempKeys,
uint *tempValues,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements)
{
const uint eltsPerBlock = RadixSort::CTA_SIZE * 4;
const uint eltsPerBlock2 = RadixSort::CTA_SIZE * 2;
bool fullBlocks = ((numElements % eltsPerBlock) == 0);
uint numBlocks = (fullBlocks) ?
(numElements / eltsPerBlock) :
(numElements / eltsPerBlock + 1);
uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ?
(numElements / eltsPerBlock2) :
(numElements / eltsPerBlock2 + 1);
bool loop = numBlocks > 65535;
uint blocks = loop ? 65535 : numBlocks;
uint blocksFind = loop ? 65535 : numBlocks2;
uint blocksReorder = loop ? 65535 : numBlocks2;
uint threshold = fullBlocks ? persistentCTAThresholdFullBlocks[0] : persistentCTAThreshold[0];
if (bUsePersistentCTAs && (numElements >= threshold))
{
loop = (numElements > 262144) || (numElements >= 32768 && numElements < 65536);
// Run an empty kernel -- this seems to reset some of the CTA scheduling hardware
// on GT200, resulting in better scheduling and lower run times
if (startbit > 0)
{
hipLaunchKernelGGL(( emptyKernel), dim3(numCTAs(emptyKernel)), dim3(RadixSort::CTA_SIZE), 0, 0, );
}
}
blocks = numBlocks;
blocksFind = numBlocks2;
blocksReorder = numBlocks2;
if (fullBlocks)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocks = flip? numCTAs(radixSortBlocks<4, 0, true, true, true>) : numCTAs(radixSortBlocks<4, 0, true, false, true>);
}
hipLaunchKernelGGL(( radixSortBlocks<nbits, startbit, true, flip, true>)
, dim3(blocks), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks);
}
else
{
hipLaunchKernelGGL(( radixSortBlocks<nbits, startbit, true, flip, false>)
, dim3(blocks), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks);
}
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocks = flip ? numCTAs(radixSortBlocks<4, 0, false, true, true>) : numCTAs(radixSortBlocks<4, 0, false, false, true>);
}
hipLaunchKernelGGL(( radixSortBlocks<nbits, startbit, false, flip, true>)
, dim3(blocks), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks);
}
else
{
hipLaunchKernelGGL(( radixSortBlocks<nbits, startbit, false, flip, false>)
, dim3(blocks), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks);
}
}
checkCudaError("radixSortBlocks");
if (fullBlocks)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksFind = numCTAs(findRadixOffsets<0, true, true>);
}
hipLaunchKernelGGL(( findRadixOffsets<startbit, true, true>)
, dim3(blocksFind), dim3(RadixSort::CTA_SIZE), 3 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
else
{
hipLaunchKernelGGL(( findRadixOffsets<startbit, true, false>)
, dim3(blocksFind), dim3(RadixSort::CTA_SIZE), 3 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksFind = numCTAs(findRadixOffsets<0, false, true>);
}
hipLaunchKernelGGL(( findRadixOffsets<startbit, false, true>)
, dim3(blocksFind), dim3(RadixSort::CTA_SIZE), 3 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
else
{
hipLaunchKernelGGL(( findRadixOffsets<startbit, false, false>)
, dim3(blocksFind), dim3(RadixSort::CTA_SIZE), 3 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
}
checkCudaError("findRadixOffsets");
cudppScan(scanPlan, countersSum, counters, 16*numBlocks2);
if (fullBlocks)
{
if (bManualCoalesce)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ? numCTAs(reorderData<0, true, true, true, true>) :
numCTAs(reorderData<0, true, true, false, true>);
}
hipLaunchKernelGGL(( reorderData<startbit, true, true, unflip, true>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
else
{
hipLaunchKernelGGL(( reorderData<startbit, true, true, unflip, false>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ? numCTAs(reorderData<0, true, false, true, true>) :
numCTAs(reorderData<0, true, false, false, true>);
}
hipLaunchKernelGGL(( reorderData<startbit, true, false, unflip, true>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
else
{
hipLaunchKernelGGL(( reorderData<startbit, true, false, unflip, false>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
}
}
else
{
if (bManualCoalesce)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ?
numCTAs(reorderData<0, false, true, true, true>) :
numCTAs(reorderData<0, false, true, false, true>);
}
hipLaunchKernelGGL(( reorderData<startbit, false, true, unflip, true>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
else
{
hipLaunchKernelGGL(( reorderData<startbit, false, true, unflip, false>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ?
numCTAs(reorderData<0, false, false, true, true>) :
numCTAs(reorderData<0, false, false, false, true>);
}
hipLaunchKernelGGL(( reorderData<startbit, false, false, unflip, true>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
else
{
hipLaunchKernelGGL(( reorderData<startbit, false, false, unflip, false>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
}
}
checkCudaError("radixSortStep");
}
//----------------------------------------------------------------------------
// Optimization for sorts of fewer than 4 * CTA_SIZE elements
//----------------------------------------------------------------------------
template <bool flip>
void radixSortSingleBlock(uint *keys,
uint *values,
uint numElements)
{
bool fullBlocks = (numElements % (RadixSort::CTA_SIZE * 4) == 0);
if (fullBlocks)
{
hipLaunchKernelGGL(( radixSortBlocks<32, 0, true, flip, false>)
, dim3(1), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint4*)keys, (uint4*)values,
(uint4*)keys, (uint4*)values,
numElements, 1 );
}
else
{
hipLaunchKernelGGL(( radixSortBlocks<32, 0, false, flip, false>)
, dim3(1), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint4*)keys, (uint4*)values,
(uint4*)keys, (uint4*)values,
numElements, 1 );
}
if (flip)
hipLaunchKernelGGL(( unflipFloats), dim3(1), dim3(RadixSort::CTA_SIZE), 0, 0, keys, numElements);
checkCudaError("radixSortSingleBlock");
}
//----------------------------------------------------------------------------
// Main radix sort function. Sorts in place in the keys and values arrays,
// but uses the other device arrays as temporary storage. All pointer
// parameters are device pointers.
//----------------------------------------------------------------------------
void radixSort(uint *keys,
uint *values,
uint *tempKeys,
uint *tempValues,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements,
uint keyBits,
bool flipBits = false)
{
if(numElements <= RadixSort::WARP_SIZE)
{
if (flipBits)
hipLaunchKernelGGL(( radixSortSingleWarp<true>), dim3(1), dim3(numElements), 0, 0, keys, values, numElements);
else
hipLaunchKernelGGL(( radixSortSingleWarp<false>), dim3(1), dim3(numElements), 0, 0, keys, values, numElements);
checkCudaError("radixSortSingleWarp");
return;
}
if(numElements <= RadixSort::CTA_SIZE * 4)
{
if (flipBits)
radixSortSingleBlock<true>(keys, values, numElements);
else
radixSortSingleBlock<false>(keys, values, numElements);
return;
}
// flip float bits on the first pass, unflip on the last pass
if (flipBits)
{
radixSortStep<4, 0, true, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
else
{ radixSortStep<4, 0, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 4)
{
radixSortStep<4, 4, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 8)
{
radixSortStep<4, 8, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 12)
{
radixSortStep<4, 12, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 16)
{
radixSortStep<4, 16, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 20)
{
radixSortStep<4, 20, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 24)
{
radixSortStep<4, 24, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 28)
{
if (flipBits) // last pass
{
radixSortStep<4, 28, false, true>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
else
{
radixSortStep<4, 28, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
}
checkCudaError("radixSort");
}
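//----------------------------------------------------------------------------
// Illustrative sketch (ours, not part of the library): the temporary-buffer
// sizes radixSort expects, derived from the kernels above. Each 4-bit pass
// needs 16 counters per block of 2 * CTA_SIZE keys, plus a same-sized scan
// output and per-block radix start offsets. Assumptions of this sketch: the
// HIP runtime API is in scope for hipMalloc/hipFree, scanPlan is an exclusive
// uint add-scan plan created elsewhere for at least 16 * numBlocks2 elements,
// and error checking is omitted.
//----------------------------------------------------------------------------
static inline void exampleRadixSortUsage(uint *keys, uint *values,
CUDPPHandle scanPlan, uint numElements)
{
    uint numBlocks2 = (numElements + 2 * RadixSort::CTA_SIZE - 1) / (2 * RadixSort::CTA_SIZE);
    size_t radixBytes = 16 * numBlocks2 * sizeof(uint);
    uint *tempKeys, *tempValues, *counters, *countersSum, *blockOffsets;
    hipMalloc((void**)&tempKeys, numElements * sizeof(uint));
    hipMalloc((void**)&tempValues, numElements * sizeof(uint));
    hipMalloc((void**)&counters, radixBytes);
    hipMalloc((void**)&countersSum, radixBytes);
    hipMalloc((void**)&blockOffsets, radixBytes);
    radixSort(keys, values, tempKeys, tempValues, counters, countersSum, blockOffsets,
    scanPlan, numElements, 32, false);
    hipFree(tempKeys); hipFree(tempValues);
    hipFree(counters); hipFree(countersSum); hipFree(blockOffsets);
}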
void radixSortFloatKeys(float *keys,
uint *values,
float *tempKeys,
uint *tempValues,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements,
uint keyBits,
bool negativeKeys)
{
radixSort((uint*)keys, values, (uint*)tempKeys, tempValues, counters,
countersSum, blockOffsets, scanPlan, numElements, keyBits,
negativeKeys);
checkCudaError("radixSortFloatKeys");
}
//----------------------------------------------------------------------------
// Perform one step of the radix sort. Sorts by nbits key bits per step,
// starting at startbit.
//----------------------------------------------------------------------------
template<uint nbits, uint startbit, bool flip, bool unflip>
void radixSortStepKeysOnly(uint *keys,
uint *tempKeys,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements)
{
const uint eltsPerBlock = RadixSort::CTA_SIZE * 4;
const uint eltsPerBlock2 = RadixSort::CTA_SIZE * 2;
bool fullBlocks = ((numElements % eltsPerBlock) == 0);
uint numBlocks = (fullBlocks) ?
(numElements / eltsPerBlock) :
(numElements / eltsPerBlock + 1);
uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ?
(numElements / eltsPerBlock2) :
(numElements / eltsPerBlock2 + 1);
bool loop = numBlocks > 65535;
//bool loop2 = numBlocks2 > 65535;
uint blocks = loop ? 65535 : numBlocks;
uint blocksFind = loop ? 65535 : numBlocks2;
uint blocksReorder = loop ? 65535 : numBlocks2;
uint threshold = fullBlocks ? persistentCTAThresholdFullBlocks[1] : persistentCTAThreshold[1];
if (bUsePersistentCTAs && (numElements >= threshold))
{
loop = (numElements > 262144) || (numElements >= 32768 && numElements < 65536);
}
blocks = numBlocks;
blocksFind = numBlocks2;
blocksReorder = numBlocks2;
if (fullBlocks)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocks = flip ? numCTAs(radixSortBlocksKeysOnly<4, 0, true, true, true>) :
numCTAs(radixSortBlocksKeysOnly<4, 0, true, false, true>);
}
hipLaunchKernelGGL(( radixSortBlocksKeysOnly<nbits, startbit, true, flip, true>)
, dim3(blocks), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint4*)tempKeys, (uint4*)keys, numElements, numBlocks);
}
else
hipLaunchKernelGGL(( radixSortBlocksKeysOnly<nbits, startbit, true, flip, false>)
, dim3(blocks), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint4*)tempKeys, (uint4*)keys, numElements, numBlocks);
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocks = flip ? numCTAs(radixSortBlocksKeysOnly<4, 0, false, true, true>) :
numCTAs(radixSortBlocksKeysOnly<4, 0, false, false, true>);
}
hipLaunchKernelGGL(( radixSortBlocksKeysOnly<nbits, startbit, false, flip, true>)
, dim3(blocks), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint4*)tempKeys, (uint4*)keys, numElements, numBlocks);
}
else
hipLaunchKernelGGL(( radixSortBlocksKeysOnly<nbits, startbit, false, flip, false>)
, dim3(blocks), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint4*)tempKeys, (uint4*)keys, numElements, numBlocks);
}
if (fullBlocks)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksFind = numCTAs(findRadixOffsets<0, true, true>);
}
hipLaunchKernelGGL(( findRadixOffsets<startbit, true, true>)
, dim3(blocksFind), dim3(RadixSort::CTA_SIZE), 3 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
else
hipLaunchKernelGGL(( findRadixOffsets<startbit, true, false>)
, dim3(blocksFind), dim3(RadixSort::CTA_SIZE), 3 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksFind = numCTAs(findRadixOffsets<0, false, true>);
}
hipLaunchKernelGGL(( findRadixOffsets<startbit, false, true>)
, dim3(blocksFind), dim3(RadixSort::CTA_SIZE), 3 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
else
hipLaunchKernelGGL(( findRadixOffsets<startbit, false, false>)
, dim3(blocksFind), dim3(RadixSort::CTA_SIZE), 3 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
cudppScan(scanPlan, countersSum, counters, 16*numBlocks2);
if (fullBlocks)
{
if (bManualCoalesce)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ?
numCTAs(reorderDataKeysOnly<0, true, true, true, true>) :
numCTAs(reorderDataKeysOnly<0, true, true, false, true>);
}
hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, true, true, unflip, true>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
else
hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, true, true, unflip, false>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ?
numCTAs(reorderDataKeysOnly<0, true, false, true, true>) :
numCTAs(reorderDataKeysOnly<0, true, false, false, true>);
}
hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, true, false, unflip, true>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
else
hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, true, false, unflip, false>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
}
else
{
if (bManualCoalesce)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ?
numCTAs(reorderDataKeysOnly<0, false, true, true, true>) :
numCTAs(reorderDataKeysOnly<0, false, true, false, true>);
}
hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, false, true, unflip, true>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
else
hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, false, true, unflip, false>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ?
numCTAs(reorderDataKeysOnly<0, false, false, true, true>) :
numCTAs(reorderDataKeysOnly<0, false, false, false, true>);
}
hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, false, false, unflip, true>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
else
hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, false, false, unflip, false>)
, dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0,
keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
}
checkCudaError("radixSortStepKeysOnly");
}
//----------------------------------------------------------------------------
// Optimization for sorts of fewer than 4 * CTA_SIZE elements
//----------------------------------------------------------------------------
template <bool flip>
void radixSortSingleBlockKeysOnly(uint *keys,
uint numElements)
{
bool fullBlocks = (numElements % (RadixSort::CTA_SIZE * 4) == 0);
if (fullBlocks)
{
hipLaunchKernelGGL(( radixSortBlocksKeysOnly<32, 0, true, flip, false>)
, dim3(1), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint4*)keys, (uint4*)keys, numElements, 1 );
}
else
{
hipLaunchKernelGGL(( radixSortBlocksKeysOnly<32, 0, false, flip, false>)
, dim3(1), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0,
(uint4*)keys, (uint4*)keys, numElements, 1 );
}
if (flip)
hipLaunchKernelGGL(( unflipFloats), dim3(1), dim3(RadixSort::CTA_SIZE), 0, 0, keys, numElements);
checkCudaError("radixSortSingleBlock");
}
//----------------------------------------------------------------------------
// Main key-only radix sort function. Sorts in place in the keys and values
// arrays, but uses the other device arrays as temporary storage. All pointer
// parameters are device pointers. Uses cudppScan() for the prefix sum of
// radix counters.
//----------------------------------------------------------------------------
void radixSortKeysOnly(uint *keys,
uint *tempKeys,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements,
uint keyBits,
bool flipBits = false)
{
if(numElements <= RadixSort::WARP_SIZE)
{
if (flipBits)
hipLaunchKernelGGL(( radixSortSingleWarpKeysOnly<true>), dim3(1), dim3(numElements), 0, 0, keys, numElements);
else
hipLaunchKernelGGL(( radixSortSingleWarpKeysOnly<false>), dim3(1), dim3(numElements), 0, 0, keys, numElements);
checkCudaError("radixSortSingleWarp");
return;
}
if(numElements <= RadixSort::CTA_SIZE * 4)
{
if (flipBits)
radixSortSingleBlockKeysOnly<true>(keys, numElements);
else
radixSortSingleBlockKeysOnly<false>(keys, numElements);
return;
}
// flip float bits on the first pass, unflip on the last pass
if (flipBits)
{
radixSortStepKeysOnly<4, 0, true, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
else
{
radixSortStepKeysOnly<4, 0, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 4)
{
radixSortStepKeysOnly<4, 4, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 8)
{
radixSortStepKeysOnly<4, 8, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 12)
{
radixSortStepKeysOnly<4, 12, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 16)
{
radixSortStepKeysOnly<4, 16, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 20)
{
radixSortStepKeysOnly<4, 20, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 24)
{
radixSortStepKeysOnly<4, 24, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 28)
{
if (flipBits) // last pass
{
radixSortStepKeysOnly<4, 28, false, true>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
else
{
radixSortStepKeysOnly<4, 28, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
}
checkCudaError("radixSortKeysOnly");
}
//----------------------------------------------------------------------------
// Main float key-only radix sort function. Sorts in place in the keys and values
// arrays, but uses the other device arrays as temporary storage. All pointer
// parameters are device pointers. Uses cudppScan() for the prefix sum of
// radix counters.
//----------------------------------------------------------------------------
void radixSortFloatKeysOnly(float *keys,
float *tempKeys,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements,
uint keyBits,
bool negativeKeys)
{
radixSortKeysOnly((uint*)keys, (uint*)tempKeys, counters, countersSum, blockOffsets,
scanPlan, numElements, keyBits, negativeKeys);
checkCudaError("radixSortFloatKeys");
}
void initDeviceParameters(bool keysOnly)
{
int deviceID = -1;
if (hipSuccess == hipGetDevice(&deviceID))
{
hipDeviceProp_t devprop;
hipGetDeviceProperties(&devprop, deviceID);
int smVersion = devprop.major * 10 + devprop.minor;
// sm_12 and later devices don't need help with coalesce in reorderData kernel
bManualCoalesce = (smVersion < 12);
bUsePersistentCTAs = (smVersion < 20);
if (bUsePersistentCTAs)
{
// Empirically we have found on pre-Fermi GPUs that for some (usually larger) sort
// sizes it is better to use exactly as many "persistent" CTAs
// as can fill the GPU, which loop over the "blocks" of work. For smaller
// arrays it is better to use the typical CUDA approach of launching one CTA
// per block of work.
// 0-element of these two-element arrays is for key-value sorts
// 1-element is for key-only sorts
persistentCTAThreshold[0] = bManualCoalesce ? 16777216 : 524288;
persistentCTAThresholdFullBlocks[0] = bManualCoalesce ? 2097152: 524288;
persistentCTAThreshold[1] = bManualCoalesce ? 16777216 : 8388608;
persistentCTAThresholdFullBlocks[1] = bManualCoalesce ? 2097152: 0;
// create a map of function pointers to register counts for more accurate occupancy calculation
// Must pass in the dynamic shared memory used by each kernel, since the runtime doesn't know it
// Note we only insert the "loop" version of the kernels (the one with the last template param = true)
// Because those are the only ones that require persistent CTAs that maximally fill the device.
computeNumCTAs(radixSortBlocks<4, 0, false, false, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocks<4, 0, false, true, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocks<4, 0, true, false, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocks<4, 0, true, true, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocksKeysOnly<4, 0, false, false, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocksKeysOnly<4, 0, false, true, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocksKeysOnly<4, 0, true, false, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocksKeysOnly<4, 0, true, true, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(findRadixOffsets<0, false, true>, 2 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(findRadixOffsets<0, true, true>, 2 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(reorderData<0, false, false, false, true>, 0);
computeNumCTAs(reorderData<0, false, false, true, true>, 0);
computeNumCTAs(reorderData<0, false, true, false, true>, 0);
computeNumCTAs(reorderData<0, false, true, true, true>, 0);
computeNumCTAs(reorderData<0, true, false, false, true>, 0);
computeNumCTAs(reorderData<0, true, false, true, true>, 0);
computeNumCTAs(reorderData<0, true, true, false, true>, 0);
computeNumCTAs(reorderData<0, true, true, true, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, false, false, false, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, false, false, true, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, false, true, false, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, false, true, true, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, true, false, false, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, true, false, true, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, true, true, false, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, true, true, true, true>, 0);
computeNumCTAs(emptyKernel, 0);
}
}
}
} // namespace nvRadixSort
| b106d47d3176fd148c62558cffe8c2b4ccc4220c.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
// -----------------------------------------------------------------------
// Fast CUDA Radix Sort Implementation
//
// The parallel radix sort algorithm implemented by this code is described
// in the following paper.
//
// Satish, N., Harris, M., and Garland, M. "Designing Efficient Sorting
// Algorithms for Manycore GPUs". In Proceedings of IEEE International
// Parallel & Distributed Processing Symposium 2009 (IPDPS 2009).
//
// -----------------------------------------------------------------------
#include "radixsort.h"
#include "cudpp/cudpp.h"
#include <stdio.h>
#include <assert.h>
#if (CUDART_VERSION < 2020)
#error CUDA runtime version 2.2 or later required!
#endif
namespace nvRadixSort
{
// Used for creating a mapping of kernel functions to the number of CTAs to launch for each
typedef void* KernelPointer;
int getNumCTAs(KernelPointer kernel);
void setNumCTAs(KernelPointer kernel, int numCTAs);
void computeNumCTAs(KernelPointer kernel, int smemDynamicBytes, bool bManualCoalesce);
bool bManualCoalesce = false;
bool bUsePersistentCTAs = false;
unsigned int persistentCTAThreshold[2] = { 0, 0 };
unsigned int persistentCTAThresholdFullBlocks[2] = { 0, 0 };
template <typename T>
int numCTAs(T kernel)
{
return getNumCTAs((KernelPointer)kernel);
}
template <typename T>
void numCTAs(T kernel, int numCTAs)
{
setNumCTAs((KernelPointer)kernel, numCTAs);
}
template <typename T>
void computeNumCTAs(T kernel, int smemDynamicBytes)
{
computeNumCTAs((KernelPointer)kernel, smemDynamicBytes, bManualCoalesce);
}
// In emulation mode, we need __syncthreads() inside warp-synchronous code,
// but we don't in code running on the GPU, so we define this macro to use
// in the warp-scan portion of the radix sort (see CUDPP for information
// on the warp scan algorithm).
#ifdef __DEVICE_EMULATION__
#define __SYNC __syncthreads();
#else
#define __SYNC
#endif
typedef unsigned int uint;
__global__ void emptyKernel() {}
// -----------------------------------------------------------------------------------------------
// The floatFlip and floatUnflip functions below are based on code in the web article
// "Radix Tricks" by Michael Herf (http://www.stereopsis.com/radix.html). They are used to convert
// floating point values into sortable unsigned integers (and back).
//
// Paraphrasing Michael: Binary single-precision floating point numbers have two features that
// keep them from being directly sortable. First, the sign bit is set when the value is negative,
// which means that all negative numbers are bigger than positive ones. Second, the values are
// signed-magnitude, so "more negative" floating point numbers actually look bigger to a normal
// bitwise comparison.
//
// "To fix our floating point numbers, we define the following rules:
//
// 1. Always flip the sign bit.
// 2. If the sign bit was set, flip the other bits too.
//
// To get back, we flip the sign bit always, and if the sign bit was not set, we flip the other
// bits too."
//
// This is a very inexpensive operation and it is only done on the first and last steps of the
// sort.
// -----------------------------------------------------------------------------------------------
// ================================================================================================
// Flip a float for sorting
// finds SIGN of fp number.
// if it's 1 (negative float), it flips all bits
// if it's 0 (positive float), it flips the sign only
// ================================================================================================
template <bool doFlip>
__device__ uint floatFlip(uint f)
{
if (doFlip)
{
uint mask = -int(f >> 31) | 0x80000000;
return f ^ mask;
}
else
return f;
}
// ================================================================================================
// flip a float back (invert FloatFlip)
// signed was flipped from above, so:
// if sign is 1 (negative), it flips the sign bit back
// if sign is 0 (positive), it flips all bits back
// ================================================================================================
template <bool doFlip>
__device__ uint floatUnflip(uint f)
{
if (doFlip)
{
uint mask = ((f >> 31) - 1) | 0x80000000;
return f ^ mask;
}
else
return f;
}
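// ------------------------------------------------------------------------
// Illustrative host-side sketch (not used by the sort): the same key
// transform as floatFlip/floatUnflip above, written for the CPU so the bit
// manipulation can be checked in isolation. The names floatFlipHost and
// floatUnflipHost are ours, not part of the original interface.
//
// Worked example (IEEE-754 single precision):
//   +2.0f = 0x40000000 -> sign bit 0 -> flip sign only -> 0xC0000000
//   -2.0f = 0xC0000000 -> sign bit 1 -> flip all bits  -> 0x3FFFFFFF
// After the transform, -2.0f compares less than +2.0f as a uint, which is
// exactly the ordering the radix sort needs.
// ------------------------------------------------------------------------
inline uint floatFlipHost(uint f)
{
    uint mask = (uint)(-(int)(f >> 31)) | 0x80000000u;
    return f ^ mask;
}
inline uint floatUnflipHost(uint f)
{
    uint mask = ((f >> 31) - 1) | 0x80000000u;
    return f ^ mask;
}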
// ================================================================================================
// Kernel to flip all floats in an array (see floatFlip, above)
// Each thread flips four values (each 256-thread CTA flips 1024 values).
// ================================================================================================
__global__ void flipFloats(uint *values, uint numValues)
{
uint index = __umul24(blockDim.x*4, blockIdx.x) + threadIdx.x;
if (index < numValues) values[index] = floatFlip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatFlip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatFlip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatFlip<true>(values[index]);
}
// ================================================================================================
// Kernel to unflip all floats in an array (see floatUnflip, above)
// Each thread unflips four values (each 256-thread CTA unflips 1024 values).
// ================================================================================================
__global__ void unflipFloats(uint *values, uint numValues)
{
uint index = __umul24(blockDim.x*4, blockIdx.x) + threadIdx.x;
if (index < numValues) values[index] = floatUnflip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatUnflip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatUnflip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatUnflip<true>(values[index]);
}
//----------------------------------------------------------------------------
// Scans each warp in parallel ("warp-scan"), one element per thread.
// uses 2 elements of shared memory per thread (64 elements per warp)
//----------------------------------------------------------------------------
template<class T, int maxlevel>
__device__ T scanwarp(T val, volatile T* sData)
{
// The following is the same as 2 * RadixSort::WARP_SIZE * warpId + threadInWarp =
// 64*(threadIdx.x >> 5) + (threadIdx.x & (RadixSort::WARP_SIZE - 1))
int idx = 2 * threadIdx.x - (threadIdx.x & (RadixSort::WARP_SIZE - 1));
sData[idx] = 0;
idx += RadixSort::WARP_SIZE;
T t = sData[idx] = val; __SYNC
#ifdef __DEVICE_EMULATION__
    t = sData[idx - 1]; __SYNC
sData[idx] += t; __SYNC
t = sData[idx - 2]; __SYNC
sData[idx] += t; __SYNC
t = sData[idx - 4]; __SYNC
sData[idx] += t; __SYNC
t = sData[idx - 8]; __SYNC
sData[idx] += t; __SYNC
t = sData[idx - 16]; __SYNC
sData[idx] += t; __SYNC
#else
if (0 <= maxlevel) { sData[idx] = t = t + sData[idx - 1]; } __SYNC
if (1 <= maxlevel) { sData[idx] = t = t + sData[idx - 2]; } __SYNC
if (2 <= maxlevel) { sData[idx] = t = t + sData[idx - 4]; } __SYNC
if (3 <= maxlevel) { sData[idx] = t = t + sData[idx - 8]; } __SYNC
if (4 <= maxlevel) { sData[idx] = t = t + sData[idx -16]; } __SYNC
#endif
return sData[idx] - val; // convert inclusive -> exclusive
}
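// Host-side sketch of the primitive the warp-scan implements (illustrative
// only, not used by the kernels): an exclusive prefix sum, i.e. out[i] is
// the sum of all inputs before position i. scanwarp computes this per
// 32-thread warp; scan4 below combines the per-warp results for a whole
// CTA, four elements per thread.
inline void exclusiveScanHost(const uint *in, uint *out, uint n)
{
    uint running = 0;
    for (uint i = 0; i < n; ++i)
    {
        out[i] = running;   // exclusive: sum of elements strictly before i
        running += in[i];
    }
}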
//----------------------------------------------------------------------------
// scan4 scans 4*RadixSort::CTA_SIZE numElements in a block (4 per thread), using
// a warp-scan algorithm
//----------------------------------------------------------------------------
__device__ uint4 scan4(uint4 idata)
{
extern __shared__ uint ptr[];
uint idx = threadIdx.x;
uint4 val4 = idata;
uint sum[3];
sum[0] = val4.x;
sum[1] = val4.y + sum[0];
sum[2] = val4.z + sum[1];
uint val = val4.w + sum[2];
val = scanwarp<uint, 4>(val, ptr);
__syncthreads();
if ((idx & (RadixSort::WARP_SIZE - 1)) == RadixSort::WARP_SIZE - 1)
{
ptr[idx >> 5] = val + val4.w + sum[2];
}
__syncthreads();
#ifndef __DEVICE_EMULATION__
if (idx < RadixSort::WARP_SIZE)
#endif
{
ptr[idx] = scanwarp<uint, 2>(ptr[idx], ptr);
}
__syncthreads();
val += ptr[idx >> 5];
val4.x = val;
val4.y = val + sum[0];
val4.z = val + sum[1];
val4.w = val + sum[2];
return val4;
}
//----------------------------------------------------------------------------
//
// Rank is the core of the radix sort loop. Given a predicate, it
// computes the output position for each thread in an ordering where all
// True threads come first, followed by all False threads.
//
// This version handles 4 predicates per thread; hence, "rank4".
//
//----------------------------------------------------------------------------
template <int ctasize>
__device__ uint4 rank4(uint4 preds)
{
uint4 address = scan4(preds);
__shared__ uint numtrue;
if (threadIdx.x == ctasize-1)
{
numtrue = address.w + preds.w;
}
__syncthreads();
uint4 rank;
uint idx = threadIdx.x << 2;
rank.x = (preds.x) ? address.x : numtrue + idx - address.x;
rank.y = (preds.y) ? address.y : numtrue + idx + 1 - address.y;
rank.z = (preds.z) ? address.z : numtrue + idx + 2 - address.z;
rank.w = (preds.w) ? address.w : numtrue + idx + 3 - address.w;
return rank;
}
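// Host-side sketch of the split primitive that rank4 implements
// (illustrative only, not used by the kernels): given one predicate per
// element, "true" elements receive ranks 0..numtrue-1 in their original
// order and "false" elements follow, also in order. rank4 computes exactly
// this, four elements per thread, using scan4 for the prefix sum.
inline void splitRanksHost(const uint *preds, uint *ranks, uint n)
{
    uint numtrue = 0;
    for (uint i = 0; i < n; ++i)
        if (preds[i]) ranks[i] = numtrue++;              // pack trues first
    uint numfalse = 0;
    for (uint i = 0; i < n; ++i)
        if (!preds[i]) ranks[i] = numtrue + numfalse++;  // then falses, in order
}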
//----------------------------------------------------------------------------
// Uses rank to sort one bit at a time: Sorts a block according
// to bits startbit -> nbits + startbit
//
// Each thread sorts 4 elements by nbits bits
//----------------------------------------------------------------------------
template<uint nbits, uint startbit>
__device__ void radixSortBlock(uint4 &key, uint4 &value)
{
extern __shared__ uint sMem1[];
for(uint shift = startbit; shift < (startbit + nbits); ++shift)
{
uint4 lsb;
lsb.x = !((key.x >> shift) & 0x1);
lsb.y = !((key.y >> shift) & 0x1);
lsb.z = !((key.z >> shift) & 0x1);
lsb.w = !((key.w >> shift) & 0x1);
uint4 r = rank4<RadixSort::CTA_SIZE>(lsb);
// This arithmetic strides the ranks across 4 CTA_SIZE regions
sMem1[(r.x & 3) * RadixSort::CTA_SIZE + (r.x >> 2)] = key.x;
sMem1[(r.y & 3) * RadixSort::CTA_SIZE + (r.y >> 2)] = key.y;
sMem1[(r.z & 3) * RadixSort::CTA_SIZE + (r.z >> 2)] = key.z;
sMem1[(r.w & 3) * RadixSort::CTA_SIZE + (r.w >> 2)] = key.w;
__syncthreads();
// The above allows us to read without 4-way bank conflicts:
key.x = sMem1[threadIdx.x];
key.y = sMem1[threadIdx.x + RadixSort::CTA_SIZE];
key.z = sMem1[threadIdx.x + 2 * RadixSort::CTA_SIZE];
key.w = sMem1[threadIdx.x + 3 * RadixSort::CTA_SIZE];
__syncthreads();
sMem1[(r.x & 3) * RadixSort::CTA_SIZE + (r.x >> 2)] = value.x;
sMem1[(r.y & 3) * RadixSort::CTA_SIZE + (r.y >> 2)] = value.y;
sMem1[(r.z & 3) * RadixSort::CTA_SIZE + (r.z >> 2)] = value.z;
sMem1[(r.w & 3) * RadixSort::CTA_SIZE + (r.w >> 2)] = value.w;
__syncthreads();
value.x = sMem1[threadIdx.x];
value.y = sMem1[threadIdx.x + RadixSort::CTA_SIZE];
value.z = sMem1[threadIdx.x + 2 * RadixSort::CTA_SIZE];
value.w = sMem1[threadIdx.x + 3 * RadixSort::CTA_SIZE];
__syncthreads();
}
}
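// Host-side sketch of the bit-by-bit split sort that radixSortBlock
// performs (illustrative only, not used by the kernels): for each bit of
// the current digit, stably move all keys whose bit is 0 ahead of all keys
// whose bit is 1; after nbits such passes the data is sorted on bits
// [startbit, startbit + nbits).
inline void splitSortHost(uint *keys, uint *vals, uint *tmpKeys, uint *tmpVals,
                          uint n, uint startbit, uint nbits)
{
    for (uint shift = startbit; shift < startbit + nbits; ++shift)
    {
        uint out = 0;
        for (uint i = 0; i < n; ++i)              // keys with bit == 0 first
            if (!((keys[i] >> shift) & 1)) { tmpKeys[out] = keys[i]; tmpVals[out] = vals[i]; ++out; }
        for (uint i = 0; i < n; ++i)              // then keys with bit == 1
            if ((keys[i] >> shift) & 1) { tmpKeys[out] = keys[i]; tmpVals[out] = vals[i]; ++out; }
        for (uint i = 0; i < n; ++i) { keys[i] = tmpKeys[i]; vals[i] = tmpVals[i]; }
    }
}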
//----------------------------------------------------------------------------
//
// radixSortBlocks sorts all blocks of data independently in shared
// memory. Each thread block (CTA) sorts one block of 4*CTA_SIZE elements
//
// The radix sort is done in two stages. This stage calls radixSortBlock on each
// block independently, sorting on the basis of bits (startbit) -> (startbit + nbits)
//
// Template parameters are used to generate efficient code for various special cases
// For example, we have to handle arrays that are a multiple of the block size (fullBlocks)
// differently than arrays that are not. "flip" is used to only compile in the
// float flip code when float keys are used. "loop" is used when persistent CTAs
// are used.
//
// By persistent CTAs we mean that we launch only as many thread blocks as can
// be resident in the GPU and no more, rather than launching as many threads as
// we have elements. Persistent CTAs loop over blocks of elements until all work
// is complete. This can be faster in some cases. In our tests it is faster
// for large sorts (and the threshold is higher on compute version 1.1 and earlier
// GPUs than it is on compute version 1.2 GPUs).
//----------------------------------------------------------------------------
template<uint nbits, uint startbit, bool fullBlocks, bool flip, bool loop>
__global__ void radixSortBlocks(uint4* keysOut, uint4* valuesOut,
uint4* keysIn, uint4* valuesIn,
uint numElements, uint totalBlocks)
{
extern __shared__ uint4 sMem[];
uint4 key, value;
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint i = blockId * blockDim.x + threadIdx.x;
uint idx = i << 2;
// handle non-full last block if array is not a multiple of 1024 elements
if (!fullBlocks && idx+3 >= numElements)
{
if (idx >= numElements)
{
key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
value = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
}
else
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysIn;
uint *values1 = (uint*)valuesIn;
key.x = (idx < numElements) ? floatFlip<flip>(keys1[idx]) : UINT_MAX;
key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX;
key.z = (idx+2 < numElements) ? floatFlip<flip>(keys1[idx+2]) : UINT_MAX;
key.w = UINT_MAX;
value.x = (idx < numElements) ? values1[idx] : UINT_MAX;
value.y = (idx+1 < numElements) ? values1[idx+1] : UINT_MAX;
value.z = (idx+2 < numElements) ? values1[idx+2] : UINT_MAX;
value.w = UINT_MAX;
}
}
else
{
key = keysIn[i];
value = valuesIn[i];
if (flip)
{
key.x = floatFlip<flip>(key.x);
key.y = floatFlip<flip>(key.y);
key.z = floatFlip<flip>(key.z);
key.w = floatFlip<flip>(key.w);
}
}
__syncthreads();
radixSortBlock<nbits, startbit>(key, value);
// handle non-full last block if array is not a multiple of 1024 elements
if(!fullBlocks && idx+3 >= numElements)
{
if (idx < numElements)
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysOut;
uint *values1 = (uint*)valuesOut;
keys1[idx] = key.x;
values1[idx] = value.x;
if (idx + 1 < numElements)
{
keys1[idx + 1] = key.y;
values1[idx + 1] = value.y;
if (idx + 2 < numElements)
{
keys1[idx + 2] = key.z;
values1[idx + 2] = value.z;
}
}
}
}
else
{
keysOut[i] = key;
valuesOut[i] = value;
}
if (loop)
blockId += gridDim.x;
else
break;
}
}
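// Minimal sketch of the "persistent CTA" pattern used by the loop = true
// specializations of the kernels in this file (illustrative only, not
// launched anywhere): a fixed number of resident blocks strides over all
// logical blocks of work instead of launching one CTA per logical block.
__global__ void persistentCtaSketch(uint *out, uint totalBlocks)
{
    for (uint blockId = blockIdx.x; blockId < totalBlocks; blockId += gridDim.x)
    {
        // the real kernels sort, count radices, or reorder one tile of
        // elements per logical block here
        out[blockId] = blockId;
    }
}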
//----------------------------------------------------------------------------
// Given an array with blocks sorted according to a 4-bit radix group, each
// block counts the number of keys that fall into each radix in the group, and
// finds the starting offset of each radix in the block. It then writes the radix
// counts to the counters array, and the starting offsets to the blockOffsets array.
//
// Template parameters are used to generate efficient code for various special cases
// For example, we have to handle arrays that are a multiple of the block size
// (fullBlocks) differently than arrays that are not. "loop" is used when persistent
// CTAs are used.
//
// By persistent CTAs we mean that we launch only as many thread blocks as can
// be resident in the GPU and no more, rather than launching as many threads as
// we have elements. Persistent CTAs loop over blocks of elements until all work
// is complete. This can be faster in some cases. In our tests it is faster
// for large sorts (and the threshold is higher on compute version 1.1 and earlier
// GPUs than it is on compute version 1.2 GPUs).
//
//----------------------------------------------------------------------------
template<uint startbit, bool fullBlocks, bool loop>
__global__ void findRadixOffsets(uint2 *keys,
uint *counters,
uint *blockOffsets,
uint numElements,
uint totalBlocks)
{
extern __shared__ uint sRadix1[];
__shared__ uint sStartPointers[16];
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint2 radix2;
uint i = blockId * blockDim.x + threadIdx.x;
// handle non-full last block if array is not a multiple of 1024 elements
if(!fullBlocks && ((i + 1) << 1 ) > numElements )
{
// handle uint1 rather than uint2 for non-full blocks
uint *keys1 = (uint*)keys;
uint j = i << 1;
radix2.x = (j < numElements) ? keys1[j] : UINT_MAX;
j++;
radix2.y = (j < numElements) ? keys1[j] : UINT_MAX;
}
else
{
radix2 = keys[i];
}
sRadix1[2 * threadIdx.x] = (radix2.x >> startbit) & 0xF;
sRadix1[2 * threadIdx.x + 1] = (radix2.y >> startbit) & 0xF;
// Finds the position where the sRadix1 entries differ and stores start
// index for each radix.
if(threadIdx.x < 16)
{
sStartPointers[threadIdx.x] = 0;
}
__syncthreads();
if((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]) )
{
sStartPointers[sRadix1[threadIdx.x]] = threadIdx.x;
}
if(sRadix1[threadIdx.x + RadixSort::CTA_SIZE] != sRadix1[threadIdx.x + RadixSort::CTA_SIZE - 1])
{
sStartPointers[sRadix1[threadIdx.x + RadixSort::CTA_SIZE]] = threadIdx.x + RadixSort::CTA_SIZE;
}
__syncthreads();
if(threadIdx.x < 16)
{
blockOffsets[blockId*16 + threadIdx.x] = sStartPointers[threadIdx.x];
}
__syncthreads();
// Compute the sizes of each block.
if((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]) )
{
sStartPointers[sRadix1[threadIdx.x - 1]] =
threadIdx.x - sStartPointers[sRadix1[threadIdx.x - 1]];
}
if(sRadix1[threadIdx.x + RadixSort::CTA_SIZE] != sRadix1[threadIdx.x + RadixSort::CTA_SIZE - 1] )
{
sStartPointers[sRadix1[threadIdx.x + RadixSort::CTA_SIZE - 1]] =
threadIdx.x + RadixSort::CTA_SIZE - sStartPointers[sRadix1[threadIdx.x + RadixSort::CTA_SIZE - 1]];
}
if(threadIdx.x == RadixSort::CTA_SIZE - 1)
{
sStartPointers[sRadix1[2 * RadixSort::CTA_SIZE - 1]] =
2 * RadixSort::CTA_SIZE - sStartPointers[sRadix1[2 * RadixSort::CTA_SIZE - 1]];
}
__syncthreads();
if(threadIdx.x < 16)
{
counters[threadIdx.x * totalBlocks + blockId] =
sStartPointers[threadIdx.x];
}
if (loop)
blockId += gridDim.x;
else
break;
}
}
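// Host-side sketch of what findRadixOffsets produces per block
// (illustrative only, not used by the kernels): for a block whose keys are
// already sorted on the current 4-bit digit, record where each digit value
// starts inside the block and how many keys carry it. The real kernel
// derives both from the positions where adjacent digits differ, and writes
// the counts radix-major (counters[radix * totalBlocks + block]) so that a
// single prefix sum over the counters yields global output offsets.
inline void findRadixOffsetsHost(const uint *blockKeys, uint blockSize,
                                 uint startbit, uint *radixCounts,
                                 uint *radixStarts)
{
    for (uint r = 0; r < 16; ++r) { radixCounts[r] = 0; radixStarts[r] = 0; }
    for (uint i = 0; i < blockSize; ++i)
    {
        uint radix = (blockKeys[i] >> startbit) & 0xF;
        if (radixCounts[radix] == 0)
            radixStarts[radix] = i;               // first occurrence of this digit
        ++radixCounts[radix];
    }
}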
//----------------------------------------------------------------------------
// reorderData shuffles data in the array globally after the radix offsets
// have been found. On compute version 1.1 and earlier GPUs, this code depends
// on RadixSort::CTA_SIZE being 16 * number of radices (i.e. 16 * 2^nbits).
//
// On compute version 1.1 GPUs ("manualCoalesce=true") this function ensures
// that all writes are coalesced using extra work in the kernel. On later
// GPUs coalescing rules have been relaxed, so this extra overhead hurts
// performance. On these GPUs we set manualCoalesce=false and directly store
// the results.
//
// Template parameters are used to generate efficient code for various special cases
// For example, we have to handle arrays that are a multiple of the block size
// (fullBlocks) differently than arrays that are not. "loop" is used when persistent
// CTAs are used.
//
// By persistent CTAs we mean that we launch only as many thread blocks as can
// be resident in the GPU and no more, rather than launching as many threads as
// we have elements. Persistent CTAs loop over blocks of elements until all work
// is complete. This can be faster in some cases. In our tests it is faster
// for large sorts (and the threshold is higher on compute version 1.1 and earlier
// GPUs than it is on compute version 1.2 GPUs).
//----------------------------------------------------------------------------
template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip, bool loop>
__global__ void reorderData(uint *outKeys,
uint *outValues,
uint2 *keys,
uint2 *values,
uint *blockOffsets,
uint *offsets,
uint *sizes,
uint numElements,
uint totalBlocks)
{
__shared__ uint2 sKeys2[RadixSort::CTA_SIZE];
__shared__ uint2 sValues2[RadixSort::CTA_SIZE];
__shared__ uint sOffsets[16];
__shared__ uint sBlockOffsets[16];
uint *sKeys1 = (uint*)sKeys2;
uint *sValues1 = (uint*)sValues2;
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint i = blockId * blockDim.x + threadIdx.x;
// handle non-full last block if array is not a multiple of 1024 elements
if(!fullBlocks && (((i + 1) << 1) > numElements))
{
uint *keys1 = (uint*)keys;
uint *values1 = (uint*)values;
uint j = i << 1;
sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX;
sValues1[threadIdx.x << 1] = (j < numElements) ? values1[j] : UINT_MAX;
j++;
sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? keys1[j] : UINT_MAX;
sValues1[(threadIdx.x << 1) + 1] = (j < numElements) ? values1[j] : UINT_MAX;
}
else
{
sKeys2[threadIdx.x] = keys[i];
sValues2[threadIdx.x] = values[i];
}
if (!manualCoalesce)
{
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
}
__syncthreads();
uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF;
uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]);
outValues[globalOffset] = sValues1[threadIdx.x];
}
radix = (sKeys1[threadIdx.x + RadixSort::CTA_SIZE] >> startbit) & 0xF;
globalOffset = sOffsets[radix] + threadIdx.x + RadixSort::CTA_SIZE - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + RadixSort::CTA_SIZE]);
outValues[globalOffset] = sValues1[threadIdx.x + RadixSort::CTA_SIZE];
}
}
else
{
__shared__ uint sSizes[16];
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
sSizes[threadIdx.x] = sizes[threadIdx.x * totalBlocks + blockId];
}
__syncthreads();
// 1 half-warp is responsible for writing out all values for 1 radix.
// Loops if there are more than 16 values to be written out.
// All start indices are rounded down to the nearest multiple of 16, and
// all end indices are rounded up to the nearest multiple of 16.
// Thus it can do extra work if the start and end indices are not multiples of 16
// This is bounded by a factor of 2 (it can do 2X more work at most).
const uint halfWarpID = threadIdx.x >> 4;
const uint halfWarpOffset = threadIdx.x & 0xF;
const uint leadingInvalid = sOffsets[halfWarpID] & 0xF;
uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0;
uint endPos = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 -
((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF);
uint numIterations = endPos - startPos;
uint outOffset = startPos + halfWarpOffset;
uint inOffset = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset;
for(uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16)
{
if( (outOffset >= sOffsets[halfWarpID]) &&
(inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID]))
{
if(blockId < totalBlocks - 1 || outOffset < numElements)
{
outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]);
outValues[outOffset] = sValues1[inOffset];
}
}
}
}
if (loop)
{
blockId += gridDim.x;
__syncthreads();
}
else
break;
}
}
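// Worked example for the scatter above (illustrative): suppose in block b
// the digit value 5 first appears at local index sBlockOffsets[5] = 40 and
// the scanned counters say the keys of block b with digit 5 begin at
// sOffsets[5] = 1000 in the output. The thread holding the key at local
// index 43 (the 4th key with digit 5 in this block) then writes to
// 1000 + 43 - 40 = 1003, so keys sharing a digit keep their relative order
// and the pass stays stable.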
//----------------------------------------------------------------------------
// Optimization for sorts of WARP_SIZE or fewer elements
//----------------------------------------------------------------------------
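// (Effectively a warp-synchronous insertion sort: on iteration i, every key
// in the sorted prefix [0, i) that is greater than sKeys[i] shifts one slot
// to the right in parallel, and sKeys[i] is written at the position of the
// first greater key.)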
template <bool flip>
__global__
void radixSortSingleWarp(uint *keys,
uint *values,
uint numElements)
{
volatile __shared__ uint sKeys[RadixSort::WARP_SIZE];
volatile __shared__ uint sValues[RadixSort::WARP_SIZE];
volatile __shared__ uint sFlags[RadixSort::WARP_SIZE];
sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]);
sValues[threadIdx.x] = values[threadIdx.x];
__SYNC // emulation only
for(uint i = 1; i < numElements; i++)
{
uint key_i = sKeys[i];
uint val_i = sValues[i];
sFlags[threadIdx.x] = 0;
if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) )
{
uint temp = sKeys[threadIdx.x];
uint tempval = sValues[threadIdx.x];
sFlags[threadIdx.x] = 1;
sKeys[threadIdx.x + 1] = temp;
sValues[threadIdx.x + 1] = tempval;
sFlags[threadIdx.x + 1] = 0;
}
if(sFlags[threadIdx.x] == 1 )
{
sKeys[threadIdx.x] = key_i;
sValues[threadIdx.x] = val_i;
}
__SYNC // emulation only
}
keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]);
values[threadIdx.x] = sValues[threadIdx.x];
}
//----------------------------------------------------------------------------
// Key-only Sorts
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
// Optimization for sorts of WARP_SIZE or fewer elements
//----------------------------------------------------------------------------
template <bool flip>
__global__
void radixSortSingleWarpKeysOnly(uint *keys,
uint numElements)
{
volatile __shared__ uint sKeys[RadixSort::WARP_SIZE];
volatile __shared__ uint sFlags[RadixSort::WARP_SIZE];
sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]);
__SYNC // emulation only
for(uint i = 1; i < numElements; i++)
{
uint key_i = sKeys[i];
sFlags[threadIdx.x] = 0;
if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) )
{
uint temp = sKeys[threadIdx.x];
sFlags[threadIdx.x] = 1;
sKeys[threadIdx.x + 1] = temp;
sFlags[threadIdx.x + 1] = 0;
}
if(sFlags[threadIdx.x] == 1 )
{
sKeys[threadIdx.x] = key_i;
}
__SYNC // emulation only
}
keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]);
}
//----------------------------------------------------------------------------
// Uses rank to sort one bit at a time: Sorts a block according
// to bits startbit -> nbits + startbit
//----------------------------------------------------------------------------
template<uint nbits, uint startbit>
__device__ void radixSortBlockKeysOnly(uint4 &key)
{
extern __shared__ uint sMem1[];
for(uint shift = startbit; shift < (startbit + nbits); ++shift)
{
uint4 lsb;
lsb.x = !((key.x >> shift) & 0x1);
lsb.y = !((key.y >> shift) & 0x1);
lsb.z = !((key.z >> shift) & 0x1);
lsb.w = !((key.w >> shift) & 0x1);
uint4 r = rank4<256>(lsb);
#if 1
// This arithmetic strides the ranks across 4 CTA_SIZE regions
sMem1[(r.x & 3) * RadixSort::CTA_SIZE + (r.x >> 2)] = key.x;
sMem1[(r.y & 3) * RadixSort::CTA_SIZE + (r.y >> 2)] = key.y;
sMem1[(r.z & 3) * RadixSort::CTA_SIZE + (r.z >> 2)] = key.z;
sMem1[(r.w & 3) * RadixSort::CTA_SIZE + (r.w >> 2)] = key.w;
__syncthreads();
// The above allows us to read without 4-way bank conflicts:
key.x = sMem1[threadIdx.x];
key.y = sMem1[threadIdx.x + RadixSort::CTA_SIZE];
key.z = sMem1[threadIdx.x + 2 * RadixSort::CTA_SIZE];
key.w = sMem1[threadIdx.x + 3 * RadixSort::CTA_SIZE];
#else
sMem1[r.x] = key.x;
sMem1[r.y] = key.y;
sMem1[r.z] = key.z;
sMem1[r.w] = key.w;
__syncthreads();
// This access has 4-way bank conflicts
key = sMem[threadIdx.x];
#endif
__syncthreads();
}
}
//----------------------------------------------------------------------------
//
// radixSortBlocks sorts all blocks of data independently in shared
// memory. Each thread block (CTA) sorts one block of 4*CTA_SIZE elements
//
// The radix sort is done in two stages. This stage calls radixSortBlock on each
// block independently, sorting on the basis of bits (startbit) -> (startbit + nbits)
//
// Template parameters are used to generate efficient code for various special cases
// For example, we have to handle arrays that are a multiple of the block size (fullBlocks)
// differently than arrays that are not. "flip" is used to only compile in the
// float flip code when float keys are used. "loop" is used when persistent CTAs
// are used.
//
// By persistent CTAs we mean that we launch only as many thread blocks as can
// be resident in the GPU and no more, rather than launching as many threads as
// we have elements. Persistent CTAs loop over blocks of elements until all work
// is complete. This can be faster in some cases. In our tests it is faster
// for large sorts (and the threshold is higher on compute version 1.1 and earlier
// GPUs than it is on compute version 1.2 GPUs).
//----------------------------------------------------------------------------
template<uint nbits, uint startbit, bool fullBlocks, bool flip, bool loop>
__global__ void radixSortBlocksKeysOnly(uint4* keysOut, uint4* keysIn, uint numElements, uint totalBlocks)
{
extern __shared__ uint4 sMem[];
uint4 key;
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint i = blockId * blockDim.x + threadIdx.x;
uint idx = i << 2;
// handle non-full last block if array is not a multiple of 1024 elements
if (!fullBlocks && idx+3 >= numElements)
{
if (idx >= numElements)
{
key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
}
else
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysIn;
key.x = (idx < numElements) ? floatFlip<flip>(keys1[idx]) : UINT_MAX;
key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX;
key.z = (idx+2 < numElements) ? floatFlip<flip>(keys1[idx+2]) : UINT_MAX;
key.w = UINT_MAX;
}
}
else
{
key = keysIn[i];
if (flip)
{
key.x = floatFlip<flip>(key.x);
key.y = floatFlip<flip>(key.y);
key.z = floatFlip<flip>(key.z);
key.w = floatFlip<flip>(key.w);
}
}
__syncthreads();
radixSortBlockKeysOnly<nbits, startbit>(key);
// handle non-full last block if array is not a multiple of 1024 elements
if(!fullBlocks && idx+3 >= numElements)
{
if (idx < numElements)
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysOut;
keys1[idx] = key.x;
if (idx + 1 < numElements)
{
keys1[idx + 1] = key.y;
if (idx + 2 < numElements)
{
keys1[idx + 2] = key.z;
}
}
}
}
else
{
keysOut[i] = key;
}
if (loop)
blockId += gridDim.x;
else
break;
}
}
//----------------------------------------------------------------------------
// reorderData shuffles data in the array globally after the radix offsets
// have been found. On compute version 1.1 and earlier GPUs, this code depends
// on RadixSort::CTA_SIZE being 16 * number of radices (i.e. 16 * 2^nbits).
//
// On compute version 1.1 GPUs ("manualCoalesce=true") this function ensures
// that all writes are coalesced using extra work in the kernel. On later
// GPUs coalescing rules have been relaxed, so this extra overhead hurts
// performance. On these GPUs we set manualCoalesce=false and directly store
// the results.
//
// Template parameters are used to generate efficient code for various special cases
// For example, we have to handle arrays that are a multiple of the block size
// (fullBlocks) differently than arrays that are not. "loop" is used when persistent
// CTAs are used.
//
// By persistent CTAs we mean that we launch only as many thread blocks as can
// be resident in the GPU and no more, rather than launching as many threads as
// we have elements. Persistent CTAs loop over blocks of elements until all work
// is complete. This can be faster in some cases. In our tests it is faster
// for large sorts (and the threshold is higher on compute version 1.1 and earlier
// GPUs than it is on compute version 1.2 GPUs).
//----------------------------------------------------------------------------
template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip, bool loop>
__global__ void reorderDataKeysOnly(uint *outKeys,
uint2 *keys,
uint *blockOffsets,
uint *offsets,
uint *sizes,
uint numElements,
uint totalBlocks)
{
__shared__ uint2 sKeys2[RadixSort::CTA_SIZE];
__shared__ uint sOffsets[16];
__shared__ uint sBlockOffsets[16];
uint *sKeys1 = (uint*)sKeys2;
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint i = blockId * blockDim.x + threadIdx.x;
// handle non-full last block if array is not a multiple of 1024 elements
if(!fullBlocks && (((i + 1) << 1) > numElements))
{
uint *keys1 = (uint*)keys;
uint j = i << 1;
sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX;
j++;
sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? keys1[j] : UINT_MAX;
}
else
{
sKeys2[threadIdx.x] = keys[i];
}
if (!manualCoalesce)
{
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
}
__syncthreads();
uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF;
uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]);
}
radix = (sKeys1[threadIdx.x + RadixSort::CTA_SIZE] >> startbit) & 0xF;
globalOffset = sOffsets[radix] + threadIdx.x + RadixSort::CTA_SIZE - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + RadixSort::CTA_SIZE]);
}
}
else
{
__shared__ uint sSizes[16];
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
sSizes[threadIdx.x] = sizes[threadIdx.x * totalBlocks + blockId];
}
__syncthreads();
// 1 half-warp is responsible for writing out all values for 1 radix.
// Loops if there are more than 16 values to be written out.
// All start indices are rounded down to the nearest multiple of 16, and
// all end indices are rounded up to the nearest multiple of 16.
// Thus it can do extra work if the start and end indices are not multiples of 16
// This is bounded by a factor of 2 (it can do 2X more work at most).
const uint halfWarpID = threadIdx.x >> 4;
const uint halfWarpOffset = threadIdx.x & 0xF;
const uint leadingInvalid = sOffsets[halfWarpID] & 0xF;
uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0;
uint endPos = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 -
((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF);
uint numIterations = endPos - startPos;
uint outOffset = startPos + halfWarpOffset;
uint inOffset = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset;
for(uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16)
{
if( (outOffset >= sOffsets[halfWarpID]) &&
(inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID]))
{
if(blockId < totalBlocks - 1 || outOffset < numElements)
{
outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]);
}
}
}
}
if (loop)
{
blockId += gridDim.x;
__syncthreads();
}
else
break;
}
}
void checkCudaError(const char *msg)
{
#if defined(_DEBUG) || defined(DEBUG)
cudaError_t e = cudaThreadSynchronize();
if( e != cudaSuccess )
{
fprintf(stderr, "CUDA Error %s : %s\n", msg, cudaGetErrorString(e));
exit(EXIT_FAILURE);
}
e = cudaGetLastError();
if( e != cudaSuccess )
{
fprintf(stderr, "CUDA Error %s : %s\n", msg, cudaGetErrorString(e));
exit(EXIT_FAILURE);
}
#endif
}
//----------------------------------------------------------------------------
// Perform one step of the radix sort. Sorts by nbits key bits per step,
// starting at startbit.
//
// Uses cudppScan() for the prefix sum of radix counters.
//----------------------------------------------------------------------------
template<uint nbits, uint startbit, bool flip, bool unflip>
void radixSortStep(uint *keys,
uint *values,
uint *tempKeys,
uint *tempValues,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements)
{
const uint eltsPerBlock = RadixSort::CTA_SIZE * 4;
const uint eltsPerBlock2 = RadixSort::CTA_SIZE * 2;
bool fullBlocks = ((numElements % eltsPerBlock) == 0);
uint numBlocks = (fullBlocks) ?
(numElements / eltsPerBlock) :
(numElements / eltsPerBlock + 1);
uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ?
(numElements / eltsPerBlock2) :
(numElements / eltsPerBlock2 + 1);
bool loop = numBlocks > 65535;
uint blocks = loop ? 65535 : numBlocks;
uint blocksFind = loop ? 65535 : numBlocks2;
uint blocksReorder = loop ? 65535 : numBlocks2;
uint threshold = fullBlocks ? persistentCTAThresholdFullBlocks[0] : persistentCTAThreshold[0];
if (bUsePersistentCTAs && (numElements >= threshold))
{
loop = (numElements > 262144) || (numElements >= 32768 && numElements < 65536);
// Run an empty kernel -- this seems to reset some of the CTA scheduling hardware
// on GT200, resulting in better scheduling and lower run times
if (startbit > 0)
{
emptyKernel<<<numCTAs(emptyKernel), RadixSort::CTA_SIZE>>>();
}
}
blocks = numBlocks;
blocksFind = numBlocks2;
blocksReorder = numBlocks2;
if (fullBlocks)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocks = flip? numCTAs(radixSortBlocks<4, 0, true, true, true>) : numCTAs(radixSortBlocks<4, 0, true, false, true>);
}
radixSortBlocks<nbits, startbit, true, flip, true>
<<<blocks, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks);
}
else
{
radixSortBlocks<nbits, startbit, true, flip, false>
<<<blocks, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks);
}
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocks = flip ? numCTAs(radixSortBlocks<4, 0, false, true, true>) : numCTAs(radixSortBlocks<4, 0, false, false, true>);
}
radixSortBlocks<nbits, startbit, false, flip, true>
<<<blocks, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks);
}
else
{
radixSortBlocks<nbits, startbit, false, flip, false>
<<<blocks, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks);
}
}
checkCudaError("radixSortBlocks");
if (fullBlocks)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksFind = numCTAs(findRadixOffsets<0, true, true>);
}
findRadixOffsets<startbit, true, true>
<<<blocksFind, RadixSort::CTA_SIZE, 3 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
else
{
findRadixOffsets<startbit, true, false>
<<<blocksFind, RadixSort::CTA_SIZE, 3 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksFind = numCTAs(findRadixOffsets<0, false, true>);
}
findRadixOffsets<startbit, false, true>
<<<blocksFind, RadixSort::CTA_SIZE, 3 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
else
{
findRadixOffsets<startbit, false, false>
<<<blocksFind, RadixSort::CTA_SIZE, 3 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
}
checkCudaError("findRadixOffsets");
cudppScan(scanPlan, countersSum, counters, 16*numBlocks2);
if (fullBlocks)
{
if (bManualCoalesce)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ? numCTAs(reorderData<0, true, true, true, true>) :
numCTAs(reorderData<0, true, true, false, true>);
}
reorderData<startbit, true, true, unflip, true>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
else
{
reorderData<startbit, true, true, unflip, false>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ? numCTAs(reorderData<0, true, false, true, true>) :
numCTAs(reorderData<0, true, false, false, true>);
}
reorderData<startbit, true, false, unflip, true>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
else
{
reorderData<startbit, true, false, unflip, false>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
}
}
else
{
if (bManualCoalesce)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ?
numCTAs(reorderData<0, false, true, true, true>) :
numCTAs(reorderData<0, false, true, false, true>);
}
reorderData<startbit, false, true, unflip, true>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
else
{
reorderData<startbit, false, true, unflip, false>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ?
numCTAs(reorderData<0, false, false, true, true>) :
numCTAs(reorderData<0, false, false, false, true>);
}
reorderData<startbit, false, false, unflip, true>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
else
{
reorderData<startbit, false, false, unflip, false>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, values, (uint2*)tempKeys, (uint2*)tempValues,
blockOffsets, countersSum, counters, numElements, numBlocks2);
}
}
}
checkCudaError("radixSortStep");
}
//----------------------------------------------------------------------------
// Optimization for sorts of fewer than 4 * CTA_SIZE elements
//----------------------------------------------------------------------------
template <bool flip>
void radixSortSingleBlock(uint *keys,
uint *values,
uint numElements)
{
bool fullBlocks = (numElements % (RadixSort::CTA_SIZE * 4) == 0);
if (fullBlocks)
{
radixSortBlocks<32, 0, true, flip, false>
<<<1, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint4*)keys, (uint4*)values,
(uint4*)keys, (uint4*)values,
numElements, 1 );
}
else
{
radixSortBlocks<32, 0, false, flip, false>
<<<1, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint4*)keys, (uint4*)values,
(uint4*)keys, (uint4*)values,
numElements, 1 );
}
if (flip)
unflipFloats<<<1, RadixSort::CTA_SIZE>>>(keys, numElements);
checkCudaError("radixSortSingleBlock");
}
//----------------------------------------------------------------------------
// Main radix sort function. Sorts in place in the keys and values arrays,
// but uses the other device arrays as temporary storage. All pointer
// parameters are device pointers.
//----------------------------------------------------------------------------
void radixSort(uint *keys,
uint *values,
uint *tempKeys,
uint *tempValues,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements,
uint keyBits,
bool flipBits = false)
{
if(numElements <= RadixSort::WARP_SIZE)
{
if (flipBits)
radixSortSingleWarp<true><<<1, numElements>>>(keys, values, numElements);
else
radixSortSingleWarp<false><<<1, numElements>>>(keys, values, numElements);
checkCudaError("radixSortSingleWarp");
return;
}
if(numElements <= RadixSort::CTA_SIZE * 4)
{
if (flipBits)
radixSortSingleBlock<true>(keys, values, numElements);
else
radixSortSingleBlock<false>(keys, values, numElements);
return;
}
// flip float bits on the first pass, unflip on the last pass
if (flipBits)
{
radixSortStep<4, 0, true, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
else
    {
        radixSortStep<4, 0, false, false>(keys, values, tempKeys, tempValues,
                                          counters, countersSum, blockOffsets,
                                          scanPlan, numElements);
}
if (keyBits > 4)
{
radixSortStep<4, 4, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 8)
{
radixSortStep<4, 8, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 12)
{
radixSortStep<4, 12, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 16)
{
radixSortStep<4, 16, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 20)
{
radixSortStep<4, 20, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 24)
{
radixSortStep<4, 24, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 28)
{
if (flipBits) // last pass
{
radixSortStep<4, 28, false, true>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
else
{
radixSortStep<4, 28, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
}
checkCudaError("radixSort");
}
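// Host-side reference of the overall algorithm (illustrative sketch, not
// used by the GPU path): a least-significant-digit radix sort over 4-bit
// digits, which is what the sequence of radixSortStep<4, startbit, ...>
// calls above implements; each step is a stable counting sort on one digit.
inline void radixSortHostReference(uint *keys, uint *values, uint *tmpKeys,
                                   uint *tmpValues, uint numElements,
                                   uint keyBits)
{
    for (uint startbit = 0; startbit < keyBits; startbit += 4)
    {
        uint counts[16] = {0};
        for (uint i = 0; i < numElements; ++i)            // histogram of digits
            ++counts[(keys[i] >> startbit) & 0xF];
        uint offsets[16];
        uint sum = 0;
        for (uint r = 0; r < 16; ++r) { offsets[r] = sum; sum += counts[r]; }
        for (uint i = 0; i < numElements; ++i)            // stable scatter
        {
            uint r = (keys[i] >> startbit) & 0xF;
            tmpKeys[offsets[r]] = keys[i];
            tmpValues[offsets[r]] = values[i];
            ++offsets[r];
        }
        for (uint i = 0; i < numElements; ++i)
        {
            keys[i] = tmpKeys[i];
            values[i] = tmpValues[i];
        }
    }
}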
void radixSortFloatKeys(float *keys,
uint *values,
float *tempKeys,
uint *tempValues,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements,
uint keyBits,
bool negativeKeys)
{
radixSort((uint*)keys, values, (uint*)tempKeys, tempValues, counters,
countersSum, blockOffsets, scanPlan, numElements, keyBits,
negativeKeys);
checkCudaError("radixSortFloatKeys");
}
//----------------------------------------------------------------------------
// Perform one step of the radix sort. Sorts by nbits key bits per step,
// starting at startbit.
//----------------------------------------------------------------------------
template<uint nbits, uint startbit, bool flip, bool unflip>
void radixSortStepKeysOnly(uint *keys,
uint *tempKeys,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements)
{
const uint eltsPerBlock = RadixSort::CTA_SIZE * 4;
const uint eltsPerBlock2 = RadixSort::CTA_SIZE * 2;
bool fullBlocks = ((numElements % eltsPerBlock) == 0);
uint numBlocks = (fullBlocks) ?
(numElements / eltsPerBlock) :
(numElements / eltsPerBlock + 1);
uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ?
(numElements / eltsPerBlock2) :
(numElements / eltsPerBlock2 + 1);
bool loop = numBlocks > 65535;
//bool loop2 = numBlocks2 > 65535;
uint blocks = loop ? 65535 : numBlocks;
uint blocksFind = loop ? 65535 : numBlocks2;
uint blocksReorder = loop ? 65535 : numBlocks2;
uint threshold = fullBlocks ? persistentCTAThresholdFullBlocks[1] : persistentCTAThreshold[1];
if (bUsePersistentCTAs && (numElements >= threshold))
{
loop = (numElements > 262144) || (numElements >= 32768 && numElements < 65536);
}
blocks = numBlocks;
blocksFind = numBlocks2;
blocksReorder = numBlocks2;
if (fullBlocks)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocks = flip ? numCTAs(radixSortBlocksKeysOnly<4, 0, true, true, true>) :
numCTAs(radixSortBlocksKeysOnly<4, 0, true, false, true>);
}
radixSortBlocksKeysOnly<nbits, startbit, true, flip, true>
<<<blocks, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint4*)tempKeys, (uint4*)keys, numElements, numBlocks);
}
else
radixSortBlocksKeysOnly<nbits, startbit, true, flip, false>
<<<blocks, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint4*)tempKeys, (uint4*)keys, numElements, numBlocks);
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocks = flip ? numCTAs(radixSortBlocksKeysOnly<4, 0, false, true, true>) :
numCTAs(radixSortBlocksKeysOnly<4, 0, false, false, true>);
}
radixSortBlocksKeysOnly<nbits, startbit, false, flip, true>
<<<blocks, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint4*)tempKeys, (uint4*)keys, numElements, numBlocks);
}
else
radixSortBlocksKeysOnly<nbits, startbit, false, flip, false>
<<<blocks, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint4*)tempKeys, (uint4*)keys, numElements, numBlocks);
}
if (fullBlocks)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksFind = numCTAs(findRadixOffsets<0, true, true>);
}
findRadixOffsets<startbit, true, true>
<<<blocksFind, RadixSort::CTA_SIZE, 3 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
else
findRadixOffsets<startbit, true, false>
<<<blocksFind, RadixSort::CTA_SIZE, 3 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksFind = numCTAs(findRadixOffsets<0, false, true>);
}
findRadixOffsets<startbit, false, true>
<<<blocksFind, RadixSort::CTA_SIZE, 3 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
else
findRadixOffsets<startbit, false, false>
<<<blocksFind, RadixSort::CTA_SIZE, 3 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2);
}
cudppScan(scanPlan, countersSum, counters, 16*numBlocks2);
if (fullBlocks)
{
if (bManualCoalesce)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ?
numCTAs(reorderDataKeysOnly<0, true, true, true, true>) :
numCTAs(reorderDataKeysOnly<0, true, true, false, true>);
}
reorderDataKeysOnly<startbit, true, true, unflip, true>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
else
reorderDataKeysOnly<startbit, true, true, unflip, false>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ?
numCTAs(reorderDataKeysOnly<0, true, false, true, true>) :
numCTAs(reorderDataKeysOnly<0, true, false, false, true>);
}
reorderDataKeysOnly<startbit, true, false, unflip, true>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
else
reorderDataKeysOnly<startbit, true, false, unflip, false>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
}
else
{
if (bManualCoalesce)
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ?
numCTAs(reorderDataKeysOnly<0, false, true, true, true>) :
numCTAs(reorderDataKeysOnly<0, false, true, false, true>);
}
reorderDataKeysOnly<startbit, false, true, unflip, true>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
else
reorderDataKeysOnly<startbit, false, true, unflip, false>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
else
{
if (loop)
{
if (bUsePersistentCTAs && (numElements >= threshold))
{
blocksReorder = unflip ?
numCTAs(reorderDataKeysOnly<0, false, false, true, true>) :
numCTAs(reorderDataKeysOnly<0, false, false, false, true>);
}
reorderDataKeysOnly<startbit, false, false, unflip, true>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
else
reorderDataKeysOnly<startbit, false, false, unflip, false>
<<<blocksReorder, RadixSort::CTA_SIZE>>>
(keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
numElements, numBlocks2);
}
}
checkCudaError("radixSortStepKeysOnly");
}
//----------------------------------------------------------------------------
// Optimization for sorts of fewer than 4 * CTA_SIZE elements
//----------------------------------------------------------------------------
template <bool flip>
void radixSortSingleBlockKeysOnly(uint *keys,
uint numElements)
{
bool fullBlocks = (numElements % (RadixSort::CTA_SIZE * 4) == 0);
if (fullBlocks)
{
radixSortBlocksKeysOnly<32, 0, true, flip, false>
<<<1, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint4*)keys, (uint4*)keys, numElements, 1 );
}
else
{
radixSortBlocksKeysOnly<32, 0, false, flip, false>
<<<1, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>>
((uint4*)keys, (uint4*)keys, numElements, 1 );
}
if (flip)
unflipFloats<<<1, RadixSort::CTA_SIZE>>>(keys, numElements);
checkCudaError("radixSortSingleBlock");
}
//----------------------------------------------------------------------------
// Main key-only radix sort function. Sorts in place in the keys and values
// arrays, but uses the other device arrays as temporary storage. All pointer
// parameters are device pointers. Uses cudppScan() for the prefix sum of
// radix counters.
//----------------------------------------------------------------------------
void radixSortKeysOnly(uint *keys,
uint *tempKeys,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements,
uint keyBits,
bool flipBits = false)
{
if(numElements <= RadixSort::WARP_SIZE)
{
if (flipBits)
radixSortSingleWarpKeysOnly<true><<<1, numElements>>>(keys, numElements);
else
radixSortSingleWarpKeysOnly<false><<<1, numElements>>>(keys, numElements);
checkCudaError("radixSortSingleWarp");
return;
}
if(numElements <= RadixSort::CTA_SIZE * 4)
{
if (flipBits)
radixSortSingleBlockKeysOnly<true>(keys, numElements);
else
radixSortSingleBlockKeysOnly<false>(keys, numElements);
return;
}
// flip float bits on the first pass, unflip on the last pass
if (flipBits)
{
radixSortStepKeysOnly<4, 0, true, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
else
{
radixSortStepKeysOnly<4, 0, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 4)
{
radixSortStepKeysOnly<4, 4, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 8)
{
radixSortStepKeysOnly<4, 8, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 12)
{
radixSortStepKeysOnly<4, 12, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 16)
{
radixSortStepKeysOnly<4, 16, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 20)
{
radixSortStepKeysOnly<4, 20, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 24)
{
radixSortStepKeysOnly<4, 24, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
if (keyBits > 28)
{
if (flipBits) // last pass
{
radixSortStepKeysOnly<4, 28, false, true>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
else
{
radixSortStepKeysOnly<4, 28, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements);
}
}
checkCudaError("radixSortKeysOnly");
}
//----------------------------------------------------------------------------
// Main float key-only radix sort function. Sorts in place in the keys and values
// arrays, but uses the other device arrays as temporary storage. All pointer
// parameters are device pointers. Uses cudppScan() for the prefix sum of
// radix counters.
//----------------------------------------------------------------------------
void radixSortFloatKeysOnly(float *keys,
float *tempKeys,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements,
uint keyBits,
bool negativeKeys)
{
radixSortKeysOnly((uint*)keys, (uint*)tempKeys, counters, countersSum, blockOffsets,
scanPlan, numElements, keyBits, negativeKeys);
checkCudaError("radixSortFloatKeys");
}
void initDeviceParameters(bool keysOnly)
{
int deviceID = -1;
if (cudaSuccess == cudaGetDevice(&deviceID))
{
cudaDeviceProp devprop;
cudaGetDeviceProperties(&devprop, deviceID);
int smVersion = devprop.major * 10 + devprop.minor;
// sm_12 and later devices don't need help with coalesce in reorderData kernel
bManualCoalesce = (smVersion < 12);
bUsePersistentCTAs = (smVersion < 20);
if (bUsePersistentCTAs)
{
// Empirically we have found on pre-Fermi GPUs that for some (usually larger) sort
// sizes it is better to use exactly as many "persistent" CTAs
// as can fill the GPU, which loop over the "blocks" of work. For smaller
// arrays it is better to use the typical CUDA approach of launching one CTA
// per block of work.
// 0-element of these two-element arrays is for key-value sorts
// 1-element is for key-only sorts
persistentCTAThreshold[0] = bManualCoalesce ? 16777216 : 524288;
persistentCTAThresholdFullBlocks[0] = bManualCoalesce ? 2097152: 524288;
persistentCTAThreshold[1] = bManualCoalesce ? 16777216 : 8388608;
persistentCTAThresholdFullBlocks[1] = bManualCoalesce ? 2097152: 0;
// create a map of function pointers to register counts for more accurate occupancy calculation
// Must pass in the dynamic shared memory used by each kernel, since the runtime doesn't know it
// Note we only insert the "loop" version of the kernels (the one with the last template param = true)
// Because those are the only ones that require persistent CTAs that maximally fill the device.
computeNumCTAs(radixSortBlocks<4, 0, false, false, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocks<4, 0, false, true, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocks<4, 0, true, false, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocks<4, 0, true, true, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocksKeysOnly<4, 0, false, false, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocksKeysOnly<4, 0, false, true, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocksKeysOnly<4, 0, true, false, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(radixSortBlocksKeysOnly<4, 0, true, true, true>, 4 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(findRadixOffsets<0, false, true>, 2 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(findRadixOffsets<0, true, true>, 2 * RadixSort::CTA_SIZE * sizeof(uint));
computeNumCTAs(reorderData<0, false, false, false, true>, 0);
computeNumCTAs(reorderData<0, false, false, true, true>, 0);
computeNumCTAs(reorderData<0, false, true, false, true>, 0);
computeNumCTAs(reorderData<0, false, true, true, true>, 0);
computeNumCTAs(reorderData<0, true, false, false, true>, 0);
computeNumCTAs(reorderData<0, true, false, true, true>, 0);
computeNumCTAs(reorderData<0, true, true, false, true>, 0);
computeNumCTAs(reorderData<0, true, true, true, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, false, false, false, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, false, false, true, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, false, true, false, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, false, true, true, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, true, false, false, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, true, false, true, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, true, true, false, true>, 0);
computeNumCTAs(reorderDataKeysOnly<0, true, true, true, true>, 0);
computeNumCTAs(emptyKernel, 0);
}
}
}
} // namespace nvRadixSort
|
38e01b4d7b25a2fcb2e91875165a1791697d4f52.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, ([email protected])
*/
#include "pcl/gpu/utils/timers_cuda.hpp"
#include "pcl/gpu/utils/safe_call.hpp"
#include "internal.hpp"
#include "utils/boxutils.hpp"
#include<algorithm>
#include<limits>
using namespace pcl::gpu;
using namespace pcl::device;
using namespace std;
namespace pcl
{
namespace device
{
__global__ void get_cc_kernel(int *data)
{
data[threadIdx.x + blockDim.x * blockIdx.x] = threadIdx.x;
}
}
}
void pcl::device::OctreeImpl::get_gpu_arch_compiled_for(int& bin, int& ptx)
{
hipFuncAttributes attrs;
cudaSafeCall( hipFuncGetAttributes(&attrs, get_cc_kernel) );
bin = attrs.binaryVersion;
ptx = attrs.ptxVersion;
}
void pcl::device::OctreeImpl::setCloud(const PointCloud& input_points)
{
points = input_points;
}
void pcl::device::OctreeImpl::internalDownload()
{
int number;
DeviceArray<int>(octreeGlobal.nodes_num, 1).download(&number);
DeviceArray<int>(octreeGlobal.begs, number).download(host_octree.begs);
DeviceArray<int>(octreeGlobal.ends, number).download(host_octree.ends);
DeviceArray<int>(octreeGlobal.nodes, number).download(host_octree.nodes);
DeviceArray<int>(octreeGlobal.codes, number).download(host_octree.codes);
points_sorted.download(host_octree.points_sorted, host_octree.points_sorted_step);
indices.download(host_octree.indices);
host_octree.downloaded = true;
}
namespace
{
int getBitsNum(int integer)
{
int count = 0;
while(integer > 0)
{
if (integer & 1)
++count;
integer>>=1;
}
return count;
}
struct OctreeIteratorHost
{
const static int MAX_LEVELS_PLUS_ROOT = 11;
int paths[MAX_LEVELS_PLUS_ROOT];
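// each paths[] entry packs the index of the node currently visited at that level in the upper bits and, in the low byte, how many siblings (including the current one) are still left to visit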
int level;
OctreeIteratorHost()
{
level = 0; // root level
paths[level] = (0 << 8) + 1;
}
void gotoNextLevel(int first, int len)
{
++level;
paths[level] = (first << 8) + len;
}
int operator*() const
{
return paths[level] >> 8;
}
void operator++()
{
while(level >= 0)
{
int data = paths[level];
if ((data & 0xFF) > 1) // there are more siblings left, so we can go there
{
data += (1 << 8) - 1; // +1 to first and -1 from len
paths[level] = data;
break;
}
else
--level; //goto parent;
}
}
};
}
void pcl::device::OctreeImpl::radiusSearchHost(const PointType& query, float radius, std::vector<int>& out, int max_nn) const
{
out.clear();
float3 center = make_float3(query.x, query.y, query.z);
OctreeIteratorHost iterator;
while(iterator.level >= 0)
{
int node_idx = *iterator;
int code = host_octree.codes[node_idx];
float3 node_minp = octreeGlobal.minp;
float3 node_maxp = octreeGlobal.maxp;
calcBoundingBox(iterator.level, code, node_minp, node_maxp);
//if true, take nothing, and go to next
if (checkIfNodeOutsideSphere(node_minp, node_maxp, center, radius))
{
++iterator;
continue;
}
//if true, take all, and go to next
if (checkIfNodeInsideSphere(node_minp, node_maxp, center, radius))
{
int beg = host_octree.begs[node_idx];
int end = host_octree.ends[node_idx];
end = beg + min<int>((int)out.size() + end - beg, max_nn) - (int)out.size();
out.insert(out.end(), host_octree.indices.begin() + beg, host_octree.indices.begin() + end);
if (out.size() == (size_t)max_nn)
return;
++iterator;
continue;
}
// test children
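// low byte of the node word is a bitmask of which of the 8 child octants exist; the upper bits hold the index of its first child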
int children_mask = host_octree.nodes[node_idx] & 0xFF;
bool isLeaf = children_mask == 0;
if (isLeaf)
{
const int beg = host_octree.begs[node_idx];
const int end = host_octree.ends[node_idx];
for(int j = beg; j < end; ++j)
{
int index = host_octree.indices[j];
float point_x = host_octree.points_sorted[j ];
float point_y = host_octree.points_sorted[j + host_octree.points_sorted_step ];
float point_z = host_octree.points_sorted[j + host_octree.points_sorted_step * 2];
float dx = (point_x - center.x);
float dy = (point_y - center.y);
float dz = (point_z - center.z);
float dist2 = dx * dx + dy * dy + dz * dz;
if (dist2 < radius * radius)
out.push_back(index);
if (out.size() == (size_t)max_nn)
return;
}
++iterator;
continue;
}
int first = host_octree.nodes[node_idx] >> 8;
iterator.gotoNextLevel(first, getBitsNum(children_mask));
}
}
void pcl::device::OctreeImpl::approxNearestSearchHost(const PointType& query, int& out_index, float& sqr_dist) const
{
float3 minp = octreeGlobal.minp;
float3 maxp = octreeGlobal.maxp;
int node_idx = 0;
bool out_of_root = query.x < minp.x || query.y < minp.y || query.z < minp.z || query.x > maxp.x || query.y > maxp.y || query.z > maxp.z;
if(!out_of_root)
{
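// Morton code of the query: x/y/z are quantized inside the root bounding box and bit-interleaved, so each 3-bit level code extracted below selects one of the 8 child octants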
int code = CalcMorton(minp, maxp)(query);
int level = 0;
for(;;)
{
int mask_pos = 1 << Morton::extractLevelCode(code, level);
int node = host_octree.nodes[node_idx];
int mask = node & 0xFF;
if(getBitsNum(mask) == 0) // leaf
break;
if ( (mask & mask_pos) == 0) // no child
break;
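// descend: index of the first child (upper bits of the node word) plus the number of existing children stored before this octant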
node_idx = (node >> 8) + getBitsNum(mask & (mask_pos - 1));
++level;
}
}
int beg = host_octree.begs[node_idx];
int end = host_octree.ends[node_idx];
sqr_dist = std::numeric_limits<float>::max();
for(int i = beg; i < end; ++i)
{
float point_x = host_octree.points_sorted[i ];
float point_y = host_octree.points_sorted[i + host_octree.points_sorted_step ];
float point_z = host_octree.points_sorted[i + host_octree.points_sorted_step * 2];
float dx = (point_x - query.x);
float dy = (point_y - query.y);
float dz = (point_z - query.z);
float d2 = dx * dx + dy * dy + dz * dz;
if (sqr_dist > d2)
{
sqr_dist = d2;
out_index = i;
}
}
out_index = host_octree.indices[out_index];
}
| 38e01b4d7b25a2fcb2e91875165a1791697d4f52.cu | /*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, ([email protected])
*/
#include "pcl/gpu/utils/timers_cuda.hpp"
#include "pcl/gpu/utils/safe_call.hpp"
#include "internal.hpp"
#include "utils/boxutils.hpp"
#include<algorithm>
#include<limits>
using namespace pcl::gpu;
using namespace pcl::device;
using namespace std;
namespace pcl
{
namespace device
{
__global__ void get_cc_kernel(int *data)
{
data[threadIdx.x + blockDim.x * blockIdx.x] = threadIdx.x;
}
}
}
void pcl::device::OctreeImpl::get_gpu_arch_compiled_for(int& bin, int& ptx)
{
cudaFuncAttributes attrs;
cudaSafeCall( cudaFuncGetAttributes(&attrs, get_cc_kernel) );
bin = attrs.binaryVersion;
ptx = attrs.ptxVersion;
}
void pcl::device::OctreeImpl::setCloud(const PointCloud& input_points)
{
points = input_points;
}
void pcl::device::OctreeImpl::internalDownload()
{
int number;
DeviceArray<int>(octreeGlobal.nodes_num, 1).download(&number);
DeviceArray<int>(octreeGlobal.begs, number).download(host_octree.begs);
DeviceArray<int>(octreeGlobal.ends, number).download(host_octree.ends);
DeviceArray<int>(octreeGlobal.nodes, number).download(host_octree.nodes);
DeviceArray<int>(octreeGlobal.codes, number).download(host_octree.codes);
points_sorted.download(host_octree.points_sorted, host_octree.points_sorted_step);
indices.download(host_octree.indices);
host_octree.downloaded = true;
}
namespace
{
int getBitsNum(int integer)
{
int count = 0;
while(integer > 0)
{
if (integer & 1)
++count;
integer>>=1;
}
return count;
}
struct OctreeIteratorHost
{
const static int MAX_LEVELS_PLUS_ROOT = 11;
int paths[MAX_LEVELS_PLUS_ROOT];
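// each paths[] entry packs the index of the node currently visited at that level in the upper bits and, in the low byte, how many siblings (including the current one) are still left to visit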
int level;
OctreeIteratorHost()
{
level = 0; // root level
paths[level] = (0 << 8) + 1;
}
void gotoNextLevel(int first, int len)
{
++level;
paths[level] = (first << 8) + len;
}
int operator*() const
{
return paths[level] >> 8;
}
void operator++()
{
while(level >= 0)
{
int data = paths[level];
if ((data & 0xFF) > 1) // there are more siblings left, so we can go there
{
data += (1 << 8) - 1; // +1 to first and -1 from len
paths[level] = data;
break;
}
else
--level; //goto parent;
}
}
};
}
void pcl::device::OctreeImpl::radiusSearchHost(const PointType& query, float radius, std::vector<int>& out, int max_nn) const
{
out.clear();
float3 center = make_float3(query.x, query.y, query.z);
OctreeIteratorHost iterator;
while(iterator.level >= 0)
{
int node_idx = *iterator;
int code = host_octree.codes[node_idx];
float3 node_minp = octreeGlobal.minp;
float3 node_maxp = octreeGlobal.maxp;
calcBoundingBox(iterator.level, code, node_minp, node_maxp);
//if true, take nothing, and go to next
if (checkIfNodeOutsideSphere(node_minp, node_maxp, center, radius))
{
++iterator;
continue;
}
//if true, take all, and go to next
if (checkIfNodeInsideSphere(node_minp, node_maxp, center, radius))
{
int beg = host_octree.begs[node_idx];
int end = host_octree.ends[node_idx];
end = beg + min<int>((int)out.size() + end - beg, max_nn) - (int)out.size();
out.insert(out.end(), host_octree.indices.begin() + beg, host_octree.indices.begin() + end);
if (out.size() == (size_t)max_nn)
return;
++iterator;
continue;
}
// test children
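// low byte of the node word is a bitmask of which of the 8 child octants exist; the upper bits hold the index of its first child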
int children_mask = host_octree.nodes[node_idx] & 0xFF;
bool isLeaf = children_mask == 0;
if (isLeaf)
{
const int beg = host_octree.begs[node_idx];
const int end = host_octree.ends[node_idx];
for(int j = beg; j < end; ++j)
{
int index = host_octree.indices[j];
float point_x = host_octree.points_sorted[j ];
float point_y = host_octree.points_sorted[j + host_octree.points_sorted_step ];
float point_z = host_octree.points_sorted[j + host_octree.points_sorted_step * 2];
float dx = (point_x - center.x);
float dy = (point_y - center.y);
float dz = (point_z - center.z);
float dist2 = dx * dx + dy * dy + dz * dz;
if (dist2 < radius * radius)
out.push_back(index);
if (out.size() == (size_t)max_nn)
return;
}
++iterator;
continue;
}
int first = host_octree.nodes[node_idx] >> 8;
iterator.gotoNextLevel(first, getBitsNum(children_mask));
}
}
void pcl::device::OctreeImpl::approxNearestSearchHost(const PointType& query, int& out_index, float& sqr_dist) const
{
float3 minp = octreeGlobal.minp;
float3 maxp = octreeGlobal.maxp;
int node_idx = 0;
bool out_of_root = query.x < minp.x || query.y < minp.y || query.z < minp.z || query.x > maxp.x || query.y > maxp.y || query.z > maxp.z;
if(!out_of_root)
{
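// Morton code of the query: x/y/z are quantized inside the root bounding box and bit-interleaved, so each 3-bit level code extracted below selects one of the 8 child octants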
int code = CalcMorton(minp, maxp)(query);
int level = 0;
for(;;)
{
int mask_pos = 1 << Morton::extractLevelCode(code, level);
int node = host_octree.nodes[node_idx];
int mask = node & 0xFF;
if(getBitsNum(mask) == 0) // leaf
break;
if ( (mask & mask_pos) == 0) // no child
break;
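// descend: index of the first child (upper bits of the node word) plus the number of existing children stored before this octant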
node_idx = (node >> 8) + getBitsNum(mask & (mask_pos - 1));
++level;
}
}
int beg = host_octree.begs[node_idx];
int end = host_octree.ends[node_idx];
sqr_dist = std::numeric_limits<float>::max();
for(int i = beg; i < end; ++i)
{
float point_x = host_octree.points_sorted[i ];
float point_y = host_octree.points_sorted[i + host_octree.points_sorted_step ];
float point_z = host_octree.points_sorted[i + host_octree.points_sorted_step * 2];
float dx = (point_x - query.x);
float dy = (point_y - query.y);
float dz = (point_z - query.z);
float d2 = dx * dx + dy * dy + dz * dz;
if (sqr_dist > d2)
{
sqr_dist = d2;
out_index = i;
}
}
out_index = host_octree.indices[out_index];
}
|
cf148a64dc112c382c30e71e9d0020b603f9de33.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <amgx_timer.h>
#include <sstream>
#include <algorithm>
#ifdef AMGX_WITH_MPI
#include <mpi.h>
#endif
#ifdef AMGX_USE_VAMPIR_TRACE
#include <vt_user.h>
#endif
namespace amgx
{
int nvtxRange::color_counter = 0;
nvtxRange::nvtxRange(const char* name, int color)
{
#ifdef NVTX_RANGES
static const uint32_t colors[] = { 0xff00ff00, 0xff0000ff, 0xffffff00, 0xffff00ff, 0xff00ffff, 0xffff0000, 0xffffffff };
static const int num_colors = sizeof(colors)/sizeof(uint32_t);
int color_id = color;
if (color_id < 0 || color_id >= num_colors)
{
color_id = color_counter++ % num_colors;
}
color_id = color_id%num_colors;
nvtxEventAttributes_t eventAttrib = {0};
eventAttrib.version = NVTX_VERSION;
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
eventAttrib.colorType = NVTX_COLOR_ARGB;
eventAttrib.color = colors[color_id];
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
eventAttrib.message.ascii = name;
nvtxRangePushEx(&eventAttrib);
#endif
};
nvtxRange::~nvtxRange()
{
#ifdef NVTX_RANGES
roctxRangePop();
#endif
}
#ifdef AMGX_USE_CPU_PROFILER
std::ostream &Profiler_entry::print(std::ostream &out, int depth, int max_depth, double total_time, double parent_time) const
{
for ( int i = 0 ; i < depth ; ++i )
{
out << "| ";
}
out << std::setw(WIDTH) << std::setfill('.') << std::left << m_name;
for ( int i = 0 ; i < max_depth - depth ; ++i )
{
out << "..";
}
out << " |" << std::setw(10) << std::setfill(' ') << std::right << std::fixed << std::setprecision(3) << get_time_in_ms() << " |";
double abs_prct = 100.0 * get_time_in_ms() / total_time;
out << std::setw(7) << std::right << std::fixed << std::setprecision(2) << abs_prct << " % |";
double rel_prct = 100.0 * get_time_in_ms() / parent_time;
out << std::setw(7) << std::right << std::fixed << std::setprecision(2) << rel_prct << " % |" << std::endl;
// Early exit if no child.
if ( m_children.empty() )
{
return out;
}
// Children.
double sum(0.0);
for ( int i = 0, n_children = m_children.size() ; i < n_children ; ++i )
{
m_children[i].print(out, depth + 1, max_depth, total_time, get_time_in_ms());
sum += m_children[i].get_time_in_ms();
}
// Unknown fraction of time.
for ( int i = 0 ; i < depth + 1 ; ++i )
{
out << "| ";
}
out << std::setw(WIDTH) << std::setfill('.') << std::left << "self (excluding children)";
for ( int i = 0 ; i < max_depth - depth - 1 ; ++i )
{
out << "..";
}
double unknown_time = get_time_in_ms() - sum;
out << " |" << std::setw(10) << std::setfill(' ') << std::right << std::fixed << std::setprecision(3) << unknown_time << " |";
abs_prct = 100.0 * unknown_time / total_time;
out << std::setw(7) << std::right << std::fixed << std::setprecision(2) << abs_prct << " % |";
rel_prct = 100.0 * unknown_time / get_time_in_ms();
out << std::setw(7) << std::right << std::fixed << std::setprecision(2) << rel_prct << " % |";
if ( rel_prct >= 0.10 ) // Add a marker if >10% is unknown.
{
out << " *!!*";
}
out << std::endl;
return out;
}
Profiler_tree &Profiler_tree::get_instance()
{
static Profiler_tree s_instance;
return s_instance;
}
Profiler_tree::Profiler_tree() : m_max_depth(0), m_root("amgx")
#ifdef AMGX_WITH_MPI
, m_rank(-1) // We can't guarantee that MPI will be Initialized when it is built or destroyed.
#endif
{
m_stack.reserve(32);
m_stack.push_back(&m_root);
m_root.start();
m_first_start = high_resolution_clock::now();
}
Profiler_tree::~Profiler_tree()
{
m_root.set_time((std::chrono::duration_cast<std::chrono::nanoseconds>(m_last_stop - m_first_start)).count());
std::ostringstream buffer;
#ifdef AMGX_WITH_MPI
if (m_rank != -1)
{
buffer << "amgx_cpu_profile." << m_rank << ".txt";
}
else
#endif
buffer << "amgx_cpu_profile.txt";
std::ofstream file( buffer.str().c_str(), std::ios::out );
for ( int i = 0, end = 64 + 2 * m_max_depth ; i < end ; ++i )
{
file << " ";
}
file << " | Time (ms) | Absolute | Relative |" << std::endl;
m_root.print(file, 0, m_max_depth, m_root.get_time_in_ms(), m_root.get_time_in_ms() );
file.close();
}
void Profiler_tree::push( const char *name )
{
#ifdef AMGX_WITH_MPI
if (m_rank == -1)
{
int flag = 0;
MPI_Initialized(&flag); // We want to make sure MPI_Init has been called.
if (flag)
{
MPI_Comm_rank(MPI_COMM_WORLD, &m_rank);
}
}
#endif
Profiler_entry *top_of_stack = m_stack.back();
Profiler_entry *node = top_of_stack->add_child(name);
m_stack.push_back(node);
m_max_depth = ::max( m_max_depth, (int) m_stack.size() );
node->start();
}
void Profiler_tree::pop()
{
assert(!m_stack.empty());
Profiler_entry *top_of_stack = m_stack.back();
top_of_stack->stop();
m_stack.pop_back();
m_last_stop = high_resolution_clock::now();
}
void Profiler_tree::mark(const char *c_name, const char *msg)
{
#ifdef AMGX_USE_VAMPIR_TRACE
typedef std::map<std::string, int>::iterator Iterator;
std::string name(c_name);
Iterator it = m_markers.find(name);
int tag = 0;
if ( it == m_markers.end() )
{
tag = VT_User_marker_def__(c_name, VT_MARKER_TYPE_HINT);
m_markers[name] = tag;
}
else
{
tag = it->second;
}
VT_User_marker__(tag, msg);
#endif
}
Profiler_raii::Profiler_raii( const char *name, const char *filename, int lineno ) : m_name(name)
{
Profiler_tree &tree = Profiler_tree::get_instance();
tree.push(name);
#ifdef AMGX_USE_VAMPIR_TRACE
VT_User_start__(m_name.c_str(), filename, lineno);
#endif
}
Profiler_raii::~Profiler_raii()
{
#ifdef AMGX_USE_VAMPIR_TRACE
VT_User_end__(m_name.c_str());
#endif
Profiler_tree &tree = Profiler_tree::get_instance();
tree.pop();
}
#endif // defined AMGX_USE_CPU_PROFILER
static TimerMap amgxTimers;
TimerMap &getTimers()
{
return amgxTimers;
}
} // end namespace amgx
| cf148a64dc112c382c30e71e9d0020b603f9de33.cu | /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <amgx_timer.h>
#include <sstream>
#include <algorithm>
#ifdef AMGX_WITH_MPI
#include <mpi.h>
#endif
#ifdef AMGX_USE_VAMPIR_TRACE
#include <vt_user.h>
#endif
namespace amgx
{
int nvtxRange::color_counter = 0;
nvtxRange::nvtxRange(const char* name, int color)
{
#ifdef NVTX_RANGES
static const uint32_t colors[] = { 0xff00ff00, 0xff0000ff, 0xffffff00, 0xffff00ff, 0xff00ffff, 0xffff0000, 0xffffffff };
static const int num_colors = sizeof(colors)/sizeof(uint32_t);
int color_id = color;
if (color_id < 0 || color_id >= num_colors)
{
color_id = color_counter++ % num_colors;
}
color_id = color_id%num_colors;
nvtxEventAttributes_t eventAttrib = {0};
eventAttrib.version = NVTX_VERSION;
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
eventAttrib.colorType = NVTX_COLOR_ARGB;
eventAttrib.color = colors[color_id];
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
eventAttrib.message.ascii = name;
nvtxRangePushEx(&eventAttrib);
#endif
};
nvtxRange::~nvtxRange()
{
#ifdef NVTX_RANGES
nvtxRangePop();
#endif
}
#ifdef AMGX_USE_CPU_PROFILER
std::ostream &Profiler_entry::print(std::ostream &out, int depth, int max_depth, double total_time, double parent_time) const
{
for ( int i = 0 ; i < depth ; ++i )
{
out << "| ";
}
out << std::setw(WIDTH) << std::setfill('.') << std::left << m_name;
for ( int i = 0 ; i < max_depth - depth ; ++i )
{
out << "..";
}
out << " |" << std::setw(10) << std::setfill(' ') << std::right << std::fixed << std::setprecision(3) << get_time_in_ms() << " |";
double abs_prct = 100.0 * get_time_in_ms() / total_time;
out << std::setw(7) << std::right << std::fixed << std::setprecision(2) << abs_prct << " % |";
double rel_prct = 100.0 * get_time_in_ms() / parent_time;
out << std::setw(7) << std::right << std::fixed << std::setprecision(2) << rel_prct << " % |" << std::endl;
// Early exit if no child.
if ( m_children.empty() )
{
return out;
}
// Children.
double sum(0.0);
for ( int i = 0, n_children = m_children.size() ; i < n_children ; ++i )
{
m_children[i].print(out, depth + 1, max_depth, total_time, get_time_in_ms());
sum += m_children[i].get_time_in_ms();
}
// Unknown fraction of time.
for ( int i = 0 ; i < depth + 1 ; ++i )
{
out << "| ";
}
out << std::setw(WIDTH) << std::setfill('.') << std::left << "self (excluding children)";
for ( int i = 0 ; i < max_depth - depth - 1 ; ++i )
{
out << "..";
}
double unknown_time = get_time_in_ms() - sum;
out << " |" << std::setw(10) << std::setfill(' ') << std::right << std::fixed << std::setprecision(3) << unknown_time << " |";
abs_prct = 100.0 * unknown_time / total_time;
out << std::setw(7) << std::right << std::fixed << std::setprecision(2) << abs_prct << " % |";
rel_prct = 100.0 * unknown_time / get_time_in_ms();
out << std::setw(7) << std::right << std::fixed << std::setprecision(2) << rel_prct << " % |";
if ( rel_prct >= 0.10 ) // Add a marker if >10% is unknown.
{
out << " *!!*";
}
out << std::endl;
return out;
}
Profiler_tree &Profiler_tree::get_instance()
{
static Profiler_tree s_instance;
return s_instance;
}
Profiler_tree::Profiler_tree() : m_max_depth(0), m_root("amgx")
#ifdef AMGX_WITH_MPI
, m_rank(-1) // We can't guarantee that MPI will be Initialized when it is built or destroyed.
#endif
{
m_stack.reserve(32);
m_stack.push_back(&m_root);
m_root.start();
m_first_start = high_resolution_clock::now();
}
Profiler_tree::~Profiler_tree()
{
m_root.set_time((std::chrono::duration_cast<std::chrono::nanoseconds>(m_last_stop - m_first_start)).count());
std::ostringstream buffer;
#ifdef AMGX_WITH_MPI
if (m_rank != -1)
{
buffer << "amgx_cpu_profile." << m_rank << ".txt";
}
else
#endif
buffer << "amgx_cpu_profile.txt";
std::ofstream file( buffer.str().c_str(), std::ios::out );
for ( int i = 0, end = 64 + 2 * m_max_depth ; i < end ; ++i )
{
file << " ";
}
file << " | Time (ms) | Absolute | Relative |" << std::endl;
m_root.print(file, 0, m_max_depth, m_root.get_time_in_ms(), m_root.get_time_in_ms() );
file.close();
}
void Profiler_tree::push( const char *name )
{
#ifdef AMGX_WITH_MPI
if (m_rank == -1)
{
int flag = 0;
MPI_Initialized(&flag); // We want to make sure MPI_Init has been called.
if (flag)
{
MPI_Comm_rank(MPI_COMM_WORLD, &m_rank);
}
}
#endif
Profiler_entry *top_of_stack = m_stack.back();
Profiler_entry *node = top_of_stack->add_child(name);
m_stack.push_back(node);
m_max_depth = std::max( m_max_depth, (int) m_stack.size() );
node->start();
}
void Profiler_tree::pop()
{
assert(!m_stack.empty());
Profiler_entry *top_of_stack = m_stack.back();
top_of_stack->stop();
m_stack.pop_back();
m_last_stop = high_resolution_clock::now();
}
void Profiler_tree::mark(const char *c_name, const char *msg)
{
#ifdef AMGX_USE_VAMPIR_TRACE
typedef std::map<std::string, int>::iterator Iterator;
std::string name(c_name);
Iterator it = m_markers.find(name);
int tag = 0;
if ( it == m_markers.end() )
{
tag = VT_User_marker_def__(c_name, VT_MARKER_TYPE_HINT);
m_markers[name] = tag;
}
else
{
tag = it->second;
}
VT_User_marker__(tag, msg);
#endif
}
Profiler_raii::Profiler_raii( const char *name, const char *filename, int lineno ) : m_name(name)
{
Profiler_tree &tree = Profiler_tree::get_instance();
tree.push(name);
#ifdef AMGX_USE_VAMPIR_TRACE
VT_User_start__(m_name.c_str(), filename, lineno);
#endif
}
Profiler_raii::~Profiler_raii()
{
#ifdef AMGX_USE_VAMPIR_TRACE
VT_User_end__(m_name.c_str());
#endif
Profiler_tree &tree = Profiler_tree::get_instance();
tree.pop();
}
#endif // defined AMGX_USE_CPU_PROFILER
static TimerMap amgxTimers;
TimerMap &getTimers()
{
return amgxTimers;
}
} // end namespace amgx
|
e8099a402f144b2f2d90d023d7ad2b09eceb9eef.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombination<
float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| e8099a402f144b2f2d90d023d7ad2b09eceb9eef.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombination<
float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
53f74f1f73cbe1c7ce95f3dd8e801dc87266b696.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
const char* dgemm_desc = "Dlaiton, best way dgemm.";
#define THREADSPERBLOCK 256
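// naive GEMM: one thread computes one element of C; the indexing assumes column-major storage (element (i,j) at i + j*n)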
__global__
void dlaitonComeback(int n,double *A,double *B,double *C){ // double precision to match the host dgemm interface
int tid=threadIdx.x+(blockDim.x*blockIdx.x);
if(tid>=n*n) return;
int i=tid%n;
int j=tid/n;
for(int k=0;k<n;k++){
C[i+j*n]+=A[i+k*n]*B[k+j*n];
}
}
__host__
void square_dgemm(int n, double* A, double* B, double* C){
double *d_A,*d_B,*d_C;
int n2=n*n;
size_t size=n2*sizeof(double); // each matrix holds n*n doubles
hipMalloc((void**)&d_A,size);
hipMalloc((void**)&d_B,size);
hipMalloc((void**)&d_C,size);
hipMemcpy(d_A,A,size,hipMemcpyHostToDevice);
hipMemcpy(d_B,B,size,hipMemcpyHostToDevice);
hipMemcpy(d_C,C,size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( dlaitonComeback), dim3((n2+THREADSPERBLOCK-1)/THREADSPERBLOCK),dim3(THREADSPERBLOCK), 0, 0, n,d_A,d_B,d_C);
hipMemcpy(C,d_C,size,hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
} | 53f74f1f73cbe1c7ce95f3dd8e801dc87266b696.cu | const char* dgemm_desc = "Dlaiton, best way dgemm.";
#define THREADSPERBLOCK 256
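// naive GEMM: one thread computes one element of C; the indexing assumes column-major storage (element (i,j) at i + j*n)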
__global__
void dlaitonComeback(int n,double *A,double *B,double *C){ // double precision to match the host dgemm interface
int tid=threadIdx.x+(blockDim.x*blockIdx.x);
if(tid>=n*n) return;
int i=tid%n;
int j=tid/n;
for(int k=0;k<n;k++){
C[i+j*n]+=A[i+k*n]*B[k+j*n];
}
}
__host__
void square_dgemm(int n, double* A, double* B, double* C){
double *d_A,*d_B,*d_C;
int n2=n*n;
size_t size=n2*sizeof(double); // each matrix holds n*n doubles
cudaMalloc((void**)&d_A,size);
cudaMalloc((void**)&d_B,size);
cudaMalloc((void**)&d_C,size);
cudaMemcpy(d_A,A,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_B,B,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_C,C,size,cudaMemcpyHostToDevice);
dlaitonComeback<<<(n2+THREADSPERBLOCK-1)/THREADSPERBLOCK,THREADSPERBLOCK>>>(n,d_A,d_B,d_C);
cudaMemcpy(C,d_C,size,cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
} |